repo_name stringlengths 6–130 | hexsha list | file_path list | code list | apis list |
---|---|---|---|---|
RoliKhanna/Anchor-Free
|
[
"e3d599b7cbdc988ad7720c1e8324cabe87917d59"
] |
[
"baseline/spa.py"
] |
[
"\nfrom nltk.corpus import reuters\nimport sys\nimport numpy as np\nfrom scipy import optimize\n\n# Loading data here\ntrain_documents, train_categories = zip(*[(reuters.raw(i), reuters.categories(i)) for i in reuters.fileids() if i.startswith('training/')])\ntest_documents, test_categories = zip(*[(reuters.raw(i), reuters.categories(i)) for i in reuters.fileids() if i.startswith('test/')])\n\ndef SPA(X, r):\n cols = []\n m, n = X.shape\n assert(m == n)\n for _ in xrange(r):\n col_norms = np.sum(np.abs(X) ** 2, axis=0)\n col_ind = np.argmax(col_norms)\n cols.append(col_ind)\n col = np.reshape(X[:, col_ind], (n, 1))\n X = np.dot((np.eye(n) - np.dot(col, col.T) / col_norms[col_ind]), X)\n return cols\n\ndef col2norm(X):\n return np.sum(np.abs(X) ** 2,axis=0)\n\ndef NNLSFrob(X, cols):\n\n ncols = X.shape[1]\n H = np.zeros((len(cols), ncols))\n for i in xrange(ncols):\n sol, res = optimize.nnls(X[:, cols], X[:, i])\n H[:, i] = sol\n rel_res = np.linalg.norm(X - np.dot(X[:, cols], H), 'fro')\n rel_res /= np.linalg.norm(X, 'fro')\n return H, rel_res\n\ndef ComputeNMF(data, colnorms, r):\n\n data = np.copy(data)\n colinv = np.linalg.pinv(np.diag(colnorms))\n\n A = np.dot(data, colinv)\n _, S, Vt = np.linalg.svd(A)\n A = np.dot(np.diag(S), Vt)\n cols = SPA(A, r)\n\n H, rel_res = NNLSFrob(data, cols)\n return cols, H, rel_res\n\ndef ParseMatrix(matpath):\n\tmatrix = []\n\twith open(matpath, 'r') as f:\n for row in f:\n matrix.append([float(v) for v in row.split()[1:]])\n\treturn np.array(matrix)\n\ndef ParseColnorms(colpath):\n\tnorms = []\n\twith open(colpath, 'r') as f:\n for line in f:\n norms.append(float(line.split()[-1]))\n\treturn norms\n\ndata = ParseMatrix(train_documents)\ncolnorms = ParseColnorms(train_categories)\nr = 4\ncols, H, rel_res = ComputeNMF(data, colnorms, r)\ncols.sort()\n\nprint(\"Final result: \", rel_res)\n\n# r is separation rank, X is dataset\n"
] |
[
[
"numpy.diag",
"numpy.dot",
"numpy.linalg.svd",
"numpy.abs",
"numpy.reshape",
"numpy.eye",
"scipy.optimize.nnls",
"numpy.linalg.norm",
"numpy.copy",
"numpy.argmax",
"numpy.array"
]
] |
AllenNeuralDynamics/ephys-framework-tests
|
[
"ee940afeab54e5e25765a903a6b65f2e95be4c48",
"ee940afeab54e5e25765a903a6b65f2e95be4c48"
] |
[
"pgaf/ingest_stimulus.py",
"django/helpers.py"
] |
[
"from allensdk.brain_observatory.ecephys.ecephys_project_cache import EcephysProjectCache\nimport os\nfrom sqlalchemy import delete\nfrom sqlalchemy.orm import sessionmaker\n\nimport json\nimport numpy as np\nimport pandas as pd\nfrom datetime import date,datetime,timedelta\n\nimport sqla_schema as sch\n\nimport ingest\n\ndata_directory = 'C:\\\\Users\\\\yoni.browning\\\\Documents\\\\DataJoint\\\\AllenData'\nmanifest_path = os.path.join(data_directory, \"manifest.json\")\n\ndef get_first_value(value):\n if isinstance(value,str):\n if '[' in value and ']' in value:\n value = np.fromstring(value[1:-1],sep = ',')\n value = value[0]\n else:\n value = float(value)\n elif isinstance(value,np.ndarray) and len(value)==1:\n value = value[0]\n elif isinstance(value,int) or isinstance(value,float):\n value =value\n else:\n value = None\n return value\n\ndef query_to_df(Q):\n df = pd.read_sql(Q.statement, Q.session.bind)\n return df\n\ndef ingest_session_stimulus(session, engine):\n with sessionmaker(engine)() as dbsession:\n rf_stim_table = session.stimulus_presentations\n rf_stim_table.reset_index(inplace=True)\n rf_stim_table.insert(0,'session_id',\\\n (session.ecephys_session_id*np.ones(len(rf_stim_table))).\\\n astype(int))\n K = rf_stim_table.keys()\n # Fix 'null' to be None variables\n for ii in range(len(K)):\n rf_stim_table[K[ii]][rf_stim_table[K[ii]]=='null'] = None\n # Convert strings to arrays\n rf_stim_table['phase'] = rf_stim_table['phase'].apply(get_first_value)\n rf_stim_table['size'] = rf_stim_table['size'].apply(get_first_value)\n rf_stim_table['temporal_frequency'] =\\\n rf_stim_table['temporal_frequency'].apply(get_first_value).astype(float)\n rf_stim_table['spatial_frequency'] = \\\n rf_stim_table['spatial_frequency'].apply(get_first_value).astype(float)\n rf_stim_table.to_sql('stimulus', engine, index_label='id', if_exists='append')\n \n\n\n\ndef main():\n # Connect to the database (kinda worthless without this)\n engine = ingest.connect_to_db()\n print('Connected to engine')\n # Distroy any existing versions of these table\n #sch.Base.metadata.drop_all(engine, tables=(sch.StimulusType.__table__,))\n #sch.Base.metadata.drop_all(engine, tables=(sch.Stimulus.__table__,))\n print('Killed old tables')\n\n # Generate a new version of these tables\n sch.Base.metadata.create_all(engine, tables=(sch.StimulusType.__table__,))\n #sch.Base.metadata.create_all(engine, tables=(sch.Stimulus.__table__,))\n print('Spawned new tables')\n\n # \n cache = ingest.get_ecephys_cache(manifest = manifest_path)\n \n # Add Stimulus Type arguments. \n # This bit is a little funky, but you only need to do it once.\n session = cache.get_session_data(715093703)\n print('Grabbed some data')\n\n rf_stim_table = session.stimulus_presentations\n stimulus_name_df = \\\n pd.DataFrame(data={'stimulus_name': rf_stim_table['stimulus_name'].unique()})\n stimulus_name_df.to_sql('stimulus_type', engine, index_label='id', if_exists='append')\n print('Added to DB')\n\n # loop through these sessions, get the data\n #ingest_session_stimulus(session, engine)\n # Actually do the ingest\n \n\n\n# Run it!!!!\nif __name__ == '__main__':main()\n \n \n ",
"import numpy as np\nimport numbers\nimport ast\nfrom allensdk.brain_observatory.ecephys.ecephys_project_cache import EcephysProjectCache\n\ndef get_session(session_id):\n manifest = '/allen/scratch/aindtemp/david.feng/epc/manifest.json'\n cache = EcephysProjectCache(manifest=manifest)\n return cache.get_session_data(session_id)\n\ndef spike_count(start_trigger,end_trigger,spike_ts):\n count = [None]*len(start_trigger)\n for ii,trigger in enumerate(start_trigger):\n count[ii] = np.sum(np.logical_and(spike_ts>=trigger,spike_ts<end_trigger[ii]))\n return count\n\ndef clean_string(v):\n if v is None:\n return None\n\n if isinstance(v, numbers.Number):\n return v\n\n v = ast.literal_eval(v.strip())\n\n if isinstance(v, list):\n return v[0]\n return v\n"
] |
[
[
"numpy.fromstring",
"pandas.read_sql"
],
[
"numpy.logical_and"
]
] |
mamingjie-China/Paddle
|
[
"68c6160e639be38c57a7dd831f7b841b33e92676"
] |
[
"python/paddle/fluid/tests/unittests/test_sort_op.py"
] |
[
"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport unittest\nimport paddle\nimport paddle.fluid as fluid\nimport paddle.imperative as imperative\nimport paddle.fluid.layers as layers\nimport numpy as np\nimport six\nimport paddle.fluid.core as core\n\n\nclass TestSortOnCPU(unittest.TestCase):\n def setUp(self):\n self.place = core.CPUPlace()\n\n def test_api_0(self):\n with fluid.program_guard(fluid.Program()):\n input = fluid.data(name=\"input\", shape=[2, 3, 4], dtype=\"float32\")\n output = paddle.sort(x=input)\n exe = fluid.Executor(self.place)\n data = np.array(\n [[[5, 8, 9, 5], [0, 0, 1, 7], [6, 9, 2, 4]],\n [[5, 2, 4, 2], [4, 7, 7, 9], [1, 7, 0, 6]]],\n dtype='float32')\n result, = exe.run(feed={'input': data}, fetch_list=[output])\n np_result = np.sort(result)\n self.assertEqual((result == np_result).all(), True)\n\n def test_api_1(self):\n with fluid.program_guard(fluid.Program()):\n input = fluid.data(name=\"input\", shape=[2, 3, 4], dtype=\"float32\")\n output = paddle.sort(x=input, axis=1)\n exe = fluid.Executor(self.place)\n data = np.array(\n [[[5, 8, 9, 5], [0, 0, 1, 7], [6, 9, 2, 4]],\n [[5, 2, 4, 2], [4, 7, 7, 9], [1, 7, 0, 6]]],\n dtype='float32')\n result, = exe.run(feed={'input': data}, fetch_list=[output])\n np_result = np.sort(result, axis=1)\n self.assertEqual((result == np_result).all(), True)\n\n\nclass TestSortOnGPU(TestSortOnCPU):\n def init_place(self):\n if core.is_compiled_with_cuda():\n self.place = core.CUDAPlace(0)\n else:\n self.place = core.CPUPlace()\n\n\nclass TestSortDygraph(unittest.TestCase):\n def setUp(self):\n self.input_data = np.random.rand(10, 10)\n if core.is_compiled_with_cuda():\n self.place = core.CUDAPlace(0)\n else:\n self.place = core.CPUPlace()\n\n def test_api_0(self):\n with imperative.guard(self.place):\n var_x = imperative.to_variable(self.input_data)\n out = paddle.sort(var_x)\n self.assertEqual((np.sort(self.input_data) == out.numpy()).all(),\n True)\n\n def test_api_1(self):\n with imperative.guard(self.place):\n var_x = imperative.to_variable(self.input_data)\n out = paddle.sort(var_x, axis=-1)\n self.assertEqual(\n (np.sort(\n self.input_data, axis=-1) == out.numpy()).all(), True)\n"
] |
[
[
"numpy.array",
"numpy.random.rand",
"numpy.sort"
]
] |
pg1647/self_supervised_rotnet
|
[
"5edfa31dd7b2dde8e1d093037c6c8c9745ba8f71"
] |
[
"rotnet_nonlinear_finetuned.py"
] |
[
"from __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport torch\r\nimport torchvision\r\nfrom torchvision import datasets, transforms\r\nimport torch.nn.functional as F\r\nimport torch.optim as optim\r\n\r\nimport models as mdl\r\n\r\nimport numpy as np\r\nimport argparse\r\nimport pickle\r\nimport os\r\nimport datetime\r\nimport time\r\nimport math\r\nimport shutil\r\n\r\ndef get_parser():\r\n parser = argparse.ArgumentParser(description='Non Linear Classifier on RotNet with RotNet also Finetuned')\r\n\r\n parser.add_argument('--batch_size', type=int, default=128,\r\n help='input batch size for training (default: 128)')\r\n\r\n parser.add_argument('--epochs', type=int, default=1,\r\n help='number of epochs to train (default: 1)')\r\n\r\n parser.add_argument('--lr', type=float, default=0.1,\r\n help='learning rate (default: 0.1)')\r\n\r\n parser.add_argument('--device', type=str, default='cuda',\r\n help='Device to be used (acceptable values: cuda, cpu) (default: cuda)')\r\n\r\n parser.add_argument('--milestones', nargs=\"+\", type=int, default=[20,40,45,50],\r\n help='Milestones for learning rate decay (default: [20,40,45,50])')\r\n\r\n parser.add_argument('--nins', type=int, default=4,\r\n help='number of nin blocks to comprise the model (default: 4)')\r\n\r\n # not sure if I need\r\n parser.add_argument('--layer', type=int, default=2,\r\n help='rotnet layer to take features from to use for classifier (default: 2)')\r\n\r\n parser.add_argument('--opt', type=str, default='sgd',\r\n help='Optimizer to be used (acceptable values: sgd, adam) (default: sgd)')\r\n\r\n parser.add_argument('--momentum', type=float, default=0.9,\r\n help='Momentum for optimizer (default: 0.1)')\r\n\r\n parser.add_argument('--weight_decay', default=5e-4, type=float)\r\n\r\n parser.add_argument('--print_after_batches', type=int, default=100,\r\n help='Print training progress every print_after_batches batches (default: 2)')\r\n\r\n parser.add_argument('--results_dir', default='results/', type=str)\r\n\r\n parser.add_argument('--suffix', default='', type=str, \r\n help=\"When I need to custom name the final results folder, must begin with _\")\r\n\r\n parser.add_argument('--suffix_rot', default='', type=str, \r\n help=\"When I need to load a custom named rotnet model\")\r\n\r\n parser.add_argument('--rot_model_type', default='', type=str, \r\n help=\"Which rotation model to load. Options: '', '_best_acc', '_epoch_100' (default: '') \")\r\n\r\n return parser \r\n\r\n\r\ndef train(args, rot_network, class_network, train_loader, rot_optimizer, class_optimizer, mult, rot_scheduler, class_scheduler, epoch, in_features):\r\n rot_network.train()\r\n class_network.train()\r\n total_images_till_now = 0\r\n total_images = len(train_loader.dataset)*mult\r\n for batch_idx, (data, target) in enumerate(train_loader):\r\n data = data.to(args.device)\r\n target = target.to(args.device)\r\n rot_optimizer.zero_grad()\r\n class_optimizer.zero_grad()\r\n _, out_dict, layer_num2name_dict = rot_network(data, [args.layer])\r\n output = class_network(out_dict[layer_num2name_dict[args.layer]].view(-1, in_features))\r\n loss = F.cross_entropy(output, target)\r\n loss.backward()\r\n rot_optimizer.step()\r\n class_optimizer.step()\r\n total_images_till_now = total_images_till_now + len(data)\r\n if batch_idx % args.print_after_batches == 0:\r\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\r\n epoch+1, total_images_till_now, total_images,\r\n 100. 
* total_images_till_now/total_images, loss.item()))\r\n \r\n\r\n rot_scheduler.step()\r\n class_scheduler.step()\r\n\r\n return\r\n\r\n\r\ndef test(args, rot_network, class_network, test_loader, mult, datatype, in_features):\r\n rot_network.eval()\r\n class_network.eval()\r\n test_loss = 0\r\n correct = 0\r\n for data, target in test_loader:\r\n data = data.to(args.device)\r\n target = target.to(args.device)\r\n _, out_dict, layer_num2name_dict = rot_network(data, [args.layer])\r\n output = class_network(out_dict[layer_num2name_dict[args.layer]].view(-1, in_features))\r\n test_loss += F.cross_entropy(output, target, reduction='sum').item() # sum up batch loss\r\n pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability\r\n correct += pred.eq(target.data.view_as(pred)).cpu().sum()\r\n\r\n total_images = len(test_loader.dataset)*mult\r\n test_loss /= total_images\r\n test_acc = 100. * correct / total_images\r\n print('\\n{} set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\r\n datatype, test_loss, correct, total_images, test_acc))\r\n\r\n return test_loss, test_acc\r\n \r\n\r\n\r\n\r\ndef main(args):\r\n # hard coded values\r\n in_channels = 3 # rgb channels of orignal image fed to rotnet\r\n if args.layer == 1:\r\n in_features = int(96*16*16)\r\n else:\r\n in_features = int(192*8*8)\r\n rot_classes = 4\r\n out_classes = 10 \r\n lr_decay_rate = 0.2 # lr is multiplied by decay rate after a milestone epoch is reached\r\n mult = 1 # data become mult times \r\n ####################\r\n\r\n train_transform = transforms.Compose([transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), \r\n transforms.Normalize((125.3/255, 123.0/255, 113.9/255), (63.0/255, 62.1/255, 66.7/255))])\r\n test_transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((125.3/255, 123.0/255, 113.9/255), (63.0/255, 62.1/255, 66.7/255))])\r\n \r\n trainset = datasets.CIFAR10(root='results/', train=True, download=True, transform=train_transform)\r\n testset = datasets.CIFAR10(root='results/', train=False, download=True, transform=test_transform)\r\n\r\n train_loader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=0)\r\n test_loader = torch.utils.data.DataLoader(testset, batch_size=args.batch_size, shuffle=False, num_workers=0)\r\n\r\n rot_network = mdl.RotNet(in_channels=in_channels, num_nin_blocks=args.nins, out_classes=rot_classes).to(args.device) \r\n class_network = mdl.NonLinearClassifier(in_channels=in_features, out_classes=out_classes).to(args.device)\r\n\r\n if args.opt == 'adam':\r\n rot_optimizer = optim.Adam(rot_network.parameters(), lr=args.lr, weight_decay=args.weight_decay)\r\n class_optimizer = optim.Adam(class_network.parameters(), lr=args.lr, weight_decay=args.weight_decay)\r\n else:\r\n rot_optimizer = optim.SGD(rot_network.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)\r\n class_optimizer = optim.SGD(class_network.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay) \r\n\r\n rot_scheduler = optim.lr_scheduler.MultiStepLR(rot_optimizer, milestones=args.milestones, gamma=lr_decay_rate)\r\n class_scheduler = optim.lr_scheduler.MultiStepLR(class_optimizer, milestones=args.milestones, gamma=lr_decay_rate)\r\n\r\n ####################################### Saving information\r\n results_dict = {}\r\n # These will store the values for best test accuracy model\r\n results_dict['train_loss'] = -1\r\n 
results_dict['train_acc'] = -1\r\n results_dict['test_loss'] = -1\r\n results_dict['test_acc'] = -1\r\n results_dict['best_acc_epoch'] = -1\r\n # For storing training history\r\n results_dict['train_loss_hist'] = []\r\n results_dict['train_acc_hist'] = []\r\n results_dict['test_loss_hist'] = []\r\n results_dict['test_acc_hist'] = []\r\n\r\n # directories to save models\r\n checkpoint_path = os.path.join(args.results_dir, 'model.pth')\r\n checkpoint_path_best_acc = os.path.join(args.results_dir, 'model_best_acc.pth')\r\n\r\n # Setting rotation model weights and setting it in eval only model\r\n rot_network_file = os.path.join('results/rotnet_'+str(args.nins)+'_ninblocks'+args.suffix_rot, 'model'+args.rot_model_type+'.pth')\r\n rot_model_dict = torch.load(rot_network_file)\r\n rot_network.load_state_dict(rot_model_dict['model_state_dict'])\r\n\r\n\r\n #########\r\n test_acc_max = -math.inf\r\n loop_start_time = time.time()\r\n checkpoint = {}\r\n for epoch in range(args.epochs):\r\n train(args, rot_network, class_network, train_loader, rot_optimizer, class_optimizer, mult, rot_scheduler, class_scheduler, epoch, in_features)\r\n \r\n train_loss, train_acc = test(args, rot_network, class_network, train_loader, mult, 'Train', in_features)\r\n results_dict['train_loss_hist'].append(train_loss)\r\n results_dict['train_acc_hist'].append(train_acc)\r\n \r\n test_loss, test_acc = test(args, rot_network, class_network, test_loader, mult, 'Test', in_features)\r\n results_dict['test_loss_hist'].append(test_loss)\r\n results_dict['test_acc_hist'].append(test_acc)\r\n print('Epoch {} finished --------------------------------------------------------------------------'.format(epoch+1))\r\n \r\n checkpoint = {'class_model_state_dict': class_network.state_dict(), \r\n 'class_optimizer_state_dict': class_optimizer.state_dict(), \r\n 'rot_model_state_dict': rot_network.state_dict(), \r\n 'rot_optimizer_state_dict': rot_optimizer.state_dict(), \r\n 'epoch':epoch+1, \r\n 'train_loss':train_loss, \r\n 'train_acc':train_acc, \r\n 'test_loss':test_loss, \r\n 'test_acc':test_acc}\r\n\r\n if test_acc > test_acc_max:\r\n test_acc_max = test_acc\r\n if os.path.isfile(checkpoint_path_best_acc):\r\n os.remove(checkpoint_path_best_acc)\r\n\r\n torch.save(checkpoint, checkpoint_path_best_acc)\r\n \r\n results_dict['best_acc_epoch'] = epoch+1\r\n results_dict['train_loss'] = train_loss\r\n results_dict['train_acc'] = train_acc\r\n results_dict['test_loss'] = test_loss\r\n results_dict['test_acc'] = test_acc\r\n\r\n\r\n torch.save(checkpoint, checkpoint_path)\r\n \r\n print('Total time for training loop = ', time.time()-loop_start_time)\r\n\r\n return results_dict\r\n\r\n\r\n\r\n# Starting the program execution from here\r\nif __name__ == '__main__':\r\n start_time = time.time()\r\n parser = get_parser()\r\n args = parser.parse_args()\r\n\r\n assert (args.layer >= 1 and args.layer <=5)\r\n\r\n args.results_dir = os.path.join(args.results_dir, 'rotnet_nonlinear_classifier_finetuned', 'nins_'+str(args.nins)+'_layer_'+str(args.layer)+args.suffix)\r\n\r\n assert (not os.path.exists(args.results_dir))\r\n\r\n if not os.path.exists(args.results_dir):\r\n os.makedirs(args.results_dir)\r\n\r\n results_file = os.path.join(args.results_dir, 'results_dict.pickle')\r\n\r\n print('--------------------------------------------------------')\r\n print('--------------------------------------------------------')\r\n print('Experiment starting at ', datetime.datetime.now())\r\n print(' ')\r\n options = vars(args)\r\n keys = options.keys()\r\n 
for key in keys:\r\n print(key, ': ', options[key])\r\n print(' ')\r\n print('--------------------------------------------------------')\r\n print('--------------------------------------------------------')\r\n print(' ')\r\n print(' ')\r\n\r\n results_dict = main(args)\r\n \r\n # saving the configuration \r\n for key in keys:\r\n new_key = 'config_' + key\r\n results_dict[new_key] = options[key]\r\n\r\n with open(results_file, 'wb') as f:\r\n pickle.dump(results_dict, f)\r\n \r\n print('--------------------------------------------------------')\r\n print('--------------------------------------------------------')\r\n print('Total time for experiment: ', time.time()-start_time, ' seconds')\r\n print('--------------------------------------------------------')\r\n print('--------------------------------------------------------') \r\n\r\n"
] |
[
[
"torch.optim.lr_scheduler.MultiStepLR",
"torch.load",
"torch.nn.functional.cross_entropy",
"torch.utils.data.DataLoader",
"torch.save"
]
] |
kayfuku/fsdl-text-recognizer-2021-labs
|
[
"91fbf12d0fbee248c7bc97bcb47b71710fb62655"
] |
[
"lab1/text_recognizer/lit_models/base.py"
] |
[
"import argparse\nimport pytorch_lightning as pl\nimport torch\n\n\nOPTIMIZER = \"Adam\"\nLR = 1e-3\nLOSS = \"cross_entropy\"\nONE_CYCLE_TOTAL_STEPS = 100\n\n\nclass Accuracy(pl.metrics.Accuracy):\n \"\"\"Accuracy Metric with a hack.\"\"\"\n\n def update(self, preds: torch.Tensor, target: torch.Tensor) -> None:\n \"\"\"\n Metrics in Pytorch-lightning 1.2+ versions expect preds to be between 0 and 1 else fails with the ValueError:\n \"The `preds` should be probabilities, but values were detected outside of [0,1] range.\"\n This is being tracked as a bug in https://github.com/PyTorchLightning/metrics/issues/60.\n This method just hacks around it by normalizing preds before passing it in.\n Normalized preds are not necessary for accuracy computation as we just care about argmax().\n \"\"\"\n if preds.min() < 0 or preds.max() > 1:\n preds = torch.nn.functional.softmax(preds, dim=-1)\n super().update(preds=preds, target=target)\n\n\nclass BaseLitModel(pl.LightningModule): # pylint: disable=too-many-ancestors\n \"\"\"\n Generic PyTorch-Lightning class that must be initialized with a PyTorch module.\n \"\"\"\n\n def __init__(self, model, args: argparse.Namespace = None):\n super().__init__()\n self.model = model\n self.args = vars(args) if args is not None else {}\n\n optimizer = self.args.get(\"optimizer\", OPTIMIZER)\n self.optimizer_class = getattr(torch.optim, optimizer)\n\n self.lr = self.args.get(\"lr\", LR)\n\n loss = self.args.get(\"loss\", LOSS)\n if loss not in (\"ctc\", \"transformer\"):\n self.loss_fn = getattr(torch.nn.functional, loss)\n\n self.one_cycle_max_lr = self.args.get(\"one_cycle_max_lr\", None)\n self.one_cycle_total_steps = self.args.get(\n \"one_cycle_total_steps\", ONE_CYCLE_TOTAL_STEPS)\n\n self.train_acc = Accuracy()\n self.val_acc = Accuracy()\n self.test_acc = Accuracy()\n\n @staticmethod\n def add_to_argparse(parser):\n parser.add_argument(\n \"--optimizer\", type=str, default=OPTIMIZER,\n help=\"optimizer class from torch.optim\")\n parser.add_argument(\"--lr\", type=float, default=LR)\n parser.add_argument(\"--one_cycle_max_lr\", type=float, default=None)\n parser.add_argument(\"--one_cycle_total_steps\",\n type=int, default=ONE_CYCLE_TOTAL_STEPS)\n parser.add_argument(\n \"--loss\", type=str, default=LOSS,\n help=\"loss function from torch.nn.functional\")\n return parser\n\n def configure_optimizers(self):\n optimizer = self.optimizer_class(self.parameters(), lr=self.lr)\n if self.one_cycle_max_lr is None:\n return optimizer\n scheduler = torch.optim.lr_scheduler.OneCycleLR(\n optimizer=optimizer, max_lr=self.one_cycle_max_lr,\n total_steps=self.one_cycle_total_steps)\n return {\"optimizer\": optimizer, \"lr_scheduler\": scheduler, \"monitor\": \"val_loss\"}\n\n def forward(self, x):\n return self.model(x)\n\n def training_step(self, batch, batch_idx): # pylint: disable=unused-argument\n x, y = batch\n logits = self(x)\n loss = self.loss_fn(logits, y)\n self.log(\"train_loss\", loss)\n self.train_acc(logits, y)\n self.log(\"train_acc\", self.train_acc, on_step=False, on_epoch=True)\n return loss\n\n def validation_step(self, batch, batch_idx): # pylint: disable=unused-argument\n x, y = batch\n logits = self(x)\n loss = self.loss_fn(logits, y)\n self.log(\"val_loss\", loss, prog_bar=True)\n self.val_acc(logits, y)\n self.log(\"val_acc\", self.val_acc, on_step=False,\n on_epoch=True, prog_bar=True)\n\n def test_step(self, batch, batch_idx): # pylint: disable=unused-argument\n x, y = batch\n logits = self(x)\n self.test_acc(logits, y)\n self.log(\"test_acc\", 
self.test_acc, on_step=False, on_epoch=True)\n"
] |
[
[
"torch.optim.lr_scheduler.OneCycleLR",
"torch.nn.functional.softmax"
]
] |
igormq/sbrt2017
|
[
"a66c33a55970fb56d91d31a9509cac7ae7ccf4c5"
] |
[
"preprocessing.py"
] |
[
"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport math\nimport decimal\nimport string\nimport numpy as np\nfrom unidecode import unidecode\nimport logging\n\nfrom scipy import signal\nfrom scipy.fftpack import dct\nimport librosa\n\n\ndef round_half_up(number):\n return int(decimal.Decimal(number).quantize(decimal.Decimal('1'),\n rounding=decimal.ROUND_HALF_UP\n ))\n\n\ndef framesig(sig, frame_len, frame_step, winfunc=lambda x: np.ones((x,))):\n \"\"\"Frame a signal into overlapping frames.\n :param sig: the audio signal to frame.\n :param frame_len: length of each frame measured in samples.\n :param frame_step: number of samples after the start of the previous frame\n that the next frame should begin.\n :param winfunc: the analysis window to apply to each frame. By default no\n window is applied.\n :returns: an array of frames. Size is NUMFRAMES by frame_len.\n \"\"\"\n slen = len(sig)\n frame_len = int(round_half_up(frame_len))\n frame_step = int(round_half_up(frame_step))\n if slen <= frame_len:\n numframes = 1\n else:\n numframes = 1 + int(math.ceil((1.0 * slen - frame_len) / frame_step))\n\n padlen = int((numframes - 1) * frame_step + frame_len)\n\n zeros = np.zeros((padlen - slen,))\n padsignal = np.concatenate((sig, zeros))\n\n indices = np.tile(\n np.arange(\n 0, frame_len),\n (numframes, 1)) + np.tile(\n np.arange(\n 0, numframes * frame_step, frame_step), (frame_len, 1)).T\n\n indices = np.array(indices, dtype=np.int32)\n frames = padsignal[indices]\n win = np.tile(winfunc(frame_len), (numframes, 1))\n return frames * win\n\n\ndef deframesig(frames, siglen, frame_len, frame_step,\n winfunc=lambda x: np.ones((x,))):\n \"\"\"Does overlap-add procedure to undo the action of framesig.\n :param frames: the array of frames.\n :param siglen: the length of the desired signal, use 0 if unknown. Output\n will be truncated to siglen samples.\n :param frame_len: length of each frame measured in samples.\n :param frame_step: number of samples after the start of the previous frame\n that the next frame should begin.\n :param winfunc: the analysis window to apply to each frame. By default no\n window is applied.\n :returns: a 1-D signal.\n \"\"\"\n frame_len = round_half_up(frame_len)\n frame_step = round_half_up(frame_step)\n numframes = np.shape(frames)[0]\n assert np.shape(frames)[1] == frame_len, '\"frames\" matrix is wrong\\\n size, 2nd dim is not equal to frame_len'\n\n indices = np.tile(\n np.arange(\n 0, frame_len), (numframes, 1)) + np.tile(\n np.arange(\n 0, numframes * frame_step, frame_step), (frame_len, 1)).T\n\n indices = np.array(indices, dtype=np.int32)\n padlen = (numframes - 1) * frame_step + frame_len\n\n if siglen <= 0:\n siglen = padlen\n\n rec_signal = np.zeros((padlen,))\n window_correction = np.zeros((padlen,))\n win = winfunc(frame_len)\n\n for i in range(0, numframes):\n # add a little bit so it is never zero\n window_correction[indices[i, :]] = window_correction[indices[i, :]] + \\\n win + 1e-15\n rec_signal[indices[i, :]] = rec_signal[indices[i, :]] + frames[i, :]\n\n rec_signal = rec_signal / window_correction\n return rec_signal[0:siglen]\n\n\ndef magspec(frames, NFFT):\n \"\"\"Compute the magnitude spectrum of each frame in frames. If frames is an\n NxD matrix, output will be NxNFFT.\n :param frames: the array of frames. Each row is a frame.\n :param NFFT: the FFT length to use. If NFFT > frame_len, the frames are\n zero-padded.\n :returns: If frames is an NxD matrix, output will be NxNFFT. 
Each row will\n be the magnitude spectrum of the corresponding frame.\n \"\"\"\n complex_spec = np.fft.rfft(frames, NFFT)\n return np.absolute(complex_spec)\n\n\ndef powspec(frames, NFFT):\n \"\"\"Compute the power spectrum of each frame in frames. If frames is an NxD\n matrix, output will be NxNFFT.\n :param frames: the array of frames. Each row is a frame.\n :param NFFT: the FFT length to use. If NFFT > frame_len, the frames are\n zero-padded.\n :returns: If frames is an NxD matrix, output will be NxNFFT. Each row will\n be the power spectrum of the corresponding frame.\n \"\"\"\n return 1.0 / NFFT * np.square(magspec(frames, NFFT))\n\n\ndef logpowspec(frames, NFFT, norm=1):\n \"\"\"Compute the log power spectrum of each frame in frames. If frames is an\n NxD matrix, output will be NxNFFT.\n :param frames: the array of frames. Each row is a frame.\n :param NFFT: the FFT length to use. If NFFT > frame_len, the frames are\n zero-padded.\n :param norm: If norm=1, the log power spectrum is normalised so that the\n max value (across all frames) is 1.\n :returns: If frames is an NxD matrix, output will be NxNFFT. Each row will\n be the log power spectrum of the corresponding frame.\n \"\"\"\n ps = powspec(frames, NFFT)\n ps[ps <= 1e-30] = 1e-30\n lps = 10 * np.log10(ps)\n if norm:\n return lps - np.max(lps)\n else:\n return lps\n\n\ndef preemphasis(signal, coeff=0.95):\n \"\"\"perform preemphasis on the input signal.\n\n :param signal: The signal to filter.\n :param coeff: The preemphasis coefficient. 0 is no filter, default is 0.95.\n :returns: the filtered signal.\n \"\"\"\n return np.append(signal[0], signal[1:] - coeff * signal[:-1])\n\n\ndef delta(feat, N):\n \"\"\"Compute delta features from a feature vector sequence.\n\n :param feat: A numpy array of size (NUMFRAMES by number of features)\n containing features. Each row holds 1 feature vector.\n :param N: For each frame, calculate delta features based on preceding and\n following N frames\n :returns: A numpy array of size (NUMFRAMES by number of features)\n containing delta features. Each row holds 1 delta feature vector.\n \"\"\"\n NUMFRAMES = len(feat)\n feat = np.concatenate(([feat[0] for i in range(N)], feat, [feat[-1] for\n i in\n range(N)]))\n denom = sum([2 * i * i for i in range(1, N + 1)])\n dfeat = []\n for j in range(NUMFRAMES):\n dfeat.append(np.sum([n * feat[N + j + n]\n for n in range(-1 * N, N + 1)], axis=0) /\n denom)\n return dfeat\n\nclass Feature(object):\n \"\"\" Base class for features calculation\n All children class must implement __str__ and _call function.\n\n # Arguments\n fs: sampling frequency of audio signal. 
If the audio has not this fs,\n it will be resampled\n eps\n \"\"\"\n\n def __init__(self, fs=16e3, eps=1e-8,\n mean_norm=True, var_norm=True):\n self.fs = fs\n self.eps = eps\n\n self.mean_norm = mean_norm\n self.var_norm = var_norm\n\n self._logger = logging.getLogger('%s.%s' % (__name__,\n self.__class__.__name__))\n\n def __call__(self, audio):\n \"\"\" This method load the audio and do the transformation of signal\n\n # Inputs\n audio:\n if audio is a string and the file exists, the wave file will\n be loaded and resampled (if necessary) to fs\n if audio is a ndarray or list and is not empty, it will make\n the transformation without any resampling\n\n # Exception\n TypeError if audio were not recognized\n\n \"\"\"\n if ((isinstance(audio, str) or isinstance(audio, unicode))\n and os.path.isfile(audio)):\n audio, current_fs = librosa.audio.load(audio)\n audio = librosa.core.resample(audio, current_fs, self.fs)\n feats = self._call(audio)\n elif type(audio) in (np.ndarray, list) and len(audio) > 1:\n feats = self._call(audio)\n else:\n TypeError(\"audio type is not support\")\n\n return self._standarize(feats)\n\n def _call(self, data):\n raise NotImplementedError(\"__call__ must be overrided\")\n\n def _standarize(self, feats):\n if self.mean_norm:\n feats -= np.mean(feats, axis=0, keepdims=True)\n if self.var_norm:\n feats /= (np.std(feats, axis=0, keepdims=True) + self.eps)\n return feats\n\n def __str__(self):\n raise NotImplementedError(\"__str__ must be overrided\")\n\n @property\n def num_feats(self):\n return self._num_feats\n\n\nclass FBank(Feature):\n \"\"\"Compute Mel-filterbank energy features from an audio signal.\n\n # Arguments\n win_len: the length of the analysis window in seconds.\n Default is 0.025s (25 milliseconds)\n win_step: the step between successive windows in seconds.\n Default is 0.01s (10 milliseconds)\n num_filt: the number of filters in the filterbank, default 40.\n nfft: the FFT size. Default is 512.\n low_freq: lowest band edge of mel filters in Hz.\n Default is 20.\n high_freq: highest band edge of mel filters in Hz.\n Default is 7800\n pre_emph: apply preemphasis filter with preemph as coefficient.\n 0 is no filter. Default is 0.97.\n win_func: the analysis window to apply to each frame.\n By default hamming window is applied.\n \"\"\"\n\n def __init__(self, win_len=0.025, win_step=0.01,\n num_filt=40, nfft=512, low_freq=20, high_freq=7800,\n pre_emph=0.97, win_fun=signal.hamming, **kwargs):\n\n super(FBank, self).__init__(**kwargs)\n\n if high_freq > self.fs / 2:\n raise ValueError(\"high_freq must be less or equal than fs/2\")\n\n self.win_len = win_len\n self.win_step = win_step\n self.num_filt = num_filt\n self.nfft = nfft\n self.low_freq = low_freq\n self.high_freq = high_freq or self.fs / 2\n self.pre_emph = pre_emph\n self.win_fun = win_fun\n self._filterbanks = self._get_filterbanks()\n\n self._num_feats = self.num_filt\n\n @property\n def mel_points(self):\n return np.linspace(self._low_mel, self._high_mel, self.num_filt + 2)\n\n @property\n def low_freq(self):\n return self._low_freq\n\n @low_freq.setter\n def low_freq(self, value):\n self._low_mel = self._hz2mel(value)\n self._low_freq = value\n\n @property\n def high_freq(self):\n return self._high_freq\n\n @high_freq.setter\n def high_freq(self, value):\n self._high_mel = self._hz2mel(value)\n self._high_freq = value\n\n def _call(self, signal):\n \"\"\"Compute Mel-filterbank energy features from an audio signal.\n :param signal: the audio signal from which to compute features. 
Should\n be an N*1 array\n\n Returns:\n 2 values. The first is a numpy array of size (NUMFRAMES by nfilt)\n containing features. Each row holds 1 feature vector. The\n second return value is the energy in each frame (total energy,\n unwindowed)\n \"\"\"\n\n signal = preemphasis(signal, self.pre_emph)\n\n frames = framesig(signal,\n self.win_len * self.fs,\n self.win_step * self.fs,\n self.win_fun)\n\n pspec = powspec(frames, self.nfft)\n # this stores the total energy in each frame\n energy = np.sum(pspec, 1)\n # if energy is zero, we get problems with log\n energy = np.where(energy == 0, np.finfo(float).eps, energy)\n\n # compute the filterbank energies\n feat = np.dot(pspec, self._filterbanks.T)\n # if feat is zero, we get problems with log\n feat = np.where(feat == 0, np.finfo(float).eps, feat)\n\n return feat, energy\n\n def _get_filterbanks(self):\n \"\"\"Compute a Mel-filterbank. The filters are stored in the rows, the\n columns correspond\n to fft bins. The filters are returned as an array of size nfilt *\n (nfft / 2 + 1)\n\n Returns:\n A numpy array of size num_filt * (nfft/2 + 1) containing\n filterbank. Each row holds 1 filter.\n \"\"\"\n\n # our points are in Hz, but we use fft bins, so we have to convert\n # from Hz to fft bin number\n bin = np.floor((self.nfft + 1) * self._mel2hz(self.mel_points) /\n self.fs)\n\n fbank = np.zeros([self.num_filt, int(self.nfft / 2 + 1)])\n for j in xrange(0, self.num_filt):\n for i in xrange(int(bin[j]), int(bin[j + 1])):\n fbank[j, i] = (i - bin[j]) / (bin[j + 1] - bin[j])\n for i in xrange(int(bin[j + 1]), int(bin[j + 2])):\n fbank[j, i] = (bin[j + 2] - i) / (bin[j + 2] - bin[j + 1])\n return fbank\n\n def _hz2mel(self, hz):\n \"\"\"Convert a value in Hertz to Mels\n\n Args:\n hz: a value in Hz. This can also be a numpy array, conversion\n proceeds element-wise.\n\n Returns:\n A value in Mels. If an array was passed in, an identical sized\n array is returned.\n \"\"\"\n return 2595 * np.log10(1 + hz / 700.0)\n\n def _mel2hz(self, mel):\n \"\"\"Convert a value in Mels to Hertz\n\n Args:\n mel: a value in Mels. This can also be a numpy array, conversion\n proceeds element-wise.\n\n Returns:\n A value in Hertz. If an array was passed in, an identical sized\n array is returned.\n \"\"\"\n return 700 * (10**(mel / 2595.0) - 1)\n\n def __str__(self):\n return \"fbank\"\n\n\nclass MFCC(FBank):\n \"\"\"Compute MFCC features from an audio signal.\n\n # Arguments\n num_cep: the number of cepstrum to return. Default 13.\n cep_lifter: apply a lifter to final cepstral coefficients. 0 is\n no lifter. Default is 22.\n append_energy: if this is true, the zeroth cepstral coefficient\n is replaced with the log of the total frame energy.\n d: if True add deltas coeficients. Default True\n dd: if True add delta-deltas coeficients. Default True\n norm: if 'cmn' performs the cepstral mean normalization. elif 'cmvn'\n performs the cepstral mean and variance normalizastion. Default 'cmn'\n \"\"\"\n\n def __init__(self, num_cep=13, cep_lifter=22, append_energy=True,\n d=True, dd=True, **kwargs):\n\n super(MFCC, self).__init__(**kwargs)\n\n self.num_cep = num_cep\n self.cep_lifter = cep_lifter\n self.append_energy = append_energy\n self.d = d\n self.dd = dd\n self._num_feats = (1 + self.d + self.dd) * self.num_cep\n\n self._logger = logging.getLogger('%s.%s' % (__name__,\n self.__class__.__name__))\n\n def _call(self, signal):\n \"\"\"Compute MFCC features from an audio signal.\n\n Args:\n signal: the audio signal from which to compute features. 
Should be\n an N*1 array\n\n Returns:\n A numpy array of size (NUMFRAMES by numcep) containing features.\n Each row holds 1 feature vector.\n \"\"\"\n feat, energy = super(MFCC, self)._call(signal)\n\n feat = np.log(feat)\n feat = dct(feat, type=2, axis=1, norm='ortho')[:, :self.num_cep]\n feat = self._lifter(feat, self.cep_lifter)\n\n if self.append_energy:\n # replace first cepstral coefficient with log of frame energy\n feat[:, 0] = np.log(energy + self.eps)\n\n if self.d:\n d = delta(feat, 2)\n feat = np.hstack([feat, d])\n\n if self.dd:\n feat = np.hstack([feat, delta(d, 2)])\n\n return feat\n\n def _lifter(self, cepstra, L=22):\n \"\"\"Apply a cepstral lifter the the matrix of cepstra.\n\n This has the effect of increasing the magnitude of the high frequency\n DCT coeffs.\n\n Args:\n cepstra: the matrix of mel-cepstra, will be numframes * numcep in\n size.\n L: the liftering coefficient to use. Default is 22. L <= 0 disables\n lifter.\n \"\"\"\n if L > 0:\n nframes, ncoeff = np.shape(cepstra)\n n = np.arange(ncoeff)\n lift = 1 + (L / 2) * np.sin(np.pi * n / L)\n return lift * cepstra\n else:\n # values of L <= 0, do nothing\n return cepstra\n\n def __str__(self):\n return \"mfcc\"\n\n\nclass SimpleCharParser(object):\n \"\"\" Class responsible to map any text in a certain character vocabulary\n\n # Arguments\n mode: Which type of vacabulary will be generated. Modes can be\n concatenated by using pipeline '|'\n 'space' or 's': accepts space character\n 'accents' or 'a': accepts pt-br accents\n 'punctuation' or 'p': accepts punctuation defined in\n string.punctuation\n 'digits': accepts all digits\n 'sensitive' or 'S': characters will be case sensitive\n 'all': shortcut that enables all modes\n \"\"\"\n\n def __init__(self):\n\n self._vocab, self._inv_vocab = self._gen_vocab()\n\n def map(self, txt, sanitize=True):\n if sanitize:\n label = np.array([self._vocab[c] for c in self._sanitize(txt)],\n dtype='int32')\n else:\n label = np.array([self._vocab[c] for c in txt], dtype='int32')\n\n return label\n\n def imap(self, labels):\n txt = ''.join([self._inv_vocab[l] for l in labels])\n\n return txt\n\n def _sanitize(self, text):\n # removing duplicated spaces\n text = ' '.join(text.split())\n\n # removing digits\n text = ''.join([c for c in text if not c.isdigit()])\n\n # removing accents\n text = unidecode(text)\n\n # removnig punctuations\n text = text.translate(\n string.maketrans(\"-'\", ' ')).translate(None,\n string.punctuation)\n\n # remove uppercase\n text = text.lower()\n\n return text\n\n def is_valid(self, text):\n # verify if the text is valid without sanitization\n try:\n _ = self.map(text, sanitize=False)\n return True\n except KeyError:\n return False\n\n def _gen_vocab(self):\n\n vocab = {chr(value + ord('a')): (value)\n for value in xrange(ord('z') - ord('a') + 1)}\n\n vocab[' '] = len(vocab)\n\n inv_vocab = {v: k for (k, v) in vocab.iteritems()}\n\n # Add blank label\n inv_vocab[len(inv_vocab)] = '<b>'\n\n return vocab, inv_vocab\n\n def __call__(self, _input):\n return self.map(_input)\n"
] |
[
[
"numpy.dot",
"numpy.linspace",
"numpy.concatenate",
"numpy.max",
"numpy.mean",
"scipy.fftpack.dct",
"numpy.hstack",
"numpy.arange",
"numpy.finfo",
"numpy.sin",
"numpy.std",
"numpy.zeros",
"numpy.log",
"numpy.append",
"numpy.log10",
"numpy.array",
"numpy.sum",
"numpy.absolute",
"numpy.fft.rfft",
"numpy.ones",
"numpy.shape"
]
] |
icedcoffeeee/manim
|
[
"880e8999a81f1e85bc1d20921b1efb9c4ed1565a"
] |
[
"manim/mobject/opengl_mobject.py"
] |
[
"import copy\nimport itertools as it\nimport random\nimport sys\nfrom functools import wraps\nfrom math import ceil\nfrom typing import Iterable, Optional, Tuple, Union\n\nimport moderngl\nimport numpy as np\nfrom colour import Color\n\nfrom .. import config\nfrom ..constants import *\nfrom ..utils.bezier import interpolate\nfrom ..utils.color import *\nfrom ..utils.config_ops import _Data, _Uniforms\n\n# from ..utils.iterables import batch_by_property\nfrom ..utils.iterables import (\n batch_by_property,\n list_update,\n listify,\n make_even,\n resize_array,\n resize_preserving_order,\n resize_with_interpolation,\n)\nfrom ..utils.paths import straight_path\nfrom ..utils.simple_functions import get_parameters\nfrom ..utils.space_ops import (\n angle_between_vectors,\n normalize,\n rotation_matrix_transpose,\n)\n\n\nclass OpenGLMobject:\n \"\"\"\n Mathematical Object\n \"\"\"\n\n shader_dtype = [\n (\"point\", np.float32, (3,)),\n ]\n shader_folder = \"\"\n\n # _Data and _Uniforms are set as class variables to tell manim how to handle setting/getting these attributes later.\n points = _Data()\n bounding_box = _Data()\n rgbas = _Data()\n\n is_fixed_in_frame = _Uniforms()\n gloss = _Uniforms()\n shadow = _Uniforms()\n\n def __init__(\n self,\n color=WHITE,\n opacity=1,\n dim=3, # TODO, get rid of this\n # Lighting parameters\n # Positive gloss up to 1 makes it reflect the light.\n gloss=0.0,\n # Positive shadow up to 1 makes a side opposite the light darker\n shadow=0.0,\n # For shaders\n render_primitive=moderngl.TRIANGLE_STRIP,\n texture_paths=None,\n depth_test=False,\n # If true, the mobject will not get rotated according to camera position\n is_fixed_in_frame=False,\n # Must match in attributes of vert shader\n # Event listener\n listen_to_events=False,\n model_matrix=None,\n **kwargs,\n ):\n # getattr in case data/uniforms are already defined in parent classes.\n self.data = getattr(self, \"data\", {})\n self.uniforms = getattr(self, \"uniforms\", {})\n\n self.color = Color(color)\n self.opacity = opacity\n self.dim = dim # TODO, get rid of this\n # Lighting parameters\n # Positive gloss up to 1 makes it reflect the light.\n self.gloss = gloss\n # Positive shadow up to 1 makes a side opposite the light darker\n self.shadow = shadow\n # For shaders\n self.render_primitive = render_primitive\n self.texture_paths = texture_paths\n self.depth_test = depth_test\n # If true, the mobject will not get rotated according to camera position\n self.is_fixed_in_frame = float(is_fixed_in_frame)\n # Must match in attributes of vert shader\n # Event listener\n self.listen_to_events = listen_to_events\n\n self.submobjects = []\n self.parents = []\n self.parent = None\n self.family = [self]\n self.locked_data_keys = set()\n self.needs_new_bounding_box = True\n if model_matrix is None:\n self.model_matrix = np.eye(4)\n else:\n self.model_matrix = model_matrix\n\n self.init_data()\n self.init_updaters()\n # self.init_event_listners()\n self.init_points()\n self.init_colors()\n\n self.shader_indices = None\n\n if self.depth_test:\n self.apply_depth_test()\n\n def __str__(self):\n return self.__class__.__name__\n\n def init_data(self):\n \"\"\"Initializes the ``points``, ``bounding_box`` and ``rgbas`` attributes and groups them into self.data.\n Subclasses can inherit and overwrite this method to extend `self.data`.\"\"\"\n self.points = np.zeros((0, 3))\n self.bounding_box = np.zeros((3, 3))\n self.rgbas = np.zeros((1, 4))\n\n def init_colors(self):\n self.set_color(self.color, self.opacity)\n\n def 
init_points(self):\n # Typically implemented in subclass, unlpess purposefully left blank\n pass\n\n def set_data(self, data):\n for key in data:\n self.data[key] = data[key].copy()\n return self\n\n def set_uniforms(self, uniforms):\n for key in uniforms:\n self.uniforms[key] = uniforms[key] # Copy?\n return self\n\n @property\n def animate(self):\n # Borrowed from https://github.com/ManimCommunity/manim/\n return _AnimationBuilder(self)\n\n @property\n def width(self):\n \"\"\"The width of the mobject.\n\n Returns\n -------\n :class:`float`\n\n Examples\n --------\n .. manim:: WidthExample\n\n class WidthExample(Scene):\n def construct(self):\n decimal = DecimalNumber().to_edge(UP)\n rect = Rectangle(color=BLUE)\n rect_copy = rect.copy().set_stroke(GRAY, opacity=0.5)\n\n decimal.add_updater(lambda d: d.set_value(rect.width))\n\n self.add(rect_copy, rect, decimal)\n self.play(rect.animate.set(width=7))\n self.wait()\n\n See also\n --------\n :meth:`length_over_dim`\n\n \"\"\"\n\n # Get the length across the X dimension\n return self.length_over_dim(0)\n\n # Only these methods should directly affect points\n @width.setter\n def width(self, value):\n self.rescale_to_fit(value, 0, stretch=False)\n\n @property\n def height(self):\n \"\"\"The height of the mobject.\n\n Returns\n -------\n :class:`float`\n\n Examples\n --------\n .. manim:: HeightExample\n\n class HeightExample(Scene):\n def construct(self):\n decimal = DecimalNumber().to_edge(UP)\n rect = Rectangle(color=BLUE)\n rect_copy = rect.copy().set_stroke(GRAY, opacity=0.5)\n\n decimal.add_updater(lambda d: d.set_value(rect.height))\n\n self.add(rect_copy, rect, decimal)\n self.play(rect.animate.set(height=5))\n self.wait()\n\n See also\n --------\n :meth:`length_over_dim`\n\n \"\"\"\n\n # Get the length across the Y dimension\n return self.length_over_dim(1)\n\n @height.setter\n def height(self, value):\n self.rescale_to_fit(value, 1, stretch=False)\n\n @property\n def depth(self):\n \"\"\"The depth of the mobject.\n\n Returns\n -------\n :class:`float`\n\n See also\n --------\n :meth:`length_over_dim`\n\n \"\"\"\n\n # Get the length across the Z dimension\n return self.length_over_dim(2)\n\n @depth.setter\n def depth(self, value):\n self.rescale_to_fit(value, 2, stretch=False)\n\n def resize_points(self, new_length, resize_func=resize_array):\n if new_length != len(self.points):\n self.points = resize_func(self.points, new_length)\n self.refresh_bounding_box()\n return self\n\n def set_points(self, points):\n if len(points) == len(self.points):\n self.points[:] = points\n elif isinstance(points, np.ndarray):\n self.points = points.copy()\n else:\n self.points = np.array(points)\n self.refresh_bounding_box()\n return self\n\n def append_points(self, new_points):\n self.points = np.vstack([self.points, new_points])\n self.refresh_bounding_box()\n return self\n\n def reverse_points(self):\n for mob in self.get_family():\n for key in mob.data:\n mob.data[key] = mob.data[key][::-1]\n return self\n\n def apply_points_function(\n self, func, about_point=None, about_edge=ORIGIN, works_on_bounding_box=False\n ):\n if about_point is None and about_edge is not None:\n about_point = self.get_bounding_box_point(about_edge)\n\n for mob in self.get_family():\n arrs = []\n if mob.has_points():\n arrs.append(mob.get_points())\n if works_on_bounding_box:\n arrs.append(mob.get_bounding_box())\n\n for arr in arrs:\n if about_point is None:\n arr[:] = func(arr)\n else:\n arr[:] = func(arr - about_point) + about_point\n\n if not works_on_bounding_box:\n 
self.refresh_bounding_box(recurse_down=True)\n else:\n for parent in self.parents:\n parent.refresh_bounding_box()\n return self\n\n # Others related to points\n\n def match_points(self, mobject):\n self.set_points(mobject.get_points())\n\n def get_points(self):\n return self.points\n\n def clear_points(self):\n self.resize_points(0)\n\n def get_num_points(self):\n return len(self.points)\n\n def get_all_points(self):\n if self.submobjects:\n return np.vstack([sm.get_points() for sm in self.get_family()])\n else:\n return self.points\n\n def has_points(self):\n return self.get_num_points() > 0\n\n def get_bounding_box(self):\n if self.needs_new_bounding_box:\n self.bounding_box = self.compute_bounding_box()\n self.needs_new_bounding_box = False\n return self.bounding_box\n\n def compute_bounding_box(self):\n all_points = np.vstack(\n [\n self.points,\n *(\n mob.get_bounding_box()\n for mob in self.get_family()[1:]\n if mob.has_points()\n ),\n ]\n )\n if len(all_points) == 0:\n return np.zeros((3, self.dim))\n else:\n # Lower left and upper right corners\n mins = all_points.min(0)\n maxs = all_points.max(0)\n mids = (mins + maxs) / 2\n return np.array([mins, mids, maxs])\n\n def refresh_bounding_box(self, recurse_down=False, recurse_up=True):\n for mob in self.get_family(recurse_down):\n mob.needs_new_bounding_box = True\n if recurse_up:\n for parent in self.parents:\n parent.refresh_bounding_box()\n return self\n\n def is_point_touching(self, point, buff=MED_SMALL_BUFF):\n bb = self.get_bounding_box()\n mins = bb[0] - buff\n maxs = bb[2] + buff\n return (point >= mins).all() and (point <= maxs).all()\n\n # Family matters\n\n def __getitem__(self, value):\n if isinstance(value, slice):\n GroupClass = self.get_group_class()\n return GroupClass(*self.split().__getitem__(value))\n return self.split().__getitem__(value)\n\n def __iter__(self):\n return iter(self.split())\n\n def __len__(self):\n return len(self.split())\n\n def split(self):\n return self.submobjects\n\n def assemble_family(self):\n sub_families = (sm.get_family() for sm in self.submobjects)\n self.family = [self, *it.chain(*sub_families)]\n self.refresh_has_updater_status()\n self.refresh_bounding_box()\n for parent in self.parents:\n parent.assemble_family()\n return self\n\n def get_family(self, recurse=True):\n if recurse:\n return self.family\n else:\n return [self]\n\n def family_members_with_points(self):\n return [m for m in self.get_family() if m.has_points()]\n\n def add(self, *mobjects, update_parent=False):\n if update_parent:\n assert len(mobjects) == 1, \"Can't set multiple parents.\"\n mobjects[0].parent = self\n\n if self in mobjects:\n raise Exception(\"Mobject cannot contain self\")\n for mobject in mobjects:\n if mobject not in self.submobjects:\n self.submobjects.append(mobject)\n if self not in mobject.parents:\n mobject.parents.append(self)\n self.assemble_family()\n return self\n\n def remove(self, *mobjects, update_parent=False):\n if update_parent:\n assert len(mobjects) == 1, \"Can't remove multiple parents.\"\n mobjects[0].parent = None\n\n for mobject in mobjects:\n if mobject in self.submobjects:\n self.submobjects.remove(mobject)\n if self in mobject.parents:\n mobject.parents.remove(self)\n self.assemble_family()\n return self\n\n def add_to_back(self, *mobjects):\n self.set_submobjects(list_update(mobjects, self.submobjects))\n return self\n\n def replace_submobject(self, index, new_submob):\n old_submob = self.submobjects[index]\n if self in old_submob.parents:\n old_submob.parents.remove(self)\n 
self.submobjects[index] = new_submob\n self.assemble_family()\n return self\n\n def set_submobjects(self, submobject_list):\n self.remove(*self.submobjects)\n self.add(*submobject_list)\n return self\n\n def invert(self, recursive=False):\n \"\"\"Inverts the list of :attr:`submobjects`.\n\n Parameters\n ----------\n recursive\n If ``True``, all submobject lists of this mobject's family are inverted.\n\n Examples\n --------\n\n .. manim:: InvertSumobjectsExample\n\n class InvertSumobjectsExample(Scene):\n def construct(self):\n s = VGroup(*[Dot().shift(i*0.1*RIGHT) for i in range(-20,20)])\n s2 = s.copy()\n s2.invert()\n s2.shift(DOWN)\n self.play(Write(s), Write(s2))\n \"\"\"\n if recursive:\n for submob in self.submobjects:\n submob.invert(recursive=True)\n list.reverse(self.submobjects)\n self.assemble_family()\n\n def digest_mobject_attrs(self):\n \"\"\"\n Ensures all attributes which are mobjects are included\n in the submobjects list.\n \"\"\"\n mobject_attrs = [\n x for x in list(self.__dict__.values()) if isinstance(x, OpenGLMobject)\n ]\n self.set_submobjects(list_update(self.submobjects, mobject_attrs))\n return self\n\n # Submobject organization\n\n def arrange(self, direction=RIGHT, center=True, **kwargs):\n for m1, m2 in zip(self.submobjects, self.submobjects[1:]):\n m2.next_to(m1, direction, **kwargs)\n if center:\n self.center()\n return self\n\n def arrange_in_grid(\n self,\n rows: Optional[int] = None,\n cols: Optional[int] = None,\n buff: Union[float, Tuple[float, float]] = MED_SMALL_BUFF,\n cell_alignment: np.ndarray = ORIGIN,\n row_alignments: Optional[str] = None, # \"ucd\"\n col_alignments: Optional[str] = None, # \"lcr\"\n row_heights: Optional[Iterable[Optional[float]]] = None,\n col_widths: Optional[Iterable[Optional[float]]] = None,\n flow_order: str = \"rd\",\n **kwargs,\n ) -> \"OpenGLMobject\":\n \"\"\"Arrange submobjects in a grid.\n\n Parameters\n ----------\n rows\n The number of rows in the grid.\n cols\n The number of columns in the grid.\n buff\n The gap between grid cells. To specify a different buffer in the horizontal and\n vertical directions, a tuple of two values can be given - ``(row, col)``.\n cell_alignment\n The way each submobject is aligned in its grid cell.\n row_alignments\n The vertical alignment for each row (top to bottom). Accepts the following characters: ``\"u\"`` -\n up, ``\"c\"`` - center, ``\"d\"`` - down.\n col_alignments\n The horizontal alignment for each column (left to right). Accepts the following characters ``\"l\"`` - left,\n ``\"c\"`` - center, ``\"r\"`` - right.\n row_heights\n Defines a list of heights for certain rows (top to bottom). If the list contains\n ``None``, the corresponding row will fit its height automatically based\n on the highest element in that row.\n col_widths\n Defines a list of widths for certain columns (left to right). If the list contains ``None``, the\n corresponding column will fit its width automatically based on the widest element in that column.\n flow_order\n The order in which submobjects fill the grid. Can be one of the following values:\n \"rd\", \"dr\", \"ld\", \"dl\", \"ru\", \"ur\", \"lu\", \"ul\". (\"rd\" -> fill rightwards then downwards)\n\n Returns\n -------\n Mobject\n The mobject.\n\n NOTES\n -----\n\n If only one of ``cols`` and ``rows`` is set implicitly, the other one will be chosen big\n enough to fit all submobjects. 
If neither is set, they will be chosen to be about the same,\n tending towards ``cols`` > ``rows`` (simply because videos are wider than they are high).\n\n If both ``cell_alignment`` and ``row_alignments`` / ``col_alignments`` are\n defined, the latter has higher priority.\n\n\n Raises\n ------\n ValueError\n If ``rows`` and ``cols`` are too small to fit all submobjects.\n ValueError\n If :code:`cols`, :code:`col_alignments` and :code:`col_widths` or :code:`rows`,\n :code:`row_alignments` and :code:`row_heights` have mismatching sizes.\n\n Examples\n --------\n .. manim:: ExampleBoxes\n :save_last_frame:\n\n class ExampleBoxes(Scene):\n def construct(self):\n boxes=VGroup(*[Square() for s in range(0,6)])\n boxes.arrange_in_grid(rows=2, buff=0.1)\n self.add(boxes)\n\n\n .. manim:: ArrangeInGrid\n :save_last_frame:\n\n class ArrangeInGrid(Scene):\n def construct(self):\n #Add some numbered boxes:\n np.random.seed(3)\n boxes = VGroup(*[\n Rectangle(WHITE, np.random.random()+.5, np.random.random()+.5).add(Text(str(i+1)).scale(0.5))\n for i in range(22)\n ])\n self.add(boxes)\n\n boxes.arrange_in_grid(\n buff=(0.25,0.5),\n col_alignments=\"lccccr\",\n row_alignments=\"uccd\",\n col_widths=[2, *[None]*4, 2],\n flow_order=\"dr\"\n )\n\n\n \"\"\"\n from .geometry import Line\n\n mobs = self.submobjects.copy()\n start_pos = self.get_center()\n\n # get cols / rows values if given (implicitly)\n def init_size(num, alignments, sizes):\n if num is not None:\n return num\n if alignments is not None:\n return len(alignments)\n if sizes is not None:\n return len(sizes)\n\n cols = init_size(cols, col_alignments, col_widths)\n rows = init_size(rows, row_alignments, row_heights)\n\n # calculate rows cols\n if rows is None and cols is None:\n cols = ceil(np.sqrt(len(mobs)))\n # make the grid as close to quadratic as possible.\n # choosing cols first can results in cols>rows.\n # This is favored over rows>cols since in general\n # the sceene is wider than high.\n if rows is None:\n rows = ceil(len(mobs) / cols)\n if cols is None:\n cols = ceil(len(mobs) / rows)\n if rows * cols < len(mobs):\n raise ValueError(\"Too few rows and columns to fit all submobjetcs.\")\n # rows and cols are now finally valid.\n\n if isinstance(buff, tuple):\n buff_x = buff[0]\n buff_y = buff[1]\n else:\n buff_x = buff_y = buff\n\n # Initialize alignments correctly\n def init_alignments(alignments, num, mapping, name, dir):\n if alignments is None:\n # Use cell_alignment as fallback\n return [cell_alignment * dir] * num\n if len(alignments) != num:\n raise ValueError(\"{}_alignments has a mismatching size.\".format(name))\n alignments = list(alignments)\n for i in range(num):\n alignments[i] = mapping[alignments[i]]\n return alignments\n\n row_alignments = init_alignments(\n row_alignments, rows, {\"u\": UP, \"c\": ORIGIN, \"d\": DOWN}, \"row\", RIGHT\n )\n col_alignments = init_alignments(\n col_alignments, cols, {\"l\": LEFT, \"c\": ORIGIN, \"r\": RIGHT}, \"col\", UP\n )\n # Now row_alignment[r] + col_alignment[c] is the alignment in cell [r][c]\n\n mapper = {\n \"dr\": lambda r, c: (rows - r - 1) + c * rows,\n \"dl\": lambda r, c: (rows - r - 1) + (cols - c - 1) * rows,\n \"ur\": lambda r, c: r + c * rows,\n \"ul\": lambda r, c: r + (cols - c - 1) * rows,\n \"rd\": lambda r, c: (rows - r - 1) * cols + c,\n \"ld\": lambda r, c: (rows - r - 1) * cols + (cols - c - 1),\n \"ru\": lambda r, c: r * cols + c,\n \"lu\": lambda r, c: r * cols + (cols - c - 1),\n }\n if flow_order not in mapper:\n raise ValueError(\n 'flow_order must be one 
of the following values: \"dr\", \"rd\", \"ld\" \"dl\", \"ru\", \"ur\", \"lu\", \"ul\".'\n )\n flow_order = mapper[flow_order]\n\n # Reverse row_alignments and row_heights. Necessary since the\n # grid filling is handled bottom up for simplicity reasons.\n def reverse(maybe_list):\n if maybe_list is not None:\n maybe_list = list(maybe_list)\n maybe_list.reverse()\n return maybe_list\n\n row_alignments = reverse(row_alignments)\n row_heights = reverse(row_heights)\n\n placeholder = OpenGLMobject()\n # Used to fill up the grid temporarily, doesn't get added to the scene.\n # In this case a Mobject is better than None since it has width and height\n # properties of 0.\n\n mobs.extend([placeholder] * (rows * cols - len(mobs)))\n grid = [[mobs[flow_order(r, c)] for c in range(cols)] for r in range(rows)]\n\n measured_heigths = [\n max([grid[r][c].height for c in range(cols)]) for r in range(rows)\n ]\n measured_widths = [\n max([grid[r][c].width for r in range(rows)]) for c in range(cols)\n ]\n\n # Initialize row_heights / col_widths correctly using measurements as fallback\n def init_sizes(sizes, num, measures, name):\n if sizes is None:\n sizes = [None] * num\n if len(sizes) != num:\n raise ValueError(\"{} has a mismatching size.\".format(name))\n return [\n sizes[i] if sizes[i] is not None else measures[i] for i in range(num)\n ]\n\n heights = init_sizes(row_heights, rows, measured_heigths, \"row_heights\")\n widths = init_sizes(col_widths, cols, measured_widths, \"col_widths\")\n\n x, y = 0, 0\n for r in range(rows):\n x = 0\n for c in range(cols):\n if grid[r][c] is not placeholder:\n alignment = row_alignments[r] + col_alignments[c]\n line = Line(\n x * RIGHT + y * UP,\n (x + widths[c]) * RIGHT + (y + heights[r]) * UP,\n )\n # Use a mobject to avoid rewriting align inside\n # box code that Mobject.move_to(Mobject) already\n # includes.\n\n grid[r][c].move_to(line, alignment)\n x += widths[c] + buff_x\n y += heights[r] + buff_y\n\n self.move_to(start_pos)\n return self\n\n def get_grid(self, n_rows, n_cols, height=None, **kwargs):\n \"\"\"\n Returns a new mobject containing multiple copies of this one\n arranged in a grid\n \"\"\"\n grid = self.get_group_class()(*(self.copy() for n in range(n_rows * n_cols)))\n grid.arrange_in_grid(n_rows, n_cols, **kwargs)\n if height is not None:\n grid.set_height(height)\n return grid\n\n def sort(self, point_to_num_func=lambda p: p[0], submob_func=None):\n if submob_func is not None:\n self.submobjects.sort(key=submob_func)\n else:\n self.submobjects.sort(key=lambda m: point_to_num_func(m.get_center()))\n return self\n\n def shuffle(self, recurse=False):\n if recurse:\n for submob in self.submobjects:\n submob.shuffle(recurse=True)\n random.shuffle(self.submobjects)\n self.assemble_family()\n return self\n\n # Copying\n\n def copy(self, shallow: bool = False):\n \"\"\"Copies the mobject.\n\n Parameters\n ----------\n shallow\n Controls whether a shallow copy is returned.\n \"\"\"\n if not shallow:\n return self.deepcopy()\n\n # TODO, either justify reason for shallow copy, or\n # remove this redundancy everywhere\n # return self.deepcopy()\n\n parents = self.parents\n self.parents = []\n copy_mobject = copy.copy(self)\n self.parents = parents\n\n copy_mobject.data = dict(self.data)\n for key in self.data:\n copy_mobject.data[key] = self.data[key].copy()\n\n # TODO, are uniforms ever numpy arrays?\n copy_mobject.uniforms = dict(self.uniforms)\n\n copy_mobject.submobjects = []\n copy_mobject.add(*[sm.copy() for sm in self.submobjects])\n 
copy_mobject.match_updaters(self)\n\n copy_mobject.needs_new_bounding_box = self.needs_new_bounding_box\n\n # Make sure any mobject or numpy array attributes are copied\n family = self.get_family()\n for attr, value in list(self.__dict__.items()):\n if (\n isinstance(value, OpenGLMobject)\n and value in family\n and value is not self\n ):\n setattr(copy_mobject, attr, value.copy())\n if isinstance(value, np.ndarray):\n setattr(copy_mobject, attr, value.copy())\n # if isinstance(value, ShaderWrapper):\n # setattr(copy_mobject, attr, value.copy())\n return copy_mobject\n\n def deepcopy(self):\n parents = self.parents\n self.parents = []\n result = copy.deepcopy(self)\n self.parents = parents\n return result\n\n def generate_target(self, use_deepcopy=False):\n self.target = None # Prevent exponential explosion\n if use_deepcopy:\n self.target = self.deepcopy()\n else:\n self.target = self.copy()\n return self.target\n\n def save_state(self, use_deepcopy=False):\n if hasattr(self, \"saved_state\"):\n # Prevent exponential growth of data\n self.saved_state = None\n if use_deepcopy:\n self.saved_state = self.deepcopy()\n else:\n self.saved_state = self.copy()\n return self\n\n def restore(self):\n if not hasattr(self, \"saved_state\") or self.save_state is None:\n raise Exception(\"Trying to restore without having saved\")\n self.become(self.saved_state)\n return self\n\n # Updating\n\n def init_updaters(self):\n self.time_based_updaters = []\n self.non_time_updaters = []\n self.has_updaters = False\n self.updating_suspended = False\n\n def update(self, dt=0, recurse=True):\n if not self.has_updaters or self.updating_suspended:\n return self\n for updater in self.time_based_updaters:\n updater(self, dt)\n for updater in self.non_time_updaters:\n updater(self)\n if recurse:\n for submob in self.submobjects:\n submob.update(dt, recurse)\n return self\n\n def get_time_based_updaters(self):\n return self.time_based_updaters\n\n def has_time_based_updater(self):\n return len(self.time_based_updaters) > 0\n\n def get_updaters(self):\n return self.time_based_updaters + self.non_time_updaters\n\n def get_family_updaters(self):\n return list(it.chain(*[sm.get_updaters() for sm in self.get_family()]))\n\n def add_updater(self, update_function, index=None, call_updater=True):\n if \"dt\" in get_parameters(update_function):\n updater_list = self.time_based_updaters\n else:\n updater_list = self.non_time_updaters\n\n if index is None:\n updater_list.append(update_function)\n else:\n updater_list.insert(index, update_function)\n\n self.refresh_has_updater_status()\n if call_updater:\n self.update()\n return self\n\n def remove_updater(self, update_function):\n for updater_list in [self.time_based_updaters, self.non_time_updaters]:\n while update_function in updater_list:\n updater_list.remove(update_function)\n self.refresh_has_updater_status()\n return self\n\n def clear_updaters(self, recurse=True):\n self.time_based_updaters = []\n self.non_time_updaters = []\n self.refresh_has_updater_status()\n if recurse:\n for submob in self.submobjects:\n submob.clear_updaters()\n return self\n\n def match_updaters(self, mobject):\n self.clear_updaters()\n for updater in mobject.get_updaters():\n self.add_updater(updater)\n return self\n\n def suspend_updating(self, recurse=True):\n self.updating_suspended = True\n if recurse:\n for submob in self.submobjects:\n submob.suspend_updating(recurse)\n return self\n\n def resume_updating(self, recurse=True, call_updater=True):\n self.updating_suspended = False\n if 
recurse:\n for submob in self.submobjects:\n submob.resume_updating(recurse)\n for parent in self.parents:\n parent.resume_updating(recurse=False, call_updater=False)\n if call_updater:\n self.update(dt=0, recurse=recurse)\n return self\n\n def refresh_has_updater_status(self):\n self.has_updaters = any(mob.get_updaters() for mob in self.get_family())\n return self\n\n # Transforming operations\n\n def shift(self, vector):\n self.apply_points_function(\n lambda points: points + vector,\n about_edge=None,\n works_on_bounding_box=True,\n )\n return self\n\n def scale(self, scale_factor, **kwargs):\n \"\"\"\n Default behavior is to scale about the center of the mobject.\n The argument about_edge can be a vector, indicating which side of\n the mobject to scale about, e.g., mob.scale(about_edge = RIGHT)\n scales about mob.get_right().\n\n Otherwise, if about_point is given a value, scaling is done with\n respect to that point.\n \"\"\"\n self.apply_points_function(\n lambda points: scale_factor * points, works_on_bounding_box=True, **kwargs\n )\n return self\n\n def stretch(self, factor, dim, **kwargs):\n def func(points):\n points[:, dim] *= factor\n return points\n\n self.apply_points_function(func, works_on_bounding_box=True, **kwargs)\n return self\n\n def rotate_about_origin(self, angle, axis=OUT):\n return self.rotate(angle, axis, about_point=ORIGIN)\n\n def rotate(\n self,\n angle,\n axis=OUT,\n **kwargs,\n ):\n rot_matrix_T = rotation_matrix_transpose(angle, axis)\n self.apply_points_function(\n lambda points: np.dot(points, rot_matrix_T), **kwargs\n )\n return self\n\n def flip(self, axis=UP, **kwargs):\n return self.rotate(TAU / 2, axis, **kwargs)\n\n def apply_function(self, function, **kwargs):\n # Default to applying matrix about the origin, not mobjects center\n if len(kwargs) == 0:\n kwargs[\"about_point\"] = ORIGIN\n self.apply_points_function(\n lambda points: np.array([function(p) for p in points]), **kwargs\n )\n return self\n\n def apply_function_to_position(self, function):\n self.move_to(function(self.get_center()))\n return self\n\n def apply_function_to_submobject_positions(self, function):\n for submob in self.submobjects:\n submob.apply_function_to_position(function)\n return self\n\n def apply_matrix(self, matrix, **kwargs):\n # Default to applying matrix about the origin, not mobjects center\n if (\"about_point\" not in kwargs) and (\"about_edge\" not in kwargs):\n kwargs[\"about_point\"] = ORIGIN\n full_matrix = np.identity(self.dim)\n matrix = np.array(matrix)\n full_matrix[: matrix.shape[0], : matrix.shape[1]] = matrix\n self.apply_points_function(\n lambda points: np.dot(points, full_matrix.T), **kwargs\n )\n return self\n\n def apply_complex_function(self, function, **kwargs):\n def R3_func(point):\n x, y, z = point\n xy_complex = function(complex(x, y))\n return [xy_complex.real, xy_complex.imag, z]\n\n return self.apply_function(R3_func)\n\n def hierarchical_model_matrix(self):\n if self.parent is None:\n return self.model_matrix\n\n model_matrices = [self.model_matrix]\n current_object = self\n while current_object.parent is not None:\n model_matrices.append(current_object.parent.model_matrix)\n current_object = current_object.parent\n return np.linalg.multi_dot(list(reversed(model_matrices)))\n\n def wag(self, direction=RIGHT, axis=DOWN, wag_factor=1.0):\n for mob in self.family_members_with_points():\n alphas = np.dot(mob.get_points(), np.transpose(axis))\n alphas -= min(alphas)\n alphas /= max(alphas)\n alphas = alphas ** wag_factor\n mob.set_points(\n 
mob.get_points()\n + np.dot(\n alphas.reshape((len(alphas), 1)),\n np.array(direction).reshape((1, mob.dim)),\n )\n )\n return self\n\n # Positioning methods\n\n def center(self):\n self.shift(-self.get_center())\n return self\n\n def align_on_border(self, direction, buff=DEFAULT_MOBJECT_TO_EDGE_BUFFER):\n \"\"\"\n Direction just needs to be a vector pointing towards side or\n corner in the 2d plane.\n \"\"\"\n target_point = np.sign(direction) * (\n config[\"frame_x_radius\"],\n config[\"frame_y_radius\"],\n 0,\n )\n point_to_align = self.get_bounding_box_point(direction)\n shift_val = target_point - point_to_align - buff * np.array(direction)\n shift_val = shift_val * abs(np.sign(direction))\n self.shift(shift_val)\n return self\n\n def to_corner(self, corner=LEFT + DOWN, buff=DEFAULT_MOBJECT_TO_EDGE_BUFFER):\n return self.align_on_border(corner, buff)\n\n def to_edge(self, edge=LEFT, buff=DEFAULT_MOBJECT_TO_EDGE_BUFFER):\n return self.align_on_border(edge, buff)\n\n def next_to(\n self,\n mobject_or_point,\n direction=RIGHT,\n buff=DEFAULT_MOBJECT_TO_MOBJECT_BUFFER,\n aligned_edge=ORIGIN,\n submobject_to_align=None,\n index_of_submobject_to_align=None,\n coor_mask=np.array([1, 1, 1]),\n ):\n if isinstance(mobject_or_point, OpenGLMobject):\n mob = mobject_or_point\n if index_of_submobject_to_align is not None:\n target_aligner = mob[index_of_submobject_to_align]\n else:\n target_aligner = mob\n target_point = target_aligner.get_bounding_box_point(\n aligned_edge + direction\n )\n else:\n target_point = mobject_or_point\n if submobject_to_align is not None:\n aligner = submobject_to_align\n elif index_of_submobject_to_align is not None:\n aligner = self[index_of_submobject_to_align]\n else:\n aligner = self\n point_to_align = aligner.get_bounding_box_point(aligned_edge - direction)\n self.shift((target_point - point_to_align + buff * direction) * coor_mask)\n return self\n\n def shift_onto_screen(self, **kwargs):\n space_lengths = [config[\"frame_x_radius\"], config[\"frame_y_radius\"]]\n for vect in UP, DOWN, LEFT, RIGHT:\n dim = np.argmax(np.abs(vect))\n buff = kwargs.get(\"buff\", DEFAULT_MOBJECT_TO_EDGE_BUFFER)\n max_val = space_lengths[dim] - buff\n edge_center = self.get_edge_center(vect)\n if np.dot(edge_center, vect) > max_val:\n self.to_edge(vect, **kwargs)\n return self\n\n def is_off_screen(self):\n if self.get_left()[0] > config[\"frame_x_radius\"]:\n return True\n if self.get_right()[0] < -config[\"frame_x_radius\"]:\n return True\n if self.get_bottom()[1] > config[\"frame_y_radius\"]:\n return True\n if self.get_top()[1] < -config[\"frame_y_radius\"]:\n return True\n return False\n\n def stretch_about_point(self, factor, dim, point):\n return self.stretch(factor, dim, about_point=point)\n\n def stretch_in_place(self, factor, dim):\n # Now redundant with stretch\n return self.stretch(factor, dim)\n\n def rescale_to_fit(self, length, dim, stretch=False, **kwargs):\n old_length = self.length_over_dim(dim)\n if old_length == 0:\n return self\n if stretch:\n self.stretch(length / old_length, dim, **kwargs)\n else:\n self.scale(length / old_length, **kwargs)\n return self\n\n def stretch_to_fit_width(self, width, **kwargs):\n return self.rescale_to_fit(width, 0, stretch=True, **kwargs)\n\n def stretch_to_fit_height(self, height, **kwargs):\n return self.rescale_to_fit(height, 1, stretch=True, **kwargs)\n\n def stretch_to_fit_depth(self, depth, **kwargs):\n return self.rescale_to_fit(depth, 1, stretch=True, **kwargs)\n\n def set_width(self, width, stretch=False, **kwargs):\n return 
self.rescale_to_fit(width, 0, stretch=stretch, **kwargs)\n\n scale_to_fit_width = set_width\n\n def set_height(self, height, stretch=False, **kwargs):\n return self.rescale_to_fit(height, 1, stretch=stretch, **kwargs)\n\n scale_to_fit_height = set_height\n\n def set_depth(self, depth, stretch=False, **kwargs):\n return self.rescale_to_fit(depth, 2, stretch=stretch, **kwargs)\n\n def set_coord(self, value, dim, direction=ORIGIN):\n curr = self.get_coord(dim, direction)\n shift_vect = np.zeros(self.dim)\n shift_vect[dim] = value - curr\n self.shift(shift_vect)\n return self\n\n def set_x(self, x, direction=ORIGIN):\n return self.set_coord(x, 0, direction)\n\n def set_y(self, y, direction=ORIGIN):\n return self.set_coord(y, 1, direction)\n\n def set_z(self, z, direction=ORIGIN):\n return self.set_coord(z, 2, direction)\n\n def space_out_submobjects(self, factor=1.5, **kwargs):\n self.scale(factor, **kwargs)\n for submob in self.submobjects:\n submob.scale(1.0 / factor)\n return self\n\n def move_to(\n self, point_or_mobject, aligned_edge=ORIGIN, coor_mask=np.array([1, 1, 1])\n ):\n if isinstance(point_or_mobject, OpenGLMobject):\n target = point_or_mobject.get_bounding_box_point(aligned_edge)\n else:\n target = point_or_mobject\n point_to_align = self.get_bounding_box_point(aligned_edge)\n self.shift((target - point_to_align) * coor_mask)\n return self\n\n def replace(self, mobject, dim_to_match=0, stretch=False):\n if not mobject.get_num_points() and not mobject.submobjects:\n self.scale(0)\n return self\n if stretch:\n for i in range(self.dim):\n self.rescale_to_fit(mobject.length_over_dim(i), i, stretch=True)\n else:\n self.rescale_to_fit(\n mobject.length_over_dim(dim_to_match), dim_to_match, stretch=False\n )\n self.shift(mobject.get_center() - self.get_center())\n return self\n\n def surround(self, mobject, dim_to_match=0, stretch=False, buff=MED_SMALL_BUFF):\n self.replace(mobject, dim_to_match, stretch)\n length = mobject.length_over_dim(dim_to_match)\n self.scale((length + buff) / length)\n return self\n\n def put_start_and_end_on(self, start, end):\n curr_start, curr_end = self.get_start_and_end()\n curr_vect = curr_end - curr_start\n if np.all(curr_vect == 0):\n raise Exception(\"Cannot position endpoints of closed loop\")\n target_vect = np.array(end) - np.array(start)\n axis = (\n normalize(np.cross(curr_vect, target_vect))\n if np.linalg.norm(np.cross(curr_vect, target_vect)) != 0\n else OUT\n )\n self.scale(\n np.linalg.norm(target_vect) / np.linalg.norm(curr_vect),\n about_point=curr_start,\n )\n self.rotate(\n angle_between_vectors(curr_vect, target_vect),\n about_point=curr_start,\n axis=axis,\n )\n self.shift(start - curr_start)\n return self\n\n # Color functions\n\n def set_rgba_array(self, color=None, opacity=None, name=\"rgbas\", recurse=True):\n if color is not None:\n rgbs = np.array([color_to_rgb(c) for c in listify(color)])\n if opacity is not None:\n opacities = listify(opacity)\n\n # Color only\n if color is not None and opacity is None:\n for mob in self.get_family(recurse):\n mob.data[name] = resize_array(mob.data[name], len(rgbs))\n mob.data[name][:, :3] = rgbs\n\n # Opacity only\n if color is None and opacity is not None:\n for mob in self.get_family(recurse):\n mob.data[name] = resize_array(mob.data[name], len(opacities))\n mob.data[name][:, 3] = opacities\n\n # Color and opacity\n if color is not None and opacity is not None:\n rgbas = np.array([[*rgb, o] for rgb, o in zip(*make_even(rgbs, opacities))])\n for mob in self.get_family(recurse):\n mob.data[name] 
= rgbas.copy()\n return self\n\n def set_color(self, color, opacity=None, recurse=True):\n self.set_rgba_array(color, opacity, recurse=False)\n # Recurse to submobjects differently from how set_rgba_array\n # in case they implement set_color differently\n if recurse:\n for submob in self.submobjects:\n submob.set_color(color, recurse=True)\n return self\n\n def set_opacity(self, opacity, recurse=True):\n self.set_rgba_array(color=None, opacity=opacity, recurse=False)\n if recurse:\n for submob in self.submobjects:\n submob.set_opacity(opacity, recurse=True)\n return self\n\n def get_color(self):\n return rgb_to_hex(self.rgbas[0, :3])\n\n def get_opacity(self):\n return self.rgbas[0, 3]\n\n def set_color_by_gradient(self, *colors):\n self.set_submobject_colors_by_gradient(*colors)\n return self\n\n def set_submobject_colors_by_gradient(self, *colors):\n if len(colors) == 0:\n raise Exception(\"Need at least one color\")\n elif len(colors) == 1:\n return self.set_color(*colors)\n\n # mobs = self.family_members_with_points()\n mobs = self.submobjects\n new_colors = color_gradient(colors, len(mobs))\n\n for mob, color in zip(mobs, new_colors):\n mob.set_color(color)\n return self\n\n def fade(self, darkness=0.5, recurse=True):\n self.set_opacity(1.0 - darkness, recurse=recurse)\n\n def get_gloss(self):\n return self.gloss\n\n def set_gloss(self, gloss, recurse=True):\n for mob in self.get_family(recurse):\n mob.gloss = gloss\n return self\n\n def get_shadow(self):\n return self.shadow\n\n def set_shadow(self, shadow, recurse=True):\n for mob in self.get_family(recurse):\n mob.shadow = shadow\n return self\n\n # Background rectangle\n\n def add_background_rectangle(self, color=None, opacity=0.75, **kwargs):\n # TODO, this does not behave well when the mobject has points,\n # since it gets displayed on top\n from ..mobject.shape_matchers import BackgroundRectangle\n\n self.background_rectangle = BackgroundRectangle(\n self, color=color, fill_opacity=opacity, **kwargs\n )\n self.add_to_back(self.background_rectangle)\n return self\n\n def add_background_rectangle_to_submobjects(self, **kwargs):\n for submobject in self.submobjects:\n submobject.add_background_rectangle(**kwargs)\n return self\n\n def add_background_rectangle_to_family_members_with_points(self, **kwargs):\n for mob in self.family_members_with_points():\n mob.add_background_rectangle(**kwargs)\n return self\n\n # Getters\n\n def get_bounding_box_point(self, direction):\n bb = self.get_bounding_box()\n indices = (np.sign(direction) + 1).astype(int)\n return np.array([bb[indices[i]][i] for i in range(3)])\n\n def get_edge_center(self, direction):\n return self.get_bounding_box_point(direction)\n\n def get_corner(self, direction):\n return self.get_bounding_box_point(direction)\n\n def get_center(self):\n return self.get_bounding_box()[1]\n\n def get_center_of_mass(self):\n return self.get_all_points().mean(0)\n\n def get_boundary_point(self, direction):\n all_points = self.get_all_points()\n boundary_directions = all_points - self.get_center()\n norms = np.linalg.norm(boundary_directions, axis=1)\n boundary_directions /= np.repeat(norms, 3).reshape((len(norms), 3))\n index = np.argmax(np.dot(boundary_directions, np.array(direction).T))\n return all_points[index]\n\n def get_continuous_bounding_box_point(self, direction):\n dl, center, ur = self.get_bounding_box()\n corner_vect = ur - center\n return center + direction / np.max(\n np.abs(\n np.true_divide(\n direction,\n corner_vect,\n out=np.zeros(len(direction)),\n 
where=((corner_vect) != 0),\n )\n )\n )\n\n def get_top(self):\n return self.get_edge_center(UP)\n\n def get_bottom(self):\n return self.get_edge_center(DOWN)\n\n def get_right(self):\n return self.get_edge_center(RIGHT)\n\n def get_left(self):\n return self.get_edge_center(LEFT)\n\n def get_zenith(self):\n return self.get_edge_center(OUT)\n\n def get_nadir(self):\n return self.get_edge_center(IN)\n\n def length_over_dim(self, dim):\n bb = self.get_bounding_box()\n return abs((bb[2] - bb[0])[dim])\n\n def get_width(self):\n return self.length_over_dim(0)\n\n def get_height(self):\n return self.length_over_dim(1)\n\n def get_depth(self):\n return self.length_over_dim(2)\n\n def get_coord(self, dim, direction=ORIGIN):\n \"\"\"\n Meant to generalize get_x, get_y, get_z\n \"\"\"\n return self.get_bounding_box_point(direction)[dim]\n\n def get_x(self, direction=ORIGIN):\n return self.get_coord(0, direction)\n\n def get_y(self, direction=ORIGIN):\n return self.get_coord(1, direction)\n\n def get_z(self, direction=ORIGIN):\n return self.get_coord(2, direction)\n\n def get_start(self):\n self.throw_error_if_no_points()\n return np.array(self.points[0])\n\n def get_end(self):\n self.throw_error_if_no_points()\n return np.array(self.points[-1])\n\n def get_start_and_end(self):\n return self.get_start(), self.get_end()\n\n def point_from_proportion(self, alpha):\n points = self.points\n i, subalpha = integer_interpolate(0, len(points) - 1, alpha)\n return interpolate(points[i], points[i + 1], subalpha)\n\n def pfp(self, alpha):\n \"\"\"Abbreviation for point_from_proportion\"\"\"\n return self.point_from_proportion(alpha)\n\n def get_pieces(self, n_pieces):\n template = self.copy()\n template.set_submobjects([])\n alphas = np.linspace(0, 1, n_pieces + 1)\n return OpenGLGroup(\n *[\n template.copy().pointwise_become_partial(self, a1, a2)\n for a1, a2 in zip(alphas[:-1], alphas[1:])\n ]\n )\n\n def get_z_index_reference_point(self):\n # TODO, better place to define default z_index_group?\n z_index_group = getattr(self, \"z_index_group\", self)\n return z_index_group.get_center()\n\n # Match other mobject properties\n\n def match_color(self, mobject):\n return self.set_color(mobject.get_color())\n\n def match_dim_size(self, mobject, dim, **kwargs):\n return self.rescale_to_fit(mobject.length_over_dim(dim), dim, **kwargs)\n\n def match_width(self, mobject, **kwargs):\n return self.match_dim_size(mobject, 0, **kwargs)\n\n def match_height(self, mobject, **kwargs):\n return self.match_dim_size(mobject, 1, **kwargs)\n\n def match_depth(self, mobject, **kwargs):\n return self.match_dim_size(mobject, 2, **kwargs)\n\n def match_coord(self, mobject, dim, direction=ORIGIN):\n return self.set_coord(\n mobject.get_coord(dim, direction),\n dim=dim,\n direction=direction,\n )\n\n def match_x(self, mobject, direction=ORIGIN):\n return self.match_coord(mobject, 0, direction)\n\n def match_y(self, mobject, direction=ORIGIN):\n return self.match_coord(mobject, 1, direction)\n\n def match_z(self, mobject, direction=ORIGIN):\n return self.match_coord(mobject, 2, direction)\n\n def align_to(self, mobject_or_point, direction=ORIGIN):\n \"\"\"\n Examples:\n mob1.align_to(mob2, UP) moves mob1 vertically so that its\n top edge lines ups with mob2's top edge.\n\n mob1.align_to(mob2, alignment_vect = RIGHT) moves mob1\n horizontally so that it's center is directly above/below\n the center of mob2\n \"\"\"\n if isinstance(mobject_or_point, OpenGLMobject):\n point = mobject_or_point.get_bounding_box_point(direction)\n else:\n 
point = mobject_or_point\n\n for dim in range(self.dim):\n if direction[dim] != 0:\n self.set_coord(point[dim], dim, direction)\n return self\n\n def get_group_class(self):\n return OpenGLGroup\n\n # Alignment\n\n def align_data_and_family(self, mobject):\n self.align_family(mobject)\n self.align_data(mobject)\n\n def align_data(self, mobject):\n # In case any data arrays get resized when aligned to shader data\n # self.refresh_shader_data()\n for mob1, mob2 in zip(self.get_family(), mobject.get_family()):\n # Separate out how points are treated so that subclasses\n # can handle that case differently if they choose\n mob1.align_points(mob2)\n for key in mob1.data.keys() & mob2.data.keys():\n if key == \"points\":\n continue\n arr1 = mob1.data[key]\n arr2 = mob2.data[key]\n if len(arr2) > len(arr1):\n mob1.data[key] = resize_preserving_order(arr1, len(arr2))\n elif len(arr1) > len(arr2):\n mob2.data[key] = resize_preserving_order(arr2, len(arr1))\n\n def align_points(self, mobject):\n max_len = max(self.get_num_points(), mobject.get_num_points())\n for mob in (self, mobject):\n mob.resize_points(max_len, resize_func=resize_preserving_order)\n return self\n\n def align_family(self, mobject):\n mob1 = self\n mob2 = mobject\n n1 = len(mob1)\n n2 = len(mob2)\n if n1 != n2:\n mob1.add_n_more_submobjects(max(0, n2 - n1))\n mob2.add_n_more_submobjects(max(0, n1 - n2))\n # Recurse\n for sm1, sm2 in zip(mob1.submobjects, mob2.submobjects):\n sm1.align_family(sm2)\n return self\n\n def push_self_into_submobjects(self):\n copy = self.deepcopy()\n copy.set_submobjects([])\n self.resize_points(0)\n self.add(copy)\n return self\n\n def add_n_more_submobjects(self, n):\n if n == 0:\n return self\n\n curr = len(self.submobjects)\n if curr == 0:\n # If empty, simply add n point mobjects\n null_mob = self.copy()\n null_mob.set_points([self.get_center()])\n self.set_submobjects([null_mob.copy() for k in range(n)])\n return self\n target = curr + n\n repeat_indices = (np.arange(target) * curr) // target\n split_factors = [(repeat_indices == i).sum() for i in range(curr)]\n new_submobs = []\n for submob, sf in zip(self.submobjects, split_factors):\n new_submobs.append(submob)\n for _ in range(1, sf):\n new_submob = submob.copy()\n # If the submobject is at all transparent, then\n # make the copy completely transparent\n if submob.get_opacity() < 1:\n new_submob.set_opacity(0)\n new_submobs.append(new_submob)\n self.set_submobjects(new_submobs)\n return self\n\n # Interpolate\n\n def interpolate(self, mobject1, mobject2, alpha, path_func=straight_path):\n for key in self.data:\n if key in self.locked_data_keys:\n continue\n if len(self.data[key]) == 0:\n continue\n if key not in mobject1.data or key not in mobject2.data:\n continue\n\n if key in (\"points\", \"bounding_box\"):\n func = path_func\n else:\n func = interpolate\n\n self.data[key][:] = func(mobject1.data[key], mobject2.data[key], alpha)\n for key in self.uniforms:\n self.uniforms[key] = interpolate(\n mobject1.uniforms[key], mobject2.uniforms[key], alpha\n )\n return self\n\n def pointwise_become_partial(self, mobject, a, b):\n \"\"\"\n Set points in such a way as to become only\n part of mobject.\n Inputs 0 <= a < b <= 1 determine what portion\n of mobject to become.\n \"\"\"\n pass # To implement in subclass\n\n def become(self, mobject):\n \"\"\"\n Edit all data and submobjects to be identical\n to another mobject\n \"\"\"\n self.align_family(mobject)\n for sm1, sm2 in zip(self.get_family(), mobject.get_family()):\n sm1.set_data(sm2.data)\n 
sm1.set_uniforms(sm2.uniforms)\n self.refresh_bounding_box(recurse_down=True)\n return self\n\n # Locking data\n\n def lock_data(self, keys):\n \"\"\"\n To speed up some animations, particularly transformations,\n it can be handy to acknowledge which pieces of data\n won't change during the animation so that calls to\n interpolate can skip this, and so that it's not\n read into the shader_wrapper objects needlessly\n \"\"\"\n if self.has_updaters:\n return\n # Be sure shader data has most up to date information\n self.refresh_shader_data()\n self.locked_data_keys = set(keys)\n\n def lock_matching_data(self, mobject1, mobject2):\n for sm, sm1, sm2 in zip(\n self.get_family(), mobject1.get_family(), mobject2.get_family()\n ):\n keys = sm.data.keys() & sm1.data.keys() & sm2.data.keys()\n sm.lock_data(\n list(\n filter(\n lambda key: np.all(sm1.data[key] == sm2.data[key]),\n keys,\n )\n )\n )\n return self\n\n def unlock_data(self):\n for mob in self.get_family():\n mob.locked_data_keys = set()\n\n # Operations touching shader uniforms\n\n def affects_shader_info_id(func):\n @wraps(func)\n def wrapper(self):\n for mob in self.get_family():\n func(mob)\n # mob.refresh_shader_wrapper_id()\n return self\n\n return wrapper\n\n @affects_shader_info_id\n def fix_in_frame(self):\n self.is_fixed_in_frame = 1.0\n return self\n\n @affects_shader_info_id\n def unfix_from_frame(self):\n self.is_fixed_in_frame = 0.0\n return self\n\n @affects_shader_info_id\n def apply_depth_test(self):\n self.depth_test = True\n return self\n\n @affects_shader_info_id\n def deactivate_depth_test(self):\n self.depth_test = False\n return self\n\n # Shader code manipulation\n\n def replace_shader_code(self, old, new):\n # TODO, will this work with VMobject structure, given\n # that it does not simpler return shader_wrappers of\n # family?\n for wrapper in self.get_shader_wrapper_list():\n wrapper.replace_code(old, new)\n return self\n\n def set_color_by_code(self, glsl_code):\n \"\"\"\n Takes a snippet of code and inserts it into a\n context which has the following variables:\n vec4 color, vec3 point, vec3 unit_normal.\n The code should change the color variable\n \"\"\"\n self.replace_shader_code(\"///// INSERT COLOR FUNCTION HERE /////\", glsl_code)\n return self\n\n def set_color_by_xyz_func(\n self, glsl_snippet, min_value=-5.0, max_value=5.0, colormap=\"viridis\"\n ):\n \"\"\"\n Pass in a glsl expression in terms of x, y and z which returns\n a float.\n \"\"\"\n # TODO, add a version of this which changes the point data instead\n # of the shader code\n for char in \"xyz\":\n glsl_snippet = glsl_snippet.replace(char, \"point.\" + char)\n rgb_list = get_colormap_list(colormap)\n self.set_color_by_code(\n \"color.rgb = float_to_color({}, {}, {}, {});\".format(\n glsl_snippet,\n float(min_value),\n float(max_value),\n get_colormap_code(rgb_list),\n )\n )\n return self\n\n # For shader data\n\n # def refresh_shader_wrapper_id(self):\n # self.shader_wrapper.refresh_id()\n # return self\n\n def get_shader_wrapper(self):\n from ..renderer.shader_wrapper import ShaderWrapper\n\n self.shader_wrapper = ShaderWrapper(\n vert_data=self.get_shader_data(),\n vert_indices=self.get_shader_vert_indices(),\n uniforms=self.get_shader_uniforms(),\n depth_test=self.depth_test,\n texture_paths=self.texture_paths,\n render_primitive=self.render_primitive,\n shader_folder=self.__class__.shader_folder,\n )\n return self.shader_wrapper\n\n def get_shader_wrapper_list(self):\n shader_wrappers = it.chain(\n [self.get_shader_wrapper()],\n 
*[sm.get_shader_wrapper_list() for sm in self.submobjects],\n )\n batches = batch_by_property(shader_wrappers, lambda sw: sw.get_id())\n\n result = []\n for wrapper_group, _ in batches:\n shader_wrapper = wrapper_group[0]\n if not shader_wrapper.is_valid():\n continue\n shader_wrapper.combine_with(*wrapper_group[1:])\n if len(shader_wrapper.vert_data) > 0:\n result.append(shader_wrapper)\n return result\n\n def check_data_alignment(self, array, data_key):\n # Makes sure that self.data[key] can be broadcast into\n # the given array, meaning its length has to be either 1\n # or the length of the array\n d_len = len(self.data[data_key])\n if d_len != 1 and d_len != len(array):\n self.data[data_key] = resize_with_interpolation(\n self.data[data_key], len(array)\n )\n return self\n\n def get_resized_shader_data_array(self, length):\n # If possible, try to populate an existing array, rather\n # than recreating it each frame\n points = self.points\n shader_data = np.zeros(len(points), dtype=self.shader_dtype)\n return shader_data\n\n def read_data_to_shader(self, shader_data, shader_data_key, data_key):\n if data_key in self.locked_data_keys:\n return\n self.check_data_alignment(shader_data, data_key)\n shader_data[shader_data_key] = self.data[data_key]\n\n def get_shader_data(self):\n shader_data = self.get_resized_shader_data_array(self.get_num_points())\n self.read_data_to_shader(shader_data, \"point\", \"points\")\n return shader_data\n\n def refresh_shader_data(self):\n self.get_shader_data()\n\n def get_shader_uniforms(self):\n return self.uniforms\n\n def get_shader_vert_indices(self):\n return self.shader_indices\n\n # Event Handlers\n \"\"\"\n Event handling follows the Event Bubbling model of DOM in javascript.\n Return false to stop the event bubbling.\n To learn more visit https://www.quirksmode.org/js/events_order.html\n\n Event Callback Argument is a callable function taking two arguments:\n 1. Mobject\n 2. 
EventData\n \"\"\"\n\n def init_event_listners(self):\n self.event_listners = []\n\n def add_event_listner(self, event_type, event_callback):\n event_listner = EventListner(self, event_type, event_callback)\n self.event_listners.append(event_listner)\n EVENT_DISPATCHER.add_listner(event_listner)\n return self\n\n def remove_event_listner(self, event_type, event_callback):\n event_listner = EventListner(self, event_type, event_callback)\n while event_listner in self.event_listners:\n self.event_listners.remove(event_listner)\n EVENT_DISPATCHER.remove_listner(event_listner)\n return self\n\n def clear_event_listners(self, recurse=True):\n self.event_listners = []\n if recurse:\n for submob in self.submobjects:\n submob.clear_event_listners(recurse=recurse)\n return self\n\n def get_event_listners(self):\n return self.event_listners\n\n def get_family_event_listners(self):\n return list(it.chain(*[sm.get_event_listners() for sm in self.get_family()]))\n\n def get_has_event_listner(self):\n return any(mob.get_event_listners() for mob in self.get_family())\n\n def add_mouse_motion_listner(self, callback):\n self.add_event_listner(EventType.MouseMotionEvent, callback)\n\n def remove_mouse_motion_listner(self, callback):\n self.remove_event_listner(EventType.MouseMotionEvent, callback)\n\n def add_mouse_press_listner(self, callback):\n self.add_event_listner(EventType.MousePressEvent, callback)\n\n def remove_mouse_press_listner(self, callback):\n self.remove_event_listner(EventType.MousePressEvent, callback)\n\n def add_mouse_release_listner(self, callback):\n self.add_event_listner(EventType.MouseReleaseEvent, callback)\n\n def remove_mouse_release_listner(self, callback):\n self.remove_event_listner(EventType.MouseReleaseEvent, callback)\n\n def add_mouse_drag_listner(self, callback):\n self.add_event_listner(EventType.MouseDragEvent, callback)\n\n def remove_mouse_drag_listner(self, callback):\n self.remove_event_listner(EventType.MouseDragEvent, callback)\n\n def add_mouse_scroll_listner(self, callback):\n self.add_event_listner(EventType.MouseScrollEvent, callback)\n\n def remove_mouse_scroll_listner(self, callback):\n self.remove_event_listner(EventType.MouseScrollEvent, callback)\n\n def add_key_press_listner(self, callback):\n self.add_event_listner(EventType.KeyPressEvent, callback)\n\n def remove_key_press_listner(self, callback):\n self.remove_event_listner(EventType.KeyPressEvent, callback)\n\n def add_key_release_listner(self, callback):\n self.add_event_listner(EventType.KeyReleaseEvent, callback)\n\n def remove_key_release_listner(self, callback):\n self.remove_event_listner(EventType.KeyReleaseEvent, callback)\n\n # Errors\n\n def throw_error_if_no_points(self):\n if not self.has_points():\n message = \"Cannot call Mobject.{} \" + \"for a Mobject with no points\"\n caller_name = sys._getframe(1).f_code.co_name\n raise Exception(message.format(caller_name))\n\n\nclass OpenGLGroup(OpenGLMobject):\n def __init__(self, *mobjects, **kwargs):\n if not all([isinstance(m, OpenGLMobject) for m in mobjects]):\n raise Exception(\"All submobjects must be of type Mobject\")\n OpenGLMobject.__init__(self, **kwargs)\n self.add(*mobjects)\n\n\nclass OpenGLPoint(OpenGLMobject):\n def __init__(\n self, location=ORIGIN, artificial_width=1e-6, artificial_height=1e-6, **kwargs\n ):\n self.artificial_width = artificial_width\n self.artificial_height = artificial_height\n OpenGLMobject.__init__(self, **kwargs)\n self.set_location(location)\n\n def get_width(self):\n return self.artificial_width\n\n def 
get_height(self):\n return self.artificial_height\n\n def get_location(self):\n return self.points[0].copy()\n\n def get_bounding_box_point(self, *args, **kwargs):\n return self.get_location()\n\n def set_location(self, new_loc):\n self.set_points(np.array(new_loc, ndmin=2, dtype=float))\n\n\nclass _AnimationBuilder:\n def __init__(self, mobject):\n self.mobject = mobject\n self.mobject.generate_target()\n\n self.overridden_animation = None\n self.is_chaining = False\n self.methods = []\n\n # Whether animation args can be passed\n self.cannot_pass_args = False\n self.anim_args = {}\n\n def __call__(self, **kwargs):\n if self.cannot_pass_args:\n raise ValueError(\n \"Animation arguments must be passed before accessing methods and can only be passed once\"\n )\n\n self.anim_args = kwargs\n self.cannot_pass_args = True\n\n return self\n\n def __getattr__(self, method_name):\n method = getattr(self.mobject.target, method_name)\n self.methods.append(method)\n has_overridden_animation = hasattr(method, \"_override_animate\")\n\n if (self.is_chaining and has_overridden_animation) or self.overridden_animation:\n raise NotImplementedError(\n \"Method chaining is currently not supported for \"\n \"overridden animations\"\n )\n\n def update_target(*method_args, **method_kwargs):\n if has_overridden_animation:\n self.overridden_animation = method._override_animate(\n self.mobject,\n *method_args,\n anim_args=self.anim_args,\n **method_kwargs,\n )\n else:\n method(*method_args, **method_kwargs)\n return self\n\n self.is_chaining = True\n self.cannot_pass_args = True\n\n return update_target\n\n def build(self):\n from ..animation.transform import _MethodAnimation\n\n if self.overridden_animation:\n anim = self.overridden_animation\n else:\n anim = _MethodAnimation(self.mobject, self.methods)\n\n for attr, value in self.anim_args.items():\n setattr(anim, attr, value)\n\n return anim\n\n\ndef override_animate(method):\n def decorator(animation_method):\n method._override_animate = animation_method\n return animation_method\n\n return decorator\n"
] |
[
[
"numpy.cross",
"numpy.dot",
"numpy.abs",
"numpy.linspace",
"numpy.arange",
"numpy.eye",
"numpy.linalg.norm",
"numpy.all",
"numpy.sign",
"numpy.identity",
"numpy.transpose",
"numpy.repeat",
"numpy.array",
"numpy.zeros",
"numpy.vstack"
]
] |
Sanofi-Public/IDD-papers-avoiding_failure_modes
|
[
"3463a277c3be20ce08602b912e3fa73388097e9b"
] |
[
"guacamol_baselines/smiles_lstm_ppo/goal_directed_generation.py"
] |
[
"from __future__ import print_function\n\nimport argparse\nimport json\nimport logging\nimport os\n\nimport numpy as np\nfrom guacamol.assess_goal_directed_generation import assess_goal_directed_generation\nfrom guacamol.utils.helpers import setup_default_logger\n\nfrom smiles_lstm_ppo.ppo_directed_generator import PPODirectedGenerator\n\nlogger = logging.getLogger(__name__)\nlogger.addHandler(logging.NullHandler())\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--n_jobs', type=int, default=-1)\n parser.add_argument('--episode_size', type=int, default=8192)\n parser.add_argument('--batch_size', type=int, default=1024)\n parser.add_argument('--entropy_weight', type=int, default=1)\n parser.add_argument('--kl_div_weight', type=int, default=10)\n parser.add_argument('--output_dir', default=None)\n parser.add_argument('--clip_param', type=int, default=0.2)\n parser.add_argument('--num_epochs', type=int, default=20)\n parser.add_argument('--model_path', default=None)\n parser.add_argument('--seed', type=int, default=42)\n parser.add_argument('--suite', default='v2')\n\n args = parser.parse_args()\n\n np.random.seed(args.seed)\n\n setup_default_logger()\n\n if args.output_dir is None:\n args.output_dir = os.path.dirname(os.path.realpath(__file__))\n\n if args.model_path is None:\n dir_path = os.path.dirname(os.path.realpath(__file__))\n args.model_path = os.path.join(dir_path, 'pretrained_model', 'model_final_0.473.pt')\n\n # save command line args\n with open(os.path.join(args.output_dir, 'goal_directed_params.json'), 'w') as jf:\n json.dump(vars(args), jf, sort_keys=True, indent=4)\n\n optimiser = PPODirectedGenerator(pretrained_model_path=args.model_path,\n num_epochs=args.num_epochs,\n episode_size=args.episode_size,\n batch_size=args.batch_size,\n entropy_weight=args.entropy_weight,\n kl_div_weight=args.kl_div_weight,\n clip_param=args.clip_param\n )\n\n json_file_path = os.path.join(args.output_dir, 'goal_directed_results.json')\n assess_goal_directed_generation(optimiser, json_output_file=json_file_path,\n benchmark_version=args.suite)\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"numpy.random.seed"
]
] |
dangom/niworkflows
|
[
"e1eabe54e963b2829c1f7dc71abe80ca5767d687"
] |
[
"niworkflows/interfaces/tests/test_nibabel.py"
] |
[
"\"\"\"test nibabel interfaces.\"\"\"\nimport os\nimport numpy as np\nimport nibabel as nb\n\nfrom ..nibabel import Binarize\n\n\ndef test_Binarize(tmp_path):\n \"\"\"Test binarization interface.\"\"\"\n os.chdir(str(tmp_path))\n\n mask = np.zeros((20, 20, 20), dtype=bool)\n mask[5:15, 5:15, 5:15] = bool\n\n data = np.zeros_like(mask, dtype='float32')\n data[mask] = np.random.gamma(2, size=mask.sum())\n\n in_file = tmp_path / 'input.nii.gz'\n nb.Nifti1Image(data, np.eye(4), None).to_filename(str(in_file))\n\n binif = Binarize(thresh_low=0.0, in_file=str(in_file)).run()\n newmask = nb.load(binif.outputs.out_mask).get_fdata().astype(bool)\n assert np.all(mask == newmask)\n"
] |
[
[
"numpy.all",
"numpy.eye",
"numpy.zeros",
"numpy.zeros_like"
]
] |
maodoc33/ReinforcementLearning
|
[
"11cea9bd4b3c8482811a825c839cdb2cabe73206"
] |
[
"footballTests/GfootballEnv.py"
] |
[
"# from __future__ import absolute_import\n# from __future__ import division\n# from __future__ import print_function\nimport gym\nimport tensorflow as tf\nimport numpy as np\nfrom tf_agents.trajectories import time_step as ts\n\nimport abc\nimport tensorflow as tf\nimport numpy as np\nfrom tf_agents.environments import py_environment\nfrom tf_agents.environments import tf_environment\nfrom tf_agents.environments import tf_py_environment\nfrom tf_agents.environments import utils\nfrom tf_agents.specs import array_spec\nfrom tf_agents.environments import wrappers\nfrom tf_agents.environments import suite_gym\nfrom tf_agents.trajectories import time_step as ts\nfrom tf_agents.utils import common\n\nimport gfootball\nimport gfootball.env as football_env\n\ndata_dic = {\n 'active': array_spec.BoundedArraySpec(shape=(1,), dtype=np.int32, minimum=-1, maximum=11, name='active'),\n 'ball': array_spec.BoundedArraySpec(shape=(3,), dtype=np.float32, name='ball'),\n 'ball_direction': array_spec.BoundedArraySpec(shape=(3,), dtype=np.float32, name='ball_direction'),\n 'ball_owned_player': array_spec.BoundedArraySpec(shape=(1,), dtype=np.int32, minimum=0, maximum=11,name='ball_owned_player'),\n 'ball_owned_team': array_spec.BoundedArraySpec(shape=(1,), dtype=np.int32, minimum=0, maximum=2,name='ball_owned_team'),\n 'ball_rotation': array_spec.BoundedArraySpec(shape=(3,), dtype=np.float32, name='ball_rotation'),\n 'designated': array_spec.BoundedArraySpec(shape=(1,), dtype=np.int32, minimum=-1, maximum=11, name='designated'),\n 'game_mode': array_spec.BoundedArraySpec(shape=(1,), dtype=np.int32, minimum=0, maximum=6, name='game_mode'),\n 'left_team': array_spec.BoundedArraySpec(shape=(11, 2), dtype=np.float32, minimum=-1.5, maximum=1.5, name='left_team'),\n 'left_team_active': array_spec.BoundedArraySpec(shape=(11,), dtype=np.int32, minimum=0, maximum=1, name='left_team_active'),\n 'left_team_direction': array_spec.BoundedArraySpec(shape=(11, 2), dtype=np.float32, minimum=-1.5, maximum=1.5,name='left_team_direction'),\n 'left_team_roles': array_spec.BoundedArraySpec(shape=(1,), dtype=np.int32, minimum=0, maximum=10, name='left_team_roles'),\n 'left_team_tired_factor': array_spec.BoundedArraySpec(shape=(11,), dtype=np.float32, minimum=0.0, name='left_team_tired_factor'),\n 'left_team_yellow_card': array_spec.BoundedArraySpec(shape=(11,), dtype=np.int32, minimum=0, maximum=1,name='left_team_yellow_card'),\n 'right_team_active': array_spec.BoundedArraySpec(shape=(11,), dtype=np.int32, minimum=0, maximum=1, name='right_team_active'),\n 'score': array_spec.BoundedArraySpec(shape=(2,), dtype=np.float32, minimum=0.0, name='score'),\n 'steps_left': array_spec.BoundedArraySpec(shape=(1,), dtype=np.float32, minimum=0.0, name='steps_left'),\n 'sticky_actions': array_spec.BoundedArraySpec(shape=(10,), dtype=np.int32, minimum=0, maximum=1,name='sticky_actions'),\n 'right_team': array_spec.BoundedArraySpec(shape=(11, 2), dtype=np.float32, minimum=-1.5, maximum=1.5,name='right_team'),\n 'right_team_direction': array_spec.BoundedArraySpec(shape=(11, 2), dtype=np.float32, minimum=-1.5, maximum=1.5,name='right_team_direction'),\n 'right_team_tired_factor': array_spec.BoundedArraySpec(shape=(11,), dtype=np.float32, minimum=0.0,name='right_team_tired_factor'),\n 'right_team_roles': array_spec.BoundedArraySpec(shape=(1,), dtype=np.int32, minimum=0, maximum=10,name='right_team_roles'),\n 'right_team_yellow_card': array_spec.BoundedArraySpec(shape=(11,), dtype=np.int32, minimum=0, 
maximum=1,name='right_team_yellow_card'),\n}\n\nclass GfootballEnv(py_environment.PyEnvironment):\n\n @staticmethod\n def convert_observation_to_tf(obs, data_cfg, propertie_list=[]):\n\n if (len(propertie_list) == 0):\n propertie_list = ['right_team_direction', 'right_team_tired_factor',\n 'left_team_roles', 'left_team_direction', 'ball_direction',\n 'ball_owned_player', 'right_team_yellow_card', 'ball',\n 'right_team', 'steps_left', 'ball_rotation',\n 'ball_owned_team', 'game_mode', 'left_team_yellow_card',\n 'left_team', 'right_team_roles', 'right_team_active',\n 'left_team_active', 'left_team_tired_factor', 'score',\n 'designated', 'active', 'sticky_actions']\n rp = {}\n\n for name in propertie_list:\n\n if (('ball_owned_player' == name) and (obs[0][name] == -1)):\n obs[0][name] = 11\n if (('ball_owned_team' == name) and (obs[0][name] == -1)):\n obs[0][name] = 2\n\n if ('right_team_roles' == name):\n if ((obs[0]['ball_owned_team'] == 1) and (\n (obs[0]['ball_owned_player'] != -1) or (obs[0]['ball_owned_player'] != 11))):\n obs[0][name] = obs[0][name][obs[0]['ball_owned_player']]\n else:\n obs[0][name] = 10\n\n if ('left_team_roles' == name):\n if ((obs[0]['ball_owned_team'] == 0) and (\n (obs[0]['ball_owned_player'] != -1) or (obs[0]['ball_owned_player'] != 11))):\n obs[0][name] = obs[0][name][obs[0]['ball_owned_player']]\n else:\n obs[0][name] = 10\n\n if (data_cfg[name].shape == (1,)):\n rp[name] = np.array([np.squeeze(obs[0][name])]).astype(data_cfg[name].dtype)\n else:\n a = np.zeros(data_cfg[name].shape, dtype=data_cfg[name].dtype)\n a[:len(obs[0][name])] = obs[0][name]\n obs[0][name] = a.copy()\n rp[name] = np.array(obs[0][name]).astype(data_cfg[name].dtype)\n\n return rp\n\n @staticmethod\n def construct_obs_spec(data_cfg, cfg):\n rp = {}\n\n for name in cfg:\n rp[name] = data_cfg[name]\n\n return rp\n\n def __init__(self, propertie_list=[], scenario='11_vs_11_kaggle'):\n super().__init__()\n self._data_dic = data_dic\n\n if (len(propertie_list) == 0):\n propertie_list = ['right_team_direction', 'right_team_tired_factor',\n 'left_team_roles', 'left_team_direction', 'ball_direction',\n 'ball_owned_player', 'right_team_yellow_card', 'ball',\n 'right_team', 'steps_left', 'ball_rotation',\n 'ball_owned_team', 'game_mode', 'left_team_yellow_card',\n 'left_team', 'right_team_roles', 'right_team_active',\n 'left_team_active', 'left_team_tired_factor', 'score',\n 'designated', 'active', 'sticky_actions']\n\n self.propertie_list = propertie_list\n\n self.env = football_env.create_environment(\n env_name=scenario,\n stacked=False,\n representation='raw',\n rewards='scoring, checkpoints',\n write_goal_dumps=False,\n write_full_episode_dumps=False,\n render=False,\n write_video=False,\n dump_frequency=1,\n logdir='./',\n extra_players=None,\n number_of_left_players_agent_controls=1,\n number_of_right_players_agent_controls=0\n )\n\n self._state = self.convert_observation_to_tf(self.env.reset(), self._data_dic, self.propertie_list)\n\n self._action_spec = array_spec.BoundedArraySpec(\n shape=(), dtype=np.int32, minimum=0, maximum=18, name='action')\n\n # representation of the enviroment: price + open position state\n self._observation_spec = self.construct_obs_spec(self._data_dic, self.propertie_list)\n\n # used for idndication of the end of episode\n self._episode_ended = False\n pass\n\n def action_spec(self):\n return self._action_spec\n\n def observation_spec(self):\n return self._observation_spec\n\n def _reset(self):\n self._episode_ended = False\n self._state = 
self.convert_observation_to_tf(self.env.reset(), self._data_dic, self.propertie_list)\n return ts.restart(self._state)\n\n def _step(self, action):\n\n if self._episode_ended:\n # The last action ended the episode. Ignore the current action and start\n # a new episode.\n return self.reset()\n\n self._state, reward, self._episode_ended, info = self.env.step(action)\n self._state = self.convert_observation_to_tf(self._state, self._data_dic, self.propertie_list)\n\n if self._episode_ended:\n return ts.termination(self._state, reward)\n else:\n return ts.transition(self._state, reward=reward, discount=1.0)\n\n\n"
] |
[
[
"numpy.squeeze",
"numpy.array",
"numpy.zeros"
]
] |
JoaoLages/relational-transformers
|
[
"7bf401afe8e2b12531dc75f5402c2f9ffd99aaf7"
] |
[
"src/ratransformers/t5.py"
] |
[
"from transformers.models.t5.modeling_t5 import T5Attention\nimport torch.nn as nn\nimport torch\n\n\nclass T5RelationalAttention(T5Attention):\n def __init__(self, *args, num_relation_kinds: int, use_same_relation_kv_emb: bool = True, **kwargs):\n super().__init__(*args, **kwargs)\n self.num_relation_kinds = num_relation_kinds\n self.relation_k_emb = nn.Embedding(num_relation_kinds + 1, self.inner_dim // self.n_heads, padding_idx=0)\n if use_same_relation_kv_emb:\n self.relation_v_emb = self.relation_k_emb\n else:\n self.relation_v_emb = nn.Embedding(num_relation_kinds + 1, self.inner_dim // self.n_heads, padding_idx=0)\n self.input_relation_kinds = [] # will hold (batch, seq_length, seq_length, num_relation_kinds)\n\n def forward(\n self,\n hidden_states,\n mask=None,\n key_value_states=None,\n position_bias=None,\n past_key_value=None,\n layer_head_mask=None,\n query_length=None,\n use_cache=False,\n output_attentions=False,\n ):\n \"\"\"\n Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states).\n \"\"\"\n\n # Input is (batch_size, seq_length, dim)\n # Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length)\n # past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head)\n batch_size, seq_length = hidden_states.shape[:2]\n\n assert len(self.input_relation_kinds) == 1\n input_relation_kinds = self.input_relation_kinds[0]\n assert input_relation_kinds.shape == (batch_size, seq_length, seq_length)\n\n # (batch_size, seq_length, seq_length, self.num_relation_kinds, self.inner_dim // num_relation_kinds)\n relation_k_embeds = self.relation_k_emb(input_relation_kinds)\n relation_v_embeds = self.relation_v_emb(input_relation_kinds)\n\n real_seq_length = seq_length\n\n if past_key_value is not None:\n assert (\n len(past_key_value) == 2\n ), f\"past_key_value should have 2 past states: keys and values. 
Got { len(past_key_value)} past states\"\n real_seq_length += past_key_value[0].shape[2] if query_length is None else query_length\n\n key_length = real_seq_length if key_value_states is None else key_value_states.shape[1]\n\n def shape(states):\n \"\"\"projection\"\"\"\n return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)\n\n def unshape(states):\n \"\"\"reshape\"\"\"\n return states.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim)\n\n def project(hidden_states, proj_layer, key_value_states, past_key_value):\n \"\"\"projects hidden states correctly to key/query states\"\"\"\n if key_value_states is None:\n # self-attn\n # (batch_size, n_heads, seq_length, dim_per_head)\n hidden_states = shape(proj_layer(hidden_states))\n elif past_key_value is None:\n # cross-attn\n # (batch_size, n_heads, seq_length, dim_per_head)\n hidden_states = shape(proj_layer(key_value_states))\n\n if past_key_value is not None:\n if key_value_states is None:\n # self-attn\n # (batch_size, n_heads, key_length, dim_per_head)\n hidden_states = torch.cat([past_key_value, hidden_states], dim=2)\n else:\n # cross-attn\n hidden_states = past_key_value\n return hidden_states\n\n # get query states\n query_states = shape(self.q(hidden_states)) # (batch_size, n_heads, seq_length, dim_per_head)\n\n # get key/value states\n key_states = project(\n hidden_states, self.k, key_value_states, past_key_value[0] if past_key_value is not None else None\n )\n value_states = project(\n hidden_states, self.v, key_value_states, past_key_value[1] if past_key_value is not None else None\n )\n\n # compute scores\n scores = torch.matmul(\n query_states, key_states.transpose(3, 2)\n ) # equivalent of torch.einsum(\"bnqd,bnkd->bnqk\", query_states, key_states), compatible with onnx op>9\n\n # q_t is [batch, seq_length, n_heads, dim_per_head]\n q_t = query_states.permute(0, 2, 1, 3)\n\n # r_t is [batch, seq_length, dim_per_head, seq_length]\n r_t = relation_k_embeds.transpose(-2, -1)\n\n q_tr_t_matmul = torch.matmul(q_t, r_t) # [batch, seq_length, n_heads, seq_length]\n q_tr_tmatmul_t = q_tr_t_matmul.permute(0, 2, 1, 3) # [batch, n_heads, seq_length, seq_length]\n\n # Add to scores\n scores += q_tr_tmatmul_t\n\n if position_bias is None:\n if not self.has_relative_attention_bias:\n position_bias = torch.zeros(\n (1, self.n_heads, real_seq_length, key_length), device=scores.device, dtype=scores.dtype\n )\n if self.gradient_checkpointing and self.training:\n position_bias.requires_grad = True\n else:\n position_bias = self.compute_bias(real_seq_length, key_length)\n\n # if key and values are already calculated\n # we want only the last query position bias\n if past_key_value is not None:\n position_bias = position_bias[:, :, -hidden_states.size(1) :, :]\n\n if mask is not None:\n position_bias = position_bias + mask # (batch_size, n_heads, seq_length, key_length)\n\n scores += position_bias\n attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(\n scores\n ) # (batch_size, n_heads, seq_length, key_length)\n attn_weights = nn.functional.dropout(\n attn_weights, p=self.dropout, training=self.training\n ) # (batch_size, n_heads, seq_length, key_length)\n\n # Mask heads if we want to\n if layer_head_mask is not None:\n attn_weights = attn_weights * layer_head_mask\n\n # [batch, n_heads, seq_length, seq_length]\n wv_matmul = torch.matmul(attn_weights, value_states)\n\n # w_t is [batch, seq_length, n_heads, seq_length]\n w_t = attn_weights.permute(0, 2, 1, 3)\n\n # [batch, 
seq_length, n_heads, seq_length]\n w_tr_matmul = torch.matmul(w_t, relation_v_embeds)\n\n attn_output = unshape(wv_matmul + w_tr_matmul.permute(0, 2, 1, 3)) # (batch_size, seq_length, dim)\n attn_output = self.o(attn_output)\n\n present_key_value_state = (key_states, value_states) if (self.is_decoder and use_cache) else None\n outputs = (attn_output,) + (present_key_value_state,) + (position_bias,)\n\n if output_attentions:\n outputs = outputs + (attn_weights,)\n return outputs\n"
] |
[
[
"torch.zeros",
"torch.cat",
"torch.nn.functional.dropout",
"torch.nn.Embedding",
"torch.matmul"
]
] |
Ellon-M/visualizations
|
[
"5a42c213ea8fd0597e2035778d9ae6460eb9e821",
"5a42c213ea8fd0597e2035778d9ae6460eb9e821",
"5a42c213ea8fd0597e2035778d9ae6460eb9e821"
] |
[
"src/hextiles/hextiles.py",
"src/area-charts/stackedarea.py",
"src/dendrograms/dendrogram.py"
] |
[
"# hextiles\n\n# holoviews\nimport numpy as np\nimport holoviews as hv\nfrom holoviews import opts\nfrom holoviews import dim\nhv.extension('bokeh')\n\nnp.random.seed(44)\nhex_tiles = hv.HexTiles(np.random.randn(100000, 2))\nhex_tiles.opts(opts.HexTiles(width=500, height=400, tools=['hover'], colorbar=True))\n",
"#Stacked area plots -\n#These work in the same way as area graphs do, but they use multiple data series that start each point \n#from the point left by the previous data series. The entire graph represents the total of all data plotted. \n#They are useful, overall, for comparing multiple variables changing over an interval.\n\n#It can be said that they represent the evolution of values of similar groups over a period of time on the same graphic.\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.cm as cmp\nimport matplotlib.colors as cl\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mp\nimport plotly.graph_objects as go\nimport plotly.express as px\nfrom scipy import stats\n\n#Matplotlib representation of a stacked area plot\n\narea_labels = ['Breaststroke', 'Backstroke', 'Butterfly', 'Medley']\n\nfig, ax = plt.subplots(figsize=(10, 7))\n\n# patches for the legend\nbreast_patch = mp.Patch( color ='dodgerblue',\n label ='Breaststroke')\nback_patch = mp.Patch(color ='firebrick',\n label ='Backstroke')\nbutter_patch = mp.Patch(color ='springgreen',\n label ='Butterfly')\nmedley_patch = mp.Patch(color ='rebeccapurple',\n label ='Medley')\n\nplt.title(\"MEN'S WEIGHT VARIATION \\nFROM 1950 W.R.T DIFFRENT STROKES\", fontsize=20,\n color= 'rebeccapurple',\n pad=20,\n loc=\"center\")\nplt.xlabel('Games')\nplt.ylabel('Weight (in kg)')\nax.stackplot(x, y1, y2, y3, y4, colors=['dodgerblue', 'firebrick', 'springgreen', 'rebeccapurple'])\nbox = ax.get_position()\nax.set_position([box.x0, box.y0 + box.height * 0.1,\n box.width, box.height * 0.9])\nplt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.12), ncol=5, fancybox=True, shadow=True, handles = [breast_patch, back_patch, butter_patch, medley_patch])\nplt.show()\n\n\n#Plotly express representation of the above chart\n\nfig = px.area(strokes, x=\"Year\", y=\"Weight\",\n color=\"Event\", title=\"MEN'S WEIGHT VARIATION \\nFROM 1950 W.R.T DIFFRENT STROKES\"\n\t )\nfig.update_layout(\n font_color=\"rebeccapurple\",\n title_font_family=\"Times New Roman\",\n title_font_color=\"navy\",\n legend_title_font_color=\"rebeccapurple\"\n)\nfig.update_xaxes(title_font_family=\"Arial\")\nfig.show()\n\n\n\n# Plotly - graph objects\n\nfig = go.Figure()\nfig.add_trace(go.Scatter(\n x=strokes['Year'], y=y1,\n hoverinfo='x+y',\n mode='lines',\n line=dict(width=0.5, color='rgb(131, 90, 241)'),\n stackgroup='one',\n name=\"Breaststroke\"\n))\nfig.add_trace(go.Scatter(\n x=strokes['Year'], y=y2,\n hoverinfo='x+y',\n mode='lines',\n line=dict(width=0.5, color='rgb(111, 231, 219)'),\n stackgroup='one',\n name=\"Backstroke\",\n))\nfig.add_trace(go.Scatter(\n x=strokes['Year'], y=y3,\n hoverinfo='x+y',\n mode='lines',\n line=dict(width=0.5, color='rgb(184, 247, 212)'),\n stackgroup='one',\n name=\"Butterfly\",\n))\nfig.add_trace(go.Scatter(\n x=strokes['Year'], y=y4,\n hoverinfo='x+y',\n mode='lines',\n line=dict(width=0.5, color='rgb(144, 200, 272)'),\n stackgroup='one',\n name=\"Medley\"\n))\n\nfig.update_layout( title=\"MEN'S WEIGHT VARIATION \\nFROM 1950 W.R.T DIFFRENT STROKES\",\n xaxis_title=\"Games\",\n yaxis_title=\"Weight (kg)\",\n legend_title=\"Strokes\",\n font=dict(\n# family=\"Courier New, monospace\",\n size=18,\n color=\"RebeccaPurple\"\n ))\n\nfig.show()\n\n\n\n\n",
"# dendrogram\n\n\nimport scipy.cluster.hierarchy as shc\n\n# Import Data\ndf = pd.read_csv('https://raw.githubusercontent.com/selva86/datasets/master/USArrests.csv')\n\n# Plot\nplt.figure(figsize=(16, 10), dpi= 80) \nplt.title(\"USArrests Dendograms\", fontsize=22) \ndend = shc.dendrogram(shc.linkage(df[['Murder', 'Assault', 'UrbanPop', 'Rape']], method='ward'), labels=df.State.values, color_threshold=100) \nplt.xticks(fontsize=12)\nplt.show()\n"
] |
[
[
"numpy.random.randn",
"numpy.random.seed"
],
[
"matplotlib.pyplot.legend",
"matplotlib.patches.Patch",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
],
[
"scipy.cluster.hierarchy.linkage"
]
] |
Pang1987/Python-code-PIGP-PINN
|
[
"e22f54a3ec4ce1644386a7a667fb56e6f99ab51c"
] |
[
"github_code/Discrete_GP_inv_2D_NS_equ_backward_Euler1.py"
] |
[
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Sep 17 12:28:19 2019\r\n\r\n@author: gpang\r\n\"\"\"\r\n\r\n\r\n\r\n\r\n\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport time\r\nfrom SALib.sample import sobol_sequence\r\nimport scipy as sci\r\nimport scipy.io as sio\r\n\r\n\r\nclass one_GP:\r\n \r\n def __init__(self):\r\n pass\r\n \r\n \r\n def model(self, dataset, dt,previous_cov_mat, un_u, un_f, un_t, kernel_type = 'SE', is_noise = True): \r\n self.xu_train = dataset['xu_train']\r\n self.yu_train = dataset['yu_train']\r\n self.xf_train = dataset['xf_train']\r\n self.yf_train = dataset['yf_train']\r\n self.xu_test = dataset['xu_test']\r\n self.yu_test = dataset['yu_test']\r\n self.xf_test = dataset['xf_test']\r\n self.yf_test = dataset['yf_test'] \r\n self.un_u = un_u\r\n self.un_f = un_f\r\n self.un_t = un_t\r\n self.kernel_type = kernel_type\r\n self.dt = dt\r\n\r\n self.previous_cov_mat=previous_cov_mat\r\n self.dim = self.xf_train.shape[1]\r\n self.is_noise = is_noise\r\n\r\n \r\n \r\n def ku1u1(self, X, Y, t1, t2, sigf1, diag=False):\r\n x1 = X[:,0:1]\r\n y1 = X[:,1:2]\r\n x2 = Y[:,0:1].T\r\n y2 = Y[:,1:2].T\r\n if diag==False:\r\n k = tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(t2 + y1 - y2)*(t2 - y1 + y2)/t2**4 \r\n return sigf1**2*k\r\n else:\r\n k = tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(t2 + y1 - y2)*(t2 - y1 + y2)/t2**4 \r\n return sigf1**2*tf.reshape(tf.diag_part(k),(-1,1))\r\n \r\n \r\n def ku1v1(self, X, Y, t1, t2, sigf1, diag=False):\r\n x1 = X[:,0:1]\r\n y1 = X[:,1:2]\r\n x2 = Y[:,0:1].T\r\n y2 = Y[:,1:2].T\r\n if diag==False:\r\n k = -(x1 - x2)*(y1 - y2)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))/(t1**2*t2**2)\r\n return -sigf1**2*k\r\n else:\r\n k = -(x1 - x2)*(y1 - y2)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))/(t1**2*t2**2)\r\n return -sigf1**2*tf.reshape(tf.diag_part(k),(-1,1)) \r\n\r\n\r\n def ku1u0(self, X, Y, t1, t2, sigf1,un_x, un_y, diag=False):\r\n \r\n x1 = X[:,0:1]\r\n y1 = X[:,1:2]\r\n x2 = Y[:,0:1].T\r\n y2 = Y[:,1:2].T \r\n \r\n unx = np.ndarray.flatten(un_x[0])\r\n vnx = np.ndarray.flatten(un_x[1])\r\n\r\n uny = np.ndarray.flatten(un_y[0])\r\n vny = np.ndarray.flatten(un_y[1])\r\n\r\n unx = tf.diag(unx)\r\n vnx = tf.diag(vnx)\r\n \r\n uny = tf.diag(uny) \r\n vny = tf.diag(vny) \r\n \r\n k1 = tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(t2 + y1 - y2)*(t2 - y1 + y2)/t2**4\r\n \r\n k2 = (x1 - x2)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(t2 + y1 - y2)*(t2 - y1 + y2)/(t1**2*t2**4)\r\n \r\n k3 = (y1 - y2)*(3*t2**2 - y1**2 + 2*y1*y2 - y2**2)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))/t2**6\r\n \r\n k4 = -3*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(((t1 + x1 - x2)*(t1 - x1 + x2)*t2**6)/3 + (t1**4 - (y1 - y2)**2*t1**2/3 + (y1 - y2)**2*(x1 - x2)**2/3)*t2**4 - 2*t1**4*(y1 - y2)**2*t2**2 + t1**4*(y1 - y2)**4/3)/(t1**4*t2**8)\r\n \r\n if diag==False:\r\n k = k1 + self.lambda1 * self.dt * tf.matmul(k2, uny) \\\r\n + self.lambda1*self.dt*tf.matmul(k3, vny) - self.lambda2*self.dt * k4\r\n return sigf1**2*k\r\n else:\r\n k = k1 + self.lambda1 * self.dt * tf.matmul(k2, uny) \\\r\n + self.lambda1*self.dt*tf.matmul(k3, vny) - self.lambda2*self.dt * k4\r\n return sigf1**2*tf.reshape(tf.diag_part(k),(-1,1)) \r\n \r\n\r\n def ku1v0(self, X, Y, t1, t2, sigf1,un_x, un_y, diag=False):\r\n \r\n x1 = X[:,0:1]\r\n y1 = X[:,1:2]\r\n x2 = 
Y[:,0:1].T\r\n y2 = Y[:,1:2].T \r\n \r\n unx = np.ndarray.flatten(un_x[0])\r\n vnx = np.ndarray.flatten(un_x[1])\r\n\r\n uny = np.ndarray.flatten(un_y[0])\r\n vny = np.ndarray.flatten(un_y[1])\r\n\r\n unx = tf.diag(unx)\r\n vnx = tf.diag(vnx)\r\n \r\n uny = tf.diag(uny) \r\n vny = tf.diag(vny) \r\n \r\n k1 = -(x1 - x2)*(y1 - y2)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))/(t1**2*t2**2)\r\n \r\n k2 = (y1 - y2)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(t1 + x1 - x2)*(t1 - x1 + x2)/(t1**4*t2**2)\r\n \r\n k3 = (x1 - x2)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(t2 + y1 - y2)*(t2 - y1 + y2)/(t1**2*t2**4)\r\n \r\n k4 = 3*(y1 - y2)*((t2**2 - (y1 - y2)**2/3)*t1**4 + t1**2*t2**4 - (x1 - x2)**2*t2**4/3)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(x1 - x2)/(t1**6*t2**6)\r\n \r\n if diag==False:\r\n k = k1 + self.lambda1 * self.dt * tf.matmul(k2, uny) \\\r\n + self.lambda1*self.dt*tf.matmul(k3, vny) - self.lambda2*self.dt * k4\r\n return -sigf1**2*k\r\n else:\r\n k = k1 + self.lambda1 * self.dt * tf.matmul(k2, uny) \\\r\n + self.lambda1*self.dt*tf.matmul(k3, vny) - self.lambda2*self.dt * k4\r\n return -sigf1**2*tf.reshape(tf.diag_part(k),(-1,1)) \r\n\r\n def kv1v1(self, X, Y, t1, t2, sigf1,diag=False):\r\n x1 = X[:,0:1]\r\n y1 = X[:,1:2]\r\n x2 = Y[:,0:1].T\r\n y2 = Y[:,1:2].T\r\n if diag==False:\r\n k = tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(t1 + x1 - x2)*(t1 - x1 + x2)/t1**4\r\n return sigf1**2*k\r\n else:\r\n k = tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(t1 + x1 - x2)*(t1 - x1 + x2)/t1**4\r\n return sigf1**2*tf.reshape(tf.diag_part(k),(-1,1)) \r\n\r\n\r\n def kv1u0(self, X, Y, t1, t2, sigf1,un_x, un_y, diag=False):\r\n \r\n x1 = X[:,0:1]\r\n y1 = X[:,1:2]\r\n x2 = Y[:,0:1].T\r\n y2 = Y[:,1:2].T \r\n \r\n unx = np.ndarray.flatten(un_x[0])\r\n vnx = np.ndarray.flatten(un_x[1])\r\n\r\n uny = np.ndarray.flatten(un_y[0])\r\n vny = np.ndarray.flatten(un_y[1])\r\n\r\n unx = tf.diag(unx)\r\n vnx = tf.diag(vnx)\r\n \r\n uny = tf.diag(uny) \r\n vny = tf.diag(vny) \r\n \r\n k1 = -(x1 - x2)*(y1 - y2)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))/(t1**2*t2**2)\r\n \r\n k2 = (y1 - y2)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(t1 + x1 - x2)*(t1 - x1 + x2)/(t1**4*t2**2)\r\n \r\n k3 = (x1 - x2)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(t2 + y1 - y2)*(t2 - y1 + y2)/(t1**2*t2**4)\r\n \r\n k4 = 3*(y1 - y2)*((t2**2 - (y1 - y2)**2/3)*t1**4 + t1**2*t2**4 - (x1 - x2)**2*t2**4/3)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(x1 - x2)/(t1**6*t2**6)\r\n if diag==False:\r\n k = k1 + self.lambda1 * self.dt * tf.matmul(k2, uny) \\\r\n + self.lambda1*self.dt*tf.matmul(k3, vny) - self.lambda2*self.dt * k4\r\n return -sigf1**2*k\r\n else:\r\n k = k1 + self.lambda1 * self.dt * tf.matmul(k2,uny) \\\r\n + self.lambda1*self.dt*tf.matmul(k3, vny) - self.lambda2*self.dt * k4\r\n return -sigf1**2*tf.reshape(tf.diag_part(k),(-1,1)) \r\n\r\n\r\n def kv1v0(self, X, Y, t1, t2, sigf1,un_x, un_y, diag=False):\r\n \r\n x1 = X[:,0:1]\r\n y1 = X[:,1:2]\r\n x2 = Y[:,0:1].T\r\n y2 = Y[:,1:2].T \r\n \r\n unx = np.ndarray.flatten(un_x[0])\r\n vnx = np.ndarray.flatten(un_x[1])\r\n\r\n uny = np.ndarray.flatten(un_y[0])\r\n vny = np.ndarray.flatten(un_y[1])\r\n\r\n unx = tf.diag(unx)\r\n vnx = tf.diag(vnx)\r\n \r\n uny = tf.diag(uny) \r\n vny = tf.diag(vny) \r\n \r\n \r\n k1 = tf.exp((-(y1 - y2)**2*t1**2 - 
(x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(t1 + x1 - x2)*(t1 - x1 + x2)/t1**4\r\n \r\n k2 = (3*t1**2 - x1**2 + 2*x1*x2 - x2**2)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(x1 - x2)/t1**6\r\n \r\n k3 = (y1 - y2)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(t1 + x1 - x2)*(t1 - x1 + x2)/(t1**4*t2**2)\r\n \r\n k4 = -((t2 + y1 - y2)*(t2 - y1 + y2)*t1**6 + (3*t2**4 - (x1 - x2)**2*t2**2 + (y1 - y2)**2*(x1 - x2)**2)*t1**4 - 6*t2**4*(x1 - x2)**2*t1**2 + t2**4*(x1 - x2)**4)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))/(t1**8*t2**4)\r\n \r\n\r\n if diag==False:\r\n k = k1 + self.lambda1 * self.dt * tf.matmul(k2, uny) \\\r\n + self.lambda1*self.dt*tf.matmul(k3, vny) - self.lambda2*self.dt * k4\r\n return sigf1**2*k\r\n else:\r\n k = k1 + self.lambda1 * self.dt * tf.matmul(k2, uny) \\\r\n + self.lambda1*self.dt*tf.matmul(k3, vny) - self.lambda2*self.dt * k4\r\n return sigf1**2*tf.reshape(tf.diag_part(k),(-1,1)) \r\n\r\n\r\n\r\n\r\n def ku0u0(self, X, Y, t1, t2, s1, s2, sigf1,sigf2,un_x, un_y, diag=False):\r\n\r\n# sess1 = tf.Session()\r\n# sess1.run(tf.global_variables_initializer())\r\n\r\n x1 = X[:,0:1]\r\n y1 = X[:,1:2]\r\n x2 = Y[:,0:1].T\r\n y2 = Y[:,1:2].T \r\n \r\n unx = np.ndarray.flatten(un_x[0])\r\n vnx = np.ndarray.flatten(un_x[1])\r\n\r\n uny = np.ndarray.flatten(un_y[0])\r\n vny = np.ndarray.flatten(un_y[1])\r\n\r\n unx = tf.diag(unx)\r\n vnx = tf.diag(vnx)\r\n \r\n uny = tf.diag(uny) \r\n vny = tf.diag(vny) \r\n\r\n k1 = tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(t2 + y1 - y2)*(t2 - y1 + y2)/t2**4\r\n k2 = -(x1 - x2)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(t2 + y1 - y2)*(t2 - y1 + y2)/(t1**2*t2**4)\r\n k3 = -(y1 - y2)*(3*t2**2 - y1**2 + 2*y1*y2 - y2**2)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))/t2**6\r\n k4 = -3*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(((t1 + x1 - x2)*(t1 - x1 + x2)*t2**6)/3 + (t1**4 - (y1 - y2)**2*t1**2/3 + (y1 - y2)**2*(x1 - x2)**2/3)*t2**4 - 2*t1**4*(y1 - y2)**2*t2**2 + t1**4*(y1 - y2)**4/3)/(t1**4*t2**8)\r\n k5 = (x1 - x2)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(t2 + y1 - y2)*(t2 - y1 + y2)/(t1**2*t2**4)\r\n k6 = tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(t2 + y1 - y2)*(t2 - y1 + y2)*(t1 + x1 - x2)*(t1 - x1 + x2)/(t1**4*t2**4)\r\n k7 = -(y1 - y2)*(3*t2**2 - y1**2 + 2*y1*y2 - y2**2)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(x1 - x2)/(t1**2*t2**6)\r\n k8 = -3*((t1**2 - (x1 - x2)**2/3)*t2**6 + (t1**4 - (y1 - y2)**2*t1**2 + (y1 - y2)**2*(x1 - x2)**2/3)*t2**4 - 2*t1**4*(y1 - y2)**2*t2**2 + t1**4*(y1 - y2)**4/3)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(x1 - x2)/(t1**6*t2**8)\r\n k9 = (y1 - y2)*(3*t2**2 - y1**2 + 2*y1*y2 - y2**2)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))/t2**6\r\n k10 = -(y1 - y2)*(3*t2**2 - y1**2 + 2*y1*y2 - y2**2)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(x1 - x2)/(t1**2*t2**6)\r\n k11 = tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(3*t2**4 - 6*t2**2*y1**2 + 12*t2**2*y1*y2 - 6*t2**2*y2**2 + y1**4 - 4*y1**3*y2 + 6*y1**2*y2**2 - 4*y1*y2**3 + y2**4)/t2**8\r\n k12 = -15*(y1 - y2)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(((t1 + x1 - x2)*(t1 - x1 + x2)*t2**6)/5 + (-((t1 + x1 - x2)*(t1 - x1 + x2)*y1**2)/15 + (2*y2*(t1 + x1 - x2)*(t1 - x1 + x2)*y1)/15 - ((t1 + x1 - x2)*(t1 - x1 + x2)*y2**2)/15 + 
t1**4)*t2**4 - (2*t1**4*(y1 - y2)**2*t2**2)/3 + t1**4*(y1 - y2)**4/15)/(t1**4*t2**10)\r\n k13 = -3*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(((t1 + x1 - x2)*(t1 - x1 + x2)*t2**6)/3 + (t1**4 - (y1 - y2)**2*t1**2/3 + (y1 - y2)**2*(x1 - x2)**2/3)*t2**4 - 2*t1**4*(y1 - y2)**2*t2**2 + t1**4*(y1 - y2)**4/3)/(t1**4*t2**8)\r\n k14 = 3*((t1**2 - (x1 - x2)**2/3)*t2**6 + (t1**4 - (y1 - y2)**2*t1**2 + (y1 - y2)**2*(x1 - x2)**2/3)*t2**4 - 2*t1**4*(y1 - y2)**2*t2**2 + t1**4*(y1 - y2)**4/3)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(x1 - x2)/(t1**6*t2**8)\r\n k15 = 15*(y1 - y2)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(((t1 + x1 - x2)*(t1 - x1 + x2)*t2**6)/5 + (-((t1 + x1 - x2)*(t1 - x1 + x2)*y1**2)/15 + (2*y2*(t1 + x1 - x2)*(t1 - x1 + x2)*y1)/15 - ((t1 + x1 - x2)*(t1 - x1 + x2)*y2**2)/15 + t1**4)*t2**4 - (2*t1**4*(y1 - y2)**2*t2**2)/3 + t1**4*(y1 - y2)**4/15)/(t1**4*t2**10)\r\n k16 = 15*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*((t1**4/5 - (2*(x1 - x2)**2*t1**2)/5 + (x1 - x2)**4/15)*t2**10 + ((2*t1**6)/5 + (-y1**2/5 + (2*y1*y2)/5 - y2**2/5 - (2*(x1 - x2)**2)/5)*t1**4 + (2*(y1 - y2)**2*(x1 - x2)**2*t1**2)/5 - (y1 - y2)**2*(x1 - x2)**4/15)*t2**8 + (t1**4 - (4*(y1 - y2)**2*t1**2)/5 + (4*(y1 - y2)**2*(x1 - x2)**2)/5)*t1**4*t2**6 - 3*(y1 - y2)**2*(t1**4 - (2*(y1 - y2)**2*t1**2)/45 + (2*(y1 - y2)**2*(x1 - x2)**2)/45)*t1**4*t2**4 + t1**8*(y1 - y2)**4*t2**2 - t1**8*(y1 - y2)**6/15)/(t1**8*t2**12)\r\n \r\n kpx2x1 = tf.exp((-(y1 - y2)**2*s1**2 - (x1 - x2)**2*s2**2)/(2*s1**2*s2**2))*(s1 + x1 - x2)*(s1 - x1 + x2)/s1**4 \r\n\r\n if diag==False:\r\n k = k1 + self.lambda1 * self.dt * tf.matmul(unx, k2) \\\r\n + self.lambda1*self.dt*tf.matmul(vnx, k3) - self.lambda2*self.dt * k4 \\\r\n +self.lambda1*self.dt*tf.matmul(k5, uny) + self.lambda1**2*self.dt**2*tf.matmul(tf.matmul(unx, k6), uny) \\\r\n +self.lambda1**2*self.dt**2*tf.matmul(tf.matmul(vnx,k7),uny)-self.lambda2*self.dt**2*self.lambda1*tf.matmul(k8, uny) \\\r\n +self.lambda1*self.dt*tf.matmul(k9, vny) + self.lambda1**2*self.dt**2*tf.matmul(tf.matmul(unx, k10), vny) \\\r\n +self.lambda1**2*self.dt**2*tf.matmul(tf.matmul(vnx, k11),vny) - self.dt**2*self.lambda1*self.lambda2*tf.matmul(k12, vny)\\\r\n -self.dt*self.lambda2*k13 - self.lambda1*self.dt**2*self.lambda2*tf.matmul(unx, k14) \\\r\n -self.lambda1*self.lambda2*self.dt**2*tf.matmul(vnx, k15) + self.dt**2 * self.lambda2**2*k16\r\n k = sigf1**2 * k + sigf2**2*self.dt**2 * kpx2x1\r\n\r\n# aaa = sess1.run(k)\r\n# print (np.max(np.abs(aaa-aaa.T)))\r\n# ssf=3\r\n\r\n return k\r\n else:\r\n k = k1 + self.lambda1 * self.dt * tf.matmul(unx, k2) \\\r\n + self.lambda1*self.dt*tf.matmul(vnx, k3) - self.lambda2*self.dt * k4 \\\r\n +self.lambda1*self.dt*tf.matmul(k5, uny) + self.lambda1**2*self.dt**2*tf.matmul(tf.matmul(unx, k6), uny) \\\r\n +self.lambda1**2*self.dt**2*tf.matmul(tf.matmul(vnx,k7),uny)-self.lambda2*self.dt**2*self.lambda1*tf.matmul(k8, uny) \\\r\n +self.lambda1*self.dt*tf.matmul(k9, vny) + self.lambda1**2*self.dt**2*tf.matmul(tf.matmul(unx, k10), vny) \\\r\n +self.lambda1**2*self.dt**2*tf.matmul(tf.matmul(vnx, k11),vny) - self.dt**2*self.lambda1*self.lambda2*tf.matmul(k12, vny)\\\r\n -self.dt*self.lambda2*k13 - self.lambda1*self.dt**2*self.lambda2*tf.matmul(unx, k14) \\\r\n -self.lambda1*self.lambda2*self.dt**2*tf.matmul(vnx, k15) + self.dt**2 * self.lambda2**2*k16 \r\n k = sigf1**2*k+sigf2**2*self.dt**2 * kpx2x1\r\n\r\n\r\n\r\n return tf.reshape(tf.diag_part(k),(-1,1)) \r\n\r\n\r\n\r\n def ku0v0(self, X, Y, 
t1, t2, s1, s2, sigf1,sigf2,un_x, un_y, diag=False):\r\n x1 = X[:,0:1]\r\n y1 = X[:,1:2]\r\n x2 = Y[:,0:1].T\r\n y2 = Y[:,1:2].T \r\n \r\n unx = np.ndarray.flatten(un_x[0])\r\n vnx = np.ndarray.flatten(un_x[1])\r\n\r\n uny = np.ndarray.flatten(un_y[0])\r\n vny = np.ndarray.flatten(un_y[1])\r\n\r\n unx = tf.diag(unx)\r\n vnx = tf.diag(vnx)\r\n \r\n uny = tf.diag(uny) \r\n vny = tf.diag(vny) \r\n\r\n k1 = -(x1 - x2)*(y1 - y2)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))/(t1**2*t2**2)\r\n k2 = -(y1 - y2)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(t1 + x1 - x2)*(t1 - x1 + x2)/(t1**4*t2**2)\r\n k3 = -(x1 - x2)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(t2 + y1 - y2)*(t2 - y1 + y2)/(t1**2*t2**4)\r\n k4 = 3*(y1 - y2)*((t2**2 - (y1 - y2)**2/3)*t1**4 + t1**2*t2**4 - (x1 - x2)**2*t2**4/3)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(x1 - x2)/(t1**6*t2**6)\r\n k5 = (y1 - y2)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(t1 + x1 - x2)*(t1 - x1 + x2)/(t1**4*t2**2)\r\n k6 = -(y1 - y2)*(x1 - x2)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(3*t1**2 - x1**2 + 2*x1*x2 - x2**2)/(t1**6*t2**2)\r\n k7 = tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(t2 + y1 - y2)*(t2 - y1 + y2)*(t1 + x1 - x2)*(t1 - x1 + x2)/(t1**4*t2**4)\r\n k8 = -3*(y1 - y2)*((t2**2 - (y1 - y2)**2/3)*t1**6 + (t2**4 - (x1 - x2)**2*t2**2 + (y1 - y2)**2*(x1 - x2)**2/3)*t1**4 - 2*t2**4*(x1 - x2)**2*t1**2 + t2**4*(x1 - x2)**4/3)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))/(t1**8*t2**6)\r\n k9 = (x1 - x2)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(t2 + y1 - y2)*(t2 - y1 + y2)/(t1**2*t2**4)\r\n k10 = tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(t2 + y1 - y2)*(t2 - y1 + y2)*(t1 + x1 - x2)*(t1 - x1 + x2)/(t1**4*t2**4)\r\n k11 = -(y1 - y2)*(3*t2**2 - y1**2 + 2*y1*y2 - y2**2)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(x1 - x2)/(t1**2*t2**6)\r\n k12 = -3*((t1**2 - (x1 - x2)**2/3)*t2**6 + (t1**4 - (y1 - y2)**2*t1**2 + (y1 - y2)**2*(x1 - x2)**2/3)*t2**4 - 2*t1**4*(y1 - y2)**2*t2**2 + t1**4*(y1 - y2)**4/3)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(x1 - x2)/(t1**6*t2**8)\r\n k13 = 3*(y1 - y2)*((t2**2 - (y1 - y2)**2/3)*t1**4 + t1**2*t2**4 - (x1 - x2)**2*t2**4/3)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(x1 - x2)/(t1**6*t2**6)\r\n k14 = 3*(y1 - y2)*((t2**2 - (y1 - y2)**2/3)*t1**6 + (t2**4 - (x1 - x2)**2*t2**2 + (y1 - y2)**2*(x1 - x2)**2/3)*t1**4 - 2*t2**4*(x1 - x2)**2*t1**2 + t2**4*(x1 - x2)**4/3)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))/(t1**8*t2**6)\r\n k15 = 3*((t1**2 - (x1 - x2)**2/3)*t2**6 + (t1**4 - (y1 - y2)**2*t1**2 + (y1 - y2)**2*(x1 - x2)**2/3)*t2**4 - 2*t1**4*(y1 - y2)**2*t2**2 + t1**4*(y1 - y2)**4/3)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(x1 - x2)/(t1**6*t2**8)\r\n k16 = -15*(y1 - y2)*((t2**4 - (2*(y1 - y2)**2*t2**2)/3 + (y1 - y2)**4/15)*t1**8 + (6*t2**4*(t2**2 - (y1 - y2)**2/3)*t1**6)/5 + t2**4*(t2**4 - (2*(x1 - x2)**2*t2**2)/5 + (2*(y1 - y2)**2*(x1 - x2)**2)/15)*t1**4 - (2*(x1 - x2)**2*t1**2*t2**8)/3 + (x1 - x2)**4*t2**8/15)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(x1 - x2)/(t1**10*t2**10)\r\n \r\n kpy2x1 = -(y1 - y2)*(x1 - x2)*tf.exp((-(y1 - y2)**2*s1**2 - (x1 - x2)**2*s2**2)/(2*s1**2*s2**2))/(s1**2*s2**2)\r\n if diag==False:\r\n k = k1 + self.lambda1 * self.dt * 
tf.matmul(unx, k2) \\\r\n + self.lambda1*self.dt*tf.matmul(vnx, k3) - self.lambda2*self.dt * k4 \\\r\n +self.lambda1*self.dt*tf.matmul(k5, uny) + self.lambda1**2*self.dt**2*tf.matmul(tf.matmul(unx, k6), uny) \\\r\n +self.lambda1**2*self.dt**2*tf.matmul(tf.matmul(vnx,k7),uny)-self.lambda2*self.dt**2*self.lambda1*tf.matmul(k8, uny) \\\r\n +self.lambda1*self.dt*tf.matmul(k9, vny) + self.lambda1**2*self.dt**2*tf.matmul(tf.matmul(unx, k10), vny) \\\r\n +self.lambda1**2*self.dt**2*tf.matmul(tf.matmul(vnx, k11),vny) - self.dt**2*self.lambda1*self.lambda2*tf.matmul(k12, vny)\\\r\n -self.dt*self.lambda2*k13 - self.lambda1*self.dt**2*self.lambda2*tf.matmul(unx, k14) \\\r\n -self.lambda1*self.lambda2*self.dt**2*tf.matmul(vnx, k15) + self.dt**2 * self.lambda2**2*k16\r\n k = -sigf1**2*k +sigf2**2*self.dt**2 * kpy2x1\r\n return k\r\n else:\r\n k = k1 + self.lambda1 * self.dt * tf.matmul(unx, k2) \\\r\n + self.lambda1*self.dt*tf.matmul(vnx, k3) - self.lambda2*self.dt * k4 \\\r\n +self.lambda1*self.dt*tf.matmul(k5, uny) + self.lambda1**2*self.dt**2*tf.matmul(tf.matmul(unx, k6), uny) \\\r\n +self.lambda1**2*self.dt**2*tf.matmul(tf.matmul(vnx,k7),uny)-self.lambda2*self.dt**2*self.lambda1*tf.matmul(k8, uny) \\\r\n +self.lambda1*self.dt*tf.matmul(k9, vny) + self.lambda1**2*self.dt**2*tf.matmul(tf.matmul(unx, k10), vny) \\\r\n +self.lambda1**2*self.dt**2*tf.matmul(tf.matmul(vnx, k11),vny) - self.dt**2*self.lambda1*self.lambda2*tf.matmul(k12, vny)\\\r\n -self.dt*self.lambda2*k13 - self.lambda1*self.dt**2*self.lambda2*tf.matmul(unx, k14) \\\r\n -self.lambda1*self.lambda2*self.dt**2*tf.matmul(vnx, k15) + self.dt**2 * self.lambda2**2*k16 \r\n k = -sigf1**2*k + sigf2**2*self.dt**2 * kpy2x1\r\n return tf.reshape(tf.diag_part(k),(-1,1)) \r\n\r\n\r\n\r\n def kv0v0(self, X, Y, t1, t2, s1, s2, sigf1,sigf2,un_x, un_y, diag=False):\r\n\r\n\r\n\r\n x1 = X[:,0:1]\r\n y1 = X[:,1:2]\r\n x2 = Y[:,0:1].T\r\n y2 = Y[:,1:2].T \r\n\r\n \r\n \r\n unx = np.ndarray.flatten(un_x[0])\r\n vnx = np.ndarray.flatten(un_x[1])\r\n\r\n uny = np.ndarray.flatten(un_y[0])\r\n vny = np.ndarray.flatten(un_y[1])\r\n\r\n unx = tf.diag(unx)\r\n vnx = tf.diag(vnx)\r\n \r\n uny = tf.diag(uny) \r\n vny = tf.diag(vny) \r\n\r\n k1 = tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(t1 + x1 - x2)*(t1 - x1 + x2)/t1**4\r\n k2 = -(3.0*t1**2 - x1**2 + 2*x1*x2 - x2**2)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(x1 - x2)/t1**6\r\n k3 = -(y1 - y2)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(t1 + x1 - x2)*(t1 - x1 + x2)/(t1**4*t2**2)\r\n k4 = -((t2 + y1 - y2)*(t2 - y1 + y2)*t1**6 + (3*t2**4 - (x1 - x2)**2*t2**2 + (y1 - y2)**2*(x1 - x2)**2)*t1**4 - 6*t2**4*(x1 - x2)**2*t1**2 + t2**4*(x1 - x2)**4)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))/(t1**8*t2**4)\r\n k5 = (3.0*t1**2 - x1**2 + 2*x1*x2 - x2**2)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(x1 - x2)/t1**6\r\n k6 = tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(3*t1**4 - 6*t1**2*x1**2 + 12*t1**2*x1*x2 - 6*t1**2*x2**2 + x1**4 - 4*x1**3*x2 + 6*x1**2*x2**2 - 4*x1*x2**3 + x2**4)/t1**8\r\n k7 = -(y1 - y2)*(x1 - x2)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(3*t1**2 - x1**2 + 2*x1*x2 - x2**2)/(t1**6*t2**2)\r\n k8 = -3.0*((t2 + y1 - y2)*(t2 - y1 + y2)*t1**6 + (-((t2 + y1 - y2)*(t2 - y1 + y2)*x1**2)/3 + (2*x2*(t2 + y1 - y2)*(t2 - y1 + y2)*x1)/3 - ((t2 + y1 - y2)*(t2 - y1 + y2)*x2**2)/3 + 5*t2**4)*t1**4 - (10*t2**4*(x1 - x2)**2*t1**2)/3 + t2**4*(x1 - 
x2)**4/3)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(x1 - x2)/(t1**10*t2**4)\r\n k9 = (y1 - y2)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(t1 + x1 - x2)*(t1 - x1 + x2)/(t1**4*t2**2)\r\n k10 =-(y1 - y2)*(x1 - x2)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(3*t1**2 - x1**2 + 2*x1*x2 - x2**2)/(t1**6*t2**2)\r\n k11 = tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(t2 + y1 - y2)*(t2 - y1 + y2)*(t1 + x1 - x2)*(t1 - x1 + x2)/(t1**4*t2**4)\r\n k12 = -3.0*(y1 - y2)*((t2**2 - (y1 - y2)**2/3)*t1**6 + (t2**4 - (x1 - x2)**2*t2**2 + (y1 - y2)**2*(x1 - x2)**2/3)*t1**4 - 2*t2**4*(x1 - x2)**2*t1**2 + t2**4*(x1 - x2)**4/3.0)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))/(t1**8*t2**6)\r\n k13 = -((t2 + y1 - y2)*(t2 - y1 + y2)*t1**6 + (3*t2**4 - (x1 - x2)**2*t2**2 + (y1 - y2)**2*(x1 - x2)**2)*t1**4 - 6*t2**4*(x1 - x2)**2*t1**2 + t2**4*(x1 - x2)**4)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))/(t1**8*t2**4)\r\n k14 =3.0*((t2 + y1 - y2)*(t2 - y1 + y2)*t1**6 + (-((t2 + y1 - y2)*(t2 - y1 + y2)*x1**2)/3 + (2*x2*(t2 + y1 - y2)*(t2 - y1 + y2)*x1)/3 - ((t2 + y1 - y2)*(t2 - y1 + y2)*x2**2)/3 + 5*t2**4)*t1**4 - (10*t2**4*(x1 - x2)**2*t1**2)/3 + t2**4*(x1 - x2)**4/3)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))*(x1 - x2)/(t1**10*t2**4)\r\n k15 =3.0*(y1 - y2)*((t2**2 - (y1 - y2)**2/3)*t1**6 + (t2**4 - (x1 - x2)**2*t2**2 + (y1 - y2)**2*(x1 - x2)**2/3)*t1**4 - 2*t2**4*(x1 - x2)**2*t1**2 + t2**4*(x1 - x2)**4/3.0)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))/(t1**8*t2**6)\r\n k16 =3.0*((t2**4 - 2*(y1 - y2)**2*t2**2 + (y1 - y2)**4/3)*t1**10 + (2*t2**6 + (-x1**2 + 2*x1*x2 - x2**2 - 2*(y1 - y2)**2)*t2**4 + 2*(y1 - y2)**2*(x1 - x2)**2*t2**2 - (y1 - y2)**4*(x1 - x2)**2/3)*t1**8 + 5*t2**4*(t2**4 - (4*(x1 - x2)**2*t2**2)/5 + (4*(y1 - y2)**2*(x1 - x2)**2)/5)*t1**6 - 15*t2**4*(x1 - x2)**2*(t2**4 - (2*(x1 - x2)**2*t2**2)/45.0 + (2*(y1 - y2)**2*(x1 - x2)**2)/45)*t1**4 + 5*t2**8*(x1 - x2)**4*t1**2 - t2**8*(x1 - x2)**6/3)*tf.exp((-(y1 - y2)**2*t1**2 - (x1 - x2)**2*t2**2)/(2*t1**2*t2**2))/(t1**12*t2**8)\r\n\r\n \r\n kpy2y1 = tf.exp((-(y1 - y2)**2*s1**2 - (x1 - x2)**2*s2**2)/(2*s1**2*s2**2))*(s2 + y1 - y2)*(s2 - y1 + y2)/s2**4\r\n\r\n\r\n if diag==False:\r\n k = k1 + self.lambda1 * self.dt * tf.matmul(unx, k2) \\\r\n + self.lambda1*self.dt*tf.matmul(vnx, k3) - self.lambda2*self.dt * k4 \\\r\n +self.lambda1*self.dt*tf.matmul(k5, uny) + self.lambda1**2*self.dt**2*tf.matmul(tf.matmul(unx, k6), uny) \\\r\n +self.lambda1**2*self.dt**2*tf.matmul(tf.matmul(vnx,k7),uny)-self.lambda2*self.dt**2*self.lambda1*tf.matmul(k8, uny) \\\r\n +self.lambda1*self.dt*tf.matmul(k9, vny) + self.lambda1**2*self.dt**2*tf.matmul(tf.matmul(unx, k10), vny) \\\r\n +self.lambda1**2*self.dt**2*tf.matmul(tf.matmul(vnx, k11),vny) - self.dt**2*self.lambda1*self.lambda2*tf.matmul(k12, vny)\\\r\n -self.dt*self.lambda2*k13 - self.lambda1*self.dt**2*self.lambda2*tf.matmul(unx, k14) \\\r\n -self.lambda1*self.lambda2*self.dt**2*tf.matmul(vnx, k15) + self.dt**2 * self.lambda2**2*k16 \r\n k=sigf1**2*k + sigf2**2*self.dt**2 * kpy2y1\r\n\r\n \r\n \r\n\r\n return k \r\n else:\r\n k = k1 + self.lambda1 * self.dt * tf.matmul (unx, k2) \\\r\n + self.lambda1*self.dt*tf.matmul(vnx, k3) - self.lambda2*self.dt * k4 \\\r\n +self.lambda1*self.dt*tf.matmul(k5, uny) + self.lambda1**2*self.dt**2*tf.matmul(tf.matmul(unx, k6), uny) \\\r\n 
+self.lambda1**2*self.dt**2*tf.matmul(tf.matmul(vnx,k7),uny)-self.lambda2*self.dt**2*self.lambda1*tf.matmul(k8, uny) \\\r\n +self.lambda1*self.dt*tf.matmul(k9, vny) + self.lambda1**2*self.dt**2*tf.matmul(tf.matmul(unx, k10), vny) \\\r\n +self.lambda1**2*self.dt**2*tf.matmul(tf.matmul(vnx, k11),vny) - self.dt**2*self.lambda1*self.lambda2*tf.matmul(k12, vny)\\\r\n -self.dt*self.lambda2*k13 - self.lambda1*self.dt**2*self.lambda2*tf.matmul(unx, k14) \\\r\n -self.lambda1*self.lambda2*self.dt**2*tf.matmul(vnx, k15) + self.dt**2 * self.lambda2**2*k16 \r\n k= sigf1**2*k+sigf2**2*self.dt**2 * kpy2y1\r\n return tf.reshape(tf.diag_part(k),(-1,1)) \r\n\r\n\r\n\r\n def kernel_uf_train(self, Xu, Xf, t1, t2, s1, s2, lambda1, lambda2, sigf1, sigf2, un_u, un_f, dt, diag=False):\r\n\r\n if self.kernel_type == 'SE':\r\n if diag == False:\r\n ku1u1 = self.ku1u1(Xu, Xu, t1, t2,sigf1)\r\n ku1v1 = self.ku1v1(Xu,Xu,t1,t2,sigf1)\r\n ku1u0 = self.ku1u0(Xu,Xf,t1,t2,sigf1,un_u,un_f) \r\n ku1v0 = self.ku1v0(Xu,Xf,t1,t2,sigf1,un_u,un_f)\r\n \r\n kv1v1 = self.kv1v1(Xu,Xu,t1,t2,sigf1)\r\n kv1u0 = self.kv1u0(Xu,Xf,t1,t2,sigf1,un_u,un_f)\r\n kv1v0 = self.kv1v0(Xu,Xf,t1,t2,sigf1,un_u,un_f)\r\n \r\n ku0u0 = self.ku0u0(Xf,Xf,t1,t2,s1,s2,sigf1,sigf2,un_f,un_f)\r\n ku0v0 = self.ku0v0(Xf,Xf,t1,t2,s1,s2,sigf1,sigf2,un_f,un_f)\r\n \r\n kv0v0 = self.kv0v0(Xf,Xf,t1,t2,s1,s2,sigf1,sigf2,un_f,un_f)\r\n \r\n \r\n k1 = tf.concat((ku1u1, ku1v1, ku1u0, ku1v0),axis=1)\r\n k2 = tf.concat((tf.transpose(ku1v1),kv1v1,kv1u0,kv1v0),axis=1)\r\n k3 = tf.concat((tf.transpose(ku1u0),tf.transpose(kv1u0), ku0u0, ku0v0),axis=1)\r\n k4 = tf.concat((tf.transpose(ku1v0),tf.transpose(kv1v0), tf.transpose(ku0v0),kv0v0),axis=1)\r\n \r\n k = tf.concat((k1,k2,k3,k4),axis=0)\r\n \r\n return k\r\n else:\r\n ku1u1 = self.ku1u1(Xu, Xu, t1, t2,sigf1)\r\n ku1v1 = self.ku1v1(Xu,Xu,t1,t2,sigf1)\r\n ku1u0 = self.ku1u0(Xu,Xf,t1,t2,sigf1,un_u,un_f) \r\n ku1v0 = self.ku1v0(Xu,Xf,t1,t2,sigf1,un_u,un_f)\r\n \r\n kv1v1 = self.kv1v1(Xu,Xu,t1,t2,sigf1)\r\n kv1u0 = self.kv1u0(Xu,Xf,t1,t2,sigf1,un_u,un_f)\r\n kv1v0 = self.kv1v0(Xu,Xf,t1,t2,sigf1,un_u,un_f)\r\n \r\n ku0u0 = self.ku0u0(Xf,Xf,t1,t2,s1,s2,sigf1,sigf2,un_f,un_f)\r\n ku0v0 = self.ku0v0(Xf,Xf,t1,t2,s1,s2,sigf1,sigf2,un_f,un_f)\r\n \r\n kv0v0 = self.kv0v0(Xf,Xf,t1,t2,s1,s2,sigf1,sigf2,un_f,un_f)\r\n \r\n \r\n k1 = tf.concat((ku1u1, ku1v1, ku1u0, ku1v0),axis=1)\r\n k2 = tf.concat((tf.transpose(ku1v1),kv1v1,kv1u0,kv1v0),axis=1)\r\n k3 = tf.concat((tf.transpose(ku1u0),tf.transpose(kv1u0), ku0u0, ku0v0),axis=1)\r\n k4 = tf.concat((tf.transpose(ku1v0),tf.transpose(kv1v0), tf.transpose(ku0v0),kv0v0),axis=1)\r\n \r\n k = tf.concat((k1,k2,k3,k4),axis=0)\r\n \r\n \r\n \r\n return tf.reshape(tf.diag_part(k),(-1,1))\r\n\r\n\r\n\r\n\r\n \r\n def kernel_u_test(self, Xt, Xu, Xf, t1, t2, s1, s2, lambda1, lambda2, sigf1, sigf2, un_f, un_t, dt):\r\n if self.kernel_type == 'SE':\r\n ku1u1 = self.ku1u1(Xt, Xu, t1, t2,sigf1)\r\n ku1v1 = self.ku1v1(Xt,Xu,t1,t2,sigf1)\r\n ku1u0 = self.ku1u0(Xt,Xf,t1,t2,sigf1,un_t,un_f) \r\n ku1v0 = self.ku1v0(Xt,Xf,t1,t2,sigf1,un_t,un_f)\r\n \r\n kv1v1 = self.kv1v1(Xt,Xu,t1,t2,sigf1)\r\n kv1u0 = self.kv1u0(Xt,Xf,t1,t2,sigf1,un_t,un_f)\r\n kv1v0 = self.kv1v0(Xt,Xf,t1,t2,sigf1,un_t,un_f)\r\n \r\n \r\n ku1v1_T = self.ku1v1(Xu, Xt, t1, t2, sigf1)\r\n \r\n \r\n k1 = tf.concat((ku1u1, ku1v1, ku1u0, ku1v0),axis=1)\r\n k2 = tf.concat((tf.transpose(ku1v1_T),kv1v1,kv1u0,kv1v0),axis=1)\r\n \r\n k = tf.concat((k1,k2),axis=0)\r\n \r\n \r\n return k\r\n\r\n\r\n def kernel_f_test(self, Xt, Xu, Xf, t1, t2, s1, s2, lambda1, 
lambda2, sigf1, sigf2, un_f, un_t, dt):\r\n \r\n \r\n \r\n if self.kernel_type == 'SE':\r\n \r\n \r\n \r\n ku0u0 = self.ku0u0(Xt,Xf,t1,t2,s1,s2,sigf1,sigf2,un_t,un_f)\r\n ku0v0 = self.ku0v0(Xt,Xf,t1,t2,s1,s2,sigf1,sigf2,un_t,un_f)\r\n \r\n kv0v0 = self.kv0v0(Xt,Xf,t1,t2,s1,s2,sigf1,sigf2,un_t,un_f)\r\n \r\n ku1u0_T = self.ku1u0(Xu, Xt, t1, t2, sigf1,un_f, un_t)\r\n kv1u0_T = self.kv1u0(Xu, Xt, t1, t2, sigf1,un_f, un_t)\r\n ku1v0_T = self.ku1v0(Xu, Xt, t1, t2, sigf1,un_f, un_t)\r\n kv1v0_T = self.kv1v0(Xu, Xt, t1, t2, sigf1,un_f, un_t)\r\n ku0v0_T = self.ku0v0(Xf, Xt, t1, t2, s1, s2, sigf1,sigf2,un_f, un_t)\r\n \r\n k3 = tf.concat((tf.transpose(ku1u0_T),tf.transpose(kv1u0_T), ku0u0, ku0v0),axis=1)\r\n k4 = tf.concat((tf.transpose(ku1v0_T),tf.transpose(kv1v0_T), tf.transpose(ku0v0_T),kv0v0),axis=1)\r\n \r\n k = tf.concat((k3,k4),axis=0)\r\n \r\n \r\n return k \r\n \r\n \r\n\r\n def nlml(self,Xu,Xf,Yu,Yf,dt, hyp1, hyp2, hyp3, hyp4, sig_n, lambda1, lambda2, sigf1, sigf2, un_u, un_f, kernel_type, jitter=1.0e-10): # negative logarithm marginal-likelihood\r\n\r\n\r\n \r\n\r\n N = 2 * Xu.shape[0] + 2 * Xf.shape[0]\r\n self.K0 = self.kernel_uf_train(Xu,Xf,hyp1,hyp2,hyp3, hyp4, lambda1, lambda2, sigf1, sigf2, un_u, un_f, dt)\r\n K = self.K0 + (sig_n**2+jitter)*tf.eye(N,dtype=tf.float64)\r\n\r\n self.L = tf.cholesky(K)\r\n r = np.concatenate((Yu[0],Yu[1],Yf[0],Yf[1]),axis=0)\r\n self.alpha = tf.cholesky_solve(self.L, r)\r\n temp = tf.matmul(r, self.alpha, transpose_a=True)\r\n\r\n return 0.5 * N * np.log(2.0*np.pi) + temp /2.0 \\\r\n +tf.reduce_sum(tf.log(tf.diag_part(self.L))) \r\n \r\n\r\n\r\n \r\n def training(self, optimizer = 'Adam', num_iter=10001, learning_rate = 1.0e-3, jitter = 1.0e-15): \r\n\r\n tf.reset_default_graph()\r\n\r\n self.hyp1 = tf.exp(tf.Variable(0.0,dtype=np.float64)) \r\n self.hyp2 = tf.exp(tf.Variable(0.0,dtype=np.float64))\r\n self.hyp3 = tf.exp(tf.Variable(0.0,dtype=np.float64))\r\n self.hyp4 = tf.exp(tf.Variable(0.0,dtype=np.float64))\r\n self.sigf1 = tf.exp(tf.Variable(0.0,dtype=np.float64)) \r\n self.sigf2 = tf.exp(tf.Variable(0.0,dtype=np.float64)) \r\n self.lambda1 = tf.exp(tf.Variable(0.0,dtype=np.float64))\r\n self.lambda2 = tf.exp(tf.Variable(0.0,dtype=np.float64))\r\n\r\n \r\n\r\n if self.is_noise:\r\n self.sig_n = tf.exp(tf.Variable(np.log(1.0e-4),dtype=tf.float64))\r\n else:\r\n self.sig_n = tf.Variable(0.0,dtype=tf.float64, trainable=False)\r\n\r\n\r\n self.num_iter = num_iter\r\n self.jitter = jitter\r\n self.optimizer = optimizer\r\n self.learning_rate = learning_rate\r\n \r\n \r\n \r\n\r\n Nu = self.xu_train.shape[0]\r\n Nf = self.xf_train.shape[0] \r\n Nt = self.xf_test.shape[0]\r\n\r\n nlml_tf \\\r\n = self.nlml(self.xu_train,self.xf_train, self.yu_train, self.yf_train, self.dt, self.hyp1, self.hyp2, self.hyp3, self.hyp4, self.sig_n, self.lambda1, self.lambda2, self.sigf1, self.sigf2, self.un_u, self.un_f, self.kernel_type, self.jitter)\r\n \r\n\r\n\r\n self.K_train = self.kernel_uf_train(self.xu_train, self.xf_train, self.hyp1, self.hyp2, self.hyp3, self.hyp4, self.lambda1, self.lambda2, self.sigf1, self.sigf2, self.un_u, self.un_f, self.dt)\r\n self.m_train = tf.matmul(self.K_train,self.alpha)\r\n L1 = tf.concat((tf.zeros((2*Nf,2*Nu),dtype=tf.float64),self.previous_cov_mat),axis=1) \r\n L1 = tf.concat((tf.zeros((2*Nu,2*Nu+2*Nf),dtype=tf.float64),L1),axis=0) \r\n V1 = tf.linalg.triangular_solve(self.L,tf.transpose(self.K_train))\r\n V2 = tf.cholesky_solve(self.L, tf.transpose(self.K_train))\r\n\r\n self.var_train = self.kernel_uf_train(self.xu_train, 
self.xf_train, self.hyp1, self.hyp2, self.hyp3, self.hyp4, self.lambda1, self.lambda2, self.sigf1, self.sigf2, self.un_u, self.un_f, self.dt, diag=True)\\\r\n - tf.reshape(tf.reduce_sum(V1*V1,axis=0),(-1,1))\r\n self.var_train = self.var_train + tf.reshape( tf.diag_part(tf.matmul(tf.matmul(tf.transpose(V2),L1),V2)),(-1,1))\r\n\r\n self.var_train = tf.maximum(self.var_train,tf.zeros((2*Nu+2*Nf,1),dtype=tf.float64) ) \r\n\r\n \r\n\r\n k_test_u = self.kernel_u_test(self.xu_test, self.xu_train, self.xf_train, self.hyp1, self.hyp2, self.hyp3, self.hyp4, self.lambda1, self.lambda2, self.sigf1, self.sigf2, self.un_f, self.un_t, self.dt)\r\n self.m_test_u = tf.matmul(k_test_u,self.alpha) \r\n V1_test_u = tf.linalg.triangular_solve(self.L,tf.transpose(k_test_u))\r\n V2_test_u = tf.cholesky_solve(self.L, tf.transpose(k_test_u))\r\n self.var_test_u = self.kernel_uf_train(self.xu_test, self.xu_test, self.hyp1, self.hyp2, self.hyp3, self.hyp4, self.lambda1, self.lambda2, self.sigf1, self.sigf2, self.un_u, self.un_u, self.dt,diag=True)[:2*Nt,0:1] - tf.reshape(tf.reduce_sum(V1_test_u*V1_test_u,axis=0),(-1,1)) +self.sig_n**2 \r\n self.var_test_u = self.var_test_u + tf.reshape( tf.diag_part(tf.matmul(tf.matmul(tf.transpose(V2_test_u),L1),V2_test_u)),(-1,1))\r\n\r\n self.var_test_u = tf.maximum(self.var_test_u,tf.zeros((2*Nt,1),dtype=tf.float64) ) \r\n \r\n\r\n \r\n k_test_f = self.kernel_f_test(self.xf_test, self.xu_train, self.xf_train, self.hyp1, self.hyp2, self.hyp3, self.hyp4, self.lambda1, self.lambda2, self.sigf1, self.sigf2, self.un_f, self.un_t, self.dt)\r\n self.m_test_f = tf.matmul(k_test_f,self.alpha) \r\n V1_test_f = tf.linalg.triangular_solve(self.L,tf.transpose(k_test_f))\r\n V2_test_f = tf.cholesky_solve(self.L, tf.transpose(k_test_f))\r\n self.var_test_f = self.kernel_uf_train(self.xf_test, self.xf_test, self.hyp1, self.hyp2, self.hyp3, self.hyp4, self.lambda1, self.lambda2, self.sigf1, self.sigf2, self.un_t, self.un_t, self.dt,diag=True)[2*Nt:,0:1] \\\r\n - tf.reshape(tf.reduce_sum(V1_test_f*V1_test_f,axis=0),(-1,1)) + self.sig_n**2\r\n self.var_test_f = self.var_test_f + tf.reshape( tf.diag_part(tf.matmul(tf.matmul(tf.transpose(V2_test_f),L1),V2_test_f)),(-1,1))\r\n\r\n self.var_test_f = tf.maximum(self.var_test_f,tf.zeros((2*Nt,1),dtype=tf.float64) ) \r\n \r\n \r\n\r\n\r\n if optimizer == 'Adam':\r\n optimizer_Adam = tf.train.AdamOptimizer(learning_rate)\r\n train_op_Adam = optimizer_Adam.minimize(nlml_tf) \r\n\r\n grad1 = tf.gradients(nlml_tf,self.hyp1)[0]\r\n grad2 = tf.gradients(nlml_tf,self.hyp2)[0]\r\n grad3 = tf.gradients(nlml_tf,self.hyp3)[0]\r\n grad4 = tf.gradients(nlml_tf,self.hyp4)[0]\r\n grads1 = tf.gradients(nlml_tf,self.sigf1)[0]\r\n grads2 = tf.gradients(nlml_tf,self.sigf2)[0]\r\n \r\n gradn = tf.gradients(nlml_tf,self.sig_n)[0]\r\n std_train = tf.sqrt(self.var_train)\r\n std_test_u = tf.sqrt(self.var_test_u)\r\n std_test_f = tf.sqrt(self.var_test_f)\r\n \r\n gradl1 = tf.gradients(nlml_tf,self.lambda1)[0]\r\n gradl2 = tf.gradients(nlml_tf,self.lambda2)[0] \r\n \r\n nlml_min = 1.0e16\r\n \r\n \r\n with tf.Session() as sess:\r\n sess.run(tf.global_variables_initializer())\r\n for i in range(self.num_iter): \r\n sess.run(train_op_Adam)\r\n if i % 5000 == 0:\r\n nlml_temp = sess.run(nlml_tf)\r\n if nlml_temp < nlml_min:\r\n nlml_min = nlml_temp\r\n self.mm_train = sess.run(self.m_train)\r\n self.ss_train = sess.run(std_train)\r\n self.mm_test_u = sess.run(self.m_test_u)\r\n self.ss_test_u = sess.run(std_test_u)\r\n self.mm_test_f = sess.run(self.m_test_f)\r\n self.ss_test_f = 
sess.run(std_test_f)\r\n \r\n\r\n lambda1_val, lambda2_val, nlml_val, hyp1_val, hyp2_val, hyp3_val, hyp4_val, sigf1_val, sigf2_val, sig_n, grad_f1, grad_f2, grad_f3, grad_f4, grad_s1, grad_s2, grad_n, grad_l1, grad_l2= \\\r\n sess.run([self.lambda1, self.lambda2, nlml_tf, self.hyp1, self.hyp2, self.hyp3, self.hyp4, self.sigf1, self.sigf2, \\\r\n self.sig_n ,grad1,\\\r\n grad2, grad3,\\\r\n grad4,grads1, grads2,gradn,\\\r\n gradl1,gradl2]) \r\n \r\n \r\n print ('*************************\\n')\r\n print ('Iter: ', i, ' nlml =', nlml_min, '\\n')\r\n print ('nlml: ' , nlml_val)\r\n print ('############### lambda: ', [lambda1_val, lambda2_val],'\\n')\r\n print ('hyp: ' , [hyp1_val,hyp2_val, hyp3_val, hyp4_val])\r\n print ('signal std: ', [sigf1_val, sigf2_val])\r\n print ('noise std: ',sig_n)\r\n print('grads of nlml over hyp ', [grad_f1, grad_f2, grad_f3, grad_f4]) \r\n print('grads of nlml over sigf ', [grad_s1, grad_s2]) \r\n \r\n print('grads of nlml over lambda ', [grad_l1, grad_l2]) \r\n print ('grad of nlml over sig_n', grad_n) \r\n\r\n \r\n print ('Training_err_u1:', np.linalg.norm(self.mm_train[:Nu,0:1]-self.yu_train[0],2)/np.linalg.norm(self.yu_train[0],2))\r\n print ('Training_err_u0:', np.linalg.norm(self.mm_train[(2*Nu):(2*Nu+Nf),0:1]-self.yf_train[0],2)/np.linalg.norm(self.yf_train[0],2))\r\n\r\n\r\n print ('Training_err_v1:', np.linalg.norm(self.mm_train[Nu:(2*Nu),0:1]-self.yu_train[1],2)/np.linalg.norm(self.yu_train[1],2))\r\n print ('Training_err_v0:', np.linalg.norm(self.mm_train[(2*Nu+Nf):(2*Nu+2*Nf),0:1]-self.yf_train[1],2)/np.linalg.norm(self.yf_train[1],2))\r\n\r\n print ('Test_err_u1:', np.linalg.norm(self.mm_test_u[:Nt,0:1]-self.yu_test[0],2)/np.linalg.norm(self.yu_test[0],2)) \r\n print ('Test_err_u0:', np.linalg.norm(self.mm_test_f[:Nt,0:1]-self.yf_test[0],2)/np.linalg.norm(self.yf_test[0],2)) \r\n\r\n\r\n\r\n print ('Test_err_v1:', np.linalg.norm(self.mm_test_u[Nt:(2*Nt),0:1]-self.yu_test[1],2)/np.linalg.norm(self.yu_test[1],2)) \r\n print ('Test_err_v0:', np.linalg.norm(self.mm_test_f[Nt:(2*Nt),0:1]-self.yf_test[1],2)/np.linalg.norm(self.yf_test[1],2)) \r\n\r\n\r\n\r\n\r\nu_simulation = sio.loadmat('cylinder_fine.mat')\r\n\r\nu_exa = np.real(u_simulation['U_star'])\r\nt_exa = u_simulation['t_star'].reshape((-1,1))\r\nx_exa = u_simulation['X_star']\r\n \r\n \r\n\r\n\r\ntt0 = time.time()\r\n\r\nNf = 250\r\nNu = 250\r\n\r\n\r\n\r\n\r\n\r\n\r\ndt = 0.02\r\n\r\n\r\ninit_time = 0.18\r\nnoise_rate = 0.0\r\n\r\nnp.random.seed(seed=1234)\r\n\r\n\r\nindex = np.random.permutation(np.arange(x_exa.shape[0]))\r\nindex_f = index[:Nf]\r\n\r\n\r\nindex = np.random.permutation(np.arange(x_exa.shape[0]))\r\nindex_u = index[:Nu]\r\n\r\n\r\nxf_train = x_exa[index_f,:]\r\n\r\n\r\n\r\nxf_test = xf_train\r\n\r\nyf_train = []\r\nyf_train.append(u_exa[index_f,0:1,9])\r\nyf_train.append(u_exa[index_f,1:2,9])\r\n\r\n\r\nyu_train0 = [] \r\nyu_train0.append(u_exa[index_f,0:1,9])\r\nyu_train0.append(u_exa[index_f,1:2,9])\r\n\r\nxu_train = x_exa[index_u,:]\r\n\r\nplt.contourf(np.linspace(1.0,7.5,66),np.linspace(-1.7,1.7,35), u_exa[:,0,9].reshape(66,35).T,100,cmap='jet')\r\nplt.colorbar()\r\nplt.plot(xf_train[:,0],xf_train[:,1],'bo',xu_train[:,0],xu_train[:,1],'ro')\r\nplt.show()\r\n\r\nxu_test = xf_test\r\n\r\n\r\n\r\n\r\n\r\n\r\nprevious_cov_mat = noise_rate*np.std(np.ndarray.flatten(yf_train[0]))*np.eye(2*Nf,dtype=np.float64)\r\n\r\n\r\n\r\nnoise_f = noise_rate*np.std(np.ndarray.flatten(yf_train[0]))*np.random.randn(Nf,1)\r\n\r\n\r\nNt = xf_test.shape[0]\r\n\r\nun_u = 
[np.ones((yu_train0[0].shape[0],1))]*2\r\nun_f = yf_train\r\nun_t = yf_train\r\n\r\n\r\nfor k in np.arange(0,1):\r\n yu_train = []\r\n yu_test = []\r\n \r\n yf_test = yf_train\r\n \r\n \r\n np.random.seed(seed=1234)\r\n \r\n \r\n yu_train.append(u_exa[index_u,0:1,10])\r\n yu_train.append(u_exa[index_u,1:2,10])\r\n \r\n \r\n yu_test.append( u_exa[index_f,0:1,10])\r\n yu_test.append( u_exa[index_f,1:2,10])\r\n \r\n \r\n\r\n \r\n dataset = {'xu_train': xu_train, 'yu_train': yu_train, \\\r\n 'xu_test': xu_test, 'yu_test': yu_test, \\\r\n 'xf_train': xf_train, 'yf_train': yf_train, \\\r\n 'xf_test': xf_test, 'yf_test': yf_test}\r\n \r\n \r\n print ('\\n t = 0.18 *********************')\r\n \r\n\r\n \r\n GP_instance = one_GP()\r\n GP_instance.model(dataset, dt, previous_cov_mat, un_u, un_f, un_t, is_noise=True)\r\n GP_instance.training(num_iter=20001,jitter=0.0)\r\n\r\n del GP_instance\r\n\r\ntt1 = time.time()\r\n\r\nprint ('CPU time ', tt1-tt0) \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n "
] |
[
[
"tensorflow.concat",
"numpy.linspace",
"tensorflow.zeros",
"tensorflow.reduce_sum",
"tensorflow.diag",
"matplotlib.pyplot.plot",
"numpy.concatenate",
"numpy.random.randn",
"tensorflow.train.AdamOptimizer",
"tensorflow.cholesky",
"tensorflow.cholesky_solve",
"tensorflow.Variable",
"tensorflow.diag_part",
"numpy.arange",
"numpy.eye",
"scipy.io.loadmat",
"tensorflow.gradients",
"numpy.real",
"tensorflow.reset_default_graph",
"tensorflow.Session",
"tensorflow.matmul",
"numpy.log",
"tensorflow.exp",
"tensorflow.global_variables_initializer",
"matplotlib.pyplot.show",
"tensorflow.transpose",
"numpy.random.seed",
"numpy.ndarray.flatten",
"tensorflow.eye",
"numpy.linalg.norm",
"numpy.ones",
"matplotlib.pyplot.colorbar",
"tensorflow.sqrt"
]
] |
lidiaToropova/openvino
|
[
"298cced3b30f056428d3f5977cd194837dd89062"
] |
[
"tools/mo/openvino/tools/mo/graph/graph.py"
] |
[
"# Copyright (C) 2018-2022 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nimport collections\nimport logging as log\nfrom copy import deepcopy\nfrom typing import List\n\nimport networkx as nx\nimport numpy as np\n\nfrom openvino.tools.mo.graph.port import Port\nfrom openvino.tools.mo.middle.passes.eliminate import mark_output_reachable_nodes, shape_inference, mark_undead_nodes, \\\n mark_const_producer_nodes, eliminate_dead_nodes, add_constant_operations\nfrom openvino.tools.mo.utils.error import Error\nfrom openvino.tools.mo.utils.utils import refer_to_faq_msg, deprecated_api, shrink_str_value\n\n\ndef dict_to_ordered_dict(d: dict, func=lambda t: t):\n return collections.OrderedDict(sorted(d.items(), key=lambda t: func(t[0])))\n\n\nclass Node:\n def __init__(self, graph, node: str):\n assert node in graph, \"Attempt to access node {} that not in graph\".format(node)\n\n super(Node, self).__setattr__('graph', graph)\n super(Node, self).__setattr__('node', node) # obsolete\n super(Node, self).__setattr__('id', node)\n\n def __str__(self, max_length: int = 100):\n node_dict = self.graph.node[self.id]\n print_dict = {k: v if k != 'value' else shrink_str_value(v, max_symbols=max_length) for k, v in\n node_dict.items()}\n return str(print_dict)\n\n def __setattr__(self, k, v):\n # you can assign only existing attributes\n attrs = self.graph.node[self.node]\n if not k in attrs:\n raise AttributeError(\"Attribute {} missing in {} node\".format(k, self.name))\n if k == 'version' and attrs.get(k, v) != v:\n raise AttributeError(\"Attribute 'version' cannot be updated in {} node\".format(self.name))\n\n attrs[k] = v\n\n def __getattr__(self, k):\n return self.graph.node[self.node][k]\n\n def __getitem__(self, k):\n return self.graph.node[self.node][k]\n\n def __setitem__(self, k, v):\n if k == 'version' and self.graph.node[self.node].get(k, v) != v:\n raise AttributeError(\"Attribute 'version' cannot be updated in {} node\".format(self.name))\n self.graph.node[self.node][k] = v\n\n def __contains__(self, k):\n return self.has(k)\n\n def __eq__(self, other):\n return (\n self.__class__ == other.__class__ and\n self.graph == other.graph and\n self.id == other.id\n )\n\n def __hash__(self):\n return hash((self.graph, self.id))\n\n def __delitem__(self, k):\n del self.graph.node[self.node][k]\n\n def add_input_port(self, idx, skip_if_exist=False, **kwargs):\n if not self.has_valid('_in_ports'):\n Node(self.graph, self.id)['_in_ports'] = {}\n control_flow = kwargs['control_flow'] if kwargs.get('control_flow') is not None else False\n if skip_if_exist is False and idx in self.in_ports(control_flow=control_flow):\n raise Error(\"Input port with {} index already exists for {} node.\".format(idx, self.name))\n self._in_ports.update({idx: kwargs})\n\n def delete_input_ports(self, idx_set, skip_if_absent=False):\n if len(idx_set) == 0:\n return # there is nothing to delete\n for idx in idx_set:\n self.delete_input_port(idx, skip_if_absent)\n\n def delete_input_port(self, idx, skip_if_absent=False):\n if not self.has_valid('_in_ports'):\n raise Error(\n 'Cannot removed ports with indices {} from node {} because node doesn\\'t '\n 'have _in_ports attribute'.format(idx, self.soft_get('name')))\n # no handling of control flow edges -- TODO\n control_flow = False\n if not skip_if_absent and idx not in self.in_ports(control_flow=control_flow):\n raise Error(\"Input port with index {} doesn't exist in node {}.\".format(idx, self.soft_get('name')))\n if not self.in_port(idx).disconnected():\n 
self.in_port(idx).disconnect()\n del self._in_ports[idx]\n # update in_ports_count for consistency but it is unlikely have any effect somewhere in the code\n self['in_ports_count'] = len(self._in_ports)\n\n def delete_output_port(self, idx, skip_if_absent=False):\n if not self.has_valid('_out_ports'):\n raise Error(\n 'Cannot removed ports with indices {} from node {} because node doesn\\'t '\n 'have _out_ports attribute'.format(idx, self.soft_get('name')))\n # no handling of control flow edges -- TODO\n control_flow = False\n if not skip_if_absent and idx not in self.out_ports(control_flow=control_flow):\n raise Error(\"Output port with index {} doesn't exist in node {}.\".format(idx, self.soft_get('name')))\n if not self.out_port(idx).disconnected():\n self.out_port(idx).disconnect()\n del self._out_ports[idx]\n # update in_ports_count for consistency but it is unlikely have any effect somewhere in the code\n self['out_ports_count'] = len(self._out_ports)\n\n def add_output_port(self, idx, skip_if_exist=False, **kwargs):\n if not self.has_valid('_out_ports'):\n Node(self.graph, self.id)['_out_ports'] = {}\n control_flow = kwargs['control_flow'] if kwargs.get('control_flow') is not None else False\n if skip_if_exist is False and idx in self.out_ports(control_flow=control_flow):\n raise Error(\"Output port with {} index already exists for {} node.\".format(idx, self.name))\n self._out_ports.update({idx: kwargs})\n\n def add_sequence_of_ports(self, type: str, rng):\n assert type in ['in', 'out']\n for idx in rng:\n if type == 'in':\n self.add_input_port(idx, skip_if_exist=True)\n if type == 'out':\n self.add_output_port(idx, skip_if_exist=True)\n\n def in_port(self, idx=None, control_flow=False) -> Port:\n if not self.has_valid('_in_ports'):\n raise Error(\"Operation {} {} has no _in_ports attribute\", self.op, self.name)\n if idx not in self._in_ports:\n raise Error(\"Input port with index {} is not in node {}\".format(idx, self.name))\n if not control_flow and 'control_flow' in self._in_ports[idx] and self._in_ports[idx]['control_flow']:\n raise Error(\"Attempt to access control flow port when it's prohibited for node {}\".format(self.name))\n return Port(node=self, idx=idx, type='in', **self._in_ports[idx])\n\n def in_ports(self, control_flow=False):\n if not self.has_valid('_in_ports'):\n raise Error(\"Operation {} {} has no _in_ports attribute\", self.op, self.name)\n ports = {}\n for idx in self._in_ports:\n if control_flow or 'control_flow' not in self._in_ports[idx] or not self._in_ports[idx]['control_flow']:\n ports.update({idx: self.in_port(idx, control_flow=control_flow)})\n return dict_to_ordered_dict(ports, func=lambda t: int(str(t).replace('control_flow_', '')))\n\n def out_port(self, idx=None, control_flow=False) -> Port:\n if not self.has_valid('_out_ports'):\n raise Error(\"Operation {} {} has no _out_ports attribute\", self.op, self.name)\n if idx not in self._out_ports:\n raise Error(\"Output port with index {} is not in node {}\".format(idx, self.name))\n if not control_flow and 'control_flow' in self._out_ports[idx] and self._out_ports[idx]['control_flow']:\n raise Error(\"Attempt to access control flow port when it's prohibited for node {}\".format(self.name))\n return Port(node=self, idx=idx, type='out', **self._out_ports[idx])\n\n def out_ports(self, control_flow=False):\n if not self.has_valid('_out_ports'):\n raise Error(\"Operation {} {} has no _out_ports attribute\", self.op, self.name)\n ports = {}\n for idx in self._out_ports:\n if control_flow or 
'control_flow' not in self._out_ports[idx] or not self._out_ports[idx]['control_flow']:\n ports.update({idx: self.out_port(idx, control_flow=control_flow)})\n return dict_to_ordered_dict(ports, func=lambda t: int(str(t).replace('control_flow_', '')))\n\n def has_port(self, port_type, idx, control_flow=False):\n assert port_type in ['in', 'out'], \"Invalid usage of has_port method\"\n\n if port_type == 'in':\n return self.has_valid('_in_ports') and idx in self.in_ports(control_flow=control_flow)\n else:\n return self.has_valid('_out_ports') and idx in self.out_ports(control_flow=control_flow)\n\n def is_in_port_connected(self, idx, control_flow=False):\n return self.has_port('in', idx, control_flow) and not self.in_port(idx, control_flow).disconnected()\n\n def is_out_port_connected(self, idx, control_flow=False):\n return self.has_port('out', idx, control_flow) and not self.out_port(idx, control_flow).disconnected()\n\n def attrs(self):\n return self.graph.node[self.node]\n\n def has(self, k):\n return k in self.graph.node[self.node]\n\n def has_valid(self, k):\n return self.has(k) and not self.graph.node[self.node][k] is None\n\n def has_and_set(self, k):\n return self.has_valid(k) and self[k]\n\n def in_nodes_edges(self, control_flow: bool = False):\n return dict_to_ordered_dict({x[1]['in']: (Node(self.graph, x[0]), x[1]) for x in\n self.get_inputs(control_flow=control_flow)},\n func=lambda t: int(str(t).replace('control_flow_', '')))\n\n def in_nodes(self, control_flow: bool = False):\n if self.kind == 'op':\n return dict_to_ordered_dict({x[1]['in']: Node(self.graph, x[0]) for x in\n self.get_inputs(control_flow=control_flow)},\n func=lambda t: int(str(t).replace('control_flow_', '')))\n elif self.kind == 'data':\n return [Node(self.graph, n) for n, d in self.get_inputs(control_flow=control_flow)]\n\n def in_node(self, key=0, control_flow: bool = False):\n return self.in_nodes(control_flow=control_flow)[key]\n\n def in_edges(self, control_flow: bool = False):\n assert self.has('kind')\n assert self.kind in ['op', 'data']\n if self.kind == 'op':\n return dict_to_ordered_dict({x[1]['in']: x[1] for x in self.get_inputs(control_flow=control_flow)},\n func=lambda t: int(str(t).replace('control_flow_', '')))\n elif self.kind == 'data':\n return [d for n, d in self.get_inputs(control_flow=control_flow)]\n\n def out_nodes_edges(self, control_flow: bool = False):\n return dict_to_ordered_dict({x[1]['out']: (Node(self.graph, x[0]), x[1]) for x in\n self.get_outputs(control_flow=control_flow)},\n func=lambda t: int(str(t).replace('control_flow_', '')))\n\n def out_nodes(self, control_flow: bool = False):\n assert self.has('kind')\n assert self.kind in ['op', 'data']\n if self.kind == 'op':\n return dict_to_ordered_dict({x[1]['out']: Node(self.graph, x[0]) for x in\n self.get_outputs(control_flow=control_flow)},\n func=lambda t: int(str(t).replace('control_flow_', '')))\n elif self.kind == 'data':\n return [Node(self.graph, n) for n, d in self.get_outputs(control_flow=control_flow)]\n\n def out_edges(self, control_flow: bool = False):\n assert self.has('kind')\n assert self.kind in ['op', 'data']\n if self.kind == 'op':\n return dict_to_ordered_dict({x[1]['out']: x[1] for x in self.get_outputs(control_flow=control_flow)},\n func=lambda t: int(str(t).replace('control_flow_', '')))\n elif self.kind == 'data':\n return [d for n, d in self.get_outputs(control_flow=control_flow)]\n\n def out_node(self, key=0, control_flow: bool = False):\n return self.out_nodes(control_flow=control_flow)[key]\n\n def 
in_edge(self, key=0, control_flow: bool = False):\n return self.in_edges(control_flow=control_flow)[key]\n\n def out_edge(self, key=0, control_flow: bool = False):\n return self.out_edges(control_flow=control_flow)[key]\n\n def get_attrs(self):\n return self.graph.node[self.node]\n\n def get_inputs(self, edge_attr: dict = None, control_flow: bool = False):\n if edge_attr is None:\n edge_attr = {}\n in_edges = self.graph.in_edges(self.id, data=True)\n if not control_flow:\n in_edges = [(u, v, d) for u, v, d in in_edges if 'control_flow_edge' not in d or not d['control_flow_edge']]\n return [(u, d) for u, v, d in in_edges if all([attr in d and d[attr] == edge_attr[attr] for attr in edge_attr])]\n\n def get_outputs(self, edge_attr: dict = None, control_flow: bool = False):\n if edge_attr is None:\n edge_attr = {}\n out_edges = self.graph.out_edges(self.id, data=True)\n if not control_flow:\n out_edges = [(u, v, d) for u, v, d in out_edges if\n 'control_flow_edge' not in d or not d['control_flow_edge']]\n return [(v, d) for u, v, d in out_edges if\n all([attr in d and d[attr] == edge_attr[attr] for attr in edge_attr])]\n\n def get_sorted_inputs(self, control_flow: bool = False):\n return sorted([x for x in self.get_inputs(control_flow=control_flow) if 'in' in x[1]],\n key=lambda x: x[1]['in'])\n\n def get_sorted_outputs(self, control_flow: bool = False):\n return sorted([x for x in self.get_outputs(control_flow=control_flow) if 'out' in x[1]],\n key=lambda x: x[1]['out'])\n\n def soft_get(self, k, default='<UNKNOWN>'):\n return self[k] if self.has_valid(k) else default\n\n def edges(self, attrs: dict = None):\n \"\"\" Get a single edge with specified set of attributes.\n\n If none or multiple edges satisfies this criteria, exception is raised\n Edge is represented as tuple (u, v, d), where u is source node,\n v is destination node and d is edge attributes.\n \"\"\"\n edges = list(self.graph.in_edges([self.id], data=True)) + list(self.graph.out_edges([self.id], data=True))\n return [(u, v, d) for u, v, d in edges if dict_includes(d, attrs)]\n\n def edge(self, attrs: dict = None):\n \"\"\" Get a single edge with specified set of attributes.\n\n If none or multiple edges satisfies this criteria, exception is raised\n Edge is represented as tuple (u, v, d), where u is source node,\n v is destination node and d is edge attributes.\n \"\"\"\n edges = self.edges(attrs)\n assert len(edges) == 1, 'edges: {}, required attributes: {}'.format(edges, attrs)\n return edges[0]\n\n def copy_node(self, new_attrs: dict = None, dst_graph=None):\n ''' Copies node with all attributes (optionally updated) within the same graph or to different graph.'''\n if new_attrs is None:\n new_attrs = {}\n if dst_graph is None:\n dst_graph = self.graph\n\n attrs = deepcopy(self.attrs())\n new_id = dst_graph.unique_id(attrs['name']) if 'name' in attrs else dst_graph.unique_id()\n attrs['name'] = new_id\n attrs.update(new_attrs)\n dst_graph.add_node(new_id, **attrs)\n return Node(dst_graph, new_id)\n\n def insert_node_with_data_before(self, inp, new_op_class: callable, op_before_params: dict = None,\n infer_current: bool = False, additional_inputs: list = None):\n \"\"\"\n Inserts operation node with op_before_params and data node before current operation\n\n :param inp: input data node of current node\n :param new_op_class: class of operation that will be inserted before current operation node\n :param op_before_params: parameters to be added to operation that will be inserted before current operation\n\n Before calling:\n 
[...] -> inp -> Cur_Op -> Cur_Data -> [...]\n\n After calling:\n [...] -> inp -> New_Op_bef -> New_Data_bef -> Cur_Op -> Cur_Data -> [...]\n [op_before_params]\n \"\"\"\n graph = self.graph\n node = Node(graph, self.node)\n cls_name = new_op_class.op\n op_before_params = {} if op_before_params is None else op_before_params\n\n # operating with input\n new_op_before = new_op_class(graph, op_before_params)\n edge_attrs = deepcopy(graph.get_edge_data(inp.id, node.id)[0])\n graph.remove_edge(inp.id, node.id)\n # form a list of input nodes for a new op node combining new_out and additional_inputs\n inputs = [inp] + (additional_inputs if additional_inputs else [])\n new_inp = new_op_before.create_node_with_data(inputs, {'name': node.name + cls_name + '/Before'})\n graph.add_edge(new_inp.id, node.id, **edge_attrs)\n if infer_current:\n node.infer(node)\n\n def insert_node_with_data_after(self, out, new_op_class: callable, op_after_params: dict = None,\n additional_inputs: list = None):\n \"\"\"\n Inserts operation node with op_after_params and data node after current operation\n\n :param out: output data node of current node\n :param new_op_class: class of operation that will be inserted after current operation node\n :param op_after_params: parameters to be added to operation that will be inserted after current operation\n :param additional_inputs: other parameters for a new operation node in addition to one that is created\n at the 'out' placed; new nodes are added after 0-th input\n\n TODO Allow indexing for input parameters as well as for 'out' data node to explicitly\n specify ports that are connected to.\n\n Before calling:\n [...] -> Cur_Op -> Cur_Data -> [...]\n\n After calling:\n [...] -> Cur_Op -> Cur_Data -> New_Op_aft -> New_Data_aft(==out) -> [...]\n [op_after_params]\n \"\"\"\n # we import it here because Op imports Node and unique_id from this file\n from openvino.tools.mo.ops.op import Op\n\n graph = self.graph\n node = Node(graph, self.node)\n cls_name = new_op_class.op\n op_after_params = {} if op_after_params is None else op_after_params\n\n new_op_after = new_op_class(graph, op_after_params)\n graph.remove_edge(node.id, out.id)\n new_out = Op.create_data_node(graph, node)\n node.infer(node)\n # form a list of input nodes for a new op node combining new_out and additional_inputs\n inputs = [new_out] + (additional_inputs if additional_inputs else [])\n new_op_after.create_node_with_data(inputs, {'name': node.name + cls_name + '/After'}, data_nodes=out)\n\n def bracket_with_different_nodes_with_data(self, inp, out, new_op_class_before: callable,\n new_op_class_after: callable,\n op_before_params: dict = None, op_after_params: dict = None):\n \"\"\"\n Inserts one operation node with op_before_params and data node before current operation node and\n inserts one operation node with op_after_params and data node after current operation node\n :param inp: input data node of self.node node\n :param out: output data node of self.node node\n :param new_op_class_before: class of operation that will be inserted before current operation node\n :param new_op_class_after: class of operation that will be inserted after current operation node\n :param op_before_params: parameters to be added to operation that will be inserted before current operation\n :param op_after_params: parameters to be added to operation that will be inserted after current operation\n\n Before calling:\n [...] -> inp -> Cur_Op -> out -> [...]\n\n After calling:\n [...] 
-> inp -> New_Op_bef -> New_Data_bef -> Cur_Op -> Cur_Data -> New_Op_aft -> New_Data_aft(==out) -> [...]\n [op_before_params] [op_after_params]\n \"\"\"\n op_before_params = {} if op_before_params is None else op_before_params\n op_after_params = {} if op_after_params is None else op_after_params\n self.insert_node_with_data_before(inp, new_op_class_before, op_before_params)\n self.insert_node_with_data_after(out, new_op_class_after, op_after_params)\n\n def bracket_op_with_another_op(self, inp, out, new_op_class: callable,\n op_before_params: dict = None, op_after_params: dict = None):\n \"\"\"\n Covers current operation with two similar another ones of class new_op_class:\n :param inp: input data node of self.node node\n :param out: output data node of self.node node\n :param new_op_class: class of operation with which current operation will be covered\n :param op_before_params: parameters to be added to operation that will be inserted before current operation\n :param op_after_params: parameters to be added to operation that will be inserted after current operation\n\n Before calling:\n [...] -> inp -> Cur_Op -> out -> [...]\n\n After calling:\n [...] -> inp -> New_Op_bef -> New_Data_bef -> Cur_Op -> Cur_Data -> New_Op_aft -> New_Data_aft(==out) -> [...]\n [op_before_params] [op_after_params]\n \"\"\"\n self.bracket_with_different_nodes_with_data(inp=inp, out=out,\n new_op_class_before=new_op_class, new_op_class_after=new_op_class,\n op_before_params=op_before_params, op_after_params=op_after_params)\n\n def insert_node_after(self, new_node, node_out_port: int = 0):\n \"\"\"\n Insert node 'new_node' after output with index 'node_out_port' of the node 'node'. All consumers of node 'node'\n output with index 'node_out_port' will be changed to consume node 'new_node'.\n The function should be used when graph doesn't contain data nodes yet.\n :param node: node after which new node should be inserted.\n :param new_node: node to be inserted.\n :param node_out_port: the output index for the node 'node' to insert\n :return: None\n \"\"\"\n assert self.graph is new_node.graph\n assert (len([name for name in self.graph.nodes() if Node(self.graph, name).soft_get('kind') == 'data']) == 0)\n\n graph = self.graph\n old_edges = list(graph.out_edges(self.id, data=True, keys=True))\n # create new edges first and then remove all old edges. 
This is needed for case when 'node' has several consumers\n # getting input from 'node_out_port'.\n # save tuple (\"name of the destination edge\", \"edge key\") to be removed\n node_name_and_edge_key = []\n for _, dst_name, edge_key, edge_attrs in old_edges:\n if edge_attrs['out'] == node_out_port:\n log.debug('Create edge from \"{}\" to \"{}\"'.format(new_node.name, dst_name))\n graph.create_edge(new_node, Node(graph, dst_name), 0, edge_attrs['in'])\n node_name_and_edge_key.append((dst_name, edge_key))\n for dst_name, edge_key in node_name_and_edge_key:\n log.debug('Remove edge from \"{}\" to \"{}\"'.format(self.id, dst_name))\n graph.remove_edge(self.id, dst_name, edge_key)\n graph.create_edge(self, new_node, node_out_port, 0, {})\n\n def insert_op_on_input_port(self, in_port_idx: int, new_op_class: callable, new_op_attrs: dict,\n value: np.ndarray = None):\n \"\"\"\n Inserts new operation of new_op_class on in_port_index input port with new_op_attrs\n Connects Const operation with value to 1 input port of new node if value was passed\n\n Returns new operation node\n \"\"\"\n graph = self.graph\n name = self.soft_get('name', self.id)\n\n op_node = new_op_class(graph, new_op_attrs).create_node()\n\n assert self.has_port('in', in_port_idx), \\\n 'Node `{}` should have input port with idx `{}` but it does not'.format(name, in_port_idx)\n\n in_port_source = self.in_port(in_port_idx).get_source()\n self.in_port(in_port_idx).get_connection().set_source(op_node.out_port(0))\n op_node.in_port(0).connect(in_port_source)\n\n if value is not None:\n from openvino.tools.mo.ops.const import Const\n constant = Const(graph, {'value': value, 'name': op_node.name + '/value'}).create_node()\n op_node.in_port(1).connect(constant.out_port(0))\n\n return op_node\n\n def replace_node(self, new_node, new_node_out_port: int = None):\n \"\"\"\n Replaces node 'old_node' with a node 'new_node' preserving edge attributes.\n :param old_node: node to be replaced.\n :param new_node: node to replace with.\n :return: None\n \"\"\"\n assert self.graph is new_node.graph\n assert self.id != new_node.id, \"New node and replaceable node are the same\"\n graph = self.graph\n # save output edges and reconnect them to new node\n for _, dst_node_name, edge_attrs in graph.out_edges(self.id, data=True):\n new_edge_attrs = deepcopy(edge_attrs)\n if new_node_out_port is not None:\n assert 'out' not in edge_attrs or edge_attrs['out'] == 0, \\\n 'replace_node function can replace old node with a single output port only if new_node_out_port is ' \\\n 'specified'\n new_edge_attrs.update({'out': new_node_out_port})\n graph.add_edge(new_node.id, dst_node_name, **new_edge_attrs)\n\n # if the node for replace is output node then we propagate this attribute to a new node\n if len(self.out_nodes()) == 1 and self.out_node().has('op') and self.out_node().op == 'Result':\n graph.remove_node(self.out_node().id)\n add_opoutput(graph, new_node.id, 0, False)\n graph.remove_node(self.id)\n\n def input_ports_with(self, node):\n \"\"\"\n Returns a list of integers that specify input ports that connected to a given node.\n :param node: node in the graph that is expected to appear at input port for self node\n :return: a list of integers with port indices that are connected to self node\n \"\"\"\n return [i for i in range(len(self.in_nodes())) if self.in_node(i).id == node.id]\n\n def update_node(self):\n \"\"\"\n Update internal node attributes. 
Currently it just add input/output ports.\n :return: None\n \"\"\"\n in_ports_count = self.in_ports_count if self.has_valid('in_ports_count') else None\n out_ports_count = self.out_ports_count if self.has_valid('out_ports_count') else None\n\n if not self.has_valid('_in_ports'):\n Node(self.graph, self.id)['_in_ports'] = dict()\n if not self.has_valid('_out_ports'):\n Node(self.graph, self.id)['_out_ports'] = dict()\n\n if in_ports_count is not None:\n for idx in range(in_ports_count):\n if idx not in self._in_ports:\n self.add_input_port(idx=idx)\n\n if out_ports_count is not None:\n for idx in range(out_ports_count):\n if idx not in self._out_ports:\n self.add_output_port(idx=idx)\n\n def get_opset(self):\n \"\"\"\n Gets the operation set version where the operation was introduced.\n If the version is not defined then consider it an extension\n :return: the string with the opset name\n \"\"\"\n return self.soft_get('version', 'extension')\n\n\nclass Graph(nx.MultiDiGraph):\n def __init__(self, data=None, **attr):\n self.stage = None\n self.strict_mode = True\n super().__init__(data, **attr)\n\n if not hasattr(self, 'node'):\n self.node = self.nodes\n\n unique_id_count = 0\n op_names_statistic = collections.Counter()\n inputs_order = []\n outputs_order = []\n\n # SAFE API DESCRIPTION\n # all provided methods below are designed to be more safe and convenient\n # be careful while using other methods from nx.MultiDiGraph\n\n def add_node(self, node_for_adding, **attrs):\n # TODO: check required attrs for node\n super().add_node(node_for_adding, **attrs)\n node = Node(self, node_for_adding)\n node.update_node()\n\n def add_edge(self, u_for_edge, v_for_edge, key=None, **attr):\n\n # TODO: turn on strict mode\n if self.strict_mode:\n unode = Node(self, u_for_edge)\n vnode = Node(self, v_for_edge)\n\n # Check that we connect Op->Op in front phase, and data->Op or Op->data in middle(back) phase\n # Also check that all necessary ports are exists\n message = \"Attempt to connect {} to {}.\".format(u_for_edge, v_for_edge)\n if self.stage == 'front':\n assert unode.kind == 'op' and vnode.kind == 'op', \"{} Wrong add_adge usage! 
You can connect only two \" \\\n \"operations in front phase\".format(message)\n assert 'in' in attr and 'out' in attr, \"Missing necessary attribute in or out when adding edge \" \\\n \"between {} and {}\".format(u_for_edge, v_for_edge)\n is_control_flow = 'control_flow_edge' in attr and attr['control_flow_edge'] is True\n in_port = 'control_flow_{}'.format(attr['in']) if is_control_flow else attr['in']\n out_port = 'control_flow_{}'.format(attr['out']) if is_control_flow else attr['out']\n assert unode.has_port('out', out_port, control_flow=is_control_flow), \\\n \"{} Missing out port ({}) in {} node\".format(message, out_port, unode.soft_get('name', unode.id))\n assert vnode.has_port('in', in_port, control_flow=is_control_flow), \\\n \"{} Missing in port ({}) in {} node\".format(message, in_port, vnode.soft_get('name', vnode.id))\n elif self.stage in ['middle', 'back']:\n assert (unode.kind == 'data' and vnode.kind == 'op') or (unode.kind == 'op' and vnode.kind == 'data')\n if unode.kind == 'data' and vnode.kind == 'op':\n assert 'in' in attr, \"Attribute in is missing when adding edge to {}\".format(v_for_edge)\n assert vnode.has_port('in', attr['in']), \"{} Node {} has no in port ({})\" \\\n \"\".format(message, vnode.name, attr['in'])\n if unode.kind == 'op' and vnode.kind == 'data':\n assert 'out' in attr, \"Attribute out is missing when adding edge from {}\".format(u_for_edge)\n assert unode.has_port('out', attr['out']), \"{} Node {} has no out port ({})\" \\\n \"\".format(message, unode.name, attr['out'])\n\n return super().add_edge(u_for_edge, v_for_edge, key=key, **attr)\n\n def add_edges_from(self, ebunch_to_add, **attr):\n for e in ebunch_to_add:\n ne = len(e)\n if ne == 4:\n u, v, key, dd = e\n elif ne == 3:\n u, v, dd = e\n key = None\n elif ne == 2:\n u, v = e\n dd = {}\n key = None\n else:\n raise Error(\"Edge tuple %s must be a 2-tuple, 3-tuple or 4-tuple.\" % (e,))\n ddd = attr.copy()\n ddd.update(dd)\n self.add_edge(u, v, key=key, **ddd)\n\n def remove_edge(self, u, v, key=None):\n return super().remove_edge(u, v, key=key)\n\n def erase_node(self, node: Node):\n \"\"\"\n Erases node from the graph and reconnect edges from input node(s) to output node(s)\n Produces assertion error if the node being removed has multiple inputs or outputs.\n The function can be used in the front phase only (when there are no data nodes in the graph).\n :param node: Node to erase\n \"\"\"\n node_id = node.id\n\n inputs = list(self.in_edges(node_id, data=True))\n outputs = list(self.out_edges(node_id, data=True))\n\n assert node.kind == 'op' and (len(node.out_nodes()) == 0 or list(node.out_nodes().values())[0].kind != 'data'), \\\n \"The function must be used before the partial infer when graph doesn't contain data nodes.\"\n assert len(node.out_nodes()) <= 1, \"The node {} must produce just one output tensor\".format(\n node.soft_get('name'))\n assert len(inputs) <= 1, \"The node {} must have just one input\".format(node.soft_get('name'))\n\n if len(outputs) == 0 and len(inputs) != 0:\n from openvino.tools.mo.front.extractor import add_output_ops\n input_ids = {input_node_id: {'port': {'out': [attrs['out']]}} for input_node_id, _, attrs in inputs}\n if node.has('op') and node.op == 'Result':\n add_output_ops(self, input_ids)\n\n if len(outputs) == 0 or len(inputs) == 0:\n self.remove_node(node_id)\n return\n\n input_node_id = inputs[0][0]\n for src, dst, attrs in outputs:\n self.remove_edge(src, dst)\n # update the 'out' attribute of the edge from the node being removed\n attrs['out'] = 
inputs[0][2]['out']\n self.add_edge(input_node_id, dst, **attrs)\n self.remove_node(node_id)\n\n def get_edge_data(self, u, v, key=None, default=None):\n return super().get_edge_data(u, v, key=key, default=default)\n\n def get_inputs_with_ports(self, match, pattern_edges, input_names_in_pattern):\n \"\"\"\n Front replacements of multi-input nodes should specify output port to add_node-like functions\n This function is a helper to get such information out of matched nodes\n :param graph: graph to operate on\n :param match: dictionary returned by matching function\n :param pattern_edges: edges that are specified in pattern\n :param input_names_in_pattern: names of matched nodes as they were specified in pattern that should be in\n resulting list\n :return: list of tuples of node and output port\n \"\"\"\n inputs = []\n for name in input_names_in_pattern:\n assert name in match, \"node named {} not in match {}\".format(name, match)\n src = match[name]\n dst = []\n for edge in pattern_edges:\n if edge[0] == name:\n assert edge[1] in match, \"name from pattern_edges {} not in match {}\".format(edge[1], match)\n dst.append(match[edge[1]])\n if len(dst) != 1:\n raise Error('Multiple output ports detected for node {} as {} in pattern'.format(match[name].id, name))\n dst = dst[0]\n out_port = self.get_edge_data(src.id, dst.id)[0]['out']\n inputs.append((src, out_port))\n return inputs\n\n def get_node_id_by_name(self, name: str):\n nodes = self.get_nodes_with_attributes(name=name)\n if len(nodes) == 0:\n raise Error('No node with name {}. ' + refer_to_faq_msg(51), name)\n elif len(nodes) > 1:\n raise Error('Multiple nodes with name {}'.format(name))\n else:\n return nodes[0]\n\n def get_op_nodes(self, **attrs):\n nodes = self.get_nodes_with_attributes(**dict(kind='op', **attrs))\n return [Node(self, node) for node in nodes]\n\n def get_data_nodes(self, has_value=None):\n \"\"\"\n Returns list of data nodes.\n If has_value = True, returns data nodes with value\n If has_value = False, returns data nodes without value\n \"\"\"\n data_nodes = [Node(self, node) for node in self.nodes() if Node(self, node).soft_get('kind') == 'data']\n return [node for node in data_nodes if has_value is None or node.has_valid('value') == has_value]\n\n def get_nodes_with_attributes(self, **attrs: dict):\n node_attrs = self.nodes(data=True)\n return [n for n, d in node_attrs if all(a in d.items() for a in attrs.items())]\n\n def unique_id(self, prefix: str = \"\"):\n \"\"\"\n Generates a unique node id for a new node in a given graph.\n The optional string prefix can be specified.\n \"\"\"\n # TODO thread safety?\n self.unique_id_count = max(self.unique_id_count, self.number_of_nodes()) + 1\n if prefix and not self.has_node(prefix):\n return str(prefix)\n while self.has_node(prefix + str(self.unique_id_count)):\n self.unique_id_count += 1\n return prefix + str(self.unique_id_count)\n\n def check_empty_graph(self, description: str):\n if len(self.nodes()) <= 1:\n raise Error(\n \"Graph contains {} node after executing {}. 
It considered as error because resulting IR will be \"\n \"empty which is not usual\".format(len(self.nodes()), description))\n\n def check_shapes_consistency(self):\n data_nodes = self.get_data_nodes()\n data_nodes_with_wrong_shapes = []\n for data_node in data_nodes:\n if not data_node.has('shape'):\n data_nodes_with_wrong_shapes.append((data_node.name, \"no shape attribute\"))\n continue\n if data_node.shape is not None and not isinstance(data_node.shape, np.ndarray):\n data_nodes_with_wrong_shapes.append((data_node.name, type(data_node.shape)))\n if len(data_nodes_with_wrong_shapes) > 0:\n raise Error(\"Graph contains data nodes ({}) with inconsistent shapes: {}\".format(\n len(data_nodes_with_wrong_shapes),\n data_nodes_with_wrong_shapes\n ))\n\n def check_nodes_ports_are_consecutive(self):\n # Check that all operation nodes has consecutive ports indexes\n op_nodes = self.get_op_nodes()\n for node in op_nodes:\n for idx in range(len(node.in_ports())):\n if idx not in node.in_ports():\n raise Error(\"Node {} has not consecutive in ports indexes: {}\".format(node.name,\n list(node.in_ports().keys())))\n for idx in range(len(node.out_ports())):\n if idx not in node.out_ports():\n raise Error(\"Node {} has not consecutive out ports indexes: {}\".format(node.name,\n list(\n node.out_ports().keys())))\n\n def dump_graph_for_graphviz(self, node_attrs: list = ['kind', 'op', 'shape', 'correct_data_layout', 'nchw_layout',\n 'internal_layer_id'],\n edge_attrs: list = ['in', 'out'], nodes_to_dump: list = None,\n save_to_svg=False, highlight_nodes: list = None):\n\n from openvino.tools.mo.ops.tensor_iterator import _get_internal_output_node_id, _get_internal_input_node_id\n\n fill_color = {'op': 'lightblue', 'data': 'whitesmoke', 'highlight': 'firebrick'}\n fill_color_by_type = {'Const': 'lightpink', 'Parameter': 'yellowgreen', 'TensorIterator': 'lemonchiffon'}\n style = {'op': 'filled,bold', 'data': 'filled,rounded'}\n\n subgraphs = {}\n if highlight_nodes is None:\n highlight_nodes = []\n\n def _subgraph_label(node_id, node_attrs: dict, attrs_to_print: list):\n subgraphs[node_id] = \"cluster_{}\".format(node_id)\n label = 'subgraph \"cluster_{}\" '.format(node_id) + '{\\n'\n label += 'label = \"{}\"; \\n'.format(node_id)\n label += 'color={}; \\nstyle=\"filled,rounded\";\\n'.format(fill_color_by_type[node_attrs['op']])\n\n subgraph_name = node_attrs['sub_graphs']\n assert len(subgraph_name) == 1\n body = node_attrs[subgraph_name[0]].dump_graph_for_graphviz()\n body = body.split('\\n')[2:-1]\n label += '\\n'.join(body)\n label += '\\n}\\n'\n return label\n\n def _node_label(node_id, node_attrs: dict, attrs_to_print: list):\n label = str(node_id) + '\\\\n' + '\\\\n'.join([str(key) + '=' + str(node_attrs.get(key, 'None'))\n for key in attrs_to_print if key in node_attrs])\n if node_attrs.get('type', '') == 'Const':\n if 'value' not in attrs_to_print and 'value' in node_attrs:\n if node_attrs['value'] is not None:\n label += '\\\\nvalue=\\\\\"' + \\\n ','.join([str(val) for val in node_attrs['value'].flatten()])[:40] + '\\\\\"'\n else:\n label += '\\\\nvalue=None'\n return label\n\n def _dump_nodes_attrs():\n string = ''\n for node_id in nodes_to_dump:\n attrs = self.node[node_id]\n color = fill_color_by_type.get(attrs.get('type', ''), fill_color[attrs['kind']])\n\n if node_id in highlight_nodes or 'highlight' in node_attrs and node_attrs['highlight']:\n color = fill_color['highlight']\n\n if attrs.get('op') == 'TensorIterator':\n string += _subgraph_label(node_id, attrs, node_attrs)\n else:\n 
string += '\"{}\" [fillcolor={} style=\"{}\" shape=box label=\"{}\"];\\n'.format(\n node_id, color, style[attrs['kind']], _node_label(node_id, attrs, node_attrs))\n return string\n\n def _dump_edges_attrs():\n string = ''\n for src_node_id, dst_node_id, attrs in self.edges(data=True):\n if src_node_id not in nodes_to_dump or dst_node_id not in nodes_to_dump:\n continue\n\n if src_node_id in subgraphs:\n edge_label = subgraphs[src_node_id]\n edge_label_name = 'ltail'\n src_node_id = _get_internal_output_node_id(self, src_node_id, attrs['external_port_id'])\n elif dst_node_id in subgraphs:\n edge_label = subgraphs[dst_node_id]\n edge_label_name = 'lhead'\n dst_node_id = _get_internal_input_node_id(self, dst_node_id, attrs['external_port_id'])\n else:\n edge_label = ' '.join(\n [str(key) + '=' + str(attrs.get(key, 'None')) for key in edge_attrs if key in attrs])\n edge_label_name = 'label'\n\n string += '\"{}\" -> \"{}\" [{} = \"{}\"];\\n'.format(src_node_id, dst_node_id, edge_label_name, edge_label)\n return string\n\n log.debug(\"---- GRAPHVIZ OUTPUT STARTS ----\")\n\n if nodes_to_dump is None:\n nodes_to_dump = self.nodes()\n\n string = '\\ndigraph {\\n'\n\n string += _dump_nodes_attrs()\n string += _dump_edges_attrs()\n\n string += '}'\n log.debug(\"---- GRAPHVIZ OUTPUT ENDS ----\")\n\n if save_to_svg:\n try:\n import graphviz\n import os\n file_name = \"{}_{}.txt\".format(self.name.replace('/', '_'), 0)\n id = 1\n while os.path.exists(file_name):\n file_name = \"{}_{}.txt\".format(self.name.replace('/', '_'), id)\n id += 1\n with open(file_name, \"w\") as f:\n f.write(string)\n graphviz.render('dot', 'svg', file_name)\n print('Graph was saved to {}.{}'.format(file_name, 'svg'))\n except ImportError:\n raise ImportError('Can\\'t import graphviz')\n except Exception as e:\n raise Error('Can\\'t save graph to svg') from e\n\n return string\n\n def print_graph_stat(self):\n log.debug('Number of nodes in graph: {}'.format(self.number_of_nodes()))\n log.debug('Number of edges in graph: {}'.format(len(list(self.edges()))))\n ops = collections.defaultdict(int)\n for _node in self.nodes():\n node = Node(self, _node)\n kind = node.kind if node.has('kind') else '<UNDEFINED>'\n if node.has('op'):\n ops['op/' + node.op] += 1\n else:\n ops[kind] += 1\n if node.has('shape') and np.any(node.shape == 0):\n log.error(\"Found bad shape: '{}' for node '{}'\".format(node.shape, node.node))\n for k, v in ops.items():\n log.debug(' {} : {}'.format(k, v))\n\n def create_sub_graph_copy(self, nodes_to_extract: list):\n \"\"\"\n Create new graph which is a sub-graph of the 'graph' that contains just nodes from 'nodes_to_extract' list. 
The\n returned sub-graph is a deep copy of the provided graph nodes.\n :param graph: graph to create a sub-graph from.\n :param nodes_to_extract: list of node names to extract.\n :return: new graph.\n \"\"\"\n return self.subgraph(nodes_to_extract).copy()\n\n def create_edge(self, src_node: Node, dst_node: Node, out_port: int = 0, in_port: int = 0, edge_attrs: dict = None):\n \"\"\"\n Creates edge from node 'src_node' from output with index 'out_port' to node 'dst_node' with input index 'in_port'.\n :param src_node: node to create edge from.\n :param dst_node: node to create edge to.\n :param out_port: the index of output tensor of the 'src_node'.\n :param in_port: the input index of the node 'dst_node'.\n :param edge_attrs: dictionary with edge attrs.\n :return: None\n \"\"\"\n # edges must belong to the same graph\n assert src_node.graph is dst_node.graph\n graph = src_node.graph\n\n if edge_attrs is None:\n edge_attrs = dict()\n else:\n edge_attrs = edge_attrs.copy()\n edge_attrs.update(\n {'in': in_port, 'out': out_port, 'in_attrs': ['in', 'permutation'], 'out_attrs': ['out', 'permutation'],\n 'data_attrs': ['fw_tensor_debug_info']})\n\n # TODO: in case if in_port do not exists, we should raise an Exception here\n graph.add_edges_from([(src_node.id, dst_node.id, edge_attrs)])\n\n def dfs(self, node_name: str, visited: set):\n \"\"\"\n Implementation of the depth-first search algorithm starting from the specific node.\n :param graph: networkx graph to operate on.\n :param node_name: node name to start search from.\n :param visited: set of already visited nodes.\n :return: list of nodes in the DFS-visit order.\n \"\"\"\n order = []\n stack = [node_name]\n while len(stack) != 0:\n node_name = stack[0]\n stack.pop(0)\n visited.add(node_name)\n has_child = False\n for _, out_node_name in self.out_edges(node_name):\n if out_node_name not in visited:\n stack.insert(0, node_name)\n stack.insert(0, out_node_name)\n has_child = True\n break\n if not has_child:\n order.append(node_name)\n return order\n\n def pseudo_topological_sort(self, reverse: bool = False):\n \"\"\"\n The function performs topological sort but doesn't check for cycle existence. 
So it may produce wrong nodes order\n for some applications.\n :param graph: graph to pseudo-topologically sort.\n :param reverse: flag indicating whether need to reverse nodes order.\n :return: nodes in the topological sort if cycle doesn't exist and in pseudo-topological sort if not.\n \"\"\"\n nodes_without_inputs = list()\n for node_name in self.nodes():\n if len(self.in_edges(node_name)) == 0:\n nodes_without_inputs.append(node_name)\n order = list()\n visited = set()\n for node_name in nodes_without_inputs:\n if node_name not in visited:\n order.extend(self.dfs(node_name, visited))\n\n order = [Node(self, node) for node in order]\n\n if reverse:\n return order\n else:\n return list(reversed(order))\n\n def clean_up(self, undead_node_types: list = None):\n if undead_node_types is None:\n undead_node_types = []\n\n if not getattr(self.graph['cmd_params'], 'static_shape', False):\n undead_node_types.extend(['ShapeOf', 'Shape', 'slice_like'])\n\n mark_output_reachable_nodes(self)\n shape_inference(self)\n mark_undead_nodes(self, undead_node_types)\n mark_const_producer_nodes(self)\n eliminate_dead_nodes(self)\n # Add Const op for constant data nodes\n add_constant_operations(self)\n\n def get_tensor_names_set(self, use_ports = False):\n \"\"\"\n Get set of tensor names of the graph.\n \"\"\"\n tensor_names_set = set()\n for node in self.get_op_nodes():\n if self.stage is None:\n for out_edge_idx in node.out_edges():\n out_edge = node.out_edge(out_edge_idx)\n if \"fw_tensor_debug_info\" in out_edge:\n for _, tensor_name in out_edge[\"fw_tensor_debug_info\"]:\n tensor_names_set.add(tensor_name)\n else:\n for _, port in node.out_ports().items():\n tensor_names = port.get_tensor_names()\n tensor_names_set = tensor_names_set.union(set(tensor_names))\n return tensor_names_set\n\n def topological_sort(self, reverse: bool = False):\n sorted_node_ids = nx.topological_sort(self)\n\n sorted_nodes = [Node(self, node_id) for node_id in sorted_node_ids]\n\n if not reverse:\n return sorted_nodes\n else:\n return list(reversed(sorted_nodes))\n\n def set_node_attributes(self, name: str, values):\n return nx.set_node_attributes(self, values=values, name=name)\n\n\ndef fill_graph_with_nodes(graph, src_nodes, get_id: callable, get_attrs: callable):\n \"\"\"\n Go over all nodes in src_nodes that should be enumerable and create new NX nodes\n using get_id and get_attrs functions to create node id and node attributes correspondingly.\n \"\"\"\n for node in src_nodes:\n graph.add_node(get_id(node), **get_attrs(node))\n\n\ndef dict_includes_compare_attrs(attr, attr_probe):\n if callable(attr_probe) and not isinstance(attr_probe, type):\n return attr_probe(attr)\n else:\n res = (attr == attr_probe)\n # check if the result of comparison is a numpy scalar value which occur when attr is python scalar and\n # attr_probe is a numpy scalar\n if hasattr(res, 'ndim') and res.ndim == 0:\n return res.item()\n return res if isinstance(res, bool) else all(res)\n\n\ndef dict_includes(big: dict, sub_dict: dict, skip_attr_names=[]):\n \"\"\" Searches attributes from sub_dict in big and ensures that all values match.\n\n Entries in sub_dict can be of two types: callable or not callable. 
If callable is specified\n it is treated as probing function for attribute value from big dictionary by callable(attr) expression.\n If it is not callable, the values are compared with == operator.\n \"\"\"\n return all(\n dict_includes_compare_attrs(big.get(attr, None), sub_dict[attr])\n for attr in sub_dict.keys() if attr not in skip_attr_names\n )\n\n\ndef add_opoutput(graph: Graph, node_name: str, port: int, cut: bool = True, keep_output_port: bool = False,\n user_defined_name=None):\n \"\"\"\n Creates and connects Result node to node_name port. Cuts existing port if requested.\n :param graph: graph to operate with\n :param node_name: name of existing node in the graph that we want to add Result to\n :param port: output port of node to connect Result to\n :param cut: determines way of operating with edge specified by node_name and port\n :param keep_output_port: special attribute determines if this operation is saved in IR or not\n :param user_defined_name: User defined operation name, which should be added to tensor names list\n \"\"\"\n # we import it here because Op imports add_attrs_props and update_ie_fields from this file\n from openvino.tools.mo.ops.result import Result\n node = Node(graph, node_name)\n if cut and len(node.out_edges()) != 0:\n opoutput_node = Result(graph).create_node_on_port(node, port, {'name': node_name + '/sink_port_' + str(port),\n 'keep_output_port': keep_output_port})\n else:\n opoutput_node = Result(graph).create_node([(node, port)], {'name': node_name + '/sink_port_' + str(port),\n 'keep_output_port': keep_output_port})\n opoutput_node.in_edge()['data_attrs'] = ['fw_tensor_debug_info']\n\n if user_defined_name is not None and (graph.stage == 'front' or graph.stage is None):\n # Following code adds user_defined_name to tensor names list\n # Not applicable for middle stage\n prev_op_tensor_names = set()\n in_edge_attrs = opoutput_node.in_edge()\n if 'fw_tensor_debug_info' in in_edge_attrs:\n for _, tensor_name in opoutput_node.in_edge()['fw_tensor_debug_info']:\n prev_op_tensor_names.add(tensor_name)\n if user_defined_name not in prev_op_tensor_names:\n # TODO: This can be optimized. 
Tensor names can be stored as set, which is initialized after model loading.\n graph_tensor_names = graph.get_tensor_names_set()\n if user_defined_name in graph_tensor_names:\n log.warning('Could not add user defined output name {} to tensor names list of {} node as '\n 'graph contains tensor name with same name.'.format(user_defined_name,\n opoutput_node.soft_get('name')))\n else:\n if 'fw_tensor_debug_info' not in in_edge_attrs:\n in_edge_attrs['fw_tensor_debug_info'] = []\n in_edge_attrs['fw_tensor_debug_info'].append([user_defined_name, user_defined_name])\n\n log.debug('Sink: {} for node {}'.format(opoutput_node.id, node_name))\n log.debug(str(graph.node[opoutput_node.id]))\n log.debug(\"Add edge from {} to {}\".format(node_name, opoutput_node.id))\n return opoutput_node.id\n\n\n# TODO implement merging for keys with dictionary values?\ndef merge_edge_props(attrs: dict, additional_attrs: dict):\n \"\"\"\n Update edge attributes without changing 'in' and 'out' keys.\n It is necessary to copy edge attributes during merging of nodes when\n result of one subgraph call is passed as input to another subgraph call\n \"\"\"\n result = attrs\n for (key, value) in additional_attrs.items():\n if key not in ['in', 'out']:\n if type(additional_attrs[key]) is list:\n if key not in result:\n result[key] = []\n result[key].extend(additional_attrs[key])\n result[key] = list(set(result[key])) # silly solution to find unique elements\n else:\n result[key] = value\n return result\n\n\ndef rename_node(node: Node, name):\n if not node.graph.get_nodes_with_attributes(name=name):\n node.name = name\n else:\n assert 'Node with name {} already exists'.format(name)\n\n\ndef rename_nodes(nodes: List[tuple]):\n for node, name in nodes:\n rename_node(node, name)\n\n\ndef get_edge_attribute_between_nodes(node1: Node, node2: Node, attr_name: str):\n \"\"\"\n Gets edge attribute value between two nodes.\n This method is introduced for implementation of manual replacing of nodes attributes\n with tensor debug information. It is needed after removing of fake outputs.\n Also there are cases when graph transformations lead to mismatch of tensor name\n and input node, so manual attribute change is needed.\n This method should only be used during the front phase.\n And it is applicable only for cases when there is just one edge between two given nodes.\n \"\"\"\n for edge_idx in node1.out_edges():\n edge = node1.out_edge(edge_idx)\n out_port = edge['out']\n out_node = node1.out_node(out_port)\n if out_node.id == node2.id:\n if attr_name in edge:\n return edge[attr_name]\n return None\n\n\ndef set_edge_attribute_between_nodes(node1: Node, node2: Node, attr_name: str, new_value):\n \"\"\"\n Sets edge attribute value between two nodes.\n This method is introduced for implementation of manual replacing of nodes attributes\n with tensor debug information. 
It is needed after removing of fake outputs.\n Also there are cases when graph transformations lead to mismatch of tensor name\n and input node, so manual attribute change is needed.\n This method should only be used during the front phase.\n And it is applicable only for cases when there is just one edge between two given nodes.\n \"\"\"\n for edge_idx in node1.out_edges():\n edge = node1.out_edge(edge_idx)\n out_port = edge['out']\n out_node = node1.out_node(out_port)\n if out_node.id == node2.id:\n edge[attr_name] = new_value\n\n# All functions below are deprecated and will be removed in next release\n# Please, use methods from Graph/Node classes instead\n\n\n@deprecated_api(Graph)\ndef get_node_id_by_name(graph: Graph, name: str):\n return graph.get_node_id_by_name(name=name)\n\n\n@deprecated_api(Graph)\ndef print_graph_stat(graph: Graph):\n return graph.print_graph_stat()\n\n\n@deprecated_api(Graph)\ndef get_inputs_with_ports(graph: Graph, match, pattern_edges, input_names_in_pattern):\n \"\"\"\n Front replacements of multi-input nodes should specify output port to add_node-like functions\n This function is a helper to get such information out of matched nodes\n :param graph: graph to operate on\n :param match: dictionary returned by matching function\n :param pattern_edges: edges that are specified in pattern\n :param input_names_in_pattern: names of matched nodes as they were specified in pattern that should be in\n resulting list\n :return: list of tuples of node and output port\n \"\"\"\n return graph.get_inputs_with_ports(match=match,\n pattern_edges=pattern_edges,\n input_names_in_pattern=input_names_in_pattern)\n\n\n@deprecated_api(Graph)\ndef dump_graph_for_graphviz(graph: Graph, node_attrs: list = ['kind', 'op', 'shape'],\n edge_attrs: list = ['in', 'out'],\n nodes_to_dump: list = None, save_to_svg=False):\n return graph.dump_graph_for_graphviz(node_attrs=node_attrs,\n edge_attrs=edge_attrs,\n nodes_to_dump=nodes_to_dump,\n save_to_svg=save_to_svg)\n\n\n@deprecated_api(Graph)\ndef create_sub_graph_copy(graph: Graph, nodes_to_extract: list):\n \"\"\"\n Create new graph which is a sub-graph of the 'graph' that contains just nodes from 'nodes_to_extract' list. 
The\n returned sub-graph is a deep copy of the provided graph nodes.\n :param graph: graph to create a sub-graph from.\n :param nodes_to_extract: list of node names to extract.\n :return: new graph.\n \"\"\"\n return graph.create_sub_graph_copy(nodes_to_extract=nodes_to_extract)\n\n\n@deprecated_api(Graph)\ndef get_graph_ops(graph: Graph):\n return graph.get_op_nodes()\n\n\n@deprecated_api(Graph)\ndef check_empty_graph(graph: Graph, description: str):\n return graph.check_empty_graph(description=description)\n\n\n@deprecated_api(Graph)\ndef create_edge(src_node: Node, dst_node: Node, out_port: int = 0, in_port: int = 0, edge_attrs: dict = None):\n \"\"\"\n Creates edge from node 'src_node' from output with index 'out_port' to node 'dst_node' with input index 'in_port'.\n :param src_node: node to create edge from.\n :param dst_node: node to create edge to.\n :param out_port: the index of output tensor of the 'src_node'.\n :param in_port: the input index of the node 'dst_node'.\n :param edge_attrs: dictionary with edge attrs.\n :return: None\n \"\"\"\n assert src_node.graph is dst_node.graph\n graph = src_node.graph\n return graph.create_edge(src_node=src_node, dst_node=dst_node, out_port=out_port, in_port=in_port,\n edge_attrs=edge_attrs)\n\n\n@deprecated_api(Graph)\ndef erase_node(node: Node):\n \"\"\"\n Erases node from the graph and reconnect edges from input node(s) to output node(s)\n Produces assertion error if the node being removed has multiple inputs or outputs.\n The function can be used in the front phase only (when there are no data nodes in the graph).\n :param node: Node to erase\n \"\"\"\n graph = node.graph\n return graph.erase_node(node)\n\n\n@deprecated_api(Node)\ndef get_sorted_inputs(node: Node, control_flow: bool = False):\n return node.get_sorted_inputs(control_flow=control_flow)\n\n\n@deprecated_api(Node)\ndef get_sorted_outputs(node: Node, control_flow: bool = False):\n return node.get_sorted_outputs(control_flow=control_flow)\n\n\n@deprecated_api(Node)\ndef insert_node_after(node: Node, new_node: Node, node_out_port: int = 0):\n \"\"\"\n Insert node 'new_node' after output with index 'node_out_port' of the node 'node'. 
All consumers of node 'node'\n output with index 'node_out_port' will be changed to consume node 'new_node'.\n The function should be used when graph doesn't contain data nodes yet.\n :param node: node after which new node should be inserted.\n :param new_node: node to be inserted.\n :param node_out_port: the output index for the node 'node' to insert\n :return: None\n \"\"\"\n return node.insert_node_after(new_node=new_node, node_out_port=node_out_port)\n\n\n@deprecated_api(Node)\ndef replace_node(old_node: Node, new_node: Node, new_node_out_port: int = None):\n \"\"\"\n Replaces node 'old_node' with a node 'new_node' preserving edge attributes.\n :param old_node: node to be replaced.\n :param new_node: node to replace with.\n :return: None\n \"\"\"\n return old_node.replace_node(new_node=new_node, new_node_out_port=new_node_out_port)\n\n\n@deprecated_api(Node)\ndef copy_node(src_node: Node, new_attrs: dict = None, dst_graph: nx.MultiDiGraph = None):\n \"\"\" Copies node with all attributes (optionally updated) within the same graph or to different graph.\"\"\"\n return src_node.copy_node(new_attrs=new_attrs, dst_graph=dst_graph)\n\n\n@deprecated_api(Node)\ndef get_inputs(graph: Graph, node: str, edge_attr: dict = None, control_flow: bool = False):\n return Node(graph, node).get_inputs(edge_attr=edge_attr, control_flow=control_flow)\n\n\n@deprecated_api(Node)\ndef get_outputs(graph: Graph, node: str, edge_attr: dict = None, control_flow: bool = False):\n return Node(graph, node).get_outputs(edge_attr=edge_attr, control_flow=control_flow)\n"
] |
[
[
"numpy.any"
]
] |
victorkich/RiVAE
|
[
"a153f88a296a9fa36805e21eb33dca07ecf91706"
] |
[
"rigan.py"
] |
[
"from model import Generator, Discriminator\nfrom dataset_creator import RiVAEDataset\nfrom torchvision.utils import save_image\nfrom torch.utils.data import DataLoader\nfrom torch.backends import cudnn\nfrom torch.optim import Adam\nfrom tqdm import tqdm\nfrom torch import nn\nfrom os import path\nimport torch\n\n# get image path\npath_ = path.abspath(path.dirname(__file__))\nimg_path = f\"{path_}/data/images\"\n\n# parameters\nimg_shape = (40, 3, 64, 64) # (batch_size, c, w, h)\nepochs = 100\nlr = 0.0002\ncheckpoint_interval = 10\n\n# use gpu if available\ncuda = torch.cuda.is_available()\ndevice = torch.device('cuda' if cuda else 'cpu')\nprint(\"PyTorch CUDA:\", cuda)\ncudnn.benchmark = cuda\n\n\n# custom weights initialization called on netG and netD\ndef weights_init(m):\n class_name = m.__class__.__name__\n if class_name.find('Conv') != -1:\n nn.init.normal_(m.weight, 0., 0.02)\n elif class_name.find('BatchNorm') != -1:\n nn.init.normal_(m.weight, 1., 0.02)\n nn.init.zeros_(m.bias)\n\n\n# create a model from RiGAN auto encoder class\nnetG = Generator(img_shape[2]).to(device)\nnetG.apply(weights_init)\nnetD = Discriminator(img_shape[2]).to(device)\nnetD.apply(weights_init)\n\n# setup optimizer\noptimizerD = Adam(netD.parameters(), lr=lr, betas=(0.5, 0.999))\noptimizerG = Adam(netG.parameters(), lr=lr, betas=(0.5, 0.999))\n\n# binary cross entropy loss\ncriterion = nn.BCELoss()\n\n# define real and fake labels\nreal_label = 1\nfake_label = 0\n\n# loading the dataset using data loader\ndataset = RiVAEDataset(img_dir=img_path, img_shape=img_shape)\ndataloader = DataLoader(dataset, batch_size=img_shape[0], shuffle=True, num_workers=2)\n\nfor epoch in range(1, epochs+1):\n print(f\"Epoch {epoch} of {epochs}\")\n loss_generator = 0.\n loss_discriminator = 0.\n for i, data in tqdm(enumerate(dataloader), total=len(dataloader)):\n # update discriminator network: maximize log(D(x)) + log(1 - D(G(z)))\n netD.zero_grad()\n data = data.to(device) # train with real\n data = data.reshape(img_shape)\n label = torch.full((img_shape[0],), real_label, dtype=data.dtype, device=device)\n output = netD(data)\n errD_real = criterion(output, label)\n errD_real.backward()\n\n # train with fake\n noise = torch.randn(img_shape[0], 100, 1, 1, device=device)\n fake = netG(noise)\n label.fill_(fake_label)\n output = netD(fake.detach())\n errD_fake = criterion(output, label)\n errD_fake.backward()\n errD = errD_real + errD_fake\n loss_discriminator += errD\n optimizerD.step()\n\n # update generator network: maximize log(D(G(z)))\n netG.zero_grad()\n label.fill_(real_label) # fake labels are real for generator cost\n output = netD(fake)\n errG = criterion(output, label)\n loss_generator += errG\n errG.backward()\n optimizerG.step()\n\n # save the last batch input and output of every epoch\n if i == len(dataloader) - 1:\n both = torch.cat((fake.detach(), data))\n save_image(both.cpu(), f\"{path_}/data/outputs/output_{epoch}.png\")\n\n print(f\"Generator Loss: {loss_generator:.4f}\\nDiscriminator Loss: {loss_discriminator:.4f}\")\n if not epoch % checkpoint_interval:\n torch.save(netG.state_dict(), f\"{path_}/models/generator_{epoch}.pth\")\n torch.save(netD.state_dict(), f\"{path_}/models/discriminator_{epoch}.pth\")\n"
] |
[
[
"torch.full",
"torch.randn",
"torch.utils.data.DataLoader",
"torch.nn.BCELoss",
"torch.nn.init.normal_",
"torch.cuda.is_available",
"torch.nn.init.zeros_",
"torch.device"
]
] |
anhhuyalex/ax
|
[
"f802121b4fcc19b9ed36bf2392ebb3821ff819a9"
] |
[
"ax/service/ax_client.py"
] |
[
"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n\nimport json\nimport logging\nimport warnings\nfrom typing import Any, Dict, List, Optional, Tuple, Union\n\nimport ax.service.utils.best_point as best_point_utils\nimport numpy as np\nimport pandas as pd\nfrom ax.core.arm import Arm\nfrom ax.core.data import Data\nfrom ax.core.experiment import Experiment\nfrom ax.core.generator_run import GeneratorRun\nfrom ax.core.trial import Trial\nfrom ax.core.types import (\n TEvaluationOutcome,\n TModelPredictArm,\n TParameterization,\n TParamValue,\n)\nfrom ax.modelbridge.generation_strategy import GenerationStrategy\nfrom ax.modelbridge.modelbridge_utils import get_pending_observation_features\nfrom ax.plot.base import AxPlotConfig\nfrom ax.plot.contour import plot_contour\nfrom ax.plot.exp_utils import exp_to_df\nfrom ax.plot.helper import _format_dict, _get_in_sample_arms\nfrom ax.plot.trace import optimization_trace_single_method\nfrom ax.service.utils.dispatch import choose_generation_strategy\nfrom ax.service.utils.instantiation import (\n data_from_evaluations,\n make_experiment,\n raw_data_to_evaluation,\n)\nfrom ax.service.utils.storage import (\n load_experiment_and_generation_strategy,\n save_experiment_and_generation_strategy,\n)\nfrom ax.storage.json_store.decoder import (\n generation_strategy_from_json,\n object_from_json,\n)\nfrom ax.storage.json_store.encoder import object_to_json\nfrom ax.utils.common.docutils import copy_doc\nfrom ax.utils.common.logger import _round_floats_for_logging, get_logger\nfrom ax.utils.common.typeutils import (\n checked_cast,\n checked_cast_dict,\n checked_cast_optional,\n not_none,\n)\nfrom botorch.utils.sampling import manual_seed\n\n\nlogger = get_logger(__name__)\n\n\ntry: # We don't require SQLAlchemy by default.\n from ax.storage.sqa_store.structs import DBSettings\nexcept ModuleNotFoundError: # pragma: no cover\n DBSettings = None\n\n\nclass AxClient:\n \"\"\"\n Convenience handler for management of experimentation cycle through a\n service-like API. External system manages scheduling of the cycle and makes\n calls to this client to get next suggestion in the experiment and log back\n data from the evaluation of that suggestion.\n\n Note: `AxClient` expects to only propose 1 arm (suggestion) per trial; support\n for use cases that require use of batches is coming soon.\n\n Two custom types used in this class for convenience are `TParamValue` and\n `TParameterization`. Those are shortcuts for `Union[str, bool, float, int]`\n and `Dict[str, Union[str, bool, float, int]]`, respectively.\n\n Args:\n generation_strategy: Optional generation strategy. If not set, one is\n intelligently chosen based on properties of search space.\n\n db_settings: Settings for saving and reloading the underlying experiment\n to a database. Expected to be of type\n ax.storage.sqa_store.structs.DBSettings and require SQLAlchemy.\n\n enforce_sequential_optimization: Whether to enforce that when it is\n reasonable to switch models during the optimization (as prescribed\n by `num_arms` in generation strategy), Ax will wait for enough trials\n to be completed with data to proceed. Defaults to True. If set to\n False, Ax will keep generating new trials from the previous model\n until enough data is gathered. 
Use this only if necessary;\n otherwise, it is more resource-efficient to\n optimize sequentially, by waiting until enough data is available to\n use the next model.\n\n random_seed: Optional integer random seed, set to fix the optimization\n random seed for reproducibility. Works only for Sobol quasi-random\n generator and for BoTorch-powered models. For the latter models, the\n trials generated from the same optimization setup with the same seed,\n will be mostly similar, but the exact parameter values may still vary\n and trials latter in the optimizations will diverge more and more.\n This is because a degree of randomness is essential for high performance\n of the Bayesian optimization models and is not controlled by the seed.\n\n Note: In multi-threaded environments, the random seed is thread-safe,\n but does not actually guarantee reproducibility. Whether the outcomes\n will be exactly the same for two same operations that use the random\n seed, depends on whether the threads modify the random state in the\n same order across the two operations.\n\n verbose_logging: Whether Ax should log significant optimization events,\n defaults to `True`.\n \"\"\"\n\n def __init__(\n self,\n generation_strategy: Optional[GenerationStrategy] = None,\n db_settings: Any = None,\n enforce_sequential_optimization: bool = True,\n random_seed: Optional[int] = None,\n verbose_logging: bool = True,\n ) -> None:\n if not verbose_logging:\n logger.setLevel(logging.WARNING)\n else:\n logger.info(\n \"Starting optimization with verbose logging. To disable logging, \"\n \"set the `verbose_logging` argument to `False`. Note that float \"\n \"values in the logs are rounded to 2 decimal points.\"\n )\n self._generation_strategy = generation_strategy\n if db_settings and (not DBSettings or not isinstance(db_settings, DBSettings)):\n raise ValueError(\n \"`db_settings` argument should be of type ax.storage.sqa_store.\"\n \"structs.DBSettings. To use `DBSettings`, you will need SQLAlchemy \"\n \"installed in your environment (can be installed through pip).\"\n )\n self.db_settings = db_settings\n self._experiment: Optional[Experiment] = None\n self._enforce_sequential_optimization = enforce_sequential_optimization\n self._random_seed = random_seed\n if random_seed is not None:\n logger.warning(\n f\"Random seed set to {random_seed}. Note that this setting \"\n \"only affects the Sobol quasi-random generator \"\n \"and BoTorch-powered Bayesian optimization models. For the latter \"\n \"models, setting random seed to the same number for two optimizations \"\n \"will make the generated trials similar, but not exactly the same, \"\n \"and over time the trials will diverge more.\"\n )\n # Trials, for which we received data since last `GenerationStrategy.gen`,\n # used to make sure that generation strategy is updated with new data.\n self._updated_trials: List[int] = []\n\n # ------------------------ Public API methods. 
------------------------\n\n def create_experiment(\n self,\n parameters: List[Dict[str, Union[TParamValue, List[TParamValue]]]],\n name: Optional[str] = None,\n objective_name: Optional[str] = None,\n minimize: bool = False,\n parameter_constraints: Optional[List[str]] = None,\n outcome_constraints: Optional[List[str]] = None,\n status_quo: Optional[TParameterization] = None,\n overwrite_existing_experiment: bool = False,\n experiment_type: Optional[str] = None,\n ) -> None:\n \"\"\"Create a new experiment and save it if DBSettings available.\n\n Args:\n parameters: List of dictionaries representing parameters in the\n experiment search space. Required elements in the dictionaries\n are: \"name\" (name of this parameter, string), \"type\" (type of the\n parameter: \"range\", \"fixed\", or \"choice\", string), and \"bounds\"\n for range parameters (list of two values, lower bound first),\n \"values\" for choice parameters (list of values), and \"value\" for\n fixed parameters (single value).\n objective: Name of the metric used as objective in this experiment.\n This metric must be present in `raw_data` argument to `complete_trial`.\n name: Name of the experiment to be created.\n minimize: Whether this experiment represents a minimization problem.\n parameter_constraints: List of string representation of parameter\n constraints, such as \"x3 >= x4\" or \"x3 + x4 + x5 >= 2\". For sum\n constraints, any number of arguments is accepted, and acceptable\n operators are \"<=\" and \">=\".\n outcome_constraints: List of string representation of outcome\n constraints of form \"metric_name >= bound\", like \"m1 <= 3.\"\n status_quo: Parameterization of the current state of the system.\n If set, this will be added to each trial to be evaluated alongside\n test configurations.\n overwrite_existing_experiment: If `DBSettings` were provided on\n instantiation and the experiment being created has the same name\n as some experiment already stored, whether to overwrite the\n existing experiment. 
Defaults to False.\n \"\"\"\n if self.db_settings and not name:\n raise ValueError( # pragma: no cover\n \"Must give the experiment a name if `db_settings` is not None.\"\n )\n if self.db_settings:\n existing = None\n try:\n existing, _ = load_experiment_and_generation_strategy(\n experiment_name=not_none(name), db_settings=self.db_settings\n )\n except ValueError: # Experiment does not exist, nothing to do.\n pass\n if existing and overwrite_existing_experiment:\n logger.info(f\"Overwriting existing experiment {name}.\")\n elif existing:\n raise ValueError(\n f\"Experiment {name} exists; set the `overwrite_existing_\"\n \"experiment` to `True` to overwrite with new experiment \"\n \"or use `ax_client.load_experiment_from_database` to \"\n \"continue an existing experiment.\"\n )\n\n self._experiment = make_experiment(\n name=name,\n parameters=parameters,\n objective_name=objective_name,\n minimize=minimize,\n parameter_constraints=parameter_constraints,\n outcome_constraints=outcome_constraints,\n status_quo=status_quo,\n experiment_type=experiment_type,\n )\n if self._generation_strategy is None:\n self._generation_strategy = choose_generation_strategy(\n search_space=self._experiment.search_space,\n enforce_sequential_optimization=self._enforce_sequential_optimization,\n random_seed=self._random_seed,\n )\n self._save_experiment_and_generation_strategy_to_db_if_possible()\n\n def get_next_trial(self) -> Tuple[TParameterization, int]:\n \"\"\"\n Generate trial with the next set of parameters to try in the iteration process.\n\n Note: Service API currently supports only 1-arm trials.\n\n Returns:\n Tuple of trial parameterization, trial index\n \"\"\"\n trial = self.experiment.new_trial(generator_run=self._gen_new_generator_run())\n logger.info(\n f\"Generated new trial {trial.index} with parameters \"\n f\"{_round_floats_for_logging(item=not_none(trial.arm).parameters)}.\"\n )\n trial.mark_dispatched()\n self._updated_trials = []\n self._save_experiment_and_generation_strategy_to_db_if_possible()\n return not_none(trial.arm).parameters, trial.index\n\n def complete_trial(\n self,\n trial_index: int,\n raw_data: TEvaluationOutcome,\n metadata: Optional[Dict[str, Union[str, int]]] = None,\n sample_size: Optional[int] = None,\n ) -> None:\n \"\"\"\n Completes the trial with given metric values and adds optional metadata\n to it.\n\n Args:\n trial_index: Index of trial within the experiment.\n raw_data: Evaluation data for the trial. Can be a mapping from\n metric name to a tuple of mean and SEM, just a tuple of mean and\n SEM if only one metric in optimization, or just the mean if there\n is no SEM. 
Can also be a list of (fidelities, mapping from\n metric name to a tuple of mean and SEM).\n metadata: Additional metadata to track about this run.\n sample_size: Number of samples collected for the underlying arm,\n optional.\n \"\"\"\n assert isinstance(\n trial_index, int\n ), f\"Trial index must be an int, got: {trial_index}.\" # pragma: no cover\n if trial_index not in self.experiment.trials:\n raise ValueError( # pragma: no cover\n f\"Cannot complete trial #{trial_index} as it does not yet exist \"\n f\"for experiment {self.experiment.name}.\"\n )\n trial = self.experiment.trials[trial_index]\n if not isinstance(trial, Trial):\n raise NotImplementedError(\n \"The Service API only supports `Trial`, not `BatchTrial`.\"\n )\n\n if metadata is not None:\n trial._run_metadata = metadata\n\n arm_name = not_none(trial.arm).name\n evaluations = {\n arm_name: raw_data_to_evaluation(\n raw_data=raw_data, objective_name=self.objective_name\n )\n }\n sample_sizes = {arm_name: sample_size} if sample_size else {}\n data = data_from_evaluations(\n evaluations=evaluations,\n trial_index=trial.index,\n sample_sizes=sample_sizes,\n start_time=(\n checked_cast_optional(int, metadata.get(\"start_time\"))\n if metadata is not None\n else None\n ),\n end_time=(\n checked_cast_optional(int, metadata.get(\"end_time\"))\n if metadata is not None\n else None\n ),\n )\n # In service API, a trial may be completed multiple times (for multiple\n # metrics, for example).\n trial.mark_completed(allow_repeat_completion=True)\n self.experiment.attach_data(data)\n data_for_logging = _round_floats_for_logging(\n item=evaluations[next(iter(evaluations.keys()))]\n )\n logger.info(\n f\"Completed trial {trial_index} with data: \"\n f\"{_round_floats_for_logging(item=data_for_logging)}.\"\n )\n self._updated_trials.append(trial_index)\n self._save_experiment_and_generation_strategy_to_db_if_possible()\n\n def log_trial_failure(\n self, trial_index: int, metadata: Optional[Dict[str, str]] = None\n ) -> None:\n \"\"\"Mark that the given trial has failed while running.\n\n Args:\n trial_index: Index of trial within the experiment.\n metadata: Additional metadata to track about this run.\n \"\"\"\n trial = self.experiment.trials[trial_index]\n trial.mark_failed()\n logger.info(f\"Registered failure of trial {trial_index}.\")\n if metadata is not None:\n trial._run_metadata = metadata\n self._save_experiment_and_generation_strategy_to_db_if_possible()\n\n def attach_trial(\n self, parameters: TParameterization\n ) -> Tuple[TParameterization, int]:\n \"\"\"Attach a new trial with the given parameterization to the experiment.\n\n Args:\n parameters: Parameterization of the new trial.\n\n Returns:\n Tuple of parameterization and trial index from newly created trial.\n \"\"\"\n trial = self.experiment.new_trial().add_arm(Arm(parameters=parameters))\n trial.mark_dispatched()\n logger.info(\n \"Attached custom parameterization \"\n f\"{_round_floats_for_logging(item=parameters)} as trial {trial.index}.\"\n )\n self._save_experiment_and_generation_strategy_to_db_if_possible()\n return not_none(trial.arm).parameters, trial.index\n\n def get_trial_parameters(self, trial_index: int) -> TParameterization:\n \"\"\"Retrieve the parameterization of the trial by the given index.\"\"\"\n if trial_index not in self.experiment.trials:\n raise ValueError(f\"Trial {trial_index} does not yet exist.\")\n trial = checked_cast(Trial, self.experiment.trials.get(trial_index))\n return not_none(trial.arm).parameters\n\n 
@copy_doc(best_point_utils.get_best_parameters)\n def get_best_parameters(\n self\n ) -> Optional[Tuple[TParameterization, Optional[TModelPredictArm]]]:\n return best_point_utils.get_best_parameters(self.experiment)\n\n def get_trials_data_frame(self) -> pd.DataFrame:\n return exp_to_df(exp=self.experiment)\n\n def get_recommended_max_parallelism(self) -> List[Tuple[int, int]]:\n \"\"\"Recommends maximum number of trials that can be scheduled in parallel\n at different stages of optimization.\n\n Some optimization algorithms profit significantly from sequential\n optimization (e.g. suggest a few points, get updated with data for them,\n repeat). This setting indicates how many trials should be in flight\n (generated, but not yet completed with data).\n\n The output of this method is mapping of form\n {num_trials -> max_parallelism_setting}, where the max_parallelism_setting\n is used for num_trials trials. If max_parallelism_setting is -1, as\n many of the trials can be ran in parallel, as necessary. If num_trials\n in a tuple is -1, then the corresponding max_parallelism_setting\n should be used for all subsequent trials.\n\n For example, if the returned list is [(5, -1), (12, 6), (-1, 3)],\n the schedule could be: run 5 trials in parallel, run 6 trials in\n parallel twice, run 3 trials in parallel for as long as needed. Here,\n 'running' a trial means obtaining a next trial from `AxClient` through\n get_next_trials and completing it with data when available.\n\n Returns:\n Mapping of form {num_trials -> max_parallelism_setting}.\n \"\"\"\n parallelism_settings = []\n for step in self.generation_strategy._steps:\n parallelism_settings.append(\n (step.num_arms, step.recommended_max_parallelism or step.num_arms)\n )\n return parallelism_settings\n\n def get_optimization_trace(\n self, objective_optimum: Optional[float] = None\n ) -> AxPlotConfig:\n \"\"\"Retrieves the plot configuration for optimization trace, which shows\n the evolution of the objective mean over iterations.\n\n Args:\n objective_optimum: Optimal objective, if known, for display in the\n visualization.\n \"\"\"\n if not self.experiment.trials:\n raise ValueError(\"Cannot generate plot as there are no trials.\")\n objective_name = self.experiment.optimization_config.objective.metric.name\n best_objectives = np.array(\n [\n [\n checked_cast(Trial, trial).objective_mean\n for trial in self.experiment.trials.values()\n ]\n ]\n )\n hover_labels = [\n _format_dict(not_none(checked_cast(Trial, trial).arm).parameters)\n for trial in self.experiment.trials.values()\n ]\n return optimization_trace_single_method(\n y=(\n np.minimum.accumulate(best_objectives, axis=1)\n if self.experiment.optimization_config.objective.minimize\n else np.maximum.accumulate(best_objectives, axis=1)\n ),\n optimum=objective_optimum,\n title=\"Model performance vs. # of iterations\",\n ylabel=objective_name.capitalize(),\n hover_labels=hover_labels,\n )\n\n def get_contour_plot(\n self,\n param_x: Optional[str] = None,\n param_y: Optional[str] = None,\n metric_name: Optional[str] = None,\n ) -> AxPlotConfig:\n \"\"\"Retrieves a plot configuration for a contour plot of the response\n surface. For response surfaces with more than two parameters,\n selected two parameters will appear on the axes, and remaining parameters\n will be affixed to the middle of their range. If contour params arguments\n are not provided, the first two parameters in the search space will be\n used. 
If contour metrics are not provided, objective will be used.\n\n Args:\n param_x: name of parameters to use on x-axis for\n the contour response surface plots.\n param_y: name of parameters to use on y-axis for\n the contour response surface plots.\n metric_name: Name of the metric, for which to plot the response\n surface.\n \"\"\"\n if not self.experiment.trials:\n raise ValueError(\"Cannot generate plot as there are no trials.\")\n if len(self.experiment.parameters) < 2:\n raise ValueError(\n \"Cannot create a contour plot as experiment has less than 2 \"\n \"parameters, but a contour-related argument was provided.\"\n )\n if (param_x or param_y) and not (param_x and param_y):\n raise ValueError(\n \"If `param_x` is provided, `param_y` is \"\n \"required as well, and vice-versa.\"\n )\n objective_name = self.objective_name\n if not metric_name:\n metric_name = objective_name\n\n if not param_x or not param_y:\n parameter_names = list(self.experiment.parameters.keys())\n param_x = parameter_names[0]\n param_y = parameter_names[1]\n\n if param_x not in self.experiment.parameters:\n raise ValueError(\n f'Parameter \"{param_x}\" not found in the optimization search space.'\n )\n if param_y not in self.experiment.parameters:\n raise ValueError(\n f'Parameter \"{param_y}\" not found in the optimization search space.'\n )\n if metric_name not in self.experiment.metrics:\n raise ValueError(\n f'Metric \"{metric_name}\" is not associated with this optimization.'\n )\n if self.generation_strategy.model is not None:\n try:\n logger.info(\n f\"Retrieving contour plot with parameter '{param_x}' on X-axis \"\n f\"and '{param_y}' on Y-axis, for metric '{metric_name}'. \"\n \"Ramaining parameters are affixed to the middle of their range.\"\n )\n return plot_contour(\n model=not_none(self.generation_strategy.model),\n param_x=param_x,\n param_y=param_y,\n metric_name=metric_name,\n )\n\n except NotImplementedError:\n # Some models don't implement '_predict', which is needed\n # for the contour plots.\n logger.info(\n f\"Model {self.generation_strategy.model} does not implement \"\n \"`predict`, so it cannot be used to generate a response \"\n \"surface plot.\"\n )\n raise ValueError(\n f'Could not obtain contour plot of \"{metric_name}\" for parameters '\n f'\"{param_x}\" and \"{param_y}\", as a model with predictive ability, '\n \"such as a Gaussian Process, has not yet been trained in the course \"\n \"of this optimization.\"\n )\n\n def load_experiment_from_database(self, experiment_name: str) -> None:\n \"\"\"Load an existing experiment from database using the `DBSettings`\n passed to this `AxClient` on instantiation.\n\n Args:\n experiment_name: Name of the experiment.\n\n Returns:\n Experiment object.\n \"\"\"\n if not self.db_settings:\n raise ValueError( # pragma: no cover\n \"Cannot load an experiment in the absence of the DB settings.\"\n \"Please initialize `AxClient` with DBSettings.\"\n )\n experiment, generation_strategy = load_experiment_and_generation_strategy(\n experiment_name=experiment_name, db_settings=self.db_settings\n )\n self._experiment = experiment\n logger.info(f\"Loaded {experiment}.\")\n if generation_strategy is None: # pragma: no cover\n self._generation_strategy = choose_generation_strategy(\n search_space=self._experiment.search_space,\n enforce_sequential_optimization=self._enforce_sequential_optimization,\n random_seed=self._random_seed,\n )\n else:\n self._generation_strategy = generation_strategy\n logger.info(\n f\"Using generation strategy associated with the 
loaded experiment:\"\n f\" {generation_strategy}.\"\n )\n\n def get_model_predictions(\n self, metric_names: Optional[List[str]] = None\n ) -> Dict[int, Dict[str, Tuple[float, float]]]:\n \"\"\"Retrieve model-estimated means and covariances for all metrics.\n Note: this function retrieves the predictions for the 'in-sample' arms,\n which means that the return mapping on this function will only contain\n predictions for trials that have been completed with data.\n\n Args:\n metric_names: Names of the metrics, for which to retrieve predictions.\n All metrics on experiment will be retrieved if this argument was\n not specified.\n\n Returns:\n A mapping from trial index to a mapping of metric names to tuples\n of predicted metric mean and SEM, of form:\n { trial_index -> { metric_name: ( mean, SEM ) } }.\n \"\"\"\n if self.generation_strategy.model is None: # pragma: no cover\n raise ValueError(\"No model has been instantiated yet.\")\n if metric_names is None and self.experiment.metrics is None:\n raise ValueError( # pragma: no cover\n \"No metrics to retrieve specified on the experiment or as \"\n \"argument to `get_model_predictions`.\"\n )\n arm_info, _, _ = _get_in_sample_arms(\n model=not_none(self.generation_strategy.model),\n metric_names=set(metric_names)\n if metric_names is not None\n else set(not_none(self.experiment.metrics).keys()),\n )\n trials = checked_cast_dict(int, Trial, self.experiment.trials)\n\n return {\n trial_index: {\n m: (\n arm_info[not_none(trials[trial_index].arm).name].y_hat[m],\n arm_info[not_none(trials[trial_index].arm).name].se_hat[m],\n )\n for m in arm_info[not_none(trials[trial_index].arm).name].y_hat\n }\n for trial_index in trials\n if not_none(trials[trial_index].arm).name in arm_info\n }\n\n # ------------------ JSON serialization & storage methods. 
-----------------\n\n def save_to_json_file(self, filepath: str = \"ax_client_snapshot.json\") -> None:\n \"\"\"Save a JSON-serialized snapshot of this `AxClient`'s settings and state\n to a .json file by the given path.\n \"\"\"\n with open(filepath, \"w+\") as file: # pragma: no cover\n file.write(json.dumps(self.to_json_snapshot()))\n logger.info(f\"Saved JSON-serialized state of optimization to `{filepath}`.\")\n\n @staticmethod\n def load_from_json_file(filepath: str = \"ax_client_snapshot.json\") -> \"AxClient\":\n \"\"\"Restore an `AxClient` and its state from a JSON-serialized snapshot,\n residing in a .json file by the given path.\n \"\"\"\n with open(filepath, \"r\") as file: # pragma: no cover\n serialized = json.loads(file.read())\n return AxClient.from_json_snapshot(serialized=serialized)\n\n def to_json_snapshot(self) -> Dict[str, Any]:\n \"\"\"Serialize this `AxClient` to JSON to be able to interrupt and restart\n optimization and save it to file by the provided path.\n\n Returns:\n A JSON-safe dict representation of this `AxClient`.\n \"\"\"\n return {\n \"_type\": self.__class__.__name__,\n \"experiment\": object_to_json(self._experiment),\n \"generation_strategy\": object_to_json(self._generation_strategy),\n \"_enforce_sequential_optimization\": self._enforce_sequential_optimization,\n \"_updated_trials\": object_to_json(self._updated_trials),\n }\n\n @staticmethod\n def from_json_snapshot(serialized: Dict[str, Any]) -> \"AxClient\":\n \"\"\"Recreate an `AxClient` from a JSON snapshot.\"\"\"\n experiment = object_from_json(serialized.pop(\"experiment\"))\n serialized_generation_strategy = serialized.pop(\"generation_strategy\")\n ax_client = AxClient(\n generation_strategy=generation_strategy_from_json(\n generation_strategy_json=serialized_generation_strategy\n )\n if serialized_generation_strategy is not None\n else None,\n enforce_sequential_optimization=serialized.pop(\n \"_enforce_sequential_optimization\"\n ),\n )\n ax_client._experiment = experiment\n ax_client._updated_trials = object_from_json(serialized.pop(\"_updated_trials\"))\n return ax_client\n\n # ---------------------- Private helper methods. ---------------------\n\n @property\n def experiment(self) -> Experiment:\n \"\"\"Returns the experiment set on this Ax client\"\"\"\n if self._experiment is None:\n raise ValueError(\n \"Experiment not set on Ax client. 
Must first \"\n \"call load_experiment or create_experiment to use handler functions.\"\n )\n return not_none(self._experiment)\n\n @property\n def generation_strategy(self) -> GenerationStrategy:\n \"\"\"Returns the generation strategy, set on this experiment.\"\"\"\n if self._generation_strategy is None:\n raise ValueError(\n \"No generation strategy has been set on this optimization yet.\"\n )\n return not_none(self._generation_strategy)\n\n @property\n def objective_name(self) -> str:\n \"\"\"Returns the name of the objective in this optimization.\"\"\"\n opt_config = not_none(self.experiment.optimization_config)\n return opt_config.objective.metric.name\n\n def _save_experiment_and_generation_strategy_to_db_if_possible(self) -> bool:\n \"\"\"Saves attached experiment and generation strategy if DB settings are\n set on this AxClient instance.\n\n Returns:\n bool: Whether the experiment was saved.\n \"\"\"\n if self.db_settings is not None:\n save_experiment_and_generation_strategy(\n experiment=self.experiment,\n generation_strategy=self.generation_strategy,\n db_settings=self.db_settings,\n )\n return True\n return False\n\n def _get_new_data(self) -> Data:\n \"\"\"\n Returns new data since the last run of the generator.\n\n Returns:\n Latest data.\n \"\"\"\n return Data.from_multiple_data(\n [\n self.experiment.lookup_data_for_trial(idx)[0]\n for idx in self._updated_trials\n ]\n )\n\n def _gen_new_generator_run(self, n: int = 1) -> GeneratorRun:\n \"\"\"Generate new generator run for this experiment.\n\n Args:\n n: Number of arms to generate.\n \"\"\"\n new_data = self._get_new_data()\n # If random seed is not set for this optimization, context manager does\n # nothing; otherwise, it sets the random seed for torch, but only for the\n # scope of this call. This is important because torch seed is set globally,\n # so if we just set the seed without the context manager, it can have\n # serious negative impact on the performance of the models that employ\n # stochasticity.\n with manual_seed(seed=self._random_seed) and warnings.catch_warnings():\n # Filter out GPYTorch warnings to avoid confusing users.\n warnings.simplefilter(\"ignore\")\n return not_none(self.generation_strategy).gen(\n experiment=self.experiment,\n new_data=new_data,\n n=n,\n pending_observations=get_pending_observation_features(\n experiment=self.experiment\n ),\n )\n\n # -------- Backward-compatibility with old save / load method names. -------\n\n @staticmethod\n def load_experiment(experiment_name: str) -> None:\n raise NotImplementedError(\n \"Use `load_experiment_from_database` to load from SQL database or \"\n \"`load_from_json_file` to load optimization state from .json file.\"\n )\n\n @staticmethod\n def load(filepath: Optional[str] = None) -> None:\n raise NotImplementedError(\n \"Use `load_experiment_from_database` to load from SQL database or \"\n \"`load_from_json_file` to load optimization state from .json file.\"\n )\n\n @staticmethod\n def save(filepath: Optional[str] = None) -> None:\n raise NotImplementedError(\n \"Use `save_to_json_file` to save optimization state to .json file.\"\n )\n"
] |
[
[
"numpy.minimum.accumulate",
"numpy.maximum.accumulate"
]
] |
msra-nlc/MSParS-V2.0-
|
[
"3e215b5f6ef47040275b3612fd2e1d5591909039"
] |
[
"code/generation/OpenNMT-py-master-our-model/onmt/decoders/transformer.py"
] |
[
"\"\"\"\nImplementation of \"Attention is All You Need\"\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport numpy as np\n\nimport onmt\nfrom onmt.modules.position_ffn import PositionwiseFeedForward\n\nMAX_SIZE = 5000\n\n\nclass TransformerDecoderLayer(nn.Module):\n \"\"\"\n Args:\n d_model (int): the dimension of keys/values/queries in\n MultiHeadedAttention, also the input size of\n the first-layer of the PositionwiseFeedForward.\n heads (int): the number of heads for MultiHeadedAttention.\n d_ff (int): the second-layer of the PositionwiseFeedForward.\n dropout (float): dropout probability(0-1.0).\n self_attn_type (string): type of self-attention scaled-dot, average\n \"\"\"\n\n def __init__(self, d_model, heads, d_ff, dropout,\n self_attn_type=\"scaled-dot\"):\n super(TransformerDecoderLayer, self).__init__()\n\n self.self_attn_type = self_attn_type\n\n if self_attn_type == \"scaled-dot\":\n self.self_attn = onmt.modules.MultiHeadedAttention(\n heads, d_model, dropout=dropout)\n elif self_attn_type == \"average\":\n self.self_attn = onmt.modules.AverageAttention(\n d_model, dropout=dropout)\n\n self.context_attn = onmt.modules.MultiHeadedAttention(\n heads, d_model, dropout=dropout)\n self.feed_forward = PositionwiseFeedForward(d_model, d_ff, dropout)\n self.layer_norm_1 = nn.LayerNorm(d_model, eps=1e-6)\n self.layer_norm_2 = nn.LayerNorm(d_model, eps=1e-6)\n self.dropout = dropout\n self.drop = nn.Dropout(dropout)\n mask = self._get_attn_subsequent_mask(MAX_SIZE)\n # Register self.mask as a buffer in TransformerDecoderLayer, so\n # it gets TransformerDecoderLayer's cuda behavior automatically.\n self.register_buffer('mask', mask)\n\n def forward(self, inputs, memory_bank, src_pad_mask, tgt_pad_mask,\n previous_input=None, layer_cache=None, step=None):\n \"\"\"\n Args:\n inputs (`FloatTensor`): `[batch_size x 1 x model_dim]`\n memory_bank (`FloatTensor`): `[batch_size x src_len x model_dim]`\n src_pad_mask (`LongTensor`): `[batch_size x 1 x src_len]`\n tgt_pad_mask (`LongTensor`): `[batch_size x 1 x 1]`\n\n Returns:\n (`FloatTensor`, `FloatTensor`, `FloatTensor`):\n\n * output `[batch_size x 1 x model_dim]`\n * attn `[batch_size x 1 x src_len]`\n * all_input `[batch_size x current_step x model_dim]`\n\n \"\"\"\n tgt_pad_mask = tgt_pad_mask.to(torch.uint8)\n dec_mask = torch.gt(tgt_pad_mask +\n self.mask[:, :tgt_pad_mask.size(1),\n :tgt_pad_mask.size(1)].to(torch.uint8), 0)\n\n input_norm = self.layer_norm_1(inputs)\n all_input = input_norm\n if previous_input is not None:\n all_input = torch.cat((previous_input, input_norm), dim=1)\n dec_mask = None\n\n if self.self_attn_type == \"scaled-dot\":\n query, attn = self.self_attn(all_input, all_input, input_norm,\n mask=dec_mask,\n layer_cache=layer_cache,\n type=\"self\")\n elif self.self_attn_type == \"average\":\n query, attn = self.self_attn(input_norm, mask=dec_mask,\n layer_cache=layer_cache, step=step)\n\n query = self.drop(query) + inputs\n\n query_norm = self.layer_norm_2(query)\n mid, attn = self.context_attn(memory_bank, memory_bank, query_norm,\n mask=src_pad_mask,\n layer_cache=layer_cache,\n type=\"context\")\n output = self.feed_forward(self.drop(mid) + query)\n\n return output, attn, all_input\n\n def _get_attn_subsequent_mask(self, size):\n \"\"\"\n Get an attention mask to avoid using the subsequent info.\n\n Args:\n size: int\n\n Returns:\n (`LongTensor`):\n\n * subsequent_mask `[1 x size x size]`\n \"\"\"\n attn_shape = (1, size, size)\n subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')\n 
subsequent_mask = torch.from_numpy(subsequent_mask)\n return subsequent_mask\n\n\nclass TransformerDecoder(nn.Module):\n \"\"\"\n The Transformer decoder from \"Attention is All You Need\".\n\n\n .. mermaid::\n\n graph BT\n A[input]\n B[multi-head self-attn]\n BB[multi-head src-attn]\n C[feed forward]\n O[output]\n A --> B\n B --> BB\n BB --> C\n C --> O\n\n\n Args:\n num_layers (int): number of encoder layers.\n d_model (int): size of the model\n heads (int): number of heads\n d_ff (int): size of the inner FF layer\n dropout (float): dropout parameters\n embeddings (:obj:`onmt.modules.Embeddings`):\n embeddings to use, should have positional encodings\n attn_type (str): if using a seperate copy attention\n \"\"\"\n\n def __init__(self, num_layers, d_model, heads, d_ff, attn_type,\n copy_attn, self_attn_type, dropout, embeddings):\n super(TransformerDecoder, self).__init__()\n\n # Basic attributes.\n self.decoder_type = 'transformer'\n self.num_layers = num_layers\n self.embeddings = embeddings\n self.self_attn_type = self_attn_type\n\n # Decoder State\n self.state = {}\n\n # Build TransformerDecoder.\n self.transformer_layers = nn.ModuleList(\n [TransformerDecoderLayer(d_model, heads, d_ff, dropout,\n self_attn_type=self_attn_type)\n for _ in range(num_layers)])\n\n # TransformerDecoder has its own attention mechanism.\n # Set up a separated copy attention layer, if needed.\n self._copy = False\n if copy_attn:\n self.copy_attn = onmt.modules.GlobalAttention(\n d_model, attn_type=attn_type)\n self._copy = True\n self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)\n\n def init_state(self, src, memory_bank, enc_hidden, with_cache=False):\n \"\"\" Init decoder state \"\"\"\n self.state[\"src\"] = src\n self.state[\"previous_input\"] = None\n self.state[\"previous_layer_inputs\"] = None\n self.state[\"cache\"] = None\n\n if with_cache:\n self._init_cache(memory_bank, self.num_layers,\n self.self_attn_type)\n\n def update_state(self, new_input, previous_layer_inputs):\n\n self.state[\"previous_input\"] = new_input\n self.state[\"previous_layer_inputs\"] = previous_layer_inputs\n\n def map_state(self, fn):\n def _recursive_map(struct, batch_dim=0):\n for k, v in struct.items():\n if v is not None:\n if isinstance(v, dict):\n _recursive_map(v)\n else:\n struct[k] = fn(v, batch_dim)\n\n self.state[\"src\"] = fn(self.state[\"src\"], 1)\n if self.state[\"previous_input\"] is not None:\n self.state[\"previous_input\"] = fn(self.state[\"previous_input\"], 1)\n if self.state[\"previous_layer_inputs\"] is not None:\n self.state[\"previous_layer_inputs\"] = \\\n fn(self.state[\"previous_layer_inputs\"], 1)\n if self.state[\"cache\"] is not None:\n _recursive_map(self.state[\"cache\"])\n\n def detach_state(self):\n if self.state[\"previous_input\"] is not None:\n self.state[\"previous_input\"] = \\\n self.state[\"previous_input\"].detach()\n if self.state[\"previous_layer_inputs\"] is not None:\n self.state[\"previous_layer_inputs\"] = \\\n self.state[\"previous_layer_inputs\"].detach()\n self.state[\"src\"] = self.state[\"src\"].detach()\n\n def forward(self, tgt, memory_bank, memory_lengths=None,\n step=None, cache=None):\n \"\"\"\n See :obj:`onmt.modules.RNNDecoderBase.forward()`\n \"\"\"\n src = self.state[\"src\"]\n src_words = src[:, :, 0].transpose(0, 1)\n tgt_words = tgt[:, :, 0].transpose(0, 1)\n src_batch, src_len = src_words.size()\n tgt_batch, tgt_len = tgt_words.size()\n\n # Initialize return variables.\n dec_outs = []\n attns = {\"std\": []}\n if self._copy:\n attns[\"copy\"] = []\n\n # Run 
the forward pass of the TransformerDecoder.\n emb = self.embeddings(tgt, step=step)\n assert emb.dim() == 3 # len x batch x embedding_dim\n\n output = emb.transpose(0, 1).contiguous()\n src_memory_bank = memory_bank.transpose(0, 1).contiguous()\n\n padding_idx = self.embeddings.word_padding_idx\n src_pad_mask = src_words.data.eq(padding_idx).unsqueeze(1) \\\n .expand(src_batch, tgt_len, src_len)\n tgt_pad_mask = tgt_words.data.eq(padding_idx).unsqueeze(1) \\\n .expand(tgt_batch, tgt_len, tgt_len)\n\n if self.state[\"cache\"] is None:\n saved_inputs = []\n\n for i in range(self.num_layers):\n prev_layer_input = None\n if self.state[\"cache\"] is None:\n if self.state[\"previous_input\"] is not None:\n prev_layer_input = self.state[\"previous_layer_inputs\"][i]\n output, attn, all_input \\\n = self.transformer_layers[i](\n output, src_memory_bank,\n src_pad_mask, tgt_pad_mask,\n previous_input=prev_layer_input,\n layer_cache=self.state[\"cache\"][\"layer_{}\".format(i)]\n if self.state[\"cache\"] is not None else None,\n step=step)\n if self.state[\"cache\"] is None:\n saved_inputs.append(all_input)\n\n if self.state[\"cache\"] is None:\n saved_inputs = torch.stack(saved_inputs)\n\n output = self.layer_norm(output)\n\n # Process the result and update the attentions.\n dec_outs = output.transpose(0, 1).contiguous()\n attn = attn.transpose(0, 1).contiguous()\n\n attns[\"std\"] = attn\n if self._copy:\n attns[\"copy\"] = attn\n\n if self.state[\"cache\"] is None:\n self.update_state(tgt, saved_inputs)\n # TODO change the way attns is returned dict => list or tuple (onnx)\n return dec_outs, attns\n\n def _init_cache(self, memory_bank, num_layers, self_attn_type):\n self.state[\"cache\"] = {}\n batch_size = memory_bank.size(1)\n depth = memory_bank.size(-1)\n\n for l in range(num_layers):\n layer_cache = {\n \"memory_keys\": None,\n \"memory_values\": None\n }\n if self_attn_type == \"scaled-dot\":\n layer_cache[\"self_keys\"] = None\n layer_cache[\"self_values\"] = None\n elif self_attn_type == \"average\":\n layer_cache[\"prev_g\"] = torch.zeros((batch_size, 1, depth))\n else:\n layer_cache[\"self_keys\"] = None\n layer_cache[\"self_values\"] = None\n self.state[\"cache\"][\"layer_{}\".format(l)] = layer_cache\n"
] |
[
[
"torch.nn.Dropout",
"torch.cat",
"torch.zeros",
"torch.from_numpy",
"torch.nn.LayerNorm",
"numpy.ones",
"torch.stack"
]
] |
ravising-h/tf-pose-estimation
|
[
"95f131ce8f082f7f788c023278465cb6a42b9420"
] |
[
"tf_pose/pose_dataset.py"
] |
[
"import logging\nimport math\nimport multiprocessing\nimport struct\nimport sys\nimport threading\n\ntry:\n from StringIO import StringIO\nexcept ImportError:\n from io import StringIO\n\nfrom contextlib import contextmanager\n\nimport os\nimport random\nimport requests\nimport cv2\nimport numpy as np\nimport time\n\nimport tensorflow as tf\n\nfrom tensorpack.dataflow import MultiThreadMapData\nfrom tensorpack.dataflow.image import MapDataComponent\nfrom tensorpack.dataflow.common import BatchData, MapData\nfrom tensorpack.dataflow.parallel import PrefetchData\nfrom tensorpack.dataflow.base import RNGDataFlow, DataFlowTerminated\n\nfrom pycocotools.coco import COCO\nfrom pose_augment import pose_flip, pose_rotation, pose_to_img, pose_crop_random, \\\n pose_resize_shortestedge_random, pose_resize_shortestedge_fixed, pose_crop_center, pose_random_scale\nfrom numba import jit\n\nlogging.getLogger(\"requests\").setLevel(logging.WARNING)\nlogger = logging.getLogger('pose_dataset')\nlogger.setLevel(logging.INFO)\nch = logging.StreamHandler()\nch.setLevel(logging.DEBUG)\nformatter = logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')\nch.setFormatter(formatter)\nlogger.addHandler(ch)\n\nmplset = False\n\n\nclass CocoMetadata:\n # __coco_parts = 57\n __coco_parts = 38\n __coco_vecs = list(zip(\n [i for i in range(38)],\n [i for i in range(38)]\n ))\n\n @staticmethod\n def parse_float(four_np):\n assert len(four_np) == 4\n return struct.unpack('<f', bytes(four_np))[0]\n\n @staticmethod\n def parse_floats(four_nps, adjust=0):\n assert len(four_nps) % 4 == 0\n return [(CocoMetadata.parse_float(four_nps[x*4:x*4+4]) + adjust) for x in range(len(four_nps) // 4)]\n\n def __init__(self, idx, img_url, img_meta, annotations, sigma):\n self.idx = idx\n self.img_url = img_url\n self.img = None\n self.sigma = sigma\n\n self.height = int(img_meta['height'])\n self.width = int(img_meta['width'])\n\n joint_list = []\n for ann in annotations:\n if ann.get('num_keypoints', 0) == 0:\n continue\n\n kp = np.array(ann['keypoints'])\n xs = kp[0::3]\n ys = kp[1::3]\n vs = kp[2::3]\n\n joint_list.append([(x, y) if v >= 1 else (-1000, -1000) for x, y, v in zip(xs, ys, vs)])\n\n self.joint_list = []\n transform = list(zip(\n [i for i in range(37)],\n [i for i in range(37)]\n ))\n for prev_joint in joint_list:\n new_joint = []\n for idx1, idx2 in transform:\n j1 = prev_joint[idx1-1]\n j2 = prev_joint[idx2-1]\n\n if j1[0] <= 0 or j1[1] <= 0 or j2[0] <= 0 or j2[1] <= 0:\n new_joint.append((-1000, -1000))\n else:\n new_joint.append(((j1[0] + j2[0]) / 2, (j1[1] + j2[1]) / 2))\n\n new_joint.append((-1000, -1000))\n self.joint_list.append(new_joint)\n\n # logger.debug('joint size=%d' % len(self.joint_list))\n\n @jit\n def get_heatmap(self, target_size):\n heatmap = np.zeros((CocoMetadata.__coco_parts, self.height, self.width), dtype=np.float32)\n\n for joints in self.joint_list:\n for idx, point in enumerate(joints):\n if point[0] < 0 or point[1] < 0:\n continue\n CocoMetadata.put_heatmap(heatmap, idx, point, self.sigma)\n\n heatmap = heatmap.transpose((1, 2, 0))\n\n # background\n heatmap[:, :, -1] = np.clip(1 - np.amax(heatmap, axis=2), 0.0, 1.0)\n\n if target_size:\n heatmap = cv2.resize(heatmap, target_size, interpolation=cv2.INTER_AREA)\n\n return heatmap.astype(np.float16)\n\n @staticmethod\n @jit(nopython=True)\n def put_heatmap(heatmap, plane_idx, center, sigma):\n center_x, center_y = center\n _, height, width = heatmap.shape[:3]\n\n th = 4.6052\n delta = math.sqrt(th * 2)\n\n x0 = int(max(0, 
center_x - delta * sigma))\n y0 = int(max(0, center_y - delta * sigma))\n\n x1 = int(min(width, center_x + delta * sigma))\n y1 = int(min(height, center_y + delta * sigma))\n\n for y in range(y0, y1):\n for x in range(x0, x1):\n d = (x - center_x) ** 2 + (y - center_y) ** 2\n exp = d / 2.0 / sigma / sigma\n if exp > th:\n continue\n heatmap[plane_idx][y][x] = max(heatmap[plane_idx][y][x], math.exp(-exp))\n heatmap[plane_idx][y][x] = min(heatmap[plane_idx][y][x], 1.0)\n\n @jit\n def get_vectormap(self, target_size):\n vectormap = np.zeros((CocoMetadata.__coco_parts*2, self.height, self.width), dtype=np.float32)\n countmap = np.zeros((CocoMetadata.__coco_parts, self.height, self.width), dtype=np.int16)\n for joints in self.joint_list:\n for plane_idx, (j_idx1, j_idx2) in enumerate(CocoMetadata.__coco_vecs):\n j_idx1 -= 1\n j_idx2 -= 1\n\n center_from = joints[j_idx1]\n center_to = joints[j_idx2]\n\n if center_from[0] < -100 or center_from[1] < -100 or center_to[0] < -100 or center_to[1] < -100:\n continue\n\n CocoMetadata.put_vectormap(vectormap, countmap, plane_idx, center_from, center_to)\n\n vectormap = vectormap.transpose((1, 2, 0))\n nonzeros = np.nonzero(countmap)\n for p, y, x in zip(nonzeros[0], nonzeros[1], nonzeros[2]):\n if countmap[p][y][x] <= 0:\n continue\n vectormap[y][x][p*2+0] /= countmap[p][y][x]\n vectormap[y][x][p*2+1] /= countmap[p][y][x]\n\n if target_size:\n vectormap = cv2.resize(vectormap, target_size, interpolation=cv2.INTER_AREA)\n\n return vectormap.astype(np.float16)\n\n @staticmethod\n @jit(nopython=True)\n def put_vectormap(vectormap, countmap, plane_idx, center_from, center_to, threshold=8):\n _, height, width = vectormap.shape[:3]\n\n vec_x = center_to[0] - center_from[0]\n vec_y = center_to[1] - center_from[1]\n\n min_x = max(0, int(min(center_from[0], center_to[0]) - threshold))\n min_y = max(0, int(min(center_from[1], center_to[1]) - threshold))\n\n max_x = min(width, int(max(center_from[0], center_to[0]) + threshold))\n max_y = min(height, int(max(center_from[1], center_to[1]) + threshold))\n\n norm = math.sqrt(vec_x ** 2 + vec_y ** 2)\n if norm == 0:\n return\n\n vec_x /= norm\n vec_y /= norm\n\n for y in range(min_y, max_y):\n for x in range(min_x, max_x):\n bec_x = x - center_from[0]\n bec_y = y - center_from[1]\n dist = abs(bec_x * vec_y - bec_y * vec_x)\n\n if dist > threshold:\n continue\n\n countmap[plane_idx][y][x] += 1\n\n vectormap[plane_idx*2+0][y][x] = vec_x\n vectormap[plane_idx*2+1][y][x] = vec_y\n\n\nclass CocoPose(RNGDataFlow):\n @staticmethod\n def display_image(inp, heatmap, vectmap, as_numpy=False):\n global mplset\n # if as_numpy and not mplset:\n # import matplotlib as mpl\n # mpl.use('Agg')\n mplset = True\n import matplotlib.pyplot as plt\n\n fig = plt.figure()\n a = fig.add_subplot(2, 2, 1)\n a.set_title('Image')\n plt.imshow(CocoPose.get_bgimg(inp))\n\n a = fig.add_subplot(2, 2, 2)\n a.set_title('Heatmap')\n plt.imshow(CocoPose.get_bgimg(inp, target_size=(heatmap.shape[1], heatmap.shape[0])), alpha=0.5)\n tmp = np.amax(heatmap, axis=2)\n plt.imshow(tmp, cmap=plt.cm.gray, alpha=0.5)\n plt.colorbar()\n\n tmp2 = vectmap.transpose((2, 0, 1))\n tmp2_odd = np.amax(np.absolute(tmp2[::2, :, :]), axis=0)\n tmp2_even = np.amax(np.absolute(tmp2[1::2, :, :]), axis=0)\n\n a = fig.add_subplot(2, 2, 3)\n a.set_title('Vectormap-x')\n plt.imshow(CocoPose.get_bgimg(inp, target_size=(vectmap.shape[1], vectmap.shape[0])), alpha=0.5)\n plt.imshow(tmp2_odd, cmap=plt.cm.gray, alpha=0.5)\n plt.colorbar()\n\n a = fig.add_subplot(2, 2, 4)\n 
a.set_title('Vectormap-y')\n plt.imshow(CocoPose.get_bgimg(inp, target_size=(vectmap.shape[1], vectmap.shape[0])), alpha=0.5)\n plt.imshow(tmp2_even, cmap=plt.cm.gray, alpha=0.5)\n plt.colorbar()\n\n if not as_numpy:\n plt.show()\n else:\n fig.canvas.draw()\n data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')\n data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))\n fig.clear()\n plt.close()\n return data\n\n @staticmethod\n def get_bgimg(inp, target_size=None):\n inp = cv2.cvtColor(inp.astype(np.uint8), cv2.COLOR_BGR2RGB)\n if target_size:\n inp = cv2.resize(inp, target_size, interpolation=cv2.INTER_AREA)\n return inp\n\n def __init__(self, path, img_path=None, is_train=True, decode_img=True, only_idx=-1):\n self.is_train = is_train\n self.decode_img = decode_img\n self.only_idx = only_idx\n\n if is_train:\n whole_path = os.path.join(path, 'person_keypoints_train2017.json')\n else:\n whole_path = os.path.join(path, 'person_keypoints_val2017.json')\n self.img_path = (img_path if img_path is not None else '') + ('train2017/' if is_train else 'val2017/')\n self.coco = COCO(whole_path)\n\n logger.info('%s dataset %d' % (path, self.size()))\n\n def size(self):\n return len(self.coco.imgs)\n\n def get_data(self):\n idxs = np.arange(self.size())\n if self.is_train:\n self.rng.shuffle(idxs)\n else:\n pass\n\n keys = list(self.coco.imgs.keys())\n for idx in idxs:\n img_meta = self.coco.imgs[keys[idx]]\n img_idx = img_meta['id']\n ann_idx = self.coco.getAnnIds(imgIds=img_idx)\n\n if 'http://' in self.img_path:\n img_url = self.img_path + img_meta['filename']\n else:\n img_url = os.path.join(self.img_path, img_meta['filename'])\n\n anns = self.coco.loadAnns(ann_idx)\n meta = CocoMetadata(idx, img_url, img_meta, anns, sigma=8.0)\n\n total_keypoints = sum([ann.get('num_keypoints', 0) for ann in anns])\n if total_keypoints == 0 and random.uniform(0, 1) > 0.2:\n continue\n\n yield [meta]\n\n\nclass MPIIPose(RNGDataFlow):\n def __init__(self):\n pass\n\n def size(self):\n pass\n\n def get_data(self):\n pass\n\n\ndef read_image_url(metas):\n for meta in metas:\n img_str = None\n if 'http://' in meta.img_url:\n # print(meta.img_url)\n for _ in range(10):\n try:\n resp = requests.get(meta.img_url)\n if resp.status_code // 100 != 2:\n logger.warning('request failed code=%d url=%s' % (resp.status_code, meta.img_url))\n time.sleep(1.0)\n continue\n img_str = resp.content\n break\n except Exception as e:\n logger.warning('request failed url=%s, err=%s' % (meta.img_url, str(e)))\n else:\n img_str = open(meta.img_url, 'rb').read()\n\n if not img_str:\n logger.warning('image not read, path=%s' % meta.img_url)\n raise Exception()\n\n nparr = np.fromstring(img_str, np.uint8)\n meta.img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n return metas\n\n\ndef get_dataflow(path, is_train, img_path=None):\n ds = CocoPose(path, img_path, is_train) # read data from lmdb\n if is_train:\n ds = MapData(ds, read_image_url)\n ds = MapDataComponent(ds, pose_random_scale)\n ds = MapDataComponent(ds, pose_rotation)\n# ds = MapDataComponent(ds, pose_flip)\n ds = MapDataComponent(ds, pose_resize_shortestedge_random)\n ds = MapDataComponent(ds, pose_crop_random)\n ds = MapData(ds, pose_to_img)\n # augs = [\n # imgaug.RandomApplyAug(imgaug.RandomChooseAug([\n # imgaug.GaussianBlur(max_size=3)\n # ]), 0.7)\n # ]\n # ds = AugmentImageComponent(ds, augs)\n ds = PrefetchData(ds, 1000, multiprocessing.cpu_count() * 1)\n else:\n ds = MultiThreadMapData(ds, num_thread=16, map_func=read_image_url, buffer_size=1000)\n ds 
= MapDataComponent(ds, pose_resize_shortestedge_fixed)\n ds = MapDataComponent(ds, pose_crop_center)\n ds = MapData(ds, pose_to_img)\n ds = PrefetchData(ds, 100, multiprocessing.cpu_count())\n\n return ds\n\n\ndef _get_dataflow_onlyread(path, is_train, img_path=None):\n ds = CocoPose(path, img_path, is_train) # read data from lmdb\n ds = MapData(ds, read_image_url)\n ds = MapData(ds, pose_to_img)\n # ds = PrefetchData(ds, 1000, multiprocessing.cpu_count() * 4)\n return ds\n\n\ndef get_dataflow_batch(path, is_train, batchsize, img_path=None):\n logger.info('dataflow img_path=%s' % img_path)\n ds = get_dataflow(path, is_train, img_path=img_path)\n ds = BatchData(ds, batchsize)\n # if is_train:\n # ds = PrefetchData(ds, 10, 2)\n # else:\n # ds = PrefetchData(ds, 50, 2)\n\n return ds\n\n\nclass DataFlowToQueue(threading.Thread):\n def __init__(self, ds, placeholders, queue_size=5):\n super().__init__()\n self.daemon = True\n\n self.ds = ds\n self.placeholders = placeholders\n self.queue = tf.FIFOQueue(queue_size, [ph.dtype for ph in placeholders], shapes=[ph.get_shape() for ph in placeholders])\n self.op = self.queue.enqueue(placeholders)\n self.close_op = self.queue.close(cancel_pending_enqueues=True)\n\n self._coord = None\n self._sess = None\n\n self.last_dp = None\n\n @contextmanager\n def default_sess(self):\n if self._sess:\n with self._sess.as_default():\n yield\n else:\n logger.warning(\"DataFlowToQueue {} wasn't under a default session!\".format(self.name))\n yield\n\n def size(self):\n return self.queue.size()\n\n def start(self):\n self._sess = tf.get_default_session()\n super().start()\n\n def set_coordinator(self, coord):\n self._coord = coord\n\n def run(self):\n with self.default_sess():\n try:\n while not self._coord.should_stop():\n try:\n self.ds.reset_state()\n while True:\n for dp in self.ds.get_data():\n feed = dict(zip(self.placeholders, dp))\n self.op.run(feed_dict=feed)\n self.last_dp = dp\n except (tf.errors.CancelledError, tf.errors.OutOfRangeError, DataFlowTerminated):\n logger.error('err type1, placeholders={}'.format(self.placeholders))\n sys.exit(-1)\n except Exception as e:\n logger.error('err type2, err={}, placeholders={}'.format(str(e), self.placeholders))\n if isinstance(e, RuntimeError) and 'closed Session' in str(e):\n pass\n else:\n logger.exception(\"Exception in {}:{}\".format(self.name, str(e)))\n sys.exit(-1)\n except Exception as e:\n logger.exception(\"Exception in {}:{}\".format(self.name, str(e)))\n finally:\n try:\n self.close_op.run()\n except Exception:\n pass\n logger.info(\"{} Exited.\".format(self.name))\n\n def dequeue(self):\n return self.queue.dequeue()\n\n\nif __name__ == '__main__':\n os.environ['CUDA_VISIBLE_DEVICES'] = ''\n\n from pose_augment import set_network_input_wh, set_network_scale\n # set_network_input_wh(368, 368)\n set_network_input_wh(480, 320)\n set_network_scale(8)\n\n # df = get_dataflow('/data/public/rw/coco/annotations', True, '/data/public/rw/coco/')\n df = _get_dataflow_onlyread('/data/public/rw/coco/annotations', True, '/data/public/rw/coco/')\n # df = get_dataflow('/root/coco/annotations', False, img_path='http://gpu-twg.kakaocdn.net/braincloud/COCO/')\n\n from tensorpack.dataflow.common import TestDataSpeed\n TestDataSpeed(df).start()\n sys.exit(0)\n\n with tf.Session() as sess:\n df.reset_state()\n t1 = time.time()\n for idx, dp in enumerate(df.get_data()):\n if idx == 0:\n for d in dp:\n logger.info('%d dp shape={}'.format(d.shape))\n print(time.time() - t1)\n t1 = time.time()\n CocoPose.display_image(dp[0], 
dp[1].astype(np.float32), dp[2].astype(np.float32))\n print(dp[1].shape, dp[2].shape)\n pass\n\n logger.info('done')\n"
] |
[
[
"tensorflow.get_default_session",
"numpy.amax",
"matplotlib.pyplot.imshow",
"numpy.absolute",
"numpy.nonzero",
"matplotlib.pyplot.colorbar",
"numpy.fromstring",
"tensorflow.Session",
"matplotlib.pyplot.close",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
daniel-zullo-sociomantic/mxnet
|
[
"f3e89b1324986238425121a32abb4d4b39f52cab"
] |
[
"python/mxnet/test_utils.py"
] |
[
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\"\"\"Tools for testing.\"\"\"\n# pylint: disable=too-many-lines\nfrom __future__ import absolute_import, print_function, division\nimport time\nimport gzip\nimport struct\nimport traceback\nimport numbers\nimport subprocess\nimport sys\nimport os\nimport errno\nimport logging\nimport bz2\nimport zipfile\nfrom contextlib import contextmanager\nimport numpy as np\nimport numpy.testing as npt\nimport numpy.random as rnd\ntry:\n import scipy.stats as ss\nexcept ImportError:\n ss = None\ntry:\n import requests\nexcept ImportError:\n # in rare cases requests may be not installed\n pass\nimport mxnet as mx\nfrom .context import Context, current_context\nfrom .ndarray.ndarray import _STORAGE_TYPE_STR_TO_ID\nfrom .ndarray import array\nfrom .symbol import Symbol\n\n\ndef default_context():\n \"\"\"Get default context for regression test.\"\"\"\n # _TODO: get context from environment variable to support\n # testing with GPUs\n return current_context()\n\n\ndef set_default_context(ctx):\n \"\"\"Set default context.\"\"\"\n Context._default_ctx.value = ctx\n\n\ndef default_dtype():\n \"\"\"Get default data type for regression test.\"\"\"\n # _TODO: get default dtype from environment variable\n return np.float32\n\n\ndef get_atol(atol=None):\n \"\"\"Get default numerical threshold for regression test.\"\"\"\n # _TODO: get from env variable, different threshold might\n # be needed for different device and dtype\n return 1e-20 if atol is None else atol\n\n\ndef get_rtol(rtol=None):\n \"\"\"Get default numerical threshold for regression test.\"\"\"\n # _TODO: get from env variable, different threshold might\n # be needed for different device and dtype\n return 1e-5 if rtol is None else rtol\n\n\ndef random_arrays(*shapes):\n \"\"\"Generate some random numpy arrays.\"\"\"\n arrays = [np.random.randn(*s).astype(default_dtype())\n for s in shapes]\n if len(arrays) == 1:\n return arrays[0]\n return arrays\n\n\ndef random_sample(population, k):\n \"\"\"Return a k length list of the elements chosen from the population sequence.\"\"\"\n assert 0 <= k <= len(population)\n population_copy = population[:]\n np.random.shuffle(population_copy)\n return population_copy[0:k]\n\n\ndef _validate_csr_generation_inputs(num_rows, num_cols, density,\n distribution=\"uniform\"):\n \"\"\"Validates inputs for csr generation helper functions\n \"\"\"\n total_nnz = int(num_rows * num_cols * density)\n if density < 0 or density > 1:\n raise ValueError(\"density has to be between 0 and 1\")\n\n if num_rows <= 0 or num_cols <= 0:\n raise ValueError(\"num_rows or num_cols should be greater than 0\")\n\n if distribution == \"powerlaw\":\n if total_nnz < 2 * num_rows:\n raise ValueError(\"not supported for this density: %s\"\n \" for this shape 
(%s, %s)\"\n \" Please keep :\"\n \" num_rows * num_cols * density >= 2 * num_rows\"\n % (density, num_rows, num_cols))\n\n\ndef shuffle_csr_column_indices(csr):\n \"\"\"Shuffle CSR column indices per row\n This allows validation of unordered column indices, which is not a requirement\n for a valid CSR matrix\n \"\"\"\n row_count = len(csr.indptr) - 1\n for i in range(row_count):\n start_index = csr.indptr[i]\n end_index = csr.indptr[i + 1]\n sublist = np.array(csr.indices[start_index : end_index])\n np.random.shuffle(sublist)\n csr.indices[start_index : end_index] = sublist\n\n\ndef _get_uniform_dataset_csr(num_rows, num_cols, density=0.1, dtype=None,\n data_init=None, shuffle_csr_indices=False):\n \"\"\"Returns CSRNDArray with uniform distribution\n This generates a csr matrix with totalnnz unique randomly chosen numbers\n from num_rows*num_cols and arranges them in the 2d array in the\n following way:\n row_index = (random_number_generated / num_rows)\n col_index = random_number_generated - row_index * num_cols\n \"\"\"\n _validate_csr_generation_inputs(num_rows, num_cols, density,\n distribution=\"uniform\")\n try:\n from scipy import sparse as spsp\n csr = spsp.rand(num_rows, num_cols, density, dtype=dtype, format=\"csr\")\n if data_init is not None:\n csr.data.fill(data_init)\n if shuffle_csr_indices is True:\n shuffle_csr_column_indices(csr)\n result = mx.nd.sparse.csr_matrix((csr.data, csr.indices, csr.indptr),\n shape=(num_rows, num_cols), dtype=dtype)\n except ImportError:\n assert(data_init is None), \\\n \"data_init option is not supported when scipy is absent\"\n assert(not shuffle_csr_indices), \\\n \"shuffle_csr_indices option is not supported when scipy is absent\"\n # scipy not available. try to generate one from a dense array\n dns = mx.nd.random.uniform(shape=(num_rows, num_cols), dtype=dtype)\n masked_dns = dns * (dns < density)\n result = masked_dns.tostype('csr')\n return result\n\ndef _get_powerlaw_dataset_csr(num_rows, num_cols, density=0.1, dtype=None):\n \"\"\"Returns CSRNDArray with powerlaw distribution\n with exponentially increasing number of non zeros in each row.\n Not supported for cases where total_nnz < 2*num_rows. 
This is because\n the algorithm first tries to ensure that there are rows with no zeros by\n putting non zeros at beginning of each row.\n \"\"\"\n\n _validate_csr_generation_inputs(num_rows, num_cols, density,\n distribution=\"powerlaw\")\n\n total_nnz = int(num_rows * num_cols * density)\n\n unused_nnz = total_nnz\n output_arr = np.zeros((num_rows, num_cols), dtype=dtype)\n # Start with ones on each row so that no row is empty\n for row in range(num_rows):\n output_arr[row][0] = 1 + rnd.uniform(0.001, 2)\n unused_nnz = unused_nnz - 1\n if unused_nnz <= 0:\n return mx.nd.array(output_arr).tostype(\"csr\")\n\n # Populate rest of matrix with 2^i items in ith row.\n # if we have used all total nnz return the sparse matrix\n # else if we reached max column size then fill up full columns until we use all nnz\n col_max = 2\n for row in range(num_rows):\n col_limit = min(num_cols, col_max)\n # In case col_limit reached assign same value to all elements, which is much faster\n if col_limit == num_cols and unused_nnz > col_limit:\n output_arr[row] = 1 + rnd.uniform(0.001, 2)\n unused_nnz = unused_nnz - col_limit + 1\n if unused_nnz <= 0:\n return mx.nd.array(output_arr).tostype(\"csr\")\n else:\n continue\n for col_index in range(1, col_limit):\n output_arr[row][col_index] = 1 + rnd.uniform(0.001, 2)\n unused_nnz = unused_nnz - 1\n if unused_nnz <= 0:\n return mx.nd.array(output_arr).tostype(\"csr\")\n col_max = col_max * 2\n\n if unused_nnz > 0:\n raise ValueError(\"not supported for this density: %s\"\n \" for this shape (%s,%s)\" % (density, num_rows, num_cols))\n else:\n return mx.nd.array(output_arr).tostype(\"csr\")\n\ndef assign_each(the_input, function):\n \"\"\"Return ndarray composed of passing each array value through some function\"\"\"\n if function is None:\n output = np.array(the_input)\n else:\n it_input = np.nditer(the_input, flags=['f_index'])\n\n output = np.zeros(the_input.shape)\n it_out = np.nditer(output, flags=['f_index'], op_flags=['writeonly'])\n\n while not it_input.finished:\n val_input = it_input[0]\n it_out[0] = function(val_input)\n it_input.iternext()\n it_out.iternext()\n\n return output\n\ndef assign_each2(input1, input2, function):\n \"\"\"Return ndarray composed of passing two array values through some function\"\"\"\n if function is None:\n output = np.array(input1)\n else:\n assert input1.shape == input2.shape\n it_input1 = np.nditer(input1, flags=['f_index'])\n it_input2 = np.nditer(input2, flags=['f_index'])\n\n output = np.zeros(input1.shape)\n it_out = np.nditer(output, flags=['f_index'], op_flags=['writeonly'])\n\n while not it_input1.finished:\n val_input1 = it_input1[0]\n val_input2 = it_input2[0]\n it_out[0] = function(val_input1, val_input2)\n it_input1.iternext()\n it_input2.iternext()\n it_out.iternext()\n\n return output\n\ndef rand_sparse_ndarray(shape, stype, density=None, dtype=None, distribution=None,\n data_init=None, rsp_indices=None, modifier_func=None,\n shuffle_csr_indices=False):\n \"\"\"Generate a random sparse ndarray. 
Returns the ndarray, value(np) and indices(np)\n\n Parameters\n ----------\n shape: list or tuple\n stype: str, valid values: \"csr\" or \"row_sparse\"\n density, optional: float, should be between 0 and 1\n distribution, optional: str, valid values: \"uniform\" or \"powerlaw\"\n dtype, optional: numpy.dtype, default value is None\n\n Returns\n -------\n Result of type CSRNDArray or RowSparseNDArray\n\n Examples\n --------\n Below is an example of the powerlaw distribution with csr as the stype.\n It calculates the nnz using the shape and density.\n It fills up the ndarray with exponentially increasing number of elements.\n If there are enough unused_nnzs, n+1th row will have twice more nnzs compared to nth row.\n else, remaining unused_nnzs will be used in n+1th row\n If number of cols is too small and we have already reached column size it will fill up\n all following columns in all followings rows until we reach the required density.\n\n >>> csr_arr, _ = rand_sparse_ndarray(shape=(5, 16), stype=\"csr\",\n density=0.50, distribution=\"powerlaw\")\n >>> indptr = csr_arr.indptr.asnumpy()\n >>> indices = csr_arr.indices.asnumpy()\n >>> data = csr_arr.data.asnumpy()\n >>> row2nnz = len(data[indptr[1]:indptr[2]])\n >>> row3nnz = len(data[indptr[2]:indptr[3]])\n >>> assert(row3nnz == 2*row2nnz)\n >>> row4nnz = len(data[indptr[3]:indptr[4]])\n >>> assert(row4nnz == 2*row3nnz)\n\n \"\"\"\n density = rnd.rand() if density is None else density\n dtype = default_dtype() if dtype is None else dtype\n distribution = \"uniform\" if distribution is None else distribution\n if stype == 'row_sparse':\n assert (distribution == \"uniform\"), \\\n \"Distribution %s not supported for row_sparse\" % (distribution)\n # sample index\n if rsp_indices is not None:\n indices = rsp_indices\n assert(len(indices) <= shape[0])\n else:\n idx_sample = rnd.rand(shape[0])\n indices = np.argwhere(idx_sample < density).flatten()\n if indices.shape[0] == 0:\n result = mx.nd.zeros(shape, stype='row_sparse', dtype=dtype)\n return result, (np.array([], dtype=dtype), np.array([]))\n # generate random values\n val = rnd.rand(indices.shape[0], *shape[1:]).astype(dtype)\n\n # Allow caller to override or adjust random values\n if data_init is not None:\n val.fill(data_init)\n if modifier_func is not None:\n val = assign_each(val, modifier_func)\n\n arr = mx.nd.sparse.row_sparse_array((val, indices), shape=shape, dtype=dtype)\n return arr, (val, indices)\n elif stype == 'csr':\n assert len(shape) == 2\n if distribution == \"uniform\":\n csr = _get_uniform_dataset_csr(shape[0], shape[1], density,\n data_init=data_init,\n shuffle_csr_indices=shuffle_csr_indices, dtype=dtype)\n return csr, (csr.indptr, csr.indices, csr.data)\n elif distribution == \"powerlaw\":\n csr = _get_powerlaw_dataset_csr(shape[0], shape[1], density=density, dtype=dtype)\n return csr, (csr.indptr, csr.indices, csr.data)\n else:\n assert(False), \"Distribution not supported: %s\" % (distribution)\n return False\n else:\n assert(False), \"unknown storage type\"\n return False\n\ndef rand_ndarray(shape, stype='default', density=None, dtype=None,\n modifier_func=None, shuffle_csr_indices=False, distribution=None):\n if stype == 'default':\n arr = mx.nd.array(random_arrays(shape), dtype=dtype)\n else:\n arr, _ = rand_sparse_ndarray(shape, stype, density=density,\n modifier_func=modifier_func, dtype=dtype,\n shuffle_csr_indices=shuffle_csr_indices,\n distribution=distribution)\n return arr\n\n\ndef create_sparse_array(shape, stype, data_init=None, rsp_indices=None,\n 
dtype=None, modifier_func=None, density=.5,\n shuffle_csr_indices=False):\n \"\"\"Create a sparse array, For Rsp, assure indices are in a canonical format\"\"\"\n if stype == 'row_sparse':\n if rsp_indices is not None:\n arr_indices = np.asarray(rsp_indices)\n arr_indices.sort()\n else:\n arr_indices = None\n arr_data, (_, _) = rand_sparse_ndarray(shape, stype,\n density=density,\n data_init=data_init,\n rsp_indices=arr_indices,\n dtype=dtype,\n modifier_func=modifier_func)\n elif stype == 'csr':\n arr_data, (_, _, _) = rand_sparse_ndarray(shape,\n stype,\n density=density,\n data_init=data_init,\n dtype=dtype,\n modifier_func=modifier_func,\n shuffle_csr_indices=shuffle_csr_indices)\n else:\n msg = \"Unknown storage type: \" + stype\n raise AssertionError(msg)\n\n return arr_data\n\n\ndef create_sparse_array_zd(shape, stype, density, data_init=None,\n rsp_indices=None, dtype=None, modifier_func=None,\n shuffle_csr_indices=False):\n \"\"\"Create sparse array, using only rsp_indices to determine density\"\"\"\n if stype == 'row_sparse':\n density = 0.0\n if rsp_indices is not None:\n assert len(rsp_indices) <= shape[0]\n return create_sparse_array(shape, stype,\n data_init=data_init,\n rsp_indices=rsp_indices,\n dtype=dtype,\n modifier_func=modifier_func,\n density=density,\n shuffle_csr_indices=shuffle_csr_indices)\n\ndef rand_shape_2d(dim0=10, dim1=10):\n return rnd.randint(1, dim0 + 1), rnd.randint(1, dim1 + 1)\n\n\ndef rand_shape_3d(dim0=10, dim1=10, dim2=10):\n return rnd.randint(1, dim0 + 1), rnd.randint(1, dim1 + 1), rnd.randint(1, dim2 + 1)\n\n\ndef rand_shape_nd(num_dim, dim=10):\n return tuple(rnd.randint(1, dim+1, size=num_dim))\n\n\ndef np_reduce(dat, axis, keepdims, numpy_reduce_func):\n \"\"\"Compatible reduce for old version of NumPy.\n\n Parameters\n ----------\n dat : np.ndarray\n Same as NumPy.\n\n axis : None or int or list-like\n Same as NumPy.\n\n keepdims : bool\n Same as NumPy.\n\n numpy_reduce_func : function\n A NumPy reducing function like ``np.sum`` or ``np.max``.\n \"\"\"\n if isinstance(axis, int):\n axis = [axis]\n else:\n axis = list(axis) if axis is not None else range(len(dat.shape))\n ret = dat\n for i in reversed(sorted(axis)):\n ret = numpy_reduce_func(ret, axis=i)\n if keepdims:\n keepdims_shape = list(dat.shape)\n for i in axis:\n keepdims_shape[i] = 1\n ret = ret.reshape(tuple(keepdims_shape))\n return ret\n\n\ndef find_max_violation(a, b, rtol=None, atol=None):\n \"\"\"Finds and returns the location of maximum violation.\"\"\"\n rtol = get_rtol(rtol)\n atol = get_atol(atol)\n diff = np.abs(a-b)\n tol = atol + rtol*np.abs(b)\n violation = diff/(tol+1e-20)\n loc = np.argmax(violation)\n idx = np.unravel_index(loc, violation.shape)\n return idx, np.max(violation)\n\n\ndef same(a, b):\n \"\"\"Test if two NumPy arrays are the same.\n\n Parameters\n ----------\n a : np.ndarray\n b : np.ndarray\n \"\"\"\n return np.array_equal(a, b)\n\ndef almost_equal(a, b, rtol=None, atol=None, equal_nan=False):\n \"\"\"Test if two numpy arrays are almost equal.\"\"\"\n # pylint: disable=unexpected-keyword-arg\n return np.allclose(a, b, rtol=get_rtol(rtol), atol=get_atol(atol), equal_nan=equal_nan)\n # pylint: enable=unexpected-keyword-arg\n\ndef assert_almost_equal(a, b, rtol=None, atol=None, names=('a', 'b'), equal_nan=False):\n \"\"\"Test that two numpy arrays are almost equal. Raise exception message if not.\n\n Parameters\n ----------\n a : np.ndarray\n b : np.ndarray\n threshold : None or float\n The checking threshold. 
Default threshold will be used if set to ``None``.\n \"\"\"\n rtol = get_rtol(rtol)\n atol = get_atol(atol)\n if almost_equal(a, b, rtol, atol, equal_nan=equal_nan):\n return\n index, rel = find_max_violation(a, b, rtol, atol)\n np.set_printoptions(threshold=4, suppress=True)\n msg = npt.build_err_msg([a, b],\n err_msg=\"Error %f exceeds tolerance rtol=%f, atol=%f. \"\n \" Location of maximum error:%s, a=%f, b=%f\"\n % (rel, rtol, atol, str(index), a[index], b[index]),\n names=names)\n raise AssertionError(msg)\n\n\ndef almost_equal_ignore_nan(a, b, rtol=None, atol=None):\n \"\"\"Test that two NumPy arrays are almost equal (ignoring NaN in either array).\n Combines a relative and absolute measure of approximate eqality.\n If either the relative or absolute check passes, the arrays are considered equal.\n Including an absolute check resolves issues with the relative check where all\n array values are close to zero.\n\n Parameters\n ----------\n a : np.ndarray\n b : np.ndarray\n rtol : None or float\n The relative threshold. Default threshold will be used if set to ``None``.\n atol : None or float\n The absolute threshold. Default threshold will be used if set to ``None``.\n \"\"\"\n a = np.copy(a)\n b = np.copy(b)\n nan_mask = np.logical_or(np.isnan(a), np.isnan(b))\n a[nan_mask] = 0\n b[nan_mask] = 0\n\n return almost_equal(a, b, rtol, atol)\n\n\ndef assert_almost_equal_ignore_nan(a, b, rtol=None, atol=None, names=('a', 'b')):\n \"\"\"Test that two NumPy arrays are almost equal (ignoring NaN in either array).\n Combines a relative and absolute measure of approximate eqality.\n If either the relative or absolute check passes, the arrays are considered equal.\n Including an absolute check resolves issues with the relative check where all\n array values are close to zero.\n\n Parameters\n ----------\n a : np.ndarray\n b : np.ndarray\n rtol : None or float\n The relative threshold. Default threshold will be used if set to ``None``.\n atol : None or float\n The absolute threshold. Default threshold will be used if set to ``None``.\n \"\"\"\n a = np.copy(a)\n b = np.copy(b)\n nan_mask = np.logical_or(np.isnan(a), np.isnan(b))\n a[nan_mask] = 0\n b[nan_mask] = 0\n\n assert_almost_equal(a, b, rtol, atol, names)\n\ndef assert_exception(f, exception_type, *args, **kwargs):\n \"\"\"Test that function f will throw an exception of type given by `exception_type`\"\"\"\n try:\n f(*args, **kwargs)\n assert(False)\n except exception_type:\n return\n\ndef retry(n):\n \"\"\"Retry n times before failing for stochastic test cases.\"\"\"\n assert n > 0\n def decorate(f):\n \"\"\"Decorate a test case.\"\"\"\n def wrapper(*args, **kwargs):\n \"\"\"Wrapper for tests function.\"\"\"\n for _ in range(n):\n try:\n f(*args, **kwargs)\n except AssertionError as e:\n err = e\n continue\n return\n raise err\n return wrapper\n return decorate\n\n\ndef simple_forward(sym, ctx=None, is_train=False, **inputs):\n \"\"\"A simple forward function for a symbol.\n\n Primarily used in doctest to test the functionality of a symbol.\n Takes NumPy arrays as inputs and outputs are also converted to NumPy arrays.\n\n Parameters\n ----------\n ctx : Context\n If ``None``, will take the default context.\n inputs : keyword arguments\n Mapping each input name to a NumPy array.\n\n Returns\n -------\n The result as a numpy array. 
Multiple results will\n be returned as a list of NumPy arrays.\n \"\"\"\n ctx = ctx or default_context()\n inputs = {k: array(v) for k, v in inputs.items()}\n exe = sym.bind(ctx, args=inputs)\n exe.forward(is_train=is_train)\n outputs = [x.asnumpy() for x in exe.outputs]\n if len(outputs) == 1:\n outputs = outputs[0]\n return outputs\n\n\ndef _parse_location(sym, location, ctx, dtype=default_dtype()):\n \"\"\"Parses the given location to a dictionary.\n\n Arguments of the provided op `sym` are used as dictionary keys\n and elements of `location` are used as values.\n\n Parameters\n ----------\n sym : Symbol\n Symbol containing op\n location : list or tuple or dict\n Argument values location\n\n - if type is list or tuple of `np.ndarray`\n inner elements are arrays correspoding to\n ``sym.list_arguments()``.\n - if type is dict of str -> `np.ndarray`\n maps the name of arguments to the corresponding `np.ndarray`.\n *In either case, value of all the arguments must be provided.*\n ctx : Context\n Device context.\n dtype: np.float16 or np.float32 or np.float64\n Datatype for mx.nd.array.\n\n Returns\n -------\n dict\n Dictionary with `sym` arguments as keys and `location` elements as\n values.\n\n Examples\n -------\n >>> a = mx.symbol.Variable('a')\n >>> b = mx.symbol.Variable('b')\n >>> l1 = np.ndarray([2,3])\n >>> l2 = np.ndarray([3,4])\n >>> _parse_location(a * b, [l1, l2], None)\n {'a': <NDArray 2x3 @cpu(0)>, 'b': <NDArray 3x4 @cpu(0)>}\n >>> _parse_location(a * b, {'a': l1, 'b': l2}, None)\n {'a': <NDArray 2x3 @cpu(0)>, 'b': <NDArray 3x4 @cpu(0)>}\n >>> _parse_location(a * b, {'a': l1}, None)\n ValueError: Symbol arguments and keys of the given location do not match.\n \"\"\"\n assert isinstance(location, (dict, list, tuple))\n assert dtype in (np.float16, np.float32, np.float64)\n if isinstance(location, dict):\n if set(location.keys()) != set(sym.list_arguments()):\n raise ValueError(\"Symbol arguments and keys of the given location do not match.\"\n \"symbol args:%s, location.keys():%s\"\n % (str(set(sym.list_arguments())), str(set(location.keys()))))\n else:\n location = {k: v for k, v in zip(sym.list_arguments(), location)}\n location = {k: mx.nd.array(v, ctx=ctx, dtype=dtype) if isinstance(v, np.ndarray) \\\n else v for k, v in location.items()}\n return location\n\n\ndef _parse_aux_states(sym, aux_states, ctx, dtype=default_dtype()):\n \"\"\"Parses the given auxiliary states to a dictionary.\n\n Auxiliary states of the provided op `sym` are used as dictionary\n keys and elements of `aux_states` are used as values.\n\n Parameters\n ----------\n sym : Symbol\n Symbol containing op\n aux_states : None or list or dict\n Aux states\n\n - if type is list or tuple of `np.ndarray`\n inner elements are arrays correspoding to\n ``sym.list_auxiliary_states()``.\n - if type is dict of str -> `np.ndarray`\n maps the name of arguments to the corresponding `np.ndarray`.\n *In either case, all aux states of `sym` must be provided.*\n ctx : Context\n Device context.\n dtype: np.float16 or np.float32 or np.float64\n Datatype for mx.nd.array.\n\n Returns\n -------\n dict\n Dictionary with `sym` aux states as keys and `aux_states` elements\n as values.\n\n Examples\n -------\n >>> data = mx.symbol.Variable('data')\n >>> weight = mx.sym.Variable(name='fc1_weight')\n >>> fc1 = mx.symbol.FullyConnected(data = data, weight=weight, name='fc1', num_hidden=128)\n >>> fc2 = mx.symbol.BatchNorm(fc1, name='batchnorm0')\n >>> mean_states = np.ones(3)\n >>> var_states = np.ones(3)\n >>> _parse_aux_states(fc2, 
[mean_states, var_states], None)\n {'batchnorm0_moving_var': <NDArray 3 @cpu(0)>, 'batchnorm0_moving_mean': <NDArray 3 @cpu(0)>}\n >>> _parse_aux_states(fc2, {'batchnorm0_moving_var': mean_states,\n ... 'batchnorm0_moving_mean': var_states}, None)\n {'batchnorm0_moving_var': <NDArray 3 @cpu(0)>, 'batchnorm0_moving_mean': <NDArray 3 @cpu(0)>}\n >>> _parse_aux_states(fc2, {'batchnorm0_moving_var': mean_states}, None)\n ValueError: Symbol aux_states names and given aux_states do not match.\n \"\"\"\n assert dtype in (np.float16, np.float32, np.float64)\n if aux_states is not None:\n if isinstance(aux_states, dict):\n if set(aux_states.keys()) != set(sym.list_auxiliary_states()):\n raise ValueError(\"Symbol aux_states names and given aux_states do not match.\"\n \"symbol aux_names:%s, aux_states.keys:%s\"\n % (str(set(sym.list_auxiliary_states())),\n str(set(aux_states.keys()))))\n elif isinstance(aux_states, (list, tuple)):\n aux_names = sym.list_auxiliary_states()\n aux_states = {k:v for k, v in zip(aux_names, aux_states)}\n aux_states = {k: mx.nd.array(v, ctx=ctx, dtype=dtype) for k, v in aux_states.items()}\n return aux_states\n\n\ndef numeric_grad(executor, location, aux_states=None, eps=1e-4,\n use_forward_train=True, dtype=default_dtype()):\n \"\"\"Calculates a numeric gradient via finite difference method.\n\n Class based on Theano's `theano.gradient.numeric_grad` [1]\n\n Parameters\n ----------\n executor : Executor\n Executor that computes the forward pass.\n location : list of numpy.ndarray or dict of str to numpy.ndarray\n Argument values used as location to compute gradient\n Maps the name of arguments to the corresponding numpy.ndarray.\n Value of all the arguments must be provided.\n aux_states : None or list of numpy.ndarray or dict of str to numpy.ndarray, optional\n Auxiliary states values used as location to compute gradient\n Maps the name of aux_states to the corresponding numpy.ndarray.\n Value of all the auxiliary arguments must be provided.\n eps : float, optional\n Epsilon for the finite-difference method.\n use_forward_train : bool, optional\n Whether to use `is_train=True` in testing.\n dtype: np.float16 or np.float32 or np.float64\n Datatype for mx.nd.array.\n\n References\n ---------\n ..[1] https://github.com/Theano/Theano/blob/master/theano/gradient.py\n \"\"\"\n def as_stype(var, stype, dtype):\n return mx.nd.cast_storage(mx.nd.array(var, dtype=dtype), stype=stype)\n\n assert dtype in (np.float16, np.float32, np.float64)\n approx_grads = {k: np.zeros(v.shape, dtype=dtype)\n for k, v in location.items()}\n for k, v in location.items():\n stype = executor.arg_dict[k].stype\n if stype == 'default':\n executor.arg_dict[k][:] = as_stype(v, stype, dtype=dtype)\n for k in location:\n location[k] = np.ascontiguousarray(location[k])\n for k, v in location.items():\n if v.dtype.kind != 'f':\n continue\n stype = executor.arg_dict[k].stype\n old_value = v.copy()\n for i in range(np.prod(v.shape)):\n # inplace update\n v.ravel()[i] += eps/2.0\n executor.arg_dict[k][:] = as_stype(v, stype, dtype=dtype)\n if aux_states is not None:\n for key, val in aux_states.items():\n executor.aux_dict[key][:] = val\n executor.forward(is_train=use_forward_train)\n f_peps = executor.outputs[0].asnumpy()\n\n v.ravel()[i] -= eps\n executor.arg_dict[k][:] = as_stype(v, stype, dtype=dtype)\n if aux_states is not None:\n for key, val in aux_states.items():\n adstype = executor.aux_dict[key].stype\n executor.aux_dict[key][:] = as_stype(val, adstype, dtype=dtype)\n 
executor.forward(is_train=use_forward_train)\n f_neps = executor.outputs[0].asnumpy()\n\n approx_grad = (f_peps - f_neps).sum() / eps\n approx_grads[k].ravel()[i] = approx_grad\n v.ravel()[i] = old_value.ravel()[i]\n # copy back the original value\n executor.arg_dict[k][:] = as_stype(old_value, stype, dtype=dtype)\n\n return approx_grads\n\n\ndef check_numeric_gradient(sym, location, aux_states=None, numeric_eps=1e-3, rtol=1e-2,\n atol=None, grad_nodes=None, use_forward_train=True, ctx=None,\n grad_stype_dict=None, dtype=default_dtype()):\n \"\"\"Verify an operation by checking backward pass via finite difference method.\n\n Based on Theano's `theano.gradient.verify_grad` [1]\n\n Parameters\n ----------\n sym : Symbol\n Symbol containing op to test\n location : list or tuple or dict\n Argument values used as location to compute gradient\n\n - if type is list of numpy.ndarray\n inner elements should have the same order as mxnet.sym.list_arguments().\n - if type is dict of str -> numpy.ndarray\n maps the name of arguments to the corresponding numpy.ndarray.\n *In either case, value of all the arguments must be provided.*\n aux_states : list or tuple or dict, optional\n The auxiliary states required when generating the executor for the symbol.\n numeric_eps : float, optional\n Delta for the finite difference method that approximates the gradient.\n check_eps : float, optional\n relative error eps used when comparing numeric grad to symbolic grad.\n grad_nodes : None or list or tuple or dict, optional\n Names of the nodes to check gradient on\n use_forward_train : bool\n Whether to use is_train=True when computing the finite-difference.\n ctx : Context, optional\n Check the gradient computation on the specified device.\n grad_stype_dict : dict of str->str, optional\n Storage type dictionary for gradient ndarrays.\n dtype: np.float16 or np.float32 or np.float64\n Datatype for mx.nd.array.\n\n References\n ---------\n ..[1] https://github.com/Theano/Theano/blob/master/theano/gradient.py\n \"\"\"\n assert dtype in (np.float16, np.float32, np.float64)\n if ctx is None:\n ctx = default_context()\n\n def random_projection(shape):\n \"\"\"Get a random weight matrix with not too small elements\n\n Parameters\n ----------\n shape : list or tuple\n \"\"\"\n # random_projection should not have elements too small,\n # otherwise too much precision is lost in numerical gradient\n plain = np.random.rand(*shape) + 0.1\n return plain\n\n location = _parse_location(sym=sym, location=location, ctx=ctx, dtype=dtype)\n location_npy = {k:v.asnumpy() for k, v in location.items()}\n aux_states = _parse_aux_states(sym=sym, aux_states=aux_states, ctx=ctx,\n dtype=dtype)\n if aux_states is not None:\n aux_states_npy = {k: v.asnumpy() for k, v in aux_states.items()}\n else:\n aux_states_npy = None\n if grad_nodes is None:\n grad_nodes = sym.list_arguments()\n grad_req = {k: 'write' for k in grad_nodes}\n elif isinstance(grad_nodes, (list, tuple)):\n grad_nodes = list(grad_nodes)\n grad_req = {k: 'write' for k in grad_nodes}\n elif isinstance(grad_nodes, dict):\n grad_req = grad_nodes.copy()\n grad_nodes = grad_nodes.keys()\n else:\n raise ValueError\n\n input_shape = {k: v.shape for k, v in location.items()}\n _, out_shape, _ = sym.infer_shape(**input_shape)\n proj = mx.sym.Variable(\"__random_proj\")\n out = sym * proj\n out = mx.sym.make_loss(out)\n\n location = dict(list(location.items()) +\n [(\"__random_proj\", mx.nd.array(random_projection(out_shape[0]),\n ctx=ctx, dtype=dtype))])\n args_grad_npy = dict([(k, 
np.random.normal(0, 0.01, size=location[k].shape))\n for k in grad_nodes]\n + [(\"__random_proj\", np.random.normal(0, 0.01, size=out_shape[0]))])\n\n args_grad = {k: mx.nd.array(v, ctx=ctx, dtype=dtype) for k, v in args_grad_npy.items()}\n if grad_stype_dict is not None:\n assert isinstance(grad_stype_dict, dict), \"grad_stype_dict must be a dict\"\n for k, v in grad_stype_dict.items():\n if k in args_grad and v in _STORAGE_TYPE_STR_TO_ID and v != 'default':\n # create an uninitialized sparse ndarray for executor\n # if the symbolic grad is expected to be zero, it should not be initialized at all\n args_grad[k] = mx.nd.zeros(args_grad[k].shape, args_grad[k].context,\n args_grad[k].dtype, v)\n\n executor = out.bind(ctx, grad_req=grad_req,\n args=location, args_grad=args_grad, aux_states=aux_states)\n\n inps = executor.arg_arrays\n if len(inps) != len(location):\n raise ValueError(\"Executor arg_arrays and and location len do not match.\"\n \"Got %d inputs and %d locations\"%(len(inps), len(location)))\n assert len(executor.outputs) == 1\n\n executor.forward(is_train=True)\n executor.backward()\n symbolic_grads = {k:executor.grad_dict[k].asnumpy() for k in grad_nodes}\n\n numeric_gradients = numeric_grad(\n executor, location_npy, aux_states_npy,\n eps=numeric_eps, use_forward_train=use_forward_train, dtype=dtype)\n\n for name in grad_nodes:\n fd_grad = numeric_gradients[name]\n orig_grad = args_grad_npy[name]\n sym_grad = symbolic_grads[name]\n if grad_req[name] == 'write':\n assert_almost_equal(fd_grad, sym_grad, rtol, atol,\n (\"NUMERICAL_%s\"%name, \"BACKWARD_%s\"%name))\n elif grad_req[name] == 'add':\n assert_almost_equal(fd_grad, sym_grad - orig_grad, rtol, atol,\n (\"NUMERICAL_%s\"%name, \"BACKWARD_%s\"%name))\n elif grad_req[name] == 'null':\n assert_almost_equal(orig_grad, sym_grad, rtol, atol,\n (\"NUMERICAL_%s\"%name, \"BACKWARD_%s\"%name))\n else:\n raise ValueError(\"Invalid grad_req %s for argument %s\"%(grad_req[name], name))\n\n\ndef check_symbolic_forward(sym, location, expected, rtol=1E-4, atol=None,\n aux_states=None, ctx=None, equal_nan=False,\n dtype=default_dtype()):\n \"\"\"Compares a symbol's forward results with the expected ones.\n Prints error messages if the forward results are not the same as the expected ones.\n\n Parameters\n ---------\n sym : Symbol\n output symbol\n location : list of np.ndarray or dict of str to np.ndarray\n The evaluation point\n\n - if type is list of np.ndarray\n Contains all the numpy arrays corresponding to `sym.list_arguments()`.\n - if type is dict of str to np.ndarray\n Contains the mapping between argument names and their values.\n expected : list of np.ndarray or dict of str to np.ndarray\n The expected output value\n\n - if type is list of np.ndarray\n Contains arrays corresponding to exe.outputs.\n - if type is dict of str to np.ndarray\n Contains mapping between sym.list_output() and exe.outputs.\n check_eps : float, optional\n Relative error to check to.\n aux_states : list of np.ndarray of dict, optional\n - if type is list of np.ndarray\n Contains all the NumPy arrays corresponding to sym.list_auxiliary_states\n - if type is dict of str to np.ndarray\n Contains the mapping between names of auxiliary states and their values.\n ctx : Context, optional\n running context\n dtype: np.float16 or np.float32 or np.float64\n Datatype for mx.nd.array.\n\n equal_nan: Boolean\n if True, `nan` is a valid value for checking equivalency (ie `nan` == `nan`)\n\n Example\n -------\n >>> shape = (2, 2)\n >>> lhs = mx.symbol.Variable('lhs')\n 
>>> rhs = mx.symbol.Variable('rhs')\n >>> sym_dot = mx.symbol.dot(lhs, rhs)\n >>> mat1 = np.array([[1, 2], [3, 4]])\n >>> mat2 = np.array([[5, 6], [7, 8]])\n >>> ret_expected = np.array([[19, 22], [43, 50]])\n >>> check_symbolic_forward(sym_dot, [mat1, mat2], [ret_expected])\n \"\"\"\n assert dtype in (np.float16, np.float32, np.float64)\n if ctx is None:\n ctx = default_context()\n\n location = _parse_location(sym=sym, location=location, ctx=ctx, dtype=dtype)\n aux_states = _parse_aux_states(sym=sym, aux_states=aux_states, ctx=ctx,\n dtype=dtype)\n if isinstance(expected, dict):\n expected = [expected[k] for k in sym.list_outputs()]\n args_grad_data = {k:mx.nd.empty(v.shape, ctx=ctx, dtype=dtype) for k, v in location.items()}\n\n executor = sym.bind(ctx=ctx, args=location, args_grad=args_grad_data, aux_states=aux_states)\n for g in executor.grad_arrays:\n g[:] = 0\n\n executor.forward(is_train=False)\n\n outputs = [x.asnumpy() for x in executor.outputs]\n for output_name, expect, output in zip(sym.list_outputs(), expected, outputs):\n assert_almost_equal(expect, output, rtol, atol,\n (\"EXPECTED_%s\"%output_name, \"FORWARD_%s\"%output_name),\n equal_nan=equal_nan)\n return executor.outputs\n\ndef check_symbolic_backward(sym, location, out_grads, expected, rtol=1e-5, atol=None,\n aux_states=None, grad_req='write', ctx=None, grad_stypes=None,\n equal_nan=False, dtype=default_dtype()):\n \"\"\"Compares a symbol's backward results with the expected ones.\n Prints error messages if the backward results are not the same as the expected results.\n\n Parameters\n ---------\n sym : Symbol\n output symbol\n location : list of np.ndarray or dict of str to np.ndarray\n The evaluation point\n\n - if type is list of np.ndarray\n Contains all the NumPy arrays corresponding to ``mx.sym.list_arguments``.\n - if type is dict of str to np.ndarray\n Contains the mapping between argument names and their values.\n out_grads : None or list of np.ndarray or dict of str to np.ndarray\n NumPys arrays corresponding to sym.outputs for incomming gradient.\n\n - if type is list of np.ndarray\n Contains arrays corresponding to ``exe.outputs``.\n - if type is dict of str to np.ndarray\n contains mapping between mxnet.sym.list_output() and Executor.outputs\n expected : list of np.ndarray or dict of str to np.ndarray\n expected gradient values\n\n - if type is list of np.ndarray\n Contains arrays corresponding to exe.grad_arrays\n - if type is dict of str to np.ndarray\n Contains mapping between ``sym.list_arguments()`` and exe.outputs.\n check_eps: float, optional\n Relative error to check to.\n aux_states : list of np.ndarray or dict of str to np.ndarray\n grad_req : str or list of str or dict of str to str, optional\n Gradient requirements. 'write', 'add' or 'null'.\n ctx : Context, optional\n Running context.\n grad_stypes: dict of str->str\n dictionary of mapping argument name to stype for the gradient\n equal_nan: Boolean\n if True, `nan` is a valid value for checking equivalency (ie `nan` == `nan`)\n dtype: np.float16 or np.float32 or np.float64\n Datatype for mx.nd.array.\n\n Example\n -------\n >>> lhs = mx.symbol.Variable('lhs')\n >>> rhs = mx.symbol.Variable('rhs')\n >>> sym_add = mx.symbol.elemwise_add(lhs, rhs)\n >>> mat1 = np.array([[1, 2], [3, 4]])\n >>> mat2 = np.array([[5, 6], [7, 8]])\n >>> grad1 = mx.nd.zeros(shape)\n >>> grad2 = mx.nd.zeros(shape)\n >>> exec_add = sym_add.bind(default_context(), args={'lhs': mat1, 'rhs': mat2},\n ... 
args_grad={'lhs': grad1, 'rhs': grad2}, grad_req={'lhs': 'write', 'rhs': 'write'})\n >>> exec_add.forward(is_train=True)\n >>> ograd = mx.nd.ones(shape)\n >>> grad_expected = ograd.copy().asnumpy()\n >>> check_symbolic_backward(sym_add, [mat1, mat2], [ograd], [grad_expected, grad_expected])\n \"\"\"\n assert dtype in (np.float16, np.float32, np.float64)\n if ctx is None:\n ctx = default_context()\n\n location = _parse_location(sym=sym, location=location, ctx=ctx, dtype=dtype)\n aux_states = _parse_aux_states(sym=sym, aux_states=aux_states, ctx=ctx,\n dtype=dtype)\n if isinstance(expected, (list, tuple)):\n expected = {k:v for k, v in zip(sym.list_arguments(), expected)}\n\n args_grad_npy = {k:np.random.normal(size=v.shape) for k, v in expected.items()}\n args_grad_data = {}\n for k, v in args_grad_npy.items():\n nd = mx.nd.array(v, ctx=ctx, dtype=dtype)\n if grad_stypes is not None and k in grad_stypes:\n stype = grad_stypes[k]\n if stype is not None and stype != 'default':\n out = create_sparse_array(v.shape, stype, density=0.0)\n else:\n out = nd\n args_grad_data[k] = out\n else:\n args_grad_data[k] = nd\n\n if isinstance(grad_req, str):\n grad_req = {k:grad_req for k in sym.list_arguments()}\n elif isinstance(grad_req, (list, tuple)):\n grad_req = {k:v for k, v in zip(sym.list_arguments(), grad_req)}\n\n executor = sym.bind(ctx=ctx, args=location, args_grad=args_grad_data,\n aux_states=aux_states, grad_req=grad_req)\n executor.forward(is_train=True)\n\n if isinstance(out_grads, (tuple, list)):\n outg = list()\n for arr in out_grads:\n if isinstance(arr, np.ndarray):\n outg.append(mx.nd.array(arr, ctx=ctx, dtype=dtype))\n else:\n outg.append(arr)\n out_grads = outg\n elif isinstance(out_grads, dict):\n outg = dict()\n for k, v in out_grads.items():\n if isinstance(v, np.ndarray):\n outg[k] = mx.nd.array(v, ctx=ctx, dtype=dtype)\n else:\n outg[k] = v\n out_grads = outg\n else:\n assert out_grads is None\n\n executor.backward(out_grads)\n\n grads = {k: v.asnumpy() for k, v in args_grad_data.items()}\n\n for name in expected:\n if grad_req[name] == 'write':\n assert_almost_equal(expected[name], grads[name], rtol, atol,\n (\"EXPECTED_%s\"%name, \"BACKWARD_%s\"%name),\n equal_nan=equal_nan)\n elif grad_req[name] == 'add':\n assert_almost_equal(expected[name], grads[name] - args_grad_npy[name],\n rtol, atol, (\"EXPECTED_%s\"%name, \"BACKWARD_%s\"%name),\n equal_nan=equal_nan)\n elif grad_req[name] == 'null':\n assert_almost_equal(args_grad_npy[name], grads[name],\n rtol, atol, (\"EXPECTED_%s\"%name, \"BACKWARD_%s\"%name),\n equal_nan=equal_nan)\n else:\n raise ValueError(\"Invalid grad_req %s for argument %s\"%(grad_req[name], name))\n return args_grad_data\n\ndef check_speed(sym, location=None, ctx=None, N=20, grad_req=None, typ=\"whole\",\n **kwargs):\n \"\"\"Check the running speed of a symbol.\n\n Parameters\n ----------\n sym : Symbol\n Symbol to run the speed test.\n location : none or dict of str to np.ndarray\n Location to evaluate the inner executor.\n ctx : Context\n Running context.\n N : int, optional\n Repeat times.\n grad_req : None or str or list of str or dict of str to str, optional\n Gradient requirements.\n typ : str, optional\n \"whole\" or \"forward\"\n\n - \"whole\"\n Test the forward_backward speed.\n - \"forward\"\n Only test the forward speed.\n \"\"\"\n if ctx is None:\n ctx = default_context()\n\n if grad_req is None:\n grad_req = 'write'\n if location is None:\n exe = sym.simple_bind(grad_req=grad_req, ctx=ctx, **kwargs)\n location = {k: 
np.random.normal(size=arr.shape, scale=1.0) for k, arr in\n exe.arg_dict.items()}\n else:\n assert isinstance(location, dict), \"Expect dict, get \\\"location\\\"=%s\" %str(location)\n exe = sym.simple_bind(grad_req=grad_req, ctx=ctx,\n **{k: v.shape for k, v in location.items()})\n\n for name, iarr in location.items():\n exe.arg_dict[name][:] = iarr.astype(exe.arg_dict[name].dtype)\n\n if typ == \"whole\":\n # Warm up\n exe.forward(is_train=True)\n exe.backward(out_grads=exe.outputs)\n for output in exe.outputs:\n output.wait_to_read()\n # Test forward + backward\n tic = time.time()\n for _ in range(N):\n exe.forward(is_train=True)\n exe.backward(out_grads=exe.outputs)\n mx.nd.waitall()\n toc = time.time()\n forward_backward_time = (toc - tic) * 1.0 / N\n return forward_backward_time\n elif typ == \"forward\":\n # Warm up\n exe.forward(is_train=False)\n for output in exe.outputs:\n output.wait_to_read()\n\n # Test forward only\n tic = time.time()\n for _ in range(N):\n exe.forward(is_train=False)\n mx.nd.waitall()\n toc = time.time()\n forward_time = (toc - tic) * 1.0 / N\n return forward_time\n else:\n raise ValueError('typ can only be \"whole\" or \"forward\".')\n\ndef check_consistency(sym, ctx_list, scale=1.0, grad_req='write',\n arg_params=None, aux_params=None, tol=None,\n raise_on_err=True, ground_truth=None, equal_nan=False,\n use_uniform=False, rand_type=np.float64):\n \"\"\"Check symbol gives the same output for different running context\n\n Parameters\n ----------\n sym : Symbol or list of Symbols\n Symbol(s) to run the consistency test.\n ctx_list : list\n Running context. See example for more detail.\n scale : float, optional\n Standard deviation of the inner normal distribution. Used in initialization.\n grad_req : str or list of str or dict of str to str\n Gradient requirement.\n use_unifrom: bool\n Optional, When flag set to true,\n random input data generated follows uniform distribution,\n not normal distribution\n rand_type: np.dtype\n casts the randomly generated data to this type\n Optional, when input data is passed via arg_params,\n defaults to np.float64 (numpy float default)\n\n Examples\n --------\n >>> # create the symbol\n >>> sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), name='conv')\n >>> # initialize the running context\n >>> ctx_list =\\\n[{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},\\\n {'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}},\\\n {'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float16}},\\\n {'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},\\\n {'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}}]\n >>> check_consistency(sym, ctx_list)\n >>> sym = mx.sym.Concat(name='concat', num_args=2)\n >>> ctx_list = \\\n[{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),\\\n 'type_dict': {'concat_arg0': np.float64, 'concat_arg1': np.float64}},\\\n {'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),\\\n 'type_dict': {'concat_arg0': np.float32, 'concat_arg1': np.float32}},\\\n {'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),\\\n 'type_dict': {'concat_arg0': np.float16, 'concat_arg1': np.float16}},\\\n {'ctx': mx.cpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),\\\n 'type_dict': {'concat_arg0': np.float64, 'concat_arg1': np.float64}},\\\n {'ctx': mx.cpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),\\\n 
'type_dict': {'concat_arg0': np.float32, 'concat_arg1': np.float32}}]\n >>> check_consistency(sym, ctx_list)\n \"\"\"\n if tol is None:\n tol = {np.dtype(np.float16): 1e-1,\n np.dtype(np.float32): 1e-3,\n np.dtype(np.float64): 1e-5,\n np.dtype(np.uint8): 0,\n np.dtype(np.int32): 0,\n np.dtype(np.int64): 0}\n elif isinstance(tol, numbers.Number):\n tol = {np.dtype(np.float16): tol,\n np.dtype(np.float32): tol,\n np.dtype(np.float64): tol,\n np.dtype(np.uint8): tol,\n np.dtype(np.int32): tol,\n np.dtype(np.int64): tol}\n\n assert len(ctx_list) > 1\n if isinstance(sym, Symbol):\n sym = [sym]*len(ctx_list)\n else:\n assert len(sym) == len(ctx_list)\n\n output_names = sym[0].list_outputs()\n arg_names = sym[0].list_arguments()\n exe_list = []\n for s, ctx in zip(sym, ctx_list):\n assert s.list_arguments() == arg_names\n assert s.list_outputs() == output_names\n exe_list.append(s.simple_bind(grad_req=grad_req, **ctx))\n\n arg_params = {} if arg_params is None else arg_params\n aux_params = {} if aux_params is None else aux_params\n for n, arr in exe_list[0].arg_dict.items():\n if n not in arg_params:\n if use_uniform:\n arg_params[n] = np.random.uniform(low=-0.92, high=0.92,\n size=arr.shape).astype(rand_type)\n else:\n arg_params[n] = np.random.normal(size=arr.shape,\n scale=scale).astype(rand_type)\n for n, arr in exe_list[0].aux_dict.items():\n if n not in aux_params:\n aux_params[n] = 0\n for exe in exe_list:\n for name, arr in exe.arg_dict.items():\n arr[:] = arg_params[name]\n for name, arr in exe.aux_dict.items():\n arr[:] = aux_params[name]\n # We need to initialize the gradient arrays if it's add.\n if (grad_req == \"add\"):\n for arr in exe.grad_arrays:\n arr[:] = np.zeros(arr.shape, dtype=arr.dtype)\n\n dtypes = [np.dtype(exe.outputs[0].dtype) for exe in exe_list]\n max_idx = np.argmax(dtypes)\n gt = ground_truth\n if gt is None:\n gt = exe_list[max_idx].output_dict.copy()\n if grad_req != 'null':\n gt.update(exe_list[max_idx].grad_dict)\n\n # test\n for exe in exe_list:\n exe.forward(is_train=False)\n\n for i, exe in enumerate(exe_list):\n if i == max_idx:\n continue\n for name, arr in zip(output_names, exe.outputs):\n gtarr = gt[name].astype(dtypes[i]).asnumpy()\n arr = arr.asnumpy()\n try:\n assert_almost_equal(arr, gtarr, rtol=tol[dtypes[i]], atol=tol[dtypes[i]],\n equal_nan=equal_nan)\n except AssertionError as e:\n print('Predict Err: ctx %d vs ctx %d at %s'%(i, max_idx, name))\n traceback.print_exc()\n if raise_on_err:\n raise e\n else:\n print(str(e))\n\n # train\n if grad_req != 'null':\n for exe in exe_list:\n exe.forward(is_train=True)\n exe.backward(exe.outputs)\n\n for i, exe in enumerate(exe_list):\n if i == max_idx:\n continue\n curr = zip(output_names + arg_names, exe.outputs + exe.grad_arrays)\n for name, arr in curr:\n if gt[name] is None:\n assert arr is None\n continue\n gtarr = gt[name].astype(dtypes[i]).asnumpy()\n arr = arr.asnumpy()\n try:\n assert_almost_equal(arr, gtarr, rtol=tol[dtypes[i]], atol=tol[dtypes[i]],\n equal_nan=equal_nan)\n except AssertionError as e:\n print('Train Err: ctx %d vs ctx %d at %s'%(i, max_idx, name))\n traceback.print_exc()\n if raise_on_err:\n raise e\n else:\n print(str(e))\n\n return gt\n\ndef list_gpus():\n \"\"\"Return a list of GPUs\n\n Returns\n -------\n list of int:\n If there are n GPUs, then return a list [0,1,...,n-1]. 
Otherwise returns\n [].\n \"\"\"\n re = ''\n nvidia_smi = ['nvidia-smi', '/usr/bin/nvidia-smi', '/usr/local/nvidia/bin/nvidia-smi']\n for cmd in nvidia_smi:\n try:\n re = subprocess.check_output([cmd, \"-L\"], universal_newlines=True)\n except OSError:\n pass\n return range(len([i for i in re.split('\\n') if 'GPU' in i]))\n\ndef download(url, fname=None, dirname=None, overwrite=False, retries=5):\n \"\"\"Download an given URL\n\n Parameters\n ----------\n\n url : str\n URL to download\n fname : str, optional\n filename of the downloaded file. If None, then will guess a filename\n from url.\n dirname : str, optional\n output directory name. If None, then guess from fname or use the current\n directory\n overwrite : bool, optional\n Default is false, which means skipping download if the local file\n exists. If true, then download the url to overwrite the local file if\n exists.\n retries : integer, default 5\n The number of times to attempt the download in case of failure or non 200 return codes\n\n Returns\n -------\n str\n The filename of the downloaded file\n \"\"\"\n\n assert retries >= 0, \"Number of retries should be at least 0\"\n\n if fname is None:\n fname = url.split('/')[-1]\n\n if dirname is None:\n dirname = os.path.dirname(fname)\n else:\n fname = os.path.join(dirname, fname)\n if dirname != \"\":\n if not os.path.exists(dirname):\n try:\n logging.info('create directory %s', dirname)\n os.makedirs(dirname)\n except OSError as exc:\n if exc.errno != errno.EEXIST:\n raise OSError('failed to create ' + dirname)\n\n if not overwrite and os.path.exists(fname):\n logging.info(\"%s exists, skipping download\", fname)\n return fname\n\n while retries+1 > 0:\n # Disable pyling too broad Exception\n # pylint: disable=W0703\n try:\n r = requests.get(url, stream=True)\n assert r.status_code == 200, \"failed to open %s\" % url\n with open(fname, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024):\n if chunk: # filter out keep-alive new chunks\n f.write(chunk)\n break\n except Exception as e:\n retries -= 1\n if retries <= 0:\n raise e\n else:\n print(\"download failed, retrying, {} attempt{} left\"\n .format(retries, 's' if retries > 1 else ''))\n logging.info(\"downloaded %s into %s successfully\", url, fname)\n return fname\n\ndef get_mnist():\n \"\"\"Download and load the MNIST dataset\n\n Returns\n -------\n dict\n A dict containing the data\n \"\"\"\n def read_data(label_url, image_url):\n with gzip.open(mx.test_utils.download(label_url)) as flbl:\n struct.unpack(\">II\", flbl.read(8))\n label = np.frombuffer(flbl.read(), dtype=np.int8)\n with gzip.open(mx.test_utils.download(image_url), 'rb') as fimg:\n _, _, rows, cols = struct.unpack(\">IIII\", fimg.read(16))\n image = np.frombuffer(fimg.read(), dtype=np.uint8).reshape(len(label), rows, cols)\n image = image.reshape(image.shape[0], 1, 28, 28).astype(np.float32)/255\n return (label, image)\n\n # changed to mxnet.io for more stable hosting\n # path = 'http://yann.lecun.com/exdb/mnist/'\n path = 'http://data.mxnet.io/data/mnist/'\n (train_lbl, train_img) = read_data(\n path+'train-labels-idx1-ubyte.gz', path+'train-images-idx3-ubyte.gz')\n (test_lbl, test_img) = read_data(\n path+'t10k-labels-idx1-ubyte.gz', path+'t10k-images-idx3-ubyte.gz')\n return {'train_data':train_img, 'train_label':train_lbl,\n 'test_data':test_img, 'test_label':test_lbl}\n\ndef get_mnist_pkl():\n \"\"\"Downloads MNIST dataset as a pkl.gz into a directory in the current directory\n with the name `data`\n \"\"\"\n if not os.path.isdir(\"data\"):\n 
os.makedirs('data')\n if not os.path.exists('data/mnist.pkl.gz'):\n download('http://deeplearning.net/data/mnist/mnist.pkl.gz',\n dirname='data')\n\ndef get_mnist_ubyte():\n \"\"\"Downloads ubyte version of the MNIST dataset into a directory in the current directory\n with the name `data` and extracts all files in the zip archive to this directory.\n \"\"\"\n if not os.path.isdir(\"data\"):\n os.makedirs('data')\n if (not os.path.exists('data/train-images-idx3-ubyte')) or \\\n (not os.path.exists('data/train-labels-idx1-ubyte')) or \\\n (not os.path.exists('data/t10k-images-idx3-ubyte')) or \\\n (not os.path.exists('data/t10k-labels-idx1-ubyte')):\n zip_file_path = download('http://data.mxnet.io/mxnet/data/mnist.zip',\n dirname='data')\n with zipfile.ZipFile(zip_file_path) as zf:\n zf.extractall('data')\n\ndef get_cifar10():\n \"\"\"Downloads CIFAR10 dataset into a directory in the current directory with the name `data`,\n and then extracts all files into the directory `data/cifar`.\n \"\"\"\n if not os.path.isdir(\"data\"):\n os.makedirs('data')\n if (not os.path.exists('data/cifar/train.rec')) or \\\n (not os.path.exists('data/cifar/test.rec')) or \\\n (not os.path.exists('data/cifar/train.lst')) or \\\n (not os.path.exists('data/cifar/test.lst')):\n zip_file_path = download('http://data.mxnet.io/mxnet/data/cifar10.zip',\n dirname='data')\n with zipfile.ZipFile(zip_file_path) as zf:\n zf.extractall('data')\n\ndef get_mnist_iterator(batch_size, input_shape, num_parts=1, part_index=0):\n \"\"\"Returns training and validation iterators for MNIST dataset\n \"\"\"\n\n get_mnist_ubyte()\n flat = False if len(input_shape) == 3 else True\n\n train_dataiter = mx.io.MNISTIter(\n image=\"data/train-images-idx3-ubyte\",\n label=\"data/train-labels-idx1-ubyte\",\n input_shape=input_shape,\n batch_size=batch_size,\n shuffle=True,\n flat=flat,\n num_parts=num_parts,\n part_index=part_index)\n\n val_dataiter = mx.io.MNISTIter(\n image=\"data/t10k-images-idx3-ubyte\",\n label=\"data/t10k-labels-idx1-ubyte\",\n input_shape=input_shape,\n batch_size=batch_size,\n flat=flat,\n num_parts=num_parts,\n part_index=part_index)\n\n return (train_dataiter, val_dataiter)\n\ndef get_zip_data(data_dir, url, data_origin_name):\n \"\"\"Download and extract zip data.\n\n Parameters\n ----------\n\n data_dir : str\n Absolute or relative path of the directory name to store zip files\n url : str\n URL to download data from\n data_origin_name : str\n Name of the downloaded zip file\n\n Examples\n --------\n >>> get_zip_data(\"data_dir\",\n \"http://files.grouplens.org/datasets/movielens/ml-10m.zip\",\n \"ml-10m.zip\")\n \"\"\"\n data_origin_name = os.path.join(data_dir, data_origin_name)\n if not os.path.exists(data_origin_name):\n download(url, dirname=data_dir, overwrite=False)\n zip_file = zipfile.ZipFile(data_origin_name)\n zip_file.extractall(path=data_dir)\n\ndef get_bz2_data(data_dir, data_name, url, data_origin_name):\n \"\"\"Download and extract bz2 data.\n\n Parameters\n ----------\n\n data_dir : str\n Absolute or relative path of the directory name to store bz2 files\n data_name : str\n Name of the output file in which bz2 contents will be extracted\n url : str\n URL to download data from\n data_origin_name : str\n Name of the downloaded b2 file\n\n Examples\n --------\n >>> get_bz2_data(\"data_dir\", \"kdda.t\",\n \"https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/kdda.t.bz2\",\n \"kdda.t.bz2\")\n \"\"\"\n\n data_name = os.path.join(data_dir, data_name)\n data_origin_name = os.path.join(data_dir, 
data_origin_name)\n if not os.path.exists(data_name):\n download(url, fname=data_origin_name, dirname=data_dir, overwrite=False)\n bz_file = bz2.BZ2File(data_origin_name, 'rb')\n with open(data_name, 'wb') as fout:\n for line in bz_file:\n fout.write(line)\n bz_file.close()\n os.remove(data_origin_name)\n\ndef set_env_var(key, val, default_val=\"\"):\n \"\"\"Set environment variable\n\n Parameters\n ----------\n\n key : str\n Env var to set\n val : str\n New value assigned to the env var\n default_val : str, optional\n Default value returned if the env var doesn't exist\n\n Returns\n -------\n str\n The value of env var before it is set to the new value\n \"\"\"\n prev_val = os.environ.get(key, default_val)\n os.environ[key] = val\n return prev_val\n\ndef same_array(array1, array2):\n \"\"\"Check whether two NDArrays sharing the same memory block\n\n Parameters\n ----------\n\n array1 : NDArray\n First NDArray to be checked\n array2 : NDArray\n Second NDArray to be checked\n\n Returns\n -------\n bool\n Whether two NDArrays share the same memory\n \"\"\"\n array1[:] += 1\n if not same(array1.asnumpy(), array2.asnumpy()):\n array1[:] -= 1\n return False\n array1[:] -= 1\n return same(array1.asnumpy(), array2.asnumpy())\n\n@contextmanager\ndef discard_stderr():\n \"\"\"\n Discards error output of a routine if invoked as:\n\n with discard_stderr():\n ...\n \"\"\"\n with open(os.devnull, 'w') as bit_bucket:\n try:\n stderr_fileno = sys.stderr.fileno()\n old_stderr = os.dup(stderr_fileno)\n try:\n os.dup2(bit_bucket.fileno(), stderr_fileno)\n yield\n finally:\n os.dup2(old_stderr, stderr_fileno)\n except AttributeError:\n # On some systems is stderr not a file descriptor but actually a virtual pipeline\n # that can not be copied\n yield\n\n\nclass DummyIter(mx.io.DataIter):\n \"\"\"A dummy iterator that always returns the same batch of data\n (the first data batch of the real data iter). This is usually used for speed testing.\n\n Parameters\n ----------\n real_iter: mx.io.DataIter\n The real data iterator where the first batch of data comes from\n \"\"\"\n def __init__(self, real_iter):\n super(DummyIter, self).__init__()\n self.real_iter = real_iter\n self.provide_data = real_iter.provide_data\n self.provide_label = real_iter.provide_label\n self.batch_size = real_iter.batch_size\n self.the_batch = next(real_iter)\n\n def __iter__(self):\n return self\n\n def next(self):\n \"\"\"Get a data batch from iterator. 
The first data batch of real iter is always returned.\n StopIteration will never be raised.\n\n Returns\n -------\n DataBatch\n The data of next batch.\n \"\"\"\n return self.the_batch\n\ndef gen_buckets_probs_with_ppf(ppf, nbuckets):\n \"\"\"Generate the buckets and probabilities for chi_square test when the ppf (Quantile function)\n is specified.\n\n Parameters\n ----------\n ppf : function\n The Quantile function that takes a probability and maps it back to a value.\n It's the inverse of the cdf function\n nbuckets : int\n size of the buckets\n\n Returns\n -------\n buckets : list of tuple\n The generated buckets\n probs : list\n The generate probabilities\n \"\"\"\n assert nbuckets > 0\n probs = [1.0 / nbuckets for _ in range(nbuckets)]\n buckets = [(ppf(i / float(nbuckets)), ppf((i + 1) / float(nbuckets))) for i in range(nbuckets)]\n return buckets, probs\n\ndef mean_check(generator, mu, sigma, nsamples=1000000):\n \"\"\"Test the generator by matching the mean.\n\n We test the sample mean by checking if it falls inside the range\n (mu - 3 * sigma / sqrt(n), mu + 3 * sigma / sqrt(n))\n\n References::\n\n @incollection{goucher2009beautiful,\n title={Beautiful Testing: Leading Professionals Reveal How They Improve Software},\n author={Goucher, Adam and Riley, Tim},\n year={2009},\n chapter=10\n }\n\n Examples::\n\n generator = lambda x: np.random.normal(0, 1.0, size=x)\n mean_check_ret = mean_check(generator, 0, 1.0)\n\n Parameters\n ----------\n generator : function\n The generator function. It's expected to generate N i.i.d samples by calling generator(N).\n mu : float\n sigma : float\n nsamples : int\n\n Returns\n -------\n ret : bool\n Whether the mean test succeeds\n \"\"\"\n samples = np.array(generator(nsamples))\n sample_mean = samples.mean()\n ret = (sample_mean > mu - 3 * sigma / np.sqrt(nsamples)) and\\\n (sample_mean < mu + 3 * sigma / np.sqrt(nsamples))\n return ret\n\ndef get_im2rec_path(home_env=\"MXNET_HOME\"):\n \"\"\"Get path to the im2rec.py tool\n\n Parameters\n ----------\n\n home_env : str\n Env variable that holds the path to the MXNET folder\n\n Returns\n -------\n str\n The path to im2rec.py\n \"\"\"\n # Check first if the path to MXNET is passed as an env variable\n if home_env in os.environ:\n mxnet_path = os.environ[home_env]\n else:\n # Else use currently imported mxnet as reference\n mxnet_path = os.path.dirname(mx.__file__)\n # If MXNet was installed through pip, the location of im2rec.py\n im2rec_path = os.path.join(mxnet_path, 'tools', 'im2rec.py')\n if os.path.isfile(im2rec_path):\n return im2rec_path\n # If MXNet has been built locally\n im2rec_path = os.path.join(mxnet_path, '..', '..', 'tools', 'im2rec.py')\n if os.path.isfile(im2rec_path):\n return im2rec_path\n raise IOError('Could not find path to tools/im2rec.py')\n\ndef var_check(generator, sigma, nsamples=1000000):\n \"\"\"Test the generator by matching the variance.\n It will need a large number of samples and is not recommended to use\n\n We test the sample variance by checking if it falls inside the range\n (sigma^2 - 3 * sqrt(2 * sigma^4 / (n-1)), sigma^2 + 3 * sqrt(2 * sigma^4 / (n-1)))\n\n References::\n\n @incollection{goucher2009beautiful,\n title={Beautiful Testing: Leading Professionals Reveal How They Improve Software},\n author={Goucher, Adam and Riley, Tim},\n year={2009},\n chapter=10\n }\n\n Examples::\n\n generator = lambda x: np.random.normal(0, 1.0, size=x)\n var_check_ret = var_check(generator, 0, 1.0)\n\n Parameters\n ----------\n generator : function\n The generator 
function. It's expected to generate N i.i.d samples by calling generator(N).\n sigma : float\n nsamples : int\n\n Returns\n -------\n ret : bool\n Whether the variance test succeeds\n \"\"\"\n samples = np.array(generator(nsamples))\n sample_var = samples.var(ddof=1)\n ret = (sample_var > sigma ** 2 - 3 * np.sqrt(2 * sigma ** 4 / (nsamples - 1))) and\\\n (sample_var < sigma ** 2 + 3 * np.sqrt(2 * sigma ** 4 / (nsamples - 1)))\n return ret\n\ndef chi_square_check(generator, buckets, probs, nsamples=1000000):\n \"\"\"Run the chi-square test for the generator. The generator can be both continuous and discrete.\n If the generator is continuous, the buckets should contain tuples of (range_min, range_max) and\n the probs should be the corresponding ideal probability within the specific ranges.\n Otherwise, the buckets should be the possible output of the discrete distribution and the probs\n should be groud-truth probability.\n\n Usually the user is required to specify the probs parameter.\n\n After obtatining the p value, we could further use the standard p > 0.05 threshold to get\n the final result.\n\n Examples::\n buckets, probs = gen_buckets_probs_with_ppf(lambda x: ss.norm.ppf(x, 0, 1), 5)\n generator = lambda x: np.random.normal(0, 1.0, size=x)\n p = chi_square_check(generator=generator, buckets=buckets, probs=probs)\n assert(p > 0.05)\n\n Parameters\n ----------\n generator: function\n A function that is assumed to generate i.i.d samples from a specific distribution.\n generator(N) should generate N random samples.\n buckets: list of tuple or list of number\n The buckets to run the chi-square the test. Make sure that the buckets cover\n the whole range of the distribution. Also, the buckets must be in ascending order and have\n no intersection\n probs: list or tuple\n The ground-truth probability of the random value fall in a specific bucket.\n nsamples:int\n The number of samples to generate for the testing\n\n Returns\n -------\n p : float\n p value that the generator has the expected distribution.\n A higher value indicates a larger confidence\n obs_freq : list\n Observed frequency of buckets\n expected_freq : list\n The expected (ground-truth) frequency of the buckets\n \"\"\"\n if not ss:\n raise ImportError(\"scipy is not available.\"\n \" Please check if the scipy python bindings are installed.\")\n assert isinstance(buckets, list)\n samples = generator(nsamples)\n assert len(probs) == len(buckets)\n if isinstance(buckets[0], (list, tuple)):\n # Check whether the buckets are valid and fill them into a npy array\n continuous_dist = True\n buckets_npy = np.zeros((len(buckets) * 2, ), dtype=np.float32)\n for i, _ in enumerate(buckets):\n assert(buckets[i][0] <= buckets[i][1])\n if i < len(buckets) - 1:\n assert(buckets[i][1] <= buckets[i + 1][0])\n buckets_npy[i * 2] = buckets[i][0]\n buckets_npy[i * 2 + 1] = buckets[i][1]\n else:\n continuous_dist = False\n buckets_npy = np.array(buckets)\n expected_freq = (nsamples * np.array(probs, dtype=np.float32)).astype(np.int32)\n if continuous_dist:\n sample_bucket_ids = np.searchsorted(buckets_npy, samples, side='right')\n else:\n sample_bucket_ids = samples\n if continuous_dist:\n sample_bucket_ids = sample_bucket_ids // 2\n obs_freq = np.zeros(shape=len(buckets), dtype=np.int)\n for i in range(len(buckets)):\n obs_freq[i] = (sample_bucket_ids == i).sum()\n _, p = ss.chisquare(f_obs=obs_freq, f_exp=expected_freq)\n return p, obs_freq, expected_freq\n\ndef verify_generator(generator, buckets, probs, nsamples=1000000, nrepeat=5, 
success_rate=0.15):\n \"\"\"Verify whether the generator is correct using chi-square testing.\n\n The test is repeated for \"nrepeat\" times and we check if the success rate is\n above the threshold (25% by default).\n\n Parameters\n ----------\n generator: function\n A function that is assumed to generate i.i.d samples from a specific distribution.\n generator(N) should generate N random samples.\n buckets: list of tuple or list of number\n The buckets to run the chi-square the test. Make sure that the buckets cover\n the whole range of the distribution. Also, the buckets must be in ascending order and\n have no intersection\n probs: list or tuple\n The ground-truth probability of the random value fall in a specific bucket.\n nsamples: int\n The number of samples to generate for the testing\n nrepeat: int\n The times to repeat the test\n success_rate: float\n The desired success rate\n\n Returns\n -------\n cs_ret_l: list\n The p values of the chi-square test.\n \"\"\"\n cs_ret_l = []\n obs_freq_l = []\n expected_freq_l = []\n for _ in range(nrepeat):\n cs_ret, obs_freq, expected_freq = chi_square_check(generator=generator, buckets=buckets,\n probs=probs, nsamples=nsamples)\n cs_ret_l.append(cs_ret)\n obs_freq_l.append(obs_freq)\n expected_freq_l.append(expected_freq)\n success_num = (np.array(cs_ret_l) > 0.05).sum()\n if success_num < nrepeat * success_rate:\n raise AssertionError(\"Generator test fails, Chi-square p=%s, obs_freq=%s, expected_freq=%s.\"\n \"\\nbuckets=%s, probs=%s\"\n % (str(cs_ret_l), str(obs_freq_l), str(expected_freq_l),\n str(buckets), str(probs)))\n return cs_ret_l\n"
] |
[
[
"numpy.sqrt",
"numpy.asarray",
"numpy.dtype",
"numpy.max",
"numpy.random.randn",
"numpy.searchsorted",
"numpy.random.randint",
"numpy.copy",
"numpy.argmax",
"numpy.unravel_index",
"numpy.zeros",
"numpy.isnan",
"numpy.ascontiguousarray",
"numpy.random.rand",
"numpy.array",
"numpy.abs",
"numpy.array_equal",
"numpy.nditer",
"numpy.set_printoptions",
"scipy.sparse.rand",
"numpy.random.shuffle",
"numpy.argwhere",
"numpy.random.normal",
"numpy.prod",
"scipy.stats.chisquare",
"numpy.random.uniform"
]
] |
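The test-utility code in the row above centers on two ideas: a central-difference numeric gradient (`numeric_grad` perturbs each element by plus and minus `eps/2` and divides the output difference by `eps`) and a combined tolerance check (`find_max_violation` scales `|a - b|` by `atol + rtol*|b|`). The sketch below is a minimal, framework-free illustration of both, assuming a toy quadratic loss; the function names and tolerance values here are illustrative and are not part of the MXNet API.

```python
import numpy as np

def numeric_grad(f, x, eps=1e-4):
    """Approximate df/dx element-wise with a central difference."""
    grad = np.zeros_like(x)
    flat_x, flat_g = x.ravel(), grad.ravel()   # views, so edits hit x and grad
    for i in range(flat_x.size):
        orig = flat_x[i]
        flat_x[i] = orig + eps / 2.0
        f_plus = f(x)
        flat_x[i] = orig - eps / 2.0
        f_minus = f(x)
        flat_x[i] = orig                        # restore the original value
        flat_g[i] = (f_plus - f_minus) / eps
    return grad

def max_violation(a, b, rtol=1e-2, atol=1e-4):
    """Largest |a - b| measured against the combined tolerance atol + rtol*|b|."""
    return np.max(np.abs(a - b) / (atol + rtol * np.abs(b) + 1e-20))

x = np.random.rand(3, 2)
loss = lambda v: np.sum(v ** 2)                 # analytic gradient is 2*x
assert max_violation(numeric_grad(loss, x), 2 * x) < 1.0
```

Because a finite difference is only accurate to a few digits, `check_numeric_gradient` in the row above likewise uses a loose default relative tolerance (rtol=1e-2) rather than anything near machine precision.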
dabito-san/Diabetic_Retinopathy_Detection
|
[
"ec1ea85773cf401d439f47fd9c30a56f0882b6bb"
] |
[
"utils/util.py"
] |
[
"# internal imports\nfrom configs import config\n\n# external imports\nfrom numpy.random import RandomState\nimport torch\nfrom torch.utils.data import DataLoader, WeightedRandomSampler\nimport numpy as np\n\n\ndef get_images_mean_std(dataloader):\n channels_sum, channels_squared_sum, num_batches = 0, 0, 0\n\n for data, _, _ in dataloader:\n channels_sum += torch.mean(data.float(), dim=[0, 2, 3])\n channels_squared_sum += torch.mean(data.float() ** 2, dim=[0, 2, 3])\n num_batches += 1\n\n mean = channels_sum / num_batches\n std = (channels_squared_sum / num_batches - mean ** 2) ** 0.5\n\n return mean, std\n\n\ndef train_valid_split(csv_path, save_path):\n data = pd.read_csv(csv_path)\n rng = RandomState()\n\n train = data.sample(frac=config.PERCENT_TRAIN, random_state=rng)\n valid = data.loc[~data.index.isin(train.index)]\n\n train.to_csv(save_path + 'train.csv', index=False)\n valid.to_csv(save_path + 'validation.csv', index=False)\n\n print(f'Total number of images: {data.shape[0]}')\n print(f'Number of training images: {train.shape[0]}')\n print(f'Number of validation images: {valid.shape[0]}')\n\n\ndef test_set(test_images_path, csv_save_path):\n images = []\n for image in os.listdir(test_images_path):\n images.append(str(image))\n images = pd.DataFrame(images, columns=['image'])\n images.to_csv(csv_save_path + 'test.csv', index=False)\n\n\ndef get_accuracy(loader, model, device='cuda'):\n model.eval()\n num_correct = 0\n num_samples = 0\n all_preds, all_labels = [], []\n\n for x, y, _ in loader:\n x = x.to(device=device)\n y = y.to(device=device)\n\n predictions = model.forward(x)\n predictions[predictions < 0.5] = 0\n predictions[(predictions >= 0.5) & (predictions < 1.5)] = 1\n predictions[(predictions >= 1.5) & (predictions < 2.5)] = 2\n predictions[(predictions >= 2.5) & (predictions < 3.5)] = 3\n predictions[(predictions >= 3.5)] = 4\n predictions = predictions.long().view(-1)\n y = y.view(-1)\n\n num_correct += (predictions == y).sum()\n num_samples += predictions.shape[0]\n\n all_preds.append(predictions.detach().cpu().numpy())\n all_labels.append(y.detach().cpu().numpy())\n\n print(f'[{num_correct}/{num_samples}] sample predictions are correct')\n print(f'Accuracy: {float(num_correct) / float(num_samples) * 100:.2f}%')\n\n model.train()\n\n return np.concatenate(all_preds, axis=0), np.concatenate(\n all_labels, axis=0)\n\n\ndef save_checkpoint(state, filename='my_checkpoint.pth.tar'):\n print('Saving checkpoint...')\n torch.save(state, filename)\n print('=> Checkpoint saved')\n\n\ndef load_checkpoint(checkpoint, model, optimizer, lr):\n print(\"=> Loading checkpoint\")\n model.load_state_dict(checkpoint[\"state_dict\"])\n #optimizer.load_state_dict(checkpoint[\"optimizer\"])\n\n # Reset learning rate\n for param_group in optimizer.param_groups:\n param_group[\"lr\"] = lr\n \n print(\"Checkpoint loaded.\")\n\n\ndef create_sampler(labels_list):\n labels_count = {\n '0': 0,\n '1': 0,\n '2': 0,\n '3': 0,\n '4': 0\n }\n for label in labels_list:\n labels_count[str(label)] += 1\n\n classes_weights = [v / len(labels_list) for v in labels_count.values()]\n samples_weights = torch.from_numpy(np.array([classes_weights[l] for l in labels_list]))\n\n # Instanciate sampler object\n sampler = WeightedRandomSampler(samples_weights, len(samples_weights))\n\n return sampler\n\n\ndef make_prediction(model, loader, output_csv=\"submission.csv\"):\n preds = []\n filenames = []\n model.eval()\n\n for x, y, files in tqdm(loader, leave=False):\n x = x.to(config.DEVICE)\n with torch.no_grad():\n 
predictions = model(x)\n\n predictions[predictions < 0.5] = 0\n predictions[(predictions >= 0.5) & (predictions < 1.5)] = 1\n predictions[(predictions >= 1.5) & (predictions < 2.5)] = 2\n predictions[(predictions >= 2.5) & (predictions < 3.5)] = 3\n predictions[(predictions >= 3.5)] = 4\n\n predictions = predictions.long().view(-1)\n preds.append(predictions.cpu().numpy())\n filenames += files\n\n df = pd.DataFrame({\"image\": filenames, \"level\": np.concatenate(preds, axis=0)})\n df.to_csv(output_csv, index=False)\n model.train()\n print(\"Done with predictions\")\n\n\nif __name__ == '__main__':\n from dataloaders.dataset import *\n\n dataset = DRDataset(images_folder='../data/train/',\n csv_path='../data/trainLabels.csv',\n mode='validation',\n transform=config.transformations['validation'])\n dataloader = DataLoader(dataset=dataset, batch_size=64, shuffle=False)\n\n mean, std = get_images_mean_std(dataloader)\n print(mean)\n print(std)\n"
] |
[
[
"torch.utils.data.DataLoader",
"numpy.concatenate",
"torch.no_grad",
"numpy.array",
"numpy.random.RandomState",
"torch.save"
]
] |
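Two things are worth flagging in the utils/util.py row above. First, the file calls `pd.read_csv`, `pd.DataFrame`, `os.listdir` and `tqdm` but never imports pandas, os or tqdm, so those imports would need to be added before the helpers run. Second, `create_sampler` feeds `WeightedRandomSampler` per-sample weights equal to the class frequency (`labels_count[c] / len(labels_list)`), which makes the majority classes even more likely to be drawn; the conventional class-balancing setup uses the inverse frequency. The sketch below shows that convention with a made-up label list; it is an illustration of the usual recipe, not a drop-in replacement for the repository's function.

```python
import numpy as np
import torch
from torch.utils.data import WeightedRandomSampler

labels = np.array([0, 0, 0, 0, 1, 1, 2])            # toy, imbalanced label list
class_counts = np.bincount(labels)                   # [4, 2, 1]
class_weights = 1.0 / class_counts                   # rarer class -> larger weight
sample_weights = torch.from_numpy(class_weights[labels]).double()

sampler = WeightedRandomSampler(sample_weights,
                                num_samples=len(sample_weights),
                                replacement=True)
# DataLoader(dataset, sampler=sampler, ...) then draws each class with roughly
# equal probability per epoch instead of mirroring the raw class frequencies.
```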
pingxue-hfut/SD-BNN
|
[
"c22be1799b199cba5233fedbe9e38aee6da53782"
] |
[
"imagenet/birealnet.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom utils import se_module\n\n'''\nReference:\n[20] Zechun Liu, Baoyuan Wu, Wenhan Luo, Xin Yang, Wei Liu,\nand Kwang-Ting Cheng. Bi-real net: Enhancing the performance of 1-bit cnns with improved representational capability and advanced training algorithm. In Computer Vision\n- ECCV 2018 - 15th European Conference, Munich, Germany, September 8-14, 2018, Proceedings, Part XV, pages\n747–763, 2018\n[21] Brais Mart´ınez, Jing Yang, Adrian Bulat, and Georgios Tzimiropoulos. Training binary neural networks with real-to-binary convolutions. In 8th International Conference\non Learning Representations, ICLR 2020, Addis Ababa,\nEthiopia, April 26-30, 2020, 2020.\n\n#We train it from scratch at first stage and start second stage based on the resulting model of first stage. For both stages ,we use Adam solver,and the start learning rate is 0.001 and linearly decay to 0 after 256 epoches, \nthe batchsize is set to 512, using ResNet-34 as the teacher model.\n'''\n\n__all__ = ['birealnet18', 'birealnet34']\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\ndef conv1x1(in_planes, out_planes, stride=1):\n \"\"\"1x1 convolution\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)\n\nclass BinaryActivation(nn.Module):\n def __init__(self):\n super(BinaryActivation, self).__init__()\n\n def forward(self, x):\n out_forward = torch.sign(x)\n #out_e1 = (x^2 + 2*x)\n #out_e2 = (-x^2 + 2*x)\n out_e_total = 0\n mask1 = x < -1\n mask2 = x < 0\n mask3 = x < 1\n out1 = (-1) * mask1.type(torch.float32) + (x*x + 2*x) * (1-mask1.type(torch.float32))\n out2 = out1 * mask2.type(torch.float32) + (-x*x + 2*x) * (1-mask2.type(torch.float32))\n out3 = out2 * mask3.type(torch.float32) + 1 * (1-mask3.type(torch.float32))\n out = out_forward.detach() - out3.detach() + out3\n\n return out\n\n\nclass HardBinaryConv(nn.Module):\n def __init__(self, in_chn, out_chn, kernel_size=3, stride=1, padding=1):\n super(HardBinaryConv, self).__init__()\n self.stride = stride\n self.padding = padding\n self.number_of_weights = in_chn * out_chn * kernel_size * kernel_size\n self.shape = (out_chn, in_chn, kernel_size, kernel_size)\n self.weights = nn.Parameter(torch.rand((self.number_of_weights,1)) * 0.001, requires_grad=True)\n self.alpha = nn.Parameter(torch.zeros(out_chn), requires_grad=True)\n self.beta = se_module.SELayer(in_chn)\n self.binary_activation = BinaryActivation()\n\n def forward(self, x):\n w = self.weights.view(self.shape)\n '''\n First stage, keep weights full-precision,and binarize activaons.\n Second stage, both weights and activations are binarized, and uncomment the follow lines.\n '''\n '''\n mw = 0.01 * w.view(w.size(0), -1).mean(-1).view(w.size(0), 1, 1, 1).detach()\n w = w + torch.sigmoid(self.alpha.view(self.alpha.size(0), 1, 1, 1)) * mw\n binary_weights_no_grad = torch.sign(w)\n cliped_weights = torch.clamp(w, -1.0, 1.0)\n binary_weights = binary_weights_no_grad.detach() - cliped_weights.detach() + cliped_weights\n '''\n x = x + self.beta(x)\n bx = self.binary_activation(x)\n\n y = F.conv2d(bx, binary_weights, stride=self.stride, padding=self.padding)\n\n return y\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n\n self.binary_conv = HardBinaryConv(inplanes, 
planes, stride=stride)\n self.bn1 = nn.BatchNorm2d(planes)\n self.prelu = nn.PReLU(planes)\n\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.binary_conv(x)\n out = self.bn1(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n\n out = self.prelu(out)\n\n\n return out\n\nclass BiRealNet(nn.Module):\n\n def __init__(self, block, layers, num_classes=1000, zero_init_residual=False):\n super(BiRealNet, self).__init__()\n self.inplanes = 64\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.fc = nn.Linear(512 * block.expansion, num_classes)\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.AvgPool2d(kernel_size=2, stride=stride),\n conv1x1(self.inplanes, planes * block.expansion),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n\n return x\n\n\ndef birealnet18(pretrained=False, **kwargs):\n \"\"\"Constructs a BiRealNet-18 model. \"\"\"\n model = BiRealNet(BasicBlock, [4, 4, 4, 4], **kwargs)\n return model\n\n\ndef birealnet34(pretrained=False, **kwargs):\n \"\"\"Constructs a BiRealNet-34 model. \"\"\"\n model = BiRealNet(BasicBlock, [6, 8, 12, 6], **kwargs)\n return model\n\n"
] |
[
[
"torch.nn.Sequential",
"torch.sign",
"torch.zeros",
"torch.nn.PReLU",
"torch.nn.functional.conv2d",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.Linear",
"torch.nn.AvgPool2d",
"torch.nn.AdaptiveAvgPool2d",
"torch.rand",
"torch.nn.BatchNorm2d"
]
] |
mintusf/land_cover_segmentation
|
[
"6e136c076d50aa6a85b9628e5cdcbedcf0dd9836",
"6e136c076d50aa6a85b9628e5cdcbedcf0dd9836"
] |
[
"tests/models/test_deeplab_forward.py",
"models/hrnet/hrnet.py"
] |
[
"import torch\nfrom torchvision.transforms import Compose\n\nfrom dataset import PatchDataset, get_transform\nfrom models import get_model\nfrom utils.io_utils import load_yaml\n\n\ndef test_deeplab_forward(test_config):\n channels_in = len(test_config.DATASET.INPUT.USED_CHANNELS)\n labels_config = load_yaml(test_config.DATASET.MASK.CONFIG)\n channels_out = len(labels_config[\"class2label\"])\n\n assert channels_in == 4\n assert channels_out == 5\n\n model = get_model(test_config, test_config.TRAIN.DEVICE)\n\n transform = get_transform(test_config)\n transforms = Compose([transform])\n dataset = PatchDataset(test_config, samples_list=\"train\", transforms=transforms)\n\n sample_batch = torch.stack([dataset[0][\"input\"], dataset[1][\"input\"]], 0)\n\n pred = model(sample_batch)[\"out\"]\n\n assert pred.dim() == 4\n assert pred.shape[0] == 2\n assert pred.shape[1] == channels_out\n assert pred.shape[2] == 256\n assert pred.shape[3] == 256\n",
"# ------------------------------------------------------------------------------\n# Copyright (c) Microsoft\n# Licensed under the MIT License.\n# Written by Ke Sun (sunk@mail.ustc.edu.cn)\n# ------------------------------------------------------------------------------\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport logging\nimport functools\n\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch._utils\nimport torch.nn.functional as F\n\nfrom models.hrnet.utils import BatchNorm2d, BatchNorm2d_class, relu_inplace\n\nBN_MOMENTUM = 0.1\nALIGN_CORNERS = None\n\nlogger = logging.getLogger(__name__)\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(\n in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False\n )\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=relu_inplace)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out = out + residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.conv2 = nn.Conv2d(\n planes, planes, kernel_size=3, stride=stride, padding=1, bias=False\n )\n self.bn2 = BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.conv3 = nn.Conv2d(\n planes, planes * self.expansion, kernel_size=1, bias=False\n )\n self.bn3 = BatchNorm2d(planes * self.expansion, momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=relu_inplace)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out = out + residual\n out = self.relu(out)\n\n return out\n\n\nclass HighResolutionModule(nn.Module):\n def __init__(\n self,\n num_branches,\n blocks,\n num_blocks,\n num_inchannels,\n num_channels,\n fuse_method,\n multi_scale_output=True,\n ):\n super(HighResolutionModule, self).__init__()\n self._check_branches(\n num_branches, blocks, num_blocks, num_inchannels, num_channels\n )\n\n self.num_inchannels = num_inchannels\n self.fuse_method = fuse_method\n self.num_branches = num_branches\n\n self.multi_scale_output = multi_scale_output\n\n self.branches = self._make_branches(\n num_branches, blocks, num_blocks, num_channels\n )\n self.fuse_layers = self._make_fuse_layers()\n self.relu = nn.ReLU(inplace=relu_inplace)\n\n def _check_branches(\n self, num_branches, blocks, num_blocks, num_inchannels, num_channels\n ):\n if num_branches != len(num_blocks):\n error_msg = \"NUM_BRANCHES({}) <> 
NUM_BLOCKS({})\".format(\n num_branches, len(num_blocks)\n )\n logger.error(error_msg)\n raise ValueError(error_msg)\n\n if num_branches != len(num_channels):\n error_msg = \"NUM_BRANCHES({}) <> NUM_CHANNELS({})\".format(\n num_branches, len(num_channels)\n )\n logger.error(error_msg)\n raise ValueError(error_msg)\n\n if num_branches != len(num_inchannels):\n error_msg = \"NUM_BRANCHES({}) <> NUM_INCHANNELS({})\".format(\n num_branches, len(num_inchannels)\n )\n logger.error(error_msg)\n raise ValueError(error_msg)\n\n def _make_one_branch(self, branch_index, block, num_blocks, num_channels, stride=1):\n downsample = None\n if (\n stride != 1\n or self.num_inchannels[branch_index]\n != num_channels[branch_index] * block.expansion\n ):\n downsample = nn.Sequential(\n nn.Conv2d(\n self.num_inchannels[branch_index],\n num_channels[branch_index] * block.expansion,\n kernel_size=1,\n stride=stride,\n bias=False,\n ),\n BatchNorm2d(\n num_channels[branch_index] * block.expansion, momentum=BN_MOMENTUM\n ),\n )\n\n layers = []\n layers.append(\n block(\n self.num_inchannels[branch_index],\n num_channels[branch_index],\n stride,\n downsample,\n )\n )\n self.num_inchannels[branch_index] = num_channels[branch_index] * block.expansion\n for i in range(1, num_blocks[branch_index]):\n layers.append(\n block(self.num_inchannels[branch_index], num_channels[branch_index])\n )\n\n return nn.Sequential(*layers)\n\n def _make_branches(self, num_branches, block, num_blocks, num_channels):\n branches = []\n\n for i in range(num_branches):\n branches.append(self._make_one_branch(i, block, num_blocks, num_channels))\n\n return nn.ModuleList(branches)\n\n def _make_fuse_layers(self):\n if self.num_branches == 1:\n return None\n\n num_branches = self.num_branches\n num_inchannels = self.num_inchannels\n fuse_layers = []\n for i in range(num_branches if self.multi_scale_output else 1):\n fuse_layer = []\n for j in range(num_branches):\n if j > i:\n fuse_layer.append(\n nn.Sequential(\n nn.Conv2d(\n num_inchannels[j],\n num_inchannels[i],\n 1,\n 1,\n 0,\n bias=False,\n ),\n BatchNorm2d(num_inchannels[i], momentum=BN_MOMENTUM),\n )\n )\n elif j == i:\n fuse_layer.append(None)\n else:\n conv3x3s = []\n for k in range(i - j):\n if k == i - j - 1:\n num_outchannels_conv3x3 = num_inchannels[i]\n conv3x3s.append(\n nn.Sequential(\n nn.Conv2d(\n num_inchannels[j],\n num_outchannels_conv3x3,\n 3,\n 2,\n 1,\n bias=False,\n ),\n BatchNorm2d(\n num_outchannels_conv3x3, momentum=BN_MOMENTUM\n ),\n )\n )\n else:\n num_outchannels_conv3x3 = num_inchannels[j]\n conv3x3s.append(\n nn.Sequential(\n nn.Conv2d(\n num_inchannels[j],\n num_outchannels_conv3x3,\n 3,\n 2,\n 1,\n bias=False,\n ),\n BatchNorm2d(\n num_outchannels_conv3x3, momentum=BN_MOMENTUM\n ),\n nn.ReLU(inplace=relu_inplace),\n )\n )\n fuse_layer.append(nn.Sequential(*conv3x3s))\n fuse_layers.append(nn.ModuleList(fuse_layer))\n\n return nn.ModuleList(fuse_layers)\n\n def get_num_inchannels(self):\n return self.num_inchannels\n\n def forward(self, x):\n if self.num_branches == 1:\n return [self.branches[0](x[0])]\n\n for i in range(self.num_branches):\n x[i] = self.branches[i](x[i])\n\n x_fuse = []\n for i in range(len(self.fuse_layers)):\n y = x[0] if i == 0 else self.fuse_layers[i][0](x[0])\n for j in range(1, self.num_branches):\n if i == j:\n y = y + x[j]\n elif j > i:\n width_output = x[i].shape[-1]\n height_output = x[i].shape[-2]\n y = y + F.interpolate(\n self.fuse_layers[i][j](x[j]),\n size=[height_output, width_output],\n mode=\"bilinear\",\n 
align_corners=ALIGN_CORNERS,\n )\n else:\n y = y + self.fuse_layers[i][j](x[j])\n x_fuse.append(self.relu(y))\n\n return x_fuse\n\n\nblocks_dict = {\"BASIC\": BasicBlock, \"BOTTLENECK\": Bottleneck}\n\n\nclass HighResolutionNet(nn.Module):\n def __init__(self, config, channels_in, classes_out):\n global ALIGN_CORNERS\n extra = config[\"EXTRA\"]\n super(HighResolutionNet, self).__init__()\n ALIGN_CORNERS = config[\"ALIGN_CORNERS\"]\n\n # stem net\n self.conv1 = nn.Conv2d(\n channels_in, 64, kernel_size=3, stride=2, padding=1, bias=False\n )\n self.bn1 = BatchNorm2d(64, momentum=BN_MOMENTUM)\n self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1, bias=False)\n self.bn2 = BatchNorm2d(64, momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=relu_inplace)\n\n self.stage1_cfg = extra[\"STAGE1\"]\n num_channels = self.stage1_cfg[\"NUM_CHANNELS\"][0]\n block = blocks_dict[self.stage1_cfg[\"BLOCK\"]]\n num_blocks = self.stage1_cfg[\"NUM_BLOCKS\"][0]\n self.layer1 = self._make_layer(block, 64, num_channels, num_blocks)\n stage1_out_channel = block.expansion * num_channels\n\n self.stage2_cfg = extra[\"STAGE2\"]\n num_channels = self.stage2_cfg[\"NUM_CHANNELS\"]\n block = blocks_dict[self.stage2_cfg[\"BLOCK\"]]\n num_channels = [\n num_channels[i] * block.expansion for i in range(len(num_channels))\n ]\n self.transition1 = self._make_transition_layer(\n [stage1_out_channel], num_channels\n )\n self.stage2, pre_stage_channels = self._make_stage(\n self.stage2_cfg, num_channels\n )\n\n self.stage3_cfg = extra[\"STAGE3\"]\n num_channels = self.stage3_cfg[\"NUM_CHANNELS\"]\n block = blocks_dict[self.stage3_cfg[\"BLOCK\"]]\n num_channels = [\n num_channels[i] * block.expansion for i in range(len(num_channels))\n ]\n self.transition2 = self._make_transition_layer(pre_stage_channels, num_channels)\n self.stage3, pre_stage_channels = self._make_stage(\n self.stage3_cfg, num_channels\n )\n\n self.stage4_cfg = extra[\"STAGE4\"]\n num_channels = self.stage4_cfg[\"NUM_CHANNELS\"]\n block = blocks_dict[self.stage4_cfg[\"BLOCK\"]]\n num_channels = [\n num_channels[i] * block.expansion for i in range(len(num_channels))\n ]\n self.transition3 = self._make_transition_layer(pre_stage_channels, num_channels)\n self.stage4, pre_stage_channels = self._make_stage(\n self.stage4_cfg, num_channels, multi_scale_output=True\n )\n\n last_inp_channels = np.int(np.sum(pre_stage_channels))\n\n self.last_layer = nn.Sequential(\n nn.Conv2d(\n in_channels=last_inp_channels,\n out_channels=last_inp_channels,\n kernel_size=1,\n stride=1,\n padding=0,\n ),\n BatchNorm2d(last_inp_channels, momentum=BN_MOMENTUM),\n nn.ReLU(inplace=relu_inplace),\n nn.Conv2d(\n in_channels=last_inp_channels,\n out_channels=classes_out,\n kernel_size=extra[\"FINAL_CONV_KERNEL\"],\n stride=1,\n padding=1 if extra[\"FINAL_CONV_KERNEL\"] == 3 else 0,\n ),\n )\n\n def _make_transition_layer(self, num_channels_pre_layer, num_channels_cur_layer):\n num_branches_cur = len(num_channels_cur_layer)\n num_branches_pre = len(num_channels_pre_layer)\n\n transition_layers = []\n for i in range(num_branches_cur):\n if i < num_branches_pre:\n if num_channels_cur_layer[i] != num_channels_pre_layer[i]:\n transition_layers.append(\n nn.Sequential(\n nn.Conv2d(\n num_channels_pre_layer[i],\n num_channels_cur_layer[i],\n 3,\n 1,\n 1,\n bias=False,\n ),\n BatchNorm2d(\n num_channels_cur_layer[i], momentum=BN_MOMENTUM\n ),\n nn.ReLU(inplace=relu_inplace),\n )\n )\n else:\n transition_layers.append(None)\n else:\n conv3x3s = []\n for j in range(i + 1 - 
num_branches_pre):\n inchannels = num_channels_pre_layer[-1]\n outchannels = (\n num_channels_cur_layer[i]\n if j == i - num_branches_pre\n else inchannels\n )\n conv3x3s.append(\n nn.Sequential(\n nn.Conv2d(inchannels, outchannels, 3, 2, 1, bias=False),\n BatchNorm2d(outchannels, momentum=BN_MOMENTUM),\n nn.ReLU(inplace=relu_inplace),\n )\n )\n transition_layers.append(nn.Sequential(*conv3x3s))\n\n return nn.ModuleList(transition_layers)\n\n def _make_layer(self, block, inplanes, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(\n inplanes,\n planes * block.expansion,\n kernel_size=1,\n stride=stride,\n bias=False,\n ),\n BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),\n )\n\n layers = []\n layers.append(block(inplanes, planes, stride, downsample))\n inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def _make_stage(self, layer_config, num_inchannels, multi_scale_output=True):\n num_modules = layer_config[\"NUM_MODULES\"]\n num_branches = layer_config[\"NUM_BRANCHES\"]\n num_blocks = layer_config[\"NUM_BLOCKS\"]\n num_channels = layer_config[\"NUM_CHANNELS\"]\n block = blocks_dict[layer_config[\"BLOCK\"]]\n fuse_method = layer_config[\"FUSE_METHOD\"]\n\n modules = []\n for i in range(num_modules):\n # multi_scale_output is only used last module\n if not multi_scale_output and i == num_modules - 1:\n reset_multi_scale_output = False\n else:\n reset_multi_scale_output = True\n modules.append(\n HighResolutionModule(\n num_branches,\n block,\n num_blocks,\n num_inchannels,\n num_channels,\n fuse_method,\n reset_multi_scale_output,\n )\n )\n num_inchannels = modules[-1].get_num_inchannels()\n\n return nn.Sequential(*modules), num_inchannels\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.conv2(x)\n x = self.bn2(x)\n x = self.relu(x)\n x = self.layer1(x)\n\n x_list = []\n for i in range(self.stage2_cfg[\"NUM_BRANCHES\"]):\n if self.transition1[i] is not None:\n x_list.append(self.transition1[i](x))\n else:\n x_list.append(x)\n y_list = self.stage2(x_list)\n\n x_list = []\n for i in range(self.stage3_cfg[\"NUM_BRANCHES\"]):\n if self.transition2[i] is not None:\n if i < self.stage2_cfg[\"NUM_BRANCHES\"]:\n x_list.append(self.transition2[i](y_list[i]))\n else:\n x_list.append(self.transition2[i](y_list[-1]))\n else:\n x_list.append(y_list[i])\n y_list = self.stage3(x_list)\n\n x_list = []\n for i in range(self.stage4_cfg[\"NUM_BRANCHES\"]):\n if self.transition3[i] is not None:\n if i < self.stage3_cfg[\"NUM_BRANCHES\"]:\n x_list.append(self.transition3[i](y_list[i]))\n else:\n x_list.append(self.transition3[i](y_list[-1]))\n else:\n x_list.append(y_list[i])\n x = self.stage4(x_list)\n\n # Upsampling\n x0_h, x0_w = x[0].size(2), x[0].size(3)\n x1 = F.interpolate(\n x[1], size=(x0_h, x0_w), mode=\"bilinear\", align_corners=ALIGN_CORNERS\n )\n x2 = F.interpolate(\n x[2], size=(x0_h, x0_w), mode=\"bilinear\", align_corners=ALIGN_CORNERS\n )\n x3 = F.interpolate(\n x[3], size=(x0_h, x0_w), mode=\"bilinear\", align_corners=ALIGN_CORNERS\n )\n\n x = torch.cat([x[0], x1, x2, x3], 1)\n\n x = self.last_layer(x)\n\n x = F.interpolate(x, size=[256, 256], mode=\"nearest\")\n\n return {\"out\": x}\n\n def init_weights(\n self,\n pretrained=\"\",\n ):\n logger.info(\"=> init weights from normal distribution\")\n for m in self.modules():\n if isinstance(m, 
nn.Conv2d):\n nn.init.normal_(m.weight, std=0.001)\n elif isinstance(m, BatchNorm2d_class):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n\ndef get_hrnet(cfg, channels_in, classes_out, checkpoint_path):\n model = HighResolutionNet(cfg, channels_in, classes_out)\n model.init_weights(checkpoint_path)\n\n return model\n"
] |
[
[
"torch.stack"
],
[
"torch.nn.Sequential",
"torch.cat",
"torch.nn.init.constant_",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.nn.init.normal_",
"torch.nn.functional.interpolate",
"torch.nn.ReLU",
"numpy.sum"
]
] |
fulankun1412/Table-and-Image-Extractor-from-PDF-Using-Tabula-and-PyPDF2
|
[
"373b7f32075bad875beeae0ff431c45a279d7230"
] |
[
"main.py"
] |
[
"import PyPDF2\nimport os\nfrom PIL import Image\nimport pandas as pd\nimport tabula as tb\nimport os\nimport pandas as pd\nimport PIL\nimport mysql.connector as sql\n\n#### Extracting Data Table From PDF file\n### Declare Variables\ndirr = \"C:/Users/afkaa/Google Drive/Multimatics/Data/UINSA/\"\ntable_dir = \"table_output\"\nlist_pdf = []\nlist_table = []\n\n### Create Folder if not exist\nif os.path.isdir(table_dir) == False:\n os.mkdir(table_dir) \n\n### Find pdf file in designated Directory\nfor file in os.listdir(dirr):\n if file.startswith(\"Daftar Hadir UINSA\") and file.endswith(\".pdf\"):\n print(\"Found File : \" + file)\n list_pdf.append(os.path.join(dirr, file))\n\n### Extract Table Data from listed Pdf File\nfor pdf in list_pdf:\n print(\"Extracting Table Data From\", pdf)\n dfs = tb.read_pdf(pdf, pages='all')\n ## Convert to Pandas\n df = pd.concat(dfs)\n df = df.drop(columns=['Added Time', 'Referrer Name', 'Task Owner'])\n list_table.append(df)\n\n \n### Create Separate File or Combine the whole table\n## Separate File\nnum_table = 0\nfor table in list_table:\n ### Save to CSV\n table.to_csv(os.path.join(table_dir,\"table-\" + str(num_table) + \".csv\"), index=False)\n print(\"Created CSV file Named : \" + \"table-\" + str(num_table) + \".csv\")\n num_table += 1\n\n## Combine Table\n#df_full = pd.concat(list_table)\n\n### Save to CSV\n#df_full.to_csv(\"combine.csv\",index=False)\n#print(df_full.dtypes)\n\n### Declare Variable\nimg_out = \"img_out\"\ntable_image_dir = \"tabel_image_output\"\ncsv_input = []\nrow_input = 0\nfile_num = 0\n\n### Indexing Table File\nfor file in os.listdir(os.path.join(os.getcwd(), table_dir)):\n if file.startswith(\"table\") and file.endswith(\".csv\"):\n print(\"Found CSV file for input: \" + file)\n csv_input.append(file)\n\n### Check if Table Image Output Directory Created and Create it if False\nif os.path.isdir(table_image_dir) == False:\n os.mkdir(table_image_dir)\n \n### Check if Image Output Directory Created and Create it if False\nif os.path.isdir(img_out) == False:\n os.mkdir(img_out) \n \n### Extracting Image from PDF by CSV list\nfor index in range(len(csv_input)):\n ## Read and Convert Column to Str\n df = pd.read_csv(csv_input[index])\n df = df.astype({\"Tanda Tangan Jelas\": str})\n \n pdf_input = PyPDF2.PdfFileReader(open(list_pdf[index], \"rb\"))\n \n print(\"Processing File \" + csv_input[index] + \" and \" + list_pdf[index])\n \n n_pages = pdf_input.getNumPages()\n for page in range(n_pages):\n pages = pdf_input.getPage(page)\n xObject = pages['/Resources']['/XObject'].getObject()\n\n for obj in xObject:\n if xObject[obj]['/Subtype'] == '/Image':\n size = (xObject[obj]['/Width'], xObject[obj]['/Height'])\n data = xObject[obj].getData()\n if xObject[obj]['/ColorSpace'] == '/DeviceRGB':\n mode = \"RGB\"\n else:\n mode = \"P\"\n\n if xObject[obj]['/Filter'] == '/FlateDecode':\n img = Image.frombytes(mode, size, data)\n saving_name = img_out + '/' + \"pdf-\" + str(file_num) + \"-\" + obj[1:] + \".png\"\n img.save(saving_name)\n df.at[row_input, \"Tanda Tangan Jelas\"] = saving_name\n elif xObject[obj]['/Filter'] == '/DCTDecode':\n img = open(img_out + '/' + \"pdf-\" + str(file_num) + \"-\" + obj[1:] + \".jpg\", \"wb\")\n img.write(data)\n img.close()\n elif xObject[obj]['/Filter'] == '/JPXDecode':\n img = open(img_out + '/' + \"pdf-\" + str(file_num) + \"-\" + obj[1:] + \".jp2\", \"wb\")\n img.write(data)\n img.close()\n \n row_input += 1\n \n df.to_csv(os.path.join(table_image_dir,\"hasil-\" + str(file_num) + 
\".csv\"),index=False)\n print(\"Saving CSV Index Image to : \" + \"hasil-\" + str(file_num) + \".csv\")\n row_input = 0\n file_num += 1\n\n#### Percobaan Import Ke MySQL\ndef convertToBinaryData(filename):\n ## Convert digital data to binary format\n try:\n with open(filename, 'rb') as file:\n binaryData = file.read()\n return binaryData\n except:\n return \"zero\"\n\ndef insertToMySQL(dataOneRow):\n try:\n connection = sql.connect(host='localhost', \n database='mahasiswa', \n user='lanang_afkaar', \n password='1q2w3e4r5t')\n cursor = connection.cursor()\n sql_insert = \"\"\"INSERT INTO UINSA\n (NIM, NamaLengkap, ProgramStudi, \n KelasPelatihan, PelatihanHariKe, TandaTanganJelas)\n VALUES (%s, %s, %s, %s, %s, %s)\"\"\"\n result = cursor.execute(sql_insert, dataOneRow)\n connection.commit()\n return 1\n \n except sql.Error as error:\n print(\"Failed inserting BLOB data into MySQL table {}\".format(error))\n return 0\n\n## Convert data into tuple format\nlist_file = []\ncount = 0\n\nfor file in os.listdir(os.getcwd()):\n if file.startswith(\"hasil\") and file.endswith(\".csv\"):\n print(\"Found File processing :\" + file)\n list_file.append(file)\n \nfor file in list_file:\n df_mahasiswa = pd.read_csv(file)\n print(\"Processing Upload for :\" + file)\n for index, row in df_mahasiswa.iterrows():\n NIM = row['NIM']\n NamaLengkap = row['Nama Lengkap']\n ProgramStudi = row['Program Studi']\n KelasPelatihan = row['Kelas Pelatihan']\n PelatihanHariKe = row['Pelatihan Hari Ke-']\n TandaTanganJelas = convertToBinaryData(row['Tanda Tangan Jelas'])\n tuple_data = (NIM, NamaLengkap, ProgramStudi, KelasPelatihan, PelatihanHariKe, TandaTanganJelas)\n counting = insertToMySQL(tuple_data)\n count = count + counting\n\nprint(\"{} Data and Tanda Tangan have been inserted successfully as a OLE into UINSA\".format(count))"
] |
[
[
"pandas.concat",
"pandas.read_csv"
]
] |
G-Doxastakis/SparseLSH
|
[
"156a3772a1447b87b1cb9860218a24246f9e61fd"
] |
[
"sparselsh/lsh.py"
] |
[
"from __future__ import print_function\n\nimport os\nimport json\nimport numpy as np\nfrom scipy import sparse\n\nfrom .storage import storage, serialize, deserialize\n\nclass LSH(object):\n \"\"\" LSH implments locality sensitive hashing using random projection for\n input vectors of dimension `input_dim`.\n\n Attributes:\n\n :param hash_size:\n The length of the resulting binary hash in integer. E.g., 32 means the\n resulting binary hash will be 32-bit long.\n :param input_dim:\n The dimension of the input vector. This can be found in your sparse\n matrix by checking the .shape attribute of your matrix. I.E.,\n `csr_dataset.shape[1]`\n :param num_hashtables:\n (optional) The number of hash tables used for multiple look-ups.\n Increasing the number of hashtables increases the probability of\n a hash collision of similar documents, but it also increases the\n amount of work needed to add points.\n :param storage_config:\n (optional) A dictionary of the form `{backend_name: config}` where\n `backend_name` is the either `dict`, `berkeleydb`, `leveldb` or\n `redis`. `config` is the configuration used by the backend.\n Example configs for each type are as follows:\n `In-Memory Python Dictionary`:\n {\"dict\": None} # Takes no options\n `Redis`:\n `{\"redis\": {\"host\": hostname, \"port\": port_num}}`\n Where `hostname` is normally `localhost` and `port` is normally 6379.\n `LevelDB`:\n {'leveldb':{'db': 'ldb'}}\n Where 'db' specifies the directory to store the LevelDB database.\n `Berkeley DB`:\n {'berkeleydb':{'filename': './db'}}\n Where 'filename' is the location of the database file.\n NOTE: Both Redis and Dict are in-memory. Keep this in mind when\n selecting a storage backend.\n :param matrices_filename:\n (optional) Specify the path to the compressed numpy file ending with\n extension `.npz`, where the uniform random planes are stored, or to be\n stored if the file does not exist yet.\n :param overwrite:\n (optional) Whether to overwrite the matrices file if it already exist.\n This needs to be True if the input dimensions or number of hashtables\n change.\n \"\"\"\n\n def __init__(self, hash_size, input_dim, num_hashtables=1,\n storage_config=None, matrices_filename=None, overwrite=False):\n\n self.hash_size = hash_size\n self.input_dim = input_dim\n self.num_hashtables = num_hashtables\n\n if storage_config is None:\n storage_config = {'dict': None}\n self.storage_config = storage_config\n\n if matrices_filename and not matrices_filename.endswith('.npz'):\n raise ValueError(\"The specified file name must end with .npz\")\n self.matrices_filename = matrices_filename\n self.overwrite = overwrite\n\n self._init_uniform_planes()\n self._init_hashtables()\n\n def _init_uniform_planes(self):\n \"\"\" Initialize uniform planes used to calculate the hashes\n\n if file `self.matrices_filename` exist and `self.overwrite` is\n selected, save the uniform planes to the specified file.\n\n if file `self.matrices_filename` exist and `self.overwrite` is not\n selected, load the matrix with `np.load`.\n\n if file `self.matrices_filename` does not exist and regardless of\n `self.overwrite`, only set `self.uniform_planes`.\n \"\"\"\n\n if \"uniform_planes\" in self.__dict__:\n return\n\n if self.matrices_filename:\n file_exist = os.path.isfile(self.matrices_filename)\n if file_exist and not self.overwrite:\n try:\n # TODO: load sparse file\n npzfiles = np.load(self.matrices_filename)\n except IOError:\n print(\"Cannot load specified file as a numpy array\")\n raise\n else:\n npzfiles = 
sorted(list(npzfiles.items()), key=lambda x: x[0])\n # TODO: to sparse\n self.uniform_planes = [t[1] for t in npzfiles]\n else:\n self.uniform_planes = [self._generate_uniform_planes()\n for _ in range(self.num_hashtables)]\n try:\n np.savez_compressed(self.matrices_filename,\n *self.uniform_planes)\n except IOError:\n print(\"IOError when saving matrices to specificed path\")\n raise\n else:\n self.uniform_planes = [self._generate_uniform_planes()\n for _ in range(self.num_hashtables)]\n\n def _init_hashtables(self):\n \"\"\" Initialize the hash tables such that each record will be in the\n form of \"[storage1, storage2, ...]\" \"\"\"\n\n self.hash_tables = [storage(self.storage_config, i)\n for i in range(self.num_hashtables)]\n\n def _generate_uniform_planes(self):\n \"\"\" Generate uniformly distributed hyperplanes and return it as a 2D\n numpy array.\n \"\"\"\n dense_planes = np.random.randn(self.hash_size, self.input_dim)\n return sparse.csr_matrix(dense_planes)\n\n def _hash(self, planes, input_point):\n \"\"\" Generates the binary hash for `input_point` and returns it.\n\n :param planes:\n The planes are random uniform planes with a dimension of\n `hash_size` * `input_dim`.\n :param input_point:\n A scipy sparse matrix that contains only numbers.\n The dimension needs to be 1 * `input_dim`.\n \"\"\"\n try:\n input_point = input_point.transpose()\n projections = planes.dot(input_point)\n\n except TypeError as e:\n print(\"\"\"The input point needs to be an array-like object with\n numbers only elements\"\"\")\n raise\n except ValueError as e:\n print((\"\"\"The input point needs to be of the same dimension as\n `input_dim` when initializing this LSH instance\"\"\", e))\n raise\n else:\n return \"\".join(['1' if i > 0 else '0' for i in projections])\n\n def _as_np_array(self, serial_or_sparse):\n \"\"\" Takes either a serialized data structure, a sparse matrix, or tuple\n that has the original input points stored, and returns the original\n input point (a 1 x N sparse matrix).\n \"\"\"\n # if we get a plain sparse matrix, return it (it's the point itself)\n if sparse.issparse(serial_or_sparse):\n return serial_or_sparse\n\n # here we have a serialized pickle object\n if isinstance(serial_or_sparse, str):\n try:\n deserial = deserialize(serial_or_sparse)\n except TypeError:\n print(\"The value stored is not deserializable\")\n raise\n else:\n # If extra_data exists, `tuples` is the entire\n # (point:sparse, extra_daa). Otherwise (i.e., extra_data=None),\n # return the point stored as a tuple\n deserial = serial_or_sparse\n\n # if we deserialized it, we might have the sparse now\n if sparse.issparse(deserial):\n return deserial\n\n if isinstance(deserial[0], tuple):\n # extra data was supplied, return point\n return tuples[0]\n\n elif isinstance(deserial, (tuple, list)):\n try:\n return deserial[0]\n except ValueError as e:\n print((\"The input needs to be an array-like object\", e))\n raise\n else:\n raise TypeError(\"the input data is not supported\")\n\n def index(self, input_point, extra_data=None):\n \"\"\" Index a single input point by adding it to the selected storage.\n\n If `extra_data` is provided, it will become the value of the dictionary\n {input_point: extra_data}, which in turn will become the value of the\n hash table.\n\n :param input_point:\n A sparse CSR matrix. The dimension needs to be 1 * `input_dim`.\n :param extra_data:\n (optional) A value to associate with the point. 
Commonly this is\n a target/class-value of some type.\n \"\"\"\n\n assert sparse.issparse(input_point), \"input_point needs to be sparse\"\n\n # NOTE: there was a bug with 0-equal extra_data\n # we need to allow blank extra_data if it's provided\n if not isinstance(extra_data, type(None)):\n # NOTE: needs to be tuple so it's set-hashable\n value = (input_point, extra_data)\n else:\n value = input_point\n\n for i, table in enumerate(self.hash_tables):\n table.append_val(\n self._hash(self.uniform_planes[i], input_point),\n value)\n\n def _string_bits_to_array( self, hash_key):\n \"\"\" Take our hash keys (strings of 0 and 1) and turn it\n into a numpy matrix we can do calculations with.\n\n :param hash_key\n \"\"\"\n return np.array( [ float(i) for i in hash_key])\n\n def query(self, query_point, num_results=None, distance_func=None):\n \"\"\" Takes `query_point` which is a sparse CSR matrix of 1 x `input_dim`,\n returns `num_results` of results as a list of tuples that are ranked\n based on the supplied metric function `distance_func`.\n\n :param query_point:\n A sparse CSR matrix. The dimension needs to be 1 * `input_dim`.\n Used by :meth:`._hash`.\n :param num_results:\n (optional) Integer, specifies the max amount of results to be\n returned. If not specified all candidates will be returned as a\n list in ranked order.\n NOTE: You do not save processing by limiting the results. Currently,\n a similarity ranking and sort is done on all items in the hashtable.\n :param distance_func:\n (optional) The distance function to be used. Currently it needs to\n be one of (\"hamming\", \"euclidean\", \"true_euclidean\",\n \"centred_euclidean\", \"cosine\", \"l1norm\"). By default \"euclidean\"\n will used.\n \"\"\"\n assert sparse.issparse(query_point), \"query_point needs to be sparse\"\n\n candidates = []\n if not distance_func:\n distance_func = \"euclidean\"\n\n for i, table in enumerate(self.hash_tables):\n # get hash of query point\n binary_hash = self._hash(self.uniform_planes[i], query_point)\n for key in list(table.keys()):\n # calculate distance from query point hash to all hashes\n distance = LSH.hamming_dist(\n self._string_bits_to_array(key),\n self._string_bits_to_array(binary_hash))\n # NOTE: we could make this threshold user defined\n if distance < 2:\n members = table.get_list(key)\n candidates.extend(members)\n\n d_func = LSH.euclidean_dist_square\n\n else:\n\n if distance_func == \"euclidean\":\n d_func = LSH.euclidean_dist_square\n elif distance_func == \"true_euclidean\":\n d_func = LSH.euclidean_dist\n elif distance_func == \"centred_euclidean\":\n d_func = LSH.euclidean_dist_centred\n elif distance_func == \"cosine\":\n d_func = LSH.cosine_dist\n elif distance_func == \"l1norm\":\n d_func = LSH.l1norm_dist\n else:\n raise ValueError(\"The distance function name is invalid.\")\n\n # TODO: pull out into fn w/ optional threshold arg\n for i, table in enumerate(self.hash_tables):\n binary_hash = self._hash(self.uniform_planes[i], query_point)\n candidates.extend(table.get_list(binary_hash)[0])\n\n # # rank candidates by distance function\n ranked_candidates = []\n for ix in candidates:\n point = self._as_np_array(ix)\n dist = d_func(query_point, point)\n ranked_candidates.append( (ix,dist))\n\n # TODO: stop sorting when we have top num_results, instead of truncating\n # TODO: (do this by replacing set with ordered set)\n # after we've done the entire list\n ranked_candidates.sort(key=lambda x: x[1])\n\n return ranked_candidates[:num_results] if num_results else 
ranked_candidates\n\n ### distance functions\n\n @staticmethod\n def hamming_dist(sparse1, sparse2):\n return (sparse1 != sparse2).sum()\n\n @staticmethod\n def euclidean_dist(x, y):\n diff = x - y\n return sparse.csr_matrix.sqrt( diff.dot(diff))\n\n @staticmethod\n def euclidean_dist_square(x, y):\n diff = x - y\n if diff.nnz == 0:\n return 0.0\n result = diff.dot(diff.transpose())\n return result.data[0]\n\n @staticmethod\n def euclidean_dist_centred(x, y):\n diff = x.mean() - y.mean()\n return diff.dot( diff)\n\n @staticmethod\n def l1norm_dist(x, y):\n return abs(x - y).sum()\n\n @staticmethod\n def cosine_dist(x, y):\n return 1 - x.dot(y) / ((x.dot(x) * y.dot(y)) ** 0.5)\n"
] |
[
[
"scipy.sparse.issparse",
"scipy.sparse.csr_matrix",
"numpy.savez_compressed",
"numpy.random.randn",
"numpy.load"
]
] |
ptbrown1729/exact_diag
|
[
"16c284aea88dd1fb795e89b8e877d564045b8269"
] |
[
"ed_nlce.py"
] |
[
"import numpy as np\r\nimport scipy.special\r\nimport scipy.sparse as sp\r\nimport ed_geometry as geom\r\nimport ed_symmetry as symm\r\n\r\n# TODO: some ideas\r\n# 1. Identify clusters which are topologically the same, to save on work done for diagonalization\r\n# 2. Automatically identify symmetry group of cluster to reduce work during diagonalization. Expect this is not so\r\n# important because many clusters will not have much symmetry, especially for larger orders (?)\r\n# 3. Improve speed of various functions. Right now using lots of loops. Not clear to me how to avoid this because\r\n# I don't know a natural way to order clusters. A few ideas for doing this, which might speed up some functions,\r\n# for example doing a binary search over a sorted list instead of searching through all clusters of the same order.\r\n\r\ndef get_clusters_next_order(cluster_list=None, lv1=np.array([1, 0]), lv2=np.array([0, 1]), use_symmetry=False):\r\n \"\"\"\r\n Get all clusters that can be generated from a list of clusters with one fewer site.\r\n # TODO: should I be keeping track of the number of symmetric clusters?\r\n # TODO: This is the multiplicity in the thermodynamic limit, so probably need this...\r\n\r\n :param cluster_list: List of clusters to used for generating the new set of clusters\r\n :param lv1: Lattice vector 1, giving allowed moves to add sites to our cluster\r\n :param lv2:\r\n :param bool use_symmetry: If using symmetry, will only keep a single cluster representing each symmetry group.\r\n\r\n :return list cluster_list_next: clusters of one higher order.\r\n :return multiplicity: multiplicities of each cluster\r\n \"\"\"\r\n cluster_list_next = []\r\n multiplicity = []\r\n vect_list = [lv1, -lv1, lv2, -lv2]\r\n\r\n if cluster_list is None:\r\n # to zeroth order, cluster of one site\r\n gm = geom.Geometry.createNonPeriodicGeometry([0], [0])\r\n cluster_list_next.append(gm)\r\n multiplicity.append(1)\r\n else:\r\n # for each site in cluster add +- each lattice vector.\r\n # produce a new cluster if we haven't already counted that one\r\n for c_index, cluster in enumerate(cluster_list):\r\n # loop over clusters\r\n coords = list(zip(cluster.xlocs, cluster.ylocs))\r\n for (xloc, yloc) in coords:\r\n # for each cluster, loop over sites\r\n for vect in vect_list:\r\n # for each site, add +/- lattice vectors and check if we have a new cluster\r\n xloc_new = xloc + vect[0]\r\n yloc_new = yloc + vect[1]\r\n if (xloc_new, yloc_new) not in coords:\r\n new_xlocs = np.concatenate((cluster.xlocs, np.array([xloc_new])))\r\n new_ylocs = np.concatenate((cluster.ylocs, np.array([yloc_new])))\r\n new_geom = geom.Geometry.createNonPeriodicGeometry(new_xlocs, new_ylocs)\r\n new_geom.permute_sites(new_geom.get_sorting_permutation())\r\n\r\n new_geom_symmetric_partners = [new_geom]\r\n if use_symmetry:\r\n new_geom_symmetric_partners = get_clusters_rel_by_symmetry(new_geom)\r\n\r\n # test if any cluster related to our new cluster by symmetry is already on our list\r\n if not [c for c in cluster_list_next if\r\n [cn for cn in new_geom_symmetric_partners if cn.isequal_adjacency(c)]]:\r\n cluster_list_next.append(new_geom)\r\n multiplicity.append(len(new_geom_symmetric_partners))\r\n\r\n return cluster_list_next, multiplicity\r\n\r\ndef get_all_clusters(max_cluster_order, lv1=np.array([1, 0]), lv2=np.array([0, 1]), use_symmetry=True):\r\n \"\"\"\r\n Get all clusters of infinite lattice up to a given order.\r\n\r\n :param max_cluster_order:\r\n :param lv1:\r\n :param lv2:\r\n :param bool use_symmetry: If 
True, will use D4 symmetry to reduce the number of clusters\r\n\r\n :return: clusters:\r\n :return multiplicities: list of the number of times a given cluster can be embedded in an infinite lattice.\r\n It is equal to the number of distinct clusters produced by point symmetry operations on the cluster\r\n :return order_edge_indices: is a list of indices, where the iith entry is the index of the first\r\n cluster of order ii.\r\n \"\"\"\r\n\r\n first_cluster, first_multiplicity = get_clusters_next_order(lv1=lv1, lv2=lv2, use_symmetry=use_symmetry)\r\n\r\n order_inds = [0, len(first_cluster)]\r\n cluster_list_list = [first_cluster]\r\n multiplicity_list_list = [first_multiplicity]\r\n\r\n for ii in range(1, max_cluster_order):\r\n clust, mult = get_clusters_next_order(cluster_list_list[ii - 1], lv1, lv2, use_symmetry)\r\n\r\n order_inds.append(len(clust) + order_inds[ii])\r\n cluster_list_list.append(clust)\r\n multiplicity_list_list.append(mult)\r\n\r\n clusters = [h for g in cluster_list_list for h in g]\r\n multiplicities = np.array([mult for g in multiplicity_list_list for mult in g])\r\n\r\n return clusters, multiplicities, order_inds\r\n\r\ndef get_all_clusters_with_subclusters(max_order, lv1=np.array([1, 0]), lv2=np.array([0, 1]),\r\n use_symmetry=True, print_progress=False):\r\n \"\"\"\r\n Obtain all sub-clusters of the infinite lattice up to a given order, including their multiplicities and sub-clusters\r\n :param max_order:\r\n :param lv1:\r\n :param lv2:\r\n :param use_symmetry:\r\n\r\n :return full_cluster_list:\r\n :return cluster_multiplicities:\r\n :return sub_cluster_mult_mat:\r\n \"\"\"\r\n\r\n full_cluster_list, cluster_multiplicities, order_indices_full = \\\r\n get_all_clusters(max_order, lv1=lv1, lv2=lv2, use_symmetry=use_symmetry)\r\n\r\n # for each of these clusters, we want to identify all subclusters with multiplicity.\r\n # To that end we define a matrix\r\n # sc[ii, jj] = m iff C[ii] > C[jj] exactly m times ...\r\n # i.e. the iith row of this matrix tells you which clusters\r\n # are contained in cluster C[ii] with what multiplicity\r\n cluster_mult_mat = sp.csr_matrix((len(full_cluster_list), len(full_cluster_list)))\r\n\r\n start_index = order_indices_full[-2]\r\n end_index = order_indices_full[-1]\r\n # loop over all clusters of the maximum size we are considering. The subcluster information of all smaller clusters\r\n # will naturally be generated during this process.\r\n for index in range(start_index, end_index):\r\n if print_progress:\r\n print(\"cluster index = %d\" % index)\r\n cluster = full_cluster_list[index]\r\n # get all sub clusters of each cluster\r\n subclusters_list, subcluster_mult_mat, order_indices_subclusters = get_reduced_subclusters(cluster)\r\n cluster_reduction_mat = map_between_cluster_bases(full_cluster_list, order_indices_full, subclusters_list, order_indices_subclusters, use_symmetry=use_symmetry)\r\n\r\n # now convert subcluster_mult_mat to correct indices...\r\n final_sub_cluster_mat = cluster_reduction_mat.transpose().dot(subcluster_mult_mat.dot(cluster_reduction_mat))\r\n\r\n # need to avoid double counting certain sites ... 
get rid of any rows if they have any nonzero elements\r\n row_sums = np.asarray(np.sum(cluster_mult_mat, 1))\r\n row_sums = 1 - (row_sums > 0)\r\n a = row_sums.reshape([len(row_sums), ]).tolist()\r\n b = sp.diags(a, offsets=0, format='csr')\r\n cluster_mult_mat = cluster_mult_mat + b.dot(final_sub_cluster_mat)\r\n\r\n return full_cluster_list, cluster_multiplicities, cluster_mult_mat, order_indices_full\r\n\r\ndef map_between_cluster_bases(cluster_basis_larger, order_indices_larger, cluster_basis_smaller,\r\n order_indices_smaller, use_symmetry=True):\r\n \"\"\"\r\n Create a matrix which maps between two different cluster bases.\r\n :param cluster_basis_larger: list of clusters. This list must contain all of the clusters in cluster_basis_smaller\r\n :param order_indices_larger:\r\n :param cluster_basis_smaller: a list of clusters\r\n :param order_indices_smaller:\r\n :param use_symmetry:\r\n\r\n :return basis_change_mat:\r\n \"\"\"\r\n\r\n # TODO: could I speed this up? Instead of a double sum, comparing all elements, can I assign a single\r\n # integer to each cluster of a given order and then do an ordered search?\r\n # now loop over the sub clusters of this order in the full cluster list (jj's) and the sub-clusters of\r\n # the given cluster (ii)\r\n\r\n # create the matrix that will map the subcluster basis to the full cluster basis\r\n # cr[ii, jj] = 1 iff sc[ii] <-> c[jj]\r\n basis_change_mat = sp.csr_matrix((len(cluster_basis_smaller), len(cluster_basis_larger)))\r\n\r\n # so far the sub cluster matrix is given in a basis which only contains subclusters of a given cluster,\r\n # but we want this matrix in a basis containing all subclusters of the infinite lattice, so we need to\r\n # go through and find the mapping in this basis\r\n max_cluster_order = np.min([len(order_indices_larger) - 1, len(order_indices_smaller) - 1])\r\n # loop over cluster orders\r\n for order in range(0, max_cluster_order):\r\n for ii in range(order_indices_smaller[order], order_indices_smaller[order + 1]):\r\n for jj in range(order_indices_larger[order], order_indices_larger[order + 1]):\r\n sub_cluster = cluster_basis_smaller[ii]\r\n symm_cluster_list = [sub_cluster]\r\n if use_symmetry:\r\n symm_cluster_list = get_clusters_rel_by_symmetry(sub_cluster)\r\n for symm_cluster in symm_cluster_list:\r\n if symm_cluster.isequal_adjacency(cluster_basis_larger[jj]):\r\n basis_change_mat[ii, jj] = 1\r\n\r\n return basis_change_mat\r\n\r\n# functions work on subclusters\r\ndef get_subclusters_next_order(parent_geometry, cluster_list=None):\r\n \"\"\"\r\n Given a list of subclusters of some parent geometry, generate all possible connected subclusters with one\r\n extra site. Also return which of the initial subclusters are contained in the higher order subclusters. This\r\n is in the form of a list of lists, where each each sublist corresponds to a cluster in cluster_list_next_order.\r\n Each sublist contains the indices of the clusters in cluster_list that are contained within the cluster in\r\n cluster_list_next_order. This is useful because in general we only care about subclusters one order lower\r\n than the cluster we are considering.(???). All lower order clusters will also be subclusters of one of these.\r\n\r\n :param parent_geometry: Geometry to generate subclusters from\r\n :param cluster_list: A collection of subclusters.\r\n\r\n :return cluster_list_next_order: list of clusters\r\n :return old_cluster_contained_in_new_clusters: a list of lists. 
Each sublist contains the\r\n indices of the clusters of lower order (i.e. the indices in the list cluster_list)\r\n representing clusters contained in the given cluster in cluster_list_next_order\r\n \"\"\"\r\n cluster_list_next = []\r\n old_cluster_contained_in_new_clusters = []\r\n if cluster_list is None:\r\n # to zeroth order, add each site as own cluster\r\n for ii in range(0, parent_geometry.nsites):\r\n gm = geom.Geometry.createNonPeriodicGeometry(parent_geometry.xlocs[ii], parent_geometry.ylocs[ii])\r\n cluster_list_next.append(gm)\r\n old_cluster_contained_in_new_clusters.append([])\r\n else:\r\n for c_index, cluster in enumerate(cluster_list):\r\n # need convenient way to switch between parent cluster and sub-cluster indexing\r\n parent_coords = list(zip(parent_geometry.xlocs, parent_geometry.ylocs))\r\n cluster_coords = list(zip(cluster.xlocs, cluster.ylocs))\r\n\r\n # loop over sites in our cluster, and try to add additional sites adjacent to them\r\n for ii, (xloc, yloc) in enumerate(cluster_coords):\r\n # parent cluster coordinate? Check this\r\n jj = [aa for aa,coord in enumerate(parent_coords) if coord==(xloc, yloc)]\r\n jj = jj[0]\r\n\r\n # loop over sites in parent geometry and check if they are adjacent\r\n for kk in range(0, parent_geometry.nsites):\r\n xloc_kk = parent_geometry.xlocs[kk]\r\n yloc_kk = parent_geometry.ylocs[kk]\r\n\r\n # if site is adjacent and not already in our cluster, make a new cluster by adding that site\r\n if parent_geometry.adjacency_mat[jj, kk] == 1 and (xloc_kk, yloc_kk) not in cluster_coords:\r\n new_xlocs = np.concatenate((cluster.xlocs, np.array([xloc_kk])))\r\n new_ylocs = np.concatenate((cluster.ylocs, np.array([yloc_kk])))\r\n #TODO: also get adjacency from the previous cluster\r\n new_geom = geom.Geometry.createNonPeriodicGeometry(new_xlocs, new_ylocs)\r\n new_geom.permute_sites(new_geom.get_sorting_permutation())\r\n #TODO: compare this cluster to make sure a duplicate doesn't already exist? So far only\r\n #dealing with real duplicates, not 'duplicates' that are the same shape and hence have the\r\n #same hamiltonian\r\n duplicates = [(ii, g) for ii, g in enumerate(cluster_list_next) if new_geom == g]\r\n if duplicates == []:\r\n cluster_list_next.append(new_geom)\r\n old_cluster_contained_in_new_clusters.append([c_index])\r\n else:\r\n new_cluster_index, _ = zip(*duplicates)\r\n new_cluster_index = new_cluster_index[0]\r\n old_cluster_contained_in_new_clusters[new_cluster_index].append(c_index)\r\n\r\n return cluster_list_next, old_cluster_contained_in_new_clusters\r\n\r\ndef get_all_subclusters(parent_geometry):\r\n \"\"\"\r\n Find all subclusters containing up to max_order sites of a given parent geometry, and return them as a list of\r\n lists. Each sublist contains all clusters with a given number of sites. The first sublist contains all clusters\r\n with one site, the second sublist contains all clusters with two sites, etc. For the purposes of this function,\r\n we regard clusters with different coordinates as being different clusters. This makes it easier to identify\r\n the multiplicity of a given subcluster.\r\n\r\n We can also think of these clusters as being numbered by their position in this list\r\n (if we imagine we have flattened the list of lists into a single\r\n list). Then for each cluster, sub_cluster_indices contains a list of the indices of all sub clusters of that\r\n cluster, where the indices are interpreted as in the previous sentence. 
E.g., we have\r\n cluster_order_list = [[gm00, gm01, gm02, ...], [gm10, gm11, ...], ...], then gm00 has index 0, gm01 has index 1,...\r\n sub_cluster_indiex = [[], [], ..., [1, 3], ...]. In this case, we interpret this as gm00 has no subclusters, and\r\n similarly for gm0n. Then gm10 has two subclusters, one with index 1 which is gm01, and on with index 3 which is\r\n gm02.\r\n\r\n :param parent_geometry:\r\n\r\n :return cluster_order_list: a list of lists of clusters\r\n :return sub_cluster_indices:\r\n :return sub_cluster_mat:\r\n :return order_start_indices:\r\n \"\"\"\r\n # TODO sub_cluster_indices is redundant as an output, and should be removed...\r\n\r\n cluster_orders_list = [] # output variable\r\n sub_cluster_indices = [] # what are these?\r\n current_order = 0\r\n total_clusters = 0\r\n total_clusters_previous = 0\r\n\r\n clusters_next_order, contained_cluster_indices = get_subclusters_next_order(parent_geometry, cluster_list=None)\r\n\r\n # continue generating the next order of clusters until we exceed the maximum order argument or reach the full\r\n # cluster specified in parent_geometry\r\n while not clusters_next_order == [] and current_order < parent_geometry.nsites:\r\n # append new cluster indices\r\n for jj, _ in enumerate(clusters_next_order):\r\n curr_indices = []\r\n if not contained_cluster_indices[jj] == []:\r\n curr_indices = [ci + total_clusters_previous for ci in contained_cluster_indices[jj]]\r\n for ii in range(0, len(curr_indices)):\r\n ci = curr_indices[ii]\r\n if not sub_cluster_indices[ci] == []:\r\n curr_indices = curr_indices + sub_cluster_indices[ci]\r\n curr_indices = sorted(list(set(curr_indices)))\r\n #print curr_indices\r\n sub_cluster_indices.append(curr_indices)\r\n\r\n # append new clusters\r\n cluster_orders_list.append(clusters_next_order)\r\n\r\n # get clusters of the next higher order\r\n total_clusters_previous = total_clusters\r\n total_clusters = total_clusters + len(clusters_next_order)\r\n clusters_next_order, contained_cluster_indices = get_subclusters_next_order(parent_geometry,\r\n cluster_orders_list[current_order])\r\n current_order = current_order + 1\r\n\r\n # generate sub_cluster_mat ... this way might be ineficient. Possibly nicer if can do it as we go along above\r\n nclusters = np.sum([len(cl) for cl in cluster_orders_list])\r\n sub_cluster_mat = sp.csc_matrix((nclusters, nclusters))\r\n for ii, sc_indices in enumerate(sub_cluster_indices):\r\n if sc_indices != []:\r\n sub_cluster_mat[ii, sc_indices] = 1\r\n\r\n # if you want a flattened version of cluster_orders_list, one way to get that is\r\n # flat = [g for h in cluster_orders_list for g in h]\r\n return cluster_orders_list, sub_cluster_indices, sub_cluster_mat\r\n\r\ndef reduce_clusters_by_geometry(cluster_orders_list, use_symmetry=True):\r\n \"\"\"\r\n Reduce clusters to those which are geometrically and/or symmetrically distinct. In contrast to\r\n get_all_subclusters, in this function we regard clusters with different coordinates for their sites as\r\n identical, provided their adjacency matrices and distancs between sites agree. Checking this properly\r\n requires us_interspecies to put our clusters in some sort of normal order before comparing them.\r\n\r\n :param cluster_orders_list:\r\n\r\n :return clusters_geom_distinct: a list of lists. Each sublist contains all the distinct clusters for a given order.\r\n :return clusters_geom_multiplicity: a list of lists. 
Each sublist contains the multiplicities of the corresponding\r\n cluster in the corresponding sublist of clusters_geom_distinct.\r\n TODO: this is now redundant with the addition of cluster_reduction_mat.\r\n :return cluster_reduction_mat: is an n_reduced_clusters x n_full_clusters matrix, where M[ii, jj] = 1 if\r\n and only if the cluster with index ii in the full list of clusters is geometrically the same as the cluster\r\n with index jj in the list of reduced clusters. In some sense we can think of this as a basis\r\n transformation matrix ...\r\n \"\"\"\r\n # TODO: would like to rewrite thise to use cluster_list and order_start_indices instead of cluster_orders_list\r\n nclusters = np.sum(np.array([len(cl) for cl in cluster_orders_list]))\r\n\r\n clusters_geom_distinct = []\r\n clusters_geom_multiplicity = []\r\n cluster_reduction_mat = sp.csr_matrix((nclusters, nclusters))\r\n running_full_cluster_total = 0\r\n running_reduced_cluster_total = 0\r\n\r\n # loop over each order of clusters and accumulate unique clusters and their multiplicity\r\n for cluster_list in cluster_orders_list:\r\n clusters_this_order = []\r\n multiplicity_this_order = []\r\n # loop over clusters\r\n for ii,cluster in enumerate(cluster_list):\r\n cluster_index_full = ii + running_full_cluster_total # cluster index in full list\r\n # check if cluster is already in our list\r\n\r\n # list all clusters that are symmetrically equivalent to our cluster. If we are not using symmetries, this\r\n # list contains only our cluster\r\n cluster_symm_partners = [cluster]\r\n if use_symmetry:\r\n cluster_symm_partners = get_clusters_rel_by_symmetry(cluster)\r\n\r\n duplicates = [(jj, g) for jj, g in enumerate(clusters_this_order) if\r\n [h for h in cluster_symm_partners if h.isequal_adjacency(g)]]\r\n\r\n if duplicates == []:\r\n # if not a duplicate, add this cluster to our list\r\n clusters_this_order.append(cluster)\r\n multiplicity_this_order.append(1)\r\n cluster_index_reduced = running_reduced_cluster_total + len(clusters_this_order) - 1\r\n else:\r\n # if is a duplicate, find index of duplicate\r\n indices_of_duplicate, _ = zip(*duplicates)\r\n indices_of_duplicate = indices_of_duplicate[0]\r\n multiplicity_this_order[indices_of_duplicate] = multiplicity_this_order[indices_of_duplicate] + 1\r\n cluster_index_reduced = indices_of_duplicate + running_reduced_cluster_total\r\n cluster_reduction_mat[cluster_index_full, cluster_index_reduced] = 1\r\n # append results to output variables\r\n clusters_geom_distinct.append(clusters_this_order)\r\n clusters_geom_multiplicity.append(multiplicity_this_order)\r\n # increment cluster counters\r\n running_full_cluster_total = running_full_cluster_total + len(cluster_list)\r\n running_reduced_cluster_total = running_reduced_cluster_total + len(clusters_this_order)\r\n # reduce size of cluster_reduction_mat by trimming trailing zeros.\r\n cluster_reduction_mat = cluster_reduction_mat[:, 0:running_reduced_cluster_total]\r\n # actually nicer to work with the transpose ... 
could rewrite the above to constrcut it directly\r\n cluster_reduction_mat = cluster_reduction_mat.transpose()\r\n\r\n return clusters_geom_distinct, clusters_geom_multiplicity, cluster_reduction_mat\r\n\r\ndef get_reduced_subclusters(parent_geometry, print_progress=False):\r\n \"\"\"\r\n For a given parent geometry, produce all subclusters and the number of times each subcluster is contained in the\r\n parent\r\n\r\n :param parent_geometry:\r\n\r\n :return cluster_list: a list of distinct sub clusters of the parent_geometry (including the parent geometry itself)\r\n :return sub_cluster_mat: a square matrix which gives the number of times cluster j can be embedded in cluster i, if\r\n cluster j is a proper sub-cluster of i (i.e. the diagonal of this matrix is zero).\r\n sub_cluster_mat[ii, jj] = # C_j < C_i\r\n :return order_edge_indices: give the indices in cluster_list where clusters of increasingly larger order appear.\r\n \"\"\"\r\n\r\n cluster_orders_list, sub_cluster_indices, sub_cluster_mat = get_all_subclusters(parent_geometry)\r\n\r\n # get unique geometric clusters of each order and multiplicity\r\n clusters_geom_distinct, clusters_geom_multiplicity, cluster_reduction_mat = \\\r\n reduce_clusters_by_geometry(cluster_orders_list, use_symmetry=True)\r\n clusters_list = [g for h in clusters_geom_distinct for g in h]\r\n\r\n # indices\r\n order_edge_indices = [0]\r\n for ii, cl_list in enumerate(clusters_geom_distinct):\r\n order_edge_indices.append(len(cl_list) + order_edge_indices[ii])\r\n\r\n # for each cluster, need to know all sub-clusters and multiplicities\r\n # M[ii, jj] = # of times cluster ii contains cluster jj\r\n # can do this in several steps.\r\n\r\n # First, let us_interspecies find the multiplicity of C^R_j, the reduced cluster of index j in C^F_i, the full cluster of index i\r\n # i.e. RF_ij = # C^R_j < C^F_i\r\n # this is just RF_ij = \\sum_k SC[i, k] * C[j, k] , with C = cluster_reduction_mat and SC = sub_cluster_mat\r\n red_cluster_mult_in_full = sub_cluster_mat.dot(cluster_reduction_mat.transpose())\r\n\r\n # Second, let us_interspecies convert the full clusters in the first index of the above matrix to reduce cluster\r\n # M_ij = # C^R_j < C^R_i\r\n # M_ij = \\sum_k C[i ,k] * RF[k, j] is our first guess, but this overcounts because we are converting each reduced\r\n # cluster to the sum of all full clusters which map onto it. We really only wanted to pick a single instantiation.\r\n # To account for this, we must divide each row by the number of full clusters that map onto that reduced cluster\r\n # i.e. we want to normalize the columns of M. 
This can be done by left multiplying M with a diagonal matrix of the\r\n # normalization factors\r\n d_mat = sp.diags(np.ravel(np.divide(1, cluster_reduction_mat.sum(1))), format='csc')\r\n reduced_sub_cluster_mat = d_mat.dot(cluster_reduction_mat.dot(red_cluster_mult_in_full))\r\n\r\n return clusters_list, reduced_sub_cluster_mat, order_edge_indices\r\n\r\ndef get_clusters_rel_by_symmetry(cluster, symmetry='d4'):\r\n \"\"\"\r\n Return all distinct clusters related to the initial cluster by symmetry.\r\n\r\n :param cluster: geometry object representing a cluster\r\n :param symmetry: TODO implement others besides D4\r\n :return cluster_symm_partners: a list of geometry objects, including the initial cluster which are related by the\r\n specified symmetry\r\n \"\"\"\r\n rot_fn = symm.getRotFn(4)\r\n refl_fn = symm.getReflFn([0, 1])\r\n\r\n cluster_symm_partners = [cluster]\r\n # list coordinates of all D4 symmetries\r\n places_round = 14\r\n xys_list = []\r\n xys_list.append(np.round(rot_fn(cluster.xlocs, cluster.ylocs), places_round))\r\n xys_list.append(np.round(rot_fn(xys_list[0][0, :], xys_list[0][1, :]), places_round))\r\n xys_list.append(np.round(rot_fn(xys_list[1][0, :], xys_list[1][1, :]), places_round))\r\n xys_list.append(np.round(refl_fn(cluster.xlocs, cluster.ylocs), places_round))\r\n xys_list.append(np.round(refl_fn(xys_list[0][0, :], xys_list[0][1, :]), places_round))\r\n xys_list.append(np.round(refl_fn(xys_list[1][0, :], xys_list[1][1, :]), places_round))\r\n xys_list.append(np.round(refl_fn(xys_list[2][0, :], xys_list[2][1, :]), places_round))\r\n\r\n # add distinct clusters to a list\r\n for xys in xys_list:\r\n c = geom.Geometry.createNonPeriodicGeometry(xys[0, :], xys[1, :])\r\n c.permute_sites(c.get_sorting_permutation())\r\n if not [h for h in cluster_symm_partners if h.isequal_adjacency(c)]:\r\n cluster_symm_partners.append(c)\r\n\r\n return cluster_symm_partners\r\n\r\n# nlce functions\r\ndef get_nlce_exp_val(exp_vals_clusters, sub_cluster_multiplicity_mat,\r\n parent_clust_multiplicity_vect, order_start_indices, nsites):\r\n \"\"\"\r\n Compute linked cluster expansion weights and expectation values from expectation values on individual clusters\r\n\r\n :param exp_vals_clusters:\r\n :param sub_cluster_multiplicity_mat:\r\n :param parent_clust_multiplicity_vect:\r\n :param order_start_indices:\r\n :param nsites:\r\n\r\n :return expectation_vals:\r\n :return cluster_weights:\r\n \"\"\"\r\n # TODO: make this function flexible enough to handle NCLE for infinite cluster or finite cluster\r\n # TODO: ensure parent_clust_multiplicity_vect is a row vector\r\n # TODO: this function goes awry if our sub_cluster_multiplicty_mat has extra clusters which we don't want to use\r\n # TODO: also return different orders of nlce expansion\r\n # e.g., if it contains all subclusters of a parent cluster, but we only diagonalized and want to work with up to\r\n # order 10 of them\r\n if sp.issparse(parent_clust_multiplicity_vect):\r\n parent_clust_multiplicity_vect = parent_clust_multiplicity_vect.toarray()\r\n if isinstance(parent_clust_multiplicity_vect, list):\r\n parent_clust_multiplicity_vect = np.array(parent_clust_multiplicity_vect)\r\n parent_clust_multiplicity_vect = np.reshape(parent_clust_multiplicity_vect, (1, parent_clust_multiplicity_vect.size))\r\n\r\n\r\n nclusters = exp_vals_clusters.shape[0]\r\n # want to accept arbitrary exp_vals shapes, subject only to the condition that the first dimension loops over clusters\r\n weights = np.zeros(exp_vals_clusters.shape)\r\n\r\n # 
compute weights in a nice tensorial way\r\n # W_p(c) = P(c) - \\sum_{s<c} W_p(s)\r\n # W_exp[ii] = P[ii] - \\sum_j sc[ii, jj] * W_exp[jj]\r\n # sc_ij = # of times C^R_j < C^R_i\r\n for ii in range(0, nclusters):\r\n # in the 2d case, we can writep\r\n # weights[ii, ...] = exp_vals_clusters[ii, ...] - sub_cluster_multiplicity_mat[ii, :].dot(weights)\r\n # in the general case, we need to sum over index 1 of sub_cluster_multiplicty_mat, and index 0 of weights.\r\n # np.dot no longer works, as this sums over the last index of the first array, and the second to last index of\r\n # the second array. To sum over arbitrary axes, use np.tensordot\r\n weights[ii, ...] = exp_vals_clusters[ii, ...] - np.squeeze(\r\n np.tensordot(sub_cluster_multiplicity_mat[ii, :].toarray(), weights, axes=(1, 0)))\r\n\r\n #exp_val_nlce[jj] = sub_cluster_multiplicity_mat[-1, :] * weights[:, jj] / nsites\r\n # exp_val_nlce = np.squeeze(np.tensordot(sub_cluster_multiplicity_mat[-1, :].toarray(), weights, axes=(1,0))) / nsites\r\n exp_val_nlce = np.squeeze(np.tensordot(parent_clust_multiplicity_vect, weights, axes=(1, 0))) / nsites\r\n\r\n b = list(exp_vals_clusters[0, ...].shape)\r\n size = [len(order_start_indices) - 1] + [int(c) for c in b]\r\n nlce_orders = np.zeros(tuple(size))\r\n for ii in range(0, len(order_start_indices) - 1):\r\n nlce_orders[ii, ...] = np.squeeze(np.tensordot(parent_clust_multiplicity_vect[0, order_start_indices[ii]:order_start_indices[ii+1]][None, :],\r\n weights[order_start_indices[ii]:order_start_indices[ii+1],...], axes=(1, 0))) / nsites\r\n\r\n return exp_val_nlce, nlce_orders, weights\r\n\r\ndef euler_resum(exp_vals_orders, y):\r\n # TODO: test\r\n euler_orders = np.zeros(exp_vals_orders.shape)\r\n\r\n for ii in range(0, euler_orders.shape[0]):\r\n jjs = np.arange(0, ii + 1)\r\n yjjs = np.power(y, jjs + 1)\r\n exp_vals_partial_orders = exp_vals_orders[0 : ii + 1, ...]\r\n binomial_coeffs = np.array([scipy.special.binom(ii, jj) for jj in jjs])\r\n partial_sum = np.tensordot(binomial_coeffs * yjjs, exp_vals_partial_orders, axes=(0, 0))\r\n euler_orders[ii, ...] = 1. / (1 + y) ** (ii + 1) * partial_sum\r\n\r\n euler_resum = np.sum(euler_orders, 0)\r\n\r\n return euler_resum, euler_orders\r\n\r\ndef wynn_resum(exp_vals_orders):\r\n pass\r\n"
] |
[
[
"scipy.sparse.csc_matrix",
"scipy.sparse.issparse",
"numpy.power",
"numpy.reshape",
"numpy.arange",
"scipy.sparse.diags",
"scipy.sparse.csr_matrix",
"numpy.tensordot",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] |
martijnbentum/immuno
|
[
"fc4d2c2e5b6e65de8bcea9e8ae43db7005eb3155"
] |
[
"repo/utils/view_ocr.py"
] |
[
"import matplotlib\nfrom matplotlib import pyplot as plt\nfrom matplotlib.patches import Rectangle\n\n\ndef page2xlim_ylim(page, extra = 100):\n\txlim = (0,page.page_width + extra)\n\tylim = (0,page.page_height + extra)\n\treturn xlim,ylim\n\t\ndef make_figure(xlim = (0,4250),ylim=(0,5500)):\n\tplt.ion()\n\tplt.figure()\n\tcurrent_axis = plt.gca(xlim=xlim,ylim=ylim)\n\tplt.show()\n\treturn current_axis\n\ndef page2figure(page, extra = 100):\n\txlim,ylim = page2xlim_ylim(page, extra)\n\treturn make_figure(xlim, ylim)\n\ndef add_rectangle(figure, x, color = 'b', alpha= 1.0,fill = None):\n\tfigure.add_patch(Rectangle((x.x0,x.y0),x.width,x.height, color=color, \n\t\tfill=fill, alpha= alpha))\n\ndef show_page(page, exclude_after_reference=False, exclude_all=False):\n\tfigure = page2figure(page)\n\tadd_rectangle(figure,page.rectangle, color = page.color)\n\t# _ = page.get_usable_objs(exclude_after_reference=exclude_after_reference,\n\t# exclude_all = exclude_all)\n\tfor block in page.blocks:\n\t\tmuted = False\n\t\twar = block.words_area_ratio\n\t\tpa = block.perc_area\n\t\twhd = block.word_hdistance\n\t\t# if not pa or pa < 2 or war > 0.09 or not whd or whd > 100:\n\t\tif not block.usable:\n\t\t\tmuted = True\n\t\t\tblock.color = 'y'\n\t\t\tblock.alpha = 0.1\n\t\tshow_block(block,figure, muted = muted)\n\t\t# ref = x.reference\n\t\tx = block\n\t\txcenter,ycenter = x.rectangle.center\n\t\tleft = x.rectangle.left\n\t\t# if x.index: overlap = x.get_perc_overlap()\n\t\t# else: overlap = ''\n\t\tt = str(x.perc_area) \n\t\tif x.word_hdistance:t+= ' | ' + str(x.word_hdistance)\n\t\tif war: t+= ' | ' + str(war)\n\t\tt += ' | w:' + str(x.rectangle.width)\n\t\tplt.text(left +10,ycenter,x.block_number+ ' ' + t, alpha=0.8,\n\t\t\tcolor = x.color)\n\t\t# if ref: plt.text(x.left,x.bottom,'reference',color = x.color)\n\ndef show_block(block,figure, muted =False):\n\tadd_rectangle(figure,block.rectangle,color=block.color, alpha=block.alpha)\n\tfor line in block.lines:\n\t\tshow_line(line,figure, muted)\n\ndef show_line(line,figure, muted = False):\n\tif muted:\n\t\tline.color = 'y'\n\t\tline.alpha = 0.1\n\tadd_rectangle(figure,line.rectangle,color=line.color, alpha=line.alpha)\n\tfor word in line.words:\n\t\tshow_word(word,figure, muted)\n\ndef show_word(word,figure, muted = False):\n\tif muted:\n\t\tword.color = 'y'\n\t\tword.alpha = 0.1\n\tadd_rectangle(figure,word.rectangle,color=word.color, alpha=word.alpha)\n\ndef show_text_objects(objs, colors = None):\n\tfigure = make_figure()\n\tfor i,x in enumerate(objs):\n\t\tif not colors:color = 'b'\n\t\telif type(colors) == str: color = colors\n\t\telif i >= len(colors): color = 'b'\n\t\telse:color = colors[i]\n\t\tadd_rectangle(figure,x,color=color, alpha = 1)\n\t\n"
] |
[
[
"matplotlib.pyplot.gca",
"matplotlib.patches.Rectangle",
"matplotlib.pyplot.text",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
romanroibu/pyglui
|
[
"ab316d094e975491032ce2b395a03d922b03222c"
] |
[
"setup.py"
] |
[
"from __future__ import print_function\n\nimport io\nimport os\nimport platform\nimport re\nimport sys\nfrom stat import ST_MTIME\n\nimport numpy\nfrom Cython.Build import cythonize\nfrom setuptools import Extension, setup\n\ndir_containing_glew = os.path.dirname(__file__)\nsys.path.append(dir_containing_glew)\nfrom glew_pxd import generate_pxd\nsys.path.remove(dir_containing_glew)\n\ndef read(*names, **kwargs):\n with io.open(\n os.path.join(os.path.dirname(__file__), *names),\n encoding=kwargs.get(\"encoding\", \"utf8\"),\n ) as fp:\n return fp.read()\n\n\n# pip's single-source version method as described here:\n# https://python-packaging-user-guide.readthedocs.io/single_source_version/\ndef find_version(*file_paths):\n version_file = read(*file_paths)\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\", version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\n\nrequirements = []\nif platform.system() != \"Windows\":\n requirements.append(\"cysignals\")\n\n\nincludes = [\"pyglui/cygl/\", \".\", numpy.get_include()]\nglew_binaries = []\nlib_dir = []\nfontstash_compile_args = [\n \"-D FONTSTASH_IMPLEMENTATION\",\n \"-D GLFONTSTASH_IMPLEMENTATION\",\n]\n\nif platform.system() == \"Darwin\":\n # find glew irrespective of version\n for root, dirs, files in os.walk(\"/usr/local/Cellar/glew\"):\n if \"glew.h\" in files:\n glew_header = os.path.join(root, \"glew.h\")\n includes += [\n \"/System/Library/Frameworks/OpenGL.framework/Versions/Current/Headers/\",\n \"/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/OpenGL.framework/Headers/\",\n ]\n link_args = []\n libs = [\"GLEW\"]\n libglew = [] # we are using the dylib\n extra_compile_args = [\"-Wno-strict-aliasing\", \"-O2\"]\nelif platform.system() == \"Linux\":\n glew_header = \"/usr/include/GL/glew.h\"\n includes += [\"/usr/include/GL\"]\n libs = [\"GLEW\", \"GL\"] # GL needed for fonstash\n link_args = []\n extra_compile_args = [\"-Wno-strict-aliasing\", \"-O2\"]\nelif platform.system() == \"Windows\":\n glew_header = \"pyglui/cygl/win_glew/gl/glew.h\"\n includes += [\"pyglui/cygl/win_glew\"]\n libs = [\"glew32\", \"OpenGL32\"]\n lib_dir = [\"pyglui/cygl/win_glew\"]\n link_args = []\n gl_compile_args = [] # ['/DGL_GLEXT_PROTOTYPES']\n extra_compile_args = [\"-O2\"]\n fontstash_compile_args = [\n \"/DFONTSTASH_IMPLEMENTATION\",\n \"/DGLFONTSTASH_IMPLEMENTATION\",\n ]\n glew_binaries = [(\"\", [\"pyglui/cygl/win_glew/glew32.dll\"])]\nelse:\n raise Exception(\"Platform build not implemented.\")\n\n\nif (\n os.path.isfile(\"pyglui/cygl/glew.pxd\")\n and os.stat(\"pyglui/cygl/glew.pxd\")[ST_MTIME] > os.stat(glew_header)[ST_MTIME]\n):\n print(\"'glew.pxd' is up-to-date.\")\nelse:\n print(\"generating glew.pxd based on '%s'\" % glew_header)\n generate_pxd(glew_header, \"pyglui/cygl\")\n\n\nextensions = [\n Extension(\n name=\"pyglui.ui\",\n sources=[\"pyglui/ui.pyx\"],\n include_dirs=includes + [\"pyglui/pyfontstash/fontstash/src\"],\n libraries=libs,\n library_dirs=lib_dir,\n extra_link_args=link_args,\n extra_compile_args=extra_compile_args,\n language=\"c++\",\n ),\n Extension(\n name=\"pyglui.graph\",\n sources=[\"pyglui/graph.pyx\"],\n include_dirs=includes + [\"pyglui/pyfontstash/fontstash/src\"],\n libraries=libs,\n library_dirs=lib_dir,\n extra_link_args=link_args,\n extra_compile_args=extra_compile_args,\n language=\"c++\",\n ),\n Extension(\n name=\"pyglui.cygl.utils\",\n 
sources=[\"pyglui/cygl/utils.pyx\"],\n include_dirs=includes,\n libraries=libs,\n library_dirs=lib_dir,\n extra_link_args=link_args,\n extra_compile_args=extra_compile_args,\n language=\"c++\",\n ),\n Extension(\n name=\"pyglui.cygl.shader\",\n sources=[\"pyglui/cygl/shader.pyx\"],\n include_dirs=includes,\n libraries=libs,\n library_dirs=lib_dir,\n extra_link_args=link_args,\n extra_compile_args=extra_compile_args,\n language=\"c++\",\n ),\n Extension(\n name=\"pyglui.pyfontstash.fontstash\",\n sources=[\"pyglui/pyfontstash/fontstash.pyx\"],\n include_dirs=includes + [\"pyglui/pyfontstash/fontstash/src\"],\n libraries=libs,\n library_dirs=lib_dir,\n extra_link_args=link_args,\n extra_compile_args=extra_compile_args + fontstash_compile_args,\n ),\n]\n\n\npyglui_version = find_version(\"pyglui\", \"__init__.py\")\n\nsetup(\n name=\"pyglui\",\n version=pyglui_version,\n packages=[\"pyglui\"],\n install_requires=requirements,\n py_modules=[\n \"pyglui.cygl.__init__\",\n \"pyglui.pyfontstash.__init__\",\n ], # add __init__.py files\n description=\"OpenGL UI powered by Cython\",\n url=\"https://github.com/pupil-labs/pyglui\",\n author=\"Pupil Labs\",\n author_email=\"info@pupil-labs.com\",\n license=\"MIT\",\n data_files=glew_binaries,\n package_dir={\"pyglui\": \"pyglui\"},\n package_data={\"pyglui\": [\"*.ttf\"]}, # fonts\n ext_modules=cythonize(extensions),\n)\n"
] |
[
[
"numpy.get_include"
]
] |
poelsner/nappy
|
[
"d3ede80138f0fc41deba9b7338932b8be2943e3f"
] |
[
"nappy/nc_interface/na_to_nc.py"
] |
[
"# Copyright (C) 2004 CCLRC & NERC( Natural Environment Research Council ).\r\n# This software may be distributed under the terms of the\r\n# Q Public License, version 1.0 or later. http://ndg.nerc.ac.uk/public_docs/QPublic_license.txt\r\n\r\n\"\"\"\r\nna_to_nc.py\r\n===========\r\n\r\nContains the NAToNC class for converting a NASA Ames file to a NetCDF file.\r\n\r\n\"\"\"\r\n\r\n# Imports from python standard library\r\nimport logging\r\nfrom collections.abc import Sequence\r\n\r\n# Third-party imports\r\nimport xarray as xr\r\nimport numpy as np\r\n\r\n# Import from nappy package\r\nimport nappy.nc_interface.na_to_xarray\r\n\r\nlog = logging.getLogger(__name__)\r\n\r\n\r\nclass NAToNC(nappy.nc_interface.na_to_xarray.NADictToXarrayObjects):\r\n \"\"\"\r\n Converts a NASA Ames file to a NetCDF file.\r\n \"\"\"\r\n \r\n def __init__(self, na_file, variables=None, aux_variables=None,\r\n global_attributes=None,\r\n time_units=None, time_warning=True, \r\n rename_variables=None):\r\n \"\"\"\r\n Sets up instance variables. Note that the argument 'na_file' has a relaxes definition\r\n and can be either a NASA Ames file object or the name of a NASA AMES file.\r\n Typical usage is:\r\n >>> import nappy.nc_interface.na_to_nc as na_to_nc\r\n >>> c = na_to_nc.NAToNC(\"old_file.na\") \r\n >>> c.convert()\r\n >>> c.writeNCFile(\"new_file.nc\") \r\n \"\"\"\r\n if global_attributes is None:\r\n global_attributes = []\r\n\r\n if not rename_variables:\r\n rename_variables = {}\r\n\r\n # First open na_file if it is a file rather than an na_file object\r\n na_file_obj = na_file\r\n if type(na_file_obj) == type(\"string\"):\r\n na_file_obj = nappy.openNAFile(na_file_obj)\r\n\r\n nappy.nc_interface.na_to_xarray.NADictToXarrayObjects.__init__(self, na_file_obj, variables=variables, \r\n aux_variables=aux_variables,\r\n global_attributes=global_attributes,\r\n time_units=time_units, time_warning=time_warning, \r\n rename_variables=rename_variables)\r\n\r\n\r\n def fix_ints(self, dct, key):\r\n \"\"\"\r\n Convert integer values in a dict to numpy.int32s - so they just show up as integers in ncdump\r\n (rather than 2LL (e.g. long longs)).\r\n\r\n Fixes the those dictionary items: either integers or a sequence of integers, then converts and\r\n returns them.\r\n \"\"\"\r\n def to_np_int(_value):\r\n return np.int32(_value)\r\n\r\n value = dct.get(key)\r\n\r\n if isinstance(value, Sequence) and all([isinstance(v, int) for v in value]):\r\n dct[key] = [to_np_int(v) for v in value]\r\n elif isinstance(value, int):\r\n dct[key] = to_np_int(value)\r\n\r\n def fix_attrs(self, obj):\r\n \"\"\"\r\n Check each attr of the object, and fix integer/integer-sequence values.\r\n \"\"\"\r\n for key in obj.attrs:\r\n self.fix_ints(obj.attrs, key)\r\n\r\n def writeNCFile(self, file_name, mode=\"w\"):\r\n \"\"\"\r\n Writes the NASA Ames content that has been converted into Xarray objects to a\r\n NetCDF file of name 'file_name'. 
Note that mode can be set to append so you \r\n can add the data to an existing file.\r\n \"\"\"\r\n if not self.converted:\r\n self.convert()\r\n\r\n # Build an Xarray Dataset and then write it to NetCDF\r\n combined_var_list = self.xr_variables + self.xr_aux_variables\r\n\r\n # Fix integers in attributes\r\n [self.fix_attrs(v) for v in combined_var_list]\r\n\r\n # Create the Datset\r\n variables = {da.name: da for da in combined_var_list}\r\n ds = xr.Dataset(variables, attrs=dict(self.global_attributes))\r\n self.fix_attrs(ds)\r\n\r\n # Write to NetCDF\r\n ds.to_netcdf(file_name)\r\n\r\n log.info(f\"NetCDF file '{file_name}' written successfully.\")\r\n return True\r\n"
] |
[
[
"numpy.int32"
]
] |
adriansmares/openml-deeplearning
|
[
"44856ae5f79b553acdb3001e52ca981922f3093f"
] |
[
"tests/test_flows/test_flow_functions.py"
] |
[
"from collections import OrderedDict\nimport copy\nimport unittest\n\nfrom distutils.version import LooseVersion\nimport sklearn\nimport pandas as pd\n\nimport openml\nfrom openml.testing import TestBase\nimport openml.extensions.sklearn\n\n\nclass TestFlowFunctions(TestBase):\n _multiprocess_can_split_ = True\n\n def _check_flow(self, flow):\n self.assertEqual(type(flow), dict)\n self.assertEqual(len(flow), 6)\n self.assertIsInstance(flow['id'], int)\n self.assertIsInstance(flow['name'], str)\n self.assertIsInstance(flow['full_name'], str)\n self.assertIsInstance(flow['version'], str)\n # There are some runs on openml.org that can have an empty external version\n ext_version_str_or_none = (isinstance(flow['external_version'], str)\n or flow['external_version'] is None)\n self.assertTrue(ext_version_str_or_none)\n\n def test_list_flows(self):\n openml.config.server = self.production_server\n # We can only perform a smoke test here because we test on dynamic\n # data from the internet...\n flows = openml.flows.list_flows()\n # 3000 as the number of flows on openml.org\n self.assertGreaterEqual(len(flows), 1500)\n for fid in flows:\n self._check_flow(flows[fid])\n\n def test_list_flows_output_format(self):\n openml.config.server = self.production_server\n # We can only perform a smoke test here because we test on dynamic\n # data from the internet...\n flows = openml.flows.list_flows(output_format='dataframe')\n self.assertIsInstance(flows, pd.DataFrame)\n self.assertGreaterEqual(len(flows), 1500)\n\n def test_list_flows_empty(self):\n openml.config.server = self.production_server\n flows = openml.flows.list_flows(tag='NoOneEverUsesThisTag123')\n if len(flows) > 0:\n raise ValueError(\n 'UnitTest Outdated, got somehow results (please adapt)'\n )\n\n self.assertIsInstance(flows, dict)\n\n def test_list_flows_by_tag(self):\n openml.config.server = self.production_server\n flows = openml.flows.list_flows(tag='weka')\n self.assertGreaterEqual(len(flows), 5)\n for did in flows:\n self._check_flow(flows[did])\n\n def test_list_flows_paginate(self):\n openml.config.server = self.production_server\n size = 10\n maximum = 100\n for i in range(0, maximum, size):\n flows = openml.flows.list_flows(offset=i, size=size)\n self.assertGreaterEqual(size, len(flows))\n for did in flows:\n self._check_flow(flows[did])\n\n def test_are_flows_equal(self):\n flow = openml.flows.OpenMLFlow(name='Test',\n description='Test flow',\n model=None,\n components=OrderedDict(),\n parameters=OrderedDict(),\n parameters_meta_info=OrderedDict(),\n external_version='1',\n tags=['abc', 'def'],\n language='English',\n dependencies='abc',\n class_name='Test',\n custom_name='Test')\n\n # Test most important values that can be set by a user\n openml.flows.functions.assert_flows_equal(flow, flow)\n for attribute, new_value in [('name', 'Tes'),\n ('description', 'Test flo'),\n ('external_version', '2'),\n ('language', 'english'),\n ('dependencies', 'ab'),\n ('class_name', 'Tes'),\n ('custom_name', 'Tes')]:\n new_flow = copy.deepcopy(flow)\n setattr(new_flow, attribute, new_value)\n self.assertNotEqual(\n getattr(flow, attribute),\n getattr(new_flow, attribute),\n )\n self.assertRaises(\n ValueError,\n openml.flows.functions.assert_flows_equal,\n flow,\n new_flow,\n )\n\n # Test that the API ignores several keys when comparing flows\n openml.flows.functions.assert_flows_equal(flow, flow)\n for attribute, new_value in [('flow_id', 1),\n ('uploader', 1),\n ('version', 1),\n ('upload_date', '18.12.1988'),\n ('binary_url', 
'openml.org'),\n ('binary_format', 'gzip'),\n ('binary_md5', '12345'),\n ('model', []),\n ('tags', ['abc', 'de'])]:\n new_flow = copy.deepcopy(flow)\n setattr(new_flow, attribute, new_value)\n self.assertNotEqual(\n getattr(flow, attribute),\n getattr(new_flow, attribute),\n )\n openml.flows.functions.assert_flows_equal(flow, new_flow)\n\n # Now test for parameters\n flow.parameters['abc'] = 1.0\n flow.parameters['def'] = 2.0\n openml.flows.functions.assert_flows_equal(flow, flow)\n new_flow = copy.deepcopy(flow)\n new_flow.parameters['abc'] = 3.0\n self.assertRaises(ValueError, openml.flows.functions.assert_flows_equal,\n flow, new_flow)\n\n # Now test for components (subflows)\n parent_flow = copy.deepcopy(flow)\n subflow = copy.deepcopy(flow)\n parent_flow.components['subflow'] = subflow\n openml.flows.functions.assert_flows_equal(parent_flow, parent_flow)\n self.assertRaises(ValueError,\n openml.flows.functions.assert_flows_equal,\n parent_flow, subflow)\n new_flow = copy.deepcopy(parent_flow)\n new_flow.components['subflow'].name = 'Subflow name'\n self.assertRaises(ValueError,\n openml.flows.functions.assert_flows_equal,\n parent_flow, new_flow)\n\n def test_are_flows_equal_ignore_parameter_values(self):\n paramaters = OrderedDict((('a', 5), ('b', 6)))\n parameters_meta_info = OrderedDict((('a', None), ('b', None)))\n\n flow = openml.flows.OpenMLFlow(\n name='Test',\n description='Test flow',\n model=None,\n components=OrderedDict(),\n parameters=paramaters,\n parameters_meta_info=parameters_meta_info,\n external_version='1',\n tags=['abc', 'def'],\n language='English',\n dependencies='abc',\n class_name='Test',\n custom_name='Test',\n )\n\n openml.flows.functions.assert_flows_equal(flow, flow)\n openml.flows.functions.assert_flows_equal(flow, flow,\n ignore_parameter_values=True)\n\n new_flow = copy.deepcopy(flow)\n new_flow.parameters['a'] = 7\n self.assertRaisesRegex(\n ValueError,\n r\"values for attribute 'parameters' differ: \"\n r\"'OrderedDict\\(\\[\\('a', 5\\), \\('b', 6\\)\\]\\)'\\nvs\\n\"\n r\"'OrderedDict\\(\\[\\('a', 7\\), \\('b', 6\\)\\]\\)'\",\n openml.flows.functions.assert_flows_equal,\n flow, new_flow,\n )\n openml.flows.functions.assert_flows_equal(flow, new_flow,\n ignore_parameter_values=True)\n\n del new_flow.parameters['a']\n self.assertRaisesRegex(\n ValueError,\n r\"values for attribute 'parameters' differ: \"\n r\"'OrderedDict\\(\\[\\('a', 5\\), \\('b', 6\\)\\]\\)'\\nvs\\n\"\n r\"'OrderedDict\\(\\[\\('b', 6\\)\\]\\)'\",\n openml.flows.functions.assert_flows_equal,\n flow, new_flow,\n )\n self.assertRaisesRegex(\n ValueError,\n r\"Flow Test: parameter set of flow differs from the parameters \"\n r\"stored on the server.\",\n openml.flows.functions.assert_flows_equal,\n flow, new_flow, ignore_parameter_values=True,\n )\n\n def test_are_flows_equal_ignore_if_older(self):\n paramaters = OrderedDict((('a', 5), ('b', 6)))\n parameters_meta_info = OrderedDict((('a', None), ('b', None)))\n flow_upload_date = '2017-01-31T12-01-01'\n assert_flows_equal = openml.flows.functions.assert_flows_equal\n\n flow = openml.flows.OpenMLFlow(name='Test',\n description='Test flow',\n model=None,\n components=OrderedDict(),\n parameters=paramaters,\n parameters_meta_info=parameters_meta_info,\n external_version='1',\n tags=['abc', 'def'],\n language='English',\n dependencies='abc',\n class_name='Test',\n custom_name='Test',\n upload_date=flow_upload_date)\n\n assert_flows_equal(flow, flow, ignore_parameter_values_on_older_children=flow_upload_date)\n assert_flows_equal(flow, flow, 
ignore_parameter_values_on_older_children=None)\n new_flow = copy.deepcopy(flow)\n new_flow.parameters['a'] = 7\n self.assertRaises(ValueError, assert_flows_equal, flow, new_flow,\n ignore_parameter_values_on_older_children=flow_upload_date)\n self.assertRaises(ValueError, assert_flows_equal, flow, new_flow,\n ignore_parameter_values_on_older_children=None)\n\n new_flow.upload_date = '2016-01-31T12-01-01'\n self.assertRaises(ValueError, assert_flows_equal, flow, new_flow,\n ignore_parameter_values_on_older_children=flow_upload_date)\n assert_flows_equal(flow, flow, ignore_parameter_values_on_older_children=None)\n\n @unittest.skipIf(LooseVersion(sklearn.__version__) < \"0.20\",\n reason=\"OrdinalEncoder introduced in 0.20. \"\n \"No known models with list of lists parameters in older versions.\")\n def test_sklearn_to_flow_list_of_lists(self):\n from sklearn.preprocessing import OrdinalEncoder\n ordinal_encoder = OrdinalEncoder(categories=[[0, 1], [0, 1]])\n\n extension = openml.extensions.sklearn.SklearnExtension()\n\n # Test serialization works\n flow = extension.model_to_flow(ordinal_encoder)\n\n # Test flow is accepted by server\n self._add_sentinel_to_flow_name(flow)\n flow.publish()\n\n # Test deserialization works\n server_flow = openml.flows.get_flow(flow.flow_id, reinstantiate=True)\n self.assertEqual(server_flow.parameters['categories'], '[[0, 1], [0, 1]]')\n self.assertEqual(server_flow.model.categories, flow.model.categories)\n"
] |
[
[
"sklearn.preprocessing.OrdinalEncoder"
]
] |
DDMGNI/viIMHD2D
|
[
"d7c3546a6245fa0b12538a785146af3895f16e18"
] |
[
"examples/current_sheet_continuous_steep.py"
] |
[
"\nimport numpy as np\n\nu0 = 0.1\nB0 = 1.0\np0 = 0.1\n\nx1 = 0.5\nx2 = 1.5\nc = 20.\n\n\ndef magnetic_x(x, y, hx, hy):\n return 0.0\n\ndef magnetic_y(x, y, hx, hy):\n return np.tanh(c*(x-x1)) - np.tanh(c*(x-x2)) - 1.\n \ndef velocity_x(x, y, hx, hy):\n return u0 * np.sin(np.pi * y)\n\ndef velocity_y(x, y, hx, hy):\n return 0.0\n\ndef pressure(x, y, hx, hy):\n return p0\n"
] |
[
[
"numpy.tanh",
"numpy.sin"
]
] |
hanjinda/word2vec-explorer
|
[
"46c3f45e647dab82110bc8d21824b8d3063574c6"
] |
[
"explorer_old.py"
] |
[
"import math\nimport gensim\nimport cPickle\nimport numpy as np\nfrom tsne import bh_sne\nfrom sklearn.cluster import KMeans\nfrom gensim.models import KeyedVectors\n\nclass Exploration(dict):\n\n def __init__(self, query, labels=[], vectors=[]):\n self.query = query\n self.parsed_query = {}\n self.labels = labels\n self.vectors = vectors\n self.reduction = []\n self.clusters = []\n self.distances = []\n self.stats = {}\n\n def reduce(self):\n print('Performing tSNE reduction' +\n 'on {} vectors'.format(len(self.vectors)))\n self.reduction = bh_sne(np.array(self.vectors, dtype=np.float64))\n\n def cluster(self, num_clusters=30):\n clustering = KMeans(n_clusters=num_clusters)\n clustering.fit(self.reduction)\n self.clusters = clustering.labels_\n clustermatrix = []\n reduction = self.reduction.tolist()\n for cluster_id in range(num_clusters):\n clustermatrix.append([reduction[i]\n for i in range(len(self.vectors))\n if self.clusters[i] == cluster_id])\n self.cluster_centroids = clustering.cluster_centers_.tolist()\n self.cluster_centroids_closest_nodes = []\n for cluster_id in range(num_clusters):\n nodes_for_cluster = clustermatrix[cluster_id]\n centroid = self.cluster_centroids[cluster_id]\n closest_node_to_centroid = self._closest_node(\n centroid, nodes_for_cluster)\n coords = nodes_for_cluster[closest_node_to_centroid]\n node_id = reduction.index(coords)\n self.cluster_centroids_closest_nodes.append(node_id)\n\n def serialize(self):\n result = {\n 'query': self.query,\n 'parsed_query': self.parsed_query,\n 'labels': self.labels,\n 'stats': self.stats\n }\n if len(self.reduction) > 0:\n result['reduction'] = self.reduction.tolist()\n if len(self.distances) > 0:\n result['distances'] = self.distances\n if len(self.clusters) > 0:\n result['clusters'] = self.clusters.tolist()\n result['cluster_centroids'] = self.cluster_centroids\n closest_nodes = self.cluster_centroids_closest_nodes\n result['cluster_centroids_closest_nodes'] = closest_nodes\n return result\n\n def _closest_node(self, node, nodes):\n nodes = np.asarray(nodes)\n dist_2 = np.sum((nodes - node)**2, axis=1)\n return np.argmin(dist_2)\n\n\nclass Model(object):\n\n def __init__(self, filename):\n try:\n self.model = gensim.models.Word2Vec.load(filename)\n except cPickle.UnpicklingError:\n self.model = KeyedVectors.load_word2vec_format(filename, binary=False)\n # load = gensim.models.KeyedVectors.load_word2vec_format\n # self.model = load(filename, binary=False)\n\n def autocomplete(self, query, limit):\n words = []\n i = 0\n for word in self.model.vocab:\n if word.startswith(query):\n words.append({\n 'word': word,\n 'count': self.model.vocab[word].count})\n i += 1\n\n words = sorted(words, key=lambda x: x['count'], reverse=True)\n return words[0:limit]\n\n def compare(self, queries, limit):\n all_words = []\n comparison_words = []\n for query in queries:\n positive, negative = self._parse_query(query)\n comparison_words.append(positive[0])\n words, vectors, distances = self._most_similar_vectors(positive, negative, limit)\n all_words += words\n\n matrix = []\n labels = []\n for word in all_words:\n coordinates = []\n for word2 in comparison_words:\n distance = self.model.n_similarity([word2], [word])\n coordinates.append(distance)\n matrix.append(coordinates)\n labels.append(word)\n\n return {'labels': labels, 'comparison': matrix}\n\n def explore(self, query, limit=1000):\n print('Model#explore query={}, limit={}'.format(query, limit))\n exploration = Exploration(query)\n if len(query):\n positive, negative = 
self._parse_query(query)\n exploration.parsed_query['positive'] = positive\n exploration.parsed_query['negative'] = negative\n labels, vectors, distances = self._most_similar_vectors(positive, negative, limit)\n exploration.labels = labels\n exploration.vectors = vectors\n exploration.distances = distances\n else:\n exploration.labels, exploration.vectors, sample_rate = self._all_vectors(limit)\n exploration.stats['sample_rate'] = sample_rate\n exploration.stats['vocab_size'] = len(self.model.vocab)\n exploration.stats['num_vectors'] = len(exploration.vectors)\n return exploration\n\n def _most_similar_vectors(self, positive, negative, limit):\n print('Model#_most_similar_vectors' +\n 'positive={}, negative={}, limit={}'.format(positive, negative, limit))\n results = self.model.most_similar(positive=positive, negative=negative, topn=limit)\n labels = []\n vectors = []\n distances = []\n for key, distance in results:\n distances.append(distance)\n labels.append(key)\n vectors.append(self.model[key])\n return labels, vectors, distances\n\n def _parse_query(self, query):\n expressions = query.split(' AND ')\n positive = []\n negative = []\n for expression in expressions:\n if expression.startswith('NOT '):\n negative.append(expression[4:])\n else:\n positive.append(expression)\n return positive, negative\n\n def _all_vectors(self, limit):\n sample = 1\n if limit > -1:\n sample = int(math.ceil(len(self.model.vocab) / limit))\n sample_rate = float(limit) / len(self.model.vocab)\n print('Model#_most_similar_vectors' +\n 'sample={}, sample_rate={}, limit={}'.format(sample, sample_rate, limit))\n labels = []\n vectors = []\n i = 0\n for word in self.model.vocab:\n if (i % sample) == 0:\n vectors.append(self.model[word])\n labels.append(word)\n i += 1\n return labels, vectors, sample_rate\n"
] |
[
[
"sklearn.cluster.KMeans",
"numpy.asarray",
"numpy.argmin",
"numpy.array",
"numpy.sum"
]
] |
JesseTG/Sock
|
[
"97b2f76dae324708a26bb46ce466680e6e4c769e"
] |
[
"tests/test_pinning.py"
] |
[
"import pytest\nimport torch\nfrom torch import Tensor\n\nfrom .marks import *\n\nSIZE = torch.Size([100, 100, 100])\n\n\n@pytest.fixture(scope=\"module\")\ndef data_cpu():\n return torch.rand(SIZE, dtype=torch.float64, device=\"cpu\")\n\n\n@pytest.fixture(scope=\"module\")\ndef data_cuda():\n return torch.rand(SIZE, dtype=torch.float64, device=\"cuda\")\n\n\n@pytest.mark.benchmark(group=\"test_bench_pin\")\ndef test_bench_pin(benchmark, data_cpu: Tensor):\n result = benchmark(Tensor.pin_memory, data_cpu) # type: Tensor\n\n assert result is not None\n assert result.is_pinned()\n\n\n@needs_cuda\n@pytest.mark.benchmark(group=\"test_bench_copy_cpu_to_cuda\")\n@pytest.mark.parametrize(\"pin\", [False, True], ids=(\"unpinned\", \"pinned\"))\n@pytest.mark.parametrize(\"non_blocking\", [False, True], ids=(\"blocking\", \"non_blocking\"))\ndef test_bench_copy_cpu_to_cuda_new_tensor(benchmark, data_cpu: Tensor, pin: bool, non_blocking: bool):\n data = data_cpu.pin_memory() if pin else data_cpu\n\n result = benchmark(data.cuda, non_blocking=non_blocking)\n\n assert result is not None\n assert result.is_cuda\n\n\n@needs_cuda\n@pytest.mark.benchmark(group=\"test_bench_copy_cpu_to_cuda\")\n@pytest.mark.parametrize(\"pin\", [False, True], ids=(\"unpinned\", \"pinned\"))\n@pytest.mark.parametrize(\"non_blocking\", [False, True], ids=(\"blocking\", \"non_blocking\"))\ndef test_bench_copy_cpu_to_cuda_existing_tensor(benchmark, data_cpu: Tensor, pin: bool, non_blocking: bool):\n data = data_cpu.pin_memory() if pin else data_cpu\n destination = torch.empty_like(data_cpu, device=\"cuda\")\n destination_addr = destination.data_ptr()\n result = benchmark(destination.copy_, data, non_blocking=non_blocking)\n\n assert result is not None\n assert result.data_ptr() == destination_addr\n assert result.is_cuda\n\n\n@needs_cuda\n@pytest.mark.benchmark(group=\"test_bench_copy_cuda_to_cpu\")\n@pytest.mark.parametrize(\"non_blocking\", [False, True], ids=(\"blocking\", \"non_blocking\"))\ndef test_bench_copy_cuda_to_cpu_new_tensor(benchmark, data_cuda: Tensor, non_blocking: bool):\n result = benchmark(data_cuda.to, device=\"cpu\", non_blocking=non_blocking)\n\n assert result is not None\n assert not result.is_cuda\n\n\n@needs_cuda\n@pytest.mark.benchmark(group=\"test_bench_copy_cuda_to_cpu\")\n@pytest.mark.parametrize(\"pin\", [False, True], ids=(\"unpinned\", \"pinned\"))\n@pytest.mark.parametrize(\"non_blocking\", [False, True], ids=(\"blocking\", \"non_blocking\"))\ndef test_bench_copy_cuda_to_cpu_existing_tensor(benchmark, data_cuda: Tensor, pin: bool, non_blocking: bool):\n destination = torch.empty_like(data_cuda, device=\"cpu\")\n destination = destination.pin_memory() if pin else destination\n destination_addr = destination.data_ptr()\n\n result = benchmark(destination.copy_, data_cuda, non_blocking=non_blocking)\n\n assert result is not None\n assert result.data_ptr() == destination_addr\n assert not result.is_cuda\n assert result.is_pinned() == pin\n"
] |
[
[
"torch.Size",
"torch.empty_like",
"torch.rand"
]
] |
piinghel/JupyterWorkflow_TemplateJVD
|
[
"2d493edbfea9d79cd5d70b4d2241b44512657b6e"
] |
[
"jupyterworkflow/data.py"
] |
[
"import os\nfrom urllib.request import urlretrieve\nimport pandas as pd\n\nFREMONT_URL = 'https://data.seattle.gov/api/views/65db-xm6k/rows.csv?accessType=DOWNLOAD'\n\ndef get_fremont_data(filename='Fremont.csv', url=FREMONT_URL, \n force_download=False):\n \n \"\"\"Download and cache the fremont data\n \n Parameters\n ----------\n filename: string (optional)\n location to save the data\n url: string (optional)\n web location of the data\n force download: bool (optional)\n if True, force redownload of data\n \n Returns\n -------\n data: pandas.DataFrame\n The fremont bridge data\n \"\"\"\n \n if force_download or not os.path.exists(filename):\n urlretrieve(url, filename)\n \n data = pd.read_csv('Fremont.csv', index_col='Date')\n try:\n data.index = pd.to_datetime(data.index, format='%m/%d/%Y %I:%M:%S %p')\n except TypeError:\n data.index = pd.to_datetime(data.index)\n data.columns = ['West','East']\n data['Total'] = data['West'] + data['East']\n return data"
] |
[
[
"pandas.read_csv",
"pandas.to_datetime"
]
] |
NOBLES5E/bagua
|
[
"c6f029f14c7c08383282f8efe22e7c0aee35fab1"
] |
[
"bagua/contrib/load_balancing_data_loader/__init__.py"
] |
[
"import torch\nimport math\nimport torch.distributed as dist\nfrom torch.utils.data.sampler import Sampler\nfrom torch.utils.data.dataset import Dataset\nfrom typing import TypeVar, Optional, Iterator\nfrom collections import OrderedDict\nimport logging\n\n\nclass LoadBalancingDistributedSampler(Sampler):\n r\"\"\"Sampler that restricts data loading to a subset of the dataset.\n\n This sampler use a complexity_fn to calculate each sample's computational\n complexity and make each batch get similar computation complexity.\n\n This is useful in scenarios like speech and NLP, where each batch has variable\n length and distributed training suffers from straggler problem.\n\n The usage is similar to :class:`torch.utils.data.DistributedSampler`, where each\n process loads a subset of the original dataset that is exclusive to it.\n\n .. note::\n Dataset is assumed to be of constant size.\n\n Args:\n dataset: Dataset used for sampling.\n complexity_fn: a function whose input is a sample and output is the computational complexity of the sample\n num_replicas (int, optional): Number of processes participating in\n distributed training. By default, :attr:`world_size` is retrieved from the\n current distributed group.\n rank (int, optional): Rank of the current process within :attr:`num_replicas`.\n By default, :attr:`rank` is retrieved from the current distributed\n group.\n shuffle (bool, optional): If ``True`` (default), sampler will shuffle the\n indices.\n seed (int, optional): random seed used to shuffle the sampler if\n :attr:`shuffle=True`. This number should be identical across all\n processes in the distributed group. Default: ``0``.\n drop_last (bool, optional): if ``True``, then the sampler will drop the\n tail of the data to make it evenly divisible across the number of\n replicas. If ``False``, the sampler will add extra indices to make\n the data evenly divisible across the replicas. Default: ``False``.\n\n .. warning::\n In distributed mode, calling the :meth:`set_epoch` method at\n the beginning of each epoch **before** creating the :class:`DataLoader` iterator\n is necessary to make shuffling work properly across multiple epochs. Otherwise,\n the same ordering will be always used.\n\n Example::\n\n >>> sampler = sampler = LoadBalancingDistributedSampler(\n >>> dataset,\n >>> complexity_fn=complexity_fn) if is_distributed else None\n >>> loader = DataLoader(dataset, shuffle=(sampler is None),\n ... sampler=sampler)\n >>> for epoch in range(start_epoch, n_epochs):\n ... if is_distributed:\n ... sampler.set_epoch(epoch)\n ... 
train(loader)\n \"\"\"\n\n def __init__(\n self,\n dataset: Dataset,\n complexity_fn,\n num_replicas: Optional[int] = None,\n rank: Optional[int] = None,\n shuffle: bool = True,\n seed: int = 0,\n drop_last: bool = False,\n random_level: int = 0,\n ) -> None:\n if num_replicas is None:\n if not dist.is_available():\n raise RuntimeError(\"Requires distributed package to be available\")\n num_replicas = dist.get_world_size()\n if rank is None:\n if not dist.is_available():\n raise RuntimeError(\"Requires distributed package to be available\")\n rank = dist.get_rank()\n if rank >= num_replicas or rank < 0:\n raise ValueError(\n \"Invalid rank {}, rank should be in the interval\"\n \" [0, {}]\".format(rank, num_replicas - 1)\n )\n self.dataset = dataset\n self.num_replicas = num_replicas\n self.rank = rank\n self.epoch = 0\n self.drop_last = drop_last\n self.random_level = random_level\n\n if self.random_level > 0:\n logging.info(\"set random_level to {}\".format(self.random_level))\n\n # If the dataset length is evenly divisible by # of replicas, then there\n # is no need to drop any data, since the dataset will be split equally.\n dataset_len = len(self.dataset) # type: ignore\n if self.drop_last and dataset_len % self.num_replicas != 0: # type: ignore\n # Split to nearest available length that is evenly divisible.\n # This is to ensure each rank receives the same amount of data when\n # using this Sampler.\n self.num_samples = math.ceil(\n # `type:ignore` is required because Dataset cannot provide a default __len__\n # see NOTE in pytorch/torch/utils/data/sampler.py\n (dataset_len - self.num_replicas)\n / self.num_replicas\n )\n else:\n self.num_samples = math.ceil(dataset_len / self.num_replicas) # type: ignore\n self.total_size = self.num_samples * self.num_replicas\n self.shuffle = shuffle\n self.seed = seed\n\n self.item_complexity_map = dict()\n for item_index in range(dataset_len):\n self.item_complexity_map[item_index] = complexity_fn(\n self.dataset[item_index]\n )\n\n self.ordered_item_complexity_map = OrderedDict(\n sorted(self.item_complexity_map.items(), key=lambda t: t[1])\n )\n\n def shuffle_chunks(self):\n def chunks_wrap_padding(lst, n):\n \"\"\"Yield successive n-sized chunks from lst.\"\"\"\n num_chunks = max(1, self.num_samples)\n num_elements = num_chunks * n\n current_lst = []\n for i in range(num_elements):\n current_lst.append(lst[i % len(lst)])\n if len(current_lst) == n:\n yield current_lst\n current_lst = []\n\n if self.shuffle:\n # deterministically shuffle based on epoch and seed\n g = torch.Generator()\n g.manual_seed(self.seed + self.epoch)\n\n if self.random_level > 0:\n item_complexity_map = self.item_complexity_map.copy()\n complexity_random_ints = torch.randint(\n self.random_level, (len(item_complexity_map),), generator=g\n ).tolist()\n\n for k, v in zip(item_complexity_map, complexity_random_ints):\n item_complexity_map[k] += v\n\n ordered_item_complexity_map = OrderedDict(\n sorted(item_complexity_map.items(), key=lambda t: t[1])\n )\n else:\n ordered_item_complexity_map = self.ordered_item_complexity_map\n\n index_chunks = list(\n chunks_wrap_padding(\n list(ordered_item_complexity_map.keys()), self.num_replicas\n )\n )\n\n chunk_indices = torch.randperm(len(index_chunks), generator=g).tolist() # type: ignore\n else:\n index_chunks = list(\n chunks_wrap_padding(\n list(self.ordered_item_complexity_map.keys()), self.num_replicas\n )\n )\n chunk_indices = list(range(len(index_chunks))) # type: ignore\n\n if not self.drop_last:\n # add extra samples to 
make it evenly divisible\n padding_size = self.num_samples - len(chunk_indices)\n if padding_size <= len(chunk_indices):\n chunk_indices += chunk_indices[:padding_size]\n else:\n chunk_indices += (\n chunk_indices * math.ceil(padding_size / len(chunk_indices))\n )[:padding_size]\n else:\n # remove tail of data to make it evenly divisible.\n chunk_indices = chunk_indices[: self.num_samples]\n assert len(chunk_indices) == self.num_samples\n return index_chunks, chunk_indices\n\n def __iter__(self) -> Iterator:\n index_chunks, chunk_indices = self.shuffle_chunks()\n # subsample\n indices = [index_chunks[i][self.rank] for i in chunk_indices]\n assert len(indices) == self.num_samples\n\n return iter(indices)\n\n def __len__(self) -> int:\n return self.num_samples\n\n def set_epoch(self, epoch: int) -> None:\n r\"\"\"\n Sets the epoch for this sampler. When :attr:`shuffle=True`, this ensures all replicas\n use a different random ordering for each epoch. Otherwise, the next iteration of this\n sampler will yield the same ordering.\n\n Args:\n epoch (int): Epoch number.\n \"\"\"\n self.epoch = epoch\n\n\nclass LoadBalancingDistributedBatchSampler(Sampler):\n r\"\"\"Wraps another load balance sampler to yield variable sized mini-batches.\n\n Args:\n sampler (LoadBalancingDistributedSampler): Load balance sampler.\n batch_fn (Callable): Callable to yield mini-batch indices.\n drop_last (bool): If ``True``, the sampler will drop the last few batches exceeding\n the least number of batches among replicas, otherwises, the number of batches on each\n replica will be padded to the same.\n\n `batch_fn` has the following signature:\n ```python\n def batch_fn(indices: List[int]) -> List[List[int]]\n ```\n\n Example::\n\n >>> sampler = LoadBalancingDistributedSampler(dataset, complexity_fn=complexity_fn)\n >>> batch_sampler = LoadBalancingDistributedBatchSampler(sampler, batch_fn=batch_fn)\n >>> loader = DataLoader(dataset, batch_sampler=batch_sampler)\n ...\n >>> for epoch in range(start_epoch, n_epochs):\n ... batch_sampler.set_epoch(epoch)\n ... 
train(loader)\n\n \"\"\"\n\n def __init__(\n self,\n sampler: LoadBalancingDistributedSampler,\n batch_fn,\n drop_last: bool = False,\n ) -> None:\n if not isinstance(sampler, LoadBalancingDistributedSampler):\n raise ValueError(\n \"sampler should be of LoadBalancingDistributedSampler type.\"\n )\n\n if sampler.drop_last:\n raise ValueError(\"drop_last of sampler should be False\")\n\n self.sampler = sampler\n self.batch_fn = batch_fn\n self.drop_last = drop_last\n\n self.num_replicas = self.sampler.num_replicas\n self.rank = self.sampler.rank\n\n self.generate_batch()\n logging.info(\"Loadbalance distributed batch sampler is initialized\")\n\n def generate_batch(self):\n index_chunks, chunk_indices = self.sampler.shuffle_chunks()\n\n batches = []\n for rank in range(self.num_replicas):\n sub_indices = [index_chunks[i][rank] for i in chunk_indices]\n batches.append(self.batch_fn(sub_indices))\n\n logging.debug(f\"generate batched: {batches}\")\n self.total_batch = (\n max([len(b) for b in batches])\n if not self.drop_last\n else min([len(b) for b in batches])\n )\n\n if self.total_batch < len(batches[self.rank]):\n logging.info(\n f\"{len(batches[self.rank]) - self.total_batch} batches dropped for rank {self.rank}\"\n )\n\n self.padded_batches = [\n batch + batch[: self.total_batch - len(batch)] for batch in batches\n ]\n\n def __iter__(self):\n return iter(self.padded_batches[self.rank])\n\n def __len__(self):\n return self.total_batch\n\n def set_epoch(self, epoch: int) -> None:\n r\"\"\"\n Sets the epoch for this sampler. When :attr:`shuffle=True`, this ensures all replicas\n use a different random ordering for each epoch. Otherwise, the next iteration of this\n sampler will yield the same ordering.\n\n Args:\n epoch (int): Epoch number.\n \"\"\"\n self.sampler.set_epoch(epoch)\n self.generate_batch()\n"
] |
[
[
"torch.distributed.get_rank",
"torch.distributed.get_world_size",
"torch.distributed.is_available",
"torch.Generator"
]
] |
mef51/plawt
|
[
"7d5dbcd64d97499eaf7896d2f6e50826d54d2e6c"
] |
[
"examples/subplotstest.py"
] |
[
"#!/usr/bin/python3\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport plawt\nimport matplotlib as mpl\n\nx = np.linspace(0, 2 * np.pi, 400)\ny = np.sin(x ** 2)\n\n##### Vanilla Matplotlib #####\n\n# Three subplots sharing both x/y axes\n# f, (ax1, ax2, ax3) = plt.subplots(3, sharex=True, sharey=True)\nf, (ax1, ax2, ax3) = plt.subplots(3, sharex=True, sharey=True)\nf.suptitle('Sharing both axes')\n\n# hack to have commone x and y labels\n# https://stackoverflow.com/questions/6963035/pyplot-axes-labels-for-subplots\nf.add_subplot(111, frameon=False)\nplt.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')\nplt.xlabel('Velocity')\nplt.ylabel('Amplitude')\n\nax1.plot(x, y)\n# ax1.set_title('Sharing both axes')\nax2.scatter(x, y)\nax3.scatter(x, 2 * y ** 2 - 1, color='r')\nax1.set_title('panel a', fontsize=12)\nax2.set_title('panel b', fontsize=12)\nax3.set_title('panel c', fontsize=12)\nax1.minorticks_on()\n\n# Fine-tune figure; make subplots close to each other and hide x ticks for\n# all but bottom plot.\nf.subplots_adjust(hspace=0.3, wspace=0)\nplt.savefig('subplotcompare.png')\nplt.close()\n\n##### Same plot but with plawt #####\nsubtitledict = {'verticalalignment': 'center'}\nplawt.plot({\n\t0: {'x': x, 'y': y},\n\t'title': 'Sharing both axes',\n\t'subtitle': 'panel a',\n\t'subtitledict': {'verticalalignment': 'center'},\n\t'fontsize': 12,\n\t'subloc': 'left',\n\t'minorticks': True,\n\t'xlabel': 'Velocity', 'ylabel': 'Amplitude',\n\t'sharex': True, 'sharey': True,\n\t'hspace': 0.3,\n\t# 'aspect': 16/9,\n\t'filename': 'subplottest.png'\n}, {\n\t0: {'x': x, 'y': y, 'line': 'bo'},\n\t'subtitle': 'panel b',\n\t'subtitledict': {'verticalalignment': 'center'},\n\t'fontsize': 12,\n\t'subloc': 'left',\n}, {\n\t0: {'x': x, 'y': 2*y**2-1, 'line': 'ro', 'label': 'panel c'},\n\t'subtitle': 'panel c',\n\t'subtitledict': {'verticalalignment': 'center'},\n\t'fontsize': 12,\n\t'subloc': 'left',\n\t'legend': {'loc': 2, 'fontsize': 10},\n})\n\n"
] |
[
[
"numpy.linspace",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"numpy.sin",
"matplotlib.pyplot.close",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.ylabel"
]
] |
metocean/tcrm
|
[
"ea782c8a6245419105a0a131ebe8ec61479feb25",
"ea782c8a6245419105a0a131ebe8ec61479feb25"
] |
[
"Utilities/timeseries.py",
"Utilities/track.py"
] |
[
"\"\"\"\n:mod:`timeseries` - Extract timeseries from each timestep of a simulation\n=========================================================================\n\nExtract station timeseries from each timestep of a simulation.\nThis samples the regional wind speed, not the site-specific wind speed.\nTo include site-specific effects, you will first need to include the\nmultiplier values for each site in the station file, then run\ntsmultipliers.py to apply said multipliers to the output.\n\n\"\"\"\n\nimport logging\nfrom os.path import join as pjoin\nfrom configparser import NoOptionError\n\nimport numpy as np\n\nfrom Utilities.config import ConfigParser\nfrom Utilities.files import flLoadFile\nfrom Utilities.maputils import find_index\nfrom Utilities.dynarray import DynamicRecArray\nfrom .shptools import shpGetVertices\n\n#from config import NoOptionError\nlog = logging.getLogger(__name__)\nlog.addHandler(logging.NullHandler())\n\nISO_FORMAT = \"%Y-%m-%d %H:%M\"\n\nOUTPUT_NAMES = ('Station', 'Time', 'Longitude', 'Latitude',\n 'Speed', 'UU', 'VV', 'Bearing',\n 'Pressure')\nOUTPUT_TYPES = ['|U16', '|U16', 'f8', 'f8', 'f8', 'f8', 'f8', 'f8', 'f8']\nOUTPUT_FMT = ['%s', '%s', '%9.5f', '%9.5f',\n '%6.2f', '%6.2f', '%6.2f', '%6.2f',\n '%7.2f']\n\nMINMAX_NAMES = ('Station', 'Time', 'Longitude', 'Latitude',\n 'Speed', 'UU', 'VV', 'Bearing', 'Pressure')\nMINMAX_TYPES = ['|U16', '|U16', 'f8', 'f8', 'f8', 'f8', 'f8', 'f8', 'f8']\nMINMAX_FMT = ['%s', '%s', '%9.5f', '%9.5f',\n '%6.2f', '%6.2f', '%6.2f', '%6.2f',\n '%7.2f']\n\nCONFIG_DEFAULTS = \"\"\"\n[Timeseries]\nStationID=None\n\"\"\"\n\nclass Station(object):\n \"\"\"Station:\n\n Description: An object to represent a location for which time series \n data will be extracted\n\n Members:\n `id`: Unique id string for the station\n `lon`: Longitude of the station (geographic coordinates)\n `lat`: Latitude of the station (geographic coordinates)\n `data`: A `DynamicRecArray` to hold the time series data\n\n Methods:\n `insideGrid`: Determine if the station is inside the simulation domain.\n \"\"\"\n\n def __init__(self, stationid, longitude, latitude):\n\n self.id = stationid\n self.lon = longitude\n self.lat = latitude\n self.data = DynamicRecArray(dtype={'names': OUTPUT_NAMES,\n 'formats':OUTPUT_TYPES})\n\n def __getattr__(self, key):\n \"\"\"\n Get the `key` from the `data` object.\n\n :param str key: the key to lookup in the `data` object.\n \"\"\"\n if key.startswith('__') and key.endswith('__'):\n return super(Station, self).__getattr__(key)\n return self.data.data[key]\n\n def insideGrid(self, gridx, gridy):\n \"\"\"\n Determine if a point is within the defined grid\n\n \"\"\"\n if (float(self.lon) >= gridx.min() \\\n and float(self.lon) <= gridx.max() and \\\n float(self.lat) >= gridy.min() and \\\n float(self.lat) <= gridy.max()):\n return True\n else:\n return False\n\nclass Timeseries(object):\n \"\"\"Timeseries:\n\n Description: Extract data at a set of :class:`Station`s \n\n Parameters:\n\n :param str configFile: Path to a TCRM configuration file\n\n Members:\n `meta`: Boolean whether additional metadata is attached to the `Station`s\n `outputPath`: Directory where extracted data will be stored in csv-format files\n `minfile`: Name of the file where minima for all `Station`s will be stored. \n This will be the `outputPath` folder\n `maxfile`: As above, but for maxima (e.g. 
maximum wind speeds)\n `stations`: A list of `Station` objects, read from a file containing details of the stations\n \n Methods:\n \n Internal methods:\n \n \"\"\"\n\n def __init__(self, configFile):\n \"\"\"\n Read configuration settings, load station data and set up\n output recarrays.\n\n :param str configFile: path to a configuration file.\n \"\"\"\n\n config = ConfigParser()\n config.read(configFile)\n\n self.meta = False\n\n stnFile = config.get('Timeseries', 'LocationFile')\n self.outputPath = pjoin(config.get('Output', 'Path'),\n 'process', 'timeseries')\n\n self.maxfile = pjoin(config.get('Output', 'Path'),\n 'process', 'maxima.csv')\n self.minfile = pjoin(config.get('Output', 'Path'),\n 'process', 'minima.csv')\n\n\n log.info(f\"Loading timeseries stations from {stnFile}\")\n log.debug(f\"Timeseries data will be written into {self.outputPath}\")\n self.stations = []\n if stnFile.endswith(\"shp\"):\n try:\n key_name = config.get('Timeseries', 'StationID')\n except NoOptionError:\n key_name = None\n\n vertices = shpGetVertices(stnFile, key_name=key_name)\n\n for stn in list(vertices.keys()):\n lat = vertices[stn][0][1]\n lon = vertices[stn][0][0]\n lon = np.where(lon < 0., lon + 360., lon)\n self.stations.append(Station(stn, lon, lat))\n\n\n else:\n stndata = flLoadFile(stnFile, delimiter=',')\n # If there are more than 3 columns, save the additional\n # columns as 'metadata'\n if stndata.shape[1] > 3:\n self.metadata = stndata[:, 3:]\n self.meta = True\n stnid = stndata[:, 0]\n stnlon = stndata[:, 1].astype(float)\n stnlat = stndata[:, 2].astype(float)\n for sid, lon, lat in zip(stnid, stnlon, stnlat):\n self.stations.append(Station(sid, lon, lat))\n log.info(f\"There are {len(self.stations)} stations that will collect timeseries data\")\n\n def sample(self, lon, lat, spd, uu, vv, prs, gridx, gridy):\n \"\"\"\n Extract values from 2-dimensional grids at the given lat/lon.\n\n :param float lon: Longitude of the point to extract.\n :param float lat: Latitude of the point to extract.\n :param spd: :class:`numpy.ndarray` of speed values.\n :param uu: :class:`numpy.ndarray` of eastward wind speed values.\n :param vv: :class:`numpy.ndarray` of northward wind speed values.\n :param prs: :class:`numpy.ndarray` of pressure values.\n :param gridx: :class:`numpy.ndarray` of grid longitudes.\n :param gridy: :class:`numpy.ndarray` of grid latitudes.\n\n :return: speed, esatward and northward wind components, and pressure\n values at the given location\n :rtype: tuple\n \"\"\"\n xx = find_index(gridx, float(lon))\n yy = find_index(gridy, float(lat))\n ss = spd[yy, xx]\n ux = uu[yy, xx]\n vy = vv[yy, xx]\n bb = np.mod((180. 
/ np.pi) * np.arctan2(-ux, -vy), 360.)\n pp = prs[yy, xx]\n\n return (ss, ux, vy, bb, pp)\n\n\n def extract(self, dt, spd, uu, vv, prs, gridx, gridy):\n \"\"\"\n Extract data from the grid at the given locations.\n Data is stored in a dictionary, with keys as the station id's.\n\n :param float tstep: time step being evaluated, as a float (output\n from matplotlib.num2date)\n :param spd: :class:`numpy.ndarray` of speed values.\n :param uu: :class:`numpy.ndarray` of eastward wind speed values.\n :param vv: :class:`numpy.ndarray` of northward wind speed values.\n :param prs: :class:`numpy.ndarray` of pressure values.\n :param gridx: :class:`numpy.ndarray` of grid longitudes.\n :param gridy: :class:`numpy.ndarray` of grid latitudes.\n\n \"\"\"\n stns = 0\n for stn in self.stations:\n if stn.insideGrid(gridx, gridy):\n stns += 1\n result = self.sample(stn.lon, stn.lat, spd, uu, vv, prs,\n gridx, gridy)\n ss, ux, vy, bb, pp = result\n stn.data.append((str(stn.id), dt, stn.lon, stn.lat, ss,\n ux, vy, bb, pp))\n\n else:\n stn.data.append((str(stn.id), dt, stn.lon, stn.lat, 0.0, 0.0,\n 0.0, 0.0, prs[0, 0]))\n log.debug(\"Extracted data for {0} stations\".format(stns))\n\n def shutdown(self):\n \"\"\"\n Write the data to file, each station to a separate file.\n \"\"\"\n\n header = 'Station,Time,Longitude,Latitude,Speed,UU,VV,Bearing,Pressure'\n maxheader = ('Station,Time,Longitude,Latitude,Speed,'\n 'UU,VV,Bearing,Pressure')\n\n max_data = DynamicRecArray(dtype={'names': MINMAX_NAMES,\n 'formats':MINMAX_TYPES})\n\n min_data = DynamicRecArray(dtype={'names': MINMAX_NAMES,\n 'formats':MINMAX_TYPES})\n\n for stn in self.stations:\n\n if np.any(stn.data.data['Speed'] > 0.0):\n fname = pjoin(self.outputPath, 'ts.%s.csv' % str(stn.id))\n log.debug(\"Saving time series data to {0}\".format(fname))\n with open(fname, 'wb') as fh:\n np.savetxt(fh, np.array(stn.data.data), fmt=OUTPUT_FMT,\n delimiter=',', header=header, comments='', encoding='ascii')\n\n max_step = np.argmax(stn.data.data['Speed'])\n min_step = np.argmin(stn.data.data['Pressure'])\n max_data.append(tuple(stn.data.data[max_step]))\n min_data.append(tuple(stn.data.data[min_step]))\n\n\n np.savetxt(self.maxfile, max_data.data, fmt=MINMAX_FMT, delimiter=',',\n header=maxheader, comments='')\n np.savetxt(self.minfile, min_data.data, fmt=MINMAX_FMT, delimiter=',',\n header=maxheader, comments='')\n \"\"\"\n for stn in self.stations:\n if type(self.maxdata[stn.id][3]) == datetime.datetime:\n self.maxdata[stn.id][3] = self.maxdata[stn.id][3].strftime(ISO_FORMAT)\n self.mindata[stn.id][3] = self.mindata[stn.id][3].strftime(ISO_FORMAT)\n self.maxdata[stn.id][0] = str(int(stn.id))\n self.mindata[stn.id][0] = str(int(stn.id))\n maxdata.append(self.maxdata[stn.id])\n mindata.append(self.mindata[stn.id])\n\n for stn in self.stnid:\n if type(self.maxdata[stn][3]) == datetime.datetime:\n self.maxdata[stn][3] = self.maxdata[stn][3].strftime(ISO_FORMAT)\n self.mindata[stn][3] = self.mindata[stn][3].strftime(ISO_FORMAT)\n self.maxdata[stn][0] = str(int(stn))\n self.mindata[stn][0] = str(int(stn))\n maxdata.append(self.maxdata[stn])\n mindata.append(self.mindata[stn])\n else:\n pass\n\n np.savetxt(maxfname, np.array(maxdata), fmt='%s',\n header=maxheader, delimiter=',')\n #['%s','%7.3f','%7.3f','%s','%6.2f','%6.2f',\n # '%6.2f','%6.2f','%7.2f'] )\n minfname = pjoin(self.outputPath, 'minpressure.csv')\n\n np.savetxt(minfname, np.array(mindata), fmt='%s',\n header=maxheader, delimiter=',')\n #['%s','%7.3f','%7.3f','%s','%6.2f','%6.2f',\n # '%6.2f','%6.2f','%7.2f'] 
)\n \"\"\"\n log.info(\"Station data written to file\")\n",
"\"\"\"\n:mod:`track` - track-related attributes and functions\n=====================================================\n\n.. module:: tracks\n :synopsis: This module contains funcitons for reading/writing\n track data from/to csv and netCDF formats.\n\n.. moduleauthor:: Craig Arthur <craig.arthur@ga.gov.au>\n\n\"\"\"\n\nimport os\nimport time\nimport logging\nimport getpass\nimport numpy as np\nfrom datetime import datetime\n\nfrom os.path import join as pjoin\nimport re\nfrom shapely.geometry import Point, LineString\n\nfrom Utilities.metutils import convert\nfrom Utilities.maputils import bearing2theta\n\nfrom netCDF4 import Dataset, date2num, num2date\nfrom cftime import num2pydate\n\ntry:\n from exceptions import WindowsError\nexcept:\n class WindowsError(IOError): pass\n\n#if not getattr(__builtins__, \"WindowsError\", None):\n# class WindowsError(IOError):\n# pass\n\nlog = logging.getLogger(__name__)\nlog.addHandler(logging.NullHandler())\n\ntrackFields = ('Indicator', 'CycloneNumber', 'Year', 'Month',\n 'Day', 'Hour', 'Minute', 'TimeElapsed', 'Datetime', 'Longitude',\n 'Latitude', 'Speed', 'Bearing', 'CentralPressure',\n 'WindSpeed', 'rMax', 'EnvPressure')\n\ntrackTypes = ('i', 'i', 'i', 'i',\n 'i', 'i', 'i', 'f', datetime,\n 'f', 'f', 'f', 'f', 'f',\n 'f', 'f', 'f')\n\ntrackFormats = ('%i, %i, %i, %i,'\n '%i, %i, %i, %5.1f,' '%s',\n '%8.3f, %8.3f, %6.2f, %6.2f, %7.2f,'\n '%6.2f, %6.2f, %7.2f')\n\nPATTERN = re.compile(r'\\d+')\n\nclass Track(object):\n \"\"\"\n A single tropical cyclone track.\n\n The object exposes the track data through the object attributes.\n For example, If `data` contains the tropical cyclone track data\n (`numpy.array`) loaded with the :meth:`readTrackData` function,\n then the central pressure column can be printed out with the\n code::\n\n t = Track(data)\n print(t.CentralPressure)\n\n :type data: numpy.ndarray\n :param data: the tropical cyclone track data.\n \"\"\"\n\n def __init__(self, data):\n \"\"\"\n :type data: numpy.ndarray\n :param data: the tropical cyclone track data.\n \"\"\"\n self.data = data\n self.trackId = None\n self.trackfile = None\n if (len(data) > 0) and ('CentralPressure' in data.dtype.names):\n self.trackMinPressure = np.min(data['CentralPressure'])\n else:\n self.trackMinPressure = None\n if (len(data) > 0) and ('WindSpeed' in data.dtype.names):\n self.trackMaxWind = np.max(data['WindSpeed'])\n else:\n self.trackMaxWind = None\n\n def __getattr__(self, key):\n \"\"\"\n Get the `key` from the `data` object.\n\n :type key: str\n :param key: the key to lookup in the `data` object.\n \"\"\"\n if (key.startswith('__') and key.endswith('__')) or (key == 'data'):\n return super(Track, self).__getattr__(key)\n\n return self.data[key]\n\n def __repr__(self):\n return \"<Track of dtype [{}]>\".format(\", \".join(self.data.dtype.names))\n\n def inRegion(self, gridLimit):\n \"\"\"\n Check if the tropical cyclone track starts within a region.\n\n :type gridLimit: :class:`dict`\n :param gridLimit: the region to check.\n The :class:`dict` should contain the keys\n :attr:`xMin`, :attr:`xMax`, :attr:`yMin` and\n :attr:`yMax`. 
The *x* variable bounds the\n latitude and the *y* variable bounds the\n longitude.\n\n \"\"\"\n xMin = gridLimit['xMin']\n xMax = gridLimit['xMax']\n yMin = gridLimit['yMin']\n yMax = gridLimit['yMax']\n\n return ((xMin <= self.Longitude[0]) and\n (self.Longitude[0] <= xMax) and\n (yMin <= self.Latitude[0]) and\n (self.Latitude[0] <= yMax))\n\n def minimumDistance(self, points):\n \"\"\"\n Calculate the minimum distance between a track and a\n collection of :class:`shapely.geometry.Point` points. Assumes\n the points and the :attr:`Longitude` and :attr:`Latitude`\n attributes share the same coordinate system (presumed to be\n geographic coordinates).\n\n :param points: sequence of :class:`shapely.geometry.Point` objects.\n\n :returns: :class:`numpy.ndarray` of minimum distances between\n the set of points and the line features (in km).\n \"\"\"\n coords = [(x, y) for x, y in zip(self.Longitude, self.Latitude)]\n\n if len(coords) == 1:\n point_feature = Point(self.Longitude, self.Latitude)\n distances = [point_feature.distance(point) for point in points]\n else:\n line_feature = LineString(coords)\n distances = [line_feature.distance(point) for point in points]\n\n return convert(distances, 'deg', 'km')\n\n\n# Define format for TCRM output track files:\nISO_FORMAT = \"%Y-%m-%d %H:%M:%S\"\nTCRM_COLS = ('CycloneNumber', 'Datetime', 'TimeElapsed', 'Longitude',\n 'Latitude', 'Speed', 'Bearing', 'CentralPressure',\n 'EnvPressure', 'rMax')\n\nTCRM_UNIT = ('', '', 'hr', 'degree', 'degree', 'kph', 'degrees',\n 'hPa', 'hPa', 'km')\n\nTCRM_FMTS = ('i', 'object', 'f', 'f8', 'f8', 'f8', 'f8', 'f8', 'f8', 'f8')\n\nTCRM_CNVT = {\n 0: lambda s: int(float(s.strip() or 0)),\n 1: lambda s: datetime.strptime(s.strip(), ISO_FORMAT),\n 5: lambda s: convert(float(s.strip() or 0), TCRM_UNIT[5], 'mps'),\n 6: lambda s: bearing2theta(float(s.strip() or 0) * np.pi / 180.),\n 7: lambda s: convert(float(s.strip() or 0), TCRM_UNIT[7], 'Pa'),\n 8: lambda s: convert(float(s.strip() or 0), TCRM_UNIT[8], 'Pa'),\n}\n\nTRACK_DT_ERR = \"Track data does not have required \\\nattributes to convert to datetime object\"\n\nTRACK_EMPTY_GROUP = \"\"\"No track groups in this netcdf file: {0}\"\"\"\n\ndef ncReadTrackData(trackfile):\n \"\"\"\n Read a netcdf-format track file into a collection of\n :class:`Track` objects. 
The returned :class:`Track` objects *must*\n have all attributes accessed by the `__getattr__` method.\n\n :param str trackfile: track data filename (netCDF4 format).\n\n :return: track data\n :rtype: list of :class:`Track` objects\n\n \"\"\"\n\n track_dtype = np.dtype({'names':TCRM_COLS,\n 'formats':TCRM_FMTS})\n try:\n ncobj = Dataset(trackfile, mode='r')\n except (IOError, RuntimeError):\n log.exception(\"Cannot open {0}\".format(trackfile))\n raise IOError(\"Cannot open {0}\".format(trackfile))\n\n g = ncobj.groups\n if not bool(g):\n # We have a track file that stores data in separate variables\n log.debug(f\"Reading data from a single track file\")\n dt = ncobj.variables['Datetime']\n units = ncobj.getncattr('time_units')\n calendar = ncobj.getncattr('calendar')\n dtt = num2date(dt[:], units, calendar)\n # Convert to true python datetimes\n dtconversion = [datetime.strptime(d.strftime(), \"%Y-%m-%d %H:%M:%S\") for d in dtt]\n newtd = np.zeros(len(dtt), dtype=track_dtype)\n for f in ncobj.variables.keys():\n if f != 'Datetime' and f in track_dtype.names:\n newtd[f] = ncobj.variables[f][:]\n newtd['Datetime'] = dtconversion\n track = Track(newtd)\n track.trackfile = trackfile\n track.trackId = eval(ncobj.trackId)\n\n return [track]\n\n tracks = []\n if 'tracks' in g:\n tgroup = g['tracks'].groups\n ntracks = len(tgroup)\n for i, (t, data) in enumerate(tgroup.items()):\n log.debug(\"Loading data for {0}\".format(t))\n track_data = data.variables['track'][:]\n\n try: \n dt = num2date(track_data['Datetime'],\n data.variables['time'].units,\n data.variables['time'].calendar)\n except AttributeError:\n log.exception(TRACK_DT_ERR)\n raise AttributeError\n\n newtd = np.zeros(len(track_data), dtype=track_dtype)\n for f in track_data.dtype.names:\n if f != 'Datetime' and f in track_dtype.names:\n newtd[f] = track_data[f]\n dtconversion = [datetime.strptime(d.strftime(), \"%Y-%m-%d %H:%M:%S\") for d in dt]\n newtd['Datetime'] = dtconversion\n\n track = Track(newtd)\n track.trackfile = trackfile\n if hasattr(data.variables['track'], \"trackId\"):\n track.trackId = eval(data.variables['track'].trackId)\n else:\n track.trackId = (i+1, ntracks)\n tracks.append(track)\n\n else:\n log.warn(TRACK_EMPTY_GROUP.format(trackfile))\n\n ncobj.close()\n return tracks\n\ndef ncSaveTracks(trackfile, tracks,\n timeunits='hours since 1900-01-01 00:00',\n calendar='standard', attributes={}):\n \"\"\"\n Save a collection of :class:`Track` objects to a netCDF file. This\n makes use of netCDF4 compound data types to store the data as a\n structure akin to a :class:`numpy.recarray`. Each track in the\n collection is stored as a separate :class:`netCDF4.Group` instance\n in the output file.\n\n The :class:`Track` objects hold datetime information as an array\n of :class:`datetime.datetime` objects - there is no equivalent\n data type in netCDF4, so the datetime information is converted to\n floats using the :class:`netCDF4.date2num` function.\n\n :param str trackfile: Path to the file to save data to.\n :param list tracks: Collection of :class:`Track` objects.\n :param str timeunits: A string of the form '*time units* since\n *reference time*' describing the time units.\n Default is 'hours since 1900-01-01 00:00'.\n :param str calendar: Calendar used for time calculations. Valid calendars\n are 'standard', 'gregorian', 'proleptic_gregorian',\n 'noleap', '365_day', '360_day', 'julian', 'all_leap',\n '366_day'. 
Default is 'standard', which is a mixed\n Julian/Gregorian calendar.\n :param dict attributes: Global attributes to add to the file.\n\n \"\"\"\n\n if len(tracks) == 0:\n log.info(\"No tracks to be stored in track file: {0}\".format(trackfile))\n return\n\n try:\n ncobj = Dataset(trackfile, \"w\", format=\"NETCDF4\", clobber=True)\n except IOError:\n log.exception(\"Cannot open {0} for writing\".format(trackfile))\n raise IOError(\"Cannot open {0} for writing\".format(trackfile))\n\n tgroup = ncobj.createGroup('tracks')\n\n # Fidget with the dtype to convert :class:`datetime` objects to floats:\n track_dtype = np.dtype(tracks[0].data.dtype)\n dtidx = track_dtype.names.index('Datetime')\n track_dtype = track_dtype.descr\n track_dtype[dtidx] = ('Datetime', 'f8')\n track_dtype = np.dtype(track_dtype)\n\n for n, t in enumerate(tracks):\n if len(t.data) == 0: # Empty track\n continue\n tname = \"tracks-{:04d}\".format(n)\n tdata = tgroup.createGroup(tname)\n tdtype = tdata.createCompoundType(track_dtype, 'track_dtype')\n\n dims = tdata.createDimension('time', None)\n times = tdata.createVariable('time', 'f8', ('time',),\n zlib=True, complevel=8, shuffle=True)\n tvar = tdata.createVariable('track', tdtype, ('time',),\n zlib=True, complevel=8, shuffle=True)\n t.data['Datetime'] = date2num(t.data['Datetime'], timeunits, calendar)\n times[:] = t.data['Datetime']\n times.units = 'hours since 1900-01-01 00:00'\n times.calendar = calendar\n tvar[:] = t.data.astype(track_dtype)\n tvar.long_name = \"Tropical cyclone track data\"\n tvar.time_units = 'hours since 1900-01-01 00:00'\n tvar.calendar = calendar\n tvar.lon_units = 'degrees east'\n tvar.lat_units = 'degrees north'\n tvar.pressure_units = 'hPa'\n tvar.speed_units = 'km/h'\n tvar.length_units = 'km'\n tvar.trackId = repr(t.trackId)\n\n attributes['created_on'] = time.strftime(ISO_FORMAT, time.localtime())\n attributes['created_by'] = getpass.getuser()\n ncobj.setncatts(attributes)\n ncobj.close()\n\n return\n\ndef readTrackData(trackfile):\n \"\"\"\n Read a track .csv file into a numpy.ndarray.\n\n The track format and converters are specified with the global variables\n\n TRACKFILE_COLS -- The column names\n TRACKFILE_FMTS -- The entry formats\n TRACKFILE_CNVT -- The column converters\n\n :param str trackfile: the track data filename.\n\n :return: track data\n :rtype: :class:`numpy.ndarray`\n\n \"\"\"\n\n try:\n return np.loadtxt(trackfile,\n comments='%',\n delimiter=',',\n dtype={\n 'names': TCRM_COLS,\n 'formats': TCRM_FMTS},\n converters=TCRM_CNVT)\n except ValueError:\n # return an empty array with the appropriate `dtype` field names\n return np.empty(0, dtype={\n 'names': TCRM_COLS,\n 'formats': TCRM_FMTS})\n\ndef readMultipleTrackData(trackfile):\n \"\"\"\n Reads all the track datas from a .csv file into a list of numpy.ndarrays.\n The tracks are seperated based in their cyclone id. 
This function calls\n `readTrackData` to read the data from the file.\n\n :param str trackfile: the track data filename.\n\n :return: a collection of :class:`Track` objects\n\n \"\"\"\n\n datas = []\n data = readTrackData(trackfile)\n if len(data) > 0:\n cycloneId = data['CycloneNumber']\n for i in range(1, np.max(cycloneId) + 1):\n datas.append(data[cycloneId == i])\n else:\n datas.append(data)\n return datas\n\ndef loadTracks(trackfile):\n \"\"\"\n Read tracks from a track .nc file and return a list of :class:`Track`\n objects.\n\n This calls the function `ncReadTrackData` to parse the track .nc file.\n\n :param str trackfile: the track data filename.\n\n :return: list of :class:`Track` objects.\n\n \"\"\"\n if not isinstance(trackfile, str):\n raise TypeError(\"Track file name is not a string: {0}\".\\\n format(trackfile))\n if os.path.exists(trackfile):\n tracks = ncReadTrackData(trackfile)\n return tracks\n else:\n raise IOError(\"Track file doesn't exist: {0}\".format(trackfile))\n\ndef loadTracksFromFiles(trackfiles):\n \"\"\"\n Generator that yields :class:`Track` objects from a list of track\n filenames.\n\n When run in parallel, the list `trackfiles` is distributed across the MPI\n processors using the `balanced` function. Track files are loaded in a lazy\n fashion to reduce memory consumption. The generator returns individual\n tracks (recall: a trackfile can contain multiple tracks) and only moves on\n to the next file once all the tracks from the current file have been\n returned.\n\n :type trackfiles: list of strings\n :param trackfiles: list of track filenames. The filenames must include the\n path to the file.\n\n :raises: TypeError if input argument is not a list.\n \"\"\"\n if not isinstance(trackfiles, list):\n raise TypeError(\"Input argument is not a list\")\n\n for f in trackfiles:\n msg = \"Loading tracks in {0}\".format(f)\n log.debug(msg)\n tracks = loadTracks(f)\n for track in tracks:\n yield track\n \ndef loadTracksFromPath(path):\n \"\"\"\n Helper function to obtain a generator that yields :class:`Track` objects\n from a directory containing track .csv files.\n\n This function calls `loadTracksFromFiles` to obtain the generator and track\n filenames are processed in alphabetical order.\n\n :type path: str\n :param path: the directory path.\n\n :raises: IOError if the path does not exist.\n \"\"\"\n try: \n files = os.listdir(path)\n trackfiles = [pjoin(path, f) for f in files if f.startswith('tracks')]\n msg = \"Loading {0} track files in {1}\".format(len(trackfiles), path)\n log.info(msg)\n return loadTracksFromFiles(sorted(trackfiles))\n except (IOError, OSError, WindowsError):\n raise IOError(\"Path {0} does not exist\".format(path))\n"
] |
[
[
"numpy.arctan2",
"numpy.argmax",
"numpy.argmin",
"numpy.any",
"numpy.savetxt",
"numpy.array",
"numpy.where"
],
[
"numpy.min",
"numpy.empty",
"numpy.dtype",
"numpy.max",
"numpy.loadtxt"
]
] |
boasvdp/sailor
|
[
"978e11b8d1624631203fce0ff65d13f6ebd5ba05"
] |
[
"workflow/scripts/define_core.py"
] |
[
"#!/usr/bin/env python3\n\nimport argparse\n\ndef filter_no_genomes(df, threshold):\n total_genomes = df.shape[1] - 20\n threshold_genomes = total_genomes * threshold\n df_filt = df[df['number_genomes'] > threshold_genomes]\n return df_filt\n\ndef filter_dosage(df, dosage):\n df_filt = df[df['max_dose'] <= dosage]\n return df_filt\n\ndef main(args):\n import pandas as pd\n df = pd.read_csv(args.input, sep = '\\t')\n df_filt_genomes = filter_no_genomes(df, args.threshold)\n if args.dosage is not None:\n df_final = filter_dosage(df_filt_genomes, args.dosage)\n else:\n df_final = df_filt_genomes\n df_final.to_csv(args.output, sep = '\\t', index=False)\n\nif __name__ == \"__main__\":\n # Parse arguments if executed as main script\n parser = argparse.ArgumentParser(description='Select core genes from PIRATE file PIRATE.gene_families.tsv')\n\n parser.add_argument('--input', dest=\"input\", help=\"Input file\", type=str, metavar='INPUT FILE')\n parser.add_argument('--threshold', dest=\"threshold\", help=\"Threshold to define core (ratio of total strains, default: 0.95)\", type=float, default=0.95, metavar='THRESHOLD')\n parser.add_argument('--max-dosage', dest=\"dosage\", help=\"Max dosage to include\", type=float, metavar='DOSAGE THRESHOLD')\n parser.add_argument('--output', dest=\"output\", help=\"Output file\", type=str, metavar='OUTPUT FILE')\n\n args = parser.parse_args()\n\n main(args)\n"
] |
[
[
"pandas.read_csv"
]
] |
supercarryleoliao/IA-Flood-Warning-System-58
|
[
"af980dddc72d45d50ee70d9d10f6077948396d92"
] |
[
"floodsystem/plot.py"
] |
[
"import matplotlib\nfrom .analysis import polyfit\nimport matplotlib.pyplot as plt\ndef plot_water_level_with_fit(station, dates, levels, p):\n poly,d0=polyfit(dates, levels, p)\n typical_range=station.typical_range\n x = matplotlib.dates.date2num(dates)\n high_value=typical_range[1]*len(x-d0)\n low_value=typical_range[0]*len(x-d0)\n plt.plot(x-d0,poly(x-d0),label=\"$polyfit line$\")\n plt.plot(x-d0,high_value,linestyle=\"dotted\",color='r',label=\"$typical_high$\")\n plt.plot(x-d0,low_value,linestyle=\"dotted\",color='g',label=\"$typical_low$\")\n plt.xlabel('dates')\n plt.ylabel('levels')\n plt.legend()\n plt.show()"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.plot",
"matplotlib.dates.date2num",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
glwagner/Exasim
|
[
"ee4540443435f958fa2ca78d59cbf9cff0fe69de"
] |
[
"Version0.2/Python/Gencode/gencodedef.py"
] |
[
"import pdeapp\nimport numpy as np\n\n#def gencode(app):\n\nparam = np.array([1.0]);\nxdg = np.array([0.0, 0.0]);\nudg = np.array([2.0, 2.0, 2.0]);\nodg = np.array([0.0]);\nwdg = np.array([0.0]);\nuinf = np.array([0.0]);\ntime = np.array([0.0]);\nprint(pdeapp.flux(xdg, udg, odg, wdg, uinf, param, time))\n #Flux = getattr(pdeapp, app['Flux'])\n #print(Flux(xdg, udg, odg, wdg, uinf, param, time))\n\n# return 0;\n"
] |
[
[
"numpy.array"
]
] |
statpng/KaggleTranscript
|
[
"b110482a2adcf0390fac0d54c890c95894f98dea"
] |
[
"(1)Titanic2.py"
] |
[
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nplt.style.use(\"seaborn\")\nsns.set(font_scale=2.5)\n\nimport missingno as msno\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\ndf_train = pd.read_csv(\"./titanic/train.csv\")\ndf_test = pd.read_csv(\"./titanic/test.csv\")\n\ndf_train.head(2)\n\ndf_train.describe()\ndf_test.describe()\n\nfor col in df_train.columns:\n msg = \"column: {:>10}\\t Percent of NaN value: {:.2f}%\".format(col, 100 * df_train[col].isnull().sum() / df_train[col].shape[0])\n print(msg)\n\nfor col in df_test.columns:\n msg = \"column: {:>10}\\t Percent of NaN values: {:.2f}%\".format(col, df_train[col].isnull().sum()/df_train[col].shape[0])\n print(msg)\n\nmsno.matrix(df_train.iloc[:, :], figsize=(8, 8), color=(0.9, 0.5, 0.2))\nmsno.bar(df=df_train.iloc[:, :], figsize=(8, 8), color=(0.8, 0.5, 0.2))\nmsno.bar(df=df_test.iloc[:,:], figsize=(8,8), color=(.8, .5, .2))\n\nf, ax = plt.subplots(1, 2, figsize=(18,8))\ndf_train[\"Survived\"].value_counts().plot.pie(explode=[0.0, 0.1], autopct = \"%1.1f%%\", ax=ax[0], shadow=True)\nax[0].set_ylabel(\"\")\nsns.countplot(\"Survived\", data=df_train, ax=ax[1])\nax[1].set_title(\"Count plot - Survided\")\n\nplt.show()\n\n\ndf_train[[\"Pclass\", \"Survived\"]].groupby([\"Pclass\"], as_index=True).count()\ndf_train[[\"Pclass\", \"Survived\"]].groupby([\"Pclass\"], as_index=True).sum()\n\npd.crosstab(df_train[\"Pclass\"], df_train[\"Survived\"], margins=True)\ndf_train[[\"Pclass\", \"Survived\"]].groupby([\"Pclass\"], as_index=True).mean().sort_values(by=\"Survived\", ascending=False).plot.bar()\n\ny_position = 1.02\nf, ax = plt.subplots(1, 2, figsize = (18, 8))\ndf_train[\"Pclass\"].value_counts().plot.bar(color=[\"#CD7F32\", \"#FFDF00\", \"#D3D3D3\"], ax=ax[0])\nax[0].set_title(\"Number of Passengers by Pclass\", y=y_position)\nax[0].set_ylabel(\"Count\")\nsns.countplot(\"Pclass\", hue=\"Survived\", data=df_train, ax=ax[1])\nax[1].set_title(\"Pclass: Survived vs Dead\", y=y_position)\nplt.show()\n\n\n# Gender\n\nf, ax = plt.subplots(1, 2, figsize=(18, 8))\ndf_train[[\"Sex\", \"Survived\"]].groupby([\"Sex\"], as_index=True).mean().plot.bar(ax=ax[0])\nax[0].set_title(\"Survived vs Sex\")\nsns.countplot(\"Sex\", hue=\"Survived\", data=df_train, ax=ax[1])\nax[1].set_title(\"Sex: Survived vs Dead\")\nplt.show()\n\n\ndf_train[[\"Sex\", \"Survived\"]].groupby([\"Sex\"], as_index=True).mean().sort_values(by=\"Survived\", ascending=False)\n\npd.crosstab(df_train[\"Sex\"], df_train[\"Survived\"], margins=True)\n\nsns.catplot(\"Pclass\", \"Survived\", hue=\"Sex\", kind=\"point\", data=df_train, size=4, aspect=1.5)\n\nsns.catplot(\"Sex\", \"Survived\", kind=\"point\", col=\"Pclass\", hue=\"Pclass\", data=df_train, satureation=.5, size=5, aspect=1)\n\nprint( \"제일 나이 많은 탑승객 : {:.1f} Years\".format(df_train[\"Age\"].max()) )\nprint( \"제일 어린 탑승객 : {:.1f} Years\".format(df_train[\"Age\"].min()) )\nprint(\" 탑승객 평균 나이 : {:.1f} Years\".format(df_train[\"Age\"].mean() ))\n\nfig, ax = plt.subplots(1, 1, figsize=(9, 5))\nsns.kdeplot(df_train[df_train[\"Survived\"]==1][\"Age\"], ax=ax )\nsns.kdeplot(df_train[df_train[\"Survived\"]==0][\"Age\"], ax=ax)\nplt.legend([\"Survived == 1\", \"Survived == 0\"])\nplt.show()\n\n\nplt.figure(figsize=(8, 6))\ndf_train[\"Age\"][df_train[\"Pclass\"]==1].plot(kind=\"kde\")\ndf_train[\"Age\"][df_train[\"Pclass\"]==2].plot(kind=\"kde\")\ndf_train[\"Age\"][df_train[\"Pclass\"]==3].plot(kind=\"kde\")\n\nplt.xlabel(\"Age\")\nplt.title(\"Age Distribution within 
classes\")\nplt.legend([\"1st Class\", \"2nd Class\", \"3rd Class\"])\n\ncummulate_survival_ratio = []\nfor i in range(1, 80):\n cummulate_survival_ratio.append(\n df_train[ df_train[\"Age\"] < i ][\"Survived\"].mean()\n )\n\npd.DataFrame([0, pd.NA, 1]).mean()\n\n\ncummulate_survival_ratio = []\nfor i in range(1, 80):\n cummulate_survival_ratio.append(\n df_train[\"Survived\"][df_train[\"Age\"]<i].mean()\n )\n\nplt.figure(figsize=(7,7))\nplt.plot(cummulate_survival_ratio)\nplt.tile(\"Survival rate change depending on range of Age\", y=1.02)\nplt.ylabel(\"Survival rate\")\nplt.xlabel(\"Range of Age(0~x)\")\nplt.show()\n\n\nf,ax=plt.subplots(1, 2, figsize=(18,8))\nsns.violinplot(\"Pclass\", \"Age\", hue=\"Survived\", data=df_train, scale=\"count\", split=True, ax=ax[0])\nax[0].set_title(\"Pclass and Age vs Survived\")\nax[0].set_yticks(range(0, 110, 10))\nsns.violinplot(\"Sex\", \"Age\", hue=\"Survived\", data=df_train, scale=\"count\", split=True, ax=ax[1])\nax[1].set_title(\"Sex and Age vs Survived\")\nax[1].set_yticks(range(0, 110, 10))\nplt.show()\n\n\nf,ax=plt.subplots(1, 1, figsize=(8,8))\ndf_train[[\"Embarked\", \"Survived\"]].groupby([\"Embarked\"], as_index=True).mean().sort_values(by=\"Survived\", ascending=True).plot.bar(ax=ax)\n\n\nf,ax=plt.subplots(2,2,figsize=(20,15))\nsns.countplot(\"Embarked\", data=df_train, ax=ax[0,0])\nax[0,0].set_title(\"(1) No. of Passengers Boarded\")\nsns.countplot(\"Embarked\", hue=\"Sex\", data=df_train, ax=ax[0,1])\nax[0,1].set_title(\"(2) Male-Female Split for Embarked\")\nsns.countplot(\"Embarked\", hue=\"Survived\", data=df_train, ax=ax[1,0])\nax[1,0].set_title(\"(3) Embarked vs Survived\")\nsns.countplot(\"Embarked\", hue=\"Pclass\", data=df_train, ax=ax[1,1])\nax[1,1].set_title(\"(4) Embarked vs Pclass\")\nplt.subplots_adjust(wspace=0.2, hspace=0.5)\nplt.show()\n\n\ndf_train[\"FamilySize\"] = df_train[\"SibSp\"] + df_train[\"Parch\"] + 1\ndf_test[\"FamilySize\"] = df_test[\"SibSp\"] + df_test[\"Parch\"] + 1\n\nprint(\"Maximum size of Family : {:.1f}\".format(df_train[\"FamilySize\"].max()))\nprint(\"Minimum size of Family : {:.1f}\".format(df_train[\"FamilySize\"].min()))\n\n\nf,ax=plt.subplots(1, 3, figsize=(40,10))\nsns.countplot(\"FamilySize\", data=df_train, ax=ax[0])\nax[0].set_title(\"(1) No. of Passengers Boarded\", y=1.02)\n\nsns.countplot(\"FamilySize\", hue=\"Survived\", data=df_train, ax=ax[1])\nax[1].set_title(\"(2) Survived countplot depending on FamilySize\", y=1.02)\n\ndf_train[[\"FamilySize\", \"Survived\"]].groupby([\"FamilySize\"], as_index=True).mean().sort_values(by=\"Survived\", ascending=False).plot.bar(ax=ax[2])\nax[2].set_title(\"(3) Survived rate depending on FamilySize\", y=1.02)\n\nplt.subplots_adjust(wspace=.2, hspace=.5)\nplt.show()\n\n\nfig, ax=plt.subplots(1, 1, figsize=(8,8))\ng = sns.distplot(df_train[\"Fare\"], color=\"b\", label=\"Skewness : {:.2f}\".format(df_train[\"Fare\"].skew()), ax=ax)\ng = g.legend(loc=\"best\")\n\n\ndf_test.loc[df_test.Fare.isnull(), \"Fare\"] = df_test[\"Fare\"].mean()\n\ndf_train[\"Fare\"] = df_train[\"Fare\"].map(lambda i: np.log(i) if i > 0 else 0)\ndf_test[\"Fare\"] = df_test[\"Fare\"].map(lambda i: np.log(i) if i > 0 else 0)\n\nfig, ax = plt.subplots(1,1, figsize=(8,8))\ng = sns.distplot(df_train[\"Fare\"], color = \"b\", label=\"Skewness : {:.2f}\".format(df_train[\"Fare\"].skew()), ax=ax)\ng = g.legend(loc=\"best\")\n\n\ndf_train.head()\n\ndf_train[\"Ticket\"].value_counts()\n"
] |
[
[
"pandas.crosstab",
"matplotlib.pyplot.legend",
"pandas.read_csv",
"numpy.log",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplots",
"pandas.DataFrame",
"matplotlib.pyplot.tile",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure"
]
] |
strongio/strong-glm
|
[
"db05cb8a297858e46961e5d91105a515531dfdbb"
] |
[
"strong_glm/preprocessing/features.py"
] |
[
"from warnings import warn\n\nimport numpy as np\n\nfrom typing import Union, Sequence, Tuple\n\nfrom sklearn.base import TransformerMixin, BaseEstimator, clone\nfrom sklearn.preprocessing import FunctionTransformer, StandardScaler, MinMaxScaler, RobustScaler\nfrom sklearn.utils.metaestimators import _BaseComposition\n\ntry:\n from pandas import DataFrame\nexcept ImportError:\n def _no_pandas(*args, **kwargs):\n raise RuntimeError(\"Must install `pandas`\")\n\n\n DataFrame = _no_pandas\n\n\ndef _infer_feature_names(\n input_feature_names: Sequence[str],\n output_num_cols: int,\n trans_name: str,\n transformer: TransformerMixin\n) -> Sequence[str]:\n feature_names = False\n try:\n # polynomial features will add ^1, ^2, etc.\n # one_hot_encoder will add labels\n # TODO: this breaks the mapping between names in get/set_params and elsewhere feature-names. I think that's ok\n feature_names = transformer.get_feature_names(input_feature_names)\n except (TypeError, AttributeError, NotImplementedError):\n if output_num_cols == len(input_feature_names):\n if isinstance(transformer, (StandardScaler, MinMaxScaler, RobustScaler)):\n feature_names = [f\"{trans_name}({fname})\" for i, fname in enumerate(input_feature_names)]\n else:\n # can't assume 1-1 mapping. gotta wait for sklearn to support get_feature_names on everything\n # https://github.com/scikit-learn/scikit-learn/pull/12627\n pass\n\n elif len(input_feature_names) == 1:\n if output_num_cols == 1:\n feature_names = [f\"{trans_name}({input_feature_names[0]})\"]\n else:\n feature_names = [f\"{trans_name}({input_feature_names[0]})[{i}]\" for i in range(output_num_cols)]\n\n if feature_names is False:\n # TODO: maybe somehow support passing aliases?\n warn(f\"Unable to infer feature-names for {trans_name}, forced to concatenate.\")\n return _infer_feature_names(\n output_num_cols=output_num_cols,\n trans_name=trans_name,\n transformer=transformer,\n input_feature_names=input_feature_names.__repr__()\n )\n\n return feature_names\n\n\nclass FormulaArithmetic:\n def __add__(self, other):\n return FeatureList.main_effect(self, other)\n\n def __mul__(self, other):\n return FeatureList.main_effect(\n FeatureList.main_effect(self, other),\n FeatureList.interaction(self, other)\n )\n\n def __sub__(self, other):\n raise RuntimeError(\"TODO(@jwdink)\")\n\n def __mod__(self, other):\n return FeatureList.interaction(self, other)\n\n\nclass Feature(FormulaArithmetic, TransformerMixin, _BaseComposition):\n def __init__(self,\n feature: str,\n transforms: Sequence[Tuple[str, TransformerMixin]] = ()):\n # TODO: make it OK to pass list of features, e.g. for PCA. 
maybe regexp as well?\n self.feature = feature\n self.transforms = transforms\n self.transforms_ = None\n super().__init__()\n\n @property\n def name(self):\n name = self.feature\n for trans_nm, trans in self.transforms:\n name = f\"{trans_nm}({name})\"\n return name\n\n def get_params(self, deep=True):\n return self._get_params('transforms', deep=deep)\n\n def set_params(self, **kwargs):\n self._set_params('transforms', **kwargs)\n return self\n\n def fit(self, X, y=None):\n self.transforms_ = []\n orig_index = X.index\n X = X.loc[:, [self.feature]]\n for trans_name, transformer in self.transforms:\n input_feature_names = list(X.columns)\n self.transforms_.append(clone(transformer))\n X = self.transforms_[-1].transform(X.values, y=y)\n X = self._standardize_transform(X, trans_name, transformer, input_feature_names, orig_index)\n return self\n\n def transform(self, X, y=None) -> 'DataFrame':\n orig_index = X.index\n X = X.loc[:, [self.feature]]\n for trans_name, transformer in self.transforms:\n input_feature_names = list(X.columns)\n X = transformer.transform(X.values)\n # TODO: coerce to numpy array, not df. just keep track of feature-names in a list instead of in X.columns\n X = self._standardize_transform(X, trans_name, transformer, input_feature_names, orig_index)\n return X\n\n @staticmethod\n def _standardize_transform(X: np.ndarray,\n trans_name: str,\n transformer: TransformerMixin,\n input_feature_names: Sequence[str],\n orig_index: 'Index'):\n\n if len(X.shape) == 1:\n X = X[:, None]\n return DataFrame(\n data=X,\n columns=_infer_feature_names(\n input_feature_names=input_feature_names,\n output_num_cols=X.shape[1],\n trans_name=trans_name,\n transformer=transformer\n ),\n index=orig_index\n )\n\n\n# class FeatureInteraction(FormulaArithmetic, TransformerMixin, _BaseComposition):\n# # TODO: ditch this, just use tuples, move multiply logic to FeatureList\n# @classmethod\n# def flatten(cls, *args):\n# features = []\n# for arg in args:\n# if isinstance(arg, FeatureInteraction):\n# features.extend(arg.sub_features)\n# elif isinstance(arg, Feature):\n# features.append((arg.name, arg))\n# else:\n# raise ValueError(f\"Unexpected argument type for {arg}\")\n# return cls(features)\n#\n# def __init__(self, sub_features: Sequence[Tuple[str, Feature]]):\n# self.sub_features = sub_features\n# for nm, f in self.sub_features:\n# if nm != f.name:\n# raise RuntimeError(f\"sub-feature name attribute doesn't match name in argument: {nm}, {f.name}\")\n# super().__init__()\n#\n# @property\n# def name(self):\n# sub_feature_names, _ = zip(*self.sub_features)\n# return \"%\".join(sub_feature_names)\n#\n# def get_params(self, deep=True):\n# if len(self.sub_features) == 1:\n# return self.sub_features[0].get_params(deep=deep)\n#\n# return self._get_params('sub_features', deep=deep)\n#\n# def set_params(self, **kwargs):\n# if len(self.sub_features) == 1:\n# self.sub_features[0].set_params(**kwargs)\n# else:\n# self._set_params('sub_features', **kwargs)\n# return self\n#\n# def fit(self, X, y=None):\n# assert isinstance(X, pd.DataFrame)\n# for name, sub_feature in self.sub_features:\n# sub_feature.fit(X, y=y)\n# return self\n#\n# def transform(self, X, y=None) -> 'DataFrame':\n# assert isinstance(X, pd.DataFrame)\n# dfs = []\n# for name, sub_feature in self.sub_features:\n# dfs.append(sub_feature.transform(X))\n# if len(dfs) == 1:\n# return dfs[0]\n# if len(dfs) > 2:\n# raise RuntimeError(\"TODO(@jwdink)\")\n# else:\n# out = pd.DataFrame(index=X.index)\n# for col1 in dfs[0].columns:\n# for col2 in 
dfs[1].columns:\n# out[f\"{col1}%{col2}\"] = dfs[0][col1] * dfs[1][col2]\n#\n# return out\n\n\nclass FeatureList(FormulaArithmetic, BaseEstimator, TransformerMixin):\n @classmethod\n def main_effect(cls, lhs: Union[Feature, 'FeatureList'], rhs: Union[Feature, 'FeatureList']) -> 'FeatureList':\n lhs, rhs = cls._std_args(lhs, rhs)\n raise NotImplementedError(\"TODO\")\n\n final_features = list(lhs.features)\n final_feature_names = set(nm for nm, _ in final_features)\n for feature_name, feature in rhs.features:\n if feature_name not in final_feature_names:\n final_features.append((feature_name, feature))\n\n return cls(final_features)\n\n @classmethod\n def interaction(cls, lhs: Union[Feature, 'FeatureList'], rhs: Union[Feature, 'FeatureList']):\n lhs, rhs = cls._std_args(lhs, rhs)\n raise NotImplementedError(\"TODO\")\n\n interaction_features = []\n fsets = []\n for fname1, ffeat1 in lhs.features:\n for fname2, ffeat2 in rhs.features:\n fset = {fname1, fname2}\n if (fname1 == fname2) or (fset in fsets):\n continue\n fsets.append(fset)\n interaction = FeatureInteraction([(fname1, ffeat1), (fname2, ffeat2)])\n interaction_features.append((interaction.name, interaction))\n return cls(interaction_features)\n\n def __init__(self, groups: Sequence[str], features: Sequence[FeatureInteraction]):\n self.features = features\n self.features_ = None\n super().__init__()\n\n def fit(self, X, y=None):\n raise NotImplementedError(\"TODO\")\n\n def transform(self, X, y=None) -> 'DataFrame':\n # TODO: handle FeatureInteractions here\n # dfs = [feature.transform(X).reset_index(drop=True) for nm, feature in self.features]\n raise NotImplementedError(\"TODO\")\n\n def get_params(self, deep=True):\n # see _BaseComposition._get_params()\n raise NotImplementedError(\"TODO\")\n\n def set_params(self, **kwargs):\n # _BaseComposition._set_params()\n raise NotImplementedError(\"TODO\")\n\n def __repr__(self):\n # TODO: I don't like this\n names, _ = zip(*self.features)\n return \"FeatureList(\\n~{}\\n)\".format(\" + \".join(names))\n\n @staticmethod\n def _std_args(lhs: Union[Feature, 'FeatureList'], rhs: Union[Feature, 'FeatureList']):\n # TODO: tuples instead of FeatureInteractions\n raise NotImplementedError(\"TODO\")\n if isinstance(lhs, Feature):\n lhs = FeatureInteraction([(lhs.name, lhs)])\n if isinstance(lhs, FeatureInteraction):\n lhs = FeatureList([(lhs.name, lhs)])\n if isinstance(rhs, Feature):\n rhs = FeatureInteraction([(rhs.name, rhs)])\n if isinstance(rhs, FeatureInteraction):\n rhs = FeatureList([(rhs.name, rhs)])\n return lhs, rhs\n\n\ndef feature(nm: str, *args) -> Feature:\n transforms = []\n for trans in args:\n if not isinstance(trans, BaseEstimator) and callable(trans):\n name = trans.__name__.lower()\n trans = FunctionTransformer(trans, validate=False)\n else:\n name = trans.__class__.__name__.lower()\n transforms.append((name, trans))\n return Feature(nm, transforms)\n\n\ndef features(nms: Sequence[str], *args) -> FeatureList:\n out = None\n for nm in nms:\n feat = feature(nm, *[clone(arg, safe=False) for arg in args])\n if out is None:\n raise NotImplementedError(\"TODO\")\n # out = FeatureList([(feat.name, FeatureInteraction(sub_features=[(feat.name, feat)]))])\n else:\n raise NotImplementedError(\"TODO\")\n # out = FeatureList.main_effect(out, feat)\n return out\n\n\n"
] |
[
[
"sklearn.base.clone",
"sklearn.preprocessing.FunctionTransformer"
]
] |
StanSStanman/frites
|
[
"53f4745979dc2e7b27145cd63eab6a82fe893ec7"
] |
[
"frites/dataset/tests/test_ds_ephy_io.py"
] |
[
"\"\"\"Test the different supported I/O types for DatasetEphy.\"\"\"\nimport numpy as np\n\nfrom mne import EpochsArray, create_info\nfrom xarray import DataArray\nimport pandas as pd\n\nfrom frites.dataset.ds_ephy_io import (ds_ephy_io, mne_to_arr, xr_to_arr)\n\n\nn_epochs = 5\nn_roi = 3\nn_times = 10\nn_suj = 2\n\nx = [np.random.rand(n_epochs, n_roi, n_times) for k in range(n_suj)]\nsf = 128\ntimes = np.arange(n_times) / sf - 1\nroi = [np.array([f\"roi_{i}\" for i in range(n_roi)]) for _ in range(n_suj)]\ny = [np.random.rand(n_epochs) for k in range(n_suj)]\nz = [np.random.randint(0, 2, (n_epochs,)) for k in range(n_suj)]\n\n\nclass TestDsEphyIO(object):\n\n @staticmethod\n def _to_mne():\n x_mne = []\n for k in range(n_suj):\n info = create_info(roi[k].tolist(), sf)\n x_mne += [EpochsArray(x[k], info, tmin=times[0], verbose=False)]\n return x_mne\n\n @staticmethod\n def _to_xr():\n x_xr = []\n for k in range(n_suj):\n ind = pd.MultiIndex.from_arrays([y[k], z[k]], names=('y', 'z'))\n x_xr += [DataArray(x[k], dims=('epochs', 'roi', 'times'),\n coords=(ind, roi[k], times))]\n return x_xr\n\n def test_mne_to_arr(self):\n \"\"\"Test function mne_to_arr.\"\"\"\n # extract array either with / without roi\n x_mne = self._to_mne()\n x_m_1, times_m_1, roi_m_1 = mne_to_arr(x_mne.copy(), roi=None)\n x_m_2, times_m_2, roi_m_2 = mne_to_arr(x_mne.copy(), roi=roi)\n # testing outputs\n np.testing.assert_array_equal(times_m_1, times_m_2)\n for k in range(len(x_mne)):\n np.testing.assert_array_equal(x_m_1[k], x_m_2[k])\n np.testing.assert_array_equal(roi_m_1[k], roi_m_2[k])\n\n def test_xr_to_arr(self):\n \"\"\"Test function xr_to_arr.\"\"\"\n # testing elements independantly\n x_xr = self._to_xr()\n x_x, roi_xn, y_xn, z_xn, times_xn, sub_roi = xr_to_arr(x_xr.copy())\n assert roi_xn == y_xn == z_xn == times_xn == None\n roi_x = xr_to_arr(x_xr.copy(), roi='roi')[1]\n y_x = xr_to_arr(x_xr.copy(), y='y')[2]\n z_x = xr_to_arr(x_xr.copy(), z='z')[3]\n times_x = xr_to_arr(x_xr.copy(), times='times')[4]\n # testing results\n np.testing.assert_array_equal(times, times_x)\n for k in range(n_suj):\n np.testing.assert_array_equal(x[k], x_x[k])\n np.testing.assert_array_equal(roi[k], roi_x[k])\n np.testing.assert_array_equal(y[k], y_x[k])\n np.testing.assert_array_equal(z[k], z_x[k])\n\n def test_ds_ephy_io(self):\n \"\"\"Test function ds_ephy_io.\"\"\"\n # ---------------------------------------------------------------------\n # using numpy inputs\n x_arr, y_arr, z_arr, roi_arr, times_arr, sub_roi = ds_ephy_io(\n x, roi=roi, y=y, z=z, times=times)\n\n # ---------------------------------------------------------------------\n # using mne inputs\n x_mne = self._to_mne()\n x_mne, y_mne, z_mne, roi_mne, times_mne, sub_roi = ds_ephy_io(\n x_mne, roi=roi, y=y, z=z, times=times)\n\n # ---------------------------------------------------------------------\n x_xr = self._to_xr()\n x_xr, y_xr, z_xr, roi_xr, times_xr, sub_roi = ds_ephy_io(\n x_xr, roi='roi', y='y', z='z', times='times')\n\n # ---------------------------------------------------------------------\n # testing outputs\n for k in range(n_suj):\n # numpy outputs\n np.testing.assert_array_equal(x[k], x_arr[k])\n np.testing.assert_array_equal(roi[k], roi_arr[k])\n np.testing.assert_array_equal(y[k], y_arr[k])\n np.testing.assert_array_equal(z[k], z_arr[k])\n # mne outputs\n np.testing.assert_array_equal(x[k], x_mne[k])\n np.testing.assert_array_equal(roi[k], roi_mne[k])\n np.testing.assert_array_equal(y[k], y_mne[k])\n np.testing.assert_array_equal(z[k], z_mne[k])\n # 
xarray outputs\n np.testing.assert_array_equal(x[k], x_xr[k])\n np.testing.assert_array_equal(roi[k], roi_xr[k])\n np.testing.assert_array_equal(y[k], y_xr[k])\n np.testing.assert_array_equal(z[k], z_xr[k])\n"
] |
[
[
"numpy.arange",
"pandas.MultiIndex.from_arrays",
"numpy.testing.assert_array_equal",
"numpy.random.rand",
"numpy.random.randint"
]
] |
klieret/pandas
|
[
"1739199759f6dd0580a079d6ee96bc4de98ade97"
] |
[
"pandas/core/indexes/multi.py"
] |
[
"from __future__ import annotations\n\nfrom functools import wraps\nfrom sys import getsizeof\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Collection,\n Hashable,\n Iterable,\n List,\n Sequence,\n Tuple,\n cast,\n)\nimport warnings\n\nimport numpy as np\n\nfrom pandas._config import get_option\n\nfrom pandas._libs import (\n algos as libalgos,\n index as libindex,\n lib,\n)\nfrom pandas._libs.hashtable import duplicated\nfrom pandas._typing import (\n AnyArrayLike,\n DtypeObj,\n Scalar,\n Shape,\n)\nfrom pandas.compat.numpy import function as nv\nfrom pandas.errors import (\n InvalidIndexError,\n PerformanceWarning,\n UnsortedIndexError,\n)\nfrom pandas.util._decorators import (\n Appender,\n cache_readonly,\n deprecate_nonkeyword_arguments,\n doc,\n)\n\nfrom pandas.core.dtypes.cast import coerce_indexer_dtype\nfrom pandas.core.dtypes.common import (\n ensure_int64,\n ensure_platform_int,\n is_categorical_dtype,\n is_hashable,\n is_integer,\n is_iterator,\n is_list_like,\n is_object_dtype,\n is_scalar,\n pandas_dtype,\n)\nfrom pandas.core.dtypes.dtypes import ExtensionDtype\nfrom pandas.core.dtypes.generic import (\n ABCDataFrame,\n ABCDatetimeIndex,\n ABCTimedeltaIndex,\n)\nfrom pandas.core.dtypes.missing import (\n array_equivalent,\n isna,\n)\n\nimport pandas.core.algorithms as algos\nfrom pandas.core.arrays import Categorical\nfrom pandas.core.arrays.categorical import factorize_from_iterables\nimport pandas.core.common as com\nfrom pandas.core.indexers import is_empty_indexer\nimport pandas.core.indexes.base as ibase\nfrom pandas.core.indexes.base import (\n Index,\n _index_shared_docs,\n ensure_index,\n get_unanimous_names,\n)\nfrom pandas.core.indexes.frozen import FrozenList\nfrom pandas.core.indexes.numeric import Int64Index\nfrom pandas.core.ops.invalid import make_invalid_op\nfrom pandas.core.sorting import (\n get_group_index,\n indexer_from_factorized,\n lexsort_indexer,\n)\n\nfrom pandas.io.formats.printing import pprint_thing\n\nif TYPE_CHECKING:\n from pandas import (\n CategoricalIndex,\n DataFrame,\n Series,\n )\n\n_index_doc_kwargs = dict(ibase._index_doc_kwargs)\n_index_doc_kwargs.update(\n {\"klass\": \"MultiIndex\", \"target_klass\": \"MultiIndex or list of tuples\"}\n)\n\n\nclass MultiIndexUIntEngine(libindex.BaseMultiIndexCodesEngine, libindex.UInt64Engine):\n \"\"\"\n This class manages a MultiIndex by mapping label combinations to positive\n integers.\n \"\"\"\n\n _base = libindex.UInt64Engine\n\n def _codes_to_ints(self, codes):\n \"\"\"\n Transform combination(s) of uint64 in one uint64 (each), in a strictly\n monotonic way (i.e. respecting the lexicographic order of integer\n combinations): see BaseMultiIndexCodesEngine documentation.\n\n Parameters\n ----------\n codes : 1- or 2-dimensional array of dtype uint64\n Combinations of integers (one per row)\n\n Returns\n -------\n scalar or 1-dimensional array, of dtype uint64\n Integer(s) representing one combination (each).\n \"\"\"\n # Shift the representation of each level by the pre-calculated number\n # of bits:\n codes <<= self.offsets\n\n # Now sum and OR are in fact interchangeable. 
This is a simple\n # composition of the (disjunct) significant bits of each level (i.e.\n # each column in \"codes\") in a single positive integer:\n if codes.ndim == 1:\n # Single key\n return np.bitwise_or.reduce(codes)\n\n # Multiple keys\n return np.bitwise_or.reduce(codes, axis=1)\n\n\nclass MultiIndexPyIntEngine(libindex.BaseMultiIndexCodesEngine, libindex.ObjectEngine):\n \"\"\"\n This class manages those (extreme) cases in which the number of possible\n label combinations overflows the 64 bits integers, and uses an ObjectEngine\n containing Python integers.\n \"\"\"\n\n _base = libindex.ObjectEngine\n\n def _codes_to_ints(self, codes):\n \"\"\"\n Transform combination(s) of uint64 in one Python integer (each), in a\n strictly monotonic way (i.e. respecting the lexicographic order of\n integer combinations): see BaseMultiIndexCodesEngine documentation.\n\n Parameters\n ----------\n codes : 1- or 2-dimensional array of dtype uint64\n Combinations of integers (one per row)\n\n Returns\n -------\n int, or 1-dimensional array of dtype object\n Integer(s) representing one combination (each).\n \"\"\"\n # Shift the representation of each level by the pre-calculated number\n # of bits. Since this can overflow uint64, first make sure we are\n # working with Python integers:\n codes = codes.astype(\"object\") << self.offsets\n\n # Now sum and OR are in fact interchangeable. This is a simple\n # composition of the (disjunct) significant bits of each level (i.e.\n # each column in \"codes\") in a single positive integer (per row):\n if codes.ndim == 1:\n # Single key\n return np.bitwise_or.reduce(codes)\n\n # Multiple keys\n return np.bitwise_or.reduce(codes, axis=1)\n\n\ndef names_compat(meth):\n \"\"\"\n A decorator to allow either `name` or `names` keyword but not both.\n\n This makes it easier to share code with base class.\n \"\"\"\n\n @wraps(meth)\n def new_meth(self_or_cls, *args, **kwargs):\n if \"name\" in kwargs and \"names\" in kwargs:\n raise TypeError(\"Can only provide one of `names` and `name`\")\n elif \"name\" in kwargs:\n kwargs[\"names\"] = kwargs.pop(\"name\")\n\n return meth(self_or_cls, *args, **kwargs)\n\n return new_meth\n\n\nclass MultiIndex(Index):\n \"\"\"\n A multi-level, or hierarchical, index object for pandas objects.\n\n Parameters\n ----------\n levels : sequence of arrays\n The unique labels for each level.\n codes : sequence of arrays\n Integers for each level designating which label at each location.\n sortorder : optional int\n Level of sortedness (must be lexicographically sorted by that\n level).\n names : optional sequence of objects\n Names for each of the index levels. 
(name is accepted for compat).\n copy : bool, default False\n Copy the meta-data.\n verify_integrity : bool, default True\n Check that the levels/codes are consistent and valid.\n\n Attributes\n ----------\n names\n levels\n codes\n nlevels\n levshape\n\n Methods\n -------\n from_arrays\n from_tuples\n from_product\n from_frame\n set_levels\n set_codes\n to_frame\n to_flat_index\n sortlevel\n droplevel\n swaplevel\n reorder_levels\n remove_unused_levels\n get_locs\n\n See Also\n --------\n MultiIndex.from_arrays : Convert list of arrays to MultiIndex.\n MultiIndex.from_product : Create a MultiIndex from the cartesian product\n of iterables.\n MultiIndex.from_tuples : Convert list of tuples to a MultiIndex.\n MultiIndex.from_frame : Make a MultiIndex from a DataFrame.\n Index : The base pandas Index type.\n\n Notes\n -----\n See the `user guide\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html>`__\n for more.\n\n Examples\n --------\n A new ``MultiIndex`` is typically constructed using one of the helper\n methods :meth:`MultiIndex.from_arrays`, :meth:`MultiIndex.from_product`\n and :meth:`MultiIndex.from_tuples`. For example (using ``.from_arrays``):\n\n >>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]\n >>> pd.MultiIndex.from_arrays(arrays, names=('number', 'color'))\n MultiIndex([(1, 'red'),\n (1, 'blue'),\n (2, 'red'),\n (2, 'blue')],\n names=['number', 'color'])\n\n See further examples for how to construct a MultiIndex in the doc strings\n of the mentioned helper methods.\n \"\"\"\n\n _hidden_attrs = Index._hidden_attrs | frozenset()\n\n # initialize to zero-length tuples to make everything work\n _typ = \"multiindex\"\n _names = FrozenList()\n _levels = FrozenList()\n _codes = FrozenList()\n _comparables = [\"names\"]\n\n sortorder: int | None\n\n # --------------------------------------------------------------------\n # Constructors\n\n def __new__(\n cls,\n levels=None,\n codes=None,\n sortorder=None,\n names=None,\n dtype=None,\n copy=False,\n name=None,\n verify_integrity: bool = True,\n ):\n\n # compat with Index\n if name is not None:\n names = name\n if levels is None or codes is None:\n raise TypeError(\"Must pass both levels and codes\")\n if len(levels) != len(codes):\n raise ValueError(\"Length of levels and codes must be the same.\")\n if len(levels) == 0:\n raise ValueError(\"Must pass non-zero number of levels/codes\")\n\n result = object.__new__(cls)\n result._cache = {}\n\n # we've already validated levels and codes, so shortcut here\n result._set_levels(levels, copy=copy, validate=False)\n result._set_codes(codes, copy=copy, validate=False)\n\n result._names = [None] * len(levels)\n if names is not None:\n # handles name validation\n result._set_names(names)\n\n if sortorder is not None:\n result.sortorder = int(sortorder)\n else:\n result.sortorder = sortorder\n\n if verify_integrity:\n new_codes = result._verify_integrity()\n result._codes = new_codes\n\n result._reset_identity()\n\n return result\n\n def _validate_codes(self, level: list, code: list):\n \"\"\"\n Reassign code values as -1 if their corresponding levels are NaN.\n\n Parameters\n ----------\n code : list\n Code to reassign.\n level : list\n Level to check for missing values (NaN, NaT, None).\n\n Returns\n -------\n new code where code value = -1 if it corresponds\n to a level with missing values (NaN, NaT, None).\n \"\"\"\n null_mask = isna(level)\n if np.any(null_mask):\n code = np.where(null_mask[code], -1, code)\n return code\n\n def _verify_integrity(self, codes: 
list | None = None, levels: list | None = None):\n \"\"\"\n Parameters\n ----------\n codes : optional list\n Codes to check for validity. Defaults to current codes.\n levels : optional list\n Levels to check for validity. Defaults to current levels.\n\n Raises\n ------\n ValueError\n If length of levels and codes don't match, if the codes for any\n level would exceed level bounds, or there are any duplicate levels.\n\n Returns\n -------\n new codes where code value = -1 if it corresponds to a\n NaN level.\n \"\"\"\n # NOTE: Currently does not check, among other things, that cached\n # nlevels matches nor that sortorder matches actually sortorder.\n codes = codes or self.codes\n levels = levels or self.levels\n\n if len(levels) != len(codes):\n raise ValueError(\n \"Length of levels and codes must match. NOTE: \"\n \"this index is in an inconsistent state.\"\n )\n codes_length = len(codes[0])\n for i, (level, level_codes) in enumerate(zip(levels, codes)):\n if len(level_codes) != codes_length:\n raise ValueError(\n f\"Unequal code lengths: {[len(code_) for code_ in codes]}\"\n )\n if len(level_codes) and level_codes.max() >= len(level):\n raise ValueError(\n f\"On level {i}, code max ({level_codes.max()}) >= length of \"\n f\"level ({len(level)}). NOTE: this index is in an \"\n \"inconsistent state\"\n )\n if len(level_codes) and level_codes.min() < -1:\n raise ValueError(f\"On level {i}, code value ({level_codes.min()}) < -1\")\n if not level.is_unique:\n raise ValueError(\n f\"Level values must be unique: {list(level)} on level {i}\"\n )\n if self.sortorder is not None:\n if self.sortorder > _lexsort_depth(self.codes, self.nlevels):\n raise ValueError(\n \"Value for sortorder must be inferior or equal to actual \"\n f\"lexsort_depth: sortorder {self.sortorder} \"\n f\"with lexsort_depth {_lexsort_depth(self.codes, self.nlevels)}\"\n )\n\n codes = [\n self._validate_codes(level, code) for level, code in zip(levels, codes)\n ]\n new_codes = FrozenList(codes)\n return new_codes\n\n @classmethod\n def from_arrays(cls, arrays, sortorder=None, names=lib.no_default) -> MultiIndex:\n \"\"\"\n Convert arrays to MultiIndex.\n\n Parameters\n ----------\n arrays : list / sequence of array-likes\n Each array-like gives one level's value for each data point.\n len(arrays) is the number of levels.\n sortorder : int or None\n Level of sortedness (must be lexicographically sorted by that\n level).\n names : list / sequence of str, optional\n Names for the levels in the index.\n\n Returns\n -------\n MultiIndex\n\n See Also\n --------\n MultiIndex.from_tuples : Convert list of tuples to MultiIndex.\n MultiIndex.from_product : Make a MultiIndex from cartesian product\n of iterables.\n MultiIndex.from_frame : Make a MultiIndex from a DataFrame.\n\n Examples\n --------\n >>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]\n >>> pd.MultiIndex.from_arrays(arrays, names=('number', 'color'))\n MultiIndex([(1, 'red'),\n (1, 'blue'),\n (2, 'red'),\n (2, 'blue')],\n names=['number', 'color'])\n \"\"\"\n error_msg = \"Input must be a list / sequence of array-likes.\"\n if not is_list_like(arrays):\n raise TypeError(error_msg)\n elif is_iterator(arrays):\n arrays = list(arrays)\n\n # Check if elements of array are list-like\n for array in arrays:\n if not is_list_like(array):\n raise TypeError(error_msg)\n\n # Check if lengths of all arrays are equal or not,\n # raise ValueError, if not\n for i in range(1, len(arrays)):\n if len(arrays[i]) != len(arrays[i - 1]):\n raise ValueError(\"all arrays must be same 
length\")\n\n codes, levels = factorize_from_iterables(arrays)\n if names is lib.no_default:\n names = [getattr(arr, \"name\", None) for arr in arrays]\n\n return cls(\n levels=levels,\n codes=codes,\n sortorder=sortorder,\n names=names,\n verify_integrity=False,\n )\n\n @classmethod\n @names_compat\n def from_tuples(\n cls,\n tuples: Iterable[tuple[Hashable, ...]],\n sortorder: int | None = None,\n names: Sequence[Hashable] | None = None,\n ) -> MultiIndex:\n \"\"\"\n Convert list of tuples to MultiIndex.\n\n Parameters\n ----------\n tuples : list / sequence of tuple-likes\n Each tuple is the index of one row/column.\n sortorder : int or None\n Level of sortedness (must be lexicographically sorted by that\n level).\n names : list / sequence of str, optional\n Names for the levels in the index.\n\n Returns\n -------\n MultiIndex\n\n See Also\n --------\n MultiIndex.from_arrays : Convert list of arrays to MultiIndex.\n MultiIndex.from_product : Make a MultiIndex from cartesian product\n of iterables.\n MultiIndex.from_frame : Make a MultiIndex from a DataFrame.\n\n Examples\n --------\n >>> tuples = [(1, 'red'), (1, 'blue'),\n ... (2, 'red'), (2, 'blue')]\n >>> pd.MultiIndex.from_tuples(tuples, names=('number', 'color'))\n MultiIndex([(1, 'red'),\n (1, 'blue'),\n (2, 'red'),\n (2, 'blue')],\n names=['number', 'color'])\n \"\"\"\n if not is_list_like(tuples):\n raise TypeError(\"Input must be a list / sequence of tuple-likes.\")\n elif is_iterator(tuples):\n tuples = list(tuples)\n tuples = cast(Collection[Tuple[Hashable, ...]], tuples)\n\n arrays: list[Sequence[Hashable]]\n if len(tuples) == 0:\n if names is None:\n raise TypeError(\"Cannot infer number of levels from empty list\")\n arrays = [[]] * len(names)\n elif isinstance(tuples, (np.ndarray, Index)):\n if isinstance(tuples, Index):\n tuples = np.asarray(tuples._values)\n\n arrays = list(lib.tuples_to_object_array(tuples).T)\n elif isinstance(tuples, list):\n arrays = list(lib.to_object_array_tuples(tuples).T)\n else:\n arrs = zip(*tuples)\n arrays = cast(List[Sequence[Hashable]], arrs)\n\n return cls.from_arrays(arrays, sortorder=sortorder, names=names)\n\n @classmethod\n def from_product(\n cls, iterables, sortorder=None, names=lib.no_default\n ) -> MultiIndex:\n \"\"\"\n Make a MultiIndex from the cartesian product of multiple iterables.\n\n Parameters\n ----------\n iterables : list / sequence of iterables\n Each iterable has unique labels for each level of the index.\n sortorder : int or None\n Level of sortedness (must be lexicographically sorted by that\n level).\n names : list / sequence of str, optional\n Names for the levels in the index.\n\n .. versionchanged:: 1.0.0\n\n If not explicitly provided, names will be inferred from the\n elements of iterables if an element has a name attribute\n\n Returns\n -------\n MultiIndex\n\n See Also\n --------\n MultiIndex.from_arrays : Convert list of arrays to MultiIndex.\n MultiIndex.from_tuples : Convert list of tuples to MultiIndex.\n MultiIndex.from_frame : Make a MultiIndex from a DataFrame.\n\n Examples\n --------\n >>> numbers = [0, 1, 2]\n >>> colors = ['green', 'purple']\n >>> pd.MultiIndex.from_product([numbers, colors],\n ... 
names=['number', 'color'])\n MultiIndex([(0, 'green'),\n (0, 'purple'),\n (1, 'green'),\n (1, 'purple'),\n (2, 'green'),\n (2, 'purple')],\n names=['number', 'color'])\n \"\"\"\n from pandas.core.reshape.util import cartesian_product\n\n if not is_list_like(iterables):\n raise TypeError(\"Input must be a list / sequence of iterables.\")\n elif is_iterator(iterables):\n iterables = list(iterables)\n\n codes, levels = factorize_from_iterables(iterables)\n if names is lib.no_default:\n names = [getattr(it, \"name\", None) for it in iterables]\n\n # codes are all ndarrays, so cartesian_product is lossless\n codes = cartesian_product(codes)\n return cls(levels, codes, sortorder=sortorder, names=names)\n\n @classmethod\n def from_frame(cls, df: DataFrame, sortorder=None, names=None) -> MultiIndex:\n \"\"\"\n Make a MultiIndex from a DataFrame.\n\n Parameters\n ----------\n df : DataFrame\n DataFrame to be converted to MultiIndex.\n sortorder : int, optional\n Level of sortedness (must be lexicographically sorted by that\n level).\n names : list-like, optional\n If no names are provided, use the column names, or tuple of column\n names if the columns is a MultiIndex. If a sequence, overwrite\n names with the given sequence.\n\n Returns\n -------\n MultiIndex\n The MultiIndex representation of the given DataFrame.\n\n See Also\n --------\n MultiIndex.from_arrays : Convert list of arrays to MultiIndex.\n MultiIndex.from_tuples : Convert list of tuples to MultiIndex.\n MultiIndex.from_product : Make a MultiIndex from cartesian product\n of iterables.\n\n Examples\n --------\n >>> df = pd.DataFrame([['HI', 'Temp'], ['HI', 'Precip'],\n ... ['NJ', 'Temp'], ['NJ', 'Precip']],\n ... columns=['a', 'b'])\n >>> df\n a b\n 0 HI Temp\n 1 HI Precip\n 2 NJ Temp\n 3 NJ Precip\n\n >>> pd.MultiIndex.from_frame(df)\n MultiIndex([('HI', 'Temp'),\n ('HI', 'Precip'),\n ('NJ', 'Temp'),\n ('NJ', 'Precip')],\n names=['a', 'b'])\n\n Using explicit names, instead of the column names\n\n >>> pd.MultiIndex.from_frame(df, names=['state', 'observation'])\n MultiIndex([('HI', 'Temp'),\n ('HI', 'Precip'),\n ('NJ', 'Temp'),\n ('NJ', 'Precip')],\n names=['state', 'observation'])\n \"\"\"\n if not isinstance(df, ABCDataFrame):\n raise TypeError(\"Input must be a DataFrame\")\n\n column_names, columns = zip(*df.items())\n names = column_names if names is None else names\n return cls.from_arrays(columns, sortorder=sortorder, names=names)\n\n # --------------------------------------------------------------------\n\n @cache_readonly\n def _values(self) -> np.ndarray:\n # We override here, since our parent uses _data, which we don't use.\n values = []\n\n for i in range(self.nlevels):\n vals = self._get_level_values(i)\n if is_categorical_dtype(vals.dtype):\n vals = cast(\"CategoricalIndex\", vals)\n vals = vals._data._internal_get_values()\n if isinstance(vals.dtype, ExtensionDtype) or isinstance(\n vals, (ABCDatetimeIndex, ABCTimedeltaIndex)\n ):\n vals = vals.astype(object)\n # error: Incompatible types in assignment (expression has type \"ndarray\",\n # variable has type \"Index\")\n vals = np.array(vals, copy=False) # type: ignore[assignment]\n values.append(vals)\n\n arr = lib.fast_zip(values)\n return arr\n\n @property\n def values(self) -> np.ndarray:\n return self._values\n\n @property\n def array(self):\n \"\"\"\n Raises a ValueError for `MultiIndex` because there's no single\n array backing a MultiIndex.\n\n Raises\n ------\n ValueError\n \"\"\"\n raise ValueError(\n \"MultiIndex has no single backing array. 
Use \"\n \"'MultiIndex.to_numpy()' to get a NumPy array of tuples.\"\n )\n\n @cache_readonly\n def dtypes(self) -> Series:\n \"\"\"\n Return the dtypes as a Series for the underlying MultiIndex\n \"\"\"\n from pandas import Series\n\n return Series(\n {\n f\"level_{idx}\" if level.name is None else level.name: level.dtype\n for idx, level in enumerate(self.levels)\n }\n )\n\n def __len__(self) -> int:\n return len(self.codes[0])\n\n # --------------------------------------------------------------------\n # Levels Methods\n\n @cache_readonly\n def levels(self) -> FrozenList:\n # Use cache_readonly to ensure that self.get_locs doesn't repeatedly\n # create new IndexEngine\n # https://github.com/pandas-dev/pandas/issues/31648\n result = [x._rename(name=name) for x, name in zip(self._levels, self._names)]\n for level in result:\n # disallow midx.levels[0].name = \"foo\"\n level._no_setting_name = True\n return FrozenList(result)\n\n def _set_levels(\n self,\n levels,\n level=None,\n copy: bool = False,\n validate: bool = True,\n verify_integrity: bool = False,\n ) -> None:\n # This is NOT part of the levels property because it should be\n # externally not allowed to set levels. User beware if you change\n # _levels directly\n if validate:\n if len(levels) == 0:\n raise ValueError(\"Must set non-zero number of levels.\")\n if level is None and len(levels) != self.nlevels:\n raise ValueError(\"Length of levels must match number of levels.\")\n if level is not None and len(levels) != len(level):\n raise ValueError(\"Length of levels must match length of level.\")\n\n if level is None:\n new_levels = FrozenList(\n ensure_index(lev, copy=copy)._view() for lev in levels\n )\n else:\n level_numbers = [self._get_level_number(lev) for lev in level]\n new_levels_list = list(self._levels)\n for lev_num, lev in zip(level_numbers, levels):\n new_levels_list[lev_num] = ensure_index(lev, copy=copy)._view()\n new_levels = FrozenList(new_levels_list)\n\n if verify_integrity:\n new_codes = self._verify_integrity(levels=new_levels)\n self._codes = new_codes\n\n names = self.names\n self._levels = new_levels\n if any(names):\n self._set_names(names)\n\n self._reset_cache()\n\n @deprecate_nonkeyword_arguments(version=None, allowed_args=[\"self\", \"levels\"])\n def set_levels(\n self, levels, level=None, inplace=None, verify_integrity: bool = True\n ):\n \"\"\"\n Set new levels on MultiIndex. Defaults to returning new index.\n\n Parameters\n ----------\n levels : sequence or list of sequence\n New level(s) to apply.\n level : int, level name, or sequence of int/level names (default None)\n Level(s) to set (None for all levels).\n inplace : bool\n If True, mutates in place.\n\n .. deprecated:: 1.2.0\n verify_integrity : bool, default True\n If True, checks that levels and codes are compatible.\n\n Returns\n -------\n new index (of same type and class...etc) or None\n The same type as the caller or None if ``inplace=True``.\n\n Examples\n --------\n >>> idx = pd.MultiIndex.from_tuples(\n ... [\n ... (1, \"one\"),\n ... (1, \"two\"),\n ... (2, \"one\"),\n ... (2, \"two\"),\n ... (3, \"one\"),\n ... (3, \"two\")\n ... ],\n ... names=[\"foo\", \"bar\"]\n ... 
)\n >>> idx\n MultiIndex([(1, 'one'),\n (1, 'two'),\n (2, 'one'),\n (2, 'two'),\n (3, 'one'),\n (3, 'two')],\n names=['foo', 'bar'])\n\n >>> idx.set_levels([['a', 'b', 'c'], [1, 2]])\n MultiIndex([('a', 1),\n ('a', 2),\n ('b', 1),\n ('b', 2),\n ('c', 1),\n ('c', 2)],\n names=['foo', 'bar'])\n >>> idx.set_levels(['a', 'b', 'c'], level=0)\n MultiIndex([('a', 'one'),\n ('a', 'two'),\n ('b', 'one'),\n ('b', 'two'),\n ('c', 'one'),\n ('c', 'two')],\n names=['foo', 'bar'])\n >>> idx.set_levels(['a', 'b'], level='bar')\n MultiIndex([(1, 'a'),\n (1, 'b'),\n (2, 'a'),\n (2, 'b'),\n (3, 'a'),\n (3, 'b')],\n names=['foo', 'bar'])\n\n If any of the levels passed to ``set_levels()`` exceeds the\n existing length, all of the values from that argument will\n be stored in the MultiIndex levels, though the values will\n be truncated in the MultiIndex output.\n\n >>> idx.set_levels([['a', 'b', 'c'], [1, 2, 3, 4]], level=[0, 1])\n MultiIndex([('a', 1),\n ('a', 2),\n ('b', 1),\n ('b', 2),\n ('c', 1),\n ('c', 2)],\n names=['foo', 'bar'])\n >>> idx.set_levels([['a', 'b', 'c'], [1, 2, 3, 4]], level=[0, 1]).levels\n FrozenList([['a', 'b', 'c'], [1, 2, 3, 4]])\n \"\"\"\n if inplace is not None:\n warnings.warn(\n \"inplace is deprecated and will be removed in a future version.\",\n FutureWarning,\n stacklevel=3,\n )\n else:\n inplace = False\n\n if is_list_like(levels) and not isinstance(levels, Index):\n levels = list(levels)\n\n level, levels = _require_listlike(level, levels, \"Levels\")\n\n if inplace:\n idx = self\n else:\n idx = self._view()\n idx._reset_identity()\n idx._set_levels(\n levels, level=level, validate=True, verify_integrity=verify_integrity\n )\n if not inplace:\n return idx\n\n @property\n def nlevels(self) -> int:\n \"\"\"\n Integer number of levels in this MultiIndex.\n\n Examples\n --------\n >>> mi = pd.MultiIndex.from_arrays([['a'], ['b'], ['c']])\n >>> mi\n MultiIndex([('a', 'b', 'c')],\n )\n >>> mi.nlevels\n 3\n \"\"\"\n return len(self._levels)\n\n @property\n def levshape(self) -> Shape:\n \"\"\"\n A tuple with the length of each level.\n\n Examples\n --------\n >>> mi = pd.MultiIndex.from_arrays([['a'], ['b'], ['c']])\n >>> mi\n MultiIndex([('a', 'b', 'c')],\n )\n >>> mi.levshape\n (1, 1, 1)\n \"\"\"\n return tuple(len(x) for x in self.levels)\n\n # --------------------------------------------------------------------\n # Codes Methods\n\n @property\n def codes(self):\n return self._codes\n\n def _set_codes(\n self,\n codes,\n level=None,\n copy: bool = False,\n validate: bool = True,\n verify_integrity: bool = False,\n ) -> None:\n if validate:\n if level is None and len(codes) != self.nlevels:\n raise ValueError(\"Length of codes must match number of levels\")\n if level is not None and len(codes) != len(level):\n raise ValueError(\"Length of codes must match length of levels.\")\n\n if level is None:\n new_codes = FrozenList(\n _coerce_indexer_frozen(level_codes, lev, copy=copy).view()\n for lev, level_codes in zip(self._levels, codes)\n )\n else:\n level_numbers = [self._get_level_number(lev) for lev in level]\n new_codes_list = list(self._codes)\n for lev_num, level_codes in zip(level_numbers, codes):\n lev = self.levels[lev_num]\n new_codes_list[lev_num] = _coerce_indexer_frozen(\n level_codes, lev, copy=copy\n )\n new_codes = FrozenList(new_codes_list)\n\n if verify_integrity:\n new_codes = self._verify_integrity(codes=new_codes)\n\n self._codes = new_codes\n\n self._reset_cache()\n\n @deprecate_nonkeyword_arguments(version=None, allowed_args=[\"self\", \"codes\"])\n def 
set_codes(self, codes, level=None, inplace=None, verify_integrity: bool = True):\n \"\"\"\n Set new codes on MultiIndex. Defaults to returning new index.\n\n Parameters\n ----------\n codes : sequence or list of sequence\n New codes to apply.\n level : int, level name, or sequence of int/level names (default None)\n Level(s) to set (None for all levels).\n inplace : bool\n If True, mutates in place.\n\n .. deprecated:: 1.2.0\n verify_integrity : bool, default True\n If True, checks that levels and codes are compatible.\n\n Returns\n -------\n new index (of same type and class...etc) or None\n The same type as the caller or None if ``inplace=True``.\n\n Examples\n --------\n >>> idx = pd.MultiIndex.from_tuples(\n ... [(1, \"one\"), (1, \"two\"), (2, \"one\"), (2, \"two\")], names=[\"foo\", \"bar\"]\n ... )\n >>> idx\n MultiIndex([(1, 'one'),\n (1, 'two'),\n (2, 'one'),\n (2, 'two')],\n names=['foo', 'bar'])\n\n >>> idx.set_codes([[1, 0, 1, 0], [0, 0, 1, 1]])\n MultiIndex([(2, 'one'),\n (1, 'one'),\n (2, 'two'),\n (1, 'two')],\n names=['foo', 'bar'])\n >>> idx.set_codes([1, 0, 1, 0], level=0)\n MultiIndex([(2, 'one'),\n (1, 'two'),\n (2, 'one'),\n (1, 'two')],\n names=['foo', 'bar'])\n >>> idx.set_codes([0, 0, 1, 1], level='bar')\n MultiIndex([(1, 'one'),\n (1, 'one'),\n (2, 'two'),\n (2, 'two')],\n names=['foo', 'bar'])\n >>> idx.set_codes([[1, 0, 1, 0], [0, 0, 1, 1]], level=[0, 1])\n MultiIndex([(2, 'one'),\n (1, 'one'),\n (2, 'two'),\n (1, 'two')],\n names=['foo', 'bar'])\n \"\"\"\n if inplace is not None:\n warnings.warn(\n \"inplace is deprecated and will be removed in a future version.\",\n FutureWarning,\n stacklevel=3,\n )\n else:\n inplace = False\n\n level, codes = _require_listlike(level, codes, \"Codes\")\n\n if inplace:\n idx = self\n else:\n idx = self._view()\n idx._reset_identity()\n idx._set_codes(codes, level=level, verify_integrity=verify_integrity)\n if not inplace:\n return idx\n\n # --------------------------------------------------------------------\n # Index Internals\n\n @cache_readonly\n def _engine(self):\n # Calculate the number of bits needed to represent labels in each\n # level, as log2 of their sizes (including -1 for NaN):\n sizes = np.ceil(np.log2([len(level) + 1 for level in self.levels]))\n\n # Sum bit counts, starting from the _right_....\n lev_bits = np.cumsum(sizes[::-1])[::-1]\n\n # ... in order to obtain offsets such that sorting the combination of\n # shifted codes (one for each level, resulting in a unique integer) is\n # equivalent to sorting lexicographically the codes themselves. 
Notice\n # that each level needs to be shifted by the number of bits needed to\n # represent the _previous_ ones:\n offsets = np.concatenate([lev_bits[1:], [0]]).astype(\"uint64\")\n\n # Check the total number of bits needed for our representation:\n if lev_bits[0] > 64:\n # The levels would overflow a 64 bit uint - use Python integers:\n return MultiIndexPyIntEngine(self.levels, self.codes, offsets)\n return MultiIndexUIntEngine(self.levels, self.codes, offsets)\n\n @property\n def _constructor(self) -> Callable[..., MultiIndex]:\n return type(self).from_tuples\n\n @doc(Index._shallow_copy)\n def _shallow_copy(self, values: np.ndarray, name=lib.no_default) -> MultiIndex:\n names = name if name is not lib.no_default else self.names\n\n return type(self).from_tuples(values, sortorder=None, names=names)\n\n def _view(self) -> MultiIndex:\n result = type(self)(\n levels=self.levels,\n codes=self.codes,\n sortorder=self.sortorder,\n names=self.names,\n verify_integrity=False,\n )\n result._cache = self._cache.copy()\n result._cache.pop(\"levels\", None) # GH32669\n return result\n\n # --------------------------------------------------------------------\n\n def copy(\n self,\n names=None,\n dtype=None,\n levels=None,\n codes=None,\n deep=False,\n name=None,\n ):\n \"\"\"\n Make a copy of this object. Names, dtype, levels and codes can be\n passed and will be set on new copy.\n\n Parameters\n ----------\n names : sequence, optional\n dtype : numpy dtype or pandas type, optional\n\n .. deprecated:: 1.2.0\n levels : sequence, optional\n\n .. deprecated:: 1.2.0\n codes : sequence, optional\n\n .. deprecated:: 1.2.0\n deep : bool, default False\n name : Label\n Kept for compatibility with 1-dimensional Index. Should not be used.\n\n Returns\n -------\n MultiIndex\n\n Notes\n -----\n In most cases, there should be no functional difference from using\n ``deep``, but if ``deep`` is passed it will attempt to deepcopy.\n This could be potentially expensive on large MultiIndex objects.\n \"\"\"\n names = self._validate_names(name=name, names=names, deep=deep)\n if levels is not None:\n warnings.warn(\n \"parameter levels is deprecated and will be removed in a future \"\n \"version. Use the set_levels method instead.\",\n FutureWarning,\n stacklevel=2,\n )\n if codes is not None:\n warnings.warn(\n \"parameter codes is deprecated and will be removed in a future \"\n \"version. Use the set_codes method instead.\",\n FutureWarning,\n stacklevel=2,\n )\n\n if deep:\n from copy import deepcopy\n\n if levels is None:\n levels = deepcopy(self.levels)\n if codes is None:\n codes = deepcopy(self.codes)\n\n levels = levels if levels is not None else self.levels\n codes = codes if codes is not None else self.codes\n\n new_index = type(self)(\n levels=levels,\n codes=codes,\n sortorder=self.sortorder,\n names=names,\n verify_integrity=False,\n )\n new_index._cache = self._cache.copy()\n new_index._cache.pop(\"levels\", None) # GH32669\n\n if dtype:\n warnings.warn(\n \"parameter dtype is deprecated and will be removed in a future \"\n \"version. 
Use the astype method instead.\",\n FutureWarning,\n stacklevel=2,\n )\n new_index = new_index.astype(dtype)\n return new_index\n\n def __array__(self, dtype=None) -> np.ndarray:\n \"\"\"the array interface, return my values\"\"\"\n return self.values\n\n def view(self, cls=None):\n \"\"\"this is defined as a copy with the same identity\"\"\"\n result = self.copy()\n result._id = self._id\n return result\n\n @doc(Index.__contains__)\n def __contains__(self, key: Any) -> bool:\n hash(key)\n try:\n self.get_loc(key)\n return True\n except (LookupError, TypeError, ValueError):\n return False\n\n @cache_readonly\n def dtype(self) -> np.dtype:\n return np.dtype(\"O\")\n\n def _is_memory_usage_qualified(self) -> bool:\n \"\"\"return a boolean if we need a qualified .info display\"\"\"\n\n def f(level):\n return \"mixed\" in level or \"string\" in level or \"unicode\" in level\n\n return any(f(level) for level in self._inferred_type_levels)\n\n @doc(Index.memory_usage)\n def memory_usage(self, deep: bool = False) -> int:\n # we are overwriting our base class to avoid\n # computing .values here which could materialize\n # a tuple representation unnecessarily\n return self._nbytes(deep)\n\n @cache_readonly\n def nbytes(self) -> int:\n \"\"\"return the number of bytes in the underlying data\"\"\"\n return self._nbytes(False)\n\n def _nbytes(self, deep: bool = False) -> int:\n \"\"\"\n return the number of bytes in the underlying data\n deeply introspect the level data if deep=True\n\n include the engine hashtable\n\n *this is in internal routine*\n\n \"\"\"\n # for implementations with no useful getsizeof (PyPy)\n objsize = 24\n\n level_nbytes = sum(i.memory_usage(deep=deep) for i in self.levels)\n label_nbytes = sum(i.nbytes for i in self.codes)\n names_nbytes = sum(getsizeof(i, objsize) for i in self.names)\n result = level_nbytes + label_nbytes + names_nbytes\n\n # include our engine hashtable\n result += self._engine.sizeof(deep=deep)\n return result\n\n # --------------------------------------------------------------------\n # Rendering Methods\n\n def _formatter_func(self, tup):\n \"\"\"\n Formats each item in tup according to its level's formatter function.\n \"\"\"\n formatter_funcs = [level._formatter_func for level in self.levels]\n return tuple(func(val) for func, val in zip(formatter_funcs, tup))\n\n def _format_native_types(self, na_rep=\"nan\", **kwargs):\n new_levels = []\n new_codes = []\n\n # go through the levels and format them\n for level, level_codes in zip(self.levels, self.codes):\n level_strs = level._format_native_types(na_rep=na_rep, **kwargs)\n # add nan values, if there are any\n mask = level_codes == -1\n if mask.any():\n nan_index = len(level_strs)\n # numpy 1.21 deprecated implicit string casting\n level_strs = level_strs.astype(str)\n level_strs = np.append(level_strs, na_rep)\n assert not level_codes.flags.writeable # i.e. 
copy is needed\n level_codes = level_codes.copy() # make writeable\n level_codes[mask] = nan_index\n new_levels.append(level_strs)\n new_codes.append(level_codes)\n\n if len(new_levels) == 1:\n # a single-level multi-index\n return Index(new_levels[0].take(new_codes[0]))._format_native_types()\n else:\n # reconstruct the multi-index\n mi = MultiIndex(\n levels=new_levels,\n codes=new_codes,\n names=self.names,\n sortorder=self.sortorder,\n verify_integrity=False,\n )\n return mi._values\n\n def format(\n self,\n name: bool | None = None,\n formatter: Callable | None = None,\n na_rep: str | None = None,\n names: bool = False,\n space: int = 2,\n sparsify=None,\n adjoin: bool = True,\n ) -> list:\n if name is not None:\n names = name\n\n if len(self) == 0:\n return []\n\n stringified_levels = []\n for lev, level_codes in zip(self.levels, self.codes):\n na = na_rep if na_rep is not None else _get_na_rep(lev.dtype.type)\n\n if len(lev) > 0:\n\n formatted = lev.take(level_codes).format(formatter=formatter)\n\n # we have some NA\n mask = level_codes == -1\n if mask.any():\n formatted = np.array(formatted, dtype=object)\n formatted[mask] = na\n formatted = formatted.tolist()\n\n else:\n # weird all NA case\n formatted = [\n pprint_thing(na if isna(x) else x, escape_chars=(\"\\t\", \"\\r\", \"\\n\"))\n for x in algos.take_nd(lev._values, level_codes)\n ]\n stringified_levels.append(formatted)\n\n result_levels = []\n for lev, lev_name in zip(stringified_levels, self.names):\n level = []\n\n if names:\n level.append(\n pprint_thing(lev_name, escape_chars=(\"\\t\", \"\\r\", \"\\n\"))\n if lev_name is not None\n else \"\"\n )\n\n level.extend(np.array(lev, dtype=object))\n result_levels.append(level)\n\n if sparsify is None:\n sparsify = get_option(\"display.multi_sparse\")\n\n if sparsify:\n sentinel = \"\"\n # GH3547 use value of sparsify as sentinel if it's \"Falsey\"\n assert isinstance(sparsify, bool) or sparsify is lib.no_default\n if sparsify in [False, lib.no_default]:\n sentinel = sparsify\n # little bit of a kludge job for #1217\n result_levels = sparsify_labels(\n result_levels, start=int(names), sentinel=sentinel\n )\n\n if adjoin:\n from pandas.io.formats.format import get_adjustment\n\n adj = get_adjustment()\n return adj.adjoin(space, *result_levels).split(\"\\n\")\n else:\n return result_levels\n\n # --------------------------------------------------------------------\n # Names Methods\n\n def _get_names(self) -> FrozenList:\n return FrozenList(self._names)\n\n def _set_names(self, names, level=None, validate: bool = True):\n \"\"\"\n Set new names on index. Each name has to be a hashable type.\n\n Parameters\n ----------\n values : str or sequence\n name(s) to set\n level : int, level name, or sequence of int/level names (default None)\n If the index is a MultiIndex (hierarchical), level(s) to set (None\n for all levels). Otherwise level must be None\n validate : bool, default True\n validate that the names match level lengths\n\n Raises\n ------\n TypeError if each name is not hashable.\n\n Notes\n -----\n sets names on levels. 
WARNING: mutates!\n\n Note that you generally want to set this *after* changing levels, so\n that it only acts on copies\n \"\"\"\n # GH 15110\n # Don't allow a single string for names in a MultiIndex\n if names is not None and not is_list_like(names):\n raise ValueError(\"Names should be list-like for a MultiIndex\")\n names = list(names)\n\n if validate:\n if level is not None and len(names) != len(level):\n raise ValueError(\"Length of names must match length of level.\")\n if level is None and len(names) != self.nlevels:\n raise ValueError(\n \"Length of names must match number of levels in MultiIndex.\"\n )\n\n if level is None:\n level = range(self.nlevels)\n else:\n level = [self._get_level_number(lev) for lev in level]\n\n # set the name\n for lev, name in zip(level, names):\n if name is not None:\n # GH 20527\n # All items in 'names' need to be hashable:\n if not is_hashable(name):\n raise TypeError(\n f\"{type(self).__name__}.name must be a hashable type\"\n )\n # error: Cannot determine type of '__setitem__'\n self._names[lev] = name # type: ignore[has-type]\n\n # If .levels has been accessed, the names in our cache will be stale.\n self._reset_cache()\n\n names = property(\n fset=_set_names,\n fget=_get_names,\n doc=\"\"\"\n Names of levels in MultiIndex.\n\n Examples\n --------\n >>> mi = pd.MultiIndex.from_arrays(\n ... [[1, 2], [3, 4], [5, 6]], names=['x', 'y', 'z'])\n >>> mi\n MultiIndex([(1, 3, 5),\n (2, 4, 6)],\n names=['x', 'y', 'z'])\n >>> mi.names\n FrozenList(['x', 'y', 'z'])\n \"\"\",\n )\n\n # --------------------------------------------------------------------\n\n @doc(Index._get_grouper_for_level)\n def _get_grouper_for_level(self, mapper, level):\n indexer = self.codes[level]\n level_index = self.levels[level]\n\n if mapper is not None:\n # Handle group mapping function and return\n level_values = self.levels[level].take(indexer)\n grouper = level_values.map(mapper)\n return grouper, None, None\n\n codes, uniques = algos.factorize(indexer, sort=True)\n\n if len(uniques) > 0 and uniques[0] == -1:\n # Handle NAs\n mask = indexer != -1\n ok_codes, uniques = algos.factorize(indexer[mask], sort=True)\n\n codes = np.empty(len(indexer), dtype=indexer.dtype)\n codes[mask] = ok_codes\n codes[~mask] = -1\n\n if len(uniques) < len(level_index):\n # Remove unobserved levels from level_index\n level_index = level_index.take(uniques)\n else:\n # break references back to us so that setting the name\n # on the output of a groupby doesn't reflect back here.\n level_index = level_index.copy()\n\n if level_index._can_hold_na:\n grouper = level_index.take(codes, fill_value=True)\n else:\n grouper = level_index.take(codes)\n\n return grouper, codes, level_index\n\n @cache_readonly\n def inferred_type(self) -> str:\n return \"mixed\"\n\n def _get_level_number(self, level) -> int:\n count = self.names.count(level)\n if (count > 1) and not is_integer(level):\n raise ValueError(\n f\"The name {level} occurs multiple times, use a level number\"\n )\n try:\n level = self.names.index(level)\n except ValueError as err:\n if not is_integer(level):\n raise KeyError(f\"Level {level} not found\") from err\n elif level < 0:\n level += self.nlevels\n if level < 0:\n orig_level = level - self.nlevels\n raise IndexError(\n f\"Too many levels: Index has only {self.nlevels} levels, \"\n f\"{orig_level} is not a valid level number\"\n ) from err\n # Note: levels are zero-based\n elif level >= self.nlevels:\n raise IndexError(\n f\"Too many levels: Index has only {self.nlevels} levels, \"\n f\"not 
{level + 1}\"\n ) from err\n return level\n\n @property\n def _has_complex_internals(self) -> bool:\n # used to avoid libreduction code paths, which raise or require conversion\n return True\n\n @cache_readonly\n def is_monotonic_increasing(self) -> bool:\n \"\"\"\n return if the index is monotonic increasing (only equal or\n increasing) values.\n \"\"\"\n if any(-1 in code for code in self.codes):\n return False\n\n if all(level.is_monotonic for level in self.levels):\n # If each level is sorted, we can operate on the codes directly. GH27495\n return libalgos.is_lexsorted(\n [x.astype(\"int64\", copy=False) for x in self.codes]\n )\n\n # reversed() because lexsort() wants the most significant key last.\n values = [\n self._get_level_values(i)._values for i in reversed(range(len(self.levels)))\n ]\n try:\n sort_order = np.lexsort(values)\n return Index(sort_order).is_monotonic\n except TypeError:\n\n # we have mixed types and np.lexsort is not happy\n return Index(self._values).is_monotonic\n\n @cache_readonly\n def is_monotonic_decreasing(self) -> bool:\n \"\"\"\n return if the index is monotonic decreasing (only equal or\n decreasing) values.\n \"\"\"\n # monotonic decreasing if and only if reverse is monotonic increasing\n return self[::-1].is_monotonic_increasing\n\n @cache_readonly\n def _inferred_type_levels(self) -> list[str]:\n \"\"\"return a list of the inferred types, one for each level\"\"\"\n return [i.inferred_type for i in self.levels]\n\n @doc(Index.duplicated)\n def duplicated(self, keep=\"first\") -> np.ndarray:\n shape = tuple(len(lev) for lev in self.levels)\n ids = get_group_index(self.codes, shape, sort=False, xnull=False)\n\n return duplicated(ids, keep)\n\n # error: Cannot override final attribute \"_duplicated\"\n # (previously declared in base class \"IndexOpsMixin\")\n _duplicated = duplicated # type: ignore[misc]\n\n def fillna(self, value=None, downcast=None):\n \"\"\"\n fillna is not implemented for MultiIndex\n \"\"\"\n raise NotImplementedError(\"isna is not defined for MultiIndex\")\n\n @doc(Index.dropna)\n def dropna(self, how: str = \"any\") -> MultiIndex:\n nans = [level_codes == -1 for level_codes in self.codes]\n if how == \"any\":\n indexer = np.any(nans, axis=0)\n elif how == \"all\":\n indexer = np.all(nans, axis=0)\n else:\n raise ValueError(f\"invalid how option: {how}\")\n\n new_codes = [level_codes[~indexer] for level_codes in self.codes]\n return self.set_codes(codes=new_codes)\n\n def _get_level_values(self, level: int, unique: bool = False) -> Index:\n \"\"\"\n Return vector of label values for requested level,\n equal to the length of the index\n\n **this is an internal method**\n\n Parameters\n ----------\n level : int\n unique : bool, default False\n if True, drop duplicated values\n\n Returns\n -------\n Index\n \"\"\"\n lev = self.levels[level]\n level_codes = self.codes[level]\n name = self._names[level]\n if unique:\n level_codes = algos.unique(level_codes)\n filled = algos.take_nd(lev._values, level_codes, fill_value=lev._na_value)\n return lev._shallow_copy(filled, name=name)\n\n def get_level_values(self, level):\n \"\"\"\n Return vector of label values for requested level.\n\n Length of returned vector is equal to the length of the index.\n\n Parameters\n ----------\n level : int or str\n ``level`` is either the integer position of the level in the\n MultiIndex, or the name of the level.\n\n Returns\n -------\n values : Index\n Values is a level of this MultiIndex converted to\n a single :class:`Index` (or subclass thereof).\n\n 
Examples\n --------\n Create a MultiIndex:\n\n >>> mi = pd.MultiIndex.from_arrays((list('abc'), list('def')))\n >>> mi.names = ['level_1', 'level_2']\n\n Get level values by supplying level as either integer or name:\n\n >>> mi.get_level_values(0)\n Index(['a', 'b', 'c'], dtype='object', name='level_1')\n >>> mi.get_level_values('level_2')\n Index(['d', 'e', 'f'], dtype='object', name='level_2')\n \"\"\"\n level = self._get_level_number(level)\n values = self._get_level_values(level)\n return values\n\n @doc(Index.unique)\n def unique(self, level=None):\n\n if level is None:\n return super().unique()\n else:\n level = self._get_level_number(level)\n return self._get_level_values(level=level, unique=True)\n\n def to_frame(self, index: bool = True, name=None) -> DataFrame:\n \"\"\"\n Create a DataFrame with the levels of the MultiIndex as columns.\n\n Column ordering is determined by the DataFrame constructor with data as\n a dict.\n\n Parameters\n ----------\n index : bool, default True\n Set the index of the returned DataFrame as the original MultiIndex.\n\n name : list / sequence of str, optional\n The passed names should substitute index level names.\n\n Returns\n -------\n DataFrame : a DataFrame containing the original MultiIndex data.\n\n See Also\n --------\n DataFrame : Two-dimensional, size-mutable, potentially heterogeneous\n tabular data.\n\n Examples\n --------\n >>> mi = pd.MultiIndex.from_arrays([['a', 'b'], ['c', 'd']])\n >>> mi\n MultiIndex([('a', 'c'),\n ('b', 'd')],\n )\n\n >>> df = mi.to_frame()\n >>> df\n 0 1\n a c a c\n b d b d\n\n >>> df = mi.to_frame(index=False)\n >>> df\n 0 1\n 0 a c\n 1 b d\n\n >>> df = mi.to_frame(name=['x', 'y'])\n >>> df\n x y\n a c a c\n b d b d\n \"\"\"\n from pandas import DataFrame\n\n if name is not None:\n if not is_list_like(name):\n raise TypeError(\"'name' must be a list / sequence of column names.\")\n\n if len(name) != len(self.levels):\n raise ValueError(\n \"'name' should have same length as number of levels on index.\"\n )\n idx_names = name\n else:\n idx_names = self.names\n\n # Guarantee resulting column order - PY36+ dict maintains insertion order\n result = DataFrame(\n {\n (level if lvlname is None else lvlname): self._get_level_values(level)\n for lvlname, level in zip(idx_names, range(len(self.levels)))\n },\n copy=False,\n )\n\n if index:\n result.index = self\n return result\n\n def to_flat_index(self) -> Index:\n \"\"\"\n Convert a MultiIndex to an Index of Tuples containing the level values.\n\n Returns\n -------\n pd.Index\n Index with the MultiIndex data represented in Tuples.\n\n See Also\n --------\n MultiIndex.from_tuples : Convert flat index back to MultiIndex.\n\n Notes\n -----\n This method will simply return the caller if called by anything other\n than a MultiIndex.\n\n Examples\n --------\n >>> index = pd.MultiIndex.from_product(\n ... [['foo', 'bar'], ['baz', 'qux']],\n ... 
names=['a', 'b'])\n >>> index.to_flat_index()\n Index([('foo', 'baz'), ('foo', 'qux'),\n ('bar', 'baz'), ('bar', 'qux')],\n dtype='object')\n \"\"\"\n return Index(self._values, tupleize_cols=False)\n\n @property\n def _is_all_dates(self) -> bool:\n return False\n\n def is_lexsorted(self) -> bool:\n warnings.warn(\n \"MultiIndex.is_lexsorted is deprecated as a public function, \"\n \"users should use MultiIndex.is_monotonic_increasing instead.\",\n FutureWarning,\n stacklevel=2,\n )\n return self._is_lexsorted()\n\n def _is_lexsorted(self) -> bool:\n \"\"\"\n Return True if the codes are lexicographically sorted.\n\n Returns\n -------\n bool\n\n Examples\n --------\n In the below examples, the first level of the MultiIndex is sorted because\n a<b<c, so there is no need to look at the next level.\n\n >>> pd.MultiIndex.from_arrays([['a', 'b', 'c'], ['d', 'e', 'f']]).is_lexsorted()\n True\n >>> pd.MultiIndex.from_arrays([['a', 'b', 'c'], ['d', 'f', 'e']]).is_lexsorted()\n True\n\n In case there is a tie, the lexicographical sorting looks\n at the next level of the MultiIndex.\n\n >>> pd.MultiIndex.from_arrays([[0, 1, 1], ['a', 'b', 'c']]).is_lexsorted()\n True\n >>> pd.MultiIndex.from_arrays([[0, 1, 1], ['a', 'c', 'b']]).is_lexsorted()\n False\n >>> pd.MultiIndex.from_arrays([['a', 'a', 'b', 'b'],\n ... ['aa', 'bb', 'aa', 'bb']]).is_lexsorted()\n True\n >>> pd.MultiIndex.from_arrays([['a', 'a', 'b', 'b'],\n ... ['bb', 'aa', 'aa', 'bb']]).is_lexsorted()\n False\n \"\"\"\n return self._lexsort_depth == self.nlevels\n\n @property\n def lexsort_depth(self):\n warnings.warn(\n \"MultiIndex.is_lexsorted is deprecated as a public function, \"\n \"users should use MultiIndex.is_monotonic_increasing instead.\",\n FutureWarning,\n stacklevel=2,\n )\n return self._lexsort_depth\n\n @cache_readonly\n def _lexsort_depth(self) -> int:\n \"\"\"\n Compute and return the lexsort_depth, the number of levels of the\n MultiIndex that are sorted lexically\n\n Returns\n -------\n int\n \"\"\"\n if self.sortorder is not None:\n return self.sortorder\n return _lexsort_depth(self.codes, self.nlevels)\n\n def _sort_levels_monotonic(self) -> MultiIndex:\n \"\"\"\n This is an *internal* function.\n\n Create a new MultiIndex from the current to monotonically sorted\n items IN the levels. This does not actually make the entire MultiIndex\n monotonic, JUST the levels.\n\n The resulting MultiIndex will have the same outward\n appearance, meaning the same .values and ordering. It will also\n be .equals() to the original.\n\n Returns\n -------\n MultiIndex\n\n Examples\n --------\n >>> mi = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],\n ... 
codes=[[0, 0, 1, 1], [0, 1, 0, 1]])\n >>> mi\n MultiIndex([('a', 'bb'),\n ('a', 'aa'),\n ('b', 'bb'),\n ('b', 'aa')],\n )\n\n >>> mi.sort_values()\n MultiIndex([('a', 'aa'),\n ('a', 'bb'),\n ('b', 'aa'),\n ('b', 'bb')],\n )\n \"\"\"\n if self._is_lexsorted() and self.is_monotonic:\n return self\n\n new_levels = []\n new_codes = []\n\n for lev, level_codes in zip(self.levels, self.codes):\n\n if not lev.is_monotonic:\n try:\n # indexer to reorder the levels\n indexer = lev.argsort()\n except TypeError:\n pass\n else:\n lev = lev.take(indexer)\n\n # indexer to reorder the level codes\n indexer = ensure_platform_int(indexer)\n ri = lib.get_reverse_indexer(indexer, len(indexer))\n level_codes = algos.take_nd(ri, level_codes)\n\n new_levels.append(lev)\n new_codes.append(level_codes)\n\n return MultiIndex(\n new_levels,\n new_codes,\n names=self.names,\n sortorder=self.sortorder,\n verify_integrity=False,\n )\n\n def remove_unused_levels(self) -> MultiIndex:\n \"\"\"\n Create new MultiIndex from current that removes unused levels.\n\n Unused level(s) means levels that are not expressed in the\n labels. The resulting MultiIndex will have the same outward\n appearance, meaning the same .values and ordering. It will\n also be .equals() to the original.\n\n Returns\n -------\n MultiIndex\n\n Examples\n --------\n >>> mi = pd.MultiIndex.from_product([range(2), list('ab')])\n >>> mi\n MultiIndex([(0, 'a'),\n (0, 'b'),\n (1, 'a'),\n (1, 'b')],\n )\n\n >>> mi[2:]\n MultiIndex([(1, 'a'),\n (1, 'b')],\n )\n\n The 0 from the first level is not represented\n and can be removed\n\n >>> mi2 = mi[2:].remove_unused_levels()\n >>> mi2.levels\n FrozenList([[1], ['a', 'b']])\n \"\"\"\n new_levels = []\n new_codes = []\n\n changed = False\n for lev, level_codes in zip(self.levels, self.codes):\n\n # Since few levels are typically unused, bincount() is more\n # efficient than unique() - however it only accepts positive values\n # (and drops order):\n uniques = np.where(np.bincount(level_codes + 1) > 0)[0] - 1\n has_na = int(len(uniques) and (uniques[0] == -1))\n\n if len(uniques) != len(lev) + has_na:\n\n if lev.isna().any() and len(uniques) == len(lev):\n break\n # We have unused levels\n changed = True\n\n # Recalculate uniques, now preserving order.\n # Can easily be cythonized by exploiting the already existing\n # \"uniques\" and stop parsing \"level_codes\" when all items\n # are found:\n uniques = algos.unique(level_codes)\n if has_na:\n na_idx = np.where(uniques == -1)[0]\n # Just ensure that -1 is in first position:\n uniques[[0, na_idx[0]]] = uniques[[na_idx[0], 0]]\n\n # codes get mapped from uniques to 0:len(uniques)\n # -1 (if present) is mapped to last position\n code_mapping = np.zeros(len(lev) + has_na)\n # ... 
and reassigned value -1:\n code_mapping[uniques] = np.arange(len(uniques)) - has_na\n\n level_codes = code_mapping[level_codes]\n\n # new levels are simple\n lev = lev.take(uniques[has_na:])\n\n new_levels.append(lev)\n new_codes.append(level_codes)\n\n result = self.view()\n\n if changed:\n result._reset_identity()\n result._set_levels(new_levels, validate=False)\n result._set_codes(new_codes, validate=False)\n\n return result\n\n # --------------------------------------------------------------------\n # Pickling Methods\n\n def __reduce__(self):\n \"\"\"Necessary for making this object picklable\"\"\"\n d = {\n \"levels\": list(self.levels),\n \"codes\": list(self.codes),\n \"sortorder\": self.sortorder,\n \"names\": list(self.names),\n }\n return ibase._new_Index, (type(self), d), None\n\n # --------------------------------------------------------------------\n\n def __getitem__(self, key):\n if is_scalar(key):\n key = com.cast_scalar_indexer(key, warn_float=True)\n\n retval = []\n for lev, level_codes in zip(self.levels, self.codes):\n if level_codes[key] == -1:\n retval.append(np.nan)\n else:\n retval.append(lev[level_codes[key]])\n\n return tuple(retval)\n else:\n # in general cannot be sure whether the result will be sorted\n sortorder = None\n if com.is_bool_indexer(key):\n key = np.asarray(key, dtype=bool)\n sortorder = self.sortorder\n elif isinstance(key, slice):\n if key.step is None or key.step > 0:\n sortorder = self.sortorder\n elif isinstance(key, Index):\n key = np.asarray(key)\n\n new_codes = [level_codes[key] for level_codes in self.codes]\n\n return MultiIndex(\n levels=self.levels,\n codes=new_codes,\n names=self.names,\n sortorder=sortorder,\n verify_integrity=False,\n )\n\n def _getitem_slice(self: MultiIndex, slobj: slice) -> MultiIndex:\n \"\"\"\n Fastpath for __getitem__ when we know we have a slice.\n \"\"\"\n sortorder = None\n if slobj.step is None or slobj.step > 0:\n sortorder = self.sortorder\n\n new_codes = [level_codes[slobj] for level_codes in self.codes]\n\n return type(self)(\n levels=self.levels,\n codes=new_codes,\n names=self._names,\n sortorder=sortorder,\n verify_integrity=False,\n )\n\n @Appender(_index_shared_docs[\"take\"] % _index_doc_kwargs)\n def take(\n self: MultiIndex,\n indices,\n axis: int = 0,\n allow_fill: bool = True,\n fill_value=None,\n **kwargs,\n ) -> MultiIndex:\n nv.validate_take((), kwargs)\n indices = ensure_platform_int(indices)\n\n # only fill if we are passing a non-None fill_value\n allow_fill = self._maybe_disallow_fill(allow_fill, fill_value, indices)\n\n na_value = -1\n\n taken = [lab.take(indices) for lab in self.codes]\n if allow_fill:\n mask = indices == -1\n if mask.any():\n masked = []\n for new_label in taken:\n label_values = new_label\n label_values[mask] = na_value\n masked.append(np.asarray(label_values))\n taken = masked\n\n return MultiIndex(\n levels=self.levels, codes=taken, names=self.names, verify_integrity=False\n )\n\n def append(self, other):\n \"\"\"\n Append a collection of Index options together\n\n Parameters\n ----------\n other : Index or list/tuple of indices\n\n Returns\n -------\n appended : Index\n \"\"\"\n if not isinstance(other, (list, tuple)):\n other = [other]\n\n if all(\n (isinstance(o, MultiIndex) and o.nlevels >= self.nlevels) for o in other\n ):\n arrays = []\n for i in range(self.nlevels):\n label = self._get_level_values(i)\n appended = [o._get_level_values(i) for o in other]\n arrays.append(label.append(appended))\n return MultiIndex.from_arrays(arrays, names=self.names)\n\n 
to_concat = (self._values,) + tuple(k._values for k in other)\n new_tuples = np.concatenate(to_concat)\n\n # if all(isinstance(x, MultiIndex) for x in other):\n try:\n return MultiIndex.from_tuples(new_tuples, names=self.names)\n except (TypeError, IndexError):\n return Index(new_tuples)\n\n def argsort(self, *args, **kwargs) -> np.ndarray:\n return self._values.argsort(*args, **kwargs)\n\n @Appender(_index_shared_docs[\"repeat\"] % _index_doc_kwargs)\n def repeat(self, repeats: int, axis=None) -> MultiIndex:\n nv.validate_repeat((), {\"axis\": axis})\n # error: Incompatible types in assignment (expression has type \"ndarray\",\n # variable has type \"int\")\n repeats = ensure_platform_int(repeats) # type: ignore[assignment]\n return MultiIndex(\n levels=self.levels,\n codes=[\n level_codes.view(np.ndarray).astype(np.intp).repeat(repeats)\n for level_codes in self.codes\n ],\n names=self.names,\n sortorder=self.sortorder,\n verify_integrity=False,\n )\n\n def drop(self, codes, level=None, errors=\"raise\"):\n \"\"\"\n Make new MultiIndex with passed list of codes deleted\n\n Parameters\n ----------\n codes : array-like\n Must be a list of tuples when level is not specified\n level : int or level name, default None\n errors : str, default 'raise'\n\n Returns\n -------\n dropped : MultiIndex\n \"\"\"\n if level is not None:\n return self._drop_from_level(codes, level, errors)\n\n if not isinstance(codes, (np.ndarray, Index)):\n try:\n codes = com.index_labels_to_array(codes, dtype=np.dtype(\"object\"))\n except ValueError:\n pass\n\n inds = []\n for level_codes in codes:\n try:\n loc = self.get_loc(level_codes)\n # get_loc returns either an integer, a slice, or a boolean\n # mask\n if isinstance(loc, int):\n inds.append(loc)\n elif isinstance(loc, slice):\n step = loc.step if loc.step is not None else 1\n inds.extend(range(loc.start, loc.stop, step))\n elif com.is_bool_indexer(loc):\n if self._lexsort_depth == 0:\n warnings.warn(\n \"dropping on a non-lexsorted multi-index \"\n \"without a level parameter may impact performance.\",\n PerformanceWarning,\n stacklevel=3,\n )\n loc = loc.nonzero()[0]\n inds.extend(loc)\n else:\n msg = f\"unsupported indexer of type {type(loc)}\"\n raise AssertionError(msg)\n except KeyError:\n if errors != \"ignore\":\n raise\n\n return self.delete(inds)\n\n def _drop_from_level(self, codes, level, errors=\"raise\") -> MultiIndex:\n codes = com.index_labels_to_array(codes)\n i = self._get_level_number(level)\n index = self.levels[i]\n values = index.get_indexer(codes)\n # If nan should be dropped it will equal -1 here. We have to check which values\n # are not nan and equal -1, this means they are missing in the index\n nan_codes = isna(codes)\n values[(np.equal(nan_codes, False)) & (values == -1)] = -2\n if index.shape[0] == self.shape[0]:\n values[np.equal(nan_codes, True)] = -2\n\n not_found = codes[values == -2]\n if len(not_found) != 0 and errors != \"ignore\":\n raise KeyError(f\"labels {not_found} not found in level\")\n mask = ~algos.isin(self.codes[i], values)\n\n return self[mask]\n\n def swaplevel(self, i=-2, j=-1) -> MultiIndex:\n \"\"\"\n Swap level i with level j.\n\n Calling this method does not change the ordering of the values.\n\n Parameters\n ----------\n i : int, str, default -2\n First level of index to be swapped. Can pass level name as string.\n Type of parameters can be mixed.\n j : int, str, default -1\n Second level of index to be swapped. 
Can pass level name as string.\n Type of parameters can be mixed.\n\n Returns\n -------\n MultiIndex\n A new MultiIndex.\n\n See Also\n --------\n Series.swaplevel : Swap levels i and j in a MultiIndex.\n Dataframe.swaplevel : Swap levels i and j in a MultiIndex on a\n particular axis.\n\n Examples\n --------\n >>> mi = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],\n ... codes=[[0, 0, 1, 1], [0, 1, 0, 1]])\n >>> mi\n MultiIndex([('a', 'bb'),\n ('a', 'aa'),\n ('b', 'bb'),\n ('b', 'aa')],\n )\n >>> mi.swaplevel(0, 1)\n MultiIndex([('bb', 'a'),\n ('aa', 'a'),\n ('bb', 'b'),\n ('aa', 'b')],\n )\n \"\"\"\n new_levels = list(self.levels)\n new_codes = list(self.codes)\n new_names = list(self.names)\n\n i = self._get_level_number(i)\n j = self._get_level_number(j)\n\n new_levels[i], new_levels[j] = new_levels[j], new_levels[i]\n new_codes[i], new_codes[j] = new_codes[j], new_codes[i]\n new_names[i], new_names[j] = new_names[j], new_names[i]\n\n return MultiIndex(\n levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False\n )\n\n def reorder_levels(self, order) -> MultiIndex:\n \"\"\"\n Rearrange levels using input order. May not drop or duplicate levels.\n\n Parameters\n ----------\n order : list of int or list of str\n List representing new level order. Reference level by number\n (position) or by key (label).\n\n Returns\n -------\n MultiIndex\n\n Examples\n --------\n >>> mi = pd.MultiIndex.from_arrays([[1, 2], [3, 4]], names=['x', 'y'])\n >>> mi\n MultiIndex([(1, 3),\n (2, 4)],\n names=['x', 'y'])\n\n >>> mi.reorder_levels(order=[1, 0])\n MultiIndex([(3, 1),\n (4, 2)],\n names=['y', 'x'])\n\n >>> mi.reorder_levels(order=['y', 'x'])\n MultiIndex([(3, 1),\n (4, 2)],\n names=['y', 'x'])\n \"\"\"\n order = [self._get_level_number(i) for i in order]\n if len(order) != self.nlevels:\n raise AssertionError(\n f\"Length of order must be same as number of levels ({self.nlevels}), \"\n f\"got {len(order)}\"\n )\n new_levels = [self.levels[i] for i in order]\n new_codes = [self.codes[i] for i in order]\n new_names = [self.names[i] for i in order]\n\n return MultiIndex(\n levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False\n )\n\n def _get_codes_for_sorting(self) -> list[Categorical]:\n \"\"\"\n we are categorizing our codes by using the\n available categories (all, not just observed)\n excluding any missing ones (-1); this is in preparation\n for sorting, where we need to disambiguate that -1 is not\n a valid valid\n \"\"\"\n\n def cats(level_codes):\n return np.arange(\n np.array(level_codes).max() + 1 if len(level_codes) else 0,\n dtype=level_codes.dtype,\n )\n\n return [\n Categorical.from_codes(level_codes, cats(level_codes), ordered=True)\n for level_codes in self.codes\n ]\n\n def sortlevel(\n self, level=0, ascending: bool = True, sort_remaining: bool = True\n ) -> tuple[MultiIndex, np.ndarray]:\n \"\"\"\n Sort MultiIndex at the requested level.\n\n The result will respect the original ordering of the associated\n factor at that level.\n\n Parameters\n ----------\n level : list-like, int or str, default 0\n If a string is given, must be a name of the level.\n If list-like must be names or ints of levels.\n ascending : bool, default True\n False to sort in descending order.\n Can also be a list to specify a directed ordering.\n sort_remaining : sort by the remaining levels after level\n\n Returns\n -------\n sorted_index : pd.MultiIndex\n Resulting index.\n indexer : np.ndarray\n Indices of output values in original index.\n\n Examples\n --------\n >>> 
mi = pd.MultiIndex.from_arrays([[0, 0], [2, 1]])\n >>> mi\n MultiIndex([(0, 2),\n (0, 1)],\n )\n\n >>> mi.sortlevel()\n (MultiIndex([(0, 1),\n (0, 2)],\n ), array([1, 0]))\n\n >>> mi.sortlevel(sort_remaining=False)\n (MultiIndex([(0, 2),\n (0, 1)],\n ), array([0, 1]))\n\n >>> mi.sortlevel(1)\n (MultiIndex([(0, 1),\n (0, 2)],\n ), array([1, 0]))\n\n >>> mi.sortlevel(1, ascending=False)\n (MultiIndex([(0, 2),\n (0, 1)],\n ), array([0, 1]))\n \"\"\"\n if isinstance(level, (str, int)):\n level = [level]\n level = [self._get_level_number(lev) for lev in level]\n sortorder = None\n\n # we have a directed ordering via ascending\n if isinstance(ascending, list):\n if not len(level) == len(ascending):\n raise ValueError(\"level must have same length as ascending\")\n\n indexer = lexsort_indexer(\n [self.codes[lev] for lev in level], orders=ascending\n )\n\n # level ordering\n else:\n\n codes = list(self.codes)\n shape = list(self.levshape)\n\n # partition codes and shape\n primary = tuple(codes[lev] for lev in level)\n primshp = tuple(shape[lev] for lev in level)\n\n # Reverse sorted to retain the order of\n # smaller indices that needs to be removed\n for lev in sorted(level, reverse=True):\n codes.pop(lev)\n shape.pop(lev)\n\n if sort_remaining:\n primary += primary + tuple(codes)\n primshp += primshp + tuple(shape)\n else:\n sortorder = level[0]\n\n indexer = indexer_from_factorized(primary, primshp, compress=False)\n\n if not ascending:\n indexer = indexer[::-1]\n\n indexer = ensure_platform_int(indexer)\n new_codes = [level_codes.take(indexer) for level_codes in self.codes]\n\n new_index = MultiIndex(\n codes=new_codes,\n levels=self.levels,\n names=self.names,\n sortorder=sortorder,\n verify_integrity=False,\n )\n\n return new_index, indexer\n\n def reindex(\n self, target, method=None, level=None, limit=None, tolerance=None\n ) -> tuple[MultiIndex, np.ndarray | None]:\n \"\"\"\n Create index with target's values (move/add/delete values as necessary)\n\n Returns\n -------\n new_index : pd.MultiIndex\n Resulting index\n indexer : np.ndarray[np.intp] or None\n Indices of output values in original index.\n\n \"\"\"\n # GH6552: preserve names when reindexing to non-named target\n # (i.e. 
neither Index nor Series).\n preserve_names = not hasattr(target, \"names\")\n\n if level is not None:\n if method is not None:\n raise TypeError(\"Fill method not supported if level passed\")\n\n # GH7774: preserve dtype/tz if target is empty and not an Index.\n # target may be an iterator\n target = ibase.ensure_has_len(target)\n if len(target) == 0 and not isinstance(target, Index):\n idx = self.levels[level]\n attrs = idx._get_attributes_dict()\n attrs.pop(\"freq\", None) # don't preserve freq\n target = type(idx)._simple_new(np.empty(0, dtype=idx.dtype), **attrs)\n else:\n target = ensure_index(target)\n target, indexer, _ = self._join_level(\n target, level, how=\"right\", keep_order=False\n )\n else:\n target = ensure_index(target)\n if self.equals(target):\n indexer = None\n else:\n if self.is_unique:\n indexer = self.get_indexer(\n target, method=method, limit=limit, tolerance=tolerance\n )\n else:\n raise ValueError(\"cannot handle a non-unique multi-index!\")\n\n if not isinstance(target, MultiIndex):\n if indexer is None:\n target = self\n elif (indexer >= 0).all():\n target = self.take(indexer)\n else:\n try:\n target = MultiIndex.from_tuples(target)\n except TypeError:\n # not all tuples, see test_constructor_dict_multiindex_reindex_flat\n return target, indexer\n if (\n preserve_names\n and target.nlevels == self.nlevels\n and target.names != self.names\n ):\n target = target.copy(deep=False)\n target.names = self.names\n\n return target, indexer\n\n # --------------------------------------------------------------------\n # Indexing Methods\n\n def _check_indexing_error(self, key):\n if not is_hashable(key) or is_iterator(key):\n # We allow tuples if they are hashable, whereas other Index\n # subclasses require scalar.\n # We have to explicitly exclude generators, as these are hashable.\n raise InvalidIndexError(key)\n\n def _should_fallback_to_positional(self) -> bool:\n \"\"\"\n Should integer key(s) be treated as positional?\n \"\"\"\n # GH#33355\n return self.levels[0]._should_fallback_to_positional()\n\n def _get_values_for_loc(self, series: Series, loc, key):\n \"\"\"\n Do a positional lookup on the given Series, returning either a scalar\n or a Series.\n\n Assumes that `series.index is self`\n \"\"\"\n new_values = series._values[loc]\n if is_scalar(loc):\n return new_values\n\n if len(new_values) == 1 and not self.nlevels > 1:\n # If more than one level left, we can not return a scalar\n return new_values[0]\n\n new_index = self[loc]\n new_index = maybe_droplevels(new_index, key)\n new_ser = series._constructor(new_values, index=new_index, name=series.name)\n return new_ser.__finalize__(series)\n\n def _convert_listlike_indexer(self, keyarr) -> np.ndarray | None:\n \"\"\"\n Analogous to get_indexer when we are partial-indexing on our first level.\n\n Parameters\n ----------\n keyarr : Index, np.ndarray, or ExtensionArray\n Indexer to convert.\n\n Returns\n -------\n np.ndarray[intp] or None\n \"\"\"\n indexer = None\n\n # are we indexing a specific level\n if len(keyarr) and not isinstance(keyarr[0], tuple):\n _, indexer = self.reindex(keyarr, level=0)\n\n # take all\n if indexer is None:\n indexer = np.arange(len(self), dtype=np.intp)\n return indexer\n\n check = self.levels[0].get_indexer(keyarr)\n mask = check == -1\n if mask.any():\n raise KeyError(f\"{keyarr[mask]} not in index\")\n elif is_empty_indexer(indexer, keyarr):\n # We get here when levels still contain values which are not\n # actually in Index anymore\n raise KeyError(f\"{keyarr} not in index\")\n\n 
return indexer\n\n def _get_partial_string_timestamp_match_key(self, key):\n \"\"\"\n Translate any partial string timestamp matches in key, returning the\n new key.\n\n Only relevant for MultiIndex.\n \"\"\"\n # GH#10331\n if isinstance(key, str) and self.levels[0]._supports_partial_string_indexing:\n # Convert key '2016-01-01' to\n # ('2016-01-01'[, slice(None, None, None)]+)\n key = (key,) + (slice(None),) * (len(self.levels) - 1)\n\n if isinstance(key, tuple):\n # Convert (..., '2016-01-01', ...) in tuple to\n # (..., slice('2016-01-01', '2016-01-01', None), ...)\n new_key = []\n for i, component in enumerate(key):\n if (\n isinstance(component, str)\n and self.levels[i]._supports_partial_string_indexing\n ):\n new_key.append(slice(component, component, None))\n else:\n new_key.append(component)\n key = tuple(new_key)\n\n return key\n\n def _get_indexer(\n self,\n target: Index,\n method: str | None = None,\n limit: int | None = None,\n tolerance=None,\n ) -> np.ndarray:\n # returned ndarray is np.intp\n\n # empty indexer\n if not len(target):\n return ensure_platform_int(np.array([]))\n\n if not isinstance(target, MultiIndex):\n try:\n target = MultiIndex.from_tuples(target)\n except (TypeError, ValueError):\n\n # let's instead try with a straight Index\n if method is None:\n return Index(self._values).get_indexer(\n target, method=method, limit=limit, tolerance=tolerance\n )\n\n # TODO: explicitly raise here? we only have one test that\n # gets here, and it is checking that we raise with method=\"nearest\"\n\n if method == \"pad\" or method == \"backfill\":\n if tolerance is not None:\n raise NotImplementedError(\n \"tolerance not implemented yet for MultiIndex\"\n )\n # TODO: get_indexer_with_fill docstring says values must be _sorted_\n # but that doesn't appear to be enforced\n indexer = self._engine.get_indexer_with_fill(\n target=target._values, values=self._values, method=method, limit=limit\n )\n elif method == \"nearest\":\n raise NotImplementedError(\n \"method='nearest' not implemented yet \"\n \"for MultiIndex; see GitHub issue 9365\"\n )\n else:\n indexer = self._engine.get_indexer(target._values)\n\n # Note: we only get here (in extant tests at least) with\n # target.nlevels == self.nlevels\n return ensure_platform_int(indexer)\n\n def get_slice_bound(\n self, label: Hashable | Sequence[Hashable], side: str, kind: str | None = None\n ) -> int:\n \"\"\"\n For an ordered MultiIndex, compute slice bound\n that corresponds to given label.\n\n Returns leftmost (one-past-the-rightmost if `side=='right') position\n of given label.\n\n Parameters\n ----------\n label : object or tuple of objects\n side : {'left', 'right'}\n kind : {'loc', 'getitem', None}\n\n Returns\n -------\n int\n Index of label.\n\n Notes\n -----\n This method only works if level 0 index of the MultiIndex is lexsorted.\n\n Examples\n --------\n >>> mi = pd.MultiIndex.from_arrays([list('abbc'), list('gefd')])\n\n Get the locations from the leftmost 'b' in the first level\n until the end of the multiindex:\n\n >>> mi.get_slice_bound('b', side=\"left\")\n 1\n\n Like above, but if you get the locations from the rightmost\n 'b' in the first level and 'f' in the second level:\n\n >>> mi.get_slice_bound(('b','f'), side=\"right\")\n 3\n\n See Also\n --------\n MultiIndex.get_loc : Get location for a label or a tuple of labels.\n MultiIndex.get_locs : Get location for a label/slice/list/mask or a\n sequence of such.\n \"\"\"\n if not isinstance(label, tuple):\n label = (label,)\n return 
self._partial_tup_index(label, side=side)\n\n def slice_locs(self, start=None, end=None, step=None, kind=None):\n \"\"\"\n For an ordered MultiIndex, compute the slice locations for input\n labels.\n\n The input labels can be tuples representing partial levels, e.g. for a\n MultiIndex with 3 levels, you can pass a single value (corresponding to\n the first level), or a 1-, 2-, or 3-tuple.\n\n Parameters\n ----------\n start : label or tuple, default None\n If None, defaults to the beginning\n end : label or tuple\n If None, defaults to the end\n step : int or None\n Slice step\n kind : string, optional, defaults None\n\n Returns\n -------\n (start, end) : (int, int)\n\n Notes\n -----\n This method only works if the MultiIndex is properly lexsorted. So,\n if only the first 2 levels of a 3-level MultiIndex are lexsorted,\n you can only pass two levels to ``.slice_locs``.\n\n Examples\n --------\n >>> mi = pd.MultiIndex.from_arrays([list('abbd'), list('deff')],\n ... names=['A', 'B'])\n\n Get the slice locations from the beginning of 'b' in the first level\n until the end of the multiindex:\n\n >>> mi.slice_locs(start='b')\n (1, 4)\n\n Like above, but stop at the end of 'b' in the first level and 'f' in\n the second level:\n\n >>> mi.slice_locs(start='b', end=('b', 'f'))\n (1, 3)\n\n See Also\n --------\n MultiIndex.get_loc : Get location for a label or a tuple of labels.\n MultiIndex.get_locs : Get location for a label/slice/list/mask or a\n sequence of such.\n \"\"\"\n # This function adds nothing to its parent implementation (the magic\n # happens in get_slice_bound method), but it adds meaningful doc.\n return super().slice_locs(start, end, step)\n\n def _partial_tup_index(self, tup: tuple, side=\"left\"):\n if len(tup) > self._lexsort_depth:\n raise UnsortedIndexError(\n f\"Key length ({len(tup)}) was greater than MultiIndex lexsort depth \"\n f\"({self._lexsort_depth})\"\n )\n\n n = len(tup)\n start, end = 0, len(self)\n zipped = zip(tup, self.levels, self.codes)\n for k, (lab, lev, labs) in enumerate(zipped):\n section = labs[start:end]\n\n if lab not in lev and not isna(lab):\n if not lev.is_type_compatible(lib.infer_dtype([lab], skipna=False)):\n raise TypeError(f\"Level type mismatch: {lab}\")\n\n # short circuit\n loc = lev.searchsorted(lab, side=side)\n if side == \"right\" and loc >= 0:\n loc -= 1\n return start + section.searchsorted(loc, side=side)\n\n idx = self._get_loc_single_level_index(lev, lab)\n if isinstance(idx, slice) and k < n - 1:\n # Get start and end value from slice, necessary when a non-integer\n # interval is given as input GH#37707\n start = idx.start\n end = idx.stop\n elif k < n - 1:\n end = start + section.searchsorted(idx, side=\"right\")\n start = start + section.searchsorted(idx, side=\"left\")\n elif isinstance(idx, slice):\n idx = idx.start\n return start + section.searchsorted(idx, side=side)\n else:\n return start + section.searchsorted(idx, side=side)\n\n def _get_loc_single_level_index(self, level_index: Index, key: Hashable) -> int:\n \"\"\"\n If key is NA value, location of index unify as -1.\n\n Parameters\n ----------\n level_index: Index\n key : label\n\n Returns\n -------\n loc : int\n If key is NA value, loc is -1\n Else, location of key in index.\n\n See Also\n --------\n Index.get_loc : The get_loc method for (single-level) index.\n \"\"\"\n if is_scalar(key) and isna(key):\n return -1\n else:\n return level_index.get_loc(key)\n\n def get_loc(self, key, method=None):\n \"\"\"\n Get location for a label or a tuple of labels.\n\n The 
location is returned as an integer/slice or boolean\n mask.\n\n Parameters\n ----------\n key : label or tuple of labels (one for each level)\n method : None\n\n Returns\n -------\n loc : int, slice object or boolean mask\n If the key is past the lexsort depth, the return may be a\n boolean mask array, otherwise it is always a slice or int.\n\n See Also\n --------\n Index.get_loc : The get_loc method for (single-level) index.\n MultiIndex.slice_locs : Get slice location given start label(s) and\n end label(s).\n MultiIndex.get_locs : Get location for a label/slice/list/mask or a\n sequence of such.\n\n Notes\n -----\n The key cannot be a slice, list of same-level labels, a boolean mask,\n or a sequence of such. If you want to use those, use\n :meth:`MultiIndex.get_locs` instead.\n\n Examples\n --------\n >>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')])\n\n >>> mi.get_loc('b')\n slice(1, 3, None)\n\n >>> mi.get_loc(('b', 'e'))\n 1\n \"\"\"\n if method is not None:\n raise NotImplementedError(\n \"only the default get_loc method is \"\n \"currently supported for MultiIndex\"\n )\n\n hash(key)\n\n def _maybe_to_slice(loc):\n \"\"\"convert integer indexer to boolean mask or slice if possible\"\"\"\n if not isinstance(loc, np.ndarray) or loc.dtype != np.intp:\n return loc\n\n loc = lib.maybe_indices_to_slice(loc, len(self))\n if isinstance(loc, slice):\n return loc\n\n mask = np.empty(len(self), dtype=\"bool\")\n mask.fill(False)\n mask[loc] = True\n return mask\n\n if not isinstance(key, tuple):\n loc = self._get_level_indexer(key, level=0)\n return _maybe_to_slice(loc)\n\n keylen = len(key)\n if self.nlevels < keylen:\n raise KeyError(\n f\"Key length ({keylen}) exceeds index depth ({self.nlevels})\"\n )\n\n if keylen == self.nlevels and self.is_unique:\n return self._engine.get_loc(key)\n\n # -- partial selection or non-unique index\n # break the key into 2 parts based on the lexsort_depth of the index;\n # the first part returns a continuous slice of the index; the 2nd part\n # needs linear search within the slice\n i = self._lexsort_depth\n lead_key, follow_key = key[:i], key[i:]\n start, stop = (\n self.slice_locs(lead_key, lead_key) if lead_key else (0, len(self))\n )\n\n if start == stop:\n raise KeyError(key)\n\n if not follow_key:\n return slice(start, stop)\n\n warnings.warn(\n \"indexing past lexsort depth may impact performance.\",\n PerformanceWarning,\n stacklevel=10,\n )\n\n loc = np.arange(start, stop, dtype=np.intp)\n\n for i, k in enumerate(follow_key, len(lead_key)):\n mask = self.codes[i][loc] == self._get_loc_single_level_index(\n self.levels[i], k\n )\n if not mask.all():\n loc = loc[mask]\n if not len(loc):\n raise KeyError(key)\n\n return _maybe_to_slice(loc) if len(loc) != stop - start else slice(start, stop)\n\n def get_loc_level(self, key, level=0, drop_level: bool = True):\n \"\"\"\n Get location and sliced index for requested label(s)/level(s).\n\n Parameters\n ----------\n key : label or sequence of labels\n level : int/level name or list thereof, optional\n drop_level : bool, default True\n If ``False``, the resulting index will not drop any level.\n\n Returns\n -------\n loc : A 2-tuple where the elements are:\n Element 0: int, slice object or boolean array\n Element 1: The resulting sliced multiindex/index. 
If the key\n contains all levels, this will be ``None``.\n\n See Also\n --------\n MultiIndex.get_loc : Get location for a label or a tuple of labels.\n MultiIndex.get_locs : Get location for a label/slice/list/mask or a\n sequence of such.\n\n Examples\n --------\n >>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')],\n ... names=['A', 'B'])\n\n >>> mi.get_loc_level('b')\n (slice(1, 3, None), Index(['e', 'f'], dtype='object', name='B'))\n\n >>> mi.get_loc_level('e', level='B')\n (array([False, True, False]), Index(['b'], dtype='object', name='A'))\n\n >>> mi.get_loc_level(['b', 'e'])\n (1, None)\n \"\"\"\n if not isinstance(level, (list, tuple)):\n level = self._get_level_number(level)\n else:\n level = [self._get_level_number(lev) for lev in level]\n return self._get_loc_level(key, level=level, drop_level=drop_level)\n\n def _get_loc_level(self, key, level: int | list[int] = 0, drop_level: bool = True):\n \"\"\"\n get_loc_level but with `level` known to be positional, not name-based.\n \"\"\"\n\n # different name to distinguish from maybe_droplevels\n def maybe_mi_droplevels(indexer, levels, drop_level: bool):\n if not drop_level:\n return self[indexer]\n # kludge around\n orig_index = new_index = self[indexer]\n\n for i in sorted(levels, reverse=True):\n try:\n new_index = new_index._drop_level_numbers([i])\n except ValueError:\n\n # no dropping here\n return orig_index\n return new_index\n\n if isinstance(level, (tuple, list)):\n if len(key) != len(level):\n raise AssertionError(\n \"Key for location must have same length as number of levels\"\n )\n result = None\n for lev, k in zip(level, key):\n loc, new_index = self._get_loc_level(k, level=lev)\n if isinstance(loc, slice):\n mask = np.zeros(len(self), dtype=bool)\n mask[loc] = True\n loc = mask\n\n result = loc if result is None else result & loc\n\n return result, maybe_mi_droplevels(result, level, drop_level)\n\n # kludge for #1796\n if isinstance(key, list):\n key = tuple(key)\n\n if isinstance(key, tuple) and level == 0:\n\n try:\n if key in self.levels[0]:\n indexer = self._get_level_indexer(key, level=level)\n new_index = maybe_mi_droplevels(indexer, [0], drop_level)\n return indexer, new_index\n except (TypeError, InvalidIndexError):\n pass\n\n if not any(isinstance(k, slice) for k in key):\n\n # partial selection\n # optionally get indexer to avoid re-calculation\n def partial_selection(key, indexer=None):\n if indexer is None:\n indexer = self.get_loc(key)\n ilevels = [\n i for i in range(len(key)) if key[i] != slice(None, None)\n ]\n return indexer, maybe_mi_droplevels(indexer, ilevels, drop_level)\n\n if len(key) == self.nlevels and self.is_unique:\n # Complete key in unique index -> standard get_loc\n try:\n return (self._engine.get_loc(key), None)\n except KeyError as e:\n raise KeyError(key) from e\n else:\n return partial_selection(key)\n else:\n indexer = None\n for i, k in enumerate(key):\n if not isinstance(k, slice):\n k = self._get_level_indexer(k, level=i)\n if isinstance(k, slice):\n # everything\n if k.start == 0 and k.stop == len(self):\n k = slice(None, None)\n else:\n k_index = k\n\n if isinstance(k, slice):\n if k == slice(None, None):\n continue\n else:\n raise TypeError(key)\n\n if indexer is None:\n indexer = k_index\n else: # pragma: no cover\n indexer &= k_index\n if indexer is None:\n indexer = slice(None, None)\n ilevels = [i for i in range(len(key)) if key[i] != slice(None, None)]\n return indexer, maybe_mi_droplevels(indexer, ilevels, drop_level)\n else:\n indexer = 
self._get_level_indexer(key, level=level)\n return indexer, maybe_mi_droplevels(indexer, [level], drop_level)\n\n def _get_level_indexer(self, key, level: int = 0, indexer=None):\n # `level` kwarg is _always_ positional, never name\n # return an indexer, boolean array or a slice showing where the key is\n # in the totality of values\n # if the indexer is provided, then use this\n\n level_index = self.levels[level]\n level_codes = self.codes[level]\n\n def convert_indexer(start, stop, step, indexer=indexer, codes=level_codes):\n # given the inputs and the codes/indexer, compute an indexer set\n # if we have a provided indexer, then this need not consider\n # the entire labels set\n if step is not None and step < 0:\n # Switch elements for negative step size\n start, stop = stop - 1, start - 1\n r = np.arange(start, stop, step)\n\n if indexer is not None and len(indexer) != len(codes):\n\n # we have an indexer which maps the locations in the labels\n # that we have already selected (and is not an indexer for the\n # entire set) otherwise this is wasteful so we only need to\n # examine locations that are in this set the only magic here is\n # that the result are the mappings to the set that we have\n # selected\n from pandas import Series\n\n mapper = Series(indexer)\n indexer = codes.take(ensure_platform_int(indexer))\n result = Series(Index(indexer).isin(r).nonzero()[0])\n m = result.map(mapper)\n # error: Incompatible types in assignment (expression has type\n # \"ndarray\", variable has type \"Series\")\n m = np.asarray(m) # type: ignore[assignment]\n\n else:\n # error: Incompatible types in assignment (expression has type\n # \"ndarray\", variable has type \"Series\")\n m = np.zeros(len(codes), dtype=bool) # type: ignore[assignment]\n m[np.in1d(codes, r, assume_unique=Index(codes).is_unique)] = True\n\n return m\n\n if isinstance(key, slice):\n # handle a slice, returning a slice if we can\n # otherwise a boolean indexer\n\n try:\n if key.start is not None:\n start = level_index.get_loc(key.start)\n else:\n start = 0\n if key.stop is not None:\n stop = level_index.get_loc(key.stop)\n elif isinstance(start, slice):\n stop = len(level_index)\n else:\n stop = len(level_index) - 1\n step = key.step\n except KeyError:\n\n # we have a partial slice (like looking up a partial date\n # string)\n start = stop = level_index.slice_indexer(key.start, key.stop, key.step)\n step = start.step\n\n if isinstance(start, slice) or isinstance(stop, slice):\n # we have a slice for start and/or stop\n # a partial date slicer on a DatetimeIndex generates a slice\n # note that the stop ALREADY includes the stopped point (if\n # it was a string sliced)\n start = getattr(start, \"start\", start)\n stop = getattr(stop, \"stop\", stop)\n return convert_indexer(start, stop, step)\n\n elif level > 0 or self._lexsort_depth == 0 or step is not None:\n # need to have like semantics here to right\n # searching as when we are using a slice\n # so include the stop+1 (so we include stop)\n return convert_indexer(start, stop + 1, step)\n else:\n # sorted, so can return slice object -> view\n i = level_codes.searchsorted(start, side=\"left\")\n j = level_codes.searchsorted(stop, side=\"right\")\n return slice(i, j, step)\n\n else:\n\n idx = self._get_loc_single_level_index(level_index, key)\n\n if level > 0 or self._lexsort_depth == 0:\n # Desired level is not sorted\n locs = np.array(level_codes == idx, dtype=bool, copy=False)\n if not locs.any():\n # The label is present in self.levels[level] but unused:\n raise 
KeyError(key)\n return locs\n\n if isinstance(idx, slice):\n start = idx.start\n end = idx.stop\n else:\n start = level_codes.searchsorted(idx, side=\"left\")\n end = level_codes.searchsorted(idx, side=\"right\")\n\n if start == end:\n # The label is present in self.levels[level] but unused:\n raise KeyError(key)\n return slice(start, end)\n\n def get_locs(self, seq):\n \"\"\"\n Get location for a sequence of labels.\n\n Parameters\n ----------\n seq : label, slice, list, mask or a sequence of such\n You should use one of the above for each level.\n If a level should not be used, set it to ``slice(None)``.\n\n Returns\n -------\n numpy.ndarray\n NumPy array of integers suitable for passing to iloc.\n\n See Also\n --------\n MultiIndex.get_loc : Get location for a label or a tuple of labels.\n MultiIndex.slice_locs : Get slice location given start label(s) and\n end label(s).\n\n Examples\n --------\n >>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')])\n\n >>> mi.get_locs('b') # doctest: +SKIP\n array([1, 2], dtype=int64)\n\n >>> mi.get_locs([slice(None), ['e', 'f']]) # doctest: +SKIP\n array([1, 2], dtype=int64)\n\n >>> mi.get_locs([[True, False, True], slice('e', 'f')]) # doctest: +SKIP\n array([2], dtype=int64)\n \"\"\"\n\n # must be lexsorted to at least as many levels\n true_slices = [i for (i, s) in enumerate(com.is_true_slices(seq)) if s]\n if true_slices and true_slices[-1] >= self._lexsort_depth:\n raise UnsortedIndexError(\n \"MultiIndex slicing requires the index to be lexsorted: slicing \"\n f\"on levels {true_slices}, lexsort depth {self._lexsort_depth}\"\n )\n # indexer\n # this is the list of all values that we want to select\n n = len(self)\n indexer = None\n\n def _convert_to_indexer(r) -> Int64Index:\n # return an indexer\n if isinstance(r, slice):\n m = np.zeros(n, dtype=bool)\n m[r] = True\n r = m.nonzero()[0]\n elif com.is_bool_indexer(r):\n if len(r) != n:\n raise ValueError(\n \"cannot index with a boolean indexer \"\n \"that is not the same length as the \"\n \"index\"\n )\n r = r.nonzero()[0]\n return Int64Index(r)\n\n def _update_indexer(idxr: Index | None, indexer: Index | None, key) -> Index:\n if indexer is None:\n indexer = Index(np.arange(n))\n if idxr is None:\n return indexer\n indexer_intersection = indexer.intersection(idxr)\n if indexer_intersection.empty and not idxr.empty and not indexer.empty:\n raise KeyError(key)\n return indexer_intersection\n\n for i, k in enumerate(seq):\n\n if com.is_bool_indexer(k):\n # a boolean indexer, must be the same length!\n k = np.asarray(k)\n indexer = _update_indexer(\n _convert_to_indexer(k), indexer=indexer, key=seq\n )\n\n elif is_list_like(k):\n # a collection of labels to include from this level (these\n # are or'd)\n indexers: Int64Index | None = None\n for x in k:\n try:\n idxrs = _convert_to_indexer(\n self._get_level_indexer(x, level=i, indexer=indexer)\n )\n indexers = (idxrs if indexers is None else indexers).union(\n idxrs, sort=False\n )\n except KeyError:\n\n # ignore not founds\n continue\n\n if indexers is not None:\n indexer = _update_indexer(indexers, indexer=indexer, key=seq)\n else:\n # no matches we are done\n return np.array([], dtype=np.int64)\n\n elif com.is_null_slice(k):\n # empty slice\n indexer = _update_indexer(None, indexer=indexer, key=seq)\n\n elif isinstance(k, slice):\n\n # a slice, include BOTH of the labels\n indexer = _update_indexer(\n _convert_to_indexer(\n self._get_level_indexer(k, level=i, indexer=indexer)\n ),\n indexer=indexer,\n key=seq,\n )\n else:\n # a single 
label\n indexer = _update_indexer(\n _convert_to_indexer(\n self.get_loc_level(k, level=i, drop_level=False)[0]\n ),\n indexer=indexer,\n key=seq,\n )\n\n # empty indexer\n if indexer is None:\n return np.array([], dtype=np.int64)\n\n assert isinstance(indexer, Int64Index), type(indexer)\n indexer = self._reorder_indexer(seq, indexer)\n\n return indexer._values\n\n # --------------------------------------------------------------------\n\n def _reorder_indexer(\n self,\n seq: tuple[Scalar | Iterable | AnyArrayLike, ...],\n indexer: Int64Index,\n ) -> Int64Index:\n \"\"\"\n Reorder an indexer of a MultiIndex (self) so that the label are in the\n same order as given in seq\n\n Parameters\n ----------\n seq : label/slice/list/mask or a sequence of such\n indexer: an Int64Index indexer of self\n\n Returns\n -------\n indexer : a sorted Int64Index indexer of self ordered as seq\n \"\"\"\n # If the index is lexsorted and the list_like label in seq are sorted\n # then we do not need to sort\n if self._is_lexsorted():\n need_sort = False\n for i, k in enumerate(seq):\n if is_list_like(k):\n if not need_sort:\n k_codes = self.levels[i].get_indexer(k)\n k_codes = k_codes[k_codes >= 0] # Filter absent keys\n # True if the given codes are not ordered\n need_sort = (k_codes[:-1] > k_codes[1:]).any()\n elif isinstance(k, slice) and k.step is not None and k.step < 0:\n need_sort = True\n # Bail out if both index and seq are sorted\n if not need_sort:\n return indexer\n\n n = len(self)\n keys: tuple[np.ndarray, ...] = ()\n # For each level of the sequence in seq, map the level codes with the\n # order they appears in a list-like sequence\n # This mapping is then use to reorder the indexer\n for i, k in enumerate(seq):\n if is_scalar(k):\n # GH#34603 we want to treat a scalar the same as an all equal list\n k = [k]\n if com.is_bool_indexer(k):\n new_order = np.arange(n)[indexer]\n elif is_list_like(k):\n # Generate a map with all level codes as sorted initially\n k = algos.unique(k)\n key_order_map = np.ones(len(self.levels[i]), dtype=np.uint64) * len(\n self.levels[i]\n )\n # Set order as given in the indexer list\n level_indexer = self.levels[i].get_indexer(k)\n level_indexer = level_indexer[level_indexer >= 0] # Filter absent keys\n key_order_map[level_indexer] = np.arange(len(level_indexer))\n\n new_order = key_order_map[self.codes[i][indexer]]\n elif isinstance(k, slice) and k.step is not None and k.step < 0:\n new_order = np.arange(n)[k][indexer]\n elif isinstance(k, slice) and k.start is None and k.stop is None:\n # slice(None) should not determine order GH#31330\n new_order = np.ones((n,))[indexer]\n else:\n # For all other case, use the same order as the level\n new_order = np.arange(n)[indexer]\n keys = (new_order,) + keys\n\n # Find the reordering using lexsort on the keys mapping\n ind = np.lexsort(keys)\n return indexer[ind]\n\n def truncate(self, before=None, after=None) -> MultiIndex:\n \"\"\"\n Slice index between two labels / tuples, return new MultiIndex\n\n Parameters\n ----------\n before : label or tuple, can be partial. Default None\n None defaults to start\n after : label or tuple, can be partial. 
Default None\n None defaults to end\n\n Returns\n -------\n truncated : MultiIndex\n \"\"\"\n if after and before and after < before:\n raise ValueError(\"after < before\")\n\n i, j = self.levels[0].slice_locs(before, after)\n left, right = self.slice_locs(before, after)\n\n new_levels = list(self.levels)\n new_levels[0] = new_levels[0][i:j]\n\n new_codes = [level_codes[left:right] for level_codes in self.codes]\n new_codes[0] = new_codes[0] - i\n\n return MultiIndex(\n levels=new_levels,\n codes=new_codes,\n names=self._names,\n verify_integrity=False,\n )\n\n def equals(self, other: object) -> bool:\n \"\"\"\n Determines if two MultiIndex objects have the same labeling information\n (the levels themselves do not necessarily have to be the same)\n\n See Also\n --------\n equal_levels\n \"\"\"\n if self.is_(other):\n return True\n\n if not isinstance(other, Index):\n return False\n\n if len(self) != len(other):\n return False\n\n if not isinstance(other, MultiIndex):\n # d-level MultiIndex can equal d-tuple Index\n if not self._should_compare(other):\n # object Index or Categorical[object] may contain tuples\n return False\n return array_equivalent(self._values, other._values)\n\n if self.nlevels != other.nlevels:\n return False\n\n for i in range(self.nlevels):\n self_codes = self.codes[i]\n other_codes = other.codes[i]\n self_mask = self_codes == -1\n other_mask = other_codes == -1\n if not np.array_equal(self_mask, other_mask):\n return False\n self_codes = self_codes[~self_mask]\n self_values = self.levels[i]._values.take(self_codes)\n\n other_codes = other_codes[~other_mask]\n other_values = other.levels[i]._values.take(other_codes)\n\n # since we use NaT both datetime64 and timedelta64 we can have a\n # situation where a level is typed say timedelta64 in self (IOW it\n # has other values than NaT) but types datetime64 in other (where\n # its all NaT) but these are equivalent\n if len(self_values) == 0 and len(other_values) == 0:\n continue\n\n if not array_equivalent(self_values, other_values):\n return False\n\n return True\n\n def equal_levels(self, other: MultiIndex) -> bool:\n \"\"\"\n Return True if the levels of both MultiIndex objects are the same\n\n \"\"\"\n if self.nlevels != other.nlevels:\n return False\n\n for i in range(self.nlevels):\n if not self.levels[i].equals(other.levels[i]):\n return False\n return True\n\n # --------------------------------------------------------------------\n # Set Methods\n\n def _union(self, other, sort) -> MultiIndex:\n other, result_names = self._convert_can_do_setop(other)\n if (\n any(-1 in code for code in self.codes)\n and any(-1 in code for code in self.codes)\n or self.has_duplicates\n or other.has_duplicates\n ):\n # This is only necessary if both sides have nans or one has dups,\n # fast_unique_multiple is faster\n result = super()._union(other, sort)\n else:\n rvals = other._values.astype(object, copy=False)\n result = lib.fast_unique_multiple([self._values, rvals], sort=sort)\n\n return MultiIndex.from_arrays(zip(*result), sortorder=0, names=result_names)\n\n def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:\n return is_object_dtype(dtype)\n\n def _get_reconciled_name_object(self, other) -> MultiIndex:\n \"\"\"\n If the result of a set operation will be self,\n return self, unless the names change, in which\n case make a shallow copy of self.\n \"\"\"\n names = self._maybe_match_names(other)\n if self.names != names:\n # Incompatible return value type (got \"Optional[MultiIndex]\", expected\n # \"MultiIndex\")\n return 
self.rename(names) # type: ignore[return-value]\n return self\n\n def _maybe_match_names(self, other):\n \"\"\"\n Try to find common names to attach to the result of an operation between\n a and b. Return a consensus list of names if they match at least partly\n or list of None if they have completely different names.\n \"\"\"\n if len(self.names) != len(other.names):\n return [None] * len(self.names)\n names = []\n for a_name, b_name in zip(self.names, other.names):\n if a_name == b_name:\n names.append(a_name)\n else:\n # TODO: what if they both have np.nan for their names?\n names.append(None)\n return names\n\n def _intersection(self, other, sort=False) -> MultiIndex:\n other, result_names = self._convert_can_do_setop(other)\n other = other.astype(object, copy=False)\n\n uniq_tuples = None # flag whether _inner_indexer was successful\n if self.is_monotonic and other.is_monotonic:\n try:\n inner_tuples = self._inner_indexer(other)[0]\n sort = False # inner_tuples is already sorted\n except TypeError:\n pass\n else:\n uniq_tuples = algos.unique(inner_tuples)\n\n if uniq_tuples is None:\n uniq_tuples = self._intersection_via_get_indexer(other, sort)\n\n if sort is None:\n uniq_tuples = sorted(uniq_tuples)\n\n if len(uniq_tuples) == 0:\n return MultiIndex(\n levels=self.levels,\n codes=[[]] * self.nlevels,\n names=result_names,\n verify_integrity=False,\n )\n else:\n return MultiIndex.from_arrays(\n zip(*uniq_tuples), sortorder=0, names=result_names\n )\n\n def _difference(self, other, sort) -> MultiIndex:\n other, result_names = self._convert_can_do_setop(other)\n\n this = self._get_unique_index()\n\n indexer = this.get_indexer(other)\n indexer = indexer.take((indexer != -1).nonzero()[0])\n\n label_diff = np.setdiff1d(np.arange(this.size), indexer, assume_unique=True)\n difference = this._values.take(label_diff)\n if sort is None:\n difference = sorted(difference)\n\n if len(difference) == 0:\n return MultiIndex(\n levels=[[]] * self.nlevels,\n codes=[[]] * self.nlevels,\n names=result_names,\n verify_integrity=False,\n )\n else:\n return MultiIndex.from_tuples(difference, sortorder=0, names=result_names)\n\n def _convert_can_do_setop(self, other):\n result_names = self.names\n\n if not isinstance(other, Index):\n\n if len(other) == 0:\n return self[:0], self.names\n else:\n msg = \"other must be a MultiIndex or a list of tuples\"\n try:\n other = MultiIndex.from_tuples(other, names=self.names)\n except (ValueError, TypeError) as err:\n # ValueError raised by tuples_to_object_array if we\n # have non-object dtype\n raise TypeError(msg) from err\n else:\n result_names = get_unanimous_names(self, other)\n\n return other, result_names\n\n def symmetric_difference(self, other, result_name=None, sort=None):\n # On equal symmetric_difference MultiIndexes the difference is empty.\n # Therefore, an empty MultiIndex is returned GH13490\n tups = Index.symmetric_difference(self, other, result_name, sort)\n if len(tups) == 0:\n return type(self)(\n levels=[[] for _ in range(self.nlevels)],\n codes=[[] for _ in range(self.nlevels)],\n names=tups.names,\n )\n return tups\n\n # --------------------------------------------------------------------\n\n @doc(Index.astype)\n def astype(self, dtype, copy: bool = True):\n dtype = pandas_dtype(dtype)\n if is_categorical_dtype(dtype):\n msg = \"> 1 ndim Categorical are not supported at this time\"\n raise NotImplementedError(msg)\n elif not is_object_dtype(dtype):\n raise TypeError(\n \"Setting a MultiIndex dtype to anything other than object \"\n \"is not 
supported\"\n )\n elif copy is True:\n return self._view()\n return self\n\n def _validate_fill_value(self, item):\n if not isinstance(item, tuple):\n # Pad the key with empty strings if lower levels of the key\n # aren't specified:\n item = (item,) + (\"\",) * (self.nlevels - 1)\n elif len(item) != self.nlevels:\n raise ValueError(\"Item must have length equal to number of levels.\")\n return item\n\n def insert(self, loc: int, item) -> MultiIndex:\n \"\"\"\n Make new MultiIndex inserting new item at location\n\n Parameters\n ----------\n loc : int\n item : tuple\n Must be same length as number of levels in the MultiIndex\n\n Returns\n -------\n new_index : Index\n \"\"\"\n item = self._validate_fill_value(item)\n\n new_levels = []\n new_codes = []\n for k, level, level_codes in zip(item, self.levels, self.codes):\n if k not in level:\n # have to insert into level\n # must insert at end otherwise you have to recompute all the\n # other codes\n lev_loc = len(level)\n level = level.insert(lev_loc, k)\n else:\n lev_loc = level.get_loc(k)\n\n new_levels.append(level)\n new_codes.append(np.insert(ensure_int64(level_codes), loc, lev_loc))\n\n return MultiIndex(\n levels=new_levels, codes=new_codes, names=self.names, verify_integrity=False\n )\n\n def delete(self, loc) -> MultiIndex:\n \"\"\"\n Make new index with passed location deleted\n\n Returns\n -------\n new_index : MultiIndex\n \"\"\"\n new_codes = [np.delete(level_codes, loc) for level_codes in self.codes]\n return MultiIndex(\n levels=self.levels,\n codes=new_codes,\n names=self.names,\n verify_integrity=False,\n )\n\n @doc(Index.isin)\n def isin(self, values, level=None) -> np.ndarray:\n if level is None:\n values = MultiIndex.from_tuples(values, names=self.names)._values\n return algos.isin(self._values, values)\n else:\n num = self._get_level_number(level)\n levs = self.get_level_values(num)\n\n if levs.size == 0:\n return np.zeros(len(levs), dtype=np.bool_)\n return levs.isin(values)\n\n @deprecate_nonkeyword_arguments(version=None, allowed_args=[\"self\", \"names\"])\n def set_names(self, names, level=None, inplace: bool = False) -> MultiIndex | None:\n return super().set_names(names=names, level=level, inplace=inplace)\n\n rename = set_names\n\n @deprecate_nonkeyword_arguments(version=None, allowed_args=[\"self\"])\n def drop_duplicates(self, keep: str | bool = \"first\") -> MultiIndex:\n return super().drop_duplicates(keep=keep)\n\n # ---------------------------------------------------------------\n # Arithmetic/Numeric Methods - Disabled\n\n __add__ = make_invalid_op(\"__add__\")\n __radd__ = make_invalid_op(\"__radd__\")\n __iadd__ = make_invalid_op(\"__iadd__\")\n __sub__ = make_invalid_op(\"__sub__\")\n __rsub__ = make_invalid_op(\"__rsub__\")\n __isub__ = make_invalid_op(\"__isub__\")\n __pow__ = make_invalid_op(\"__pow__\")\n __rpow__ = make_invalid_op(\"__rpow__\")\n __mul__ = make_invalid_op(\"__mul__\")\n __rmul__ = make_invalid_op(\"__rmul__\")\n __floordiv__ = make_invalid_op(\"__floordiv__\")\n __rfloordiv__ = make_invalid_op(\"__rfloordiv__\")\n __truediv__ = make_invalid_op(\"__truediv__\")\n __rtruediv__ = make_invalid_op(\"__rtruediv__\")\n __mod__ = make_invalid_op(\"__mod__\")\n __rmod__ = make_invalid_op(\"__rmod__\")\n __divmod__ = make_invalid_op(\"__divmod__\")\n __rdivmod__ = make_invalid_op(\"__rdivmod__\")\n # Unary methods disabled\n __neg__ = make_invalid_op(\"__neg__\")\n __pos__ = make_invalid_op(\"__pos__\")\n __abs__ = make_invalid_op(\"__abs__\")\n __inv__ = make_invalid_op(\"__inv__\")\n\n\ndef 
_lexsort_depth(codes: list[np.ndarray], nlevels: int) -> int:\n \"\"\"Count depth (up to a maximum of `nlevels`) with which codes are lexsorted.\"\"\"\n int64_codes = [ensure_int64(level_codes) for level_codes in codes]\n for k in range(nlevels, 0, -1):\n if libalgos.is_lexsorted(int64_codes[:k]):\n return k\n return 0\n\n\ndef sparsify_labels(label_list, start: int = 0, sentinel=\"\"):\n pivoted = list(zip(*label_list))\n k = len(label_list)\n\n result = pivoted[: start + 1]\n prev = pivoted[start]\n\n for cur in pivoted[start + 1 :]:\n sparse_cur = []\n\n for i, (p, t) in enumerate(zip(prev, cur)):\n if i == k - 1:\n sparse_cur.append(t)\n result.append(sparse_cur)\n break\n\n if p == t:\n sparse_cur.append(sentinel)\n else:\n sparse_cur.extend(cur[i:])\n result.append(sparse_cur)\n break\n\n prev = cur\n\n return list(zip(*result))\n\n\ndef _get_na_rep(dtype) -> str:\n return {np.datetime64: \"NaT\", np.timedelta64: \"NaT\"}.get(dtype, \"NaN\")\n\n\ndef maybe_droplevels(index: Index, key) -> Index:\n \"\"\"\n Attempt to drop level or levels from the given index.\n\n Parameters\n ----------\n index: Index\n key : scalar or tuple\n\n Returns\n -------\n Index\n \"\"\"\n # drop levels\n original_index = index\n if isinstance(key, tuple):\n for _ in key:\n try:\n index = index._drop_level_numbers([0])\n except ValueError:\n # we have dropped too much, so back out\n return original_index\n else:\n try:\n index = index._drop_level_numbers([0])\n except ValueError:\n pass\n\n return index\n\n\ndef _coerce_indexer_frozen(array_like, categories, copy: bool = False) -> np.ndarray:\n \"\"\"\n Coerce the array_like indexer to the smallest integer dtype that can encode all\n of the given categories.\n\n Parameters\n ----------\n array_like : array-like\n categories : array-like\n copy : bool\n\n Returns\n -------\n np.ndarray\n Non-writeable.\n \"\"\"\n array_like = coerce_indexer_dtype(array_like, categories)\n if copy:\n array_like = array_like.copy()\n array_like.flags.writeable = False\n return array_like\n\n\ndef _require_listlike(level, arr, arrname: str):\n \"\"\"\n Ensure that level is either None or listlike, and arr is list-of-listlike.\n \"\"\"\n if level is not None and not is_list_like(level):\n if not is_list_like(arr):\n raise TypeError(f\"{arrname} must be list-like\")\n if is_list_like(arr[0]):\n raise TypeError(f\"{arrname} must be list-like\")\n level = [level]\n arr = [arr]\n elif level is None or is_list_like(level):\n if not is_list_like(arr) or not is_list_like(arr[0]):\n raise TypeError(f\"{arrname} must be list of lists-like\")\n return level, arr\n"
] |
[
[
"pandas.Series",
"numpy.asarray",
"pandas._libs.lib.tuples_to_object_array",
"numpy.cumsum",
"numpy.dtype",
"pandas.core.indexes.base.Index",
"pandas.core.indexes.frozen.FrozenList",
"numpy.concatenate",
"pandas._config.get_option",
"numpy.all",
"numpy.any",
"pandas.core.sorting.get_group_index",
"pandas.util._decorators.deprecate_nonkeyword_arguments",
"numpy.where",
"pandas.core.indexes.numeric.Int64Index",
"pandas.compat.numpy.function.validate_take",
"pandas.core.indexes.base.get_unanimous_names",
"pandas.core.arrays.categorical.factorize_from_iterables",
"numpy.arange",
"pandas.core.algorithms.unique",
"pandas.core.indexes.base.Index.symmetric_difference",
"pandas._libs.lib.fast_unique_multiple",
"numpy.lexsort",
"numpy.bitwise_or.reduce",
"pandas.core.algorithms.factorize",
"pandas.core.common.cast_scalar_indexer",
"pandas._libs.lib.fast_zip",
"pandas._libs.hashtable.duplicated",
"pandas.core.dtypes.common.ensure_int64",
"pandas.core.dtypes.common.is_iterator",
"pandas._libs.algos.is_lexsorted",
"pandas.core.indexers.is_empty_indexer",
"pandas._libs.lib.infer_dtype",
"pandas.core.dtypes.cast.coerce_indexer_dtype",
"pandas.core.algorithms.take_nd",
"pandas.core.sorting.lexsort_indexer",
"pandas.core.dtypes.common.is_categorical_dtype",
"numpy.zeros",
"pandas.core.ops.invalid.make_invalid_op",
"pandas.core.dtypes.common.is_list_like",
"pandas.errors.InvalidIndexError",
"pandas.util._decorators.Appender",
"pandas.core.dtypes.common.pandas_dtype",
"pandas.core.indexes.base.ensure_index",
"pandas.core.dtypes.common.is_hashable",
"pandas.core.common.is_true_slices",
"numpy.delete",
"pandas.core.sorting.indexer_from_factorized",
"pandas.core.dtypes.common.ensure_platform_int",
"numpy.append",
"numpy.equal",
"numpy.array",
"pandas.core.common.index_labels_to_array",
"pandas.io.formats.format.get_adjustment",
"pandas.core.indexes.base.ensure_has_len",
"pandas.errors.UnsortedIndexError",
"pandas.core.algorithms.isin",
"pandas.core.common.is_bool_indexer",
"numpy.array_equal",
"pandas.core.dtypes.common.is_scalar",
"pandas.core.dtypes.missing.array_equivalent",
"pandas._libs.lib.to_object_array_tuples",
"pandas.core.dtypes.common.is_integer",
"pandas.core.common.is_null_slice",
"numpy.ones",
"pandas.io.formats.printing.pprint_thing",
"pandas.core.dtypes.common.is_object_dtype",
"numpy.bincount",
"pandas.core.dtypes.missing.isna",
"pandas.core.reshape.util.cartesian_product",
"pandas.util._decorators.doc",
"pandas.compat.numpy.function.validate_repeat",
"numpy.empty"
]
] |
wailamjonathanlee/predicting-alignment-algorithm-performance-using-machine-learning
|
[
"5255be1f5382390f87cc10b6cb377bffe84a46a3"
] |
[
"src/run_create_clf_data.py"
] |
[
"#!/usr/bin/env python3\n\n\nimport os, sys, json\nimport numpy as np\nimport pandas as pd\nimport functools as fct\nimport collections as cols\n\n\nfrom alignclf import create_clf_data\n\n\nif __name__ == '__main__':\n result_dnames = [\n 'clst-2018-12-generic_50-inc0-net1',\n 'clst-2018-12-generic_50-inc0-net2',\n # 'clst-2018-12-generic_50-inc0-net3',\n # 'clst-2018-12-generic_50-inc0-net4',\n # 'clst-2018-12-generic_50-inc0-net5',\n 'clst-2018-12-sese_25-inc0-net1',\n 'clst-2018-12-sese_25-inc0-net2',\n # 'clst-2018-12-sese_25-inc0-net3',\n # 'clst-2018-12-sese_25-inc0-net4',\n # 'clst-2018-12-sese_25-inc0-net5'\n ]\n\n # find out the subset of logs\n for result_dname in result_dnames:\n result_dir = os.path.join('.', 'results-agg', result_dname)\n\n print('Processing {}'.format(result_dname))\n\n model_log_sets = []\n dir_map = dict()\n\n for d in os.listdir(result_dir):\n dirpath = os.path.join(result_dir, d)\n\n if not os.path.isdir(dirpath):\n continue\n\n model_log_set = set()\n\n for replay_d in os.listdir(dirpath):\n\n replay_dirpath = os.path.join(dirpath, replay_d)\n\n if not os.path.isdir(replay_dirpath):\n continue\n\n configs_fp = os.path.join(replay_dirpath, 'configs.json')\n with open(configs_fp) as f:\n configs_dict = json.load(f)\n\n log = configs_dict['log']\n model = configs_dict['model']\n if 'recomposeStrategy' in configs_dict:\n algo_type = 'recomp' + '-' + configs_dict['algorithmType']\n else:\n algo_type = configs_dict['algorithmType']\n\n if model not in dir_map:\n dir_map[model] = cols.defaultdict(list)\n\n dir_map[model][log].append((algo_type, replay_dirpath))\n model_log_set.add((model, log))\n\n model_log_sets.append(model_log_set)\n\n model_logs = list(fct.reduce(lambda s1, s2: s1.intersection(s2), model_log_sets))\n model_log_dict = cols.defaultdict(list)\n\n for model, log in model_logs:\n model_log_dict[model].append(log)\n\n # print('Model and logs: {}'.format(model_logs))\n # print('Model log set: {}'.format(model_log_sets))\n\n clf_df_list = list()\n\n for model, logs in model_log_dict.items():\n\n if not logs:\n continue\n\n for log in logs:\n\n result_df_dict = dict()\n for algo_type, dirpath in dir_map[model][log]:\n\n is_mono = 'recomp' not in algo_type\n\n # print('algo_type: {}'.format(algo_type))\n\n if is_mono:\n result_fp = os.path.join(dirpath, 'trace-stats-enriched.csv')\n result_df = pd.read_csv(result_fp)\n result_df[create_clf_data.RESULT_DIR] = dirpath\n result_df = create_clf_data.process_df(result_df)\n else:\n result_fp = os.path.join(dirpath, 'trace-stats.csv')\n result_df = pd.read_csv(result_fp)\n result_df[create_clf_data.RESULT_DIR] = dirpath\n result_df = create_clf_data.process_recomposing_df(result_df)\n\n result_df_dict[algo_type] = result_df\n\n clf_df = create_clf_data.to_clf_df(result_df_dict)\n\n columns = list(clf_df.columns)\n clf_df['model'] = model\n clf_df['log'] = log\n columns = [('model', ''), ('log', '')] + columns\n clf_df = clf_df[columns]\n\n clf_df_list.append(clf_df)\n\n clf_df = pd.concat(clf_df_list, axis=0)\n out_fp = os.path.join(result_dir, '{}-predictive-output.csv'.format(result_dname))\n clf_df.to_csv(out_fp, index=False)\n\n"
] |
[
[
"pandas.concat",
"pandas.read_csv"
]
] |
code-burster/tensorflow
|
[
"203c4f750e33ff1927506f9ba585bc6e69d92725"
] |
[
"tensorflow/contrib/distribute/python/mirrored_strategy_multigpu_test.py"
] |
[
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Multi-GPU tests for MirroredStrategy.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport json\nimport sys\n\nfrom absl.testing import parameterized\nimport numpy as np\n\nfrom tensorflow.contrib.distribute.python import combinations\nfrom tensorflow.contrib.distribute.python import mirrored_strategy\nfrom tensorflow.contrib.distribute.python import multi_worker_test_base\nfrom tensorflow.contrib.distribute.python import strategy_test_lib\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib\nfrom tensorflow.python.distribute import device_util\nfrom tensorflow.python.distribute import distribution_strategy_context as ds_context\nfrom tensorflow.python.distribute import reduce_util\nfrom tensorflow.python.distribute import values\nfrom tensorflow.python.eager import backprop\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.eager import function\nfrom tensorflow.python.eager import test\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import func_graph\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.keras.engine import training as keras_training\nfrom tensorflow.python.keras.layers import core as keras_core\nfrom tensorflow.python.layers import core\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import rnn\nfrom tensorflow.python.ops import rnn_cell_impl\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.training import gradient_descent\nfrom tensorflow.python.training import optimizer as optimizer_lib\nfrom tensorflow.python.training import server_lib\n\n\nGPU_TEST = \"test_gpu\" in sys.argv[0]\n\n\n@combinations.generate(combinations.combine(\n distribution=[\n combinations.mirrored_strategy_with_gpu_and_cpu,\n combinations.mirrored_strategy_with_two_gpus,\n combinations.core_mirrored_strategy_with_gpu_and_cpu,\n combinations.core_mirrored_strategy_with_two_gpus],\n mode=[\"graph\", \"eager\"]))\nclass MirroredTwoDeviceDistributionTest(\n strategy_test_lib.DistributionTestBase,\n strategy_test_lib.TwoDeviceDistributionTestBase,\n parameterized.TestCase):\n\n def testMinimizeLoss(self, distribution):\n if context.executing_eagerly():\n self._test_minimize_loss_eager(distribution)\n else:\n self._test_minimize_loss_graph(distribution)\n\n def testReplicaId(self, distribution):\n self._test_replica_id(distribution)\n\n def 
testNumReplicasInSync(self, distribution):\n self.assertEqual(2, distribution.num_replicas_in_sync)\n\n def testCallAndMergeExceptions(self, distribution):\n self._test_call_and_merge_exceptions(distribution)\n\n def testRunRegroupError(self, distribution):\n def run_fn():\n replica_id = int(self.evaluate(_replica_id()))\n # Generates a list with different lengths on different devices.\n # Will fail in _regroup() (if more than one device).\n return list(range(replica_id))\n\n with distribution.scope(), self.assertRaises(AssertionError):\n distribution.extended.call_for_each_replica(run_fn)\n\n def testReduceToCpu(self, distribution):\n with distribution.scope():\n result = distribution.extended.call_for_each_replica(_replica_id)\n reduced = distribution.reduce(reduce_util.ReduceOp.SUM, result)\n expected = sum(range(distribution.num_replicas_in_sync))\n self.assertEqual(expected, self.evaluate(reduced))\n\n def testMakeInputFnIteratorWithDataset(self, distribution):\n dataset_fn = lambda: dataset_ops.Dataset.range(10)\n expected_values = [[i, i+1] for i in range(0, 10, 2)]\n\n input_fn = self._input_fn_to_test_input_context(\n dataset_fn,\n expected_num_replicas_in_sync=2,\n expected_num_input_pipelines=1,\n expected_input_pipeline_id=0)\n iterator = distribution.make_input_fn_iterator(input_fn)\n self._test_input_fn_iterator(iterator, distribution.extended.worker_devices,\n expected_values)\n\n # TODO(b/124344198): Re-enable after fixing this flaky test.\n def DISABLED_testMakeInputFnIteratorWithCallable(self, distribution):\n def fn():\n dataset = dataset_ops.Dataset.range(2).interleave(\n (lambda _: dataset_ops.Dataset.range(10)), cycle_length=2)\n it = dataset.make_one_shot_iterator()\n return it.get_next\n expected_values = [[i, i] for i in range(0, 10)]\n\n input_fn = self._input_fn_to_test_input_context(\n fn,\n expected_num_replicas_in_sync=2,\n expected_num_input_pipelines=1,\n expected_input_pipeline_id=0)\n iterator = distribution.make_input_fn_iterator(input_fn)\n self._test_input_fn_iterator(iterator, distribution.extended.worker_devices,\n expected_values, test_reinitialize=False)\n\n def testNumpyIterator(self, distribution):\n self._test_numpy_iterator(distribution)\n\n def testGlobalStepUpdate(self, distribution):\n self._test_global_step_update(distribution)\n\n def testRun(self, distribution):\n self._test_run(distribution)\n\n def testAllReduceSum(self, distribution):\n self._test_all_reduce_sum(distribution)\n\n def testAllReduceSumGradients(self, distribution):\n self._test_all_reduce_sum_gradients(distribution)\n\n def testAllReduceSumGradientTape(self, distribution):\n self._test_all_reduce_sum_gradient_tape(distribution)\n\n def testAllReduceMean(self, distribution):\n self._test_all_reduce_mean(distribution)\n\n def testAllReduceMeanGradients(self, distribution):\n self._test_all_reduce_mean_gradients(distribution)\n\n def testAllReduceMeanGradientTape(self, distribution):\n self._test_all_reduce_mean_gradient_tape(distribution)\n\n def testSummaryForReplicaZeroOnly(self, distribution):\n self._test_summary_for_replica_zero_only(distribution)\n\n\ndef one_device_combinations():\n return combinations.combine(\n distribution=[\n combinations.mirrored_strategy_with_one_cpu,\n combinations.mirrored_strategy_with_one_gpu,\n combinations.core_mirrored_strategy_with_one_cpu,\n combinations.core_mirrored_strategy_with_one_gpu],\n mode=[\"graph\", \"eager\"])\n\n\n@combinations.generate(one_device_combinations())\nclass MirroredOneDeviceDistributionTest(\n 
strategy_test_lib.DistributionTestBase,\n strategy_test_lib.OneDeviceDistributionTestBase,\n parameterized.TestCase):\n\n def testMinimizeLoss(self, distribution):\n if context.executing_eagerly():\n self._test_minimize_loss_eager(distribution)\n else:\n self._test_minimize_loss_graph(distribution)\n\n def testReplicaId(self, distribution):\n self._test_replica_id(distribution)\n\n def testCallAndMergeExceptions(self, distribution):\n self._test_call_and_merge_exceptions(distribution)\n\n def testRun(self, distribution):\n self._test_run(distribution)\n\n def testAllReduceSum(self, distribution):\n self._test_all_reduce_sum(distribution)\n\n def testAllReduceSumGradients(self, distribution):\n self._test_all_reduce_sum_gradients(distribution)\n\n def testAllReduceSumGradientTape(self, distribution):\n self._test_all_reduce_sum_gradient_tape(distribution)\n\n def testAllReduceMean(self, distribution):\n self._test_all_reduce_mean(distribution)\n\n def testAllReduceMeanGradients(self, distribution):\n self._test_all_reduce_mean_gradients(distribution)\n\n def testAllReduceMeanGradientTape(self, distribution):\n self._test_all_reduce_mean_gradient_tape(distribution)\n\n\nclass MirroredStrategyVariableCreatorStackTest(\n test.TestCase, parameterized.TestCase):\n\n @combinations.generate(combinations.combine(\n distribution=[combinations.mirrored_strategy_with_gpu_and_cpu,\n combinations.core_mirrored_strategy_with_gpu_and_cpu],\n mode=[\"graph\"]))\n def testCreatorStacksAreThreadLocal(self, distribution):\n def model_fn():\n replica_id_str = str(self.evaluate(_replica_id()))\n\n def thread_creator_fn(next_creator, *args, **kwargs):\n return next_creator(*args, **kwargs) + \":thread_\" + replica_id_str\n\n with variable_scope.variable_creator_scope(thread_creator_fn):\n # Create a variable in this scope.\n v = variable_scope.variable(1.0)\n\n # This will pause the current thread, and execute the other thread.\n ds_context.get_replica_context().merge_call(lambda _: _)\n return v\n\n def main_thread_creator(next_creator, *args, **kwargs):\n # We are not using the underlying next_creator for test purposes.\n del next_creator, args, kwargs\n return \"main_thread\"\n\n with context.graph_mode(), \\\n distribution.scope(), \\\n variable_scope.variable_creator_scope(main_thread_creator):\n result = distribution.extended.call_for_each_replica(model_fn)\n result = distribution.experimental_local_results(result)\n expected = (\"main_thread:thread_0\", \"main_thread:thread_1\")\n self.assertEqual(expected, result)\n\n@combinations.generate(combinations.combine(\n distribution=[\n combinations.mirrored_strategy_with_gpu_and_cpu,\n combinations.core_mirrored_strategy_with_gpu_and_cpu],\n mode=[\"graph\", \"eager\"]))\nclass MirroredStrategyCallForEachReplicaTest(test.TestCase):\n\n def testExecutingEagerlyOutsideFunction(self, distribution):\n \"\"\"Verify we preserve the value of executing_eagerly_outside_functions().\"\"\"\n def model_fn():\n return ops.executing_eagerly_outside_functions()\n\n originally = ops.executing_eagerly_outside_functions()\n with distribution.scope():\n in_scope = ops.executing_eagerly_outside_functions()\n in_model_fn = distribution.extended.call_for_each_replica(model_fn)\n unwrapped = distribution.experimental_local_results(in_model_fn)\n self.assertEqual(in_scope, unwrapped[0])\n self.assertEqual(in_scope, originally)\n\n # Verify this all again, but this time in a FuncGraph.\n with func_graph.FuncGraph(\"fg\").as_default(), distribution.scope():\n in_scope = 
ops.executing_eagerly_outside_functions()\n in_model_fn = distribution.extended.call_for_each_replica(model_fn)\n unwrapped = distribution.experimental_local_results(in_model_fn)\n self.assertEqual(in_scope, unwrapped[0])\n self.assertEqual(in_scope, originally)\n\n def testFunctionInCallForEachReplicaNoMergeCall(self, distribution):\n @def_function.function\n def model_fn():\n return 0.\n\n with distribution.scope():\n result = distribution.extended.call_for_each_replica(model_fn)\n self.assertEqual((0., 0.), self.evaluate(result.values))\n\n def testFunctionInCallForEachReplicaWithMergeCall(self, distribution):\n def merge_fn(_):\n pass\n\n @def_function.function\n def model_fn():\n ds_context.get_replica_context().merge_call(merge_fn)\n return 0.\n\n with distribution.scope():\n with self.assertRaisesRegexp(\n RuntimeError, \"`merge_call` called while defining a new graph.\"):\n distribution.extended.call_for_each_replica(model_fn)\n\n@combinations.generate(combinations.combine(\n distribution=[\n combinations.mirrored_strategy_with_gpu_and_cpu,\n combinations.core_mirrored_strategy_with_gpu_and_cpu],\n mode=[\"graph\", \"eager\"]))\nclass MirroredStrategyVariableCreationTest(test.TestCase):\n\n # TODO(priyag): Modify more tests to use this helper and check more\n # properties.\n def _test_mv_properties(self, var, name, strategy):\n self.assertIsInstance(var, values.MirroredVariable)\n self.assertEqual(name, var.name)\n self.assertIs(strategy, var.distribute_strategy)\n for d in var.devices:\n self.assertEqual(d, var.get(d).device)\n self.assertIs(strategy, var.get(d)._distribute_strategy) # pylint: disable=protected-access\n\n def testVariableInFuncGraph(self, distribution):\n def model_fn():\n v = variable_scope.variable(2.0, name=\"bar\")\n ds_context.get_replica_context().merge_call(lambda _: _)\n return v\n\n with func_graph.FuncGraph(\"fg\").as_default(), distribution.scope():\n v1 = variable_scope.variable(1.0, name=\"foo\")\n v2 = distribution.extended.call_for_each_replica(model_fn)\n\n self._test_mv_properties(v1, \"foo:0\", distribution)\n self._test_mv_properties(v2, \"bar:0\", distribution)\n\n def testSingleVariable(self, distribution):\n def model_fn():\n # This variable should be created only once across the threads because of\n # special variable_creator functions used by\n # `distribution.extended.call_for_each_replica`.\n v = variable_scope.variable(1.0, name=\"foo\")\n ds_context.get_replica_context().merge_call(lambda _: _)\n return v\n\n with distribution.scope():\n result = distribution.extended.call_for_each_replica(model_fn)\n self._test_mv_properties(result, \"foo:0\", distribution)\n\n def testUnnamedVariable(self, distribution):\n def model_fn():\n v = variable_scope.variable(1.0)\n ds_context.get_replica_context().merge_call(lambda _: _)\n return v\n\n with distribution.scope():\n result = distribution.extended.call_for_each_replica(model_fn)\n self._test_mv_properties(result, \"Variable:0\", distribution)\n\n def testMultipleVariables(self, distribution):\n def model_fn():\n vs = []\n for i in range(5):\n vs.append(variable_scope.variable(1.0, name=\"foo\" + str(i)))\n ds_context.get_replica_context().merge_call(lambda _: _)\n return vs\n\n with distribution.scope():\n result = distribution.extended.call_for_each_replica(model_fn)\n for i, v in enumerate(result):\n self._test_mv_properties(v, \"foo\" + str(i) + \":0\", distribution)\n\n def testMultipleVariablesWithSameCanonicalName(self, distribution):\n def model_fn():\n vs = []\n 
vs.append(variable_scope.variable(1.0, name=\"foo/bar\"))\n vs.append(variable_scope.variable(1.0, name=\"foo_1/bar\"))\n vs.append(variable_scope.variable(1.0, name=\"foo_1/bar_1\"))\n vs.append(variable_scope.variable(1.0, name=\"foo/bar_1\"))\n ds_context.get_replica_context().merge_call(lambda _: _)\n return vs\n\n with distribution.scope():\n result = distribution.extended.call_for_each_replica(model_fn)\n for v in result:\n self.assertIsInstance(v, values.MirroredVariable)\n self.assertEqual(4, len(result))\n self.assertEqual(\"foo/bar:0\", result[0].name)\n self.assertEqual(\"foo_1/bar:0\", result[1].name)\n self.assertEqual(\"foo_1/bar_1:0\", result[2].name)\n self.assertEqual(\"foo/bar_1:0\", result[3].name)\n\n def testVariableWithSameCanonicalNameAcrossThreads(self, distribution):\n def model_fn():\n replica_id = self.evaluate(_replica_id())\n v = variable_scope.variable(1.0, name=\"foo_\" + str(replica_id))\n ds_context.get_replica_context().merge_call(lambda _: _)\n return v\n\n with distribution.scope():\n result = distribution.extended.call_for_each_replica(model_fn)\n self.assertIsInstance(result, values.MirroredVariable)\n # The resulting mirrored variable will use the name from the first device.\n self.assertEqual(\"foo_0:0\", result.name)\n\n def testWithLayers(self, distribution):\n def model_fn(features):\n with variable_scope.variable_scope(\"common\"):\n layer1 = core.Dense(1)\n layer1(features)\n layer2 = core.Dense(1)\n layer2(features)\n # This will pause the current thread, and execute the other thread.\n ds_context.get_replica_context().merge_call(lambda _: _)\n layer3 = core.Dense(1)\n layer3(features)\n return [(layer1.kernel, layer1.bias),\n (layer2.kernel, layer2.bias),\n (layer3.kernel, layer3.bias)]\n\n iterator = distribution.make_input_fn_iterator(\n lambda _: dataset_ops.Dataset.from_tensors([[1.]]).repeat(10))\n self.evaluate(iterator.initialize())\n features = iterator.get_next()\n\n with distribution.scope():\n result = distribution.extended.call_for_each_replica(\n model_fn, args=(features,))\n suffixes = [\"\", \"_1\", \"_2\"]\n for (kernel, bias), suffix in zip(result, suffixes):\n self.assertIsInstance(kernel, values.MirroredVariable)\n self.assertEqual(\"common/dense\" + suffix + \"/kernel:0\", kernel.name)\n self.assertIsInstance(bias, values.MirroredVariable)\n self.assertEqual(\"common/dense\" + suffix + \"/bias:0\", bias.name)\n\n def testWithVariableAndVariableScope(self, distribution):\n def model_fn():\n v0 = variable_scope.variable(1.0, name=\"var0\", aggregation=None)\n with variable_scope.variable_scope(\"common\"):\n v1 = variable_scope.variable(1.0, name=\"var1\")\n # This will pause the current thread, and execute the other thread.\n ds_context.get_replica_context().merge_call(lambda _: _)\n v2 = variable_scope.variable(\n 1.0,\n name=\"var2\",\n synchronization=variable_scope.VariableSynchronization.ON_READ,\n aggregation=variable_scope.VariableAggregation.SUM)\n v3 = variable_scope.variable(\n 1.0,\n name=\"var3\",\n synchronization=variable_scope.VariableSynchronization.ON_WRITE,\n aggregation=variable_scope.VariableAggregation.MEAN)\n\n return v0, v1, v2, v3\n\n with distribution.scope():\n v = variable_scope.variable(1.0, name=\"var-main0\")\n self.assertEqual(\"var-main0:0\", v.name)\n\n result = distribution.extended.call_for_each_replica(model_fn)\n self.assertEqual(4, len(result))\n v0, v1, v2, v3 = result\n self.assertIsInstance(v0, values.MirroredVariable)\n self.assertEqual(\"var0:0\", v0.name)\n 
self.assertIsInstance(v1, values.MirroredVariable)\n self.assertEqual(\"common/var1:0\", v1.name)\n self.assertIsInstance(v2, values.SyncOnReadVariable)\n self.assertEqual(\"common/var2:0\", v2.name)\n self.assertEqual(variable_scope.VariableAggregation.SUM, v2.aggregation)\n self.assertIsInstance(v3, values.MirroredVariable)\n self.assertEqual(\"common/var3:0\", v3.name)\n self.assertEqual(variable_scope.VariableAggregation.MEAN, v3.aggregation)\n\n def testWithGetVariableAndVariableScope(self, distribution):\n def model_fn():\n v0 = variable_scope.get_variable(\"var0\", [1])\n with variable_scope.variable_scope(\"common\"):\n v1 = variable_scope.get_variable(\"var1\", [1])\n # This will pause the current thread, and execute the other thread.\n ds_context.get_replica_context().merge_call(lambda _: _)\n v2 = variable_scope.get_variable(\n \"var2\", [1],\n synchronization=variable_scope.VariableSynchronization.ON_READ,\n aggregation=variable_scope.VariableAggregation.SUM)\n v3 = variable_scope.get_variable(\n \"var3\", [1],\n synchronization=variable_scope.VariableSynchronization.ON_WRITE,\n aggregation=variable_scope.VariableAggregation.MEAN)\n\n return v0, v1, v2, v3\n\n with distribution.scope():\n with variable_scope.variable_scope(\"main\"):\n v = variable_scope.get_variable(\"var-main0\", [1])\n self.assertEqual(\"main/var-main0:0\", v.name)\n\n result = distribution.extended.call_for_each_replica(model_fn)\n self.assertEqual(4, len(result))\n v0, v1, v2, v3 = result\n self.assertIsInstance(v0, values.MirroredVariable)\n self.assertEqual(\"main/var0:0\", v0.name)\n self.assertIsInstance(v1, values.MirroredVariable)\n self.assertEqual(\"main/common/var1:0\", v1.name)\n self.assertIsInstance(v2, values.SyncOnReadVariable)\n self.assertEqual(\"main/common/var2:0\", v2.name)\n self.assertEqual(variable_scope.VariableAggregation.SUM,\n v2.aggregation)\n self.assertIsInstance(v3, values.MirroredVariable)\n self.assertEqual(\"main/common/var3:0\", v3.name)\n self.assertEqual(variable_scope.VariableAggregation.MEAN,\n v3.aggregation)\n\n def testOnlyFirstReplicaUpdatesVariables(self, distribution):\n def create_fn():\n aggregation = variable_scope.VariableAggregation.ONLY_FIRST_REPLICA\n v0 = variable_scope.variable(\n 2.0,\n name=\"on_read\",\n synchronization=variable_scope.VariableSynchronization.ON_READ,\n aggregation=aggregation)\n v1 = variable_scope.variable(\n 3.0,\n name=\"on_write\",\n synchronization=variable_scope.VariableSynchronization.ON_WRITE,\n aggregation=aggregation)\n return v0, v1\n\n devices = [\"/device:GPU:0\", \"/device:CPU:0\"]\n with distribution.scope():\n v0, v1 = distribution.extended.call_for_each_replica(create_fn)\n self.evaluate(v0.initializer)\n self.assertEqual(2.0, self.evaluate(v0.get(devices[0])))\n self.assertEqual(2.0, self.evaluate(v0.get(devices[1])))\n self.assertEqual(2.0, self.evaluate(distribution.extended.read_var(v0)))\n self.evaluate(v1.initializer)\n self.assertEqual(3.0, self.evaluate(v1.get(devices[0])))\n self.assertEqual(3.0, self.evaluate(v1.get(devices[1])))\n self.assertEqual(3.0, self.evaluate(distribution.extended.read_var(v1)))\n\n def replica_id_plus_one():\n return math_ops.cast(_replica_id() + 1, dtype=dtypes.float32)\n\n # Update using the assign_add member function.\n def update_member_fn():\n update0 = v0.assign_add(5.0 * replica_id_plus_one())\n update1 = v1.assign_add(7.0 * replica_id_plus_one())\n return update0, update1\n\n update0a, update1a = distribution.extended.call_for_each_replica(\n update_member_fn)\n\n # Update 
\"sync on read\" variable.\n self.evaluate(distribution.group(update0a))\n self.assertEqual(2.0 + 5.0, self.evaluate(v0.get(devices[0])))\n # Writes are not synchronized for \"sync on read\" variables,\n # so device[1] can end up with a different value.\n self.assertEqual(2.0 + 2*5.0, self.evaluate(v0.get(devices[1])))\n # Always reads from device 0.\n self.assertEqual(2.0 + 5.0, self.evaluate(\n distribution.extended.read_var(v0)))\n\n # Update \"sync on write\" variable.\n self.evaluate(distribution.group(update1a))\n self.assertEqual(3.0 + 7.0, self.evaluate(v1.get(devices[0])))\n # Writes are synchronized for v1, only the argument to assign_add on\n # device[0] is used.\n self.assertEqual(3.0 + 7.0, self.evaluate(v1.get(devices[1])))\n self.assertEqual(3.0 + 7.0, self.evaluate(\n distribution.extended.read_var(v1)))\n\n # Update using state_ops.assign_add global function.\n def update_state_ops_fn():\n update0 = state_ops.assign_add(v0, 11.0 * replica_id_plus_one())\n update1 = state_ops.assign_add(v1, 13.0 * replica_id_plus_one())\n return update0, update1\n\n update0b, update1b = distribution.extended.call_for_each_replica(\n update_state_ops_fn)\n self.evaluate(distribution.group(update0b))\n\n # Update \"sync on read\" variable.\n self.assertEqual(2.0 + 5.0 + 11.0, self.evaluate(v0.get(devices[0])))\n self.assertEqual(2.0 + 2*5.0 + 2*11.0, self.evaluate(v0.get(devices[1])))\n self.assertEqual(2.0 + 5.0 + 11.0, self.evaluate(\n distribution.extended.read_var(v0)))\n\n # Update \"sync on write\" variable.\n self.evaluate(distribution.group(update1b))\n self.assertEqual(3.0 + 7.0 + 13.0, self.evaluate(v1.get(devices[0])))\n self.assertEqual(3.0 + 7.0 + 13.0, self.evaluate(v1.get(devices[1])))\n self.assertEqual(3.0 + 7.0 + 13.0, self.evaluate(\n distribution.extended.read_var(v1)))\n\n def testNoneSynchronizationWithGetVariable(self, distribution):\n with distribution.scope():\n with self.assertRaisesRegexp(\n ValueError, \"`NONE` variable synchronization mode is not \"\n \"supported with `Mirrored` distribution strategy. Please change \"\n \"the `synchronization` for variable: v\"):\n variable_scope.get_variable(\n \"v\", [1],\n synchronization=variable_scope.VariableSynchronization.NONE)\n\n def testNoneSynchronizationWithVariable(self, distribution):\n with distribution.scope():\n with self.assertRaisesRegexp(\n ValueError, \"`NONE` variable synchronization mode is not \"\n \"supported with `Mirrored` distribution strategy. 
Please change \"\n \"the `synchronization` for variable: v\"):\n variable_scope.variable(\n 1.0,\n name=\"v\",\n synchronization=variable_scope.VariableSynchronization.NONE)\n\n def testInvalidSynchronizationWithVariable(self, distribution):\n with distribution.scope():\n with self.assertRaisesRegexp(\n ValueError, \"Invalid variable synchronization mode: Invalid for \"\n \"variable: v\"):\n variable_scope.variable(1.0, name=\"v\", synchronization=\"Invalid\")\n\n def testInvalidAggregationWithGetVariable(self, distribution):\n with distribution.scope():\n with self.assertRaisesRegexp(\n ValueError, \"Invalid variable aggregation mode: invalid for \"\n \"variable: v\"):\n variable_scope.get_variable(\n \"v\", [1],\n synchronization=variable_scope.VariableSynchronization.ON_WRITE,\n aggregation=\"invalid\")\n\n def testInvalidAggregationWithVariable(self, distribution):\n with distribution.scope():\n with self.assertRaisesRegexp(\n ValueError, \"Invalid variable aggregation mode: invalid for \"\n \"variable: v\"):\n variable_scope.variable(\n 1.0,\n name=\"v\",\n synchronization=variable_scope.VariableSynchronization.ON_WRITE,\n aggregation=\"invalid\")\n\n def testNonMatchingVariableCreation(self, distribution):\n self.skipTest(\"b/123075960\")\n def model_fn(name):\n v = variable_scope.variable(1.0, name=name)\n ds_context.get_replica_context().merge_call(lambda _: _)\n return v\n\n with distribution.scope():\n device_map = values.ReplicaDeviceMap((\"/device:CPU:0\", \"/device:GPU:0\"))\n names = values.DistributedValues(device_map, (\"foo\", \"bar\"))\n with self.assertRaises(RuntimeError):\n _ = distribution.extended.call_for_each_replica(model_fn, args=(names,))\n\n def testSyncOnReadVariable(self, distribution):\n all_v_sum = {}\n all_v_mean = {}\n components_sum = {}\n components_mean = {}\n\n def model_fn():\n replica_id = self.evaluate(_replica_id())\n v_sum = variable_scope.variable(\n 1.0,\n synchronization=variable_scope.VariableSynchronization.ON_READ,\n aggregation=variable_scope.VariableAggregation.SUM)\n v_mean = variable_scope.variable(\n 4.0,\n synchronization=variable_scope.VariableSynchronization.ON_READ,\n aggregation=variable_scope.VariableAggregation.MEAN)\n self.assertIsInstance(v_sum, values.SyncOnReadVariable)\n self.assertIsInstance(v_mean, values.SyncOnReadVariable)\n updates = [v_sum.assign_add(2.0 + replica_id),\n v_mean.assign(6.0 * replica_id)]\n all_v_sum[replica_id] = v_sum\n all_v_mean[replica_id] = v_mean\n c_sum = v_sum.get()\n c_mean = v_mean.get()\n components_sum[replica_id] = c_sum\n components_mean[replica_id] = c_mean\n self.assertIsNot(v_sum, c_sum)\n self.assertIsNot(v_mean, c_mean)\n return updates, v_sum, v_mean, c_sum, c_mean\n\n with distribution.scope():\n # Create \"sum\" and \"mean\" versions of SyncOnReadVariables.\n ret_ops, ret_v_sum, ret_v_mean, regrouped_sum, regrouped_mean = (\n distribution.extended.call_for_each_replica(model_fn))\n # Should see the same wrapping instance in all replicas.\n self.assertIs(all_v_sum[0], ret_v_sum)\n self.assertIs(all_v_mean[0], ret_v_mean)\n self.assertIs(all_v_sum[0], all_v_sum[1])\n self.assertIs(all_v_mean[0], all_v_mean[1])\n\n # Regroup should recover the same wrapper.\n self.assertIs(ret_v_sum, regrouped_sum)\n self.assertIs(ret_v_mean, regrouped_mean)\n self.assertIsNot(components_sum[0], components_sum[1])\n self.assertIsNot(components_mean[0], components_mean[1])\n\n # Apply updates\n self.evaluate(variables.global_variables_initializer())\n self.evaluate([y for x in ret_ops # pylint: 
disable=g-complex-comprehension\n for y in distribution.experimental_local_results(x)])\n expected_sum = 0.0\n expected_mean = 0.0\n for i, d in enumerate(distribution.extended.worker_devices):\n # Should see different values on different devices.\n v_sum_value = self.evaluate(ret_v_sum.get(d).read_value())\n v_mean_value = self.evaluate(ret_v_mean.get(d).read_value())\n expected = i + 3.0\n self.assertEqual(expected, v_sum_value)\n expected_sum += expected\n expected = i * 6.0\n self.assertEqual(expected, v_mean_value)\n expected_mean += expected\n expected_mean /= len(distribution.extended.worker_devices)\n\n # Without get(device), should return the value you get by\n # applying the reduction across all replicas (whether you use\n # read_var(), get(), or nothing).\n self.assertEqual(expected_sum, self.evaluate(\n distribution.extended.read_var(ret_v_sum)))\n self.assertEqual(expected_mean, self.evaluate(\n distribution.extended.read_var(ret_v_mean)))\n self.assertEqual(expected_sum, self.evaluate(ret_v_sum.get()))\n self.assertEqual(expected_mean, self.evaluate(ret_v_mean.get()))\n self.assertEqual(expected_sum, self.evaluate(ret_v_sum))\n self.assertEqual(expected_mean, self.evaluate(ret_v_mean))\n\n # TODO(priyag): Update this test to work in eager mode as well.\n def testDynamicRnnVariables(self, distribution):\n def model_fn():\n inputs = constant_op.constant(2 * [2 * [[0.0, 1.0, 2.0, 3.0, 4.0]]])\n cell_fw = rnn_cell_impl.LSTMCell(300)\n cell_bw = rnn_cell_impl.LSTMCell(300)\n (outputs, _) = rnn.bidirectional_dynamic_rnn(\n cell_fw,\n cell_bw,\n inputs,\n dtype=dtypes.float32)\n return outputs\n\n with context.graph_mode(), distribution.scope():\n result = distribution.extended.call_for_each_replica(model_fn)\n # Two variables are created by the RNN layer.\n self.assertEqual(2, len(result))\n for v in result:\n self.assertIsInstance(v, values.DistributedValues)\n _, v1 = distribution.experimental_local_results(v)\n self.assertStartsWith(v1._op.name, \"replica_1/\")\n\n def testSyncOnReadVariableUpdate(self, distribution):\n def model_fn():\n v_sum = variable_scope.variable(\n 1.0,\n synchronization=variable_scope.VariableSynchronization.ON_READ,\n aggregation=variable_scope.VariableAggregation.SUM)\n self.assertIsInstance(v_sum, values.SyncOnReadVariable)\n return v_sum\n\n def update(var, value):\n return var.assign(value)\n\n with distribution.scope():\n ret_v_sum = distribution.extended.call_for_each_replica(model_fn)\n\n # Initialize variables.\n self.evaluate(variables.global_variables_initializer())\n # Assert that the aggregated value of the sync on read var is the sum\n # of the individual values before running the update ops.\n self.assertEqual(1.0, self.evaluate(ret_v_sum.get(\n distribution.extended.worker_devices[0]).read_value()))\n self.assertEqual(2.0, self.evaluate(ret_v_sum))\n\n # Apply updates.\n update_ops = distribution.extended.update(\n ret_v_sum, update, args=(5.0,), group=False)\n self.evaluate(update_ops)\n # Assert that the aggregated value of the sync on read vars is the sum\n # of the individual values after running the update ops.\n self.assertEqual(5.0, self.evaluate(ret_v_sum.get(\n distribution.extended.worker_devices[0]).read_value()))\n self.assertEqual(10.0, self.evaluate(ret_v_sum))\n\n def testVarDistributeStrategy(self, distribution):\n with distribution.scope():\n mirrored = variable_scope.variable(1.0)\n sync_on_read = variable_scope.variable(\n 1.0,\n synchronization=variable_scope.VariableSynchronization.ON_READ)\n 
self.assertIs(distribution, mirrored.distribute_strategy)\n self.assertIs(distribution, sync_on_read.distribute_strategy)\n\n\n@combinations.generate(combinations.combine(\n distribution=[\n combinations.mirrored_strategy_with_gpu_and_cpu,\n combinations.core_mirrored_strategy_with_gpu_and_cpu],\n mode=[\"graph\"]))\nclass MirroredStrategyNameScopeTest(test.TestCase):\n # NOTE(priyag): Names and name scopes are ignored in eager, hence we are not\n # testing this in eager mode.\n\n def testNameScope(self, distribution):\n def model_fn():\n with ops.name_scope(\"foo\"):\n a = constant_op.constant(1.0, name=\"a\")\n ds_context.get_replica_context().merge_call(lambda _: _)\n b = constant_op.constant(1.0, name=\"b\")\n return a, b\n\n with context.graph_mode(), distribution.scope():\n with ops.name_scope(\"main\"):\n result = distribution.extended.call_for_each_replica(model_fn)\n self.assertEqual(2, len(result))\n for v, name in zip(result, [\"a\", \"b\"]):\n self.assertIsInstance(v, values.DistributedValues)\n v0, v1 = distribution.experimental_local_results(v)\n self.assertEqual(\"main/foo/\" + name + \":0\", v0.name)\n self.assertEqual(\"main/replica_1/foo/\" + name + \":0\", v1.name)\n\n def testWithDefaultName(self, distribution):\n def model_fn():\n with ops.name_scope(None, \"foo\"):\n a = constant_op.constant(1.0, name=\"a\")\n ds_context.get_replica_context().merge_call(lambda _: _)\n b = constant_op.constant(2.0, name=\"b\")\n return a, b\n\n with context.graph_mode(), distribution.scope():\n result = distribution.extended.call_for_each_replica(model_fn)\n self.assertEqual(2, len(result))\n for v, name in zip(result, [\"a\", \"b\"]):\n self.assertIsInstance(v, values.DistributedValues)\n v0, v1 = distribution.experimental_local_results(v)\n self.assertEqual(\"foo/\" + name + \":0\", v0.name)\n self.assertEqual(\"replica_1/foo/\" + name + \":0\", v1.name)\n\n # variable_scope.variable() respects name scopes when creating\n # variables. On the other hand variable_scope.get_variable() ignores name\n # scopes but respects variable scope when creating variables. 
We test both\n # methods of creating variables to make sure that we have the same\n # variable names in both cases.\n def testNameScopeWithVariable(self, distribution):\n def in_cross_replica(_):\n c = variable_scope.variable(1.0, name=\"c\")\n return c\n\n def model_fn():\n b = variable_scope.variable(1.0, name=\"b\")\n with ops.name_scope(\"foo\"):\n c = ds_context.get_replica_context().merge_call(in_cross_replica)\n return b, c\n\n with context.graph_mode(), distribution.scope():\n with ops.name_scope(\"main\"):\n a = variable_scope.variable(1.0, name=\"a\")\n result = distribution.extended.call_for_each_replica(model_fn)\n result_b = result[0]\n result_c = result[1]\n self.assertIsInstance(result_b, values.DistributedValues)\n self.assertIsInstance(result_c, values.DistributedValues)\n a0, a1 = distribution.experimental_local_results(a)\n b0, b1 = distribution.experimental_local_results(result_b)\n c0, c1 = distribution.experimental_local_results(result_c)\n self.assertEqual(\"main/a:0\", a0.name)\n self.assertEqual(\"main/a/replica_1:0\", a1.name)\n self.assertEqual(\"main/b:0\", b0.name)\n self.assertEqual(\"main/b/replica_1:0\", b1.name)\n self.assertEqual(\"main/foo/c:0\", c0.name)\n self.assertEqual(\"main/foo/c/replica_1:0\", c1.name)\n\n def testNameScopeWithGetVariable(self, distribution):\n def in_cross_replica(_):\n c = variable_scope.get_variable(\"c\", [1])\n return c\n\n def model_fn():\n b = variable_scope.get_variable(\"b\", [1])\n with ops.name_scope(\"foo\"):\n c = ds_context.get_replica_context().merge_call(in_cross_replica)\n return b, c\n\n with context.graph_mode(), distribution.scope():\n with ops.name_scope(\"main\"):\n a = variable_scope.get_variable(\"a\", [1])\n result = distribution.extended.call_for_each_replica(model_fn)\n result_b = result[0]\n result_c = result[1]\n self.assertIsInstance(result_b, values.DistributedValues)\n self.assertIsInstance(result_c, values.DistributedValues)\n a0, a1 = distribution.experimental_local_results(a)\n b0, b1 = distribution.experimental_local_results(result_b)\n c0, c1 = distribution.experimental_local_results(result_c)\n self.assertEqual(\"a:0\", a0.name)\n self.assertEqual(\"a/replica_1:0\", a1.name)\n self.assertEqual(\"b:0\", b0.name)\n self.assertEqual(\"b/replica_1:0\", b1.name)\n self.assertEqual(\"c:0\", c0.name)\n self.assertEqual(\"c/replica_1:0\", c1.name)\n\n def testVariableScopeWithGetVariable(self, distribution):\n\n def in_cross_replica(_):\n c = variable_scope.get_variable(\"c\", [1])\n return c\n\n def model_fn():\n b = variable_scope.get_variable(\"b\", [1])\n with variable_scope.variable_scope(\"foo\"):\n c = ds_context.get_replica_context().merge_call(in_cross_replica)\n return b, c\n\n with context.graph_mode(), distribution.scope():\n with variable_scope.variable_scope(\"main\"):\n a = variable_scope.get_variable(\"a\", [1])\n result = distribution.extended.call_for_each_replica(model_fn)\n result_b = result[0]\n result_c = result[1]\n self.assertIsInstance(result_b, values.DistributedValues)\n self.assertIsInstance(result_c, values.DistributedValues)\n a0, a1 = distribution.experimental_local_results(a)\n b0, b1 = distribution.experimental_local_results(result_b)\n c0, c1 = distribution.experimental_local_results(result_c)\n self.assertEqual(\"main/a:0\", a0.name)\n self.assertEqual(\"main/a/replica_1:0\", a1.name)\n self.assertEqual(\"main/b:0\", b0.name)\n self.assertEqual(\"main/b/replica_1:0\", b1.name)\n self.assertEqual(\"main/foo/c:0\", c0.name)\n 
self.assertEqual(\"main/foo/c/replica_1:0\", c1.name)\n\n\n@combinations.generate(\n combinations.combine(\n distribution=[\n combinations.NamedDistribution(\n \"Mirrored3Devices\",\n # pylint: disable=g-long-lambda\n lambda: mirrored_strategy.MirroredStrategy(\n [\"/device:GPU:0\", \"/device:GPU:1\", \"/device:CPU:0\"]),\n required_gpus=2),\n combinations.NamedDistribution(\n \"CoreMirrored3Devices\",\n # pylint: disable=g-long-lambda\n lambda: mirrored_strategy.CoreMirroredStrategy(\n [\"/device:GPU:0\", \"/device:GPU:1\", \"/device:CPU:0\"]),\n required_gpus=2)\n ],\n mode=[\"graph\", \"eager\"]))\nclass MirroredThreeDeviceDistributionTest(\n strategy_test_lib.DistributionTestBase,\n parameterized.TestCase):\n\n def testThreeDevices(self, distribution):\n def model_fn():\n v = variable_scope.variable(1.0, name=\"foo\")\n ds_context.get_replica_context().merge_call(lambda _: _)\n return v\n\n with distribution.scope():\n result = distribution.extended.call_for_each_replica(model_fn)\n self.assertIsInstance(result, values.MirroredVariable)\n self.assertEqual(\"foo:0\", result.name)\n\n\n@combinations.generate(combinations.combine(\n distribution=[\n combinations.mirrored_strategy_with_gpu_and_cpu,\n combinations.core_mirrored_strategy_with_gpu_and_cpu],\n mode=[\"graph\", \"eager\"]))\nclass MirroredVariableUpdateTest(test.TestCase):\n # The following tests check assign, assign_add and assign_sub on Mirrored\n # variables in replica and cross replica context.\n\n def testAssignMirroredVarReplicaContextWithoutAggregationType(self,\n distribution):\n # Test that we always have an aggregation type set on the mirrored variable\n # if we assign to it in replica mode.\n def var_fn():\n v = variable_scope.variable(1.0, name=\"foo\")\n return v\n\n with distribution.scope():\n mirrored_var = distribution.extended.call_for_each_replica(var_fn)\n self.assertIsInstance(mirrored_var, values.MirroredVariable)\n self.evaluate(variables.global_variables_initializer())\n\n def model_fn():\n return mirrored_var.assign(5.0)\n\n with self.assertRaisesRegexp(\n ValueError, \"You must specify an aggregation method to update a \"\n \"MirroredVariable in Replica Context.\"):\n self.evaluate(distribution.experimental_local_results(\n distribution.extended.call_for_each_replica(model_fn)))\n\n def testAssignMirroredVarReplicaContextWithSum(self, distribution):\n # Test that we don't reduce a non-per-replica value with the \"sum\"\n # aggregation type.\n def var_fn():\n v = variable_scope.variable(\n 1.0, name=\"foo\", aggregation=variable_scope.VariableAggregation.SUM)\n return v\n\n with distribution.scope():\n mirrored_var = distribution.extended.call_for_each_replica(var_fn)\n self.assertIsInstance(mirrored_var, values.MirroredVariable)\n self.evaluate(variables.global_variables_initializer())\n\n def model_fn():\n return mirrored_var.assign(5.0)\n\n with self.assertRaisesRegexp(\n ValueError, \"A non-DistributedValues value 5.0 cannot be reduced \"\n \"with the given reduce op ReduceOp.SUM.\"):\n self.evaluate(distribution.experimental_local_results(\n distribution.extended.call_for_each_replica(model_fn)))\n\n def testAssignMirroredVarCrossDeviceContext(self, distribution):\n def var_fn():\n return variable_scope.variable(1.0, name=\"foo\")\n\n with distribution.scope():\n mirrored_var = distribution.extended.call_for_each_replica(var_fn)\n self.assertIsInstance(mirrored_var, values.MirroredVariable)\n self.evaluate(variables.global_variables_initializer())\n self.assertEqual(1.0, 
self.evaluate(mirrored_var))\n mirrored_var_result = self.evaluate(mirrored_var.assign(6.0))\n self.assertEqual(6.0, mirrored_var_result)\n\n def testAssignMirroredVarReplicaContext(self, distribution):\n def var_fn():\n return variable_scope.variable(\n 1.0, name=\"foo\", aggregation=variable_scope.VariableAggregation.MEAN)\n\n with distribution.scope():\n mirrored_var = distribution.extended.call_for_each_replica(var_fn)\n self.assertIsInstance(mirrored_var, values.MirroredVariable)\n self.evaluate(variables.global_variables_initializer())\n self.assertEqual(1.0, self.evaluate(mirrored_var))\n\n def model_fn():\n value = math_ops.cast(\n ds_context.get_replica_context().replica_id_in_sync_group,\n mirrored_var.dtype)\n return mirrored_var.assign(value)\n\n self.evaluate(distribution.experimental_local_results(\n distribution.extended.call_for_each_replica(model_fn)))\n self.assertEqual(0.5, self.evaluate(mirrored_var))\n\n def testAssignMirroredVarReplicaContextWithSingleValue(self, distribution):\n def var_fn():\n return variable_scope.variable(\n 1.0, name=\"foo\", aggregation=variable_scope.VariableAggregation.MEAN)\n\n with distribution.scope():\n mirrored_var = distribution.extended.call_for_each_replica(var_fn)\n self.assertIsInstance(mirrored_var, values.MirroredVariable)\n self.evaluate(variables.global_variables_initializer())\n self.assertEqual(1.0, self.evaluate(mirrored_var))\n\n def model_fn():\n return mirrored_var.assign(5.0)\n\n self.evaluate(distribution.experimental_local_results(\n distribution.extended.call_for_each_replica(model_fn)))\n self.assertEqual(5.0, self.evaluate(mirrored_var))\n\n def testAssignAddMirroredVarCrossDeviceContext(self, distribution):\n def var_fn():\n return variable_scope.variable(1.0, name=\"foo\")\n\n with distribution.scope():\n mirrored_var = distribution.extended.call_for_each_replica(var_fn)\n self.assertIsInstance(mirrored_var, values.MirroredVariable)\n self.evaluate(variables.global_variables_initializer())\n self.assertEqual(1.0, self.evaluate(mirrored_var))\n\n # read_value == True\n mirrored_var_result = self.evaluate(\n mirrored_var.assign_add(6.0, read_value=True))\n self.assertEqual(7.0, mirrored_var_result)\n self.assertEqual(7.0, self.evaluate(mirrored_var.get(\"/device:CPU:0\")))\n self.assertEqual(7.0, self.evaluate(mirrored_var.get(\"/device:GPU:0\")))\n\n # read_value == False\n self.evaluate(mirrored_var.assign_add(2.0, read_value=False))\n self.assertEqual(9.0, self.evaluate(mirrored_var.get(\"/device:CPU:0\")))\n self.assertEqual(9.0, self.evaluate(mirrored_var.get(\"/device:GPU:0\")))\n\n def testAssignAddMirroredVarReplicaContext(self, distribution):\n def var_fn():\n return variable_scope.variable(\n 1.0, name=\"foo\", aggregation=variable_scope.VariableAggregation.MEAN)\n\n with distribution.scope():\n mirrored_var = distribution.extended.call_for_each_replica(var_fn)\n self.assertIsInstance(mirrored_var, values.MirroredVariable)\n self.evaluate(variables.global_variables_initializer())\n self.assertEqual(1.0, self.evaluate(mirrored_var))\n\n def model_fn():\n value = math_ops.cast(\n ds_context.get_replica_context().replica_id_in_sync_group,\n mirrored_var.dtype)\n return mirrored_var.assign_add(value)\n\n self.evaluate(distribution.experimental_local_results(\n distribution.extended.call_for_each_replica(model_fn)))\n self.assertEqual(1.5, self.evaluate(mirrored_var))\n\n def testAssignAddMirroredVarReplicaContextWithSingleValue(self, distribution):\n def var_fn():\n return variable_scope.variable(\n 1.0, 
name=\"foo\", aggregation=variable_scope.VariableAggregation.MEAN)\n\n with distribution.scope():\n mirrored_var = distribution.extended.call_for_each_replica(var_fn)\n self.assertIsInstance(mirrored_var, values.MirroredVariable)\n self.evaluate(variables.global_variables_initializer())\n self.assertEqual(1.0, self.evaluate(mirrored_var))\n\n def model_fn():\n return mirrored_var.assign_add(5.0)\n\n self.evaluate(distribution.experimental_local_results(\n distribution.extended.call_for_each_replica(model_fn)))\n self.assertEqual(6.0, self.evaluate(mirrored_var))\n\n def testAssignSubMirroredVarCrossDeviceContext(self, distribution):\n def var_fn():\n return variable_scope.variable(5.0, name=\"foo\")\n\n with distribution.scope():\n mirrored_var = distribution.extended.call_for_each_replica(var_fn)\n self.assertIsInstance(mirrored_var, values.MirroredVariable)\n self.evaluate(variables.global_variables_initializer())\n self.assertEqual(5.0, self.evaluate(mirrored_var))\n mirrored_var_result = self.evaluate(mirrored_var.assign_sub(2.0))\n self.assertEqual(3.0, mirrored_var_result)\n self.assertEqual(3.0, self.evaluate(mirrored_var.get(\"/device:GPU:0\")))\n self.assertEqual(3.0, self.evaluate(mirrored_var.get(\"/device:CPU:0\")))\n\n def testAssignSubMirroredVarReplicaContext(self, distribution):\n def var_fn():\n return variable_scope.variable(\n 5.0, name=\"foo\", aggregation=variable_scope.VariableAggregation.MEAN)\n\n with distribution.scope():\n mirrored_var = distribution.extended.call_for_each_replica(var_fn)\n self.assertIsInstance(mirrored_var, values.MirroredVariable)\n self.evaluate(variables.global_variables_initializer())\n self.assertEqual(5.0, self.evaluate(mirrored_var))\n\n def model_fn():\n value = math_ops.cast(\n ds_context.get_replica_context().replica_id_in_sync_group,\n mirrored_var.dtype)\n return mirrored_var.assign_sub(value)\n\n self.evaluate(distribution.experimental_local_results(\n distribution.extended.call_for_each_replica(model_fn)))\n self.assertEqual(4.5, self.evaluate(mirrored_var))\n\n def testAssignSubMirroredVarReplicaContextWithSingleValue(self, distribution):\n def var_fn():\n return variable_scope.variable(\n 5.0, name=\"foo\", aggregation=variable_scope.VariableAggregation.MEAN)\n\n with distribution.scope():\n mirrored_var = distribution.extended.call_for_each_replica(var_fn)\n self.assertIsInstance(mirrored_var, values.MirroredVariable)\n self.evaluate(variables.global_variables_initializer())\n self.assertEqual(5.0, self.evaluate(mirrored_var))\n\n def model_fn():\n return mirrored_var.assign_sub(1.0)\n\n self.evaluate(distribution.experimental_local_results(\n distribution.extended.call_for_each_replica(model_fn)))\n self.assertEqual(4.0, self.evaluate(mirrored_var))\n\n\n@combinations.generate(combinations.combine(\n distribution=[\n combinations.mirrored_strategy_with_gpu_and_cpu,\n combinations.core_mirrored_strategy_with_gpu_and_cpu],\n mode=[\"graph\", \"eager\"]))\nclass MirroredAndSyncOnReadVariableInitializerTest(test.TestCase):\n\n def testAssignMirroredVarInitializer(self, distribution):\n # This test is not eager compatible since in eager variables are initialized\n # upon construction instead of once the initialization op is run.\n with context.graph_mode():\n def var_fn():\n v = variable_scope.variable(1.0, name=\"foo\")\n return v\n\n with distribution.scope():\n mirrored_var = distribution.extended.call_for_each_replica(var_fn)\n self.assertIsInstance(mirrored_var, values.MirroredVariable)\n 
self.assertFalse(self.evaluate(mirrored_var.is_initialized()))\n self.evaluate(mirrored_var.initializer)\n self.assertTrue(self.evaluate(mirrored_var.is_initialized()))\n\n def testAssignReplicaLocalVarInitializer(self, distribution):\n # This test is not eager compatible since in eager variables are initialized\n # upon construction instead of once the initialization op is run.\n with context.graph_mode():\n def model_fn():\n v_sum = variable_scope.variable(\n 1.0,\n synchronization=variable_scope.VariableSynchronization.ON_READ,\n aggregation=variable_scope.VariableAggregation.SUM)\n self.assertIsInstance(v_sum, values.SyncOnReadVariable)\n return v_sum\n\n with distribution.scope():\n sync_on_read_var = distribution.extended.call_for_each_replica(\n model_fn)\n self.assertIsInstance(sync_on_read_var, values.SyncOnReadVariable)\n self.assertFalse(self.evaluate(sync_on_read_var.is_initialized()))\n self.evaluate(sync_on_read_var.initializer)\n self.assertTrue(self.evaluate(sync_on_read_var.is_initialized()))\n\n\n@combinations.generate(combinations.combine(\n distribution=[\n combinations.mirrored_strategy_with_gpu_and_cpu,\n combinations.core_mirrored_strategy_with_gpu_and_cpu],\n mode=[\"graph\", \"eager\"]))\nclass SyncOnReadVariableAssignTest(test.TestCase):\n\n def testAssignReplicaLocalVarSumAggregation(self, distribution):\n def model_fn():\n v_sum = variable_scope.variable(\n 1.0,\n synchronization=variable_scope.VariableSynchronization.ON_READ,\n aggregation=variable_scope.VariableAggregation.SUM)\n return v_sum\n\n with distribution.scope():\n sync_on_read_var = distribution.extended.call_for_each_replica(model_fn)\n self.assertIsInstance(sync_on_read_var, values.SyncOnReadVariable)\n self.evaluate(variables.global_variables_initializer())\n # Each replica has a value of 1.0 assigned to it in replica context.\n # When we read the value using `read_var` we should see the SUM of each of\n # values on each of the replicas.\n self.assertEqual(2.0, self.evaluate(\n distribution.extended.read_var(sync_on_read_var)))\n # Assigning 6.0 in cross replica context will assign a value of\n # 6.0/num_replicas to each replica.\n tlv_ops = sync_on_read_var.assign(6.0)\n self.evaluate(tlv_ops)\n # On reading the sync on read var we should get the assigned value back.\n # The value on all the replicas are added before being returned by\n # `read_var`.\n self.assertEqual(6.0, self.evaluate(\n distribution.extended.read_var(sync_on_read_var)))\n\n def testAssignReplicaLocalVarMeanAggregation(self, distribution):\n def model_fn():\n v_sum = variable_scope.variable(\n 1.0,\n synchronization=variable_scope.VariableSynchronization.ON_READ,\n aggregation=variable_scope.VariableAggregation.MEAN)\n return v_sum\n\n with distribution.scope():\n sync_on_read_var = distribution.extended.call_for_each_replica(model_fn)\n self.assertIsInstance(sync_on_read_var, values.SyncOnReadVariable)\n self.evaluate(variables.global_variables_initializer())\n # Each replica has a value of 1.0 assigned to it in replica context.\n # When we read the value using `read_var` we should see the MEAN of values\n # on all replicas which is the value assigned in replica context.\n self.assertEqual(1.0, self.evaluate(\n distribution.extended.read_var(sync_on_read_var)))\n tlv_ops = sync_on_read_var.assign(6.0)\n self.evaluate(tlv_ops)\n # On reading the sync on read var we should get the MEAN of all values\n # which is equal to the value assigned.\n self.assertEqual(6.0, self.evaluate(\n 
distribution.extended.read_var(sync_on_read_var)))\n\n\nclass MockModel(object):\n\n def __init__(self, two_variables=False):\n self.variables = []\n self.variables.append(variable_scope.variable(1.25, name=\"dummy_var1\"))\n if two_variables:\n self.variables.append(variable_scope.variable(2.0, name=\"dummy_var2\"))\n\n def __call__(self, factor=2):\n x = factor * self.variables[0]\n if len(self.variables) > 1:\n x += self.variables[1]\n return x\n\n\nclass MiniModel(keras_training.Model):\n \"\"\"Minimal model for mnist.\n\n Useful for testing and debugging on slow TPU simulators.\n \"\"\"\n\n def __init__(self):\n super(MiniModel, self).__init__(name=\"\")\n self.fc = keras_core.Dense(1, name=\"fc\", kernel_initializer=\"ones\",\n bias_initializer=\"ones\")\n\n def call(self, inputs, training=True):\n inputs = array_ops.ones([1, 10])\n return self.fc(inputs)\n\n\n@combinations.generate(combinations.combine(\n distribution=[\n combinations.mirrored_strategy_with_gpu_and_cpu,\n combinations.core_mirrored_strategy_with_gpu_and_cpu],\n mode=[\"graph\", \"eager\"]))\nclass MirroredStrategyDefunTest(test.TestCase):\n\n def _call_and_check(self, distribution, model_fn, inputs, expected_result,\n defuns, two_variables=False):\n cpu_dev = device_util.canonicalize(\"CPU:0\")\n gpu_dev = device_util.canonicalize(\"GPU:0\")\n devices = [cpu_dev, gpu_dev]\n\n with distribution.scope():\n mock_model = MockModel(two_variables)\n self.evaluate(variables.global_variables_initializer())\n\n result = distribution.extended.call_for_each_replica(\n model_fn, args=[mock_model] + inputs)\n for r in range(len(devices)):\n device_result = values.select_replica(r, result)\n device_expected_result = values.select_replica(r, expected_result)\n self.assertAllClose(device_expected_result,\n self.evaluate(device_result))\n\n for defun in defuns:\n # `Function`s are specialized to the current device stack, so\n # call_for_each has one trace per device. 
To check that the expected set\n # of variables was accessed on each trace, we first retrieve each\n # device-specific graph function.\n per_replica_graph_functions = (\n distribution.extended.call_for_each_replica(\n defun.get_concrete_function, args=[mock_model] + inputs))\n for device in devices:\n graph_function = per_replica_graph_functions.get(device=device)\n self.assertEqual(set(mock_model.variables),\n set(graph_function.graph.variables))\n\n def testVariableInDefun(self, distribution):\n @function.defun\n def times_two(mock_model):\n return mock_model()\n\n def model_fn(mock_model):\n return times_two(mock_model)\n\n self._call_and_check(distribution, model_fn, [], 2.5, [times_two])\n\n def testVariableInNestedDefun(self, distribution):\n @function.defun\n def times_two(mock_model):\n return mock_model()\n\n @function.defun\n def two_x_plus_one(mock_model):\n return times_two(mock_model) + 1\n\n def model_fn(mock_model):\n return two_x_plus_one(mock_model)\n\n self._call_and_check(distribution, model_fn, [], 3.5,\n [times_two, two_x_plus_one])\n\n def testTwoVariablesInNestedDefun(self, distribution):\n @function.defun\n def fn1(mock_model):\n return mock_model()\n\n @function.defun\n def fn2(mock_model):\n return fn1(mock_model) + 1\n\n def model_fn(mock_model):\n return fn2(mock_model)\n\n self._call_and_check(distribution, model_fn, [], 5.5, [fn1, fn2],\n two_variables=True)\n\n def testGradientTapeOverNestedDefuns(self, distribution):\n @function.defun\n def fn1(mock_model):\n return mock_model()\n\n @function.defun\n def fn2(mock_model):\n return fn1(mock_model) + 1\n\n def model_fn(mock_model):\n with backprop.GradientTape(persistent=True) as gtape:\n result = fn2(mock_model)\n grads = gtape.gradient(result,\n [v.get() for v in mock_model.variables])\n return grads\n\n self._call_and_check(distribution, model_fn, [], [2.0, 1.0], [fn1, fn2],\n two_variables=True)\n\n def testPassPerReplica(self, distribution):\n @function.defun\n def fn1(mock_model, factor):\n return mock_model(factor)\n\n device_map = values.ReplicaDeviceMap((\"/device:CPU:0\", \"/device:GPU:0\"))\n factors = values.PerReplica(device_map, (5.0, 3.0))\n expected_result = values.PerReplica(device_map, (5.0 * 1.25, 3.0 * 1.25))\n self._call_and_check(distribution, fn1, [factors], expected_result, [fn1])\n\n def testTrain(self, distribution):\n with distribution.scope():\n mock_model = MiniModel()\n mock_model.call = function.defun(mock_model.call)\n\n def loss_fn(ctx):\n del ctx\n return mock_model(array_ops.ones([1, 10]))\n\n gradients_fn = backprop.implicit_grad(loss_fn)\n gradients_fn = optimizer_lib.get_filtered_grad_fn(gradients_fn)\n grads_and_vars = distribution.extended.call_for_each_replica(\n gradients_fn, args=(None,))\n\n optimizer = gradient_descent.GradientDescentOptimizer(0.25)\n update_ops = optimizer._distributed_apply(distribution, grads_and_vars) # pylint: disable=protected-access\n\n if not context.executing_eagerly():\n self.evaluate(variables.global_variables_initializer())\n self.evaluate(update_ops)\n\n updated_var_values = self.evaluate(mock_model.variables)\n # All variables start at 1.0 and get two updates of 0.25.\n self.assertAllEqual(0.5 * np.ones([10, 1]), updated_var_values[0])\n self.assertAllEqual([0.5], updated_var_values[1])\n\n\n@combinations.generate(\n combinations.combine(\n distribution=[\n combinations.NamedDistribution(\n \"Mirrored\",\n # pylint: disable=g-long-lambda\n lambda: mirrored_strategy.MirroredStrategy(num_gpus_per_worker=\n context.num_gpus()),\n 
required_gpus=1),\n combinations.NamedDistribution(\n \"CoreMirrored\",\n # pylint: disable=g-long-lambda\n lambda: mirrored_strategy.CoreMirroredStrategy(\n mirrored_strategy.all_local_devices()),\n required_gpus=1)\n ],\n mode=[\"graph\"]))\nclass MultiWorkerMirroredStrategyTest(\n multi_worker_test_base.MultiWorkerTestBase,\n strategy_test_lib.DistributionTestBase):\n\n def _configure_distribution_strategy(self, distribution):\n cluster_spec = server_lib.ClusterSpec({\n \"worker\": [\"/job:worker/task:0\", \"/job:worker/task:1\"]\n })\n distribution.configure(cluster_spec=cluster_spec)\n\n def test_num_replicas_in_sync(self, distribution):\n self._configure_distribution_strategy(distribution)\n # We calculate the total number of gpus across the workers(2) specified in\n # the cluster spec.\n self.assertEqual(context.num_gpus() * 2, distribution.num_replicas_in_sync)\n\n def testMinimizeLossGraph(self, distribution):\n self._configure_distribution_strategy(distribution)\n self._test_minimize_loss_graph(distribution, learning_rate=0.05)\n\n def testDeviceScope(self, distribution):\n \"\"\"Test the device scope of multi-worker MirroredStrategy.\"\"\"\n self._configure_distribution_strategy(distribution)\n with distribution.scope():\n a = constant_op.constant(1.)\n with ops.device(\"/cpu:0\"):\n b = constant_op.constant(1.)\n self.assertEqual(a.device, \"/job:worker/task:0\")\n self.assertEqual(b.device, \"/job:worker/task:0/device:CPU:0\")\n\n def testMakeInputFnIteratorWithDataset(self, distribution):\n self._configure_distribution_strategy(distribution)\n dataset_fn = lambda: dataset_ops.Dataset.range(100)\n num_gpus = context.num_gpus()\n num_workers = 2\n\n expected_values = [[i+j for j in range(num_gpus)] * num_workers\n for i in range(0, 100, num_gpus)]\n\n with context.graph_mode(), self.cached_session() as sess:\n # `expected_input_pipeline_id` is None because the input_fn will be called\n # multiple times, each with a different input_pipeline_id.\n input_fn = self._input_fn_to_test_input_context(\n dataset_fn,\n expected_num_replicas_in_sync=num_workers*num_gpus,\n expected_num_input_pipelines=num_workers,\n expected_input_pipeline_id=None)\n iterator = distribution.make_input_fn_iterator(input_fn)\n self._test_input_fn_iterator(\n iterator, distribution.extended.worker_devices, expected_values, sess)\n\n def DISABLED_testMakeInputFnIteratorWithCallable(self, distribution):\n self._configure_distribution_strategy(distribution)\n def fn():\n dataset = dataset_ops.Dataset.range(100)\n it = dataset.make_one_shot_iterator()\n return it.get_next\n num_gpus = context.num_gpus()\n num_workers = 2\n\n expected_values = []\n for i in range(0, 100, num_gpus):\n expected_values.append([i+j for j in range(num_gpus)] * num_workers)\n\n with context.graph_mode(), self.cached_session() as sess:\n # `expected_input_pipeline_id` is None because the input_fn will be called\n # multiple times, each with a different input_pipeline_id.\n input_fn = self._input_fn_to_test_input_context(\n fn,\n expected_num_replicas_in_sync=num_workers*num_gpus,\n expected_num_input_pipelines=num_workers,\n expected_input_pipeline_id=None)\n iterator = distribution.make_input_fn_iterator(input_fn)\n self._test_input_fn_iterator(\n iterator, distribution.extended.worker_devices, expected_values, sess,\n test_reinitialize=False)\n\n def testUpdateConfigProto(self, distribution):\n distribution.configure(cluster_spec={\"worker\": [\"fake1\", \"fake2\"]})\n\n config_proto = config_pb2.ConfigProto()\n new_config = 
distribution.update_config_proto(config_proto)\n\n # Verify isolate_session_state\n self.assertTrue(new_config.isolate_session_state)\n\n\nclass MultiWorkerMirroredStrategyTestWithChief(\n multi_worker_test_base.MultiWorkerTestBase,\n strategy_test_lib.DistributionTestBase):\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Create a local cluster with 2 workers and 1 chief.\"\"\"\n cls._cluster_spec = multi_worker_test_base.create_in_process_cluster(\n num_workers=2, num_ps=0, has_chief=True)\n cls._default_target = \"grpc://\" + cls._cluster_spec[\"chief\"][0]\n\n def testMinimizeLossGraph(self):\n strategy = mirrored_strategy.MirroredStrategy(\n num_gpus_per_worker=context.num_gpus())\n strategy.configure(cluster_spec=self._cluster_spec)\n self._test_minimize_loss_graph(strategy, learning_rate=0.05)\n\n def testMinimizeLossGraphCoreMirroredStrategy(self):\n strategy = mirrored_strategy.CoreMirroredStrategy(\n mirrored_strategy.all_local_devices())\n strategy.configure(cluster_spec=self._cluster_spec)\n self._test_minimize_loss_graph(strategy, learning_rate=0.05)\n\n def testMinimizeLossGraphCoreMirroredStrategyWithOneNode(self):\n cluster_spec = {}\n cluster_spec[\"chief\"] = self._cluster_spec[\"chief\"]\n tf_config = {\"cluster\": cluster_spec}\n with test.mock.patch.dict(\"os.environ\",\n {\"TF_CONFIG\": json.dumps(tf_config)}):\n strategy = mirrored_strategy.CoreMirroredStrategy()\n self.assertIsInstance(strategy.extended._inferred_cross_device_ops,\n cross_device_ops_lib.NcclAllReduce)\n self._test_minimize_loss_graph(strategy, learning_rate=0.05)\n\n def testInitializeFromTFConfig(self):\n tf_config = {\"cluster\": self._cluster_spec}\n with test.mock.patch.dict(\"os.environ\",\n {\"TF_CONFIG\": json.dumps(tf_config)}):\n strategy = mirrored_strategy.CoreMirroredStrategy()\n self.assertEqual(\n max(context.num_gpus(), 1) * 3, strategy.num_replicas_in_sync)\n\n def testSummaryForReplicaZeroOnly(self):\n strategy = mirrored_strategy.CoreMirroredStrategy(\n mirrored_strategy.all_local_devices())\n strategy.configure(cluster_spec=self._cluster_spec)\n self._test_summary_for_replica_zero_only(strategy)\n\n\ndef _replica_id():\n replica_id = ds_context.get_replica_context().replica_id_in_sync_group\n if not isinstance(replica_id, ops.Tensor):\n replica_id = constant_op.constant(replica_id)\n return replica_id\n\n\nif __name__ == \"__main__\":\n test.main()\n"
] |
[
[
"tensorflow.python.data.ops.dataset_ops.Dataset.from_tensors",
"tensorflow.python.ops.variable_scope.variable_creator_scope",
"tensorflow.python.distribute.values.ReplicaDeviceMap",
"tensorflow.python.training.optimizer.get_filtered_grad_fn",
"tensorflow.contrib.distribute.python.mirrored_strategy.CoreMirroredStrategy",
"tensorflow.python.framework.ops.executing_eagerly_outside_functions",
"tensorflow.python.eager.backprop.GradientTape",
"tensorflow.python.data.ops.dataset_ops.Dataset.range",
"tensorflow.python.framework.ops.device",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.layers.core.Dense",
"tensorflow.python.distribute.values.select_replica",
"tensorflow.python.ops.rnn.bidirectional_dynamic_rnn",
"tensorflow.python.ops.variable_scope.variable_scope",
"tensorflow.python.ops.array_ops.ones",
"tensorflow.contrib.distribute.python.combinations.combine",
"tensorflow.contrib.distribute.python.mirrored_strategy.all_local_devices",
"tensorflow.python.eager.context.graph_mode",
"tensorflow.contrib.distribute.python.multi_worker_test_base.create_in_process_cluster",
"tensorflow.python.eager.backprop.implicit_grad",
"tensorflow.python.eager.test.main",
"tensorflow.python.framework.func_graph.FuncGraph",
"tensorflow.python.eager.function.defun",
"tensorflow.python.distribute.values.PerReplica",
"tensorflow.python.training.server_lib.ClusterSpec",
"tensorflow.python.keras.layers.core.Dense",
"tensorflow.python.distribute.device_util.canonicalize",
"tensorflow.python.ops.variable_scope.variable",
"tensorflow.python.distribute.distribution_strategy_context.get_replica_context",
"tensorflow.python.training.gradient_descent.GradientDescentOptimizer",
"tensorflow.python.distribute.values.DistributedValues",
"tensorflow.python.ops.rnn_cell_impl.LSTMCell",
"tensorflow.python.eager.context.num_gpus",
"tensorflow.python.ops.variable_scope.get_variable",
"numpy.ones",
"tensorflow.contrib.distribute.python.mirrored_strategy.MirroredStrategy",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.variables.global_variables_initializer",
"tensorflow.core.protobuf.config_pb2.ConfigProto",
"tensorflow.python.framework.constant_op.constant"
]
] |
ZihaoChen0319/CMB-Segmentation
|
[
"99c5788baacc280ca5dbe02f3e18403e399fb238"
] |
[
"DiscriTrainer.py"
] |
[
"import torch.nn as nn\r\nimport os\r\nimport torch.optim as optim\r\nfrom tqdm import tqdm\r\nimport numpy as np\r\nimport torch\r\nimport torch.nn.functional as nnf\r\nimport json\r\nfrom PairwiseMeasures_modified import PairwiseMeasures\r\nimport medpy.io as mio\r\n\r\nfrom ScreenTrainer import ScreenTrainer\r\nfrom MyDataloader import get_train_cases, get_cmbdataloader\r\nfrom MyNetwork import DiscriNet\r\nfrom MyLoss import FocalLoss\r\n\r\n\r\nclass DiscriTrainer(nn.Module):\r\n def __init__(self, data_path, model_save_path, dataset_path, screen_model_path, load_screen='current',\r\n device='cuda', fold=0, bbox=(24, 24, 20), batch_size=128, loss='ce',\r\n optimizer='sgd', init_lr=1e-4, all_epoch=50, decay_exponent=0.9, config=None, if_test=False,\r\n random_negatives=200000, aug_num=10, add_fp=False, resample_num=(10000, 10000, 10000),\r\n modality=('T1', 'T2', 'T2S')):\r\n \"\"\"\r\n Trainer of the Discrimination Network.\r\n \"\"\"\r\n super(DiscriTrainer, self).__init__()\r\n\r\n self.bbox = bbox\r\n self.batch_size = batch_size\r\n self.init_lr = init_lr\r\n self.decay_exponent = decay_exponent\r\n self.all_epoch = all_epoch\r\n self.config = config\r\n self.resample_num = resample_num\r\n self.modality = modality\r\n self.fold = fold\r\n self.random_negatives = random_negatives\r\n if load_screen:\r\n self.screen_trainer = ScreenTrainer(\r\n data_path=data_path,\r\n model_save_path=screen_model_path,\r\n dataset_path=dataset_path,\r\n device=device,\r\n fold=fold,\r\n modality=modality,\r\n if_test=True)\r\n self.screen_trainer.load_model(load_screen)\r\n else:\r\n self.screen_trainer = None\r\n\r\n # path define\r\n self.data_path = data_path\r\n self.model_save_path = model_save_path + 'fold_%d/' % fold\r\n if not os.path.exists(self.model_save_path):\r\n os.makedirs(self.model_save_path)\r\n\r\n # device\r\n self.device = device\r\n\r\n # load division of data\r\n if os.path.exists(dataset_path + 'fold_division.json'):\r\n with open(dataset_path + 'fold_division.json', mode='r') as f:\r\n splits = json.load(f)\r\n self.train_list_sub = splits[str(fold)]['train']\r\n self.val_list_sub = splits[str(fold)]['val']\r\n else:\r\n self.train_list_sub = []\r\n self.val_list_sub = []\r\n print('Data division is empty!')\r\n\r\n # training and validation samples\r\n if not if_test:\r\n self.dataset_name = 'fold_%d/bbox-%d-%d-%d_neg-%d_aug-%d/' % \\\r\n (fold, self.screen_trainer.bbox[0], self.screen_trainer.bbox[1],\r\n self.screen_trainer.bbox[2], random_negatives, aug_num)\r\n if not os.path.exists(dataset_path + self.dataset_name):\r\n os.makedirs(dataset_path + self.dataset_name)\r\n # load or generate the training samples\r\n if os.path.exists(dataset_path + self.dataset_name + 'pos.json'):\r\n with open(dataset_path + self.dataset_name + 'pos.json', mode='r') as f:\r\n self.train_cases_pos = json.load(f)\r\n if os.path.exists(dataset_path + self.dataset_name + 'neg.json'):\r\n with open(dataset_path + self.dataset_name + 'neg.json', mode='r') as f:\r\n self.train_cases_neg = json.load(f)\r\n else:\r\n self.train_cases_pos, self.train_cases_neg = get_train_cases(\r\n data_path=self.data_path, train_list=self.train_list_sub, bbox=self.bbox, seed=2021,\r\n if_translation=True, random_negatives=random_negatives, aug_num=aug_num)\r\n with open(dataset_path + self.dataset_name + 'pos.json', mode='w') as f:\r\n json.dump(self.train_cases_pos, f)\r\n with open(dataset_path + self.dataset_name + 'neg.json', mode='w') as f:\r\n json.dump(self.train_cases_neg, f)\r\n # load false positive 
samples\r\n self.train_cases_fp = []\r\n if add_fp:\r\n if os.path.exists(dataset_path + 'fold_%d/fp_%s_current.json' % (self.fold, self.screen_trainer.model_name)):\r\n with open(dataset_path + 'fold_%d/fp_%s_current.json' % (self.fold, self.screen_trainer.model_name), mode='r') as f:\r\n self.train_cases_fp = json.load(f)\r\n print('Dataset: pos %d, neg %d, fp %d' %\r\n (len(self.train_cases_pos), len(self.train_cases_neg), len(self.train_cases_fp)))\r\n else:\r\n self.train_cases_fp = []\r\n self.train_cases_pos = []\r\n self.train_cases_neg = []\r\n\r\n # model\r\n self.model = DiscriNet(in_channel=len(modality), num_class=2)\r\n self.model.to(self.device)\r\n\r\n # loss function\r\n if loss == 'ce':\r\n self.loss_fc = nn.CrossEntropyLoss()\r\n elif loss == 'weighted ce':\r\n self.loss_fc = nn.CrossEntropyLoss(weight=torch.tensor([0.25, 0.75], device=device))\r\n elif loss == 'focal loss':\r\n self.loss_fc = FocalLoss()\r\n else:\r\n raise ValueError('No such optimizer')\r\n\r\n # optimizer\r\n if optimizer == 'sgd':\r\n self.optimizer = optim.SGD(self.model.parameters(), lr=init_lr, momentum=0.99, nesterov=True)\r\n elif optimizer == 'adam':\r\n self.optimizer = optim.Adam(self.model.parameters(), lr=init_lr)\r\n else:\r\n raise ValueError('No such optimizer')\r\n\r\n self.epoch = 1\r\n self.lr = init_lr\r\n self.train_metric = [0] * 3\r\n self.test_metric = [0] * 4\r\n\r\n def train_epoch(self):\r\n self.model.train()\r\n train_accum = [0] * 6\r\n train_cases_fp = self.train_cases_fp.copy()\r\n train_cases_pos = self.train_cases_pos.copy()\r\n train_cases_neg = self.train_cases_neg.copy()\r\n # randomly choose training samples, ensuring that the number of samples is fixed under different conditions\r\n if len(self.resample_num):\r\n train_cases_pos = np.random.choice(train_cases_pos, size=self.resample_num[0]).tolist()\r\n train_cases_neg = np.random.choice(train_cases_neg, size=self.resample_num[1]).tolist()\r\n if len(train_cases_fp):\r\n train_cases_fp = np.random.choice(train_cases_fp, size=self.resample_num[2]).tolist()\r\n data_list = train_cases_pos + train_cases_neg + train_cases_fp\r\n dataloader = get_cmbdataloader(\r\n data_path=self.data_path,\r\n dataset_index=data_list,\r\n bbox=self.bbox,\r\n batch_size=self.batch_size,\r\n shuffle=True,\r\n pin_memory=True,\r\n modality=self.modality,\r\n num_workers=2,\r\n )\r\n dataloader = tqdm(dataloader)\r\n for img_batch, label_batch in dataloader:\r\n img_batch = img_batch.to(self.device).float()\r\n label_batch = label_batch.to(self.device)\r\n\r\n pred_batch = self.model(img_batch)\r\n loss = self.loss_fc(pred_batch, label_batch)\r\n\r\n self.optimizer.zero_grad()\r\n loss.backward()\r\n self.optimizer.step()\r\n\r\n y_hat = pred_batch.argmax(axis=1).detach().cpu().numpy()\r\n y = label_batch.detach().cpu().numpy()\r\n\r\n train_accum[0] += img_batch.shape[0]\r\n train_accum[1] += loss.detach().cpu().numpy() * img_batch.shape[0]\r\n train_accum[2] += np.sum(y_hat == y) # acc\r\n train_accum[3] += np.sum((y_hat == 1) & (y == 1)) # tp\r\n train_accum[4] += np.sum((y_hat == 1) & (y != 1)) # fp\r\n train_accum[5] += np.sum((y_hat != 1) & (y == 1)) # fn\r\n\r\n self.train_metric[0] = train_accum[1] / train_accum[0] # loss\r\n self.train_metric[1] = train_accum[2] / train_accum[0] # acc\r\n self.train_metric[2] = 2 * train_accum[3] / np.clip(2 * train_accum[3] + train_accum[4] + train_accum[5],\r\n a_min=1e-5, a_max=1e10) # f1\r\n dataloader.set_description('Epoch: %d, ' % self.epoch +\r\n 'train loss %.4f, ' % self.train_metric[0] 
+\r\n 'train acc %.4f, ' % self.train_metric[1] +\r\n 'train f1 %.4f, ' % self.train_metric[2])\r\n\r\n return self.train_metric\r\n\r\n def val_epoch(self):\r\n self.screen_trainer.model.eval()\r\n self.model.eval()\r\n test_accum = [0] * 6\r\n for pat in self.val_list_sub:\r\n data_list = []\r\n for mod in self.modality:\r\n data_list.append(np.load(self.data_path + '%s/%s_space-T2S_%s.npy' % (pat, pat, mod)))\r\n cmb, h = mio.load(self.data_path + '%s/%s_space-T2S_CMB.nii.gz' % (pat, pat))\r\n img = np.stack(data_list, axis=0)\r\n _, _, n_obj, pred_init_space, candidates_list, score_init_space = \\\r\n self.screen_trainer.inference(img, patch_size=(160, 160, 80), if_nms=True, thresh=0.1, size=2)\r\n pred_fp_reduced, reduc_candidates_list, num = self.inference(img, candidates_list, thresh=0.5, size=2)\r\n pe_dis = PairwiseMeasures(ref_img=cmb, seg_img=pred_fp_reduced, analysis='microbleeds',\r\n measures=('f1_score', 'tp', 'fn', 'fp'),\r\n connectivity=3, pixdim=h.get_voxel_spacing(), empty=True,\r\n threshold=0.5, thresh_assign=3)\r\n tp_dis, fn_dis, fp_dis, f1_dis = pe_dis.m_dict['tp'][0](), pe_dis.m_dict['fn'][0](), \\\r\n pe_dis.m_dict['fp'][0](), pe_dis.m_dict['f1_score'][0]()\r\n test_accum[0] += 1\r\n test_accum[1] += 1 if np.sum(cmb) else 0\r\n test_accum[2] += tp_dis\r\n test_accum[3] += fn_dis\r\n test_accum[4] += fp_dis\r\n test_accum[5] += f1_dis if np.sum(cmb) else 0\r\n print('%s: reduc TP %d, reduc FN %d, reduc FP %d, reduc F1 %.4f' % (pat, tp_dis, fn_dis, fp_dis, f1_dis))\r\n\r\n self.test_metric[0] = test_accum[2]\r\n self.test_metric[1] = test_accum[3]\r\n self.test_metric[2] = test_accum[4] / test_accum[0]\r\n self.test_metric[3] = test_accum[5] / test_accum[1]\r\n print('Epoch: %d, reduc TP %d, reduc FN %d, reduc avg FP %.2f, reduc F1 %.4f' %\r\n (self.epoch, self.test_metric[0], self.test_metric[1], self.test_metric[2], self.test_metric[3]))\r\n\r\n return self.test_metric\r\n\r\n def adjust_lr(self):\r\n \"\"\"Adjust the learning rate following ‘poly’ policy\"\"\"\r\n self.lr = self.init_lr * (1 - self.epoch / self.all_epoch) ** self.decay_exponent\r\n for param_group in self.optimizer.param_groups:\r\n param_group['lr'] = self.lr\r\n return self.lr\r\n\r\n def save_model(self, force=False):\r\n \"\"\"Save the model every epoch(current) and every 5 epochs(epoch_xx)\"\"\"\r\n state = {\r\n 'epoch': self.epoch,\r\n 'state_dict': self.model.state_dict(),\r\n 'config': self.config,\r\n }\r\n torch.save(state, self.model_save_path + 'current.pth.tar')\r\n if self.epoch % 5 == 0 or force:\r\n torch.save(state, self.model_save_path + 'epoch_%d_%d_%d_%.2f_%.2f.pth.tar' %\r\n (self.epoch, self.test_metric[0], self.test_metric[1], self.test_metric[2], self.test_metric[3]))\r\n\r\n def load_model(self, model_name='current', silent=False):\r\n all_saved_models = os.listdir(self.model_save_path)\r\n matched_model = [model for model in all_saved_models if model.startswith(model_name)]\r\n if len(matched_model) == 1:\r\n checkpoint = torch.load(self.model_save_path + matched_model[0], map_location={'cuda:0': self.device})\r\n self.epoch = checkpoint['epoch'] + 1\r\n self.model.load_state_dict(checkpoint['state_dict'])\r\n self.model.to(self.device)\r\n # self.config = checkpoint['config']\r\n self.adjust_lr()\r\n elif len(matched_model) > 1:\r\n print(matched_model)\r\n raise ValueError('Too many matched models!')\r\n if not silent:\r\n print('Discrimination model: %s, device: %s, epoch: %d'\r\n % (self.model_save_path + model_name, self.device, self.epoch))\r\n\r\n def 
inference(self, data: np.ndarray, candidates_list, thresh=0.5, size=2):\r\n shape = data.shape[1:]\r\n tp_list = []\r\n num = 0\r\n pred_fp_reduced = np.zeros(shape)\r\n for position in candidates_list:\r\n position = np.array(position, dtype=int)\r\n x, y, z = position\r\n # Slightly move the candidates to deal with disturbance\r\n position_enlarged = [[i, j, k] for i in [x-1, x, x+1] for j in [y-1, y, y+1] for k in [z-1, z, z+1]]\r\n regions = np.zeros((len(position_enlarged), len(self.modality), self.bbox[0], self.bbox[1], self.bbox[2]))\r\n for i, pos in enumerate(position_enlarged):\r\n neighbour = self.get_neighbour(data, pos)\r\n regions[i] = neighbour\r\n regions = torch.tensor(regions, dtype=torch.float32, device=self.device)\r\n out_enlarged = self.model(regions).detach()\r\n out_enlarged = nnf.softmax(out_enlarged, dim=1)[:, 1].cpu().numpy()\r\n if np.max(out_enlarged) > thresh:\r\n pos_new = position_enlarged[np.argmax(out_enlarged)]\r\n tp_list.append(pos_new)\r\n pred_fp_reduced[pos_new[0]-size//2:pos_new[0]+size//2,\r\n pos_new[1]-size//2:pos_new[1]+size//2,\r\n pos_new[2]-size//2:pos_new[2]+size//2] = 1\r\n num += 1\r\n return pred_fp_reduced, tp_list, num\r\n\r\n def get_neighbour(self, data: np.ndarray, position):\r\n shape = data.shape[1:]\r\n if self.bbox[0] // 2 <= position[0] <= shape[0] - self.bbox[0] // 2 and \\\r\n self.bbox[1] // 2 <= position[1] <= shape[1] - self.bbox[1] // 2 and \\\r\n self.bbox[2] // 2 <= position[2] <= shape[2] - self.bbox[2] // 2:\r\n return data[:, position[0] - self.bbox[0] // 2:position[0] + self.bbox[0] // 2,\r\n position[1] - self.bbox[1] // 2:position[1] + self.bbox[1] // 2,\r\n position[2] - self.bbox[2] // 2:position[2] + self.bbox[2] // 2]\r\n else:\r\n data = np.pad(data, ((0, 0), (self.bbox[0] // 2, self.bbox[0] // 2),\r\n (self.bbox[1] // 2, self.bbox[1] // 2), (self.bbox[2] // 2, self.bbox[2] // 2)),\r\n mode='constant', constant_values=0)\r\n return data[:, position[0]:position[0] + self.bbox[0],\r\n position[1]:position[1] + self.bbox[1],\r\n position[2]:position[2] + self.bbox[2]]\r\n\r\n\r\n"
] |
[
[
"torch.nn.CrossEntropyLoss",
"torch.nn.functional.softmax",
"numpy.pad",
"torch.load",
"numpy.clip",
"numpy.random.choice",
"numpy.stack",
"torch.tensor",
"numpy.max",
"numpy.argmax",
"numpy.load",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"torch.save"
]
] |
jmonrods/pyeis
|
[
"e354691cd4d3a9a80c4e42e99129d6ee704f0f46"
] |
[
"replot2.py"
] |
[
"#Algorithm to plot data as Nyquist plot, Bode plot, or Real and Imagine component plot\r\n# ------------------------------------------------------\r\n# Copyright (C) 2020 Gustavo Rodriguez Gutierrez\r\n# Licensed under the MIT license, see LICENSE.\r\n# ------------------------------------------------------\r\n\r\nimport random\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib import cm\r\nimport numpy as np\r\nfrom upfiles import *\r\n\r\ndef replot2(ldata,graph):\r\n #List to store all data except for the frequency\r\n Re = []\r\n Im = []\r\n Mg = []\r\n Ph = []\r\n\r\n #Store all data in the list (Except for Frequency)\r\n for i in range(len(ldata)):\r\n Re.append(np.array(ldata[i][:,1]))\r\n Im.append(np.array(ldata[i][:,2]))\r\n Mg.append(np.array(ldata[i][:,3]))\r\n Ph.append(np.array(ldata[i][:,4]))\r\n\r\n #Store the frequency in variable b\r\n freq = np.array(ldata[0][:,0])\r\n\r\n # Helps pick a random color for each plot, used for readability\r\n rand = lambda: random.randint(0, 255)\r\n\r\n\r\n\r\n\r\n if(graph=='Re/Im'):\r\n fig, axs = plt.subplots(2,1,figsize=(30,7.5))\r\n # Parameters for Re/Im Plots\r\n plt.setp(axs[1], xlabel='Frequency [Hz]')\r\n plt.setp(axs[0], xlabel='Frequency [Hz]')\r\n plt.subplots_adjust(left=0.1, right=0.925, wspace=0.23, hspace=0.335)\r\n\r\n # Re/Im Plots\r\n axs[0].set_title(\"Real Component vs Frequency\")\r\n plt.setp(axs[0], ylabel='Real Component')\r\n for ydata in Re:\r\n clr = '#%02X%02X%02X' % (rand(),rand(),rand())\r\n plot, = axs[0].loglog(freq, ydata, color=clr)\r\n\r\n axs[1].set_title(\"Imaginary Component vs Frequency\")\r\n plt.setp(axs[1], ylabel='Imaginary Component')\r\n for ydata in Im:\r\n clr = '#%02X%02X%02X' % (rand(),rand(),rand())\r\n plot, = axs[1].loglog(freq, abs(ydata), color=clr)\r\n\r\n elif(graph=='Bode'):\r\n fig, axs = plt.subplots(2,1,figsize=(30,7.5))\r\n # Parameters for Bode Plots\r\n plt.setp(axs[1], xlabel='Frequency [Hz]')\r\n plt.setp(axs[0], xlabel='Frequency [Hz]')\r\n plt.subplots_adjust(left=0.1, right=0.925, wspace=0.23, hspace=0.335)\r\n\r\n # Bode Plots\r\n axs[0].set_title(\"Magnitude vs Frequency\")\r\n plt.setp(axs[0], ylabel='Magnitude')\r\n for ydata in Mg:\r\n clr = '#%02X%02X%02X' % (rand(),rand(),rand())\r\n plot, = axs[0].loglog(freq, ydata, color=clr)\r\n\r\n axs[1].set_title(\"Phase vs Frequency\")\r\n plt.setp(axs[1], ylabel='Phase')\r\n for ydata in Ph:\r\n clr = '#%02X%02X%02X' % (rand(),rand(),rand())\r\n plot, = axs[1].semilogx(freq, ydata, color=clr)\r\n\r\n\r\n elif (graph=='Nyquist'):\r\n # Parameters for Nyquist plot\r\n fig2 = plt.figure(figsize=(30,7.5))\r\n ax2 = fig2.add_subplot(111)\r\n ax2.set_title(\"Nyquist Plot\")\r\n plt.setp(ax2, ylabel='Imaginary Component')\r\n plt.setp(ax2, xlabel='Real Component')\r\n\r\n # Nyquist Plot\r\n count = 0\r\n for ydata in Im:\r\n clr = '#%02X%02X%02X' % (rand(),rand(),rand())\r\n plot, = ax2.plot(Re[count], abs(ydata), color=clr)\r\n count = count + 1\r\n\r\n\r\n plt.show()\r\n\r\n#ldata = upfiles()\r\n#print(ldata)\r\n#replot2(ldata,'Nyquist')\r\n#replot2(ldata,'Bode')\r\n#replot2(ldata,'Re/Im')\r\n#print(len(ldata))\r\n"
] |
[
[
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.setp",
"matplotlib.pyplot.subplots_adjust",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
ajbouh/tfi
|
[
"6e89e8c8f1ca3b285c788cc6b802fc44f9001290"
] |
[
"src/tfi/serve/rest.py"
] |
[
"import base64\nimport json\nimport inspect\nimport opentracing\n\nimport tfi.tensor.codec\n\nfrom flask import request, make_response\nfrom opentracing.ext import tags as opentracing_ext_tags\nfrom urllib.parse import urlparse\nfrom werkzeug.exceptions import BadRequest\n\nfrom tfi import data as tfi_data\nfrom tfi import json as tfi_json\nfrom tfi.base import _recursive_transform\n\nfrom collections import OrderedDict\n\nfrom tfi.tensor.codec import encode as _encode_tensor\n\ndef _decode_form_value(value):\n # Attempt to auto-detect JSON-encoded values\n ch = value[0]\n # Assume that values that start with a char that is \n # - non-object,\n # - non-array,\n # - non-quote,\n # - and non-numeric\n # should be considered a string.\n if ch != '{' and ch != '[' and ch != '\"' and not value.isdecimal():\n # HACK(adamb) Replace value with a json-encoded version of itself\n value = json.dumps(value)\n return tfi_data.json(value)\n\ndef _decode_predict_pb2_request(request):\n decoded = OrderedDict()\n from tensorflow.python.framework import tensor_util\n import tensorflow_serving_api.predict_pb2\n pr = tensorflow_serving_api.predict_pb2.PredictRequest()\n pr.ParseFromString(request.data)\n for input_name, input_tensor_proto in pr.inputs.items():\n decoded[input_name] = tensor_util.MakeNdarray(input_tensor_proto)\n return list(pr.output_filter), decoded\n\ndef _decode_request(request):\n # TODO(adamb) Evaluate whether or not request is a tf.PredictionRequest\n # If so, decode it using tensorflow-serving-api\n if request.mimetype == 'application/x-protobuf':\n return _decode_predict_pb2_request(request)\n\n output_filter = None\n decoded = OrderedDict()\n\n if 'x-tesserai-input-json' in request.headers:\n decoded.update(json.loads(request.headers['x-tesserai-input-json']))\n\n if 'x-tesserai-input-body' in request.headers:\n body_key = request.headers['x-tesserai-input-body']\n decoded[body_key] = tfi_data.bytes(request.data, mimetype=request.mimetype)\n return output_filter, decoded\n\n if request.mimetype == 'application/json':\n decoded_json = tfi_data.json(request.data)\n if decoded_json is not None:\n decoded.update(decoded_json)\n\n if request.form is not None:\n for field in request.form:\n values = request.form.getlist(field)\n decoded[field] = [\n _decode_form_value(value)\n for value in values\n ]\n\n if len(decoded[field]) == 1:\n decoded[field] = decoded[field][0]\n\n for field, file in request.files.items():\n decoded[field] = tfi_data.file(file, mimetype=file.mimetype)\n \n return output_filter, decoded\n\ndef _encode_response(request, result):\n if 'x-tesserai-output-body' in request.headers:\n body_key = request.headers['x-tesserai-output-body']\n if body_key[0] != '[':\n body_key = [body_key]\n else:\n body_key = json.loads(body_key)\n\n for key in body_key:\n result = result[key]\n\n accept = request.headers['accept']\n if '*/*' in accept:\n if request.mimetype == 'application/json':\n accept = 'application/json'\n elif request.mimetype == 'application/x-protobuf':\n accept = 'application/x-protobuf'\n\n # NOTE(adamb) This is a bit gross and confusing. 
Why can't\n # we always use _encode_tensor?\n if 'application/json' in accept:\n return 'application/json', tfi_json.dumps(result, coerce=True)\n\n if 'application/x-protobuf' in accept:\n # TODO(adamb) Properly encode outputs\n return 'application/x-protobuf', None\n\n return accept, _encode_tensor({accept: lambda x: x}, result)\n\ndef _get_request_field(req, field, annotation):\n if field not in req:\n return False, None\n \n return True, req[field]\n\ndef _maybe_plural(n, singular, plural):\n return singular if n == 1 else plural\n\ndef _MissingParameters(missing):\n noun = _maybe_plural(len(missing), \"parameter\", \"parameters\")\n desc = \"Missing %s: %s\" % (noun, \", \".join(missing))\n return BadRequest(desc)\n\ndef _make_method_endpoint(model, method_name):\n method = getattr(model, method_name)\n sig = inspect.signature(method)\n param_annotations = {k: v.annotation for k, v in sig.parameters.items()}\n required = {k for k, v in sig.parameters.items() if v.default is inspect.Parameter.empty}\n\n def fn():\n output_filter, decoded = _decode_request(request)\n d = {}\n missing = set(required)\n for k, ann in param_annotations.items():\n ok, v = _get_request_field(decoded, k, ann)\n if ok:\n v = decoded[k]\n missing.remove(k)\n d[k] = v\n\n if missing:\n raise _MissingParameters(missing)\n \n # TODO(adamb) Support for output_filter, so we can avoid computing outputs\n # we don't care about.\n result = method(**d)\n mimetype, encoded = _encode_response(request, result)\n response = make_response(encoded, 200)\n response.headers['Content-Type'] = mimetype\n return response\n\n return fn\n\ndef _set_environ_later(k, v):\n def _wrap(f):\n def _do():\n request.environ[k] = v\n return f()\n return _do\n return _wrap\n\ndef add_endpoints(model, tracer, app):\n trace_route = tracer(app)\n\n for method_name, method in inspect.getmembers(model, predicate=inspect.ismethod):\n if method_name.startswith('_'):\n continue\n\n tracing_tags = {\n \"model.method\": method_name,\n \"visibility\": \"public\",\n }\n fn = _make_method_endpoint(model, method_name)\n fn = _set_environ_later('TFI_METHOD', method_name)(fn)\n fn = trace_route(tracing_tags)(fn)\n fn.__name__ = method_name\n app.route(\"/api/%s\" % method_name, methods=[\"POST\", \"GET\"])(fn)\n\n"
] |
[
[
"tensorflow.python.framework.tensor_util.MakeNdarray"
]
] |
ishakir/ml-tictactoe
|
[
"904eb24aa2c76e081138a1a90740173b403097d8"
] |
[
"mlgames/config/connect4_learnanynonlosing_config.py"
] |
[
"import tensorflow as tf\nfrom tensorflow import keras\nfrom collections import defaultdict\n\nfrom mlgames.config.config_abc import ConfigABC\n\nfrom mlgames.connect4.board import empty\n\nBOARD_HEIGHT = 6\nBOARD_WIDTH = 7\n\nclass Connect4LearnAnyNonLosingConfig(ConfigABC):\n\tdef name(self):\n\t\treturn \"connect4_learnanynonlosing\"\n\n\tdef number_of_bots(self):\n\t\treturn 2\n\n\tdef empty_board_gen(self):\n\t\treturn empty\n\n\tdef new_model(self):\n\t\tmodel = keras.Sequential([\n\t\t keras.layers.Dense(80, activation=tf.nn.relu, input_shape=(126, )),\n\t\t keras.layers.Dense(35, activation=tf.nn.relu),\n\t\t keras.layers.Dense(7, activation=tf.nn.softmax)\n\t\t])\n\n\t\tmodel.compile(optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\n\t\treturn model\n\n\tdef board_to_input(self):\n\t\tdef f(board, piece):\n\t\t\tdef translate(val):\n\t\t\t\treturn [True, False, False] if val == piece else [False, True, False] if not val == b'.' else [False, False, True]\n\t\t\tbinary_state = []\n\t\t\tfor b in board.board_state:\n\t\t\t\tfor a in b:\n\t\t\t\t\tbinary_state.extend(translate(a))\n\t\t\treturn binary_state\n\t\treturn f\n\n\tdef move_to_prediction(self):\n\t\tdef identity(x):\n\t\t\treturn x\n\t\treturn identity\n\n\tdef prediction_to_move(self):\n\t\tdef identity(x):\n\t\t\treturn x\n\t\treturn identity\n\n\tdef epochs(self):\n\t\treturn 5\n\n\tdef good_move_confidence_appearance_threshold(self):\n\t\treturn 1\n\n\tdef should_train(self):\n\t\tdef f(results):\n\t\t\treturn 'win' in results or 'draw' in results\n\t\treturn f\n\n\tdef benchmark_minimax_depth(self):\n\t\treturn 2\n\n\tdef board_evaluation(self):\n\t\tdef examine_sublist(sublist):\n\t\t\tvalues = set(sublist)\n\t\t\tif b'X' in values and b'O' in values:\n\t\t\t\treturn 'not_completable'\n\t\t\telif b'X' not in values and b'O' not in values in values:\n\t\t\t\treturn 'empty'\n\t\t\telse:\n\t\t\t\tif b'X' in values:\n\t\t\t\t\treturn ('X', sublist.count(b'X'))\n\t\t\t\telse:\n\t\t\t\t\treturn ('O', sublist.count(b'O'))\n\n\t\tdef all_sublists_of_length_four(lst):\n\t\t\tal = []\n\t\t\tfor i in range(0, len(lst) - 3):\n\t\t\t\tal.append(lst[i:i+4].tolist())\n\t\t\treturn al\n\n\t\tdef evaluate(board, piece):\n\t\t\tus_completable = defaultdict(lambda: 0)\n\t\t\tthem_completable = defaultdict(lambda: 0)\n\t\t\tempty_count = 0\n\t\t\tnot_completable_count = 0\n\n\t\t\tall_groups_of_four = []\n\n\t\t\tfor x in range(BOARD_HEIGHT):\n\t\t\t\tall_groups_of_four.extend(all_sublists_of_length_four(board.board_state[x, :]))\n\n\t\t\tfor x in range(BOARD_WIDTH):\n\t\t\t\tall_groups_of_four.extend(all_sublists_of_length_four(board.board_state[:, x]))\n\n\t\t\ty_reflected = board.reflect_board_y()\n\t\t\tfor x in range(-2, 4):\n\t\t\t\tall_groups_of_four.extend(all_sublists_of_length_four(board.board_state.diagonal(x)))\n\t\t\t\tall_groups_of_four.extend(all_sublists_of_length_four(y_reflected.board_state.diagonal(x)))\n\n\t\t\tfor group in all_groups_of_four:\n\t\t\t\tresult = examine_sublist(group)\n\t\t\t\tif result == 'not_completable':\n\t\t\t\t\tnot_completable_count += 1 \n\t\t\t\telif result == 'empty':\n\t\t\t\t\tempty_count += 1\n\t\t\t\telif result[0] == piece:\n\t\t\t\t\tus_completable[result[1]] += 1\n\t\t\t\telse:\n\t\t\t\t\tthem_completable[result[1]] += 1\n\n\t\t\tif us_completable[4] >= 1:\n\t\t\t\treturn 1\n\t\t\telif them_completable[4] >=1:\n\t\t\t\treturn -1\n\t\t\tif not_completable_count == len(all_groups_of_four):\n\t\t\t\treturn 0\n\t\t\telse:\n\t\t\t\treturn (((100 * 
(us_completable[3] - them_completable[3])) + (10 * (us_completable[2] - them_completable[2])) + (us_completable[0] - them_completable[0])) / 6900)\n\n\t\treturn evaluate\n"
] |
[
[
"tensorflow.keras.layers.Dense"
]
] |
javier-ruiz-b/eToro-Anlage-KAP-calculator
|
[
"27ca81292c48de00b468a781405b3e996d212708"
] |
[
"kap_summary.py"
] |
[
"import detailed_table\nimport pandas as pd\nfrom locale import atof\nimport json\n\n# KAP 19\nCOL_PROFIT_STOCKS = 'Aktien G/V'\nCOL_DIVIDENDS_STOCKS = 'Aktien Dividende'\nCOL_PROFIT_CFDS = 'CFD G/V'\nCOL_FEES_CFDS = 'CFD Gebühren'\n\n# KAP 20\nCOL_PROFIT_ON_SALE_STOCKS = 'Aktien - Enthaltene Gewinne aus Aktienveräußerungen'\nCOL_PROFIT_ON_SALE_CFDS = 'CFD - Enthaltene Gewinne aus Aktienveräußerungen'\n\n# KAP 22\nCOL_LOSS_ON_SALE_CFDS = 'CFD G/W - darin enthaltene Verluste aus Kapitalerträgen ohne Aktienveräußerung'\nCOL_FEES_ON_SALE_CFDS = 'CFD Gebühren - darin enthaltene Verluste aus Kapitalerträgen ohne Aktienveräußerung'\n\n# KAP 23\nCOL_LOSS_ON_SALE_STOCKS = 'Aktien G/W - Enthaltene Verluste aus Aktienveräußerungen'\n\ndef roundTo2Decimals(dict):\n result = {}\n for key in dict: \n result[key] = round(dict[key], 2)\n return result\n\ndef calcKapSummary(detailedTable):\n resultColumns = [COL_PROFIT_STOCKS, COL_DIVIDENDS_STOCKS, COL_PROFIT_CFDS, COL_FEES_CFDS,\n COL_PROFIT_ON_SALE_STOCKS, COL_LOSS_ON_SALE_CFDS, COL_FEES_ON_SALE_CFDS, \n COL_LOSS_ON_SALE_STOCKS]\n resultTable = pd.DataFrame(columns=resultColumns)\n result = {}\n result[COL_PROFIT_STOCKS] = 0\n result[COL_DIVIDENDS_STOCKS] = 0\n result[COL_PROFIT_CFDS] = 0\n result[COL_FEES_CFDS] = 0\n result[COL_PROFIT_ON_SALE_STOCKS] = 0\n result[COL_PROFIT_ON_SALE_CFDS] = 0\n result[COL_LOSS_ON_SALE_CFDS] = 0\n result[COL_FEES_ON_SALE_CFDS] = 0\n result[COL_LOSS_ON_SALE_STOCKS] = 0\n\n for index, row in detailedTable.iterrows():\n revenueEUR = row[detailed_table.COL_REVENUE_EUR]\n dividend = row[detailed_table.COL_DIVIDENDS_EUR]\n fees = row[detailed_table.COL_FEES_EUR]\n if row[detailed_table.COL_TYPE] == detailed_table.TYPE_STOCK:\n result[COL_PROFIT_STOCKS] += revenueEUR - dividend\n result[COL_DIVIDENDS_STOCKS] += dividend\n if revenueEUR > 0:\n result[COL_PROFIT_ON_SALE_STOCKS] += revenueEUR - dividend\n else:\n result[COL_LOSS_ON_SALE_STOCKS] += revenueEUR - dividend\n\n elif row[detailed_table.COL_TYPE] == detailed_table.TYPE_CFD:\n result[COL_PROFIT_CFDS] += revenueEUR - fees\n result[COL_FEES_CFDS] += fees\n result[COL_FEES_ON_SALE_CFDS] += fees\n if revenueEUR > 0:\n result[COL_PROFIT_ON_SALE_CFDS] += revenueEUR - fees\n else:\n result[COL_LOSS_ON_SALE_CFDS] += revenueEUR - fees\n \n # print(json.dumps(result, indent=1))\n print(json.dumps(roundTo2Decimals(result), ensure_ascii=False, indent=4))\n \n kapResult = {}\n kapResult[\"19. Ausländische Kapitalerträge\"] = result[COL_PROFIT_STOCKS] + result[COL_DIVIDENDS_STOCKS] + result[COL_PROFIT_CFDS] + result[COL_FEES_CFDS]\n kapResult[\"20. Enthaltene Gewinne aus Aktienveräußerungen\"] = result[COL_PROFIT_ON_SALE_STOCKS] + result[COL_PROFIT_ON_SALE_CFDS]\n kapResult[\"22. Enthaltene Verluste ohne Verluste aus Aktienveräußerungen\"] = result[COL_LOSS_ON_SALE_CFDS] + result[COL_FEES_ON_SALE_CFDS]\n kapResult[\"23. Enthaltene Verluste aus Aktienveräußerungen\"] = result[COL_LOSS_ON_SALE_STOCKS]\n\n print(\"\\nAnlage KAP\") \n print(json.dumps(roundTo2Decimals(kapResult), ensure_ascii=False, indent=4))"
] |
[
[
"pandas.DataFrame"
]
] |
ahmadki/SSD-ResNet50
|
[
"754ee6a3e9a5ba563d8ec6284c2745850ad5dcda"
] |
[
"ssd/coco_pipeline.py"
] |
[
"# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport ctypes\nimport time\nimport logging\n\nimport numpy as np\nimport torch\n\n# DALI imports\nimport nvidia.dali as dali\nfrom nvidia.dali.pipeline import Pipeline\n\n\nclass COCOPipeline(Pipeline):\n def __init__(self, batch_size, file_root, annotations_file, default_boxes,\n device_id, num_shards,\n output_fp16=False, output_nhwc=False, pad_output=False,\n num_threads=1, seed=15, figsize=300):\n super(COCOPipeline, self).__init__(batch_size=batch_size,\n device_id=device_id,\n num_threads=num_threads,\n seed=seed)\n\n if torch.distributed.is_initialized():\n shard_id = torch.distributed.get_rank()\n else:\n shard_id = 0\n\n # Data loader and image decoder\n self.input = dali.ops.readers.COCO(file_root=file_root,\n annotations_file=annotations_file,\n shard_id=shard_id,\n num_shards=num_shards,\n ratio=True,\n ltrb=True,\n shuffle_after_epoch=True,\n skip_empty=True)\n self.decode_slice = dali.ops.decoders.ImageSlice(device=\"cpu\",\n output_type=dali.types.RGB)\n\n # Augumentation techniques\n ## Random crop\n self.crop = dali.ops.RandomBBoxCrop(device=\"cpu\",\n aspect_ratio=[0.5, 2.0],\n thresholds=[0, 0.1, 0.3, 0.5, 0.7, 0.9],\n scaling=[0.3, 1.0],\n bbox_layout=\"xyXY\",\n allow_no_crop=True,\n num_attempts=1)\n ## Color twist\n self.hsv = dali.ops.Hsv(device=\"gpu\",\n dtype=dali.types.FLOAT) # use float to avoid clipping and quantizing the intermediate result\n self.bc = dali.ops.BrightnessContrast(device=\"gpu\",\n contrast_center=128, # input is in the [0, 255] range\n dtype=dali.types.UINT8)\n ## Cropping and normalization\n dtype = dali.types.FLOAT16 if output_fp16 else dali.types.FLOAT\n output_layout = dali.types.NHWC if output_nhwc else dali.types.NCHW\n self.normalize = dali.ops.CropMirrorNormalize(\n device=\"gpu\",\n crop=(figsize, figsize),\n mean=[0.0, 0.0, 0.0],\n std=[255.0, 255.0, 255.0],\n mirror=0,\n dtype=dtype,\n output_layout=output_layout,\n pad_output=pad_output)\n ## Flipping\n self.flip = dali.ops.Flip(device=\"cpu\")\n self.bbflip = dali.ops.BbFlip(device=\"cpu\", ltrb=True)\n\n # Resize\n self.resize = dali.ops.Resize(device=\"cpu\",\n resize_x=figsize,\n resize_y=figsize)\n\n # Random variables\n self.rng1 = dali.ops.random.Uniform(range=[0.5, 1.5])\n self.rng2 = dali.ops.random.Uniform(range=[0.875, 1.125])\n self.rng3 = dali.ops.random.Uniform(range=[-0.5, 0.5])\n self.flip_coin = dali.ops.random.CoinFlip(probability=0.5)\n\n # bbox encoder\n self.anchors = default_boxes(order='ltrb').cpu().numpy().flatten().tolist()\n self.box_encoder = dali.ops.BoxEncoder(device=\"cpu\",\n criteria=0.5,\n anchors=self.anchors)\n\n def define_graph(self):\n saturation = self.rng1()\n contrast = self.rng1()\n brightness = self.rng2()\n hue = self.rng3()\n coin_rnd = self.flip_coin()\n\n inputs, bboxes, labels = self.input(name=\"Reader\")\n crop_begin, crop_size, bboxes, labels = self.crop(bboxes, labels)\n images = self.decode_slice(inputs, 
crop_begin, crop_size)\n\n images = self.flip(images, horizontal=coin_rnd)\n bboxes = self.bbflip(bboxes, horizontal=coin_rnd)\n images = self.resize(images)\n images = images.gpu()\n\n images = self.hsv(images, hue=hue, saturation=saturation)\n images = self.bc(images, brightness=brightness, contrast=contrast)\n\n images = self.normalize(images)\n bboxes, labels = self.box_encoder(bboxes, labels)\n\n # bboxes and images and labels on GPU\n return (images, bboxes.gpu(), labels.gpu())\n\nto_torch_type = {\n np.dtype(np.float32) : torch.float32,\n np.dtype(np.float64) : torch.float64,\n np.dtype(np.float16) : torch.float16,\n np.dtype(np.uint8) : torch.uint8,\n np.dtype(np.int8) : torch.int8,\n np.dtype(np.int16) : torch.int16,\n np.dtype(np.int32) : torch.int32,\n np.dtype(np.int64) : torch.int64\n}\n\ndef feed_ndarray(dali_tensor, arr):\n \"\"\"\n Copy contents of DALI tensor to pyTorch's Tensor.\n\n Parameters\n ----------\n `dali_tensor` : nvidia.dali.backend.TensorCPU or nvidia.dali.backend.TensorGPU\n Tensor from which to copy\n `arr` : torch.Tensor\n Destination of the copy\n \"\"\"\n assert dali_tensor.shape() == list(arr.size()), \\\n (\"Shapes do not match: DALI tensor has size {0}\"\n \", but PyTorch Tensor has size {1}\".format(dali_tensor.shape(), list(arr.size())))\n #turn raw int to a c void pointer\n c_type_pointer = ctypes.c_void_p(arr.data_ptr())\n dali_tensor.copy_to_external(c_type_pointer)\n return arr\n\nclass DALICOCOIterator(object):\n \"\"\"\n COCO DALI iterator for pyTorch.\n\n Parameters\n ----------\n pipelines : list of nvidia.dali.pipeline.Pipeline\n List of pipelines to use\n size : int\n Epoch size.\n \"\"\"\n def __init__(self, pipelines, size):\n if not isinstance(pipelines, list):\n pipelines = [pipelines]\n\n self._num_gpus = len(pipelines)\n assert pipelines is not None, \"Number of provided pipelines has to be at least 1\"\n self.batch_size = pipelines[0].max_batch_size\n self._size = size\n self._pipes = pipelines\n\n # Build all pipelines\n for p in self._pipes:\n p.build()\n\n # Use double-buffering of data batches\n self._data_batches = [[None, None, None, None] for i in range(self._num_gpus)]\n self._counter = 0\n self._current_data_batch = 0\n self.output_map = [\"image\", \"bboxes\", \"labels\"]\n\n # We need data about the batches (like shape information),\n # so we need to run a single batch as part of setup to get that info\n self._first_batch = None\n self._first_batch = self.next()\n\n def __next__(self):\n if self._first_batch is not None:\n batch = self._first_batch\n self._first_batch = None\n return batch\n if self._counter > self._size:\n raise StopIteration\n\n # Gather outputs\n outputs = []\n for p in self._pipes:\n p._prefetch()\n for p in self._pipes:\n outputs.append(p.share_outputs())\n for i in range(self._num_gpus):\n dev_id = self._pipes[i].device_id\n out_images = []\n bboxes = []\n labels = []\n # segregate outputs into image/labels/bboxes entries\n for j, out in enumerate(outputs[i]):\n if self.output_map[j] == \"image\":\n out_images.append(out)\n elif self.output_map[j] == \"bboxes\":\n bboxes.append(out)\n elif self.output_map[j] == \"labels\":\n labels.append(out)\n\n # Change DALI TensorLists into Tensors\n images = [x.as_tensor() for x in out_images]\n images_shape = [x.shape() for x in images]\n\n # Prepare bboxes shapes\n bboxes_shape = []\n for j in range(len(bboxes)):\n bboxes_shape.append([])\n for k in range(len(bboxes[j])):\n bboxes_shape[j].append(bboxes[j][k].shape())\n\n # Prepare labels shapes and offsets\n 
labels_shape = []\n bbox_offsets = []\n\n torch.cuda.synchronize()\n for j in range(len(labels)):\n labels_shape.append([])\n bbox_offsets.append([0])\n for k in range(len(labels[j])):\n lshape = labels[j][k].shape()\n bbox_offsets[j].append(bbox_offsets[j][k] + lshape[0])\n labels_shape[j].append(lshape)\n\n # We always need to alocate new memory as bboxes and labels varies in shape\n images_torch_type = to_torch_type[np.dtype(images[0].dtype())]\n bboxes_torch_type = to_torch_type[np.dtype(bboxes[0][0].dtype())]\n labels_torch_type = to_torch_type[np.dtype(labels[0][0].dtype())]\n\n torch_gpu_device = torch.device('cuda', dev_id)\n torch_cpu_device = torch.device('cpu')\n\n pyt_images = [torch.zeros(shape, dtype=images_torch_type, device=torch_gpu_device) for shape in images_shape]\n pyt_bboxes = [[torch.zeros(shape, dtype=bboxes_torch_type, device=torch_gpu_device) for shape in shape_list] for shape_list in bboxes_shape]\n pyt_labels = [[torch.zeros(shape, dtype=labels_torch_type, device=torch_gpu_device) for shape in shape_list] for shape_list in labels_shape]\n pyt_offsets = [torch.zeros(len(offset), dtype=torch.int32, device=torch_cpu_device) for offset in bbox_offsets]\n\n self._data_batches[i][self._current_data_batch] = (pyt_images, pyt_bboxes, pyt_labels, pyt_offsets)\n\n # Copy data from DALI Tensors to torch tensors\n for j, i_arr in enumerate(images):\n feed_ndarray(i_arr, pyt_images[j])\n\n for j, b_list in enumerate(bboxes):\n for k in range(len(b_list)):\n if (pyt_bboxes[j][k].shape[0] != 0):\n feed_ndarray(b_list[k], pyt_bboxes[j][k])\n pyt_bboxes[j] = torch.cat(pyt_bboxes[j])\n\n for j, l_list in enumerate(labels):\n for k in range(len(l_list)):\n if (pyt_labels[j][k].shape[0] != 0):\n feed_ndarray(l_list[k], pyt_labels[j][k])\n pyt_labels[j] = torch.cat(pyt_labels[j])\n\n for j in range(len(pyt_offsets)):\n pyt_offsets[j] = torch.IntTensor(bbox_offsets[j])\n\n for p in self._pipes:\n p.release_outputs()\n p.schedule_run()\n\n copy_db_index = self._current_data_batch\n # Change index for double buffering\n self._current_data_batch = (self._current_data_batch + 1) % 2\n self._counter += self._num_gpus * self.batch_size\n return [db[copy_db_index] for db in self._data_batches]\n\n def next(self):\n \"\"\"\n Returns the next batch of data.\n \"\"\"\n return self.__next__();\n\n def __iter__(self):\n return self\n\n def reset(self):\n \"\"\"\n Resets the iterator after the full epoch.\n DALI iterators do not support resetting before the end of the epoch\n and will ignore such request.\n \"\"\"\n if self._counter > self._size:\n self._counter = self._counter % self._size\n else:\n logging.warning(\"DALI iterator does not support resetting while epoch is not finished. Ignoring...\")\n"
] |
[
[
"torch.cuda.synchronize",
"torch.zeros",
"torch.cat",
"torch.distributed.is_initialized",
"numpy.dtype",
"torch.IntTensor",
"torch.device",
"torch.distributed.get_rank"
]
] |
XuBLin/590PresentGroupProject
|
[
"7126a43ed53f7ae528c350a289273884eaaf160e"
] |
[
"genre_classification/evaluation.py"
] |
[
"# Load the data from dataset and load the model we have built.\n# Make evaluation of the 1D CNN model\nimport os\nfrom sklearn.preprocessing import LabelBinarizer\nimport utils\nfrom sklearn.model_selection import train_test_split\nfrom keras.models import load_model\n\n# load data\nos.environ['AUDIO_DIR']='../data/fma_small/'\nAUDIO_DIR = os.environ.get('AUDIO_DIR')\ntracks = utils.load('../data/fma_metadata/tracks.csv')\nfeatures = utils.load('../data/fma_metadata/features.csv')\n\nsubset = tracks.index[tracks['set', 'subset'] <= 'medium']\ntracks = tracks.loc[subset]\n\n# load model\nonedCNN_model = load_model('oneDCNN.h5')\nonedCNN_model.summary()\n\n# get feature and label\nfeatures_all = features.loc[subset]\nlabels_onehot = LabelBinarizer().fit_transform(tracks['track', 'genre_top'])\nfeatures_all=features_all.to_numpy()\n\n# data partition\nx_train, x_test, y_train, y_test = train_test_split(features_all, labels_onehot, test_size=0.1)\nx_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.1)\n\nx_train=x_train.reshape((x_train.shape[0],x_train.shape[1],1))\nx_test = x_test.reshape((x_test.shape[0], x_test.shape[1],1))\nx_val=x_val.reshape((x_val.shape[0], x_val.shape[1],1))\n\n\ndef evaluate(model,title=''):\n print(\"\\n\")\n print(\"---------------------------\")\n print(title)\n print(\"---------------------------\")\n model.summary()\n train_loss, train_acc = model.evaluate(x_train, y_train)\n val_loss, val_acc = model.evaluate(x_val, y_val)\n test_loss, test_acc = model.evaluate(x_test, y_test)\n print('\\nEvaluation Metric:')\n print('train loss:',train_loss,' train accuracy:',train_acc)\n print('validation loss:',val_loss, 'validation accuracy:',val_acc)\n print('test loss:',test_loss, ' test accuracy:',test_acc)\n\n# model evaluation\nevaluate(onedCNN_model,title='1DCNN')\n\n\n"
] |
[
[
"sklearn.model_selection.train_test_split",
"sklearn.preprocessing.LabelBinarizer"
]
] |
bwalker1/GAT
|
[
"fb3c6ca528ad773bfcfc22eb7e6ec6a5197d6e66"
] |
[
"execute_cora_sparse.py"
] |
[
"import time\nimport scipy.sparse as sp\nimport numpy as np\nimport tensorflow as tf\nimport argparse\n\nfrom gat import GAT\nfrom gat import SpGAT\nfrom utils import process\n\ncheckpt_file = 'pre_trained/cora/mod_cora.ckpt'\n\ndataset = 'cora'\n\n# training params\nbatch_size = 1\nnb_epochs = 100000\npatience = 100\nlr = 0.005 # learning rate\nl2_coef = 0.0005 # weight decay\nhid_units = [8] # numbers of hidden units per each attention head in each layer\nn_heads = [8, 1] # additional entry for the output layer\nresidual = False\nnonlinearity = tf.nn.elu\n# model = GAT\nmodel = SpGAT\n\nprint('Dataset: ' + dataset)\nprint('----- Opt. hyperparams -----')\nprint('lr: ' + str(lr))\nprint('l2_coef: ' + str(l2_coef))\nprint('----- Archi. hyperparams -----')\nprint('nb. layers: ' + str(len(hid_units)))\nprint('nb. units per layer: ' + str(hid_units))\nprint('nb. attention heads: ' + str(n_heads))\nprint('residual: ' + str(residual))\nprint('nonlinearity: ' + str(nonlinearity))\nprint('model: ' + str(model))\n\nsparse = True\n\nadj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask = process.load_data(dataset)\nfeatures, spars = process.preprocess_features(features)\n\nnb_nodes = features.shape[0]\nft_size = features.shape[1]\nnb_classes = y_train.shape[1]\n\nfeatures = features[np.newaxis]\ny_train = y_train[np.newaxis]\ny_val = y_val[np.newaxis]\ny_test = y_test[np.newaxis]\ntrain_mask = train_mask[np.newaxis]\nval_mask = val_mask[np.newaxis]\ntest_mask = test_mask[np.newaxis]\n\nif sparse:\n biases = process.preprocess_adj_bias(adj)\nelse:\n adj = adj.todense()\n adj = adj[np.newaxis]\n biases = process.adj_to_bias(adj, [nb_nodes], nhood=1)\n\nwith tf.Graph().as_default():\n with tf.name_scope('input'):\n ftr_in = tf.placeholder(dtype=tf.float32, shape=(batch_size, nb_nodes, ft_size))\n if sparse:\n #bias_idx = tf.placeholder(tf.int64)\n #bias_val = tf.placeholder(tf.float32)\n #bias_shape = tf.placeholder(tf.int64)\n bias_in = tf.sparse_placeholder(dtype=tf.float32)\n else:\n bias_in = tf.placeholder(dtype=tf.float32, shape=(batch_size, nb_nodes, nb_nodes))\n lbl_in = tf.placeholder(dtype=tf.int32, shape=(batch_size, nb_nodes, nb_classes))\n msk_in = tf.placeholder(dtype=tf.int32, shape=(batch_size, nb_nodes))\n attn_drop = tf.placeholder(dtype=tf.float32, shape=())\n ffd_drop = tf.placeholder(dtype=tf.float32, shape=())\n is_train = tf.placeholder(dtype=tf.bool, shape=())\n\n logits = model.inference(ftr_in, nb_classes, nb_nodes, is_train,\n attn_drop, ffd_drop,\n bias_mat=bias_in,\n hid_units=hid_units, n_heads=n_heads,\n residual=residual, activation=nonlinearity)\n log_resh = tf.reshape(logits, [-1, nb_classes])\n lab_resh = tf.reshape(lbl_in, [-1, nb_classes])\n msk_resh = tf.reshape(msk_in, [-1])\n loss = model.masked_softmax_cross_entropy(log_resh, lab_resh, msk_resh)\n accuracy = model.masked_accuracy(log_resh, lab_resh, msk_resh)\n\n train_op = model.training(loss, lr, l2_coef)\n\n saver = tf.train.Saver()\n\n init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())\n\n vlss_mn = np.inf\n vacc_mx = 0.0\n curr_step = 0\n\n with tf.Session() as sess:\n sess.run(init_op)\n\n train_loss_avg = 0\n train_acc_avg = 0\n val_loss_avg = 0\n val_acc_avg = 0\n\n for epoch in range(nb_epochs):\n tr_step = 0\n tr_size = features.shape[0]\n\n while tr_step * batch_size < tr_size:\n if sparse:\n bbias = biases\n else:\n bbias = biases[tr_step*batch_size:(tr_step+1)*batch_size]\n\n _, loss_value_tr, acc_tr = sess.run([train_op, loss, accuracy],\n 
feed_dict={\n ftr_in: features[tr_step*batch_size:(tr_step+1)*batch_size],\n bias_in: bbias,\n lbl_in: y_train[tr_step*batch_size:(tr_step+1)*batch_size],\n msk_in: train_mask[tr_step*batch_size:(tr_step+1)*batch_size],\n is_train: True,\n attn_drop: 0.6, ffd_drop: 0.6})\n train_loss_avg += loss_value_tr\n train_acc_avg += acc_tr\n tr_step += 1\n\n vl_step = 0\n vl_size = features.shape[0]\n\n while vl_step * batch_size < vl_size:\n if sparse:\n bbias = biases\n else:\n bbias = biases[vl_step*batch_size:(vl_step+1)*batch_size]\n loss_value_vl, acc_vl = sess.run([loss, accuracy],\n feed_dict={\n ftr_in: features[vl_step*batch_size:(vl_step+1)*batch_size],\n bias_in: bbias,\n lbl_in: y_val[vl_step*batch_size:(vl_step+1)*batch_size],\n msk_in: val_mask[vl_step*batch_size:(vl_step+1)*batch_size],\n is_train: False,\n attn_drop: 0.0, ffd_drop: 0.0})\n val_loss_avg += loss_value_vl\n val_acc_avg += acc_vl\n vl_step += 1\n\n print('Training: loss = %.5f, acc = %.5f | Val: loss = %.5f, acc = %.5f' %\n (train_loss_avg/tr_step, train_acc_avg/tr_step,\n val_loss_avg/vl_step, val_acc_avg/vl_step))\n\n if val_acc_avg/vl_step >= vacc_mx or val_loss_avg/vl_step <= vlss_mn:\n if val_acc_avg/vl_step >= vacc_mx and val_loss_avg/vl_step <= vlss_mn:\n vacc_early_model = val_acc_avg/vl_step\n vlss_early_model = val_loss_avg/vl_step\n saver.save(sess, checkpt_file)\n vacc_mx = np.max((val_acc_avg/vl_step, vacc_mx))\n vlss_mn = np.min((val_loss_avg/vl_step, vlss_mn))\n curr_step = 0\n else:\n curr_step += 1\n if curr_step == patience:\n print('Early stop! Min loss: ', vlss_mn, ', Max accuracy: ', vacc_mx)\n print('Early stop model validation loss: ', vlss_early_model, ', accuracy: ', vacc_early_model)\n break\n\n train_loss_avg = 0\n train_acc_avg = 0\n val_loss_avg = 0\n val_acc_avg = 0\n\n saver.restore(sess, checkpt_file)\n\n ts_size = features.shape[0]\n ts_step = 0\n ts_loss = 0.0\n ts_acc = 0.0\n\n while ts_step * batch_size < ts_size:\n if sparse:\n bbias = biases\n else:\n bbias = biases[ts_step*batch_size:(ts_step+1)*batch_size]\n loss_value_ts, acc_ts = sess.run([loss, accuracy],\n feed_dict={\n ftr_in: features[ts_step*batch_size:(ts_step+1)*batch_size],\n bias_in: bbias,\n lbl_in: y_test[ts_step*batch_size:(ts_step+1)*batch_size],\n msk_in: test_mask[ts_step*batch_size:(ts_step+1)*batch_size],\n is_train: False,\n attn_drop: 0.0, ffd_drop: 0.0})\n ts_loss += loss_value_ts\n ts_acc += acc_ts\n ts_step += 1\n\n print('Test loss:', ts_loss/ts_step, '; Test accuracy:', ts_acc/ts_step)\n\n sess.close()\n"
] |
[
[
"tensorflow.Graph",
"tensorflow.sparse_placeholder",
"tensorflow.local_variables_initializer",
"numpy.min",
"tensorflow.reshape",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"numpy.max",
"tensorflow.name_scope",
"tensorflow.Session",
"tensorflow.train.Saver"
]
] |
gmelodie/adventofcode2019
|
[
"4f18945b8b671590d6998fa7df8c373fbc641dbf"
] |
[
"day11/part2/painting.py"
] |
[
"import sys\nimport numpy as np\nfrom PIL import Image\nimport intcode\n\nDEBUG = False\n\ndef turn_right(direction):\n if direction == \"up\":\n return \"right\"\n elif direction == \"right\":\n return \"down\"\n elif direction == \"down\":\n return \"left\"\n elif direction == \"left\":\n return \"up\"\n\n\ndef turn_left(direction):\n if direction == \"up\":\n return \"left\"\n elif direction == \"right\":\n return \"up\"\n elif direction == \"down\":\n return \"right\"\n elif direction == \"left\":\n return \"down\"\n\n\ndef move(pos, direction):\n new_pos = pos\n if direction == \"up\":\n new_pos[0] -= 1\n elif direction == \"right\":\n new_pos[1] += 1\n elif direction == \"down\":\n new_pos[0] += 1\n elif direction == \"left\":\n new_pos[1] -= 1\n\n if new_pos[0] < 0 or new_pos[1] < 0:\n print(\"error: unexpected position\", new_pos)\n exit(2)\n\n return new_pos\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 2:\n print(\"usage: python3 painting.py <intcode_file>\")\n exit(1)\n\n pos = [500,500]\n direction = \"up\"\n painted = np.zeros((1000,1000))\n panel = np.zeros((1000,1000))\n\n # Start on a white panel\n panel[pos[0]][pos[1]] = 1\n\n instructions = intcode.load_instructions(sys.argv[1])\n\n idx = -1\n new_idx = 0\n rboffset = 0\n while new_idx != -1:\n intcode.OUTPUT = []\n idx = new_idx\n if DEBUG:\n print('RBOFFSET:', rboffset)\n new_idx, rboffset = intcode.execute(instructions, \\\n startidx=idx, \\\n rboffset=rboffset,\\\n inputdata=[panel[pos[0]][pos[1]]])\n\n if new_idx == -1: # program halted\n break\n\n # paint current position\n color = intcode.OUTPUT[0]\n painted[pos[0]][pos[1]] = 1\n panel[pos[0]][pos[1]] = int(color)\n\n if DEBUG:\n print('Painted', pos[0], pos[1], color)\n print('Turning', intcode.OUTPUT[1])\n\n # move to next position\n turn_dir = intcode.OUTPUT[1]\n if turn_dir == 0:\n direction = turn_left(direction)\n elif turn_dir == 1:\n direction = turn_right(direction)\n pos = move(pos, direction)\n\n img = Image.fromarray(np.uint8(panel * 255) , 'L')\n img = img.resize((2000, 2000))\n img.show()\n"
] |
[
[
"numpy.uint8",
"numpy.zeros"
]
] |
MeetGandhi/MeetGandhi-Post-hoc-Explainability-of-Deep-Learning-Models-using-Conditional-Adversarial-Networks
|
[
"089226dce6d318247111ea60c2cc15c247b430d2"
] |
[
"braXAI/mean_plots.py"
] |
[
"import matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nimport numpy as np\nimport os\n\nx_train = np.load(\"x_train.npy\")\nx_test = np.load(\"x_test.npy\")\nx = np.concatenate((x_train, x_test))\n\ny_train = np.load(\"y_train.npy\")\ny_test = np.load(\"y_test.npy\")\ny = np.concatenate((y_train, y_test))\n\nclasses = [0, 1]\npd={0:'bogus', 1:'real'}\n\n##dmints = [-8,-5,-3,-2.5,-2,-1.5,-1,-0.5,-0.3,-0.2,-0.1,0,0.1,0.2,0.3,0.5,1,1.5,2,2.5,3,5,8]\n##dtints = [0,1.0/145,2.0/145,3.0/145,4.0/145,1.0/25,2.0/25,3.0/25,1.5,2.5,3.5,4.5,5.5,7,10,20,30,60,90,120,240,600,960,2000,4000]\n##\n##xloc=np.arange(25)\n##yloc=np.arange(23)\n##yloc=yloc[::-1]\n##for i in range(len(dtints)):\n## dtints[i]=round(dtints[i],3)\n##yloc=yloc-0.5\n##xloc=xloc-0.5\n\nos.mkdir(\"mean_plots/\")\n#plt.rcParams['figure.figsize'] = (18, 6)\nplt.rcParams[\"font.weight\"] = \"bold\"\nplt.rcParams[\"axes.labelweight\"] = \"bold\"\n#plt.rcParams.update({'font.size': 12})\n\nfor cls in classes:\n id_ = np.where(y==cls)[0]\n length = id_.shape[0]\n\n x_sum_1 = np.zeros((63,63))\n x_sum_2 = np.zeros((63,63))\n x_sum_3 = np.zeros((63,63))\n \n for j in range(length):\n x_sum_1 = np.add(x_sum_1, x[id_[j],:,:,0])\n x_sum_2 = np.add(x_sum_2, x[id_[j],:,:,1])\n x_sum_3 = np.add(x_sum_3, x[id_[j],:,:,2])\n\n x_mean = np.zeros((63,63,3))\n\n x_mean[:,:,0] = x_sum_1/length\n x_mean[:,:,1] = x_sum_2/length\n x_mean[:,:,2] = x_sum_3/length\n \n mtype = {0:'SCI',1:'REF',2:'DIFF'}\n\n for m in range(3):\n \n plt.figure()\n fig, ax = plt.subplots(1,1)\n im1 = ax.imshow(x_mean[:,:,m], origin='upper', cmap=plt.cm.bone)\n divider1 = make_axes_locatable(ax)\n cax1 = divider1.append_axes(\"right\", size=\"5%\", pad=0.1)\n #ax.set_xticks(xloc)\n #ax.set_xticklabels(dtints,rotation=90)\n #ax.set_yticks(yloc)\n #ax.set_yticklabels(dmints)\n #ax.set(xlabel=\"dt(days)\",ylabel=\"dm(mag)\")\n fig.colorbar(im1, cax=cax1, boundaries=np.linspace(0,x_mean[:,:,m].max(),10))\n ax.axis(\"off\")\n #plt.suptitle(\"Class: \"+pd[cls]+\" X_id: \"+str(i))\n plt.tight_layout()\n plt.savefig(\"mean_plots/\"+pd[cls]+\"_\"+mtype[m]+\".png\")\n plt.cla()\n plt.clf()\n plt.close()\n\n \n \n"
] |
[
[
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.cla",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"numpy.concatenate",
"numpy.add",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.close",
"numpy.load",
"numpy.zeros",
"numpy.where",
"matplotlib.pyplot.figure"
]
] |
cppxaxa/PyTFObjectDetection
|
[
"6818f672b7d20d0de93b920b6ec29ddbca3d1d0a"
] |
[
"classify_image.py"
] |
[
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport os.path, os\nimport re\nimport sys\nimport tarfile\n\nimport query\n\nimport numpy as np\nfrom six.moves import urllib\nimport tensorflow as tf\n\nFLAGS = None\nDATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' \n\nclass NodeLookup(object):\n\n def __init__(self,\n label_lookup_path=None,\n uid_lookup_path=None):\n if not label_lookup_path:\n label_lookup_path = os.path.join(\n FLAGS.model_dir, 'imagenet_2012_challenge_label_map_proto.pbtxt')\n if not uid_lookup_path:\n uid_lookup_path = os.path.join(\n FLAGS.model_dir, 'imagenet_synset_to_human_label_map.txt')\n self.node_lookup = self.load(label_lookup_path, uid_lookup_path)\n\n def load(self, label_lookup_path, uid_lookup_path):\n if not tf.gfile.Exists(uid_lookup_path):\n tf.logging.fatal('File does not exist %s', uid_lookup_path)\n if not tf.gfile.Exists(label_lookup_path):\n tf.logging.fatal('File does not exist %s', label_lookup_path)\n\n proto_as_ascii_lines = tf.gfile.GFile(uid_lookup_path).readlines()\n uid_to_human = {}\n p = re.compile(r'[n\\d]*[ \\S,]*')\n for line in proto_as_ascii_lines:\n parsed_items = p.findall(line)\n uid = parsed_items[0]\n human_string = parsed_items[2]\n uid_to_human[uid] = human_string\n\n node_id_to_uid = {}\n proto_as_ascii = tf.gfile.GFile(label_lookup_path).readlines()\n for line in proto_as_ascii:\n if line.startswith(' target_class:'):\n target_class = int(line.split(': ')[1])\n if line.startswith(' target_class_string:'):\n target_class_string = line.split(': ')[1]\n node_id_to_uid[target_class] = target_class_string[1:-2]\n\n node_id_to_name = {}\n for key, val in node_id_to_uid.items():\n if val not in uid_to_human:\n tf.logging.fatal('Failed to locate: %s', val)\n name = uid_to_human[val]\n node_id_to_name[key] = name\n\n return node_id_to_name\n\n def id_to_string(self, node_id):\n if node_id not in self.node_lookup:\n return ''\n return self.node_lookup[node_id]\n\n\ndef create_graph():\n with tf.gfile.FastGFile(os.path.join(\n FLAGS.model_dir, 'classify_image_graph_def.pb'), 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='')\n\n\ndef run_inference_on_image(image):\n if not tf.gfile.Exists(image):\n tf.logging.fatal('File does not exist %s', image)\n image_data = tf.gfile.FastGFile(image, 'rb').read()\n\n \n create_graph()\n\n with tf.Session() as sess:\n \n softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')\n predictions = sess.run(softmax_tensor,\n {'DecodeJpeg/contents:0': image_data})\n predictions = np.squeeze(predictions)\n\n \n node_lookup = NodeLookup()\n\n top_k = predictions.argsort()[-FLAGS.num_top_predictions:][::-1]\n for node_id in top_k:\n human_string = node_lookup.id_to_string(node_id)\n score = predictions[node_id]\n print('%s,%.5f\\n' % (human_string, score))\n\n\ndef maybe_download_and_extract():\n dest_directory = FLAGS.model_dir\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (\n filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n filepath, _ = urllib.request.urlretrieve(DATA_URL, 
filepath, _progress)\n print()\n statinfo = os.stat(filepath)\n print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n #filepath = \"inception-2015-12-05.tgz\";\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)\n\n\ndef main(_):\n #print (\"Start\")\n maybe_download_and_extract()\n image = (FLAGS.image_file if FLAGS.image_file else\n os.path.join(FLAGS.model_dir, 'cropped_panda.jpg'))\n run_inference_on_image(image)\n #print (\"Initiated\")\n\nif __name__ == '__main__':\n #print (\"Parsing args\")\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--model_dir',\n type=str,\n default='model_directory',\n help=\"\"\"\n \"\"\"\n )\n parser.add_argument(\n '--image_file',\n type=str,\n default='image.jpg',\n help='Absolute path to image file.'\n )\n parser.add_argument(\n '--num_top_predictions',\n type=int,\n default=1,\n help='Display this many predictions.'\n )\n FLAGS, unparsed = parser.parse_known_args()\n #print (\"Running result\")\n tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)\n \n #print (\"Done\")\n "
] |
[
[
"tensorflow.import_graph_def",
"tensorflow.gfile.Exists",
"tensorflow.gfile.GFile",
"numpy.squeeze",
"tensorflow.Session",
"tensorflow.GraphDef",
"tensorflow.logging.fatal",
"tensorflow.gfile.FastGFile",
"tensorflow.app.run"
]
] |
venkatrajun/qlikds
|
[
"2833c0d611f22da79f1008f2333da97dcd5da5ad"
] |
[
"core/_spacy.py"
] |
[
"import os\nimport re\nimport sys\nimport time\nimport string\nimport pathlib\nimport random\nimport warnings\nimport numpy as np\nimport pandas as pd\n\n# Suppress warnings\nif not sys.warnoptions:\n warnings.simplefilter(\"ignore\")\n\nimport spacy\nfrom spacy.util import minibatch, compounding, decaying\nfrom spacy.gold import GoldParse\nfrom sklearn.model_selection import train_test_split\nimport _utils as utils\nimport ServerSideExtension_pb2 as SSE\n\n# Add Generated folder to module path\nPARENT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(os.path.join(PARENT_DIR, 'generated'))\n\nclass SpaCyForQlik:\n \"\"\"\n A class to implement spaCy natural language processing capabilities for Qlik.\n https://spacy.io/\n \"\"\"\n \n # Counter used to name log files for instances of the class\n log_no = 0\n\n def __init__(self, request, context, path=\"../models/spaCy/\"):\n \"\"\"\n Class initializer.\n :param request: an iterable sequence of RowData\n :param context:\n :param path: a directory path to store persistent models\n :Sets up the model parameters based on the request\n \"\"\"\n \n # Set the request, context and path variables for this object instance\n self.request = request\n self.context = context\n self.path = path\n self.logfile = None\n \n def get_entities(self, default=True):\n \"\"\"\n Use spaCy NER to return named entities from text.\n :\n :default=True uses the pre-trained English language models provided by spaCy. \n :default=False allows the use of a re-trained spaCy model.\n \"\"\"\n\n if default:\n # Interpret the request data based on the expected row and column structure\n row_template = ['strData', 'strData', 'strData']\n col_headers = ['key', 'text', 'kwargs']\n else:\n # A model name is required if using a custom spaCy model\n row_template = ['strData', 'strData', 'strData', 'strData']\n col_headers = ['key', 'text', 'model_name', 'kwargs']\n \n # Create a Pandas DataFrame for the request data\n self.request_df = utils.request_df(self.request, row_template, col_headers)\n\n # Get the argument strings from the request dataframe\n kwargs = self.request_df.loc[0, 'kwargs']\n # Set the relevant parameters using the argument strings\n self._set_params(kwargs)\n\n # Print the request dataframe to the logs\n if self.debug:\n self._print_log(3)\n\n # Extract named entities for each text in the request dataframe\n self.response_df = self._entity_tagger()\n\n # Print the response dataframe to the logs\n if self.debug:\n self._print_log(4)\n\n # Send the reponse table description to Qlik\n self._send_table_description(\"entities\")\n \n return self.response_df\n \n def retrain(self):\n \"\"\"\n Retrain a spacy model for NER using training data.\n \"\"\"\n\n # The request provides training data texts, entities, entity types together with the model name and any other arguments\n row_template = ['strData', 'strData', 'strData', 'strData', 'strData']\n col_headers = ['text', 'entity', 'entity_type', 'model_name', 'kwargs']\n \n # Create a Pandas DataFrame for the request data\n self.request_df = utils.request_df(self.request, row_template, col_headers)\n\n # Get the argument strings from the request dataframe\n kwargs = self.request_df.loc[0, 'kwargs']\n # Set the relevant parameters using the argument strings\n self._set_params(kwargs)\n\n # Check that a model name has been set\n if self.model in [\"en_core_web_sm\"]:\n err = \"Incorrect usage: A name for the custom model needs to be specified.\"\n raise Exception(err)\n \n # Transform the 
training data to spaCy's training data format\n # This call populates the self.train and self.validation (if a test set is specified in the request arguments) objects\n self._prep_data()\n\n # Retrain the model and calculate evaluation metrics\n # This call saves the retrained model to disk and pepares the self.metrics dataframe for the response\n self._retrain_model()\n\n # Prepare the response, which will be the evaluation metrics prepared during retraining\n self.response_df = self.metrics\n\n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.debug:\n self._print_log(11)\n \n # Send the reponse table description to Qlik\n self._send_table_description(\"metrics\")\n \n # Finally send the response\n return self.response_df\n \n def _set_params(self, kwargs):\n \"\"\"\n Set input parameters based on the request.\n :\n :For details refer to the GitHub project: https://github.com/nabeel-oz/qlik-py-tools\n \"\"\"\n \n # Set default values which will be used if execution arguments are not passed\n \n # Default parameters:\n self.debug = False\n self.model = 'en_core_web_sm'\n self.custom = False\n self.base_model = 'en_core_web_sm'\n self.blank = False\n self.epochs = 100\n self.batch_size = compounding(4.0, 32.0, 1.001)\n self.drop = 0.25\n self.test = 0\n \n # Extract the model path if required\n try:\n # Get the model name from the first row in the request_df \n self.model = self.request_df.loc[0, 'model_name']\n\n # Remove the model_name column from the request_df\n self.request_df = self.request_df.drop(['model_name'], axis=1)\n except KeyError:\n pass\n \n # If key word arguments were included in the request, get the parameters and values\n if len(kwargs) > 0:\n \n # Transform the string of arguments into a dictionary\n self.kwargs = utils.get_kwargs(kwargs)\n \n # Set the debug option for generating execution logs\n # Valid values are: true, false\n if 'debug' in self.kwargs:\n self.debug = 'true' == self.kwargs['debug'].lower()\n \n # Additional information is printed to the terminal and logs if the paramater debug = true\n if self.debug:\n # Increment log counter for the class. Each instance of the class generates a new log.\n self.__class__.log_no += 1\n\n # Create a log file for the instance\n # Logs will be stored in ..\\logs\\SpaCy Log <n>.txt\n self.logfile = os.path.join(os.getcwd(), 'logs', 'SpaCy Log {}.txt'.format(self.log_no))\n\n self._print_log(1)\n \n # Set whether the model (if getting named entites) or base model (if retraining) is a custom model\n # i.e. not one of the pre-trained models provided by spaCy\n if 'custom' in self.kwargs:\n self.custom = 'true' == self.kwargs['custom'].lower()\n \n # Set the base model, i.e an existing spaCy model to be retrained.\n if 'base_model' in self.kwargs:\n self.base_model = self.kwargs['base_model'].lower()\n \n # Set the retraining to be done on a blank Language class\n if 'blank' in self.kwargs:\n self.blank = 'true' == self.kwargs['blank'].lower()\n \n # Set the epochs for training the model. \n # This is the the number times that the learning algorithm will work through the entire training dataset.\n # Valid values are an integer e.g. 200\n if 'epochs' in self.kwargs:\n self.epochs = utils.atoi(self.kwargs['epochs'])\n \n # Set the batch size to be used during model training. 
\n # The model's internal parameters will be updated at the end of each batch.\n # Valid values are a single integer or compounding or decaying parameters.\n if 'batch_size' in self.kwargs:\n # The batch size may be a single integer\n try:\n self.batch_size = utils.atoi(self.kwargs['batch_size'])\n # Or a list of floats\n except ValueError:\n sizes = utils.get_kwargs_by_type(self.kwargs['batch_size']) \n\n # If the start < end, batch sizes will be compounded\n if sizes[0] < sizes[1]:\n self.batch_size = compounding(sizes[0], sizes[1], sizes[2])\n # else bath sizes will decay during training\n else:\n self.batch_size = decaying(sizes[0], sizes[1], sizes[2])\n \n # Set the dropout rate for retraining the model\n # This determines the likelihood that a feature or internal representation in the model will be dropped,\n # making it harder for the model to memorize the training data.\n # Valid values are a float lesser than 1.0 e.g. 0.35\n if 'drop' in self.kwargs:\n self.drop = utils.atof(self.kwargs['drop'])\n \n # Set the ratio of data to be used for testing. \n # This data will be held out from training and just used to provide evaluation metrics.\n # Valid values are a float >= zero and < 1.0 e.g. 0.3\n if 'test' in self.kwargs:\n self.test = utils.atof(self.kwargs['test'])\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.debug:\n self._print_log(2)\n \n # Remove the kwargs column from the request_df\n self.request_df = self.request_df.drop(['kwargs'], axis=1)\n\n def _entity_tagger(self):\n \"\"\"\n Get named entities from the spaCy model for each text in the request dataframe.\n \"\"\"\n\n # If this is a custom model, set the path to the directory\n if self.custom:\n self.model = self.path + self.model + \"/\"\n\n # Load the spaCy model\n try:\n nlp = spacy.load(self.model)\n except OSError:\n self.model = self.path + self.model + \"/\"\n nlp = spacy.load(self.model)\n \n # Create an empty list for storing named entities\n entities = []\n\n # Send each text to the model and save the named entities\n for i in range(len(self.request_df)):\n key = self.request_df.loc[i, 'key']\n doc = nlp(self.request_df.loc[i, 'text'])\n\n # Obtain entities, start and end characters, labels and descriptions \n for ent in doc.ents:\n entities.append([key, ent.text, ent.start_char, ent.end_char, ent.label_, spacy.explain(ent.label_)])\n\n # Transform the entities list to a dataframe\n entities = pd.DataFrame(entities, columns=['key', 'entity', 'start', 'end', 'type', 'description'])\n \n return entities\n\n def _prep_data(self):\n \"\"\"\n Prepare the data for retraining the model.\n This transforms the data into spaCy's training data format with tuples of text and entity offsets.\n \"\"\"\n\n # Firstly, we transform the dataframe which has repeated texts with one row per entity...\n # to a list of each text with its corresponding dictionary of entities.\n\n prev_text = ''\n self.train = []\n entities = {\"entities\": []}\n\n # For each sample in the dataframe\n for i in self.request_df.index:\n # Extract the text for the current index\n text = self.request_df.loc[i, 'text'] \n\n # If this is not the first record and we have reached a new text\n if i > 0 and text != prev_text:\n # Add the text and dictionary of entities to the training set\n self.train.append((prev_text, entities))\n\n # Reset variables\n entities = {\"entities\": []}\n prev_text = text\n # For the first record we set previous text to this text\n elif i == 0:\n prev_text = text\n\n # Extract 
the entity and entity type for the current index\n entity = (self.request_df.loc[i, 'entity'], self.request_df.loc[i, 'entity_type'])\n\n # Add entity to the entities dictionary \n entities[\"entities\"].append(entity)\n\n # Add the final text and dictionary of entities to the training set\n self.train.append((prev_text, entities))\n \n # Print the semi-transformed data to the logs\n if self.debug:\n self._print_log(6)\n\n # Complete the data prep by calculating entity offsets and finalizing the format for spaCy\n\n # Format the training data for spaCy\n for sample in self.train:\n # Get the text and named entities for the current sample\n text = sample[0]\n entities = sample[1][\"entities\"]\n entity_boundaries = []\n \n # For each entity\n for entity in entities:\n \n # Set up a regex pattern to look for the entity w.r.t. word boundaries \n pattern = re.compile(r\"\\b\" + entity[0] + r\"\\b\")\n \n # Find all occurrences of the entity in the text\n for match in re.finditer(pattern, text):\n entity_boundaries.append((match.start(), match.end(), entity[1]))\n \n # Add the entity boundaries to the sample\n sample[1][\"entities\"] = entity_boundaries\n\n # If required, split the data into training and testing sets\n if self.test > 0:\n self.train, self.validation = train_test_split(self.train, test_size=self.test)\n # Otherwise use the entire dataset for training\n else:\n self.validation = None\n\n # Print the final training data to the logs\n if self.debug:\n self._print_log(6)\n \n def _retrain_model(self):\n \"\"\"\n Update an existing spaCy model with labelled training data.\n \"\"\"\n\n # Load the model, set up the pipeline and train the entity recognizer:\n \n # Load existing spaCy model\n if not self.blank:\n # If this is a custom model, set the path to the directory\n if self.custom:\n self.base_model = self.path + self.base_model + \"/\"\n \n nlp = spacy.load(self.base_model) \n # If the parameter blank=true is passed we start with a blank Language class, e.g. 
en\n else:\n nlp = spacy.blank(self.base_model) \n\n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.debug:\n self._print_log(7)\n\n # create the built-in pipeline components and add them to the pipeline\n\n # nlp.create_pipe works for built-ins that are registered with spaCy\n if \"ner\" not in nlp.pipe_names:\n ner = nlp.create_pipe(\"ner\")\n nlp.add_pipe(ner, last=True)\n # otherwise, get it so we can add labels\n else:\n ner = nlp.get_pipe(\"ner\")\n\n # add labels\n for _, annotations in self.train:\n for ent in annotations.get(\"entities\"):\n ner.add_label(ent[2])\n\n # Retrain the model:\n \n # get names of other pipes to disable them during training\n other_pipes = [pipe for pipe in nlp.pipe_names if pipe != \"ner\"]\n with nlp.disable_pipes(*other_pipes): # only train NER\n \n # Setup lists to store the loss for each epoch\n self.losses_train = []\n self.losses_test = []\n \n # reset and initialize the weights randomly – but only if we're\n # training a new model\n if self.blank:\n nlp.begin_training()\n for epoch in range(self.epochs): \n random.shuffle(self.train)\n losses = {}\n # batch up the examples using spaCy's minibatch\n batches = minibatch(self.train, size=self.batch_size)\n for batch in batches:\n texts, annotations = zip(*batch)\n nlp.update(\n texts, # batch of texts\n annotations, # batch of annotations\n drop=self.drop, # dropout - make it harder to memorise data\n losses=losses,\n )\n # Store loss for the epoch to a list\n self.losses_train.append(('Epoch {}'.format(epoch+1), losses['ner']))\n\n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.debug:\n self._print_log(8)\n \n # If a test dataset is available, calculate losses for it as well\n if self.validation is not None:\n losses = {}\n\n # batch up the examples using spaCy's minibatch\n batches = minibatch(self.validation, size=self.batch_size)\n for batch in batches:\n texts, annotations = zip(*batch)\n # Get losses for the test data without updating the model \n nlp.update(\n texts, # batch of texts\n annotations, # batch of annotations\n sgd = None, # do not update model weights\n losses=losses,\n )\n # Store loss for the epoch to a list\n self.losses_test.append(('Epoch {}'.format(epoch+1), losses['ner']))\n\n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.debug:\n self._print_log(9)\n\n # Save model to output directory:\n \n output_dir = pathlib.Path(self.path + self.model + '/')\n if not output_dir.exists():\n output_dir.mkdir(parents=True, exist_ok=False)\n nlp.to_disk(output_dir)\n\n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.debug:\n self._print_log(10)\n\n # Evaluate the model:\n\n # Prepare spaCy docs and golds for getting evaluation metrics\n docs_golds = []\n for sample in self.train:\n doc = nlp.make_doc(sample[0])\n gold = GoldParse(doc, entities=sample[1][\"entities\"])\n docs_golds.append((doc, gold))\n \n # Get scores for training data\n scorer_train = nlp.evaluate(docs_golds)\n # Add the training scores to evaluation metrics\n self.metrics = self._prep_scores(scorer_train)\n\n # Get scores for testing data and add to the evaluation metrics\n if self.validation is not None:\n docs_golds = []\n for sample in self.validation:\n doc = nlp.make_doc(sample[0])\n gold = GoldParse(doc, entities=sample[1][\"entities\"])\n docs_golds.append((doc, gold))\n \n scorer_test = nlp.evaluate(docs_golds)\n 
self.metrics = pd.concat([self.metrics, self._prep_scores(scorer_test, subset='test')], ignore_index=True)\n\n # Add loss metrics\n self.metrics = pd.concat([self.metrics, self._prep_losses(self.losses_train)], ignore_index=True)\n if self.validation is not None:\n self.metrics = pd.concat([self.metrics, self._prep_losses(self.losses_test, subset='test')], ignore_index=True)\n \n def _prep_scores(self, scorer, subset='train'):\n \"\"\"\n Prepare score metrics using a spaCy scrorer \n Returns a dataframe formatted for output\n \"\"\"\n columns = [\"metric\", \"value\"]\n\n # Prepare scorer metrics\n scores = {\"Precision\": scorer.scores[\"ents_p\"], \"Recall\": scorer.scores[\"ents_r\"], \"F-score\": scorer.scores[\"ents_f\"]}\n metrics_df = pd.DataFrame([(k, v) for k, v in scores.items()], columns=columns)\n\n metrics_df.loc[:,'model'] = self.model\n metrics_df.loc[:,'subset'] = subset\n metrics_df = metrics_df[['model', 'subset'] + columns]\n\n return metrics_df\n \n def _prep_losses(self, losses, subset='train'):\n \"\"\"\n Prepare loss metrics using a list of tuples of the format: (epoch, loss)\n Where epoch is an integer and loss is a float \n Returns a dataframe formatted for output\n \"\"\"\n columns = [\"metric\", \"value\"]\n metrics_df = pd.DataFrame(losses, columns=columns)\n metrics_df.loc[:,'model'] = self.model\n metrics_df.loc[:,'subset'] = subset\n metrics_df = metrics_df[['model', 'subset'] + columns]\n\n return metrics_df\n\n def _send_table_description(self, variant):\n \"\"\"\n Send the table description to Qlik as meta data.\n Used when the SSE is called from the Qlik load script.\n \"\"\"\n \n # Set up the table description to send as metadata to Qlik\n self.table = SSE.TableDescription()\n self.table.name = \"SSE-Response-spaCy\"\n self.table.numberOfRows = len(self.response_df)\n\n # Set up fields for the table\n if variant == \"entities\":\n self.table.fields.add(name=\"key\")\n self.table.fields.add(name=\"entity\")\n self.table.fields.add(name=\"start\", dataType=1)\n self.table.fields.add(name=\"end\", dataType=1)\n self.table.fields.add(name=\"type\")\n self.table.fields.add(name=\"description\")\n elif variant == \"metrics\":\n self.table.fields.add(name=\"model_name\")\n self.table.fields.add(name=\"subset\")\n self.table.fields.add(name=\"metric\")\n self.table.fields.add(name=\"value\", dataType=1)\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.debug:\n self._print_log(5)\n \n # Send table description\n table_header = (('qlik-tabledescription-bin', self.table.SerializeToString()),)\n self.context.send_initial_metadata(table_header)\n \n def _print_log(self, step):\n \"\"\"\n Output useful information to stdout and the log file if debugging is required.\n :step: Print the corresponding step in the log\n \"\"\"\n \n # Set mode to append to log file\n mode = 'a'\n\n if self.logfile is None:\n # Increment log counter for the class. 
Each instance of the class generates a new log.\n self.__class__.log_no += 1\n\n # Create a log file for the instance\n # Logs will be stored in ..\\logs\\SKLearn Log <n>.txt\n self.logfile = os.path.join(os.getcwd(), 'logs', 'SpaCy Log {}.txt'.format(self.log_no))\n \n if step == 1:\n # Output log header\n output = \"\\nSpaCyForQlik Log: {0} \\n\\n\".format(time.ctime(time.time()))\n # Set mode to write new log file\n mode = 'w'\n \n elif step == 2:\n # Output the model name and execution parameters to the terminal and log file\n output = \"Model: {0}\\n\\n\".format(self.model)\n output += \"Execution parameters: {0}\\n\\n\".format(self.kwargs) \n \n elif step == 3:\n # Output the request data frame to the terminal and log file\n output = \"REQUEST: {0} rows x cols\\nSample Data:\\n\\n\".format(self.request_df.shape)\n output += \"{0}\\n...\\n{1}\\n\\n\".format(self.request_df.head().to_string(), self.request_df.tail().to_string())\n \n elif step == 4:\n # Output the response data frame to the terminal and log file\n output = \"RESPONSE: {0} rows x cols\\nSample Data:\\n\\n\".format(self.response_df.shape)\n output += \"{0}\\n...\\n{1}\\n\\n\".format(self.response_df.head().to_string(), self.response_df.tail().to_string())\n \n elif step == 5:\n # Output the table description if the call was made from the load script\n output = \"\\nTABLE DESCRIPTION SENT TO QLIK:\\n\\n{0} \\n\\n\".format(self.table)\n \n elif step == 6:\n output = \"Completed transformations.\\nTop and bottom 3 samples for training:\\n\\n\"\n \n # Output the top and bottom 3 results from training data\n for text, annotations in self.train[:3] + self.train[-3:]:\n output += '{0}\\n\\n {1}\\n\\n'.format(text, annotations)\n \n # Print the top and bottom 3 testing samples as well if the data has been split into subsets\n try:\n output += \"Data split into {0} samples for training and {1} samples for testing.\\n\".format(len(self.train), len(self.validation))\n output += \"Top and bottom 3 samples for testing:\\n\\n\"\n\n # Output the top and bottom 3 results from the testing data\n for text, annotations in self.validation[:3] + self.validation[-3:]:\n output += '{0}\\n\\n {1}\\n\\n'.format(text, annotations)\n except (TypeError, AttributeError) as e:\n pass\n \n elif step == 7:\n # Output after a model is successfully loaded for training\n output = \"Loaded model {0}\\n\\n\".format(self.base_model)\n\n elif step == 8:\n # Print loss at current epoch with training data\n output = \"{0}, Losses with Training data: {1}\\n\".format(self.losses_train[-1][0], self.losses_train[-1][1])\n\n elif step == 9:\n # Print loss at current epoch with testing data\n output = \"{0}, Losses with Testing data: {1}\\n\".format(self.losses_test[-1][0], self.losses_test[-1][1])\n\n elif step == 10:\n # Output after model is successfully saved to disk\n output = \"\\nModel successfully saved to disk at directory: {0}\\n\\n\".format(self.path + self.model + '/')\n\n elif step == 11:\n # Output after evaluation metrics have been calculated\n output = \"RESPONSE: {0} rows x cols\\nSample Data:\\n\\n\".format(self.response_df.shape)\n output += \"{0}\\n...\\n{1}\\n\\n\".format(self.response_df.head(10).to_string(), self.response_df.tail(5).to_string())\n\n sys.stdout.write(output)\n with open(self.logfile, mode, encoding='utf-8') as f:\n f.write(output)\n\n def _print_exception(self, s, e):\n \"\"\"\n Output exception message to stdout and also to the log file if debugging is required.\n :s: A description for the error\n :e: The exception\n \"\"\"\n 
\n # Output exception message\n sys.stdout.write(\"\\n{0}: {1} \\n\\n\".format(s, e))\n \n if self.debug:\n with open(self.logfile,'a') as f:\n f.write(\"\\n{0}: {1} \\n\\n\".format(s, e))"
] |
[
[
"sklearn.model_selection.train_test_split",
"pandas.DataFrame"
]
] |
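The two APIs tracked for this row boil down to one pattern: holding out a validation subset from a pandas DataFrame before training. A minimal, self-contained sketch of that pattern (the column names and rows are made up for illustration, not taken from the file above):

import pandas as pd
from sklearn.model_selection import train_test_split

# Toy DataFrame standing in for (text, annotation) training records
df = pd.DataFrame({
    "text": ["Acme Corp hired Jane", "Bob joined Initech", "Carol left Globex", "Dan founded Hooli"],
    "label": ["ORG", "ORG", "ORG", "ORG"],
})

# Hold out 25% of rows for validation; random_state makes the split reproducible
train_df, val_df = train_test_split(df, test_size=0.25, random_state=42)
print(len(train_df), "training rows,", len(val_df), "validation rows")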
escorciav/kaust-cluster-status
|
[
"c67f372c5484424dabe58bcafe8413ef784cdca0"
] |
[
"cluster.py"
] |
[
"import subprocess\n\nimport pandas as pd\n\n\ndef parse_slurm_line(x, strip=True, add_ngpu=True):\n \"\"\"Split info from slurm and (optionally) add GPU\n\n TODO:\n PR to get rid of this in favor of other SLURM command is welcome.\n \"\"\"\n ind_slice = slice(1, None)\n if strip:\n ind_slice = slice(1, -1)\n\n data = x[ind_slice].split(' ')\n # Non-generalizable depends on your SLURM config\n # GPUs must be set as SLURM-GRES.\n # Need small patch if you manage other resources with GRES.\n if add_ngpu:\n gpu_data = data[-1]\n # Sanitize: remove \" or ' added by users\n gpu_data = gpu_data.replace(\"'\", '')\n gpu_data = gpu_data.replace('\"', '')\n num_gpus = 1\n gpu_name = 'null'\n if ':' in gpu_data:\n seq_gpu_data = gpu_data.split(':')\n if len(seq_gpu_data) == 2:\n _, num_gpus = seq_gpu_data\n # in case someone request `gpu:gpu_name`\n if not int_as_str(num_gpus):\n num_gpus = '1'\n else:\n _, gpu_name, num_gpus = seq_gpu_data\n num_gpus = int(num_gpus)\n data.extend([num_gpus, gpu_name])\n return data\n\n\ndef cluster_info(gpu_filter=None, add_ngpu=True):\n \"\"\"Info about nodes in the cluster\n\n Grab info with `sinfo -o \"%n %A %D %P %T %c %z %m %d %w %f %G\"`,\n Then form a list of list with the following info per node:\n TODO\n\n \"\"\"\n cmd = ['sinfo', '-o', '\"%n %A %D %P %T %c %z %m %d %w %f %G\"']\n cmd_status = subprocess.run(cmd, stdout=subprocess.PIPE,\n universal_newlines=True)\n data = cmd_status.stdout.split('\\n')[:-1]\n data_keys, data_values = data[0], data[1::]\n\n keys = parse_slurm_line(data_keys, add_ngpu=False)\n feat_ind = -2\n if add_ngpu:\n keys += ['NUM_GPUS', 'GPU_NAME']\n feat_ind -= 2\n values = []\n for i in data_values:\n data = parse_slurm_line(i, add_ngpu=add_ngpu)\n if gpu_filter is None:\n values.append(data)\n elif gpu_filter in data[feat_ind]:\n values.append(data)\n\n table = pd.DataFrame(values, columns=keys)\n return table\n\n\ndef queue_status(add_ngpu=True):\n \"\"\"Status of the queue\n\n Grab info with `squeue -o \"%u %i %t %N %b\"`,\n Then form a list of list with the following info per node:\n TODO\n\n \"\"\"\n cmd = ['squeue', '-o', '\"%u %i %t %N %b\"']\n cmd_status = subprocess.run(cmd, stdout=subprocess.PIPE,\n universal_newlines=True)\n data = cmd_status.stdout.split('\\n')[:-1]\n data_keys, data_values = data[0], data[1::]\n\n # TODO: fix based on output\n values = [parse_slurm_line(i, strip=False, add_ngpu=add_ngpu)\n for i in data_values]\n keys = parse_slurm_line(data_keys, add_ngpu=False)\n if add_ngpu:\n keys += ['NUM_GPUS', 'GPU_NAME']\n table = pd.DataFrame(values, columns=keys)\n return table\n\n\ndef gpu_avail(verbose=True, gpu_filter='', add_ngpu=True):\n \"Utility to return a pandas-table with gpu available per node\"\n queue = queue_status(add_ngpu=add_ngpu)\n ind_running = queue['ST'] == 'R'\n running_jobs = queue.loc[ind_running, :]\n running_jobs_gbn = running_jobs.groupby('NODELIST')\n node_info = cluster_info(gpu_filter=gpu_filter, add_ngpu=add_ngpu)\n node_info.set_index('HOSTNAMES', inplace=True)\n if verbose:\n print('HOSTNAMES GPUs(USED/TOTAL)')\n fmt = '{} {}/{}'\n\n for node, group in running_jobs_gbn:\n if node not in node_info.index:\n continue\n\n if verbose:\n print(fmt.format(node,\n group.loc[:, 'NUM_GPUS'].sum(),\n node_info.loc[node, 'NUM_GPUS']))\n node_info.loc[node, 'NUM_GPUS'] -= group.loc[:, 'NUM_GPUS'].sum()\n\n if verbose:\n print()\n return node_info.loc[:, ['NUM_GPUS', 'GPU_NAME']]\n\n\ndef gpu_status(verbose=True, gpu_filter='', add_ngpu=True):\n \"Utility to return a pandas-table with gpu 
consumption per node\"\n queue = queue_status(add_ngpu=add_ngpu)\n ind_running = queue['ST'] == 'R'\n running_jobs = queue.loc[ind_running, :]\n running_jobs_gbn = running_jobs.groupby('NODELIST')\n node_info = cluster_info(gpu_filter=gpu_filter, add_ngpu=add_ngpu)\n node_info.set_index('HOSTNAMES', inplace=True)\n\n used_gpus = []\n indices = []\n for node, group in running_jobs_gbn:\n if node not in node_info.index:\n continue\n used_gpus.append(group.loc[:, 'NUM_GPUS'].sum())\n indices.append(node)\n node_info['USED_GPUS'] = pd.Series(used_gpus, index=indices)\n\n return node_info.loc[:, ['GPU_NAME', 'USED_GPUS', 'NUM_GPUS']]\n\n\ndef int_as_str(s):\n \"return True if string is integer\"\n try:\n int(s)\n return True\n except ValueError:\n return False\n"
] |
[
[
"pandas.Series",
"pandas.DataFrame"
]
] |
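A small sketch of the DataFrame/Series usage that `gpu_status` relies on above — building a node table and attaching per-node GPU usage via an index-aligned Series (the node names and counts here are invented):

import pandas as pd

# Toy cluster table, analogous to the output of cluster_info()
node_info = pd.DataFrame(
    {"HOSTNAMES": ["gpu01", "gpu02", "gpu03"],
     "GPU_NAME": ["v100", "v100", "p100"],
     "NUM_GPUS": [8, 8, 4]}
).set_index("HOSTNAMES")

# Per-node usage, e.g. summed from a squeue-style table; aligned by index label
used = pd.Series([3, 8], index=["gpu01", "gpu02"])
node_info["USED_GPUS"] = used  # gpu03 gets NaN because it has no running jobs

print(node_info[["GPU_NAME", "USED_GPUS", "NUM_GPUS"]])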
jklewis99/magical-movie-poster-processing
|
[
"88aefe4c446fd3d8366b527f59e20c04ac584fb4"
] |
[
"generate_train_test.py"
] |
[
"import pandas as pd\r\nfrom sklearn.model_selection import train_test_split\r\n\r\ndef main():\r\n metadata = pd.read_csv(\"data/movies-metadata-cleaned.csv\").drop(\r\n columns=['Language', 'Poster', 'Country', 'Director', 'Released', 'Writer', 'Genre', 'Actors'])\r\n ratings = pd.get_dummies(metadata['Rated'], prefix='rated') # one hot encode \"Rated\" column\r\n metadata = metadata.drop(columns=[\"Rated\"]).join(ratings) # replace \"Rated\" with one_hot\r\n metadata = metadata.dropna() # drop the missing box_office values\r\n posters = pd.read_csv(\"data/posters-and-genres.csv\").drop(columns=[\"Genre\"]).rename(columns={\"Id\": \"imdbID\"})\r\n data = metadata.merge(posters, on='imdbID').drop_duplicates() # add genres\r\n data = data[((data['Short'] != 1) & ( data['N/A'] != 1))]\r\n data = data.drop(columns=['Reality-TV', 'Short', 'N/A'])\r\n cols = data.columns.tolist()\r\n cols = cols[1:2] + cols[5:6] + cols[2:5] + cols[6:] + cols[0:1]\r\n data = data[cols] # reorder columns\r\n train, test = train_test_split(data, test_size=0.2) # generate train and test data\r\n train.to_csv(\"data/train_data.csv\", index=False)\r\n test.to_csv(\"data/test_data.csv\", index=False)\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n"
] |
[
[
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"pandas.get_dummies"
]
] |
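A condensed sketch of the preprocessing steps in `main()` above — read a CSV, one-hot encode the categorical `Rated` column with `get_dummies`, and split into train/test frames. An in-memory CSV is used so the snippet runs without the project's data files:

import io
import pandas as pd
from sklearn.model_selection import train_test_split

# In-memory CSV standing in for the metadata file on disk
csv_text = "Title,Rated,BoxOffice\nA,PG,100\nB,R,250\nC,PG-13,80\nD,R,40\n"
metadata = pd.read_csv(io.StringIO(csv_text))

# One-hot encode the categorical column and replace it, as in main() above
rated = pd.get_dummies(metadata["Rated"], prefix="rated")
metadata = metadata.drop(columns=["Rated"]).join(rated)

# 80/20 split into train and test frames
train, test = train_test_split(metadata, test_size=0.2, random_state=0)
print(train.shape, test.shape)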
ldqcarbon/RTK
|
[
"88df8ed953805aca3c5a73c22cb940164e7cc296"
] |
[
"utilities/SimpleRTK/Examples/RTKFirstCudaReconstruction.py"
] |
[
"#!/usr/bin/env python\nfrom __future__ import print_function\nimport SimpleRTK as srtk\nimport sys\nimport os\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport time\n\nif len ( sys.argv ) < 3:\n print( \"Usage: RTKFirstReconstruction <outputimage> <outputgeometry>\" )\n sys.exit ( 1 )\n\n# Defines the RTK geometry object\ngeometry = srtk.ThreeDCircularProjectionGeometry()\nnumberOfProjections = 360\nfirstAngle = 0\nangularArc = 360\nsid = 600 # source to isocenter distance in mm\nsdd = 1200 # source to detector distance in mm\nisox = 0 # X coordinate on the projection image of isocenter\nisoy = 0 # Y coordinate on the projection image of isocenter\nfor x in range(0,numberOfProjections):\n angle = firstAngle + x * angularArc / numberOfProjections\n geometry.AddProjection(sid,sdd,angle,isox,isoy)\n\n# Writing the geometry\ngeometrywriter = srtk.ThreeDCircularProjectionGeometryXMLFileWriter()\ngeometrywriter.SetFileName ( sys.argv[2] )\ngeometrywriter.Execute ( geometry );\n\nconstantImageSource = srtk.ConstantImageSource()\norigin = [ -127.5, -127.5, 0. ]\nsizeOutput = [ 256, 256, numberOfProjections ]\nspacing = [ 1.0, 1.0, 1.0 ]\nconstantImageSource.SetOrigin( origin )\nconstantImageSource.SetSpacing( spacing )\nconstantImageSource.SetSize( sizeOutput )\nconstantImageSource.SetConstant(0.0)\nsource = constantImageSource.Execute()\n\nrei = srtk.RayEllipsoidIntersectionImageFilter()\nsemiprincipalaxis = [ 50, 50, 50]\ncenter = [ 0, 0, 0]\n# Set GrayScale value, axes, center...\nrei.SetDensity(20)\nrei.SetAngle(0)\nrei.SetCenter(center)\nrei.SetAxis(semiprincipalaxis)\nrei.SetGeometry( geometry )\nreiImage = rei.Execute(source)\n\n# Create reconstructed image\nconstantImageSource2 = srtk.ConstantImageSource()\norigin = [ -63.5, -63.5, -63.5 ]\nsizeOutput = [ 128, 128, 128 ]\nconstantImageSource2.SetOrigin( origin )\nconstantImageSource2.SetSpacing( spacing )\nconstantImageSource2.SetSize( sizeOutput )\nconstantImageSource2.SetConstant(0.0)\nsource2 = constantImageSource2.Execute()\n\nprint(\"Performing reconstruction\")\nfeldkamp = srtk.CudaFDKConeBeamReconstructionFilter()\nfeldkamp.SetGeometry( geometry );\nfeldkamp.SetTruncationCorrection(0.0);\nfeldkamp.SetHannCutFrequency(0.0);\n\nstart = time.time()\nimage = feldkamp.Execute(source2,reiImage)\nend = time.time()\nprint(\"Reconstruction took %f\" %(end-start))\n\nplt.imshow(srtk.GetArrayFromImage(image[:,64,:]), cmap = cm.Greys_r)\nplt.show()\n\npixelID = image.GetPixelIDValue()\ncaster = srtk.CastImageFilter()\ncaster.SetOutputPixelType( pixelID )\nimage = caster.Execute( image )\n\nwriter = srtk.ImageFileWriter()\nwriter.SetFileName ( sys.argv[1] )\nwriter.Execute ( image );\n\n"
] |
[
[
"matplotlib.pyplot.show"
]
] |
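The only API tracked for this row is `matplotlib.pyplot.show`, used at the end of the script to display the central slice of the reconstructed volume. A stand-alone sketch of that display step with a synthetic volume, so it runs without SimpleRTK or CUDA:

import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm

# Synthetic 128^3 "volume": a bright sphere on a dark background
z, y, x = np.mgrid[-64:64, -64:64, -64:64]
volume = (x**2 + y**2 + z**2 < 50**2).astype(float)

# Display the central slice, matching the Greys_r colormap used in the script above
plt.imshow(volume[:, 64, :], cmap=cm.Greys_r)
plt.show()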
Waste-Wood/HGM-GIF
|
[
"969b4c213360a5e47369c0072f9fe20ded0c1570"
] |
[
"module/PositionEmbedding.py"
] |
[
"#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\r\nimport torch\r\nimport numpy as np\r\n\r\n\r\ndef get_sinusoid_encoding_table(n_position, d_hid, padding_idx=None):\r\n ''' Sinusoid position encoding table '''\r\n\r\n def cal_angle(position, hid_idx):\r\n return position / np.power(10000, 2 * (hid_idx // 2) / d_hid)\r\n\r\n def get_posi_angle_vec(position):\r\n return [cal_angle(position, hid_j) for hid_j in range(d_hid)]\r\n\r\n sinusoid_table = np.array([get_posi_angle_vec(pos_i) for pos_i in range(n_position)])\r\n\r\n sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i\r\n sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1\r\n\r\n if padding_idx is not None:\r\n # zero vector for padding dimension\r\n sinusoid_table[padding_idx] = 0.\r\n\r\n return torch.FloatTensor(sinusoid_table)\r\n"
] |
[
[
"torch.FloatTensor",
"numpy.cos",
"numpy.power",
"numpy.sin"
]
] |
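A compact, vectorised sketch of the same sinusoid position-encoding table built above (equivalent in spirit to the per-position list comprehension, shown here only as an illustration):

import numpy as np
import torch

n_position, d_hid = 6, 8

pos = np.arange(n_position)[:, None]              # (n_position, 1) -> position index
dim = np.arange(d_hid)[None, :]                   # (1, d_hid)      -> hidden index
angle = pos / np.power(10000, 2 * (dim // 2) / d_hid)

table = np.zeros((n_position, d_hid))
table[:, 0::2] = np.sin(angle[:, 0::2])           # even dims: sine
table[:, 1::2] = np.cos(angle[:, 1::2])           # odd dims: cosine

encoding = torch.FloatTensor(table)               # same return type as above
print(encoding.shape)                             # torch.Size([6, 8])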
CoderTousif/python
|
[
"e96c6eebf6be89dc9f81660c8e8826efefb1ef3a"
] |
[
"img/row.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Dec 10 18:27:48 2019\n\n@author: sandipan\n\"\"\"\n\nimport cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport os\nimport glob\nk = 0\n\nnew = input(\"ENTER YOUR INPUT IMAGE DIRECTORY: \")\ninpath = os.getcwd() + new\nentries = os.listdir(inpath)\nfor entry in entries: \n#for img in glob('*.png'):\n test_image = cv2.imread(os.path.join(inpath,entry))\n print(entry)\n\n#path='Tp'\n#listing=os.listdir(path)\n#for file in listing:\n# fname =glob.glob(path+'/'+file+'/*.jpg')\n# print(fname)\n# for fn in fname:\n# test_image=cv2.imread(fn)\n# print(test_image)\n windowsize_r = 5\n windowsize_c = 5\n img = 255 * np.ones((test_image.shape[0],test_image.shape[1],3),np.uint8)\n out_img = 255 * np.ones((test_image.shape[0],test_image.shape[1],3),np.uint8)\n print(img.shape)\n\n b,g,r = cv2.split(test_image)\n\n# value=open(\"value.txt\",\"w\")\n for i in range(windowsize_r,test_image.shape[0] - windowsize_r):\n for j in range( windowsize_c,test_image.shape[1] - windowsize_c):\n window_b = b[i-windowsize_r:i+windowsize_r+1,j-windowsize_c:j+windowsize_c+1]\n window_g = g[i-windowsize_r:i+windowsize_r+1,j-windowsize_c:j+windowsize_c+1]\n window_r = r[i-windowsize_r:i+windowsize_r+1,j-windowsize_c:j+windowsize_c+1]\n \n \n mean_b,mean_g,mean_r,_=cv2.mean(test_image[i-windowsize_r:i+windowsize_r+1,j-windowsize_c:j+windowsize_c+1])\n mean_b = np.mean(window_b)\n mean_g = np.mean(window_g)\n mean_r = np.mean(window_r)\n \n if i == 0:\n out_img[i][j] = [int(mean_b),int(mean_g),int(mean_r)]\n \n if i > 0:\n m_b = int(mean_b - out_img[i-1][j][0])\n m_g = int(mean_g - out_img[i-1][j][1])\n m_r = int(mean_r - out_img[i-1][j][2])\n \n if m_b < 0:\n m_b = (-1) * m_b\n \n if m_g < 0:\n m_g = (-1) * m_g\n \n if m_r < 0:\n m_r = (-1) * m_r\n \n out_img[i][j] = [m_b,m_g,m_r]\n \n \n img[i][j] = [int(mean_b),int(mean_g),int(mean_r)]\n #print(int(mean_b),int(mean_g),int(mean_r))\n v = int(mean_b),int(mean_g),int(mean_r)\n# value.write(str(v))\n# value.write(\"\\n\")\n\n# value.close()\n\n# cv2.imwrite('mean.jpg',img)\n# cv2.imwrite('mean_diff.jpg',out_img)\n out_string = 'a/a/000000'+ str(k)+'.jpg'\n k=k+1\n# cv2.imwrite(os.path.join(outpath,out_arr,out_string),img)\n cv2.imwrite(out_string,out_img)\n\n "
] |
[
[
"numpy.mean",
"numpy.ones"
]
] |
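A stripped-down sketch of the window-mean step the nested loops above perform per colour channel, using only the two listed numpy APIs on a toy grayscale image:

import numpy as np

img = np.arange(100, dtype=float).reshape(10, 10)   # toy grayscale image
win = 2                                             # half window size, like windowsize_r/c above
out = 255 * np.ones(img.shape)                      # output initialised to white, like out_img

for i in range(win, img.shape[0] - win):
    for j in range(win, img.shape[1] - win):
        window = img[i - win:i + win + 1, j - win:j + win + 1]
        out[i, j] = np.mean(window)                 # local mean of the (2*win+1)^2 patch

print(out[5, 5])                                    # equals img[5, 5] for this linear ramp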
hellohwang/tf2.0-semantic-segmentation
|
[
"fe407376efc310ef5a3afb5787a04e34f420cdb8"
] |
[
"tools/train1.py"
] |
[
"import os\nimport sys\nimport random\nimport shutil\nimport cv2\nimport time\nimport math\nimport pprint\nimport numpy as np\nimport pandas as pd\nfrom tensorboardX import SummaryWriter\nfrom dataset import Dataset, BatchGenerator\nimport tensorflow as tf\n\nfrom utils.experiments import LabelSmoother\nfrom utils.tools import AverageMeter, Logger\nfrom tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard, Callback\n\n\nclass CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):\n def __init__(self, d_model=256, warmup_steps=4000):\n super(CustomSchedule, self).__init__()\n\n self.d_model = d_model\n self.d_model = tf.cast(self.d_model, tf.float32)\n\n self.warmup_steps = warmup_steps\n self.current_lr = 0.0\n\n def __call__(self, step):\n arg1 = tf.math.rsqrt(step)\n arg2 = step * (self.warmup_steps ** -1.5)\n result = tf.math.rsqrt(self.d_model) * tf.math.minimum(arg1, arg2)\n self.current_lr = result.numpy()\n return result\n\n\ndef evaluate_single_epoch(config, model, dataloader, criterion, log_val, epoch, writer, dataset_size):\n batch_time = AverageMeter()\n losses = AverageMeter()\n scores = AverageMeter()\n eval_loss = tf.keras.metrics.Mean(name='eval_loss')\n eval_accuracy = tf.keras.metrics.BinaryAccuracy(name='eval_accuracy')\n end = time.time()\n for i, (images, labels) in enumerate(dataloader):\n\n preds = model(images)\n\n loss = criterion(labels, preds)\n eval_loss(loss)\n loss_mean = eval_loss.result().numpy()\n\n losses.update(loss_mean, 1)\n eval_accuracy(labels, preds)\n score = eval_accuracy.result().numpy()\n scores.update(score, 1)\n\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % config.PRINT_EVERY == 0:\n print('[%2d/%2d] time: %.2f, loss: %.6f, score: %.4f'\n % (i, dataset_size, batch_time.sum, loss_mean, score))\n\n del images, labels, preds\n ## end of epoch. 
break..\n if i > dataset_size / config.EVAL.BATCH_SIZE: break\n writer.add_scalar('val/loss', losses.avg, epoch)\n writer.add_scalar('val/score', scores.avg, epoch)\n log_val.write('[%d/%d] loss: %.6f, score: %.4f\\n'\n % (epoch, config.TRAIN.NUM_EPOCHS, losses.avg, scores.avg))\n print('average loss over VAL epoch: %f' % losses.avg)\n\n return scores.avg, losses.avg\n\n\ndef train_single_epoch(config, model, dataloader, criterion, optimizer, log_train, epoch, writer, dataset_size):\n batch_time = AverageMeter()\n losses = AverageMeter()\n scores = AverageMeter()\n\n train_loss = tf.keras.metrics.Mean(name='train_loss')\n train_accuracy = tf.keras.metrics.BinaryAccuracy(name='train_accuracy')\n end = time.time()\n for i, (images, labels) in enumerate(dataloader):\n with tf.GradientTape() as grad_tape:\n preds = model(images)\n loss = criterion(labels, preds)\n\n gradients = grad_tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n\n # preds = tf.sigmoid(logits)\n train_loss(loss)\n train_accuracy(labels, preds)\n losses.update(train_loss.result().numpy(), 1)\n scores.update(train_accuracy.result().numpy(), 1)\n\n batch_time.update(time.time() - end)\n end = time.time()\n dataloader_len = dataset_size / config.TRAIN.BATCH_SIZE\n if i % 100 == 0:\n print(\"[%d/%d][%d/%d] time: %.2f, loss: %.6f, score: %.4f, lr: %f\"\n % (epoch, config.TRAIN.NUM_EPOCHS, i, dataloader_len, batch_time.sum, train_loss.result().numpy(),\n train_accuracy.result().numpy(),\n optimizer.learning_rate.numpy()))\n\n if i == 0:\n iteration = dataloader_len * epoch + i\n annotated_images = utils.tools.annotate_to_images(images, labels, preds.numpy())\n for idx, annotated_image in enumerate(annotated_images):\n writer.add_image('train/image_{}_class_{}'.format(int(idx / 8), idx % 8), annotated_image, iteration)\n\n del images, labels, preds\n ## end of epoch. break..\n if i > dataset_size / config.TRAIN.BATCH_SIZE: break\n writer.add_scalar('train/score', scores.avg, epoch)\n writer.add_scalar('train/loss', losses.avg, epoch)\n writer.add_scalar('train/lr', optimizer.learning_rate.numpy(), epoch)\n log_train.write('[%d/%d] loss: %.6f, score: %.4f, lr: %f\\n'\n % (epoch, config.TRAIN.NUM_EPOCHS, losses.avg, scores.avg, optimizer.learning_rate.numpy()))\n print('average loss over TRAIN epoch: %f' % losses.avg)\n\n\ndef train(config, model, train_loader, test_loader, optimizer, log_train, log_val, start_epoch, best_score, best_loss,\n writer, dataset_size, criterion):\n if 1: # keras mode\n ## train phase..\n metric = tf.keras.metrics.BinaryAccuracy()\n\n checkpoint_all = ModelCheckpoint(\n 'checkpoints\\\\all_models.{epoch:02d}-{loss:.2f}.h5',\n monitor='loss',\n verbose=1,\n save_best_only=False,\n mode='auto',\n period=1\n )\n\n model.compile(optimizer=optimizer, loss=dice_loss, metrics=['accuracy'])\n model.fit_generator(generator=train_loader, steps_per_epoch=len(train_loader), epochs=config.TRAIN.NUM_EPOCHS,\n verbose=1,\n validation_data=test_loader,\n max_queue_size=10,\n workers=config.TRAIN.NUM_WORKERS,\n use_multiprocessing=False,\n callbacks=[checkpoint_all])\n ## use_multiprocessing=True.. 
get erorr i don't know..\n\n else: # pytorch style mode\n for epoch in range(start_epoch, config.TRAIN.NUM_EPOCHS):\n # ## TODO set a loss function..\n train_single_epoch(config, model, train_loader, criterion, optimizer, log_train, epoch, writer,\n dataset_size[0])\n\n test_score, test_loss = evaluate_single_epoch(config, model, test_loader, criterion, log_val, epoch, writer,\n dataset_size[1])\n print('Total Test Score: %.4f, Test Loss: %.4f' % (test_score, test_loss))\n #\n if test_score > best_score:\n best_score = test_score\n print('Test score Improved! Save checkpoint')\n\n model.save_weights(str(epoch) + \"_\" + str(best_score) + \"_model.h5\")\n # utils.checkpoint.save_checkpoint(config, model, epoch, test_score, test_loss)\n\n\ndef run(config):\n ## TODO change to get model\n\n sm.set_framework('tf.keras') ## segmentation_model 2.0 support feature..\n backbone = 'mobilenetv2'\n model = sm.Unet(backbone, input_shape=(256, 256, 3), encoder_weights=None,\n activation='sigmoid') # activation='identity')#, decoder_attention_type='scse') # 'imagenet')\n model.summary()\n\n ## TODO optimizer change\n # optimizer = tf.keras.optimizers.Adam(learning_rate_schedule)#learning_rate=config.OPTIMIZER.LR) #get_optimizer(config, model.parameters())\n optimizer = tf.keras.optimizers.Adam(\n learning_rate=config.OPTIMIZER.LR) # config.OPTIMIZER.LR) #get_optimizer(config, model.parameters())\n ##loss ##\n criterion = FocalLoss() # DiceLoss()#tf.keras.losses.BinaryCrossentropy()\n\n checkpoint = None\n # checkpoint = utils.checkpoint.get_initial_checkpoint(config)\n if checkpoint is not None:\n last_epoch, score, loss = utils.checkpoint.load_checkpoint(config, model, checkpoint)\n # utils.checkpoint.load_checkpoint_legacy(config, model, checkpoint)\n else:\n print('[*] no checkpoint found')\n last_epoch, score, loss = -1, -1, float('inf')\n print('last epoch:{} score:{:.4f} loss:{:.4f}'.format(last_epoch, score, loss))\n\n # optimizer.param_groups[0]['initial_lr'] = config.OPTIMIZER.LR\n\n writer = SummaryWriter(os.path.join(config.TRAIN_DIR + config.RECIPE, 'logs'))\n log_train = Logger()\n log_val = Logger()\n log_train.open(os.path.join(config.TRAIN_DIR + config.RECIPE, 'log_train.txt'), mode='a')\n log_val.open(os.path.join(config.TRAIN_DIR + config.RECIPE, 'log_val.txt'), mode='a')\n train_loader = BatchGenerator(config, 'train', config.TRAIN.BATCH_SIZE, None)\n # train_dataset = Dataset(config, 'train', None)\n # train_loader = train_dataset.DataGenerator(config.DATA_DIR, batch_size=config.TRAIN.BATCH_SIZE, shuffle = True)\n train_datasize = len(train_loader) # train_dataset.get_length()\n\n # val_dataset = Dataset(config, 'val', None)\n # val_loader = val_dataset.DataGenerator(config.DATA_DIR, batch_size=config.TRAIN.BATCH_SIZE, shuffle=False)\n\n val_loader = BatchGenerator(config, 'val', config.EVAL.BATCH_SIZE, None)\n val_datasize = len(val_loader) # val_dataset.get_length()\n\n ### TODO: add transform\n\n train(config, model, train_loader, val_loader, optimizer, log_train, log_val, last_epoch + 1, score, loss, writer,\n (train_datasize, val_datasize), criterion)\n\n model.save_weights(\"model.h5\")\n\n\ndef seed_everything():\n seed = 2019\n random.seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n np.random.seed(seed)\n tf.random.set_seed(seed)\n\n\ndef main():\n import warnings\n warnings.filterwarnings(\"ignore\")\n\n print('start training.')\n seed_everything()\n\n ymls = ['configs/fastscnn_mv3_sj_add_data_1024.yml']\n for yml in ymls:\n config = utils.config.load(yml)\n\n 
os.environ[\"CUDA_VISIBLE_DEVICES\"] = '0,1' # config.GPU\n prepare_train_directories(config)\n pprint.pprint(config, indent=2)\n utils.config.save_config(yml, config.TRAIN_DIR + config.RECIPE)\n run(config)\n\n print('success!')\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"tensorflow.keras.metrics.BinaryAccuracy",
"tensorflow.keras.callbacks.ModelCheckpoint",
"numpy.random.seed",
"tensorflow.random.set_seed",
"tensorflow.cast",
"tensorflow.math.rsqrt",
"tensorflow.GradientTape",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.metrics.Mean",
"tensorflow.math.minimum"
]
] |
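A stand-alone sketch of the warm-up learning-rate schedule defined in `CustomSchedule` above; the extra `tf.cast` on `step` and the sample step values are additions for the sketch, not part of the original class:

import tensorflow as tf

class WarmupSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
    def __init__(self, d_model=256, warmup_steps=4000):
        super().__init__()
        self.d_model = tf.cast(d_model, tf.float32)
        self.warmup_steps = warmup_steps

    def __call__(self, step):
        step = tf.cast(step, tf.float32)
        arg1 = tf.math.rsqrt(step)                    # 1/sqrt(step) after warm-up
        arg2 = step * (self.warmup_steps ** -1.5)     # linear ramp during warm-up
        return tf.math.rsqrt(self.d_model) * tf.math.minimum(arg1, arg2)

schedule = WarmupSchedule()
optimizer = tf.keras.optimizers.Adam(learning_rate=schedule)
for s in (1, 4000, 40000):                            # rises, peaks near warmup_steps, then decays
    print(s, float(schedule(s)))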
choltz95/DREAMPlace
|
[
"fd3fec54bdafddb9c80909d3768ddbae9256c118"
] |
[
"dreamplace/ops/dct/dct.py"
] |
[
"##\n# @file dct.py\n# @author Yibo Lin\n# @date Jun 2018\n#\n\nimport os \nimport sys \nimport numpy as np\nimport torch\nfrom torch.autograd import Function\nfrom torch import nn\n\nimport dreamplace.ops.dct.dct_cpp as dct_cpp\ntry: \n import dreamplace.ops.dct.dct_cuda as dct_cuda\nexcept:\n pass \n\nimport dreamplace.ops.dct.discrete_spectral_transform as discrete_spectral_transform\n\ndef dct(x, expk, algorithm):\n \"\"\"compute discrete cosine transformation, DCT II, using N-FFT or 2N-FFT \n yk = \\sum_{n=0}^{N-1} x_n cos(pi/N*n*(k+1/2))\n\n @param x sequence \n @param expk coefficients for post-processing \n @param algorithm algorithm type N | 2N\n \"\"\"\n if x.is_cuda:\n if algorithm == 'N': \n output = dct_cuda.dct(x.view([-1, x.size(-1)]), expk)\n elif algorithm == '2N': \n output = dct_cuda.dct_2N(x.view([-1, x.size(-1)]), expk)\n else:\n if algorithm == 'N': \n output = dct_cpp.dct(x.view([-1, x.size(-1)]), expk, torch.get_num_threads())\n elif algorithm == '2N':\n output = dct_cpp.dct_2N(x.view([-1, x.size(-1)]), expk, torch.get_num_threads())\n return output.view(x.size()) \n\nclass DCTFunction(Function):\n @staticmethod\n def forward(ctx, x, expk, algorithm):\n return dct(x, expk, algorithm)\n\nclass DCT(nn.Module):\n def __init__(self, expk=None, algorithm='N'):\n super(DCT, self).__init__()\n self.expk = expk\n self.algorithm = algorithm\n def forward(self, x): \n if self.expk is None or self.expk.size(-2) != x.size(-1):\n self.expk = discrete_spectral_transform.get_expk(x.size(-1), dtype=x.dtype, device=x.device)\n return DCTFunction.apply(x, self.expk, self.algorithm)\n\ndef idct(x, expk, algorithm):\n \"\"\"Compute inverse discrete cosine transformation, which is also the DCT III, using N-FFT or 2N-FFT\n yk = Re { 1/2*x0 + \\sum_{n=1}^{N-1} xn exp(j*pi/(2N)*n*(2k+1)) }\n The actual yk will be scaled by 2 to match other python implementation\n\n @param x sequence \n @param expk coefficients for pre-processing \n @param algorithm algorithm type N | 2N\n \"\"\"\n if x.is_cuda:\n if algorithm == 'N': \n output = dct_cuda.idct(x.view([-1, x.size(-1)]), expk)\n elif algorithm == '2N': \n output = dct_cuda.idct_2N(x.view([-1, x.size(-1)]), expk)\n else:\n if algorithm == 'N': \n output = dct_cpp.idct(x.view([-1, x.size(-1)]), expk, torch.get_num_threads())\n elif algorithm == '2N': \n output = dct_cpp.idct_2N(x.view([-1, x.size(-1)]), expk, torch.get_num_threads())\n return output.view(x.size()) \n\nclass IDCTFunction(Function):\n @staticmethod\n def forward(ctx, x, expk, algorithm):\n return idct(x, expk, algorithm)\n\nclass IDCT(nn.Module):\n def __init__(self, expk=None, algorithm='N'):\n super(IDCT, self).__init__()\n self.expk = expk\n self.algorithm = algorithm\n def forward(self, x): \n if self.expk is None or self.expk.size(-2) != x.size(-1):\n self.expk = discrete_spectral_transform.get_expk(x.size(-1), dtype=x.dtype, device=x.device)\n return IDCTFunction.apply(x, self.expk, self.algorithm)\n\ndef dct2(x, expk0, expk1, algorithm='N'):\n \"\"\"compute 2D discrete cosine transformation, using N-FFT or 2N-FFT\n \"\"\"\n if x.is_cuda:\n if algorithm == 'N': \n output = dct_cuda.dct2(x, expk0, expk1)\n #output = dct_cuda.dct(dct_cuda.dct(x, expk1).transpose_(dim0=-2, dim1=-1).contiguous(), expk0).transpose_(dim0=-2, dim1=-1).contiguous()\n elif algorithm == '2N':\n output = dct_cuda.dct2_2N(x, expk0, expk1)\n #output = dct_cuda.dct_2N(dct_cuda.dct_2N(x, expk1).transpose_(dim0=-2, dim1=-1).contiguous(), expk0).transpose_(dim0=-2, dim1=-1).contiguous()\n else:\n if 
algorithm == 'N': \n output = dct_cpp.dct2(x, expk0, expk1, torch.get_num_threads())\n #output = dct_cpp.dct(dct_cpp.dct(x, expk1, torch.get_num_threads()).transpose_(dim0=-2, dim1=-1).contiguous(), expk0, torch.get_num_threads()).transpose_(dim0=-2, dim1=-1).contiguous()\n elif algorithm == '2N':\n output = dct_cpp.dct2_2N(x, expk0, expk1, torch.get_num_threads())\n return output \n\nclass DCT2Function(Function):\n @staticmethod\n def forward(ctx, x, expk0, expk1, algorithm):\n return dct2(x, expk0, expk1, algorithm)\n\nclass DCT2(nn.Module):\n def __init__(self, expk0=None, expk1=None, algorithm='N'):\n super(DCT2, self).__init__()\n self.expk0 = expk0\n self.expk1 = expk1\n self.algorithm = algorithm\n def forward(self, x): \n if self.expk0 is None or self.expk0.size(-2) != x.size(-2):\n self.expk0 = discrete_spectral_transform.get_expk(x.size(-2), dtype=x.dtype, device=x.device)\n if self.expk1 is None or self.expk1.size(-2) != x.size(-1):\n self.expk1 = discrete_spectral_transform.get_expk(x.size(-1), dtype=x.dtype, device=x.device)\n return DCT2Function.apply(x, self.expk0, self.expk1, self.algorithm)\n\ndef idct2(x, expk0, expk1, algorithm='N'):\n \"\"\"compute 2D inverse discrete cosine transformation, using N-FFT or 2N-FFT\n \"\"\"\n if x.is_cuda:\n if algorithm == 'N': \n output = dct_cuda.idct2(x, expk0, expk1)\n #output = dct_cuda.idct(dct_cuda.idct(x, expk1).transpose_(dim0=-2, dim1=-1).contiguous(), expk0).transpose_(dim0=-2, dim1=-1).contiguous()\n elif algorithm == '2N': \n output = dct_cuda.idct2_2N(x, expk0, expk1)\n else:\n if algorithm == 'N': \n output = dct_cpp.idct2(x, expk0, expk1, torch.get_num_threads())\n #output = dct_cpp.idct(dct_cpp.idct(x, expk1, torch.get_num_threads()).transpose_(dim0=-2, dim1=-1).contiguous(), expk0, torch.get_num_threads()).transpose_(dim0=-2, dim1=-1).contiguous()\n elif algorithm == '2N': \n output = dct_cpp.idct2_2N(x, expk0, expk1, torch.get_num_threads())\n return output \n\nclass IDCT2Function(Function):\n @staticmethod\n def forward(ctx, x, expk0, expk1, algorithm):\n return idct2(x, expk0, expk1, algorithm)\n\nclass IDCT2(nn.Module):\n def __init__(self, expk0=None, expk1=None, algorithm='N'):\n super(IDCT2, self).__init__()\n self.expk0 = expk0\n self.expk1 = expk1\n self.algorithm = algorithm\n def forward(self, x): \n if self.expk0 is None or self.expk0.size(-2) != x.size(-2):\n self.expk0 = discrete_spectral_transform.get_expk(x.size(-2), dtype=x.dtype, device=x.device)\n if self.expk1 is None or self.expk1.size(-2) != x.size(-1):\n self.expk1 = discrete_spectral_transform.get_expk(x.size(-1), dtype=x.dtype, device=x.device)\n return IDCT2Function.apply(x, self.expk0, self.expk1, self.algorithm)\n\ndef dst(x, expk):\n \"\"\"compute discrete sine transformation\n yk = \\sum_{n=0}^{N-1} x_n cos(pi/N*(n+1/2)*(k+1))\n \"\"\"\n if x.is_cuda:\n output = dct_cuda.dst(x.view([-1, x.size(-1)]), expk)\n else:\n output = dct_cpp.dst(x.view([-1, x.size(-1)]), expk, torch.get_num_threads())\n return output.view(x.size()) \n\nclass DSTFunction(Function):\n @staticmethod\n def forward(ctx, x, expk):\n return dst(x, expk)\n\nclass DST(nn.Module):\n def __init__(self, expk=None):\n super(DST, self).__init__()\n self.expk = expk\n def forward(self, x): \n if self.expk is None or self.expk.size(-2) != x.size(-1):\n self.expk = discrete_spectral_transform.get_expk(x.size(-1), dtype=x.dtype, device=x.device)\n return DSTFunction.apply(x, self.expk)\n\ndef idst(x, expk):\n \"\"\"Compute inverse discrete sine transformation, which is also the DST 
III\n yk = Im { (-1)^k*x_{N-1}/2 + \\sum_{n=0}^{N-2} xn exp(j*pi/(2N)*(n+1)*(2k+1)) }\n The actual yk will be scaled by 2 to match other python implementation\n \"\"\"\n if x.is_cuda:\n output = dct_cuda.idst(x.view([-1, x.size(-1)]), expk)\n else:\n output = dct_cpp.idst(x.view([-1, x.size(-1)]), expk, torch.get_num_threads())\n return output.view(x.size()) \n\nclass IDSTFunction(Function):\n @staticmethod\n def forward(ctx, x, expk):\n return idst(x, expk)\n\nclass IDST(nn.Module):\n def __init__(self, expk=None):\n super(IDST, self).__init__()\n self.expk = expk\n def forward(self, x): \n if self.expk is None or self.expk.size(-2) != x.size(-1):\n self.expk = discrete_spectral_transform.get_expk(x.size(-1), dtype=x.dtype, device=x.device)\n return IDSTFunction.apply(x, self.expk)\n\ndef idxct(x, expk):\n \"\"\"compute inverse discrete cosine transformation\n This is different from ordinary formulation for IDCT III\n yk = Re { \\sum_{n=0}^{N-1} xn exp(j*pi/(2N)*n*(2k+1)) }\n \"\"\"\n if x.is_cuda:\n output = dct_cuda.idxct(x.view([-1, x.size(-1)]), expk)\n else:\n output = dct_cpp.idxct(x.view([-1, x.size(-1)]), expk, torch.get_num_threads())\n #output = IDCTFunction.forward(ctx, x, expk)\n #output.add_(x[..., 0].unsqueeze(-1)).mul_(0.5)\n ##output.mul_(0.5).add_(x[..., 0].unsqueeze(-1).mul(0.5))\n return output.view(x.size()) \n\nclass IDXCTFunction(Function):\n @staticmethod\n def forward(ctx, x, expk):\n return idxct(x, expk)\n\nclass IDXCT(nn.Module):\n def __init__(self, expk=None):\n super(IDXCT, self).__init__()\n self.expk = expk\n def forward(self, x): \n if self.expk is None or self.expk.size(-2) != x.size(-1):\n self.expk = discrete_spectral_transform.get_expk(x.size(-1), dtype=x.dtype, device=x.device)\n return IDXCTFunction.apply(x, self.expk)\n\ndef idxst(x, expk):\n \"\"\"compute inverse discrete sine transformation\n This is different from ordinary formulation for IDCT III\n yk = Im { \\sum_{n=0}^{N-1} xn exp(j*pi/(2N)*n*(2k+1)) }\n \"\"\"\n if x.is_cuda:\n output = dct_cuda.idxst(x.view([-1, x.size(-1)]), expk)\n else:\n output = dct_cpp.idxst(x.view([-1, x.size(-1)]), expk, torch.get_num_threads())\n return output.view(x.size()) \n\nclass IDXSTFunction(Function):\n @staticmethod\n def forward(ctx, x, expk):\n return idxst(x, expk)\n\nclass IDXST(nn.Module):\n def __init__(self, expk=None):\n super(IDXST, self).__init__()\n self.expk = expk\n def forward(self, x): \n if self.expk is None or self.expk.size(-2) != x.size(-1):\n self.expk = discrete_spectral_transform.get_expk(x.size(-1), dtype=x.dtype, device=x.device)\n return IDXSTFunction.apply(x, self.expk)\n\ndef idcct2(x, expk0, expk1):\n \"\"\"compute inverse discrete cosine-sine transformation\n This is equivalent to idcct(idcct(x)^T)^T\n \"\"\"\n if x.is_cuda:\n output = dct_cuda.idcct2(x.view([-1, x.size(-1)]), expk0, expk1)\n else:\n output = dct_cpp.idcct2(x.view([-1, x.size(-1)]), expk0, expk1, torch.get_num_threads())\n return output.view(x.size()) \n\nclass IDCCT2Function(Function):\n @staticmethod\n def forward(ctx, x, expk0, expk1):\n return idcct2(x, expk0, expk1)\n\nclass IDCCT2(nn.Module):\n def __init__(self, expk0=None, expk1=None):\n super(IDCCT2, self).__init__()\n self.expk0 = expk0\n self.expk1 = expk1\n def forward(self, x): \n if self.expk0 is None or self.expk0.size(-2) != x.size(-2):\n self.expk0 = discrete_spectral_transform.get_expk(x.size(-2), dtype=x.dtype, device=x.device)\n if self.expk1 is None or self.expk1.size(-2) != x.size(-1):\n self.expk1 = 
discrete_spectral_transform.get_expk(x.size(-1), dtype=x.dtype, device=x.device)\n return IDCCT2Function.apply(x, self.expk0, self.expk1)\n\ndef idcst2(x, expk0, expk1):\n \"\"\"compute inverse discrete cosine-sine transformation\n This is equivalent to idxct(idxst(x)^T)^T\n \"\"\"\n if x.is_cuda:\n output = dct_cuda.idcst2(x.view([-1, x.size(-1)]), expk0, expk1)\n else:\n output = dct_cpp.idcst2(x.view([-1, x.size(-1)]), expk0, expk1, torch.get_num_threads())\n return output.view(x.size()) \n\nclass IDCST2Function(Function):\n @staticmethod\n def forward(ctx, x, expk0, expk1):\n return idcst2(x, expk0, expk1)\n\nclass IDCST2(nn.Module):\n def __init__(self, expk0=None, expk1=None):\n super(IDCST2, self).__init__()\n self.expk0 = expk0\n self.expk1 = expk1\n def forward(self, x): \n if self.expk0 is None or self.expk0.size(-2) != x.size(-2):\n self.expk0 = discrete_spectral_transform.get_expk(x.size(-2), dtype=x.dtype, device=x.device)\n if self.expk1 is None or self.expk1.size(-2) != x.size(-1):\n self.expk1 = discrete_spectral_transform.get_expk(x.size(-1), dtype=x.dtype, device=x.device)\n return IDCST2Function.apply(x, self.expk0, self.expk1)\n\ndef idsct2(x, expk0, expk1):\n \"\"\"compute inverse discrete cosine-sine transformation\n This is equivalent to idxst(idxct(x)^T)^T\n \"\"\"\n if x.is_cuda:\n output = dct_cuda.idsct2(x.view([-1, x.size(-1)]), expk0, expk1)\n else:\n output = dct_cpp.idsct2(x.view([-1, x.size(-1)]), expk0, expk1, torch.get_num_threads())\n return output.view(x.size()) \n\nclass IDSCT2Function(Function):\n @staticmethod\n def forward(ctx, x, expk0, expk1):\n return idsct2(x, expk0, expk1)\n\nclass IDSCT2(nn.Module):\n def __init__(self, expk0=None, expk1=None):\n super(IDSCT2, self).__init__()\n self.expk0 = expk0\n self.expk1 = expk1\n def forward(self, x): \n if self.expk0 is None or self.expk0.size(-2) != x.size(-2):\n self.expk0 = discrete_spectral_transform.get_expk(x.size(-2), dtype=x.dtype, device=x.device)\n if self.expk1 is None or self.expk1.size(-2) != x.size(-1):\n self.expk1 = discrete_spectral_transform.get_expk(x.size(-1), dtype=x.dtype, device=x.device)\n return IDSCT2Function.apply(x, self.expk0, self.expk1)\n\ndef idct_idxst(x, expk0, expk1):\n \"\"\"compute inverse discrete cosine-sine transformation\n This is equivalent to idct(idxst(x)^T)^T\n \"\"\"\n if x.is_cuda:\n output = dct_cuda.idct_idxst(x.view([-1, x.size(-1)]), expk0, expk1)\n else:\n output = dct_cpp.idct_idxst(x.view([-1, x.size(-1)]), expk0, expk1, torch.get_num_threads())\n return output.view(x.size()) \n\nclass IDCT_IDXSTFunction(Function):\n @staticmethod\n def forward(ctx, x, expk0, expk1):\n return idct_idxst(x, expk0, expk1)\n\nclass IDCT_IDXST(nn.Module):\n def __init__(self, expk0=None, expk1=None):\n super(IDCT_IDXST, self).__init__()\n self.expk0 = expk0\n self.expk1 = expk1\n def forward(self, x): \n if self.expk0 is None or self.expk0.size(-2) != x.size(-2):\n self.expk0 = discrete_spectral_transform.get_expk(x.size(-2), dtype=x.dtype, device=x.device)\n if self.expk1 is None or self.expk1.size(-2) != x.size(-1):\n self.expk1 = discrete_spectral_transform.get_expk(x.size(-1), dtype=x.dtype, device=x.device)\n return IDCT_IDXSTFunction.apply(x, self.expk0, self.expk1)\n\ndef idxst_idct(x, expk0, expk1):\n \"\"\"compute inverse discrete cosine-sine transformation\n This is equivalent to idxst(idct(x)^T)^T\n \"\"\"\n if x.is_cuda:\n output = dct_cuda.idxst_idct(x.view([-1, x.size(-1)]), expk0, expk1)\n else:\n output = dct_cpp.idxst_idct(x.view([-1, x.size(-1)]), 
expk0, expk1, torch.get_num_threads())\n return output.view(x.size()) \n\nclass IDXST_IDCTFunction(Function):\n @staticmethod\n def forward(ctx, x, expk0, expk1):\n return idxst_idct(x, expk0, expk1)\n\n\nclass IDXST_IDCT(nn.Module):\n def __init__(self, expk0=None, expk1=None):\n super(IDXST_IDCT, self).__init__()\n self.expk0 = expk0\n self.expk1 = expk1\n def forward(self, x): \n if self.expk0 is None or self.expk0.size(-2) != x.size(-2):\n self.expk0 = discrete_spectral_transform.get_expk(x.size(-2), dtype=x.dtype, device=x.device)\n if self.expk1 is None or self.expk1.size(-2) != x.size(-1):\n self.expk1 = discrete_spectral_transform.get_expk(x.size(-1), dtype=x.dtype, device=x.device)\n return IDXST_IDCTFunction.apply(x, self.expk0, self.expk1)\n"
] |
[
[
"torch.get_num_threads"
]
] |
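The `dct()` docstring above quotes the sum yk = Σ_n x_n cos(pi/N·n·(k+1/2)). Below is a naive torch evaluation of exactly that sum — no FFT, no compiled extensions — useful only as a small-input sanity check; its scaling may differ from the dct_cpp/dct_cuda kernels:

import math
import torch

def naive_dct(x):
    """Direct evaluation of y_k = sum_n x_n * cos(pi/N * n * (k + 1/2)),
    the formula quoted in the dct() docstring above."""
    N = x.size(-1)
    n = torch.arange(N, dtype=x.dtype).unsqueeze(1)   # (N, 1) -> index n
    k = torch.arange(N, dtype=x.dtype).unsqueeze(0)   # (1, N) -> index k
    basis = torch.cos(math.pi / N * n * (k + 0.5))    # (N, N) cosine basis
    return x @ basis                                  # sums over n for each k

x = torch.randn(4, 16)
print(naive_dct(x).shape)                             # torch.Size([4, 16])
print("CPU threads available to the C++ path:", torch.get_num_threads())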
moorea1/DS595CS525-RL-Projects
|
[
"96931df2ca61dc3ce33393cdcbaa20d414081ea7"
] |
[
"Project2/Project2-2/td.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport numpy as np\nimport random\nfrom collections import defaultdict\n#-------------------------------------------------------------------------\n'''\n Temporal Difference\n In this problem, you will implememnt an AI player for cliffwalking.\n The main goal of this problem is to get familar with temporal diference algorithm.\n You could test the correctness of your code \n by typing 'nosetests -v td_test.py' in the terminal.\n \n You don't have to follow the comments to write your code. They are provided\n as hints in case you need. \n'''\n#-------------------------------------------------------------------------\n\ndef epsilon_greedy(Q, state, nA, epsilon = 0.1):\n \"\"\"Selects epsilon-greedy action for supplied state.\n \n Parameters:\n -----------\n Q: dict()\n A dictionary that maps from state -> action-values,\n where Q[s][a] is the estimated action value corresponding to state s and action a. \n state: int\n current state\n nA: int\n Number of actions in the environment\n epsilon: float\n The probability to select a random action, range between 0 and 1\n \n Returns:\n --------\n action: int\n action based current state\n Hints:\n You can use the function from project2-1\n \"\"\"\n ############################\n # YOUR IMPLEMENTATION HERE #\n\n\n\n\n\n ############################\n return action\n\ndef sarsa(env, n_episodes, gamma=1.0, alpha=0.5, epsilon=0.1):\n \"\"\"On-policy TD control. Find an optimal epsilon-greedy policy.\n \n Parameters:\n -----------\n env: function\n OpenAI gym environment\n n_episodes: int\n Number of episodes to sample\n gamma: float\n Gamma discount factor, range between 0 and 1\n alpha: float\n step size, range between 0 and 1\n epsilon: float\n The probability to select a random action, range between 0 and 1\n Returns:\n --------\n Q: dict()\n A dictionary that maps from state -> action-values,\n where Q[s][a] is the estimated action value corresponding to state s and action a. \n Hints:\n -----\n You could consider decaying epsilon, i.e. epsilon = 0.99*epsilon during each episode.\n \"\"\"\n \n # a nested dictionary that maps state -> (action -> action-value)\n # e.g. Q[state] = np.darrary(nA)\n Q = defaultdict(lambda: np.zeros(env.action_space.n))\n \n ############################\n # YOUR IMPLEMENTATION HERE #\n \n # loop n_episodes\n\n # define decaying epsilon\n\n\n # initialize the environment \n\n \n # get an action from policy\n\n # loop for each step of episode\n\n # return a new state, reward and done\n\n # get next action\n\n \n # TD update\n # td_target\n\n # td_error\n\n # new Q\n\n \n # update state\n\n # update action\n\n ############################\n return Q\n\ndef q_learning(env, n_episodes, gamma=1.0, alpha=0.5, epsilon=0.1):\n \"\"\"Off-policy TD control. Find an optimal epsilon-greedy policy.\n \n Parameters:\n -----------\n env: function\n OpenAI gym environment\n n_episodes: int\n Number of episodes to sample\n gamma: float\n Gamma discount factor, range between 0 and 1\n alpha: float\n step size, range between 0 and 1\n epsilon: float\n The probability to select a random action, range between 0 and 1\n Returns:\n --------\n Q: dict()\n A dictionary that maps from state -> action-values,\n where Q[s][a] is the estimated action value corresponding to state s and action a. \n \"\"\"\n # a nested dictionary that maps state -> (action -> action-value)\n # e.g. 
Q[state] = np.darrary(nA)\n Q = defaultdict(lambda: np.zeros(env.action_space.n))\n \n ############################\n # YOUR IMPLEMENTATION HERE #\n \n # loop n_episodes\n\n # initialize the environment \n\n \n # loop for each step of episode\n\n # get an action from policy\n \n # return a new state, reward and done\n \n # TD update\n # td_target with best Q\n\n # td_error\n\n # new Q\n \n # update state\n\n ############################\n return Q\n"
] |
[
[
"numpy.zeros"
]
] |
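The assignment file above intentionally leaves `epsilon_greedy` blank; the following is just one common way such a stub is filled in, using the same `defaultdict`-of-`numpy.zeros` Q table:

import random
import numpy as np
from collections import defaultdict

def epsilon_greedy(Q, state, nA, epsilon=0.1):
    """Pick a random action with probability epsilon, otherwise the greedy one."""
    if random.random() < epsilon:
        return random.randint(0, nA - 1)
    return int(np.argmax(Q[state]))

nA = 4
Q = defaultdict(lambda: np.zeros(nA))   # same Q initialisation as in sarsa()/q_learning()
Q[0][2] = 1.0                           # make action 2 look best in state 0
print(epsilon_greedy(Q, state=0, nA=nA, epsilon=0.0))   # -> 2 (pure greedy)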
wenyudu/SDLM
|
[
"7446445e4e932bf9c645b3c8be1bf7068ad8af5b"
] |
[
"ON_LSTM.py"
] |
[
"import torch.nn.functional as F\nimport torch.nn as nn\nimport torch\nimport numpy\n\nfrom locked_dropout import LockedDropout\n\n\nclass LayerNorm(nn.Module):\n\n def __init__(self, features, eps=1e-6):\n super(LayerNorm, self).__init__()\n self.gamma = nn.Parameter(torch.ones(features))\n self.beta = nn.Parameter(torch.zeros(features))\n self.eps = eps\n\n def forward(self, x):\n mean = x.mean(-1, keepdim=True)\n std = x.std(-1, keepdim=True)\n return self.gamma * (x - mean) / (std + self.eps) + self.beta\n\n\nclass LinearDropConnect(nn.Linear):\n def __init__(self, in_features, out_features, bias=True, dropout=0.):\n super(LinearDropConnect, self).__init__(\n in_features=in_features,\n out_features=out_features,\n bias=bias\n )\n self.dropout = dropout\n\n def sample_mask(self):\n if self.dropout == 0.:\n self._weight = self.weight\n else:\n mask = self.weight.new_empty(\n self.weight.size(),\n dtype=torch.uint8\n )\n mask.bernoulli_(self.dropout)\n self._weight = self.weight.masked_fill(mask.bool(), 0.)\n\n def forward(self, input, sample_mask=False):\n if self.training:\n if sample_mask:\n self.sample_mask()\n return F.linear(input, self._weight, self.bias)\n else:\n return F.linear(input, self.weight * (1 - self.dropout),\n self.bias)\n\n\ndef cumsoftmax(x, dim=-1):\n return torch.cumsum(F.softmax(x, dim=dim), dim=dim)\n\n\ndef softmax(x, dim=-1):\n return F.softmax(x, dim=dim)\n\ndef cum(x, dim=-1):\n return torch.cumsum(x, dim=dim)\n\n\n\nclass ONLSTMCell(nn.Module):\n\n def __init__(self, input_size, hidden_size, chunk_size, wds='no', dropconnect=0.):\n super(ONLSTMCell, self).__init__()\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.chunk_size = chunk_size\n self.n_chunk = int(hidden_size / chunk_size)\n\n self.ih = nn.Sequential(\n nn.Linear(input_size, 4 * hidden_size + self.n_chunk * 2, bias=True),\n # LayerNorm(3 * hidden_size)\n )\n self.hh = LinearDropConnect(hidden_size, hidden_size * 4 + self.n_chunk * 2, bias=True, dropout=dropconnect)\n\n # self.c_norm = LayerNorm(hidden_size)\n\n # self.fwh = LinearDropConnect(self.n_chunk, self.n_chunk, bias=True, dropout=dropconnect)\n\n # self.drop_weight_modules = [self.hh,self.fwh]\n\n self.wds = wds\n if self.wds != 'no':\n self.fwh = LinearDropConnect(self.n_chunk, self.n_chunk, bias=True, dropout=dropconnect)\n self.drop_weight_modules = [self.hh, self.fwh]\n else:\n self.drop_weight_modules = [self.hh]\n\n # self.wds = wds\n # if self.wds != 'no':\n # self.weighted_sd_vector = nn.Parameter(torch.zeros(self.n_chunk))\n\n def forward(self, input, hidden,\n transformed_input=None):\n hx, cx = hidden\n\n if transformed_input is None:\n transformed_input = self.ih(input)\n\n gates = transformed_input + self.hh(hx)\n cingate_raw, cforgetgate_raw = gates[:, :self.n_chunk * 2].chunk(2, 1)\n outgate, cell, ingate, forgetgate = gates[:, self.n_chunk * 2:].view(-1, self.n_chunk * 4,\n self.chunk_size).chunk(4, 1)\n cingate = 1. - cumsoftmax(cingate_raw)\n distance_cin = cingate.sum(dim=-1) / self.n_chunk\n\n cforgetgate = cumsoftmax(cforgetgate_raw)\n distance_cforget = 1. - cforgetgate.sum(dim=-1) / self.n_chunk\n\n if self.wds != 'no':\n c_w_forgetgate = cumsoftmax(self.fwh(cforgetgate_raw))\n distance_w_cforget = 1. 
- c_w_forgetgate.sum(dim=-1) / self.n_chunk\n else:\n distance_w_cforget = distance_cforget\n\n cingate = cingate[:, :, None]\n cforgetgate = cforgetgate[:, :, None]\n\n ingate = torch.sigmoid(ingate)\n forgetgate = torch.sigmoid(forgetgate)\n cell = torch.tanh(cell)\n outgate = torch.sigmoid(outgate)\n\n # cy = cforgetgate * forgetgate * cx + cingate * ingate * cell\n\n overlap = cforgetgate * cingate\n forgetgate = forgetgate * overlap + (cforgetgate - overlap)\n ingate = ingate * overlap + (cingate - overlap)\n cy = forgetgate * cx + ingate * cell\n\n # hy = outgate * F.tanh(self.c_norm(cy))\n hy = outgate * torch.tanh(cy)\n\n # self.last = [transformed_input, cforgetgate, weight, distance_cforget,hy,cy]\n # if self.wds != 'no':\n # # return hy.view(-1, self.hidden_size), cy ,(origin_distance_cforget, distance_cforget, distance_cin,self.weighted_sd_vector)\n # else:\n return hy.view(-1, self.hidden_size), cy ,(distance_cforget, distance_w_cforget, distance_cin,distance_cin)\n\n\n def init_hidden(self, bsz):\n weight = next(self.parameters()).data\n return (weight.new(bsz, self.hidden_size).zero_(),\n weight.new(bsz, self.n_chunk, self.chunk_size).zero_())\n\n def sample_masks(self):\n for m in self.drop_weight_modules:\n m.sample_mask()\n\n\nclass ONLSTMStack(nn.Module):\n def __init__(self, layer_sizes, chunk_size, l4d=0,wds='no', dropout=0., dropconnect=0.):\n super(ONLSTMStack, self).__init__()\n self.cells = nn.ModuleList([ONLSTMCell(layer_sizes[i],\n layer_sizes[i + 1],\n chunk_size,\n wds=wds,\n dropconnect=dropconnect)\n for i in range(len(layer_sizes) - 1)])\n self.lockdrop = LockedDropout()\n self.dropout = dropout\n self.sizes = layer_sizes\n self.l4d = l4d\n\n def init_hidden(self, bsz):\n return [c.init_hidden(bsz) for c in self.cells]\n\n def forward(self, input, hidden):\n length, batch_size, _ = input.size()\n\n if self.training:\n for c in self.cells:\n c.sample_masks()\n\n prev_state = list(hidden)\n prev_layer = input\n\n raw_outputs = []\n outputs = []\n distances_forget = []\n origin_distances_forget = []\n distances_in = []\n weighted_sd_vector=[]\n\n for l in range(len(self.cells)):\n curr_layer = [None] * length\n dist = [None] * length\n t_input = self.cells[l].ih(prev_layer)\n\n for t in range(length):\n hidden, cell, d = self.cells[l](\n None, prev_state[l],\n transformed_input=t_input[t]\n )\n prev_state[l] = hidden, cell # overwritten every timestep\n curr_layer[t] = hidden\n dist[t] = d\n\n prev_layer = torch.stack(curr_layer)\n origin_dist_cforget, dist_cforget, dist_cin, wsd_vector = zip(*dist)\n origin_dist_layer_cforget = torch.stack(origin_dist_cforget)\n dist_layer_cforget = torch.stack(dist_cforget)\n dist_layer_cin = torch.stack(dist_cin)\n wsd_layer_vector = torch.stack(wsd_vector)\n raw_outputs.append(prev_layer)\n if l < len(self.cells) - 1:\n prev_layer = self.lockdrop(prev_layer, self.dropout)\n outputs.append(prev_layer)\n distances_forget.append(dist_layer_cforget)\n origin_distances_forget.append(origin_dist_layer_cforget)\n distances_in.append(dist_layer_cin)\n if l == self.l4d:\n weighted_sd_vector.append(wsd_layer_vector)\n output = prev_layer\n # print(self.cells[2].weighted_vector[0])\n\n return output, prev_state, raw_outputs, outputs, (torch.stack(origin_distances_forget),torch.stack(distances_forget), torch.stack(distances_in), torch.stack(weighted_sd_vector))\n\n\nif __name__ == \"__main__\":\n x = torch.Tensor(10, 10, 10)\n x.data.normal_()\n lstm = ONLSTMStack([10, 10, 10], chunk_size=10)\n print(lstm(x, lstm.init_hidden(10))[1])\n"
] |
[
[
"torch.sigmoid",
"torch.nn.functional.softmax",
"torch.ones",
"torch.Tensor",
"torch.zeros",
"torch.tanh",
"torch.nn.Linear",
"torch.stack",
"torch.cumsum",
"torch.nn.functional.linear"
]
] |
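The key primitive in `ONLSTMCell` above is `cumsoftmax` — a cumulative sum over a softmax — which turns the master-gate logits into monotone 0-to-1 profiles. A tiny sketch of that helper and the `distance_cforget` statistic derived from it:

import torch
import torch.nn.functional as F

def cumsoftmax(x, dim=-1):
    # Softmax gives a probability vector; cumsum turns it into a monotone CDF in [0, 1]
    return torch.cumsum(F.softmax(x, dim=dim), dim=dim)

logits = torch.randn(2, 5)               # (batch, n_chunk) master-gate logits
cforget = cumsoftmax(logits)             # each row increases monotonically up to 1.0
distance = 1.0 - cforget.sum(dim=-1) / logits.size(-1)   # same form as distance_cforget above

print(cforget[0])                        # last entry of each row is 1.0
print(distance)                          # values in [0, 1)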
Mozartich/triqler
|
[
"b958958342f6c78a218aefa605955ec3fb520e28"
] |
[
"triqler/triqler.py"
] |
[
"from __future__ import print_function\n\n\"\"\"triqler.triqler: provides entry point main().\"\"\"\n\n__version__ = \"0.4.0\"\n__copyright__ = '''Copyright (c) 2018-2020 Matthew The. All rights reserved.\nWritten by Matthew The (matthew.the@scilifelab.se) in the\nSchool of Engineering Sciences in Chemistry, Biotechnology and Health at the \nRoyal Institute of Technology in Stockholm.'''\n\nimport sys\nimport os\nimport collections\nimport copy\nimport csv\nimport multiprocessing\nimport warnings\n\nimport numpy as np\n\nfrom . import parsers\nfrom . import qvality\nfrom . import hyperparameters\nfrom . import pgm\nfrom . import diff_exp\n\ndef main():\n print('Triqler version %s\\n%s' % (__version__, __copyright__))\n print('Issued command:', os.path.basename(__file__) + \" \" + \" \".join(map(str, sys.argv[1:])))\n \n args, params = parseArgs()\n \n params['warningFilter'] = \"ignore\"\n with warnings.catch_warnings():\n warnings.simplefilter(params['warningFilter'])\n runTriqler(params, args.in_file, args.out_file)\n\ndef parseArgs():\n import argparse\n apars = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n apars.add_argument('in_file', default=None, metavar = \"IN_FILE\",\n help='''List of PSMs with abundances (not log transformed!) \n and search engine score. See README for a detailed \n description of the columns.\n ''')\n \n apars.add_argument('--out_file', default = \"proteins.tsv\", metavar='OUT', \n help='''Path to output file (writing in TSV format). \n N.B. if more than 2 treatment groups are present, \n suffixes will be added before the file extension.\n ''')\n \n apars.add_argument('--fold_change_eval', type=float, default=1.0, metavar='F',\n help='log2 fold change evaluation threshold.')\n \n apars.add_argument('--decoy_pattern', default = \"decoy_\", metavar='P', \n help='Prefix for decoy proteins.')\n \n apars.add_argument('--min_samples', type=int, default=2, metavar='N', \n help='Minimum number of samples a peptide needed to be quantified in.')\n # Peptides quantified in less than the minimum number will be discarded\n \n apars.add_argument('--num_threads', type=int, default=multiprocessing.cpu_count(), metavar='N', \n help='Number of threads, by default this is equal to the number of CPU cores available on the device.')\n \n apars.add_argument('--ttest',\n help='Use t-test for evaluating differential expression instead of posterior probabilities.',\n action='store_true')\n \n apars.add_argument('--write_spectrum_quants',\n help='Write quantifications for consensus spectra. 
Only works if consensus spectrum index are given in input.',\n action='store_true')\n \n apars.add_argument('--write_protein_posteriors', default = '', metavar='P_OUT',\n help='Write raw data of protein posteriors to the specified file in TSV format.')\n \n apars.add_argument('--write_group_posteriors', default = '', metavar='G_OUT',\n help='Write raw data of treatment group posteriors to the specified file in TSV format.')\n \n apars.add_argument('--write_fold_change_posteriors', default = '', metavar='F_OUT',\n help='Write raw data of fold change posteriors to the specified file in TSV format.')\n \n # ------------------------------------------------\n args = apars.parse_args()\n \n params = dict()\n params['warningFilter'] = \"default\"\n params['foldChangeEval'] = args.fold_change_eval\n params['t-test'] = args.ttest\n params['minSamples'] = args.min_samples\n params['decoyPattern'] = args.decoy_pattern\n params['numThreads'] = args.num_threads\n params['writeSpectrumQuants'] = args.write_spectrum_quants\n params['proteinPosteriorsOutput'] = args.write_protein_posteriors\n params['groupPosteriorsOutput'] = args.write_group_posteriors\n params['foldChangePosteriorsOutput'] = args.write_fold_change_posteriors\n params['returnPosteriors'] = len(params['proteinPosteriorsOutput']) > 0 or len(params['groupPosteriorsOutput']) > 0 or len(params['foldChangePosteriorsOutput']) > 0\n \n if params['minSamples'] < 2:\n sys.exit(\"ERROR: --min_samples should be >= 2\")\n \n return args, params\n \ndef runTriqler(params, triqlerInputFile, triqlerOutputFile): \n from timeit import default_timer as timer\n\n start = timer()\n\n if not os.path.isfile(triqlerInputFile):\n sys.exit(\"Could not locate input file %s. Check if the path is correct.\" % triqlerInputFile)\n \n params['hasLinkPEPs'] = parsers.hasLinkPEPs(triqlerInputFile)\n if triqlerInputFile.endswith(\".pqr.tsv\"):\n params['fileList'], params['groups'], params['groupLabels'], peptQuantRows = parsers.parsePeptideQuantFile(triqlerInputFile)\n else:\n peptQuantRowFile = triqlerInputFile + \".pqr.tsv\"\n peptQuantRows = convertTriqlerInputToPeptQuantRows(triqlerInputFile, peptQuantRowFile, params)\n \n qvalMethod = 'pvalues' if params['t-test'] else 'avg_pep'\n \n selectComparisonBayesTmp = lambda proteinOutputRows, comparisonKey : selectComparisonBayes(proteinOutputRows, comparisonKey, params['t-test'])\n diff_exp.doDiffExp(params, peptQuantRows, triqlerOutputFile, doPickedProteinQuantification, selectComparisonBayesTmp, qvalMethod = qvalMethod)\n\n end = timer()\n print(\"Triqler execution took\", end - start, \"seconds wall clock time\")\n\ndef convertTriqlerInputToPeptQuantRows(triqlerInputFile, peptQuantRowFile, params):\n peptQuantRowMap, getPEPFromScore, params['fileList'], params['groupLabels'], params['groups'] = groupTriqlerRowsByFeatureGroup(triqlerInputFile, params['decoyPattern'])\n \n if params['hasLinkPEPs'] and params['writeSpectrumQuants']:\n _, spectrumQuantRows, intensityDiv = _selectBestFeaturesPerRunAndPeptide(\n peptQuantRowMap, getPEPFromScore, params, \n groupingKey = lambda x : x.spectrumId)\n spectrumQuantRows = _divideIntensities(spectrumQuantRows, intensityDiv)\n spectrumQuantRows = _updateIdentPEPs(spectrumQuantRows, params['decoyPattern'], params['hasLinkPEPs'])\n \n specQuantRowFile = triqlerInputFile + \".sqr.tsv\"\n print(\"Writing spectrum quant rows to file:\", specQuantRowFile)\n parsers.printPeptideQuantRows(specQuantRowFile, parsers.getRunIds(params), spectrumQuantRows)\n \n spectrumToFeatureMatch, 
peptideQuantRows, intensityDiv = _selectBestFeaturesPerRunAndPeptide(peptQuantRowMap, getPEPFromScore, params)\n peptideQuantRows = _selectBestPeptideQuantRowPerFeatureGroup(spectrumToFeatureMatch, peptideQuantRows)\n peptideQuantRows = _divideIntensities(peptideQuantRows, intensityDiv)\n peptideQuantRows = _updateIdentPEPs(peptideQuantRows, params['decoyPattern'], params['hasLinkPEPs'])\n \n print(\"Writing peptide quant rows to file:\", peptQuantRowFile)\n parsers.printPeptideQuantRows(peptQuantRowFile, parsers.getRunIds(params), peptideQuantRows)\n \n return peptideQuantRows\n\ndef groupTriqlerRowsByFeatureGroup(triqlerInputFile, decoyPattern):\n print(\"Parsing triqler input file\")\n \n peptQuantRowMap = collections.defaultdict(list)\n seenSpectra = set()\n targetScores, decoyScores = list(), list()\n runCondPairs = list()\n for i, trqRow in enumerate(parsers.parseTriqlerInputFile(triqlerInputFile)):\n if i % 1000000 == 0:\n print(\" Reading row\", i)\n \n peptQuantRowMap[trqRow.featureClusterId].append(trqRow)\n if (trqRow.run, trqRow.condition) not in runCondPairs:\n runCondPairs.append((trqRow.run, trqRow.condition))\n \n if not np.isnan(trqRow.searchScore) and trqRow.spectrumId not in seenSpectra:\n if _isDecoy(trqRow.proteins, decoyPattern):\n decoyScores.append(trqRow.searchScore)\n else:\n targetScores.append(trqRow.searchScore)\n seenSpectra.add(trqRow.spectrumId)\n \n fileList, groupLabels, groups = _getFilesAndGroups(runCondPairs)\n \n print(\"Calculating identification PEPs\")\n getPEPFromScore = qvality.getPEPFromScoreLambda(targetScores, decoyScores)\n \n return peptQuantRowMap, getPEPFromScore, fileList, groupLabels, groups\n\ndef _selectBestFeaturesPerRunAndPeptide(peptQuantRowMap, getPEPFromScore, params, groupingKey = lambda x : x.peptide):\n print(\"Selecting best feature per run and spectrum\")\n numRuns = len(params['fileList'])\n \n minIntensity = 1e100\n noSpectrum = 0\n peptideQuantRows = list()\n spectrumToFeatureMatch = dict() # stores the best peptideQuantRow per (protein, spectrumIdx)-pair\n for featureGroupIdx, trqRows in peptQuantRowMap.items():\n if featureGroupIdx % 100000 == 0:\n print(\" featureGroupIdx:\", featureGroupIdx)\n \n bestFeaturesPerRun = _selectBestFeaturesPerFeatureGroup(trqRows, \n getPEPFromScore, params['fileList'], groupingKey, numRuns)\n \n for gKey in bestFeaturesPerRun:\n numRunsPresent = sum(1 for x in bestFeaturesPerRun[gKey] if x[0] < 1.01)\n if numRunsPresent < params['minSamples']:\n continue\n \n pqr = _convertFeatureGroupToPeptideQuantRow(bestFeaturesPerRun[gKey], \n getPEPFromScore, featureGroupIdx, numRuns)\n \n # some feature clusters might not have a spectrum associated with them\n if pqr.spectrum == 0:\n noSpectrum += 1\n pqr = pqr._replace(spectrum = -100 * noSpectrum)\n \n peptideQuantRows.append(pqr)\n \n minIntensity = min(minIntensity, min([x for x in pqr.quant if x > 0.0]))\n \n # combinedPEP field temporarily contains SVM score\n identPEP = getPEPFromScore(pqr.combinedPEP)\n peptLinkErrorProb = 1.0 - np.prod([1.0 - x for x in pqr.linkPEP if x < 1.01])\n combinedPEP = _combinePEPs(identPEP, peptLinkErrorProb)\n \n # multiple featureGroups can be associated with the same consensus spectrum\n # when two or more analytes match closely in prec m/z and retention time;\n # choose the best featureGroup per (peptide, spectrum)-pair based on combinedPEP\n # note that chimeric spectra can still be associated with multiple peptideQuantRows, \n # as the protein is included in the key\n key = (\",\".join(pqr.protein), 
pqr.spectrum / 100)\n if combinedPEP < spectrumToFeatureMatch.get(key, (-1, -1, 1.01))[2]:\n spectrumToFeatureMatch[key] = (pqr.spectrum, featureGroupIdx, combinedPEP)\n \n # divide intensities by a power of 10 for increased readability of peptide \n # output file, make sure that the lowest intensity retains two significant \n # digits after printing with two digits after the decimal point\n intensityDiv = np.power(10, np.floor(np.log10(minIntensity))+1)\n \n return spectrumToFeatureMatch, peptideQuantRows, intensityDiv\n\ndef _selectBestFeaturesPerFeatureGroup(trqRows, getPEPFromScore, fileList, groupingKey, numRuns):\n # groupingKey => array([linkPEP, triqlerInputRow])\n bestFeaturesPerRun = collections.defaultdict(lambda : [(1.01, None)]*numRuns)\n \n for trqRow in trqRows:\n fileIdx = fileList.index(trqRow.run)\n gKey = groupingKey(trqRow) \n bestPEPForRun, bestTrqRowForRun = bestFeaturesPerRun[gKey][fileIdx]\n\n combinedPEP = _combinePEPs(trqRow.linkPEP, getPEPFromScore(trqRow.searchScore))\n \n samePEPhigherIntensity = (combinedPEP == bestPEPForRun and \n trqRow.intensity > bestTrqRowForRun.intensity)\n if combinedPEP < bestPEPForRun or samePEPhigherIntensity:\n bestFeaturesPerRun[gKey][fileIdx] = (combinedPEP, trqRow)\n \n return bestFeaturesPerRun\n\ndef _convertFeatureGroupToPeptideQuantRow(bestFeaturesPerRun, getPEPFromScore, \n featureGroupIdx, numRuns):\n intensities, linkPEPs, identPEPs = [0.0]*numRuns, [1.01]*numRuns, [1.01]*numRuns\n first = True\n svmScore = -1e9\n for fileIdx, (_, trqRow) in enumerate(bestFeaturesPerRun):\n if trqRow == None:\n continue\n \n if first:\n charge, proteins, first = trqRow.charge, trqRow.proteins, False\n \n if trqRow.searchScore > svmScore or np.isnan(trqRow.searchScore):\n svmScore, spectrumId, peptide = trqRow.searchScore, trqRow.spectrumId, trqRow.peptide\n \n intensities[fileIdx] = trqRow.intensity\n linkPEPs[fileIdx] = trqRow.linkPEP #_combinePEPs(linkPEP, identPEP)\n identPEPs[fileIdx] = getPEPFromScore(trqRow.searchScore)\n \n # fill in PEPs for missing values\n linkPEPs = _setMissingAsMax(linkPEPs)\n identPEPs = _setMissingAsMax(identPEPs)\n \n return parsers.PeptideQuantRow(svmScore, charge, featureGroupIdx, \n spectrumId, linkPEPs, intensities, identPEPs, peptide, proteins)\n \ndef _setMissingAsMax(PEPs):\n maxPEP = max([x for x in PEPs if x <= 1.0])\n return [x if x <= 1.0 else maxPEP for x in PEPs]\n \ndef _getFilesAndGroups(runCondPairs):\n runCondPairs = sorted(runCondPairs, key = lambda x : (x[1], x[0]))\n fileList = list()\n groupLabels, groups = list(), list()\n for run, cond in runCondPairs:\n if run not in fileList:\n fileList.append(run)\n if cond not in groupLabels:\n groupLabels.append(cond)\n groups.append([])\n groups[groupLabels.index(cond)].append(len(fileList) - 1)\n \n if len(fileList) < 2:\n sys.exit(\"ERROR: There should be at least two runs.\")\n elif len(groups) < 2:\n sys.exit(\"ERROR: At least two conditions (treatment groups) should be specified.\")\n elif min([len(g) for g in groups]) < 2:\n sys.exit(\"ERROR: Each condition (treatment group) should have at least two runs.\")\n \n return fileList, groupLabels, groups\n\n# multiple peptideQuantRows can be associated with the same featureGroup \n# when two or more analytes match closely in prec m/z and retention time;\n# choose the best peptideQuantRow per featureGroup based on combinedPEP\ndef _selectBestPeptideQuantRowPerFeatureGroup(spectrumToFeatureMatch, peptideQuantRows):\n featureGroupToSpectrumIdx = dict()\n for (spectrumIdx, featureGroupIdx, 
combinedPEP) in spectrumToFeatureMatch.values():\n if combinedPEP < featureGroupToSpectrumIdx.get(featureGroupIdx, (-1, 1.01))[1]:\n featureGroupToSpectrumIdx[featureGroupIdx] = (spectrumIdx, combinedPEP)\n survivingSpectrumIdxs = set([y[0] for x, y in featureGroupToSpectrumIdx.items()])\n #print(\"Surviving spectrumIdxs:\", len(survivingSpectrumIdxs))\n \n peptideQuantRows = filter(lambda x : x.spectrum in survivingSpectrumIdxs, peptideQuantRows)\n \n return peptideQuantRows\n\ndef _divideIntensities(peptideQuantRows, intensityDiv = 1e6):\n print(\"Dividing intensities by %g for increased readability\" % intensityDiv)\n newPeptideQuantRows = list()\n for row in peptideQuantRows:\n row = row._replace(quant = list(map(lambda x : x/intensityDiv, row.quant)))\n newPeptideQuantRows.append(row)\n return newPeptideQuantRows\n\ndef doPickedProteinQuantification(peptQuantRows, params, proteinModifier, getEvalFeatures):\n notPickedProteinOutputRows = _groupPeptideQuantRowsByProtein(\n peptQuantRows, proteinModifier, params['decoyPattern'])\n \n np.random.shuffle(notPickedProteinOutputRows)\n notPickedProteinOutputRows = sorted(notPickedProteinOutputRows, key = lambda x : x[0], reverse = True)\n \n print(\"Calculating protein-level identification PEPs\")\n pickedProteinOutputRows, proteinPEPs = _pickedProteinStrategy(notPickedProteinOutputRows, params['decoyPattern']) \n \n print(\"Fitting hyperparameters\")\n hyperparameters.fitPriors(peptQuantRows, params)\n \n print(\"Calculating protein posteriors\")\n posteriors = getPosteriors(pickedProteinOutputRows, proteinPEPs, params)\n \n proteinQuantRows = _updateProteinQuantRows(pickedProteinOutputRows, posteriors, proteinPEPs, getEvalFeatures, params)\n \n return proteinQuantRows\n\ndef _updateProteinQuantRows(pickedProteinOutputRows, posteriors, proteinPEPs, \n getEvalFeatures, params):\n proteinQuantRows = list()\n sumPEP = 0.0\n for (linkPEP, protein, quantRows, numPeptides), (bayesQuantRow, muGroupDiffs, probsBelowFoldChange, posteriorDists), proteinPEP in zip(pickedProteinOutputRows, posteriors, proteinPEPs):\n evalFeatures = getEvalFeatures(bayesQuantRow)\n \n if not params['t-test']:\n evalFeatures[-1] = probsBelowFoldChange\n evalFeatures[-2] = muGroupDiffs\n \n sumPEP += proteinPEP\n if not params['t-test'] or sumPEP / (len(proteinQuantRows) + 1) < 0.05:\n proteinQuantRows.append([linkPEP, protein, quantRows, evalFeatures, numPeptides, proteinPEP, bayesQuantRow, posteriorDists])\n \n proteinQuantRows = sorted(proteinQuantRows, key = lambda x : (x[0], x[1]))\n return proteinQuantRows\n\ndef _groupPeptideQuantRowsByProtein(peptQuantRows, proteinModifier, decoyPattern):\n protQuantRows = parsers.filterAndGroupPeptides(peptQuantRows)\n \n proteinRows = list()\n for prot, quantRows in protQuantRows:\n psmIdx = 0\n quantRows = sorted(quantRows, key = lambda x : x.combinedPEP)\n seenPeptides = set()\n usablePeptides = 0\n filteredQuantRows = list()\n for quantRow in quantRows:\n cleanPeptide = parsers.cleanPeptide(quantRow.peptide)\n if cleanPeptide not in seenPeptides:\n seenPeptides.add(cleanPeptide)\n usablePeptides += 1\n filteredQuantRows.append(quantRow)\n\n if usablePeptides < 1:\n continue\n else:\n quantRows = filteredQuantRows\n protein = proteinModifier(prot)\n numPeptides = usablePeptides\n\n proteinOutputRow = (_getProteinScore(quantRows), list(quantRows[psmIdx].linkPEP), protein, quantRows, numPeptides)\n proteinRows.append(proteinOutputRow)\n \n return proteinRows\n\ndef _getProteinScore(quantRows):\n # logged version performs slightly 
worse on iPRG2016 set, but might \n # prevent convergence problems in the case of many peptides for a protein\n return np.log(-1*np.sum([np.log(x.combinedPEP) for x in quantRows]))\n\ndef _pickedProteinStrategy(notPickedProteinOutputRows, decoyPattern):\n pickedProteinOutputRows = list()\n targetScores, decoyScores = list(), list()\n seenProteins = set()\n for score, linkPEP, protein, quantRows, numPeptides in notPickedProteinOutputRows:\n evalProtein = protein.replace(decoyPattern, \"\", 1)\n if evalProtein not in seenProteins:\n seenProteins.add(evalProtein)\n if _isDecoy([protein], decoyPattern):\n decoyScores.append(score)\n else:\n targetScores.append(score)\n pickedProteinOutputRows.append([linkPEP, protein, quantRows, numPeptides])\n \n targetScores = np.array(targetScores)\n decoyScores = np.array(decoyScores)\n _, proteinPEPs = qvality.getQvaluesFromScores(targetScores, decoyScores, includePEPs = True, includeDecoys = True, tdcInput = True)\n \n if len(np.nonzero(proteinPEPs < 1.0)) == 0:\n sys.exit(\"ERROR: No proteins could be identified with a PEP below 1.0, cannot calculate posteriors.\")\n else:\n print(\" Identified\", qvality.countBelowFDR(proteinPEPs, 0.01), \"proteins at 1% FDR\")\n \n return pickedProteinOutputRows, proteinPEPs\n\ndef getPosteriors(pickedProteinOutputRows, peps, params):\n if params['numThreads'] > 1:\n from . import multiprocessing_pool as pool\n processingPool = pool.MyPool(processes = params['numThreads'], warningFilter = params['warningFilter'])\n \n addDummyPosteriors = 0\n posteriors = list()\n for (linkPEP, protein, quantRows, numPeptides), proteinIdPEP in zip(pickedProteinOutputRows, peps): \n if proteinIdPEP < 1.0:\n if params['numThreads'] > 1:\n processingPool.applyAsync(pgm.getPosteriors, [quantRows, params])\n else:\n posteriors.append(pgm.getPosteriors(quantRows, params))\n if len(posteriors) % 50 == 0:\n print(\" \", len(posteriors),\"/\", sum(1 for p in peps if p < 1.0), \"%.2f\" % (float(len(posteriors)) / sum(1 for p in peps if p < 1.0) * 100) + \"%\")\n else:\n addDummyPosteriors += 1\n \n if params['numThreads'] > 1:\n posteriors = processingPool.checkPool(printProgressEvery = 50)\n \n posteriors.extend([pgm.getDummyPosteriors(params)] * addDummyPosteriors)\n \n return posteriors\n \ndef selectComparisonBayes(proteinOutputRows, comparisonKey, tTest = False):\n proteinOutputRowsUpdatedPEP = list()\n for (linkPEP, protein, quantRows, evalFeatures, numPeptides, proteinPEP, bayesQuantRow, posteriorDists) in proteinOutputRows:\n evalFeaturesNew = copy.deepcopy(evalFeatures)\n evalFeaturesNew[-1] = evalFeatures[-1][comparisonKey] # probBelowFoldChange\n evalFeaturesNew[-2] = evalFeatures[-2][comparisonKey] # log2_fold_change\n if not tTest:\n combinedPEP = _combinePEPs(evalFeaturesNew[-1], proteinPEP)\n else:\n combinedPEP = evalFeaturesNew[-1]\n \n proteinOutputRowsUpdatedPEP.append([combinedPEP, linkPEP, protein, quantRows, evalFeaturesNew, numPeptides, proteinPEP, bayesQuantRow, posteriorDists])\n\n proteinOutputRowsUpdatedPEP = sorted(proteinOutputRowsUpdatedPEP, key = lambda x : (x[0], x[1]))\n return proteinOutputRowsUpdatedPEP\n\n# calculate peptide-level identification FDRs and update the linkPEPs with this estimate\ndef _updateIdentPEPs(peptideQuantRows, decoyPattern, hasLinkPEPs):\n print(\"Calculating peptide-level identification PEPs\")\n \n scoreIdxPairs = list()\n for i, row in enumerate(peptideQuantRows):\n if not np.isnan(row.combinedPEP):\n # row.combinedPEP contains the SVM score\n scoreIdxPairs.append([row.combinedPEP, i, 
_isDecoy(row.protein, decoyPattern)]) \n \n scoreIdxPairs = sorted(scoreIdxPairs, reverse = True)\n scoreIdxs = np.argsort([x[1] for x in scoreIdxPairs])\n targetScores = np.array([x[0] for x in scoreIdxPairs if x[2] == False])\n decoyScores = np.array([x[0] for x in scoreIdxPairs if x[2] == True])\n \n _, identPEPs = qvality.getQvaluesFromScores(targetScores, decoyScores, includePEPs = True, includeDecoys = True, tdcInput = True)\n \n print(\" Identified\", qvality.countBelowFDR(identPEPs, 0.01), \"peptides at 1% FDR\")\n newPeptideQuantRows = list()\n i = 0\n for row in peptideQuantRows:\n identPEP = 1.0\n if not np.isnan(row.combinedPEP):\n identPEP = identPEPs[scoreIdxs[i]]\n i += 1\n \n if hasLinkPEPs:\n newPeptideQuantRows.append(row._replace(combinedPEP = identPEP)) # using consensus spectra\n else:\n newPeptideQuantRows.append(row._replace(combinedPEP = identPEP, identificationPEP = [_combinePEPs(identPEP, x) for x in row.identificationPEP]))\n return newPeptideQuantRows\n\ndef _isDecoy(proteins, decoyPattern):\n isDecoyProt = True\n for protein in proteins:\n if not protein.startswith(decoyPattern):\n isDecoyProt = False\n break\n return isDecoyProt\n \ndef _combinePEPs(linkPEP, identPEP):\n return 1.0 - (1.0 - linkPEP)*(1.0 - identPEP)\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"numpy.log",
"numpy.nonzero",
"numpy.isnan",
"numpy.random.shuffle",
"numpy.log10",
"numpy.prod",
"numpy.argsort",
"numpy.array"
]
] |
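
The triqler row above builds an intensity divisor from `numpy.log10` and combines posterior error probabilities multiplicatively. A minimal, self-contained sketch of those two small helpers is shown below; the function names and test values are illustrative only and are not taken from the original file.

```python
import numpy as np

def combine_peps(link_pep: float, ident_pep: float) -> float:
    # Probability that at least one of the two error sources applies.
    return 1.0 - (1.0 - link_pep) * (1.0 - ident_pep)

def intensity_divisor(min_intensity: float) -> float:
    # Power of 10 that keeps two significant digits of the lowest
    # intensity when printing with two decimals, as in the row above.
    return np.power(10, np.floor(np.log10(min_intensity)) + 1)

print(combine_peps(0.01, 0.05))   # ~0.0595
print(intensity_divisor(3.2e5))   # 1000000.0
```
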
KevinLiao159/TalkingData
|
[
"c4a4095c0d805ef54b4f56d7b0e66f2883bb394a",
"c4a4095c0d805ef54b4f56d7b0e66f2883bb394a"
] |
[
"insample_iterations/scripts/train_model.py",
"insample_iterations/scripts/dump_insample_data.py"
] |
[
"import os\nimport psutil\nimport time\nimport gc\nimport numpy as np\nimport pandas as pd\nimport lightgbm\n# sklearn imports\nfrom sklearn.metrics.scorer import roc_auc_score\n\n# memory status\nprocess = psutil.Process(os.getpid())\nmemused = process.memory_info().rss\nprint('Total memory in use before reading data: {:.02f} GB '\n ''.format(memused / (2 ** 30)))\n\n# # read data\ndf_train = pd.read_hdf('../data/train.hdf').astype('float32')\ndf_test = pd.read_hdf('../data/test.hdf').astype('float32')\n# col\ntarget = 'is_attributed'\nfeatures = [\n 'app',\n 'device',\n 'os',\n 'channel',\n 'hour',\n 'in_test_hh',\n 'ip_day_hour_clicks',\n 'ip_app_day_hour_clicks',\n 'ip_os_day_hour_clicks',\n 'ip_device_day_hour_clicks',\n 'ip_day_test_hh_clicks',\n 'ip_app_device_clicks',\n 'ip_app_device_day_clicks',\n 'ip_day_nunique_app',\n 'ip_day_nunique_device',\n 'ip_day_nunique_channel',\n 'ip_day_nunique_hour',\n 'ip_nunique_app',\n 'ip_nunique_device',\n 'ip_nunique_channel',\n 'ip_nunique_hour',\n 'app_day_nunique_channel',\n 'app_nunique_channel',\n 'ip_app_day_nunique_os',\n 'ip_app_nunique_os',\n 'ip_device_os_day_nunique_app',\n 'ip_device_os_nunique_app',\n 'ip_app_day_var_hour',\n 'ip_device_day_var_hour',\n 'ip_os_day_var_hour',\n 'ip_channel_day_var_hour',\n 'ip_app_os_var_hour',\n 'ip_app_channel_var_day',\n 'ip_app_channel_mean_hour',\n 'ip_day_cumcount',\n 'ip_cumcount',\n 'ip_app_day_cumcount',\n 'ip_app_cumcount',\n 'ip_device_os_day_cumcount',\n 'ip_device_os_cumcount',\n 'next_click',\n 'previous_click',\n]\n# categorical\ncategorical_features = [\n 'app',\n 'device',\n 'os',\n 'channel',\n 'hour',\n 'in_test_hh',\n]\n# prep data\ndtrain = lightgbm.Dataset(\n df_train[features].values,\n label=df_train[target].values,\n feature_name=features,\n categorical_feature=categorical_features,\n free_raw_data=True,\n)\ndtest = lightgbm.Dataset(\n df_test[features].values,\n label=df_test[target].values,\n feature_name=features,\n categorical_feature=categorical_features\n)\n# cleanup\ndel df_train\ngc.collect()\nprint('done data prep!!!')\n# memory status\nmemused = process.memory_info().rss\nprint('Total memory in use after reading data: {:.02f} GB '\n ''.format(memused / (2 ** 30)))\n\n###################################################################\nparams = {\n 'boosting_type': 'gbdt', # I think dart would be better, but takes too long to run # noqa\n # 'drop_rate': 0.09, # only used in dart, Rate at which to drop trees # noqa\n 'objective': 'binary',\n 'learning_rate': 0.1,\n 'num_leaves': 32, # Was 255: Reduced to control overfitting # noqa\n 'max_depth': 6, # Was 8: LightGBM splits leaf-wise, so control depth via num_leaves # noqa\n 'min_split_gain': 0,\n 'subsample': 0.9, # Was 0.7\n 'subsample_freq': 1,\n 'colsample_bytree': 0.7,\n 'min_child_samples': 100,\n 'min_child_weight': 0,\n 'max_bin': 100, # default was 255\n 'subsample_for_bin': 200000,\n 'reg_alpha': 0,\n 'reg_lambda': 0,\n 'scale_pos_weight': 100,\n 'metric': 'auc',\n 'nthread': 22,\n 'verbose': 0,\n# seed is set default\n}\n# train\nt0 = time.time()\nmodel = lightgbm.train(\n params=params,\n train_set=dtrain,\n valid_sets=[dtrain, dtest],\n valid_names=['train', 'valid'],\n num_boost_round=350,\n early_stopping_rounds=30,\n feature_name=features,\n categorical_feature=categorical_features,\n verbose_eval=1,\n # init_model='model.txt'\n)\n####################################################################\nt1 = time.time()\nt_min = np.round((t1-t0) / 60, 2)\nprint('It took {} mins to train 
model'.format(t_min))\n####################################################################\nproba = model.predict(df_test[features], num_iteration=model.best_iteration)\nroc_score = roc_auc_score(y_true=df_test[target], y_score=proba)\nprint('Out of sample roc score is {}'.format(roc_score))\n",
"import os\nimport psutil\nimport time\nimport numpy as np\nimport pandas as pd\n\n# memory status\nprocess = psutil.Process(os.getpid())\nmemused = process.memory_info().rss\nprint('Total memory in use before reading data: {:.02f} GB '\n ''.format(memused / (2 ** 30)))\n\nt0 = time.time()\n# spec for train\ntrain_columns = \\\n ['ip', 'app', 'device', 'os', 'channel', 'click_time', 'is_attributed']\ntest_columns = \\\n ['ip', 'app', 'device', 'os', 'channel', 'click_time', 'click_id']\ndtype = {\n 'ip': 'uint32',\n 'app': 'uint16',\n 'device': 'uint16',\n 'os': 'uint16',\n 'channel': 'uint16',\n 'is_attributed': 'uint16',\n 'click_id': 'uint32'\n}\n# read data\ndf_all = pd.read_csv(\n filepath_or_buffer=\"../../data/train.csv\",\n usecols=train_columns,\n dtype=dtype,\n low_memory=True,\n parse_dates=['click_time'],\n infer_datetime_format=True,\n)\n# get training data for insample\n# NOTE: below indices are found in EDA\n# df_all.iloc[59709852:122070800, :] \\\n# .reset_index(drop=True) \\\n# .to_hdf('../data/train_raw.hdf', key='foo')\ndf_all.iloc[:122070800, :] \\\n .reset_index(drop=True) \\\n .to_hdf('../data/train_raw.hdf', key='train')\n# test_supplement\ndf_all.iloc[122070801:184903442, :] \\\n .reset_index(drop=True) \\\n .to_hdf('../data/test_supplement_raw.hdf', key='test_supplement')\n# test submission\npd.concat(\n [\n df_all.iloc[144708152:152413508, :],\n df_all.iloc[161974465:168265843, :],\n df_all.iloc[174976526:181878211, :]\n ],\n axis=0,\n verify_integrity=True\n).reset_index(drop=True).to_hdf('../data/test_raw.hdf', key='test')\nt1 = time.time()\nt_min = np.round((t1-t0) / 60, 2)\nprint('It took {} mins to dump in sample data'.format(t_min))\n"
] |
[
[
"numpy.round",
"pandas.read_hdf",
"sklearn.metrics.scorer.roc_auc_score"
],
[
"numpy.round",
"pandas.concat",
"pandas.read_csv"
]
] |
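
The TalkingData training script above ends by scoring held-out predictions with `roc_auc_score`. A tiny stand-alone version of that final step might look as follows; the labels and scores are made up, and the import uses the public `sklearn.metrics` path rather than the older `sklearn.metrics.scorer` module seen in the file.

```python
import numpy as np
from sklearn.metrics import roc_auc_score

# Toy stand-ins for df_test[target] and model.predict(df_test[features]).
y_true = np.array([0, 0, 1, 1, 0, 1])
y_score = np.array([0.10, 0.40, 0.35, 0.80, 0.20, 0.90])

roc_score = roc_auc_score(y_true=y_true, y_score=y_score)
print('Out of sample roc score is {}'.format(np.round(roc_score, 4)))
```
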
YJulyXing/SECNVs-SimulateCNVs-2.0-
|
[
"e1a4a7fe6ca4370e9fe3d7b92ecdf3ec5c55cbd4"
] |
[
"snp_rate.py"
] |
[
"#!/usr/bin/python\n\nimport random\nimport os\nimport subprocess\nimport math\nimport sys\nimport time\nimport copy\nfrom numpy.random import choice as choices\n\ndef switch_nt(nt):\n\tswitcher = {\n\t\t\"A\": list(choices([\"C\",\"T\",\"G\"],1,p=[0.14, 0.04, 0.82])),\n\t\t\"T\": list(choices([\"C\",\"A\",\"G\"],1,p=[0.84, 0.03, 0.13])),\n\t\t\"G\": list(choices([\"C\",\"A\",\"T\"],1,p=[0.19, 0.7, 0.11])),\n\t\t\"C\": list(choices([\"G\",\"A\",\"T\"],1,p=[0.17, 0.12, 0.71])),\n\t\t\"N\": [\"N\"]\n\t}\n\treturn switcher.get(nt, \"N\")\n"
] |
[
[
"numpy.random.choice"
]
] |
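
The `snp_rate.py` row above draws substitute nucleotides with `numpy.random.choice` and fixed probability weights. A hedged re-sketch that only evaluates the branch that is needed (the original builds every list on each call) could look like this; the weights are copied from the file, the surrounding structure is an assumption.

```python
import numpy as np

SUBSTITUTIONS = {
    "A": (["C", "T", "G"], [0.14, 0.04, 0.82]),
    "T": (["C", "A", "G"], [0.84, 0.03, 0.13]),
    "G": (["C", "A", "T"], [0.19, 0.70, 0.11]),
    "C": (["G", "A", "T"], [0.17, 0.12, 0.71]),
}

def switch_nt(nt):
    # Unknown or ambiguous bases stay "N", as in the original switcher.
    if nt not in SUBSTITUTIONS:
        return "N"
    alts, weights = SUBSTITUTIONS[nt]
    return np.random.choice(alts, p=weights)

print(switch_nt("A"))   # usually "G"
```
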
alexandru-dinu/cae
|
[
"f0f60f768c78cf1d182c6778df6cad6d69712634"
] |
[
"src/train.py"
] |
[
"import os\nimport yaml\nimport argparse\nfrom pathlib import Path\n\nimport numpy as np\nimport torch as T\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom data_loader import ImageFolder720p\nfrom utils import save_imgs\n\nfrom namespace import Namespace\nfrom logger import Logger\n\nfrom models.cae_32x32x32_zero_pad_bin import CAE\n\nlogger = Logger(__name__, colorize=True)\n\n\ndef train(cfg: Namespace) -> None:\n assert cfg.device == \"cpu\" or (cfg.device == \"cuda\" and T.cuda.is_available())\n\n root_dir = Path(__file__).resolve().parents[1]\n\n logger.info(\"training: experiment %s\" % (cfg.exp_name))\n\n # make dir-tree\n exp_dir = root_dir / \"experiments\" / cfg.exp_name\n\n for d in [\"out\", \"checkpoint\", \"logs\"]:\n os.makedirs(exp_dir / d, exist_ok=True)\n\n cfg.to_file(exp_dir / \"train_config.json\")\n\n # tb tb_writer\n tb_writer = SummaryWriter(exp_dir / \"logs\")\n logger.info(\"started tensorboard writer\")\n\n model = CAE()\n model.train()\n if cfg.device == \"cuda\":\n model.cuda()\n logger.info(f\"loaded model on {cfg.device}\")\n\n dataloader = DataLoader(\n dataset=ImageFolder720p(cfg.dataset_path),\n batch_size=cfg.batch_size,\n shuffle=cfg.shuffle,\n num_workers=cfg.num_workers,\n )\n logger.info(f\"loaded dataset from {cfg.dataset_path}\")\n\n optimizer = optim.Adam(model.parameters(), lr=cfg.learning_rate, weight_decay=1e-5)\n loss_criterion = nn.MSELoss()\n\n avg_loss, epoch_avg = 0.0, 0.0\n ts = 0\n\n # EPOCHS\n for epoch_idx in range(cfg.start_epoch, cfg.num_epochs + 1):\n # BATCHES\n for batch_idx, data in enumerate(dataloader, start=1):\n img, patches, _ = data\n\n if cfg.device == \"cuda\":\n patches = patches.cuda()\n\n avg_loss_per_image = 0.0\n for i in range(6):\n for j in range(10):\n optimizer.zero_grad()\n\n x = patches[:, :, i, j, :, :]\n y = model(x)\n loss = loss_criterion(y, x)\n\n avg_loss_per_image += (1 / 60) * loss.item()\n\n loss.backward()\n optimizer.step()\n\n avg_loss += avg_loss_per_image\n epoch_avg += avg_loss_per_image\n\n if batch_idx % cfg.batch_every == 0:\n tb_writer.add_scalar(\"train/avg_loss\", avg_loss / cfg.batch_every, ts)\n\n for name, param in model.named_parameters():\n tb_writer.add_histogram(name, param, ts)\n\n logger.debug(\n \"[%3d/%3d][%5d/%5d] avg_loss: %.8f\"\n % (\n epoch_idx,\n cfg.num_epochs,\n batch_idx,\n len(dataloader),\n avg_loss / cfg.batch_every,\n )\n )\n\n avg_loss = 0.0\n ts += 1\n # -- end batch every\n\n if batch_idx % cfg.save_every == 0:\n out = T.zeros(6, 10, 3, 128, 128)\n for i in range(6):\n for j in range(10):\n x = patches[0, :, i, j, :, :].unsqueeze(0).cuda()\n out[i, j] = model(x).cpu().data\n\n out = np.transpose(out, (0, 3, 1, 4, 2))\n out = np.reshape(out, (768, 1280, 3))\n out = np.transpose(out, (2, 0, 1))\n\n y = T.cat((img[0], out), dim=2).unsqueeze(0)\n save_imgs(\n imgs=y,\n to_size=(3, 768, 2 * 1280),\n name=exp_dir / f\"out/{epoch_idx}_{batch_idx}.png\",\n )\n # -- end save every\n # -- end batches\n\n if epoch_idx % cfg.epoch_every == 0:\n epoch_avg /= len(dataloader) * cfg.epoch_every\n\n tb_writer.add_scalar(\n \"train/epoch_avg_loss\",\n avg_loss / cfg.batch_every,\n epoch_idx // cfg.epoch_every,\n )\n\n logger.info(\"Epoch avg = %.8f\" % epoch_avg)\n epoch_avg = 0.0\n\n T.save(model.state_dict(), exp_dir / f\"checkpoint/model_{epoch_idx}.pth\")\n # -- end epoch every\n # -- end epoch\n\n # save final model\n T.save(model.state_dict(), exp_dir / 
\"model_final.pth\")\n\n # cleaning\n tb_writer.close()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--config\", type=str, required=True)\n args = parser.parse_args()\n\n with open(args.config, \"rt\") as fp:\n cfg = Namespace(**yaml.safe_load(fp))\n\n train(cfg)\n"
] |
[
[
"torch.zeros",
"numpy.reshape",
"torch.cat",
"torch.utils.tensorboard.SummaryWriter",
"torch.cuda.is_available",
"numpy.transpose",
"torch.nn.MSELoss"
]
] |
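
The CAE training script above stitches a 6 x 10 grid of 128 x 128 patches back into a 768 x 1280 frame with `numpy.transpose` and `numpy.reshape`. The sketch below reproduces just that reshaping on a plain NumPy array (the original applies it to a torch tensor of reconstructions); the shapes come from the file, the array itself is a placeholder.

```python
import numpy as np

# Placeholder for the 6 x 10 grid of 3-channel 128 x 128 patch reconstructions.
out = np.zeros((6, 10, 3, 128, 128), dtype=np.float32)

stitched = np.transpose(out, (0, 3, 1, 4, 2))    # (6, 128, 10, 128, 3)
stitched = np.reshape(stitched, (768, 1280, 3))  # 6*128 rows, 10*128 cols
stitched = np.transpose(stitched, (2, 0, 1))     # back to channel-first layout
print(stitched.shape)                            # (3, 768, 1280)
```
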
java-abhinav07/formreader
|
[
"63aa695eb4b8547bc56c6f070dd8d1fadbdf29b1",
"63aa695eb4b8547bc56c6f070dd8d1fadbdf29b1"
] |
[
"text_renderer/text_renderer/utils/draw_utils.py",
"text_renderer/main.py"
] |
[
"from typing import Tuple, Union\n\nfrom PIL import ImageDraw, Image\nfrom PIL.Image import Image as PILImage\nimport numpy as np\n\nfrom text_renderer.utils.font_text import FontText\n\n\ndef transparent_img(size: Tuple[int, int]) -> PILImage:\n \"\"\"\n\n Args:\n size: (width, height)\n\n Returns:\n\n \"\"\"\n return Image.new(\"RGBA\", (size[0], size[1]), (255, 255, 255, 0))\n\n\ndef draw_text_on_bg(\n font_text: FontText,\n text_color: Tuple[int, int, int, int] = (0, 0, 0, 255),\n char_spacing: Union[float, Tuple[float, float]] = -1,\n) -> PILImage:\n \"\"\"\n\n Parameters\n ----------\n font_text : FontText\n text_color : RGBA\n Default is black\n char_spacing : Union[float, Tuple[float, float]]\n Draw character with spacing. If tuple, random choice between [min, max)\n Set -1 to disable\n\n Returns\n -------\n PILImage:\n RGBA Pillow image with text on a transparent image\n -------\n\n \"\"\"\n if char_spacing == -1:\n return _draw_text_on_bg(font_text, text_color)\n\n chars_size = []\n width = 0\n for c in font_text.text:\n size = font_text.font.getsize(c)\n chars_size.append(size)\n width += size[0]\n\n max_size = max(chars_size)[0]\n\n height = font_text.size[1]\n\n char_spacings = []\n for i in range(len(font_text.text)):\n if isinstance(char_spacing, list) or isinstance(char_spacing, tuple):\n s = np.random.uniform(*char_spacing)\n char_spacings.append(int(s * height))\n else:\n char_spacings.append(int(char_spacing * height))\n\n width += sum(char_spacings[:-1])\n\n text_mask = transparent_img((int(width * 1.1), int(height * 1.5)))\n\n draw = ImageDraw.Draw(text_mask)\n\n c_x = int(width * 0.05)\n c_y = int(0.08 * height)\n colors = [\n \"#000\",\n \"#080808\",\n \"#101010\",\n \"#181818\",\n \"#202020\",\n \"#282828\",\n \"#303030\",\n \"#383838\",\n \"#404040\",\n \"#484848\",\n \"#505050\",\n ]\n c_color = np.random.choice(np.array(colors))\n\n y_offset = font_text.offset[1]\n x = np.random.choice([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\n flag = True\n if x%7 == 0:\n flag = False\n for i, c in enumerate(font_text.text):\n\n xo = c_x - (char_spacings[i] // 2) * (1.05)\n yo = c_y - (y_offset // 16)\n\n y1 = c_y + int(height) - (y_offset // 16)\n x1 = xo + chars_size[i][0] + int(char_spacings[i] * 1.05)\n # print(xo, yo, x1, y1)\n\n # draw random background text\n r = np.random.choice(\n np.array([\"\", \"D\", \"M\", \"Y\", \"\", \"A\", \"B\", \"C\", \"E\", \" \", \"F\"])\n )\n rgb = np.random.randint(20, 80)\n draw.text(\n (c_x, c_y - y_offset),\n str(r),\n fill=(rgb, rgb, rgb, rgb - 20),\n font=font_text.font,\n width=2,\n )\n\n draw.text(\n (c_x, c_y - y_offset),\n c,\n fill=text_color,\n font=font_text.font,\n stroke_fill=\"#000\",\n width=np.random.choice([1, 2, 3]),\n )\n\n c_x += chars_size[i][0] + char_spacings[i]\n # text_mask.show()\n\n # draw a box around text\n if flag:\n draw.rectangle((xo, yo, x1, y1), width=2, outline=\"#000\", fill=None)\n\n return text_mask\n\n\ndef _draw_text_on_bg(\n font_text: FontText, text_color: Tuple[int, int, int, int] = (0, 0, 0, 255),\n) -> PILImage:\n \"\"\"\n Draw text\n\n Parameters\n ----------\n font_text : FontText\n text_color : RGBA\n Default is black\n\n Returns\n -------\n PILImage:\n RGBA Pillow image with text on a transparent image\n \"\"\"\n text_width, text_height = font_text.size\n text_mask = transparent_img((text_width, text_height))\n draw = ImageDraw.Draw(text_mask)\n\n xy = font_text.xy\n\n # TODO: figure out anchor\n draw.text(\n xy, font_text.text, font=font_text.font, fill=text_color, anchor=None,\n )\n\n return 
text_mask\n",
"import argparse\nimport multiprocessing as mp\nimport os\nimport time\nfrom multiprocessing.context import Process\n\nimport cv2\nfrom loguru import logger\n\nfrom text_renderer.config import get_cfg, GeneratorCfg\nfrom text_renderer.dataset import LmdbDataset, ImgDataset\nfrom text_renderer.render import Render\n\ncv2.setNumThreads(1)\n\nSTOP_TOKEN = \"kill\"\n\n# each child process will initialize Render in process_setup\nrender: Render\n\n\nclass DBWriterProcess(Process):\n def __init__(\n self,\n dataset_cls,\n data_queue,\n generator_cfg: GeneratorCfg,\n log_period: float = 1,\n ):\n super().__init__()\n self.dataset_cls = dataset_cls\n self.data_queue = data_queue\n self.generator_cfg = generator_cfg\n self.log_period = log_period\n\n def run(self):\n num_image = self.generator_cfg.num_image\n save_dir = self.generator_cfg.save_dir\n log_period = max(1, int(self.log_period / 100 * num_image))\n try:\n with self.dataset_cls(str(save_dir)) as db:\n exist_count = db.read_count()\n count = 0\n logger.info(f\"Exist image count in {save_dir}: {exist_count}\")\n start = time.time()\n while True:\n m = self.data_queue.get()\n if m == STOP_TOKEN:\n logger.info(\"DBWriterProcess receive stop token\")\n break\n\n name = \"{:09d}\".format(exist_count + count)\n db.write(name, m[\"image\"], m[\"label\"])\n count += 1\n if count % log_period == 0:\n logger.info(\n f\"{(count/num_image)*100:.2f}%({count}/{num_image}) {log_period/(time.time() - start):.1f} img/s\"\n )\n start = time.time()\n db.write_count(count + exist_count)\n logger.info(f\"{(count / num_image) * 100:.2f}%({count}/{num_image})\")\n logger.info(f\"Finish generate: {count}. Total: {exist_count+count}\")\n except Exception as e:\n logger.exception(\"DBWriterProcess error\")\n raise e\n\n\ndef generate_img(data_queue):\n data = render()\n if data is not None:\n data_queue.put({\"image\": data[0], \"label\": data[1]})\n\n\ndef process_setup(*args):\n global render\n import numpy as np\n\n # Make sure different process has different random seed\n np.random.seed()\n\n render = Render(args[0])\n logger.info(f\"Finish setup image generate process: {os.getpid()}\")\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--config\", required=True, help=\"python file path\")\n parser.add_argument(\"--dataset\", default=\"img\", choices=[\"lmdb\", \"img\"])\n parser.add_argument(\"--num_processes\", type=int, default=2)\n parser.add_argument(\"--log_period\", type=float, default=10)\n return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n mp.set_start_method(\"spawn\", force=True)\n manager = mp.Manager()\n data_queue = manager.Queue()\n args = parse_args()\n\n dataset_cls = LmdbDataset if args.dataset == \"lmdb\" else ImgDataset\n\n generator_cfgs = get_cfg(args.config)\n\n for generator_cfg in generator_cfgs:\n db_writer_process = DBWriterProcess(\n dataset_cls, data_queue, generator_cfg, args.log_period\n )\n db_writer_process.start()\n\n with mp.Pool(\n processes=args.num_processes,\n initializer=process_setup,\n initargs=(generator_cfg.render_cfg,),\n ) as pool:\n\n for _ in range(generator_cfg.num_image):\n pool.apply_async(generate_img, args=(data_queue,))\n\n pool.close()\n pool.join()\n\n data_queue.put(STOP_TOKEN)\n db_writer_process.join()\n"
] |
[
[
"numpy.random.uniform",
"numpy.array",
"numpy.random.randint",
"numpy.random.choice"
],
[
"numpy.random.seed"
]
] |
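
The text_renderer row above randomises character spacing, stroke width and background colour with `numpy.random.uniform`, `numpy.random.choice` and `numpy.random.randint`. A few of those draws in isolation, with illustrative values for the spacing range and glyph height, might look like this:

```python
import numpy as np

height = 32                    # assumed glyph height in pixels
char_spacing = (-0.05, 0.15)   # assumed (min, max) spacing as a fraction of height

# One spacing draw per character, as done per glyph in the original loop.
spacings = [int(np.random.uniform(*char_spacing) * height) for _ in range(8)]
color = np.random.choice(np.array(["#000", "#202020", "#404040"]))
stroke = np.random.choice([1, 2, 3])
gray = np.random.randint(20, 80)
print(spacings, color, stroke, gray)
```
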
DerThorsten/kipoi
|
[
"7110ea9f157c6996197bc75d5c2516d33a9e2861"
] |
[
"tests/test_20_cli_examples.py"
] |
[
"\"\"\"Run the example scripts\n\"\"\"\nimport os\nimport subprocess\nimport sys\n\nimport pandas as pd\nimport pytest\nimport yaml\n\nimport config\n# import filecmp\nimport kipoi\nimport kipoi_conda\nimport kipoi_utils\nfrom kipoi.env_db import EnvDbEntry\nfrom kipoi.readers import HDF5Reader\nfrom utils import cp_tmpdir\n\nif config.install_req:\n INSTALL_FLAG = \"--install_req\"\nelse:\n INSTALL_FLAG = \"\"\n\nEXAMPLES_TO_RUN = [\"rbp\", \"extended_coda\", \"sklearn_iris\", \"iris_model_template\",\n \"non_bedinput_model\", \"pyt\", \"iris_tensorflow\", \"kipoi_dataloader_decorator\"]\n\npredict_activation_layers = {\n \"rbp\": \"concatenate_6\",\n \"pyt\": \"3\" # two before the last layer\n}\nACTIVATION_EXAMPLES = ['rbp', 'pyt']\n\n\ndef test_cli_get_example(tmpdir):\n \"\"\"kipoi test ..., add also output file writing\n \"\"\"\n example = \"kipoi_dataloader_decorator\"\n example_dir = \"example/models/{0}\".format(example)\n\n outdir = os.path.join(str(tmpdir), example)\n args = [\"python\", \"./kipoi/__main__.py\", \"get-example\",\n example_dir,\n \"--source\", 'dir',\n \"-o\", outdir]\n kipoi.cli.main.cli_get_example(\"get-example\", args[3:])\n assert os.path.exists(os.path.join(outdir, \"targets_file\"))\n\n\n@pytest.mark.parametrize(\"example\", EXAMPLES_TO_RUN)\ndef test_test_example(example, tmpdir):\n \"\"\"kipoi test ..., add also output file writing\n \"\"\"\n if example in {\"rbp\", \"non_bedinput_model\", \"iris_model_template\"} \\\n and sys.version_info[0] == 2:\n pytest.skip(\"example not supported on python 2 \")\n\n example_dir = cp_tmpdir(\"example/models/{0}\".format(example), tmpdir)\n\n args = [\"python\", \"./kipoi/__main__.py\", \"test\",\n \"--batch_size=4\",\n example_dir]\n if INSTALL_FLAG:\n args.append(INSTALL_FLAG)\n returncode = subprocess.call(args=args)\n assert returncode == 0\n\n if example == 'pyt':\n # python interface, write also the output file\n output_file = os.path.join(example_dir, 'preds.h5')\n kipoi.cli.main.cli_test(\"test\", args[3:] + [\"-o\", output_file])\n\n assert os.path.exists(output_file)\n preds = HDF5Reader.load(output_file)\n assert 'inputs' in preds\n assert 'metadata' in preds\n assert 'preds' in preds\n\n\ndef test_cli_test_expect(tmpdir):\n \"\"\"kipoi test - check that the expected predictions also match\n \"\"\"\n example = 'pyt'\n example_dir = cp_tmpdir(\"example/models/{0}\".format(example), tmpdir)\n\n # fail the test\n args = [\"python\", \"./kipoi/__main__.py\", \"test\",\n \"--batch_size=4\",\n \"-e\", os.path.join(example_dir, \"wrong.pred.h5\"),\n example_dir]\n if INSTALL_FLAG:\n args.append(INSTALL_FLAG)\n returncode = subprocess.call(args=args)\n assert returncode == 1\n\n # succeed\n kipoi.cli.main.cli_test(\"test\", [\"--batch_size=4\",\n \"-e\", os.path.join(example_dir, \"expected.pred.h5\"),\n example_dir])\n\n\ndef test_postproc_cli_fail():\n \"\"\"kipoi test ...\n \"\"\"\n # This command should fail\n args = [\"python\", \"./kipoi/__main__.py\", \"postproc\", \"score_variants\"]\n returncode = subprocess.call(args=args)\n assert returncode > 0\n\n args = [\"python\", \"./kipoi/__main__.py\", \"other\"]\n returncode = subprocess.call(args=args)\n assert returncode > 0\n\n@pytest.mark.parametrize(\"new_dataloader_kwargs_format\", [False, True])\n@pytest.mark.parametrize(\"example\", EXAMPLES_TO_RUN)\ndef test_preproc_example(example, new_dataloader_kwargs_format, tmpdir):\n \"\"\"kipoi preproc ...\n \"\"\"\n if example in {\"rbp\", \"non_bedinput_model\", \"iris_model_template\"} and sys.version_info[0] 
== 2:\n pytest.skip(\"example not supported on python 2 \")\n if example in {\"extended_coda\", \"kipoi_dataloader_decorator\"}:\n # extended_coda will anyway be tested in models\n pytest.skip(\"randomly failing on circleci without any reason. Skipping this test.\")\n\n example_dir = cp_tmpdir(\"example/models/{0}\".format(example), tmpdir)\n # example_dir = \"example/models/{0}\".format(example)\n\n tmpfile = str(tmpdir.mkdir(\"output\", ).join(\"out.h5\"))\n\n if example in {\"rbp\"} and new_dataloader_kwargs_format:\n if example == \"rbp\":\n dataloader_args = [\n \"intervals_file=intervals.tsv\",\n \"fasta_file=hg38_chr22.fa\",\n \"preproc_transformer=../dataloader_files/encodeSplines.pkl\",\n \"gtf_file=gencode_v25_chr22.gtf.pkl.gz\",\n \"tarOget_file=targets.tsv\"\n ]\n elif example == \"extended_coda\":\n dataloader_args = [\n \"intervals_file=intervals.tsv\",\n \"input_data_sources={'H3K27AC_subsampled':'H3K27AC_subsampled.bw'}\",\n \"batch_size=4\"\n ]\n # run the\n args = [\"python\", os.path.abspath(\"./kipoi/__main__.py\"), \"preproc\",\n \"../\", # directory\n \"--source=dir\",\n \"--batch_size=4\",\n \"--num_workers=2\",\n \"--dataloader_args\"] + dataloader_args + [\"--output\", tmpfile]\n\n else:\n # run the\n args = [\"python\", os.path.abspath(\"./kipoi/__main__.py\"), \"preproc\",\n \"../\", # directory\n \"--source=dir\",\n \"--batch_size=4\",\n \"--num_workers=2\",\n \"--dataloader_args=test.json\",\n \"--output\", tmpfile]\n if INSTALL_FLAG:\n args.append(INSTALL_FLAG)\n returncode = subprocess.call(args=args,\n cwd=os.path.realpath(example_dir + \"/example_files\"))\n\n assert returncode == 0\n\n assert os.path.exists(tmpfile)\n\n data = HDF5Reader.load(tmpfile)\n\n with open(example_dir + \"/dataloader.yaml\", \"r\") as f:\n ex_descr = yaml.load(f)\n\n if example not in {\"pyt\", \"sklearn_iris\"}:\n assert data[\"inputs\"].keys() == ex_descr[\"output_schema\"][\"inputs\"].keys()\n\n if example == 'pyt':\n args[-1] = tmpfile + \"2.h5\"\n with kipoi_utils.utils.cd(os.path.join(example_dir, \"example_files\")):\n kipoi.cli.main.cli_preproc(\"preproc\", args[3:])\n\n\n@pytest.mark.parametrize(\"example\", EXAMPLES_TO_RUN)\ndef test_predict_example(example, tmpdir):\n \"\"\"kipoi predict ...\n \"\"\"\n # TODO - test -out\n # Traceback (most recent call last):\n # File \"/home/avsec/projects-work/kipoi/kipoi/__main__.py\", line 60, in <module>\n # main()\n # File \"/home/avsec/projects-work/kipoi/kipoi/__main__.py\", line 56, in main\n # command_fn(args.command, sys.argv[2:])\n # File \"/home/avsec/bin/anaconda3/lib/python3.6/site-packages/kipoi/pipeline.py\", line 273, in cli_predict\n # pred_batch = model.predict_on_batch(batch['inputs'])\n # File \"/home/avsec/bin/anaconda3/lib/python3.6/site-packages/kipoi/model.py\", line 22, in predict_on_batch\n # raise NotImplementedError\n # NotImplementedError\n # _________________________\n if example in {\"rbp\", \"non_bedinput_model\", \"iris_model_template\"} and sys.version_info[0] == 2:\n pytest.skip(\"rbp example not supported on python 2 \")\n\n if example in {'kipoi_dataloader_decorator'}:\n pytest.skip(\"Automatically-dowloaded input files skipped for prediction\")\n\n example_dir = cp_tmpdir(\"example/models/{0}\".format(example), tmpdir)\n # example_dir = \"example/models/{0}\".format(example)\n\n if example == \"rbp\":\n file_format = \"tsv\"\n else:\n file_format = \"hdf5\"\n\n print(example)\n print(\"tmpdir: {0}\".format(tmpdir))\n tmpfile = str(tmpdir.mkdir(\"output\").join(\"out.{0}\".format(file_format)))\n\n 
# run the\n args = [\"python\", os.path.abspath(\"./kipoi/__main__.py\"), \"predict\",\n \"../\", # directory\n \"--source=dir\",\n \"--batch_size=4\",\n \"--num_workers=2\",\n \"--dataloader_args=test.json\",\n \"--output\", tmpfile]\n if INSTALL_FLAG:\n args.append(INSTALL_FLAG)\n returncode = subprocess.call(args=args,\n cwd=os.path.realpath(example_dir + \"/example_files\"))\n assert returncode == 0\n\n assert os.path.exists(tmpfile)\n\n if file_format == \"hdf5\":\n data = HDF5Reader.load(tmpfile)\n assert {'metadata', 'preds'} <= set(data.keys())\n else:\n data = pd.read_csv(tmpfile, sep=\"\\t\")\n assert list(data.columns) == ['metadata/ranges/chr',\n 'metadata/ranges/end',\n 'metadata/ranges/id',\n 'metadata/ranges/start',\n 'metadata/ranges/strand',\n 'preds/0']\n if example == 'pyt':\n args[-1] = tmpfile + \"out2.{0}\".format(file_format)\n with kipoi_utils.utils.cd(os.path.join(example_dir, \"example_files\")):\n kipoi.cli.main.cli_predict(\"predict\", args[3:])\n\n\n@pytest.mark.parametrize(\"example\", ACTIVATION_EXAMPLES)\ndef test_predict_activation_example(example, tmpdir):\n \"\"\"Kipoi predict --layer=x with a specific output layer specified\n \"\"\"\n if example in {\"rbp\", \"non_bedinput_model\", \"iris_model_template\"} and sys.version_info[0] == 2:\n pytest.skip(\"rbp example not supported on python 2 \")\n if example in {'kipoi_dataloader_decorator'}:\n pytest.skip(\"Automatically-dowloaded input files skipped for prediction\")\n\n example_dir = cp_tmpdir(\"example/models/{0}\".format(example), tmpdir)\n # example_dir = \"example/models/{0}\".format(example)\n\n print(example)\n print(\"tmpdir: {0}\".format(tmpdir))\n tmpfile = str(tmpdir.mkdir(\"output\").join(\"out.h5\"))\n\n # run the\n args = [\"python\", os.path.abspath(\"./kipoi/__main__.py\"), \"predict\",\n \"../\", # directory\n \"--source=dir\",\n \"--layer\", predict_activation_layers[example],\n \"--batch_size=4\",\n \"--num_workers=2\",\n \"--dataloader_args=test.json\",\n \"--output\", tmpfile]\n if INSTALL_FLAG:\n args.append(INSTALL_FLAG)\n returncode = subprocess.call(args=args,\n cwd=os.path.realpath(example_dir + \"/example_files\"))\n assert returncode == 0\n\n assert os.path.exists(tmpfile)\n\n data = HDF5Reader.load(tmpfile)\n assert {'metadata', 'preds'} <= set(data.keys())\n if example == 'pyt':\n args[-1] = tmpfile + \"2.h5\"\n with kipoi_utils.utils.cd(os.path.join(example_dir, \"example_files\")):\n kipoi.cli.main.cli_predict(\"predict\", args[3:])\n\n\ndef test_kipoi_pull():\n \"\"\"Test that pull indeed pulls the right model\n \"\"\"\n args = [\"python\", os.path.abspath(\"./kipoi/__main__.py\"), \"pull\",\n \"rbp_eclip/AARS\"]\n returncode = subprocess.call(args=args)\n assert returncode == 0\n # obsolete - not using the git-lfs source anymore\n # assert (os.path.exists(os.path.expanduser('~/.kipoi/models/rbp_eclip/downloaded/AARS/model_files/model.h5')) or\n # os.path.exists(os.path.expanduser('~/.kipoi/models/rbp_eclip/AARS/model_files/model.h5')))\n\n kipoi.cli.main.cli_pull(\"pull\", [\"rbp_eclip/AARS\"])\n\n\ndef test_kipoi_info():\n \"\"\"Test that pull indeed pulls the right model\n \"\"\"\n if sys.version_info[0] == 2:\n pytest.skip(\"example not supported on python 2 \")\n args = [\"python\", os.path.abspath(\"./kipoi/__main__.py\"), \"info\",\n \"rbp_eclip/AARS\"]\n returncode = subprocess.call(args=args)\n assert returncode == 0\n\n\ndef assert_rec(a, b):\n if isinstance(a, dict):\n assert set(a.keys()) == set(b.keys())\n for k in a:\n assert_rec(a[k], b[k])\n elif 
isinstance(a, list):\n assert len(a) == len(b)\n for a_el, b_el in zip(a, b):\n assert_rec(a_el, b_el)\n else:\n assert a == b\n\n\ndef process_args(args):\n raw_args = args[4:]\n cmd = \" \".join(args)\n return cmd, raw_args\n\n\nclass PseudoConda:\n def __init__(self, tmpdir):\n self.existing_envs = {}\n self.tmpdir = tmpdir\n\n @staticmethod\n def strip_yaml_suffix(env):\n env = env.split(\"/\")[-1]\n if env.endswith(\".yaml\"):\n return env[:-len(\".yaml\")]\n else:\n return env\n\n def add_env(self, env, **kwargs):\n env = self.strip_yaml_suffix(env)\n if env in self.existing_envs:\n return 1\n\n kipoi_cli_path = os.path.join(str(self.tmpdir), \"kipoi_cli_\" + env)\n with open(kipoi_cli_path, \"w\") as ofh:\n ofh.write(\"kipoi\")\n self.existing_envs[env] = kipoi_cli_path\n return 0\n\n def get_cli(self, env):\n env = self.strip_yaml_suffix(env)\n if env not in self.existing_envs:\n return None\n return self.existing_envs[env]\n\n def delete_env(self, env):\n env = self.strip_yaml_suffix(env)\n if env in self.existing_envs:\n self.existing_envs.pop(env)\n return 0\n else:\n raise Exception(\"Failed\")\n\n\ndef test_kipoi_env_create_cleanup_remove(tmpdir, monkeypatch):\n from kipoi.cli.env import cli_create, cli_cleanup, cli_remove, cli_get, cli_get_cli, cli_list\n tempfile = os.path.join(str(tmpdir), \"envs.json\")\n\n # Define things necessary for monkeypatching\n\n def get_assert_env(equals):\n def assert_to(val):\n assert len(val) == len(equals)\n assert all([v.create_args.env == e for v, e in zip(val, equals)])\n\n return assert_to\n\n def get_assert_env_cli(equals):\n def assert_to(val):\n assert len(val) == len(equals)\n assert all([v.cli_path == e for v, e in zip(val, equals)])\n\n return assert_to\n\n # pseudo kipoi CLI executable\n conda = PseudoConda(tmpdir)\n\n if os.path.exists(tempfile):\n os.unlink(tempfile)\n\n test_model = \"example/models/pyt\"\n test_env_name = \"kipoi-testenv\"\n source_path = kipoi.get_source(\"dir\").local_path\n\n # monkeypatch:\n old_env_db_path = kipoi.config._env_db_path\n monkeypatch.setattr(kipoi.config, '_env_db_path', tempfile)\n monkeypatch.setattr(kipoi_conda, 'create_env_from_file', conda.add_env)\n monkeypatch.setattr(kipoi_conda, 'remove_env', conda.delete_env)\n monkeypatch.setattr(kipoi_conda, 'get_kipoi_bin', conda.get_cli)\n monkeypatch.setattr(kipoi.cli.env, 'print_env_names', get_assert_env([test_env_name]))\n # load the db from the new path\n kipoi.env_db.reload_model_env_db()\n\n args = [\"python\", os.path.abspath(\"./kipoi/__main__.py\"), \"env\", \"create\", \"--source\", \"dir\", \"--env\",\n test_env_name, test_model]\n\n # pretend to run the CLI\n cli_create(*process_args(args))\n\n # make sure the successful flag is set and the kipoi-cli exists\n kipoi.env_db.reload_model_env_db()\n db = kipoi.env_db.get_model_env_db()\n\n entry = db.get_entry_by_model(os.path.join(source_path, test_model))\n assert entry.successful\n assert os.path.exists(entry.cli_path)\n\n # add a new entry that does not exist:\n cfg = entry.get_config()\n cfg[\"create_args\"][\"env\"] += \"____AAAAAA_____\"\n cfg[\"cli_path\"] += \"____AAAAAA_____\"\n db.append(EnvDbEntry.from_config(cfg))\n\n # now test the get environment name and the get_kipoi_bin\n args = [\"python\", os.path.abspath(\"./kipoi/__main__.py\"), \"env\", \"get\", \"--source\", \"dir\", test_model]\n cli_get(*process_args(args))\n\n monkeypatch.setattr(kipoi.cli.env, 'print_env_cli_paths', get_assert_env_cli([conda.get_cli(test_env_name)]))\n args = [\"python\", 
os.path.abspath(\"./kipoi/__main__.py\"), \"env\", \"get_bin\", \"--source\", \"dir\", test_model]\n cli_get_cli(*process_args(args))\n\n # list environments:\n monkeypatch.setattr(kipoi.cli.env, 'print_valid_env_names', get_assert_env([test_env_name]))\n monkeypatch.setattr(kipoi.cli.env, 'print_invalid_env_names', get_assert_env([test_env_name + \"____AAAAAA_____\"]))\n monkeypatch.setattr(subprocess, 'call', lambda *args, **kwargs: None)\n args = [\"python\", os.path.abspath(\"./kipoi/__main__.py\"), \"env\", \"list\"]\n cli_list(*process_args(args))\n\n # pretend also the first installation didn't work\n entry.successful = False\n first_config = entry.get_config()\n db.save()\n\n args = [\"python\", os.path.abspath(\"./kipoi/__main__.py\"), \"env\", \"cleanup\", \"--all\", '--yes']\n print(conda.existing_envs)\n print(db.entries)\n # pretend to run the CLI\n cli_cleanup(*process_args(args))\n\n # now\n kipoi.env_db.reload_model_env_db()\n db = kipoi.env_db.get_model_env_db()\n assert len(db.entries) == 1\n assert_rec(db.entries[0].get_config(), cfg)\n\n args = [\"python\", os.path.abspath(\"./kipoi/__main__.py\"), \"env\", \"cleanup\", \"--all\", \"--db\", '--yes']\n # pretend to run the CLI\n cli_cleanup(*process_args(args))\n\n kipoi.env_db.reload_model_env_db()\n db = kipoi.env_db.get_model_env_db()\n assert len(db.entries) == 0\n assert len(conda.existing_envs) == 0\n\n # now final test of creating and removing an environment:\n\n args = [\"python\", os.path.abspath(\"./kipoi/__main__.py\"), \"env\", \"create\", \"--source\", \"dir\", \"--env\",\n test_env_name, test_model]\n # pretend to run the CLI\n cli_create(*process_args(args))\n args = [\"python\", os.path.abspath(\"./kipoi/__main__.py\"), \"env\", \"remove\", \"--source\", \"dir\", test_model, '--yes']\n cli_remove(*process_args(args))\n\n kipoi.env_db.reload_model_env_db()\n db = kipoi.env_db.get_model_env_db()\n assert len(db.entries) == 0\n assert len(conda.existing_envs) == 0\n\n # just make sure this resets after the test.\n kipoi.config._env_db_path = old_env_db_path\n kipoi.env_db.reload_model_env_db()\n\n\ndef test_kipoi_env_create_all(tmpdir, monkeypatch):\n from kipoi.cli.env import cli_create\n conda = PseudoConda(tmpdir)\n monkeypatch.setattr(kipoi_conda, 'create_env_from_file', conda.add_env)\n monkeypatch.setattr(kipoi_conda, 'remove_env', conda.delete_env)\n monkeypatch.setattr(kipoi_conda, 'get_kipoi_bin', conda.get_cli)\n\n args = [\"python\", os.path.abspath(\"./kipoi/__main__.py\"), \"env\", \"create\", \"all\"]\n # pretend to run the CLI\n cli_create(*process_args(args))\n\n\ndef test_kipoi_env_create_all_dry_run():\n from kipoi.cli.env import cli_create\n args = [\"python\", os.path.abspath(\"./kipoi/__main__.py\"), \"env\", \"create\", \"all\", \"--dry-run\"]\n # pretend to run the CLI\n cli_create(*process_args(args))\n"
] |
[
[
"pandas.read_csv"
]
] |
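
The kipoi CLI tests above read the TSV written by `kipoi predict` back with `pandas.read_csv` and check its columns. A self-contained version of that check, using an in-memory TSV instead of a real prediction file, could be:

```python
import io
import pandas as pd

# Stand-in for the file produced by `kipoi predict --output out.tsv`.
tsv = (
    "metadata/ranges/chr\tmetadata/ranges/start\tpreds/0\n"
    "chr22\t1000\t0.73\n"
)
data = pd.read_csv(io.StringIO(tsv), sep="\t")
assert list(data.columns) == ["metadata/ranges/chr", "metadata/ranges/start", "preds/0"]
print(data)
```
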
wahjtran/transform_encoders
|
[
"855f1879b174bf0b25443dd35ca850055af6d287"
] |
[
"intencoder.py"
] |
[
"\n\n# Author: Wah (Nick) Tran\n# Status: Production\n\n\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.preprocessing import LabelEncoder\n\n\n\nclass IntegerEncoder(object):\n '''\n Transformer to encode multiple categorical features as integers.\n\n Applies sklearn's LabelEncoder to dtype:object columns to produce sequential \n integers starting at 0 to represent unique classes. Encodes null values as -999.\n ''' \n\n def __init__(self, cat_cols=None):\n '''\n Parameters\n ----------\n\n cat_cols : list or None (default=None)\n Column names to be encoded.\n If None, will default to all columns identified as dtype:object.\n\n Attributes\n ----------\n\n cat_cols : list\n Column names to be encoded.\n\n n_cat_cols: int\n Number of categorical columns to encode.\n\n encoders: dict\n Container for LabelEncoders fit to each column in cat_cols.\n '''\n\n self.cat_cols = cat_cols\n \n \n \n def fit(self, data):\n '''\n Fit encoder to dataset.\n\n Parameters\n ----------\n\n data: DataFrame, shape (n_samples, n_features)\n Full dataset for which categorical features are to be identified.\n\n Returns\n -------\n\n self: returns an instance of self.\n\n '''\n\n # If no columns are specified, all dtype:object columns will be encoded\n if self.cat_cols==None:\n self.cat_cols = data.select_dtypes('object').columns.tolist()\n \n self.n_cat_cols = len(self.cat_cols)\n print('\\n{} categorial features detected\\n'.format(self.n_cat_cols))\n \n\n # Fit LabelEncoder to each column in cat_cols and store encoder\n self.encoders = {}\n for i, ii in enumerate(self.cat_cols):\n cat_data = data[ii].astype(str).str.lower().replace('nan', np.nan)\n non_na_ind = cat_data[~(cat_data.isna())].index\n \n le = LabelEncoder()\n le.fit(cat_data.loc[non_na_ind])\n \n self.encoders[ii] = le\n \n print('{:<8} scanned: \"{}\"'.format('({}/{})'.format(i+1, self.n_cat_cols), ii))\n \n return self\n \n \n \n def transform(self, data):\n '''\n Transform class labels to integer labels\n\n Parameters\n ----------\n\n data: DataFrame, shape (n_samples, n_features)\n Full dataset for which categorical features are to be encoded.\n\n Returns\n -------\n\n data_out: DataFrame, shape (n_samples, n_features)\n Full dataset with categorical features encoded as integers and\n null values encoded as -999.\n '''\n\n data_out = data.copy()\n \n for i, ii in enumerate(self.cat_cols):\n cat_data = data_out[ii].astype(str).str.lower().replace('nan', np.nan)\n \n non_na_ind = cat_data[~(cat_data.isna())].index\n cat_data.loc[non_na_ind] = self.encoders[ii].transform(cat_data.loc[non_na_ind])\n \n data_out[ii] = cat_data\n \n print('{:<8} transformed: \"{}\"'.format('({}/{})'.format(i+1, self.n_cat_cols), ii))\n \n\n print('\\nEncoding null values...')\n data_out[self.cat_cols] = data_out[self.cat_cols].fillna(-999).astype(int)\n \n return data_out\n\n \n \n def fit_transform(self, data):\n '''\n Fit encoder and return encoded data.\n\n Parameters\n ----------\n\n data: DataFrame, shape (n_samples, n_features)\n Full dataset for which categorical features are to be identified and encoded.\n\n Returns\n -------\n\n data_out: DataFrame, shape (n_samples, n_features)\n Full dataset with categorical features encoded as integers and\n null values encoded as -999.\n '''\n \n return self.fit(data).transform(data)\n \n \n \n def inverse_transform(self, data):\n '''\n Returns integer values to original classes and -999 values to nulls.\n\n Parameters\n ----------\n\n data: DataFrame, shape (n_samples, n_features)\n Full dataset for which 
categorical features are to be returned to \n their original calues.\n \n Returns\n -------\n\n data_out: DataFrame, shape (n_samples, n_features)\n Full dataset with categorical features returned to orginal values.\n '''\n \n data_out = data.copy()\n \n print('\\nRecoding null values...')\n data_out[self.cat_cols] = data_out[self.cat_cols].replace(-999, np.nan)\n\n \n for i, ii in enumerate(self.cat_cols):\n cat_data = data_out[ii]\n \n non_na_ind = cat_data[~(cat_data.isna())].index\n cat_data.loc[non_na_ind] = self.encoders[ii].inverse_transform(cat_data.loc[non_na_ind].astype(int))\n \n data_out[ii] = cat_data\n \n print('({}/{}) transformed: \"{}\"'.format(i+1, self.n_cat_cols, ii))\n \n return data_out\n \n \n "
] |
[
[
"sklearn.preprocessing.LabelEncoder"
]
] |
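
The `IntegerEncoder` above wraps `sklearn.preprocessing.LabelEncoder` column by column and encodes missing values as -999. The core of one column's round trip, with a made-up series, is sketched below:

```python
import pandas as pd
from sklearn.preprocessing import LabelEncoder

s = pd.Series(["red", "blue", None, "red"])   # one categorical column

le = LabelEncoder()
non_na = s[~s.isna()]

encoded = s.copy()
encoded.loc[non_na.index] = le.fit_transform(non_na)
encoded = encoded.fillna(-999).astype(int)
print(encoded.tolist())                        # [1, 0, -999, 1]

decoded = le.inverse_transform(encoded[encoded != -999])
print(list(decoded))                           # ['red', 'blue', 'red']
```
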
Dootmaan/Point-Unet
|
[
"f5f9732702f991d277c1b006ca8164f76d295b22"
] |
[
"train_saliencyAttentionNet.py"
] |
[
"from model.SaliencyAttentionNet import SaliencyAttentionNet\n# from model.SaliencyAttentionNet import UNet\nfrom dataset.BraTSDataset3D import BraTSDataset3D\nfrom config import config\nimport torch as pt\nimport numpy as np\n# from loss.FALoss3D import FALoss3D\nimport cv2\nfrom medpy.metric.binary import jc, dc, hd95\nfrom scipy import ndimage\nfrom loss.DiceLoss import BinaryDiceLoss\n\nlr = 0.0001\nepoch = 100\nbatch_size = 1\nmodel_path = '/newdata/why/Saved_models'\ncrop_size = config.crop_size\nsize = crop_size[2] * 2 #用于最后cv2显示\nimg_size = config.input_img_size\n\nprint(\n 'Please note that this experiment actually uses 2x larger patch than the displayed patch size(above).'\n)\n\ntrainset = BraTSDataset3D('/newdata/why/BraTS20', mode='train')\n# valset=BraTSDataset3D('/newdata/why/BraTS20',mode='val')\ntestset = BraTSDataset3D('/newdata/why/BraTS20', mode='test')\n\ntrain_dataset = pt.utils.data.DataLoader(trainset,\n batch_size=batch_size,\n shuffle=True,\n drop_last=True)\n# val_dataset=pt.utils.data.DataLoader(valset,batch_size=1,shuffle=True,drop_last=True)\ntest_dataset = pt.utils.data.DataLoader(testset,\n batch_size=1,\n shuffle=True,\n drop_last=True)\n\nmodel = SaliencyAttentionNet(in_ch=1).cuda()\n# model.load_state_dict(\n# pt.load(model_path +\n# '/PointUnet/SaliencyAttentionNet_3D_BraTS_patch-free_bs1_best.pt',\n# map_location='cpu'))\n\n# lossfunc_sr=pt.nn.MSELoss()\nlossfunc_seg = pt.nn.BCELoss()\n# lossfunc_fa=FALoss3D()\noptimizer = pt.optim.Adam(model.parameters(), lr=lr)\nscheduler = pt.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.99)\n\n\ndef ValModel():\n model.eval()\n dice_sum = 0\n hd_sum = 0\n jc_sum = 0\n weight_map = np.zeros(\n (1, 1, 2 * img_size[0], 2 * img_size[1], 2 * img_size[2]))\n for a in range(0, img_size[0] - crop_size[0] + 1,\n crop_size[0] // 2): # overlap0.5\n for b in range(0, img_size[1] - crop_size[1] + 1, crop_size[1] // 2):\n for c in range(0, img_size[2] - crop_size[2] + 1,\n crop_size[2] // 2):\n weight_map[:, :, (2 * a):(2 * (a + crop_size[0])),\n (2 * b):(2 * (b + crop_size[1])),\n (2 * c):(2 * (c + crop_size[2]))] += 1\n\n weight_map[weight_map == 0] = 1\n weight_map = 1. 
/ weight_map\n for i, data in enumerate(val_dataset):\n output_list = np.zeros(\n (1, 1, 2 * img_size[0], 2 * img_size[1], 2 * img_size[2]))\n label_list = np.zeros(\n (1, 1, 2 * img_size[0], 2 * img_size[1], 2 * img_size[2]))\n\n (_, labels, inputs) = data # use label_sr as input\n labels3D = pt.autograd.Variable(labels).type(\n pt.FloatTensor).cuda().unsqueeze(1)\n\n for a in range(0, img_size[0] - crop_size[0] + 1,\n crop_size[0] // 2): # overlap0.5\n for b in range(0, img_size[1] - crop_size[1] + 1,\n crop_size[1] // 2):\n for c in range(0, img_size[2] - crop_size[2] + 1,\n crop_size[2] // 2):\n inputs3D = pt.autograd.Variable(\n inputs[:, (2 * a):(2 * (a + crop_size[0])),\n (2 * b):(2 * (b + crop_size[1])),\n (2 * c):(2 * (c + crop_size[2]))]).type(\n pt.FloatTensor).cuda().unsqueeze(1)\n with pt.no_grad():\n outputs3D = model(inputs3D)\n outputs3D = np.array(outputs3D.cpu().data.numpy())\n # outputs3D=ndimage.interpolation.zoom(outputs3D,[1,1,2,2,2],order=3)\n # outputs3D[outputs3D<0.5]=0\n # outputs3D[outputs3D>=0.5]=1\n output_list[:, :, (2 * a):(2 * (a + crop_size[0])),\n (2 * b):(2 * (b + crop_size[1])),\n (2 * c):(2 * (c + crop_size[2]))] += outputs3D\n\n label_list = np.array(labels3D.cpu().data.numpy())\n\n output_list = np.array(output_list) * weight_map\n\n # label_list=np.array(label_list)\n\n output_list[output_list < 0.5] = 0\n output_list[output_list >= 0.5] = 1\n\n final_img = np.zeros(shape=(2 * img_size[1], 2 * 2 * img_size[2]))\n final_img[:, :2 * img_size[2]] = output_list[0, 0, 64, :, :] * 255\n final_img[:, 2 * img_size[2]:] = label_list[0, 0, 64, :, :] * 255\n cv2.imwrite('TestPhase_Res_patchfree_BraTS.png', final_img)\n\n pr_sum = output_list.sum()\n gt_sum = label_list.sum()\n pr_gt_sum = np.sum(output_list[label_list == 1])\n dice = 2 * pr_gt_sum / (pr_sum + gt_sum)\n dice_sum += dice\n print(\"dice:\", dice)\n\n # hausdorff=hd95(output_list.squeeze(0).squeeze(0),label_list.squeeze(0).squeeze(0))\n # jaccard=jc(output_list.squeeze(0).squeeze(0),label_list.squeeze(0).squeeze(0))\n\n # hd_sum+=hausdorff\n # jc_sum+=jaccard\n\n print(\"Finished. Total dice: \", dice_sum / len(val_dataset), '\\n')\n print(\"Finished. Avg Jaccard: \", jc_sum / len(val_dataset))\n print(\"Finished. Avg hausdorff: \", hd_sum / len(val_dataset))\n return dice_sum / len(val_dataset)\n\n\ndef TestModel():\n model.eval()\n dice_sum = 0\n hd_sum = 0\n jc_sum = 0\n weight_map = np.zeros(\n (1, 1, 2 * img_size[0], 2 * img_size[1], 2 * img_size[2]))\n for a in range(0, img_size[0] - crop_size[0] + 1,\n crop_size[0] // 2): # overlap0.5\n for b in range(0, img_size[1] - crop_size[1] + 1, crop_size[1] // 2):\n for c in range(0, img_size[2] - crop_size[2] + 1,\n crop_size[2] // 2):\n weight_map[:, :, (2 * a):(2 * (a + crop_size[0])),\n (2 * b):(2 * (b + crop_size[1])),\n (2 * c):(2 * (c + crop_size[2]))] += 1\n\n weight_map[weight_map == 0] = 1\n weight_map = 1. 
/ weight_map\n for i, data in enumerate(test_dataset):\n output_list = np.zeros(\n (1, 1, 2 * img_size[0], 2 * img_size[1], 2 * img_size[2]))\n label_list = np.zeros(\n (1, 1, 2 * img_size[0], 2 * img_size[1], 2 * img_size[2]))\n\n (_, labels, inputs) = data # use label_sr as input\n labels3D = pt.autograd.Variable(labels).type(\n pt.FloatTensor).cuda().unsqueeze(1)\n\n for a in range(0, img_size[0] - crop_size[0] + 1,\n crop_size[0] // 2): # overlap0.5\n for b in range(0, img_size[1] - crop_size[1] + 1,\n crop_size[1] // 2):\n for c in range(0, img_size[2] - crop_size[2] + 1,\n crop_size[2] // 2):\n inputs3D = pt.autograd.Variable(\n inputs[:, (2 * a):(2 * (a + crop_size[0])),\n (2 * b):(2 * (b + crop_size[1])),\n (2 * c):(2 * (c + crop_size[2]))]).type(\n pt.FloatTensor).cuda().unsqueeze(1)\n with pt.no_grad():\n outputs3D = model(inputs3D)\n outputs3D = np.array(outputs3D.cpu().data.numpy())\n # outputs3D=ndimage.interpolation.zoom(outputs3D,[1,1,2,2,2],order=3)\n # outputs3D[outputs3D<0.5]=0\n # outputs3D[outputs3D>=0.5]=1\n output_list[:, :, (2 * a):(2 * (a + crop_size[0])),\n (2 * b):(2 * (b + crop_size[1])),\n (2 * c):(2 * (c + crop_size[2]))] += outputs3D\n\n label_list = np.array(labels3D.cpu().data.numpy())\n\n output_list = np.array(output_list) * weight_map\n\n # label_list=np.array(label_list)\n\n output_list[output_list < 0.5] = 0\n output_list[output_list >= 0.5] = 1\n\n final_img = np.zeros(shape=(2 * img_size[1], 2 * 2 * img_size[2]))\n final_img[:, :2 * img_size[2]] = output_list[0, 0, 64, :, :] * 255\n final_img[:, 2 * img_size[2]:] = label_list[0, 0, 64, :, :] * 255\n cv2.imwrite('TestPhase_Res_patchfree_BraTS.png', final_img)\n\n pr_sum = output_list.sum()\n gt_sum = label_list.sum()\n pr_gt_sum = np.sum(output_list[label_list == 1])\n dice = 2 * pr_gt_sum / (pr_sum + gt_sum)\n dice_sum += dice\n # print(\"dice:\",dice)\n\n try:\n hausdorff = hd95(\n output_list.squeeze(0).squeeze(0),\n label_list.squeeze(0).squeeze(0))\n except:\n hausdorff = 0\n jaccard = jc(\n output_list.squeeze(0).squeeze(0),\n label_list.squeeze(0).squeeze(0))\n\n print(\"dice:\", dice, \";hd95:\", hausdorff, \";jaccard:\", jaccard)\n\n hd_sum += hausdorff\n jc_sum += jaccard\n\n print(\"Finished. Total dice: \", dice_sum / len(test_dataset), '\\n')\n print(\"Finished. Avg Jaccard: \", jc_sum / len(test_dataset))\n print(\"Finished. 
Avg hausdorff: \", hd_sum / len(test_dataset))\n return dice_sum / len(test_dataset)\n\n\n# TestModel()\n# raise Exception(\"end of test\")\n\nbest_dice = 0\nfor x in range(epoch):\n model.train()\n print('==>Epoch', x, ': lr=', optimizer.param_groups[0]['lr'], '==>\\n')\n\n for i, data in enumerate(train_dataset):\n (_, labels_seg, inputs) = data\n optimizer.zero_grad()\n inputs = pt.autograd.Variable(inputs).type(\n pt.FloatTensor).cuda().unsqueeze(1)\n labels_seg = pt.autograd.Variable(labels_seg).type(\n pt.FloatTensor).cuda().unsqueeze(1)\n outputs_seg = model(inputs)\n loss_seg = lossfunc_seg(\n outputs_seg, labels_seg) #+lossfunc_dice(outputs_seg,labels_seg)\n\n loss_seg.backward()\n optimizer.step()\n\n if i % 10 == 0:\n final_img = np.zeros(shape=(size, size * 3))\n print('[epoch {:3d},iter {:5d}]'.format(x, i), 'loss:',\n loss_seg.item())\n final_img[:, 0:size] = outputs_seg.cpu().data.numpy()[\n 0, 0, crop_size[0], :, :] * 255\n # final_img[:,128:256]=outputs_sr.cpu().data.numpy()[0,0,31,:,:]*255\n final_img[:, size:(\n 2 * size\n )] = labels_seg.cpu().data.numpy()[0, 0, crop_size[0], :, :] * 255\n # final_img[:,384:512]=labels_sr.cpu().data.numpy()[0,0,31,:,:]*255\n final_img[:, (\n 2 * size\n ):] = inputs.cpu().data.numpy()[0, 0, crop_size[0], :, :] * 255\n cv2.imwrite('resunet_3d_patchfree_combine.png', final_img)\n\n scheduler.step()\n\n print('==>End of epoch', x, '==>\\n')\n\n print('===VAL===>')\n dice = TestModel()\n if dice > best_dice:\n best_dice = dice\n print(\n 'New best dice! Model saved to', model_path +\n '/PointUnet/SaliencyAttentionNet_3D_BraTS_patch-free_bs' +\n str(batch_size) + '_best.pt')\n pt.save(\n model.state_dict(), model_path +\n '/PointUnet/SaliencyAttentionNet_3D_BraTS_patch-free_bs' +\n str(batch_size) + '_best.pt')\n # print('===TEST===>')\n # TestModel()\n\nprint('\\nBest Dice:', best_dice)\n"
] |
[
[
"torch.utils.data.DataLoader",
"torch.optim.lr_scheduler.ExponentialLR",
"torch.nn.BCELoss",
"torch.no_grad",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"torch.autograd.Variable"
]
] |
uvuvwewe/celp
|
[
"1da84d005fd0f0705704c6e555b821ce0cfa8ae1"
] |
[
".~c9_invoke_whzHnd.py"
] |
[
"from data import CITIES, BUSINESSES, USERS, REVIEWS, TIPS, CHECKINS\n\nimport random\nimport json\nimport pandas as pd\nimport numpy as np\nfrom pathlib import Path\n\n\n# HELPERS\ndef create_similarity_matrix_categories(matrix):\n \"\"\"Create a \"\"\"\n npu = matrix.values\n m1 = npu @ npu.T\n diag = np.diag(m1)\n m2 = m1 / diag\n m3 = np.minimum(m2, m2.T)\n return pd.DataFrame(m3, index = matrix.index, columns = matrix.index)\n\n\ndef get_data(bid):\n for i in data:\n if i['business_id'] == bid:\n return {'business_id' : bid, 'stars' : i['stars'], 'name' : i['name'], 'city' : i['city'], 'adress' : i['address']}\n\n\n# HELPERS\n\n\ndef recommend(user_id=None, business_id=None, city=None, n=10):\n\n business_id = '-MsRvdPnuw6QuLn5Vxjruw'\n city = 'Westlake'\n\n \"\"\"\n Returns n recommendations as a list of dicts.\n Optionally takes in a user_id, business_id and/or city.\n A recommendation is a dictionary in the form of:\n {\n business_id:str\n stars:str\n name:str\n city:str\n adress:str\n }\n \"\"\"\n # Genereer random stad indien geen city gegeven\n if not city:\n city = random.choice(CITIES)\n #return random.sample(BUSINESSES[city], n)\n\n # Genereer random business indien geen business gegeven\n if not business_id:\n return random.sample(BUSINESSES[city], n)\n\n # Data inladen\n path = \"data/\"\n path += city.lower() + \"/\"\n data_folder = Path(path)\n file_to_open = data_folder / \"business.json\"\n data = []\n for line in open(file_to_open, 'r'):\n data.append(json.loads(line))\n\n\n # Zelfde categorie + lijst maken WERKT!\n dict_categories = {}\n for i in data:\n dict_categories[i['business_id']] = i['categories'].split(', ')\n\n\n # Maak waarden in lijst per twee WERKT!\n listos = []\n for i in dict_categories:\n for j in dict_categories[i]:\n listos.append([i, j])\n\n\n # Maak er een pandas-dataframe van WERKT!\n df_categories = pd.DataFrame(columns=['bedrijf', 'soort'])\n for i in range(len(listos)):\n df_categories.loc[i] = listos[i]\n\n\n # Maak similarity matrix WERKT!\n utility_matrix = df_categories.pivot_table(index = 'bedrijf', columns = 'soort', aggfunc = 'size', fill_value=0)\n similarity_matrix = create_similarity_matrix_categories(utility_matrix)\n\n\n # Kijk of bedrijf similar is KIJK EVEN!\n similarities = {}\n for column in similarity_matrix:\n for row in similarity_matrix:\n if 0 < similarity_matrix.loc[column][row] < 1:\n similarities[(column, row)] = {'value' : similarity_matrix.loc[column][row]}\n\n\n\n print(similarities)\n print('dus t werkt (nu nog niet dus)')\n\n # Maak paren met gelijkenis\n possibilities = {}\n for Tuple in similarities:\n if business_id in Tuple[0]:\n possibilities[Tuple[1]] = similarities[Tuple]['value']\n if business_id in Tuple[1]:\n possibilities[Tuple[0]] = similarities[Tuple]['value']\n\n\n # Kijk welke meest gelijke er zijn, en hier de top n van nemen\n possible = pd.Series(possibilities)\n possible = possible.sort_values(ascending=False)\n possible_id = possible.index.tolist()[:n]\n\n\n # Maak laatste lijstje om te returnen\n finalreturn = []\n for i in possible_id:\n finalreturn.append(get_data(i))\n\n return finalreturn\n\n\n\n"
] |
[
[
"numpy.diag",
"numpy.minimum",
"pandas.Series",
"pandas.DataFrame"
]
] |
pioy/incubator-mxnet
|
[
"9d432079f489327ec6daab61326a3e4f3c7cb8b3"
] |
[
"benchmark/opperf/utils/profiler_utils.py"
] |
[
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport time\nimport functools\nimport numpy as np\n\nfrom .common_utils import merge_map_list\nfrom mxnet import profiler\n\n\"\"\"\nTODO: Below we are using logic of parsing the MXNet profiler output string to\nfetch the benchmark results. Note that this is a temporary solution till we add\na new utility API into MXNet profiler to get_summary(), reset(). All the below\nparsing logic should be removed once these read APIs are available in Profiler.\n\n\"\"\"\n\n\ndef _get_memory_profile(memory_profile_results):\n memory_profile = {}\n for line in memory_profile_results:\n if line.startswith(\"Memory:\"):\n device_id = line.split()[1]\n avg_time_memory_alloc = float(line.split()[-1])\n memory_profile[\"max_storage_mem_alloc_\" + device_id] = avg_time_memory_alloc\n\n return memory_profile\n\n\ndef _get_operator_profile(operator_name, operator_profile_results):\n operator_profile = {}\n\n # alias map : dictionary of the form {\"alias\" : \"registered_name\"}\n # allows to retrieve alias operator profile from the profiler results\n alias_map = {\"broadcast_plus\": \"broadcast_add\", \"broadcast_minus\": \"broadcast_sub\", \"flatten\": \"Flatten\", \"max_axis\": \"max\", \"Custom\": \"CustomAddOne\",\n \"swapaxes\": \"SwapAxis\", \"flip\": \"reverse\", \"reshape\": \"Reshape\", \"crop\": \"slice\", \"sum_axis\": \"sum\", \"min_axis\": \"min\", \"ctc_loss\": \"CTCLoss\",\n \"fill_element_0index\": \"TernaryOp\", \"identity\": \"_copy\", \"ElementWiseSum\": \"add_n\", \"choose_element_0index\": \"pick\", \"stop_gradient\": \"BlockGrad\",\n \"broadcast_axes\": \"broadcast_axis\"}\n\n op_name = None\n\n if operator_name in alias_map:\n op_name = alias_map[operator_name]\n else:\n op_name = operator_name\n\n # Variables to store forward/backward performance results\n forward_res, backward_res = None, None\n\n for line in operator_profile_results:\n if op_name in line or op_name[:3] + \" \" in line:\n operation = line.split()[0]\n operation_avg_time = float(line.split()[-1])\n if \"_backward\" in operation:\n backward_res = operation_avg_time\n else:\n forward_res = operation_avg_time\n\n # Add forward and backward performance results to the dict in the correct order\n if forward_res:\n operator_profile[\"avg_time_forward_\" + operator_name] = forward_res\n\n if backward_res:\n operator_profile[\"avg_time_backward_\" + operator_name] = backward_res\n\n return operator_profile\n\n\ndef parse_profiler_dump(operator_name, profiler_dump):\n \"\"\"Parse the MXNet profiler dump output, fetch Memory profile results and\n Operator compute profiler results.\n\n Parameters\n ----------\n profiler_dump: string\n MXNet profiler output from mx.profiler.dumps() API.\n\n Returns\n -------\n map, Memory and Compute profiler 
results.\n\n \"\"\"\n if not profiler_dump:\n raise AssertionError(\"Invalid MXNet profiler output provided to parse!\")\n\n \"\"\"\n MXNet profiler output from mx.profiler.dumps() API looks like below. This function parses\n this string profiler output to fetch Memory and Compute metrics.\n\n Profile Statistics.\n Note that counter items are counter values and not time units.\n Device Storage\n =================\n Name Total Count Time (ms) Min Time (ms) Max Time (ms) Avg Time (ms)\n ---- ----------- --------- ------------- ------------- -------------\n Memory: cpu/0 100 2097152.0000 1681915.8750 2097152.0000 207618.0469\n\n MXNET_C_API\n =================\n Name Total Count Time (ms) Min Time (ms) Max Time (ms) Avg Time (ms)\n ---- ----------- --------- ------------- ------------- -------------\n MXNDArrayFree 49 1.1220 0.0170 0.0360 0.0229\n MXAutogradBackwardEx 50 11.5460 0.1980 0.3360 0.2309\n MXNet C API Calls 399 1.9990 1.6010 1.9990 0.1990\n MXImperativeInvoke 50 4.4810 0.0700 0.1330 0.0896\n MXNDArrayWaitAll 50 769.0570 14.0200 24.5030 15.3811\n MXAutogradSetIsTraining 100 0.0190 0.0000 0.0010 0.0002\n MXAutogradSetIsRecording 100 0.0400 0.0000 0.0010 0.0004\n MXNet C API Concurrency 798 0.0000 0.0000 0.0010 0.0005\n\n operator\n =================\n Name Total Count Time (ms) Min Time (ms) Max Time (ms) Avg Time (ms)\n ---- ----------- --------- ------------- ------------- -------------\n DeleteVariable 196 1.4490 0.0040 0.0250 0.0074\n _backward_broadcast_add 100 521.2320 4.8070 8.5970 5.2123\n SetValueOp 100 645.8060 5.8820 10.0380 6.4581\n broadcast_add 100 394.8910 3.5230 5.8790 3.9489\n \"\"\"\n\n # String Patterns to look out for when parsing\n memory_profile_result_start = \"Device Storage\" # Helps identify start of Memory profile\n c_api_profile_result_start = \"MXNET_C_API\" # Helps identify end of Memory profile\n\n if operator_name == \"Custom\":\n operator_profile_result_start = \"Custom Operator\" # Helps identify start of Custom Operator profile\n else:\n operator_profile_result_start = \"operator\" # Helps identify start of Operator profile\n\n memory_profile_results = []\n operator_profile_results = []\n\n # Parse lines corresponding to Memory and Computation profiling\n read_memory_profile = False\n read_operator_profile = False\n for line in profiler_dump.splitlines():\n if line.startswith(memory_profile_result_start):\n read_memory_profile = True\n elif line.startswith(operator_profile_result_start):\n read_operator_profile = True\n elif line.startswith(c_api_profile_result_start):\n read_memory_profile = False\n\n if read_memory_profile:\n memory_profile_results.append(line)\n elif read_operator_profile:\n operator_profile_results.append(line)\n\n # Prepare results\n memory_profile = _get_memory_profile(memory_profile_results)\n operator_profile = _get_operator_profile(operator_name, operator_profile_results)\n\n return merge_map_list([memory_profile, operator_profile])\n\n\ndef cpp_profile(func):\n \"\"\"Decorator for profiling MXNet operation.\n Uses MXNet profiler to collect metrics on memory usage and execution time\n of the operation.\n\n Parameters\n ----------\n func:\n Operation to be executed and timed.\n\n Returns\n -------\n res, profiler output. 
res being result returned after operator execution.\n profiler output is a dictionary with summary of operation execution.\n Example output : { \"add\": [{\"avg_time_mem_alloc_cpu/0\": 207618.0469,\n \"avg_time_forward_broadcast_add\": 4.204,\n \"avg_time_backward_broadcast_add\": 5.6288,\n \"inputs\": {\n \"lhs\": [1024, 1024],\n \"rhs\": [1024,1024]\n }]\n }\n \"\"\"\n\n @functools.wraps(func)\n def cpp_profile_it(*args, **kwargs):\n # Profile the operation\n profiler.set_config(profile_all=True, aggregate_stats=True)\n profiler.set_state('run')\n res = func(*args, **kwargs)\n profiler.set_state('stop')\n\n # Prepare the results\n profiler_dump = profiler.dumps(reset=True)\n\n # args[0] is assumed to be operator name, if not found check for block name.\n # NOTE: This parameter should be removed when we get away from parsing\n # profiler output and start using new profiler APIs - get_summary(), reset()\n if len(args) > 0:\n operator_name = args[0].__name__\n elif 'block' in kwargs:\n operator_name = kwargs['block']._op_name\n else:\n raise ValueError(\"Unable to identify operator name to extract profiler output!\")\n\n # Get the MXNet profile output\n profiler_output = parse_profiler_dump(operator_name, profiler_dump)\n return res, profiler_output\n\n return cpp_profile_it\n\n\ndef python_profile(func):\n \"\"\"Decorator for profiling MXNet operation.\n Uses Python's time module to collect execution time information\n of the operation.\n\n Parameters\n ----------\n func:\n Operation to be executed and timed.\n\n Returns\n -------\n res, timing output. res being result returned after operator execution.\n profiler output is a dictionary with summary of operation execution.\n Example output : { \"add\": [{\"avg_time_add\": 0.4053089120425284,\n 'p50_time_add': 16.761042876169086,\n 'p90_time_add': 18.081666342914108,\n 'p99_time_add': 19.060144051909447,\n \"inputs\": {\n \"lhs\": [1024, 1024],\n \"rhs\": [1024,1024]\n }]\n }\n \"\"\"\n\n @functools.wraps(func)\n def python_profile_it(*args, **kwargs):\n runs = args[1]\n modified_args = (args[0], 1)\n times = []\n\n for _ in range(runs):\n start_time = time.perf_counter() # 1\n res = func(*modified_args, **kwargs)\n end_time = time.perf_counter() # 2\n run_time = (end_time - start_time)*1000 # 3\n times.append(run_time)\n\n # NOTE : same as cpp_profile_it\n if len(args) > 0:\n operator_name = args[0].__name__\n elif 'block' in kwargs:\n operator_name = kwargs['block']._op_name\n else:\n raise ValueError(\"Unable to identify operator name to extract profiler output!\")\n\n avg_run_time = np.mean(times)\n p50_run_time = np.percentile(times, 50)\n p90_run_time = np.percentile(times, 90)\n p99_run_time = np.percentile(times, 99)\n\n profiler_output = {'avg_time_'+str(operator_name): avg_run_time,\n 'p50_time_'+str(operator_name): p50_run_time,\n 'p90_time_'+str(operator_name): p90_run_time,\n 'p99_time_'+str(operator_name): p99_run_time,\n }\n return res, profiler_output\n return python_profile_it\n"
] |
[
[
"numpy.mean",
"numpy.percentile"
]
] |
uberman4740/AlgoRepo
|
[
"a0ef89f9ec2bb7de7d9377147f06013a682470e6"
] |
[
"pykalman/plot_online.py"
] |
[
"'''\r\n==============================================\r\nOnline State Estimation with the Kalman Filter\r\n==============================================\r\n\r\nThis example shows how :class:`KalmanFilter` can be used to estimate hidden\r\nstates in an online setting.\r\n\r\nWhile the Kalman Smoother is able to estimate the hidden state of a target at\r\nany time step using *all* measurements, the Kalman Filter only uses\r\nmeasurements up to and including the current time step. This is done using a\r\nset of recursive formulae that only require the mean and covariance matrix\r\noutput by the Kalman Filter at the previous time step, meaning that we may\r\napply the Kalman Filter in an online manner.\r\n\r\nThe drawn figure shows two sets of lines; the first represents the true, hidden\r\nstate of the target, while the second represents the estimates output by the\r\nKalman Filter.\r\n'''\r\nimport numpy as np\r\nimport pylab as pl\r\n\r\nfrom pykalman.datasets import load_robot\r\nfrom pykalman import KalmanFilter\r\n\r\n# Initialize the Kalman Filter\r\ndata = load_robot()\r\nkf = KalmanFilter(\r\n data.transition_matrix,\r\n data.observation_matrix,\r\n data.initial_transition_covariance,\r\n data.initial_observation_covariance,\r\n data.transition_offsets,\r\n data.observation_offset,\r\n data.initial_state_mean,\r\n data.initial_state_covariance,\r\n random_state=0\r\n)\r\n\r\n# Estimate mean and covariance of hidden state distribution iteratively. This\r\n# is equivalent to\r\n#\r\n# >>> (filter_state_means, filtered_state_covariance) = kf.filter(data)\r\nn_timesteps = data.observations.shape[0]\r\nn_dim_state = data.transition_matrix.shape[0]\r\nfiltered_state_means = np.zeros((n_timesteps, n_dim_state))\r\nfiltered_state_covariances = np.zeros((n_timesteps, n_dim_state, n_dim_state))\r\nfor t in range(n_timesteps - 1):\r\n if t == 0:\r\n filtered_state_means[t] = data.initial_state_mean\r\n filtered_state_covariances[t] = data.initial_state_covariance\r\n \r\n filtered_state_means[t + 1], filtered_state_covariances[t + 1] = (\r\n kf.filter_update(\r\n filtered_state_means[t],\r\n filtered_state_covariances[t],\r\n data.observations[t + 1],\r\n transition_offset=data.transition_offsets[t],\r\n )\r\n )\r\n\r\n# draw estimates\r\npl.figure()\r\nlines_true = pl.plot(data.states, color='b')\r\nlines_filt = pl.plot(filtered_state_means, color='r')\r\npl.legend((lines_true[0], lines_filt[0]), ('true', 'filtered'))\r\npl.show()"
] |
[
[
"numpy.zeros"
]
] |
peteykun/R-Net
|
[
"b5525b98e0f8a3f41be2b7bb102ca24bff1ef3b8"
] |
[
"util.py"
] |
[
"import tensorflow as tf\nimport numpy as np\nimport re\nfrom collections import Counter\nimport string\n\n\ndef get_record_parser(config, is_test=False):\n def parse(example):\n para_limit = config.test_para_limit if is_test else config.para_limit\n ques_limit = config.test_ques_limit if is_test else config.ques_limit\n char_limit = config.char_limit\n features = tf.parse_single_example(example,\n features={\n \"context_idxs\": tf.FixedLenFeature([], tf.string),\n \"ques_idxs\": tf.FixedLenFeature([], tf.string),\n \"context_char_idxs\": tf.FixedLenFeature([], tf.string),\n \"ques_char_idxs\": tf.FixedLenFeature([], tf.string),\n \"y1\": tf.FixedLenFeature([], tf.string),\n \"y2\": tf.FixedLenFeature([], tf.string),\n \"id\": tf.FixedLenFeature([], tf.int64)\n })\n context_idxs = tf.reshape(tf.decode_raw(\n features[\"context_idxs\"], tf.int32), [para_limit])\n ques_idxs = tf.reshape(tf.decode_raw(\n features[\"ques_idxs\"], tf.int32), [ques_limit])\n context_char_idxs = tf.reshape(tf.decode_raw(\n features[\"context_char_idxs\"], tf.int32), [para_limit, char_limit])\n ques_char_idxs = tf.reshape(tf.decode_raw(\n features[\"ques_char_idxs\"], tf.int32), [ques_limit, char_limit])\n y1 = tf.reshape(tf.decode_raw(\n features[\"y1\"], tf.float32), [para_limit])\n y2 = tf.reshape(tf.decode_raw(\n features[\"y2\"], tf.float32), [para_limit])\n qa_id = features[\"id\"]\n return context_idxs, ques_idxs, context_char_idxs, ques_char_idxs, y1, y2, qa_id\n return parse\n\n\ndef get_batch_dataset(record_file, parser, config):\n num_threads = tf.constant(config.num_threads, dtype=tf.int32)\n dataset = tf.data.TFRecordDataset(record_file).map(\n parser, num_parallel_calls=num_threads).shuffle(config.capacity).repeat()\n if config.is_bucket:\n buckets = [tf.constant(num) for num in range(*config.bucket_range)]\n\n def key_func(context_idxs, ques_idxs, context_char_idxs, ques_char_idxs, y1, y2, qa_id):\n c_len = tf.reduce_sum(\n tf.cast(tf.cast(context_idxs, tf.bool), tf.int32))\n buckets_min = [np.iinfo(np.int32).min] + buckets\n buckets_max = buckets + [np.iinfo(np.int32).max]\n conditions_c = tf.logical_and(\n tf.less(buckets_min, c_len), tf.less_equal(c_len, buckets_max))\n bucket_id = tf.reduce_min(tf.where(conditions_c))\n return bucket_id\n\n def reduce_func(key, elements):\n return elements.batch(config.batch_size)\n\n dataset = dataset.apply(tf.contrib.data.group_by_window(\n key_func, reduce_func, window_size=5 * config.batch_size)).shuffle(len(buckets) * 25)\n else:\n dataset = dataset.batch(config.batch_size)\n return dataset\n\n\ndef get_dataset(record_file, parser, config):\n num_threads = tf.constant(config.num_threads, dtype=tf.int32)\n dataset = tf.data.TFRecordDataset(record_file).map(\n parser, num_parallel_calls=num_threads).repeat().batch(config.batch_size)\n return dataset\n\n\ndef convert_tokens(eval_file, qa_id, pp1, pp2):\n answer_dict = {}\n remapped_dict = {}\n for qid, p1, p2 in zip(qa_id, pp1, pp2):\n context = eval_file[str(qid)][\"context\"]\n spans = eval_file[str(qid)][\"spans\"]\n uuid = eval_file[str(qid)][\"uuid\"]\n #print(uuid)\n start_idx = spans[p1][0]\n end_idx = spans[p2][1]\n answer_dict[str(qid)] = context[start_idx: end_idx]\n remapped_dict[uuid] = context[start_idx: end_idx]\n return answer_dict, remapped_dict\n\n\ndef evaluate(eval_file, answer_dict):\n f1 = exact_match = total = 0\n for key, value in answer_dict.items():\n total += 1\n ground_truths = eval_file[key][\"answers\"]\n prediction = value\n exact_match += metric_max_over_ground_truths(\n 
exact_match_score, prediction, ground_truths)\n f1 += metric_max_over_ground_truths(f1_score,\n prediction, ground_truths)\n exact_match = 100.0 * exact_match / total\n f1 = 100.0 * f1 / total\n return {'exact_match': exact_match, 'f1': f1}\n\n\ndef normalize_answer(s):\n\n def remove_articles(text):\n return re.sub(r'\\b(a|an|the)\\b', ' ', text)\n\n def white_space_fix(text):\n return ' '.join(text.split())\n\n def remove_punc(text):\n exclude = set(string.punctuation)\n return ''.join(ch for ch in text if ch not in exclude)\n\n def lower(text):\n return text.lower()\n\n return white_space_fix(remove_articles(remove_punc(lower(s))))\n\n\ndef f1_score(prediction, ground_truth):\n prediction_tokens = normalize_answer(prediction).split()\n ground_truth_tokens = normalize_answer(ground_truth).split()\n common = Counter(prediction_tokens) & Counter(ground_truth_tokens)\n num_same = sum(common.values())\n if num_same == 0:\n return 0\n precision = 1.0 * num_same / len(prediction_tokens)\n recall = 1.0 * num_same / len(ground_truth_tokens)\n f1 = (2 * precision * recall) / (precision + recall)\n return f1\n\n\ndef exact_match_score(prediction, ground_truth):\n return (normalize_answer(prediction) == normalize_answer(ground_truth))\n\n\ndef metric_max_over_ground_truths(metric_fn, prediction, ground_truths):\n scores_for_ground_truths = []\n for ground_truth in ground_truths:\n score = metric_fn(prediction, ground_truth)\n scores_for_ground_truths.append(score)\n return max(scores_for_ground_truths)\n"
] |
[
[
"tensorflow.constant",
"tensorflow.FixedLenFeature",
"tensorflow.less",
"tensorflow.data.TFRecordDataset",
"tensorflow.decode_raw",
"tensorflow.less_equal",
"tensorflow.cast",
"numpy.iinfo",
"tensorflow.where",
"tensorflow.contrib.data.group_by_window"
]
] |
Delebrith/leaf-recognition
|
[
"c58ccd84dfcbff8be98aa1ea5797fa489ae8f73b"
] |
[
"src/visualize_svm.py"
] |
[
"from src.DfPerceptron import DfPerceptron\nfrom src.LeNet5 import LeNet5\nfrom matplotlib import pyplot as plt\nfrom sklearn.metrics import auc\n\nfrom src.file_utils import get_images_in_classes\nfrom src.svm import get_train_data, SVC\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import roc_curve\nimport numpy as np\nimport pandas as pd\n\n\ndef plot_cnn(width, height, filter_size, filters, batch_size, regularization, color):\n lenet = LeNet5(width, height, 32, filter_size, filters, None if regularization == 'None' else regularization, 0.001)\n lenet.load(\"../../data/sets/lenet-model-{}-{}-{}-{}-{}-{}-0.001-adam.hdf5\"\n .format(width, height, filter_size, filters, batch_size, regularization))\n fpr, tpr = lenet.draw_roc(\"../../data/sets\")\n roc_auc = auc(fpr, tpr)\n\n plt.plot(fpr, tpr,\n label='LeNet5 model ({}x{}) filter size: {} filters: {} batch size: {} regularization: {} '\n 'ROC curve (area = {})'\n .format(width, height, filter_size, filters, batch_size, regularization,\n roc_auc),\n color=color, linewidth=1)\n\n\ndef plot_svm(kernel, C, color):\n files_dict = get_images_in_classes(\"../../data/svm/\")\n data = get_train_data(files_in_classes=files_dict, data_dir=\"../../data/svm/\")\n X, Y = zip(*data)\n X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2,\n shuffle=True, random_state=1)\n\n svm = SVC(kernel=kernel, degree=2, C=C, class_weight='balanced', random_state=1,\n verbose=2)\n y_score = svm.fit(X_train, y_train).decision_function(X_test).ravel()\n\n def to_catogrical(vector):\n result = []\n for _ in range(len(vector)):\n result.append([0] * 32)\n\n for elem in range(len(result)):\n result[elem][vector[elem]] = 1\n\n return result\n y_test = np.asarray(to_catogrical(y_test)).ravel()\n\n fpr, tpr, _ = roc_curve(y_test, y_score)\n roc_auc = auc(fpr, tpr)\n\n # Compute micro-average ROC curve and ROC area\n lw = 2\n plt.plot(fpr, tpr, color=color,\n lw=lw, label='ROC curve SVM kernel: {} C: {} (area = {})'.format(kernel, C, roc_auc))\n\n\ndef main():\n plt.figure(1)\n\n plot_cnn(32, 64, 3, 20, 32, 'None', '#00FF00')\n plot_svm('poly', 0.1, '#FF0000')\n plot_svm('poly', 1, '#FF9999')\n plot_svm('poly', 10, '#FF00FF')\n plot_svm('rbf', 0.1, '#0000FF')\n plot_svm('rbf', 1, '#9999FF')\n plot_svm('rbf', 10, '#00FFFF')\n\n perceptron = DfPerceptron(512, 256, 128, 32)\n perceptron.load(\"../../rgb-to-csv2/perceptron-model-512-256-128-input-6-1000.hdf5\")\n test_df = pd.read_csv(\"../../rgb-to-csv2/test-leafs.csv\")\n fpr, tpr = perceptron.draw_roc(test_df, \"../../rgb-to-csv2\")\n roc_auc = auc(fpr, tpr)\n\n plt.plot(fpr, tpr,\n label='perceptron (512 x 256 x 128) (6 x 1000) ROC curve (area = {0:0.2f})'\n ''.format(roc_auc),\n color='#00FF99', linewidth=1)\n\n plt.plot([0, 1], [0, 1], 'k--', lw=1)\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.legend(loc=\"lower right\", prop={'size': 8})\n plt.show()\n\n\nif __name__ == \"__main__\":\n main()"
] |
[
[
"matplotlib.pyplot.legend",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.roc_curve",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"sklearn.metrics.auc",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
sandroci/dedupe
|
[
"20736bb693aeffa0ac6fb1990485fb39f60c0d7b"
] |
[
"dedupe/clustering.py"
] |
[
"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport itertools\nfrom collections import defaultdict\nimport array\nimport logging\n\nimport numpy\nimport fastcluster\nimport hcluster\n\nfrom typing import (Iterable,\n Dict,\n ValuesView,\n cast,\n List,\n Set,\n Generator,\n Sequence,\n Tuple)\nfrom dedupe._typing import Clusters, RecordID, Links\n\nlogger = logging.getLogger(__name__)\n\n\ndef connected_components(edgelist: numpy.ndarray,\n max_components: int) -> Generator[numpy.ndarray, None, None]:\n\n if len(edgelist) == 0:\n raise StopIteration()\n\n components = union_find(edgelist['pairs'])\n\n for component in components:\n sub_graph = edgelist[component]\n n_components = len(numpy.unique(sub_graph['pairs']))\n\n if n_components > max_components:\n min_score = numpy.min(sub_graph['score'])\n min_score_logit = numpy.log(min_score) - numpy.log(1 - min_score)\n threshold = 1 / (1 + numpy.exp(-min_score_logit - 1))\n logger.warning('A component contained %s elements. '\n 'Components larger than %s are '\n 're-filtered. The threshold for this '\n 'filtering is %s' % (n_components,\n max_components,\n threshold))\n filtered_sub_graph = sub_graph[sub_graph['score'] > threshold]\n for sub_graph in connected_components(filtered_sub_graph,\n max_components):\n yield sub_graph\n else:\n yield sub_graph\n\n\ndef union_find(edgelist: numpy.ndarray) -> ValuesView[Sequence[int]]:\n\n root: Dict[RecordID, RecordID] = {}\n components = {}\n component_size = {}\n\n it = numpy.nditer(edgelist, ['external_loop'])\n\n for i, (a, b) in enumerate(it):\n root_a = root.get(a)\n root_b = root.get(b)\n\n if root_a is None and root_b is None:\n # assuming that it will be a while before we are handling\n # edgelists of much more than 4 billion elements we will\n # use an the 'I' type\n components[a] = array.array('I', [i])\n component_size[a] = 2\n root[a] = root[b] = a\n elif root_a is None or root_b is None:\n if root_a is None:\n b = a\n root_a = root_b\n components[root_a].append(i)\n component_size[root_a] += 1\n root_a = cast(RecordID, root_a)\n root[b] = root_a\n elif root_a != root_b:\n if component_size[root_a] < component_size[root_b]:\n root_a, root_b = root_b, root_a\n\n components[root_a].extend(components[root_b])\n components[root_a].append(i)\n\n component_b = numpy.unique(edgelist[components[root_b]])\n\n for node in component_b:\n root[node] = root_a\n\n component_size[root_a] += len(component_b)\n\n del components[root_b]\n del component_size[root_b]\n\n else:\n components[root_a].append(i)\n\n return components.values()\n\n\ndef condensedDistance(dupes: numpy.ndarray) -> Tuple[Dict[int, RecordID],\n numpy.ndarray,\n int]:\n '''\n Convert the pairwise list of distances in dupes to \"condensed\n distance matrix\" required by the hierarchical clustering\n algorithms. 
Also return a dictionary that maps the distance matrix\n to the record_ids.\n\n The formula for an index of the condensed matrix is\n\n index = {N choose 2}-{N-row choose 2} + (col-row-1)\n = N*(N-1)/2 - (N-row)*(N-row-1)/2 + col - row - 1\n ^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^\n matrix_length row_step\n\n where (row,col) is index of an uncondensed square N X N distance matrix.\n\n See http://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.squareform.html\n '''\n\n candidate_set = numpy.unique(dupes['pairs'])\n\n i_to_id = dict(enumerate(candidate_set))\n\n ids = candidate_set.searchsorted(dupes['pairs'])\n row = ids[:, 0]\n col = ids[:, 1]\n\n N = len(candidate_set)\n matrix_length = N * (N - 1) / 2\n\n row_step = (N - row) * (N - row - 1) / 2\n index = matrix_length - row_step + col - row - 1\n\n condensed_distances = numpy.ones(int(matrix_length), 'f4')\n condensed_distances[index.astype(int)] = 1 - dupes['score']\n\n return i_to_id, condensed_distances, N\n\n\ndef cluster(dupes: numpy.ndarray,\n threshold: float = .5,\n max_components: int = 30000) -> Clusters:\n '''\n Takes in a list of duplicate pairs and clusters them in to a\n list records that all refer to the same entity based on a given\n threshold\n\n Keyword arguments:\n threshold -- number betweent 0 and 1 (default is .5). lowering the\n number will increase precision, raising it will increase\n recall\n '''\n distance_threshold = 1 - threshold\n dupe_sub_graphs = connected_components(dupes, max_components)\n\n for sub_graph in dupe_sub_graphs:\n if len(sub_graph) > 1:\n\n i_to_id, condensed_distances, N = condensedDistance(sub_graph)\n\n linkage = fastcluster.linkage(condensed_distances,\n method='centroid',\n preserve_input=True)\n\n partition = hcluster.fcluster(linkage,\n distance_threshold,\n criterion='distance')\n\n clusters: Dict[int, List[int]] = defaultdict(list)\n\n for i, cluster_id in enumerate(partition):\n clusters[cluster_id].append(i)\n\n for cluster in clusters.values():\n if len(cluster) > 1:\n scores = confidences(cluster, condensed_distances, N)\n yield tuple(i_to_id[i] for i in cluster), scores\n\n else:\n (ids, score), = sub_graph\n if score > threshold:\n yield tuple(ids), (score,) * 2\n\n\ndef confidences(cluster: Sequence[int],\n condensed_distances: numpy.ndarray,\n d: int) -> numpy.ndarray:\n '''\n We calculate a per record score that is similar to a standard\n deviation. 
The main reason is that these record scores can be\n used to calculate the standard deviation of an entire cluster,\n which is a reasonable metric for clusters.\n '''\n\n scores_d = dict.fromkeys(cluster, 0.0)\n squared_distances = condensed_distances ** 2\n for i, j in itertools.combinations(cluster, 2):\n index = d * (d - 1) / 2 - (d - i) * (d - i - 1) / 2 + j - i - 1\n squared_dist = squared_distances[int(index)]\n scores_d[i] += squared_dist\n scores_d[j] += squared_dist\n scores = numpy.array([score for _, score in sorted(scores_d.items())])\n scores /= len(cluster) - 1\n scores = numpy.sqrt(scores)\n scores = 1 - scores\n return scores\n\n\ndef greedyMatching(dupes: numpy.ndarray) -> Links:\n A: Set[RecordID] = set()\n B: Set[RecordID] = set()\n\n dupes.sort(order='score')\n dupes = dupes[::-1]\n\n for (a, b), score in dupes:\n if a not in A and b not in B:\n A.add(a)\n B.add(b)\n\n yield (a, b), score\n\n\ndef gazetteMatching(scored_blocks: Iterable[numpy.ndarray],\n threshold: float = 0,\n n_matches: int = 1) -> Links:\n\n for block in scored_blocks:\n block = block[block['score'] > threshold]\n block.sort(order='score')\n block = block[::-1]\n\n if n_matches:\n yield block[:n_matches].copy()\n else:\n yield block.copy()\n\n\ndef pair_gazette_matching(scored_pairs: numpy.ndarray,\n threshold: float = 0.0,\n n_matches: int = 1) -> Links:\n\n scored_pairs.sort(order='pairs')\n\n group_key = scored_pairs['pairs'][:, 0]\n change_points = numpy.where(numpy.roll(group_key, 1) != group_key)[0]\n scored_blocks = numpy.split(scored_pairs, change_points)\n\n for match in gazetteMatching(scored_blocks, threshold, n_matches):\n if match:\n yield from match\n"
] |
[
[
"numpy.split",
"numpy.log",
"numpy.sqrt",
"numpy.nditer",
"numpy.min",
"numpy.unique",
"numpy.exp",
"numpy.roll"
]
] |
zge/tacotron2-vae
|
[
"f0f5dc69bdbeb5331ee039afdbfdc1dea65ab033"
] |
[
"data_utils.py"
] |
[
"import numpy as np\nimport torch\nimport torch.utils.data\nimport os\n\nimport layers\nfrom utils import load_wav_to_torch, load_filepaths_and_text\n# for individual & batch level permuting\nfrom utils import permute_filelist, permute_batch_from_filelist\n# for pre-batching\nfrom utils import batching, get_batch_sizes, permute_batch_from_batch\nfrom text import text_to_sequence\n\n\nclass TextMelLoader(torch.utils.data.Dataset):\n \"\"\"\n 1) loads audio,text pairs\n 2) normalizes text and converts them to sequences of one-hot vectors\n 3) computes mel-spectrograms from audio files.\n \"\"\"\n def __init__(self, audiopaths_and_text, shuffle_plan, hparams, epoch=0,\n speaker_ids=None, emotion_ids=None):\n self.audiopaths_and_text = load_filepaths_and_text(audiopaths_and_text)\n self.shuffle_audiopaths = shuffle_plan['shuffle-audiopath']\n self.shuffle_batches = shuffle_plan['shuffle-batch']\n self.permute_opt = shuffle_plan['permute-opt']\n self.pre_batching = shuffle_plan['pre-batching']\n self.prep_trainset_per_epoch = hparams.prep_trainset_per_epoch\n self.filelist_cols = hparams.filelist_cols\n self.local_rand_factor = hparams.local_rand_factor\n self.include_emo_emb = hparams.include_emo_emb\n self.emo_emb_dim = hparams.emo_emb_dim\n self.text_cleaners = hparams.text_cleaners\n self.max_wav_value = hparams.max_wav_value\n self.sampling_rate = hparams.sampling_rate\n self.load_mel_from_disk = hparams.load_mel_from_disk\n self.n_speakers = hparams.n_speakers\n self.n_emotions = hparams.n_emotions\n self.label_type = hparams.label_type\n self.use_vae = hparams.use_vae\n\n if hparams.override_sample_size:\n self.hop_length = int(np.ceil(hparams.hop_time/1000*hparams.sampling_rate))\n self.win_length = int(np.ceil(hparams.win_time/1000*hparams.sampling_rate))\n self.filter_length = int(2**np.ceil(np.log2(self.win_length)))\n else:\n self.hop_length = hparams.hop_length\n self.win_length = hparams.win_length\n self.filter_length = hparams.filter_length\n self.stft = layers.TacotronSTFT(\n self.filter_length, self.hop_length, self.win_length,\n hparams.n_mel_channels, hparams.sampling_rate, hparams.mel_fmin,\n hparams.mel_fmax)\n\n audiopaths_and_text_ori = self.audiopaths_and_text[:]\n if self.prep_trainset_per_epoch:\n seed = hparams.seed + epoch\n else:\n seed = hparams.seed\n if self.shuffle_audiopaths:\n self.audiopaths_and_text = permute_filelist(self.audiopaths_and_text,\n self.filelist_cols, seed, self.permute_opt, self.local_rand_factor)[0]\n if self.pre_batching:\n batch_sizes = get_batch_sizes(self.audiopaths_and_text,\n hparams.filelist_cols, hparams.batch_size)\n assert sum(batch_sizes) == len(self.audiopaths_and_text),\\\n \"check: not all samples get batched in pre-batching!\"\n self.audiopaths_and_text = batching(self.audiopaths_and_text, batch_sizes)\n if self.shuffle_batches:\n if self.pre_batching:\n self.audiopaths_and_text = permute_batch_from_batch(\n self.audiopaths_and_text, seed)\n else:\n self.audiopaths_and_text = permute_batch_from_filelist(\n self.audiopaths_and_text, hparams.batch_size, seed)\n\n self.speaker_ids = speaker_ids\n if not self.speaker_ids:\n self.speaker_ids = self.create_lookup(audiopaths_and_text_ori, 'speaker')\n\n self.emotion_ids = emotion_ids\n if not self.emotion_ids:\n self.emotion_ids = self.create_lookup(audiopaths_and_text_ori, 'emotion')\n\n def parse_filelist_line(self, audiopath_and_text):\n # parse basic cols\n audiopath = audiopath_and_text[self.filelist_cols.index('audiopath')]\n text = 
audiopath_and_text[self.filelist_cols.index('text')]\n # parse optional cols\n emoembpath, dur, speaker, emotion = '', '', '', ''\n if 'emoembpath' in self.filelist_cols:\n emoembpath = audiopath_and_text[self.filelist_cols.index('emoembpath')]\n if 'dur' in self.filelist_cols:\n dur = float(audiopath_and_text[self.filelist_cols.index('dur')])\n if 'speaker' in self.filelist_cols:\n speaker = audiopath_and_text[self.filelist_cols.index('speaker')]\n if 'emotion' in self.filelist_cols:\n emotion = audiopath_and_text[self.filelist_cols.index('emotion')]\n return audiopath, emoembpath, text, dur, speaker, emotion\n\n def get_mel_text_pair(self, audiopath_and_text):\n # separate filename and text\n emoemb, speaker, emotion = '', '', ''\n audiopath, emoembpath, text, dur, speaker, emotion = \\\n self.parse_filelist_line(audiopath_and_text)\n text = self.get_text(text) # int_tensor[char_index, ....]\n mel = self.get_mel(audiopath) # []\n if self.use_vae:\n if self.include_emo_emb:\n emoemb = self.get_emoemb(emoembpath)\n speaker = self.get_speaker(speaker, self.label_type)\n emotion = self.get_emotion(emotion, self.label_type)\n audioid = os.path.splitext(os.path.basename(audiopath))[0]\n return (text, mel, emoemb, speaker, emotion, dur, audioid)\n\n def get_mel(self, filename):\n if not self.load_mel_from_disk:\n audio, sampling_rate = load_wav_to_torch(filename)\n if sampling_rate != self.stft.sampling_rate:\n raise ValueError(\"{} SR doesn't match target {} SR\".format(\n sampling_rate, self.stft.sampling_rate))\n audio_norm = audio / self.max_wav_value\n audio_norm = audio_norm.unsqueeze(0)\n audio_norm = torch.autograd.Variable(audio_norm, requires_grad=False)\n melspec = self.stft.mel_spectrogram(audio_norm) # 1 X n_mel_channels X n_frames\n melspec = torch.squeeze(melspec, 0) # n_mel_channels X n_frames\n else:\n melspec = torch.from_numpy(np.load(filename))\n assert melspec.size(0) == self.stft.n_mel_channels, (\n 'Mel dimension mismatch: given {}, expected {}'.format(\n melspec.size(0), self.stft.n_mel_channels))\n\n return melspec\n\n def get_emoemb(self, filename):\n emoemb = torch.from_numpy(np.load(filename)).T\n assert emoemb.size(0) == self.emo_emb_dim, (\n 'Emotion embedding dimension mismatch: given {}, expected {}'.format(\n emoemb.size(0), self.emo_emb_dim))\n return emoemb\n\n def get_text(self, text):\n text_norm = torch.IntTensor(text_to_sequence(text, self.text_cleaners))\n return text_norm\n\n def create_lookup(self, audiopaths_and_text, attribute):\n a2i = {'speaker':-2, 'emotion':-1}\n ids = sorted(set(x[a2i[attribute]] for x in audiopaths_and_text))\n d = {ids[i]: i for i in range(len(ids))}\n return d\n\n def get_speaker(self, speaker, label_type='one-hot'):\n if label_type == 'one-hot':\n speaker_vector = np.zeros(self.n_speakers)\n speaker_vector[self.speaker_ids[speaker]] = 1\n output = torch.Tensor(speaker_vector.astype(dtype=np.float32))\n elif label_type == 'id':\n output = torch.tensor([self.speaker_ids[speaker]])\n return output\n\n def get_emotion(self, emotion, label_type='one-hot'):\n if label_type == 'one-hot':\n emotion_vector = np.zeros(self.n_emotions)\n emotion_vector[self.emotion_ids[emotion]] = 1\n output = torch.Tensor(emotion_vector.astype(dtype=np.float32))\n elif label_type == 'id':\n output = torch.tensor([self.emotion_ids[emotion]])\n return output\n\n def __getitem__(self, index):\n if self.pre_batching:\n audiopaths_and_text = self.audiopaths_and_text[index]\n pairs = [self.get_mel_text_pair(audiopath_and_text) for\n audiopath_and_text in 
audiopaths_and_text]\n else:\n pairs = self.get_mel_text_pair(self.audiopaths_and_text[index])\n return pairs\n\n def __len__(self):\n return len(self.audiopaths_and_text)\n\n\nclass TextMelCollate():\n \"\"\" Zero-pads model inputs and targets based on number of frames per step\n \"\"\"\n def __init__(self, hparams, pre_batching=False):\n self.pre_batching = pre_batching\n self.n_frames_per_step = hparams.n_frames_per_step\n self.label_type = hparams.label_type\n self.use_vae = hparams.use_vae\n\n def __call__(self, batch):\n \"\"\"Collate's training batch from normalized text and mel-spectrogram\n PARAMS\n ------\n batch: [[text_normalized, mel_normalized], ...]\n e.g.\n import itertools\n batch = list(itertools.islice(train_loader.dataset, hparams.batch_size))\n \"\"\"\n\n if self.pre_batching:\n batch = batch[0]\n\n # Right zero-pad all one-hot text sequences to max input length\n input_lengths, ids_sorted_decreasing = torch.sort(\n torch.LongTensor([len(x[0]) for x in batch]),\n dim=0, descending=True)\n max_input_len = input_lengths[0]\n\n text_padded = torch.LongTensor(len(batch), max_input_len)\n text_padded.zero_()\n for i in range(len(ids_sorted_decreasing)):\n text = batch[ids_sorted_decreasing[i]][0]\n text_padded[i, :text.size(0)] = text\n\n if self.use_vae:\n if self.label_type == 'one-hot':\n speakers = torch.LongTensor(len(batch), len(batch[0][3]))\n for i in range(len(ids_sorted_decreasing)):\n speaker = batch[ids_sorted_decreasing[i]][3]\n speakers[i, :] = speaker\n emotions = torch.LongTensor(len(batch), len(batch[0][4]))\n for i in range(len(ids_sorted_decreasing)):\n emotion = batch[ids_sorted_decreasing[i]][4]\n emotions[i, :] = emotion\n elif self.label_type == 'id':\n speakers = torch.LongTensor(len(batch))\n emotions = torch.LongTensor(len(batch))\n for i in range(len(ids_sorted_decreasing)):\n speakers[i] = batch[ids_sorted_decreasing[i]][3]\n emotions[i] = batch[ids_sorted_decreasing[i]][4]\n else:\n speakers = emotions = ''\n\n durs = [[] for _ in range(len(batch))]\n audioids = [[] for _ in range(len(batch))]\n for i in range(len(ids_sorted_decreasing)):\n durs[i] = batch[ids_sorted_decreasing[i]][5]\n audioids[i] = batch[ids_sorted_decreasing[i]][6]\n\n # Right zero-pad mel-spec\n num_mels = batch[0][1].size(0)\n max_target_len1 = max([x[1].size(1) for x in batch])\n\n if len(batch[0][2]) > 0:\n num_emoembs = batch[0][2].size(0)\n max_target_len2 = max([x[2].size(1) for x in batch])\n\n max_target_len = max_target_len1\n # todo: uniform wintime/hoptime of mel and emoemb so max_target_len will be the same\n\n # increment max_target_len to the multiples of n_frames_per_step\n if max_target_len % self.n_frames_per_step != 0:\n max_target_len += self.n_frames_per_step - max_target_len % self.n_frames_per_step\n assert max_target_len % self.n_frames_per_step == 0\n # todo: to support n_frames_per_step > 1\n\n # include mel padded and gate padded\n mel_padded = torch.FloatTensor(len(batch), num_mels, max_target_len)\n mel_padded.zero_()\n gate_padded = torch.FloatTensor(len(batch), max_target_len)\n gate_padded.zero_()\n output_lengths = torch.LongTensor(len(batch))\n for i in range(len(ids_sorted_decreasing)):\n mel = batch[ids_sorted_decreasing[i]][1]\n mel_padded[i, :, :mel.size(1)] = mel\n gate_padded[i, mel.size(1)-1:] = 1\n output_lengths[i] = mel.size(1)\n\n if len(batch[0][2]) > 0:\n emoemb_padded = torch.FloatTensor(len(batch), num_emoembs, max_target_len)\n emoemb_padded.zero_()\n for i in range(len(ids_sorted_decreasing)):\n emoemb = 
batch[ids_sorted_decreasing[i]][2]\n emoemb_nframes = min(emoemb.size(1), max_target_len)\n emoemb_padded[i, :, :emoemb_nframes] = emoemb[:, :emoemb_nframes]\n else:\n emoemb_padded = ''\n\n return text_padded, input_lengths, mel_padded, emoemb_padded, \\\n gate_padded, output_lengths, speakers, emotions, durs, audioids\n"
] |
[
[
"numpy.log2",
"torch.tensor",
"numpy.ceil",
"numpy.load",
"numpy.zeros",
"torch.squeeze",
"torch.autograd.Variable"
]
] |
nagyation/Audioscope
|
[
"dbe86005bcd0bd58cb2cc9b28624dccc7b5d5867"
] |
[
"signalplotter.py"
] |
[
"import numpy as np\nimport matplotlib.pyplot as plt\n\nclass SignalPlotter:\n\n\n def __init__(self,timeStep,timeMax):\n \"\"\"\n timeStep : the step for time Axis from second\n timeMax : the max time in the time Axis in same scale\n \n \"\"\"\n self.time = []\n self.amp = []\n self.timeStep = timeStep\n self.timeMax = timeMax\n self.numberOfSteps = timeMax/timeStep\n plt.plot(self.time,self.amp)\n plt.ion()\n self.time.append(0) #the base to increment over it\n \n\n #manages signal relative to time\n def __manageSignal(self):\n if len(self.amp) > self.numberOfSteps:\n del self.amp[0] #shift signal by deleting the first element\n if len(self.time) != self.numberOfSteps:\n while len(self.time) < len(self.amp) :\n self.time.append(self.time[-1]+self.timeStep)\n \n \n def addAmp(self,amp):\n self.amp = list(amp)\n self.__manageSignal()\n\n def draw(self):\n plt.gca().lines[0].set_xdata(self.time);\n plt.gca().lines[0].set_ydata(self.amp);\n plt.gca().relim();\n plt.gca().autoscale_view();\n plt.pause(self.timeStep)\n\n\n"
] |
[
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.pause"
]
] |
amorehead/alphafold_multimer_non_docker
|
[
"32f3b81fc4bbc2d8750da27158b42c2412fdfd3f"
] |
[
"alphafold/model/common_modules.py"
] |
[
"# Copyright 2021 DeepMind Technologies Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"A collection of common Haiku modules for use in protein folding.\"\"\"\nimport numbers\nfrom typing import Union, Sequence\n\nimport haiku as hk\nimport jax.numpy as jnp\nimport numpy as np\n\n\n# Constant from scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.)\nTRUNCATED_NORMAL_STDDEV_FACTOR = np.asarray(.87962566103423978,\n dtype=np.float32)\n\n\ndef get_initializer_scale(initializer_name, input_shape):\n \"\"\"Get Initializer for weights and scale to multiply activations by.\"\"\"\n\n if initializer_name == 'zeros':\n w_init = hk.initializers.Constant(0.0)\n else:\n # fan-in scaling\n scale = 1.\n for channel_dim in input_shape:\n scale /= channel_dim\n if initializer_name == 'relu':\n scale *= 2\n\n noise_scale = scale\n\n stddev = np.sqrt(noise_scale)\n # Adjust stddev for truncation.\n stddev = stddev / TRUNCATED_NORMAL_STDDEV_FACTOR\n w_init = hk.initializers.TruncatedNormal(mean=0.0, stddev=stddev)\n\n return w_init\n\n\nclass Linear(hk.Module):\n \"\"\"Protein folding specific Linear module.\n\n This differs from the standard Haiku Linear in a few ways:\n * It supports inputs and outputs of arbitrary rank\n * Initializers are specified by strings\n \"\"\"\n\n def __init__(self,\n num_output: Union[int, Sequence[int]],\n initializer: str = 'linear',\n num_input_dims: int = 1,\n use_bias: bool = True,\n bias_init: float = 0.,\n precision = None,\n name: str = 'linear'):\n \"\"\"Constructs Linear Module.\n\n Args:\n num_output: Number of output channels. Can be tuple when outputting\n multiple dimensions.\n initializer: What initializer to use, should be one of {'linear', 'relu',\n 'zeros'}\n num_input_dims: Number of dimensions from the end to project.\n use_bias: Whether to include trainable bias\n bias_init: Value used to initialize bias.\n precision: What precision to use for matrix multiplication, defaults\n to None.\n name: Name of module, used for name scopes.\n \"\"\"\n super().__init__(name=name)\n if isinstance(num_output, numbers.Integral):\n self.output_shape = (num_output,)\n else:\n self.output_shape = tuple(num_output)\n self.initializer = initializer\n self.use_bias = use_bias\n self.bias_init = bias_init\n self.num_input_dims = num_input_dims\n self.num_output_dims = len(self.output_shape)\n self.precision = precision\n\n def __call__(self, inputs):\n \"\"\"Connects Module.\n\n Args:\n inputs: Tensor with at least num_input_dims dimensions.\n\n Returns:\n output of shape [...] 
+ num_output.\n \"\"\"\n\n num_input_dims = self.num_input_dims\n\n if self.num_input_dims > 0:\n in_shape = inputs.shape[-self.num_input_dims:]\n else:\n in_shape = ()\n\n weight_init = get_initializer_scale(self.initializer, in_shape)\n\n in_letters = 'abcde'[:self.num_input_dims]\n out_letters = 'hijkl'[:self.num_output_dims]\n\n weight_shape = in_shape + self.output_shape\n weights = hk.get_parameter('weights', weight_shape, inputs.dtype,\n weight_init)\n\n equation = f'...{in_letters}, {in_letters}{out_letters}->...{out_letters}'\n\n output = jnp.einsum(equation, inputs, weights, precision=self.precision)\n\n if self.use_bias:\n bias = hk.get_parameter('bias', self.output_shape, inputs.dtype,\n hk.initializers.Constant(self.bias_init))\n output += bias\n\n return output\n\n"
] |
[
[
"numpy.asarray",
"numpy.sqrt"
]
] |
cpuimage/MADGRAD
|
[
"eba15646230944a53a237e94fa92b4568bb7bf3b"
] |
[
"MadGrad.py"
] |
[
"\"\"\"Madgrad optimizer implementation.\"\"\"\n# pylint: disable=g-classes-have-attributes\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.keras import backend_config\nfrom tensorflow.python.keras.optimizer_v2 import optimizer_v2\nfrom tensorflow.python.ops import array_ops, control_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import state_ops\n\n\nclass MadGrad(optimizer_v2.OptimizerV2):\n _HAS_AGGREGATE_GRAD = True\n\n def __init__(\n self,\n learning_rate=1e-2,\n momentum=0.9,\n weight_decay=0.0,\n power=1.0 / 3.0,\n epsilon=1e-6,\n name=\"Madgrad\",\n **kwargs\n ):\n super(MadGrad, self).__init__(name, **kwargs)\n if momentum < 0 or momentum >= 1:\n raise ValueError(f\"Momentum {momentum} must be in the range [0,1]\")\n if learning_rate <= 0:\n raise ValueError(f\"Learning rate {learning_rate} must be positive\")\n if weight_decay < 0:\n raise ValueError(f\"Weight decay {weight_decay} must be non-negative\")\n if epsilon < 0:\n raise ValueError(f\"Eps must be non-negative\")\n self._set_hyper(\"learning_rate\", kwargs.get(\"lr\", learning_rate))\n self._set_hyper(\"momentum\", momentum)\n self._set_hyper(\"power\", power)\n self._set_hyper(\"weight_decay\", weight_decay)\n self.epsilon = epsilon or backend_config.epsilon()\n self.apply_weight_decay = weight_decay > 0.0\n\n def _create_slots(self, var_list):\n for var in var_list:\n self.add_slot(var, \"grad_sum_sq\")\n self.add_slot(var, \"s\")\n self.add_slot(var, \"x0\", initializer=var)\n\n def _prepare_local(self, var_device, var_dtype, apply_state):\n super(MadGrad, self)._prepare_local(var_device, var_dtype, apply_state)\n power = array_ops.identity(self._get_hyper(\"power\", var_dtype))\n momentum = array_ops.identity(self._get_hyper(\"momentum\", var_dtype))\n weight_decay = array_ops.identity(self._get_hyper(\"weight_decay\", var_dtype))\n lr_t = apply_state[(var_device, var_dtype)]['lr_t']\n local_step = math_ops.cast(self.iterations + 1, var_dtype)\n apply_state[(var_device, var_dtype)] = dict(\n epsilon=ops.convert_to_tensor_v2(self.epsilon, var_dtype),\n power=power,\n momentum=momentum,\n one_minus_momentum_t=1.0 - momentum,\n weight_decay=weight_decay,\n lamb=lr_t * math_ops.sqrt(local_step),\n )\n\n def _resource_apply_dense(self, grad, var, apply_state=None):\n var_device, var_dtype = var.device, var.dtype.base_dtype\n coefficients = (apply_state or {}).get(\n (var_device, var_dtype)) or self._fallback_apply_state(var_device, var_dtype)\n grad_sum_sq = self.get_slot(var, \"grad_sum_sq\")\n s = self.get_slot(var, \"s\")\n x0 = self.get_slot(var, \"x0\")\n if self.apply_weight_decay:\n grad += coefficients[\"weight_decay\"] * var\n sk_grad = coefficients[\"lamb\"] * grad\n s_t = state_ops.assign_add(s, sk_grad, use_locking=self._use_locking)\n grad_sum_sq_t = state_ops.assign_add(grad_sum_sq, sk_grad * grad, use_locking=self._use_locking)\n rms = math_ops.maximum(math_ops.pow(grad_sum_sq_t, coefficients[\"power\"]), coefficients[\"epsilon\"])\n z = x0 - (s_t / rms)\n var_t = coefficients['one_minus_momentum_t'] * var + coefficients[\"momentum\"] * z\n var_update = state_ops.assign(var, var_t, use_locking=self._use_locking)\n updates = [s_t, grad_sum_sq_t, var_update]\n return control_flow_ops.group(*updates)\n\n def _resource_apply_sparse(self, grad, var, indices, apply_state=None):\n var_device, var_dtype = var.device, var.dtype.base_dtype\n 
coefficients = (apply_state or {}).get(\n (var_device, var_dtype)) or self._fallback_apply_state(var_device, var_dtype)\n grad_sum_sq = self.get_slot(var, \"grad_sum_sq\")\n s = self.get_slot(var, \"s\")\n x0 = self.get_slot(var, \"x0\")\n if self.apply_weight_decay:\n grad += coefficients[\"weight_decay\"] * array_ops.gather(var, indices)\n sk_grad = coefficients[\"lamb\"] * grad\n s_t = self._resource_scatter_add(s, indices, sk_grad)\n grad_sum_sq_t = self._resource_scatter_add(grad_sum_sq, indices, sk_grad * grad)\n rms = math_ops.maximum(math_ops.pow(grad_sum_sq_t, coefficients[\"power\"]), coefficients[\"epsilon\"])\n z = x0 - (s_t / rms)\n var_t = coefficients['one_minus_momentum_t'] * var + coefficients[\"momentum\"] * z\n var_update = state_ops.assign(var, var_t, use_locking=self._use_locking)\n updates = [s_t, grad_sum_sq_t, var_update]\n return control_flow_ops.group(*updates)\n\n def get_config(self):\n config = super(MadGrad, self).get_config()\n config.update(\n {\n \"learning_rate\": self._serialize_hyperparameter(\"learning_rate\"),\n \"momentum\": self._serialize_hyperparameter(\"momentum\"),\n \"power\": self._serialize_hyperparameter(\"power\"),\n \"weight_decay\": self._serialize_hyperparameter(\"weight_decay\"),\n \"epsilon\": self.epsilon,\n }\n )\n return config\n"
] |
[
[
"tensorflow.python.ops.control_flow_ops.group",
"tensorflow.python.ops.state_ops.assign_add",
"tensorflow.python.ops.math_ops.pow",
"tensorflow.python.framework.ops.convert_to_tensor_v2",
"tensorflow.python.ops.array_ops.gather",
"tensorflow.python.ops.math_ops.sqrt",
"tensorflow.python.ops.state_ops.assign",
"tensorflow.python.keras.backend_config.epsilon",
"tensorflow.python.ops.math_ops.cast"
]
] |
ydhongHIT/DDRNet
|
[
"f2f91b4053831fd54b04e30f60c9f1d4b55cd5b9",
"f2f91b4053831fd54b04e30f60c9f1d4b55cd5b9"
] |
[
"classification/DDRNet_39.py",
"segmentation/DDRNet_23_slim_eval_speed.py"
] |
[
"import math\r\nimport torch \r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\nBatchNorm2d = nn.BatchNorm2d\r\nbn_mom = 0.1\r\n\r\n\r\ndef conv3x3(in_planes, out_planes, stride=1):\r\n \"\"\"3x3 convolution with padding\"\"\"\r\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\r\n padding=1, bias=False)\r\n\r\nclass BasicBlock(nn.Module):\r\n expansion = 1\r\n\r\n def __init__(self, inplanes, planes, stride=1, downsample=None, no_relu=False):\r\n super(BasicBlock, self).__init__()\r\n self.conv1 = conv3x3(inplanes, planes, stride)\r\n self.bn1 = nn.BatchNorm2d(planes, momentum=bn_mom)\r\n self.relu = nn.ReLU(inplace=True)\r\n self.conv2 = conv3x3(planes, planes)\r\n self.bn2 = nn.BatchNorm2d(planes, momentum=bn_mom)\r\n self.downsample = downsample\r\n self.stride = stride\r\n self.no_relu = no_relu\r\n\r\n def forward(self, x):\r\n residual = x\r\n\r\n out = self.conv1(x)\r\n out = self.bn1(out)\r\n out = self.relu(out)\r\n\r\n out = self.conv2(out)\r\n out = self.bn2(out)\r\n\r\n if self.downsample is not None:\r\n residual = self.downsample(x)\r\n\r\n out += residual\r\n if self.no_relu:\r\n return out\r\n else:\r\n return self.relu(out)\r\n\r\nclass Bottleneck(nn.Module):\r\n expansion = 2\r\n\r\n def __init__(self, inplanes, planes, stride=1, downsample=None, no_relu=False):\r\n super(Bottleneck, self).__init__()\r\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\r\n self.bn1 = nn.BatchNorm2d(planes, momentum=bn_mom)\r\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\r\n padding=1, bias=False)\r\n self.bn2 = nn.BatchNorm2d(planes, momentum=bn_mom)\r\n self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,\r\n bias=False)\r\n self.bn3 = nn.BatchNorm2d(planes * self.expansion,\r\n momentum=bn_mom)\r\n self.relu = nn.ReLU(inplace=True)\r\n self.downsample = downsample\r\n self.stride = stride\r\n self.no_relu = no_relu\r\n\r\n def forward(self, x):\r\n residual = x\r\n\r\n out = self.conv1(x)\r\n out = self.bn1(out)\r\n out = self.relu(out)\r\n\r\n out = self.conv2(out)\r\n out = self.bn2(out)\r\n out = self.relu(out)\r\n\r\n out = self.conv3(out)\r\n out = self.bn3(out)\r\n\r\n if self.downsample is not None:\r\n residual = self.downsample(x)\r\n\r\n out += residual\r\n if self.no_relu:\r\n return out\r\n else:\r\n return self.relu(out)\r\n\r\nclass DualResNet(nn.Module):\r\n\r\n def __init__(self, block, layers, num_classes=1000, planes=64, last_planes=2048):\r\n super(DualResNet, self).__init__()\r\n\r\n #self.inplanes = 64\r\n #fuse_planes = 128\r\n highres_planes = planes * 2\r\n self.last_planes = last_planes\r\n\r\n self.conv1 = nn.Sequential(\r\n nn.Conv2d(3,planes,kernel_size=3, stride=2, padding=1),\r\n BatchNorm2d(planes, momentum=bn_mom),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(planes,planes,kernel_size=3, stride=2, padding=1),\r\n BatchNorm2d(planes, momentum=bn_mom),\r\n nn.ReLU(inplace=True),\r\n )\r\n\r\n self.relu = nn.ReLU(inplace=False)\r\n self.layer1 = self._make_layer(block, planes, planes, layers[0])\r\n self.layer2 = self._make_layer(block, planes, planes * 2, layers[1], stride=2)\r\n self.layer3_1 = self._make_layer(block, planes * 2, planes * 4, layers[2] // 2, stride=2)\r\n self.layer3_2 = self._make_layer(block, planes * 4, planes * 4, layers[2] // 2)\r\n self.layer4 = self._make_layer(block, planes * 4, planes * 8, layers[3], stride=2)\r\n\r\n self.compression3_1 = nn.Sequential(\r\n nn.Conv2d(planes * 4, highres_planes, kernel_size=1, bias=False),\r\n 
BatchNorm2d(highres_planes, momentum=bn_mom),\r\n )\r\n\r\n self.compression3_2 = nn.Sequential(\r\n nn.Conv2d(planes * 4, highres_planes, kernel_size=1, bias=False),\r\n BatchNorm2d(highres_planes, momentum=bn_mom),\r\n )\r\n\r\n self.compression4 = nn.Sequential(\r\n nn.Conv2d(planes * 8, highres_planes, kernel_size=1, bias=False),\r\n BatchNorm2d(highres_planes, momentum=bn_mom),\r\n )\r\n\r\n self.down3_1 = nn.Sequential(\r\n nn.Conv2d(highres_planes, planes * 4, kernel_size=3, stride=2, padding=1, bias=False),\r\n BatchNorm2d(planes * 4, momentum=bn_mom),\r\n )\r\n\r\n self.down3_2 = nn.Sequential(\r\n nn.Conv2d(highres_planes, planes * 4, kernel_size=3, stride=2, padding=1, bias=False),\r\n BatchNorm2d(planes * 4, momentum=bn_mom),\r\n )\r\n\r\n self.down4 = nn.Sequential(\r\n nn.Conv2d(highres_planes, planes * 4, kernel_size=3, stride=2, padding=1, bias=False),\r\n BatchNorm2d(planes * 4, momentum=bn_mom),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(planes * 4, planes * 8, kernel_size=3, stride=2, padding=1, bias=False),\r\n BatchNorm2d(planes * 8, momentum=bn_mom),\r\n )\r\n\r\n self.layer3_1_ = self._make_layer(block, planes * 2, highres_planes, layers[2] // 2)\r\n\r\n self.layer3_2_ = self._make_layer(block, highres_planes, highres_planes, layers[2] // 2)\r\n\r\n self.layer4_ = self._make_layer(block, highres_planes, highres_planes, layers[3])\r\n\r\n self.layer5_ = self._make_layer(Bottleneck, highres_planes, highres_planes, 1)\r\n\r\n self.down5 = nn.Sequential(\r\n nn.Conv2d(planes * 4, planes * 8, kernel_size=3, stride=2, padding=1, bias=False),\r\n BatchNorm2d(planes * 8, momentum=bn_mom),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(planes * 8, planes * 16, kernel_size=3, stride=2, padding=1, bias=False),\r\n BatchNorm2d(planes * 16, momentum=bn_mom),\r\n )\r\n\r\n self.layer5 = self._make_layer(Bottleneck, planes * 8, planes * 8, 1)\r\n\r\n self.last_layer = nn.Sequential(\r\n nn.Conv2d(planes * 16, last_planes, kernel_size=1, stride=1, padding=0, bias=False),\r\n BatchNorm2d(last_planes, momentum=bn_mom),\r\n nn.ReLU(inplace=True),\r\n nn.AdaptiveAvgPool2d((1, 1)),\r\n )\r\n\r\n self.linear = nn.Linear(last_planes, num_classes)\r\n\r\n for m in self.modules():\r\n if isinstance(m, nn.Conv2d):\r\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\r\n elif isinstance(m, BatchNorm2d):\r\n nn.init.constant_(m.weight, 1)\r\n nn.init.constant_(m.bias, 0)\r\n\r\n\r\n def _make_layer(self, block, inplanes, planes, blocks, stride=1):\r\n downsample = None\r\n if stride != 1 or inplanes != planes * block.expansion:\r\n downsample = nn.Sequential(\r\n nn.Conv2d(inplanes, planes * block.expansion,\r\n kernel_size=1, stride=stride, bias=False),\r\n nn.BatchNorm2d(planes * block.expansion, momentum=bn_mom),\r\n )\r\n\r\n layers = []\r\n layers.append(block(inplanes, planes, stride, downsample)) \r\n inplanes = planes * block.expansion\r\n for i in range(1, blocks):\r\n if i == (blocks-1):\r\n layers.append(block(inplanes, planes, stride=1, no_relu=True))\r\n else:\r\n layers.append(block(inplanes, planes, stride=1, no_relu=False))\r\n\r\n return nn.Sequential(*layers)\r\n\r\n\r\n def forward(self, x):\r\n\r\n width_output = x.shape[-1] // 8\r\n height_output = x.shape[-2] // 8\r\n layers = []\r\n\r\n x = self.conv1(x)\r\n\r\n x = self.layer1(x)\r\n layers.append(x)\r\n\r\n x = self.layer2(self.relu(x))\r\n layers.append(x)\r\n \r\n x = self.layer3_1(self.relu(x))\r\n layers.append(x)\r\n x_ = self.layer3_1_(self.relu(layers[1]))\r\n x = x + self.down3_1(self.relu(x_))\r\n 
x_ = x_ + F.interpolate(\r\n self.compression3_1(self.relu(layers[2])),\r\n size=[height_output, width_output],\r\n mode='bilinear')\r\n\r\n x = self.layer3_2(self.relu(x))\r\n layers.append(x)\r\n x_ = self.layer3_2_(self.relu(x_))\r\n x = x + self.down3_2(self.relu(x_))\r\n x_ = x_ + F.interpolate(\r\n self.compression3_2(self.relu(layers[3])),\r\n size=[height_output, width_output],\r\n mode='bilinear')\r\n\r\n x = self.layer4(self.relu(x))\r\n layers.append(x)\r\n x_ = self.layer4_(self.relu(x_))\r\n x = x + self.down4(self.relu(x_))\r\n x_ = x_ + F.interpolate(\r\n self.compression4(self.relu(layers[4])),\r\n size=[height_output, width_output],\r\n mode='bilinear')\r\n\r\n x = self.layer5(self.relu(x))+ self.down5(self.relu(self.layer5_(self.relu(x_))))\r\n\r\n x = self.last_layer(self.relu(x))\r\n x = x.view(-1, self.last_planes)\r\n x = self.linear(x) \r\n return x\r\n\r\ndef get_model():\r\n return DualResNet(block=BasicBlock, layers=[3, 4, 6, 3], planes=64, last_planes=2048)\r\n\r\n\r\n\r\n\r\n\r\n",
"import math\r\nimport torch\r\nimport numpy as np \r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom torch.nn import init\r\nfrom collections import OrderedDict\r\n\r\nBatchNorm2d = nn.BatchNorm2d\r\nbn_mom = 0.1\r\n\r\n\r\ndef conv3x3(in_planes, out_planes, stride=1):\r\n \"\"\"3x3 convolution with padding\"\"\"\r\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\r\n padding=1, bias=True)\r\n\r\n\r\nclass BasicBlock(nn.Module):\r\n expansion = 1\r\n\r\n def __init__(self, inplanes, planes, stride=1, downsample=None, no_relu=False):\r\n super(BasicBlock, self).__init__()\r\n self.conv1 = conv3x3(inplanes, planes, stride)\r\n self.bn1 = BatchNorm2d(planes, momentum=bn_mom)\r\n self.relu = nn.ReLU(inplace=True)\r\n self.conv2 = conv3x3(planes, planes)\r\n self.bn2 = BatchNorm2d(planes, momentum=bn_mom)\r\n self.downsample = downsample\r\n self.stride = stride\r\n self.no_relu = no_relu\r\n\r\n def forward(self, x):\r\n residual = x\r\n\r\n out = self.conv1(x)\r\n #out = self.bn1(out)\r\n out = self.relu(out)\r\n\r\n out = self.conv2(out)\r\n #out = self.bn2(out)\r\n\r\n if self.downsample is not None:\r\n residual = self.downsample(x)\r\n\r\n out += residual\r\n\r\n if self.no_relu:\r\n return out\r\n else:\r\n return self.relu(out)\r\n\r\nclass Bottleneck(nn.Module):\r\n expansion = 2\r\n\r\n def __init__(self, inplanes, planes, stride=1, downsample=None, no_relu=True):\r\n super(Bottleneck, self).__init__()\r\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=True)\r\n self.bn1 = BatchNorm2d(planes, momentum=bn_mom)\r\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\r\n padding=1, bias=True)\r\n self.bn2 = BatchNorm2d(planes, momentum=bn_mom)\r\n self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,\r\n bias=True)\r\n self.bn3 = BatchNorm2d(planes * self.expansion, momentum=bn_mom)\r\n self.relu = nn.ReLU(inplace=True)\r\n self.downsample = downsample\r\n self.stride = stride\r\n self.no_relu = no_relu\r\n\r\n def forward(self, x):\r\n residual = x\r\n\r\n out = self.conv1(x)\r\n #out = self.bn1(out)\r\n out = self.relu(out)\r\n\r\n out = self.conv2(out)\r\n #out = self.bn2(out)\r\n out = self.relu(out)\r\n\r\n out = self.conv3(out)\r\n #out = self.bn3(out)\r\n\r\n if self.downsample is not None:\r\n residual = self.downsample(x)\r\n\r\n out += residual\r\n if self.no_relu:\r\n return out\r\n else:\r\n return self.relu(out)\r\n\r\nclass DAPPM(nn.Module):\r\n def __init__(self, inplanes, branch_planes, outplanes):\r\n super(DAPPM, self).__init__()\r\n self.scale1 = nn.Sequential(nn.AvgPool2d(kernel_size=5, stride=2, padding=2),\r\n BatchNorm2d(inplanes, momentum=bn_mom),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(inplanes, branch_planes, kernel_size=1, bias=False),\r\n )\r\n self.scale2 = nn.Sequential(nn.AvgPool2d(kernel_size=9, stride=4, padding=4),\r\n BatchNorm2d(inplanes, momentum=bn_mom),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(inplanes, branch_planes, kernel_size=1, bias=False),\r\n )\r\n self.scale3 = nn.Sequential(nn.AvgPool2d(kernel_size=17, stride=8, padding=8),\r\n BatchNorm2d(inplanes, momentum=bn_mom),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(inplanes, branch_planes, kernel_size=1, bias=False),\r\n )\r\n self.scale4 = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)),\r\n BatchNorm2d(inplanes, momentum=bn_mom),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(inplanes, branch_planes, kernel_size=1, bias=False),\r\n )\r\n self.scale0 = nn.Sequential(\r\n BatchNorm2d(inplanes, momentum=bn_mom),\r\n 
nn.ReLU(inplace=True),\r\n nn.Conv2d(inplanes, branch_planes, kernel_size=1, bias=False),\r\n )\r\n self.process1 = nn.Sequential(\r\n BatchNorm2d(branch_planes, momentum=bn_mom),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(branch_planes, branch_planes, kernel_size=3, padding=1, bias=False),\r\n )\r\n self.process2 = nn.Sequential(\r\n BatchNorm2d(branch_planes, momentum=bn_mom),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(branch_planes, branch_planes, kernel_size=3, padding=1, bias=False),\r\n )\r\n self.process3 = nn.Sequential(\r\n BatchNorm2d(branch_planes, momentum=bn_mom),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(branch_planes, branch_planes, kernel_size=3, padding=1, bias=False),\r\n )\r\n self.process4 = nn.Sequential(\r\n BatchNorm2d(branch_planes, momentum=bn_mom),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(branch_planes, branch_planes, kernel_size=3, padding=1, bias=False),\r\n ) \r\n self.compression = nn.Sequential(\r\n BatchNorm2d(branch_planes * 5, momentum=bn_mom),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(branch_planes * 5, outplanes, kernel_size=1, bias=False),\r\n )\r\n self.shortcut = nn.Sequential(\r\n BatchNorm2d(inplanes, momentum=bn_mom),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(inplanes, outplanes, kernel_size=1, bias=False),\r\n )\r\n\r\n def forward(self, x):\r\n\r\n #x = self.downsample(x)\r\n width = x.shape[-1]\r\n height = x.shape[-2] \r\n x_list = []\r\n\r\n x_list.append(self.scale0(x))\r\n x_list.append(self.process1((F.interpolate(self.scale1(x),\r\n size=[height, width],\r\n mode='bilinear')+x_list[0])))\r\n x_list.append((self.process2((F.interpolate(self.scale2(x),\r\n size=[height, width],\r\n mode='bilinear')+x_list[1]))))\r\n x_list.append(self.process3((F.interpolate(self.scale3(x),\r\n size=[height, width],\r\n mode='bilinear')+x_list[2])))\r\n x_list.append(self.process4((F.interpolate(self.scale4(x),\r\n size=[height, width],\r\n mode='bilinear')+x_list[3])))\r\n \r\n out = self.compression(torch.cat(x_list, 1)) + self.shortcut(x)\r\n return out \r\n\r\n\r\nclass segmenthead(nn.Module):\r\n\r\n def __init__(self, inplanes, interplanes, outplanes, scale_factor=8):\r\n super(segmenthead, self).__init__()\r\n self.bn1 = BatchNorm2d(inplanes, momentum=bn_mom)\r\n self.conv1 = nn.Conv2d(inplanes, interplanes, kernel_size=3, padding=1, bias=False)\r\n #self.bn2 = BatchNorm2d(interplanes, momentum=bn_mom)\r\n self.relu = nn.ReLU(inplace=True)\r\n self.conv2 = nn.Conv2d(interplanes, outplanes, kernel_size=1, padding=0, bias=True)\r\n self.scale_factor = scale_factor\r\n\r\n def forward(self, x):\r\n \r\n x = self.conv1(self.relu(self.bn1(x)))\r\n out = self.conv2(self.relu(x))\r\n\r\n if self.scale_factor is not None:\r\n height = x.shape[-2] * self.scale_factor\r\n width = x.shape[-1] * self.scale_factor\r\n out = F.interpolate(out,\r\n size=[height, width],\r\n mode='bilinear')\r\n\r\n return out\r\n\r\nclass DualResNet(nn.Module):\r\n\r\n def __init__(self, block, layers, num_classes=19, planes=64, spp_planes=128, head_planes=128, augment=False):\r\n super(DualResNet, self).__init__()\r\n\r\n highres_planes = planes * 2\r\n self.augment = augment\r\n\r\n self.conv1 = nn.Sequential(\r\n nn.Conv2d(3,planes,kernel_size=3, stride=2, padding=1),\r\n #BatchNorm2d(planes, momentum=bn_mom),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(planes,planes,kernel_size=3, stride=2, padding=1),\r\n #BatchNorm2d(planes, momentum=bn_mom),\r\n nn.ReLU(inplace=True),\r\n )\r\n\r\n self.relu = nn.ReLU(inplace=False)\r\n self.layer1 = self._make_layer(block, planes, planes, 
layers[0])\r\n self.layer2 = self._make_layer(block, planes, planes * 2, layers[1], stride=2)\r\n self.layer3 = self._make_layer(block, planes * 2, planes * 4, layers[2], stride=2)\r\n self.layer4 = self._make_layer(block, planes * 4, planes * 8, layers[3], stride=2)\r\n\r\n self.compression3 = nn.Sequential(\r\n nn.Conv2d(planes * 4, highres_planes, kernel_size=1, bias=True),\r\n #BatchNorm2d(highres_planes, momentum=bn_mom),\r\n )\r\n\r\n self.compression4 = nn.Sequential(\r\n nn.Conv2d(planes * 8, highres_planes, kernel_size=1, bias=True),\r\n #BatchNorm2d(highres_planes, momentum=bn_mom),\r\n )\r\n\r\n self.down3 = nn.Sequential(\r\n nn.Conv2d(highres_planes, planes * 4, kernel_size=3, stride=2, padding=1, bias=True),\r\n #BatchNorm2d(planes * 4, momentum=bn_mom),\r\n )\r\n\r\n self.down4 = nn.Sequential(\r\n nn.Conv2d(highres_planes, planes * 4, kernel_size=3, stride=2, padding=1, bias=True),\r\n #BatchNorm2d(planes * 4, momentum=bn_mom),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(planes * 4, planes * 8, kernel_size=3, stride=2, padding=1, bias=True),\r\n #BatchNorm2d(planes * 8, momentum=bn_mom),\r\n )\r\n\r\n self.layer3_ = self._make_layer(block, planes * 2, highres_planes, 2)\r\n\r\n self.layer4_ = self._make_layer(block, highres_planes, highres_planes, 2)\r\n\r\n self.layer5_ = self._make_layer(Bottleneck, highres_planes, highres_planes, 1)\r\n\r\n self.layer5 = self._make_layer(Bottleneck, planes * 8, planes * 8, 1, stride=2)\r\n\r\n self.spp = DAPPM(planes * 16, spp_planes, planes * 4)\r\n\r\n if self.augment:\r\n self.seghead_extra = segmenthead(highres_planes, head_planes, num_classes) \r\n\r\n self.final_layer = segmenthead(planes * 4, head_planes, num_classes)\r\n\r\n\r\n for m in self.modules():\r\n if isinstance(m, nn.Conv2d):\r\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\r\n elif isinstance(m, BatchNorm2d):\r\n nn.init.constant_(m.weight, 1)\r\n nn.init.constant_(m.bias, 0)\r\n\r\n\r\n def _make_layer(self, block, inplanes, planes, blocks, stride=1):\r\n downsample = None\r\n if stride != 1 or inplanes != planes * block.expansion:\r\n downsample = nn.Sequential(\r\n nn.Conv2d(inplanes, planes * block.expansion,\r\n kernel_size=1, stride=stride, bias=True),\r\n #nn.BatchNorm2d(planes * block.expansion, momentum=bn_mom),\r\n )\r\n\r\n layers = []\r\n layers.append(block(inplanes, planes, stride, downsample))\r\n inplanes = planes * block.expansion\r\n for i in range(1, blocks):\r\n if i == (blocks-1):\r\n layers.append(block(inplanes, planes, stride=1, no_relu=True))\r\n else:\r\n layers.append(block(inplanes, planes, stride=1, no_relu=False))\r\n\r\n return nn.Sequential(*layers)\r\n\r\n\r\n def forward(self, x): \r\n\r\n width_output = x.shape[-1] // 8\r\n height_output = x.shape[-2] // 8\r\n layers = []\r\n\r\n x = self.conv1(x)\r\n\r\n x = self.layer1(x)\r\n layers.append(x)\r\n\r\n x = self.layer2(self.relu(x))\r\n layers.append(x)\r\n \r\n x = self.layer3(self.relu(x))\r\n layers.append(x)\r\n x_ = self.layer3_(self.relu(layers[1]))\r\n\r\n x = x + self.down3(self.relu(x_))\r\n x_ = x_ + F.interpolate(\r\n self.compression3(self.relu(layers[2])),\r\n size=[height_output, width_output],\r\n mode='bilinear')\r\n if self.augment:\r\n temp = x_\r\n\r\n x = self.layer4(self.relu(x))\r\n layers.append(x)\r\n x_ = self.layer4_(self.relu(x_))\r\n\r\n x = x + self.down4(self.relu(x_))\r\n x_ = x_ + F.interpolate(\r\n self.compression4(self.relu(layers[3])),\r\n size=[height_output, width_output],\r\n mode='bilinear')\r\n\r\n x_ = 
self.layer5_(self.relu(x_))\r\n x = F.interpolate(\r\n self.spp(self.layer5(self.relu(x))),\r\n size=[height_output, width_output],\r\n mode='bilinear')\r\n\r\n x_ = self.final_layer(x + x_)\r\n\r\n if self.augment: \r\n x_extra = self.seghead_extra(temp)\r\n return [x_, x_extra]\r\n else:\r\n return x_ \r\n\r\ndef DualResNet_imagenet(pretrained=False):\r\n model = DualResNet(BasicBlock, [2, 2, 2, 2], num_classes=19, planes=32, spp_planes=128, head_planes=64, augment=True)\r\n if pretrained:\r\n checkpoint = torch.load('/home/user1/hyd/HRNet/' + \"DDRNet23s_imagenet.pth\", map_location='cpu') \r\n ''' \r\n new_state_dict = OrderedDict()\r\n for k, v in checkpoint['state_dict'].items():\r\n name = k[7:] \r\n new_state_dict[name] = v\r\n #model_dict.update(new_state_dict)\r\n #model.load_state_dict(model_dict)\r\n '''\r\n model.load_state_dict(new_state_dict, strict = False)\r\n return model\r\n\r\ndef get_seg_model(cfg, **kwargs):\r\n\r\n model = DualResNet_imagenet(pretrained=False)\r\n return model\r\n\r\nif __name__ == '__main__':\r\n\r\n\r\n import time\r\n device = torch.device('cuda')\r\n #torch.backends.cudnn.enabled = True\r\n #torch.backends.cudnn.benchmark = True\r\n\r\n model = DualResNet(BasicBlock, [2, 2, 2, 2], num_classes=19, planes=32, spp_planes=128, head_planes=64)\r\n model.eval()\r\n model.to(device)\r\n iterations = None\r\n\r\n input = torch.randn(1, 3, 1024, 2048).cuda()\r\n with torch.no_grad():\r\n for _ in range(10):\r\n model(input)\r\n\r\n if iterations is None:\r\n elapsed_time = 0\r\n iterations = 100\r\n while elapsed_time < 1:\r\n torch.cuda.synchronize()\r\n torch.cuda.synchronize()\r\n t_start = time.time()\r\n for _ in range(iterations):\r\n model(input)\r\n torch.cuda.synchronize()\r\n torch.cuda.synchronize()\r\n elapsed_time = time.time() - t_start\r\n iterations *= 2\r\n FPS = iterations / elapsed_time\r\n iterations = int(FPS * 6)\r\n\r\n print('=========Speed Testing=========')\r\n torch.cuda.synchronize()\r\n torch.cuda.synchronize()\r\n t_start = time.time()\r\n for _ in range(iterations):\r\n model(input)\r\n torch.cuda.synchronize()\r\n torch.cuda.synchronize()\r\n elapsed_time = time.time() - t_start\r\n latency = elapsed_time / iterations * 1000\r\n torch.cuda.empty_cache()\r\n FPS = 1000 / latency\r\n print(FPS)\r\n\r\n\r\n"
] |
[
[
"torch.nn.Sequential",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.init.kaiming_normal_"
],
[
"torch.nn.Sequential",
"torch.cuda.synchronize",
"torch.load",
"torch.cat",
"torch.randn",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.cuda.empty_cache",
"torch.nn.AvgPool2d",
"torch.no_grad",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.functional.interpolate",
"torch.device",
"torch.nn.ReLU",
"torch.nn.init.kaiming_normal_"
]
] |
BSRushwanth/cakechat
|
[
"9d5a572cf3f452e48139184c7b78f6b589bebf27"
] |
[
"cakechat/dialogue_model/inference/candidates/sampling.py"
] |
[
"import keras.backend as K\nimport numpy as np\n\nfrom cakechat.config import INTX\nfrom cakechat.dialog_model.inference.candidates.abstract_generator import AbstractCandidatesGenerator\nfrom cakechat.dialog_model.inference.service_tokens import ServiceTokensIDs\nfrom cakechat.dialog_model.inference.utils import get_next_token_prob_one_step, get_thought_vectors\n\n\nclass TokenSampler(object):\n \"\"\"\n Class for sampling responses without banned tokens and repeating.\n There has to be individual instance of TokenSampler for each run of sampling procedure\n because it contains counters of tokens that have to be reset before sampling new responses.\n \"\"\"\n\n def __init__(self, batch_size, banned_tokens_ids, non_penalizable_tokens_ids, repetition_penalization_coefficient):\n self._batch_size = batch_size\n self._banned_tokens_ids = banned_tokens_ids\n self._non_penalizable_tokens_ids = non_penalizable_tokens_ids\n self._used_tokens_ids = [[] for _ in range(batch_size)]\n self._repetition_penalization_coefficient = repetition_penalization_coefficient\n\n def sample(self, probabilities, sample_idx, temperature=1.0):\n \"\"\"\n Sample using individual priors for each sample_idx.\n Also updates individual priors for each sample in batch.\n We need individual priors to prevent the model from repeating the same tokens over and over again in one\n response.\n probabilities: Probabilities of each token. The distribution given by these probabilities\n is used by this function to sample the token.\n sample_idx : Integer between 0 and batch_size-1. We need it to figure out which token_log_prior line to use.\n temperature: Temperature for sampling. Temperature has to be a positive number.\n :return: Index of sampled token\n \"\"\"\n # To make the repetition penalization invariant to the original temperature we have to adjust the coefficient:\n repetition_penalize_coefficient = np.exp(np.log(self._repetition_penalization_coefficient) / temperature)\n # Back-up the array to avoid side-effects (otherwise the function will change the probabilities passed as an\n # argument)\n probabilities = np.copy(probabilities)\n\n probabilities[self._banned_tokens_ids] = 0\n probabilities[self._used_tokens_ids[sample_idx]] /= repetition_penalize_coefficient\n\n probabilities /= np.sum(probabilities)\n token_id = np.random.choice(probabilities.shape[0], replace=False, p=probabilities)\n\n # Update used tokens list\n if token_id not in self._non_penalizable_tokens_ids:\n self._used_tokens_ids[sample_idx].append(token_id)\n\n return token_id\n\n\nclass SamplingCandidatesGenerator(AbstractCandidatesGenerator):\n def __init__(self, nn_model, temperature, samples_num, repetition_penalization_coefficient):\n self._nn_model = nn_model\n self._temperature = temperature\n self._samples_num = samples_num\n self._service_tokens_ids = ServiceTokensIDs(nn_model.token_to_index)\n self._repetition_penalization_coefficient = repetition_penalization_coefficient\n\n def _sample_response(self, thought_vectors, condition_ids, output_seq_len):\n batch_size = thought_vectors.shape[0]\n sampler = TokenSampler(batch_size, self._service_tokens_ids.banned_tokens_ids,\n self._service_tokens_ids.non_penalizable_tokens_ids,\n self._repetition_penalization_coefficient)\n # For each candidate in the batch, for each layer of the decoder we need hidden_states_dim numbers to store\n # this array\n hidden_states_batch = np.zeros(\n (batch_size, self._nn_model.decoder_depth, self._nn_model.hidden_layer_dim),\n dtype=K.floatx()) # By default, numpy has 
dtype=np.float64, but this array is passed\n # right into model's functions, so we need to have explicit type declaring here.\n\n response_tokens_ids = np.full((batch_size, output_seq_len), self._service_tokens_ids.pad_token_id, dtype=INTX)\n\n # Track finished responses to skip prediction step for them\n is_response_finished = np.zeros(batch_size, dtype=np.bool)\n\n # Fill in first tokens of each response in the batch:\n response_tokens_ids[:, 0] = self._service_tokens_ids.start_token_id\n for token_idx in range(1, output_seq_len): # Starting with the second token\n hidden_states_batch, next_token_probs_batch = \\\n get_next_token_prob_one_step(self._nn_model, thought_vectors, hidden_states_batch,\n response_tokens_ids[:, token_idx - 1], # previous token for each response\n condition_ids,\n temperature=self._temperature)\n\n for response_idx, next_token_probs in enumerate(next_token_probs_batch):\n if is_response_finished[response_idx]:\n continue\n\n next_token_id = sampler.sample(next_token_probs, response_idx, self._temperature)\n response_tokens_ids[response_idx, token_idx] = next_token_id\n\n if next_token_id in [self._service_tokens_ids.eos_token_id, self._service_tokens_ids.pad_token_id]:\n is_response_finished[response_idx] = True\n\n # Stop if all responses are done\n if np.all(is_response_finished):\n break\n\n return response_tokens_ids\n\n def generate_candidates(self, context_tokens_ids, condition_ids, output_seq_len):\n \"\"\"\n Predict answers for every sequence token by token until EOS_TOKEN occurred in the sequence\n using sampling with temperature.\n During the sampling procedure offensive and <unk> tokens are banned.\n Probabilities of tokens that have already been used in a response are penalized\n (divided by REPETITION_PENALIZE_COEFFICIENT).\n All the rest of the sequence is filled with PAD_TOKENs.\n \"\"\"\n thought_vectors = get_thought_vectors(self._nn_model, context_tokens_ids)\n sampled_candidates = [\n self._sample_response(thought_vectors, condition_ids, output_seq_len) for _ in range(self._samples_num)\n ]\n\n # Transpose the result: candidate_id x batch_size x seq_len -> batch_size x candidate_id x seq_len\n return np.swapaxes(sampled_candidates, 0, 1)\n"
] |
[
[
"numpy.swapaxes",
"numpy.log",
"numpy.random.choice",
"numpy.full",
"numpy.all",
"numpy.copy",
"numpy.zeros",
"numpy.sum"
]
] |
caljrobe/numba
|
[
"97c954100cb8bef8a563cec71e1f3751af3ecb08"
] |
[
"numba/tests/test_dyn_array.py"
] |
[
"import contextlib\nimport sys\nimport numpy as np\nimport random\nimport re\nimport threading\nimport gc\n\nfrom numba.core.errors import TypingError\nfrom numba import njit\nfrom numba.core import types, utils, config\nfrom numba.tests.support import MemoryLeakMixin, TestCase, tag\nimport unittest\n\n\nnrtjit = njit(_nrt=True, nogil=True)\n\n\ndef np_concatenate1(a, b, c):\n return np.concatenate((a, b, c))\n\ndef np_concatenate2(a, b, c, axis):\n return np.concatenate((a, b, c), axis=axis)\n\ndef np_stack1(a, b, c):\n return np.stack((a, b, c))\n\ndef np_stack2(a, b, c, axis):\n return np.stack((a, b, c), axis=axis)\n\ndef np_hstack(a, b, c):\n return np.hstack((a, b, c))\n\ndef np_vstack(a, b, c):\n return np.vstack((a, b, c))\n\ndef np_dstack(a, b, c):\n return np.dstack((a, b, c))\n\ndef np_column_stack(a, b, c):\n return np.column_stack((a, b, c))\n\n\nclass BaseTest(TestCase):\n\n def check_outputs(self, pyfunc, argslist, exact=True):\n cfunc = nrtjit(pyfunc)\n for args in argslist:\n expected = pyfunc(*args)\n ret = cfunc(*args)\n self.assertEqual(ret.size, expected.size)\n self.assertEqual(ret.dtype, expected.dtype)\n self.assertStridesEqual(ret, expected)\n if exact:\n np.testing.assert_equal(expected, ret)\n else:\n np.testing.assert_allclose(expected, ret)\n\n\nclass NrtRefCtTest(MemoryLeakMixin):\n def assert_array_nrt_refct(self, arr, expect):\n self.assertEqual(arr.base.refcount, expect)\n\n\nclass TestDynArray(NrtRefCtTest, TestCase):\n\n def test_empty_0d(self):\n @nrtjit\n def foo():\n arr = np.empty(())\n arr[()] = 42\n return arr\n\n arr = foo()\n self.assert_array_nrt_refct(arr, 1)\n np.testing.assert_equal(42, arr)\n self.assertEqual(arr.size, 1)\n self.assertEqual(arr.shape, ())\n self.assertEqual(arr.dtype, np.dtype(np.float64))\n self.assertEqual(arr.strides, ())\n arr.fill(123) # test writability\n np.testing.assert_equal(123, arr)\n del arr\n\n def test_empty_1d(self):\n @nrtjit\n def foo(n):\n arr = np.empty(n)\n for i in range(n):\n arr[i] = i\n\n return arr\n\n n = 3\n arr = foo(n)\n self.assert_array_nrt_refct(arr, 1)\n np.testing.assert_equal(np.arange(n), arr)\n self.assertEqual(arr.size, n)\n self.assertEqual(arr.shape, (n,))\n self.assertEqual(arr.dtype, np.dtype(np.float64))\n self.assertEqual(arr.strides, (np.dtype(np.float64).itemsize,))\n arr.fill(123) # test writability\n np.testing.assert_equal(123, arr)\n del arr\n\n def test_empty_2d(self):\n def pyfunc(m, n):\n arr = np.empty((m, n), np.int32)\n for i in range(m):\n for j in range(n):\n arr[i, j] = i + j\n\n return arr\n\n cfunc = nrtjit(pyfunc)\n m = 4\n n = 3\n expected_arr = pyfunc(m, n)\n got_arr = cfunc(m, n)\n self.assert_array_nrt_refct(got_arr, 1)\n np.testing.assert_equal(expected_arr, got_arr)\n\n self.assertEqual(expected_arr.size, got_arr.size)\n self.assertEqual(expected_arr.shape, got_arr.shape)\n self.assertEqual(expected_arr.strides, got_arr.strides)\n\n del got_arr\n\n def test_empty_3d(self):\n def pyfunc(m, n, p):\n arr = np.empty((m, n, p), np.int32)\n for i in range(m):\n for j in range(n):\n for k in range(p):\n arr[i, j, k] = i + j + k\n\n return arr\n\n cfunc = nrtjit(pyfunc)\n m = 4\n n = 3\n p = 2\n expected_arr = pyfunc(m, n, p)\n got_arr = cfunc(m, n, p)\n self.assert_array_nrt_refct(got_arr, 1)\n np.testing.assert_equal(expected_arr, got_arr)\n\n self.assertEqual(expected_arr.size, got_arr.size)\n self.assertEqual(expected_arr.shape, got_arr.shape)\n self.assertEqual(expected_arr.strides, got_arr.strides)\n\n del got_arr\n\n def test_empty_2d_sliced(self):\n def 
pyfunc(m, n, p):\n arr = np.empty((m, n), np.int32)\n for i in range(m):\n for j in range(n):\n arr[i, j] = i + j\n\n return arr[p]\n\n cfunc = nrtjit(pyfunc)\n m = 4\n n = 3\n p = 2\n expected_arr = pyfunc(m, n, p)\n got_arr = cfunc(m, n, p)\n self.assert_array_nrt_refct(got_arr, 1)\n np.testing.assert_equal(expected_arr, got_arr)\n\n self.assertEqual(expected_arr.size, got_arr.size)\n self.assertEqual(expected_arr.shape, got_arr.shape)\n self.assertEqual(expected_arr.strides, got_arr.strides)\n\n del got_arr\n\n def test_return_global_array(self):\n y = np.ones(4, dtype=np.float32)\n initrefct = sys.getrefcount(y)\n\n def return_external_array():\n return y\n\n cfunc = nrtjit(return_external_array)\n out = cfunc()\n\n # out reference by cfunc\n self.assertEqual(initrefct + 1, sys.getrefcount(y))\n\n np.testing.assert_equal(y, out)\n np.testing.assert_equal(y, np.ones(4, dtype=np.float32))\n np.testing.assert_equal(out, np.ones(4, dtype=np.float32))\n\n del out\n gc.collect()\n # out is only referenced by cfunc\n self.assertEqual(initrefct + 1, sys.getrefcount(y))\n\n del cfunc\n gc.collect()\n # y is no longer referenced by cfunc\n self.assertEqual(initrefct, sys.getrefcount(y))\n\n def test_return_global_array_sliced(self):\n y = np.ones(4, dtype=np.float32)\n\n def return_external_array():\n return y[2:]\n\n cfunc = nrtjit(return_external_array)\n out = cfunc()\n self.assertIsNone(out.base)\n\n yy = y[2:]\n np.testing.assert_equal(yy, out)\n np.testing.assert_equal(yy, np.ones(2, dtype=np.float32))\n np.testing.assert_equal(out, np.ones(2, dtype=np.float32))\n\n def test_array_pass_through(self):\n def pyfunc(y):\n return y\n\n arr = np.ones(4, dtype=np.float32)\n\n cfunc = nrtjit(pyfunc)\n expected = cfunc(arr)\n got = pyfunc(arr)\n\n np.testing.assert_equal(expected, arr)\n np.testing.assert_equal(expected, got)\n self.assertIs(expected, arr)\n self.assertIs(expected, got)\n\n def test_array_pass_through_sliced(self):\n def pyfunc(y):\n return y[y.size // 2:]\n\n arr = np.ones(4, dtype=np.float32)\n\n initrefct = sys.getrefcount(arr)\n\n cfunc = nrtjit(pyfunc)\n got = cfunc(arr)\n self.assertEqual(initrefct + 1, sys.getrefcount(arr))\n expected = pyfunc(arr)\n self.assertEqual(initrefct + 2, sys.getrefcount(arr))\n\n np.testing.assert_equal(expected, arr[arr.size // 2])\n np.testing.assert_equal(expected, got)\n\n del expected\n self.assertEqual(initrefct + 1, sys.getrefcount(arr))\n del got\n self.assertEqual(initrefct, sys.getrefcount(arr))\n\n def test_ufunc_with_allocated_output(self):\n\n def pyfunc(a, b):\n out = np.empty(a.shape)\n np.add(a, b, out)\n return out\n\n cfunc = nrtjit(pyfunc)\n\n # 1D case\n arr_a = np.random.random(10)\n arr_b = np.random.random(10)\n\n np.testing.assert_equal(pyfunc(arr_a, arr_b),\n cfunc(arr_a, arr_b))\n\n self.assert_array_nrt_refct(cfunc(arr_a, arr_b), 1)\n\n # 2D case\n arr_a = np.random.random(10).reshape(2, 5)\n arr_b = np.random.random(10).reshape(2, 5)\n\n np.testing.assert_equal(pyfunc(arr_a, arr_b),\n cfunc(arr_a, arr_b))\n\n self.assert_array_nrt_refct(cfunc(arr_a, arr_b), 1)\n\n # 3D case\n arr_a = np.random.random(70).reshape(2, 5, 7)\n arr_b = np.random.random(70).reshape(2, 5, 7)\n\n np.testing.assert_equal(pyfunc(arr_a, arr_b),\n cfunc(arr_a, arr_b))\n\n self.assert_array_nrt_refct(cfunc(arr_a, arr_b), 1)\n\n def test_allocation_mt(self):\n \"\"\"\n This test exercises the array allocation in multithreaded usecase.\n This stress the freelist inside NRT.\n \"\"\"\n\n def pyfunc(inp):\n out = np.empty(inp.size)\n\n # Zero fill\n 
for i in range(out.size):\n out[i] = 0\n\n for i in range(inp[0]):\n # Allocate inside a loop\n tmp = np.empty(inp.size)\n # Write to tmp\n for j in range(tmp.size):\n tmp[j] = inp[j]\n # out = tmp + i\n for j in range(tmp.size):\n out[j] += tmp[j] + i\n\n return out\n\n cfunc = nrtjit(pyfunc)\n size = 10 # small array size so that the computation is short\n arr = np.random.randint(1, 10, size)\n frozen_arr = arr.copy()\n\n np.testing.assert_equal(pyfunc(arr), cfunc(arr))\n # Ensure we did not modify the input\n np.testing.assert_equal(frozen_arr, arr)\n\n workers = []\n inputs = []\n outputs = []\n\n # Make wrapper to store the output\n def wrapped(inp, out):\n out[:] = cfunc(inp)\n\n # Create a lot of worker threads to create contention\n for i in range(100):\n arr = np.random.randint(1, 10, size)\n out = np.empty_like(arr)\n thread = threading.Thread(target=wrapped,\n args=(arr, out),\n name=\"worker{0}\".format(i))\n workers.append(thread)\n inputs.append(arr)\n outputs.append(out)\n\n # Launch worker threads\n for thread in workers:\n thread.start()\n\n # Join worker threads\n for thread in workers:\n thread.join()\n\n # Check result\n for inp, out in zip(inputs, outputs):\n np.testing.assert_equal(pyfunc(inp), out)\n\n def test_refct_mt(self):\n \"\"\"\n This test exercises the refct in multithreaded code\n \"\"\"\n\n def pyfunc(n, inp):\n out = np.empty(inp.size)\n for i in range(out.size):\n out[i] = inp[i] + 1\n # Use swap to trigger many refct ops\n for i in range(n):\n out, inp = inp, out\n return out\n\n cfunc = nrtjit(pyfunc)\n size = 10\n input = np.arange(size, dtype=np.float)\n expected_refct = sys.getrefcount(input)\n swapct = random.randrange(1000)\n expected = pyfunc(swapct, input)\n np.testing.assert_equal(expected, cfunc(swapct, input))\n # The following checks can discover a reference count error\n del expected\n self.assertEqual(expected_refct, sys.getrefcount(input))\n\n workers = []\n outputs = []\n swapcts = []\n\n # Make wrapper to store the output\n def wrapped(n, input, out):\n out[:] = cfunc(n, input)\n\n # Create worker threads\n for i in range(100):\n out = np.empty(size)\n # All thread shares the same input\n swapct = random.randrange(1000)\n thread = threading.Thread(target=wrapped,\n args=(swapct, input, out),\n name=\"worker{0}\".format(i))\n workers.append(thread)\n outputs.append(out)\n swapcts.append(swapct)\n\n # Launch worker threads\n for thread in workers:\n thread.start()\n\n # Join worker threads\n for thread in workers:\n thread.join()\n\n # Check result\n for swapct, out in zip(swapcts, outputs):\n np.testing.assert_equal(pyfunc(swapct, input), out)\n\n del outputs, workers\n # The following checks can discover a reference count error\n self.assertEqual(expected_refct, sys.getrefcount(input))\n\n def test_swap(self):\n\n def pyfunc(x, y, t):\n \"\"\"Swap array x and y for t number of times\n \"\"\"\n for i in range(t):\n x, y = y, x\n\n return x, y\n\n\n cfunc = nrtjit(pyfunc)\n\n x = np.random.random(100)\n y = np.random.random(100)\n\n t = 100\n\n initrefct = sys.getrefcount(x), sys.getrefcount(y)\n expect, got = pyfunc(x, y, t), cfunc(x, y, t)\n self.assertIsNone(got[0].base)\n self.assertIsNone(got[1].base)\n np.testing.assert_equal(expect, got)\n del expect, got\n self.assertEqual(initrefct, (sys.getrefcount(x), sys.getrefcount(y)))\n\n def test_return_tuple_of_array(self):\n\n def pyfunc(x):\n y = np.empty(x.size)\n for i in range(y.size):\n y[i] = x[i] + 1\n return x, y\n\n cfunc = nrtjit(pyfunc)\n\n x = np.random.random(5)\n initrefct 
= sys.getrefcount(x)\n expected_x, expected_y = pyfunc(x)\n got_x, got_y = cfunc(x)\n self.assertIs(x, expected_x)\n self.assertIs(x, got_x)\n np.testing.assert_equal(expected_x, got_x)\n np.testing.assert_equal(expected_y, got_y)\n del expected_x, got_x\n self.assertEqual(initrefct, sys.getrefcount(x))\n\n self.assertEqual(sys.getrefcount(expected_y), sys.getrefcount(got_y))\n\n def test_return_tuple_of_array_created(self):\n\n def pyfunc(x):\n y = np.empty(x.size)\n for i in range(y.size):\n y[i] = x[i] + 1\n out = y, y\n return out\n\n cfunc = nrtjit(pyfunc)\n\n x = np.random.random(5)\n expected_x, expected_y = pyfunc(x)\n got_x, got_y = cfunc(x)\n np.testing.assert_equal(expected_x, got_x)\n np.testing.assert_equal(expected_y, got_y)\n # getrefcount owns 1, got_y owns 1\n self.assertEqual(2, sys.getrefcount(got_y))\n # getrefcount owns 1, got_y owns 1\n self.assertEqual(2, sys.getrefcount(got_y))\n\n def test_issue_with_return_leak(self):\n \"\"\"\n Dispatcher returns a new reference.\n It need to workaround it for now.\n \"\"\"\n @nrtjit\n def inner(out):\n return out\n\n def pyfunc(x):\n return inner(x)\n\n cfunc = nrtjit(pyfunc)\n\n arr = np.arange(10)\n old_refct = sys.getrefcount(arr)\n\n self.assertEqual(old_refct, sys.getrefcount(pyfunc(arr)))\n self.assertEqual(old_refct, sys.getrefcount(cfunc(arr)))\n self.assertEqual(old_refct, sys.getrefcount(arr))\n\n\nclass ConstructorBaseTest(NrtRefCtTest):\n\n def check_0d(self, pyfunc):\n cfunc = nrtjit(pyfunc)\n expected = pyfunc()\n ret = cfunc()\n self.assert_array_nrt_refct(ret, 1)\n self.assertEqual(ret.size, expected.size)\n self.assertEqual(ret.shape, expected.shape)\n self.assertEqual(ret.dtype, expected.dtype)\n self.assertEqual(ret.strides, expected.strides)\n self.check_result_value(ret, expected)\n # test writability\n expected = np.empty_like(ret) # np.full_like was not added until Numpy 1.8\n expected.fill(123)\n ret.fill(123)\n np.testing.assert_equal(ret, expected)\n\n def check_1d(self, pyfunc):\n cfunc = nrtjit(pyfunc)\n n = 3\n expected = pyfunc(n)\n ret = cfunc(n)\n self.assert_array_nrt_refct(ret, 1)\n self.assertEqual(ret.size, expected.size)\n self.assertEqual(ret.shape, expected.shape)\n self.assertEqual(ret.dtype, expected.dtype)\n self.assertEqual(ret.strides, expected.strides)\n self.check_result_value(ret, expected)\n # test writability\n expected = np.empty_like(ret) # np.full_like was not added until Numpy 1.8\n expected.fill(123)\n ret.fill(123)\n np.testing.assert_equal(ret, expected)\n # errors\n with self.assertRaises(ValueError) as cm:\n cfunc(-1)\n self.assertEqual(str(cm.exception), \"negative dimensions not allowed\")\n\n def check_2d(self, pyfunc):\n cfunc = nrtjit(pyfunc)\n m, n = 2, 3\n expected = pyfunc(m, n)\n ret = cfunc(m, n)\n self.assert_array_nrt_refct(ret, 1)\n self.assertEqual(ret.size, expected.size)\n self.assertEqual(ret.shape, expected.shape)\n self.assertEqual(ret.dtype, expected.dtype)\n self.assertEqual(ret.strides, expected.strides)\n self.check_result_value(ret, expected)\n # test writability\n expected = np.empty_like(ret) # np.full_like was not added until Numpy 1.8\n expected.fill(123)\n ret.fill(123)\n np.testing.assert_equal(ret, expected)\n # errors\n with self.assertRaises(ValueError) as cm:\n cfunc(2, -1)\n self.assertEqual(str(cm.exception), \"negative dimensions not allowed\")\n\n def check_alloc_size(self, pyfunc):\n \"\"\"Checks that pyfunc will error, not segfaulting due to array size.\"\"\"\n cfunc = nrtjit(pyfunc)\n with self.assertRaises(ValueError) as e:\n 
cfunc()\n self.assertIn(\n \"array is too big\",\n str(e.exception)\n )\n\n\nclass TestNdZeros(ConstructorBaseTest, TestCase):\n\n def setUp(self):\n super(TestNdZeros, self).setUp()\n self.pyfunc = np.zeros\n\n def check_result_value(self, ret, expected):\n np.testing.assert_equal(ret, expected)\n\n def test_0d(self):\n pyfunc = self.pyfunc\n def func():\n return pyfunc(())\n self.check_0d(func)\n\n def test_1d(self):\n pyfunc = self.pyfunc\n def func(n):\n return pyfunc(n)\n self.check_1d(func)\n\n def test_1d_dtype(self):\n pyfunc = self.pyfunc\n def func(n):\n return pyfunc(n, np.int32)\n self.check_1d(func)\n\n def test_1d_dtype_instance(self):\n # dtype as numpy dtype, not as scalar class\n pyfunc = self.pyfunc\n _dtype = np.dtype('int32')\n def func(n):\n return pyfunc(n, _dtype)\n self.check_1d(func)\n\n def test_1d_dtype_str(self):\n pyfunc = self.pyfunc\n _dtype = 'int32'\n def func(n):\n return pyfunc(n, _dtype)\n self.check_1d(func)\n\n def func(n):\n return pyfunc(n, 'complex128')\n self.check_1d(func)\n\n def test_1d_dtype_non_const_str(self):\n pyfunc = self.pyfunc\n\n @njit\n def func(n, dt):\n return pyfunc(n, dt)\n\n with self.assertRaises(TypingError) as raises:\n func(5, 'int32')\n\n excstr = str(raises.exception)\n self.assertIn('No match', excstr)\n restr = r'\\b{}\\(int.*?, unicode_type\\)\\B'\n regex = re.compile(restr.format(pyfunc.__name__))\n self.assertRegex(excstr, regex)\n\n def test_2d(self):\n pyfunc = self.pyfunc\n def func(m, n):\n return pyfunc((m, n))\n self.check_2d(func)\n\n def test_2d_shape_dtypes(self):\n # Test for issue #4575\n pyfunc = self.pyfunc\n def func1(m, n):\n return pyfunc((np.int16(m), np.int32(n)))\n self.check_2d(func1)\n # Using a 64-bit value checks that 32 bit systems will downcast to intp\n def func2(m, n):\n return pyfunc((np.int64(m), np.int8(n)))\n self.check_2d(func2)\n # Make sure an error is thrown if we can't downcast safely\n if config.IS_32BITS:\n cfunc = nrtjit(lambda m, n: pyfunc((m, n)))\n with self.assertRaises(ValueError):\n cfunc(np.int64(1 << (32 - 1)), 1)\n\n def test_2d_dtype_kwarg(self):\n pyfunc = self.pyfunc\n def func(m, n):\n return pyfunc((m, n), dtype=np.complex64)\n self.check_2d(func)\n\n def test_2d_dtype_str_kwarg(self):\n pyfunc = self.pyfunc\n def func(m, n):\n return pyfunc((m, n), dtype='complex64')\n self.check_2d(func)\n\n def test_alloc_size(self):\n pyfunc = self.pyfunc\n width = types.intp.bitwidth\n def gen_func(shape, dtype):\n return lambda : pyfunc(shape, dtype)\n # Under these values numba will segfault, but thats another issue\n self.check_alloc_size(gen_func(1 << width - 2, np.intp))\n self.check_alloc_size(gen_func((1 << width - 8, 64), np.intp))\n\n\nclass TestNdOnes(TestNdZeros):\n\n def setUp(self):\n super(TestNdOnes, self).setUp()\n self.pyfunc = np.ones\n\n\nclass TestNdFull(ConstructorBaseTest, TestCase):\n\n def check_result_value(self, ret, expected):\n np.testing.assert_equal(ret, expected)\n\n def test_0d(self):\n def func():\n return np.full((), 4.5)\n self.check_0d(func)\n\n def test_1d(self):\n def func(n):\n return np.full(n, 4.5)\n self.check_1d(func)\n\n def test_1d_dtype(self):\n def func(n):\n return np.full(n, 4.5, np.bool_)\n self.check_1d(func)\n\n def test_1d_dtype_instance(self):\n dtype = np.dtype('bool')\n def func(n):\n return np.full(n, 4.5, dtype)\n self.check_1d(func)\n\n def test_1d_dtype_str(self):\n def func(n):\n return np.full(n, 4.5, 'bool_')\n self.check_1d(func)\n\n def test_1d_dtype_non_const_str(self):\n\n @njit\n def func(n, fv, dt):\n return 
np.full(n, fv, dt)\n\n with self.assertRaises(TypingError) as raises:\n func((5,), 4.5, 'int32')\n\n excstr = str(raises.exception)\n self.assertIn('No match', excstr)\n restr = r'\\bfull\\(UniTuple\\(int.*? x 1\\), float64, unicode_type\\)\\B'\n regex = re.compile(restr)\n self.assertRegex(excstr, regex)\n\n def test_2d(self):\n def func(m, n):\n return np.full((m, n), 4.5)\n self.check_2d(func)\n\n def test_2d_dtype_kwarg(self):\n def func(m, n):\n return np.full((m, n), 1 + 4.5j, dtype=np.complex64)\n self.check_2d(func)\n\n def test_2d_dtype_from_type(self):\n # tests issue #2862\n def func(m, n):\n return np.full((m, n), np.int32(1))\n self.check_2d(func)\n\n # Complex uses `.real`, imaginary part dropped\n def func(m, n):\n return np.full((m, n), np.complex128(1))\n self.check_2d(func)\n\n # and that if a dtype is specified, this influences the return type\n def func(m, n):\n return np.full((m, n), 1, dtype=np.int8)\n self.check_2d(func)\n\n def test_2d_shape_dtypes(self):\n # Test for issue #4575\n def func1(m, n):\n return np.full((np.int16(m), np.int32(n)), 4.5)\n self.check_2d(func1)\n # Using a 64-bit value checks that 32 bit systems will downcast to intp\n def func2(m, n):\n return np.full((np.int64(m), np.int8(n)), 4.5)\n self.check_2d(func2)\n # Make sure an error is thrown if we can't downcast safely\n if config.IS_32BITS:\n cfunc = nrtjit(lambda m, n: np.full((m, n), 4.5))\n with self.assertRaises(ValueError):\n cfunc(np.int64(1 << (32 - 1)), 1)\n\n def test_alloc_size(self):\n width = types.intp.bitwidth\n def gen_func(shape, value):\n return lambda : np.full(shape, value)\n # Under these values numba will segfault, but thats another issue\n self.check_alloc_size(gen_func(1 << width - 2, 1))\n self.check_alloc_size(gen_func((1 << width - 8, 64), 1))\n\n\nclass ConstructorLikeBaseTest(object):\n\n def mutate_array(self, arr):\n try:\n arr.fill(42)\n except (TypeError, ValueError):\n # Try something else (e.g. 
Numpy 1.6 with structured dtypes)\n fill_value = b'x' * arr.dtype.itemsize\n arr.fill(fill_value)\n\n def check_like(self, pyfunc, dtype):\n def check_arr(arr):\n expected = pyfunc(arr)\n ret = cfunc(arr)\n self.assertEqual(ret.size, expected.size)\n self.assertEqual(ret.dtype, expected.dtype)\n self.assertStridesEqual(ret, expected)\n self.check_result_value(ret, expected)\n # test writability\n self.mutate_array(ret)\n self.mutate_array(expected)\n np.testing.assert_equal(ret, expected)\n\n orig = np.linspace(0, 5, 6).astype(dtype)\n cfunc = nrtjit(pyfunc)\n\n for shape in (6, (2, 3), (1, 2, 3), (3, 1, 2), ()):\n if shape == ():\n arr = orig[-1:].reshape(())\n else:\n arr = orig.reshape(shape)\n check_arr(arr)\n # Non-contiguous array\n if arr.ndim > 0:\n check_arr(arr[::2])\n # Check new array doesn't inherit readonly flag\n arr.flags['WRITEABLE'] = False\n # verify read-only\n with self.assertRaises(ValueError):\n arr[0] = 1\n check_arr(arr)\n\n # Scalar argument => should produce a 0-d array\n check_arr(orig[0])\n\n\nclass TestNdEmptyLike(ConstructorLikeBaseTest, TestCase):\n\n def setUp(self):\n super(TestNdEmptyLike, self).setUp()\n self.pyfunc = np.empty_like\n\n def check_result_value(self, ret, expected):\n pass\n\n def test_like(self):\n pyfunc = self.pyfunc\n def func(arr):\n return pyfunc(arr)\n self.check_like(func, np.float64)\n\n def test_like_structured(self):\n dtype = np.dtype([('a', np.int16), ('b', np.float32)])\n pyfunc = self.pyfunc\n def func(arr):\n return pyfunc(arr)\n self.check_like(func, dtype)\n\n def test_like_dtype(self):\n pyfunc = self.pyfunc\n def func(arr):\n return pyfunc(arr, np.int32)\n self.check_like(func, np.float64)\n\n def test_like_dtype_instance(self):\n dtype = np.dtype('int32')\n pyfunc = self.pyfunc\n def func(arr):\n return pyfunc(arr, dtype)\n self.check_like(func, np.float64)\n\n def test_like_dtype_structured(self):\n dtype = np.dtype([('a', np.int16), ('b', np.float32)])\n pyfunc = self.pyfunc\n def func(arr):\n return pyfunc(arr, dtype)\n self.check_like(func, np.float64)\n\n def test_like_dtype_kwarg(self):\n pyfunc = self.pyfunc\n def func(arr):\n return pyfunc(arr, dtype=np.int32)\n self.check_like(func, np.float64)\n\n def test_like_dtype_str_kwarg(self):\n pyfunc = self.pyfunc\n def func(arr):\n return pyfunc(arr, dtype='int32')\n self.check_like(func, np.float64)\n\n def test_like_dtype_str_kwarg(self):\n pyfunc = self.pyfunc\n def func(arr):\n return pyfunc(arr, dtype='int32')\n self.check_like(func, np.float64)\n\n def test_like_dtype_non_const_str(self):\n pyfunc = self.pyfunc\n\n @njit\n def func(n, dt):\n return pyfunc(n, dt)\n\n with self.assertRaises(TypingError) as raises:\n func(np.ones(4), 'int32')\n\n excstr = str(raises.exception)\n\n self.assertIn('No match', excstr)\n self.assertIn(\n '{}(array(float64, 1d, C), unicode_type)'.format(pyfunc.__name__),\n excstr)\n\n\nclass TestNdZerosLike(TestNdEmptyLike):\n\n def setUp(self):\n super(TestNdZerosLike, self).setUp()\n self.pyfunc = np.zeros_like\n\n def check_result_value(self, ret, expected):\n np.testing.assert_equal(ret, expected)\n\n def test_like_structured(self):\n super(TestNdZerosLike, self).test_like_structured()\n\n def test_like_dtype_structured(self):\n super(TestNdZerosLike, self).test_like_dtype_structured()\n\n\nclass TestNdOnesLike(TestNdZerosLike):\n\n def setUp(self):\n super(TestNdOnesLike, self).setUp()\n self.pyfunc = np.ones_like\n self.expected_value = 1\n\n # Not supported yet.\n\n @unittest.expectedFailure\n def test_like_structured(self):\n 
super(TestNdOnesLike, self).test_like_structured()\n\n @unittest.expectedFailure\n def test_like_dtype_structured(self):\n super(TestNdOnesLike, self).test_like_dtype_structured()\n\n\nclass TestNdFullLike(ConstructorLikeBaseTest, TestCase):\n\n def check_result_value(self, ret, expected):\n np.testing.assert_equal(ret, expected)\n\n def test_like(self):\n def func(arr):\n return np.full_like(arr, 3.5)\n self.check_like(func, np.float64)\n\n # Not supported yet.\n @unittest.expectedFailure\n def test_like_structured(self):\n dtype = np.dtype([('a', np.int16), ('b', np.float32)])\n def func(arr):\n return np.full_like(arr, 4.5)\n self.check_like(func, dtype)\n\n def test_like_dtype(self):\n def func(arr):\n return np.full_like(arr, 4.5, np.bool_)\n self.check_like(func, np.float64)\n\n def test_like_dtype_instance(self):\n dtype = np.dtype('bool')\n def func(arr):\n return np.full_like(arr, 4.5, dtype)\n self.check_like(func, np.float64)\n\n def test_like_dtype_kwarg(self):\n def func(arr):\n return np.full_like(arr, 4.5, dtype=np.bool_)\n self.check_like(func, np.float64)\n\n def test_like_dtype_str_kwarg(self):\n def func(arr):\n return np.full_like(arr, 4.5, 'bool_')\n self.check_like(func, np.float64)\n\n def test_like_dtype_non_const_str_kwarg(self):\n\n @njit\n def func(arr, fv, dt):\n return np.full_like(arr, fv, dt)\n\n with self.assertRaises(TypingError) as raises:\n func(np.ones(3,), 4.5, 'int32')\n\n excstr = str(raises.exception)\n self.assertIn('No match', excstr)\n self.assertIn('full_like(array(float64, 1d, C), float64, unicode_type)',\n excstr)\n\n\nclass TestNdIdentity(BaseTest):\n\n def check_identity(self, pyfunc):\n self.check_outputs(pyfunc, [(3,)])\n\n def test_identity(self):\n def func(n):\n return np.identity(n)\n self.check_identity(func)\n\n def test_identity_dtype(self):\n for dtype in (np.complex64, np.int16, np.bool_, np.dtype('bool'),\n 'bool_'):\n def func(n):\n return np.identity(n, dtype)\n self.check_identity(func)\n\n def test_like_dtype_non_const_str_kwarg(self):\n\n @njit\n def func(n, dt):\n return np.identity(n, dt)\n\n with self.assertRaises(TypingError) as raises:\n func(4, 'int32')\n\n excstr = str(raises.exception)\n self.assertIn('No match', excstr)\n regex = re.compile(r'\\bidentity\\(int.*?, unicode_type\\)\\B')\n self.assertRegex(excstr, regex)\n\n\nclass TestNdEye(BaseTest):\n\n def test_eye_n(self):\n def func(n):\n return np.eye(n)\n self.check_outputs(func, [(1,), (3,)])\n\n def test_eye_n_dtype(self):\n # check None option, dtype class, instance of dtype class\n for dt in (None, np.complex128, np.complex64(1)):\n def func(n, dtype=dt):\n return np.eye(n, dtype=dtype)\n self.check_outputs(func, [(1,), (3,)])\n\n def test_eye_n_m(self):\n def func(n, m):\n return np.eye(n, m)\n self.check_outputs(func, [(1, 2), (3, 2), (0, 3)])\n\n def check_eye_n_m_k(self, func):\n self.check_outputs(func, [(1, 2, 0),\n (3, 4, 1),\n (3, 4, -1),\n (4, 3, -2),\n (4, 3, -5),\n (4, 3, 5)])\n\n def test_eye_n_m_k(self):\n def func(n, m, k):\n return np.eye(n, m, k)\n self.check_eye_n_m_k(func)\n\n def test_eye_n_m_k_dtype(self):\n def func(n, m, k):\n return np.eye(N=n, M=m, k=k, dtype=np.int16)\n self.check_eye_n_m_k(func)\n\n def test_eye_n_m_k_dtype_instance(self):\n dtype = np.dtype('int16')\n def func(n, m, k):\n return np.eye(N=n, M=m, k=k, dtype=dtype)\n self.check_eye_n_m_k(func)\n\n\nclass TestNdDiag(TestCase):\n\n def setUp(self):\n v = np.array([1, 2, 3])\n hv = np.array([[1, 2, 3]])\n vv = np.transpose(hv)\n self.vectors = [v, hv, vv]\n a3x4 = 
np.arange(12).reshape(3, 4)\n a4x3 = np.arange(12).reshape(4, 3)\n self.matricies = [a3x4, a4x3]\n def func(q):\n return np.diag(q)\n self.py = func\n self.jit = nrtjit(func)\n\n def func_kwarg(q, k=0):\n return np.diag(q, k=k)\n self.py_kw = func_kwarg\n self.jit_kw = nrtjit(func_kwarg)\n\n def check_diag(self, pyfunc, nrtfunc, *args, **kwargs):\n expected = pyfunc(*args, **kwargs)\n computed = nrtfunc(*args, **kwargs)\n self.assertEqual(computed.size, expected.size)\n self.assertEqual(computed.dtype, expected.dtype)\n # NOTE: stride not tested as np returns a RO view, nb returns new data\n np.testing.assert_equal(expected, computed)\n\n # create a diag matrix from a vector\n def test_diag_vect_create(self):\n for d in self.vectors:\n self.check_diag(self.py, self.jit, d)\n\n # create a diag matrix from a vector at a given offset\n def test_diag_vect_create_kwarg(self):\n for k in range(-10, 10):\n for d in self.vectors:\n self.check_diag(self.py_kw, self.jit_kw, d, k=k)\n\n # extract the diagonal\n def test_diag_extract(self):\n for d in self.matricies:\n self.check_diag(self.py, self.jit, d)\n\n # extract a diagonal at a given offset\n def test_diag_extract_kwarg(self):\n for k in range(-4, 4):\n for d in self.matricies:\n self.check_diag(self.py_kw, self.jit_kw, d, k=k)\n\n # check error handling\n def test_error_handling(self):\n d = np.array([[[1.]]])\n cfunc = nrtjit(self.py)\n\n # missing arg\n with self.assertRaises(TypeError):\n cfunc()\n\n # > 2d\n with self.assertRaises(TypingError):\n cfunc(d)\n with self.assertRaises(TypingError):\n dfunc = nrtjit(self.py_kw)\n dfunc(d, k=3)\n\nclass TestNdArange(BaseTest):\n\n def test_linspace_2(self):\n def pyfunc(n, m):\n return np.linspace(n, m)\n self.check_outputs(pyfunc,\n [(0, 4), (1, 100), (-3.5, 2.5), (-3j, 2+3j),\n (2, 1), (1+0.5j, 1.5j)], exact=False)\n\n def test_linspace_3(self):\n def pyfunc(n, m, p):\n return np.linspace(n, m, p)\n self.check_outputs(pyfunc,\n [(0, 4, 9), (1, 4, 3), (-3.5, 2.5, 8),\n (-3j, 2+3j, 7), (2, 1, 0),\n (1+0.5j, 1.5j, 5), (1, 1e100, 1)],\n exact=False)\n\n\nclass TestNpyEmptyKeyword(TestCase):\n def _test_with_dtype_kw(self, dtype):\n def pyfunc(shape):\n return np.empty(shape, dtype=dtype)\n\n shapes = [1, 5, 9]\n\n cfunc = nrtjit(pyfunc)\n for s in shapes:\n expected = pyfunc(s)\n got = cfunc(s)\n self.assertEqual(expected.dtype, got.dtype)\n self.assertEqual(expected.shape, got.shape)\n\n def test_with_dtype_kws(self):\n for dtype in [np.int32, np.float32, np.complex64, np.dtype('complex64')]:\n self._test_with_dtype_kw(dtype)\n\n def _test_with_shape_and_dtype_kw(self, dtype):\n def pyfunc(shape):\n return np.empty(shape=shape, dtype=dtype)\n\n shapes = [1, 5, 9]\n\n cfunc = nrtjit(pyfunc)\n for s in shapes:\n expected = pyfunc(s)\n got = cfunc(s)\n self.assertEqual(expected.dtype, got.dtype)\n self.assertEqual(expected.shape, got.shape)\n\n def test_with_shape_and_dtype_kws(self):\n for dtype in [np.int32, np.float32, np.complex64, np.dtype('complex64')]:\n self._test_with_shape_and_dtype_kw(dtype)\n\n def test_empty_no_args(self):\n\n def pyfunc():\n return np.empty()\n\n cfunc = nrtjit(pyfunc)\n\n # Trigger the compilation\n # That will cause a TypingError due to missing shape argument\n with self.assertRaises(TypingError):\n cfunc()\n\n\nclass TestNpArray(MemoryLeakMixin, BaseTest):\n\n def test_0d(self):\n def pyfunc(arg):\n return np.array(arg)\n\n cfunc = nrtjit(pyfunc)\n got = cfunc(42)\n self.assertPreciseEqual(got, np.array(42, dtype=np.intp))\n got = cfunc(2.5)\n 
self.assertPreciseEqual(got, np.array(2.5))\n\n def test_0d_with_dtype(self):\n def pyfunc(arg):\n return np.array(arg, dtype=np.int16)\n\n self.check_outputs(pyfunc, [(42,), (3.5,)])\n\n def test_1d(self):\n def pyfunc(arg):\n return np.array(arg)\n\n cfunc = nrtjit(pyfunc)\n # A list\n got = cfunc([2, 3, 42])\n self.assertPreciseEqual(got, np.intp([2, 3, 42]))\n # A heterogeneous tuple\n got = cfunc((1.0, 2.5j, 42))\n self.assertPreciseEqual(got, np.array([1.0, 2.5j, 42]))\n # An empty tuple\n got = cfunc(())\n self.assertPreciseEqual(got, np.float64(()))\n\n def test_1d_with_dtype(self):\n def pyfunc(arg):\n return np.array(arg, dtype=np.float32)\n\n self.check_outputs(pyfunc,\n [([2, 42],),\n ([3.5, 1.0],),\n ((1, 3.5, 42),),\n ((),),\n ])\n\n def test_1d_with_str_dtype(self):\n def pyfunc(arg):\n return np.array(arg, dtype='float32')\n\n self.check_outputs(pyfunc,\n [([2, 42],),\n ([3.5, 1.0],),\n ((1, 3.5, 42),),\n ((),),\n ])\n\n def test_1d_with_non_const_str_dtype(self):\n\n @njit\n def func(arg, dt):\n return np.array(arg, dtype=dt)\n\n with self.assertRaises(TypingError) as raises:\n func((5, 3), 'int32')\n\n excstr = str(raises.exception)\n self.assertIn('No match', excstr)\n restr = r'\\barray\\(UniTuple\\(int.*? x 2\\), dtype=unicode_type\\)\\B'\n regex = re.compile(restr)\n self.assertRegex(excstr, regex)\n\n def test_2d(self):\n def pyfunc(arg):\n return np.array(arg)\n\n cfunc = nrtjit(pyfunc)\n # A list of tuples\n got = cfunc([(1, 2), (3, 4)])\n self.assertPreciseEqual(got, np.intp([[1, 2], [3, 4]]))\n got = cfunc([(1, 2.5), (3, 4.5)])\n self.assertPreciseEqual(got, np.float64([[1, 2.5], [3, 4.5]]))\n # A tuple of lists\n got = cfunc(([1, 2], [3, 4]))\n self.assertPreciseEqual(got, np.intp([[1, 2], [3, 4]]))\n got = cfunc(([1, 2], [3.5, 4.5]))\n self.assertPreciseEqual(got, np.float64([[1, 2], [3.5, 4.5]]))\n # A tuple of tuples\n got = cfunc(((1.5, 2), (3.5, 4.5)))\n self.assertPreciseEqual(got, np.float64([[1.5, 2], [3.5, 4.5]]))\n got = cfunc(((), ()))\n self.assertPreciseEqual(got, np.float64(((), ())))\n\n def test_2d_with_dtype(self):\n def pyfunc(arg):\n return np.array(arg, dtype=np.int32)\n\n cfunc = nrtjit(pyfunc)\n got = cfunc([(1, 2.5), (3, 4.5)])\n self.assertPreciseEqual(got, np.int32([[1, 2], [3, 4]]))\n\n def test_raises(self):\n\n def pyfunc(arg):\n return np.array(arg)\n\n cfunc = nrtjit(pyfunc)\n\n @contextlib.contextmanager\n def check_raises(msg):\n with self.assertRaises(TypingError) as raises:\n yield\n self.assertIn(msg, str(raises.exception))\n\n with check_raises(('array(float64, 1d, C) not allowed in a '\n 'homogeneous sequence')):\n cfunc(np.array([1.]))\n\n with check_raises(('type Tuple(int64, reflected list(int64)<iv=None>) '\n 'does not have a regular shape')):\n cfunc((np.int64(1), [np.int64(2)]))\n\n with check_raises(\n \"cannot convert Tuple(int64, Record(a[type=int32;offset=0],\"\n \"b[type=float32;offset=4];8;False)) to a homogeneous type\",\n ):\n st = np.dtype([('a', 'i4'), ('b', 'f4')])\n val = np.zeros(1, dtype=st)[0]\n cfunc(((1, 2), (np.int64(1), val)))\n\n\nclass TestNpConcatenate(MemoryLeakMixin, TestCase):\n \"\"\"\n Tests for np.concatenate().\n \"\"\"\n\n def _3d_arrays(self):\n a = np.arange(24).reshape((4, 3, 2))\n b = a + 10\n c = (b + 10).copy(order='F')\n d = (c + 10)[::-1]\n e = (d + 10)[...,::-1]\n return a, b, c, d, e\n\n @contextlib.contextmanager\n def assert_invalid_sizes_over_dim(self, axis):\n with self.assertRaises(ValueError) as raises:\n yield\n self.assertIn(\"input sizes over dimension %d do not match\" % 
axis,\n str(raises.exception))\n\n def test_3d(self):\n pyfunc = np_concatenate2\n cfunc = nrtjit(pyfunc)\n\n def check(a, b, c, axis):\n for ax in (axis, -3 + axis):\n expected = pyfunc(a, b, c, axis=ax)\n got = cfunc(a, b, c, axis=ax)\n self.assertPreciseEqual(got, expected)\n\n def check_all_axes(a, b, c):\n for axis in range(3):\n check(a, b, c, axis)\n\n a, b, c, d, e = self._3d_arrays()\n\n # Inputs with equal sizes\n # C, C, C\n check_all_axes(a, b, b)\n # C, C, F\n check_all_axes(a, b, c)\n # F, F, F\n check_all_axes(a.T, b.T, a.T)\n # F, F, C\n check_all_axes(a.T, b.T, c.T)\n # F, F, A\n check_all_axes(a.T, b.T, d.T)\n # A, A, A\n # (note Numpy may select the layout differently for other inputs)\n check_all_axes(d.T, e.T, d.T)\n\n # Inputs with compatible sizes\n check(a[1:], b, c[::-1], axis=0)\n check(a, b[:,1:], c, axis=1)\n check(a, b, c[:,:,1:], axis=2)\n\n # Different but compatible dtypes\n check_all_axes(a, b.astype(np.float64), b)\n\n # Exceptions leak references\n self.disable_leak_check()\n\n # Incompatible sizes\n for axis in (1, 2, -2, -1):\n with self.assert_invalid_sizes_over_dim(0):\n cfunc(a[1:], b, b, axis)\n for axis in (0, 2, -3, -1):\n with self.assert_invalid_sizes_over_dim(1):\n cfunc(a, b[:,1:], b, axis)\n\n def test_3d_no_axis(self):\n pyfunc = np_concatenate1\n cfunc = nrtjit(pyfunc)\n\n def check(a, b, c):\n expected = pyfunc(a, b, c)\n got = cfunc(a, b, c)\n self.assertPreciseEqual(got, expected)\n\n a, b, c, d, e = self._3d_arrays()\n\n # Inputs with equal sizes\n # C, C, C\n check(a, b, b)\n # C, C, F\n check(a, b, c)\n # F, F, F\n check(a.T, b.T, a.T)\n # F, F, C\n check(a.T, b.T, c.T)\n # F, F, A\n check(a.T, b.T, d.T)\n # A, A, A\n # (note Numpy may select the layout differently for other inputs)\n check(d.T, e.T, d.T)\n\n # Inputs with compatible sizes\n check(a[1:], b, c[::-1])\n\n # Exceptions leak references\n self.disable_leak_check()\n\n # Incompatible sizes\n with self.assert_invalid_sizes_over_dim(1):\n cfunc(a, b[:,1:], b)\n\n def test_typing_errors(self):\n pyfunc = np_concatenate1\n cfunc = nrtjit(pyfunc)\n\n a = np.arange(15)\n b = a.reshape((3, 5))\n c = a.astype(np.dtype([('x', np.int8)]))\n d = np.array(42)\n\n # Different dimensionalities\n with self.assertTypingError() as raises:\n cfunc(a, b, b)\n self.assertIn(\"all the input arrays must have same number of dimensions\",\n str(raises.exception))\n\n # Incompatible dtypes\n with self.assertTypingError() as raises:\n cfunc(a, c, c)\n self.assertIn(\"input arrays must have compatible dtypes\",\n str(raises.exception))\n\n # 0-d arrays\n with self.assertTypingError() as raises:\n cfunc(d, d, d)\n self.assertIn(\"zero-dimensional arrays cannot be concatenated\",\n str(raises.exception))\n\n\n@unittest.skipUnless(hasattr(np, \"stack\"), \"this Numpy doesn't have np.stack()\")\nclass TestNpStack(MemoryLeakMixin, TestCase):\n \"\"\"\n Tests for np.stack().\n \"\"\"\n\n def _3d_arrays(self):\n a = np.arange(24).reshape((4, 3, 2))\n b = a + 10\n c = (b + 10).copy(order='F')\n d = (c + 10)[::-1]\n e = (d + 10)[...,::-1]\n return a, b, c, d, e\n\n @contextlib.contextmanager\n def assert_invalid_sizes(self):\n with self.assertRaises(ValueError) as raises:\n yield\n self.assertIn(\"all input arrays must have the same shape\",\n str(raises.exception))\n\n def check_stack(self, pyfunc, cfunc, args):\n expected = pyfunc(*args)\n got = cfunc(*args)\n # Numba doesn't choose the same layout as Numpy.\n # We would like to check the result is contiguous, but we can't\n # rely on the \"flags\" 
attribute when there are 1-sized\n # dimensions.\n self.assertEqual(got.shape, expected.shape)\n self.assertPreciseEqual(got.flatten(), expected.flatten())\n\n def check_3d(self, pyfunc, cfunc, generate_starargs):\n def check(a, b, c, args):\n self.check_stack(pyfunc, cfunc, (a, b, c) + args)\n\n def check_all_axes(a, b, c):\n for args in generate_starargs():\n check(a, b, c, args)\n\n a, b, c, d, e = self._3d_arrays()\n\n # C, C, C\n check_all_axes(a, b, b)\n # C, C, F\n check_all_axes(a, b, c)\n # F, F, F\n check_all_axes(a.T, b.T, a.T)\n # F, F, C\n check_all_axes(a.T, b.T, c.T)\n # F, F, A\n check_all_axes(a.T, b.T, d.T)\n # A, A, A\n check_all_axes(d.T, e.T, d.T)\n\n # Different but compatible dtypes\n check_all_axes(a, b.astype(np.float64), b)\n\n def check_runtime_errors(self, cfunc, generate_starargs):\n # Exceptions leak references\n self.assert_no_memory_leak()\n self.disable_leak_check()\n\n # Inputs have different shapes\n a, b, c, d, e = self._3d_arrays()\n with self.assert_invalid_sizes():\n args = next(generate_starargs())\n cfunc(a[:-1], b, c, *args)\n\n def test_3d(self):\n \"\"\"\n stack(3d arrays, axis)\n \"\"\"\n pyfunc = np_stack2\n cfunc = nrtjit(pyfunc)\n\n def generate_starargs():\n for axis in range(3):\n yield (axis,)\n yield (-3 + axis,)\n\n self.check_3d(pyfunc, cfunc, generate_starargs)\n self.check_runtime_errors(cfunc, generate_starargs)\n\n def test_3d_no_axis(self):\n \"\"\"\n stack(3d arrays)\n \"\"\"\n pyfunc = np_stack1\n cfunc = nrtjit(pyfunc)\n\n def generate_starargs():\n yield()\n\n self.check_3d(pyfunc, cfunc, generate_starargs)\n self.check_runtime_errors(cfunc, generate_starargs)\n\n def test_0d(self):\n \"\"\"\n stack(0d arrays)\n \"\"\"\n pyfunc = np_stack1\n cfunc = nrtjit(pyfunc)\n\n a = np.array(42)\n b = np.array(-5j)\n c = np.array(True)\n\n self.check_stack(pyfunc, cfunc, (a, b, c))\n\n def check_xxstack(self, pyfunc, cfunc):\n \"\"\"\n 3d and 0d tests for hstack(), vstack(), dstack().\n \"\"\"\n def generate_starargs():\n yield()\n\n self.check_3d(pyfunc, cfunc, generate_starargs)\n # 0d\n a = np.array(42)\n b = np.array(-5j)\n c = np.array(True)\n self.check_stack(pyfunc, cfunc, (a, b, a))\n\n def test_hstack(self):\n pyfunc = np_hstack\n cfunc = nrtjit(pyfunc)\n\n self.check_xxstack(pyfunc, cfunc)\n # 1d\n a = np.arange(5)\n b = np.arange(6) + 10\n self.check_stack(pyfunc, cfunc, (a, b, b))\n # 2d\n a = np.arange(6).reshape((2, 3))\n b = np.arange(8).reshape((2, 4)) + 100\n self.check_stack(pyfunc, cfunc, (a, b, a))\n\n def test_vstack(self):\n pyfunc = np_vstack\n cfunc = nrtjit(pyfunc)\n\n self.check_xxstack(pyfunc, cfunc)\n # 1d\n a = np.arange(5)\n b = a + 10\n self.check_stack(pyfunc, cfunc, (a, b, b))\n # 2d\n a = np.arange(6).reshape((3, 2))\n b = np.arange(8).reshape((4, 2)) + 100\n self.check_stack(pyfunc, cfunc, (a, b, b))\n\n def test_dstack(self):\n pyfunc = np_dstack\n cfunc = nrtjit(pyfunc)\n\n self.check_xxstack(pyfunc, cfunc)\n # 1d\n a = np.arange(5)\n b = a + 10\n self.check_stack(pyfunc, cfunc, (a, b, b))\n # 2d\n a = np.arange(12).reshape((3, 4))\n b = a + 100\n self.check_stack(pyfunc, cfunc, (a, b, b))\n\n def test_column_stack(self):\n pyfunc = np_column_stack\n cfunc = nrtjit(pyfunc)\n\n a = np.arange(4)\n b = a + 10\n c = np.arange(12).reshape((4, 3))\n self.check_stack(pyfunc, cfunc, (a, b, c))\n\n # Exceptions leak references\n self.assert_no_memory_leak()\n self.disable_leak_check()\n\n # Invalid dims\n a = np.array(42)\n with self.assertTypingError():\n cfunc((a, a, a))\n a = a.reshape((1, 1, 1))\n with 
self.assertTypingError():\n cfunc((a, a, a))\n\n\ndef benchmark_refct_speed():\n def pyfunc(x, y, t):\n \"\"\"Swap array x and y for t number of times\n \"\"\"\n for i in range(t):\n x, y = y, x\n return x, y\n\n cfunc = nrtjit(pyfunc)\n\n x = np.random.random(100)\n y = np.random.random(100)\n t = 10000\n\n def bench_pyfunc():\n pyfunc(x, y, t)\n\n def bench_cfunc():\n cfunc(x, y, t)\n\n python_time = utils.benchmark(bench_pyfunc)\n numba_time = utils.benchmark(bench_cfunc)\n print(python_time)\n print(numba_time)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] |
[
[
"numpy.diag",
"numpy.complex128",
"numpy.linspace",
"numpy.vstack",
"numpy.dtype",
"numpy.concatenate",
"numpy.random.randint",
"numpy.complex64",
"numpy.hstack",
"numpy.testing.assert_equal",
"numpy.arange",
"numpy.empty_like",
"numpy.eye",
"numpy.int8",
"numpy.stack",
"numpy.full",
"numpy.column_stack",
"numpy.zeros",
"numpy.full_like",
"numpy.int64",
"numpy.identity",
"numpy.transpose",
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.random.random",
"numpy.intp",
"numpy.int32",
"numpy.dstack",
"numpy.ones",
"numpy.int16",
"numpy.float64",
"numpy.add",
"numpy.empty"
]
] |
Santidb/CS224U-Natural-Language-Understanding
|
[
"5fafc491ae46f6cb10bc5b0caaefcd98126ed78b"
] |
[
"utils.py"
] |
[
"from collections import Counter\r\nimport csv\r\nimport logging\r\nimport numpy as np\r\nimport pandas as pd\r\nimport random\r\nfrom scipy import stats\r\nfrom sklearn.base import TransformerMixin\r\nfrom sklearn.metrics import f1_score\r\nfrom sklearn.model_selection import GridSearchCV, StratifiedShuffleSplit\r\nimport sys\r\nimport os\r\n\r\n__author__ = \"Christopher Potts\"\r\n__version__ = \"CS224u, Stanford, Spring 2021\"\r\n\r\n\r\nSTART_SYMBOL = \"<s>\"\r\nEND_SYMBOL = \"</s>\"\r\nUNK_SYMBOL = \"$UNK\"\r\n\r\n\r\ndef glove2dict(src_filename):\r\n \"\"\"\r\n GloVe vectors file reader.\r\n\r\n Parameters\r\n ----------\r\n src_filename : str\r\n Full path to the GloVe file to be processed.\r\n\r\n Returns\r\n -------\r\n dict\r\n Mapping words to their GloVe vectors as `np.array`.\r\n\r\n \"\"\"\r\n # This distribution has some words with spaces, so we have to\r\n # assume its dimensionality and parse out the lines specially:\r\n if '840B.300d' in src_filename:\r\n line_parser = lambda line: line.rsplit(\" \", 300)\r\n else:\r\n line_parser = lambda line: line.strip().split()\r\n data = {}\r\n with open(src_filename, encoding='utf8') as f:\r\n while True:\r\n try:\r\n line = next(f)\r\n line = line_parser(line)\r\n data[line[0]] = np.array(line[1: ], dtype=np.float)\r\n except StopIteration:\r\n break\r\n except UnicodeDecodeError:\r\n pass\r\n return data\r\n\r\n\r\ndef d_tanh(z):\r\n \"\"\"\r\n The derivative of np.tanh. z should be a float or np.array.\r\n\r\n \"\"\"\r\n return 1.0 - z**2\r\n\r\n\r\ndef softmax(z):\r\n \"\"\"\r\n Softmax activation function. z should be a float or np.array.\r\n\r\n \"\"\"\r\n # Increases numerical stability:\r\n t = np.exp(z - np.max(z))\r\n return t / np.sum(t)\r\n\r\n\r\ndef relu(z):\r\n return np.maximum(0, z)\r\n\r\n\r\ndef d_relu(z):\r\n return np.where(z > 0, 1, 0)\r\n\r\n\r\ndef randvec(n=50, lower=-0.5, upper=0.5):\r\n \"\"\"\r\n Returns a random vector of length `n`. `w` is ignored.\r\n\r\n \"\"\"\r\n return np.array([random.uniform(lower, upper) for i in range(n)])\r\n\r\n\r\ndef randmatrix(m, n, lower=-0.5, upper=0.5):\r\n \"\"\"\r\n Creates an m x n matrix of random values in [lower, upper].\r\n\r\n \"\"\"\r\n return np.array([random.uniform(lower, upper) for i in range(m*n)]).reshape(m, n)\r\n\r\n\r\ndef safe_macro_f1(y, y_pred):\r\n \"\"\"\r\n Macro-averaged F1, forcing `sklearn` to report as a multiclass\r\n problem even when there are just two classes. `y` is the list of\r\n gold labels and `y_pred` is the list of predicted labels.\r\n\r\n \"\"\"\r\n return f1_score(y, y_pred, average='macro', pos_label=None)\r\n\r\n\r\ndef progress_bar(msg, verbose=True):\r\n \"\"\"\r\n Simple over-writing progress bar.\r\n\r\n \"\"\"\r\n if verbose:\r\n sys.stderr.write('\\r')\r\n sys.stderr.write(msg)\r\n sys.stderr.flush()\r\n\r\n\r\ndef log_of_array_ignoring_zeros(M):\r\n \"\"\"\r\n Returns an array containing the logs of the nonzero\r\n elements of M. 
Zeros are left alone since log(0) isn't\r\n defined.\r\n\r\n \"\"\"\r\n log_M = M.copy()\r\n mask = log_M > 0\r\n log_M[mask] = np.log(log_M[mask])\r\n return log_M\r\n\r\n\r\ndef mcnemar(y_true, pred_a, pred_b):\r\n \"\"\"\r\n McNemar's test using the chi2 distribution.\r\n\r\n Parameters\r\n ----------\r\n y_true : list of actual labels\r\n\r\n pred_a, pred_b : lists\r\n Predictions from the two systems being evaluated.\r\n Assumed to have the same length as `y_true`.\r\n\r\n Returns\r\n -------\r\n float, float (the test statistic and p value)\r\n\r\n \"\"\"\r\n c01 = 0\r\n c10 = 0\r\n for y, a, b in zip(y_true, pred_a, pred_b):\r\n if a == y and b != y:\r\n c01 += 1\r\n elif a != y and b == y:\r\n c10 += 1\r\n stat = ((np.abs(c10 - c01) - 1.0)**2) / (c10 + c01)\r\n df = 1\r\n pval = stats.chi2.sf(stat, df)\r\n return stat, pval\r\n\r\n\r\ndef fit_classifier_with_hyperparameter_search(\r\n X, y, basemod, cv, param_grid, scoring='f1_macro', verbose=True):\r\n \"\"\"\r\n Fit a classifier with hyperparameters set via cross-validation.\r\n\r\n Parameters\r\n ----------\r\n X : 2d np.array\r\n The matrix of features, one example per row.\r\n\r\n y : list\r\n The list of labels for rows in `X`.\r\n\r\n basemod : an sklearn model class instance\r\n This is the basic model-type we'll be optimizing.\r\n\r\n cv : int or an sklearn Splitter\r\n Number of cross-validation folds, or the object used to define\r\n the splits. For example, where there is a predefeined train/dev\r\n split one wants to use, one can feed in a `PredefinedSplitter`\r\n instance to use that split during cross-validation.\r\n\r\n param_grid : dict\r\n A dict whose keys name appropriate parameters for `basemod` and\r\n whose values are lists of values to try.\r\n\r\n scoring : value to optimize for (default: f1_macro)\r\n Other options include 'accuracy' and 'f1_micro'. 
See\r\n http://scikit-learn.org/stable/modules/model_evaluation.html#scoring-parameter\r\n\r\n verbose : bool\r\n Whether to print some summary information to standard output.\r\n\r\n Prints\r\n ------\r\n To standard output (if `verbose=True`)\r\n The best parameters found.\r\n The best macro F1 score obtained.\r\n\r\n Returns\r\n -------\r\n An instance of the same class as `basemod`.\r\n A trained model instance, the best model found.\r\n\r\n \"\"\"\r\n if isinstance(cv, int):\r\n cv = StratifiedShuffleSplit(n_splits=cv, test_size=0.20)\r\n # Find the best model within param_grid:\r\n crossvalidator = GridSearchCV(basemod, param_grid, cv=cv, scoring=scoring)\r\n crossvalidator.fit(X, y)\r\n # Report some information:\r\n if verbose:\r\n print(\"Best params: {}\".format(crossvalidator.best_params_))\r\n print(\"Best score: {0:0.03f}\".format(crossvalidator.best_score_))\r\n # Return the best model found:\r\n return crossvalidator.best_estimator_\r\n\r\n\r\ndef get_vocab(X, n_words=None, mincount=1):\r\n \"\"\"\r\n Get the vocabulary for an RNN example matrix `X`, adding $UNK$ if\r\n it isn't already present.\r\n\r\n Parameters\r\n ----------\r\n X : list of lists of str\r\n\r\n n_words : int or None\r\n If this is `int > 0`, keep only the top `n_words` by frequency.\r\n\r\n mincount : int\r\n Only words with at least this many tokens are kept.\r\n\r\n Returns\r\n -------\r\n list of str\r\n\r\n \"\"\"\r\n wc = Counter([w for ex in X for w in ex])\r\n wc = wc.most_common(n_words) if n_words else wc.items()\r\n if mincount > 1:\r\n wc = {(w, c) for w, c in wc if c >= mincount}\r\n vocab = {w for w, _ in wc}\r\n vocab.add(\"$UNK\")\r\n return sorted(vocab)\r\n\r\n\r\ndef create_pretrained_embedding(\r\n lookup, vocab, required_tokens=('$UNK', \"<s>\", \"</s>\")):\r\n \"\"\"\r\n Create an embedding matrix from a lookup and a specified vocab.\r\n Words from `vocab` that are not in `lookup` are given random\r\n representations.\r\n\r\n Parameters\r\n ----------\r\n lookup : dict\r\n Must map words to their vector representations.\r\n\r\n vocab : list of str\r\n Words to create embeddings for.\r\n\r\n required_tokens : tuple of str\r\n Tokens that must have embeddings. 
If they are not available\r\n in the look-up, they will be given random representations.\r\n\r\n Returns\r\n -------\r\n np.array, list\r\n The np.array is an embedding for `vocab` and the `list` is\r\n the potentially expanded version of `vocab` that came in.\r\n\r\n \"\"\"\r\n dim = len(next(iter(lookup.values())))\r\n embedding = np.array([lookup.get(w, randvec(dim)) for w in vocab])\r\n for tok in required_tokens:\r\n if tok not in vocab:\r\n vocab.append(tok)\r\n embedding = np.vstack((embedding, randvec(dim)))\r\n return embedding, vocab\r\n\r\n\r\ndef fix_random_seeds(\r\n seed=42,\r\n set_system=True,\r\n set_torch=True,\r\n set_tensorflow=False,\r\n set_torch_cudnn=True):\r\n \"\"\"\r\n Fix random seeds for reproducibility.\r\n\r\n Parameters\r\n ----------\r\n seed : int\r\n Random seed to be set.\r\n\r\n set_system : bool\r\n Whether to set `np.random.seed(seed)` and `random.seed(seed)`\r\n\r\n set_tensorflow : bool\r\n Whether to set `tf.random.set_random_seed(seed)`\r\n\r\n set_torch : bool\r\n Whether to set `torch.manual_seed(seed)`\r\n\r\n set_torch_cudnn: bool\r\n Flag for whether to enable cudnn deterministic mode.\r\n Note that deterministic mode can have a performance impact,\r\n depending on your model.\r\n https://pytorch.org/docs/stable/notes/randomness.html\r\n\r\n Notes\r\n -----\r\n The function checks that PyTorch and TensorFlow are installed\r\n where the user asks to set seeds for them. If they are not\r\n installed, the seed-setting instruction is ignored. The intention\r\n is to make it easier to use this function in environments that lack\r\n one or both of these libraries.\r\n\r\n Even though the random seeds are explicitly set,\r\n the behavior may still not be deterministic (especially when a\r\n GPU is enabled), due to:\r\n\r\n * CUDA: There are some PyTorch functions that use CUDA functions\r\n that can be a source of non-determinism:\r\n https://pytorch.org/docs/stable/notes/randomness.html\r\n\r\n * PYTHONHASHSEED: On Python 3.3 and greater, hash randomization is\r\n turned on by default. This seed could be fixed before calling the\r\n python interpreter (PYTHONHASHSEED=0 python test.py). However, it\r\n seems impossible to set it inside the python program:\r\n https://stackoverflow.com/questions/30585108/disable-hash-randomization-from-within-python-program\r\n\r\n \"\"\"\r\n # set system seed\r\n if set_system:\r\n np.random.seed(seed)\r\n random.seed(seed)\r\n\r\n # set torch seed\r\n if set_torch:\r\n try:\r\n import torch\r\n except ImportError:\r\n pass\r\n else:\r\n torch.manual_seed(seed)\r\n\r\n # set torch cudnn backend\r\n if set_torch_cudnn:\r\n try:\r\n import torch\r\n except ImportError:\r\n pass\r\n else:\r\n torch.backends.cudnn.deterministic = True\r\n torch.backends.cudnn.benchmark = False\r\n\r\n # set tf seed\r\n if set_tensorflow:\r\n try:\r\n from tensorflow.compat.v1 import set_random_seed as set_tf_seed\r\n except ImportError:\r\n from tensorflow.random import set_seed as set_tf_seed\r\n except ImportError:\r\n pass\r\n else:\r\n set_tf_seed(seed)\r\n\r\n\r\nclass DenseTransformer(TransformerMixin):\r\n \"\"\"\r\n From\r\n\r\n http://zacstewart.com/2014/08/05/pipelines-of-featureunions-of-pipelines.html\r\n\r\n Some sklearn methods return sparse matrices that don't interact\r\n well with estimators that expect dense arrays or regular iterables\r\n as inputs. This little class helps manage that. 
Especially useful\r\n in the context of Pipelines.\r\n\r\n \"\"\"\r\n def fit(self, X, y=None, **fit_params):\r\n return self\r\n\r\n def transform(self, X, y=None, **fit_params):\r\n return X.todense()\r\n\r\n def fit_transform(self, X, y=None, **fit_params):\r\n self.fit(X, y, **fit_params)\r\n return self.transform(X)\r\n"
] |
[
[
"numpy.array",
"numpy.log",
"sklearn.model_selection.GridSearchCV",
"numpy.maximum",
"numpy.sum",
"numpy.random.seed",
"numpy.abs",
"torch.manual_seed",
"numpy.max",
"sklearn.metrics.f1_score",
"sklearn.model_selection.StratifiedShuffleSplit",
"numpy.where",
"scipy.stats.chi2.sf",
"tensorflow.random.set_seed"
]
] |
seibs/ibis-bigquery
|
[
"86594aa938d1dbfe80c4f7598085c49f4b817e69"
] |
[
"tests/system/udf/test_udf_execute.py"
] |
[
"import os\n\nimport ibis\nimport ibis.expr.datatypes as dt\nimport pandas as pd\nimport pandas.testing as tm\nimport pytest\nfrom pytest import param\n\nimport ibis_bigquery\nfrom ibis_bigquery import udf # noqa: E402\n\nPROJECT_ID = os.environ.get(\"GOOGLE_BIGQUERY_PROJECT_ID\", \"ibis-gbq\")\nDATASET_ID = \"testing\"\n\nbq_backend = ibis_bigquery.Backend()\n\n\n@pytest.fixture(scope=\"module\")\ndef alltypes(client):\n t = client.table(\"functional_alltypes\")\n expr = t[t.bigint_col.isin([10, 20])].limit(10)\n return expr\n\n\n@pytest.fixture(scope=\"module\")\ndef df(alltypes):\n return alltypes.execute()\n\n\ndef test_udf(client, alltypes, df):\n @udf(input_type=[dt.double, dt.double], output_type=dt.double)\n def my_add(a, b):\n return a + b\n\n expr = my_add(alltypes.double_col, alltypes.double_col)\n result = expr.execute()\n assert not result.empty\n\n expected = (df.double_col + df.double_col).rename(\"tmp\")\n tm.assert_series_equal(\n result.value_counts().sort_index(), expected.value_counts().sort_index(),\n )\n\n\ndef test_udf_with_struct(client, alltypes, df):\n @udf(\n input_type=[dt.double, dt.double],\n output_type=dt.Struct.from_tuples(\n [(\"width\", dt.double), (\"height\", dt.double)]\n ),\n )\n def my_struct_thing(a, b):\n class Rectangle:\n def __init__(self, width, height):\n self.width = width\n self.height = height\n\n return Rectangle(a, b)\n\n assert (\n my_struct_thing.js\n == '''\\\nCREATE TEMPORARY FUNCTION my_struct_thing_0(a FLOAT64, b FLOAT64)\nRETURNS STRUCT<width FLOAT64, height FLOAT64>\nLANGUAGE js AS \"\"\"\n'use strict';\nfunction my_struct_thing(a, b) {\n class Rectangle {\n constructor(width, height) {\n this.width = width;\n this.height = height;\n }\n }\n return (new Rectangle(a, b));\n}\nreturn my_struct_thing(a, b);\n\"\"\";'''\n )\n\n expr = my_struct_thing(alltypes.double_col, alltypes.double_col)\n result = expr.execute()\n assert not result.empty\n\n expected = pd.Series([{\"width\": c, \"height\": c} for c in df.double_col], name=\"tmp\")\n tm.assert_series_equal(result, expected)\n\n\ndef test_udf_compose(client, alltypes, df):\n @udf([dt.double], dt.double)\n def add_one(x):\n return x + 1.0\n\n @udf([dt.double], dt.double)\n def times_two(x):\n return x * 2.0\n\n t = alltypes\n expr = times_two(add_one(t.double_col))\n result = expr.execute()\n expected = ((df.double_col + 1.0) * 2.0).rename(\"tmp\")\n tm.assert_series_equal(result, expected)\n\n\ndef test_udf_scalar(client):\n @udf([dt.double, dt.double], dt.double)\n def my_add(x, y):\n return x + y\n\n expr = my_add(1, 2)\n result = client.execute(expr)\n assert result == 3\n\n\ndef test_multiple_calls_has_one_definition(client):\n @udf([dt.string], dt.double)\n def my_str_len(s):\n return s.length\n\n s = ibis.literal(\"abcd\")\n expr = my_str_len(s) + my_str_len(s)\n sql = client.compile(expr)\n expected = '''\\\nCREATE TEMPORARY FUNCTION my_str_len_0(s STRING)\nRETURNS FLOAT64\nLANGUAGE js AS \"\"\"\n'use strict';\nfunction my_str_len(s) {\n return s.length;\n}\nreturn my_str_len(s);\n\"\"\";\n\nSELECT my_str_len_0('abcd') + my_str_len_0('abcd') AS `tmp`'''\n assert sql == expected\n result = client.execute(expr)\n assert result == 8.0\n\n\ndef test_udf_libraries(client):\n @udf(\n [dt.Array(dt.string)],\n dt.double,\n # whatever symbols are exported in the library are visible inside the\n # UDF, in this case lodash defines _ and we use that here\n libraries=[\"gs://ibis-testing-libraries/lodash.min.js\"],\n )\n def string_length(strings):\n return _.sum(_.map(strings, lambda x: 
x.length)) # noqa: F821\n\n raw_data = [\"aaa\", \"bb\", \"c\"]\n data = ibis.literal(raw_data)\n expr = string_length(data)\n result = client.execute(expr)\n expected = sum(map(len, raw_data))\n assert result == expected\n\n\ndef test_udf_with_len(client):\n @udf([dt.string], dt.double)\n def my_str_len(x):\n return len(x)\n\n @udf([dt.Array(dt.string)], dt.double)\n def my_array_len(x):\n return len(x)\n\n assert client.execute(my_str_len(\"aaa\")) == 3\n assert client.execute(my_array_len([\"aaa\", \"bb\"])) == 2\n\n\ndef test_multiple_calls_redefinition(client):\n @udf([dt.string], dt.double)\n def my_len(s):\n return s.length\n\n s = ibis.literal(\"abcd\")\n expr = my_len(s) + my_len(s)\n\n @udf([dt.string], dt.double)\n def my_len(s):\n return s.length + 1\n\n expr = expr + my_len(s)\n\n sql = client.compile(expr)\n expected = '''\\\nCREATE TEMPORARY FUNCTION my_len_0(s STRING)\nRETURNS FLOAT64\nLANGUAGE js AS \"\"\"\n'use strict';\nfunction my_len(s) {\n return s.length;\n}\nreturn my_len(s);\n\"\"\";\n\nCREATE TEMPORARY FUNCTION my_len_1(s STRING)\nRETURNS FLOAT64\nLANGUAGE js AS \"\"\"\n'use strict';\nfunction my_len(s) {\n return (s.length + 1);\n}\nreturn my_len(s);\n\"\"\";\n\nSELECT (my_len_0('abcd') + my_len_0('abcd')) + my_len_1('abcd') AS `tmp`'''\n assert sql == expected\n\n\n@pytest.mark.parametrize(\n (\"argument_type\", \"return_type\"),\n [\n param(dt.int64, dt.float64, marks=pytest.mark.xfail(raises=TypeError)),\n param(dt.float64, dt.int64, marks=pytest.mark.xfail(raises=TypeError)),\n # complex argument type, valid return type\n param(\n dt.Array(dt.int64), dt.float64, marks=pytest.mark.xfail(raises=TypeError),\n ),\n # valid argument type, complex invalid return type\n param(\n dt.float64, dt.Array(dt.int64), marks=pytest.mark.xfail(raises=TypeError),\n ),\n # both invalid\n param(\n dt.Array(dt.Array(dt.int64)),\n dt.int64,\n marks=pytest.mark.xfail(raises=TypeError),\n ),\n # struct type with nested integer, valid return type\n param(\n dt.Struct.from_tuples([(\"x\", dt.Array(dt.int64))]),\n dt.float64,\n marks=pytest.mark.xfail(raises=TypeError),\n ),\n ],\n)\ndef test_udf_int64(client, argument_type, return_type):\n # invalid argument type, valid return type\n @udf([argument_type], return_type)\n def my_int64_add(x):\n return 1.0\n"
] |
[
[
"pandas.testing.assert_series_equal",
"pandas.Series"
]
] |
ghliu/10703_HW1
|
[
"61e1180a97e601128a6ea37e693b359f2af8929e"
] |
[
"build/lib.linux-x86_64-2.7/deeprl_hw1/rl.py"
] |
[
"# coding: utf-8\nfrom __future__ import division, absolute_import\nfrom __future__ import print_function, unicode_literals\n\nimport numpy as np\n\n\ndef evaluate_policy(env, gamma, policy, max_iterations=int(1e3), tol=1e-3):\n \"\"\"Evaluate the value of a policy.\n\n See page 87 (pg 105 pdf) of the Sutton and Barto Second Edition\n book.\n\n http://webdocs.cs.ualberta.ca/~sutton/book/bookdraft2016sep.pdf\n\n Parameters\n ----------\n env: gym.core.Environment\n The environment to compute value iteration for. Must have nS,\n nA, and P as attributes.\n gamma: float\n Discount factor, must be in range [0, 1)\n policy: np.array\n The policy to evaluate. Maps states to actions.\n max_iterations: int\n The maximum number of iterations to run before stopping.\n tol: float\n Determines when value function has converged.\n\n Returns\n -------\n np.ndarray\n The value for the given policy\n \"\"\"\n return np.zeros(env.nS)\n\n\ndef value_function_to_policy(env, gamma, value_function):\n \"\"\"Output action numbers for each state in value_function.\n\n Parameters\n ----------\n env: gym.core.Environment\n Environment to compute policy for. Must have nS, nA, and P as\n attributes.\n gamma: float\n Discount factor. Number in range [0, 1)\n value_function: np.ndarray\n Value of each state.\n\n Returns\n -------\n np.ndarray\n An array of integers. Each integer is the optimal action to take\n in that state according to the environment dynamics and the\n given value function.\n \"\"\" \n return np.zeros(env.nS, dtype='int')\n\n\ndef improve_policy(env, gamma, value_func, policy):\n \"\"\"Given a policy and value function improve the policy.\n\n See page 87 (pg 105 pdf) of the Sutton and Barto Second Edition\n book.\n\n http://webdocs.cs.ualberta.ca/~sutton/book/bookdraft2016sep.pdf\n\n Parameters\n ----------\n env: gym.core.Environment\n The environment to compute value iteration for. Must have nS,\n nA, and P as attributes.\n gamma: float\n Discount factor, must be in range [0, 1)\n value_func: np.ndarray\n Value function for the given policy.\n policy: dict or np.array\n The policy to improve. Maps states to actions.\n max_iterations: int\n The maximum number of iterations to run before stopping.\n tol: float\n Determines when value function has converged.\n\n Returns\n -------\n bool, np.ndarray\n Returns true if policy changed. Also returns the new policy.\n \"\"\"\n return False, policy\n\n\ndef policy_iteration(env, gamma, max_iterations=int(1e3), tol=1e-3):\n \"\"\"Runs policy iteration.\n\n See page 87 (pg 105 pdf) of the Sutton and Barto Second Edition\n book.\n\n http://webdocs.cs.ualberta.ca/~sutton/book/bookdraft2016sep.pdf\n\n You should use the improve_policy and evaluate_policy methods to\n implement this method.\n\n Parameters\n ----------\n env: gym.core.Environment\n The environment to compute value iteration for. 
Must have nS,\n nA, and P as attributes.\n gamma: float\n Discount factor, must be in range [0, 1)\n max_iterations: int\n The maximum number of iterations to run before stopping.\n tol: float\n Determines when value function has converged.\n\n Returns\n -------\n (np.ndarray, np.ndarray, int, int)\n Returns optimal policy, value function, number of policy\n improvement iterations, and number of value iterations.\n \"\"\"\n policy = np.zeros(env.nS, dtype='int')\n value_func = np.zeros(env.nS)\n\n return policy, value_func, 0, 0\n\n\ndef value_iteration(env, gamma, max_iterations=int(1e3), tol=1e-3):\n \"\"\"Runs value iteration for a given gamma and environment.\n\n See page 90 (pg 108 pdf) of the Sutton and Barto Second Edition\n book.\n\n http://webdocs.cs.ualberta.ca/~sutton/book/bookdraft2016sep.pdf\n\n Parameters\n ----------\n env: gym.core.Environment\n The environment to compute value iteration for. Must have nS,\n nA, and P as attributes.\n gamma: float\n Discount factor, must be in range [0, 1)\n max_iterations: int\n The maximum number of iterations to run before stopping.\n tol: float\n Determines when value function has converged.\n\n Returns\n -------\n np.ndarray, iteration\n The value function and the number of iterations it took to converge.\n \"\"\"\n return np.zeros(env.nS), 0\n\n\ndef print_policy(policy, action_names):\n \"\"\"Print the policy in human-readable format.\n\n Parameters\n ----------\n policy: np.ndarray\n Array of state to action number mappings\n action_names: dict\n Mapping of action numbers to characters representing the action.\n \"\"\"\n str_policy = policy.astype('str')\n for action_num, action_name in action_names.items():\n np.place(str_policy, policy == action_num, action_name)\n\n print(str_policy)\n"
] |
[
[
"numpy.zeros",
"numpy.place"
]
] |
ispc-lab/ResRace
|
[
"2661b42de40a5fc3e782a0865116d18c0b20c7d2"
] |
[
"utils/functionswo.py"
] |
[
"import math\nimport random\nimport numpy as np\nimport keyboard\nimport sys\nimport tty\nimport termios\nimport time\n'''\ndef lidar2binary(lidar_obs, map_size=[250, 250]):\n p_augment = random.uniform(0, 1)\n\n binary_map = np.ones(map_size)\n center = [125, 125]\n binary_map[center[0]-5:center[0]+6, center[1]-6:center[1]+7] = 0.67\n counter = 0\n for i in range(675):\n if lidar_obs[i] > 5.0:\n continue\n else:\n angle = math.pi*(135+0.4*i)/180\n point_loc_x = int(center[0]+math.cos(angle)*round(lidar_obs[i], 3)*25)\n point_loc_y = int(center[1]-math.sin(angle)*round(lidar_obs[i], 3)*25)\n binary_map[point_loc_x-2:point_loc_x+3, point_loc_y-2:point_loc_y+3] = 0.33\n if p_augment > 0.667:\n grey_edge = int((1-p_augment) * 0.5 * 250)\n binary_map[:grey_edge, :] = 0\n binary_map[-grey_edge:, :] = 0\n elif p_augment < 0.333:\n grey_edge = int(p_augment * 0.5 * 250)\n binary_map[:, :grey_edge] = 0\n binary_map[:, -grey_edge:] = 0\n\n return np.clip(binary_map, 0, +1)\n'''\n\ndef dict2array(origin_data):\n value_list = []\n if type(origin_data) is np.ndarray:\n return origin_data\n else:\n for key, value in origin_data.items():\n value_list.append(value)\n item = np.array(value_list[0]).flatten()\n for i in range(1, len(value_list)):\n item = np.append(item, np.array(value_list[i]).flatten())\n return item\n\n\ndef modify_action(action_dict):\n act_array = dict2array(action_dict)\n # print(act_array.shape)\n return act_array\n\n\ndef modify_obs(obs_dict):\n vehicle_pose = obs_dict['pose'] # (x, y, z, r, p, y)\n vehicle_velocity = obs_dict['velocity'] # (vx, vy, vz, alpha_x, alpha_y, alpha_z)\n vehicle_accele = obs_dict['acceleration'] # (ax, ay, az, a_a_x, a_a_y, a_a_z)\n lidar_sensor = obs_dict['lidar'] # (1080 ray)\n\n roll_velocity, roll_accele = vehicle_velocity[-3], vehicle_accele[-3]\n pitch_velocity, pitch_accele = vehicle_velocity[-2], vehicle_accele[-2]\n yaw_velocity, yaw_accele = vehicle_velocity[-1], vehicle_accele[-1]\n \n abs_velocity = math.sqrt(vehicle_velocity[0]**2+vehicle_velocity[1]**2)\n abs_accele = math.sqrt(vehicle_accele[0]**2+vehicle_accele[1]**2)\n \n imu_sensor = np.array([roll_velocity, roll_accele, pitch_velocity, pitch_accele, yaw_velocity, yaw_accele, abs_velocity, abs_accele])\n output_obs = np.concatenate(([imu_sensor] + [lidar_sensor]))\n return output_obs\n\ndef Softmax_CL(demo_rew, agent_ep_rew):\n e_teacher, e_agent = math.exp((demo_rew+200)*0.005), math.exp(agent_ep_rew*0.005)\n teacher_power = (e_teacher)/(e_teacher+e_agent)\n agent_power = 1 - teacher_power\n return teacher_power, agent_power\n\n\ndef readchar():\n fd = sys.stdin.fileno()\n old_settings = termios.tcgetattr(fd)\n try:\n tty.setraw(sys.stdin.fileno())\n ch = sys.stdin.read(1)\n finally:\n termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n return ch\n\ndef readkey(getchar_fn=None):\n getchar = getchar_fn or readchar\n c1 = getchar()\n if ord(c1) != 0x1b:\n return c1\n c2 = getchar()\n if ord(c2) != 0x5b:\n return c1\n c3 = getchar()\n return chr(0x10 + ord(c3) - 65)\n\ndef keyboard_action(last_action):\n steering_flag, motor_flag = False, False\n left_sig, right_sig = keyboard.is_pressed('a'), keyboard.is_pressed('d')\n acc_sig, dea_sig = keyboard.is_pressed('w'), keyboard.is_pressed('s')\n\n if (left_sig and right_sig) or (not left_sig and not right_sig):\n steering_flag = False\n last_action[0] -= 0.25*last_action[0]\n else:\n steering_flag = True\n last_action[0] += (-0.05)*int(left_sig) + 0.05*int(right_sig)\n if (acc_sig and dea_sig) or (not acc_sig and not dea_sig):\n 
steering_flag = False\n last_action[1] -= 0.5*last_action[1]\n else:\n steering_flag = True\n last_action[1] += (-0.05)*int(dea_sig) + 0.05*int(acc_sig)\n\n last_action[0], last_action[1] = last_action[0].round(3), last_action[1].round(3)\n output_action = np.clip(last_action, -1, +1)\n time.sleep(0.016)\n return output_action"
] |
[
[
"numpy.concatenate",
"numpy.array",
"numpy.clip"
]
] |
ibrahimsoliman97/TF_TRACK
|
[
"29218702a697900ac98e448873b21e24f9c93db4"
] |
[
"research/object_detection/data_decoders/tf_example_decoder.py"
] |
[
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tensorflow Example proto decoder for object detection.\n\nA decoder to decode string tensors containing serialized tensorflow.Example\nprotos for object detection.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport enum\nimport numpy as np\nfrom six.moves import zip\nimport tensorflow.compat.v1 as tf\nfrom tf_slim import tfexample_decoder as slim_example_decoder\nfrom object_detection.core import data_decoder\nfrom object_detection.core import standard_fields as fields\nfrom object_detection.protos import input_reader_pb2\nfrom object_detection.utils import label_map_util\nfrom object_detection.utils import shape_utils\n\n# pylint: disable=g-import-not-at-top\ntry:\n from tensorflow.contrib import lookup as contrib_lookup\n\nexcept ImportError:\n # TF 2.0 doesn't ship with contrib.\n pass\n# pylint: enable=g-import-not-at-top\n\n_LABEL_OFFSET = 1\n\n\nclass Visibility(enum.Enum):\n \"\"\"Visibility definitions.\n\n This follows the MS Coco convention (http://cocodataset.org/#format-data).\n \"\"\"\n # Keypoint is not labeled.\n UNLABELED = 0\n # Keypoint is labeled but falls outside the object segment (e.g. occluded).\n NOT_VISIBLE = 1\n # Keypoint is labeled and visible.\n VISIBLE = 2\n\n\nclass _ClassTensorHandler(slim_example_decoder.Tensor):\n \"\"\"An ItemHandler to fetch class ids from class text.\"\"\"\n\n def __init__(self,\n tensor_key,\n label_map_proto_file,\n shape_keys=None,\n shape=None,\n default_value=''):\n \"\"\"Initializes the LookupTensor handler.\n\n Simply calls a vocabulary (most often, a label mapping) lookup.\n\n Args:\n tensor_key: the name of the `TFExample` feature to read the tensor from.\n label_map_proto_file: File path to a text format LabelMapProto message\n mapping class text to id.\n shape_keys: Optional name or list of names of the TF-Example feature in\n which the tensor shape is stored. If a list, then each corresponds to\n one dimension of the shape.\n shape: Optional output shape of the `Tensor`. 
If provided, the `Tensor` is\n reshaped accordingly.\n default_value: The value used when the `tensor_key` is not found in a\n particular `TFExample`.\n\n Raises:\n ValueError: if both `shape_keys` and `shape` are specified.\n \"\"\"\n name_to_id = label_map_util.get_label_map_dict(\n label_map_proto_file, use_display_name=False)\n # We use a default_value of -1, but we expect all labels to be contained\n # in the label map.\n try:\n # Dynamically try to load the tf v2 lookup, falling back to contrib\n lookup = tf.compat.v2.lookup\n hash_table_class = tf.compat.v2.lookup.StaticHashTable\n except AttributeError:\n lookup = contrib_lookup\n hash_table_class = contrib_lookup.HashTable\n name_to_id_table = hash_table_class(\n initializer=lookup.KeyValueTensorInitializer(\n keys=tf.constant(list(name_to_id.keys())),\n values=tf.constant(list(name_to_id.values()), dtype=tf.int64)),\n default_value=-1)\n display_name_to_id = label_map_util.get_label_map_dict(\n label_map_proto_file, use_display_name=True)\n # We use a default_value of -1, but we expect all labels to be contained\n # in the label map.\n display_name_to_id_table = hash_table_class(\n initializer=lookup.KeyValueTensorInitializer(\n keys=tf.constant(list(display_name_to_id.keys())),\n values=tf.constant(\n list(display_name_to_id.values()), dtype=tf.int64)),\n default_value=-1)\n\n self._name_to_id_table = name_to_id_table\n self._display_name_to_id_table = display_name_to_id_table\n super(_ClassTensorHandler, self).__init__(tensor_key, shape_keys, shape,\n default_value)\n\n def tensors_to_item(self, keys_to_tensors):\n unmapped_tensor = super(_ClassTensorHandler,\n self).tensors_to_item(keys_to_tensors)\n return tf.maximum(self._name_to_id_table.lookup(unmapped_tensor),\n self._display_name_to_id_table.lookup(unmapped_tensor))\n\n\nclass _BackupHandler(slim_example_decoder.ItemHandler):\n \"\"\"An ItemHandler that tries two ItemHandlers in order.\"\"\"\n\n def __init__(self, handler, backup):\n \"\"\"Initializes the BackupHandler handler.\n\n If the first Handler's tensors_to_item returns a Tensor with no elements,\n the second Handler is used.\n\n Args:\n handler: The primary ItemHandler.\n backup: The backup ItemHandler.\n\n Raises:\n ValueError: if either is not an ItemHandler.\n \"\"\"\n if not isinstance(handler, slim_example_decoder.ItemHandler):\n raise ValueError('Primary handler is of type %s instead of ItemHandler' %\n type(handler))\n if not isinstance(backup, slim_example_decoder.ItemHandler):\n raise ValueError(\n 'Backup handler is of type %s instead of ItemHandler' % type(backup))\n self._handler = handler\n self._backup = backup\n super(_BackupHandler, self).__init__(handler.keys + backup.keys)\n\n def tensors_to_item(self, keys_to_tensors):\n item = self._handler.tensors_to_item(keys_to_tensors)\n return tf.cond(\n pred=tf.equal(tf.reduce_prod(tf.shape(item)), 0),\n true_fn=lambda: self._backup.tensors_to_item(keys_to_tensors),\n false_fn=lambda: item)\n\n\nclass TfExampleDecoder(data_decoder.DataDecoder):\n \"\"\"Tensorflow Example proto decoder.\"\"\"\n\n def __init__(self,\n load_instance_masks=False,\n instance_mask_type=input_reader_pb2.NUMERICAL_MASKS,\n label_map_proto_file=None,\n use_display_name=False,\n dct_method='',\n num_keypoints=0,\n num_additional_channels=0,\n load_multiclass_scores=False,\n load_context_features=False,\n expand_hierarchy_labels=False,\n load_dense_pose=False):\n \"\"\"Constructor sets keys_to_features and items_to_handlers.\n\n Args:\n load_instance_masks: whether or not to load 
and handle instance masks.\n instance_mask_type: type of instance masks. Options are provided in\n input_reader.proto. This is only used if `load_instance_masks` is True.\n label_map_proto_file: a file path to a\n object_detection.protos.StringIntLabelMap proto. If provided, then the\n mapped IDs of 'image/object/class/text' will take precedence over the\n existing 'image/object/class/label' ID. Also, if provided, it is\n assumed that 'image/object/class/text' will be in the data.\n use_display_name: whether or not to use the `display_name` for label\n mapping (instead of `name`). Only used if label_map_proto_file is\n provided.\n dct_method: An optional string. Defaults to None. It only takes\n effect when image format is jpeg, used to specify a hint about the\n algorithm used for jpeg decompression. Currently valid values\n are ['INTEGER_FAST', 'INTEGER_ACCURATE']. The hint may be ignored, for\n example, the jpeg library does not have that specific option.\n num_keypoints: the number of keypoints per object.\n num_additional_channels: how many additional channels to use.\n load_multiclass_scores: Whether to load multiclass scores associated with\n boxes.\n load_context_features: Whether to load information from context_features,\n to provide additional context to a detection model for training and/or\n inference.\n expand_hierarchy_labels: Expands the object and image labels taking into\n account the provided hierarchy in the label_map_proto_file. For positive\n classes, the labels are extended to ancestor. For negative classes,\n the labels are expanded to descendants.\n load_dense_pose: Whether to load DensePose annotations.\n\n Raises:\n ValueError: If `instance_mask_type` option is not one of\n input_reader_pb2.DEFAULT, input_reader_pb2.NUMERICAL, or\n input_reader_pb2.PNG_MASKS.\n ValueError: If `expand_labels_hierarchy` is True, but the\n `label_map_proto_file` is not provided.\n \"\"\"\n # TODO(rathodv): delete unused `use_display_name` argument once we change\n # other decoders to handle label maps similarly.\n del use_display_name\n self.keys_to_features = {\n 'image/encoded':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/format':\n tf.FixedLenFeature((), tf.string, default_value='jpeg'),\n 'image/filename':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/key/sha256':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/source_id':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/height':\n tf.FixedLenFeature((), tf.int64, default_value=1),\n 'image/width':\n tf.FixedLenFeature((), tf.int64, default_value=1),\n # Image-level labels.\n 'image/class/text':\n tf.VarLenFeature(tf.string),\n 'image/class/label':\n tf.VarLenFeature(tf.int64),\n 'image/class/confidence':\n tf.VarLenFeature(tf.float32),\n # Object boxes and classes.\n 'image/object/bbox/xmin':\n tf.VarLenFeature(tf.float32),\n 'image/object/bbox/xmax':\n tf.VarLenFeature(tf.float32),\n 'image/object/bbox/ymin':\n tf.VarLenFeature(tf.float32),\n 'image/object/bbox/ymax':\n tf.VarLenFeature(tf.float32),\n 'image/object/class/label':\n tf.VarLenFeature(tf.int64),\n 'image/object/class/text':\n tf.VarLenFeature(tf.string),\n 'image/object/area':\n tf.VarLenFeature(tf.float32),\n 'image/object/is_crowd':\n tf.VarLenFeature(tf.int64),\n 'image/object/difficult':\n tf.VarLenFeature(tf.int64),\n 'image/object/group_of':\n tf.VarLenFeature(tf.int64),\n 'image/object/weight':\n tf.VarLenFeature(tf.float32),\n 'image/object/re_id':\n tf.VarLenFeature(tf.int64),\n\n }\n # 
We are checking `dct_method` instead of passing it directly in order to\n # ensure TF version 1.6 compatibility.\n if dct_method:\n image = slim_example_decoder.Image(\n image_key='image/encoded',\n format_key='image/format',\n channels=3,\n dct_method=dct_method)\n additional_channel_image = slim_example_decoder.Image(\n image_key='image/additional_channels/encoded',\n format_key='image/format',\n channels=1,\n repeated=True,\n dct_method=dct_method)\n else:\n image = slim_example_decoder.Image(\n image_key='image/encoded', format_key='image/format', channels=3)\n additional_channel_image = slim_example_decoder.Image(\n image_key='image/additional_channels/encoded',\n format_key='image/format',\n channels=1,\n repeated=True)\n self.items_to_handlers = {\n fields.InputDataFields.image:\n image,\n fields.InputDataFields.source_id: (\n slim_example_decoder.Tensor('image/source_id')),\n fields.InputDataFields.key: (\n slim_example_decoder.Tensor('image/key/sha256')),\n fields.InputDataFields.filename: (\n slim_example_decoder.Tensor('image/filename')),\n # Image-level labels.\n fields.InputDataFields.groundtruth_image_confidences: (\n slim_example_decoder.Tensor('image/class/confidence')),\n # Object boxes and classes.\n fields.InputDataFields.groundtruth_boxes: (\n slim_example_decoder.BoundingBox(['ymin', 'xmin', 'ymax', 'xmax'],\n 'image/object/bbox/')),\n fields.InputDataFields.groundtruth_area:\n slim_example_decoder.Tensor('image/object/area'),\n fields.InputDataFields.groundtruth_is_crowd: (\n slim_example_decoder.Tensor('image/object/is_crowd')),\n fields.InputDataFields.groundtruth_difficult: (\n slim_example_decoder.Tensor('image/object/difficult')),\n fields.InputDataFields.groundtruth_group_of: (\n slim_example_decoder.Tensor('image/object/group_of')),\n fields.InputDataFields.groundtruth_weights: (\n slim_example_decoder.Tensor('image/object/weight')),\n fields.InputDataFields.groundtruth_re_id: (\n slim_example_decoder.Tensor('image/object/re_id')),\n\n }\n if load_multiclass_scores:\n self.keys_to_features[\n 'image/object/class/multiclass_scores'] = tf.VarLenFeature(tf.float32)\n self.items_to_handlers[fields.InputDataFields.multiclass_scores] = (\n slim_example_decoder.Tensor('image/object/class/multiclass_scores'))\n\n if load_context_features:\n self.keys_to_features[\n 'image/context_features'] = tf.VarLenFeature(tf.float32)\n self.items_to_handlers[fields.InputDataFields.context_features] = (\n slim_example_decoder.ItemHandlerCallback(\n ['image/context_features', 'image/context_feature_length'],\n self._reshape_context_features))\n\n self.keys_to_features[\n 'image/context_feature_length'] = tf.FixedLenFeature((), tf.int64)\n self.items_to_handlers[fields.InputDataFields.context_feature_length] = (\n slim_example_decoder.Tensor('image/context_feature_length'))\n\n if num_additional_channels > 0:\n self.keys_to_features[\n 'image/additional_channels/encoded'] = tf.FixedLenFeature(\n (num_additional_channels,), tf.string)\n self.items_to_handlers[\n fields.InputDataFields.\n image_additional_channels] = additional_channel_image\n self._num_keypoints = num_keypoints\n if num_keypoints > 0:\n self.keys_to_features['image/object/keypoint/x'] = (\n tf.VarLenFeature(tf.float32))\n self.keys_to_features['image/object/keypoint/y'] = (\n tf.VarLenFeature(tf.float32))\n self.keys_to_features['image/object/keypoint/visibility'] = (\n tf.VarLenFeature(tf.int64))\n self.items_to_handlers[fields.InputDataFields.groundtruth_keypoints] = (\n slim_example_decoder.ItemHandlerCallback(\n 
['image/object/keypoint/y', 'image/object/keypoint/x'],\n self._reshape_keypoints))\n kpt_vis_field = fields.InputDataFields.groundtruth_keypoint_visibilities\n self.items_to_handlers[kpt_vis_field] = (\n slim_example_decoder.ItemHandlerCallback(\n ['image/object/keypoint/x', 'image/object/keypoint/visibility'],\n self._reshape_keypoint_visibilities))\n if load_instance_masks:\n if instance_mask_type in (input_reader_pb2.DEFAULT,\n input_reader_pb2.NUMERICAL_MASKS):\n self.keys_to_features['image/object/mask'] = (\n tf.VarLenFeature(tf.float32))\n self.items_to_handlers[\n fields.InputDataFields.groundtruth_instance_masks] = (\n slim_example_decoder.ItemHandlerCallback(\n ['image/object/mask', 'image/height', 'image/width'],\n self._reshape_instance_masks))\n elif instance_mask_type == input_reader_pb2.PNG_MASKS:\n self.keys_to_features['image/object/mask'] = tf.VarLenFeature(tf.string)\n self.items_to_handlers[\n fields.InputDataFields.groundtruth_instance_masks] = (\n slim_example_decoder.ItemHandlerCallback(\n ['image/object/mask', 'image/height', 'image/width'],\n self._decode_png_instance_masks))\n else:\n raise ValueError('Did not recognize the `instance_mask_type` option.')\n if load_dense_pose:\n self.keys_to_features['image/object/densepose/num'] = (\n tf.VarLenFeature(tf.int64))\n self.keys_to_features['image/object/densepose/part_index'] = (\n tf.VarLenFeature(tf.int64))\n self.keys_to_features['image/object/densepose/x'] = (\n tf.VarLenFeature(tf.float32))\n self.keys_to_features['image/object/densepose/y'] = (\n tf.VarLenFeature(tf.float32))\n self.keys_to_features['image/object/densepose/u'] = (\n tf.VarLenFeature(tf.float32))\n self.keys_to_features['image/object/densepose/v'] = (\n tf.VarLenFeature(tf.float32))\n self.items_to_handlers[\n fields.InputDataFields.groundtruth_dp_num_points] = (\n slim_example_decoder.Tensor('image/object/densepose/num'))\n self.items_to_handlers[fields.InputDataFields.groundtruth_dp_part_ids] = (\n slim_example_decoder.ItemHandlerCallback(\n ['image/object/densepose/part_index',\n 'image/object/densepose/num'], self._dense_pose_part_indices))\n self.items_to_handlers[\n fields.InputDataFields.groundtruth_dp_surface_coords] = (\n slim_example_decoder.ItemHandlerCallback(\n ['image/object/densepose/x', 'image/object/densepose/y',\n 'image/object/densepose/u', 'image/object/densepose/v',\n 'image/object/densepose/num'],\n self._dense_pose_surface_coordinates))\n\n if label_map_proto_file:\n # If the label_map_proto is provided, try to use it in conjunction with\n # the class text, and fall back to a materialized ID.\n label_handler = _BackupHandler(\n _ClassTensorHandler(\n 'image/object/class/text', label_map_proto_file,\n default_value=''),\n slim_example_decoder.Tensor('image/object/class/label'))\n image_label_handler = _BackupHandler(\n _ClassTensorHandler(\n fields.TfExampleFields.image_class_text,\n label_map_proto_file,\n default_value=''),\n slim_example_decoder.Tensor(fields.TfExampleFields.image_class_label))\n else:\n label_handler = slim_example_decoder.Tensor('image/object/class/label')\n image_label_handler = slim_example_decoder.Tensor(\n fields.TfExampleFields.image_class_label)\n self.items_to_handlers[\n fields.InputDataFields.groundtruth_classes] = label_handler\n self.items_to_handlers[\n fields.InputDataFields.groundtruth_image_classes] = image_label_handler\n\n self._expand_hierarchy_labels = expand_hierarchy_labels\n self._ancestors_lut = None\n self._descendants_lut = None\n if expand_hierarchy_labels:\n if 
label_map_proto_file:\n ancestors_lut, descendants_lut = (\n label_map_util.get_label_map_hierarchy_lut(label_map_proto_file,\n True))\n self._ancestors_lut = tf.constant(ancestors_lut, dtype=tf.int64)\n self._descendants_lut = tf.constant(descendants_lut, dtype=tf.int64)\n else:\n raise ValueError('In order to expand labels, the label_map_proto_file '\n 'has to be provided.')\n\n def decode(self, tf_example_string_tensor):\n \"\"\"Decodes serialized tensorflow example and returns a tensor dictionary.\n\n Args:\n tf_example_string_tensor: a string tensor holding a serialized tensorflow\n example proto.\n\n Returns:\n A dictionary of the following tensors.\n fields.InputDataFields.image - 3D uint8 tensor of shape [None, None, 3]\n containing image.\n fields.InputDataFields.original_image_spatial_shape - 1D int32 tensor of\n shape [2] containing shape of the image.\n fields.InputDataFields.source_id - string tensor containing original\n image id.\n fields.InputDataFields.key - string tensor with unique sha256 hash key.\n fields.InputDataFields.filename - string tensor with original dataset\n filename.\n fields.InputDataFields.groundtruth_boxes - 2D float32 tensor of shape\n [None, 4] containing box corners.\n fields.InputDataFields.groundtruth_classes - 1D int64 tensor of shape\n [None] containing classes for the boxes.\n fields.InputDataFields.groundtruth_weights - 1D float32 tensor of\n shape [None] indicating the weights of groundtruth boxes.\n fields.InputDataFields.groundtruth_area - 1D float32 tensor of shape\n [None] containing containing object mask area in pixel squared.\n fields.InputDataFields.groundtruth_is_crowd - 1D bool tensor of shape\n [None] indicating if the boxes enclose a crowd.\n\n Optional:\n fields.InputDataFields.groundtruth_image_confidences - 1D float tensor of\n shape [None] indicating if a class is present in the image (1.0) or\n a class is not present in the image (0.0).\n fields.InputDataFields.image_additional_channels - 3D uint8 tensor of\n shape [None, None, num_additional_channels]. 
1st dim is height; 2nd dim\n is width; 3rd dim is the number of additional channels.\n fields.InputDataFields.groundtruth_difficult - 1D bool tensor of shape\n [None] indicating if the boxes represent `difficult` instances.\n fields.InputDataFields.groundtruth_group_of - 1D bool tensor of shape\n [None] indicating if the boxes represent `group_of` instances.\n fields.InputDataFields.groundtruth_keypoints - 3D float32 tensor of\n shape [None, num_keypoints, 2] containing keypoints, where the\n coordinates of the keypoints are ordered (y, x).\n fields.InputDataFields.groundtruth_keypoint_visibilities - 2D bool\n tensor of shape [None, num_keypoints] containing keypoint visibilites.\n fields.InputDataFields.groundtruth_instance_masks - 3D float32 tensor of\n shape [None, None, None] containing instance masks.\n fields.InputDataFields.groundtruth_image_classes - 1D int64 of shape\n [None] containing classes for the boxes.\n fields.InputDataFields.multiclass_scores - 1D float32 tensor of shape\n [None * num_classes] containing flattened multiclass scores for\n groundtruth boxes.\n fields.InputDataFields.context_features - 1D float32 tensor of shape\n [context_feature_length * num_context_features]\n fields.InputDataFields.context_feature_length - int32 tensor specifying\n the length of each feature in context_features\n \"\"\"\n serialized_example = tf.reshape(tf_example_string_tensor, shape=[])\n decoder = slim_example_decoder.TFExampleDecoder(self.keys_to_features,\n self.items_to_handlers)\n keys = decoder.list_items()\n tensors = decoder.decode(serialized_example, items=keys)\n tensor_dict = dict(zip(keys, tensors))\n is_crowd = fields.InputDataFields.groundtruth_is_crowd\n tensor_dict[is_crowd] = tf.cast(tensor_dict[is_crowd], dtype=tf.bool)\n tensor_dict[fields.InputDataFields.image].set_shape([None, None, 3])\n tensor_dict[fields.InputDataFields.original_image_spatial_shape] = tf.shape(\n tensor_dict[fields.InputDataFields.image])[:2]\n\n if fields.InputDataFields.image_additional_channels in tensor_dict:\n channels = tensor_dict[fields.InputDataFields.image_additional_channels]\n channels = tf.squeeze(channels, axis=3)\n channels = tf.transpose(channels, perm=[1, 2, 0])\n tensor_dict[fields.InputDataFields.image_additional_channels] = channels\n\n def default_groundtruth_weights():\n return tf.ones(\n [tf.shape(tensor_dict[fields.InputDataFields.groundtruth_boxes])[0]],\n dtype=tf.float32)\n\n tensor_dict[fields.InputDataFields.groundtruth_weights] = tf.cond(\n tf.greater(\n tf.shape(\n tensor_dict[fields.InputDataFields.groundtruth_weights])[0],\n 0), lambda: tensor_dict[fields.InputDataFields.groundtruth_weights],\n default_groundtruth_weights)\n\n if fields.InputDataFields.groundtruth_keypoints in tensor_dict:\n # Set all keypoints that are not labeled to NaN.\n gt_kpt_fld = fields.InputDataFields.groundtruth_keypoints\n gt_kpt_vis_fld = fields.InputDataFields.groundtruth_keypoint_visibilities\n visibilities_tiled = tf.tile(\n tf.expand_dims(tensor_dict[gt_kpt_vis_fld], -1),\n [1, 1, 2])\n tensor_dict[gt_kpt_fld] = tf.where(\n visibilities_tiled,\n tensor_dict[gt_kpt_fld],\n np.nan * tf.ones_like(tensor_dict[gt_kpt_fld]))\n\n if self._expand_hierarchy_labels:\n input_fields = fields.InputDataFields\n image_classes, image_confidences = self._expand_image_label_hierarchy(\n tensor_dict[input_fields.groundtruth_image_classes],\n tensor_dict[input_fields.groundtruth_image_confidences])\n tensor_dict[input_fields.groundtruth_image_classes] = image_classes\n 
tensor_dict[input_fields.groundtruth_image_confidences] = (\n image_confidences)\n\n box_fields = [\n fields.InputDataFields.groundtruth_group_of,\n fields.InputDataFields.groundtruth_is_crowd,\n fields.InputDataFields.groundtruth_difficult,\n fields.InputDataFields.groundtruth_area,\n fields.InputDataFields.groundtruth_boxes,\n fields.InputDataFields.groundtruth_weights,\n fields.InputDataFields.groundtruth_re_id,\n ]\n\n def expand_field(field_name):\n return self._expansion_box_field_labels(\n tensor_dict[input_fields.groundtruth_classes],\n tensor_dict[field_name])\n\n # pylint: disable=cell-var-from-loop\n for field in box_fields:\n if field in tensor_dict:\n tensor_dict[field] = tf.cond(\n tf.size(tensor_dict[field]) > 0, lambda: expand_field(field),\n lambda: tensor_dict[field])\n # pylint: enable=cell-var-from-loop\n\n tensor_dict[input_fields.groundtruth_classes] = (\n self._expansion_box_field_labels(\n tensor_dict[input_fields.groundtruth_classes],\n tensor_dict[input_fields.groundtruth_classes], True))\n\n if fields.InputDataFields.groundtruth_group_of in tensor_dict:\n group_of = fields.InputDataFields.groundtruth_group_of\n tensor_dict[group_of] = tf.cast(tensor_dict[group_of], dtype=tf.bool)\n\n if fields.InputDataFields.groundtruth_dp_num_points in tensor_dict:\n tensor_dict[fields.InputDataFields.groundtruth_dp_num_points] = tf.cast(\n tensor_dict[fields.InputDataFields.groundtruth_dp_num_points],\n dtype=tf.int32)\n tensor_dict[fields.InputDataFields.groundtruth_dp_part_ids] = tf.cast(\n tensor_dict[fields.InputDataFields.groundtruth_dp_part_ids],\n dtype=tf.int32)\n\n return tensor_dict\n\n def _reshape_keypoints(self, keys_to_tensors):\n \"\"\"Reshape keypoints.\n\n The keypoints are reshaped to [num_instances, num_keypoints, 2].\n\n Args:\n keys_to_tensors: a dictionary from keys to tensors. Expected keys are:\n 'image/object/keypoint/x'\n 'image/object/keypoint/y'\n\n Returns:\n A 3-D float tensor of shape [num_instances, num_keypoints, 2] with values\n in [0, 1].\n \"\"\"\n y = keys_to_tensors['image/object/keypoint/y']\n if isinstance(y, tf.SparseTensor):\n y = tf.sparse_tensor_to_dense(y)\n y = tf.expand_dims(y, 1)\n x = keys_to_tensors['image/object/keypoint/x']\n if isinstance(x, tf.SparseTensor):\n x = tf.sparse_tensor_to_dense(x)\n x = tf.expand_dims(x, 1)\n keypoints = tf.concat([y, x], 1)\n keypoints = tf.reshape(keypoints, [-1, self._num_keypoints, 2])\n return keypoints\n\n def _reshape_keypoint_visibilities(self, keys_to_tensors):\n \"\"\"Reshape keypoint visibilities.\n\n The keypoint visibilities are reshaped to [num_instances,\n num_keypoints].\n\n The raw keypoint visibilities are expected to conform to the\n MSCoco definition. See Visibility enum.\n\n The returned boolean is True for the labeled case (either\n Visibility.NOT_VISIBLE or Visibility.VISIBLE). These are the same categories\n that COCO uses to evaluate keypoint detection performance:\n http://cocodataset.org/#keypoints-eval\n\n If image/object/keypoint/visibility is not provided, visibilities will be\n set to True for finite keypoint coordinate values, and 0 if the coordinates\n are NaN.\n\n Args:\n keys_to_tensors: a dictionary from keys to tensors. Expected keys are:\n 'image/object/keypoint/x'\n 'image/object/keypoint/visibility'\n\n Returns:\n A 2-D bool tensor of shape [num_instances, num_keypoints] with values\n in {0, 1}. 
1 if the keypoint is labeled, 0 otherwise.\n \"\"\"\n x = keys_to_tensors['image/object/keypoint/x']\n vis = keys_to_tensors['image/object/keypoint/visibility']\n if isinstance(vis, tf.SparseTensor):\n vis = tf.sparse_tensor_to_dense(vis)\n if isinstance(x, tf.SparseTensor):\n x = tf.sparse_tensor_to_dense(x)\n\n default_vis = tf.where(\n tf.math.is_nan(x),\n Visibility.UNLABELED.value * tf.ones_like(x, dtype=tf.int64),\n Visibility.VISIBLE.value * tf.ones_like(x, dtype=tf.int64))\n # Use visibility if provided, otherwise use the default visibility.\n vis = tf.cond(tf.equal(tf.size(x), tf.size(vis)),\n true_fn=lambda: vis,\n false_fn=lambda: default_vis)\n vis = tf.math.logical_or(\n tf.math.equal(vis, Visibility.NOT_VISIBLE.value),\n tf.math.equal(vis, Visibility.VISIBLE.value))\n vis = tf.reshape(vis, [-1, self._num_keypoints])\n return vis\n\n def _reshape_instance_masks(self, keys_to_tensors):\n \"\"\"Reshape instance segmentation masks.\n\n The instance segmentation masks are reshaped to [num_instances, height,\n width].\n\n Args:\n keys_to_tensors: a dictionary from keys to tensors.\n\n Returns:\n A 3-D float tensor of shape [num_instances, height, width] with values\n in {0, 1}.\n \"\"\"\n height = keys_to_tensors['image/height']\n width = keys_to_tensors['image/width']\n to_shape = tf.cast(tf.stack([-1, height, width]), tf.int32)\n masks = keys_to_tensors['image/object/mask']\n if isinstance(masks, tf.SparseTensor):\n masks = tf.sparse_tensor_to_dense(masks)\n masks = tf.reshape(\n tf.cast(tf.greater(masks, 0.0), dtype=tf.float32), to_shape)\n return tf.cast(masks, tf.float32)\n\n def _reshape_context_features(self, keys_to_tensors):\n \"\"\"Reshape context features.\n\n The instance context_features are reshaped to\n [num_context_features, context_feature_length]\n\n Args:\n keys_to_tensors: a dictionary from keys to tensors.\n\n Returns:\n A 2-D float tensor of shape [num_context_features, context_feature_length]\n \"\"\"\n context_feature_length = keys_to_tensors['image/context_feature_length']\n to_shape = tf.cast(tf.stack([-1, context_feature_length]), tf.int32)\n context_features = keys_to_tensors['image/context_features']\n if isinstance(context_features, tf.SparseTensor):\n context_features = tf.sparse_tensor_to_dense(context_features)\n context_features = tf.reshape(context_features, to_shape)\n return context_features\n\n def _decode_png_instance_masks(self, keys_to_tensors):\n \"\"\"Decode PNG instance segmentation masks and stack into dense tensor.\n\n The instance segmentation masks are reshaped to [num_instances, height,\n width].\n\n Args:\n keys_to_tensors: a dictionary from keys to tensors.\n\n Returns:\n A 3-D float tensor of shape [num_instances, height, width] with values\n in {0, 1}.\n \"\"\"\n\n def decode_png_mask(image_buffer):\n image = tf.squeeze(\n tf.image.decode_image(image_buffer, channels=1), axis=2)\n image.set_shape([None, None])\n image = tf.cast(tf.greater(image, 0), dtype=tf.float32)\n return image\n\n png_masks = keys_to_tensors['image/object/mask']\n height = keys_to_tensors['image/height']\n width = keys_to_tensors['image/width']\n if isinstance(png_masks, tf.SparseTensor):\n png_masks = tf.sparse_tensor_to_dense(png_masks, default_value='')\n return tf.cond(\n tf.greater(tf.size(png_masks), 0),\n lambda: tf.map_fn(decode_png_mask, png_masks, dtype=tf.float32),\n lambda: tf.zeros(tf.cast(tf.stack([0, height, width]), dtype=tf.int32)))\n\n def _dense_pose_part_indices(self, keys_to_tensors):\n \"\"\"Creates a tensor that contains part indices 
for each DensePose point.\n\n Args:\n keys_to_tensors: a dictionary from keys to tensors.\n\n Returns:\n A 2-D int32 tensor of shape [num_instances, num_points] where each element\n contains the DensePose part index (0-23). The value `num_points`\n corresponds to the maximum number of sampled points across all instances\n in the image. Note that instances with less sampled points will be padded\n with zeros in the last dimension.\n \"\"\"\n num_points_per_instances = keys_to_tensors['image/object/densepose/num']\n part_index = keys_to_tensors['image/object/densepose/part_index']\n if isinstance(num_points_per_instances, tf.SparseTensor):\n num_points_per_instances = tf.sparse_tensor_to_dense(\n num_points_per_instances)\n if isinstance(part_index, tf.SparseTensor):\n part_index = tf.sparse_tensor_to_dense(part_index)\n part_index = tf.cast(part_index, dtype=tf.int32)\n max_points_per_instance = tf.cast(\n tf.math.reduce_max(num_points_per_instances), dtype=tf.int32)\n num_points_cumulative = tf.concat([\n [0], tf.math.cumsum(num_points_per_instances)], axis=0)\n\n def pad_parts_tensor(instance_ind):\n points_range_start = num_points_cumulative[instance_ind]\n points_range_end = num_points_cumulative[instance_ind + 1]\n part_inds = part_index[points_range_start:points_range_end]\n return shape_utils.pad_or_clip_nd(part_inds,\n output_shape=[max_points_per_instance])\n\n return tf.map_fn(pad_parts_tensor,\n tf.range(tf.size(num_points_per_instances)),\n dtype=tf.int32)\n\n def _dense_pose_surface_coordinates(self, keys_to_tensors):\n \"\"\"Creates a tensor that contains surface coords for each DensePose point.\n\n Args:\n keys_to_tensors: a dictionary from keys to tensors.\n\n Returns:\n A 3-D float32 tensor of shape [num_instances, num_points, 4] where each\n point contains (y, x, v, u) data for each sampled DensePose point. The\n (y, x) coordinate has normalized image locations for the point, and (v, u)\n contains the surface coordinate (also normalized) for the part. The value\n `num_points` corresponds to the maximum number of sampled points across\n all instances in the image. 
Note that instances with less sampled points\n will be padded with zeros in dim=1.\n \"\"\"\n num_points_per_instances = keys_to_tensors['image/object/densepose/num']\n dp_y = keys_to_tensors['image/object/densepose/y']\n dp_x = keys_to_tensors['image/object/densepose/x']\n dp_v = keys_to_tensors['image/object/densepose/v']\n dp_u = keys_to_tensors['image/object/densepose/u']\n if isinstance(num_points_per_instances, tf.SparseTensor):\n num_points_per_instances = tf.sparse_tensor_to_dense(\n num_points_per_instances)\n if isinstance(dp_y, tf.SparseTensor):\n dp_y = tf.sparse_tensor_to_dense(dp_y)\n if isinstance(dp_x, tf.SparseTensor):\n dp_x = tf.sparse_tensor_to_dense(dp_x)\n if isinstance(dp_v, tf.SparseTensor):\n dp_v = tf.sparse_tensor_to_dense(dp_v)\n if isinstance(dp_u, tf.SparseTensor):\n dp_u = tf.sparse_tensor_to_dense(dp_u)\n max_points_per_instance = tf.cast(\n tf.math.reduce_max(num_points_per_instances), dtype=tf.int32)\n num_points_cumulative = tf.concat([\n [0], tf.math.cumsum(num_points_per_instances)], axis=0)\n\n def pad_surface_coordinates_tensor(instance_ind):\n \"\"\"Pads DensePose surface coordinates for each instance.\"\"\"\n points_range_start = num_points_cumulative[instance_ind]\n points_range_end = num_points_cumulative[instance_ind + 1]\n y = dp_y[points_range_start:points_range_end]\n x = dp_x[points_range_start:points_range_end]\n v = dp_v[points_range_start:points_range_end]\n u = dp_u[points_range_start:points_range_end]\n # Create [num_points_i, 4] tensor, where num_points_i is the number of\n # sampled points for instance i.\n unpadded_tensor = tf.stack([y, x, v, u], axis=1)\n return shape_utils.pad_or_clip_nd(\n unpadded_tensor, output_shape=[max_points_per_instance, 4])\n\n return tf.map_fn(pad_surface_coordinates_tensor,\n tf.range(tf.size(num_points_per_instances)),\n dtype=tf.float32)\n\n def _expand_image_label_hierarchy(self, image_classes, image_confidences):\n \"\"\"Expand image level labels according to the hierarchy.\n\n Args:\n image_classes: Int64 tensor with the image level class ids for a sample.\n image_confidences: Float tensor signaling whether a class id is present in\n the image (1.0) or not present (0.0).\n\n Returns:\n new_image_classes: Int64 tensor equal to expanding image_classes.\n new_image_confidences: Float tensor equal to expanding image_confidences.\n \"\"\"\n\n def expand_labels(relation_tensor, confidence_value):\n \"\"\"Expand to ancestors or descendants depending on arguments.\"\"\"\n mask = tf.equal(image_confidences, confidence_value)\n target_image_classes = tf.boolean_mask(image_classes, mask)\n expanded_indices = tf.reduce_any((tf.gather(\n relation_tensor, target_image_classes - _LABEL_OFFSET, axis=0) > 0),\n axis=0)\n expanded_indices = tf.where(expanded_indices)[:, 0] + _LABEL_OFFSET\n new_groundtruth_image_classes = (\n tf.concat([\n tf.boolean_mask(image_classes, tf.logical_not(mask)),\n expanded_indices,\n ],\n axis=0))\n new_groundtruth_image_confidences = (\n tf.concat([\n tf.boolean_mask(image_confidences, tf.logical_not(mask)),\n tf.ones([tf.shape(expanded_indices)[0]],\n dtype=image_confidences.dtype) * confidence_value,\n ],\n axis=0))\n return new_groundtruth_image_classes, new_groundtruth_image_confidences\n\n image_classes, image_confidences = expand_labels(self._ancestors_lut, 1.0)\n new_image_classes, new_image_confidences = expand_labels(\n self._descendants_lut, 0.0)\n return new_image_classes, new_image_confidences\n\n def _expansion_box_field_labels(self,\n object_classes,\n object_field,\n 
copy_class_id=False):\n \"\"\"Expand the labels of a specific object field according to the hierarchy.\n\n Args:\n object_classes: Int64 tensor with the class id for each element in\n object_field.\n object_field: Tensor to be expanded.\n copy_class_id: Boolean to choose whether to use class id values in the\n output tensor instead of replicating the original values.\n\n Returns:\n A tensor with the result of expanding object_field.\n \"\"\"\n expanded_indices = tf.gather(\n self._ancestors_lut, object_classes - _LABEL_OFFSET, axis=0)\n if copy_class_id:\n new_object_field = tf.where(expanded_indices > 0)[:, 1] + _LABEL_OFFSET\n else:\n new_object_field = tf.repeat(\n object_field, tf.reduce_sum(expanded_indices, axis=1), axis=0)\n return new_object_field\n"
] |
[
[
"tensorflow.compat.v1.concat",
"tensorflow.compat.v1.equal",
"tensorflow.compat.v1.boolean_mask",
"tensorflow.compat.v1.shape",
"tensorflow.compat.v1.constant",
"tensorflow.compat.v1.logical_not",
"tensorflow.compat.v1.sparse_tensor_to_dense",
"tensorflow.compat.v1.math.equal",
"tensorflow.compat.v1.reshape",
"tensorflow.compat.v1.reduce_sum",
"tensorflow.compat.v1.image.decode_image",
"tensorflow.compat.v1.transpose",
"tensorflow.compat.v1.where",
"tensorflow.compat.v1.ones_like",
"tensorflow.compat.v1.map_fn",
"tensorflow.compat.v1.FixedLenFeature",
"tensorflow.compat.v1.math.reduce_max",
"tensorflow.compat.v1.cast",
"tensorflow.compat.v1.math.cumsum",
"tensorflow.compat.v1.stack",
"tensorflow.compat.v1.expand_dims",
"tensorflow.compat.v1.math.is_nan",
"tensorflow.compat.v1.gather",
"tensorflow.compat.v1.VarLenFeature",
"tensorflow.compat.v1.size",
"tensorflow.compat.v1.squeeze",
"tensorflow.compat.v1.greater"
]
] |
jzstark/icfp-demo
|
[
"c37ee25eec729c292e1717697e76e717abf20a5f"
] |
[
"mnist_train/tfgraph_train.py"
] |
[
"#!/usr/bin/env python\n\nimport numpy as np\nimport os\nimport tensorflow as tf\nfrom google.protobuf import text_format\nfrom tensorflow.python.framework import graph_io\n\nos.environ['KMP_DUPLICATE_LIB_OK']='True'\n\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"../data/\", one_hot=True)\nbatch_size = 100\n\nfilename = 'tf_convert_mnist'\nwith open(filename + '.pbtxt', 'r') as f:\n \tmetagraph_def = tf.compat.v1.MetaGraphDef()\n \tfile_content = f.read()\n \ttext_format.Merge(file_content,metagraph_def)\n \tgraph_io.write_graph(metagraph_def, \n \t\tos.path.dirname(filename),\n \tos.path.basename(filename) + '.pb',\n \tas_text=False)\n\ninput_data = np.random.rand(100, 28, 28, 1)\n\nwith tf.Graph().as_default():\n config = tf.compat.v1.ConfigProto()\n config.gpu_options.allow_growth = True\n\n sess = tf.compat.v1.Session(config=config)\n saver = tf.compat.v1.train.import_meta_graph(filename+'.pb')\n graph = tf.compat.v1.get_default_graph()\n\n x = graph.get_tensor_by_name('x:0')\n y = tf.compat.v1.placeholder(\"float\", [None, 10])\n result = tf.compat.v1.get_collection(\"result\")[0]\n\n correct_pred = tf.equal(tf.argmax(result, 1), tf.argmax(y, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n # Extra tensors for training\n cross_entropy = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits_v2(\n labels=y, logits=result\n ))\n train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\n\n init = tf.global_variables_initializer()\n sess.run(init)\n\n # Begin training\n for i in range(500):\n batch_x, batch_y = mnist.train.next_batch(batch_size)\n batch_x = np.reshape(batch_x, (-1,28,28,1))\n sess.run(train_step, feed_dict={x: batch_x, y: batch_y})\n \n if (i % 100 == 0):\n minibatch_loss, acc = sess.run([cross_entropy, accuracy], feed_dict={x: batch_x, y: batch_y})\n print(\"Loss:%s\" % str(minibatch_loss))\n print(\"Accuracy:%s\\n\" % str(acc))\n\n # saver.save(sess, \"owl_model.ckpt\")\n"
] |
[
[
"tensorflow.compat.v1.get_default_graph",
"tensorflow.compat.v1.ConfigProto",
"tensorflow.compat.v1.train.import_meta_graph",
"tensorflow.compat.v1.MetaGraphDef",
"tensorflow.Graph",
"numpy.reshape",
"tensorflow.cast",
"tensorflow.compat.v1.Session",
"tensorflow.global_variables_initializer",
"tensorflow.compat.v1.placeholder",
"tensorflow.compat.v1.get_collection",
"numpy.random.rand",
"tensorflow.train.AdamOptimizer",
"tensorflow.nn.softmax_cross_entropy_with_logits_v2",
"tensorflow.argmax"
]
] |
nicolasmota/movies-recommender
|
[
"ab6ffeaeed2e341cc6298ceffeae037056ba9ed0"
] |
[
"movies_recommender/item_similiarity/main.py"
] |
[
"import argparse\nimport json\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.metrics import pairwise_distances\n\n\nclass MoviesRecommender:\n\n def __init__(self, data_path):\n self.data_file = self._get_data_file(data_path)\n\n def _get_data_file(self, data_path):\n \"\"\" Return a json object containing the file content \"\"\"\n\n return json.load(open(data_path))\n\n def _get_movies(self):\n \"\"\" Return a dict of movies \"\"\"\n\n return self.data_file['movies']\n\n def _normalize_dataset(self):\n \"\"\" Create DataFrame including rating 5 for all watched movies and\n split to row each movie_id by user \"\"\"\n\n new_data = []\n columns = ['user_id', 'movie_id', 'rating']\n for line in self.data_file['users']:\n movies_by_user = [\n {'user_id': line['user_id'], 'movie_id': movie_id, 'rating': 5}\n for movie_id in line['movies']\n ]\n new_data.extend(movies_by_user)\n return pd.DataFrame(new_data, columns=columns)\n\n def _get_sparse_data(self):\n \"\"\" Read a normalized dataset and create a sparse matrix\n containing the ratings \"\"\"\n data = self._normalize_dataset()\n\n n_users = data.user_id.unique().shape[0]\n n_movies = data.movie_id.unique().shape[0]\n\n ratings = np.zeros((n_users, n_movies))\n\n for row in data.itertuples():\n ratings[row[1]-1, row[2]-1] = row[3]\n return ratings\n\n def _create_train_test_data(self, ratings):\n test = np.zeros(ratings.shape)\n train = ratings.copy()\n for user in range(ratings.shape[0]):\n test_ratings = np.random.choice(\n ratings[user, :].nonzero()[0],\n size=5\n )\n train[user, test_ratings] = 0.\n test[user, test_ratings] = ratings[user, test_ratings]\n\n assert(np.all((train * test) == 0))\n return train, test\n\n def _get_item_correlation(self, train):\n item_correlation = 1 - pairwise_distances(\n train.T,\n metric='correlation'\n )\n item_correlation[np.isnan(item_correlation)] = 0.\n return item_correlation\n\n def _get_top_k_movies(self, similarity, movie_id, k):\n \"\"\" Return a list of k recommended movies \"\"\"\n return [\n self._get_movies()[str(x+1)]\n for x in np.argsort(similarity[movie_id-1,:])[:-k-1:-1]\n ]\n\n def recommend(self, movie_id, k=5):\n ratings = self._get_sparse_data()\n train, _ = self._create_train_test_data(ratings)\n item_correlation = self._get_item_correlation(train)\n return self._get_top_k_movies(item_correlation, movie_id, k)\n\n\nif __name__ == \"__main__\":\n k = 5\n parser = argparse.ArgumentParser(\n description=\"Movies Recommender Application\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n parser.add_argument(\n '--input', type=str, required=True,\n dest='inputfile', help='data file path')\n\n parser.add_argument(\n '--movie_id', type=int, required=True,\n dest='movie_id', help='Movie ID')\n parser.add_argument(\n '--k', type=int, required=False,\n dest='k', help='Number of recommended movies'\n )\n\n args = parser.parse_args()\n if args.k:\n k = args.k\n movies_recommender = MoviesRecommender(args.inputfile)\n movie_ids = movies_recommender.recommend(args.movie_id, k)\n print(movie_ids)\n"
] |
[
[
"sklearn.metrics.pairwise_distances",
"numpy.isnan",
"pandas.DataFrame",
"numpy.all",
"numpy.argsort",
"numpy.zeros"
]
] |
pdn4kd/reimagined-palm-tree
|
[
"55791b0b647788f56f7eff5e8d5da4fa7ea2e7cf"
] |
[
"rvrms_batch.py"
] |
[
"'''\nUsing functionality of rvrms.py to get a bunch of RV precisions from existing observation logs. Also throws in some Gaussian noise.\n'''\nimport numpy as np\nimport rvrms\nfrom astropy import units as u\n\n#load up full simulation bits, will drop later\nimport simulation\nsim = simulation.simulation('simulation.ini') #not necessary if metadata is included\nimport datetime\nnow = str((datetime.datetime.now()).isoformat())\n\n#opening a folder with results from a previous run\n#borrowed from vis.py\nimport glob\nimport os\nsimnumber = input('Enter 5 digit sim number (including any leading zeros): ')\nsimpath = glob.glob('./results/*.'+simnumber+'/')[0]\nos.stat(simpath)\nprint(simpath)\n\nΔλ = 100 * u.angstrom\nλ_max = sim.instruments[0].λmax * u.angstrom\nλ_min = sim.instruments[0].λmin * u.angstrom\narea = sim.telescopes[0].area * u.m * u.m\ndark_current = sim.instruments[0].dark_current / u.hour\ninstrms = sim.instruments[0].general_noise\n\ntarget_list = np.genfromtxt(\"targetstars.csv\", delimiter=\",\", dtype=None, names=True, encoding=None)\nfor target in target_list:\n\tif (os.access(simpath+target['Name']+\".txt\", os.R_OK)):#with current bad setup, target listiing might not include all stars\n\t\tstar = np.genfromtxt(simpath+target['Name']+\".txt\", delimiter=\",\", names=True, encoding=None)\n\t\tif (star.shape != ()):\n\t\t\t# if we have actual observations, not just the test/setup one, we can calculate RVs\n\t\t\tprint(target['Name'])\n\t\t\tstar_rms = open(simpath+target['Name']+\"_rv.txt\", 'w')\n\t\t\tstar_rms.write(\"obs_start,obs_end,duration,altitude,azimuth,exposures,photonprec,instprec,rvprec,rvmeas,snr_actual\\n\")\n\t\t\tTeff = target['K']\n\t\t\tFeH = target['Sun']\n\t\t\tlogg = target['cms']\n\t\t\ttry:\n\t\t\t\tvsini = np.float(star['kms']) * u.km / u.s\n\t\t\t\ttheta_rot = 1.13 * np.float(star['kms'])\n\t\t\texcept:\n\t\t\t\tprint(\"Warning: v * sin(i) not found. Assuming 2 km/s\")\n\t\t\t\tvsini = 2.0 * u.km / u.s\n\t\t\t\ttheta_rot = 2.26\n\t\t\tif np.isnan(theta_rot):\n\t\t\t\tprint(\"Warning: v * sin(i) not found. Assuming 2 km/s\")\n\t\t\t\tvsini = 2.0 * u.km / u.s\n\t\t\t\ttheta_rot = 2.26\n\t\t\trstar = target['solRad'] * u.solRad\n\t\t\tdstar = target['pc'] * u.pc\n\t\t\ttry:\n\t\t\t\tvmac = star['Vmac']\n\t\t\t\tvmac + 1.0\n\t\t\texcept:\n\t\t\t\tprint(\"Warning: Macroturbulence not found. Estimating from other properties.\")\n\t\t\t\tvmac = np.float('nan')\n\t\t\t\tfor i in star:\n\t\t\t\t\texptime = i['duration'] * u.minute\n\t\t\t\t\tzenith_angle = (90-i['altitude'])*np.pi/180.0\n\t\t\t\t\tairmass = np.exp(-sim.elevation/8400)/np.cos(zenith_angle)\n\t\t\t\t\tn_expose = i['exposures']\n\t\t\t\t\tphotonrms, SNR_actual = rvrms.rvcalc(Teff, FeH, logg, vsini, theta_rot, rstar, dstar, vmac, airmass, exptime, sim.instruments[0].efficiency, area, sim.instruments[0].R, sim.instruments[0].gain, sim.instruments[0].read_noise, dark_current, sim.instruments[0].n_pix, λ_min, λ_max, Δλ, n_expose)\n\t\t\t\t\tphotonrms *= 1000\n\t\t\t\t\tvrms = np.sqrt(photonrms**2+instrms**2)\n\t\t\t\t\tvmeas = np.random.normal(0.0, vrms)\n\t\t\t\t\tline = str(i['obs_start'])+\",\"+str(i['obs_end'])+\",\"+str(i['duration'])+\",\"+str(i['altitude'])+\",\"+str(i['azimuth'])+\",\"+str(i['exposures'])+\",\"+str(photonrms)+\",\"+str(instrms)+\",\"+str(vrms)+\",\"+str(vmeas)+\",\"+str(SNR_actual)+\"\\n\"\n\t\t\t\t\tstar_rms.write(line)\n\t\t\t\t\tprint(vrms, vmeas)\n\t\t\tstar_rms.close()\n"
] |
[
[
"numpy.sqrt",
"numpy.isnan",
"numpy.cos",
"numpy.genfromtxt",
"numpy.random.normal",
"numpy.exp",
"numpy.float"
]
] |
Rashmeet09/Hierarchical-Actor-Critic-Pytorch
|
[
"90df3f356c828e08b7a7514abb934ec1b11a8521"
] |
[
"src/DDPG.py"
] |
[
"'''\nAuthor: Rashmeet Kaur Nayyar\nDeep Deterministic Policy Gradient (DDPG)\n'''\n\nimport torch\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n# Policy Network that maps states to actions\nclass Actor(torch.nn.Module):\n def __init__(self, state_dim, action_dim, action_max_bound, action_offset, hidden_size=64):\n super(Actor, self).__init__()\n self.action_max_bound = action_max_bound\n self.action_offset = action_offset\n self.layer1 = torch.nn.Linear(state_dim * 2, hidden_size)\n self.layer2 = torch.nn.Linear(hidden_size, hidden_size)\n self.layer3 = torch.nn.Linear(hidden_size, action_dim)\n self.relu = torch.nn.ReLU()\n self.tanh = torch.nn.Tanh()\n self.actor = torch.nn.Sequential(\n self.layer1,\n self.relu,\n self.layer2,\n self.relu,\n self.layer3,\n self.tanh\n )\n\n def forward(self, state, subgoal):\n x = torch.cat([state, subgoal], 1)\n out = self.actor(x) * self.action_max_bound + self.action_offset\n return out\n\n# Q-value Network that maps state and action pairs to Q-values\nclass Critic(torch.nn.Module):\n def __init__(self, state_dim, action_dim, max_horizon, hidden_size=64):\n super(Critic, self).__init__()\n self.max_horizon = max_horizon\n self.layer1 = torch.nn.Linear(state_dim * 2 + action_dim, hidden_size)\n self.layer2 = torch.nn.Linear(hidden_size, hidden_size)\n self.layer3 = torch.nn.Linear(hidden_size, 1)\n self.relu = torch.nn.ReLU()\n self.sigmoid = torch.nn.Sigmoid()\n self.critic = torch.nn.Sequential(\n self.layer1,\n self.relu,\n self.layer2,\n self.relu,\n self.layer3,\n self.sigmoid\n )\n\n def forward(self, state, action, subgoal):\n # Qvalues are bounded in the range [− max_horizon, 0] because rewards used are nonpositive.\n # The bound of − max_horizon is\n # (i) helpful for learning Q-values as the critic function does not need to learn precise Q-values \n # for the large space of irrelevant actions in which the current state is far from the goal state.\n # (ii) ensures that subgoal states that were reached in hindsight should have higher Q-values than \n # any subgoal state that istoo distant and penalized during subgoal testing.\n x = torch.cat([state, action, subgoal], 1)\n out = -self.critic(x) * self.max_horizon\n return out\n\n# Deep Deterministic Policy Gradient\nclass DDPG(object):\n def __init__(self, state_dim, action_dim, action_max_bound, action_offset, max_horizon, learning_rate):\n self.max_horizon = max_horizon\n self.loss = torch.nn.MSELoss()\n self.actor = Actor(state_dim, action_dim, action_max_bound, action_offset).to(device)\n self.critic = Critic(state_dim, action_dim, max_horizon).to(device)\n self.optimizer_actor = torch.optim.Adam(self.actor.parameters(), lr=learning_rate)\n self.optimizer_critic = torch.optim.Adam(self.critic.parameters(), lr=learning_rate)\n self.loss_actor = None\n self.loss_critic = None\n\n def update_actor_critic(self, replay_buffer, n_iterations, batch_size):\n for i in range(n_iterations):\n # sample experience from replay buffer\n state, action, reward, next_state, subgoal, discount, done = replay_buffer.sample_experience(batch_size) \n \n # compute Qvalue for state and action\n # Qvalue = self.critic(state, action, subgoal)\n\n # get next action from actor network\n next_action = self.actor(next_state, subgoal).detach()\n\n # compute target Qvalue using Qvalue for next state and action from critic network\n next_Qvalue = self.critic(next_state, next_action, subgoal).detach()\n target_Qvalue = reward + ((1 - done) * discount * next_Qvalue)\n\n # compute critic 
network loss and optimize its parameters to minimize the loss\n self.loss_critic = self.loss(self.critic(state, action, subgoal), target_Qvalue) \n self.optimizer_critic.zero_grad()\n self.loss_critic.backward()\n self.optimizer_critic.step() \n\n # compute actor network loss and optimize its parameters to minimize the loss\n self.loss_actor = -self.critic(state, self.actor(state, subgoal), subgoal).mean()\n self.optimizer_actor.zero_grad()\n self.loss_actor.backward()\n self.optimizer_actor.step()\n\n def get_action_from_policy(self, state, goal):\n # returns the best action recommended by the policy\n state = torch.FloatTensor(state.reshape(1, -1)).to(device)\n goal = torch.FloatTensor(goal.reshape(1, -1)).to(device)\n return self.actor(state, goal).detach().cpu().data.numpy().flatten()\n\n"
] |
[
[
"torch.nn.Sequential",
"torch.cat",
"torch.nn.Tanh",
"torch.nn.Sigmoid",
"torch.nn.Linear",
"torch.cuda.is_available",
"torch.nn.ReLU",
"torch.nn.MSELoss"
]
] |
samils7/BLG561E-Class-Incremental-Learning
|
[
"cb0e8d39eb0c469da46c7c550c19229927a2bec5"
] |
[
"trainer/incremental_icarl.py"
] |
[
"##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n## Created by: Yaoyao Liu\n## Modified from: https://github.com/hshustc/CVPR19_Incremental_Learning\n## Max Planck Institute for Informatics\n## yaoyao.liu@mpi-inf.mpg.de\n## Copyright (c) 2019\n##\n## This source code is licensed under the MIT-style license found in the\n## LICENSE file in the root directory of this source tree\n##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n\"\"\" Training code for iCaRL \"\"\"\nimport torch\nimport tqdm\nimport numpy as np\nimport torch.nn as nn\nimport torchvision\nfrom torch.optim import lr_scheduler\nfrom torchvision import datasets, models, transforms\nfrom utils.misc import *\nfrom utils.process_fp import process_inputs_fp\nimport torch.nn.functional as F\n\ndef incremental_train_and_eval(the_args, epochs, fusion_vars, ref_fusion_vars, b1_model, ref_model, b2_model, ref_b2_model, tg_optimizer, tg_lr_scheduler, fusion_optimizer, fusion_lr_scheduler, trainloader, testloader, iteration, start_iteration, X_protoset_cumuls, Y_protoset_cumuls, order_list,lamda, dist, K, lw_mr, balancedloader, T=None, beta=None, fix_bn=False, weight_per_class=None, device=None):\n\n # Setting up the CUDA device\n if device is None:\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n # Set the 1st branch reference model to the evaluation mode\n ref_model.eval()\n\n # Get the number of old classes\n num_old_classes = ref_model.fc.out_features\n\n # If the 2nd branch reference is not None, set it to the evaluation mode\n if iteration > start_iteration+1:\n ref_b2_model.eval()\n\n for epoch in range(epochs):\n # Start training for the current phase, set the two branch models to the training mode\n b1_model.train()\n b2_model.train()\n\n # Fix the batch norm parameters according to the config\n if fix_bn:\n for m in b1_model.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eval()\n\n # Set all the losses to zeros\n train_loss = 0\n train_loss1 = 0\n train_loss2 = 0\n # Set the counters to zeros\n correct = 0\n total = 0\n\n # Learning rate decay\n tg_lr_scheduler.step()\n fusion_lr_scheduler.step()\n\n # Print the information\n print('\\nEpoch: %d, learning rate: ' % epoch, end='')\n print(tg_lr_scheduler.get_lr()[0])\n\n for batch_idx, (inputs, targets) in enumerate(trainloader):\n\n # Get a batch of training samples, transfer them to the device\n inputs, targets = inputs.to(device), targets.to(device)\n\n # Clear the gradient of the paramaters for the tg_optimizer\n tg_optimizer.zero_grad()\n\n # Forward the samples in the deep networks\n outputs, _ = process_inputs_fp(the_args, fusion_vars, b1_model, b2_model, inputs)\n\n if iteration == start_iteration+1:\n ref_outputs = ref_model(inputs)\n else:\n ref_outputs, ref_features_new = process_inputs_fp(the_args, ref_fusion_vars, ref_model, ref_b2_model, inputs)\n # Loss 1: logits-level distillation loss\n loss1 = nn.KLDivLoss()(F.log_softmax(outputs[:,:num_old_classes]/T, dim=1), \\\n F.softmax(ref_outputs.detach()/T, dim=1)) * T * T * beta * num_old_classes\n # Loss 2: classification loss\n loss2 = nn.CrossEntropyLoss(weight_per_class)(outputs, targets)\n # Sum up all looses\n loss = loss1 + loss2\n\n # Backward and update the parameters\n loss.backward()\n tg_optimizer.step()\n\n # Record the losses and the number of samples to compute the accuracy\n train_loss += loss.item()\n train_loss1 += loss1.item()\n train_loss2 += loss2.item()\n _, predicted = outputs.max(1)\n total += 
targets.size(0)\n correct += predicted.eq(targets).sum().item()\n\n # Print the training losses and accuracies\n print('Train set: {}, train loss1: {:.4f}, train loss2: {:.4f}, train loss: {:.4f} accuracy: {:.4f}'.format(len(trainloader), train_loss1/(batch_idx+1), train_loss2/(batch_idx+1), train_loss/(batch_idx+1), 100.*correct/total))\n \n # Update the aggregation weights\n b1_model.eval()\n b2_model.eval()\n \n for batch_idx, (inputs, targets) in enumerate(balancedloader):\n fusion_optimizer.zero_grad()\n inputs, targets = inputs.to(device), targets.to(device)\n outputs, _ = process_inputs_fp(the_args, fusion_vars, b1_model, b2_model, inputs)\n loss = nn.CrossEntropyLoss(weight_per_class)(outputs, targets)\n loss.backward()\n fusion_optimizer.step()\n\n # Running the test for this epoch\n b1_model.eval()\n b2_model.eval()\n test_loss = 0\n correct = 0\n total = 0\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(testloader):\n inputs, targets = inputs.to(device), targets.to(device)\n outputs, _ = process_inputs_fp(the_args, fusion_vars, b1_model, b2_model, inputs)\n loss = nn.CrossEntropyLoss(weight_per_class)(outputs, targets)\n test_loss += loss.item()\n _, predicted = outputs.max(1)\n total += targets.size(0)\n correct += predicted.eq(targets).sum().item()\n print('Test set: {} test loss: {:.4f} accuracy: {:.4f}'.format(len(testloader), test_loss/(batch_idx+1), 100.*correct/total))\n\n print(\"Removing register forward hook\")\n return b1_model, b2_model\n"
] |
[
[
"torch.nn.CrossEntropyLoss",
"torch.nn.KLDivLoss",
"torch.nn.functional.log_softmax",
"torch.no_grad",
"torch.cuda.is_available"
]
] |
JiayiFu/onnx-tensorflow
|
[
"578dc6a2b6f7fea2bbcd396d387841a4949e6ef9"
] |
[
"onnx_tf/backend.py"
] |
[
"\"\"\"Backend for running ONNX on Tensorflow\n\nTo run this, you will need to have Tensorflow installed as well.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\ntry:\n from itertools import izip as zip\nexcept ImportError: # will be 3.x series\n pass\n\nfrom onnx import defs\nfrom onnx import numpy_helper\nfrom onnx.backend.base import Backend\nfrom onnx.backend.base import Device\nfrom onnx.backend.base import namedtupledict\nfrom onnx.helper import make_opsetid\nimport tensorflow as tf\n\nfrom onnx_tf.backend_rep import TensorflowRep\nfrom onnx_tf.common import data_type\nfrom onnx_tf.common import exception\nfrom onnx_tf.common import get_device_option\nfrom onnx_tf.common import get_unique_suffix\nfrom onnx_tf.common import supports_device as common_supports_device\nfrom onnx_tf.common.handler_helper import get_all_backend_handlers\nfrom onnx_tf.pb_wrapper import OnnxNode\nimport onnx_tf.common as common\n\n\nclass TensorflowBackend(Backend):\n \"\"\" Tensorflow Backend for ONNX\n \"\"\"\n\n @classmethod\n def prepare(cls,\n model,\n device='CPU',\n strict=True,\n logging_level='INFO',\n **kwargs):\n \"\"\"Prepare an ONNX model for Tensorflow Backend.\n\n This function converts an ONNX model to an internel representation\n of the computational graph called TensorflowRep and returns\n the converted representation.\n\n :param model: The ONNX model to be converted.\n :param device: The device to execute this model on.\n :param strict: Whether to enforce semantic equivalence between the original model\n and the converted tensorflow model, defaults to True (yes, enforce semantic equivalence).\n Changing to False is strongly discouraged.\n Currently, the strict flag only affects the behavior of MaxPool and AveragePool ops.\n :param logging_level: The logging level, default is INFO. Change it to DEBUG\n to see more conversion details or to WARNING to see less\n\n :returns: A TensorflowRep class object representing the ONNX model\n \"\"\"\n super(TensorflowBackend, cls).prepare(model, device, **kwargs)\n common.logger.setLevel(logging_level)\n\n return cls.onnx_model_to_tensorflow_rep(model, strict)\n\n @classmethod\n def onnx_model_to_tensorflow_rep(cls, model, strict):\n \"\"\" Convert ONNX model to TensorflowRep.\n\n :param model: ONNX ModelProto object.\n :param strict: whether to enforce semantic equivalence between the original model\n and the converted tensorflow model.\n :return: TensorflowRep object.\n \"\"\"\n\n # Models with IR_VERSION less than 3 does not have opset_import set.\n # We default to minimum opset, this behavior is consistent with\n # onnx checker.\n # c.f. 
https://github.com/onnx/onnx/blob/427ac0c1b792363d373e3d7e4eef97fa46458420/onnx/checker.cc#L478\n if model.ir_version < 3:\n opset_import = [make_opsetid(defs.ONNX_DOMAIN, 1)]\n else:\n opset_import = model.opset_import\n return cls._onnx_graph_to_tensorflow_rep(model.graph, opset_import, strict)\n\n @classmethod\n def _onnx_graph_to_tensorflow_rep(cls, graph_def, opset, strict):\n \"\"\" Convert ONNX graph to TensorflowRep.\n\n :param graph_def: ONNX GraphProto object.\n :param opset: ONNX OperatorSetIdProto list.\n :param strict: whether to enforce semantic equivalence between the original model\n and the converted tensorflow model.\n :return: TensorflowRep object.\n \"\"\"\n handlers = cls._get_handlers(opset)\n\n tf_rep_graph = tf.Graph()\n with tf_rep_graph.as_default():\n # initializer: TensorProtos representing the values to initialize\n # a given tensor.\n # initialized: A list of names of the initialized tensors.\n if graph_def.initializer:\n input_dict_items = cls._onnx_initializer_to_input_dict_items(\n graph_def.initializer)\n initialized = {init.name for init in graph_def.initializer}\n else:\n input_dict_items = []\n initialized = set()\n\n # creating placeholders for currently unknown inputs\n for value_info in graph_def.input:\n if value_info.name in initialized:\n continue\n shape = list(\n d.dim_value if (d.dim_value > 0 and d.dim_param == \"\") else None\n for d in value_info.type.tensor_type.shape.dim)\n value_info_name = value_info.name.replace(\n \":\", \"_tf_\") + \"_\" + get_unique_suffix(\n ) if \":\" in value_info.name else value_info.name\n\n x = tf.placeholder(\n data_type.onnx2tf(value_info.type.tensor_type.elem_type),\n name=value_info_name,\n shape=shape)\n input_dict_items.append((value_info.name, x))\n\n # tensor dict: this dictionary is a map from variable names\n # to the latest produced TF tensors of the given name.\n # This dictionary will get updated as we build the graph to\n # record the names of newly produced tensors.\n tensor_dict = dict(input_dict_items)\n # Since tensor dict may be updated, we need to keep a copy\n # of the original input dict where we track the earliest\n # defined tensors so we can have access to the placeholders\n # to feed in input tensors when we run the graph.\n input_dict = dict(input_dict_items)\n\n for node in graph_def.node:\n onnx_node = OnnxNode(node)\n output_ops = cls._onnx_node_to_tensorflow_op(\n onnx_node, tensor_dict, handlers, opset=opset, strict=strict)\n curr_node_output_map = dict(zip(onnx_node.outputs, output_ops))\n tensor_dict.update(curr_node_output_map)\n\n tf_rep = TensorflowRep()\n tf_rep.graph = tf_rep_graph\n tf_rep.inputs = [\n value_info.name\n for value_info in graph_def.input\n if value_info.name not in initialized\n ]\n tf_rep.outputs = [value_info.name for value_info in graph_def.output]\n tf_rep.tensor_dict = tensor_dict\n return tf_rep\n\n @classmethod\n def run_node(cls, node, inputs, device='CPU', outputs_info=None, **kwargs):\n \"\"\" Run ONNX node.\n\n :param node: ONNX NodeProto object.\n :param inputs: Inputs.\n :param device: Device run on.\n :param outputs_info: None.\n :param kwargs: Other args.\n :return: Outputs.\n \"\"\"\n super(TensorflowBackend, cls).run_node(node, inputs, device)\n node_graph = tf.Graph()\n with node_graph.as_default():\n node = OnnxNode(node)\n device_option = get_device_option(Device(device))\n input_tensors = []\n for i in inputs:\n input_tensors.append(tf.constant(i))\n\n if isinstance(inputs, dict):\n feed_dict_raw = inputs\n else:\n assert 
len(node.inputs) == len(inputs)\n feed_dict_raw = dict(zip(node.inputs, inputs))\n\n # TODO: is constant the best way for feeding inputs?\n input_dict = dict(\n [(x[0], tf.constant(x[1])) for x in feed_dict_raw.items()])\n ops = cls._onnx_node_to_tensorflow_op(node, input_dict)\n\n with tf.Session() as sess:\n with tf.device(device_option):\n sess.run(tf.global_variables_initializer())\n output_vals = sess.run(ops)\n\n return namedtupledict('Outputs', node.outputs)(*output_vals)\n\n @classmethod\n def _onnx_initializer_to_input_dict_items(cls, initializer):\n \"\"\" Convert ONNX graph initializer to input dict items.\n\n :param initializer: ONNX graph initializer, list of TensorProto.\n :return: List of input dict items.\n \"\"\"\n\n def tensor2list(onnx_tensor):\n # Use the onnx.numpy_helper because the data may be raw\n return numpy_helper.to_array(onnx_tensor).flatten().tolist()\n\n return [(init.name,\n tf.constant(\n tensor2list(init),\n shape=init.dims,\n dtype=data_type.onnx2tf(init.data_type)))\n for init in initializer]\n\n @classmethod\n def _onnx_node_to_tensorflow_op(cls,\n node,\n tensor_dict,\n handlers=None,\n opset=None,\n strict=True):\n \"\"\"\n Convert onnx node to tensorflow op.\n\n Args:\n node: Onnx node object.\n tensor_dict: Tensor dict of graph.\n opset: Opset version of the operator set. Default 0 means using latest version.\n strict: whether to enforce semantic equivalence between the original model\n and the converted tensorflow model, defaults to True (yes, enforce semantic equivalence).\n Changing to False is strongly discouraged.\n Returns:\n Tensorflow op\n \"\"\"\n handlers = handlers or cls._get_handlers(opset)\n handler = handlers[node.domain].get(node.op_type, None)\n if handler:\n with tf.name_scope(node.name):\n return handler.handle(node, tensor_dict=tensor_dict, strict=strict)\n else:\n exception.OP_UNIMPLEMENTED_EXCEPT(node.op_type)\n\n @classmethod\n def _get_handlers(cls, opset):\n \"\"\" Get all backend handlers with opset.\n\n :param opset: ONNX OperatorSetIdProto list.\n :return: All backend handlers.\n \"\"\"\n opset = opset or [make_opsetid(defs.ONNX_DOMAIN, defs.onnx_opset_version())]\n opset_dict = dict([(o.domain, o.version) for o in opset])\n return get_all_backend_handlers(opset_dict)\n\n @classmethod\n def supports_device(cls, device):\n return common_supports_device(device)\n\n @classmethod\n def onnx_graph_to_tensorflow_ops(cls, graph_def, input_values,\n opset=None, strict=True):\n \"\"\"\n Converts ONNX graph to Tensorflow operations\n Args:\n graph_def: the ONNX graph to be converted\n input_values: dictionary with values/tensors to initialize\n the graph inputs. 
the dictionary must contain values\n for all the graph_def.input\n opset: opset version of the operator set.\n strict: whether to enforce semantic equivalence between the\n original model and the converted tensorflow model,\n defaults to True (yes, enforce semantic equivalence).\n Returns:\n array of Tensorflow Tensors\n \"\"\"\n input_dict_items = []\n # set input values for the subgraph\n for value_info in graph_def.input:\n if value_info.name in input_values:\n x = input_values[value_info.name]\n input_dict_items.append((value_info.name, x))\n\n tensor_dict = dict(input_dict_items)\n\n for node in graph_def.node:\n onnx_node = OnnxNode(node)\n output_ops = cls._onnx_node_to_tensorflow_op(onnx_node, tensor_dict,\n opset=opset,strict=strict)\n curr_node_output_map = \\\n dict(zip(onnx_node.outputs, output_ops))\n tensor_dict.update(curr_node_output_map)\n return tensor_dict\n\n\nprepare = TensorflowBackend.prepare\n\nrun_node = TensorflowBackend.run_node\n\nrun_model = TensorflowBackend.run_model\n\nsupports_device = TensorflowBackend.supports_device\n\nonnx_graph_to_tensorflow_ops = TensorflowBackend.onnx_graph_to_tensorflow_ops\n"
] |
[
[
"tensorflow.Graph",
"tensorflow.device",
"tensorflow.constant",
"tensorflow.global_variables_initializer",
"tensorflow.name_scope",
"tensorflow.Session"
]
] |
iynaur/pytorch-grad-cam
|
[
"ffec997bff40c7dba2b29005528c6a5893a11849"
] |
[
"pytorch_grad_cam/base_cam.py"
] |
[
"import cv2\nimport numpy as np\nimport torch\nimport ttach as tta\nfrom pytorch_grad_cam.activations_and_gradients import ActivationsAndGradients\nfrom pytorch_grad_cam.utils.svd_on_activations import get_2d_projection\n\n\nclass BaseCAM:\n def __init__(self, \n model, \n target_layer,\n use_cuda=False,\n reshape_transform=None):\n self.model = model.eval()\n self.target_layer = target_layer\n self.cuda = use_cuda\n if self.cuda:\n self.model = model.cuda()\n self.reshape_transform = reshape_transform\n self.activations_and_grads = ActivationsAndGradients(self.model, \n target_layer, reshape_transform)\n\n def forward(self, input_img):\n return self.model(input_img)\n\n def get_cam_weights(self,\n input_tensor,\n target_category,\n activations,\n grads):\n raise Exception(\"Not Implemented\")\n\n def get_loss(self, output, target_category):\n loss = 0\n for i in range(len(target_category)):\n loss = loss + output[i, target_category[i]]\n return loss\n\n def get_cam_image(self,\n input_tensor,\n target_category,\n activations,\n grads,\n eigen_smooth=False):\n weights = self.get_cam_weights(input_tensor, target_category, activations, grads)\n weighted_activations = weights[:, :, None, None] * activations\n if eigen_smooth:\n cam = get_2d_projection(weighted_activations)\n else:\n cam = weighted_activations.sum(axis=1)\n return cam\n\n def forward(self, input_tensor, target_category=None, eigen_smooth=False):\n\n if self.cuda:\n input_tensor = input_tensor.cuda()\n\n output = self.activations_and_grads(input_tensor)\n\n if type(target_category) is int:\n target_category = [target_category] * input_tensor.size(0)\n\n if target_category is None:\n target_category = np.argmax(output.cpu().data.numpy(), axis=-1)\n print(output.cpu().data.numpy())\n print('class id: ', target_category)\n else:\n assert(len(target_category) == input_tensor.size(0))\n\n self.model.zero_grad()\n loss = self.get_loss(output, target_category)\n loss.backward(retain_graph=True)\n\n activations = self.activations_and_grads.activations[-1].cpu().data.numpy()\n grads = self.activations_and_grads.gradients[-1].cpu().data.numpy()\n\n cam = self.get_cam_image(input_tensor, target_category, \n activations, grads, eigen_smooth)\n\n cam = np.maximum(cam, 0)\n\n result = []\n for img in cam:\n img = cv2.resize(img, input_tensor.shape[-2:][::-1])\n img = img - np.min(img)\n img = img / np.max(img)\n result.append(img)\n result = np.float32(result)\n return result\n\n def forward_augmentation_smoothing(self,\n input_tensor,\n target_category=None,\n eigen_smooth=False):\n transforms = tta.Compose(\n [\n tta.HorizontalFlip(),\n tta.Multiply(factors=[0.9, 1, 1.1]),\n ]\n )\n cams = []\n for transform in transforms:\n augmented_tensor = transform.augment_image(input_tensor)\n cam = self.forward(augmented_tensor,\n target_category, eigen_smooth)\n\n # The ttach library expects a tensor of size BxCxHxW\n cam = cam[:, None, :, :]\n cam = torch.from_numpy(cam)\n cam = transform.deaugment_mask(cam)\n\n # Back to numpy float32, HxW\n cam = cam.numpy()\n cam = cam[:, 0, :, :]\n cams.append(cam)\n\n cam = np.mean(np.float32(cams), axis=0)\n return cam\n\n def __call__(self,\n input_tensor,\n target_category=None,\n aug_smooth=False,\n eigen_smooth=False):\n if aug_smooth is True:\n return self.forward_augmentation_smoothing(input_tensor,\n target_category, eigen_smooth)\n\n return self.forward(input_tensor,\n target_category, eigen_smooth)"
] |
[
[
"numpy.maximum",
"numpy.min",
"torch.from_numpy",
"numpy.max",
"numpy.float32"
]
] |
Sid-Bisram/mcb_pipeline
|
[
"d129603b1236279d7b09307b7420edc72f2d7555"
] |
[
"question_3.py"
] |
[
"import db\r\n\r\nimport pandas as pd\r\ndef func_step_3(report_year,country_name,export_type):\r\n with db.create_db_connection() as connection:\r\n cursor=connection.cursor()\r\n cursor.callproc('Report_3',(report_year,country_name))\r\n answer=cursor.fetchall()\r\n\r\n ans_dataframe=pd.DataFrame(answer,columns=['Year','Country','Country Url','Region Code','Region','Rank Per Region','Overall Rank','Happiness Score','Happiness Status','GDP per Capita',\r\n 'Family','Social Support','Healthy life expectancy','Freedom to make life choices','Generosity','Perceptions of Corruptions'])\r\n\r\n if(export_type=='csv'):\r\n ans_dataframe.to_csv('reports/Report_3.csv',index=False)\r\n else:\r\n ans_dataframe.to_parquet('reports/Report_3.gzip',compression='gzip')\r\n\r\n print(\"File has been saved to reports folder on root directory\")\r\n\r\nif __name__ == '__main__':\r\n # func_step_3()\r\n\r\n report_year=input(\"Specify year of report: \")\r\n country_name=input(\"Enter country name: \")\r\n region_code=input(\"Enter region code: \")\r\n export_type=input(\"Do you want the report in csv or parquet please: \")\r\n\r\n if(region_code != ''):\r\n country_name=db.get_company_name(region_code)\r\n\r\n func_step_3(report_year,country_name,export_type)\r\n\r\n\r\n\r\n"
] |
[
[
"pandas.DataFrame"
]
] |