repo_name | hexsha | file_path | code | apis
---|---|---|---|---|
spider-specimens/todayonhistory-spider
|
[
"2852e556383442c9a0ce0035d9aafadae427b31d"
] |
[
"spider.py"
] |
[
"# -*- coding: UTF-8 -*-\nimport time\nimport requests\nimport numpy as np\nfrom bs4 import BeautifulSoup\nimport functools\nimport re\nfrom urllib.parse import urlparse, parse_qs\nimport psycopg2\n\nheaders=[\n {'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'},\n {'User-Agent':'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.12 Safari/535.11'},\n {'User-Agent': 'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; Trident/6.0)'}\n]\n\nclass Spider(object):\n \n pageSize = 40\n\n def __init__(self):\n self.db = psycopg2.connect(database=\"tih\", user=\"postgres\", password=\"123456\", host=\"127.0.0.1\", port=\"5432\")\n print(\"connect db success.\")\n\n def fetchListData(self, month, day, page = 0):\n htmlURL = \"http://www.todayonhistory.com/\" + str(month) + '/' + str(day)\n apiURL = \"http://www.todayonhistory.com/index.php?m=content&c=index&a=json_event&page=\" + str(page) + \"&pagesize=\" + str(self.pageSize) + \"&month=\" + str(month) + \"&day=\" + str(day)\n\n res = requests.get(\n htmlURL if page == 0 else apiURL,\n headers=headers[np.random.randint(0, len(headers))]\n )\n\n result = []\n\n if page == 0:\n soup = BeautifulSoup(res.text, \"html5lib\").find(id=\"container\").find_all(\"li\")\n\n for item in soup:\n \n _data = {}\n txtLink = item.select('.text > a, a.txt')\n img = item.find('img')\n year = item.select('.time .moh b')\n\n if not len(txtLink):\n continue\n\n txtLink = txtLink[0]\n description = item.select('.text > p')\n\n result.append({\n 'url': txtLink.get('href'),\n 'title': txtLink.text,\n 'thumb': img.get('data-original') if img else '',\n 'solaryear': year[0].text if len(year) else '',\n 'description': description[0].text if len(description) else '',\n })\n else:\n for item in res.json():\n result.append({\n 'url': item['url'],\n 'title': item['title'],\n 'thumb': item['thumb'],\n 'solaryear': item['solaryear'],\n 'description': item['description'],\n })\n\n return result\n \n def fetchAllListData(self, month, day):\n page = 0\n result = []\n\n while (1):\n _result = self.fetchListData(month, day, page)\n\n result = result + _result\n \n if (len(_result) == 0 or len(_result) < self.pageSize):\n break\n \n page = page + 1\n\n return result\n \n def fetchDetailData(self, url):\n res = requests.get(url, headers=headers[np.random.randint(0, len(headers))])\n\n res.encoding = 'utf-8'\n\n if (res.status_code >= 400):\n print(err)\n\n soup = BeautifulSoup(res.text, \"html5lib\")\n\n body = soup.select('.body')\n idElm = soup.select('script[src^=\"http://www.todayonhistory.com/api.php\"]')\n\n return {\n \"body\": body[0].prettify() if len(body) else '',\n 'id': parse_qs(urlparse(idElm[0].get('src')).query)['id'][0] if idElm else '',\n }\n\n def fetchDayAllData(self, month, day):\n _list = self.fetchAllListData(month, day)\n print('已获取%d月%d日全部列表数据,共计%d条,开始获取详情数据。' % (month, day, len(_list)))\n\n for index, item in enumerate(_list):\n time.sleep(np.random.rand() * 3)\n print('开始获取第%d条数据:%s页面。' % (index + 1, item['url']))\n \n _detail = self.fetchDetailData(item['url'])\n\n item['id'] = _detail['id']\n item['body'] = _detail['body']\n item['month'] = month\n item['day'] = day\n\n print('已获取第%d条数据:第三方id为%s。' % (index + 1, item['id']))\n\n self.saveData(item)\n \n print('已获取%d月%d日全部数据。' % (month, day))\n\n return _list\n\n def saveData(self, data):\n cur = self.db.cursor()\n\n now = int(time.time())\n\n cur.execute(\"insert into events (title, description, body, month, day, target, 
target_id, target_detail_url, create_time, update_time, status) values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\", (data['title'], data['description'], data['body'], data['month'], data['day'], 'www.todayinhistory.com', data['id'], data['url'], now, now, 1))\n\n self.db.commit()\n print(\"数据已保存。\")\n\n def getAllData(self):\n for month in range(1, 12):\n maxDay = 30\n if (month in [1, 3, 5, 7, 8, 10, 12]):\n maxDay = 31\n if (month in [2]):\n maxDay = 29\n \n for day in range(1, maxDay):\n self.fetchDayAllData(month, day)\n\n print('已获取全部数据。')\n\nif __name__ == \"__main__\":\n spider = Spider()\n spider.getAllData()"
] |
[
[
"numpy.random.rand"
]
] |
Timen/tensorflow-video
|
[
"3dae3f72769c3312f6d672abefc53a4b3b38000d"
] |
[
"tensorflow_extentions/initial_state.py"
] |
[
"import tensorflow as tf\nslim = tf.contrib.slim\nfrom tensorflow.python.util import nest\n\ndef get_initial_cell_state(cell, initializer, batch_size, dtype):\n \"\"\"Return state tensor(s), initialized with initializer.\n Args:\n cell: RNNCell.\n batch_size: int, float, or unit Tensor representing the batch size.\n initializer: function with two arguments, shape and dtype, that\n determines how the state is initialized.\n dtype: the data type to use for the state.\n Returns:\n If `state_size` is an int or TensorShape, then the return value is a\n `N-D` tensor of shape `[batch_size x state_size]` initialized\n according to the initializer.\n If `state_size` is a nested list or tuple, then the return value is\n a nested list or tuple (of the same structure) of `2-D` tensors with\n the shapes `[batch_size x s]` for each s in `state_size`.\n \"\"\"\n with tf.name_scope(\"state_initializer\"):\n state_size = cell.state_size\n init_state,_ = recursive_state_shape_initialization(initializer,state_size,batch_size,dtype,0)\n state_list = []\n for tensor_tuple in init_state:\n state_list.append(tensor_tuple)\n\n return state_list\n\ndef recursive_state_shape_initialization(initializer,state_size,batch_size,dtype,index):\n if nest.is_sequence(state_size[0]):\n results = []\n for size in state_size:\n variable,index = recursive_state_shape_initialization(initializer,size,batch_size,dtype,index)\n results.append(variable)\n if isinstance(state_size, tuple):\n return tuple(results),index\n elif isinstance(state_size, list):\n return list(results),index\n else:\n \"error\"\n else:\n return initializer(state_size, batch_size, dtype,index),index+1\n\ndef make_variable_state_initializer(**kwargs):\n def variable_state_initializer(shape, batch_size, dtype, index):\n args = kwargs.copy()\n\n if args.get('name'):\n args['name'] = args['name'] + '_' + str(index)\n else:\n args['name'] = 'init_state_' + str(index)\n\n args['shape'] = shape\n args['dtype'] = dtype\n var = tf.get_variable(**args)\n var = tf.expand_dims(var, 0)\n var = tf.tile(var, tf.stack([batch_size] + [1] * len(shape)))\n var.set_shape([batch_size] + shape)\n return var\n\n return variable_state_initializer\n\ndef make_gaussian_state_initializer(initializer, deterministic_tensor=None, stddev=0.3):\n def gaussian_state_initializer(shape, batch_size, dtype, index):\n init_state = initializer(shape, batch_size, dtype, index)\n if deterministic_tensor is not None:\n return tf.cond(deterministic_tensor,\n lambda: init_state,\n lambda: init_state + tf.random_normal(tf.shape(init_state), stddev=stddev))\n else:\n return init_state + tf.random_normal(tf.shape(init_state), stddev=stddev)\n return gaussian_state_initializer\n\ndef zero_state_initializer(shape, batch_size, dtype, index):\n z = tf.zeros(tf.stack([batch_size]+shape), dtype)\n z.set_shape([batch_size]+shape)\n return z"
] |
[
[
"tensorflow.get_variable",
"tensorflow.python.util.nest.is_sequence",
"tensorflow.shape",
"tensorflow.stack",
"tensorflow.expand_dims",
"tensorflow.name_scope"
]
] |
PacktWorkshops/Applied-Deep-Learning-with-Keras
|
[
"d1372a6109e2ee9434ae47df59440577566badaa"
] |
[
"Chapter08/Exercise8.03/Exercise8.03_Unit_test.py"
] |
[
"import unittest\nimport numpy as np\nimport pandas as pd\nimport numpy.testing as np_testing\nimport pandas.testing as pd_testing\nimport os\nimport import_ipynb\nfrom keras.models import Sequential\nfrom keras.layers import Conv2D, MaxPool2D, Flatten, Dense\nimport numpy as np\nfrom tensorflow import random\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.preprocessing import image\nimport keras\n\nclass Test(unittest.TestCase):\n\n def setUp(self):\n import Exercise8_03\n self.exercise = Exercise8_03\n \n vgg_model = keras.applications.vgg16.VGG16()\n \n self.seed = 42\n np.random.seed(self.seed)\n random.set_seed(self.seed)\n last_layer = str(vgg_model.layers[-1])\n\n self.classifier= Sequential()\n for layer in vgg_model.layers:\n if str(layer) != last_layer:\n self.classifier.add(layer)\n \n for layer in self.classifier.layers:\n layer.trainable=False \n self.classifier.add(Dense(1, activation='sigmoid'))\n self.classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n \n generate_train_data = ImageDataGenerator(rescale = 1./255,\n shear_range = 0.2,\n zoom_range = 0.2,\n horizontal_flip = True)\n\n generate_test_data = ImageDataGenerator(rescale = 1./255) \n \n training_dataset = generate_train_data.flow_from_directory('../Data/Dataset/training_set',\n target_size = (224, 224),\n batch_size = 32,\n class_mode = 'binary')\n\n test_dataset = generate_test_data.flow_from_directory('../Data/Dataset/test_set',\n target_size = (224, 224),\n batch_size = 32,\n class_mode = 'binary')\n\n \n \n self.classifier.fit_generator(\n training_dataset, steps_per_epoch = 100, epochs = 10,\n validation_data = test_dataset, validation_steps = 30,\n shuffle=False)\n \n def test_model_perf(self):\n np_testing.assert_approx_equal(self.exercise.classifier.history.history['val_accuracy'][0],\n self.classifier.history.history['val_accuracy'][0], significant=1)\n\n def test_model_pred(self):\n new_image = image.load_img('../Data/Prediction/test_image_2.jpg', target_size = (224, 224))\n new_image = image.img_to_array(new_image)\n new_image = np.expand_dims(new_image, axis = 0)\n result = self.classifier.predict(new_image)\n np_testing.assert_approx_equal(self.exercise.result[0][0], result[0][0], significant=1)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] |
[
[
"numpy.expand_dims",
"numpy.testing.assert_approx_equal",
"numpy.random.seed",
"tensorflow.random.set_seed"
]
] |
CHENGCHANGHUv/jim-schwoebelv
|
[
"2b949553362c9541a966ae7c54482a1488140e22"
] |
[
"train_audioTPOT.py"
] |
[
"'''\n================================================ \n## VOICEBOOK REPOSITORY ## \n================================================ \n\nrepository name: voicebook \nrepository version: 1.0 \nrepository link: https://github.com/jim-schwoebel/voicebook \nauthor: Jim Schwoebel \nauthor contact: js@neurolex.co \ndescription: a book and repo to get you started programming voice applications in Python - 10 chapters and 200+ scripts. \nlicense category: opensource \nlicense: Apache 2.0 license \norganization name: NeuroLex Laboratories, Inc. \nlocation: Seattle, WA \nwebsite: https://neurolex.ai \nrelease date: 2018-09-28 \n\nThis code (voicebook) is hereby released under a Apache 2.0 license license. \n\nFor more information, check out the license terms below. \n\n================================================ \n## LICENSE TERMS ## \n================================================ \n\nCopyright 2018 NeuroLex Laboratories, Inc. \n\nLicensed under the Apache License, Version 2.0 (the \"License\"); \nyou may not use this file except in compliance with the License. \nYou may obtain a copy of the License at \n\n http://www.apache.org/licenses/LICENSE-2.0 \n\nUnless required by applicable law or agreed to in writing, software \ndistributed under the License is distributed on an \"AS IS\" BASIS, \nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. \nSee the License for the specific language governing permissions and \nlimitations under the License. \n\n================================================ \n## SERVICE STATEMENT ## \n================================================ \n\nIf you are using the code written for a larger project, we are \nhappy to consult with you and help you with deployment. Our team \nhas >10 world experts in Kafka distributed architectures, microservices \nbuilt on top of Node.js / Python / Docker, and applying machine learning to \nmodel speech and text data. \n\nWe have helped a wide variety of enterprises - small businesses, \nresearchers, enterprises, and/or independent developers. \n\nIf you would like to work with us let us know @ js@neurolex.co. \n\n================================================ \n## TRAIN_AUDIOTPOT.PY ## \n================================================ \n\nAuto optimize the parameters for a classification model.\n\nFollows TPOT documentation\nhttps://github.com/EpistasisLab/tpot\n'''\nimport json, os\nimport numpy as np\nfrom tpot import TPOTClassifier\nfrom tpot import TPOTRegressor\nfrom sklearn.model_selection import train_test_split\n\n## initialize directories and classes\nmodel_dir=os.getcwd()+'/models/'\ndata_dir=os.getcwd()+'/data/'\n\nos.chdir(data_dir)\nmtype=input('classification (c) or regression (r) problem? \\n').lower().replace(' ','')\nwhile mtype not in ['c','r', 'classification','regression']:\n print('input not recognized')\n mtype=input('is this classification (c) or regression (r) problem? \\n').lower().replace(' ','')\n\none=input('what is the name of class 1? \\n')\ntwo=input('what is the name of class 2? 
\\n')\n\njsonfile=one+'_'+two+'_audio.json'\n\ntry:\n g=json.load(open(jsonfile))\n one=g[one]\n two=g[two]\n os.chdir(model_dir)\n\n # now preprocess data \n alldata=list()\n for i in range(len(one)):\n alldata.append(one[i])\n for i in range(len(two)):\n alldata.append(two[i])\n \n labels=list()\n for i in range(len(one)):\n labels.append(0)\n for i in range(len(two)):\n labels.append(1)\n\n alldata=np.asarray(alldata)\n labels=np.asarray(labels)\n\n # get train and test data \n X_train, X_test, y_train, y_test = train_test_split(alldata, labels, train_size=0.750, test_size=0.250)\n if mtype in [' classification', 'c']:\n tpot=TPOTClassifier(generations=5, population_size=50, verbosity=2, n_jobs=-1)\n tpotname='%s_tpotclassifier.py'%(jsonfile[0:-5])\n elif mtype in ['regression','r']:\n tpot = TPOTRegressor(generations=5, population_size=20, verbosity=2)\n tpotname='%s_tpotregression.py'%(jsonfile[0:-5])\n tpot.fit(X_train, y_train)\n accuracy=tpot.score(X_test,y_test)\n tpot.export(tpotname)\n\n # export data to .json format \n data={\n 'data': alldata.tolist(),\n 'labels': labels.tolist(),\n }\n\n jsonfilename='%s_.json'%(tpotname[0:-3])\n jsonfile=open(jsonfilename,'w')\n json.dump(data,jsonfile)\n jsonfile.close()\n\n # now edit the file and run it \n g=open(tpotname).read()\n g=g.replace(\"import numpy as np\", \"import numpy as np \\nimport json, pickle\")\n g=g.replace(\"tpot_data = pd.read_csv(\\'PATH/TO/DATA/FILE\\', sep=\\'COLUMN_SEPARATOR\\', dtype=np.float64)\",\"g=json.load(open('%s'))\\ntpot_data=g['labels']\"%(jsonfilename))\n g=g.replace(\"features = tpot_data.drop('target', axis=1).values\",\"features=g['data']\\n\")\n g=g.replace(\"tpot_data['target'].values\", \"tpot_data\")\n g=g.replace(\"results = exported_pipeline.predict(testing_features)\", \"print('saving classifier to disk')\\nf=open('%s','wb')\\npickle.dump(exported_pipeline,f)\\nf.close()\"%(jsonfilename[0:-6]+'.pickle'))\n g1=g.find('exported_pipeline = ')\n g2=g.find('exported_pipeline.fit(training_features, training_target)')\n modeltype=g[g1:g2]\n os.remove(tpotname)\n t=open(tpotname,'w')\n t.write(g)\n t.close()\n os.system('python3 %s'%(tpotname))\n\n # now write an accuracy label \n os.remove(jsonfilename)\n\n jsonfilename='%s.json'%(tpotname[0:-3])\n print('saving .JSON file (%s)'%(jsonfilename))\n jsonfile=open(jsonfilename,'w')\n if mtype in ['classification', 'c']:\n data={\n 'model name':jsonfilename[0:-5]+'.pickle',\n 'accuracy':accuracy,\n 'model type':'TPOTclassification_'+modeltype,\n }\n elif mtype in ['regression', 'r']:\n data={\n 'model name':jsonfilename[0:-5]+'.pickle',\n 'accuracy':accuracy,\n 'model type':'TPOTregression_'+modeltype,\n }\n\n json.dump(data,jsonfile)\n jsonfile.close()\n \nexcept: \n print('error, please put %s in %s'%(jsonfile, data_dir))\n print('note this can be done with train_audioclassify.py script')\n\n"
] |
[
[
"numpy.asarray",
"sklearn.model_selection.train_test_split"
]
] |
parth-couture-ai/RecommenderSystems
|
[
"0a585139de1b49d72511ce5a4a642bd427c1349a"
] |
[
"sequentialRec/neural/train.py"
] |
[
"#coding: utf-8\n'''\nAuthor: Weiping Song\nContact: songweiping@pku.edu.cn\n'''\n\nimport tensorflow as tf\nimport argparse\nimport numpy as np\nimport sys\nimport time\nimport math\n\nfrom .utils import *\nfrom .model import *\nfrom .sampler import *\n\nparser = argparse.ArgumentParser(description='Sequential or session-based recommendation')\nparser.add_argument('--model', type=str, default='tcn', help='sequential model: rnn/tcn/transformer. (default: tcn)')\nparser.add_argument('--batch_size', type=int, default=128, help='batch size (default: 128)')\nparser.add_argument('--seq_len', type=int, default=20, help='max sequence length (default: 20)')\nparser.add_argument('--dropout', type=float, default=0.2, help='dropout (default: 0.2)')\nparser.add_argument('--l2_reg', type=float, default=0.0, help='regularization scale (default: 0.0)')\nparser.add_argument('--clip', type=float, default=1., help='gradient clip (default: 1.)')\nparser.add_argument('--epochs', type=int, default=20, help='upper epoch limit (default: 20)')\nparser.add_argument('--lr', type=float, default=0.001, help='initial learning rate for Adam (default: 0.001)')\nparser.add_argument('--emsize', type=int, default=100, help='dimension of item embedding (default: 100)')\nparser.add_argument('--neg_size', type=int, default=1, help='size of negative samples (default: 10)')\nparser.add_argument('--worker', type=int, default=10, help='number of sampling workers (default: 10)')\nparser.add_argument('--nhid', type=int, default=100, help='number of hidden units (default: 100)')\nparser.add_argument('--levels', type=int, default=3, help='# of levels (default: 3)')\nparser.add_argument('--seed', type=int, default=1111, help='random seed (default: 1111)')\nparser.add_argument('--loss', type=str, default='ns', help='type of loss: ns/sampled_sm/full_sm (default: ns)')\nparser.add_argument('--data', type=str, default='gowalla', help='data set name (default: gowalla)')\nparser.add_argument('--log_interval', type=int, default=1e2, help='log interval (default: 1e2)')\nparser.add_argument('--eval_interval', type=int, default=1e3, help='eval/test interval (default: 1e3)')\n\n# ****************************** unique arguments for rnn model. *******************************************************\n# None\n\n# ***************************** unique arguemnts for tcn model.\nparser.add_argument('--ksize', type=int, default=3, help='kernel size (default: 100)')\n\n# ****************************** unique arguments for transformer model. 
*************************************************\nparser.add_argument('--num_blocks', type=int, default=3, help='num_blocks')\nparser.add_argument('--num_heads', type=int, default=2, help='num_heads')\nparser.add_argument('--pos_fixed', type=int, default=0, help='trainable positional embedding usually has better performance')\n\n\nargs = parser.parse_args()\ntf.set_random_seed(args.seed)\n\ntrain_data, val_data, test_data, n_items, n_users = data_generator(args)\n\ntrain_sampler = Sampler(\n data=train_data, \n n_items=n_items, \n n_users=n_users,\n batch_size=args.batch_size, \n max_len=args.seq_len,\n neg_size=args.neg_size,\n n_workers=args.worker,\n neg_method='rand')\n\nval_data = prepare_eval_test(val_data, batch_size=100, max_test_len= 20)\n\ncheckpoint_dir = '_'.join(['save', args.data, args.model, str(args.lr), str(args.l2_reg), str(args.emsize), str(args.dropout)])\n\nprint(args)\nprint ('#Item: ', n_items)\nprint ('#User: ', n_users)\n\nmodel = NeuralSeqRecommender(args, n_items, n_users)\n\nlr = args.lr\n\ndef evaluate(source, sess):\n total_hit_k = 0.0\n total_ndcg_k = 0.0\n count = 0.0\n for batch in source:\n feed_dict = {model.inp: batch[1], model.dropout: 0.}\n feed_dict[model.pos] = batch[2]\n hit, ndcg, n_target = sess.run([model.hit_at_k, model.ndcg_at_k, model.num_target], feed_dict=feed_dict)\n count += n_target\n total_hit_k += hit\n total_ndcg_k += ndcg\n\n val_hit = total_hit_k / count \n val_ndcg = total_ndcg_k / count\n\n return [val_hit, val_ndcg]\n\ndef main():\n global lr\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n init = tf.global_variables_initializer()\n sess.run(init)\n all_val_hit = [-1]\n early_stop_cn = 0\n step_count = 0\n train_loss_l = 0.\n start_time = time.time()\n print('Start training...')\n try:\n while True:\n cur_batch = train_sampler.next_batch()\n inp = np.array(cur_batch[1])\n feed_dict = {model.inp: inp, model.lr: lr, model.dropout: args.dropout}\n feed_dict[model.pos] = np.array(cur_batch[2])\n feed_dict[model.neg] = np.array(cur_batch[3])\n _, train_loss = sess.run([model.train_op, model.loss], feed_dict=feed_dict)\n train_loss_l += train_loss\n step_count += 1\n if step_count % args.log_interval == 0:\n cur_loss = train_loss_l / args.log_interval\n elapsed = time.time() - start_time\n print('| Totol step {:10d} | lr {:02.5f} | ms/batch {:5.2f} | loss {:5.3f}'.format(\n step_count, lr, elapsed * 1000 / args.log_interval, cur_loss))\n sys.stdout.flush()\n train_loss_l = 0.\n start_time = time.time()\n\n if step_count % args.eval_interval == 0:\n val_hit, val_ndcg = evaluate(val_data, sess)\n all_val_hit.append(val_hit)\n print('-' * 90)\n print('| End of step {:10d} | valid hit@20 {:8.5f} | valid ndcg@20 {:8.5f}'.format(\n step_count, val_hit, val_ndcg))\n print('=' * 90)\n sys.stdout.flush()\n\n if all_val_hit[-1] <= all_val_hit[-2]:\n lr /= 2.\n lr = max(lr, 1e-6)\n early_stop_cn += 1\n else:\n early_stop_cn = 0\n model.saver.save(sess, checkpoint_dir + '/model.ckpt')\n if early_stop_cn == 3:\n print('Validation hit decreases in three consecutive epochs. Stop Training!')\n sys.stdout.flush()\n break\n start_time = time.time()\n except Exception as e:\n print(str(e))\n train_sampler.close()\n exit(1)\n train_sampler.close()\n print('Done')\n\nif __name__ == '__main__':\n if not os.path.exists(checkpoint_dir):\n os.mkdir(checkpoint_dir)\n main()\n"
] |
[
[
"tensorflow.ConfigProto",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"tensorflow.set_random_seed",
"numpy.array"
]
] |
lionfish0/hypercuboid_integrator
|
[
"beabf2814cbe0020f3c6a52c53f0be720f868de8"
] |
[
"hypercuboid_integrator/__init__.py"
] |
[
"import numpy as np\n#import numba as nb\n\n#@nb.jit(nb.typeof(([[1.0,1.0],[1.0,1.0]],[True,False]))(nb.double,nb.double,nb.double,nb.double))\ndef split1d(Astart,Aend,Bstart,Bend):\n \"\"\"For a 1-d pair of lines A and B:\n given start and end locations,\n produce new set of points for A, split by B.\n \n For example:\n split1d(1,9,3,5)\n splits the line from 1 to 9 into 3 pieces,\n 1 to 3\n 3 to 5\n and\n 5 to 9\n it returns these three pairs and a list of whether\n those points were inside B.\n In this case the list is [False,True,False]\n \"\"\"\n #five options\n #1 A and B don't intersect. This shouldn't happen\n if (Astart>=Bend) or (Bstart>=Aend):\n #Not sure what to do\n assert False\n #2 B starts first, and ends inside A:\n if (Astart>=Bstart) and (Bend<Aend):\n #return Astart-Bend Bend-Aend\n return [[Astart,Bend],[Bend,Aend]], [True, False]\n #3 B extends beyond limits of A in both directions:\n if (Bstart<=Astart) and (Bend>=Aend):\n #return A\n return [[Astart,Aend]], [True]\n #4 B starts in A and finishes after A\n if (Astart<Bstart) and (Bend>=Aend):\n #return Astart-Bstart Bstart-Aend\n return [[Astart,Bstart],[Bstart,Aend]], [False,True]\n #5 B is inside A\n if (Astart<Bstart) and (Bend<Aend):\n #return Astart-Bstart Bstart-Bend Bend-Aend\n return [[Astart,Bstart],[Bstart,Bend],[Bend,Aend]], [False,True,False]\n\n#@nb.jit(nb.typeof(([[1.0,1.0],[1.0,1.0]],[True,False]))(nb.double,nb.double,nb.double,nb.double))\ndef splitslice(Astart,Aend,Bstart,Bend,d):\n \"\"\"given start and end locations, produce new set of points for A split by B\n just in dimension d.\"\"\"\n chunks, inside = split1d(Astart[d],Aend[d],Bstart[d],Bend[d])\n res = []\n for chunk in chunks:\n newstart = Astart.copy()\n newend = Aend.copy()\n newstart[d] = chunk[0]\n newend[d] = chunk[1]\n res.append([newstart,newend])\n res\n return res, inside\n\ndef split(Astart,Aend,Bstart,Bend):\n \"\"\"Given a hypercuboid, with the starting corner at Astart\n and ending corner at Aend. 
We want to split this hypercuboid\n into smaller hypercuboids, such that the edges of hypercubiod\n B (defined by Bstart and Bend) do not intersect any of the\n new hypercuboid edges.\n \n For example: split a hypercube, A, that extends from [0,1] to [2,3]\n with another, B, that extends from [1,1],[3,2]:\n \n split([0,1],[2,3],[1,1],[3,2])\n \n \n | ___ | | ___c\n ||A | | B___ ||a|_|\n ||___| slice with | |___| gives ||_|_|b\n |_______ |______ |_______\n 0 1 2\n results in three cuboids at:\n a [[0, 1], [1, 3]]\n b [[1, 1], [2, 2]]\n c [[1, 2], [2, 3]]\n \"\"\"\n \n splits = [[Astart,Aend]]\n \n for d in range(len(Astart)):\n newsplits = []\n insides = []\n for s in splits:\n if np.all(s[1]>Bstart) and np.all(Bend>s[0]):\n splitslices, inside = splitslice(s[0],s[1],Bstart,Bend,d)\n newsplits.extend(splitslices)\n insides.extend(inside)\n else:\n newsplits.extend([s])\n insides.append(False)\n splits = newsplits\n return splits, insides\n\ndef sumovercuboids(inputB, inputpeakgradslist,d):\n \"\"\"\n Given a list of hypercuboids (inputB) and their gradients (inputpeakgradslist), what are the integrals over\n the d dimension of them?\n \n For example:\n \n 4 ______________\n 3 | | 4 | |\n 2 | 1 |____| 2 |\n 1 | | 3 | |\n 0 |____|____|____|\n 0 1 2 3 4 5 6\n \n - integrating along the (0) x-axis has two results, one between 0 and 2 (2+6+4=12), one between 2 and 4 (2+8+4=14).\n - integrating along the (1) y-axis has three results:\n - one between 0 and 2 of 4\n - one between 2 and 4 of 8+6 = 12\n - one between 4 and 6 of 8\n \n Demonstrating this with the method.\n the inputB array is an array of arrays, each one of the smaller arrays has the start and end locations\n of the cuboid for each dimension, for example a 4d hypercube from the origin to location (2,2,2,2) would be:\n np.array([[0,2],[0,2],[0,2],[0,2]])\n below we describe our space with a series of rectangles (as drawn above):\n \n inputB = np.array([np.array([[0,2],[0,6]]),np.array([[4,6],[0,6]]),np.array([[2,4],[0,3]]),np.array([[2,4],[3,6]])])\n inputpeakgradslist = np.array([[1],[2],[3],[4]])\n \n seglist = sumovercuboids(inputB,inputpeakgradslist,0)\n \n If we integrate along dimension 0:\n [{'grad': 0, 'int': 12, 'patch': [array([0]), array([3])]},\n {'grad': 0, 'int': 14, 'patch': [array([3]), array([6])]}]\n \n If we integrate along dimension 1, then we have three results\n [{'grad': 0, 'int': 6, 'patch': [array([0]), array([2])]},\n {'grad': 0, 'int': 21, 'patch': [array([2]), array([4])]},\n {'grad': 0, 'int': 12, 'patch': [array([4]), array([6])]}]\n \n \n A more complex example:\n inputB = np.array([np.array([[0,6],[0,4],[0,1]]),np.array([[0,2],[0,4],[1,3]]),np.array([[0,2],[0,4],[3,4]]),\n np.array([[2,6],[0,4],[1,2]]),np.array([[2,4],[0,4],[2,4]]),np.array([[4,6],[0,2],[2,4]]),\n np.array([[4,6],[2,4],[2,4]])])\n inputpeakgradslist = np.array([[2],[3],[1],[4],[2],[3],[5]])\n \n [{'grad': 0, 'int': 12, 'patch': [array([0, 0]), array([4, 1])]},\n {'grad': 0, 'int': 22, 'patch': [array([0, 1]), array([4, 2])]},\n {'grad': 0, 'int': 16, 'patch': [array([0, 2]), array([2, 3])]},\n {'grad': 0, 'int': 20, 'patch': [array([2, 2]), array([4, 3])]},\n {'grad': 0, 'int': 12, 'patch': [array([0, 3]), array([2, 4])]},\n {'grad': 0, 'int': 16, 'patch': [array([2, 3]), array([4, 4])]}]\n \"\"\"\n \n #swap dimensions so we use the dimension specified as dimension zero - to make following code easier\n #(we always then integrate along the zeroth dimension)\n permutedB = inputB.copy() #make a copy so we don't screw up the copy given to 
us.\n temp = permutedB[:,d,:].copy()\n permutedB[:,d,:] = permutedB[:,0,:]\n permutedB[:,0,:] = temp\n \n #we need to add a final infinitely thin cuboid at the end\n #of the dimension we're summing over to tally up all the\n #results. This final cuboid has a gradient of zero, although\n #this doesn't really matter.\n outerbox = []\n for dim in range(permutedB.shape[1]):\n outerbox.append([np.min(permutedB[:,dim,0]),np.max(permutedB[:,dim,1])])\n outerbox = np.array(outerbox)\n Bendcuboid = outerbox.copy()\n Bendcuboid[0,:] = Bendcuboid[0,-1]\n B = np.r_[permutedB,[Bendcuboid]]\n peakgradslist = np.r_[inputpeakgradslist,np.array([[0]])]\n initialpatch = np.delete(outerbox,0,0).T\n seglist = []\n seglist.append({'patch':initialpatch,'grad':0,'int':0})\n laststart = 0\n orderbystart = np.argsort([b[0,0] for b in B])\n for b,p in zip(B[orderbystart],peakgradslist[orderbystart,0]):\n newlist = []\n for segdata in seglist:\n seg = segdata['patch']\n oldint = segdata['int']\n oldgrad = segdata['grad']\n delta = b[0,0]-laststart\n newint = oldint+delta*oldgrad\n newsegs, insides = split(seg[0],seg[1],b[1:,0],b[1:,1])\n for s,inside in zip(newsegs,insides):\n if inside:\n grad = p\n else:\n grad = oldgrad\n newlist.append({'patch':s,'grad':grad,'int':newint})\n laststart = b[0,0]\n seglist = newlist\n return seglist\n"
] |
[
[
"numpy.min",
"numpy.all",
"numpy.max",
"numpy.delete",
"numpy.argsort",
"numpy.array"
]
] |
soumye/recalltraces
|
[
"3d6af8692faf4bd226dafdc02c22cf0a22e5d640"
] |
[
"a2c_agent.py"
] |
[
"import numpy as np\nimport ipdb\nimport torch\nfrom models import Net\nfrom datetime import datetime\nfrom utils import select_actions, evaluate_actions, discount_with_dones\nimport os\nfrom sil_module import sil_module\nfrom bw_module import bw_module\nimport copy\n\nclass a2c_agent:\n def __init__(self, envs, args):\n self.envs = envs\n self.args = args\n # define the network. Gives V(s) and π(a|S)\n self.net = Net(self.envs.action_space.n)\n if self.args.cuda:\n self.net.cuda()\n # define the optimizer\n self.optimizer = torch.optim.RMSprop(self.net.parameters(), lr=self.args.lr, eps=self.args.eps, alpha=self.args.alpha)\n if not os.path.exists(self.args.save_dir):\n os.mkdir(self.args.save_dir)\n # check the saved path for envs..\n self.model_path = self.args.save_dir + self.args.env_name + '/'\n if not os.path.exists(self.model_path):\n os.mkdir(self.model_path)\n # get the obs..\n # the shape of the observation batch : 80x84x84x4. But we feed into the NNS as 4x84x84\n self.batch_ob_shape = (self.args.num_processes * self.args.nsteps,) + self.envs.observation_space.shape\n # Initialize observation seen by the environment. Dim : # of processes(16) X observation_space.shape(84x84x4)\n self.obs = np.zeros((self.args.num_processes,) + self.envs.observation_space.shape, dtype=self.envs.observation_space.dtype.name)\n # env already encapsulated the multiple processes\n self.obs[:] = self.envs.reset()\n # track completed processes\n self.dones = [False for _ in range(self.args.num_processes)]\n\n def learn(self):\n \"\"\"\n Train the Agent.\n \"\"\"\n if self.args.model_type == 'sil':\n sil_model = sil_module(self.net, self.args, self.optimizer)\n elif self.args.model_type == 'bw':\n bw_model = bw_module(self.net, self.args, self.optimizer, self.envs.action_space.n, self.envs.observation_space.shape)\n num_updates = self.args.total_frames // (self.args.num_processes * self.args.nsteps)\n # get the reward to calculate other information\n episode_rewards = torch.zeros([self.args.num_processes, 1])\n final_rewards = torch.zeros([self.args.num_processes, 1])\n # start to update\n for update in range(num_updates):\n # mb_obs, mb_rewards, mb_actions, mb_dones, mb_obs_next = [], [], [], [], []\n mb_obs, mb_rewards, mb_actions, mb_dones = [], [], [], []\n for step in range(self.args.nsteps):\n # Executing the action after seeing the observation\n with torch.no_grad():\n input_tensor = self._get_tensors(self.obs)\n _, pi = self.net(input_tensor)\n # select actions\n actions = select_actions(pi)\n cpu_actions = actions.squeeze(1).cpu().numpy()\n # step in gym batched environment\n # print(\"step in env\")\n obs, rewards, dones, _ = self.envs.step(cpu_actions)\n # print(\"end in env\")\n # start to store the information\n mb_obs.append(np.copy(self.obs))\n mb_actions.append(cpu_actions)\n mb_dones.append(self.dones)\n # process rewards...\n raw_rewards = copy.deepcopy(rewards)\n rewards = np.sign(rewards)\n # start to store the rewards\n mb_rewards.append(rewards)\n self.dones = dones\n for n, done in enumerate(dones):\n if done:\n self.obs[n] = self.obs[n]*0\n self.obs = obs\n\n if self.args.model_type == 'sil':\n # Update the Buffers after doing the step\n sil_model.step(input_tensor.detach().cpu().numpy(), cpu_actions, raw_rewards, dones)\n elif self.args.model_type == 'bw':\n obs_next = self._get_tensors(self.obs).detach().cpu().numpy()\n bw_model.step(input_tensor.detach().cpu().numpy(), cpu_actions, raw_rewards, dones, obs_next)\n\n raw_rewards = 
torch.from_numpy(np.expand_dims(np.stack(raw_rewards), 1)).float()\n episode_rewards += raw_rewards\n # get the masks\n masks = torch.FloatTensor([[0.0] if done_ else [1.0] for done_ in dones])\n final_rewards *= masks\n final_rewards += (1 - masks) * episode_rewards\n episode_rewards *= masks\n # update the obs\n mb_dones.append(self.dones)\n # process the rollouts\n # 5x16xobs_shape to 80 x obs_shape\n mb_obs = np.asarray(mb_obs, dtype=np.uint8).swapaxes(1, 0).reshape(self.batch_ob_shape)\n # mb_obs_next = np.asarray(mb_obs_next, dtype=np.uint8).swapaxes(1, 0).reshape(self.batch_ob_shape)\n # 5x16 To 16x5\n mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(1, 0)\n mb_actions = np.asarray(mb_actions, dtype=np.int32).swapaxes(1, 0)\n mb_dones = np.asarray(mb_dones, dtype=np.bool).swapaxes(1, 0)\n mb_masks = mb_dones[:, :-1]\n mb_dones = mb_dones[:, 1:]\n with torch.no_grad():\n input_tensor = self._get_tensors(self.obs)\n last_values, _ = self.net(input_tensor)\n # compute returns via 5-step lookahead.\n for n, (rewards, done_, value) in enumerate(zip(mb_rewards, mb_dones, last_values.detach().cpu().numpy().squeeze())):\n rewards = rewards.tolist()\n done_ = done_.tolist()\n if done_[-1] == 0:\n # Passed in [value] for the estimated V(curr_obs) in TD Learning\n rewards = discount_with_dones(rewards+[value], done_ + [0], self.args.gamma)[:-1]\n else:\n rewards = discount_with_dones(rewards, done_, self.args.gamma)\n mb_rewards[n] = rewards\n # Convert 16 x 5 points to 80 flat points\n mb_rewards = mb_rewards.flatten()\n mb_actions = mb_actions.flatten()\n # start to update network. Doing A2C Update\n # print(\"doing a2c update\")\n vl, al, ent = self._update_network(mb_obs, mb_rewards, mb_actions)\n\n # start to update the sil_module or backtracking model\n if self.args.model_type == 'sil':\n mean_adv, num_samples = sil_model.train_sil_model()\n elif self.args.model_type == 'bw':\n # print(\"train bw model\")\n l_actgen, l_stategen = bw_model.train_bw_model(update)\n # print(\"train imitation\")\n l_imi = bw_model.train_imitation(update)\n\n if update % self.args.log_interval == 0:\n if self.args.model_type == 'sil':\n print('[{}] Update: {}/{}, Frames: {}, Rewards: {:.2f}, VL: {:.3f}, PL: {:.3f},' \\\n 'Ent: {:.2f}, Min: {}, Max:{}, BR:{}, E:{}, VS:{}, S:{}'.format(\\\n datetime.now(), update, num_updates, (update+1)*(self.args.num_processes * self.args.nsteps),\\\n final_rewards.mean(), vl, al, ent, final_rewards.min(), final_rewards.max(), sil_model.get_best_reward(), \\\n sil_model.num_episodes(), num_samples, sil_model.num_steps()))\n elif (self.args.model_type == 'bw') and (l_actgen and l_stategen and l_imi) is not None :\n print('[{}] Update: {}/{}, Frames: {}, Rewards: {:.2f}, VL: {:.4f}, PL: {:.4f},' \\\n 'Ent: {:.2f}, Min: {}, Max:{}, BR:{}, E:{}, S:{}, AG:{:.4f} , SG:{:.4f}, IMI:{:.4f}'.format(\\\n datetime.now(), update, num_updates, (update+1)*(self.args.num_processes * self.args.nsteps),\\\n final_rewards.mean(), vl, al, ent, final_rewards.min(), final_rewards.max(), bw_model.get_best_reward(), \\\n bw_model.num_episodes(), bw_model.num_steps(), l_actgen, l_stategen, l_imi))\n else:\n print('[{}] Update: {}/{}, Frames: {}, Rewards: {:.2f}, VL: {:.3f}, PL: {:.3f},' \\\n 'Ent: {:.2f}, Min: {}, Max:{}'.format(\\\n datetime.now(), update, num_updates, (update+1)*(self.args.num_processes * self.args.nsteps),\\\n final_rewards.mean(), vl, al, ent, final_rewards.min(), final_rewards.max()))\n torch.save(self.net.state_dict(), self.model_path + 'model.pt')\n\n def 
_update_network(self, obs, returns, actions):\n \"\"\"\n Learning the Policy Network using Entropy Regularized A2C.\n \"\"\"\n # evaluate the actions\n input_tensor = self._get_tensors(obs)\n values, pi = self.net(input_tensor)\n # define the tensor of actions, returns\n # convert to 2D tensor of 80x1\n returns = torch.tensor(returns, dtype=torch.float32).unsqueeze(1)\n actions = torch.tensor(actions, dtype=torch.int64).unsqueeze(1)\n if self.args.cuda:\n returns = returns.cuda()\n actions = actions.cuda()\n # evaluate actions\n action_log_probs, dist_entropy = evaluate_actions(pi, actions)\n # calculate advantages...\n advantages = returns - values\n # get the value loss\n value_loss = advantages.pow(2).mean()\n # get the action loss. We detach advantages to reduce to standard PG form upon diff\n action_loss = -(advantages.detach() * action_log_probs).mean()\n # total loss\n total_loss = action_loss + self.args.value_loss_coef * value_loss - self.args.entropy_coef * dist_entropy\n # start to update\n self.optimizer.zero_grad()\n total_loss.backward()\n torch.nn.utils.clip_grad_norm_(self.net.parameters(), self.args.max_grad_norm)\n self.optimizer.step()\n return value_loss.item(), action_loss.item(), dist_entropy.item()\n\n def _get_tensors(self, obs):\n \"\"\"\n Get the input tensors...\n \"\"\"\n input_tensor = torch.tensor(np.transpose(obs, (0, 3, 1, 2)), dtype=torch.float32)\n if self.args.cuda:\n input_tensor = input_tensor.cuda()\n return input_tensor\n"
] |
[
[
"torch.zeros",
"numpy.asarray",
"numpy.stack",
"torch.tensor",
"numpy.sign",
"numpy.copy",
"torch.FloatTensor",
"torch.no_grad",
"numpy.transpose",
"numpy.zeros"
]
] |
ganileni/py_falcon
|
[
"a7e187c0140076defee1e89c678ffc96c032c4f8"
] |
[
"falcon/test/test_autoEngineShutdown.py"
] |
[
"from unittest import TestCase\nimport numpy as np\n\n# TODO -- generalize to a MATLAB function that is not FALCON\nFALCON_DIR = '../'\ntest_matrix = np.loadtxt(FALCON_DIR + 'test.csv', delimiter=',')\n\n\nclass TestAutoEngineShutdown(TestCase):\n def test_working(self):\n import falcon\n # default engine should be off\n self.assertFalse(falcon.default_engine.is_started)\n from falcon import AutoEngineShutdown, nested_test\n with AutoEngineShutdown():\n # should still be off\n self.assertFalse(falcon.default_engine.is_started)\n result = nested_test(test_matrix)\n # call should have turned engine on\n self.assertTrue(falcon.default_engine.is_started)\n # __exit__ should have turned engine off\n self.assertFalse(falcon.default_engine.is_started)\n\n def test_noconflict(self):\n \"\"\"See if the manual working mode creates conflicts.\"\"\"\n import falcon\n # default engine should be off\n self.assertFalse(falcon.default_engine.is_started)\n from falcon import AutoEngineShutdown, nested_test\n result = nested_test(test_matrix)\n # engine should be on\n self.assertTrue(falcon.default_engine.is_started)\n falcon.default_engine.shutdown_engine()\n # engine should be off again\n self.assertFalse(falcon.default_engine.is_started)\n with AutoEngineShutdown():\n # should still be off\n self.assertFalse(falcon.default_engine.is_started)\n result = nested_test(test_matrix)\n # call should have turned engine on\n self.assertTrue(falcon.default_engine.is_started)\n # __exit__ should have turned engine off\n self.assertFalse(falcon.default_engine.is_started)\n"
] |
[
[
"numpy.loadtxt"
]
] |
anhquannguyen21/Image-Processing
|
[
"63ad3f475f553bd9283b287e9b3772f41f493c79"
] |
[
"Source/LaplaceFilter.py"
] |
[
"import numpy as np\r\nimport cv2\r\n\r\nimg = cv2.imread('images/butterfly.jpg', cv2.IMREAD_GRAYSCALE)\r\nimg_out = img.copy()\r\n\r\nheight = img.shape[0]\r\nwidth = img.shape[1]\r\n\r\nlaplace = (1.0/16) * np.array(\r\n [[0, 0, -1, 0, 0],\r\n [0, -1, -2, -1, 0],\r\n [-1, -2, 16, -2, -1],\r\n [0, -1, -2, -1, 0],\r\n [0, 0, -1, 0, 0]])\r\nsum(sum(laplace))\r\n\r\nfor i in np.arange(2, height-2):\r\n for j in np.arange(2, width-2):\r\n sum = 0\r\n for k in np.arange(-2, 3):\r\n for l in np.arange(-2, 3):\r\n a = img.item(i+k, j+l)\r\n w = laplace[2+k, 2+l]\r\n sum = sum + (w * a)\r\n b = sum\r\n img_out.itemset((i,j), b)\r\n\r\ncv2.imwrite('images/filter_laplace.jpg', img_out)\r\n\r\ncv2.imshow('image',img_out)\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()"
] |
[
[
"numpy.arange",
"numpy.array"
]
] |
MindMantraSIH/paathshaala
|
[
"28fcee05f49e7b5dec734d6b9c46a5630e687c5d"
] |
[
"Analytics/dashboard.py"
] |
[
"import pandas as pd\nimport numpy as np\nimport seaborn as sns\nfrom matplotlib import pyplot as plt\n\ndef analytics(request):\n df = pd.read_csv('/content/Parents Feedback (Responses) - Form Responses 1.csv')\n anx = df[df['Anxiety and pressure felt by students during exams'] > 3]\n number_of_anxious = anx['Anxiety and pressure felt by students during exams'].count()\n soc = df[df['How socially active are your children ?'] < 3]\n nuumber_of_soc_active = soc['How socially active are your children ?'].count()\n self_conf = df[df['How self confident are your children ?'] < 3]\n number_of_low_confid = self_conf['How self confident are your children ?'].count()\n sub_col = ['How satisfied are you with school ', 'Schools emphasis on practical learning',\n 'Curriculums emphasis on life and social skills']\n box_df = df['How satisfied are you with school ']\n\n to_plot = {'Number of Anxious Students': number_of_anxious, 'Less Socially Active': nuumber_of_soc_active,\n 'Less Confident': number_of_low_confid}\n labels = []\n sizes = []\n for x, y in to_plot.items():\n labels.append(x)\n sizes.append(y)\n fig = plt.figure(figsize=(28, 20))\n ax1 = plt.subplot2grid((2, 2), (0, 0))\n box_df.plot.box()\n ax1 = plt.subplot2grid((2, 2), (0, 1))\n plt.pie(sizes, labels=labels)\n plt.legend(loc='upper left')\n ax1 = plt.subplot2grid((2, 2), (1, 0))\n sns.countplot(df['Steps taken to spread awareness about mental health'])\n ax1 = plt.subplot2grid((2, 2), (1, 1))\n sns.countplot(df['Steps taken to spread awareness about Physical health'])\n plt.show()"
] |
[
[
"matplotlib.pyplot.legend",
"pandas.read_csv",
"matplotlib.pyplot.pie",
"matplotlib.pyplot.subplot2grid",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
RaghavaDhanya/SimSwap
|
[
"9dc80e258426bbf995d2afe1ee6ea864b8f4b4ea"
] |
[
"util/videoswap_specific.py"
] |
[
"import os \nimport cv2\nimport glob\nimport torch\nimport shutil\nimport numpy as np\nfrom tqdm import tqdm\nfrom util.reverse2original import reverse2wholeimage\nimport moviepy.editor as mp\nfrom moviepy.editor import AudioFileClip, VideoFileClip \nfrom moviepy.video.io.ImageSequenceClip import ImageSequenceClip\nimport time\nfrom util.add_watermark import watermark_image\nfrom util.norm import SpecificNorm\nimport torch.nn.functional as F\n\ndef _totensor(array):\n tensor = torch.from_numpy(array)\n img = tensor.transpose(0, 1).transpose(0, 2).contiguous()\n return img.float().div(255)\n\ndef video_swap(video_path, id_vetor,specific_person_id_nonorm,id_thres, swap_model, detect_model, save_path, temp_results_dir='./temp_results', crop_size=224, no_simswaplogo = False):\n video_forcheck = VideoFileClip(video_path)\n if video_forcheck.audio is None:\n no_audio = True\n else:\n no_audio = False\n\n del video_forcheck\n\n if not no_audio:\n video_audio_clip = AudioFileClip(video_path)\n\n video = cv2.VideoCapture(video_path)\n logoclass = watermark_image('./simswaplogo/simswaplogo.png')\n ret = True\n frame_index = 0\n\n frame_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT))\n\n # video_WIDTH = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))\n\n # video_HEIGHT = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))\n \n fps = video.get(cv2.CAP_PROP_FPS)\n if os.path.exists(temp_results_dir):\n shutil.rmtree(temp_results_dir)\n\n spNorm =SpecificNorm()\n mse = torch.nn.MSELoss().cuda()\n\n # while ret:\n for frame_index in tqdm(range(frame_count)): \n ret, frame = video.read()\n if ret:\n detect_results = detect_model.get(frame,crop_size)\n\n if detect_results is not None:\n # print(frame_index)\n if not os.path.exists(temp_results_dir):\n os.mkdir(temp_results_dir)\n frame_align_crop_list = detect_results[0]\n frame_mat_list = detect_results[1]\n\n id_compare_values = [] \n frame_align_crop_tenor_list = []\n for frame_align_crop in frame_align_crop_list:\n\n # BGR TO RGB\n # frame_align_crop_RGB = frame_align_crop[...,::-1]\n\n frame_align_crop_tenor = _totensor(cv2.cvtColor(frame_align_crop,cv2.COLOR_BGR2RGB))[None,...].cuda()\n\n frame_align_crop_tenor_arcnorm = spNorm(frame_align_crop_tenor)\n frame_align_crop_tenor_arcnorm_downsample = F.interpolate(frame_align_crop_tenor_arcnorm, scale_factor=0.5)\n frame_align_crop_crop_id_nonorm = swap_model.netArc(frame_align_crop_tenor_arcnorm_downsample)\n\n id_compare_values.append(mse(frame_align_crop_crop_id_nonorm,specific_person_id_nonorm).detach().cpu().numpy())\n frame_align_crop_tenor_list.append(frame_align_crop_tenor)\n id_compare_values_array = np.array(id_compare_values)\n min_index = np.argmin(id_compare_values_array)\n min_value = id_compare_values_array[min_index]\n if min_value < id_thres:\n swap_result = swap_model(None, frame_align_crop_tenor_list[min_index], id_vetor, None, True)[0]\n \n reverse2wholeimage([swap_result], [frame_mat_list[min_index]], crop_size, frame, logoclass,os.path.join(temp_results_dir, 'frame_{:0>7d}.jpg'.format(frame_index)),no_simswaplogo)\n else:\n if not os.path.exists(temp_results_dir):\n os.mkdir(temp_results_dir)\n frame = frame.astype(np.uint8)\n if not no_simswaplogo:\n frame = logoclass.apply_frames(frame)\n cv2.imwrite(os.path.join(temp_results_dir, 'frame_{:0>7d}.jpg'.format(frame_index)), frame)\n\n else:\n if not os.path.exists(temp_results_dir):\n os.mkdir(temp_results_dir)\n frame = frame.astype(np.uint8)\n if not no_simswaplogo:\n frame = logoclass.apply_frames(frame)\n 
cv2.imwrite(os.path.join(temp_results_dir, 'frame_{:0>7d}.jpg'.format(frame_index)), frame)\n else:\n break\n\n video.release()\n\n # image_filename_list = []\n path = os.path.join(temp_results_dir,'*.jpg')\n image_filenames = sorted(glob.glob(path))\n\n clips = ImageSequenceClip(image_filenames,fps = fps)\n\n if not no_audio:\n clips = clips.set_audio(video_audio_clip)\n\n\n clips.write_videofile(save_path)\n\n"
] |
[
[
"torch.from_numpy",
"numpy.argmin",
"torch.nn.functional.interpolate",
"numpy.array",
"torch.nn.MSELoss"
]
] |
diixo/examples
|
[
"40ce1758fd13929a5f741860a9f50f0a84309b6c"
] |
[
"courses/udacity_deep_learning/2_fullyconnected_hidden_adam.py"
] |
[
"# These are all the modules we'll be using later. Make sure you can import them\n# before proceeding further.\nfrom __future__ import print_function\nimport numpy as np\nimport tensorflow as tf\nfrom six.moves import cPickle as pickle\nfrom six.moves import range\n\npickle_file = 'notMNIST.pickle'\n\nwith open(pickle_file, 'rb') as f:\n save = pickle.load(f, encoding='latin1')\n train_dataset = save['train_dataset']\n train_labels = save['train_labels']\n valid_dataset = save['valid_dataset']\n valid_labels = save['valid_labels']\n test_dataset = save['test_dataset']\n test_labels = save['test_labels']\n\n #train_dataset = train_dataset > 0.1\n #train_dataset = train_dataset.astype(float)\n #valid_dataset = valid_dataset > 0.1\n #valid_dataset = valid_dataset.astype(float)\n\n del save # hint to help gc free up memory\n print('Training set', train_dataset.shape, train_labels.shape)\n print('Validation set', valid_dataset.shape, valid_labels.shape)\n print('Test set', test_dataset.shape, test_labels.shape)\n\nimage_size = 28\nnum_labels = 10\n\ndef reformat(dataset, labels):\n dataset = dataset.reshape((-1, image_size * image_size)).astype(np.float32)\n # Map 0 to [1.0, 0.0, 0.0 ...], 1 to [0.0, 1.0, 0.0 ...]\n labels = (np.arange(num_labels) == labels[:,None]).astype(np.float32)\n return dataset, labels\n\ntrain_dataset, train_labels = reformat(train_dataset, train_labels)\nvalid_dataset, valid_labels = reformat(valid_dataset, valid_labels)\ntest_dataset, test_labels = reformat(test_dataset, test_labels)\nprint('Training set', train_dataset.shape, train_labels.shape)\nprint('Validation set', valid_dataset.shape, valid_labels.shape)\nprint('Test set', test_dataset.shape, test_labels.shape)\n\n# With gradient descent training, even this much data is prohibitive.\n# Subset the training data for faster turnaround.\ntrain_subset = 20000\n\nHIDDEN_NODES = 1024\nLEARNING_RATE = 0.0005\n\nprint('LearningRate:', LEARNING_RATE)\n\ngraph = tf.Graph()\nwith graph.as_default():\n # Input data.\n # Load the training, validation and test data into constants that are\n # attached to the graph.\n tf_train_dataset = tf.constant(train_dataset[:train_subset, :])\n tf_train_labels = tf.constant(train_labels[:train_subset])\n tf_valid_dataset = tf.constant(valid_dataset)\n tf_test_dataset = tf.constant(test_dataset)\n\n # Variables.\n # These are the parameters that we are going to be training. The weight\n # matrix will be initialized using random values following a (truncated)\n # normal distribution. The biases get initialized to zero.\n WEIGHTS = tf.Variable(tf.truncated_normal([HIDDEN_NODES, num_labels], stddev=0.05))\n BIASES = tf.Variable(tf.zeros([num_labels]))\n\n HIDDEN_WEIGHTS = tf.Variable(tf.truncated_normal([image_size * image_size, HIDDEN_NODES], stddev=0.05))\n HIDDEN_BIASES = tf.Variable(tf.zeros([HIDDEN_NODES]))\n\n \"\"\"\n Compute the logits WX + b and then apply D(S(WX + b), L) on them for the hidden layer\n The relu is applied on the hidden layer nodes only\n \"\"\"\n TRAIN_HIDDEN_RELU = tf.nn.relu(tf.matmul(tf_train_dataset, HIDDEN_WEIGHTS) + HIDDEN_BIASES)\n VALID_HIDDEN_RELU = tf.nn.relu(tf.matmul(tf_valid_dataset, HIDDEN_WEIGHTS) + HIDDEN_BIASES)\n TEST_HIDDEN_RELU = tf.nn.relu(tf.matmul(tf_test_dataset, HIDDEN_WEIGHTS) + HIDDEN_BIASES)\n\n # Training computation.\n # We multiply the inputs with the weight matrix, and add biases. We compute\n # the softmax and cross-entropy (it's one operation in TensorFlow, because\n # it's very common, and it can be optimized). 
We take the average of this\n # cross-entropy across all training examples: that's our loss.\n TRAIN_LOGITS = tf.matmul(TRAIN_HIDDEN_RELU, WEIGHTS) + BIASES\n VALID_LOGITS = tf.matmul(VALID_HIDDEN_RELU, WEIGHTS) + BIASES\n TEST_LOGITS = tf.matmul(TEST_HIDDEN_RELU, WEIGHTS) + BIASES\n\n loss = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits_v2(labels=tf_train_labels, logits=TRAIN_LOGITS))\n\n # Optimizer.\n # We are going to find the minimum of this loss using gradient descent.\n optimizer = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE).minimize(loss)\n\n # Predictions for the training, validation, and test data.\n # These are not part of training, but merely here so that we can report\n # accuracy figures as we train.\n train_prediction = tf.nn.softmax(TRAIN_LOGITS)\n\n valid_prediction = tf.nn.softmax(VALID_LOGITS)\n test_prediction = tf.nn.softmax(TEST_LOGITS)\n\nnum_steps = 501\n\ndef accuracy(predictions, labels):\n return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))\n / predictions.shape[0])\n\nwith tf.Session(graph=graph) as session:\n # This is a one-time operation which ensures the parameters get initialized as\n # we described in the graph: random weights for the matrix, zeros for the\n # biases.\n tf.global_variables_initializer().run()\n print('Initialized')\n for step in range(num_steps):\n # Run the computations. We tell .run() that we want to run the optimizer,\n # and get the loss value and the training predictions returned as numpy\n # arrays.\n _, l, predictions = session.run([optimizer, loss, train_prediction])\n\n if (step % 100 == 0):\n print('Loss at step %d: %f' % (step, l))\n print('Training accuracy: %.1f%%' % accuracy(\n predictions, train_labels[:train_subset, :]))\n # Calling .eval() on valid_prediction is basically like calling run(), but\n # just to get that one numpy array. Note that it recomputes all its graph\n # dependencies.\n print('Validation accuracy: %.1f%%' % accuracy(valid_prediction.eval(), valid_labels))\n\n if (step % 100 == 0):\n print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels))\n\n################################################################################\n"
] |
[
[
"tensorflow.Graph",
"tensorflow.nn.softmax",
"tensorflow.constant",
"tensorflow.truncated_normal",
"tensorflow.matmul",
"tensorflow.zeros",
"numpy.arange",
"tensorflow.global_variables_initializer",
"numpy.argmax",
"tensorflow.Session",
"tensorflow.nn.softmax_cross_entropy_with_logits_v2",
"tensorflow.train.AdamOptimizer"
]
] |
zjzh/sparseml
|
[
"b92df81257d47eb5aab731bc9929da7339f34667",
"b92df81257d47eb5aab731bc9929da7339f34667"
] |
[
"src/sparseml/pytorch/optim/modifier_quantization.py",
"src/sparseml/transformers/utils/export.py"
] |
[
"# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nModifier for models through quantization aware training.\n\nPyTorch version must support quantization (>=1.2, ONNX export support introduced in 1.7)\n\"\"\"\n\n\nfrom typing import Any, Dict, List, NamedTuple, Optional, Union\n\nfrom torch.nn import Module\nfrom torch.optim.optimizer import Optimizer\n\n\ntry:\n from torch import quantization as torch_quantization\n from torch.nn import intrinsic as torch_intrinsic\nexcept Exception:\n torch_quantization = None\n torch_intrinsic = None\n\nfrom sparseml.optim import ModifierProp\nfrom sparseml.pytorch.optim.modifier import PyTorchModifierYAML, ScheduledModifier\nfrom sparseml.pytorch.utils import BaseLogger\nfrom sparseml.pytorch.utils.quantization import (\n add_quant_dequant,\n configure_module_default_qconfigs,\n configure_module_qat_wrappers,\n fuse_module_conv_bn_relus,\n get_qat_qconfig,\n prepare_embeddings_qat,\n remove_activation_qat_by_layer_name,\n)\n\n\n__all__ = [\n \"QuantizationModifier\",\n]\n\n_ModuleToQuantize = NamedTuple(\n \"_ModuleToQuantize\", [(\"name\", Optional[str]), (\"module\", Module)]\n)\n\n\n@PyTorchModifierYAML()\nclass QuantizationModifier(ScheduledModifier):\n \"\"\"\n Enables quantization aware training (QAT) for a given module or its submodules\n After the start epoch, the specified module(s)' forward pass will emulate\n quantized execution and the modifier will be enabled until training is completed.\n\n | Sample yaml:\n | !QuantizationModifier\n | start_epoch: 0.0\n | submodules: ['blocks.0', 'blocks.2']\n | model_fuse_fn_name: 'fuse_module'\n | disable_quantization_observer_epoch: 2.0\n | freeze_bn_stats_epoch: 3.0\n | reduce_range: False\n\n :param start_epoch: The epoch to start the modifier at\n :param submodules: List of submodule names to perform QAT on. Leave None to quantize\n entire model. Default is None\n :param model_fuse_fn_name: Name of model function to fuse the model in place prior\n to performing QAT. Set as 'no_fuse' to skip module fusing. Leave None to use\n the default function `sparseml.pytorch.utils.fuse_module_conv_bn_relus`.\n Default is None\n :param disable_quantization_observer_epoch: Epoch to disable updates to the module's\n quantization observers. After this point, quantized weights and zero points will\n not be updated. Leave None to not disable observers during QAT. Default is None\n :param freeze_bn_stats_epoch: Epoch to stop the tracking of batch norm stats. Leave\n None to not stop tracking batch norm stats during QAT. Default is None\n :param end_epoch: Disabled, setting to anything other than -1 will raise an\n exception. 
For compatibility with YAML serialization only.\n :param model_fuse_fn_kwargs: dictionary of keyword argument values to be passed\n to the model fusing function\n :param quantize_embeddings: if True, will perform QAT on torch.nn.Embedding layers\n using sparseml.pytorch.utils.quantization.prepare_embeddings_qat to fake\n quantize embedding weights. Default is True. Models without embedding layers\n will be unaffected\n :param reduce_range: if True, the quantization range will be reduced by one bit.\n This may prevent overflow issues with model execution on certain hardware\n Default is False\n :param quantize_linear_activations: if False, FakeQuantize ops will not be run\n for activations of fully connected layers. this is important for quantizing\n transformer based models such as BERT where the quantized MatMul outputs\n are kept at 32 bits of precision and fake quantizing the outputs harm training\n recovery. Default is True\n \"\"\"\n\n def __init__(\n self,\n start_epoch: float = -1.0,\n submodules: Union[List[str], None] = None,\n model_fuse_fn_name: Union[str, None] = None,\n disable_quantization_observer_epoch: Union[float, None] = None,\n freeze_bn_stats_epoch: Union[float, None] = None,\n end_epoch: float = -1,\n model_fuse_fn_kwargs: Dict[str, Any] = None,\n quantize_embeddings: bool = True,\n reduce_range: bool = False,\n quantize_linear_activations: bool = True,\n ):\n if torch_quantization is None or torch_intrinsic is None:\n raise RuntimeError(\n \"Unable to import package torch.quantization and/or \"\n \"torch.nn.intrinsic. \"\n \"Try upgrading your PyTorch version to use the QuantizationModifier.\"\n )\n if end_epoch != -1:\n raise ValueError(\n \"end_epoch is disabled for QuantizationModifier and can only be set to\"\n \" -1. Given {}\".format(end_epoch)\n )\n\n super().__init__(start_epoch=start_epoch, end_epoch=-1.0, end_comparator=-1)\n\n self._start_epoch = start_epoch\n self._submodules = submodules\n self._model_fuse_fn_name = model_fuse_fn_name\n self._model_fuse_fn_kwargs = model_fuse_fn_kwargs or {}\n self._disable_quantization_observer_epoch = disable_quantization_observer_epoch\n self._freeze_bn_stats_epoch = freeze_bn_stats_epoch\n self._quantize_embeddings = quantize_embeddings\n self._reduce_range = reduce_range\n self._quantize_linear_activations = quantize_linear_activations\n\n self._modules_to_quantize = None\n self._qat_enabled = False\n self._quantization_observer_disabled = False\n self._bn_stats_frozen = False\n\n if (\n isinstance(self._model_fuse_fn_name, str)\n and self._model_fuse_fn_name.lower() == \"none\"\n ):\n self._model_fuse_fn_name = None\n if isinstance(self._submodules, list):\n self._submodules = set(self._submodules)\n\n self._validate_params()\n\n @ModifierProp()\n def submodules(self) -> Union[List[str], None]:\n \"\"\"\n :return: List of submodule names to perform QAT on. None quantizes the entire\n model\n \"\"\"\n return list(self._submodules) if self._submodules is not None else None\n\n @submodules.setter\n def submodules(self, value: Union[List[str], None]):\n \"\"\"\n :params value: List of submodule names to perform QAT on. Set None to quantize\n entire model\n \"\"\"\n self._submodules = value\n if isinstance(self._submodules, list):\n self._submodules = set(self._submodules)\n self._validate_params()\n\n @ModifierProp()\n def model_fuse_fn_name(self) -> Union[str, None]:\n \"\"\"\n :return: Name of model function to fuse the model in place prior\n to performing QAT. 
None to uses the default function\n `sparseml.pytorch.utils.fuse_module_conv_bn_relus`.\n \"\"\"\n return self._model_fuse_fn_name\n\n @model_fuse_fn_name.setter\n def model_fuse_fn_name(self, value: Union[str, None]):\n \"\"\"\n :params value: Name of model function to fuse the model in place prior\n to performing QAT. Set None to use the default function\n `sparseml.pytorch.utils.fuse_module_conv_bn_relus`. Set as 'no_fuse'\n to skip module fusing.\n \"\"\"\n self._model_fuse_fn_name = value\n if (\n isinstance(self._model_fuse_fn_name, str)\n and self._model_fuse_fn_name.lower() == \"none\"\n ):\n self._model_fuse_fn_name = None\n self._validate_params()\n\n @ModifierProp()\n def disable_quantization_observer_epoch(self) -> Union[float, None]:\n \"\"\"\n :return: Epoch to disable updates to the module's\n quantization observers. After this point, quantized weights and zero points\n will not be updated. When None, observers never disabled during QAT\n \"\"\"\n return self._disable_quantization_observer_epoch\n\n @disable_quantization_observer_epoch.setter\n def disable_quantization_observer_epoch(self, value: Union[float, None]):\n \"\"\"\n :params value: Epoch to disable updates to the module's\n quantization observers. After this point, quantized weights and zero points\n will not be updated. Set None to not disable observers during QAT\n \"\"\"\n self._disable_quantization_observer_epoch = value\n self._validate_params()\n\n @ModifierProp()\n def freeze_bn_stats_epoch(self) -> Union[float, None]:\n \"\"\"\n :return: Epoch to stop the tracking of batch norm stats. When\n None, batch norm stats are track for all of training\n \"\"\"\n return self._freeze_bn_stats_epoch\n\n @freeze_bn_stats_epoch.setter\n def freeze_bn_stats_epoch(self, value: Union[float, None]):\n \"\"\"\n :params value: Epoch to stop the tracking of batch norm stats. Set\n None to not stop tracking batch norm stats during QAT\n \"\"\"\n self._freeze_bn_stats_epoch = value\n self._validate_params()\n\n @ModifierProp()\n def quantize_embeddings(self) -> bool:\n \"\"\"\n :return: if True, will perform QAT on torch.nn.Embedding layers\n using sparseml.pytorch.utils.quantization.prepare_embeddings_qat to fake\n quantize embedding weights\n \"\"\"\n return self._quantize_embeddings\n\n @quantize_embeddings.setter\n def quantize_embeddings(self, value: bool):\n \"\"\"\n :params value: if True, will perform QAT on torch.nn.Embedding layers\n using sparseml.pytorch.utils.quantization.prepare_embeddings_qat to fake\n quantize embedding weights\n \"\"\"\n self._quantize_embeddings = value\n\n @ModifierProp()\n def reduce_range(self) -> bool:\n \"\"\"\n :return: if True, the quantization range will be reduced by one\n This may prevent overflow issues with model execution on certain hardware\n \"\"\"\n return self._reduce_range\n\n @ModifierProp()\n def quantize_linear_activations(self) -> bool:\n \"\"\"\n :return: if False, FakeQuantize ops will not be run\n for activations of fully connected layers. 
this is important for quantizing\n transformer based models such as BERT where the quantized MatMul outputs\n are kept at 32 bits of precision and fake quantizing the outputs harm\n training recovery\n \"\"\"\n return self._quantize_linear_activations\n\n def initialize(\n self,\n module: Module,\n epoch: float = 0,\n loggers: Optional[List[BaseLogger]] = None,\n **kwargs,\n ):\n \"\"\"\n Grab the module / submodule to perform QAT on\n\n :param module: the PyTorch model/module to modify\n :param epoch: The epoch to initialize the modifier and module at.\n Defaults to 0 (start of the training process)\n :param loggers: Optional list of loggers to log the modification process to\n :param kwargs: Optional kwargs to support specific arguments\n for individual modifiers.\n \"\"\"\n super().initialize(module, epoch, loggers, **kwargs)\n self._modules_to_quantize = []\n if self._submodules is not None:\n found_submodules = []\n for name, submodule in module.named_modules():\n if name in self._submodules:\n self._modules_to_quantize.append(_ModuleToQuantize(name, submodule))\n found_submodules.append(name)\n if not len(found_submodules) == len(self._submodules):\n raise RuntimeError(\n \"Could not find all provided submodules to quantize\"\n \"given: {}, found: {}\".format(\n list(self._submodules), found_submodules\n )\n )\n else:\n self._modules_to_quantize.append(_ModuleToQuantize(None, module))\n\n self._check_quantization_update(module, epoch, steps_per_epoch=0)\n\n def finalize(\n self, module: Optional[Module] = None, reset_loggers: bool = True, **kwargs\n ):\n \"\"\"\n Cleans up any state\n\n :param module: The model/module to finalize the modifier for.\n Marked optional so state can still be cleaned up on delete,\n but generally should always be passed in.\n :param reset_loggers: True to remove any currently attached loggers (default),\n False to keep the loggers attached.\n :param kwargs: Optional kwargs to support specific arguments\n for individual modifiers.\n \"\"\"\n super().finalize(module, reset_loggers, **kwargs)\n self._modules_to_quantize = None\n\n def update(\n self, module: Module, optimizer: Optimizer, epoch: float, steps_per_epoch: int\n ):\n \"\"\"\n If start_pending(), fuses the model, sets the model quantization config,\n calls torch.quantization.prepare_qat on the model to begin QAT\n If end_pending(), updates the modules layers params to their original\n trainable state.\n\n :param module: module to modify\n :param optimizer: optimizer to modify\n :param epoch: current epoch and progress within the current epoch\n :param steps_per_epoch: number of steps taken within each epoch\n (calculate batch number using this and epoch)\n \"\"\"\n super().update(module, optimizer, epoch, steps_per_epoch)\n self._check_quantization_update(module, epoch, steps_per_epoch)\n\n def update_ready(self, epoch: float, steps_per_epoch: int) -> bool:\n \"\"\"\n\n :param epoch: current epoch and progress within the current epoch\n :param steps_per_epoch: number of steps taken within each epoch\n (calculate batch number using this and epoch)\n :return: True if the modifier is pending an update and update() should be called\n \"\"\"\n if not self._initialized:\n raise RuntimeError(\"modifier must be initialized first\")\n\n if not self._enabled:\n return False\n\n pending = (\n self.start_pending(epoch, steps_per_epoch)\n or self._disable_quantization_observer_update_ready(epoch)\n or self._freeze_bn_stats_update_ready(epoch)\n )\n\n return pending\n\n def _check_quantization_update(\n self, 
module: Module, epoch: float, steps_per_epoch: int\n ):\n if self.start_pending(epoch, steps_per_epoch) and not self._qat_enabled:\n self._enable_module_qat(module)\n\n if self._disable_quantization_observer_update_ready(epoch):\n for _, quant_module in self._modules_to_quantize:\n quant_module.apply(torch_quantization.disable_observer)\n self._quantization_observer_disabled = True\n\n if self._freeze_bn_stats_update_ready(epoch):\n for _, quant_module in self._modules_to_quantize:\n quant_module.apply(torch_intrinsic.qat.freeze_bn_stats)\n self._bn_stats_frozen = True\n\n def _enable_module_qat(self, module: Module):\n # fuse module Conv-BNs\n if (\n self._model_fuse_fn_name is not None\n and self._model_fuse_fn_name != \"no_fuse\"\n ): # module class fn\n module_fuse_fn = getattr(module, self._model_fuse_fn_name, None)\n if module_fuse_fn is None or not callable(module_fuse_fn):\n raise ValueError(\n \"Invalid model_fuse_fn_name. \"\n \"Module has no callable function {}\".format(\n self._model_fuse_fn_name\n )\n )\n module_fuse_fn(**self._model_fuse_fn_kwargs)\n elif self._model_fuse_fn_name is None: # default auto fn\n self._model_fuse_fn_kwargs[\"inplace\"] = True\n fuse_module_conv_bn_relus(module, **self._model_fuse_fn_kwargs)\n\n # prepare each module / submodule for quantization\n qconfig = get_qat_qconfig(reduce_range=self._reduce_range)\n for name, quant_module in self._modules_to_quantize:\n # wrap any modules with wrap_qat set to True as QATWrapper(s)\n configure_module_qat_wrappers(quant_module, reduce_range=self._reduce_range)\n # set quantization config (asymmetric activations, symmetric weights)\n quant_module.qconfig = qconfig\n # wrap all conv / linear blocks in with quantization observers\n torch_quantization.propagate_qconfig_(quant_module)\n configure_module_default_qconfigs(quant_module)\n\n add_quant_dequant(quant_module, name, module)\n\n if not self._quantize_linear_activations:\n remove_activation_qat_by_layer_name(quant_module, [\"Linear\"])\n\n # set modules with proper qconfigs to QAT mode\n torch_quantization.prepare_qat(module, inplace=True)\n if self._quantize_embeddings:\n prepare_embeddings_qat(module, reduce_range=self._reduce_range)\n self._qat_enabled = True\n\n def _disable_quantization_observer_update_ready(self, epoch: float) -> bool:\n return (\n self._disable_quantization_observer_epoch is not None\n and epoch >= self._disable_quantization_observer_epoch\n and not self._quantization_observer_disabled\n )\n\n def _freeze_bn_stats_update_ready(self, epoch: float) -> bool:\n return (\n self._freeze_bn_stats_epoch is not None\n and epoch >= self._freeze_bn_stats_epoch\n and not self._bn_stats_frozen\n )\n\n def _validate_params(self):\n if (\n self._disable_quantization_observer_epoch is not None\n and self._disable_quantization_observer_epoch < self._start_epoch\n ):\n raise ValueError(\n f\"disable_quantization_observer_epoch may not be greater than \"\n f\"start_epoch for QuantizationModifier, received: \"\n f\"{self._disable_quantization_observer_epoch} with start_epoch \"\n f\"{self._start_epoch}\"\n )\n\n if (\n self._freeze_bn_stats_epoch is not None\n and self._freeze_bn_stats_epoch < self._start_epoch\n ):\n raise ValueError(\n \"freeze_bn_stats_epoch may not be greater than start_epoch\"\n \" for QuantizationModifier, received: {} with start_epoch {}\".format(\n self._freeze_bn_stats_epoch, self._start_epoch\n )\n )\n",
"# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nHelper functions and script for exporting a trained transformers model to an ONNX file\nfor use with engines such as DeepSparse\n\nscript accessible from sparseml.transformers.export_onnx\n\ncommand help:\nusage: export.py [-h] --task TASK --model_path MODEL_PATH\n [--sequence_length SEQUENCE_LENGTH]\n [--convert_qat CONVERT_QAT]\n [--finetuning_task FINETUNING_TASK]\n [--onnx_file_name ONNX_FILE_NAME]\n\nExport a trained transformers model to an ONNX file\n\noptional arguments:\n -h, --help show this help message and exit\n --task TASK Task to create the model for. i.e. mlm, qa, glue, ner\n --model_path MODEL_PATH\n Path to directory where model files for weights, config,\n and tokenizer are stored\n --sequence_length SEQUENCE_LENGTH\n Sequence length to use. Default is 384. Can be overwritten\n later\n --convert_qat CONVERT_QAT\n Set flag to not perform QAT to fully quantized conversion\n after export\n --finetuning_task FINETUNING_TASK\n optional finetuning task for text classification and token\n classification exports\n --onnx_file_name ONNX_FILE_NAME\n Name for exported ONNX file in the model directory. Default\n and reccomended value for pipeline compatibility is\n 'model.onnx'\n\nexample usage:\nsparseml.transformers.export_onnx \\\n --task question-answering \\\n --model_path /PATH/TO/SPARSIFIED/MODEL/DIRECTORY \\\n --sequence_length 128\n\"\"\"\n\nimport argparse\nimport logging\nimport os\nfrom typing import Optional\n\nimport torch\nfrom transformers import (\n AutoConfig,\n AutoModelForMaskedLM,\n AutoModelForQuestionAnswering,\n AutoModelForSequenceClassification,\n AutoModelForTokenClassification,\n AutoTokenizer,\n)\nfrom transformers.file_utils import WEIGHTS_NAME\nfrom transformers.tokenization_utils_base import PaddingStrategy\n\nfrom sparseml.pytorch.optim import ScheduledModifierManager\nfrom sparseml.pytorch.utils import export_onnx\nfrom sparseml.transformers.utils.helpers import RECIPE_NAME\n\n\n__all__ = [\"export_transformer_to_onnx\"]\n\n\n_LOGGER = logging.getLogger(__name__)\n_TASK_TO_CONSTRUCTOR = {\n # language modeling\n \"mlm\": AutoModelForMaskedLM,\n \"masked-language-modeling\": AutoModelForMaskedLM,\n # question answering\n \"qa\": AutoModelForQuestionAnswering,\n \"question-answering\": AutoModelForQuestionAnswering,\n # GLUE\n \"glue\": AutoModelForSequenceClassification,\n \"sequence-classification\": AutoModelForSequenceClassification,\n \"sentiment-analysis\": AutoModelForSequenceClassification,\n \"text-classification\": AutoModelForSequenceClassification,\n # token classification\n \"ner\": AutoModelForTokenClassification,\n \"token-classification\": AutoModelForTokenClassification,\n}\n\n\ndef export_transformer_to_onnx(\n task: str,\n model_path: str,\n sequence_length: int = 384,\n convert_qat: bool = True,\n finetuning_task: Optional[str] = None,\n onnx_file_name: str = \"model.onnx\",\n) -> str:\n \"\"\"\n 
Exports the saved transformers file to ONNX at batch size 1 using\n the given model path weights, config, and tokenizer\n\n :param task: task to create the model for. i.e. mlm, qa, glue, ner\n :param model_path: path to directory where model files, tokenizers,\n and configs are saved. ONNX export will also be written here\n :param sequence_length: model sequence length to use for export\n :param convert_qat: set True to convert a QAT model to fully quantized\n ONNX model. Default is True\n :param finetuning_task: optional string finetuning task for text classification\n and token classification exports\n :param onnx_file_name: name to save the exported ONNX file as. Default\n is model.onnx. Note that when loading a model directory to a deepsparse\n pipeline, it will look only for 'model.onnx'\n :return: path to the exported ONNX file\n \"\"\"\n task = \"-\".join(task.lower().split(\"_\"))\n if task not in _TASK_TO_CONSTRUCTOR:\n raise ValueError(\n f\"task {task} unsupported for export_transformer_to_onnx. Supported \"\n f\"tasks include {list(_TASK_TO_CONSTRUCTOR.keys())}\"\n )\n auto_model_constructor = _TASK_TO_CONSTRUCTOR[task]\n\n if not os.path.isdir(model_path):\n raise ValueError(\n \"model_path must be a directory that contains the trained transformer \"\n f\"files. {model_path} is not a directory\"\n )\n\n # load config and tokenizer\n config_args = {\"finetuning_task\": finetuning_task} if finetuning_task else {}\n config = AutoConfig.from_pretrained(model_path, **config_args)\n tokenizer = AutoTokenizer.from_pretrained(\n model_path, model_max_length=sequence_length\n )\n\n # load model\n model = auto_model_constructor.from_pretrained(\n model_path,\n from_tf=False,\n config=config,\n )\n\n # apply recipe if exists before loading model weights\n recipe_path = os.path.join(model_path, RECIPE_NAME)\n if os.path.isfile(recipe_path):\n ScheduledModifierManager.from_yaml(recipe_path).apply(model)\n else:\n _LOGGER.warning(f\"recipe not found under {recipe_path}\")\n\n # load weights\n load_kwargs = {} if torch.cuda.is_available() else {\"map_location\": \"cpu\"}\n state_dict = torch.load(os.path.join(model_path, WEIGHTS_NAME), **load_kwargs)\n model.load_state_dict(state_dict)\n\n # create fake model input\n inputs = tokenizer(\n \"\", return_tensors=\"pt\", padding=PaddingStrategy.MAX_LENGTH.value\n ).data # Dict[Tensor]\n\n # run export\n onnx_file_path = os.path.join(model_path, onnx_file_name)\n export_onnx(\n model,\n inputs,\n onnx_file_path,\n convert_qat=convert_qat,\n )\n\n return onnx_file_path\n\n\ndef _parse_args() -> argparse.Namespace:\n parser = argparse.ArgumentParser(\n description=\"Export a trained transformers model to an ONNX file\"\n )\n\n parser.add_argument(\n \"--task\",\n type=str,\n required=True,\n help=\"Task to create the model for. i.e. mlm, qa, glue, ner\",\n )\n parser.add_argument(\n \"--model_path\",\n required=True,\n type=str,\n help=(\n \"Path to directory where model files for weights, config, and \"\n \"tokenizer are stored\"\n ),\n )\n parser.add_argument(\n \"--sequence_length\",\n type=int,\n default=384,\n help=\"Sequence length to use. Default is 384. 
Can be overwritten later\",\n )\n parser.add_argument(\n \"--no_convert_qat\",\n action=\"store_false\",\n help=(\"Set flag to not perform QAT to fully quantized conversion after export\"),\n )\n parser.add_argument(\n \"--finetuning_task\",\n type=str,\n default=None,\n help=(\n \"Optional finetuning task for text classification and token \"\n \"classification exports\"\n ),\n )\n parser.add_argument(\n \"--onnx_file_name\",\n type=str,\n default=\"model.onnx\",\n help=(\n \"Name for exported ONNX file in the model directory. \"\n \"Default and reccomended value for pipeline compatibility is 'model.onnx'\"\n ),\n )\n\n return parser.parse_args()\n\n\ndef main():\n args = _parse_args()\n _LOGGER.info(f\"Exporting {args.model_path} to ONNX\")\n onnx_path = export_transformer_to_onnx(\n task=args.task,\n model_path=args.model_path,\n sequence_length=args.sequence_length,\n convert_qat=args.no_convert_qat, # False if flagged\n finetuning_task=args.finetuning_task,\n onnx_file_name=args.onnx_file_name,\n )\n _LOGGER.info(f\"Model exported to: {onnx_path}\")\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"torch.quantization.propagate_qconfig_",
"torch.quantization.prepare_qat"
],
[
"torch.cuda.is_available"
]
] |
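As orientation for the sparseml row above, here is a minimal hedged sketch of driving the QuantizationModifier by hand, built only from the constructor and initialize() signatures visible in the source; the import path and the toy model are assumptions, not part of the original code.

# A minimal sketch, assuming QuantizationModifier is re-exported from
# sparseml.pytorch.optim (an assumption; only its definition above is given)
# and that the installed torch supports quantization (>= 1.2 per the docstring).
import torch
from sparseml.pytorch.optim import QuantizationModifier

# Toy Conv-BN-ReLU-Linear model so the default fuse function
# (fuse_module_conv_bn_relus) has something to fuse.
model = torch.nn.Sequential(
    torch.nn.Conv2d(3, 8, kernel_size=3, padding=1),
    torch.nn.BatchNorm2d(8),
    torch.nn.ReLU(),
    torch.nn.AdaptiveAvgPool2d(1),
    torch.nn.Flatten(),
    torch.nn.Linear(8, 10),
)

# Parameters mirror the sample YAML in the modifier's docstring.
modifier = QuantizationModifier(
    start_epoch=0.0,
    disable_quantization_observer_epoch=2.0,
    freeze_bn_stats_epoch=3.0,
    reduce_range=False,
)

# initialize() records the module(s) to quantize; once start_epoch has been
# reached it fuses the model and calls torch.quantization.prepare_qat on it.
modifier.initialize(model, epoch=0.0)

In a full run the modifier would normally be scheduled from a recipe and applied through ScheduledModifierManager, which is how the export script in the same row re-applies a recipe found next to the weights before loading them.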
rossumai/keras
|
[
"c83ca43005ce81de145f17d112e308b492ddc1e0"
] |
[
"tests/keras/test_callbacks.py"
] |
[
"import os\nimport multiprocessing\n\nimport numpy as np\nimport pytest\nfrom csv import reader\nfrom csv import Sniffer\nimport shutil\nfrom keras import optimizers\nfrom keras import initializers\nfrom keras import callbacks\nfrom keras.models import Sequential, Model\nfrom keras.layers import Input, Dense, Dropout, add\nfrom keras.layers.convolutional import Conv2D\nfrom keras.layers.pooling import MaxPooling2D, GlobalAveragePooling2D\nfrom keras.utils.test_utils import get_test_data\nfrom keras.utils.test_utils import keras_test\nfrom keras import backend as K\nfrom keras.utils import np_utils\n\ninput_dim = 2\nnum_hidden = 4\nnum_class = 2\nbatch_size = 5\ntrain_samples = 20\ntest_samples = 20\n\n\n@keras_test\ndef test_TerminateOnNaN():\n np.random.seed(1337)\n (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,\n num_test=test_samples,\n input_shape=(input_dim,),\n classification=True,\n num_classes=num_class)\n\n y_test = np_utils.to_categorical(y_test)\n y_train = np_utils.to_categorical(y_train)\n cbks = [callbacks.TerminateOnNaN()]\n model = Sequential()\n initializer = initializers.Constant(value=1e5)\n for _ in range(5):\n model.add(Dense(num_hidden, input_dim=input_dim, activation='relu',\n kernel_initializer=initializer))\n model.add(Dense(num_class, activation='linear'))\n model.compile(loss='mean_squared_error',\n optimizer='rmsprop')\n\n # case 1 fit\n history = model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=20)\n loss = history.history['loss']\n assert len(loss) == 1\n assert loss[0] == np.inf\n\n # case 2 fit_generator\n def data_generator():\n max_batch_index = len(X_train) // batch_size\n i = 0\n while 1:\n yield (X_train[i * batch_size: (i + 1) * batch_size],\n y_train[i * batch_size: (i + 1) * batch_size])\n i += 1\n i = i % max_batch_index\n history = model.fit_generator(data_generator(),\n len(X_train),\n validation_data=(X_test, y_test),\n callbacks=cbks,\n epochs=20)\n loss = history.history['loss']\n assert len(loss) == 1\n assert loss[0] == np.inf or np.isnan(loss[0])\n\n\n@keras_test\ndef test_stop_training_csv(tmpdir):\n np.random.seed(1337)\n fp = str(tmpdir / 'test.csv')\n (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,\n num_test=test_samples,\n input_shape=(input_dim,),\n classification=True,\n num_classes=num_class)\n\n y_test = np_utils.to_categorical(y_test)\n y_train = np_utils.to_categorical(y_train)\n cbks = [callbacks.TerminateOnNaN(), callbacks.CSVLogger(fp)]\n model = Sequential()\n for _ in range(5):\n model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))\n model.add(Dense(num_class, activation='linear'))\n model.compile(loss='mean_squared_error',\n optimizer='rmsprop')\n\n def data_generator():\n i = 0\n max_batch_index = len(X_train) // batch_size\n tot = 0\n while 1:\n if tot > 3 * len(X_train):\n yield np.ones([batch_size, input_dim]) * np.nan, np.ones([batch_size, num_class]) * np.nan\n else:\n yield (X_train[i * batch_size: (i + 1) * batch_size],\n y_train[i * batch_size: (i + 1) * batch_size])\n i += 1\n tot += 1\n i = i % max_batch_index\n\n history = model.fit_generator(data_generator(),\n len(X_train) // batch_size,\n validation_data=(X_test, y_test),\n callbacks=cbks,\n epochs=20)\n loss = history.history['loss']\n assert len(loss) > 1\n assert loss[-1] == np.inf or np.isnan(loss[-1])\n\n values = []\n with open(fp) as f:\n for x in reader(f):\n values.append(x)\n\n assert 'nan' in values[-1], 'The 
last epoch was not logged.'\n os.remove(fp)\n\n\n@keras_test\ndef test_ModelCheckpoint(tmpdir):\n np.random.seed(1337)\n filepath = str(tmpdir / 'checkpoint.h5')\n (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,\n num_test=test_samples,\n input_shape=(input_dim,),\n classification=True,\n num_classes=num_class)\n y_test = np_utils.to_categorical(y_test)\n y_train = np_utils.to_categorical(y_train)\n # case 1\n monitor = 'val_loss'\n save_best_only = False\n mode = 'auto'\n\n model = Sequential()\n model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))\n model.add(Dense(num_class, activation='softmax'))\n model.compile(loss='categorical_crossentropy',\n optimizer='rmsprop',\n metrics=['accuracy'])\n\n cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,\n save_best_only=save_best_only, mode=mode)]\n model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=1)\n assert os.path.isfile(filepath)\n os.remove(filepath)\n\n # case 2\n mode = 'min'\n cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,\n save_best_only=save_best_only, mode=mode)]\n model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=1)\n assert os.path.isfile(filepath)\n os.remove(filepath)\n\n # case 3\n mode = 'max'\n monitor = 'val_acc'\n cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,\n save_best_only=save_best_only, mode=mode)]\n model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=1)\n assert os.path.isfile(filepath)\n os.remove(filepath)\n\n # case 4\n save_best_only = True\n cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,\n save_best_only=save_best_only, mode=mode)]\n model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=1)\n assert os.path.isfile(filepath)\n os.remove(filepath)\n\n # case 5\n save_best_only = False\n period = 2\n mode = 'auto'\n filepath = 'checkpoint.{epoch:02d}.h5'\n cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,\n save_best_only=save_best_only, mode=mode,\n period=period)]\n model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=4)\n assert os.path.isfile(filepath.format(epoch=1))\n assert os.path.isfile(filepath.format(epoch=3))\n assert not os.path.exists(filepath.format(epoch=0))\n assert not os.path.exists(filepath.format(epoch=2))\n os.remove(filepath.format(epoch=1))\n os.remove(filepath.format(epoch=3))\n assert not tmpdir.listdir()\n\n\n@keras_test\ndef test_EarlyStopping():\n np.random.seed(1337)\n (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,\n num_test=test_samples,\n input_shape=(input_dim,),\n classification=True,\n num_classes=num_class)\n y_test = np_utils.to_categorical(y_test)\n y_train = np_utils.to_categorical(y_train)\n model = Sequential()\n model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))\n model.add(Dense(num_class, activation='softmax'))\n model.compile(loss='categorical_crossentropy',\n optimizer='rmsprop',\n metrics=['accuracy'])\n mode = 'max'\n monitor = 'val_acc'\n patience = 0\n cbks = [callbacks.EarlyStopping(patience=patience, monitor=monitor, mode=mode)]\n history = model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=20)\n\n mode = 'auto'\n monitor = 'val_acc'\n patience = 2\n cbks = 
[callbacks.EarlyStopping(patience=patience, monitor=monitor, mode=mode)]\n history = model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=20)\n\n\n@keras_test\ndef test_EarlyStopping_reuse():\n np.random.seed(1337)\n patience = 3\n data = np.random.random((100, 1))\n labels = np.where(data > 0.5, 1, 0)\n model = Sequential((\n Dense(1, input_dim=1, activation='relu'),\n Dense(1, activation='sigmoid'),\n ))\n model.compile(optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])\n stopper = callbacks.EarlyStopping(monitor='acc', patience=patience)\n weights = model.get_weights()\n\n hist = model.fit(data, labels, callbacks=[stopper])\n assert len(hist.epoch) >= patience\n\n # This should allow training to go for at least `patience` epochs\n model.set_weights(weights)\n hist = model.fit(data, labels, callbacks=[stopper])\n assert len(hist.epoch) >= patience\n\n\n@keras_test\ndef test_LearningRateScheduler():\n np.random.seed(1337)\n (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,\n num_test=test_samples,\n input_shape=(input_dim,),\n classification=True,\n num_classes=num_class)\n y_test = np_utils.to_categorical(y_test)\n y_train = np_utils.to_categorical(y_train)\n model = Sequential()\n model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))\n model.add(Dense(num_class, activation='softmax'))\n model.compile(loss='categorical_crossentropy',\n optimizer='sgd',\n metrics=['accuracy'])\n\n cbks = [callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]\n model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=5)\n assert (float(K.get_value(model.optimizer.lr)) - 0.2) < K.epsilon()\n\n\n@keras_test\ndef test_ReduceLROnPlateau():\n np.random.seed(1337)\n (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,\n num_test=test_samples,\n input_shape=(input_dim,),\n classification=True,\n num_classes=num_class)\n y_test = np_utils.to_categorical(y_test)\n y_train = np_utils.to_categorical(y_train)\n\n def make_model():\n np.random.seed(1337)\n model = Sequential()\n model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))\n model.add(Dense(num_class, activation='softmax'))\n\n model.compile(loss='categorical_crossentropy',\n optimizer=optimizers.SGD(lr=0.1),\n metrics=['accuracy'])\n return model\n\n model = make_model()\n\n # This should reduce the LR after the first epoch (due to high epsilon).\n cbks = [callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, epsilon=10, patience=1, cooldown=5)]\n model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=5, verbose=2)\n assert np.allclose(float(K.get_value(model.optimizer.lr)), 0.01, atol=K.epsilon())\n\n model = make_model()\n cbks = [callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, epsilon=0, patience=1, cooldown=5)]\n model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=5, verbose=2)\n assert np.allclose(float(K.get_value(model.optimizer.lr)), 0.1, atol=K.epsilon())\n\n\n@keras_test\ndef test_CSVLogger(tmpdir):\n np.random.seed(1337)\n filepath = str(tmpdir / 'log.tsv')\n sep = '\\t'\n (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,\n num_test=test_samples,\n input_shape=(input_dim,),\n classification=True,\n num_classes=num_class)\n y_test = np_utils.to_categorical(y_test)\n y_train = 
np_utils.to_categorical(y_train)\n\n def make_model():\n np.random.seed(1337)\n model = Sequential()\n model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))\n model.add(Dense(num_class, activation='softmax'))\n\n model.compile(loss='categorical_crossentropy',\n optimizer=optimizers.SGD(lr=0.1),\n metrics=['accuracy'])\n return model\n\n # case 1, create new file with defined separator\n model = make_model()\n cbks = [callbacks.CSVLogger(filepath, separator=sep)]\n model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=1)\n\n assert os.path.isfile(filepath)\n with open(filepath) as csvfile:\n dialect = Sniffer().sniff(csvfile.read())\n assert dialect.delimiter == sep\n del model\n del cbks\n\n # case 2, append data to existing file, skip header\n model = make_model()\n cbks = [callbacks.CSVLogger(filepath, separator=sep, append=True)]\n model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=1)\n\n # case 3, reuse of CSVLogger object\n model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=1)\n\n import re\n with open(filepath) as csvfile:\n output = \" \".join(csvfile.readlines())\n assert len(re.findall('epoch', output)) == 1\n\n os.remove(filepath)\n assert not tmpdir.listdir()\n\n\n@keras_test\n@pytest.mark.skipif((K.backend() != 'tensorflow'),\n reason='Requires tensorflow backend')\ndef test_TensorBoard(tmpdir):\n np.random.seed(np.random.randint(1, 1e7))\n filepath = str(tmpdir / 'logs')\n\n (X_train, y_train), (X_test, y_test) = get_test_data(\n num_train=train_samples,\n num_test=test_samples,\n input_shape=(input_dim,),\n classification=True,\n num_classes=num_class)\n y_test = np_utils.to_categorical(y_test)\n y_train = np_utils.to_categorical(y_train)\n\n def data_generator(train):\n if train:\n max_batch_index = len(X_train) // batch_size\n else:\n max_batch_index = len(X_test) // batch_size\n i = 0\n while 1:\n if train:\n # simulate multi-input/output models\n yield (X_train[i * batch_size: (i + 1) * batch_size],\n y_train[i * batch_size: (i + 1) * batch_size])\n else:\n yield (X_test[i * batch_size: (i + 1) * batch_size],\n y_test[i * batch_size: (i + 1) * batch_size])\n i += 1\n i = i % max_batch_index\n\n inp = Input((input_dim,))\n hidden = Dense(num_hidden, activation='relu')(inp)\n hidden = Dropout(0.1)(hidden)\n output = Dense(num_class, activation='softmax')(hidden)\n model = Model(inputs=inp, outputs=output)\n model.compile(loss='categorical_crossentropy',\n optimizer='sgd',\n metrics=['accuracy'])\n\n tsb = callbacks.TensorBoard(log_dir=filepath, histogram_freq=1,\n write_images=True, write_grads=True,\n embeddings_freq=1,\n embeddings_layer_names=['dense_1'],\n batch_size=5)\n cbks = [tsb]\n\n # fit without validation data\n model.fit(X_train, y_train, batch_size=batch_size,\n callbacks=cbks, epochs=3)\n\n # fit with validation data and accuracy\n model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test),\n callbacks=cbks, epochs=2)\n\n # fit generator without validation data\n model.fit_generator(data_generator(True), len(X_train), epochs=2,\n callbacks=cbks)\n\n # fit generator with validation data and accuracy\n model.fit_generator(data_generator(True), len(X_train), epochs=2,\n validation_data=(X_test, y_test),\n callbacks=cbks)\n\n assert os.path.isdir(filepath)\n shutil.rmtree(filepath)\n assert not 
tmpdir.listdir()\n\n\n@keras_test\n@pytest.mark.skipif((K.backend() != 'tensorflow'),\n reason='Requires tensorflow backend')\ndef test_TensorBoard_multi_input_output(tmpdir):\n np.random.seed(np.random.randint(1, 1e7))\n filepath = str(tmpdir / 'logs')\n\n (X_train, y_train), (X_test, y_test) = get_test_data(\n num_train=train_samples,\n num_test=test_samples,\n input_shape=(input_dim,),\n classification=True,\n num_classes=num_class)\n y_test = np_utils.to_categorical(y_test)\n y_train = np_utils.to_categorical(y_train)\n\n def data_generator(train):\n if train:\n max_batch_index = len(X_train) // batch_size\n else:\n max_batch_index = len(X_test) // batch_size\n i = 0\n while 1:\n if train:\n # simulate multi-input/output models\n yield ([X_train[i * batch_size: (i + 1) * batch_size]] * 2,\n [y_train[i * batch_size: (i + 1) * batch_size]] * 2)\n else:\n yield ([X_test[i * batch_size: (i + 1) * batch_size]] * 2,\n [y_test[i * batch_size: (i + 1) * batch_size]] * 2)\n i += 1\n i = i % max_batch_index\n\n inp1 = Input((input_dim,))\n inp2 = Input((input_dim,))\n inp = add([inp1, inp2])\n hidden = Dense(num_hidden, activation='relu')(inp)\n hidden = Dropout(0.1)(hidden)\n output1 = Dense(num_class, activation='softmax')(hidden)\n output2 = Dense(num_class, activation='softmax')(hidden)\n model = Model(inputs=[inp1, inp2], outputs=[output1, output2])\n model.compile(loss='categorical_crossentropy',\n optimizer='sgd',\n metrics=['accuracy'])\n\n tsb = callbacks.TensorBoard(log_dir=filepath, histogram_freq=1,\n write_images=True, write_grads=True,\n embeddings_freq=1,\n embeddings_layer_names=['dense_1'],\n batch_size=5)\n cbks = [tsb]\n\n # fit without validation data\n model.fit([X_train] * 2, [y_train] * 2, batch_size=batch_size,\n callbacks=cbks, epochs=3)\n\n # fit with validation data and accuracy\n model.fit([X_train] * 2, [y_train] * 2, batch_size=batch_size,\n validation_data=([X_test] * 2, [y_test] * 2),\n callbacks=cbks, epochs=2)\n\n # fit generator without validation data\n model.fit_generator(data_generator(True), len(X_train), epochs=2,\n callbacks=cbks)\n\n # fit generator with validation data and accuracy\n model.fit_generator(data_generator(True), len(X_train), epochs=2,\n validation_data=([X_test] * 2, [y_test] * 2),\n callbacks=cbks)\n\n assert os.path.isdir(filepath)\n shutil.rmtree(filepath)\n assert not tmpdir.listdir()\n\n\n@keras_test\n@pytest.mark.skipif((K.backend() != 'tensorflow'),\n reason='Requires tensorflow backend')\ndef test_TensorBoard_convnet(tmpdir):\n np.random.seed(np.random.randint(1, 1e7))\n filepath = str(tmpdir / 'logs')\n\n input_shape = (16, 16, 3)\n (x_train, y_train), (x_test, y_test) = get_test_data(num_train=500,\n num_test=200,\n input_shape=input_shape,\n classification=True,\n num_classes=4)\n y_train = np_utils.to_categorical(y_train)\n y_test = np_utils.to_categorical(y_test)\n\n model = Sequential([\n Conv2D(filters=8, kernel_size=3,\n activation='relu',\n input_shape=input_shape),\n MaxPooling2D(pool_size=2),\n Conv2D(filters=4, kernel_size=(3, 3),\n activation='relu', padding='same'),\n GlobalAveragePooling2D(),\n Dense(y_test.shape[-1], activation='softmax')\n ])\n model.compile(loss='categorical_crossentropy',\n optimizer='rmsprop',\n metrics=['accuracy'])\n tsb = callbacks.TensorBoard(log_dir=filepath, histogram_freq=1,\n write_images=True, write_grads=True,\n batch_size=16)\n cbks = [tsb]\n model.summary()\n history = model.fit(x_train, y_train, epochs=2, batch_size=16,\n validation_data=(x_test, y_test),\n callbacks=cbks,\n 
verbose=0)\n assert os.path.isdir(filepath)\n shutil.rmtree(filepath)\n assert not tmpdir.listdir()\n\n\n@keras_test\ndef test_CallbackValData():\n np.random.seed(1337)\n (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,\n num_test=test_samples,\n input_shape=(input_dim,),\n classification=True,\n num_classes=num_class)\n y_test = np_utils.to_categorical(y_test)\n y_train = np_utils.to_categorical(y_train)\n model = Sequential()\n model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))\n model.add(Dense(num_class, activation='softmax'))\n model.compile(loss='categorical_crossentropy',\n optimizer='sgd',\n metrics=['accuracy'])\n\n cbk = callbacks.LambdaCallback(on_train_end=lambda x: 1)\n model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=[cbk], epochs=1)\n\n def data_generator(train):\n if train:\n max_batch_index = len(X_train) // batch_size\n else:\n max_batch_index = len(X_test) // batch_size\n i = 0\n while 1:\n if train:\n yield (X_train[i * batch_size: (i + 1) * batch_size],\n y_train[i * batch_size: (i + 1) * batch_size])\n else:\n yield (X_test[i * batch_size: (i + 1) * batch_size],\n y_test[i * batch_size: (i + 1) * batch_size])\n i += 1\n i = i % max_batch_index\n\n cbk2 = callbacks.LambdaCallback(on_train_end=lambda x: 1)\n model.fit_generator(data_generator(True), len(X_train), epochs=1,\n validation_data=(X_test, y_test),\n callbacks=[cbk2])\n\n # callback validation data should always have x, y, and sample weights\n assert len(cbk.validation_data) == len(cbk2.validation_data) == 3\n assert cbk.validation_data[0] is cbk2.validation_data[0]\n assert cbk.validation_data[1] is cbk2.validation_data[1]\n assert cbk.validation_data[2].shape == cbk2.validation_data[2].shape\n\n\n@keras_test\ndef test_LambdaCallback():\n np.random.seed(1337)\n (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,\n num_test=test_samples,\n input_shape=(input_dim,),\n classification=True,\n num_classes=num_class)\n y_test = np_utils.to_categorical(y_test)\n y_train = np_utils.to_categorical(y_train)\n model = Sequential()\n model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))\n model.add(Dense(num_class, activation='softmax'))\n model.compile(loss='categorical_crossentropy',\n optimizer='sgd',\n metrics=['accuracy'])\n\n # Start an arbitrary process that should run during model training and be terminated after training has completed.\n def f():\n while True:\n pass\n\n p = multiprocessing.Process(target=f)\n p.start()\n cleanup_callback = callbacks.LambdaCallback(on_train_end=lambda logs: p.terminate())\n\n cbks = [cleanup_callback]\n model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=5)\n p.join()\n assert not p.is_alive()\n\n\n@keras_test\n@pytest.mark.skipif((K.backend() != 'tensorflow'),\n reason=\"Requires tensorflow backend\")\ndef test_TensorBoard_with_ReduceLROnPlateau(tmpdir):\n import shutil\n np.random.seed(np.random.randint(1, 1e7))\n filepath = str(tmpdir / 'logs')\n\n (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,\n num_test=test_samples,\n input_shape=(input_dim,),\n classification=True,\n num_classes=num_class)\n y_test = np_utils.to_categorical(y_test)\n y_train = np_utils.to_categorical(y_train)\n\n model = Sequential()\n model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))\n model.add(Dense(num_class, activation='softmax'))\n 
model.compile(loss='binary_crossentropy',\n optimizer='sgd',\n metrics=['accuracy'])\n\n cbks = [\n callbacks.ReduceLROnPlateau(\n monitor='val_loss',\n factor=0.5,\n patience=4,\n verbose=1),\n callbacks.TensorBoard(\n log_dir=filepath)]\n\n model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=2)\n\n assert os.path.isdir(filepath)\n shutil.rmtree(filepath)\n assert not tmpdir.listdir()\n\n\nif __name__ == '__main__':\n pytest.main([__file__])\n"
] |
[
[
"numpy.random.random",
"numpy.random.seed",
"numpy.isnan",
"numpy.ones",
"numpy.where",
"numpy.random.randint"
]
] |
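The keras test module above exercises most of the built-in callbacks against tiny models. Condensed into a hedged standalone sketch of the same pattern (the toy data and the checkpoint path are placeholders, not from the original tests):

import numpy as np
from keras import callbacks
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import np_utils

# Toy two-class data (placeholder).
x = np.random.random((100, 2))
y = np_utils.to_categorical(np.random.randint(0, 2, size=(100,)), 2)

model = Sequential()
model.add(Dense(4, input_dim=2, activation='relu'))
model.add(Dense(2, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop',
              metrics=['accuracy'])

cbks = [
    callbacks.EarlyStopping(monitor='val_loss', patience=2, mode='auto'),
    callbacks.ModelCheckpoint('checkpoint.h5', monitor='val_loss',
                              save_best_only=True),  # saving to .h5 requires h5py
]
model.fit(x, y, batch_size=5, epochs=10,
          validation_split=0.2, callbacks=cbks)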
modusdatascience/sklearntools
|
[
"6cb87edcb501440266622fe4c738be3f9015a859"
] |
[
"sklearntools/sym/adapters/calibrated_classifier_cv.py"
] |
[
"from sklearn.exceptions import NotFittedError\nfrom ..input_size import input_size, register_input_size\nfrom sklearn.calibration import CalibratedClassifierCV, _CalibratedClassifier\nfrom ..sym_predict_proba import register_sym_predict_proba,\\\n sym_predict_proba\nfrom six.moves import reduce\nfrom operator import __add__\nfrom sympy.core.numbers import RealNumber, Zero\nfrom ..sym_predict import sym_predict\nfrom ..syms import syms, register_syms\nfrom ..base import fallback\nfrom ..sym_decision_function import sym_decision_function\n\n# @register_input_size(CalibratedClassifierCV)\n# def input_size_calibrated_classifier_cv(estimator):\n# if not hasattr(estimator, 'calibrated_classifiers_'):\n# raise NotFittedError()\n# return input_size(estimator.calibrated_classifiers_[0])\n\n@register_sym_predict_proba(CalibratedClassifierCV)\ndef sym_predict_proba_calibrated_classifier_cv(estimator):\n if not hasattr(estimator, 'calibrated_classifiers_'):\n raise NotFittedError()\n return reduce(__add__, map(sym_predict_proba, estimator.calibrated_classifiers_)) / RealNumber(len(estimator.calibrated_classifiers_))\n\n@register_syms(CalibratedClassifierCV)\ndef syms_calibrated_classifier_cv(estimator):\n return syms(estimator.calibrated_classifiers_[0])\n\n# @register_input_size(_CalibratedClassifier)\n# def input_size__calibrated_classifier(estimator):\n# return input_size(estimator.base_estimator)\n\n@register_syms(_CalibratedClassifier)\ndef syms__calibrated_classifier(estimator):\n return syms(estimator.base_estimator)\n\n@register_sym_predict_proba(_CalibratedClassifier)\ndef sym_predict_proba__calibrated_classifier(estimator):\n if hasattr(estimator.base_estimator, 'decision_function'):\n inner_pred = sym_decision_function(estimator.base_estimator)\n elif hasattr(estimator.base_estimator, 'predict_proba'):\n inner_pred = sym_predict_proba(estimator.base_estimator)\n# inner_pred = fallback(sym_decision_function, sym_predict_proba)(estimator.base_estimator)\n result = Zero()\n for cal in estimator.calibrators_:\n variables = syms(cal)\n if len(variables) != 1:\n raise ValueError()\n var = variables[0]\n result += sym_predict(cal).subs({var: inner_pred})\n return result / RealNumber(len(estimator.calibrators_))\n\ndef sym_predict_proba_parts__calibrated_classifier(estimator):\n if hasattr(estimator.base_estimator, 'decision_function'):\n inner_pred = sym_decision_function(estimator.base_estimator)\n elif hasattr(estimator.base_estimator, 'predict_proba'):\n inner_pred = sym_predict_proba(estimator.base_estimator)\n result = Zero()\n var = None\n for cal in estimator.calibrators_:\n variables = syms(cal)\n if len(variables) != 1 or (var != variables[0] and var is not None):\n raise ValueError()\n var = variables[0]\n result += sym_predict(cal)\n result = result / RealNumber(len(estimator.calibrators_))\n return ((var,), [result], (syms(estimator.base_estimato), inner_pred, None))\n"
] |
[
[
"sklearn.exceptions.NotFittedError"
]
] |
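The sklearntools adapter above builds a symbolic predict_proba for CalibratedClassifierCV by substituting the base estimator's symbolic output into each calibrator's expression and averaging the results. A self-contained illustration of that substitute-and-average step, with stand-in sympy expressions in place of the real sym_decision_function / sym_predict outputs (every concrete expression below is made up for the sketch):

from functools import reduce
from operator import __add__
from sympy import Symbol, exp
from sympy.core.numbers import RealNumber

# The single variable each fitted calibrator exposes via syms(cal).
x = Symbol('x')
# Stand-in for sym_decision_function(estimator.base_estimator).
inner_pred = Symbol('w') * Symbol('x0') + Symbol('b')

# Two stand-in sigmoid-style calibrator expressions (sym_predict(cal)).
calibrator_exprs = [1 / (1 + exp(-(2 * x + 1))),
                    1 / (1 + exp(-(3 * x - 1)))]

# Substitute the inner prediction into each calibrator, then average,
# mirroring sym_predict_proba__calibrated_classifier above.
per_calibrator = [expr.subs({x: inner_pred}) for expr in calibrator_exprs]
result = reduce(__add__, per_calibrator) / RealNumber(len(per_calibrator))
print(result)

The top-level CalibratedClassifierCV adapter then averages these per-classifier expressions once more across calibrated_classifiers_, exactly as in sym_predict_proba_calibrated_classifier_cv above.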
sjperkins/arrow
|
[
"eb20a3dbc7732f612e5ce54be5f4291440829350"
] |
[
"python/pyarrow/tests/test_array.py"
] |
[
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom collections.abc import Iterable\nimport datetime\nimport decimal\nimport hypothesis as h\nimport hypothesis.strategies as st\nimport itertools\nimport pickle\nimport pytest\nimport struct\nimport sys\nimport weakref\n\nimport numpy as np\ntry:\n import pickle5\nexcept ImportError:\n pickle5 = None\nimport pytz\n\nimport pyarrow as pa\nimport pyarrow.tests.strategies as past\n\n\ndef test_total_bytes_allocated():\n assert pa.total_allocated_bytes() == 0\n\n\ndef test_weakref():\n arr = pa.array([1, 2, 3])\n wr = weakref.ref(arr)\n assert wr() is not None\n del arr\n assert wr() is None\n\n\ndef test_getitem_NULL():\n arr = pa.array([1, None, 2])\n assert arr[1].as_py() is None\n assert arr[1].is_valid is False\n assert isinstance(arr[1], pa.Int64Scalar)\n\n\ndef test_constructor_raises():\n # This could happen by wrong capitalization.\n # ARROW-2638: prevent calling extension class constructors directly\n with pytest.raises(TypeError):\n pa.Array([1, 2])\n\n\ndef test_list_format():\n arr = pa.array([[1], None, [2, 3, None]])\n result = arr.to_string()\n expected = \"\"\"\\\n[\n [\n 1\n ],\n null,\n [\n 2,\n 3,\n null\n ]\n]\"\"\"\n assert result == expected\n\n\ndef test_string_format():\n arr = pa.array(['', None, 'foo'])\n result = arr.to_string()\n expected = \"\"\"\\\n[\n \"\",\n null,\n \"foo\"\n]\"\"\"\n assert result == expected\n\n\ndef test_long_array_format():\n arr = pa.array(range(100))\n result = arr.to_string(window=2)\n expected = \"\"\"\\\n[\n 0,\n 1,\n ...\n 98,\n 99\n]\"\"\"\n assert result == expected\n\n\ndef test_binary_format():\n arr = pa.array([b'\\x00', b'', None, b'\\x01foo', b'\\x80\\xff'])\n result = arr.to_string()\n expected = \"\"\"\\\n[\n 00,\n ,\n null,\n 01666F6F,\n 80FF\n]\"\"\"\n assert result == expected\n\n\ndef test_binary_total_values_length():\n arr = pa.array([b'0000', None, b'11111', b'222222', b'3333333'],\n type='binary')\n large_arr = pa.array([b'0000', None, b'11111', b'222222', b'3333333'],\n type='large_binary')\n\n assert arr.total_values_length == 22\n assert arr.slice(1, 3).total_values_length == 11\n assert large_arr.total_values_length == 22\n assert large_arr.slice(1, 3).total_values_length == 11\n\n\ndef test_to_numpy_zero_copy():\n arr = pa.array(range(10))\n\n np_arr = arr.to_numpy()\n\n # check for zero copy (both arrays using same memory)\n arrow_buf = arr.buffers()[1]\n assert arrow_buf.address == np_arr.ctypes.data\n\n arr = None\n import gc\n gc.collect()\n\n # Ensure base is still valid\n assert np_arr.base is not None\n expected = np.arange(10)\n np.testing.assert_array_equal(np_arr, expected)\n\n\ndef test_to_numpy_unsupported_types():\n # ARROW-2871: Some primitive types are not yet supported in to_numpy\n bool_arr = pa.array([True, 
False, True])\n\n with pytest.raises(ValueError):\n bool_arr.to_numpy()\n\n result = bool_arr.to_numpy(zero_copy_only=False)\n expected = np.array([True, False, True])\n np.testing.assert_array_equal(result, expected)\n\n null_arr = pa.array([None, None, None])\n\n with pytest.raises(ValueError):\n null_arr.to_numpy()\n\n result = null_arr.to_numpy(zero_copy_only=False)\n expected = np.array([None, None, None], dtype=object)\n np.testing.assert_array_equal(result, expected)\n\n arr = pa.array([1, 2, None])\n\n with pytest.raises(ValueError, match=\"with 1 nulls\"):\n arr.to_numpy()\n\n\ndef test_to_numpy_writable():\n arr = pa.array(range(10))\n np_arr = arr.to_numpy()\n\n # by default not writable for zero-copy conversion\n with pytest.raises(ValueError):\n np_arr[0] = 10\n\n np_arr2 = arr.to_numpy(zero_copy_only=False, writable=True)\n np_arr2[0] = 10\n assert arr[0].as_py() == 0\n\n # when asking for writable, cannot do zero-copy\n with pytest.raises(ValueError):\n arr.to_numpy(zero_copy_only=True, writable=True)\n\n\n@pytest.mark.parametrize('unit', ['s', 'ms', 'us', 'ns'])\ndef test_to_numpy_datetime64(unit):\n arr = pa.array([1, 2, 3], pa.timestamp(unit))\n expected = np.array([1, 2, 3], dtype=\"datetime64[{}]\".format(unit))\n np_arr = arr.to_numpy()\n np.testing.assert_array_equal(np_arr, expected)\n\n\n@pytest.mark.parametrize('unit', ['s', 'ms', 'us', 'ns'])\ndef test_to_numpy_timedelta64(unit):\n arr = pa.array([1, 2, 3], pa.duration(unit))\n expected = np.array([1, 2, 3], dtype=\"timedelta64[{}]\".format(unit))\n np_arr = arr.to_numpy()\n np.testing.assert_array_equal(np_arr, expected)\n\n\ndef test_to_numpy_dictionary():\n # ARROW-7591\n arr = pa.array([\"a\", \"b\", \"a\"]).dictionary_encode()\n expected = np.array([\"a\", \"b\", \"a\"], dtype=object)\n np_arr = arr.to_numpy(zero_copy_only=False)\n np.testing.assert_array_equal(np_arr, expected)\n\n\n@pytest.mark.pandas\ndef test_to_pandas_zero_copy():\n import gc\n\n arr = pa.array(range(10))\n\n for i in range(10):\n series = arr.to_pandas()\n assert sys.getrefcount(series) == 2\n series = None # noqa\n\n assert sys.getrefcount(arr) == 2\n\n for i in range(10):\n arr = pa.array(range(10))\n series = arr.to_pandas()\n arr = None\n gc.collect()\n\n # Ensure base is still valid\n\n # Because of py.test's assert inspection magic, if you put getrefcount\n # on the line being examined, it will be 1 higher than you expect\n base_refcount = sys.getrefcount(series.values.base)\n assert base_refcount == 2\n series.sum()\n\n\n@pytest.mark.nopandas\n@pytest.mark.pandas\ndef test_asarray():\n # ensure this is tested both when pandas is present or not (ARROW-6564)\n\n arr = pa.array(range(4))\n\n # The iterator interface gives back an array of Int64Value's\n np_arr = np.asarray([_ for _ in arr])\n assert np_arr.tolist() == [0, 1, 2, 3]\n assert np_arr.dtype == np.dtype('O')\n assert type(np_arr[0]) == pa.lib.Int64Value\n\n # Calling with the arrow array gives back an array with 'int64' dtype\n np_arr = np.asarray(arr)\n assert np_arr.tolist() == [0, 1, 2, 3]\n assert np_arr.dtype == np.dtype('int64')\n\n # An optional type can be specified when calling np.asarray\n np_arr = np.asarray(arr, dtype='str')\n assert np_arr.tolist() == ['0', '1', '2', '3']\n\n # If PyArrow array has null values, numpy type will be changed as needed\n # to support nulls.\n arr = pa.array([0, 1, 2, None])\n assert arr.type == pa.int64()\n np_arr = np.asarray(arr)\n elements = np_arr.tolist()\n assert elements[:3] == [0., 1., 2.]\n assert np.isnan(elements[3])\n 
assert np_arr.dtype == np.dtype('float64')\n\n # DictionaryType data will be converted to dense numpy array\n arr = pa.DictionaryArray.from_arrays(\n pa.array([0, 1, 2, 0, 1]), pa.array(['a', 'b', 'c']))\n np_arr = np.asarray(arr)\n assert np_arr.dtype == np.dtype('object')\n assert np_arr.tolist() == ['a', 'b', 'c', 'a', 'b']\n\n\n@pytest.mark.parametrize('ty', [\n None,\n pa.null(),\n pa.int8(),\n pa.string()\n])\ndef test_nulls(ty):\n arr = pa.nulls(3, type=ty)\n expected = pa.array([None, None, None], type=ty)\n\n assert len(arr) == 3\n assert arr.equals(expected)\n\n if ty is None:\n assert arr.type == pa.null()\n else:\n assert arr.type == ty\n\n\ndef test_array_from_scalar():\n today = datetime.date.today()\n now = datetime.datetime.now()\n now_utc = now.replace(tzinfo=pytz.utc)\n now_with_tz = now_utc.astimezone(pytz.timezone('US/Eastern'))\n oneday = datetime.timedelta(days=1)\n\n cases = [\n (None, 1, pa.array([None])),\n (None, 10, pa.nulls(10)),\n (-1, 3, pa.array([-1, -1, -1], type=pa.int64())),\n (2.71, 2, pa.array([2.71, 2.71], type=pa.float64())),\n (\"string\", 4, pa.array([\"string\"] * 4)),\n (\n pa.scalar(8, type=pa.uint8()),\n 17,\n pa.array([8] * 17, type=pa.uint8())\n ),\n (pa.scalar(None), 3, pa.array([None, None, None])),\n (pa.scalar(True), 11, pa.array([True] * 11)),\n (today, 2, pa.array([today] * 2)),\n (now, 10, pa.array([now] * 10)),\n (\n now_with_tz,\n 2,\n pa.array(\n [now_utc] * 2,\n type=pa.timestamp('us', tz=pytz.timezone('US/Eastern'))\n )\n ),\n (now.time(), 9, pa.array([now.time()] * 9)),\n (oneday, 4, pa.array([oneday] * 4)),\n (False, 9, pa.array([False] * 9)),\n ([1, 2], 2, pa.array([[1, 2], [1, 2]])),\n (\n pa.scalar([-1, 3], type=pa.large_list(pa.int8())),\n 5,\n pa.array([[-1, 3]] * 5, type=pa.large_list(pa.int8()))\n ),\n ({'a': 1, 'b': 2}, 3, pa.array([{'a': 1, 'b': 2}] * 3))\n ]\n\n for value, size, expected in cases:\n arr = pa.repeat(value, size)\n assert len(arr) == size\n assert arr.type.equals(expected.type)\n assert arr.equals(expected)\n if expected.type == pa.null():\n assert arr.null_count == size\n else:\n assert arr.null_count == 0\n\n\ndef test_array_from_dictionary_scalar():\n dictionary = ['foo', 'bar', 'baz']\n arr = pa.DictionaryArray.from_arrays([2, 1, 2, 0], dictionary=dictionary)\n\n result = pa.repeat(arr[0], 5)\n expected = pa.DictionaryArray.from_arrays([2] * 5, dictionary=dictionary)\n assert result.equals(expected)\n\n result = pa.repeat(arr[3], 5)\n expected = pa.DictionaryArray.from_arrays([0] * 5, dictionary=dictionary)\n assert result.equals(expected)\n\n\ndef test_array_getitem():\n arr = pa.array(range(10, 15))\n lst = arr.to_pylist()\n\n for idx in range(-len(arr), len(arr)):\n assert arr[idx].as_py() == lst[idx]\n for idx in range(-2 * len(arr), -len(arr)):\n with pytest.raises(IndexError):\n arr[idx]\n for idx in range(len(arr), 2 * len(arr)):\n with pytest.raises(IndexError):\n arr[idx]\n\n # check that numpy scalars are supported\n for idx in range(-len(arr), len(arr)):\n assert arr[np.int32(idx)].as_py() == lst[idx]\n\n\ndef test_array_slice():\n arr = pa.array(range(10))\n\n sliced = arr.slice(2)\n expected = pa.array(range(2, 10))\n assert sliced.equals(expected)\n\n sliced2 = arr.slice(2, 4)\n expected2 = pa.array(range(2, 6))\n assert sliced2.equals(expected2)\n\n # 0 offset\n assert arr.slice(0).equals(arr)\n\n # Slice past end of array\n assert len(arr.slice(len(arr))) == 0\n assert len(arr.slice(len(arr) + 2)) == 0\n assert len(arr.slice(len(arr) + 2, 100)) == 0\n\n with pytest.raises(IndexError):\n 
arr.slice(-1)\n\n with pytest.raises(ValueError):\n arr.slice(2, -1)\n\n # Test slice notation\n assert arr[2:].equals(arr.slice(2))\n assert arr[2:5].equals(arr.slice(2, 3))\n assert arr[-5:].equals(arr.slice(len(arr) - 5))\n\n n = len(arr)\n for start in range(-n * 2, n * 2):\n for stop in range(-n * 2, n * 2):\n res = arr[start:stop]\n res.validate()\n expected = arr.to_pylist()[start:stop]\n assert res.to_pylist() == expected\n assert res.to_numpy().tolist() == expected\n\n\ndef test_array_slice_negative_step():\n # ARROW-2714\n np_arr = np.arange(20)\n arr = pa.array(np_arr)\n chunked_arr = pa.chunked_array([arr])\n\n cases = [\n slice(None, None, -1),\n slice(None, 6, -2),\n slice(10, 6, -2),\n slice(8, None, -2),\n slice(2, 10, -2),\n slice(10, 2, -2),\n slice(None, None, 2),\n slice(0, 10, 2),\n ]\n\n for case in cases:\n result = arr[case]\n expected = pa.array(np_arr[case])\n assert result.equals(expected)\n\n result = pa.record_batch([arr], names=['f0'])[case]\n expected = pa.record_batch([expected], names=['f0'])\n assert result.equals(expected)\n\n result = chunked_arr[case]\n expected = pa.chunked_array([np_arr[case]])\n assert result.equals(expected)\n\n\ndef test_array_diff():\n # ARROW-6252\n arr1 = pa.array(['foo'], type=pa.utf8())\n arr2 = pa.array(['foo', 'bar', None], type=pa.utf8())\n arr3 = pa.array([1, 2, 3])\n arr4 = pa.array([[], [1], None], type=pa.list_(pa.int64()))\n\n assert arr1.diff(arr1) == ''\n assert arr1.diff(arr2) == '''\n@@ -1, +1 @@\n+\"bar\"\n+null\n'''\n assert arr1.diff(arr3).strip() == '# Array types differed: string vs int64'\n assert arr1.diff(arr3).strip() == '# Array types differed: string vs int64'\n assert arr1.diff(arr4).strip() == ('# Array types differed: string vs '\n 'list<item: int64>')\n\n\ndef test_array_iter():\n arr = pa.array(range(10))\n\n for i, j in zip(range(10), arr):\n assert i == j.as_py()\n\n assert isinstance(arr, Iterable)\n\n\ndef test_struct_array_slice():\n # ARROW-2311: slicing nested arrays needs special care\n ty = pa.struct([pa.field('a', pa.int8()),\n pa.field('b', pa.float32())])\n arr = pa.array([(1, 2.5), (3, 4.5), (5, 6.5)], type=ty)\n assert arr[1:].to_pylist() == [{'a': 3, 'b': 4.5},\n {'a': 5, 'b': 6.5}]\n\n\ndef test_array_factory_invalid_type():\n\n class MyObject:\n pass\n\n arr = np.array([MyObject()])\n with pytest.raises(ValueError):\n pa.array(arr)\n\n\ndef test_array_ref_to_ndarray_base():\n arr = np.array([1, 2, 3])\n\n refcount = sys.getrefcount(arr)\n arr2 = pa.array(arr) # noqa\n assert sys.getrefcount(arr) == (refcount + 1)\n\n\ndef test_array_eq():\n # ARROW-2150 / ARROW-9445: we define the __eq__ behavior to be\n # data equality (not element-wise equality)\n arr1 = pa.array([1, 2, 3], type=pa.int32())\n arr2 = pa.array([1, 2, 3], type=pa.int32())\n arr3 = pa.array([1, 2, 3], type=pa.int64())\n\n assert (arr1 == arr2) is True\n assert (arr1 != arr2) is False\n assert (arr1 == arr3) is False\n assert (arr1 != arr3) is True\n\n assert (arr1 == 1) is False\n assert (arr1 == None) is False # noqa: E711\n\n\ndef test_array_from_buffers():\n values_buf = pa.py_buffer(np.int16([4, 5, 6, 7]))\n nulls_buf = pa.py_buffer(np.uint8([0b00001101]))\n arr = pa.Array.from_buffers(pa.int16(), 4, [nulls_buf, values_buf])\n assert arr.type == pa.int16()\n assert arr.to_pylist() == [4, None, 6, 7]\n\n arr = pa.Array.from_buffers(pa.int16(), 4, [None, values_buf])\n assert arr.type == pa.int16()\n assert arr.to_pylist() == [4, 5, 6, 7]\n\n arr = pa.Array.from_buffers(pa.int16(), 3, [nulls_buf, values_buf],\n 
offset=1)\n assert arr.type == pa.int16()\n assert arr.to_pylist() == [None, 6, 7]\n\n with pytest.raises(TypeError):\n pa.Array.from_buffers(pa.int16(), 3, ['', ''], offset=1)\n\n\ndef test_string_binary_from_buffers():\n array = pa.array([\"a\", None, \"b\", \"c\"])\n\n buffers = array.buffers()\n copied = pa.StringArray.from_buffers(\n len(array), buffers[1], buffers[2], buffers[0], array.null_count,\n array.offset)\n assert copied.to_pylist() == [\"a\", None, \"b\", \"c\"]\n\n binary_copy = pa.Array.from_buffers(pa.binary(), len(array),\n array.buffers(), array.null_count,\n array.offset)\n assert binary_copy.to_pylist() == [b\"a\", None, b\"b\", b\"c\"]\n\n copied = pa.StringArray.from_buffers(\n len(array), buffers[1], buffers[2], buffers[0])\n assert copied.to_pylist() == [\"a\", None, \"b\", \"c\"]\n\n sliced = array[1:]\n buffers = sliced.buffers()\n copied = pa.StringArray.from_buffers(\n len(sliced), buffers[1], buffers[2], buffers[0], -1, sliced.offset)\n assert copied.to_pylist() == [None, \"b\", \"c\"]\n assert copied.null_count == 1\n\n # Slice but exclude all null entries so that we don't need to pass\n # the null bitmap.\n sliced = array[2:]\n buffers = sliced.buffers()\n copied = pa.StringArray.from_buffers(\n len(sliced), buffers[1], buffers[2], None, -1, sliced.offset)\n assert copied.to_pylist() == [\"b\", \"c\"]\n assert copied.null_count == 0\n\n\n@pytest.mark.parametrize('list_type_factory', [pa.list_, pa.large_list])\ndef test_list_from_buffers(list_type_factory):\n ty = list_type_factory(pa.int16())\n array = pa.array([[0, 1, 2], None, [], [3, 4, 5]], type=ty)\n assert array.type == ty\n\n buffers = array.buffers()\n\n with pytest.raises(ValueError):\n # No children\n pa.Array.from_buffers(ty, 4, [None, buffers[1]])\n\n child = pa.Array.from_buffers(pa.int16(), 6, buffers[2:])\n copied = pa.Array.from_buffers(ty, 4, buffers[:2], children=[child])\n assert copied.equals(array)\n\n with pytest.raises(ValueError):\n # too many children\n pa.Array.from_buffers(ty, 4, [None, buffers[1]],\n children=[child, child])\n\n\ndef test_struct_from_buffers():\n ty = pa.struct([pa.field('a', pa.int16()), pa.field('b', pa.utf8())])\n array = pa.array([{'a': 0, 'b': 'foo'}, None, {'a': 5, 'b': ''}],\n type=ty)\n buffers = array.buffers()\n\n with pytest.raises(ValueError):\n # No children\n pa.Array.from_buffers(ty, 3, [None, buffers[1]])\n\n children = [pa.Array.from_buffers(pa.int16(), 3, buffers[1:3]),\n pa.Array.from_buffers(pa.utf8(), 3, buffers[3:])]\n copied = pa.Array.from_buffers(ty, 3, buffers[:1], children=children)\n assert copied.equals(array)\n\n with pytest.raises(ValueError):\n # not enough many children\n pa.Array.from_buffers(ty, 3, [buffers[0]],\n children=children[:1])\n\n\ndef test_struct_from_arrays():\n a = pa.array([4, 5, 6], type=pa.int64())\n b = pa.array([\"bar\", None, \"\"])\n c = pa.array([[1, 2], None, [3, None]])\n expected_list = [\n {'a': 4, 'b': 'bar', 'c': [1, 2]},\n {'a': 5, 'b': None, 'c': None},\n {'a': 6, 'b': '', 'c': [3, None]},\n ]\n\n # From field names\n arr = pa.StructArray.from_arrays([a, b, c], [\"a\", \"b\", \"c\"])\n assert arr.type == pa.struct(\n [(\"a\", a.type), (\"b\", b.type), (\"c\", c.type)])\n assert arr.to_pylist() == expected_list\n\n with pytest.raises(ValueError):\n pa.StructArray.from_arrays([a, b, c], [\"a\", \"b\"])\n\n arr = pa.StructArray.from_arrays([], [])\n assert arr.type == pa.struct([])\n assert arr.to_pylist() == []\n\n # From fields\n fa = pa.field(\"a\", a.type, nullable=False)\n fb = pa.field(\"b\", 
b.type)\n fc = pa.field(\"c\", c.type)\n arr = pa.StructArray.from_arrays([a, b, c], fields=[fa, fb, fc])\n assert arr.type == pa.struct([fa, fb, fc])\n assert not arr.type[0].nullable\n assert arr.to_pylist() == expected_list\n\n with pytest.raises(ValueError):\n pa.StructArray.from_arrays([a, b, c], fields=[fa, fb])\n\n arr = pa.StructArray.from_arrays([], fields=[])\n assert arr.type == pa.struct([])\n assert arr.to_pylist() == []\n\n # Inconsistent fields\n fa2 = pa.field(\"a\", pa.int32())\n with pytest.raises(ValueError, match=\"int64 vs int32\"):\n pa.StructArray.from_arrays([a, b, c], fields=[fa2, fb, fc])\n\n arrays = [a, b, c]\n fields = [fa, fb, fc]\n # With mask\n mask = pa.array([True, False, False])\n arr = pa.StructArray.from_arrays(arrays, fields=fields, mask=mask)\n assert arr.to_pylist() == [None] + expected_list[1:]\n\n arr = pa.StructArray.from_arrays(arrays, names=['a', 'b', 'c'], mask=mask)\n assert arr.to_pylist() == [None] + expected_list[1:]\n\n # Bad masks\n with pytest.raises(ValueError, match='Mask must be'):\n pa.StructArray.from_arrays(arrays, fields, mask=[True, False, False])\n\n with pytest.raises(ValueError, match='not contain nulls'):\n pa.StructArray.from_arrays(\n arrays, fields, mask=pa.array([True, False, None]))\n\n with pytest.raises(ValueError, match='Mask must be'):\n pa.StructArray.from_arrays(\n arrays, fields, mask=pa.chunked_array([mask]))\n\n\ndef test_struct_array_from_chunked():\n # ARROW-11780\n # Check that we don't segfault when trying to build\n # a StructArray from a chunked array.\n chunked_arr = pa.chunked_array([[1, 2, 3], [4, 5, 6]])\n\n with pytest.raises(TypeError, match=\"Expected Array\"):\n pa.StructArray.from_arrays([chunked_arr], [\"foo\"])\n\n\ndef test_dictionary_from_numpy():\n indices = np.repeat([0, 1, 2], 2)\n dictionary = np.array(['foo', 'bar', 'baz'], dtype=object)\n mask = np.array([False, False, True, False, False, False])\n\n d1 = pa.DictionaryArray.from_arrays(indices, dictionary)\n d2 = pa.DictionaryArray.from_arrays(indices, dictionary, mask=mask)\n\n assert d1.indices.to_pylist() == indices.tolist()\n assert d1.indices.to_pylist() == indices.tolist()\n assert d1.dictionary.to_pylist() == dictionary.tolist()\n assert d2.dictionary.to_pylist() == dictionary.tolist()\n\n for i in range(len(indices)):\n assert d1[i].as_py() == dictionary[indices[i]]\n\n if mask[i]:\n assert d2[i].as_py() is None\n else:\n assert d2[i].as_py() == dictionary[indices[i]]\n\n\ndef test_dictionary_to_numpy():\n expected = pa.array(\n [\"foo\", \"bar\", None, \"foo\"]\n ).to_numpy(zero_copy_only=False)\n a = pa.DictionaryArray.from_arrays(\n pa.array([0, 1, None, 0]),\n pa.array(['foo', 'bar'])\n )\n np.testing.assert_array_equal(a.to_numpy(zero_copy_only=False),\n expected)\n\n with pytest.raises(pa.ArrowInvalid):\n # If this would be changed to no longer raise in the future,\n # ensure to test the actual result because, currently, to_numpy takes\n # for granted that when zero_copy_only=True there will be no nulls\n # (it's the decoding of the DictionaryArray that handles the nulls and\n # this is only activated with zero_copy_only=False)\n a.to_numpy(zero_copy_only=True)\n\n anonulls = pa.DictionaryArray.from_arrays(\n pa.array([0, 1, 1, 0]),\n pa.array(['foo', 'bar'])\n )\n expected = pa.array(\n [\"foo\", \"bar\", \"bar\", \"foo\"]\n ).to_numpy(zero_copy_only=False)\n np.testing.assert_array_equal(anonulls.to_numpy(zero_copy_only=False),\n expected)\n\n with pytest.raises(pa.ArrowInvalid):\n 
anonulls.to_numpy(zero_copy_only=True)\n\n afloat = pa.DictionaryArray.from_arrays(\n pa.array([0, 1, 1, 0]),\n pa.array([13.7, 11.0])\n )\n expected = pa.array([13.7, 11.0, 11.0, 13.7]).to_numpy()\n np.testing.assert_array_equal(afloat.to_numpy(zero_copy_only=True),\n expected)\n np.testing.assert_array_equal(afloat.to_numpy(zero_copy_only=False),\n expected)\n\n afloat2 = pa.DictionaryArray.from_arrays(\n pa.array([0, 1, None, 0]),\n pa.array([13.7, 11.0])\n )\n expected = pa.array(\n [13.7, 11.0, None, 13.7]\n ).to_numpy(zero_copy_only=False)\n np.testing.assert_allclose(\n afloat2.to_numpy(zero_copy_only=False),\n expected,\n equal_nan=True\n )\n\n # Testing for integers can reveal problems related to dealing\n # with None values, as a numpy array of int dtype\n # can't contain NaN nor None.\n aints = pa.DictionaryArray.from_arrays(\n pa.array([0, 1, None, 0]),\n pa.array([7, 11])\n )\n expected = pa.array([7, 11, None, 7]).to_numpy(zero_copy_only=False)\n np.testing.assert_allclose(\n aints.to_numpy(zero_copy_only=False),\n expected,\n equal_nan=True\n )\n\n\ndef test_dictionary_from_boxed_arrays():\n indices = np.repeat([0, 1, 2], 2)\n dictionary = np.array(['foo', 'bar', 'baz'], dtype=object)\n\n iarr = pa.array(indices)\n darr = pa.array(dictionary)\n\n d1 = pa.DictionaryArray.from_arrays(iarr, darr)\n\n assert d1.indices.to_pylist() == indices.tolist()\n assert d1.dictionary.to_pylist() == dictionary.tolist()\n\n for i in range(len(indices)):\n assert d1[i].as_py() == dictionary[indices[i]]\n\n\ndef test_dictionary_from_arrays_boundscheck():\n indices1 = pa.array([0, 1, 2, 0, 1, 2])\n indices2 = pa.array([0, -1, 2])\n indices3 = pa.array([0, 1, 2, 3])\n\n dictionary = pa.array(['foo', 'bar', 'baz'])\n\n # Works fine\n pa.DictionaryArray.from_arrays(indices1, dictionary)\n\n with pytest.raises(pa.ArrowException):\n pa.DictionaryArray.from_arrays(indices2, dictionary)\n\n with pytest.raises(pa.ArrowException):\n pa.DictionaryArray.from_arrays(indices3, dictionary)\n\n # If we are confident that the indices are \"safe\" we can pass safe=False to\n # disable the boundschecking\n pa.DictionaryArray.from_arrays(indices2, dictionary, safe=False)\n\n\ndef test_dictionary_indices():\n # https://issues.apache.org/jira/browse/ARROW-6882\n indices = pa.array([0, 1, 2, 0, 1, 2])\n dictionary = pa.array(['foo', 'bar', 'baz'])\n arr = pa.DictionaryArray.from_arrays(indices, dictionary)\n arr.indices.validate(full=True)\n\n\n@pytest.mark.parametrize(('list_array_type', 'list_type_factory'),\n [(pa.ListArray, pa.list_),\n (pa.LargeListArray, pa.large_list)])\ndef test_list_from_arrays(list_array_type, list_type_factory):\n offsets_arr = np.array([0, 2, 5, 8], dtype='i4')\n offsets = pa.array(offsets_arr, type='int32')\n pyvalues = [b'a', b'b', b'c', b'd', b'e', b'f', b'g', b'h']\n values = pa.array(pyvalues, type='binary')\n\n result = list_array_type.from_arrays(offsets, values)\n expected = pa.array([pyvalues[:2], pyvalues[2:5], pyvalues[5:8]],\n type=list_type_factory(pa.binary()))\n\n assert result.equals(expected)\n\n # With nulls\n offsets = [0, None, 2, 6]\n values = [b'a', b'b', b'c', b'd', b'e', b'f']\n\n result = list_array_type.from_arrays(offsets, values)\n expected = pa.array([values[:2], None, values[2:]],\n type=list_type_factory(pa.binary()))\n\n assert result.equals(expected)\n\n # Another edge case\n offsets2 = [0, 2, None, 6]\n result = list_array_type.from_arrays(offsets2, values)\n expected = pa.array([values[:2], values[2:], None],\n type=list_type_factory(pa.binary()))\n 
assert result.equals(expected)\n\n # raise on invalid array\n offsets = [1, 3, 10]\n values = np.arange(5)\n with pytest.raises(ValueError):\n list_array_type.from_arrays(offsets, values)\n\n # Non-monotonic offsets\n offsets = [0, 3, 2, 6]\n values = list(range(6))\n result = list_array_type.from_arrays(offsets, values)\n with pytest.raises(ValueError):\n result.validate(full=True)\n\n\ndef test_map_from_arrays():\n offsets_arr = np.array([0, 2, 5, 8], dtype='i4')\n offsets = pa.array(offsets_arr, type='int32')\n pykeys = [b'a', b'b', b'c', b'd', b'e', b'f', b'g', b'h']\n pyitems = list(range(len(pykeys)))\n pypairs = list(zip(pykeys, pyitems))\n pyentries = [pypairs[:2], pypairs[2:5], pypairs[5:8]]\n keys = pa.array(pykeys, type='binary')\n items = pa.array(pyitems, type='i4')\n\n result = pa.MapArray.from_arrays(offsets, keys, items)\n expected = pa.array(pyentries, type=pa.map_(pa.binary(), pa.int32()))\n\n assert result.equals(expected)\n\n # With nulls\n offsets = [0, None, 2, 6]\n pykeys = [b'a', b'b', b'c', b'd', b'e', b'f']\n pyitems = [1, 2, 3, None, 4, 5]\n pypairs = list(zip(pykeys, pyitems))\n pyentries = [pypairs[:2], None, pypairs[2:]]\n keys = pa.array(pykeys, type='binary')\n items = pa.array(pyitems, type='i4')\n\n result = pa.MapArray.from_arrays(offsets, keys, items)\n expected = pa.array(pyentries, type=pa.map_(pa.binary(), pa.int32()))\n\n assert result.equals(expected)\n\n # check invalid usage\n\n offsets = [0, 1, 3, 5]\n keys = np.arange(5)\n items = np.arange(5)\n _ = pa.MapArray.from_arrays(offsets, keys, items)\n\n # raise on invalid offsets\n with pytest.raises(ValueError):\n pa.MapArray.from_arrays(offsets + [6], keys, items)\n\n # raise on length of keys != items\n with pytest.raises(ValueError):\n pa.MapArray.from_arrays(offsets, keys, np.concatenate([items, items]))\n\n # raise on keys with null\n keys_with_null = list(keys)[:-1] + [None]\n assert len(keys_with_null) == len(items)\n with pytest.raises(ValueError):\n pa.MapArray.from_arrays(offsets, keys_with_null, items)\n\n\ndef test_fixed_size_list_from_arrays():\n values = pa.array(range(12), pa.int64())\n result = pa.FixedSizeListArray.from_arrays(values, 4)\n assert result.to_pylist() == [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]]\n assert result.type.equals(pa.list_(pa.int64(), 4))\n\n # raise on invalid values / list_size\n with pytest.raises(ValueError):\n pa.FixedSizeListArray.from_arrays(values, -4)\n\n with pytest.raises(ValueError):\n # array with list size 0 cannot be constructed with from_arrays\n pa.FixedSizeListArray.from_arrays(pa.array([], pa.int64()), 0)\n\n with pytest.raises(ValueError):\n # length of values not multiple of 5\n pa.FixedSizeListArray.from_arrays(values, 5)\n\n\ndef test_variable_list_from_arrays():\n values = pa.array([1, 2, 3, 4], pa.int64())\n offsets = pa.array([0, 2, 4])\n result = pa.ListArray.from_arrays(offsets, values)\n assert result.to_pylist() == [[1, 2], [3, 4]]\n assert result.type.equals(pa.list_(pa.int64()))\n\n offsets = pa.array([0, None, 2, 4])\n result = pa.ListArray.from_arrays(offsets, values)\n assert result.to_pylist() == [[1, 2], None, [3, 4]]\n\n # raise if offset out of bounds\n with pytest.raises(ValueError):\n pa.ListArray.from_arrays(pa.array([-1, 2, 4]), values)\n\n with pytest.raises(ValueError):\n pa.ListArray.from_arrays(pa.array([0, 2, 5]), values)\n\n\ndef test_union_from_dense():\n binary = pa.array([b'a', b'b', b'c', b'd'], type='binary')\n int64 = pa.array([1, 2, 3], type='int64')\n types = pa.array([0, 1, 0, 0, 1, 1, 0], 
type='int8')\n logical_types = pa.array([11, 13, 11, 11, 13, 13, 11], type='int8')\n value_offsets = pa.array([0, 0, 1, 2, 1, 2, 3], type='int32')\n py_value = [b'a', 1, b'b', b'c', 2, 3, b'd']\n\n def check_result(result, expected_field_names, expected_type_codes,\n expected_type_code_values):\n result.validate(full=True)\n actual_field_names = [result.type[i].name\n for i in range(result.type.num_fields)]\n assert actual_field_names == expected_field_names\n assert result.type.mode == \"dense\"\n assert result.type.type_codes == expected_type_codes\n assert result.to_pylist() == py_value\n assert expected_type_code_values.equals(result.type_codes)\n assert value_offsets.equals(result.offsets)\n assert result.field(0).equals(binary)\n assert result.field(1).equals(int64)\n with pytest.raises(KeyError):\n result.field(-1)\n with pytest.raises(KeyError):\n result.field(2)\n\n # without field names and type codes\n check_result(pa.UnionArray.from_dense(types, value_offsets,\n [binary, int64]),\n expected_field_names=['0', '1'],\n expected_type_codes=[0, 1],\n expected_type_code_values=types)\n\n # with field names\n check_result(pa.UnionArray.from_dense(types, value_offsets,\n [binary, int64],\n ['bin', 'int']),\n expected_field_names=['bin', 'int'],\n expected_type_codes=[0, 1],\n expected_type_code_values=types)\n\n # with type codes\n check_result(pa.UnionArray.from_dense(logical_types, value_offsets,\n [binary, int64],\n type_codes=[11, 13]),\n expected_field_names=['0', '1'],\n expected_type_codes=[11, 13],\n expected_type_code_values=logical_types)\n\n # with field names and type codes\n check_result(pa.UnionArray.from_dense(logical_types, value_offsets,\n [binary, int64],\n ['bin', 'int'], [11, 13]),\n expected_field_names=['bin', 'int'],\n expected_type_codes=[11, 13],\n expected_type_code_values=logical_types)\n\n # Bad type ids\n arr = pa.UnionArray.from_dense(logical_types, value_offsets,\n [binary, int64])\n with pytest.raises(pa.ArrowInvalid):\n arr.validate(full=True)\n arr = pa.UnionArray.from_dense(types, value_offsets, [binary, int64],\n type_codes=[11, 13])\n with pytest.raises(pa.ArrowInvalid):\n arr.validate(full=True)\n\n # Offset larger than child size\n bad_offsets = pa.array([0, 0, 1, 2, 1, 2, 4], type='int32')\n arr = pa.UnionArray.from_dense(types, bad_offsets, [binary, int64])\n with pytest.raises(pa.ArrowInvalid):\n arr.validate(full=True)\n\n\ndef test_union_from_sparse():\n binary = pa.array([b'a', b' ', b'b', b'c', b' ', b' ', b'd'],\n type='binary')\n int64 = pa.array([0, 1, 0, 0, 2, 3, 0], type='int64')\n types = pa.array([0, 1, 0, 0, 1, 1, 0], type='int8')\n logical_types = pa.array([11, 13, 11, 11, 13, 13, 11], type='int8')\n py_value = [b'a', 1, b'b', b'c', 2, 3, b'd']\n\n def check_result(result, expected_field_names, expected_type_codes,\n expected_type_code_values):\n result.validate(full=True)\n assert result.to_pylist() == py_value\n actual_field_names = [result.type[i].name\n for i in range(result.type.num_fields)]\n assert actual_field_names == expected_field_names\n assert result.type.mode == \"sparse\"\n assert result.type.type_codes == expected_type_codes\n assert expected_type_code_values.equals(result.type_codes)\n assert result.field(0).equals(binary)\n assert result.field(1).equals(int64)\n with pytest.raises(pa.ArrowTypeError):\n result.offsets\n with pytest.raises(KeyError):\n result.field(-1)\n with pytest.raises(KeyError):\n result.field(2)\n\n # without field names and type codes\n check_result(pa.UnionArray.from_sparse(types, [binary, 
int64]),\n expected_field_names=['0', '1'],\n expected_type_codes=[0, 1],\n expected_type_code_values=types)\n\n # with field names\n check_result(pa.UnionArray.from_sparse(types, [binary, int64],\n ['bin', 'int']),\n expected_field_names=['bin', 'int'],\n expected_type_codes=[0, 1],\n expected_type_code_values=types)\n\n # with type codes\n check_result(pa.UnionArray.from_sparse(logical_types, [binary, int64],\n type_codes=[11, 13]),\n expected_field_names=['0', '1'],\n expected_type_codes=[11, 13],\n expected_type_code_values=logical_types)\n\n # with field names and type codes\n check_result(pa.UnionArray.from_sparse(logical_types, [binary, int64],\n ['bin', 'int'],\n [11, 13]),\n expected_field_names=['bin', 'int'],\n expected_type_codes=[11, 13],\n expected_type_code_values=logical_types)\n\n # Bad type ids\n arr = pa.UnionArray.from_sparse(logical_types, [binary, int64])\n with pytest.raises(pa.ArrowInvalid):\n arr.validate(full=True)\n arr = pa.UnionArray.from_sparse(types, [binary, int64],\n type_codes=[11, 13])\n with pytest.raises(pa.ArrowInvalid):\n arr.validate(full=True)\n\n # Invalid child length\n with pytest.raises(pa.ArrowInvalid):\n arr = pa.UnionArray.from_sparse(logical_types, [binary, int64[1:]])\n\n\ndef test_union_array_to_pylist_with_nulls():\n # ARROW-9556\n arr = pa.UnionArray.from_sparse(\n pa.array([0, 1, 0, 0, 1], type=pa.int8()),\n [\n pa.array([0.0, 1.1, None, 3.3, 4.4]),\n pa.array([True, None, False, True, False]),\n ]\n )\n assert arr.to_pylist() == [0.0, None, None, 3.3, False]\n\n arr = pa.UnionArray.from_dense(\n pa.array([0, 1, 0, 0, 0, 1, 1], type=pa.int8()),\n pa.array([0, 0, 1, 2, 3, 1, 2], type=pa.int32()),\n [\n pa.array([0.0, 1.1, None, 3.3]),\n pa.array([True, None, False])\n ]\n )\n assert arr.to_pylist() == [0.0, True, 1.1, None, 3.3, None, False]\n\n\ndef test_union_array_slice():\n # ARROW-2314\n arr = pa.UnionArray.from_sparse(pa.array([0, 0, 1, 1], type=pa.int8()),\n [pa.array([\"a\", \"b\", \"c\", \"d\"]),\n pa.array([1, 2, 3, 4])])\n assert arr[1:].to_pylist() == [\"b\", 3, 4]\n\n binary = pa.array([b'a', b'b', b'c', b'd'], type='binary')\n int64 = pa.array([1, 2, 3], type='int64')\n types = pa.array([0, 1, 0, 0, 1, 1, 0], type='int8')\n value_offsets = pa.array([0, 0, 2, 1, 1, 2, 3], type='int32')\n\n arr = pa.UnionArray.from_dense(types, value_offsets, [binary, int64])\n lst = arr.to_pylist()\n for i in range(len(arr)):\n for j in range(i, len(arr)):\n assert arr[i:j].to_pylist() == lst[i:j]\n\n\ndef _check_cast_case(case, *, safe=True, check_array_construction=True):\n in_data, in_type, out_data, out_type = case\n if isinstance(out_data, pa.Array):\n assert out_data.type == out_type\n expected = out_data\n else:\n expected = pa.array(out_data, type=out_type)\n\n # check casting an already created array\n if isinstance(in_data, pa.Array):\n assert in_data.type == in_type\n in_arr = in_data\n else:\n in_arr = pa.array(in_data, type=in_type)\n casted = in_arr.cast(out_type, safe=safe)\n casted.validate(full=True)\n assert casted.equals(expected)\n\n # constructing an array with out type which optionally involves casting\n # for more see ARROW-1949\n if check_array_construction:\n in_arr = pa.array(in_data, type=out_type, safe=safe)\n assert in_arr.equals(expected)\n\n\ndef test_cast_integers_safe():\n safe_cases = [\n (np.array([0, 1, 2, 3], dtype='i1'), 'int8',\n np.array([0, 1, 2, 3], dtype='i4'), pa.int32()),\n (np.array([0, 1, 2, 3], dtype='i1'), 'int8',\n np.array([0, 1, 2, 3], dtype='u4'), pa.uint16()),\n (np.array([0, 1, 2, 3], 
dtype='i1'), 'int8',\n np.array([0, 1, 2, 3], dtype='u1'), pa.uint8()),\n (np.array([0, 1, 2, 3], dtype='i1'), 'int8',\n np.array([0, 1, 2, 3], dtype='f8'), pa.float64())\n ]\n\n for case in safe_cases:\n _check_cast_case(case)\n\n unsafe_cases = [\n (np.array([50000], dtype='i4'), 'int32', 'int16'),\n (np.array([70000], dtype='i4'), 'int32', 'uint16'),\n (np.array([-1], dtype='i4'), 'int32', 'uint16'),\n (np.array([50000], dtype='u2'), 'uint16', 'int16')\n ]\n for in_data, in_type, out_type in unsafe_cases:\n in_arr = pa.array(in_data, type=in_type)\n\n with pytest.raises(pa.ArrowInvalid):\n in_arr.cast(out_type)\n\n\ndef test_cast_none():\n # ARROW-3735: Ensure that calling cast(None) doesn't segfault.\n arr = pa.array([1, 2, 3])\n\n with pytest.raises(ValueError):\n arr.cast(None)\n\n\ndef test_cast_list_to_primitive():\n # ARROW-8070: cast segfaults on unsupported cast from list<binary> to utf8\n arr = pa.array([[1, 2], [3, 4]])\n with pytest.raises(NotImplementedError):\n arr.cast(pa.int8())\n\n arr = pa.array([[b\"a\", b\"b\"], [b\"c\"]], pa.list_(pa.binary()))\n with pytest.raises(NotImplementedError):\n arr.cast(pa.binary())\n\n\ndef test_slice_chunked_array_zero_chunks():\n # ARROW-8911\n arr = pa.chunked_array([], type='int8')\n assert arr.num_chunks == 0\n\n result = arr[:]\n assert result.equals(arr)\n\n # Do not crash\n arr[:5]\n\n\ndef test_cast_chunked_array():\n arrays = [pa.array([1, 2, 3]), pa.array([4, 5, 6])]\n carr = pa.chunked_array(arrays)\n\n target = pa.float64()\n casted = carr.cast(target)\n expected = pa.chunked_array([x.cast(target) for x in arrays])\n assert casted.equals(expected)\n\n\ndef test_cast_chunked_array_empty():\n # ARROW-8142\n for typ1, typ2 in [(pa.dictionary(pa.int8(), pa.string()), pa.string()),\n (pa.int64(), pa.int32())]:\n\n arr = pa.chunked_array([], type=typ1)\n result = arr.cast(typ2)\n expected = pa.chunked_array([], type=typ2)\n assert result.equals(expected)\n\n\ndef test_chunked_array_data_warns():\n with pytest.warns(FutureWarning):\n res = pa.chunked_array([[]]).data\n assert isinstance(res, pa.ChunkedArray)\n\n\ndef test_cast_integers_unsafe():\n # We let NumPy do the unsafe casting\n unsafe_cases = [\n (np.array([50000], dtype='i4'), 'int32',\n np.array([50000], dtype='i2'), pa.int16()),\n (np.array([70000], dtype='i4'), 'int32',\n np.array([70000], dtype='u2'), pa.uint16()),\n (np.array([-1], dtype='i4'), 'int32',\n np.array([-1], dtype='u2'), pa.uint16()),\n (np.array([50000], dtype='u2'), pa.uint16(),\n np.array([50000], dtype='i2'), pa.int16())\n ]\n\n for case in unsafe_cases:\n _check_cast_case(case, safe=False)\n\n\ndef test_floating_point_truncate_safe():\n safe_cases = [\n (np.array([1.0, 2.0, 3.0], dtype='float32'), 'float32',\n np.array([1, 2, 3], dtype='i4'), pa.int32()),\n (np.array([1.0, 2.0, 3.0], dtype='float64'), 'float64',\n np.array([1, 2, 3], dtype='i4'), pa.int32()),\n (np.array([-10.0, 20.0, -30.0], dtype='float64'), 'float64',\n np.array([-10, 20, -30], dtype='i4'), pa.int32()),\n ]\n for case in safe_cases:\n _check_cast_case(case, safe=True)\n\n\ndef test_floating_point_truncate_unsafe():\n unsafe_cases = [\n (np.array([1.1, 2.2, 3.3], dtype='float32'), 'float32',\n np.array([1, 2, 3], dtype='i4'), pa.int32()),\n (np.array([1.1, 2.2, 3.3], dtype='float64'), 'float64',\n np.array([1, 2, 3], dtype='i4'), pa.int32()),\n (np.array([-10.1, 20.2, -30.3], dtype='float64'), 'float64',\n np.array([-10, 20, -30], dtype='i4'), pa.int32()),\n ]\n for case in unsafe_cases:\n # test safe casting raises\n with 
pytest.raises(pa.ArrowInvalid, match='truncated'):\n _check_cast_case(case, safe=True)\n\n # test unsafe casting truncates\n _check_cast_case(case, safe=False)\n\n\ndef test_decimal_to_int_safe():\n safe_cases = [\n (\n [decimal.Decimal(\"123456\"), None, decimal.Decimal(\"-912345\")],\n pa.decimal128(32, 5),\n [123456, None, -912345],\n pa.int32()\n ),\n (\n [decimal.Decimal(\"1234\"), None, decimal.Decimal(\"-9123\")],\n pa.decimal128(19, 10),\n [1234, None, -9123],\n pa.int16()\n ),\n (\n [decimal.Decimal(\"123\"), None, decimal.Decimal(\"-91\")],\n pa.decimal128(19, 10),\n [123, None, -91],\n pa.int8()\n ),\n ]\n for case in safe_cases:\n _check_cast_case(case)\n _check_cast_case(case, safe=True)\n\n\ndef test_decimal_to_int_value_out_of_bounds():\n out_of_bounds_cases = [\n (\n np.array([\n decimal.Decimal(\"1234567890123\"),\n None,\n decimal.Decimal(\"-912345678901234\")\n ]),\n pa.decimal128(32, 5),\n [1912276171, None, -135950322],\n pa.int32()\n ),\n (\n [decimal.Decimal(\"123456\"), None, decimal.Decimal(\"-912345678\")],\n pa.decimal128(32, 5),\n [-7616, None, -19022],\n pa.int16()\n ),\n (\n [decimal.Decimal(\"1234\"), None, decimal.Decimal(\"-9123\")],\n pa.decimal128(32, 5),\n [-46, None, 93],\n pa.int8()\n ),\n ]\n\n for case in out_of_bounds_cases:\n # test safe casting raises\n with pytest.raises(pa.ArrowInvalid,\n match='Integer value out of bounds'):\n _check_cast_case(case)\n\n # XXX `safe=False` can be ignored when constructing an array\n # from a sequence of Python objects (ARROW-8567)\n _check_cast_case(case, safe=False, check_array_construction=False)\n\n\ndef test_decimal_to_int_non_integer():\n non_integer_cases = [\n (\n [\n decimal.Decimal(\"123456.21\"),\n None,\n decimal.Decimal(\"-912345.13\")\n ],\n pa.decimal128(32, 5),\n [123456, None, -912345],\n pa.int32()\n ),\n (\n [decimal.Decimal(\"1234.134\"), None, decimal.Decimal(\"-9123.1\")],\n pa.decimal128(19, 10),\n [1234, None, -9123],\n pa.int16()\n ),\n (\n [decimal.Decimal(\"123.1451\"), None, decimal.Decimal(\"-91.21\")],\n pa.decimal128(19, 10),\n [123, None, -91],\n pa.int8()\n ),\n ]\n\n for case in non_integer_cases:\n # test safe casting raises\n msg_regexp = 'Rescaling Decimal128 value would cause data loss'\n with pytest.raises(pa.ArrowInvalid, match=msg_regexp):\n _check_cast_case(case)\n\n _check_cast_case(case, safe=False)\n\n\ndef test_decimal_to_decimal():\n arr = pa.array(\n [decimal.Decimal(\"1234.12\"), None],\n type=pa.decimal128(19, 10)\n )\n result = arr.cast(pa.decimal128(15, 6))\n expected = pa.array(\n [decimal.Decimal(\"1234.12\"), None],\n type=pa.decimal128(15, 6)\n )\n assert result.equals(expected)\n\n msg_regexp = 'Rescaling Decimal128 value would cause data loss'\n with pytest.raises(pa.ArrowInvalid, match=msg_regexp):\n result = arr.cast(pa.decimal128(9, 1))\n\n result = arr.cast(pa.decimal128(9, 1), safe=False)\n expected = pa.array(\n [decimal.Decimal(\"1234.1\"), None],\n type=pa.decimal128(9, 1)\n )\n assert result.equals(expected)\n\n with pytest.raises(pa.ArrowInvalid,\n match='Decimal value does not fit in precision'):\n result = arr.cast(pa.decimal128(5, 2))\n\n\ndef test_safe_cast_nan_to_int_raises():\n arr = pa.array([np.nan, 1.])\n\n with pytest.raises(pa.ArrowInvalid, match='truncated'):\n arr.cast(pa.int64(), safe=True)\n\n\ndef test_cast_signed_to_unsigned():\n safe_cases = [\n (np.array([0, 1, 2, 3], dtype='i1'), pa.uint8(),\n np.array([0, 1, 2, 3], dtype='u1'), pa.uint8()),\n (np.array([0, 1, 2, 3], dtype='i2'), pa.uint16(),\n np.array([0, 1, 2, 3], 
dtype='u2'), pa.uint16())\n ]\n\n for case in safe_cases:\n _check_cast_case(case)\n\n\ndef test_cast_from_null():\n in_data = [None] * 3\n in_type = pa.null()\n out_types = [\n pa.null(),\n pa.uint8(),\n pa.float16(),\n pa.utf8(),\n pa.binary(),\n pa.binary(10),\n pa.list_(pa.int16()),\n pa.list_(pa.int32(), 4),\n pa.large_list(pa.uint8()),\n pa.decimal128(19, 4),\n pa.timestamp('us'),\n pa.timestamp('us', tz='UTC'),\n pa.timestamp('us', tz='Europe/Paris'),\n pa.duration('us'),\n pa.struct([pa.field('a', pa.int32()),\n pa.field('b', pa.list_(pa.int8())),\n pa.field('c', pa.string())]),\n pa.dictionary(pa.int32(), pa.string()),\n ]\n for out_type in out_types:\n _check_cast_case((in_data, in_type, in_data, out_type))\n\n out_types = [\n\n pa.union([pa.field('a', pa.binary(10)),\n pa.field('b', pa.string())], mode=pa.lib.UnionMode_DENSE),\n pa.union([pa.field('a', pa.binary(10)),\n pa.field('b', pa.string())], mode=pa.lib.UnionMode_SPARSE),\n ]\n in_arr = pa.array(in_data, type=pa.null())\n for out_type in out_types:\n with pytest.raises(NotImplementedError):\n in_arr.cast(out_type)\n\n\ndef test_cast_string_to_number_roundtrip():\n cases = [\n (pa.array([\"1\", \"127\", \"-128\"]),\n pa.array([1, 127, -128], type=pa.int8())),\n (pa.array([None, \"18446744073709551615\"]),\n pa.array([None, 18446744073709551615], type=pa.uint64())),\n ]\n for in_arr, expected in cases:\n casted = in_arr.cast(expected.type, safe=True)\n casted.validate(full=True)\n assert casted.equals(expected)\n casted_back = casted.cast(in_arr.type, safe=True)\n casted_back.validate(full=True)\n assert casted_back.equals(in_arr)\n\n\ndef test_cast_dictionary():\n arr = pa.DictionaryArray.from_arrays(\n pa.array([0, 1, None], type=pa.int32()),\n pa.array([\"foo\", \"bar\"]))\n assert arr.cast(pa.string()).equals(pa.array([\"foo\", \"bar\", None]))\n with pytest.raises(pa.ArrowInvalid):\n # Shouldn't crash (ARROW-7077)\n arr.cast(pa.int32())\n\n\ndef test_view():\n # ARROW-5992\n arr = pa.array(['foo', 'bar', 'baz'], type=pa.utf8())\n expected = pa.array(['foo', 'bar', 'baz'], type=pa.binary())\n\n assert arr.view(pa.binary()).equals(expected)\n assert arr.view('binary').equals(expected)\n\n\ndef test_unique_simple():\n cases = [\n (pa.array([1, 2, 3, 1, 2, 3]), pa.array([1, 2, 3])),\n (pa.array(['foo', None, 'bar', 'foo']),\n pa.array(['foo', None, 'bar'])),\n (pa.array(['foo', None, 'bar', 'foo'], pa.large_binary()),\n pa.array(['foo', None, 'bar'], pa.large_binary())),\n ]\n for arr, expected in cases:\n result = arr.unique()\n assert result.equals(expected)\n result = pa.chunked_array([arr]).unique()\n assert result.equals(expected)\n\n\ndef test_value_counts_simple():\n cases = [\n (pa.array([1, 2, 3, 1, 2, 3]),\n pa.array([1, 2, 3]),\n pa.array([2, 2, 2], type=pa.int64())),\n (pa.array(['foo', None, 'bar', 'foo']),\n pa.array(['foo', None, 'bar']),\n pa.array([2, 1, 1], type=pa.int64())),\n (pa.array(['foo', None, 'bar', 'foo'], pa.large_binary()),\n pa.array(['foo', None, 'bar'], pa.large_binary()),\n pa.array([2, 1, 1], type=pa.int64())),\n ]\n for arr, expected_values, expected_counts in cases:\n for arr_in in (arr, pa.chunked_array([arr])):\n result = arr_in.value_counts()\n assert result.type.equals(\n pa.struct([pa.field(\"values\", arr.type),\n pa.field(\"counts\", pa.int64())]))\n assert result.field(\"values\").equals(expected_values)\n assert result.field(\"counts\").equals(expected_counts)\n\n\ndef test_unique_value_counts_dictionary_type():\n indices = pa.array([3, 0, 0, 0, 1, 1, 3, 0, 1, 3, 0, 1])\n 
dictionary = pa.array(['foo', 'bar', 'baz', 'qux'])\n\n arr = pa.DictionaryArray.from_arrays(indices, dictionary)\n\n unique_result = arr.unique()\n expected = pa.DictionaryArray.from_arrays(indices.unique(), dictionary)\n assert unique_result.equals(expected)\n\n result = arr.value_counts()\n result.field('values').equals(unique_result)\n result.field('counts').equals(pa.array([3, 5, 4], type='int64'))\n\n\ndef test_dictionary_encode_simple():\n cases = [\n (pa.array([1, 2, 3, None, 1, 2, 3]),\n pa.DictionaryArray.from_arrays(\n pa.array([0, 1, 2, None, 0, 1, 2], type='int32'),\n [1, 2, 3])),\n (pa.array(['foo', None, 'bar', 'foo']),\n pa.DictionaryArray.from_arrays(\n pa.array([0, None, 1, 0], type='int32'),\n ['foo', 'bar'])),\n (pa.array(['foo', None, 'bar', 'foo'], type=pa.large_binary()),\n pa.DictionaryArray.from_arrays(\n pa.array([0, None, 1, 0], type='int32'),\n pa.array(['foo', 'bar'], type=pa.large_binary()))),\n ]\n for arr, expected in cases:\n result = arr.dictionary_encode()\n assert result.equals(expected)\n result = pa.chunked_array([arr]).dictionary_encode()\n assert result.num_chunks == 1\n assert result.chunk(0).equals(expected)\n result = pa.chunked_array([], type=arr.type).dictionary_encode()\n assert result.num_chunks == 0\n assert result.type == expected.type\n\n\ndef test_dictionary_encode_sliced():\n cases = [\n (pa.array([1, 2, 3, None, 1, 2, 3])[1:-1],\n pa.DictionaryArray.from_arrays(\n pa.array([0, 1, None, 2, 0], type='int32'),\n [2, 3, 1])),\n (pa.array([None, 'foo', 'bar', 'foo', 'xyzzy'])[1:-1],\n pa.DictionaryArray.from_arrays(\n pa.array([0, 1, 0], type='int32'),\n ['foo', 'bar'])),\n (pa.array([None, 'foo', 'bar', 'foo', 'xyzzy'],\n type=pa.large_string())[1:-1],\n pa.DictionaryArray.from_arrays(\n pa.array([0, 1, 0], type='int32'),\n pa.array(['foo', 'bar'], type=pa.large_string()))),\n ]\n for arr, expected in cases:\n result = arr.dictionary_encode()\n assert result.equals(expected)\n result = pa.chunked_array([arr]).dictionary_encode()\n assert result.num_chunks == 1\n assert result.type == expected.type\n assert result.chunk(0).equals(expected)\n result = pa.chunked_array([], type=arr.type).dictionary_encode()\n assert result.num_chunks == 0\n assert result.type == expected.type\n\n # ARROW-9143 dictionary_encode after slice was segfaulting\n array = pa.array(['foo', 'bar', 'baz'])\n array.slice(1).dictionary_encode()\n\n\ndef test_dictionary_encode_zero_length():\n # User-facing experience of ARROW-7008\n arr = pa.array([], type=pa.string())\n encoded = arr.dictionary_encode()\n assert len(encoded.dictionary) == 0\n encoded.validate(full=True)\n\n\ndef test_dictionary_decode():\n cases = [\n (pa.array([1, 2, 3, None, 1, 2, 3]),\n pa.DictionaryArray.from_arrays(\n pa.array([0, 1, 2, None, 0, 1, 2], type='int32'),\n [1, 2, 3])),\n (pa.array(['foo', None, 'bar', 'foo']),\n pa.DictionaryArray.from_arrays(\n pa.array([0, None, 1, 0], type='int32'),\n ['foo', 'bar'])),\n (pa.array(['foo', None, 'bar', 'foo'], type=pa.large_binary()),\n pa.DictionaryArray.from_arrays(\n pa.array([0, None, 1, 0], type='int32'),\n pa.array(['foo', 'bar'], type=pa.large_binary()))),\n ]\n for expected, arr in cases:\n result = arr.dictionary_decode()\n assert result.equals(expected)\n\n\ndef test_cast_time32_to_int():\n arr = pa.array(np.array([0, 1, 2], dtype='int32'),\n type=pa.time32('s'))\n expected = pa.array([0, 1, 2], type='i4')\n\n result = arr.cast('i4')\n assert result.equals(expected)\n\n\ndef test_cast_time64_to_int():\n arr = pa.array(np.array([0, 1, 2], 
dtype='int64'),\n type=pa.time64('us'))\n expected = pa.array([0, 1, 2], type='i8')\n\n result = arr.cast('i8')\n assert result.equals(expected)\n\n\ndef test_cast_timestamp_to_int():\n arr = pa.array(np.array([0, 1, 2], dtype='int64'),\n type=pa.timestamp('us'))\n expected = pa.array([0, 1, 2], type='i8')\n\n result = arr.cast('i8')\n assert result.equals(expected)\n\n\ndef test_cast_date32_to_int():\n arr = pa.array([0, 1, 2], type='i4')\n\n result1 = arr.cast('date32')\n result2 = result1.cast('i4')\n\n expected1 = pa.array([\n datetime.date(1970, 1, 1),\n datetime.date(1970, 1, 2),\n datetime.date(1970, 1, 3)\n ]).cast('date32')\n\n assert result1.equals(expected1)\n assert result2.equals(arr)\n\n\ndef test_cast_duration_to_int():\n arr = pa.array(np.array([0, 1, 2], dtype='int64'),\n type=pa.duration('us'))\n expected = pa.array([0, 1, 2], type='i8')\n\n result = arr.cast('i8')\n assert result.equals(expected)\n\n\ndef test_cast_binary_to_utf8():\n binary_arr = pa.array([b'foo', b'bar', b'baz'], type=pa.binary())\n utf8_arr = binary_arr.cast(pa.utf8())\n expected = pa.array(['foo', 'bar', 'baz'], type=pa.utf8())\n\n assert utf8_arr.equals(expected)\n\n non_utf8_values = [('mañana').encode('utf-16-le')]\n non_utf8_binary = pa.array(non_utf8_values)\n assert non_utf8_binary.type == pa.binary()\n with pytest.raises(ValueError):\n non_utf8_binary.cast(pa.string())\n\n non_utf8_all_null = pa.array(non_utf8_values, mask=np.array([True]),\n type=pa.binary())\n # No error\n casted = non_utf8_all_null.cast(pa.string())\n assert casted.null_count == 1\n\n\ndef test_cast_date64_to_int():\n arr = pa.array(np.array([0, 1, 2], dtype='int64'),\n type=pa.date64())\n expected = pa.array([0, 1, 2], type='i8')\n\n result = arr.cast('i8')\n\n assert result.equals(expected)\n\n\ndef test_date64_from_builtin_datetime():\n val1 = datetime.datetime(2000, 1, 1, 12, 34, 56, 123456)\n val2 = datetime.datetime(2000, 1, 1)\n result = pa.array([val1, val2], type='date64')\n result2 = pa.array([val1.date(), val2.date()], type='date64')\n\n assert result.equals(result2)\n\n as_i8 = result.view('int64')\n assert as_i8[0].as_py() == as_i8[1].as_py()\n\n\n@pytest.mark.parametrize(('ty', 'values'), [\n ('bool', [True, False, True]),\n ('uint8', range(0, 255)),\n ('int8', range(0, 128)),\n ('uint16', range(0, 10)),\n ('int16', range(0, 10)),\n ('uint32', range(0, 10)),\n ('int32', range(0, 10)),\n ('uint64', range(0, 10)),\n ('int64', range(0, 10)),\n ('float', [0.0, 0.1, 0.2]),\n ('double', [0.0, 0.1, 0.2]),\n ('string', ['a', 'b', 'c']),\n ('binary', [b'a', b'b', b'c']),\n (pa.binary(3), [b'abc', b'bcd', b'cde'])\n])\ndef test_cast_identities(ty, values):\n arr = pa.array(values, type=ty)\n assert arr.cast(ty).equals(arr)\n\n\npickle_test_parametrize = pytest.mark.parametrize(\n ('data', 'typ'),\n [\n ([True, False, True, True], pa.bool_()),\n ([1, 2, 4, 6], pa.int64()),\n ([1.0, 2.5, None], pa.float64()),\n (['a', None, 'b'], pa.string()),\n ([], None),\n ([[1, 2], [3]], pa.list_(pa.int64())),\n ([[4, 5], [6]], pa.large_list(pa.int16())),\n ([['a'], None, ['b', 'c']], pa.list_(pa.string())),\n ([(1, 'a'), (2, 'c'), None],\n pa.struct([pa.field('a', pa.int64()), pa.field('b', pa.string())]))\n ]\n)\n\n\n@pickle_test_parametrize\ndef test_array_pickle(data, typ):\n # Allocate here so that we don't have any Arrow data allocated.\n # This is needed to ensure that allocator tests can be reliable.\n array = pa.array(data, type=typ)\n for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):\n result = 
pickle.loads(pickle.dumps(array, proto))\n assert array.equals(result)\n\n\ndef test_array_pickle_dictionary():\n # not included in the above as dictionary array cannot be created with\n # the pa.array function\n array = pa.DictionaryArray.from_arrays([0, 1, 2, 0, 1], ['a', 'b', 'c'])\n for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):\n result = pickle.loads(pickle.dumps(array, proto))\n assert array.equals(result)\n\n\n@h.given(\n past.arrays(\n past.all_types,\n size=st.integers(min_value=0, max_value=10)\n )\n)\ndef test_pickling(arr):\n data = pickle.dumps(arr)\n restored = pickle.loads(data)\n assert arr.equals(restored)\n\n\n@pickle_test_parametrize\ndef test_array_pickle5(data, typ):\n # Test zero-copy pickling with protocol 5 (PEP 574)\n picklemod = pickle5 or pickle\n if pickle5 is None and picklemod.HIGHEST_PROTOCOL < 5:\n pytest.skip(\"need pickle5 package or Python 3.8+\")\n\n array = pa.array(data, type=typ)\n addresses = [buf.address if buf is not None else 0\n for buf in array.buffers()]\n\n for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):\n buffers = []\n pickled = picklemod.dumps(array, proto, buffer_callback=buffers.append)\n result = picklemod.loads(pickled, buffers=buffers)\n assert array.equals(result)\n\n result_addresses = [buf.address if buf is not None else 0\n for buf in result.buffers()]\n assert result_addresses == addresses\n\n\n@pytest.mark.parametrize(\n 'narr',\n [\n np.arange(10, dtype=np.int64),\n np.arange(10, dtype=np.int32),\n np.arange(10, dtype=np.int16),\n np.arange(10, dtype=np.int8),\n np.arange(10, dtype=np.uint64),\n np.arange(10, dtype=np.uint32),\n np.arange(10, dtype=np.uint16),\n np.arange(10, dtype=np.uint8),\n np.arange(10, dtype=np.float64),\n np.arange(10, dtype=np.float32),\n np.arange(10, dtype=np.float16),\n ]\n)\ndef test_to_numpy_roundtrip(narr):\n arr = pa.array(narr)\n assert narr.dtype == arr.to_numpy().dtype\n np.testing.assert_array_equal(narr, arr.to_numpy())\n np.testing.assert_array_equal(narr[:6], arr[:6].to_numpy())\n np.testing.assert_array_equal(narr[2:], arr[2:].to_numpy())\n np.testing.assert_array_equal(narr[2:6], arr[2:6].to_numpy())\n\n\ndef test_array_uint64_from_py_over_range():\n arr = pa.array([2 ** 63], type=pa.uint64())\n expected = pa.array(np.array([2 ** 63], dtype='u8'))\n assert arr.equals(expected)\n\n\ndef test_array_conversions_no_sentinel_values():\n arr = np.array([1, 2, 3, 4], dtype='int8')\n refcount = sys.getrefcount(arr)\n arr2 = pa.array(arr) # noqa\n assert sys.getrefcount(arr) == (refcount + 1)\n\n assert arr2.type == 'int8'\n\n arr3 = pa.array(np.array([1, np.nan, 2, 3, np.nan, 4], dtype='float32'),\n type='float32')\n assert arr3.type == 'float32'\n assert arr3.null_count == 0\n\n\ndef test_time32_time64_from_integer():\n # ARROW-4111\n result = pa.array([1, 2, None], type=pa.time32('s'))\n expected = pa.array([datetime.time(second=1),\n datetime.time(second=2), None],\n type=pa.time32('s'))\n assert result.equals(expected)\n\n result = pa.array([1, 2, None], type=pa.time32('ms'))\n expected = pa.array([datetime.time(microsecond=1000),\n datetime.time(microsecond=2000), None],\n type=pa.time32('ms'))\n assert result.equals(expected)\n\n result = pa.array([1, 2, None], type=pa.time64('us'))\n expected = pa.array([datetime.time(microsecond=1),\n datetime.time(microsecond=2), None],\n type=pa.time64('us'))\n assert result.equals(expected)\n\n result = pa.array([1000, 2000, None], type=pa.time64('ns'))\n expected = pa.array([datetime.time(microsecond=1),\n datetime.time(microsecond=2), 
None],\n type=pa.time64('ns'))\n assert result.equals(expected)\n\n\ndef test_binary_string_pandas_null_sentinels():\n # ARROW-6227\n def _check_case(ty):\n arr = pa.array(['string', np.nan], type=ty, from_pandas=True)\n expected = pa.array(['string', None], type=ty)\n assert arr.equals(expected)\n _check_case('binary')\n _check_case('utf8')\n\n\ndef test_pandas_null_sentinels_raise_error():\n # ARROW-6227\n cases = [\n ([None, np.nan], 'null'),\n (['string', np.nan], 'binary'),\n (['string', np.nan], 'utf8'),\n (['string', np.nan], 'large_binary'),\n (['string', np.nan], 'large_utf8'),\n ([b'string', np.nan], pa.binary(6)),\n ([True, np.nan], pa.bool_()),\n ([decimal.Decimal('0'), np.nan], pa.decimal128(12, 2)),\n ([0, np.nan], pa.date32()),\n ([0, np.nan], pa.date32()),\n ([0, np.nan], pa.date64()),\n ([0, np.nan], pa.time32('s')),\n ([0, np.nan], pa.time64('us')),\n ([0, np.nan], pa.timestamp('us')),\n ([0, np.nan], pa.duration('us')),\n ]\n for case, ty in cases:\n # Both types of exceptions are raised. May want to clean that up\n with pytest.raises((ValueError, TypeError)):\n pa.array(case, type=ty)\n\n # from_pandas option suppresses failure\n result = pa.array(case, type=ty, from_pandas=True)\n assert result.null_count == (1 if ty != 'null' else 2)\n\n\n@pytest.mark.pandas\ndef test_pandas_null_sentinels_index():\n # ARROW-7023 - ensure that when passing a pandas Index, \"from_pandas\"\n # semantics are used\n import pandas as pd\n idx = pd.Index([1, 2, np.nan], dtype=object)\n result = pa.array(idx)\n expected = pa.array([1, 2, np.nan], from_pandas=True)\n assert result.equals(expected)\n\n\ndef test_array_from_numpy_datetimeD():\n arr = np.array([None, datetime.date(2017, 4, 4)], dtype='datetime64[D]')\n\n result = pa.array(arr)\n expected = pa.array([None, datetime.date(2017, 4, 4)], type=pa.date32())\n assert result.equals(expected)\n\n\ndef test_array_from_naive_datetimes():\n arr = pa.array([\n None,\n datetime.datetime(2017, 4, 4, 12, 11, 10),\n datetime.datetime(2018, 1, 1, 0, 2, 0)\n ])\n assert arr.type == pa.timestamp('us', tz=None)\n\n\n@pytest.mark.parametrize(('dtype', 'type'), [\n ('datetime64[s]', pa.timestamp('s')),\n ('datetime64[ms]', pa.timestamp('ms')),\n ('datetime64[us]', pa.timestamp('us')),\n ('datetime64[ns]', pa.timestamp('ns'))\n])\ndef test_array_from_numpy_datetime(dtype, type):\n data = [\n None,\n datetime.datetime(2017, 4, 4, 12, 11, 10),\n datetime.datetime(2018, 1, 1, 0, 2, 0)\n ]\n\n # from numpy array\n arr = pa.array(np.array(data, dtype=dtype))\n expected = pa.array(data, type=type)\n assert arr.equals(expected)\n\n # from list of numpy scalars\n arr = pa.array(list(np.array(data, dtype=dtype)))\n assert arr.equals(expected)\n\n\ndef test_array_from_different_numpy_datetime_units_raises():\n data = [\n None,\n datetime.datetime(2017, 4, 4, 12, 11, 10),\n datetime.datetime(2018, 1, 1, 0, 2, 0)\n ]\n s = np.array(data, dtype='datetime64[s]')\n ms = np.array(data, dtype='datetime64[ms]')\n data = list(s[:2]) + list(ms[2:])\n\n with pytest.raises(pa.ArrowNotImplementedError):\n pa.array(data)\n\n\n@pytest.mark.parametrize('unit', ['ns', 'us', 'ms', 's'])\ndef test_array_from_list_of_timestamps(unit):\n n = np.datetime64('NaT', unit)\n x = np.datetime64('2017-01-01 01:01:01.111111111', unit)\n y = np.datetime64('2018-11-22 12:24:48.111111111', unit)\n\n a1 = pa.array([n, x, y])\n a2 = pa.array([n, x, y], type=pa.timestamp(unit))\n\n assert a1.type == a2.type\n assert a1.type.unit == unit\n assert a1[0] == a2[0]\n\n\ndef 
test_array_from_timestamp_with_generic_unit():\n n = np.datetime64('NaT')\n x = np.datetime64('2017-01-01 01:01:01.111111111')\n y = np.datetime64('2018-11-22 12:24:48.111111111')\n\n with pytest.raises(pa.ArrowNotImplementedError,\n match='Unbound or generic datetime64 time unit'):\n pa.array([n, x, y])\n\n\n@pytest.mark.parametrize(('dtype', 'type'), [\n ('timedelta64[s]', pa.duration('s')),\n ('timedelta64[ms]', pa.duration('ms')),\n ('timedelta64[us]', pa.duration('us')),\n ('timedelta64[ns]', pa.duration('ns'))\n])\ndef test_array_from_numpy_timedelta(dtype, type):\n data = [\n None,\n datetime.timedelta(1),\n datetime.timedelta(0, 1)\n ]\n\n # from numpy array\n np_arr = np.array(data, dtype=dtype)\n arr = pa.array(np_arr)\n assert isinstance(arr, pa.DurationArray)\n assert arr.type == type\n expected = pa.array(data, type=type)\n assert arr.equals(expected)\n assert arr.to_pylist() == data\n\n # from list of numpy scalars\n arr = pa.array(list(np.array(data, dtype=dtype)))\n assert arr.equals(expected)\n assert arr.to_pylist() == data\n\n\ndef test_array_from_numpy_timedelta_incorrect_unit():\n # generic (no unit)\n td = np.timedelta64(1)\n\n for data in [[td], np.array([td])]:\n with pytest.raises(NotImplementedError):\n pa.array(data)\n\n # unsupported unit\n td = np.timedelta64(1, 'M')\n for data in [[td], np.array([td])]:\n with pytest.raises(NotImplementedError):\n pa.array(data)\n\n\ndef test_array_from_numpy_ascii():\n arr = np.array(['abcde', 'abc', ''], dtype='|S5')\n\n arrow_arr = pa.array(arr)\n assert arrow_arr.type == 'binary'\n expected = pa.array(['abcde', 'abc', ''], type='binary')\n assert arrow_arr.equals(expected)\n\n mask = np.array([False, True, False])\n arrow_arr = pa.array(arr, mask=mask)\n expected = pa.array(['abcde', None, ''], type='binary')\n assert arrow_arr.equals(expected)\n\n # Strided variant\n arr = np.array(['abcde', 'abc', ''] * 5, dtype='|S5')[::2]\n mask = np.array([False, True, False] * 5)[::2]\n arrow_arr = pa.array(arr, mask=mask)\n\n expected = pa.array(['abcde', '', None, 'abcde', '', None, 'abcde', ''],\n type='binary')\n assert arrow_arr.equals(expected)\n\n # 0 itemsize\n arr = np.array(['', '', ''], dtype='|S0')\n arrow_arr = pa.array(arr)\n expected = pa.array(['', '', ''], type='binary')\n assert arrow_arr.equals(expected)\n\n\ndef test_array_from_numpy_unicode():\n dtypes = ['<U5', '>U5']\n\n for dtype in dtypes:\n arr = np.array(['abcde', 'abc', ''], dtype=dtype)\n\n arrow_arr = pa.array(arr)\n assert arrow_arr.type == 'utf8'\n expected = pa.array(['abcde', 'abc', ''], type='utf8')\n assert arrow_arr.equals(expected)\n\n mask = np.array([False, True, False])\n arrow_arr = pa.array(arr, mask=mask)\n expected = pa.array(['abcde', None, ''], type='utf8')\n assert arrow_arr.equals(expected)\n\n # Strided variant\n arr = np.array(['abcde', 'abc', ''] * 5, dtype=dtype)[::2]\n mask = np.array([False, True, False] * 5)[::2]\n arrow_arr = pa.array(arr, mask=mask)\n\n expected = pa.array(['abcde', '', None, 'abcde', '', None,\n 'abcde', ''], type='utf8')\n assert arrow_arr.equals(expected)\n\n # 0 itemsize\n arr = np.array(['', '', ''], dtype='<U0')\n arrow_arr = pa.array(arr)\n expected = pa.array(['', '', ''], type='utf8')\n assert arrow_arr.equals(expected)\n\n\ndef test_array_string_from_non_string():\n # ARROW-5682 - when converting to string raise on non string-like dtype\n with pytest.raises(TypeError):\n pa.array(np.array([1, 2, 3]), type=pa.string())\n\n\ndef test_array_string_from_all_null():\n # ARROW-5682\n vals = np.array([None, 
None], dtype=object)\n arr = pa.array(vals, type=pa.string())\n assert arr.null_count == 2\n\n vals = np.array([np.nan, np.nan], dtype='float64')\n # by default raises, but accept as all-null when from_pandas=True\n with pytest.raises(TypeError):\n pa.array(vals, type=pa.string())\n arr = pa.array(vals, type=pa.string(), from_pandas=True)\n assert arr.null_count == 2\n\n\ndef test_array_from_masked():\n ma = np.ma.array([1, 2, 3, 4], dtype='int64',\n mask=[False, False, True, False])\n result = pa.array(ma)\n expected = pa.array([1, 2, None, 4], type='int64')\n assert expected.equals(result)\n\n with pytest.raises(ValueError, match=\"Cannot pass a numpy masked array\"):\n pa.array(ma, mask=np.array([True, False, False, False]))\n\n\ndef test_array_from_shrunken_masked():\n ma = np.ma.array([0], dtype='int64')\n result = pa.array(ma)\n expected = pa.array([0], type='int64')\n assert expected.equals(result)\n\n\ndef test_array_from_invalid_dim_raises():\n msg = \"only handle 1-dimensional arrays\"\n arr2d = np.array([[1, 2, 3], [4, 5, 6]])\n with pytest.raises(ValueError, match=msg):\n pa.array(arr2d)\n\n arr0d = np.array(0)\n with pytest.raises(ValueError, match=msg):\n pa.array(arr0d)\n\n\ndef test_array_from_strided_bool():\n # ARROW-6325\n arr = np.ones((3, 2), dtype=bool)\n result = pa.array(arr[:, 0])\n expected = pa.array([True, True, True])\n assert result.equals(expected)\n result = pa.array(arr[0, :])\n expected = pa.array([True, True])\n assert result.equals(expected)\n\n\ndef test_boolean_true_count_false_count():\n # ARROW-9145\n arr = pa.array([True, True, None, False, None, True] * 1000)\n assert arr.true_count == 3000\n assert arr.false_count == 1000\n\n\ndef test_buffers_primitive():\n a = pa.array([1, 2, None, 4], type=pa.int16())\n buffers = a.buffers()\n assert len(buffers) == 2\n null_bitmap = buffers[0].to_pybytes()\n assert 1 <= len(null_bitmap) <= 64 # XXX this is varying\n assert bytearray(null_bitmap)[0] == 0b00001011\n\n # Slicing does not affect the buffers but the offset\n a_sliced = a[1:]\n buffers = a_sliced.buffers()\n a_sliced.offset == 1\n assert len(buffers) == 2\n null_bitmap = buffers[0].to_pybytes()\n assert 1 <= len(null_bitmap) <= 64 # XXX this is varying\n assert bytearray(null_bitmap)[0] == 0b00001011\n\n assert struct.unpack('hhxxh', buffers[1].to_pybytes()) == (1, 2, 4)\n\n a = pa.array(np.int8([4, 5, 6]))\n buffers = a.buffers()\n assert len(buffers) == 2\n # No null bitmap from Numpy int array\n assert buffers[0] is None\n assert struct.unpack('3b', buffers[1].to_pybytes()) == (4, 5, 6)\n\n a = pa.array([b'foo!', None, b'bar!!'])\n buffers = a.buffers()\n assert len(buffers) == 3\n null_bitmap = buffers[0].to_pybytes()\n assert bytearray(null_bitmap)[0] == 0b00000101\n offsets = buffers[1].to_pybytes()\n assert struct.unpack('4i', offsets) == (0, 4, 4, 9)\n values = buffers[2].to_pybytes()\n assert values == b'foo!bar!!'\n\n\ndef test_buffers_nested():\n a = pa.array([[1, 2], None, [3, None, 4, 5]], type=pa.list_(pa.int64()))\n buffers = a.buffers()\n assert len(buffers) == 4\n # The parent buffers\n null_bitmap = buffers[0].to_pybytes()\n assert bytearray(null_bitmap)[0] == 0b00000101\n offsets = buffers[1].to_pybytes()\n assert struct.unpack('4i', offsets) == (0, 2, 2, 6)\n # The child buffers\n null_bitmap = buffers[2].to_pybytes()\n assert bytearray(null_bitmap)[0] == 0b00110111\n values = buffers[3].to_pybytes()\n assert struct.unpack('qqq8xqq', values) == (1, 2, 3, 4, 5)\n\n a = pa.array([(42, None), None, (None, 43)],\n 
type=pa.struct([pa.field('a', pa.int8()),\n pa.field('b', pa.int16())]))\n buffers = a.buffers()\n assert len(buffers) == 5\n # The parent buffer\n null_bitmap = buffers[0].to_pybytes()\n assert bytearray(null_bitmap)[0] == 0b00000101\n # The child buffers: 'a'\n null_bitmap = buffers[1].to_pybytes()\n assert bytearray(null_bitmap)[0] == 0b00000011\n values = buffers[2].to_pybytes()\n assert struct.unpack('bxx', values) == (42,)\n # The child buffers: 'b'\n null_bitmap = buffers[3].to_pybytes()\n assert bytearray(null_bitmap)[0] == 0b00000110\n values = buffers[4].to_pybytes()\n assert struct.unpack('4xh', values) == (43,)\n\n\ndef test_nbytes_sizeof():\n a = pa.array(np.array([4, 5, 6], dtype='int64'))\n assert a.nbytes == 8 * 3\n assert sys.getsizeof(a) >= object.__sizeof__(a) + a.nbytes\n a = pa.array([1, None, 3], type='int64')\n assert a.nbytes == 8*3 + 1\n assert sys.getsizeof(a) >= object.__sizeof__(a) + a.nbytes\n a = pa.array([[1, 2], None, [3, None, 4, 5]], type=pa.list_(pa.int64()))\n assert a.nbytes == 1 + 4 * 4 + 1 + 6 * 8\n assert sys.getsizeof(a) >= object.__sizeof__(a) + a.nbytes\n\n\ndef test_invalid_tensor_constructor_repr():\n # ARROW-2638: prevent calling extension class constructors directly\n with pytest.raises(TypeError):\n repr(pa.Tensor([1]))\n\n\ndef test_invalid_tensor_construction():\n with pytest.raises(TypeError):\n pa.Tensor()\n\n\n@pytest.mark.parametrize(('offset_type', 'list_type_factory'),\n [(pa.int32(), pa.list_), (pa.int64(), pa.large_list)])\ndef test_list_array_flatten(offset_type, list_type_factory):\n typ2 = list_type_factory(\n list_type_factory(\n pa.int64()\n )\n )\n arr2 = pa.array([\n None,\n [\n [1, None, 2],\n None,\n [3, 4]\n ],\n [],\n [\n [],\n [5, 6],\n None\n ],\n [\n [7, 8]\n ]\n ], type=typ2)\n offsets2 = pa.array([0, 0, 3, 3, 6, 7], type=offset_type)\n\n typ1 = list_type_factory(pa.int64())\n arr1 = pa.array([\n [1, None, 2],\n None,\n [3, 4],\n [],\n [5, 6],\n None,\n [7, 8]\n ], type=typ1)\n offsets1 = pa.array([0, 3, 3, 5, 5, 7, 7, 9], type=offset_type)\n\n arr0 = pa.array([\n 1, None, 2,\n 3, 4,\n 5, 6,\n 7, 8\n ], type=pa.int64())\n\n assert arr2.flatten().equals(arr1)\n assert arr2.offsets.equals(offsets2)\n assert arr2.values.equals(arr1)\n assert arr1.flatten().equals(arr0)\n assert arr1.offsets.equals(offsets1)\n assert arr1.values.equals(arr0)\n assert arr2.flatten().flatten().equals(arr0)\n assert arr2.values.values.equals(arr0)\n\n\n@pytest.mark.parametrize(('offset_type', 'list_type_factory'),\n [(pa.int32(), pa.list_), (pa.int64(), pa.large_list)])\ndef test_list_value_parent_indices(offset_type, list_type_factory):\n arr = pa.array(\n [\n [0, 1, 2],\n None,\n [],\n [3, 4]\n ], type=list_type_factory(pa.int32()))\n expected = pa.array([0, 0, 0, 3, 3], type=offset_type)\n assert arr.value_parent_indices().equals(expected)\n\n\n@pytest.mark.parametrize(('offset_type', 'list_type_factory'),\n [(pa.int32(), pa.list_), (pa.int64(), pa.large_list)])\ndef test_list_value_lengths(offset_type, list_type_factory):\n arr = pa.array(\n [\n [0, 1, 2],\n None,\n [],\n [3, 4]\n ], type=list_type_factory(pa.int32()))\n expected = pa.array([3, None, 0, 2], type=offset_type)\n assert arr.value_lengths().equals(expected)\n\n\n@pytest.mark.parametrize('list_type_factory', [pa.list_, pa.large_list])\ndef test_list_array_flatten_non_canonical(list_type_factory):\n # Non-canonical list array (null elements backed by non-empty sublists)\n typ = list_type_factory(pa.int64())\n arr = pa.array([[1], [2, 3], [4, 5, 6]], type=typ)\n buffers = 
arr.buffers()[:2]\n buffers[0] = pa.py_buffer(b\"\\x05\") # validity bitmap\n arr = arr.from_buffers(arr.type, len(arr), buffers, children=[arr.values])\n assert arr.to_pylist() == [[1], None, [4, 5, 6]]\n assert arr.offsets.to_pylist() == [0, 1, 3, 6]\n\n flattened = arr.flatten()\n flattened.validate(full=True)\n assert flattened.type == typ.value_type\n assert flattened.to_pylist() == [1, 4, 5, 6]\n\n # .values is the physical values array (including masked elements)\n assert arr.values.to_pylist() == [1, 2, 3, 4, 5, 6]\n\n\n@pytest.mark.parametrize('klass', [pa.ListArray, pa.LargeListArray])\ndef test_list_array_values_offsets_sliced(klass):\n # ARROW-7301\n arr = klass.from_arrays(offsets=[0, 3, 4, 6], values=[1, 2, 3, 4, 5, 6])\n assert arr.values.to_pylist() == [1, 2, 3, 4, 5, 6]\n assert arr.offsets.to_pylist() == [0, 3, 4, 6]\n\n # sliced -> values keeps referring to full values buffer, but offsets is\n # sliced as well so the offsets correctly point into the full values array\n # sliced -> flatten() will return the sliced value array.\n arr2 = arr[1:]\n assert arr2.values.to_pylist() == [1, 2, 3, 4, 5, 6]\n assert arr2.offsets.to_pylist() == [3, 4, 6]\n assert arr2.flatten().to_pylist() == [4, 5, 6]\n i = arr2.offsets[0].as_py()\n j = arr2.offsets[1].as_py()\n assert arr2[0].as_py() == arr2.values[i:j].to_pylist() == [4]\n\n\ndef test_fixed_size_list_array_flatten():\n typ2 = pa.list_(pa.list_(pa.int64(), 2), 3)\n arr2 = pa.array([\n [\n [1, 2],\n [3, 4],\n [5, 6],\n ],\n None,\n [\n [7, None],\n None,\n [8, 9]\n ],\n ], type=typ2)\n assert arr2.type.equals(typ2)\n\n typ1 = pa.list_(pa.int64(), 2)\n arr1 = pa.array([\n [1, 2], [3, 4], [5, 6],\n None, None, None,\n [7, None], None, [8, 9]\n ], type=typ1)\n assert arr1.type.equals(typ1)\n assert arr2.flatten().equals(arr1)\n\n typ0 = pa.int64()\n arr0 = pa.array([\n 1, 2, 3, 4, 5, 6,\n None, None, None, None, None, None,\n 7, None, None, None, 8, 9,\n ], type=typ0)\n assert arr0.type.equals(typ0)\n assert arr1.flatten().equals(arr0)\n assert arr2.flatten().flatten().equals(arr0)\n\n\ndef test_struct_array_flatten():\n ty = pa.struct([pa.field('x', pa.int16()),\n pa.field('y', pa.float32())])\n a = pa.array([(1, 2.5), (3, 4.5), (5, 6.5)], type=ty)\n xs, ys = a.flatten()\n assert xs.type == pa.int16()\n assert ys.type == pa.float32()\n assert xs.to_pylist() == [1, 3, 5]\n assert ys.to_pylist() == [2.5, 4.5, 6.5]\n xs, ys = a[1:].flatten()\n assert xs.to_pylist() == [3, 5]\n assert ys.to_pylist() == [4.5, 6.5]\n\n a = pa.array([(1, 2.5), None, (3, 4.5)], type=ty)\n xs, ys = a.flatten()\n assert xs.to_pylist() == [1, None, 3]\n assert ys.to_pylist() == [2.5, None, 4.5]\n xs, ys = a[1:].flatten()\n assert xs.to_pylist() == [None, 3]\n assert ys.to_pylist() == [None, 4.5]\n\n a = pa.array([(1, None), (2, 3.5), (None, 4.5)], type=ty)\n xs, ys = a.flatten()\n assert xs.to_pylist() == [1, 2, None]\n assert ys.to_pylist() == [None, 3.5, 4.5]\n xs, ys = a[1:].flatten()\n assert xs.to_pylist() == [2, None]\n assert ys.to_pylist() == [3.5, 4.5]\n\n a = pa.array([(1, None), None, (None, 2.5)], type=ty)\n xs, ys = a.flatten()\n assert xs.to_pylist() == [1, None, None]\n assert ys.to_pylist() == [None, None, 2.5]\n xs, ys = a[1:].flatten()\n assert xs.to_pylist() == [None, None]\n assert ys.to_pylist() == [None, 2.5]\n\n\ndef test_struct_array_field():\n ty = pa.struct([pa.field('x', pa.int16()),\n pa.field('y', pa.float32())])\n a = pa.array([(1, 2.5), (3, 4.5), (5, 6.5)], type=ty)\n\n x0 = a.field(0)\n y0 = a.field(1)\n x1 = a.field(-2)\n y1 = 
a.field(-1)\n x2 = a.field('x')\n y2 = a.field('y')\n\n assert isinstance(x0, pa.lib.Int16Array)\n assert isinstance(y1, pa.lib.FloatArray)\n assert x0.equals(pa.array([1, 3, 5], type=pa.int16()))\n assert y0.equals(pa.array([2.5, 4.5, 6.5], type=pa.float32()))\n assert x0.equals(x1)\n assert x0.equals(x2)\n assert y0.equals(y1)\n assert y0.equals(y2)\n\n for invalid_index in [None, pa.int16()]:\n with pytest.raises(TypeError):\n a.field(invalid_index)\n\n for invalid_index in [3, -3]:\n with pytest.raises(IndexError):\n a.field(invalid_index)\n\n for invalid_name in ['z', '']:\n with pytest.raises(KeyError):\n a.field(invalid_name)\n\n\ndef test_empty_cast():\n types = [\n pa.null(),\n pa.bool_(),\n pa.int8(),\n pa.int16(),\n pa.int32(),\n pa.int64(),\n pa.uint8(),\n pa.uint16(),\n pa.uint32(),\n pa.uint64(),\n pa.float16(),\n pa.float32(),\n pa.float64(),\n pa.date32(),\n pa.date64(),\n pa.binary(),\n pa.binary(length=4),\n pa.string(),\n ]\n\n for (t1, t2) in itertools.product(types, types):\n try:\n # ARROW-4766: Ensure that supported types conversion don't segfault\n # on empty arrays of common types\n pa.array([], type=t1).cast(t2)\n except (pa.lib.ArrowNotImplementedError, pa.ArrowInvalid):\n continue\n\n\ndef test_nested_dictionary_array():\n dict_arr = pa.DictionaryArray.from_arrays([0, 1, 0], ['a', 'b'])\n list_arr = pa.ListArray.from_arrays([0, 2, 3], dict_arr)\n assert list_arr.to_pylist() == [['a', 'b'], ['a']]\n\n dict_arr = pa.DictionaryArray.from_arrays([0, 1, 0], ['a', 'b'])\n dict_arr2 = pa.DictionaryArray.from_arrays([0, 1, 2, 1, 0], dict_arr)\n assert dict_arr2.to_pylist() == ['a', 'b', 'a', 'b', 'a']\n\n\ndef test_array_from_numpy_str_utf8():\n # ARROW-3890 -- in Python 3, NPY_UNICODE arrays are produced, but in Python\n # 2 they are NPY_STRING (binary), so we must do UTF-8 validation\n vec = np.array([\"toto\", \"tata\"])\n vec2 = np.array([\"toto\", \"tata\"], dtype=object)\n\n arr = pa.array(vec, pa.string())\n arr2 = pa.array(vec2, pa.string())\n expected = pa.array([\"toto\", \"tata\"])\n assert arr.equals(expected)\n assert arr2.equals(expected)\n\n # with mask, separate code path\n mask = np.array([False, False], dtype=bool)\n arr = pa.array(vec, pa.string(), mask=mask)\n assert arr.equals(expected)\n\n # UTF8 validation failures\n vec = np.array([('mañana').encode('utf-16-le')])\n with pytest.raises(ValueError):\n pa.array(vec, pa.string())\n\n with pytest.raises(ValueError):\n pa.array(vec, pa.string(), mask=np.array([False]))\n\n\n@pytest.mark.large_memory\ndef test_numpy_binary_overflow_to_chunked():\n # ARROW-3762, ARROW-5966\n\n # 2^31 + 1 bytes\n values = [b'x']\n unicode_values = ['x']\n\n # Make 10 unique 1MB strings then repeat then 2048 times\n unique_strings = {\n i: b'x' * ((1 << 20) - 1) + str(i % 10).encode('utf8')\n for i in range(10)\n }\n unicode_unique_strings = {i: x.decode('utf8')\n for i, x in unique_strings.items()}\n values += [unique_strings[i % 10] for i in range(1 << 11)]\n unicode_values += [unicode_unique_strings[i % 10]\n for i in range(1 << 11)]\n\n for case, ex_type in [(values, pa.binary()),\n (unicode_values, pa.utf8())]:\n arr = np.array(case)\n arrow_arr = pa.array(arr)\n arr = None\n\n assert isinstance(arrow_arr, pa.ChunkedArray)\n assert arrow_arr.type == ex_type\n\n # Split up into 16MB chunks. 
128 * 16 = 2048, so 129\n assert arrow_arr.num_chunks == 129\n\n value_index = 0\n for i in range(arrow_arr.num_chunks):\n chunk = arrow_arr.chunk(i)\n for val in chunk:\n assert val.as_py() == case[value_index]\n value_index += 1\n\n\n@pytest.mark.large_memory\ndef test_list_child_overflow_to_chunked():\n kilobyte_string = 'x' * 1024\n two_mega = 2**21\n\n vals = [[kilobyte_string]] * (two_mega - 1)\n arr = pa.array(vals)\n assert isinstance(arr, pa.Array)\n assert len(arr) == two_mega - 1\n\n vals = [[kilobyte_string]] * two_mega\n arr = pa.array(vals)\n assert isinstance(arr, pa.ChunkedArray)\n assert len(arr) == two_mega\n assert len(arr.chunk(0)) == two_mega - 1\n assert len(arr.chunk(1)) == 1\n\n\ndef test_infer_type_masked():\n # ARROW-5208\n ty = pa.infer_type(['foo', 'bar', None, 2],\n mask=[False, False, False, True])\n assert ty == pa.utf8()\n\n # all masked\n ty = pa.infer_type(['foo', 'bar', None, 2],\n mask=np.array([True, True, True, True]))\n assert ty == pa.null()\n\n # length 0\n assert pa.infer_type([], mask=[]) == pa.null()\n\n\ndef test_array_masked():\n # ARROW-5208\n arr = pa.array([4, None, 4, 3.],\n mask=np.array([False, True, False, True]))\n assert arr.type == pa.int64()\n\n # ndarray dtype=object argument\n arr = pa.array(np.array([4, None, 4, 3.], dtype=\"O\"),\n mask=np.array([False, True, False, True]))\n assert arr.type == pa.int64()\n\n\ndef test_array_invalid_mask_raises():\n # ARROW-10742\n cases = [\n ([1, 2], np.array([False, False], dtype=\"O\"),\n pa.ArrowInvalid, \"must be boolean dtype\"),\n\n ([1, 2], np.array([[False], [False]]),\n pa.ArrowInvalid, \"must be 1D array\"),\n\n ([1, 2, 3], np.array([False, False]),\n pa.ArrowInvalid, \"different length\"),\n\n (np.array([1, 2]), np.array([False, False], dtype=\"O\"),\n TypeError, \"must be boolean dtype\"),\n\n (np.array([1, 2]), np.array([[False], [False]]),\n ValueError, \"must be 1D array\"),\n\n (np.array([1, 2, 3]), np.array([False, False]),\n ValueError, \"different length\"),\n ]\n for obj, mask, ex, msg in cases:\n with pytest.raises(ex, match=msg):\n pa.array(obj, mask=mask)\n\n\ndef test_array_from_large_pyints():\n # ARROW-5430\n with pytest.raises(OverflowError):\n # too large for int64 so dtype must be explicitly provided\n pa.array([int(2 ** 63)])\n\n\ndef test_array_protocol():\n\n class MyArray:\n def __init__(self, data):\n self.data = data\n\n def __arrow_array__(self, type=None):\n return pa.array(self.data, type=type)\n\n arr = MyArray(np.array([1, 2, 3], dtype='int64'))\n result = pa.array(arr)\n expected = pa.array([1, 2, 3], type=pa.int64())\n assert result.equals(expected)\n result = pa.array(arr, type=pa.int64())\n expected = pa.array([1, 2, 3], type=pa.int64())\n assert result.equals(expected)\n result = pa.array(arr, type=pa.float64())\n expected = pa.array([1, 2, 3], type=pa.float64())\n assert result.equals(expected)\n\n # raise error when passing size or mask keywords\n with pytest.raises(ValueError):\n pa.array(arr, mask=np.array([True, False, True]))\n with pytest.raises(ValueError):\n pa.array(arr, size=3)\n\n # ensure the return value is an Array\n class MyArrayInvalid:\n def __init__(self, data):\n self.data = data\n\n def __arrow_array__(self, type=None):\n return np.array(self.data)\n\n arr = MyArrayInvalid(np.array([1, 2, 3], dtype='int64'))\n with pytest.raises(TypeError):\n pa.array(arr)\n\n # ARROW-7066 - allow ChunkedArray output\n class MyArray2:\n def __init__(self, data):\n self.data = data\n\n def __arrow_array__(self, type=None):\n return 
pa.chunked_array([self.data], type=type)\n\n arr = MyArray2(np.array([1, 2, 3], dtype='int64'))\n result = pa.array(arr)\n expected = pa.chunked_array([[1, 2, 3]], type=pa.int64())\n assert result.equals(expected)\n\n\ndef test_concat_array():\n concatenated = pa.concat_arrays(\n [pa.array([1, 2]), pa.array([3, 4])])\n assert concatenated.equals(pa.array([1, 2, 3, 4]))\n\n\ndef test_concat_array_different_types():\n with pytest.raises(pa.ArrowInvalid):\n pa.concat_arrays([pa.array([1]), pa.array([2.])])\n\n\ndef test_concat_array_invalid_type():\n # ARROW-9920 - do not segfault on non-array input\n\n with pytest.raises(TypeError, match=\"should contain Array objects\"):\n pa.concat_arrays([None])\n\n arr = pa.chunked_array([[0, 1], [3, 4]])\n with pytest.raises(TypeError, match=\"should contain Array objects\"):\n pa.concat_arrays(arr)\n\n\n@pytest.mark.pandas\ndef test_to_pandas_timezone():\n # https://issues.apache.org/jira/browse/ARROW-6652\n arr = pa.array([1, 2, 3], type=pa.timestamp('s', tz='Europe/Brussels'))\n s = arr.to_pandas()\n assert s.dt.tz is not None\n arr = pa.chunked_array([arr])\n s = arr.to_pandas()\n assert s.dt.tz is not None\n"
] |
[
[
"numpy.isnan",
"numpy.arange",
"numpy.asarray",
"numpy.uint8",
"numpy.int8",
"pandas.Index",
"numpy.dtype",
"numpy.datetime64",
"numpy.testing.assert_array_equal",
"numpy.timedelta64",
"numpy.ones",
"numpy.int16",
"numpy.concatenate",
"numpy.int32",
"numpy.ma.array",
"numpy.repeat",
"numpy.array"
]
] |
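A minimal illustrative sketch of the list-array offsets/values/flatten pattern exercised in the pyarrow test file recorded above; it assumes pyarrow and numpy are installed, and the sample data here is a stand-in, not taken from the recorded source.

import numpy as np
import pyarrow as pa

# A list array with a null sublist, mirroring the shapes used in the tests above.
arr = pa.array([[1, None, 2], None, [3, 4]], type=pa.list_(pa.int64()))
print(arr.offsets.to_pylist())    # offsets into the child values array
print(arr.values.to_pylist())     # the child values array itself
print(arr.flatten().to_pylist())  # values reached by following the offsets
print(arr.nbytes, pa.array(np.arange(3)).nbytes)  # buffer sizes, as in test_nbytes_sizeof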
SuperStar0907/lecam-gan
|
[
"e502c9b182345ddd03d29edda56b76caa7d8fb41"
] |
[
"third_party/inception_tf.py"
] |
[
"import fnmatch\nimport importlib\nimport inspect\nimport scipy\nimport numpy as np\nimport os\nimport shutil\nimport sys\nimport types\nimport io\nimport pickle\nimport re\nimport requests\nimport html\nimport hashlib\nimport glob\nimport uuid\nfrom typing import Any, List, Tuple, Union\nimport torch\n\nimport third_party.dnnlib.tflib\nfrom third_party import dnnlib\nfrom third_party import utils\n\n\ndef prepare_inception_metrics(dataset, parallel, config):\n dataset = dataset.strip('_hdf5')\n dnnlib.tflib.init_tf()\n inception_v3_features = dnnlib.util.load_pkl(\n 'http://d36zk2xti64re0.cloudfront.net/stylegan1/networks/metrics/inception_v3_features.pkl')\n inception_v3_softmax = dnnlib.util.load_pkl(\n 'http://d36zk2xti64re0.cloudfront.net/stylegan1/networks/metrics/inception_v3_softmax.pkl')\n try:\n mu_real, sigma_real = dnnlib.util.load_pkl(dataset + '_inception_moments.pkl')\n except:\n print('Calculating inception features for the training set...')\n loader = utils.get_data_loaders(\n **{**config, 'train': False, 'mirror_augment': False,\n 'use_multiepoch_sampler': False, 'load_in_mem': False, 'pin_memory': False})[0]\n pool = []\n num_gpus = torch.cuda.device_count()\n for images, _ in loader:\n images = ((images.numpy() * 0.5 + 0.5)\n * 255 + 0.5).astype(np.uint8)\n pool.append(inception_v3_features.run(images, clone_on_cpu=True,\n num_gpus=num_gpus, assume_frozen=True))\n pool = np.concatenate(pool)\n mu_real, sigma_real = np.mean(pool, axis=0), np.cov(pool, rowvar=False)\n dnnlib.util.save_pkl((mu_real, sigma_real), dataset + '_inception_moments.pkl')\n mu_real, sigma_real = dnnlib.util.load_pkl(dataset + '_inception_moments.pkl')\n\n def get_inception_metrics(sample, num_inception_images, num_splits=10, prints=True, use_torch=True):\n pool, logits = accumulate_inception_activations(\n sample, inception_v3_features, inception_v3_softmax, num_inception_images)\n IS_mean, IS_std = calculate_inception_score(logits, num_splits)\n mu_fake, sigma_fake = np.mean(pool, axis=0), np.cov(pool, rowvar=False)\n m = np.square(mu_fake - mu_real).sum()\n s, _ = scipy.linalg.sqrtm(\n np.dot(sigma_fake, sigma_real), disp=False) # pylint: disable=no-member\n dist = m + np.trace(sigma_fake + sigma_real - 2*s)\n FID = np.real(dist)\n return IS_mean, IS_std, FID\n return get_inception_metrics\n\n\ndef accumulate_inception_activations(sample, inception_v3_features, inception_v3_softmax, num_inception_images):\n pool, logits = [], []\n cnt = 0\n num_gpus = torch.cuda.device_count()\n while cnt < num_inception_images:\n images, _ = sample()\n images = ((images.cpu().numpy() * 0.5 + 0.5)\n * 255 + 0.5).astype(np.uint8)\n pool.append(inception_v3_features.run(images,\n num_gpus=num_gpus, assume_frozen=True))\n logits.append(inception_v3_softmax.run(images,\n num_gpus=num_gpus, assume_frozen=True))\n cnt += images.shape[0]\n return np.concatenate(pool), np.concatenate(logits, 0)\n\n\ndef calculate_inception_score(pred, num_splits=10):\n scores = []\n for index in range(num_splits):\n pred_chunk = pred[index * (pred.shape[0] // num_splits) : (index + 1) * (pred.shape[0] // num_splits), :]\n kl_inception = pred_chunk * \\\n (np.log(pred_chunk) - np.log(np.expand_dims(np.mean(pred_chunk, 0), 0)))\n kl_inception = np.mean(np.sum(kl_inception, 1))\n scores.append(np.exp(kl_inception))\n return np.mean(scores), np.std(scores)\n"
] |
[
[
"numpy.square",
"numpy.dot",
"numpy.log",
"numpy.sum",
"numpy.concatenate",
"numpy.std",
"numpy.real",
"numpy.mean",
"numpy.cov",
"torch.cuda.device_count",
"numpy.exp",
"numpy.trace"
]
] |
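A small worked example of the FID formula used in the inception_tf.py record above (FID = ||mu_f - mu_r||^2 + tr(sigma_f + sigma_r - 2*sqrtm(sigma_f @ sigma_r))), run on random stand-in feature matrices rather than real Inception activations; it assumes numpy and scipy are installed.

import numpy as np
import scipy.linalg

rng = np.random.default_rng(0)
feats_real = rng.normal(size=(256, 8))            # stand-in pooled features
feats_fake = rng.normal(loc=0.1, size=(256, 8))

mu_r, sigma_r = np.mean(feats_real, axis=0), np.cov(feats_real, rowvar=False)
mu_f, sigma_f = np.mean(feats_fake, axis=0), np.cov(feats_fake, rowvar=False)

m = np.square(mu_f - mu_r).sum()                  # squared mean distance
s, _ = scipy.linalg.sqrtm(np.dot(sigma_f, sigma_r), disp=False)
fid = np.real(m + np.trace(sigma_f + sigma_r - 2 * s))
print(fid)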
purusharths/NumCpp
|
[
"269b0ff1341b06480c9679285cb6c6c6843df06b"
] |
[
"test/pytest/test_filters.py"
] |
[
"import numpy as np\r\nimport scipy.ndimage.filters as filters\r\nimport os\r\nimport sys\r\nsys.path.append(os.path.abspath(r'../lib'))\r\nimport NumCppPy as NumCpp # noqa E402\r\n\r\n\r\nmodes = {'reflect': NumCpp.Mode.REFLECT,\r\n 'constant': NumCpp.Mode.CONSTANT,\r\n 'nearest': NumCpp.Mode.NEAREST,\r\n 'mirror': NumCpp.Mode.MIRROR,\r\n 'wrap': NumCpp.Mode.WRAP}\r\n\r\n\r\n####################################################################################\r\ndef test_seed():\r\n np.random.seed(4)\r\n\r\n\r\n####################################################################################\r\ndef test_complementaryMedianFilter1d():\r\n for mode in modes.keys():\r\n size = np.random.randint(1000, 2000, [1, ]).item()\r\n cShape = NumCpp.Shape(1, size)\r\n cArray = NumCpp.NdArray(cShape)\r\n data = np.random.randint(100, 1000, [size, ])\r\n cArray.setArray(data)\r\n kernalSize = 0\r\n while kernalSize % 2 == 0:\r\n kernalSize = np.random.randint(5, 15)\r\n constantValue = np.random.randint(0, 5, [1, ]).item() # only actaully needed for constant boundary condition\r\n dataOutC = NumCpp.complementaryMedianFilter1d(cArray,\r\n kernalSize,\r\n modes[mode],\r\n constantValue).getNumpyArray().flatten()\r\n dataOutPy = data - filters.generic_filter(data, np.median, footprint=np.ones([kernalSize, ]),\r\n mode=mode, cval=constantValue)\r\n assert np.array_equal(dataOutC, dataOutPy)\r\n\r\n\r\n####################################################################################\r\ndef test_convolve1d():\r\n for mode in modes.keys():\r\n size = np.random.randint(1000, 2000, [1, ]).item()\r\n cShape = NumCpp.Shape(1, size)\r\n cArray = NumCpp.NdArray(cShape)\r\n data = np.random.randint(100, 1000, [size, ]).astype(float)\r\n cArray.setArray(data)\r\n kernalSize = 0\r\n while kernalSize % 2 == 0:\r\n kernalSize = np.random.randint(5, 15)\r\n weights = np.random.randint(1, 5, [kernalSize, ])\r\n cWeights = NumCpp.NdArray(1, kernalSize)\r\n cWeights.setArray(weights)\r\n constantValue = np.random.randint(0, 5, [1, ]).item() # only actaully needed for constant boundary condition\r\n dataOutC = NumCpp.convolve1d(cArray, cWeights, modes[mode], constantValue).getNumpyArray().flatten()\r\n dataOutPy = filters.convolve(data, weights, mode=mode, cval=constantValue)\r\n assert np.array_equal(np.round(dataOutC, 8), np.round(dataOutPy, 8))\r\n\r\n\r\n####################################################################################\r\ndef test_gaussianFilter1d():\r\n for mode in modes.keys():\r\n size = np.random.randint(1000, 2000, [1, ]).item()\r\n cShape = NumCpp.Shape(1, size)\r\n cArray = NumCpp.NdArray(cShape)\r\n data = np.random.randint(100, 1000, [size, ]).astype(float)\r\n cArray.setArray(data)\r\n kernalSize = 0\r\n while kernalSize % 2 == 0:\r\n kernalSize = np.random.randint(5, 15)\r\n sigma = np.random.rand(1).item() * 2\r\n constantValue = np.random.randint(0, 5, [1, ]).item() # only actaully needed for constant boundary condition\r\n dataOutC = NumCpp.gaussianFilter1d(cArray, sigma, modes[mode], constantValue).getNumpyArray().flatten()\r\n dataOutPy = filters.gaussian_filter(data, sigma, mode=mode, cval=constantValue)\r\n assert np.array_equal(np.round(dataOutC, 7), np.round(dataOutPy, 7))\r\n\r\n\r\n####################################################################################\r\ndef test_maximumFilter1d():\r\n for mode in modes.keys():\r\n size = np.random.randint(1000, 2000, [1, ]).item()\r\n cShape = NumCpp.Shape(1, size)\r\n cArray = NumCpp.NdArray(cShape)\r\n data = 
np.random.randint(100, 1000, [size, ])\r\n cArray.setArray(data)\r\n kernalSize = 0\r\n while kernalSize % 2 == 0:\r\n kernalSize = np.random.randint(5, 15)\r\n constantValue = np.random.randint(0, 5, [1, ]).item() # only actaully needed for constant boundary condition\r\n dataOutC = NumCpp.maximumFilter1d(cArray, kernalSize, modes[mode], constantValue).getNumpyArray().flatten()\r\n dataOutPy = filters.generic_filter(data, np.max, footprint=np.ones([kernalSize, ]),\r\n mode=mode, cval=constantValue)\r\n assert np.array_equal(dataOutC, dataOutPy)\r\n\r\n\r\n####################################################################################\r\ndef test_medianFilter1d():\r\n for mode in modes.keys():\r\n size = np.random.randint(1000, 2000, [1, ]).item()\r\n cShape = NumCpp.Shape(1, size)\r\n cArray = NumCpp.NdArray(cShape)\r\n data = np.random.randint(100, 1000, [size, ])\r\n cArray.setArray(data)\r\n kernalSize = 0\r\n while kernalSize % 2 == 0:\r\n kernalSize = np.random.randint(5, 15)\r\n constantValue = np.random.randint(0, 5, [1, ]).item() # only actaully needed for constant boundary condition\r\n dataOutC = NumCpp.medianFilter1d(cArray, kernalSize, modes[mode], constantValue).getNumpyArray().flatten()\r\n dataOutPy = filters.generic_filter(data, np.median, footprint=np.ones([kernalSize, ]),\r\n mode=mode, cval=constantValue)\r\n assert np.array_equal(dataOutC, dataOutPy)\r\n\r\n\r\n####################################################################################\r\ndef test_minumumFilter1d():\r\n for mode in modes.keys():\r\n size = np.random.randint(1000, 2000, [1, ]).item()\r\n cShape = NumCpp.Shape(1, size)\r\n cArray = NumCpp.NdArray(cShape)\r\n data = np.random.randint(100, 1000, [size, ])\r\n cArray.setArray(data)\r\n kernalSize = 0\r\n while kernalSize % 2 == 0:\r\n kernalSize = np.random.randint(5, 15)\r\n constantValue = np.random.randint(0, 5, [1, ]).item() # only actaully needed for constant boundary condition\r\n dataOutC = NumCpp.minumumFilter1d(cArray, kernalSize, modes[mode], constantValue).getNumpyArray().flatten()\r\n dataOutPy = filters.generic_filter(data, np.min, footprint=np.ones([kernalSize, ]),\r\n mode=mode, cval=constantValue)\r\n assert np.array_equal(dataOutC, dataOutPy)\r\n\r\n\r\n####################################################################################\r\ndef test_percentileFilter1d():\r\n for mode in modes.keys():\r\n size = np.random.randint(1000, 2000, [1, ]).item()\r\n cShape = NumCpp.Shape(1, size)\r\n cArray = NumCpp.NdArray(cShape)\r\n data = np.random.randint(100, 1000, [size, ]).astype(float)\r\n cArray.setArray(data)\r\n kernalSize = 0\r\n while kernalSize % 2 == 0:\r\n kernalSize = np.random.randint(5, 15)\r\n percentile = np.random.randint(0, 101, [1, ]).item()\r\n constantValue = np.random.randint(0, 5, [1, ]).item() # only actaully needed for constant boundary condition\r\n dataOutC = NumCpp.percentileFilter1d(cArray,\r\n kernalSize,\r\n percentile,\r\n modes[mode],\r\n constantValue).getNumpyArray().flatten()\r\n dataOutPy = filters.generic_filter(data, np.percentile, footprint=np.ones([kernalSize, ]),\r\n mode=mode, cval=constantValue, extra_arguments=(percentile,))\r\n assert np.array_equal(np.round(dataOutC, 7), np.round(dataOutPy, 7))\r\n\r\n\r\n####################################################################################\r\ndef test_rankFilter1d():\r\n for mode in modes.keys():\r\n size = np.random.randint(1000, 2000, [1, ]).item()\r\n cShape = NumCpp.Shape(1, size)\r\n cArray = NumCpp.NdArray(cShape)\r\n data = 
np.random.randint(100, 1000, [size, ]).astype(float)\r\n cArray.setArray(data)\r\n kernalSize = 0\r\n while kernalSize % 2 == 0:\r\n kernalSize = np.random.randint(5, 15)\r\n rank = np.random.randint(0, kernalSize - 1, [1, ]).item()\r\n constantValue = np.random.randint(0, 5, [1, ]).item() # only actaully needed for constant boundary condition\r\n dataOutC = NumCpp.rankFilter1d(cArray, kernalSize, rank, modes[mode], constantValue).getNumpyArray().flatten()\r\n dataOutPy = filters.rank_filter(data, rank, footprint=np.ones([kernalSize, ]), mode=mode, cval=constantValue)\r\n assert np.array_equal(dataOutC, dataOutPy)\r\n\r\n\r\n####################################################################################\r\ndef test_uniformFilter1d():\r\n for mode in modes.keys():\r\n size = np.random.randint(1000, 2000, [1, ]).item()\r\n cShape = NumCpp.Shape(1, size)\r\n cArray = NumCpp.NdArray(cShape)\r\n data = np.random.randint(100, 1000, [size, ]).astype(float)\r\n cArray.setArray(data)\r\n kernalSize = 0\r\n while kernalSize % 2 == 0:\r\n kernalSize = np.random.randint(5, 15)\r\n constantValue = np.random.randint(0, 5, [1, ]).item() # only actaully needed for constant boundary condition\r\n dataOutC = NumCpp.uniformFilter1d(cArray, kernalSize, modes[mode], constantValue).getNumpyArray().flatten()\r\n dataOutPy = filters.generic_filter(data, np.mean, footprint=np.ones([kernalSize, ]),\r\n mode=mode, cval=constantValue)\r\n assert np.array_equal(dataOutC, dataOutPy)\r\n\r\n\r\n####################################################################################\r\ndef test_complementaryMedianFilter():\r\n for mode in modes.keys():\r\n shape = np.random.randint(100, 200, [2, ]).tolist()\r\n cShape = NumCpp.Shape(shape[0], shape[1]) # noqa\r\n cArray = NumCpp.NdArray(cShape)\r\n data = np.random.randint(100, 1000, shape) # noqa\r\n cArray.setArray(data)\r\n kernalSize = 0\r\n while kernalSize % 2 == 0:\r\n kernalSize = np.random.randint(5, 15)\r\n constantValue = np.random.randint(0, 5, [1, ]).item() # only actaully needed for constant boundary condition\r\n dataOutC = NumCpp.complementaryMedianFilter(cArray, kernalSize, modes[mode], constantValue).getNumpyArray()\r\n dataOutPy = data - filters.median_filter(data, size=kernalSize, mode=mode, cval=constantValue)\r\n assert np.array_equal(dataOutC, dataOutPy)\r\n\r\n\r\n####################################################################################\r\ndef test_convolve():\r\n for mode in modes.keys():\r\n shape = np.random.randint(100, 200, [2, ]).tolist()\r\n cShape = NumCpp.Shape(shape[0], shape[1]) # noqa\r\n cArray = NumCpp.NdArray(cShape)\r\n data = np.random.randint(10, 20, shape).astype(float) # noqa\r\n cArray.setArray(data)\r\n kernalSize = 0\r\n while kernalSize % 2 == 0:\r\n kernalSize = np.random.randint(5, 15)\r\n constantValue = np.random.randint(0, 5, [1, ]).item() # only actaully needed for constant boundary condition\r\n weights = np.random.randint(-2, 3, [kernalSize, kernalSize]).astype(float)\r\n cWeights = NumCpp.NdArray(kernalSize)\r\n cWeights.setArray(weights)\r\n dataOutC = NumCpp.convolve(cArray, kernalSize, cWeights, modes[mode], constantValue).getNumpyArray()\r\n dataOutPy = filters.convolve(data, weights, mode=mode, cval=constantValue)\r\n assert np.array_equal(dataOutC, dataOutPy)\r\n\r\n\r\n####################################################################################\r\ndef test_gaussianFilter():\r\n for mode in modes.keys():\r\n shape = np.random.randint(100, 200, [2, ]).tolist()\r\n cShape = 
NumCpp.Shape(shape[0], shape[1]) # noqa\r\n cArray = NumCpp.NdArray(cShape)\r\n data = np.random.randint(100, 1000, shape).astype(float) # noqa\r\n cArray.setArray(data)\r\n constantValue = np.random.randint(0, 5, [1, ]).item() # only actaully needed for constant boundary condition\r\n sigma = np.random.rand(1).item() * 2\r\n dataOutC = NumCpp.gaussianFilter(cArray, sigma, modes[mode], constantValue).getNumpyArray()\r\n dataOutPy = filters.gaussian_filter(data, sigma, mode=mode, cval=constantValue)\r\n assert np.array_equal(np.round(dataOutC, 2), np.round(dataOutPy, 2))\r\n\r\n\r\n####################################################################################\r\ndef test_laplaceFilter():\r\n for mode in modes.keys():\r\n shape = np.random.randint(100, 200, [2, ]).tolist()\r\n cShape = NumCpp.Shape(shape[0], shape[1]) # noqa\r\n cArray = NumCpp.NdArray(cShape)\r\n data = np.random.randint(100, 1000, shape).astype(float) # noqa\r\n cArray.setArray(data)\r\n constantValue = np.random.randint(0, 5, [1, ]).item() # only actaully needed for constant boundary condition\r\n dataOutC = NumCpp.laplaceFilter(cArray, modes[mode], constantValue).getNumpyArray()\r\n dataOutPy = filters.laplace(data, mode=mode, cval=constantValue)\r\n assert np.array_equal(dataOutC, dataOutPy)\r\n\r\n\r\n####################################################################################\r\ndef test_maximumFilter():\r\n for mode in modes.keys():\r\n shape = np.random.randint(100, 200, [2, ]).tolist()\r\n cShape = NumCpp.Shape(shape[0], shape[1]) # noqa\r\n cArray = NumCpp.NdArray(cShape)\r\n data = np.random.randint(100, 1000, shape) # noqa\r\n cArray.setArray(data)\r\n kernalSize = 0\r\n while kernalSize % 2 == 0:\r\n kernalSize = np.random.randint(5, 15)\r\n constantValue = np.random.randint(0, 5, [1, ]).item() # only actaully needed for constant boundary condition\r\n dataOutC = NumCpp.maximumFilter(cArray, kernalSize, modes[mode], constantValue).getNumpyArray()\r\n dataOutPy = filters.maximum_filter(data, size=kernalSize, mode=mode, cval=constantValue)\r\n assert np.array_equal(dataOutC, dataOutPy)\r\n\r\n\r\n####################################################################################\r\ndef test_median_filter():\r\n for mode in modes.keys():\r\n shape = np.random.randint(100, 200, [2, ]).tolist()\r\n cShape = NumCpp.Shape(shape[0], shape[1]) # noqa\r\n cArray = NumCpp.NdArray(cShape)\r\n data = np.random.randint(100, 1000, shape) # noqa\r\n cArray.setArray(data)\r\n kernalSize = 0\r\n while kernalSize % 2 == 0:\r\n kernalSize = np.random.randint(5, 15)\r\n constantValue = np.random.randint(0, 5, [1, ]).item() # only actaully needed for constant boundary condition\r\n dataOutC = NumCpp.medianFilter(cArray, kernalSize, modes[mode], constantValue).getNumpyArray()\r\n dataOutPy = filters.median_filter(data, size=kernalSize, mode=mode, cval=constantValue)\r\n assert np.array_equal(dataOutC, dataOutPy)\r\n\r\n\r\n####################################################################################\r\ndef test_minimum_filter():\r\n for mode in modes.keys():\r\n shape = np.random.randint(100, 200, [2, ]).tolist()\r\n cShape = NumCpp.Shape(shape[0], shape[1]) # noqa\r\n cArray = NumCpp.NdArray(cShape)\r\n data = np.random.randint(100, 1000, shape) # noqa\r\n cArray.setArray(data)\r\n kernalSize = 0\r\n while kernalSize % 2 == 0:\r\n kernalSize = np.random.randint(5, 15)\r\n constantValue = np.random.randint(0, 5, [1, ]).item() # only actaully needed for constant boundary condition\r\n dataOutC = 
NumCpp.minimumFilter(cArray, kernalSize, modes[mode], constantValue).getNumpyArray()\r\n dataOutPy = filters.minimum_filter(data, size=kernalSize, mode=mode, cval=constantValue)\r\n assert np.array_equal(dataOutC, dataOutPy)\r\n\r\n\r\n####################################################################################\r\ndef test_percentileFilter():\r\n for mode in modes.keys():\r\n shape = np.random.randint(100, 200, [2, ]).tolist()\r\n cShape = NumCpp.Shape(shape[0], shape[1]) # noqa\r\n cArray = NumCpp.NdArray(cShape)\r\n data = np.random.randint(100, 1000, shape) # noqa\r\n cArray.setArray(data)\r\n kernalSize = 0\r\n while kernalSize % 2 == 0:\r\n kernalSize = np.random.randint(5, 15)\r\n percentile = np.random.randint(0, 101, [1, ]).item()\r\n constantValue = np.random.randint(0, 5, [1, ]).item() # only actaully needed for constant boundary condition\r\n dataOutC = NumCpp.percentileFilter(cArray, kernalSize, percentile, modes[mode], constantValue).getNumpyArray()\r\n dataOutPy = filters.percentile_filter(data, percentile, size=kernalSize, mode=mode, cval=constantValue)\r\n assert np.array_equal(dataOutC, dataOutPy)\r\n\r\n\r\n####################################################################################\r\ndef test_rankFilter():\r\n for mode in modes.keys():\r\n shape = np.random.randint(100, 200, [2, ]).tolist()\r\n cShape = NumCpp.Shape(shape[0], shape[1]) # noqa\r\n cArray = NumCpp.NdArray(cShape)\r\n data = np.random.randint(100, 1000, shape) # noqa\r\n cArray.setArray(data)\r\n kernalSize = 0\r\n while kernalSize % 2 == 0:\r\n kernalSize = np.random.randint(5, 15)\r\n rank = np.random.randint(0, kernalSize**2 - 1, [1, ]).item()\r\n constantValue = np.random.randint(0, 5, [1, ]).item() # only actaully needed for constant boundary condition\r\n dataOutC = NumCpp.rankFilter(cArray, kernalSize, rank, modes[mode], constantValue).getNumpyArray()\r\n dataOutPy = filters.rank_filter(data, rank, size=kernalSize, mode=mode, cval=constantValue)\r\n assert np.array_equal(dataOutC, dataOutPy)\r\n\r\n\r\n####################################################################################\r\ndef test_uniform_filter():\r\n for mode in modes.keys():\r\n shape = np.random.randint(100, 200, [2, ]).tolist()\r\n cShape = NumCpp.Shape(shape[0], shape[1]) # noqa\r\n cArray = NumCpp.NdArray(cShape)\r\n data = np.random.randint(100, 1000, shape).astype(float) # noqa\r\n cArray.setArray(data)\r\n kernalSize = 0\r\n while kernalSize % 2 == 0:\r\n kernalSize = np.random.randint(5, 15)\r\n constantValue = np.random.randint(0, 5, [1, ]).item() # only actaully needed for constant boundary condition\r\n dataOutC = NumCpp.uniformFilter(cArray, kernalSize, modes[mode], constantValue).getNumpyArray()\r\n dataOutPy = filters.uniform_filter(data, size=kernalSize, mode=mode, cval=constantValue)\r\n assert np.array_equal(np.round(dataOutC, 8), np.round(dataOutPy, 8))\r\n"
] |
[
[
"scipy.ndimage.filters.laplace",
"scipy.ndimage.filters.maximum_filter",
"scipy.ndimage.filters.uniform_filter",
"numpy.array_equal",
"numpy.random.seed",
"numpy.ones",
"numpy.round",
"scipy.ndimage.filters.gaussian_filter",
"scipy.ndimage.filters.percentile_filter",
"numpy.random.rand",
"scipy.ndimage.filters.minimum_filter",
"scipy.ndimage.filters.convolve",
"scipy.ndimage.filters.median_filter",
"scipy.ndimage.filters.rank_filter",
"numpy.random.randint"
]
] |
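An illustrative sketch of the SciPy reference calls that the NumCpp filter tests above compare against; only the SciPy side is shown, since NumCppPy is a compiled extension that may not be available, and the modern scipy.ndimage namespace is used here instead of the deprecated scipy.ndimage.filters import in the recorded file.

import numpy as np
import scipy.ndimage as ndi

data = np.random.randint(100, 1000, size=200).astype(float)
kernel_size = 7  # odd, mirroring the tests' "while kernalSize % 2 == 0" loop

# Median filter via generic_filter with an all-ones footprint, as in the tests.
median_ref = ndi.generic_filter(data, np.median, footprint=np.ones(kernel_size),
                                mode='reflect', cval=0)
# Uniform (mean) filter reference, as in test_uniform_filter.
uniform_ref = ndi.uniform_filter(data, size=kernel_size, mode='reflect', cval=0)
print(median_ref[:5], uniform_ref[:5])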
hanyas/sds
|
[
"3c195fb9cbd88a9284287d62c0eacb6afc4598a7"
] |
[
"examples/pendulum/sac_policy.py"
] |
[
"import numpy as np\n\nimport gym\nimport sds\n\nfrom stable_baselines import SAC\nfrom stable_baselines.sac.policies import MlpPolicy\nfrom stable_baselines.common.vec_env import DummyVecEnv\n\n\nenv = gym.make('Pendulum-ID-v1')\nenv._max_episode_steps = 200\nenv.unwrapped.dt = 0.02\nenv.unwrapped.sigma = 1e-4\nenv.unwrapped.uniform = True\n\nulim = env.action_space.high\n\ndm_obs = env.observation_space.shape[0]\ndm_act = env.action_space.shape[0]\n\nenv = DummyVecEnv([lambda: env])\n\nmodel = SAC(MlpPolicy, env,\n gamma=0.99, verbose=1,\n learning_rate=1e-3,\n policy_kwargs={'layers': [64, 64],\n 'reg_weight': 1e-32})\n\nmodel.learn(total_timesteps=100000, log_interval=10)\n\n\nobs, act = [], []\nnb_rollouts, nb_steps = 25, 200\nfor n in range(nb_rollouts):\n _obs = np.empty((nb_steps, dm_obs))\n _act = np.empty((nb_steps, dm_act))\n\n x = env.reset()\n for t in range(nb_steps):\n u, _ = model.predict(x)\n _obs[t, :], _act[t, :] = x, u\n u = np.clip(u, -ulim, ulim)\n x, r, _, _ = env.step(u)\n\n obs.append(_obs)\n act.append(_act)\n\n\nimport matplotlib.pyplot as plt\n\nfig, ax = plt.subplots(nrows=1, ncols=dm_obs + dm_act, figsize=(12, 4))\nfor _obs, _act in zip(obs, act):\n for k, col in enumerate(ax[:-1]):\n col.plot(_obs[:, k])\n ax[-1].plot(_act)\nplt.show()\n\n# # save ctl\n# model.save(\"sac_pendulum\")\n"
] |
[
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots",
"numpy.empty",
"numpy.clip"
]
] |
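A minimal sketch of the rollout-buffering and action-clipping pattern in the sac_policy.py record above, with a random policy and toy dynamics standing in for SAC and the Pendulum environment (so it runs with numpy alone); the dimensions and torque limit are illustrative stand-ins.

import numpy as np

dm_obs, dm_act, nb_steps, ulim = 3, 1, 200, np.array([2.0])
rng = np.random.default_rng(0)

obs_buf = np.empty((nb_steps, dm_obs))
act_buf = np.empty((nb_steps, dm_act))

x = rng.normal(size=dm_obs)                 # stand-in for env.reset()
for t in range(nb_steps):
    u = rng.normal(size=dm_act)             # stand-in for model.predict(x)
    obs_buf[t, :], act_buf[t, :] = x, u
    u = np.clip(u, -ulim, ulim)             # keep actions inside the action limit
    x = x + 0.02 * rng.normal(size=dm_obs)  # stand-in for env.step(u)

print(obs_buf.shape, act_buf.shape)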
Titorat/snps
|
[
"406d2b93303901c81639ccb1734119e0896958ac"
] |
[
"tests/__init__.py"
] |
[
"\"\"\"\nBSD 3-Clause License\n\nCopyright (c) 2019, Andrew Riha\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n3. Neither the name of the copyright holder nor the names of its\n contributors may be used to endorse or promote products derived from\n this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"\n\nimport os\nimport shutil\nimport tempfile\nfrom unittest import TestCase\nfrom unittest.mock import Mock, patch\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.api.types import is_object_dtype, is_unsigned_integer_dtype\n\nfrom snps import SNPs\nfrom snps.utils import gzip_file, zip_file\n\n\nclass BaseSNPsTestCase(TestCase):\n def simulate_snps(\n self,\n chrom=\"1\",\n pos_start=1,\n pos_max=248140902,\n pos_step=100,\n genotype=\"AA\",\n insert_nulls=True,\n null_snp_step=101,\n complement_genotype_one_chrom=False,\n complement_genotype_two_chroms=False,\n complement_snp_step=50,\n ):\n s = SNPs()\n\n s._build = 37\n\n positions = np.arange(pos_start, pos_max, pos_step, dtype=np.uint32)\n snps = pd.DataFrame(\n {\"chrom\": chrom},\n index=pd.Index(\n [\"rs\" + str(x + 1) for x in range(len(positions))], name=\"rsid\"\n ),\n )\n snps[\"pos\"] = positions\n snps[\"genotype\"] = genotype\n\n if insert_nulls:\n snps.loc[snps.iloc[0::null_snp_step, :].index, \"genotype\"] = np.nan\n\n indices = snps.iloc[0::complement_snp_step, :].index\n if complement_genotype_two_chroms:\n snps.loc[indices, \"genotype\"] = snps.loc[indices, \"genotype\"].apply(\n self.complement_two_chroms\n )\n elif complement_genotype_one_chrom:\n snps.loc[indices, \"genotype\"] = snps.loc[indices, \"genotype\"].apply(\n self.complement_one_chrom\n )\n\n s._snps = snps\n\n return s\n\n @property\n def downloads_enabled(self):\n \"\"\"Property indicating if downloads are enabled.\n\n Only download from external resources when an environment variable named\n \"DOWNLOADS_ENABLED\" is set to \"true\".\n\n Returns\n -------\n bool\n \"\"\"\n return True if os.getenv(\"DOWNLOADS_ENABLED\") == \"true\" else False\n\n @staticmethod\n def get_complement(base):\n if base == \"A\":\n return \"T\"\n elif base == \"G\":\n return \"C\"\n elif base == \"C\":\n return \"G\"\n elif base == \"T\":\n return \"A\"\n else:\n return base\n\n def complement_one_chrom(self, genotype):\n if 
pd.isnull(genotype):\n return np.nan\n\n complement = \"\"\n\n for base in list(genotype):\n complement += self.get_complement(base)\n complement += genotype[1]\n return complement\n\n def complement_two_chroms(self, genotype):\n if pd.isnull(genotype):\n return np.nan\n\n complement = \"\"\n\n for base in list(genotype):\n complement += self.get_complement(base)\n\n return complement\n\n @staticmethod\n def create_snp_df(rsid, chrom, pos, genotype):\n df = pd.DataFrame(\n {\"rsid\": rsid, \"chrom\": chrom, \"pos\": pos, \"genotype\": genotype},\n columns=[\"rsid\", \"chrom\", \"pos\", \"genotype\"],\n )\n df.rsid = df.rsid.astype(object)\n df.chrom = df.chrom.astype(object)\n df.pos = df.pos.astype(np.uint32)\n df.genotype = df.genotype.astype(object)\n df = df.set_index(\"rsid\")\n return df\n\n def load_assign_PAR_SNPs(self, path):\n \"\"\"Load and assign PAR SNPs.\n\n If downloads are not enabled, use a minimal subset of the real responses.\n\n Parameters\n ----------\n path : str\n\n Returns\n -------\n SNPs\n\n References\n ----------\n 1. National Center for Biotechnology Information, Variation Services, RefSNP,\n https://api.ncbi.nlm.nih.gov/variation/v0/\n 2. Yates et. al. (doi:10.1093/bioinformatics/btu613),\n `<http://europepmc.org/search/?query=DOI:10.1093/bioinformatics/btu613>`_\n 3. Zerbino et. al. (doi.org/10.1093/nar/gkx1098), https://doi.org/10.1093/nar/gkx1098\n 4. Sherry ST, Ward MH, Kholodov M, Baker J, Phan L, Smigielski EM, Sirotkin K.\n dbSNP: the NCBI database of genetic variation. Nucleic Acids Res. 2001 Jan 1;\n 29(1):308-11.\n 5. Database of Single Nucleotide Polymorphisms (dbSNP). Bethesda (MD): National Center\n for Biotechnology Information, National Library of Medicine. dbSNP accession:\n rs28736870, rs113313554, rs758419898, and rs113378274 (dbSNP Build ID: 151).\n Available from: http://www.ncbi.nlm.nih.gov/SNP/\n \"\"\"\n effects = [\n {\n \"refsnp_id\": \"758419898\",\n \"create_date\": \"2015-04-1T22:25Z\",\n \"last_update_date\": \"2019-07-14T04:19Z\",\n \"last_update_build_id\": \"153\",\n \"primary_snapshot_data\": {\n \"placements_with_allele\": [\n {\n \"seq_id\": \"NC_000024.9\",\n \"placement_annot\": {\n \"seq_id_traits_by_assembly\": [\n {\"assembly_name\": \"GRCh37.p13\"}\n ]\n },\n \"alleles\": [\n {\n \"allele\": {\n \"spdi\": {\n \"seq_id\": \"NC_000024.9\",\n \"position\": 7364103,\n }\n }\n }\n ],\n }\n ]\n },\n },\n {\n \"refsnp_id\": \"28736870\",\n \"create_date\": \"2005-05-24T14:43Z\",\n \"last_update_date\": \"2019-07-14T04:18Z\",\n \"last_update_build_id\": \"153\",\n \"primary_snapshot_data\": {\n \"placements_with_allele\": [\n {\n \"seq_id\": \"NC_000023.10\",\n \"placement_annot\": {\n \"seq_id_traits_by_assembly\": [\n {\"assembly_name\": \"GRCh37.p13\"}\n ]\n },\n \"alleles\": [\n {\n \"allele\": {\n \"spdi\": {\n \"seq_id\": \"NC_000023.10\",\n \"position\": 220769,\n }\n }\n }\n ],\n }\n ]\n },\n },\n {\n \"refsnp_id\": \"113313554\",\n \"create_date\": \"2010-07-4T18:13Z\",\n \"last_update_date\": \"2019-07-14T04:18Z\",\n \"last_update_build_id\": \"153\",\n \"primary_snapshot_data\": {\n \"placements_with_allele\": [\n {\n \"seq_id\": \"NC_000024.9\",\n \"placement_annot\": {\n \"seq_id_traits_by_assembly\": [\n {\"assembly_name\": \"GRCh37.p13\"}\n ]\n },\n \"alleles\": [\n {\n \"allele\": {\n \"spdi\": {\n \"seq_id\": \"NC_000024.9\",\n \"position\": 535257,\n }\n }\n }\n ],\n }\n ]\n },\n },\n {\n \"refsnp_id\": \"113378274\",\n \"create_date\": \"2010-07-4T18:14Z\",\n \"last_update_date\": \"2016-03-3T10:51Z\",\n 
\"last_update_build_id\": \"147\",\n \"merged_snapshot_data\": {\"merged_into\": [\"72608386\"]},\n },\n {\n \"refsnp_id\": \"72608386\",\n \"create_date\": \"2009-02-14T01:08Z\",\n \"last_update_date\": \"2019-07-14T04:05Z\",\n \"last_update_build_id\": \"153\",\n \"primary_snapshot_data\": {\n \"placements_with_allele\": [\n {\n \"seq_id\": \"NC_000023.10\",\n \"placement_annot\": {\n \"seq_id_traits_by_assembly\": [\n {\"assembly_name\": \"GRCh37.p13\"}\n ]\n },\n \"alleles\": [\n {\n \"allele\": {\n \"spdi\": {\n \"seq_id\": \"NC_000023.10\",\n \"position\": 91941055,\n }\n }\n }\n ],\n }\n ]\n },\n },\n ]\n\n if self.downloads_enabled:\n return SNPs(path, assign_par_snps=True, deduplicate_XY_chrom=False)\n else:\n mock = Mock(side_effect=effects)\n with patch(\"snps.ensembl.EnsemblRestClient.perform_rest_action\", mock):\n return SNPs(path, assign_par_snps=True, deduplicate_XY_chrom=False)\n\n def _get_test_assembly_mapping_data(self, source, target, strands, mappings):\n return {\n \"1\": {\n \"mappings\": [\n {\n \"original\": {\n \"seq_region_name\": \"1\",\n \"strand\": strands[0],\n \"start\": mappings[0],\n \"end\": mappings[0],\n \"assembly\": f\"{source}\",\n },\n \"mapped\": {\n \"seq_region_name\": \"1\",\n \"strand\": strands[1],\n \"start\": mappings[1],\n \"end\": mappings[1],\n \"assembly\": f\"{target}\",\n },\n },\n {\n \"original\": {\n \"seq_region_name\": \"1\",\n \"strand\": strands[2],\n \"start\": mappings[2],\n \"end\": mappings[2],\n \"assembly\": f\"{source}\",\n },\n \"mapped\": {\n \"seq_region_name\": \"1\",\n \"strand\": strands[3],\n \"start\": mappings[3],\n \"end\": mappings[3],\n \"assembly\": f\"{target}\",\n },\n },\n {\n \"original\": {\n \"seq_region_name\": \"1\",\n \"strand\": strands[4],\n \"start\": mappings[4],\n \"end\": mappings[4],\n \"assembly\": f\"{source}\",\n },\n \"mapped\": {\n \"seq_region_name\": \"1\",\n \"strand\": strands[5],\n \"start\": mappings[5],\n \"end\": mappings[5],\n \"assembly\": f\"{target}\",\n },\n },\n ]\n },\n \"3\": {\n \"mappings\": [\n {\n \"original\": {\n \"seq_region_name\": \"3\",\n \"strand\": strands[6],\n \"start\": mappings[6],\n \"end\": mappings[6],\n \"assembly\": f\"{source}\",\n },\n \"mapped\": {\n \"seq_region_name\": \"3\",\n \"strand\": strands[7],\n \"start\": mappings[7],\n \"end\": mappings[7],\n \"assembly\": f\"{target}\",\n },\n }\n ]\n },\n }\n\n def NCBI36_GRCh37(self):\n return self._get_test_assembly_mapping_data(\n \"NCBI36\",\n \"GRCh37\",\n [1, 1, 1, 1, 1, 1, 1, -1],\n [\n 742429,\n 752566,\n 143649677,\n 144938320,\n 143649678,\n 144938321,\n 50908372,\n 50927009,\n ],\n )\n\n def GRCh37_NCBI36(self):\n return self._get_test_assembly_mapping_data(\n \"GRCh37\",\n \"NCBI36\",\n [1, 1, 1, 1, 1, 1, 1, -1],\n [\n 752566,\n 742429,\n 144938320,\n 143649677,\n 144938321,\n 143649678,\n 50927009,\n 50908372,\n ],\n )\n\n def GRCh37_GRCh38(self):\n return self._get_test_assembly_mapping_data(\n \"GRCh37\",\n \"GRCh38\",\n [1, 1, 1, -1, 1, -1, 1, 1],\n [\n 752566,\n 817186,\n 144938320,\n 148946169,\n 144938321,\n 148946168,\n 50927009,\n 50889578,\n ],\n )\n\n def GRCh37_GRCh38_PAR(self):\n return {\n \"X\": {\n \"mappings\": [\n {\n \"original\": {\n \"seq_region_name\": \"X\",\n \"strand\": 1,\n \"start\": 220770,\n \"end\": 220770,\n \"assembly\": \"GRCh37\",\n },\n \"mapped\": {\n \"seq_region_name\": \"X\",\n \"strand\": 1,\n \"start\": 304103,\n \"end\": 304103,\n \"assembly\": \"GRCh38\",\n },\n },\n {\n \"original\": {\n \"seq_region_name\": \"X\",\n \"strand\": 1,\n \"start\": 
91941056,\n \"end\": 91941056,\n \"assembly\": \"GRCh37\",\n },\n \"mapped\": {\n \"seq_region_name\": \"X\",\n \"strand\": 1,\n \"start\": 92686057,\n \"end\": 92686057,\n \"assembly\": \"GRCh38\",\n },\n },\n ]\n },\n \"Y\": {\n \"mappings\": [\n {\n \"original\": {\n \"seq_region_name\": \"Y\",\n \"strand\": 1,\n \"start\": 535258,\n \"end\": 535258,\n \"assembly\": \"GRCh37\",\n },\n \"mapped\": {\n \"seq_region_name\": \"Y\",\n \"strand\": 1,\n \"start\": 624523,\n \"end\": 624523,\n \"assembly\": \"GRCh38\",\n },\n }\n ]\n },\n }\n\n def snps_NCBI36(self):\n return self.create_snp_df(\n rsid=[\"rs3094315\", \"rs2500347\", \"rsIndelTest\", \"rs11928389\"],\n chrom=[\"1\", \"1\", \"1\", \"3\"],\n pos=[742429, 143649677, 143649678, 50908372],\n genotype=[\"AA\", np.nan, \"ID\", \"AG\"],\n )\n\n def snps_GRCh37(self):\n return self.create_snp_df(\n rsid=[\"rs3094315\", \"rs2500347\", \"rsIndelTest\", \"rs11928389\"],\n chrom=[\"1\", \"1\", \"1\", \"3\"],\n pos=[752566, 144938320, 144938321, 50927009],\n genotype=[\"AA\", np.nan, \"ID\", \"TC\"],\n )\n\n def snps_GRCh38(self):\n return self.create_snp_df(\n rsid=[\"rs3094315\", \"rsIndelTest\", \"rs2500347\", \"rs11928389\"],\n chrom=[\"1\", \"1\", \"1\", \"3\"],\n pos=[817186, 148946168, 148946169, 50889578],\n genotype=[\"AA\", \"ID\", np.nan, \"TC\"],\n )\n\n def snps_GRCh37_PAR(self):\n return self.create_snp_df(\n rsid=[\"rs28736870\", \"rs113378274\", \"rs113313554\", \"rs758419898\"],\n chrom=[\"X\", \"X\", \"Y\", \"PAR\"],\n pos=[220770, 91941056, 535258, 1],\n genotype=[\"AA\", \"AA\", \"AA\", \"AA\"],\n )\n\n def snps_GRCh38_PAR(self):\n return self.create_snp_df(\n rsid=[\"rs28736870\", \"rs113378274\", \"rs113313554\"],\n chrom=[\"X\", \"X\", \"Y\"],\n pos=[304103, 92686057, 624523],\n genotype=[\"AA\", \"AA\", \"AA\"],\n )\n\n def generic_snps(self):\n return self.create_snp_df(\n rsid=[\"rs\" + str(i) for i in range(1, 9)],\n chrom=[\"1\"] * 8,\n pos=list(range(101, 109)),\n genotype=[\"AA\", \"CC\", \"GG\", \"TT\", np.nan, \"GC\", \"TC\", \"AT\"],\n )\n\n def generic_snps_vcf(self):\n df = self.generic_snps()\n return df.append(\n self.create_snp_df(\n rsid=[\"rs\" + str(i) for i in range(12, 18)],\n chrom=[\"1\"] * 6,\n pos=list(range(112, 118)),\n genotype=[np.nan] * 6,\n )\n )\n\n def run_parsing_tests(\n self, file, source, phased=False, build=37, build_detected=False, snps_df=None\n ):\n self.make_parsing_assertions(\n self.parse_file(file), source, phased, build, build_detected, snps_df\n )\n self.make_parsing_assertions(\n self.parse_bytes(file), source, phased, build, build_detected, snps_df\n )\n\n with tempfile.TemporaryDirectory() as tmpdir:\n base = os.path.basename(file)\n dest = os.path.join(tmpdir, f\"{base}.gz\")\n gzip_file(file, dest)\n self.make_parsing_assertions(\n self.parse_file(dest), source, phased, build, build_detected, snps_df\n )\n self.make_parsing_assertions(\n self.parse_bytes(dest), source, phased, build, build_detected, snps_df\n )\n # remove .gz extension\n shutil.move(dest, dest[:-3])\n self.make_parsing_assertions(\n self.parse_file(dest[:-3]),\n source,\n phased,\n build,\n build_detected,\n snps_df,\n )\n\n dest = os.path.join(tmpdir, f\"{base}.zip\")\n zip_file(file, dest, base)\n self.make_parsing_assertions(\n self.parse_file(dest), source, phased, build, build_detected, snps_df\n )\n self.make_parsing_assertions(\n self.parse_bytes(dest), source, phased, build, build_detected, snps_df\n )\n # remove .zip extension\n shutil.move(dest, dest[:-4])\n self.make_parsing_assertions(\n 
self.parse_file(dest[:-4]),\n source,\n phased,\n build,\n build_detected,\n snps_df,\n )\n\n def run_parsing_tests_vcf(\n self,\n file,\n source=\"vcf\",\n phased=False,\n unannotated=False,\n rsids=(),\n build=37,\n build_detected=False,\n snps_df=None,\n ):\n # https://samtools.github.io/hts-specs/VCFv4.2.pdf\n # this tests for homozygous snps, heterozygous snps, multiallelic snps,\n # phased snps, and snps with missing rsID\n self.make_parsing_assertions_vcf(\n self.parse_file(file, rsids),\n source,\n phased,\n unannotated,\n rsids,\n build,\n build_detected,\n snps_df,\n )\n self.make_parsing_assertions_vcf(\n self.parse_bytes(file, rsids),\n source,\n phased,\n unannotated,\n rsids,\n build,\n build_detected,\n snps_df,\n )\n\n with tempfile.TemporaryDirectory() as tmpdir:\n base = os.path.basename(file)\n dest = os.path.join(tmpdir, f\"{base}.gz\")\n gzip_file(file, dest)\n self.make_parsing_assertions_vcf(\n self.parse_file(dest, rsids),\n source,\n phased,\n unannotated,\n rsids,\n build,\n build_detected,\n snps_df,\n )\n self.make_parsing_assertions_vcf(\n self.parse_bytes(dest, rsids),\n source,\n phased,\n unannotated,\n rsids,\n build,\n build_detected,\n snps_df,\n )\n # remove .gz extension\n shutil.move(dest, dest[:-3])\n self.make_parsing_assertions_vcf(\n self.parse_file(dest[:-3], rsids),\n source,\n phased,\n unannotated,\n rsids,\n build,\n build_detected,\n snps_df,\n )\n\n def make_normalized_dataframe_assertions(self, df):\n self.assertEqual(df.index.name, \"rsid\")\n self.assertTrue(is_object_dtype(df.index.dtype))\n self.assertTrue(is_object_dtype(df.chrom.dtype))\n self.assertTrue(is_unsigned_integer_dtype(df.pos.dtype))\n self.assertTrue(is_object_dtype(df.genotype.dtype))\n\n def parse_file(self, file, rsids=()):\n return SNPs(file, rsids=rsids)\n\n def parse_bytes(self, file, rsids=()):\n with open(file, \"rb\") as f:\n return SNPs(f.read(), rsids=rsids)\n\n def make_parsing_assertions(\n self, snps, source, phased, build, build_detected, snps_df\n ):\n if snps_df is None:\n snps_df = self.generic_snps()\n\n # these are useful for debugging if there is a problem\n print(\"Observed:\")\n print(snps.snps)\n print(snps.snps.info())\n print(\"Expected:\")\n print(snps_df)\n print(snps_df.info())\n\n self.assertEqual(snps.source, source)\n pd.testing.assert_frame_equal(snps.snps, snps_df, check_exact=True)\n self.assertTrue(snps.phased) if phased else self.assertFalse(snps.phased)\n self.assertEqual(snps.build, build)\n self.assertTrue(snps.build_detected) if build_detected else self.assertFalse(\n snps.build_detected\n )\n self.make_normalized_dataframe_assertions(snps.snps)\n\n def make_parsing_assertions_vcf(\n self, snps, source, phased, unannotated, rsids, build, build_detected, snps_df\n ):\n if snps_df is None:\n snps_df = self.generic_snps_vcf()\n\n self.assertEqual(snps.source, source)\n\n if unannotated:\n self.assertTrue(snps.unannotated_vcf)\n self.assertEqual(0, snps.count)\n else:\n self.assertFalse(snps.unannotated_vcf)\n pd.testing.assert_frame_equal(\n snps.snps, snps_df.loc[rsids], check_exact=True\n ) if rsids else pd.testing.assert_frame_equal(\n snps.snps, snps_df, check_exact=True\n )\n\n self.assertTrue(snps.phased) if phased else self.assertFalse(snps.phased)\n self.assertEqual(snps.build, build)\n self.assertTrue(snps.build_detected) if build_detected else self.assertFalse(\n snps.build_detected\n )\n self.make_normalized_dataframe_assertions(snps.snps)\n"
] |
[
[
"pandas.api.types.is_object_dtype",
"pandas.isnull",
"numpy.arange",
"pandas.DataFrame",
"pandas.testing.assert_frame_equal",
"pandas.api.types.is_unsigned_integer_dtype"
]
] |
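A short sketch of the dtype normalization that create_snp_df() and make_normalized_dataframe_assertions() in the snps test module above enforce (object rsid index, object chrom and genotype, unsigned-integer pos); the two-row frame here is a stand-in, and it assumes pandas and numpy are installed.

import numpy as np
import pandas as pd
from pandas.api.types import is_object_dtype, is_unsigned_integer_dtype

df = pd.DataFrame(
    {"rsid": ["rs1", "rs2"], "chrom": ["1", "1"],
     "pos": [101, 102], "genotype": ["AA", np.nan]},
    columns=["rsid", "chrom", "pos", "genotype"],
)
df.pos = df.pos.astype(np.uint32)   # positions stored as unsigned 32-bit ints
df = df.set_index("rsid")

assert is_object_dtype(df.index.dtype)
assert is_object_dtype(df.chrom.dtype)
assert is_unsigned_integer_dtype(df.pos.dtype)
assert is_object_dtype(df.genotype.dtype)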
sddshao/Auto-Lianliankan
|
[
"5bf7f1bbc6e38223f4213e8742ef098b3dad9769"
] |
[
"run.py"
] |
[
"'''\npython 版本:3.5\nopencv 下载链接: \nhttps://www.lfd.uci.edu/~gohlke/pythonlibs/#opencv\n选择版本:opencv_python‑3.4.1‑cp35‑cp35m‑win_amd64.whl\npywin32 下载链接:\nhttps://www.lfd.uci.edu/~gohlke/pythonlibs/#pywin32\n选择版本:pywin32-223-cp35-cp35m-win_amd64.whl \n'''\n\nimport metching\nimport cv2\nimport numpy as np\nimport win32api\nimport win32gui\nimport win32con\nfrom PIL import ImageGrab\nimport time\nfrom config import *\n\n\n# 获取窗体坐标位置(左上)\ndef getGameWindowPosition():\n # FindWindow(lpClassName=None, lpWindowName=None) 窗口类名 窗口标题名\n window = win32gui.FindWindow(None,WINDOW_TITLE)\n # 没有定位到游戏窗体\n while not window:\n print('定位游戏窗体失败,5秒后重试...')\n time.sleep(5)\n window = win32gui.FindWindow(None,WINDOW_TITLE)\n # 定位到游戏窗体\n win32gui.SetForegroundWindow(window) # 将窗体顶置\n pos = win32gui.GetWindowRect(window)\n print(\"定位到游戏窗体:\" + str(pos))\n return (pos[0],pos[1])\n\n# 获取一张完整的屏幕截图\ndef getScreenImage():\n print('捕获屏幕截图...')\n scim = ImageGrab.grab() # 屏幕截图,获取到的是Image类型对象\n scim.save('screen.png')\n return cv2.imread(\"screen.png\") # opencv 读取,拿到的是ndarray存储的图像\n\n# 从屏幕截图中识别\ndef getAllSquare(screen_image,game_pos):\n print('图像切片处理...')\n # 通过游戏窗体,找到连连看连接的区域:\n game_x = game_pos[0] + MARGIN_LEFT\n game_y = game_pos[1] + MARGIN_HEIGHT\n # 从连接区域左上开始,把图像切割成一个一个的小块,切割标准是按照小块的横纵坐标。\n all_square = []\n for x in range(0,H_NUM):\n # line_square = []\n for y in range(0,V_NUM):\n # ndarray的切片方法,[纵坐标起始位置:纵坐标结束为止,横坐标起始位置:横坐标结束位置]\n square = screen_image[game_y + y * SQUARE_HEIGHT :game_y + (y+1) * SQUARE_HEIGHT,game_x + x * SQUARE_WIDTH:game_x + (x+1) * SQUARE_WIDTH]\n all_square.append(square)\n # 因为有些图片的边缘不一致造成干扰(主要是空白区域的切图),所以把每张小方块向内缩小一部分再\n # 对所有的方块进行处理屏蔽掉外边缘 然后返回\n return list(map(lambda square : square[SUB_LT_Y:SUB_RB_Y,SUB_LT_X:SUB_RB_X],all_square))\n # 上面这行相当于下面这4行\n # new_all_square = []\n # for square in all_square:\n # s = square[SUB_LT_Y:SUB_RB_Y, SUB_LT_X:SUB_RB_X]\n # new_all_square.append(s)\n # return new_all_square\n\n# 判断图像是否与已经在列表中的图像相同,如果是返回True\ndef isImageExist(img,img_list):\n for existed_img in img_list:\n b = np.subtract(existed_img,img) # 图片数组进行比较,返回的是两个图片像素点差值的数组,\n if not np.any(b): # 如果全部是0,说明两图片完全相同。\n return True\n else:\n continue\n return False\n\n# 获取所有的方块类型\ndef getAllSquareTypes(all_square):\n print(\"将图像矩阵按类型归入类型列表...\")\n types = []\n # 先把空白添加到数组中,作为0号\n empty_img = cv2.imread('empty.png')\n types.append(empty_img)\n for square in all_square:\n # 如果这个图像不存在的话将图像保存起来\n if not isImageExist(square,types):\n types.append(square)\n return types\n\n# 将所有的方块与类型进行比较,转置成数字\ndef getAllSquareRecord(all_square_list,types):\n print(\"将所有的方块与类型进行比较,转置成数字矩阵...\")\n record = [] # 整个记录的二维数组\n line = [] # 记录一行\n for square in all_square_list: # 把所有的方块和保存起来的所有类型做对比\n num = 0\n for type in types: # 所有类型\n res = cv2.subtract(square,type) # 作比较\n if not np.any(res): # 如果两个图片一样\n line.append(num) # 将类型的数字记录进这一行\n break # 并且跳出循环\n num += 1 # 如果没有匹配上,则类型数加1\n\n if len(line) == V_NUM: # 如果校验完这一行已经有了11个数据,则另起一行\n record.append(line)\n line = []\n print(record)\n return record\n\n# 自动消除\ndef autoRelease(result,game_x,game_y):\n for i in range(0,len(result)):\n for j in range(0,len(result[0])):\n # 以上两个for循环,定位第一个选中点\n if result[i][j] != 0:\n for m in range(0,len(result)):\n for n in range(0,len(result[0])):\n if result[m][n] != 0:\n # 后两个for循环定位第二个选中点\n if metching.canConnect(i,j,m,n,result):\n # 执行消除算法并返回\n result[i][j] = 0\n result[m][n] = 0\n print('可消除点:'+ str(i+1) + ',' + str(j+1) + '和' + str(m+1) + ',' + str(n+1))\n x1 = game_x + j*SQUARE_WIDTH\n y1 = game_y + i*SQUARE_HEIGHT\n x2 = game_x + 
n*SQUARE_WIDTH\n y2 = game_y + m*SQUARE_HEIGHT\n win32api.SetCursorPos((x1 + 15,y1 + 18))\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, x1+15, y1+18, 0, 0)\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, x1+15, y1+18, 0, 0)\n time.sleep(TIME_INTERVAL)\n\n win32api.SetCursorPos((x2 + 15, y2 + 18))\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, x2 + 15, y2 + 18, 0, 0)\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, x2 + 15, y2 + 18, 0, 0)\n time.sleep(TIME_INTERVAL)\n return True\n return False\n\ndef autoRemove(squares,game_pos):\n # 每次消除一对儿,QQ的连连看最多105对儿\n game_x = game_pos[0] + MARGIN_LEFT\n game_y = game_pos[1] + MARGIN_HEIGHT\n # 判断是否消除完了?如果没有的话,点击重列后继续消除\n for i in range(0,105):\n autoRelease(squares,game_x,game_y)\n\n\nif __name__ == '__main__':\n # 1、定位游戏窗体\n game_pos = getGameWindowPosition()\n time.sleep(1)\n # 2、从屏幕截图一张,通过opencv读取\n screen_image = getScreenImage()\n # 3、图像切片,把截图中的连连看切成一个一个的小方块,保存在一个数组中\n all_square_list = getAllSquare(screen_image,game_pos)\n # 4、切片处理后的图片,相同的作为一种类型,放在数组中。\n types = getAllSquareTypes(all_square_list)\n # 5、将切片处理后的图片,转换成相对应的数字矩阵。注意 拿到的数组是横纵逆向的,转置一下。\n result = np.transpose(getAllSquareRecord(all_square_list,types))\n # 6、执行自动消除\n autoRemove(result,game_pos)\n # 7、消除完成,释放资源。\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\n\n\n\n\n\n"
] |
[
[
"numpy.subtract",
"numpy.any"
]
] |
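An illustrative sketch of the pixel-difference equality check that isImageExist() in the run.py record above relies on (np.subtract followed by np.any), using tiny synthetic tiles instead of screen captures; the array shapes are stand-ins.

import numpy as np

tile_a = np.zeros((4, 4, 3), dtype=np.uint8)
tile_b = tile_a.copy()
tile_c = tile_a.copy()
tile_c[0, 0, 0] = 255  # one differing pixel

def same_tile(img1, img2):
    # identical images leave an all-zero difference array, so np.any() is False
    return not np.any(np.subtract(img1, img2))

print(same_tile(tile_a, tile_b))  # True
print(same_tile(tile_a, tile_c))  # False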
wullli/flatlander
|
[
"2c7fbd3d025f2a05c40895ec735a92d7a6bfb1ad",
"2c7fbd3d025f2a05c40895ec735a92d7a6bfb1ad"
] |
[
"flatlander/test/observations/fixed_tree_test.py",
"flatlander/envs/utils/global_gym_env.py"
] |
[
"import unittest\n\nimport numpy as np\nimport numpy.testing as npt\n\nfrom flatlander.envs.observations.fixed_tree_obs import FixedTreeObservation\nfrom flatlander.test.observations.dummy_tree_builder import DummyBuilder, DummyBuilderForward, \\\n DummyBuilderForwardAlternative, DummyBuilderBackward\n\n\nclass FixedTreeObservationTest(unittest.TestCase):\n \"\"\"\n 0: 'B',\n 1: 'L',\n 2: 'F',\n 3: 'R',\n 4: 'S',\n \"\"\"\n\n def prep_obs(self):\n self.obs = FixedTreeObservation({'max_depth': 2, 'shortest_path_max_depth': 30})\n self.obs._builder._builder = DummyBuilder(self.obs._builder._builder)\n\n def prep_obs_forward(self):\n self.obs = FixedTreeObservation({'max_depth': 2, 'shortest_path_max_depth': 30})\n self.obs._builder._builder = DummyBuilderForward(self.obs._builder._builder)\n\n def prep_obs_forward_alt(self):\n self.obs = FixedTreeObservation({'max_depth': 2, 'shortest_path_max_depth': 30})\n self.obs._builder._builder = DummyBuilderForwardAlternative(self.obs._builder._builder)\n\n def prep_obs_backward(self):\n self.obs = FixedTreeObservation({'max_depth': 2, 'shortest_path_max_depth': 30})\n self.obs._builder._builder = DummyBuilderBackward(self.obs._builder._builder)\n\n def test_root_position(self):\n self.prep_obs()\n obs = self.obs.builder().get(handle=0)\n print(obs)\n assert np.all(obs[-1] != -1)\n\n def test_leaf_position_forward_tree(self):\n self.prep_obs_forward()\n obs = self.obs.builder().get(handle=0)\n print(obs)\n assert np.all(obs[0] != -1)\n assert np.all(obs[1] != -1)\n assert np.all(obs[4] != -1)\n assert np.all(obs[15] != -1)\n assert np.all(obs[16] != -1)\n assert np.all(obs[19] != -1)\n assert np.all(obs[20] != -1)\n\n def test_leaf_position_fix_tree(self):\n self.prep_obs_forward_alt()\n obs = self.obs.builder().get(handle=0)\n print(obs)\n assert np.all(obs[0] != -1)\n\n def test_leaf_position_backward_tree(self):\n self.prep_obs_backward()\n obs = self.obs.builder().get(handle=0)\n print(obs)\n assert np.all(obs[1] != -1)\n\n\n def test_leaf_position_left_tree(self):\n self.prep_obs()\n obs = self.obs.builder().get(handle=0)\n print(obs)\n assert np.all(obs[13] != -1)\n assert np.all(obs[14] != -1)\n assert np.all(obs[18] != -1)\n assert np.all(obs[19] != -1)\n assert np.all(obs[20] != -1)\n\n",
"from collections import defaultdict\nfrom typing import Dict, NamedTuple, Any, Optional, Callable\n\nimport gym\nimport numpy as np\n\nfrom flatland.envs.rail_env import RailEnv, RailEnvActions\nfrom flatlander.envs.utils.gym_env import StepOutput\n\n\nclass GlobalFlatlandGymEnv(gym.Env):\n action_space = gym.spaces.Discrete(5)\n\n metadata = {\n 'render.modes': ['human', 'rgb_array'],\n 'video.frames_per_second': 10,\n 'semantics.autoreset': True\n }\n\n def __init__(self,\n rail_env: RailEnv,\n observation_space: gym.spaces.Space,\n regenerate_rail_on_reset: bool = True,\n regenerate_schedule_on_reset: bool = True, config=None, **kwargs) -> None:\n super().__init__()\n self._agents_done = []\n self._agent_scores = defaultdict(float)\n self._agent_steps = defaultdict(int)\n self._regenerate_rail_on_reset = regenerate_rail_on_reset\n self._regenerate_schedule_on_reset = regenerate_schedule_on_reset\n self.rail_env = rail_env\n self.observation_space = observation_space\n self.exclude_done_agents = config.get(\"exclude_done_agents\", True)\n self.fill_done_agents = config.get(\"fill_done_agents\", True)\n self.global_done_signal = config.get(\"global_done_signal\", False)\n self._step_out: Callable = self.get_independent_done_observations if self.exclude_done_agents \\\n else self.get_global_done_observations\n\n def step(self, action_dict: Dict[int, RailEnvActions]) -> StepOutput:\n obs, rewards, dones, infos = self.rail_env.step(action_dict)\n\n o, r, d = self._step_out(obs, dones)\n\n assert len(obs) > 0\n assert all([x is not None for x in (dones, rewards, obs)])\n\n return StepOutput(obs=o, reward=r, done=d, info={agent: {\n 'max_episode_steps': self.rail_env._max_episode_steps,\n 'num_agents': self.rail_env.get_num_agents(),\n 'agent_done': dones[agent] and agent not in self.rail_env.active_agents,\n 'agent_score': self._agent_scores[agent],\n 'agent_step': self._agent_steps[agent],\n } for agent in o.keys()})\n\n def get_independent_done_observations(self, obs, dones):\n o, r, d = {}, {}, {}\n for handle, done in dones.items():\n if handle != \"__all__\":\n if done and handle not in self._agents_done:\n r[handle] = 0\n o[handle] = obs[handle]\n d[handle] = done\n self._agents_done.append(handle)\n elif handle not in self._agents_done:\n o[handle] = obs[handle]\n r[handle] = -1\n d[handle] = done\n else:\n d[handle] = done\n\n global_reward = np.sum(list(r.values()), dtype=np.float) if not d[\"__all__\"] else 1.\n r = {handle: global_reward for handle in r.keys()}\n return o, r, d\n\n def get_global_done_observations(self, obs, dones):\n o, r, d = {}, {}, {}\n for handle, done in dones.items():\n if handle != \"__all__\":\n if done:\n r[handle] = 0\n o[handle] = np.zeros(shape=self.observation_space.shape) if self.fill_done_agents else obs[handle]\n else:\n r[handle] = -1\n o[handle] = obs[handle]\n if self.global_done_signal:\n d[handle] = dones[\"__all__\"]\n else:\n d[handle] = done\n\n global_reward = np.mean(list(r.values()), dtype=np.float) if not d[\"__all__\"] else 1.\n r = {handle: global_reward for handle in r.keys()}\n assert len(o.keys()) == self.rail_env.get_num_agents()\n return o, r, d\n\n def reset(self, random_seed: Optional[int] = None) -> Dict[int, Any]:\n self._agents_done = []\n self._agent_scores = defaultdict(float)\n self._agent_steps = defaultdict(int)\n obs, infos = self.rail_env.reset(regenerate_rail=self._regenerate_rail_on_reset,\n regenerate_schedule=self._regenerate_schedule_on_reset,\n random_seed=random_seed)\n return {k: o for k, o in obs.items() if 
not k == '__all__'}\n\n def render(self, mode='human'):\n return self.rail_env.render(mode)\n\n def close(self):\n self.rail_env.close()\n"
] |
[
[
"numpy.all"
],
[
"numpy.zeros"
]
] |
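The `GlobalFlatlandGymEnv.get_global_done_observations` method in the record above zero-fills the observations of finished agents and hands every remaining agent the same mean reward. Below is a minimal, dependency-free sketch of that aggregation pattern, assuming plain dict inputs and a toy observation shape; the function name and sample data are illustrative and not part of the original file.

```python
# Minimal sketch of the "global done" aggregation used by
# GlobalFlatlandGymEnv.get_global_done_observations, without the Flatland dependency.
# The observation shape and the toy inputs are assumptions for illustration.
import numpy as np

def aggregate_global_done(obs, dones, obs_shape=(4,), fill_done=True):
    o, r, d = {}, {}, {}
    for handle, done in dones.items():
        if handle == "__all__":
            continue
        if done:
            r[handle] = 0.0
            # done agents keep a zero observation so the output dict keeps a fixed size
            o[handle] = np.zeros(shape=obs_shape) if fill_done else obs[handle]
        else:
            r[handle] = -1.0
            o[handle] = obs[handle]
        d[handle] = done
    # every agent receives the same (mean) reward; 1.0 once the whole episode is done
    global_reward = 1.0 if dones["__all__"] else float(np.mean(list(r.values())))
    r = {handle: global_reward for handle in r}
    return o, r, d

obs = {0: np.ones(4), 1: np.ones(4)}
dones = {0: True, 1: False, "__all__": False}
o, r, d = aggregate_global_done(obs, dones)
print(r)   # {0: -0.5, 1: -0.5}
```

Keeping finished agents in the dict with zero observations is what the `fill_done_agents` flag in the original controls.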
prenigma/Optimus
|
[
"7f450eda8ecf00db38478b69ee11abb453a7b3ad"
] |
[
"optimus/helpers/functions.py"
] |
[
"import collections\nimport functools\nimport glob\nimport ntpath\nimport os\nimport random\nimport re\nimport subprocess\nimport sys\nimport tempfile\nfrom collections import Counter\nfrom pathlib import Path\nfrom urllib.request import Request, urlopen\n\nimport fastnumbers\nimport humanize\nimport numpy as np\nimport pandas as pd\nimport six\nfrom fastnumbers import isint, isfloat\nfrom string_grouper import match_strings\n\nfrom optimus import ROOT_DIR\nfrom optimus.engines import functions as F # Used in eval\nfrom optimus.helpers.check import is_url\nfrom optimus.helpers.columns import parse_columns\nfrom optimus.helpers.converter import any_dataframe_to_pandas\nfrom optimus.helpers.core import val_to_list, one_list_to_val\nfrom optimus.helpers.logger import logger\nfrom optimus.helpers.raiseit import RaiseIt\nfrom optimus.infer import is_\n\nF = F # To do not remove the import accidentally when using pycharm auto clean import feature\n\n\ndef random_int(n=5):\n \"\"\"\n Create a random string of ints\n :return:\n \"\"\"\n return str(random.randint(1, 10 ** n))\n\n\ndef collect_as_list(df):\n return df.rdd.flatMap(lambda x: x).collect()\n\n\ndef collect_as_dict(df, limit=None):\n \"\"\"\n Return a dict from a Collect result\n [(col_name, row_value),(col_name_1, row_value_2),(col_name_3, row_value_3),(col_name_4, row_value_4)]\n :return:\n \"\"\"\n\n dict_result = []\n\n df = any_dataframe_to_pandas(df)\n\n # if there is only an element in the dict just return the value\n if len(dict_result) == 1:\n dict_result = next(iter(dict_result.values()))\n else:\n col_names = parse_columns(df, \"*\")\n\n # Because asDict can return messed columns names we order\n for index, row in df.iterrows():\n # _row = row.asDict()\n r = collections.OrderedDict()\n # for col_name, value in row.iteritems():\n for col_name in col_names:\n r[col_name] = row[col_name]\n dict_result.append(r)\n return dict_result\n\n\n# def collect_as_dict(df, limit=None):\n# \"\"\"\n# Return a dict from a Collect result\n# :param df:\n# :return:\n# \"\"\"\n# # # Explore this approach seems faster\n# # use_unicode = True\n# # from pyspark.serializers import UTF8Deserializer\n# # from pyspark.rdd import RDD\n# # rdd = df._jdf.toJSON()\n# # r = RDD(rdd.toJavaRDD(), df._sc, UTF8Deserializer(use_unicode))\n# # if limit is None:\n# # r.collect()\n# # else:\n# # r.take(limit)\n# # return r\n# #\n# from optimus.helpers.columns import parse_columns\n# dict_result = []\n#\n# # if there is only an element in the dict just return the value\n# if len(dict_result) == 1:\n# dict_result = next(iter(dict_result.values()))\n# else:\n# col_names = parse_columns(df, \"*\")\n#\n# # Because asDict can return messed columns names we order\n# for row in df.collect():\n# _row = row.asDict()\n# r = collections.OrderedDict()\n# for col in col_names:\n# r[col] = _row[col]\n# dict_result.append(r)\n# return dict_result\n\n\ndef filter_list(val, index=0):\n \"\"\"\n Convert a list to None, int, str or a list filtering a specific index\n [] to None\n ['test'] to test\n\n :param val:\n :param index:\n :return:\n \"\"\"\n if len(val) == 0:\n return None\n else:\n return one_list_to_val([column[index] for column in val])\n\n\ndef absolute_path(files, format=\"posix\"):\n \"\"\"\n User project base folder to construct and absolute path\n :param files: path files\n :param format: posix or uri\n :return:\n \"\"\"\n files = val_to_list(files)\n result = None\n if format == \"uri\":\n result = [Path(ROOT_DIR + file).as_uri() for file in files]\n elif format == 
\"posix\":\n result = [Path(ROOT_DIR + file).as_posix() for file in files]\n else:\n RaiseIt.value_error(format, [\"posix\", \"uri\"])\n\n result = one_list_to_val(result)\n return result\n\n\ndef format_path(path, format=\"posix\"):\n \"\"\"\n Format a path depending fo the operative system\n :param path:\n :param format:\n :return:\n \"\"\"\n if format == \"uri\":\n result = Path(path).as_uri()\n elif format == \"posix\":\n result = Path(path).as_posix()\n return result\n\n\ndef java_version():\n version = subprocess.check_output(['java', '-version'], stderr=subprocess.STDOUT)\n pattern = '\\\"(\\d+\\.\\d+).*\\\"'\n print(re.search(pattern, version).groups()[0])\n\n\ndef setup_google_colab():\n \"\"\"\n Check if we are in Google Colab and setup it up\n :return:\n \"\"\"\n from optimus.helpers.constants import JAVA_PATH_COLAB\n from optimus.engines.spark.constants import SPARK_PATH_COLAB\n from optimus.engines.spark.constants import SPARK_URL\n from optimus.engines.spark.constants import SPARK_FILE\n\n IN_COLAB = 'google.colab' in sys.modules\n\n if IN_COLAB:\n if not os.path.isdir(JAVA_PATH_COLAB) or not os.path.isdir(SPARK_PATH_COLAB):\n print(\"Installing Optimus, Java8 and Spark. It could take 3 min...\")\n commands = [\n \"apt-get install openjdk-8-jdk-headless -qq > /dev/null\",\n \"wget -q {SPARK_URL}\".format(SPARK_URL=SPARK_URL),\n \"tar xf {SPARK_FILE}\".format(SPARK_FILE=SPARK_FILE)\n ]\n\n cmd = \" && \".join(commands)\n\n p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)\n p_stdout = p.stdout.read().decode(\"ascii\")\n p_stderr = p.stderr.read().decode(\"ascii\")\n print(p_stdout, p_stderr)\n\n else:\n print(\"Settings env vars\")\n # Always configure the env vars\n\n os.environ[\"JAVA_HOME\"] = JAVA_PATH_COLAB\n os.environ[\"SPARK_HOME\"] = SPARK_PATH_COLAB\n\n\ndef is_pyarrow_installed():\n \"\"\"\n Check if pyarrow is installed\n :return:\n \"\"\"\n try:\n import pyarrow\n have_arrow = True\n except ImportError:\n have_arrow = False\n return have_arrow\n\n\ndef check_env_vars(env_vars):\n \"\"\"\n Check if a environment var exist\n :param env_vars: Environment var name\n :return:\n \"\"\"\n\n for env_var in env_vars:\n if env_var in os.environ:\n logger.print(env_var + \"=\" + os.environ.get(env_var))\n else:\n logger.print(env_var + \" is not set\")\n\n\n# Reference https://nvie.com/posts/modifying-deeply-nested-structures/\n\n\ndef ellipsis(data, length=20):\n \"\"\"\n Add a \"...\" if a string y greater than a specific length\n :param data:\n :param length: length taking into account to cut the string\n :return:\n \"\"\"\n data = str(data)\n return (data[:length] + '..') if len(data) > length else data\n\n\ndef create_buckets(lower_bound, upper_bound, bins):\n \"\"\"\n Create a dictionary with bins\n :param lower_bound: low range\n :param upper_bound: high range\n :param bins: number of buckets\n :return:\n \"\"\"\n range_value = (upper_bound - lower_bound) / bins\n low = lower_bound\n\n buckets = []\n\n if bins == 1:\n buckets.append({\"lower\": low, \"upper\": low + 1, \"bucket\": 0})\n else:\n for i in range(0, bins):\n high = low + range_value\n buckets.append({\"lower\": low, \"upper\": high, \"bucket\": i})\n low = high\n\n # Ensure that the upper bound is exactly the higher value.\n # Because floating point calculation it can miss the upper bound in the final sum\n\n buckets[bins - 1][\"upper\"] = upper_bound\n return buckets\n\n\ndef deep_sort(obj):\n \"\"\"\n Recursively sort list or dict nested lists\n 
\"\"\"\n\n if isinstance(obj, dict):\n _sorted = {}\n for key in sorted(obj):\n _sorted[key] = deep_sort(obj[key])\n\n elif isinstance(obj, list):\n new_list = []\n for val in obj:\n new_list.append(deep_sort(val))\n _sorted = sorted(new_list)\n\n else:\n _sorted = obj\n\n return _sorted\n\n\ndef infer_dataframes_keys(df_left: pd.DataFrame, df_right: pd.DataFrame):\n \"\"\"\n Infer the possible key columns in two data frames\n :param df_left: \n :param df_right: \n :return: \n \"\"\"\n result = []\n\n df_left = df_left.dropna().astype(str)\n df_right = df_right.dropna().astype(str)\n\n # Search column names wiht *id* substring\n def check_ids_columns(_df):\n return [x for x in _df.columns if re.search(r\"_id| id|id_| id \", x)]\n\n ids_columns_left = check_ids_columns(df_left)\n ids_columns_right = check_ids_columns(df_right)\n if len(ids_columns_left) == len(ids_columns_right):\n for i, j in zip(ids_columns_left, ids_columns_right):\n result.append((i, j,))\n\n # Numeric median len\n def min_max_len(_df):\n\n df_is_int = _df.applymap(lambda value: fastnumbers.isint(value)).sum()\n df_is_int = df_is_int[df_is_int == len(_df)]\n int_columns_names = df_is_int.index.values\n int_columns_df = _df[int_columns_names]\n string_len = int_columns_df.applymap(lambda value: len(value))\n return (int_columns_names, string_len.min().values, string_len.max().values)\n\n min_max_df_left = min_max_len(df_left)\n min_max_df_right = min_max_len(df_right)\n\n def median_len(arr, idx):\n \"\"\"\n Calculate median len of the columns string\n :param arr:\n :param idx:\n :return:\n \"\"\"\n _min = arr[1][idx]\n _max = arr[2][idx]\n if _min != _max:\n _median = _max - _min\n else:\n _median = _max\n return _median\n\n for i, col_l in enumerate(min_max_df_left[0]):\n median_left = median_len(min_max_df_left, i)\n for j, col_r in enumerate(min_max_df_right[0]):\n median_right = median_len(min_max_df_right, j)\n if median_left == median_right:\n result.append((col_l, col_r,))\n\n # String Clustering\n for col_l in df_left:\n for col_r in df_right:\n try:\n m = match_strings(df_left[col_l], df_right[col_r], min_similarity=0.05)\n if len(m) > 0:\n result.append((col_l, col_r,))\n except ValueError:\n pass\n # Count tuples\n return [(count,) + item for item, count in Counter(result).items()]\n\n\ndef update_dict(d, u):\n \"\"\"\n Update only the given keys\n :param d:\n :param u:\n :return:\n \"\"\"\n # python 3.8+ compatibility\n try:\n collectionsAbc = collections.abc\n except ModuleNotFoundError:\n collectionsAbc = collections\n\n for k, v in six.iteritems(u):\n dv = d.get(k, {})\n if not isinstance(dv, collectionsAbc.Mapping):\n d[k] = v\n elif isinstance(v, collectionsAbc.Mapping):\n d[k] = update_dict(dv, v)\n else:\n d[k] = v\n return d\n\n\ndef reduce_mem_usage(df, categorical=True, categorical_threshold=50, verbose=False):\n \"\"\"\n Change the columns datatypes to reduce the memory usage. 
Also identify\n :param df:\n :param categorical:\n :param categorical_threshold:\n :param verbose:\n :return:\n \"\"\"\n\n # Reference https://www.kaggle.com/arjanso/reducing-dataframe-memory-size-by-65/notebook\n\n start_mem_usg = df.ext.size()\n\n ints = df.applymap(isint).sum().compute().to_dict()\n floats = df.applymap(isfloat).sum().compute().to_dict()\n nulls = df.isnull().sum().compute().to_dict()\n total_rows = len(df)\n\n columns_dtype = {}\n for x, y in ints.items():\n\n if ints[x] == nulls[x]:\n dtype = \"object\"\n elif floats[x] == total_rows:\n dtype = \"numerical\"\n elif total_rows <= ints[x] + nulls[x]:\n dtype = \"numerical\"\n else:\n dtype = \"object\"\n columns_dtype[x] = dtype\n\n numerical_int = [col for col, dtype in columns_dtype.items() if dtype == \"numerical\"]\n final = {}\n\n if len(numerical_int) > 0:\n min_max = df.cols.range(numerical_int)\n\n import numpy as np\n for col_name in min_max.keys():\n _min = min_max[col_name][\"min\"]\n _max = min_max[col_name][\"max\"]\n if _min >= 0:\n if _max < 255:\n final[col_name] = np.uint8\n elif _max < 65535:\n final[col_name] = np.uint16\n elif _max < 4294967295:\n final[col_name] = np.uint32\n else:\n final[col_name] = np.uint64\n else:\n if _min > np.iinfo(np.int8).min and _max < np.iinfo(np.int8).max:\n final[col_name] = np.int8\n elif _min > np.iinfo(np.int16).min and _max < np.iinfo(np.int16).max:\n final[col_name] = np.int16\n elif _min > np.iinfo(np.int32).min and _max < np.iinfo(np.int32).max:\n final[col_name] = np.int32\n elif _min > np.iinfo(np.int64).min and _max < np.iinfo(np.int64).max:\n final[col_name] = np.int64\n # print(final[col_name])\n\n object_int = [col for col, dtype in columns_dtype.items() if dtype == \"object\"]\n if len(object_int) > 0:\n count_values = df.cols.value_counts(object_int)\n\n # if categorical is True:\n # for col_name in object_int:\n # if len(count_values[col_name]) <= categorical_threshold:\n # final[col_name] = \"category\"\n\n df = df.astype(final)\n mem_usg = df.ext.size()\n\n if verbose is True:\n print(\"Memory usage after optimization:\", humanize.naturalsize(start_mem_usg))\n print(\"Memory usage before optimization is: \", humanize.naturalsize(mem_usg))\n print(round(100 * mem_usg / start_mem_usg), \"% of the initial size\")\n\n return df\n\n\ndef downloader(url, file_format):\n \"\"\"\n Send the request to download a file\n \"\"\"\n\n def write_file(response, file, chunk_size=8192):\n \"\"\"\n Load the data from the http request and save it to disk\n :param response: data returned from the server\n :param file:\n :param chunk_size: size chunk size of the data\n :return:\n \"\"\"\n total_size = response.headers['Content-Length'].strip() if 'Content-Length' in response.headers else 100\n total_size = int(total_size)\n bytes_so_far = 0\n\n while 1:\n chunk = response.read(chunk_size)\n bytes_so_far += len(chunk)\n if not chunk:\n break\n file.write(chunk)\n total_size = bytes_so_far if bytes_so_far > total_size else total_size\n\n return bytes_so_far\n\n # try to infer the file format using the file extension\n if file_format is None:\n filename, file_format = os.path.splitext(url)\n file_format = file_format.replace('.', '')\n\n i = url.rfind('/')\n data_name = url[(i + 1):]\n\n headers = {\"User-Agent\": \"Optimus Data Downloader/1.0\"}\n\n req = Request(url, None, headers)\n\n logger.print(\"Downloading %s from %s\", data_name, url)\n\n # It seems that avro need a .avro extension file\n with tempfile.NamedTemporaryFile(suffix=\".\" + file_format, delete=False) as 
f:\n bytes_downloaded = write_file(urlopen(req), f)\n path = f.name\n\n if bytes_downloaded > 0:\n logger.print(\"Downloaded %s bytes\", bytes_downloaded)\n\n logger.print(\"Creating DataFrame for %s. Please wait...\", data_name)\n\n return path\n\n\n@functools.lru_cache(maxsize=128)\ndef prepare_path(path, file_format=None):\n \"\"\"d\n Helper to return the file to be loaded and the file name.\n This will memoise\n :param path: Path to the file to be loaded\n :param file_format: format file\n :return:\n \"\"\"\n r = []\n if is_url(path):\n file = downloader(path, file_format)\n file_name = ntpath.basename(path)\n r = [(file, file_name,)]\n else:\n for file_name in glob.glob(path, recursive=True):\n r.append((file_name, ntpath.basename(file_name),))\n if len(r) == 0:\n raise Exception(\"File not found\")\n return r\n\n\ndef set_func(pdf, value, where, output_col, parser, default=None):\n \"\"\"\n Core implementation of the set function\n :param pdf:\n :param value:\n :param where:\n :param output_col:\n :param parser:\n :param default:\n :return:\n \"\"\"\n\n col_names = list(filter(lambda x: x != \"__match__\", pdf.cols.names()))\n\n profiler_dtype_to_python = {\"decimal\": \"float\", \"int\": \"int\", \"string\": \"str\", \"datetime\": \"datetime\",\n \"bool\":\"bool\"}\n df = pdf.cols.cast(col_names, profiler_dtype_to_python[parser])\n try:\n if where is None:\n return eval(value)\n else:\n # Reference https://stackoverflow.com/questions/33769860/pandas-apply-but-only-for-rows-where-a-condition-is-met\n mask = (eval(where))\n if (output_col not in pdf.cols.names()) and (default is not None):\n pdf[output_col] = pdf[default]\n\n pdf.loc[mask, output_col] = eval(value)\n return pdf[output_col]\n\n except (ValueError, TypeError) as e:\n logger.print(e)\n\n # raise\n return np.nan\n\n\ndef set_function_parser(df, value, where, default=None):\n \"\"\"\n We infer the data type that must be used to make a calculation using the set function\n :param df:\n :param value:\n :param where:\n :return:\n \"\"\"\n value = str(value)\n where = str(where)\n\n def prepare_columns(cols):\n \"\"\"\n Extract the columns names from the value and where clauses\n :param cols:\n :return:\n \"\"\"\n if cols is not None:\n r = val_to_list([f_col[1:len(f_col) - 1] for f_col in\n re.findall(r\"(df\\['[A-Za-z0-9_ -]*'\\])\", cols.replace(\"\\\"\", \"'\"))])\n a = [re.findall(r\"'([^']*)'\", i)[0] for i in r]\n\n else:\n a = []\n return a\n\n if default is None:\n default = []\n\n # if default is in\n columns = prepare_columns(value) + prepare_columns(where) + val_to_list(default)\n columns = list(set(columns))\n if columns:\n first_columns = columns[0]\n column_dtype = df.cols.infer_profiler_dtypes(first_columns)[first_columns][\"dtype\"]\n\n else:\n if fastnumbers.fast_int(value):\n column_dtype = \"int\"\n elif fastnumbers.fast_float(value):\n column_dtype = \"decimal\"\n else:\n column_dtype = \"string\"\n\n # if column_dtype in PROFILER_NUMERIC_DTYPES:\n # func = lambda x: fastnumbers.fast_float(x) if x is not None else None\n # elif column_dtype in PROFILER_STRING_DTYPES or column_dtype is None:\n # func = lambda x: str(x) if not pd.isnull(x) else None\n\n return columns, column_dtype\n\n\n# value = \"dd/MM/yyyy hh:mm:ss-sss MA\"\ndef match_date(value):\n \"\"\"\n Returns Create a regex from a string with a date format\n :param value:\n :return:\n \"\"\"\n formats = [\"d\", \"dd\", \"M\", \"MM\", \"yy\", \"yyyy\", \"h\", \"hh\", \"H\", \"HH\", \"kk\", \"k\", \"m\", \"mm\", \"s\", \"ss\", \"sss\", \"/\",\n \":\", 
\"-\", \" \", \"+\", \"|\", \"mi\"]\n formats.sort(key=len, reverse=True)\n\n result = []\n\n start = 0\n\n end = len(value)\n found = False\n\n while start < end:\n found = False\n for f in formats:\n if value.startswith(f, start):\n start = start + len(f)\n result.append(f)\n found = True\n break\n if found is False:\n raise ValueError('{} is not a valid date format'.format(value[start]))\n\n exprs = []\n for f in result:\n # Separators\n if f in [\"/\", \":\", \"-\", \" \", \"|\", \"+\", \" \"]:\n exprs.append(\"\\\\\" + f)\n # elif f == \":\":\n # exprs.append(\"\\\\:\")\n # elif f == \"-\":\n # exprs.append(\"\\\\-\")\n # elif f == \" \":\n # exprs.append(\" \")\n # elif f == \"|\":\n # exprs.append(\"\\\\|\")\n # elif f == \"+\":\n # exprs.append(\"\\\\+\")\n\n # Day\n # d -> 1 ... 31\n # dd -> 01 ... 31\n\n elif f == \"d\":\n exprs.append(\"(3[01]|[12][0-9]|0?[1-9])\")\n elif f == \"dd\":\n exprs.append(\"(3[01]|[12][0-9]|0[1-9])\")\n\n # Month\n # M -> 1 ... 12\n # MM -> 01 ... 12\n elif f == \"M\":\n exprs.append(\"(1[0-2]|0?[1-9])\")\n elif f == \"MM\":\n exprs.append(\"(1[0-2]|0[1-9])\")\n\n # Year\n # yy -> 00 ... 99\n # yyyy -> 0000 ... 9999\n elif f == \"yy\":\n exprs.append(\"[0-9]{2}\")\n elif f == \"yyyy\":\n exprs.append(\"[0-9]{4}\")\n\n # Hours\n # h -> 1,2 ... 12\n # hh -> 01,02 ... 12\n # H -> 0,1 ... 23\n # HH -> 00,01 ... 23\n # k -> 1,2 ... 24\n # kk -> 01,02 ... 24\n elif f == \"h\":\n exprs.append(\"(1[0-2]|0?[1-9])\")\n elif f == \"hh\":\n exprs.append(\"(1[0-2]|0[1-9])\")\n elif f == \"H\":\n exprs.append(\"(0?[0-9]|1[0-9]|2[0-3]|[0-9])\")\n elif f == \"HH\":\n exprs.append(\"(0[0-9]|1[0-9]|2[0-3]|[0-9])\")\n elif f == \"k\":\n exprs.append(\"(0?[1-9]|1[0-9]|2[0-4]|[1-9])\")\n elif f == \"kk\":\n exprs.append(\"(0[1-9]|1[0-9]|2[0-4])\")\n\n # Minutes\n # m -> 0 ... 59\n # mm -> 00 .. 59\n elif f == \"m\":\n exprs.append(\"[1-5]?[0-9]\")\n elif f == \"mm\":\n exprs.append(\"[0-5][0-9]\")\n\n # Seconds\n # s -> 0 ... 59\n # ss -> 00 .. 59\n elif f == \"s\":\n exprs.append(\"[1-5]?[0-9]\")\n elif f == \"ss\":\n exprs.append(\"[0-5][0-9]\")\n\n # Milliseconds\n # sss -> 0 ... 999\n elif f == \"sss\":\n exprs.append(\"[0-9]{3}\")\n\n # Extras\n # mi -> Meridian indicator (AM am Am) (PM pm Pm) (m M)\n elif f == \"mi\":\n exprs.append(\"([AaPp][Mm]|[Mm]).?\")\n\n return \"\".join(exprs)\n\n\n# print(\"^\" + match_date(value) + \"$\")\n\ndef ipython_vars(globals_vars, dtype=None):\n \"\"\"\n Return the list of data frames depending on the type\n :param globals_vars: globals() from the notebook\n :param dtype: 'pandas', 'cudf', 'dask' or 'dask_cudf'\n :return:\n \"\"\"\n tmp = globals_vars.copy()\n vars = [(k, v, type(v)) for k, v in tmp.items() if\n not k.startswith('_') and k != 'tmp' and k != 'In' and k != 'Out' and not hasattr(v, '__call__')]\n\n if dtype == \"dask_cudf\":\n from dask_cudf.core import DataFrame as DaskCUDFDataFrame\n _dtype = DaskCUDFDataFrame\n elif dtype == \"cudf\":\n from cudf.core import DataFrame as CUDFDataFrame\n _dtype = CUDFDataFrame\n elif dtype == \"dask\":\n from dask.dataframe.core import DataFrame\n _dtype = DataFrame\n elif dtype == \"pandas\":\n import pandas as pd\n PandasDataFrame = pd.DataFrame\n _dtype = PandasDataFrame\n\n return [name for name, instance, aa in vars if is_(instance, _dtype)]\n\n\n# Taken from https://github.com/Kemaweyan/singleton_decorator/\nclass _SingletonWrapper:\n \"\"\"\n A singleton wrapper class. 
Its instances would be created\n for each decorated class.\n \"\"\"\n\n def __init__(self, cls):\n self.__wrapped__ = cls\n self._instance = None\n\n def __call__(self, *args, **kwargs):\n \"\"\"Returns a single instance of decorated class\"\"\"\n if self._instance is None:\n self._instance = self.__wrapped__(*args, **kwargs)\n return self._instance\n\n\ndef singleton(cls):\n \"\"\"\n A singleton decorator. Returns a wrapper objects. A call on that object\n returns a single instance object of decorated class. Use the __wrapped__\n attribute to access decorated class directly in unit tests\n \"\"\"\n return _SingletonWrapper(cls)\n"
] |
[
[
"numpy.iinfo"
]
] |
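`reduce_mem_usage` in the Optimus record above chooses the narrowest integer dtype whose range covers a column's observed min/max, using `np.iinfo` (the listed API) for the signed bounds. The helper below is a simplified sketch of that selection rule, using `np.iinfo` bounds for both the signed and unsigned cases instead of the hard-coded limits in the original; the helper name and sample values are made up for illustration.

```python
# Pick the smallest (u)int dtype whose np.iinfo bounds contain the observed range,
# mirroring the down-casting rule in reduce_mem_usage above (simplified sketch).
import numpy as np

def smallest_int_dtype(_min, _max):
    if _min >= 0:
        for dtype in (np.uint8, np.uint16, np.uint32, np.uint64):
            if _max <= np.iinfo(dtype).max:
                return dtype
    else:
        for dtype in (np.int8, np.int16, np.int32, np.int64):
            if np.iinfo(dtype).min <= _min and _max <= np.iinfo(dtype).max:
                return dtype
    return np.int64

values = np.array([-120, 0, 30000])
print(smallest_int_dtype(values.min(), values.max()))   # <class 'numpy.int16'>
values = values.astype(smallest_int_dtype(values.min(), values.max()))
```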
alec-tschantz/action-oriented
|
[
"910b436ee2e90a9d3caeb70cb7f44b35011b6c65"
] |
[
"src/core/trials.py"
] |
[
"import numpy as np\nfrom core.env import Environment\nfrom core.config import *\n\n\ndef learn_trial(mdp, n_steps, record_states=False):\n env = Environment()\n obv = env.observe()\n mdp.reset(obv)\n states = np.zeros([N_CONTROL, N_STATES, N_STATES])\n\n for step in range(n_steps):\n prev_obv = obv\n action = mdp.step(obv)\n obv = env.act(action)\n mdp.update(action, obv, prev_obv)\n if record_states:\n states[action, obv, prev_obv] += 1\n\n if record_states:\n return mdp, states\n return mdp\n\n\ndef test_distance(mdp, steps):\n env = Environment()\n obv = env.observe()\n mdp.reset(obv)\n\n for _ in range(steps):\n action = mdp.step(obv)\n obv = env.act(action)\n\n return (env.distance() - env.source_size) + 1\n\n\ndef test_passive_accuracy(mdp, n_steps):\n env = Environment()\n obv = env.observe()\n mdp.reset(obv)\n acc = 0\n\n for _ in range(n_steps):\n random_action = np.random.choice([0, 1])\n pred, t_pred = mdp.predict_obv(random_action, obv)\n _ = mdp.step(obv)\n obv = env.act(random_action)\n acc += diff(t_pred, pred)\n\n return acc\n\n\ndef test_active_accuracy(mdp, n_steps):\n env = Environment()\n obv = env.observe()\n mdp.reset(obv)\n acc = 0\n\n for _ in range(n_steps):\n action = mdp.step(obv)\n pred, t_pred = mdp.predict_obv(action, obv)\n acc += diff(t_pred, pred)\n obv = env.act(action)\n\n return acc\n\n\ndef diff(p, q):\n return np.mean(np.square(p - q))\n"
] |
[
[
"numpy.square",
"numpy.zeros",
"numpy.random.choice"
]
] |
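The `learn_trial` / `test_passive_accuracy` functions in the record above accumulate transition counts in a zero-initialised `(action, observation, previous observation)` tensor, draw random binary actions with `np.random.choice`, and score predictions with a mean-squared `diff`. The snippet below reproduces just that bookkeeping against a one-line fake environment, which is an assumption for illustration only.

```python
# Stand-alone sketch of the counting and scoring pattern in learn_trial /
# test_passive_accuracy above; the fake environment replaces core.env.Environment.
import numpy as np

N_CONTROL, N_STATES = 2, 3

def diff(p, q):
    # mean squared difference between predicted and "true" observation vectors
    return np.mean(np.square(p - q))

def fake_env_step():
    return np.random.choice(N_STATES)   # stand-in for Environment().act(action)

states = np.zeros([N_CONTROL, N_STATES, N_STATES])

obv = fake_env_step()
for _ in range(100):
    prev_obv = obv
    action = np.random.choice([0, 1])   # random policy, as in the passive test
    obv = fake_env_step()
    states[action, obv, prev_obv] += 1  # record the (action, new, old) transition

print(states.sum())                                        # 100.0
print(diff(np.array([0.2, 0.8]), np.array([0.5, 0.5])))    # ≈ 0.09
```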
kaikun213/fonduer
|
[
"79434aaed22c730ea043fe0909e88e0e39a4b8e5"
] |
[
"src/fonduer/utils/utils_udf.py"
] |
[
"import logging\nfrom typing import (\n Any,\n Callable,\n Dict,\n Iterable,\n Iterator,\n List,\n Optional,\n Sequence,\n Set,\n Tuple,\n Type,\n Union,\n)\n\nimport numpy as np\nfrom scipy.sparse import csr_matrix\nfrom sqlalchemy import String, Table\nfrom sqlalchemy.dialects.postgresql import ARRAY, insert\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy.sql.expression import cast\n\nfrom fonduer.candidates.models import Candidate\nfrom fonduer.parser.models import Document\n\nlogger = logging.getLogger(__name__)\n\n# Flag value to signal that no filtering on split should be applied. Not an\n# integer to ensure that it won't conflict with a user's split value.\nALL_SPLITS = \"ALL\"\n\n\ndef _get_cand_values(candidate: Candidate, key_table: Table) -> List:\n \"\"\"Get the corresponding values for the key_table.\"\"\"\n # NOTE: Import just before checking to avoid circular imports.\n from fonduer.features.models import FeatureKey\n from fonduer.supervision.models import GoldLabelKey, LabelKey\n\n if key_table == FeatureKey:\n return candidate.features\n elif key_table == LabelKey:\n return candidate.labels\n elif key_table == GoldLabelKey:\n return candidate.gold_labels\n else:\n raise ValueError(f\"{key_table} is not a valid key table.\")\n\n\ndef _batch_postgres_query(\n table: Table, records: List[Dict[str, Any]]\n) -> Iterator[List[Dict[str, Any]]]:\n \"\"\"Break the list into chunks that can be processed as a single statement.\n\n Postgres query cannot be too long or it will fail.\n See: https://dba.stackexchange.com/questions/131399/is-there-a-maximum-\n length-constraint-for-a-postgres-query\n\n :param records: The full list of records to batch.\n :type records: iterable\n :param table: The sqlalchemy table.\n :return: A generator of lists of records.\n \"\"\"\n if not records:\n return\n\n POSTGRESQL_MAX = 0x3FFFFFFF\n\n # Create preamble and measure its length\n preamble = (\n \"INSERT INTO \"\n + table.__tablename__\n + \" (\"\n + \", \".join(records[0].keys())\n + \") VALUES (\"\n + \", \".join([\"?\"] * len(records[0].keys()))\n + \")\\n\"\n )\n start = 0\n end = 0\n total_len = len(preamble)\n while end < len(records):\n record_len = sum([len(str(v)) for v in records[end].values()])\n\n # Pre-increment to include the end element in the slice\n end += 1\n\n if total_len + record_len >= POSTGRESQL_MAX:\n logger.debug(f\"Splitting query due to length ({total_len} chars).\")\n yield records[start:end]\n start = end\n # Reset the total query length\n total_len = len(preamble)\n else:\n total_len += record_len\n\n yield records[start:end]\n\n\ndef get_sparse_matrix_keys(session: Session, key_table: Table) -> List:\n \"\"\"Return a list of keys for the sparse matrix.\"\"\"\n return session.query(key_table).order_by(key_table.name).all()\n\n\ndef batch_upsert_records(\n session: Session, table: Table, records: List[Dict[str, Any]]\n) -> None:\n \"\"\"Batch upsert records into postgresql database.\"\"\"\n if not records:\n return\n for record_batch in _batch_postgres_query(table, records):\n stmt = insert(table.__table__)\n stmt = stmt.on_conflict_do_update(\n constraint=table.__table__.primary_key,\n set_={\n \"keys\": stmt.excluded.get(\"keys\"),\n \"values\": stmt.excluded.get(\"values\"),\n },\n )\n session.execute(stmt, record_batch)\n session.commit()\n\n\ndef get_sparse_matrix(\n session: Session,\n key_table: Table,\n cand_lists: Union[Sequence[Candidate], Iterable[Sequence[Candidate]]],\n key: Optional[str] = None,\n) -> List[csr_matrix]:\n \"\"\"Load sparse matrix of 
GoldLabels for each candidate_class.\"\"\"\n result = []\n cand_lists = cand_lists if isinstance(cand_lists, (list, tuple)) else [cand_lists]\n\n for cand_list in cand_lists:\n if len(cand_list) == 0:\n raise ValueError(\"cand_lists contain empty cand_list.\")\n candidate_class = cand_list[0].__tablename__\n\n # Keys are used as a global index\n if key:\n keys_map = {key: 0}\n key_size = len(keys_map)\n else:\n all_keys = get_sparse_matrix_keys(session, key_table)\n key_size = len(all_keys)\n keys_map = {}\n for (i, k) in enumerate(all_keys):\n if candidate_class in k.candidate_classes:\n keys_map[k.name] = i\n\n indptr = [0]\n indices = []\n data = []\n for cand in cand_list:\n values = _get_cand_values(cand, key_table)\n if values:\n for cand_key, cand_value in zip(values[0].keys, values[0].values):\n if cand_key in keys_map:\n indices.append(keys_map[cand_key])\n data.append(cand_value)\n\n indptr.append(len(indices))\n\n result.append(\n csr_matrix((data, indices, indptr), shape=(len(cand_list), key_size))\n )\n\n return result\n\n\ndef unshift_label_matrix(L_sparse: csr_matrix) -> np.ndarray:\n \"\"\"Unshift a sparse label matrix (ABSTAIN as 0) to a dense one (ABSTAIN as -1).\"\"\"\n return L_sparse.toarray() - 1\n\n\ndef shift_label_matrix(L: np.ndarray) -> csr_matrix:\n \"\"\"Shift a dense label matrix (ABSTAIN as -1) to a sparse one (ABSTAIN as 0).\"\"\"\n return csr_matrix(L + 1)\n\n\ndef get_docs_from_split(\n session: Session, candidate_classes: Iterable[Type[Candidate]], split: int\n) -> Set[Document]:\n \"\"\"Return a list of documents that contain the candidates in the split.\"\"\"\n # Only grab the docs containing candidates from the given split.\n sub_query = session.query(Candidate.id).filter(Candidate.split == split).subquery()\n split_docs: Set[Document] = set()\n for candidate_class in candidate_classes:\n split_docs.update(\n cand.document\n for cand in session.query(candidate_class)\n .filter(candidate_class.id.in_(sub_query))\n .all()\n )\n return split_docs\n\n\ndef get_mapping(\n table: Table,\n candidates: Iterable[Candidate],\n generator: Callable[[Candidate], Iterator[Tuple]],\n) -> Iterator[Dict[str, Any]]:\n \"\"\"Generate map of keys and values for the candidate from the generator.\n\n :param table: The table we will be inserting into (i.e. Feature or Label).\n :param candidates: The candidates to get mappings for.\n :param generator: A generator yielding (candidate_id, key, value) tuples.\n :return: Generator of dictionaries of {\"candidate_id\": _, \"keys\": _, \"values\": _}\n :rtype: generator of dict\n \"\"\"\n for cand in candidates:\n # Grab the old values\n if len(getattr(cand, table.__tablename__ + \"s\")) != 0:\n temp = getattr(cand, table.__tablename__ + \"s\")[0]\n cand_map = dict(zip(temp.keys, temp.values))\n else:\n cand_map = {}\n\n for cid, key, value in generator(cand):\n if value == 0:\n # Make sure this key does not exist in cand_map\n cand_map.pop(key, None)\n continue\n cand_map[key] = value\n\n # Assemble label arguments\n yield {\n \"candidate_id\": cand.id,\n \"keys\": [*cand_map.keys()],\n \"values\": [*cand_map.values()],\n }\n\n\ndef drop_all_keys(\n session: Session, key_table: Table, candidate_classes: Iterable[Type[Candidate]]\n) -> None:\n \"\"\"Bulk drop annotation keys for all the candidate_classes in the table.\n\n Rather than directly dropping the keys, this removes the candidate_classes\n specified for the given keys only. 
If all candidate_classes are removed for\n a key, the key is dropped.\n\n :param key_table: The sqlalchemy class to insert into.\n :param candidate_classes: A list of candidate classes to drop.\n \"\"\"\n if not candidate_classes:\n return\n\n set_of_candidate_classes: Set[str] = set(\n [c.__tablename__ for c in candidate_classes]\n )\n\n # Select all rows that contain ANY of the candidate_classes\n all_rows = (\n session.query(key_table)\n .filter(\n key_table.candidate_classes.overlap(\n cast(set_of_candidate_classes, ARRAY(String))\n )\n )\n .all()\n )\n to_delete = set()\n to_update = []\n\n # All candidate classes will be the same for all keys, so just look at one\n for row in all_rows:\n # Remove the selected candidate_classes. If empty, mark for deletion.\n row.candidate_classes = list(\n set(row.candidate_classes) - set_of_candidate_classes\n )\n if len(row.candidate_classes) == 0:\n to_delete.add(row.name)\n else:\n to_update.append(\n {\"name\": row.name, \"candidate_classes\": row.candidate_classes}\n )\n\n # Perform all deletes\n if to_delete:\n query = session.query(key_table).filter(key_table.name.in_(to_delete))\n query.delete(synchronize_session=\"fetch\")\n\n # Perform all updates\n if to_update:\n for batch in _batch_postgres_query(key_table, to_update):\n stmt = insert(key_table.__table__)\n stmt = stmt.on_conflict_do_update(\n constraint=key_table.__table__.primary_key,\n set_={\n \"name\": stmt.excluded.get(\"name\"),\n \"candidate_classes\": stmt.excluded.get(\"candidate_classes\"),\n },\n )\n session.execute(stmt, batch)\n session.commit()\n\n\ndef drop_keys(session: Session, key_table: Table, keys: Dict) -> None:\n \"\"\"Bulk drop annotation keys to the specified table.\n\n Rather than directly dropping the keys, this removes the candidate_classes\n specified for the given keys only. If all candidate_classes are removed for\n a key, the key is dropped.\n\n :param key_table: The sqlalchemy class to insert into.\n :param keys: A map of {name: [candidate_classes]}.\n \"\"\"\n # Do nothing if empty\n if not keys:\n return\n\n for key_batch in _batch_postgres_query(\n key_table, [{\"name\": k[0], \"candidate_classes\": k[1]} for k in keys.items()]\n ):\n all_rows = (\n session.query(key_table)\n .filter(key_table.name.in_([key[\"name\"] for key in key_batch]))\n .all()\n )\n\n to_delete = set()\n to_update = []\n\n # All candidate classes will be the same for all keys, so just look at one\n candidate_classes = key_batch[0][\"candidate_classes\"]\n for row in all_rows:\n # Remove the selected candidate_classes. 
If empty, mark for deletion.\n row.candidate_classes = list(\n set(row.candidate_classes) - set(candidate_classes)\n )\n if len(row.candidate_classes) == 0:\n to_delete.add(row.name)\n else:\n to_update.append(\n {\"name\": row.name, \"candidate_classes\": row.candidate_classes}\n )\n\n # Perform all deletes\n if to_delete:\n query = session.query(key_table).filter(key_table.name.in_(to_delete))\n query.delete(synchronize_session=\"fetch\")\n\n # Perform all updates\n if to_update:\n stmt = insert(key_table.__table__)\n stmt = stmt.on_conflict_do_update(\n constraint=key_table.__table__.primary_key,\n set_={\n \"name\": stmt.excluded.get(\"name\"),\n \"candidate_classes\": stmt.excluded.get(\"candidate_classes\"),\n },\n )\n session.execute(stmt, to_update)\n session.commit()\n\n\ndef upsert_keys(session: Session, key_table: Table, keys: Dict) -> None:\n \"\"\"Bulk add annotation keys to the specified table.\n\n :param key_table: The sqlalchemy class to insert into.\n :param keys: A map of {name: [candidate_classes]}.\n \"\"\"\n # Do nothing if empty\n if not keys:\n return\n\n for key_batch in _batch_postgres_query(\n key_table, [{\"name\": k[0], \"candidate_classes\": k[1]} for k in keys.items()]\n ):\n stmt = insert(key_table.__table__)\n stmt = stmt.on_conflict_do_update(\n constraint=key_table.__table__.primary_key,\n set_={\n \"name\": stmt.excluded.get(\"name\"),\n \"candidate_classes\": stmt.excluded.get(\"candidate_classes\"),\n },\n )\n while True:\n try:\n session.execute(stmt, key_batch)\n session.commit()\n break\n except Exception as e:\n logger.debug(e)\n"
] |
[
[
"scipy.sparse.csr_matrix"
]
] |
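`get_sparse_matrix` in the Fonduer record above builds one `scipy.sparse.csr_matrix` per candidate class from the `(data, indices, indptr)` triple, appending a new `indptr` boundary after every candidate. A self-contained sketch of that construction with made-up key and candidate names:

```python
# How a CSR matrix is assembled from per-candidate (key, value) pairs,
# mirroring get_sparse_matrix above. Keys and candidates are illustrative only.
from scipy.sparse import csr_matrix

keys_map = {"key_a": 0, "key_b": 1, "key_c": 2}          # global key -> column index
candidates = [
    [("key_a", 1.0), ("key_c", 2.0)],                    # candidate 0
    [],                                                  # candidate 1 has no features
    [("key_b", 3.0)],                                    # candidate 2
]

indptr, indices, data = [0], [], []
for cand in candidates:
    for key, value in cand:
        if key in keys_map:
            indices.append(keys_map[key])
            data.append(value)
    indptr.append(len(indices))                          # row boundary after each candidate

F = csr_matrix((data, indices, indptr), shape=(len(candidates), len(keys_map)))
print(F.toarray())
# [[1. 0. 2.]
#  [0. 0. 0.]
#  [0. 3. 0.]]
```

Candidates without stored values produce empty rows because their consecutive `indptr` entries are equal.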
afcarl/trees_ensemble
|
[
"7994798337c7aa54a80a4cfe9d2c05afe3a2f430"
] |
[
"ensemble.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 4 13:01:02 2016\n\n@author: rob\n\"\"\"\nimport numpy as np\nfrom sklearn.metrics import confusion_matrix\nacc_weights = np.array([0.964, 0, 0.958, 0, 0.964, 0])\ntest_weights = np.array([0.964, 0.958, 0.964])\n\n#Load the assemble dataset\ndata_ass = np.genfromtxt('assembling_data.csv',delimiter = ',',skip_header=1)\ny_ass = data_ass[:,1]\n\n\"\"\"Save all the individual logits in a dictionary\"\"\"\n# Note that every load-statement has different settings with respect to starting\n# columns and rows. Adapt this to your need\nlogits = {}\nlogits_test = {}\nlogits['logits_1'] = np.genfromtxt('logits_nn_1hidden6apr.csv')\nlogits['logits_2'] = np.genfromtxt('Probabilities_Classes_LassoR1.csv',delimiter = ',',skip_header=1)[:,1:]\nlogits['logits_3'] = np.genfromtxt('predicted_probabilities_random_forest.csv',delimiter = ',',skip_header=1)[:,1:]\nlogits['logits_4'] = np.genfromtxt('predicted_probabilities_boosting.csv',delimiter = ',',skip_header=1)[:,1:]\nlogits['logits_5'] = np.genfromtxt('predicted_probabilities_bagging.csv',delimiter = ',',skip_header=1)[:,1:]\nlogits['logits_6'] = np.genfromtxt('Probabilities_Classes_RidgeR.csv',delimiter = ',',skip_header=1)[:,1:]\nlogits_test['logits_1'] = np.genfromtxt('logits_nn_1hidden_test8april.csv')\nlogits_test['logits_2'] = np.genfromtxt('random_forest_prediction_test2.csv',delimiter = ',',skip_header=1)[:,1:]\nlogits_test['logits_3'] = np.genfromtxt('bagging_probabilities_modelingTest.csv',delimiter = ',',skip_header=1)[:,1:]\nNtest = logits_test['logits_1'].shape[0]\n\n#Expected sizes\nD = 7\nN = 3000\n#Check these expected sizes\nassert logits['logits_1'].shape == (N,D), 'Wrong size of logits_1'\nassert logits['logits_2'].shape == (N,D), 'Wrong size of logits_2'\nassert logits['logits_3'].shape == (N,D), 'Wrong size of logits_3'\nassert logits['logits_4'].shape == (N,D), 'Wrong size of logits_4'\nassert logits['logits_5'].shape == (N,D), 'Wrong size of logits_5'\n\n#Perform weighted sum over individual logits\nlogits_weighted_sum = np.zeros((N,D))\nfor n in xrange(len(acc_weights)):\n logits_weighted_sum += acc_weights[n]*logits['logits_'+str(n+1)]\nlogits_weighted_sum /= np.sum(acc_weights)\n\n#Perform weighted sum over individual logits over testset\nlogits_test_sum = np.zeros((Ntest,D))\nfor n in xrange(len(test_weights)):\n logits_test_sum += test_weights[n]*logits_test['logits_'+str(n+1)]\nlogits_test_sum /= np.sum(test_weights)\n\n#Make predictions\npred = {}\nacc = {}\nconf = {}\nytrue = np.expand_dims(y_ass,axis=1)\nfor n in xrange(len(acc_weights)):\n logits_n = logits['logits_'+str(n+1)]\n pp = np.argmax(logits_n,axis=1)\n pred['classifier_'+str(n+1)] = pp\n ypp = np.expand_dims(pp,axis=1)\n print('Confusion matrix for classifier %s'%(n+1))\n print(confusion_matrix(ytrue,ypp))\n #Save the accuracy for later printing\n acc['classifier_'+str(n+1)] = np.mean(ytrue==ypp)\n #Calculate the average confidence at the falsely classified samples\n ind_false = np.where(ytrue!=ypp)\n ind_false = ind_false[0]\n class_false = np.squeeze(ytrue[ind_false]).astype(int)\n conf_false = logits_n[ind_false,class_false]\n conf['classifier_'+str(n+1)] = np.mean(conf_false)\n\n#Print the accuracies\nfor n in xrange(len(acc_weights)):\n print('Accuracy for classifier %s is %.3f'%(n+1,acc['classifier_'+str(n+1)]))\n \n#Print the confidences\nfor n in xrange(len(acc_weights)):\n print('Average confidence at misclassified samples for classifier %s is %.3f'%(n+1,conf['classifier_'+str(n+1)]))\n \n\n#Check if 
the weighted sum makes sense\nassert np.linalg.norm(np.sum(logits_weighted_sum,axis=1)-1) < 0.001,'The weighted sum seems not to result in a probability distribution'\n\nensemble_pred = np.argmax(logits_weighted_sum,axis=1)\nensemble_pred = np.expand_dims(ensemble_pred,axis=1)\nacc_ens = np.mean(ensemble_pred == ytrue)\nassert len(ensemble_pred) == N, 'Something in the sizes of argmax faulted'\nprint('Ensemble accuracy is %.3f'%(acc_ens))\n\n\n#Make predictions on the testset\ntest_pred = np.argmax(logits_test_sum,axis=1)\ntest_pred = np.expand_dims(test_pred,axis=1)\ntest_pred = np.concatenate((np.expand_dims(np.arange(1,20001,1),axis=1),test_pred),axis=1)\n\n# Check the consistency of the different classifiers in the ensemble\npred_ens = {}\nfor n in xrange(len(test_weights)):\n logits_n = logits_test['logits_'+str(n+1)]\n pp = np.argmax(logits_n,axis=1)\n pred_ens['classifier_'+str(n+1)] = pp\n\nconsis1 = (pred_ens['classifier_1'] == pred_ens['classifier_2'])\nconsis2 = (pred_ens['classifier_1'] == pred_ens['classifier_3'])\nconsis3 = (pred_ens['classifier_2'] == pred_ens['classifier_3'])\nconsis = consis1 & consis2\nconsis = np.mean(consis)\nprint('\\n')\nprint('Consistency 1 & 2 %.3f'%(np.mean(consis1)))\nprint('Consistency 1 & 2 %.3f'%(np.mean(consis2)))\nprint('Consistency 3 & 2 %.3f'%(np.mean(consis3)))\nprint('The three classifiers are consistent on %.3f'%(consis))\n\n\n# Save the predictions for the testset\nnp.savetxt('prediction_24.csv',test_pred)"
] |
[
[
"numpy.expand_dims",
"numpy.arange",
"numpy.squeeze",
"sklearn.metrics.confusion_matrix",
"numpy.genfromtxt",
"numpy.argmax",
"numpy.mean",
"numpy.where",
"numpy.savetxt",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] |
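`ensemble.py` above combines per-classifier probability matrices with accuracy-based weights, renormalises by the weight sum, checks that each row is still a probability distribution, and takes the row-wise argmax as the ensemble prediction. A toy-sized sketch of that soft-voting step follows; the logits and labels below are illustrative, not the competition files.

```python
# Weighted soft voting over classifier probabilities, as in ensemble.py above.
import numpy as np

weights = np.array([0.964, 0.958, 0.964])                   # per-classifier weights
logits = [                                                   # each: (N=2 samples, D=3 classes)
    np.array([[0.7, 0.2, 0.1], [0.1, 0.3, 0.6]]),
    np.array([[0.6, 0.3, 0.1], [0.2, 0.2, 0.6]]),
    np.array([[0.5, 0.4, 0.1], [0.1, 0.1, 0.8]]),
]

weighted = np.zeros_like(logits[0])
for w, lg in zip(weights, logits):
    weighted += w * lg
weighted /= np.sum(weights)                                  # rows stay probability distributions

assert np.linalg.norm(np.sum(weighted, axis=1) - 1) < 1e-6
pred = np.argmax(weighted, axis=1)                           # -> array([0, 2])
y_true = np.array([0, 2])
print("accuracy %.3f" % np.mean(pred == y_true))             # accuracy 1.000
```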
lacava/PS-Tree
|
[
"c9e2d93944429e4902709226651fceec84913dd1"
] |
[
"pstree/cluster_gp_sklearn.py"
] |
[
"import builtins\nimport operator\nimport sys\nimport time\nimport traceback\nimport warnings\nfrom collections import deque, defaultdict\nfrom itertools import compress\n\nimport numpy\nimport pyximport\nfrom deap.gp import Terminal\nfrom deap.tools import selNSGA2, selRandom, selSPEA2, selLexicase, selNSGA3\nfrom icecream import ic\nfrom scipy.stats import pearsonr, PearsonRConstantInputWarning, PearsonRNearConstantInputWarning\nfrom sklearn.cluster import KMeans\nfrom sklearn.dummy import DummyRegressor\nfrom sklearn.ensemble import BaggingRegressor, RandomForestClassifier\nfrom sklearn.linear_model import LinearRegression, LassoCV, LogisticRegression\nfrom sklearn.linear_model import RidgeCV, ElasticNetCV\nfrom sklearn.linear_model._coordinate_descent import _alpha_grid\nfrom sklearn.mixture import BayesianGaussianMixture\nfrom sklearn.model_selection import cross_validate\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.svm import LinearSVR\nfrom sympy import parse_expr, Piecewise, srepr\n\nfrom .gp_visualization_utils import multigene_gp_to_string\nfrom . import cluster_gp_tools\nfrom .common_utils import gene_to_string\nfrom .custom_sklearn_tools import LassoRidge, RFERegressor\nfrom .multigene_gp import *\n\nwarnings.simplefilter(\"ignore\", category=PearsonRConstantInputWarning)\nwarnings.simplefilter(\"ignore\", category=PearsonRNearConstantInputWarning)\n# warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n\npyximport.install(setup_args={\"include_dirs\": numpy.get_include()})\n\nfrom deap import creator, base, tools, gp\nfrom deap.algorithms import varAnd\nfrom deap.base import Fitness\nfrom sklearn.base import BaseEstimator, RegressorMixin, TransformerMixin\nfrom sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier, _tree\n\nfrom .cluster_gp_tools import add_pset_function, selAutomaticEpsilonLexicase, \\\n selBestSum, selMOEAD, selIBEA, c_deepcopy\nfrom .gp_function import *\nfrom glmnet import ElasticNet\n\n\nclass FeatureTransformer(TransformerMixin, BaseEstimator):\n def __init__(self, compiled_individuals):\n self.compiled_individuals = compiled_individuals\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, copy=None):\n all_features = []\n for func in self.compiled_individuals:\n yp = func(*X.T)\n if not isinstance(yp, np.ndarray) or yp.size == 1:\n yp = np.full(len(X), yp)\n all_features.append(np.squeeze(yp).reshape(-1, 1))\n all_features = np.concatenate(all_features, axis=1)\n return all_features\n\n\ndef train_normalization(func):\n def call(self, X, y, *param, **dict_param):\n if self.normalize:\n X = self.x_scaler.fit_transform(X)\n y = self.y_scaler.fit_transform(y.reshape(-1, 1)).squeeze()\n result = func(self, X, y, *param, **dict_param)\n return result\n\n return call\n\n\ndef get_labels(tree, X, soft_tree=False):\n if isinstance(tree, DecisionTreeClassifier) or isinstance(tree, KMeans) or \\\n isinstance(tree, BayesianGaussianMixture) or isinstance(tree, GaussianNB) \\\n or isinstance(tree, RandomForestClassifier) or isinstance(tree, LogisticRegression):\n if soft_tree:\n if hasattr(tree, 'predict_proba'):\n tree.labels_ = tree.predict_proba(X)\n else:\n tree.labels_ = tree.predict(X).astype(int)\n else:\n tree.labels_ = tree.predict(X).astype(int)\n elif isinstance(tree, DecisionTreeRegressor) or isinstance(tree, PseudoPartition):\n tree.labels_ = tree.apply(X)\n else:\n print(type(tree))\n raise Exception\n return 
tree.labels_\n\n\ndef predict_normalization(func):\n def call(self, X, y=None, *param, **dict_param):\n if self.normalize:\n X = self.x_scaler.transform(X)\n y_predict = func(self, X, y, *param, **dict_param)\n if self.normalize:\n y_predict = self.y_scaler.inverse_transform(y_predict)\n return y_predict\n\n return call\n\n\nclass NormalizationRegressor(BaseEstimator, RegressorMixin):\n def __init__(self, normalize=True, **params):\n self.normalize = normalize\n if normalize:\n self.x_scaler = StandardScaler()\n self.y_scaler = StandardScaler()\n\n\ndef represents_int(s):\n try:\n int(s)\n return True\n except ValueError:\n return False\n\n\nclass FastMeasure(Fitness):\n def __init__(self, values=()):\n super().__init__(values)\n self._values = None\n\n def getValues(self):\n if self._values is None:\n self._values = tuple(map(operator.truediv, self.wvalues, self.weights))\n return self._values\n\n def setValues(self, values):\n try:\n self.wvalues = tuple(map(operator.mul, values, self.weights))\n self._values = tuple(map(operator.truediv, self.wvalues, self.weights))\n except TypeError:\n _, _, traceback = sys.exc_info()\n raise TypeError(\"Both weights and assigned values must be a \"\n \"sequence of numbers when assigning to values of \"\n \"%r. Currently assigning value(s) %r of %r to a \"\n \"fitness with weights %s.\"\n % (self.__class__, values, type(values),\n self.weights)).with_traceback(traceback)\n\n def delValues(self):\n self.wvalues = ()\n self._values = ()\n\n values = property(getValues, setValues, delValues,\n (\"Fitness values. Use directly ``individual.fitness.values = values`` \"\n \"in order to set the fitness and ``del individual.fitness.values`` \"\n \"in order to clear (invalidate) the fitness. The (unweighted) fitness \"\n \"can be directly accessed via ``individual.fitness.values``.\"))\n\n\nclass EnsembleRidge(RidgeCV):\n def __init__(self, alphas=None):\n super().__init__()\n self.model = BaggingRegressor(RidgeCV(alphas=alphas), n_estimators=3)\n\n def fit(self, X, y, sample_weight=None):\n self.model.fit(X, y)\n self.coef_ = np.mean([m.coef_ for m in self.model.estimators_], axis=0)\n self.best_score_ = np.mean([m.best_score_ for m in self.model.estimators_], axis=0)\n return self\n\n def predict(self, X):\n return self.model.predict(X)\n\n\nclass GPRegressor(NormalizationRegressor):\n def __init__(self, input_names=None, n_pop=50, n_gen=200, max_arity=2, height_limit=6, constant_range=2,\n cross_rate=0.9, mutate_rate=0.1, verbose=False, basic_primitive=True, gene_num=1, random_float=False,\n log_dict_size=int(1e9), archive_size=None, category_num=1, cluster_gp=True,\n select=selRandom, test_fun=None, train_test_fun=None, samples=20, min_samples_leaf=1,\n max_depth=None, linear_scale=False, regression_type=None, regression_regularization=0,\n score_function=None, validation_selection=True, ridge_alpha='np.logspace(0, 4)',\n survival_selection='NSGA2', feature_normalization=True, structure_diversity=True,\n space_partition_fun=None, adaptive_tree=True, original_features=True,\n new_surrogate_function=True, advanced_elimination=True, super_object=None, final_prune='Lasso',\n correlation_elimination=False, tree_shrinkage=False, size_objective=True, soft_label=False,\n initial_height=None, **params):\n \"\"\"\n :param n_pop: size of population\n :param n_gen: number of generations\n \"\"\"\n super().__init__(**params)\n self.initial_height = initial_height\n self.soft_label = soft_label\n self.average_size = sys.maxsize\n self.advanced_elimination = 
advanced_elimination\n self.new_surrogate_function = new_surrogate_function\n self.structure_diversity = structure_diversity\n self.adaptive_tree = adaptive_tree\n self.validation_selection = validation_selection\n self.ridge_alpha = ridge_alpha\n self.score_function = score_function\n self.regression_type = regression_type\n self.regression_regularization = regression_regularization\n if hasattr(creator, 'FitnessMin'):\n del creator.FitnessMin\n if hasattr(creator, 'Individual'):\n del creator.Individual\n\n self.toolbox = None\n self.category_num = category_num\n # \"cluster_gp\" is the decisive control parameter\n self.cluster_gp = cluster_gp\n self.test_fun = test_fun\n self.train_test_fun = train_test_fun\n self.samples = samples\n self.best_distribution_test = None\n self.linear_scale = linear_scale\n self.min_samples_leaf = min_samples_leaf\n self.max_depth = max_depth\n self.accuracy_list = []\n self.select = select\n self.pipelines = []\n self.best_cv = np.inf\n self.best_pop = None\n self.best_leaf_node_num = None\n self.best_tree = None\n self.survival_selection = survival_selection\n self.archive_size = archive_size\n self.input_names = input_names\n self.n_pop = n_pop\n self.n_gen = n_gen\n self.max_arity = max_arity\n self.verbose = verbose\n self.basic_primitive = basic_primitive\n self.params = params\n self.height_limit = height_limit\n self.constant_range = constant_range\n self.cross_rate = cross_rate\n self.mutate_rate = mutate_rate\n self.gene_num = gene_num\n self.random_float = random_float\n self.log_dict_size = log_dict_size\n self.feature_normalization = feature_normalization\n self.space_partition_fun = space_partition_fun\n self.original_features = original_features\n self.update_iteration = []\n self.current_gen = 0\n self.better_pop_flag = False\n self.super_object: PSTreeRegressor = super_object\n self.last_loss = None\n self.final_prune = final_prune\n self.size_objective = size_objective\n self.tree_shrinkage = tree_shrinkage\n self.correlation_elimination = correlation_elimination\n\n def get_predicted_list(self, pop):\n predicted_list = []\n for ind in pop:\n predicted_list.append(ind.predicted_value)\n return predicted_list\n\n def evaluate(self, individuals, final_model=None):\n compiled_individuals = [self.toolbox.compile(individual) for individual in individuals]\n all_features = self.feature_construction(compiled_individuals, self.train_data)\n fitness, pipelines, score = self.model_construction(all_features, final_model)\n\n if self.verbose:\n print('score', score / len(self.Y))\n\n # correlation = np.corrcoef(np.array([p['Ridge'].coef_ for p in pipelines]))\n self.adaptive_tree_generation(self.feature_construction(compiled_individuals, self.train_data,\n self.original_features),\n pipelines)\n\n if (self.validation_selection and score < self.best_cv) or (not self.validation_selection) or \\\n (final_model != None):\n # record the best individual in the training process\n self.update_iteration.append((self.current_gen, score / len(self.Y)))\n self.best_cv = score\n self.pipelines = pipelines\n self.best_pop = individuals[:]\n self.better_pop_flag = True\n if self.adaptive_tree:\n self.best_features = all_features\n self.best_label = self.category\n self.best_leaf_node_num = self.super_object.max_leaf_nodes\n # assert len(pipelines) == category_num + 1, f\"{category_num + 1},{len(pipelines)}\"\n\n fitness = np.array(fitness)\n assert len(fitness.shape) == 2, fitness.shape\n fitness_dimension = len(pipelines)\n\n for i, ind in 
enumerate(individuals):\n if self.size_objective:\n target_dimension = fitness_dimension + 1\n ind.fitness.weights = tuple([1 for _ in range(target_dimension)])\n ind.fitness.values = tuple(np.abs(fitness[:, i])) + \\\n (-0.01 * max(len(ind), self.average_size) / self.average_size,)\n else:\n target_dimension = fitness_dimension\n ind.fitness.weights = tuple([1 for _ in range(target_dimension)])\n ind.fitness.values = tuple(np.abs(fitness[:, i]))\n assert len(ind.fitness.values) == target_dimension\n assert len(ind.fitness.wvalues) == target_dimension\n return tuple(fitness)\n\n def model_construction(self, all_features, final_model):\n fitness = []\n pipelines = []\n score = 0\n if len(self.category.shape) == 1:\n category_num = np.max(self.category)\n else:\n category_num = self.category.shape[1] - 1\n\n def dummy_regressor_construction(x, y):\n coef = np.zeros(x.shape[1])\n constant = np.mean(y)\n regr = Pipeline(\n [\n (\"Scaler\", StandardScaler()),\n (\"Ridge\", DummyRegressor(strategy='constant', constant=constant)),\n ]\n )\n regr.fit(features, y)\n regr['Ridge'].coef_ = coef\n regr['Ridge'].intercept_ = constant\n # append coefficients and pipelines to the archive\n fitness.append(coef)\n pipelines.append(regr)\n return coef, regr\n\n for i in range(category_num + 1):\n def check_rule(x):\n # if number of samples <2 :unable to execute leave-one CV\n # if number of samples <10 :unable to execute 5-fold CV\n if x < 2 or (x < 10 and not (self.ridge_alpha == 'RidgeCV' and final_model == None)):\n return True\n else:\n return False\n\n if len(self.category.shape) == 1:\n category = self.category == i\n Y_true = self.Y[category]\n features = all_features[category]\n\n if check_rule(np.sum(category)):\n dummy_regressor_construction(features, Y_true)\n continue\n else:\n # soft decision tree\n Y_true = self.Y\n features = all_features\n\n if check_rule(np.count_nonzero(self.category[:, i])):\n dummy_regressor_construction(features, Y_true)\n continue\n\n # if (np.sum(category) < all_features.shape[1] and self.adaptive_tree) or (np.sum(category) < 5):\n # warnings.simplefilter(\"ignore\", category=ConvergenceWarning)\n\n def get_lasso():\n # if len(self.category.shape) == 1:\n # ridge_model = LassoCV(max_iter=100000)\n # else:\n alphas = _alpha_grid(features, Y_true, normalize=True)\n ridge_model = ElasticNet(alpha=1, lambda_path=alphas, n_splits=5, tol=1e-4)\n return ridge_model\n\n def get_elastic_net(ratio):\n # if len(self.category.shape) == 1:\n # ridge_model = ElasticNetCV(l1_ratio=ratio, max_iter=100000)\n # else:\n alphas = _alpha_grid(features, Y_true, l1_ratio=ratio, normalize=True)\n ridge_model = ElasticNet(alpha=ratio, lambda_path=alphas, n_splits=5, tol=1e-4)\n return ridge_model\n\n if self.ridge_alpha == 'Lasso':\n ridge_model = get_lasso()\n elif self.ridge_alpha == 'Linear':\n ridge_model = LinearRegression()\n elif 'ElasticNet' in self.ridge_alpha:\n ratio = float(self.ridge_alpha.split('-')[1])\n ridge_model = get_elastic_net(ratio)\n elif self.ridge_alpha == 'LinearSVR':\n ridge_model = LinearSVR()\n elif self.ridge_alpha == 'EnsembleRidge':\n ridge_model = EnsembleRidge(np.logspace(0, 4))\n else:\n ridge_model = RidgeCV(alphas=eval(self.ridge_alpha))\n\n if final_model == 'Lasso':\n ridge_model = get_lasso()\n elif final_model == 'ElasticNet':\n ridge_model = get_elastic_net(0.5)\n elif final_model == 'LassoRidge':\n ridge_model = LassoRidge(get_lasso(), ridge_model)\n elif final_model == 'RFE':\n ridge_model = RFERegressor(get_lasso(), n_features_to_select=10, 
step=5)\n # elif final_model == 'Ridge':\n # ridge_model = RidgeCV(alphas=np.logspace(-4, 4))\n\n if self.feature_normalization:\n steps = [\n (\"Scaler\", StandardScaler()),\n (\"Ridge\", ridge_model),\n ]\n pipe = Pipeline(steps)\n else:\n pipe = Pipeline([\n (\"Ridge\", ridge_model),\n ])\n\n if self.validation_selection:\n # record the best individual in the training process\n ridge: RidgeCV = pipe[\"Ridge\"]\n try:\n if len(self.category.shape) == 1:\n pipe.fit(features, Y_true)\n else:\n weight = np.nan_to_num(self.category[:, i], posinf=0, neginf=0)\n pipe.fit(features, Y_true, Ridge__sample_weight=weight)\n except Exception as e:\n traceback.print_exc()\n ic(e, features.shape, Y_true.shape)\n # not converge\n dummy_regressor_construction(features, Y_true)\n continue\n if isinstance(ridge, RidgeCV):\n if len(self.category.shape) == 1:\n score += abs(len(Y_true) * ridge.best_score_)\n else:\n score += abs(np.sum(self.category[:, i]) * ridge.best_score_)\n elif isinstance(ridge, ElasticNet):\n if len(self.category.shape) == 1:\n score += -1 * abs(len(Y_true) * np.max(ridge.cv_mean_score_))\n else:\n score += -1 * abs(np.sum(self.category[:, i]) * np.max(ridge.cv_mean_score_))\n elif isinstance(ridge, LassoCV):\n score += abs(len(Y_true) * np.min(np.sum(ridge.mse_path_, axis=1)))\n elif isinstance(ridge, ElasticNetCV):\n score += abs(len(Y_true) * np.min(np.sum(ridge.mse_path_, axis=1)))\n elif isinstance(ridge, RFERegressor):\n score += 0\n elif isinstance(ridge, LassoRidge):\n score += 0\n else:\n raise Exception\n if isinstance(ridge, ElasticNet):\n feature_importances = np.mean(np.abs(ridge.coef_path_), axis=1)\n else:\n feature_importances = np.abs(ridge.coef_)\n else:\n pipe.fit(features, np.squeeze(Y_true))\n feature_importances = np.abs(pipe['Ridge'].coef_)\n fitness.append(feature_importances)\n pipelines.append(pipe)\n return fitness, pipelines, score\n\n def feature_construction(self, compiled_individuals, x, original_features=False):\n # construct all features\n if original_features:\n all_features = [x]\n else:\n all_features = []\n for func in compiled_individuals:\n yp = func(*x.T)\n if not isinstance(yp, np.ndarray) or yp.size == 1:\n yp = np.full(len(x), yp)\n all_features.append(np.squeeze(yp).reshape(-1, 1))\n all_features = np.concatenate(all_features, axis=1)\n all_features = np.nan_to_num(all_features, posinf=0, neginf=0)\n return all_features\n\n def adaptive_tree_generation(self, all_features, pipelines):\n if self.adaptive_tree:\n if self.soft_label:\n original_all_features = all_features\n prob = softmax(np.array([(p.predict(all_features[:, self.train_data.shape[1]:])\n - self.Y) ** 2 * -1 for p in pipelines]),\n axis=0)\n sample = np.random.rand(len(pipelines), all_features.shape[0])\n matrix = prob > sample\n features = np.concatenate([all_features[s] for s in matrix], axis=0)\n label = np.concatenate([np.full(np.sum(s == True), i) for i, s in enumerate(matrix)], axis=0)\n all_features = features\n _, decision_tree = self.space_partition_fun(all_features, label)\n self.category = decision_tree.predict_proba(original_all_features)\n # decision_tree.labels_ = self.category\n else:\n # assign data point to new partitions\n label = np.zeros(len(self.Y))\n best_fitness = np.full(len(self.Y), np.inf)\n for i, p in enumerate(pipelines):\n # np.array([(p.predict(all_features[:, self.train_data.shape[1]:]) - self.Y) ** 2 for p in pipelines])\n if self.original_features:\n loss = (p.predict(all_features[:, self.train_data.shape[1]:]) - self.Y) ** 2\n else:\n loss = 
(p.predict(all_features) - self.Y) ** 2\n label[loss < best_fitness] = i\n best_fitness[loss < best_fitness] = loss[loss < best_fitness]\n self.category, decision_tree = self.space_partition_fun(all_features, label)\n\n def statistic_fun(self, ind):\n # return loss and time\n if self.test_fun is not None:\n if not self.better_pop_flag:\n return self.last_loss\n self.better_pop_flag = False\n train_test_loss = self.train_test_fun.predict_loss()\n test_loss = self.test_fun.predict_loss()\n self.last_loss = (train_test_loss, time.time(), test_loss)\n return self.last_loss\n return (time.time(),)\n\n def fit(self, X, y=None, category=None):\n if not hasattr(self, 'fit_function'):\n raise Exception(\"Fit function must be specified!\")\n\n if (not hasattr(self, 'input_names')) or (self.input_names is None):\n self.input_names = [f'X{i}' for i in range(X.shape[1])]\n\n self.train_data = X\n self.Y = y\n\n verbose = self.verbose\n if verbose:\n self.stats = tools.Statistics(key=lambda ind: ind.fitness.values)\n self.stats.register(\"avg\", np.mean, axis=0)\n self.stats.register(\"std\", np.std, axis=0)\n self.stats.register(\"min\", np.min, axis=0)\n self.stats.register(\"max\", np.max, axis=0)\n else:\n self.stats = tools.Statistics(key=self.statistic_fun)\n self.stats.register(\"min\", np.min, axis=0)\n self.stats.register(\"max\", np.max, axis=0)\n\n backup_X = X.copy()\n backup_y = y.copy()\n self.lazy_init(self.input_names)\n\n if category is None:\n category = np.full([y.shape[0]], 0)\n\n self.category = category\n\n self.fit_function()\n\n assert np.all(backup_X == X), \"Data has been changed unexpected!\"\n assert np.all(backup_y == y), \"Data has been changed unexpected!\"\n return self\n\n def feature_synthesis(self, x, pop, original_features=False):\n compiled_pop = [self.toolbox.compile(individual) for individual in pop]\n return self.feature_construction(compiled_pop, x, original_features)\n\n def predict(self, X, y=None, category=None):\n # save_object([str(x) for x in self.hof.items], 'model.pkl')\n if (category is None) or (not self.cluster_gp):\n category = np.full([X.shape[0]], 0)\n\n Yp = np.zeros(X.shape[0])\n if len(category.shape) == 1:\n category_num = np.max(category)\n else:\n category_num = category.shape[1] - 1\n\n X = self.feature_synthesis(X, self.best_pop)\n for i in range(category_num + 1):\n if len(category.shape) == 1:\n loc = np.where(category == i)\n current_c = category == i\n if np.sum(current_c) == -0:\n continue\n features = X[current_c]\n else:\n features = X\n\n if len(features.shape) == 1:\n features = features.reshape(1, len(features))\n assert features.shape[1] >= len(self.best_pop), features.shape[1]\n if len(category.shape) == 1:\n Yp.put(loc, self.pipelines[i].predict(features))\n else:\n Yp += np.multiply(self.pipelines[i].predict(features), category[:, i])\n return Yp\n\n def __deepcopy__(self, memodict={}):\n return c_deepcopy(self)\n\n def lazy_init(self, input_names):\n pset = gp.PrimitiveSet(\"MAIN\", len(input_names), prefix='X')\n toolbox = base.Toolbox()\n toolbox.register('evaluate', self.evaluate)\n toolbox.register('select', self.select)\n\n self.pset = pset\n self.toolbox = toolbox\n\n add_pset_function(pset, self.max_arity, self.basic_primitive)\n if hasattr(gp, 'rand101'):\n # delete existing constant generator\n delattr(gp, 'rand101')\n if self.random_float:\n pset.addEphemeralConstant('rand101', lambda: random.uniform(-self.constant_range, self.constant_range))\n else:\n pset.addEphemeralConstant(\"rand101\", lambda: 
random.randint(-self.constant_range, self.constant_range))\n\n creator.create(\"FitnessMin\", FastMeasure, weights=tuple([1 for _ in range(self.category_num)]))\n creator.create(\"Individual\", gp.PrimitiveTree, fitness=creator.FitnessMin)\n\n if self.initial_height is None:\n toolbox.register(\"expr\", gp.genHalfAndHalf, pset=pset, min_=0, max_=2)\n else:\n a, b = self.initial_height.split('-')\n a, b = int(a), int(b)\n toolbox.register(\"expr\", gp.genHalfAndHalf, pset=pset, min_=a, max_=b)\n toolbox.register(\"individual\", tools.initIterate, creator.Individual, toolbox.expr)\n toolbox.register(\"population\", tools.initRepeat, list, toolbox.individual)\n toolbox.register(\"compile\", gp.compile, pset=pset)\n\n toolbox.register(\"expr_mut\", gp.genFull, min_=0, max_=2)\n toolbox.register(\"mate\", gp.cxOnePoint)\n toolbox.register(\"mutate\", gp.mutUniform, expr=toolbox.expr_mut, pset=pset)\n\n toolbox.decorate(\"mate\", gp.staticLimit(key=operator.attrgetter(\"height\"), max_value=self.height_limit))\n toolbox.decorate(\"mutate\", gp.staticLimit(key=operator.attrgetter(\"height\"), max_value=self.height_limit))\n if self.n_gen == 0:\n self.pop = self.generation_original_features()\n else:\n self.pop = toolbox.population(n=self.n_pop)\n\n def generation_original_features(self):\n pop = []\n for x in self.pset.terminals[object]:\n if type(x) is Terminal:\n tree = gp.PrimitiveTree([x])\n tree.fitness = creator.FitnessMin()\n pop.append(tree)\n assert len(pop) == self.train_data.shape[1]\n return pop\n\n def fit_function(self):\n self.pop, self.log_book = self.moea(self.pop, self.toolbox,\n self.cross_rate, self.mutate_rate,\n self.n_gen, stats=self.stats,\n halloffame=None, verbose=self.verbose,\n params=self.params)\n\n def moea(self, population, toolbox, cxpb, mutpb, ngen, stats=None,\n halloffame=None, verbose=__debug__, params=None):\n if self.new_surrogate_function is True:\n def individual_to_tuple(ind):\n return tuple(self.feature_synthesis(self.train_data[:20], [ind]).flatten().tolist())\n elif str(self.new_surrogate_function).startswith('First'):\n sample_count = int(self.new_surrogate_function.split('-')[1])\n\n def individual_to_tuple(ind):\n return tuple(self.feature_synthesis(self.train_data[:sample_count], [ind]).flatten().tolist())\n else:\n individual_to_tuple = cluster_gp_tools.individual_to_tuple\n\n logbook = tools.Logbook()\n logbook.header = ['gen', 'nevals'] + (stats.fields if stats else [])\n\n # Evaluate the individuals with an invalid fitness\n toolbox.evaluate(population)\n\n if halloffame is not None:\n halloffame.update(population)\n\n record = stats.compile(population) if stats else {}\n logbook.record(gen=0, nevals=len(population), **record)\n if verbose:\n print(logbook.stream)\n\n diversity_list = []\n pop_size_list = []\n log_dict = LogDict(self.log_dict_size * len(population))\n for p in population:\n ind_tuple = individual_to_tuple(p)\n p.ind_tuple = ind_tuple\n log_dict.insert(ind_tuple, p.fitness.values)\n pop_size = len(population)\n # assigning the crowding distance to each individual\n if self.select == selTournamentDCD and self.survival_selection == 'NSGA2':\n population = selNSGA2(population, pop_size)\n\n # Begin the generational process\n for gen in range(1, ngen + 1):\n if self.basic_primitive == 'dynamic' and self.current_gen > (self.n_gen // 2):\n self.pset.addPrimitive(np.sin, 1)\n self.pset.addPrimitive(np.cos, 1)\n if self.tree_shrinkage and (gen % 50) == 0:\n self.super_object.max_leaf_nodes = max(self.super_object.max_leaf_nodes // 2, 
1)\n self.current_gen = gen\n if self.structure_diversity:\n count = 0\n new_offspring = []\n while (len(new_offspring) < pop_size):\n count += 1\n # Select the next generation individuals\n # if self.survival_selection == 'Random':\n # offspring = selRandom(population, 2)\n # else:\n offspring = toolbox.select(population, 2)\n offspring = offspring[:]\n parent_tuple = [o.ind_tuple for o in offspring]\n # Vary the pool of individuals\n offspring = varAnd(offspring, toolbox, cxpb, mutpb)\n for o in offspring:\n if len(new_offspring) < pop_size:\n ind_tuple = individual_to_tuple(o)\n if count > pop_size * 50:\n # if too many trials failed, then we just allow to use repetitive individuals\n o.ind_tuple = ind_tuple\n log_dict.insert(ind_tuple, -1)\n new_offspring.append(o)\n continue\n if not log_dict.exist(ind_tuple):\n if self.advanced_elimination and (\n np.abs(pearsonr(ind_tuple, parent_tuple[0])[0]) >= 0.95 or\n np.abs(pearsonr(ind_tuple, parent_tuple[1])[0]) >= 0.95):\n log_dict.insert(ind_tuple, -1)\n continue\n o.ind_tuple = ind_tuple\n log_dict.insert(ind_tuple, -1)\n new_offspring.append(o)\n\n offspring = new_offspring\n\n else:\n offspring = toolbox.select(population, len(population))\n # Vary the pool of individuals\n offspring = varAnd(offspring, toolbox, cxpb, mutpb)\n assert len(offspring) == pop_size, print(len(offspring), pop_size)\n self.average_size = np.mean([len(p) for p in population])\n\n # Evaluate the individuals with an invalid fitness\n if self.correlation_elimination:\n corr_matrix = np.abs(np.corrcoef(np.array([p.ind_tuple for p in population + offspring])))\n # Select upper triangle of correlation matrix\n upper = np.triu(corr_matrix, k=1)\n # Find index of feature columns with correlation greater than 0.95\n to_drop = [any(upper[i] > 0.95) for i in range(0, upper.shape[0])]\n parent = list(compress(population + offspring, np.invert(to_drop)))\n toolbox.evaluate(parent)\n else:\n toolbox.evaluate(offspring + population)\n\n # diversity = diversity_measure(offspring) / pop_size\n # diversity_list.append(diversity)\n pop_size_list.append(len(offspring))\n log_dict.gc()\n\n # Update the hall of fame with the generated individuals\n if halloffame is not None:\n halloffame.update(offspring)\n\n # Replace the current population by the offspring\n if self.survival_selection == 'NSGA2':\n # if len(offspring[0].fitness.wvalues) > 2:\n # high_dimensional = True\n # self.random_objectives = np.random.uniform(0, 1, size=(len(offspring[0].fitness.wvalues), 2))\n # for ind in offspring + population:\n # setattr(ind, 'original_fitness', ind.fitness.values)\n # setattr(ind, 'original_weights', ind.fitness.weights)\n # fitness = np.array(ind.fitness.wvalues) @ self.random_objectives\n # ind.fitness.weights = (1,) * len(fitness)\n # ind.fitness.values = list(fitness)\n # else:\n # high_dimensional = False\n if self.correlation_elimination:\n population[:] = selNSGA2(parent, pop_size)\n else:\n population[:] = selNSGA2(population + offspring, pop_size)\n # population = list(filter(lambda x: np.sum(x.fitness.wvalues) > 0, population))\n # if high_dimensional:\n # for ind in population:\n # ind.fitness.weights = getattr(ind, 'original_weights')\n # ind.fitness.values = getattr(ind, 'original_fitness')\n elif self.survival_selection == 'IBEA':\n population[:] = selIBEA(population + offspring, pop_size)\n elif self.survival_selection == 'SPEA2':\n population[:] = selSPEA2(population + offspring, pop_size)\n elif self.survival_selection == 'NSGA3':\n ref_points = 
tools.uniform_reference_points(nobj=len(population[0].fitness.wvalues))\n population[:] = selNSGA3(population + offspring, pop_size, ref_points)\n elif self.survival_selection == 'Lexicase':\n def selLexicasePlus(individuals: list, k: int):\n selected_individuals = []\n while len(selected_individuals) < k:\n lexicase_inds = selLexicase(individuals, 1)\n for x in lexicase_inds:\n individuals.remove(x)\n selected_individuals.extend(lexicase_inds)\n return selected_individuals\n\n population[:] = selLexicasePlus(population + offspring, pop_size)\n elif self.survival_selection == 'AutomaticEpsilonLexicase':\n def selAutomaticEpsilonLexicasePlus(individuals: list, k: int):\n selected_individuals = []\n while len(selected_individuals) < k:\n lexicase_inds = selAutomaticEpsilonLexicase(individuals, 1)\n for x in lexicase_inds:\n individuals.remove(x)\n selected_individuals.extend(lexicase_inds)\n return selected_individuals\n\n population[:] = selAutomaticEpsilonLexicasePlus(population + offspring, pop_size)\n elif self.survival_selection == 'Random':\n def selSample(individuals, k):\n return random.sample(individuals, k)\n\n population[:] = selSample(population + offspring, pop_size)\n elif self.survival_selection == 'Best':\n population[:] = selBestSum(population + offspring, pop_size)\n elif self.survival_selection == 'MOEA/D':\n population[:] = selMOEAD(population + offspring, pop_size)\n else:\n raise Exception\n assert len(population) <= pop_size\n\n # if self.test_fun != None:\n # # reevaluate population\n # self.evaluate(population)\n\n # Append the current generation statistics to the logbook\n record = stats.compile(population) if stats else {}\n logbook.record(gen=gen, nevals=len(population), **record)\n if verbose:\n print(logbook.stream)\n\n # final process\n # select top-N individuals\n # selNSGA2(population, len(population))\n # assert len(population) == pop_size\n # toolbox.evaluate(self.best_pop, final_model=True)\n\n self.super_object.max_leaf_nodes = self.best_leaf_node_num\n self.super_object.soft_tree = self.super_object.final_soft_tree\n if self.final_prune is not None:\n toolbox.evaluate(self.best_pop, final_model=self.final_prune)\n\n features = self.feature_synthesis(self.train_data, self.best_pop,\n self.original_features)\n self.adaptive_tree_generation(features, self.pipelines)\n return population, logbook\n\n\nclass NormalizedGPRegressor(GPRegressor):\n def __init__(self, **params):\n super().__init__(**params)\n\n @train_normalization\n def fit(self, X, y=None, category=None):\n super().fit(X, y, category)\n\n @predict_normalization\n def predict(self, X, y=None, category=None):\n super().predict(X, y, category)\n\n\ndef diversity_measure(pop):\n fit = set()\n for ind in pop:\n fit.add(ind.fitness.values[0])\n return len(fit)\n\n\nclass PSTreeRegressor(NormalizationRegressor):\n \"\"\"\n An upper-level class for PS-Tree\n \"\"\"\n\n def __init__(self, regr_class, tree_class, min_samples_leaf=1, max_depth=None, max_leaf_nodes=4, random_seed=0,\n restricted_classification_tree=True, basic_primitive=False,\n soft_tree=True, final_soft_tree=True, adaptive_tree=True, random_state=0, **params):\n \"\"\"\n regr_class: the class name for base learner\n tree_class: the class name for the upper-level decision tree\n \"\"\"\n super().__init__(**params)\n self.random_state = random_state\n # reset_random(random_state)\n self.regr_class = regr_class\n self.tree_class = tree_class\n self.max_depth = max_depth\n self.min_samples_leaf = min_samples_leaf\n self.max_leaf_nodes = 
max_leaf_nodes\n self.random_seed = random_seed\n self.params = params\n self.restricted_classification_tree = restricted_classification_tree\n self.basic_primitive = basic_primitive\n self.soft_tree = soft_tree\n self.final_soft_tree = soft_tree & final_soft_tree\n self.adaptive_tree = adaptive_tree\n\n @train_normalization\n def fit(self, X: np.ndarray, y=None):\n self.train_data = X\n self.train_label = y\n if self.min_samples_leaf in ['Auto', 'Auto-4', 'Auto-6', 'Auto-8']:\n best_size = automatically_determine_best_size(X, y, self.min_samples_leaf)\n self.min_samples_leaf = best_size\n if type(self.min_samples_leaf) is str:\n raise Exception\n\n category, _ = self.space_partition(X, y)\n if self.adaptive_tree is True:\n self.tree_class = DecisionTreeClassifier\n if self.adaptive_tree == 'Soft':\n self.tree_class = LogisticRegression\n\n self.regr: GPRegressor = self.regr_class(max_depth=self.max_depth, min_samples_leaf=self.min_samples_leaf,\n space_partition_fun=self.space_partition,\n basic_primitive=self.basic_primitive, soft_tree=self.soft_tree,\n adaptive_tree=self.adaptive_tree, super_object=self, **self.params)\n self.regr.fit(X, y, category)\n return self\n\n def space_partition(self, X, y):\n # other partition methods\n if self.tree_class == PseudoPartition or self.max_leaf_nodes == 1:\n self.tree = PseudoPartition()\n # return self.tree.apply(X), self.tree\n elif self.tree_class == DecisionTreeClassifier:\n if self.restricted_classification_tree:\n self.tree = self.tree_class(\n max_depth=self.max_depth,\n min_samples_leaf=self.min_samples_leaf,\n max_leaf_nodes=self.max_leaf_nodes,\n random_state=self.random_seed)\n else:\n self.tree = self.tree_class(\n max_depth=self.max_depth,\n min_samples_leaf=self.min_samples_leaf,\n random_state=self.random_seed,\n ccp_alpha=0.01)\n elif self.tree_class == LogisticRegression:\n self.tree = LogisticRegression(\n solver='liblinear'\n )\n elif self.tree_class == DecisionTreeRegressor:\n self.tree = self.tree_class(\n max_depth=self.max_depth,\n min_samples_leaf=self.min_samples_leaf,\n max_leaf_nodes=self.max_leaf_nodes,\n random_state=self.random_seed)\n elif self.tree_class == KMeans:\n self.tree = KMeans(n_clusters=self.max_leaf_nodes, random_state=self.random_seed)\n elif self.tree_class == BayesianGaussianMixture:\n self.tree = BayesianGaussianMixture(n_components=self.max_leaf_nodes, max_iter=1000,\n random_state=self.random_seed)\n elif self.tree_class == GaussianNB:\n self.tree = GaussianNB()\n elif self.tree_class == RandomForestClassifier:\n self.tree = RandomForestClassifier(n_estimators=10,\n max_depth=self.max_depth,\n min_samples_leaf=self.min_samples_leaf,\n max_leaf_nodes=self.max_leaf_nodes,\n random_state=self.random_seed)\n else:\n raise Exception\n\n if hasattr(self, 'regr') and self.regr.original_features == 'original':\n self.tree.fit(X[:, :self.train_data.shape[1]], y)\n self.tree.labels_ = get_labels(self.tree, X[:, :self.train_data.shape[1]],\n self.soft_tree)\n else:\n self.tree.fit(X, y)\n self.tree.labels_ = get_labels(self.tree, X, self.soft_tree)\n if len(self.tree.labels_.shape) == 1:\n cluster_num = self.tree.labels_.max() + 1\n category, category_index = self.category_generation(cluster_num, y)\n self.params['category_num'] = category_index\n else:\n category = self.tree.labels_\n self.params['category_num'] = category.shape[1]\n # if isinstance(self.tree, DecisionTreeClassifier):\n # print('loss', accuracy_score(y, self.tree.predict(X)), np.unique(self.tree.labels_).__len__())\n # if 
isinstance(self.tree, DecisionTreeRegressor):\n # print('loss', r2_score(y, self.tree.predict(X)), np.unique(self.tree.labels_).__len__())\n return category, self.tree\n\n def category_generation(self, cluster_num, y):\n category = np.full([y.shape[0]], 0)\n category_index = 0\n self.label_map = {}\n for i in range(cluster_num):\n if not np.any(self.tree.labels_ == i):\n continue\n category[np.where(self.tree.labels_ == i)] = category_index\n self.label_map[i] = category_index\n category_index += 1\n return category, category_index\n\n @predict_normalization\n def predict(self, X, y=None):\n if self.regr.adaptive_tree:\n features = self.regr.feature_synthesis(X, self.regr.best_pop,\n original_features=self.regr.original_features)\n if self.regr.original_features == 'original':\n labels = get_labels(self.tree, features[:, :self.train_data.shape[1]])\n else:\n labels = get_labels(self.tree, features, self.soft_tree)\n else:\n labels = get_labels(self.tree, X)\n backup_X = X.copy()\n\n if len(labels.shape) == 1:\n labels = self.category_map(labels)\n\n y_predict = self.regr.predict(X, y, category=labels)\n\n assert np.all(backup_X == X), \"Data has been changed unexpected!\"\n return y_predict\n\n def category_map(self, labels):\n for i, label in enumerate(labels):\n if label in self.label_map.keys():\n labels[i] = self.label_map[label]\n else:\n # The untrained cluster.\n labels[i] = -1\n return labels\n\n def __deepcopy__(self, memodict={}):\n return c_deepcopy(self)\n\n def get_params(self, deep=True):\n # The current version of scikit-learn does not support this function well,\n # which cause an error when the constructor parameter has a parameter that is an estimator class.\n out = dict()\n for key in self._get_param_names():\n value = getattr(self, key, None)\n if deep and hasattr(value, 'get_params') and not isinstance(value, type):\n deep_items = value.get_params().items()\n out.update((key + '__' + k, val) for k, val in deep_items)\n out[key] = value\n\n params = out\n params = {\n **self.params,\n **params\n }\n return params\n\n def model(self, partition=0):\n features = []\n for id in range(self.regr.train_data.shape[1]):\n features.append(parse_expr(f'X{id}'))\n for p in self.regr.best_pop:\n features.append(parse_expr(gene_to_string(p)))\n\n regr, feature_names = self, [f'X{id}' for id in range(self.regr.train_data.shape[1] + len(self.regr.best_pop))]\n tree_ = regr.tree.tree_\n feature_name = [feature_names[i]\n if i != _tree.TREE_UNDEFINED else \"undefined!\"\n for i in tree_.feature]\n\n if regr.tree.tree_.node_count==1:\n # single model\n return srepr(multigene_gp_to_string(0, regr.regr))\n\n all_expressions = []\n all_conditions = []\n\n def recurse(node):\n if tree_.feature[node] != _tree.TREE_UNDEFINED:\n name = feature_name[node]\n index = int(name.replace('X', ''))\n name = features[index]\n threshold = tree_.threshold[node]\n\n node_condition = f'({name} <= {threshold})'\n all_conditions.append(node_condition)\n recurse(tree_.children_left[node])\n all_conditions.pop(-1)\n\n node_condition = f'({name} > {threshold})'\n all_conditions.append(node_condition)\n recurse(tree_.children_right[node])\n all_conditions.pop(-1)\n else:\n tree_values = tree_.value[node][0]\n tree_values = tree_values / tree_values.sum()\n # print(node)\n expr = None\n for i in range(len(regr.regr.pipelines)):\n ex1 = multigene_gp_to_string(i, regr.regr)\n if expr is None:\n expr = tree_values[i] * ex1\n else:\n expr += tree_values[i] * ex1\n condition = '&'.join(all_conditions)\n 
all_expressions.append((expr, parse_expr(condition)))\n\n recurse(0)\n return srepr(Piecewise(*tuple(all_expressions)))\n\n\nclass SequentialTreeGPRegressor(NormalizationRegressor):\n def __init__(self, regr_class, min_samples_leaf=1, min_impurity_decrease=0, random_seed=0, **params):\n super().__init__(**params)\n self.regr_class = regr_class\n self.min_samples_leaf = min_samples_leaf\n self.min_impurity_decrease = min_impurity_decrease\n self.random_seed = random_seed\n self.params = params\n\n @train_normalization\n def fit(self, X: np.ndarray, y=None):\n self.tree = DecisionTreeRegressor(min_samples_leaf=self.min_samples_leaf,\n min_impurity_decrease=self.min_impurity_decrease,\n random_state=self.random_seed)\n self.tree.fit(X, y)\n self.tree.labels_ = self.tree.apply(X)\n cluster_num = self.tree.labels_.max() + 1\n\n category_index = 0\n self.label_map = {}\n self.regr = []\n\n if 'test_fun' in self.params and self.params['test_fun'] is not None:\n test_fun = self.params['test_fun']\n self.test_x = test_fun.x\n self.test_y = test_fun.y\n test_label = self.tree.apply(self.test_x)\n\n train_test_fun = self.params['train_test_fun']\n self.train_x = train_test_fun.x\n self.train_y = train_test_fun.y\n train_label = self.tree.apply(self.train_x)\n\n for i in range(cluster_num):\n if not np.any(self.tree.labels_ == i):\n continue\n\n if 'test_fun' in self.params and self.params['test_fun'] is not None:\n test_fun = self.params['test_fun']\n test_fun.x = self.test_x[test_label == i]\n test_fun.y = self.test_y[test_label == i]\n self.params['test_fun'] = test_fun\n\n train_test_fun = self.params['train_test_fun']\n train_test_fun.x = self.train_x[train_label == i]\n train_test_fun.y = self.train_y[train_label == i]\n self.params['train_test_fun'] = train_test_fun\n self.label_map[i] = category_index\n category_index += 1\n\n regr = self.regr_class(category_num=1, **self.params)\n self.regr.append(regr)\n regr.fit(X[self.tree.labels_ == i], y[self.tree.labels_ == i],\n np.zeros((np.sum(self.tree.labels_ == i, )), dtype=int))\n return self\n\n @predict_normalization\n def predict(self, X, y=None):\n labels = self.tree.apply(X)\n for i, label in enumerate(labels):\n if label in self.label_map.keys():\n labels[i] = self.label_map[label]\n else:\n # The untrained cluster.\n labels[i] = -1\n\n y_predict = np.zeros((X.shape[0],))\n for i in range(len(self.regr)):\n if np.sum(labels == i) > 0:\n y_predict[labels == i] = self.regr[i].predict(X[labels == i],\n category=np.zeros((np.sum(labels == i),), dtype=int))\n return y_predict\n\n def __deepcopy__(self, memodict={}):\n return c_deepcopy(self)\n\n def get_params(self, deep=True):\n # The current version of scikit-learn does not support this function well,\n # which cause an error when the constructor parameter has a parameter that is an estimator class.\n out = dict()\n for key in self._get_param_names():\n value = getattr(self, key, None)\n if deep and hasattr(value, 'get_params') and not isinstance(value, type):\n deep_items = value.get_params().items()\n out.update((key + '__' + k, val) for k, val in deep_items)\n out[key] = value\n\n params = out\n params = {\n **self.params,\n **params\n }\n return params\n\n\nclass PseudoCluster(BaseEstimator):\n def __init__(self, n_clusters=10, random_state=0):\n self.n_clusters = n_clusters\n self.random_state = random_state\n\n def fit(self, X, y=None):\n pass\n\n def predict(self, X, y=None):\n total_size = X.shape[0]\n cluster_size = total_size // self.n_clusters\n remain_size = total_size % 
self.n_clusters\n labels = np.array([], dtype=np.int)\n for i in range(self.n_clusters):\n labels = np.append(labels, np.full(cluster_size, i, dtype=np.int))\n if i < remain_size:\n labels = np.append(labels, np.full(1, i, dtype=np.int))\n np.random.shuffle(labels)\n return labels\n\n def __deepcopy__(self, memodict={}):\n return c_deepcopy(self)\n\n\nclass LogDict:\n def __init__(self, max_size):\n self.fitness_dict = {}\n self.queue = deque()\n self.max_size = max_size\n return\n\n def insert(self, x, fitness):\n if x not in self.fitness_dict:\n self.queue.append(x)\n self.fitness_dict[x] = fitness\n\n def gc(self):\n while len(self.queue) >= self.max_size:\n old = self.queue.popleft()\n del self.fitness_dict[old]\n\n def update(self, x, fitness):\n self.fitness_dict[x] = fitness\n\n def exist(self, x):\n return x in self.fitness_dict.keys()\n\n def get(self, x):\n if x in self.fitness_dict.keys():\n return self.fitness_dict[x]\n else:\n return None\n\n\nclass PseudoPartition(BaseEstimator):\n def __init__(self, **param):\n class zero: pass\n\n self.tree_ = zero()\n # this is because node count is the number of all nodes\n # no branch nodes exist in this tree\n setattr(self.tree_, 'node_count', 1)\n # there is only one leaf node in this pseudo tree\n setattr(self.tree_, 'n_leaves', 1)\n\n def fit(self, X, y=None):\n return np.zeros(len(X)).astype(np.int)\n\n def predict(self, X, y=None):\n pass\n\n def apply(self, X):\n return np.zeros(len(X)).astype(np.int)\n\n def __deepcopy__(self, memodict={}):\n return c_deepcopy(self)\n\n\nclass PiecewisePolynomialTree(BaseEstimator):\n \"\"\"\n This is a simple PL-Tree which could be used for determine the best partition number\n \"\"\"\n\n def __init__(self, min_samples_leaf=None, **param):\n self.label_model = defaultdict()\n self.min_samples_leaf = min_samples_leaf\n\n def fit(self, X, y=None):\n self.tree_ = DecisionTreeRegressor(min_samples_leaf=self.min_samples_leaf)\n self.tree_.fit(X, y)\n labels = self.tree_.apply(X)\n # X = PolynomialFeatures(degree=2).fit_transform(X)\n for l in np.unique(labels):\n index = labels == l\n lr = RidgeCV(np.logspace(0, 4))\n lr.fit(X[index], y[index])\n self.label_model[l] = lr\n\n def predict(self, X, y=None):\n # y_pred = np.zeros((X.shape[0], 1))\n y_pred = np.zeros((X.shape[0],))\n labels = self.tree_.apply(X)\n # X = PolynomialFeatures(degree=2).fit_transform(X)\n for l in self.label_model.keys():\n index = labels == l\n if np.sum(index) > 0:\n y_pred[index] = self.label_model[l].predict(X[index])\n return y_pred\n\n def apply(self, X):\n return np.zeros(len(X)).astype(np.int)\n\n def __deepcopy__(self, memodict={}):\n return copy.deepcopy(self)\n\n\ndef selTournament(individuals, k, tournsize, fit_attr=\"fitness\"):\n \"\"\"Select the best individual among *tournsize* randomly chosen\n individuals, *k* times. 
The list returned contains\n references to the input *individuals*.\n\n :param individuals: A list of individuals to select from.\n :param k: The number of individuals to select.\n :param tournsize: The number of individuals participating in each tournament.\n :param fit_attr: The attribute of individuals to use as selection criterion\n :returns: A list of selected individuals.\n\n This function uses the :func:`~random.choice` function from the python base\n :mod:`random` module.\n \"\"\"\n chosen = []\n for i in range(k):\n aspirants = selRandom(individuals, tournsize)\n chosen.append(builtins.max(aspirants, key=lambda x: np.sum(x.fitness.wvalues)))\n return chosen\n\n\ndef selTournamentDCD(individuals, k):\n \"\"\"\n A simplified version of the tournament selection operator based on dominance and crowding distance\n\n :param individuals: A list of individuals to select from.\n :param k: The number of individuals to select.\n :returns: A list of selected individuals.\n \"\"\"\n\n def tourn(ind1, ind2):\n if ind1.fitness.dominates(ind2.fitness):\n return ind1\n elif ind2.fitness.dominates(ind1.fitness):\n return ind2\n\n if hasattr(ind1.fitness, 'crowding_dist') and hasattr(ind2.fitness, 'crowding_dist'):\n if ind1.fitness.crowding_dist < ind2.fitness.crowding_dist:\n return ind2\n elif ind1.fitness.crowding_dist > ind2.fitness.crowding_dist:\n return ind1\n\n if random.random() <= 0.5:\n return ind1\n return ind2\n\n chosen = []\n for i in range(0, k, 2):\n individuals_sample = random.sample(individuals, 4)\n chosen.append(tourn(individuals_sample[0], individuals_sample[1]))\n chosen.append(tourn(individuals_sample[2], individuals_sample[3]))\n return chosen\n\n\ndef automatically_determine_best_size(X, y, min_samples_leaf):\n \"\"\"\n Automatically determine the best tree size\n \"\"\"\n low_score = -np.inf\n best_size = 0\n for size in {\n 'Auto': reversed([50, 100, 150, 200]),\n 'Auto-4': reversed([25, 50, 100, 500]),\n 'Auto-6': reversed([50, 75, 100, 125, 150, 200]),\n 'Auto-8': reversed([25, 50, 75, 100, 125, 150, 200, 500]),\n }[min_samples_leaf]:\n dt = PiecewisePolynomialTree(min_samples_leaf=size)\n score = cross_validate(dt, X, y, scoring='neg_mean_squared_error', cv=5,\n return_train_score=True)\n mean_score = np.mean(score['test_score'])\n if mean_score > low_score:\n # current score is better\n low_score = mean_score\n best_size = size\n return best_size\n"
] |
[
[
"sklearn.linear_model.RidgeCV",
"sklearn.tree.DecisionTreeRegressor",
"sklearn.linear_model.LogisticRegression",
"sklearn.cluster.KMeans",
"sklearn.naive_bayes.GaussianNB",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.mixture.BayesianGaussianMixture",
"scipy.stats.pearsonr",
"sklearn.dummy.DummyRegressor",
"sklearn.pipeline.Pipeline",
"sklearn.linear_model._coordinate_descent._alpha_grid",
"sklearn.linear_model.LinearRegression",
"sklearn.model_selection.cross_validate",
"sklearn.preprocessing.StandardScaler",
"numpy.get_include",
"sklearn.svm.LinearSVR"
]
] |
ariel415el/CTSegmentation-Pytorch
|
[
"9584c20fd009b93211d6d89afa0df7aaecd31468"
] |
[
"inference.py"
] |
[
"import argparse\nimport json\nimport os\n\nimport numpy as np\nimport torch\nimport SimpleITK as sitk\nfrom torchvision.transforms import InterpolationMode\nfrom scipy import ndimage\nimport torch.nn.functional as F\nimport cc3d\nfrom scipy.ndimage.morphology import binary_opening, binary_dilation, binary_erosion\n\nfrom config import ExperimentConfigs\nfrom datasets.ct_dataset import get_transforms, LITS2017_VALSETS\nfrom datasets.visualize_data import write_volume_slices\nfrom models import get_model\nfrom torchvision.transforms import Resize\nfrom metrics import TverskyScore, compute_Recal\n\n\ndef get_model_from_dir(model_dir, ckpt_name):\n config = json.load(open(f'{model_dir}/exp_configs.json'))\n config = ExperimentConfigs(**config)\n ckpt = torch.load(f'{model_dir}/{ckpt_name}.pth')\n\n # get model\n model = get_model(config.get_model_config())\n model.load_state_dict(ckpt['model'])\n model.to(config.device)\n\n return model, config\n\n\ndef clean_liver_prediction(pred_volume):\n \"\"\"\n Keep only largest blob in binary mask\n \"\"\"\n # Convert to binary-mask\n binary_mask = pred_volume.cpu().detach().bool().numpy()\n\n # erode to separate loosley connected blobs\n binary_mask = binary_erosion(binary_mask, iterations=5)\n\n if not binary_mask.any():\n return torch.from_numpy(binary_mask)\n # Find 3D blobs\n cc = cc3d.connected_components(binary_mask)\n clean_pred_volume = max([image for (label, image) in cc3d.each(cc, binary=True, in_place=False)],\n key=lambda x: x.sum())\n\n # Restore blob to original\n clean_pred_volume = binary_dilation(clean_pred_volume, iterations=5)\n\n clean_pred_volume = torch.from_numpy(clean_pred_volume)\n\n return clean_pred_volume\n\n\ndef read_case(ct_dir, gt_dir, fname):\n ct_nii = sitk.ReadImage(os.path.join(ct_dir, fname))\n ct_volume = sitk.GetArrayFromImage(ct_nii) # shape (S, 512, 512)\n gt_path = os.path.join(gt_dir, fname.replace('volume', 'segmentation'))\n\n gt_volume = sitk.GetArrayFromImage(sitk.ReadImage(gt_path, sitk.sitkUInt8)) if os.path.exists(gt_path) else None\n\n return ct_volume, gt_volume, ct_nii.GetSpacing()\n\n\nclass TwoStepsSegmentor:\n def __init__(self, liver_segmentation_model_dir, tumor_segmentation_model_dir):\n self.liver_segmentation_model, self.liver_segmentation_cfg = get_model_from_dir(liver_segmentation_model_dir, 'best')\n _, self.liver_segmentation_transforms = get_transforms(self.liver_segmentation_cfg.get_data_config())\n\n # Load Tumor model and cnfigs\n self.tumor_model, self.tumor_cfg = get_model_from_dir(tumor_segmentation_model_dir, 'best')\n _, self.tumor_transforms = get_transforms(self.tumor_cfg.get_data_config())\n\n def predict(self, cropped_ct):\n # Predict liver\n input_ct = self.liver_segmentation_transforms((cropped_ct, np.ones_like(cropped_ct)))[0].unsqueeze(0).float().cuda()\n predicted_liver_mask = self.liver_segmentation_model.predict_volume(input_ct)[0].argmax(0).cpu()\n predicted_liver_mask = clean_liver_prediction(predicted_liver_mask).long()\n\n # Predict tumors\n input_ct = self.tumor_transforms((cropped_ct, np.ones_like(cropped_ct)))[0].unsqueeze(0).float().cuda() # shape (S, 128, 128)\n\n predicted_tumor_mask = self.tumor_model.predict_volume(input_ct)[0].argmax(0).cpu() # shape (S, 128, 128)\n\n multiclass_mask = predicted_liver_mask\n multiclass_mask[torch.logical_and(multiclass_mask == 1, predicted_tumor_mask == 1)] = 2\n\n return multiclass_mask\n\n\nclass OneStepsSegmentor:\n def __init__(self, multiclass_model_dir):\n self.multiclass_model, self.multiclass_cfg = 
get_model_from_dir(multiclass_model_dir, 'best')\n _, self.multiclass_transforms = get_transforms(self.multiclass_cfg.get_data_config())\n\n def predict(self, cropped_ct):\n input_ct = self.multiclass_transforms((cropped_ct, np.ones_like(cropped_ct)))[0].unsqueeze(0).float().cuda() # shape (S, 128, 128)\n multiclass_mask = self.multiclass_model.predict_volume(input_ct)[0].argmax(0).cpu() # shape (S, 128, 128)\n\n return multiclass_mask\n\n\ndef inference(ct_path, gt_path, liver_localization_model_dir, multiclass_segmentation_model_dir, normalized_mms=None, liver_crop_padding=(3, 20,20), dump_debug_images=False):\n with torch.no_grad():\n outputs_dir = os.path.join(os.path.dirname(ct_path), 'end2end_prediction')\n os.makedirs(outputs_dir, exist_ok=True)\n\n liver_localization_model, liver_localization_cfg = get_model_from_dir(liver_localization_model_dir, 'best')\n _, liver_localization_transforms = get_transforms(liver_localization_cfg.get_data_config())\n\n segmentor = OneStepsSegmentor(multiclass_segmentation_model_dir)\n\n liver_dice_scores = []\n tumor_dice_scores = []\n tumor_recalls = []\n for fname in os.listdir(ct_path):\n print(f\"Case-{fname}\")\n ct_volume, gt_volume, spacing = read_case(ct_path, gt_path, fname)\n\n # localize liver\n liver_input = liver_localization_transforms((ct_volume.copy(), np.ones_like(ct_volume).astype(np.uint8)))[0].unsqueeze(0).float().cuda()\n predicted_liver_mask = liver_localization_model.predict_volume(liver_input)[0].argmax(0).cpu() # shape (S, 128, 128)\n\n # Clean liver and restore to origial size\n predicted_liver_mask = clean_liver_prediction(predicted_liver_mask) # shape (S, 128, 128)\n predicted_liver_mask = Resize(ct_volume.shape[-2:], interpolation=InterpolationMode.NEAREST)(predicted_liver_mask) # shape (S, 512, 512)\n\n # Crop around liver for tumor segmentaiton # shape (S, h, w)\n nwhere = np.where(predicted_liver_mask)\n liver_crop = tuple([slice(max(0, x.min() - liver_crop_padding[i]), x.max() + liver_crop_padding[i]) for i, x in enumerate(nwhere)])\n cropped_ct = ct_volume[liver_crop] # shape (S, h, w)\n\n if normalized_mms is not None:\n cropped_ct = ndimage.zoom(cropped_ct, (spacing[-1] / normalized_mms, 1, 1), order=3)\n\n multiclass_mask = segmentor.predict(cropped_ct)\n\n # Restore input resolution\n multiclass_mask = F.interpolate(multiclass_mask.float().unsqueeze(0).unsqueeze(0), size=ct_volume[liver_crop].shape[-3:], mode='nearest')[0,0].long()\n\n # Create final prediction mask\n final_mask = torch.zeros(ct_volume.shape).long()\n final_mask[liver_crop] = multiclass_mask\n\n # write segmentation map\n new_seg = sitk.GetImageFromArray(final_mask, sitk.sitkInt8)\n new_seg.SetSpacing(spacing)\n sitk.WriteImage(new_seg, os.path.join(outputs_dir, fname.replace('volume', 'segmentation')))\n\n # # Compute dice\n if gt_volume is not None:\n gt_volume = torch.from_numpy(gt_volume)\n liver_score = TverskyScore(0.5,0.5)((final_mask != 0).long().unsqueeze(0), (gt_volume != 0).long().unsqueeze(0), torch.ones_like(gt_volume).unsqueeze(0))\n tumor_score = TverskyScore(0.5,0.5)((final_mask == 2).long().unsqueeze(0), (gt_volume == 2).long().unsqueeze(0), torch.ones_like(gt_volume).unsqueeze(0))\n tumor_recall = compute_Recal((final_mask == 2).long().unsqueeze(0), (gt_volume == 2).long().unsqueeze(0), torch.ones_like(gt_volume).unsqueeze(0))\n liver_dice_scores.append(liver_score)\n tumor_dice_scores.append(tumor_score)\n tumor_recalls.append(tumor_recall)\n\n # dump debug images\n if dump_debug_images:\n ct_volume = 
torch.from_numpy(np.clip(ct_volume, -100, 400).astype(float))\n write_volume_slices(ct_volume, [final_mask, gt_volume], os.path.join(outputs_dir, f\"{os.path.splitext(fname)[0]}_{liver_score.item():.2f}_{tumor_score.item():.2f}\"))\n\n print(f\"AVG Dice per case: Liver: {np.mean(liver_dice_scores)}, Tumor: {np.mean(tumor_dice_scores)}, Tumor-Recall: {np.mean(tumor_recalls)}\")\n\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Preprocess Lits2017 dataset')\n parser.add_argument('ct_dir')\n parser.add_argument('--gt_dir', default=\"\", help='If GT is not specified no Dice score is computed')\n parser.add_argument('--localization_model_dir', default='trained_models/liver_localization/VGGUNet_Aug_Loss(0.0Dice+0.0WCE+1.0CE)_V-A')\n parser.add_argument('--segmentation_model_dir', default='trained_models/multiclass_segmentation/VGGUNet2_5D_Aug_FNE-0.5_Loss(0.0Dice+0.0WCE+1.0CE)_V-A')\n parser.add_argument('--debug', action='store_true')\n args = parser.parse_args()\n\n inference(args.ct_dir, args.gt_dir, args.localization_model_dir, args.segmentation_model_dir, dump_debug_images=args.debug)\n"
] |
[
[
"scipy.ndimage.morphology.binary_dilation",
"numpy.ones_like",
"torch.load",
"torch.zeros",
"numpy.clip",
"scipy.ndimage.zoom",
"torch.from_numpy",
"scipy.ndimage.morphology.binary_erosion",
"torch.no_grad",
"numpy.mean",
"torch.logical_and",
"numpy.where",
"torch.ones_like"
]
] |
hyoo/candle_lib
|
[
"33fe91879a09035fab1b94361c0d65d4595e8d0d"
] |
[
"candle/pytorch_utils.py"
] |
[
"from __future__ import absolute_import\n\nfrom typing import Dict\n\nimport torch\nimport torch.nn\nimport torch.nn.functional as F\nimport torch.nn.init\nimport torch.optim\n\nfrom .helper_utils import set_seed as set_seed_defaultUtils\n\n\ndef set_pytorch_threads(): # for compatibility\n pass\n\n\ndef set_pytorch_seed(seed):\n \"\"\"Set the random number seed to the desired value\n\n Parameters\n ----------\n seed : integer\n Random number seed.\n \"\"\"\n\n set_seed_defaultUtils(seed)\n torch.manual_seed(seed)\n\n\ndef get_pytorch_function(name: str):\n mapping = {}\n\n # loss\n mapping[\"mse\"] = torch.nn.MSELoss()\n mapping[\"binary_crossentropy\"] = torch.nn.BCELoss()\n mapping[\"categorical_crossentropy\"] = torch.nn.CrossEntropyLoss()\n mapping[\"smoothL1\"] = torch.nn.SmoothL1Loss()\n\n mapped = mapping.get(name)\n if not mapped:\n raise Exception('No pytorch function found for \"{}\"'.format(name))\n\n return mapped\n\n\ndef build_pytorch_activation(activation: str):\n\n # activation\n if activation == \"relu\":\n return torch.nn.ReLU()\n elif activation == \"sigmoid\":\n return torch.nn.Sigmoid()\n elif activation == \"tanh\":\n return torch.nn.Tanh()\n\n\ndef build_pytorch_optimizer(\n model, optimizer: str, lr: float, kerasDefaults: Dict, trainable_only: bool = True\n):\n if trainable_only:\n params = filter(lambda p: p.requires_grad, model.parameters())\n else:\n params = model.parameters()\n\n # schedule = optimizers.optimizer.Schedule() # constant lr (equivalent to default keras setting)\n\n if optimizer == \"sgd\":\n return torch.optim.GradientDescentMomentum(\n params,\n lr=lr,\n momentum_coef=kerasDefaults[\"momentum_sgd\"],\n nesterov=kerasDefaults[\"nesterov_sgd\"],\n )\n\n elif optimizer == \"rmsprop\":\n return torch.optim.RMSprop(\n model.parameters(),\n lr=lr,\n alpha=kerasDefaults[\"rho\"],\n eps=kerasDefaults[\"epsilon\"],\n )\n\n elif optimizer == \"adagrad\":\n return torch.optim.Adagrad(\n model.parameters(), lr=lr, eps=kerasDefaults[\"epsilon\"]\n )\n\n elif optimizer == \"adadelta\":\n return torch.optim.Adadelta(\n params, eps=kerasDefaults[\"epsilon\"], rho=kerasDefaults[\"rho\"]\n )\n\n elif optimizer == \"adam\":\n return torch.optim.Adam(\n params,\n lr=lr,\n betas=[kerasDefaults[\"beta_1\"], kerasDefaults[\"beta_2\"]],\n eps=kerasDefaults[\"epsilon\"],\n )\n\n\ndef pytorch_initialize(weights, initializer, kerasDefaults, seed=None, constant=0.0):\n\n if initializer == \"constant\":\n return torch.nn.init.constant_(weights, val=constant)\n\n elif initializer == \"uniform\":\n return torch.nn.init.uniform(\n weights,\n a=kerasDefaults[\"minval_uniform\"],\n b=kerasDefaults[\"maxval_uniform\"],\n )\n\n elif initializer == \"normal\":\n return torch.nn.init.normal(\n weights,\n mean=kerasDefaults[\"mean_normal\"],\n std=kerasDefaults[\"stddev_normal\"],\n )\n\n elif initializer == \"glorot_normal\": # not quite Xavier\n return torch.nn.init.xavier_normal(weights)\n\n elif initializer == \"glorot_uniform\":\n return torch.nn.init.xavier_uniform_(weights)\n\n elif initializer == \"he_normal\":\n return torch.nn.init.kaiming_uniform(weights)\n\n\ndef pytorch_xent(y_true, y_pred):\n return F.cross_entropy(y_pred, y_true)\n\n\ndef pytorch_mse(y_true, y_pred):\n return F.mse_loss(y_pred, y_true)\n"
] |
[
[
"torch.optim.GradientDescentMomentum",
"torch.optim.Adadelta",
"torch.nn.init.xavier_normal",
"torch.nn.CrossEntropyLoss",
"torch.nn.Sigmoid",
"torch.nn.SmoothL1Loss",
"torch.optim.Adam",
"torch.nn.init.uniform",
"torch.nn.init.constant_",
"torch.nn.BCELoss",
"torch.nn.functional.mse_loss",
"torch.manual_seed",
"torch.nn.functional.cross_entropy",
"torch.nn.Tanh",
"torch.nn.init.xavier_uniform_",
"torch.nn.ReLU",
"torch.nn.MSELoss",
"torch.nn.init.kaiming_uniform",
"torch.nn.init.normal"
]
] |
stevesolun/nlp-architect-1
|
[
"d2c1ea15ce20517af52d32ac87e5ccd009b4a620",
"d2c1ea15ce20517af52d32ac87e5ccd009b4a620"
] |
[
"nlp_architect/models/transformers/sequence_classification.py",
"examples/sparse_gnmt/gnmt/model.py"
] |
[
"# ******************************************************************************\n# Copyright 2017-2019 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ******************************************************************************\n\nimport logging\nimport os\nfrom typing import List, Union\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader, SequentialSampler, TensorDataset\nfrom transformers import (\n BertForSequenceClassification,\n RobertaForSequenceClassification,\n XLMForSequenceClassification,\n XLNetForSequenceClassification,\n)\n\nfrom nlp_architect.data.sequence_classification import SequenceClsInputExample\nfrom nlp_architect.models.transformers.base_model import InputFeatures, TransformerBase\nfrom nlp_architect.models.transformers.quantized_bert import QuantizedBertForSequenceClassification\nfrom nlp_architect.utils.metrics import accuracy\n\nlogger = logging.getLogger(__name__)\n\n\nclass TransformerSequenceClassifier(TransformerBase):\n \"\"\"\n Transformer sequence classifier\n\n Args:\n model_type (str): transformer base model type\n labels (List[str], optional): list of labels. Defaults to None.\n task_type (str, optional): task type (classification/regression). Defaults to\n classification.\n metric_fn ([type], optional): metric to use for evaluation. 
Defaults to\n simple_accuracy.\n \"\"\"\n\n MODEL_CLASS = {\n \"bert\": BertForSequenceClassification,\n \"quant_bert\": QuantizedBertForSequenceClassification,\n \"xlnet\": XLNetForSequenceClassification,\n \"xlm\": XLMForSequenceClassification,\n \"roberta\": RobertaForSequenceClassification,\n }\n\n def __init__(\n self,\n model_type: str,\n labels: List[str] = None,\n task_type=\"classification\",\n metric_fn=accuracy,\n load_quantized=False,\n *args,\n **kwargs,\n ):\n assert model_type in self.MODEL_CLASS.keys(), \"unsupported model type\"\n self.labels = labels\n self.num_labels = len(labels)\n super(TransformerSequenceClassifier, self).__init__(\n model_type, labels=labels, num_labels=self.num_labels, *args, **kwargs\n )\n self.model_class = self.MODEL_CLASS[model_type]\n if model_type == \"quant_bert\" and load_quantized:\n self.model = self.model_class.from_pretrained(\n self.model_name_or_path,\n from_tf=bool(\".ckpt\" in self.model_name_or_path),\n config=self.config,\n from_8bit=load_quantized,\n )\n else:\n self.model = self.model_class.from_pretrained(\n self.model_name_or_path,\n from_tf=bool(\".ckpt\" in self.model_name_or_path),\n config=self.config,\n )\n self.task_type = task_type\n self.metric_fn = metric_fn\n self.to(self.device, self.n_gpus)\n\n def train(\n self,\n train_data_set: DataLoader,\n dev_data_set: Union[DataLoader, List[DataLoader]] = None,\n test_data_set: Union[DataLoader, List[DataLoader]] = None,\n gradient_accumulation_steps: int = 1,\n per_gpu_train_batch_size: int = 8,\n max_steps: int = -1,\n num_train_epochs: int = 3,\n max_grad_norm: float = 1.0,\n logging_steps: int = 50,\n save_steps: int = 100,\n ):\n \"\"\"\n Train a model\n\n Args:\n train_data_set (DataLoader): training data set\n dev_data_set (Union[DataLoader, List[DataLoader]], optional): development set.\n Defaults to None.\n test_data_set (Union[DataLoader, List[DataLoader]], optional): test set.\n Defaults to None.\n gradient_accumulation_steps (int, optional): num of gradient accumulation steps.\n Defaults to 1.\n per_gpu_train_batch_size (int, optional): per GPU train batch size. Defaults to 8.\n max_steps (int, optional): max steps. Defaults to -1.\n num_train_epochs (int, optional): number of train epochs. Defaults to 3.\n max_grad_norm (float, optional): max gradient normalization. Defaults to 1.0.\n logging_steps (int, optional): number of steps between logging. Defaults to 50.\n save_steps (int, optional): number of steps between model save. 
Defaults to 100.\n \"\"\"\n self._train(\n train_data_set,\n dev_data_set,\n test_data_set,\n gradient_accumulation_steps,\n per_gpu_train_batch_size,\n max_steps,\n num_train_epochs,\n max_grad_norm,\n logging_steps=logging_steps,\n save_steps=save_steps,\n )\n\n def evaluate_predictions(self, logits, label_ids):\n \"\"\"\n Run evaluation of given logits and truth labels\n\n Args:\n logits: model logits\n label_ids: truth label ids\n \"\"\"\n preds = self._postprocess_logits(logits)\n label_ids = label_ids.numpy()\n result = self.metric_fn(preds, label_ids)\n try:\n output_eval_file = os.path.join(self.output_path, \"eval_results.txt\")\n except TypeError:\n output_eval_file = os.devnull\n with open(output_eval_file, \"w\") as writer:\n logger.info(\"***** Evaluation results *****\")\n for key in sorted(result.keys()):\n logger.info(\" %s = %s\", key, str(result[key]))\n writer.write(\"%s = %s\\n\" % (key, str(result[key])))\n\n def convert_to_tensors(\n self,\n examples: List[SequenceClsInputExample],\n max_seq_length: int = 128,\n include_labels: bool = True,\n ) -> TensorDataset:\n \"\"\"\n Convert examples to tensor dataset\n\n Args:\n examples (List[SequenceClsInputExample]): examples\n max_seq_length (int, optional): max sequence length. Defaults to 128.\n include_labels (bool, optional): include labels. Defaults to True.\n\n Returns:\n TensorDataset:\n \"\"\"\n features = self._convert_examples_to_features(\n examples,\n max_seq_length,\n self.tokenizer,\n self.task_type,\n include_labels,\n pad_on_left=bool(self.model_type in [\"xlnet\"]),\n pad_token=self.tokenizer.convert_tokens_to_ids([self.tokenizer.pad_token])[0],\n pad_token_segment_id=4 if self.model_type in [\"xlnet\"] else 0,\n )\n # Convert to Tensors and build dataset\n all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)\n all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)\n all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)\n if include_labels:\n if self.task_type == \"classification\":\n all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)\n elif self.task_type == \"regression\":\n all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.float)\n return TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)\n return TensorDataset(all_input_ids, all_input_mask, all_segment_ids)\n\n def inference(\n self,\n examples: List[SequenceClsInputExample],\n max_seq_length: int,\n batch_size: int = 64,\n evaluate=False,\n ):\n \"\"\"\n Run inference on given examples\n\n Args:\n examples (List[SequenceClsInputExample]): examples\n batch_size (int, optional): batch size. 
Defaults to 64.\n\n Returns:\n logits\n \"\"\"\n data_set = self.convert_to_tensors(\n examples, max_seq_length=max_seq_length, include_labels=evaluate\n )\n inf_sampler = SequentialSampler(data_set)\n inf_dataloader = DataLoader(data_set, sampler=inf_sampler, batch_size=batch_size)\n logits = self._evaluate(inf_dataloader)\n if not evaluate:\n preds = self._postprocess_logits(logits)\n else:\n logits, label_ids = logits\n preds = self._postprocess_logits(logits)\n self.evaluate_predictions(logits, label_ids)\n return preds\n\n def _postprocess_logits(self, logits):\n preds = logits.numpy()\n if self.task_type == \"classification\":\n preds = np.argmax(preds, axis=1)\n elif self.task_type == \"regression\":\n preds = np.squeeze(preds)\n return preds\n\n def _convert_examples_to_features(\n self,\n examples,\n max_seq_length,\n tokenizer,\n task_type,\n include_labels=True,\n pad_on_left=False,\n pad_token=0,\n pad_token_segment_id=0,\n mask_padding_with_zero=True,\n ):\n \"\"\"Loads a data file into a list of `InputBatch`s\n `cls_token_at_end` define the location of the CLS token:\n - False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]\n - True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]\n `cls_token_segment_id` define the segment id associated to the CLS token\n (0 for BERT, 2 for XLNet)\n \"\"\"\n\n if include_labels:\n label_map = {label: i for i, label in enumerate(self.labels)}\n\n features = []\n for (ex_index, example) in enumerate(examples):\n if ex_index % 10000 == 0:\n logger.info(\"Writing example %d of %d\", ex_index, len(examples))\n\n inputs = tokenizer.encode_plus(\n example.text,\n example.text_b,\n add_special_tokens=True,\n max_length=max_seq_length,\n )\n input_ids, token_type_ids = inputs[\"input_ids\"], inputs[\"token_type_ids\"]\n\n attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)\n\n padding_length = max_seq_length - len(input_ids)\n if pad_on_left:\n input_ids = ([pad_token] * padding_length) + input_ids\n attention_mask = (\n [0 if mask_padding_with_zero else 1] * padding_length\n ) + attention_mask\n token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids\n else:\n input_ids = input_ids + ([pad_token] * padding_length)\n attention_mask = attention_mask + (\n [0 if mask_padding_with_zero else 1] * padding_length\n )\n token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)\n\n assert len(input_ids) == max_seq_length\n assert len(attention_mask) == max_seq_length\n assert len(token_type_ids) == max_seq_length\n\n if include_labels:\n if task_type == \"classification\":\n label_id = label_map[example.label]\n elif task_type == \"regression\":\n label_id = float(example.label)\n else:\n raise KeyError(task_type)\n else:\n label_id = None\n\n features.append(\n InputFeatures(\n input_ids=input_ids,\n input_mask=attention_mask,\n segment_ids=token_type_ids,\n label_id=label_id,\n )\n )\n return features\n",
"# ******************************************************************************\n# Copyright 2017-2018 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ******************************************************************************\n# Changes Made from original:\n# import paths\n# pruning operations\n# ******************************************************************************\n# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# pylint: skip-file\n\"\"\"Basic sequence-to-sequence model with dynamic RNN support.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\nimport collections\n\nimport tensorflow as tf\nfrom tensorflow.contrib.model_pruning import get_masks, get_thresholds\nfrom tensorflow.contrib.model_pruning.python import pruning\nfrom tensorflow.contrib.model_pruning.python.layers import core_layers\n\nfrom . 
import model_helper\nfrom .utils import misc_utils as utils, iterator_utils\n\nutils.check_tensorflow_version()\n\n__all__ = [\"BaseModel\", \"Model\"]\n\n\nclass TrainOutputTuple(\n collections.namedtuple(\n \"TrainOutputTuple\",\n (\n \"train_summary\",\n \"train_loss\",\n \"predict_count\",\n \"global_step\",\n \"word_count\",\n \"batch_size\",\n \"grad_norm\",\n \"learning_rate\",\n \"mask_update_op\",\n \"pruning_summary\",\n ),\n )\n):\n \"\"\"To allow for flexibily in returing different outputs.\"\"\"\n\n pass\n\n\nclass EvalOutputTuple(\n collections.namedtuple(\"EvalOutputTuple\", (\"eval_loss\", \"predict_count\", \"batch_size\"))\n):\n \"\"\"To allow for flexibily in returing different outputs.\"\"\"\n\n pass\n\n\nclass InferOutputTuple(\n collections.namedtuple(\n \"InferOutputTuple\", (\"infer_logits\", \"infer_summary\", \"sample_id\", \"sample_words\")\n )\n):\n \"\"\"To allow for flexibily in returing different outputs.\"\"\"\n\n pass\n\n\nclass BaseModel(object):\n \"\"\"Sequence-to-sequence base class.\"\"\"\n\n def __init__(\n self,\n hparams,\n mode,\n iterator,\n source_vocab_table,\n target_vocab_table,\n reverse_target_vocab_table=None,\n scope=None,\n extra_args=None,\n ):\n \"\"\"Create the model.\n\n Args:\n hparams: Hyperparameter configurations.\n mode: TRAIN | EVAL | INFER\n iterator: Dataset Iterator that feeds data.\n source_vocab_table: Lookup table mapping source words to ids.\n target_vocab_table: Lookup table mapping target words to ids.\n reverse_target_vocab_table: Lookup table mapping ids to target words. Only\n required in INFER mode. Defaults to None.\n scope: scope of the model.\n extra_args: model_helper.ExtraArgs, for passing customizable functions.\n\n \"\"\"\n # Set params\n self._set_params_initializer(\n hparams, mode, iterator, source_vocab_table, target_vocab_table, scope, extra_args\n )\n\n # Not used in general seq2seq models; when True, ignore decoder & training\n self.extract_encoder_layers = (\n hasattr(hparams, \"extract_encoder_layers\") and hparams.extract_encoder_layers\n )\n\n # Train graph\n res = self.build_graph(hparams, scope=scope)\n if not self.extract_encoder_layers:\n self._set_train_or_infer(res, reverse_target_vocab_table, hparams)\n\n # Saver\n self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=hparams.num_keep_ckpts)\n\n def _set_params_initializer(\n self,\n hparams,\n mode,\n iterator,\n source_vocab_table,\n target_vocab_table,\n scope,\n extra_args=None,\n ):\n \"\"\"Set various params for self and initialize.\"\"\"\n assert isinstance(iterator, iterator_utils.BatchedInput)\n self.iterator = iterator\n self.mode = mode\n self.src_vocab_table = source_vocab_table\n self.tgt_vocab_table = target_vocab_table\n\n self.src_vocab_size = hparams.src_vocab_size\n self.tgt_vocab_size = hparams.tgt_vocab_size\n self.num_gpus = hparams.num_gpus\n self.time_major = hparams.time_major\n\n if hparams.use_char_encode:\n assert not self.time_major, \"Can't use time major for\" \" char-level inputs.\"\n\n self.dtype = tf.float32\n self.num_sampled_softmax = hparams.num_sampled_softmax\n\n # extra_args: to make it flexible for adding external customizable code\n self.single_cell_fn = None\n if extra_args:\n self.single_cell_fn = extra_args.single_cell_fn\n\n # Set num units\n self.num_units = hparams.num_units\n\n # Set num layers\n self.num_encoder_layers = hparams.num_encoder_layers\n self.num_decoder_layers = hparams.num_decoder_layers\n assert self.num_encoder_layers\n assert self.num_decoder_layers\n\n # Set num 
residual layers\n if hasattr(hparams, \"num_residual_layers\"): # compatible common_test_utils\n self.num_encoder_residual_layers = hparams.num_residual_layers\n self.num_decoder_residual_layers = hparams.num_residual_layers\n else:\n self.num_encoder_residual_layers = hparams.num_encoder_residual_layers\n self.num_decoder_residual_layers = hparams.num_decoder_residual_layers\n\n # Batch size\n self.batch_size = tf.size(self.iterator.source_sequence_length)\n\n # Global step\n self.global_step = tf.Variable(0, trainable=False)\n\n # Initializer\n self.random_seed = hparams.random_seed\n initializer = model_helper.get_initializer(\n hparams.init_op, self.random_seed, hparams.init_weight\n )\n tf.get_variable_scope().set_initializer(initializer)\n\n # Embeddings\n if extra_args and extra_args.encoder_emb_lookup_fn:\n self.encoder_emb_lookup_fn = extra_args.encoder_emb_lookup_fn\n else:\n self.encoder_emb_lookup_fn = tf.nn.embedding_lookup\n self.init_embeddings(hparams, scope)\n\n def _set_train_or_infer(self, res, reverse_target_vocab_table, hparams):\n \"\"\"Set up training and inference.\"\"\"\n if self.mode == tf.contrib.learn.ModeKeys.TRAIN:\n self.train_loss = res[1]\n self.word_count = tf.reduce_sum(self.iterator.source_sequence_length) + tf.reduce_sum(\n self.iterator.target_sequence_length\n )\n elif self.mode == tf.contrib.learn.ModeKeys.EVAL:\n self.eval_loss = res[1]\n elif self.mode == tf.contrib.learn.ModeKeys.INFER:\n self.infer_logits, _, self.final_context_state, self.sample_id = res\n self.sample_words = reverse_target_vocab_table.lookup(tf.to_int64(self.sample_id))\n\n if self.mode != tf.contrib.learn.ModeKeys.INFER:\n # Count the number of predicted words for compute ppl.\n self.predict_count = tf.reduce_sum(self.iterator.target_sequence_length)\n\n params = tf.trainable_variables()\n\n # Gradients and SGD update operation for training the model.\n # Arrange for the embedding vars to appear at the beginning.\n if self.mode == tf.contrib.learn.ModeKeys.TRAIN:\n self.learning_rate = tf.constant(hparams.learning_rate)\n # warm-up\n self.learning_rate = self._get_learning_rate_warmup(hparams)\n # decay\n self.learning_rate = self._get_learning_rate_decay(hparams)\n\n # Optimizer\n if hparams.optimizer == \"sgd\":\n opt = tf.train.GradientDescentOptimizer(self.learning_rate)\n elif hparams.optimizer == \"adam\":\n opt = tf.train.AdamOptimizer(self.learning_rate)\n else:\n raise ValueError(\"Unknown optimizer type %s\" % hparams.optimizer)\n\n # Gradients\n gradients = tf.gradients(\n self.train_loss,\n params,\n colocate_gradients_with_ops=hparams.colocate_gradients_with_ops,\n )\n\n clipped_grads, grad_norm_summary, grad_norm = model_helper.gradient_clip(\n gradients, max_gradient_norm=hparams.max_gradient_norm\n )\n self.grad_norm_summary = grad_norm_summary\n self.grad_norm = grad_norm\n\n self.update = opt.apply_gradients(\n zip(clipped_grads, params), global_step=self.global_step\n )\n\n # Summary\n self.train_summary = self._get_train_summary()\n elif self.mode == tf.contrib.learn.ModeKeys.INFER:\n self.infer_summary = self._get_infer_summary(hparams)\n\n # Print trainable variables\n utils.print_out(\"# Trainable variables\")\n utils.print_out(\"Format: <name>, <shape>, <(soft) device placement>\")\n for param in params:\n utils.print_out(\" %s, %s, %s\" % (param.name, str(param.get_shape()), param.op.device))\n\n def _get_learning_rate_warmup(self, hparams):\n \"\"\"Get learning rate warmup.\"\"\"\n warmup_steps = hparams.warmup_steps\n warmup_scheme = 
hparams.warmup_scheme\n utils.print_out(\n \" learning_rate=%g, warmup_steps=%d, warmup_scheme=%s\"\n % (hparams.learning_rate, warmup_steps, warmup_scheme)\n )\n\n # Apply inverse decay if global steps less than warmup steps.\n # Inspired by https://arxiv.org/pdf/1706.03762.pdf (Section 5.3)\n # When step < warmup_steps,\n # learing_rate *= warmup_factor ** (warmup_steps - step)\n if warmup_scheme == \"t2t\":\n # 0.01^(1/warmup_steps): we start with a lr, 100 times smaller\n warmup_factor = tf.exp(tf.log(0.01) / warmup_steps)\n inv_decay = warmup_factor ** (tf.to_float(warmup_steps - self.global_step))\n else:\n raise ValueError(\"Unknown warmup scheme %s\" % warmup_scheme)\n\n return tf.cond(\n self.global_step < hparams.warmup_steps,\n lambda: inv_decay * self.learning_rate,\n lambda: self.learning_rate,\n name=\"learning_rate_warump_cond\",\n )\n\n def _get_decay_info(self, hparams):\n \"\"\"Return decay info based on decay_scheme.\"\"\"\n if hparams.decay_scheme in [\"luong5\", \"luong10\", \"luong234\"]:\n decay_factor = 0.5\n if hparams.decay_scheme == \"luong5\":\n start_decay_step = int(hparams.num_train_steps / 2)\n decay_times = 5\n elif hparams.decay_scheme == \"luong10\":\n start_decay_step = int(hparams.num_train_steps / 2)\n decay_times = 10\n elif hparams.decay_scheme == \"luong234\":\n start_decay_step = int(hparams.num_train_steps * 2 / 3)\n decay_times = 4\n remain_steps = hparams.num_train_steps - start_decay_step\n decay_steps = int(remain_steps / decay_times)\n elif not hparams.decay_scheme: # no decay\n start_decay_step = hparams.num_train_steps\n decay_steps = 0\n decay_factor = 1.0\n elif hparams.decay_scheme:\n raise ValueError(\"Unknown decay scheme %s\" % hparams.decay_scheme)\n return start_decay_step, decay_steps, decay_factor\n\n def _get_learning_rate_decay(self, hparams):\n \"\"\"Get learning rate decay.\"\"\"\n start_decay_step, decay_steps, decay_factor = self._get_decay_info(hparams)\n utils.print_out(\n \" decay_scheme=%s, start_decay_step=%d, decay_steps %d, \"\n \"decay_factor %g\" % (hparams.decay_scheme, start_decay_step, decay_steps, decay_factor)\n )\n\n return tf.cond(\n self.global_step < start_decay_step,\n lambda: self.learning_rate,\n lambda: tf.train.exponential_decay(\n self.learning_rate,\n (self.global_step - start_decay_step),\n decay_steps,\n decay_factor,\n staircase=True,\n ),\n name=\"learning_rate_decay_cond\",\n )\n\n def init_embeddings(self, hparams, scope):\n \"\"\"Init embeddings.\"\"\"\n (\n self.embedding_encoder,\n self.embedding_decoder,\n ) = model_helper.create_emb_for_encoder_and_decoder(\n share_vocab=hparams.share_vocab,\n src_vocab_size=self.src_vocab_size,\n tgt_vocab_size=self.tgt_vocab_size,\n src_embed_size=self.num_units,\n tgt_embed_size=self.num_units,\n num_enc_partitions=hparams.num_enc_emb_partitions,\n num_dec_partitions=hparams.num_dec_emb_partitions,\n src_vocab_file=hparams.src_vocab_file,\n tgt_vocab_file=hparams.tgt_vocab_file,\n src_embed_file=hparams.src_embed_file,\n tgt_embed_file=hparams.tgt_embed_file,\n use_char_encode=hparams.use_char_encode,\n scope=scope,\n embed_type=hparams.embedding_type,\n )\n\n def _get_train_summary(self):\n \"\"\"Get train summary.\"\"\"\n train_summary = tf.summary.merge(\n [\n tf.summary.scalar(\"lr\", self.learning_rate),\n tf.summary.scalar(\"train_loss\", self.train_loss),\n ]\n + self.grad_norm_summary\n )\n return train_summary\n\n def train(self, sess):\n \"\"\"Execute train graph.\"\"\"\n assert self.mode == tf.contrib.learn.ModeKeys.TRAIN\n output_tuple = 
TrainOutputTuple(\n train_summary=self.train_summary,\n train_loss=self.train_loss,\n predict_count=self.predict_count,\n global_step=self.global_step,\n word_count=self.word_count,\n batch_size=self.batch_size,\n grad_norm=self.grad_norm,\n learning_rate=self.learning_rate,\n mask_update_op=self.mask_update_op,\n pruning_summary=self.pruning_summary,\n )\n return sess.run([self.update, output_tuple])\n\n def eval(self, sess):\n \"\"\"Execute eval graph.\"\"\"\n assert self.mode == tf.contrib.learn.ModeKeys.EVAL\n output_tuple = EvalOutputTuple(\n eval_loss=self.eval_loss, predict_count=self.predict_count, batch_size=self.batch_size\n )\n return sess.run(output_tuple)\n\n def build_graph(self, hparams, scope=None):\n \"\"\"Subclass must implement this method.\n\n Creates a sequence-to-sequence model with dynamic RNN decoder API.\n Args:\n hparams: Hyperparameter configurations.\n scope: VariableScope for the created subgraph; default \"dynamic_seq2seq\".\n\n Returns:\n A tuple of the form (logits, loss_tuple, final_context_state, sample_id),\n where:\n logits: float32 Tensor [batch_size x num_decoder_symbols].\n loss: loss = the total loss / batch_size.\n final_context_state: the final state of decoder RNN.\n sample_id: sampling indices.\n\n Raises:\n ValueError: if encoder_type differs from mono and bi, or\n attention_option is not (luong | scaled_luong |\n bahdanau | normed_bahdanau).\n \"\"\"\n utils.print_out(\"# Creating %s graph ...\" % self.mode)\n\n # Projection\n if not self.extract_encoder_layers:\n with tf.variable_scope(scope or \"build_network\"):\n with tf.variable_scope(\"decoder/output_projection\"):\n if hparams.projection_type == \"sparse\":\n self.output_layer = core_layers.MaskedFullyConnected(\n hparams.tgt_vocab_size, use_bias=False, name=\"output_projection\"\n )\n elif hparams.projection_type == \"dense\":\n self.output_layer = tf.layers.Dense(\n hparams.tgt_vocab_size, use_bias=False, name=\"output_projection\"\n )\n else:\n raise ValueError(\"Unknown projection type %s!\" % hparams.projection_type)\n\n with tf.variable_scope(scope or \"dynamic_seq2seq\", dtype=self.dtype):\n # Encoder\n if hparams.language_model: # no encoder for language modeling\n utils.print_out(\" language modeling: no encoder\")\n self.encoder_outputs = None\n encoder_state = None\n else:\n self.encoder_outputs, encoder_state = self._build_encoder(hparams)\n\n # Skip decoder if extracting only encoder layers\n if self.extract_encoder_layers:\n return\n\n # Decoder\n logits, decoder_cell_outputs, sample_id, final_context_state = self._build_decoder(\n self.encoder_outputs, encoder_state, hparams\n )\n\n # Loss\n if self.mode != tf.contrib.learn.ModeKeys.INFER:\n with tf.device(\n model_helper.get_device_str(self.num_encoder_layers - 1, self.num_gpus)\n ):\n loss = self._compute_loss(logits, decoder_cell_outputs)\n else:\n loss = tf.constant(0.0)\n\n # model pruning\n if hparams.pruning_hparams is not None:\n pruning_hparams = pruning.get_pruning_hparams().parse(hparams.pruning_hparams)\n self.p = pruning.Pruning(pruning_hparams, global_step=self.global_step)\n self.mask_update_op = self.p.conditional_mask_update_op()\n masks = get_masks()\n thresholds = get_thresholds()\n masks_s = []\n for index, mask in enumerate(masks):\n masks_s.append(\n tf.summary.scalar(mask.name + \"/sparsity\", tf.nn.zero_fraction(mask))\n )\n masks_s.append(\n tf.summary.scalar(\n thresholds[index].op.name + \"/threshold\", thresholds[index]\n )\n )\n masks_s.append(tf.summary.histogram(mask.name + \"/mask_tensor\", 
mask))\n self.pruning_summary = tf.summary.merge(\n [\n tf.summary.scalar(\"sparsity\", self.p._sparsity),\n tf.summary.scalar(\"last_mask_update_step\", self.p._last_update_step),\n ]\n + masks_s\n )\n else:\n self.mask_update_op = tf.no_op()\n self.pruning_summary = tf.no_op()\n\n return logits, loss, final_context_state, sample_id\n\n @abc.abstractmethod\n def _build_encoder(self, hparams):\n \"\"\"Subclass must implement this.\n\n Build and run an RNN encoder.\n\n Args:\n hparams: Hyperparameters configurations.\n\n Returns:\n A tuple of encoder_outputs and encoder_state.\n \"\"\"\n pass\n\n def _build_encoder_cell(self, hparams, num_layers, num_residual_layers, base_gpu=0):\n \"\"\"Build a multi-layer RNN cell that can be used by encoder.\"\"\"\n\n return model_helper.create_rnn_cell(\n unit_type=hparams.unit_type,\n num_units=self.num_units,\n num_layers=num_layers,\n num_residual_layers=num_residual_layers,\n forget_bias=hparams.forget_bias,\n dropout=hparams.dropout,\n num_gpus=hparams.num_gpus,\n mode=self.mode,\n base_gpu=base_gpu,\n single_cell_fn=self.single_cell_fn,\n )\n\n def _get_infer_maximum_iterations(self, hparams, source_sequence_length):\n \"\"\"Maximum decoding steps at inference time.\"\"\"\n if hparams.tgt_max_len_infer:\n maximum_iterations = hparams.tgt_max_len_infer\n utils.print_out(\" decoding maximum_iterations %d\" % maximum_iterations)\n else:\n # TODO(thangluong): add decoding_length_factor flag\n decoding_length_factor = 2.0\n max_encoder_length = tf.reduce_max(source_sequence_length)\n maximum_iterations = tf.to_int32(\n tf.round(tf.to_float(max_encoder_length) * decoding_length_factor)\n )\n return maximum_iterations\n\n def _build_decoder(self, encoder_outputs, encoder_state, hparams):\n \"\"\"Build and run a RNN decoder with a final projection layer.\n\n Args:\n encoder_outputs: The outputs of encoder for every time step.\n encoder_state: The final state of the encoder.\n hparams: The Hyperparameters configurations.\n\n Returns:\n A tuple of final logits and final decoder state:\n logits: size [time, batch_size, vocab_size] when time_major=True.\n \"\"\"\n tgt_sos_id = tf.cast(self.tgt_vocab_table.lookup(tf.constant(hparams.sos)), tf.int32)\n tgt_eos_id = tf.cast(self.tgt_vocab_table.lookup(tf.constant(hparams.eos)), tf.int32)\n iterator = self.iterator\n\n # maximum_iteration: The maximum decoding steps.\n maximum_iterations = self._get_infer_maximum_iterations(\n hparams, iterator.source_sequence_length\n )\n\n # Decoder.\n with tf.variable_scope(\"decoder\") as decoder_scope:\n cell, decoder_initial_state = self._build_decoder_cell(\n hparams, encoder_outputs, encoder_state, iterator.source_sequence_length\n )\n\n # Optional ops depends on which mode we are in and which loss function we\n # are using.\n logits = tf.no_op()\n decoder_cell_outputs = None\n\n # Train or eval\n if self.mode != tf.contrib.learn.ModeKeys.INFER:\n # decoder_emp_inp: [max_time, batch_size, num_units]\n target_input = iterator.target_input\n if self.time_major:\n target_input = tf.transpose(target_input)\n decoder_emb_inp = tf.nn.embedding_lookup(self.embedding_decoder, target_input)\n\n # Helper\n helper = tf.contrib.seq2seq.TrainingHelper(\n decoder_emb_inp, iterator.target_sequence_length, time_major=self.time_major\n )\n\n # Decoder\n my_decoder = tf.contrib.seq2seq.BasicDecoder(\n cell,\n helper,\n decoder_initial_state,\n )\n\n # Dynamic decoding\n outputs, final_context_state, _ = tf.contrib.seq2seq.dynamic_decode(\n my_decoder,\n output_time_major=self.time_major,\n 
swap_memory=True,\n scope=decoder_scope,\n )\n\n sample_id = outputs.sample_id\n\n if self.num_sampled_softmax > 0:\n # Note: this is required when using sampled_softmax_loss.\n decoder_cell_outputs = outputs.rnn_output\n\n # Note: there's a subtle difference here between train and inference.\n # We could have set output_layer when create my_decoder\n # and shared more code between train and inference.\n # We chose to apply the output_layer to all timesteps for speed:\n # 10% improvements for small models & 20% for larger ones.\n # If memory is a concern, we should apply output_layer per timestep.\n num_layers = self.num_decoder_layers\n num_gpus = self.num_gpus\n device_id = num_layers if num_layers < num_gpus else (num_layers - 1)\n # Colocate output layer with the last RNN cell if there is no extra GPU\n # available. Otherwise, put last layer on a separate GPU.\n with tf.device(model_helper.get_device_str(device_id, num_gpus)):\n logits = self.output_layer(outputs.rnn_output)\n\n if self.num_sampled_softmax > 0:\n logits = tf.no_op() # unused when using sampled softmax loss.\n\n # Inference\n else:\n infer_mode = hparams.infer_mode\n start_tokens = tf.fill([self.batch_size], tgt_sos_id)\n end_token = tgt_eos_id\n utils.print_out(\n \" decoder: infer_mode=%sbeam_width=%d, length_penalty=%f\"\n % (infer_mode, hparams.beam_width, hparams.length_penalty_weight)\n )\n\n if infer_mode == \"beam_search\":\n beam_width = hparams.beam_width\n length_penalty_weight = hparams.length_penalty_weight\n\n my_decoder = tf.contrib.seq2seq.BeamSearchDecoder(\n cell=cell,\n embedding=self.embedding_decoder,\n start_tokens=start_tokens,\n end_token=end_token,\n initial_state=decoder_initial_state,\n beam_width=beam_width,\n output_layer=self.output_layer,\n length_penalty_weight=length_penalty_weight,\n )\n elif infer_mode == \"sample\":\n # Helper\n sampling_temperature = hparams.sampling_temperature\n assert sampling_temperature > 0.0, (\n \"sampling_temperature must greater than 0.0 when using sample\" \" decoder.\"\n )\n helper = tf.contrib.seq2seq.SampleEmbeddingHelper(\n self.embedding_decoder,\n start_tokens,\n end_token,\n softmax_temperature=sampling_temperature,\n seed=self.random_seed,\n )\n elif infer_mode == \"greedy\":\n helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(\n self.embedding_decoder, start_tokens, end_token\n )\n else:\n raise ValueError(\"Unknown infer_mode '%s'\", infer_mode)\n\n if infer_mode != \"beam_search\":\n my_decoder = tf.contrib.seq2seq.BasicDecoder(\n cell,\n helper,\n decoder_initial_state,\n output_layer=self.output_layer, # applied per timestep\n )\n\n # Dynamic decoding\n outputs, final_context_state, _ = tf.contrib.seq2seq.dynamic_decode(\n my_decoder,\n maximum_iterations=maximum_iterations,\n output_time_major=self.time_major,\n swap_memory=True,\n scope=decoder_scope,\n )\n\n if infer_mode == \"beam_search\":\n sample_id = outputs.predicted_ids\n else:\n logits = outputs.rnn_output\n sample_id = outputs.sample_id\n\n return logits, decoder_cell_outputs, sample_id, final_context_state\n\n def get_max_time(self, tensor):\n time_axis = 0 if self.time_major else 1\n return tensor.shape[time_axis].value or tf.shape(tensor)[time_axis]\n\n @abc.abstractmethod\n def _build_decoder_cell(self, hparams, encoder_outputs, encoder_state, source_sequence_length):\n \"\"\"Subclass must implement this.\n\n Args:\n hparams: Hyperparameters configurations.\n encoder_outputs: The outputs of encoder for every time step.\n encoder_state: The final state of the encoder.\n 
source_sequence_length: sequence length of encoder_outputs.\n\n Returns:\n A tuple of a multi-layer RNN cell used by decoder and the intial state of\n the decoder RNN.\n \"\"\"\n pass\n\n def _softmax_cross_entropy_loss(self, logits, decoder_cell_outputs, labels):\n \"\"\"Compute softmax loss or sampled softmax loss.\"\"\"\n if self.num_sampled_softmax > 0:\n\n is_sequence = decoder_cell_outputs.shape.ndims == 3\n\n if is_sequence:\n labels = tf.reshape(labels, [-1, 1])\n inputs = tf.reshape(decoder_cell_outputs, [-1, self.num_units])\n\n crossent = tf.nn.sampled_softmax_loss(\n weights=tf.transpose(self.output_layer.kernel),\n biases=self.output_layer.bias or tf.zeros([self.tgt_vocab_size]),\n labels=labels,\n inputs=inputs,\n num_sampled=self.num_sampled_softmax,\n num_classes=self.tgt_vocab_size,\n partition_strategy=\"div\",\n seed=self.random_seed,\n )\n\n if is_sequence:\n if self.time_major:\n crossent = tf.reshape(crossent, [-1, self.batch_size])\n else:\n crossent = tf.reshape(crossent, [self.batch_size, -1])\n\n else:\n crossent = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)\n\n return crossent\n\n def _compute_loss(self, logits, decoder_cell_outputs):\n \"\"\"Compute optimization loss.\"\"\"\n target_output = self.iterator.target_output\n if self.time_major:\n target_output = tf.transpose(target_output)\n max_time = self.get_max_time(target_output)\n\n crossent = self._softmax_cross_entropy_loss(logits, decoder_cell_outputs, target_output)\n\n target_weights = tf.sequence_mask(\n self.iterator.target_sequence_length, max_time, dtype=self.dtype\n )\n if self.time_major:\n target_weights = tf.transpose(target_weights)\n\n loss = tf.reduce_sum(crossent * target_weights) / tf.to_float(self.batch_size)\n return loss\n\n def _get_infer_summary(self, hparams):\n del hparams\n return tf.no_op()\n\n def infer(self, sess):\n assert self.mode == tf.contrib.learn.ModeKeys.INFER\n output_tuple = InferOutputTuple(\n infer_logits=self.infer_logits,\n infer_summary=self.infer_summary,\n sample_id=self.sample_id,\n sample_words=self.sample_words,\n )\n return sess.run(output_tuple)\n\n def decode(self, sess):\n \"\"\"Decode a batch.\n\n Args:\n sess: tensorflow session to use.\n\n Returns:\n A tuple consiting of outputs, infer_summary.\n outputs: of size [batch_size, time]\n \"\"\"\n output_tuple = self.infer(sess)\n sample_words = output_tuple.sample_words\n infer_summary = output_tuple.infer_summary\n\n # make sure outputs is of shape [batch_size, time] or [beam_width,\n # batch_size, time] when using beam search.\n if self.time_major:\n sample_words = sample_words.transpose()\n elif sample_words.ndim == 3:\n # beam search output in [batch_size, time, beam_width] shape.\n sample_words = sample_words.transpose([2, 0, 1])\n return sample_words, infer_summary\n\n def build_encoder_states(self, include_embeddings=False):\n \"\"\"Stack encoder states and return tensor [batch, length, layer, size].\"\"\"\n assert self.mode == tf.contrib.learn.ModeKeys.INFER\n if include_embeddings:\n stack_state_list = tf.stack([self.encoder_emb_inp] + self.encoder_state_list, 2)\n else:\n stack_state_list = tf.stack(self.encoder_state_list, 2)\n\n # transform from [length, batch, ...] 
-> [batch, length, ...]\n if self.time_major:\n stack_state_list = tf.transpose(stack_state_list, [1, 0, 2, 3])\n\n return stack_state_list\n\n\nclass Model(BaseModel):\n \"\"\"Sequence-to-sequence dynamic model.\n\n This class implements a multi-layer recurrent neural network as encoder,\n and a multi-layer recurrent neural network decoder.\n \"\"\"\n\n def _build_encoder_from_sequence(self, hparams, sequence, sequence_length):\n \"\"\"Build an encoder from a sequence.\n\n Args:\n hparams: hyperparameters.\n sequence: tensor with input sequence data.\n sequence_length: tensor with length of the input sequence.\n\n Returns:\n encoder_outputs: RNN encoder outputs.\n encoder_state: RNN encoder state.\n\n Raises:\n ValueError: if encoder_type is neither \"uni\" nor \"bi\".\n \"\"\"\n num_layers = self.num_encoder_layers\n num_residual_layers = self.num_encoder_residual_layers\n\n if self.time_major:\n sequence = tf.transpose(sequence)\n\n with tf.variable_scope(\"encoder\") as scope:\n dtype = scope.dtype\n\n self.encoder_emb_inp = self.encoder_emb_lookup_fn(self.embedding_encoder, sequence)\n\n # Encoder_outputs: [max_time, batch_size, num_units]\n if hparams.encoder_type == \"uni\":\n utils.print_out(\n \" num_layers = %d, num_residual_layers=%d\" % (num_layers, num_residual_layers)\n )\n cell = self._build_encoder_cell(hparams, num_layers, num_residual_layers)\n\n encoder_outputs, encoder_state = tf.nn.dynamic_rnn(\n cell,\n self.encoder_emb_inp,\n dtype=dtype,\n sequence_length=sequence_length,\n time_major=self.time_major,\n swap_memory=True,\n )\n elif hparams.encoder_type == \"bi\":\n num_bi_layers = int(num_layers / 2)\n num_bi_residual_layers = int(num_residual_layers / 2)\n utils.print_out(\n \" num_bi_layers = %d, num_bi_residual_layers=%d\"\n % (num_bi_layers, num_bi_residual_layers)\n )\n\n encoder_outputs, bi_encoder_state = self._build_bidirectional_rnn(\n inputs=self.encoder_emb_inp,\n sequence_length=sequence_length,\n dtype=dtype,\n hparams=hparams,\n num_bi_layers=num_bi_layers,\n num_bi_residual_layers=num_bi_residual_layers,\n )\n\n if num_bi_layers == 1:\n encoder_state = bi_encoder_state\n else:\n # alternatively concat forward and backward states\n encoder_state = []\n for layer_id in range(num_bi_layers):\n encoder_state.append(bi_encoder_state[0][layer_id]) # forward\n encoder_state.append(bi_encoder_state[1][layer_id]) # backward\n encoder_state = tuple(encoder_state)\n else:\n raise ValueError(\"Unknown encoder_type %s\" % hparams.encoder_type)\n\n # Use the top layer for now\n self.encoder_state_list = [encoder_outputs]\n\n return encoder_outputs, encoder_state\n\n def _build_encoder(self, hparams):\n \"\"\"Build encoder from source.\"\"\"\n utils.print_out(\"# Build a basic encoder\")\n return self._build_encoder_from_sequence(\n hparams, self.iterator.source, self.iterator.source_sequence_length\n )\n\n def _build_bidirectional_rnn(\n self,\n inputs,\n sequence_length,\n dtype,\n hparams,\n num_bi_layers,\n num_bi_residual_layers,\n base_gpu=0,\n ):\n \"\"\"Create and call biddirectional RNN cells.\n\n Args:\n num_residual_layers: Number of residual layers from top to bottom. For\n example, if `num_bi_layers=4` and `num_residual_layers=2`, the last 2 RNN\n layers in each RNN cell will be wrapped with `ResidualWrapper`.\n base_gpu: The gpu device id to use for the first forward RNN layer. The\n i-th forward RNN layer will use `(base_gpu + i) % num_gpus` as its\n device id. 
The `base_gpu` for backward RNN cell is `(base_gpu +\n num_bi_layers)`.\n\n Returns:\n The concatenated bidirectional output and the bidirectional RNN cell\"s\n state.\n \"\"\"\n # Construct forward and backward cells\n fw_cell = self._build_encoder_cell(\n hparams, num_bi_layers, num_bi_residual_layers, base_gpu=base_gpu\n )\n bw_cell = self._build_encoder_cell(\n hparams, num_bi_layers, num_bi_residual_layers, base_gpu=(base_gpu + num_bi_layers)\n )\n\n bi_outputs, bi_state = tf.nn.bidirectional_dynamic_rnn(\n fw_cell,\n bw_cell,\n inputs,\n dtype=dtype,\n sequence_length=sequence_length,\n time_major=self.time_major,\n swap_memory=True,\n )\n\n return tf.concat(bi_outputs, -1), bi_state\n\n def _build_decoder_cell(\n self, hparams, encoder_outputs, encoder_state, source_sequence_length, base_gpu=0\n ):\n \"\"\"Build an RNN cell that can be used by decoder.\"\"\"\n # We only make use of encoder_outputs in attention-based models\n if hparams.attention:\n raise ValueError(\"BasicModel doesn't support attention.\")\n\n cell = model_helper.create_rnn_cell(\n unit_type=hparams.unit_type,\n num_units=self.num_units,\n num_layers=self.num_decoder_layers,\n num_residual_layers=self.num_decoder_residual_layers,\n forget_bias=hparams.forget_bias,\n dropout=hparams.dropout,\n num_gpus=self.num_gpus,\n mode=self.mode,\n single_cell_fn=self.single_cell_fn,\n base_gpu=base_gpu,\n )\n\n if hparams.language_model:\n encoder_state = cell.zero_state(self.batch_size, self.dtype)\n elif not hparams.pass_hidden_state:\n raise ValueError(\n \"For non-attentional model, \" \"pass_hidden_state needs to be set to True\"\n )\n\n # For beam search, we need to replicate encoder infos beam_width times\n if self.mode == tf.contrib.learn.ModeKeys.INFER and hparams.infer_mode == \"beam_search\":\n decoder_initial_state = tf.contrib.seq2seq.tile_batch(\n encoder_state, multiplier=hparams.beam_width\n )\n else:\n decoder_initial_state = encoder_state\n\n return cell, decoder_initial_state\n"
] |
[
[
"torch.utils.data.TensorDataset",
"torch.utils.data.SequentialSampler",
"torch.utils.data.DataLoader",
"numpy.squeeze",
"torch.tensor",
"numpy.argmax"
],
[
"tensorflow.cond",
"tensorflow.nn.dynamic_rnn",
"tensorflow.concat",
"tensorflow.zeros",
"tensorflow.reduce_sum",
"tensorflow.stack",
"tensorflow.global_variables",
"tensorflow.nn.bidirectional_dynamic_rnn",
"tensorflow.contrib.model_pruning.get_thresholds",
"tensorflow.train.AdamOptimizer",
"tensorflow.summary.scalar",
"tensorflow.to_int64",
"tensorflow.Variable",
"tensorflow.gradients",
"tensorflow.train.exponential_decay",
"tensorflow.to_float",
"tensorflow.contrib.seq2seq.SampleEmbeddingHelper",
"tensorflow.trainable_variables",
"tensorflow.contrib.seq2seq.BasicDecoder",
"tensorflow.fill",
"tensorflow.contrib.model_pruning.python.pruning.Pruning",
"tensorflow.shape",
"tensorflow.layers.Dense",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.no_op",
"tensorflow.contrib.model_pruning.python.layers.core_layers.MaskedFullyConnected",
"tensorflow.contrib.model_pruning.python.pruning.get_pruning_hparams",
"tensorflow.sequence_mask",
"tensorflow.size",
"tensorflow.nn.embedding_lookup",
"tensorflow.summary.histogram",
"tensorflow.contrib.seq2seq.TrainingHelper",
"tensorflow.contrib.seq2seq.dynamic_decode",
"tensorflow.reduce_max",
"tensorflow.contrib.seq2seq.GreedyEmbeddingHelper",
"tensorflow.constant",
"tensorflow.transpose",
"tensorflow.reshape",
"tensorflow.contrib.seq2seq.BeamSearchDecoder",
"tensorflow.contrib.seq2seq.tile_batch",
"tensorflow.nn.zero_fraction",
"tensorflow.contrib.model_pruning.get_masks",
"tensorflow.log",
"tensorflow.variable_scope",
"tensorflow.get_variable_scope"
]
] |
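
The `_get_learning_rate_warmup` / `_get_learning_rate_decay` pair in the `nmt` model code above amounts to a closed-form schedule: a t2t-style inverse-decay warmup followed by a staircase exponential decay. A minimal plain-Python sketch of that schedule, assuming illustrative hyperparameter values (the `base_lr`, `warmup_steps`, and decay settings below are placeholders, not the repo's defaults):

```python
import math

def nmt_learning_rate(step, base_lr=1.0, warmup_steps=4000,
                      start_decay_step=50000, decay_steps=5000, decay_factor=0.5):
    """Plain-Python sketch of the schedule built by the TF graph ops above:
    t2t warmup, then tf.train.exponential_decay(..., staircase=True)."""
    # t2t warmup: start 100x smaller than base_lr and grow up over warmup_steps.
    warmup_factor = math.exp(math.log(0.01) / warmup_steps)
    if step < warmup_steps:
        return base_lr * warmup_factor ** (warmup_steps - step)
    # Staircase decay: halve (decay_factor) every decay_steps after start_decay_step.
    if step < start_decay_step:
        return base_lr
    return base_lr * decay_factor ** ((step - start_decay_step) // decay_steps)

# nmt_learning_rate(0) ~= 0.01 * base_lr, nmt_learning_rate(4000) == base_lr
```

At `step == warmup_steps` the two branches agree at `base_lr`, so the schedule is continuous at the handover point.
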
coolo/Character-level-cnn-pytorch
|
[
"954fe0e53a1c403b0ec1a886e3d6ef66e36df981"
] |
[
"classify-server.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\n@author: Viet Nguyen <nhviet1009@gmail.com>\n\"\"\"\nimport os\nimport sys\nimport json\nimport glob\nimport argparse\nimport socket\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\nimport shutil\nimport http.server\nimport socketserver\nimport logging\n\nfrom src.utils import *\nfrom src.dataset import MyDataset\nfrom src.character_level_cnn import CharacterLevelCNN\n\n\ndef get_args():\n parser = argparse.ArgumentParser(\n \"\"\"Implementation of the model described in the paper: Character-level convolutional networks for text classification\"\"\")\n parser.add_argument(\"-a\", \"--alphabet\", type=str,\n default=\"\"\"abcdefghijklmnopqrstuvwxyz0123456789,;.!?:'\\\"/\\\\|_@#$%^&*~`+-=<>()[]{}\"\"\")\n parser.add_argument(\"-m\", \"--max_length\", type=int, default=1014)\n parser.add_argument(\"-f\", \"--feature\", type=str, choices=[\"large\", \"small\"], default=\"small\",\n help=\"small for 256 conv feature map, large for 1024 conv feature map\")\n parser.add_argument(\"-i\", \"--input\", type=str, default=\"input\", help=\"path to input folder\")\n args = parser.parse_args()\n return args\n\nmodel = None\n\nclass ClassifyRequestHandler(http.server.BaseHTTPRequestHandler):\n def do_POST(self):\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n test_set = MyDataset(None, data=post_data, max_length=1014)\n\n # it should be possible to fetch te_feature from test_set without going\n # through dataloader, but I was unable to find the magic call\n test_generator = DataLoader(test_set)\n te_feature, te_label = iter(test_generator).next()\n\n model.eval()\n if torch.cuda.is_available():\n te_feature = te_feature.cuda()\n with torch.no_grad():\n te_predictions = model(te_feature)\n out = F.softmax(te_predictions, 1)\n weight = torch.argmax(out[0])\n weighti = int(out[0][1].item() * 1000)\n print(True if weight == 1 else False, weighti)\n response = { 'license': (True if weight == 1 else False) }\n if weight == 1:\n response['confidence'] = (weighti - 500) / 5\n else:\n response['confidence'] = (500 - weighti) / 5\n self.wfile.write(json.dumps(response).encode('utf-8'))\n\n \ndef serve(opt):\n global model\n if torch.cuda.is_available():\n model = torch.load(opt.input)\n else:\n model = torch.load(opt.input, map_location='cpu')\n\n PORT = 5000\n\n Handler = ClassifyRequestHandler\n\n with socketserver.TCPServer((\"\", PORT), Handler) as httpd:\n print(\"serving at port\", PORT)\n try:\n httpd.serve_forever()\n except KeyboardInterrupt:\n pass\n httpd.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n httpd.shutdown()\n httpd.server_close()\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO)\n opt = get_args()\n serve(opt)\n"
] |
[
[
"torch.nn.functional.softmax",
"torch.load",
"torch.utils.data.DataLoader",
"torch.no_grad",
"torch.cuda.is_available",
"torch.argmax"
]
] |
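
The `do_POST` handler in the `classify-server.py` row above turns a two-class softmax output into a JSON payload with a "confidence" scaled off the 500 midpoint. A stripped-down sketch of just that response-building step, assuming the model and `DataLoader` plumbing already produced the class-1 probability as a plain float (`build_license_response` is a hypothetical helper name, not part of the repo):

```python
import json

def build_license_response(p_license: float) -> str:
    """Mirror the response logic above: scale the class-1 probability to
    0..1000 and report distance from the 500 midpoint, in percent."""
    weighti = int(p_license * 1000)
    is_license = weighti >= 500  # effectively the two-class argmax
    response = {"license": is_license}
    response["confidence"] = (weighti - 500) / 5 if is_license else (500 - weighti) / 5
    return json.dumps(response)

# build_license_response(0.75) -> '{"license": true, "confidence": 50.0}'
```
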
sagarsetru/msd_analysis_floA_floT
|
[
"9e880d4a107fef44063faec7de32ea38175782ef"
] |
[
"dataParserScript_v2.py"
] |
[
"# Sagar Setru\n# 2016 07 14\n\nimport pandas as pd\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages\nimport itertools\n\n\n#def loadDataFile( baseDir, subBaseDir, fileName):\n# \"helper function to load bed files\"\n# return pd.read_csv(baseDir+subBaseDir+'/'+fileName, sep='\\t', header=None)\n##...\n\ndef compute_MSD(path):\n totalsize=len(path)\n msd=[]\n for i in range(totalsize-1):\n j=i+1\n msd.append(np.sum((path[0:-j]-path[j::])**2)/float(totalsize-j))\n #...\n msd=np.array(msd)\n return msd\n#...\n#http://stackoverflow.com/questions/26472653/computing-the-mean-square-displacement-of-a-2d-random-walk-in-python\n#%%\ndef compute_vel(path,t):\n totalsize=len(path)\n velx=[]\n vely=[]\n for i in range(totalsize-1):\n j=i+1\n delta_t = t[j]-t[i]\n velx.append( (path[j][0]-path[i][0]) / delta_t)\n vely.append( (path[j][1]-path[i][1]) / delta_t)\n #...\n velx=np.array(velx)\n vely=np.array(vely)\n return velx, vely, delta_t\n\ndef compute_VAC(velx,vely):\n totalsize = len(velx)\n vac=[]\n for tau in range(totalsize):\n tau=tau+1\n deltaCoords = np.multiply(velx[0+tau:totalsize],velx[0:totalsize-tau]) + np.multiply(vely[0+tau:totalsize],vely[0:totalsize-tau])\n val = np.sum(deltaCoords) # dx^2+dy^2+dz^2\n vac.append(np.mean(val)) # average\n # print velx[1+tau:totalsize]\n # print ' '\n # print deltaCoords\n # print ' '\n # print tau\n #...\n vac = np.array(vac)\n return vac\n\n# directory to save analysis\nsaveDir = \"/Users/sagarsetru/Documents/Princeton/wingreen/for NED/analysis/\"\n\n# =1 if saving msd, vac data\ndoSave = 1\n# =1 to display files that get saved\nprintFilesSaved = 0\n# =1 if you want to display ID, directory\nverbose = 0\n# =1 if plotting\ndoPlot = 0\n\n#=1 if setting a maximum tau\nsetMax = 0\n#set maximum number of time steps to plot, will only use if setMax == 1\nmaxTau = 10\n\n#base directory for data\n#baseDir = \"/Users/sagarsetru/Documents/Princeton/wingreen/for NED/\"\n#\n#subBaseDir_a1 = \"FloA/1/\"\n#subBaseDir_a2 = \"FloA/2/\"\n#subBaseDir_a3 = \"FloA/3/\"\n#subBaseDir_t1 = \"FloT/1/\"\n#subBaseDir_t2 = \"FloT/2/\"\n#subBaseDir_t3 = \"FloT/3/\"\n#subBaseDirs = [\"FloA/1/\", \"FloA/2/\", \"FloA/3/\", \"FloT/1/\", \"FloT/2/\", \"FloT/3/\"]\n##subBaseDirs = [\"FloA/1/\", \"FloA/2/\",]\n#\n#fileNames_tracks = [\"01FloA-tracks.xls\",\n#\"02FloA-tracks.xls\",\n#\"03FloA-tracks.xls\",\n#\"01FloT-tracks.xls\",\n#\"02FloT-tracks.xls\",\n#\"03FloT-tracks.xls\"]\n# \n#fileNames_spots = [\"01FloA-spots.xls\",\n#\"02FloA-spots.xls\",\n#\"03FloA-spots.xls\",\n#\"01FloT-spots.xls\",\n#\"02FloT-spots.xls\",\n#\"03FloT-spots.xls\"]\n\n#fnameSpots = '/Volumes/Extreme SSD/lipidDomainData/files_spots.txt'\n#fnameTracks = '/Volumes/Extreme SSD/lipidDomainData/files_tracks.txt'\n\n\n#fnameSpots = '/Volumes/Extreme SSD/lipidDomainData/Sagar MSD/files_spots.txt'\n#fnameTracks = '/Volumes/Extreme SSD/lipidDomainData/Sagar MSD/files_tracks.txt'\n#\n#fnameSpots = '/Volumes/Extreme SSD/lipidDomainData/Sagar MSD/Flotillins/files_fig13e_spots.txt'\n#fnameTracks = '/Volumes/Extreme SSD/lipidDomainData/Sagar MSD/Flotillins/files_fig13e_tracks.txt'\n\nfnameTracks = '/Volumes/Extreme SSD/lipidDomainData/Sagar MSD/Flotillins/files_fig13e_ctrl_tracks.txt'\nfnameSpots = '/Volumes/Extreme SSD/lipidDomainData/Sagar MSD/Flotillins/files_fig13e_ctrl_spots.txt'\n\n#fnameSpots = '/Volumes/Extreme SSD/lipidDomainData/Sagar MSD/files_spots_ctrlVal.txt'\n#fnameTracks = '/Volumes/Extreme SSD/lipidDomainData/Sagar 
MSD/files_tracks_ctrlVal.txt'\n\n#fnameSpots = '/Volumes/Extreme SSD/lipidDomainData/Sagar MSD/files_spots_ugt.txt'\n#fnameTracks = '/Volumes/Extreme SSD/lipidDomainData/Sagar MSD/files_tracks_ugt.txt'\n\n#fnameSpots = '/Volumes/Extreme SSD/lipidDomainData/Sagar MSD/files2_spots.txt'\n#fnameTracks = '/Volumes/Extreme SSD/lipidDomainData/Sagar MSD/files2_tracks.txt'\n#fnameSpots = '/Volumes/Extreme SSD/lipidDomainData/Sagar MSD/files3_spots.txt'\n#fnameTracks = '/Volumes/Extreme SSD/lipidDomainData/Sagar MSD/files3_tracks.txt'\nbaseDirString = '/Volumes/Extreme SSD/lipidDomainData/Sagar MSD/'\n\n# from https://stackoverflow.com/questions/3277503/how-to-read-a-file-line-by-line-into-a-list\nwith open(fnameSpots) as f:\n txtfilesSpots = f.readlines()\ntxtfilesSpots = [x.strip() for x in txtfilesSpots]\ntxtfilesSpots.sort()\n\nwith open(fnameTracks) as f:\n txtfilesTracks = f.readlines()\ntxtfilesTracks = [x.strip() for x in txtfilesTracks]\ntxtfilesTracks.sort()\n\n#%%\ncounter1 = 0\ncounterCorrupted = 0\n\n# loop to fill dictionaries\n#for subBaseDir in subBaseDirs:\nfor spotFile, trackFile in zip(txtfilesSpots,txtfilesTracks):\n\n counter1 += 1\n \n \n# if counter1 == 255:\n# continue\n# #...\n# \n# if counter1 == 256:\n# continue\n# #...\n# \n# if counter1 == 257:\n# continue\n# #...\n# \n# if counter1 == 258:\n# continue\n# #...\n# \n# if counter1 == 259:\n# continue\n# #...\n \n #get current file names\n# fileName_track = fileNames_tracks[counter1]\n# fileName_spot = fileNames_spots[counter1]\n \n # load track file\n# flo_tracks = loadDataFile( baseDir, subBaseDir, fileName_track )\n flo_tracks = pd.read_csv(trackFile, header=None)\n \n # load spot file\n# flo_spots = loadDataFile( baseDir, subBaseDir, fileName_spot )\n flo_spots = pd.read_csv(spotFile,header=None)\n \n # get paths of each file, make sure they are the same\n fPathTrack = os.path.split(trackFile)[0]\n fPathSpot = os.path.split(spotFile)[0]\n# print(fPathTrack)\n# print(fPathSpot)\n \n # get number of the movie within this replicate\n trackMovieNum = os.path.split(trackFile)[1][0]\n spotMovieNum = os.path.split(spotFile)[1][0]\n \n # if they are equal, then move on, otherwise there is an error\n if fPathTrack == fPathSpot and trackMovieNum == spotMovieNum:\n \n fPath = fPathTrack\n \n subBaseDir = fPath[len(baseDirString):]\n \n movieNum = trackMovieNum\n \n keyForDict = fPath+movieNum\n\n else:\n \n if fPathTrack != fPathSpot:\n \n print('Paths of spots and tracks files are not identical! Fix.')\n \n print('Path for track file:')\n print(fPathTrack)\n \n print('Path for spot file:')\n print(fPathSpot)\n \n #...\n \n if trackMovieNum != spotMovieNum:\n \n print('Movie numbers within this replicate are not the same! 
Fix.')\n \n print('Track movie number:')\n print(trackMovieNum)\n \n print('Spot movie number:')\n print(spotMovieNum)\n \n #...\n \n continue\n #...\n \n print('Working on: ')\n print(keyForDict)\n print('file '+str(counter1)+' out of '+str(len(txtfilesSpots)))\n print(' ')\n \n # get track ID from spot file\n# ID_spots = flo_spots.loc[1:,3]\n ID_spots = flo_spots.loc[1:,2]\n\n # get ndarray of unique IDs\n ID_unique = ID_spots.unique()\n # get number of unique IDs\n n_ID = ID_unique.size\n \n # get x position from spot file\n# x_spots = flo_spots.loc[1:,5]\n x_spots = flo_spots.loc[1:,4]\n \n # get y position from spot file\n# y_spots = flo_spots.loc[1:,6]\n y_spots = flo_spots.loc[1:,5]\n \n # get time stamp from spot file\n# t_spots = flo_spots.loc[1:,8]\n t_spots = flo_spots.loc[1:,7]\n\n counter_IDs = -1\n # loop through unique IDs\n for ID in ID_unique:\n counter_IDs += 1\n if verbose == 1:\n# print(subBaseDir)\n print('Path:')\n print(fPath)\n print()\n print('Movie number:')\n print(movieNum)\n print()\n print('ID:')\n print(ID)\n #...\n \n ID_indices = ID_spots[ID_spots == ID].index.tolist()\n # get x, y, t values\n x_df = x_spots[ID_indices]\n y_df = y_spots[ID_indices]\n t_df = t_spots[ID_indices]\n # get x y positions as a list of tuples [ [x1,y1],[x2,y2],...]\n xy = []\n t=[]\n for IDind in ID_indices:\n x_val = float(x_df[IDind])\n y_val = float(y_df[IDind])\n t_val = float(t_df[IDind])\n xy.append([x_val,y_val])\n t.append([t_val])\n #...\n # convert to numpy array\n xy=np.array(xy)\n t=np.array(t)\n\n # skip if only going up to a certain length\n if setMax == 1:\n if len(t) > maxTau:\n# print('skipping')\n continue\n #...\n #...\n \n # skip if the track is just 1 time point long\n if len(xy) <= 1:\n continue\n #...\n\n # check if there are repeat times\n repeatInds = np.setdiff1d(np.arange(len(t)), np.unique(t, return_index=True)[1])\n if not repeatInds.size:\n corrupted = 0\n else:\n corrupted = 1\n counterCorrupted += 1\n #print 'corrupted'\n #print ID\n# print('skipping')\n continue\n \n # calculate the MSD\n msd = compute_MSD(xy)\n\n # get velocities, delta_t\n velx, vely, delta_t = compute_vel(xy,t)\n \n# if counter1 == 1:\n# break\n #...\n\n # get velocity velocity autocorrelation\n vac = compute_VAC(velx,vely)\n \n # get tau values for msd\n tau_msd = np.multiply(np.array(range(len(msd)))+1,delta_t)\n \n # get tau values for vac\n tau_vac = np.multiply(np.array(range(len(vac)))+1,delta_t)\n \n msd_and_tau = np.vstack((tau_msd,msd)).T\n \n vac_and_tau = np.vstack((tau_vac,vac)).T\n\n #plt.plot(msd)\n #plt.plot(vac)\n #print vac\n if doSave == 1:\n #move to analysis directory for saving data\n os.chdir(saveDir)\n\n #make directory to save files\n if not os.path.exists(subBaseDir):\n os.makedirs(subBaseDir)\n #...\n os.chdir(subBaseDir)\n #save MSD to csv file\n if printFilesSaved == 1:\n print('Saving msd_track'+ID+'.csv')\n #...\n np.savetxt('msd_track'+ID+'.csv',msd_and_tau,delimiter=',')\n\n #save VAC to csv file\n if printFilesSaved == 1:\n print('Saving vac_track'+ID+'.csv')\n #...\n np.savetxt('vac_track'+ID+'.csv',vac_and_tau,delimiter=',')\n #...\n\n # do plotting\n if doPlot == 1:\n #move to analysis directory for saving plots\n os.chdir(saveDir)\n\n #make directory to save files\n if not os.path.exists(subBaseDir):\n os.makedirs(subBaseDir)\n #...\n os.chdir(subBaseDir)\n \n plt.figure(0)\n plt.plot(np.multiply(np.array(range(len(vac)))+1,delta_t),vac)\n plt.xlabel(r'$\\tau$',fontsize=18)\n plt.ylabel('VAC',fontsize=18)\n plt.tick_params(axis='both', 
which='major', labelsize=15)\n\n\n plt.figure(1)\n plt.plot(np.multiply(np.array(range(len(msd)))+1,delta_t),msd)\n plt.xlabel(r'$\\tau$',fontsize=18)\n plt.ylabel('MSD',fontsize=18)\n plt.tick_params(axis='both', which='major', labelsize=15)\n \n if doPlot == 1:\n if setMax == 1:\n plt.figure(0)\n plt.savefig('vac_'+str(maxTau)+'tau'+fileName_spot[0:6]+'.pdf')\n plt.clf()\n\n plt.figure(1)\n plt.savefig('msd_'+str(maxTau)+'tau'+fileName_spot[0:6]+'.pdf')\n plt.clf()\n else:\n plt.figure(0)\n plt.savefig('vac_all'+fileName_spot[0:6]+'.pdf')\n plt.clf()\n\n plt.figure(1)\n plt.savefig('msd_all'+fileName_spot[0:6]+'.pdf')\n plt.clf()\n\n #with PdfPages('vac_curves.pdf') as pdf:\n # plt.figure(0)\n # pdf.savefig()\n # plt.close()\n \n #with PdfPages('msd_curves.pdf') as pdf:\n # plt.figure(1)\n # pdf.savefig()\n # plt.close() \n #plt.style.use('ggplot')\n\n"
] |
[
[
"pandas.read_csv",
"numpy.multiply",
"numpy.unique",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.clf",
"numpy.mean",
"numpy.savetxt",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.tick_params",
"numpy.sum",
"numpy.vstack",
"matplotlib.pyplot.figure"
]
] |
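
The `compute_MSD` routine in the `dataParserScript_v2.py` row above averages squared displacements over all start times for each lag. A self-contained sanity check of that formula on a synthetic 2D random walk (the walk, seed, and length below are arbitrary choices for illustration):

```python
import numpy as np

# MSD(tau) = mean over t of |r(t + tau) - r(t)|^2, same quantity compute_MSD
# returns above. For an unbiased walk with unit steps in x and y, it should
# grow roughly linearly, about 2 * tau.
rng = np.random.default_rng(0)
path = np.cumsum(rng.choice([-1.0, 1.0], size=(1000, 2)), axis=0)

def msd(path):
    n = len(path)
    return np.array([np.mean(np.sum((path[tau:] - path[:-tau]) ** 2, axis=1))
                     for tau in range(1, n)])

curve = msd(path)
print(curve[:5])  # first few lags; expect values near 2, 4, 6, 8, 10
```
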
mohi7solanki/causalml
|
[
"73e41837a8edb7220de1e2d102157000b2ccd476"
] |
[
"tests/test_meta_learners.py"
] |
[
"import numpy as np\nimport pandas as pd\n\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nfrom xgboost import XGBRegressor\nfrom xgboost import XGBClassifier\n\nfrom causalml.dataset import synthetic_data\nfrom causalml.dataset import make_uplift_classification\nfrom causalml.inference.meta import BaseSLearner, BaseSRegressor, BaseSClassifier, LRSRegressor\nfrom causalml.inference.meta import BaseTLearner, BaseTRegressor, BaseTClassifier, XGBTRegressor, MLPTRegressor\nfrom causalml.inference.meta import BaseXLearner, BaseXClassifier, BaseXRegressor\nfrom causalml.inference.meta import BaseRLearner, BaseRClassifier, BaseRRegressor\nfrom causalml.metrics import ape, gini, get_cumgain\n\nfrom .const import RANDOM_SEED, N_SAMPLE, ERROR_THRESHOLD, CONTROL_NAME, TREATMENT_NAMES, CONVERSION\n\n\ndef test_synthetic_data():\n y, X, treatment, tau, b, e = synthetic_data(mode=1, n=N_SAMPLE, p=8, sigma=.1)\n\n assert (y.shape[0] == X.shape[0] and y.shape[0] == treatment.shape[0] and\n y.shape[0] == tau.shape[0] and y.shape[0] == b.shape[0] and\n y.shape[0] == e.shape[0])\n\n y, X, treatment, tau, b, e = synthetic_data(mode=2, n=N_SAMPLE, p=8, sigma=.1)\n\n assert (y.shape[0] == X.shape[0] and y.shape[0] == treatment.shape[0] and\n y.shape[0] == tau.shape[0] and y.shape[0] == b.shape[0] and\n y.shape[0] == e.shape[0])\n\n y, X, treatment, tau, b, e = synthetic_data(mode=3, n=N_SAMPLE, p=8, sigma=.1)\n\n assert (y.shape[0] == X.shape[0] and y.shape[0] == treatment.shape[0] and\n y.shape[0] == tau.shape[0] and y.shape[0] == b.shape[0] and\n y.shape[0] == e.shape[0])\n\n y, X, treatment, tau, b, e = synthetic_data(mode=4, n=N_SAMPLE, p=8, sigma=.1)\n\n assert (y.shape[0] == X.shape[0] and y.shape[0] == treatment.shape[0] and\n y.shape[0] == tau.shape[0] and y.shape[0] == b.shape[0] and\n y.shape[0] == e.shape[0])\n\n\ndef test_BaseSLearner(generate_regression_data):\n y, X, treatment, tau, b, e = generate_regression_data()\n\n learner = BaseSLearner(learner=LinearRegression())\n\n # check the accuracy of the ATE estimation\n ate_p, lb, ub = learner.estimate_ate(X=X, treatment=treatment, y=y, return_ci=True)\n assert (ate_p >= lb) and (ate_p <= ub)\n assert ape(tau.mean(), ate_p) < ERROR_THRESHOLD\n\n\ndef test_BaseSRegressor(generate_regression_data):\n y, X, treatment, tau, b, e = generate_regression_data()\n\n learner = BaseSRegressor(learner=XGBRegressor())\n\n # check the accuracy of the ATE estimation\n ate_p, lb, ub = learner.estimate_ate(X=X, treatment=treatment, y=y, return_ci=True, n_bootstraps=10)\n assert (ate_p >= lb) and (ate_p <= ub)\n assert ape(tau.mean(), ate_p) < ERROR_THRESHOLD\n\n # check the accuracy of the CATE estimation with the bootstrap CI\n cate_p, _, _ = learner.fit_predict(X=X, treatment=treatment, y=y, return_ci=True, n_bootstraps=10)\n assert gini(tau, cate_p.flatten()) > .5\n\n\ndef test_LRSRegressor(generate_regression_data):\n y, X, treatment, tau, b, e = generate_regression_data()\n\n learner = LRSRegressor()\n\n # check the accuracy of the ATE estimation\n ate_p, lb, ub = learner.estimate_ate(X=X, treatment=treatment, y=y)\n assert (ate_p >= lb) and (ate_p <= ub)\n assert ape(tau.mean(), ate_p) < ERROR_THRESHOLD\n\n\ndef test_BaseTLearner(generate_regression_data):\n y, X, treatment, tau, b, e = generate_regression_data()\n\n learner = BaseTLearner(learner=XGBRegressor())\n\n # check the accuracy of the ATE estimation\n ate_p, lb, ub = 
learner.estimate_ate(X=X, treatment=treatment, y=y)\n assert (ate_p >= lb) and (ate_p <= ub)\n assert ape(tau.mean(), ate_p) < ERROR_THRESHOLD\n\n # check the accuracy of the CATE estimation with the bootstrap CI\n cate_p, _, _ = learner.fit_predict(X=X, treatment=treatment, y=y, return_ci=True, n_bootstraps=10)\n assert gini(tau, cate_p.flatten()) > .5\n\n\ndef test_BaseTRegressor(generate_regression_data):\n y, X, treatment, tau, b, e = generate_regression_data()\n\n learner = BaseTRegressor(learner=XGBRegressor())\n\n # check the accuracy of the ATE estimation\n ate_p, lb, ub = learner.estimate_ate(X=X, treatment=treatment, y=y)\n assert (ate_p >= lb) and (ate_p <= ub)\n assert ape(tau.mean(), ate_p) < ERROR_THRESHOLD\n\n # check the accuracy of the CATE estimation with the bootstrap CI\n cate_p, _, _ = learner.fit_predict(X=X, treatment=treatment, y=y, return_ci=True, n_bootstraps=10)\n assert gini(tau, cate_p.flatten()) > .5\n\n\ndef test_MLPTRegressor(generate_regression_data):\n y, X, treatment, tau, b, e = generate_regression_data()\n\n learner = MLPTRegressor()\n\n # check the accuracy of the ATE estimation\n ate_p, lb, ub = learner.estimate_ate(X=X, treatment=treatment, y=y)\n assert (ate_p >= lb) and (ate_p <= ub)\n assert ape(tau.mean(), ate_p) < ERROR_THRESHOLD\n\n # check the accuracy of the CATE estimation with the bootstrap CI\n cate_p, _, _ = learner.fit_predict(X=X, treatment=treatment, y=y, return_ci=True, n_bootstraps=10)\n assert gini(tau, cate_p.flatten()) > .5\n\n\ndef test_XGBTRegressor(generate_regression_data):\n y, X, treatment, tau, b, e = generate_regression_data()\n\n learner = XGBTRegressor()\n\n # check the accuracy of the ATE estimation\n ate_p, lb, ub = learner.estimate_ate(X=X, treatment=treatment, y=y)\n assert (ate_p >= lb) and (ate_p <= ub)\n assert ape(tau.mean(), ate_p) < ERROR_THRESHOLD\n\n # check the accuracy of the CATE estimation with the bootstrap CI\n cate_p, _, _ = learner.fit_predict(X=X, treatment=treatment, y=y, return_ci=True, n_bootstraps=10)\n assert gini(tau, cate_p.flatten()) > .5\n\n\ndef test_BaseXLearner(generate_regression_data):\n y, X, treatment, tau, b, e = generate_regression_data()\n\n learner = BaseXLearner(learner=XGBRegressor())\n\n # check the accuracy of the ATE estimation\n ate_p, lb, ub = learner.estimate_ate(X=X, p=e, treatment=treatment, y=y)\n assert (ate_p >= lb) and (ate_p <= ub)\n assert ape(tau.mean(), ate_p) < ERROR_THRESHOLD\n\n # check the accuracy of the CATE estimation with the bootstrap CI\n cate_p, _, _ = learner.fit_predict(X=X, p=e, treatment=treatment, y=y, return_ci=True, n_bootstraps=10)\n assert gini(tau, cate_p.flatten()) > .5\n\n\ndef test_BaseXRegressor(generate_regression_data):\n y, X, treatment, tau, b, e = generate_regression_data()\n\n learner = BaseXRegressor(learner=XGBRegressor())\n\n # check the accuracy of the ATE estimation\n ate_p, lb, ub = learner.estimate_ate(X=X, p=e, treatment=treatment, y=y)\n assert (ate_p >= lb) and (ate_p <= ub)\n assert ape(tau.mean(), ate_p) < ERROR_THRESHOLD\n\n # check the accuracy of the CATE estimation with the bootstrap CI\n cate_p, _, _ = learner.fit_predict(X=X, p=e, treatment=treatment, y=y, return_ci=True, n_bootstraps=10)\n assert gini(tau, cate_p.flatten()) > .5\n\n\ndef test_BaseRLearner(generate_regression_data):\n y, X, treatment, tau, b, e = generate_regression_data()\n\n learner = BaseRLearner(learner=XGBRegressor())\n\n # check the accuracy of the ATE estimation\n ate_p, lb, ub = learner.estimate_ate(X=X, p=e, treatment=treatment, y=y)\n 
assert (ate_p >= lb) and (ate_p <= ub)\n assert ape(tau.mean(), ate_p) < ERROR_THRESHOLD\n\n # check the accuracy of the CATE estimation with the bootstrap CI\n cate_p, _, _ = learner.fit_predict(X=X, p=e, treatment=treatment, y=y, return_ci=True, n_bootstraps=10)\n assert gini(tau, cate_p.flatten()) > .5\n\n\ndef test_BaseRRegressor(generate_regression_data):\n y, X, treatment, tau, b, e = generate_regression_data()\n\n learner = BaseRRegressor(learner=XGBRegressor())\n\n # check the accuracy of the ATE estimation\n ate_p, lb, ub = learner.estimate_ate(X=X, p=e, treatment=treatment, y=y)\n assert (ate_p >= lb) and (ate_p <= ub)\n assert ape(tau.mean(), ate_p) < ERROR_THRESHOLD\n\n # check the accuracy of the CATE estimation with the bootstrap CI\n cate_p, _, _ = learner.fit_predict(X=X, p=e, treatment=treatment, y=y, return_ci=True, n_bootstraps=10)\n assert gini(tau, cate_p.flatten()) > .5\n\n\ndef test_BaseSClassifier(generate_classification_data):\n\n np.random.seed(RANDOM_SEED)\n\n df, x_names = generate_classification_data()\n\n df['treatment_group_key'] = np.where(df['treatment_group_key'] == CONTROL_NAME, 0, 1)\n\n df_train, df_test = train_test_split(df,\n test_size=0.2,\n random_state=RANDOM_SEED)\n\n uplift_model = BaseSClassifier(learner=XGBClassifier())\n\n uplift_model.fit(X=df_train[x_names].values,\n treatment=df_train['treatment_group_key'].values,\n y=df_train[CONVERSION].values)\n\n y_pred = uplift_model.predict(X=df_test[x_names].values,\n treatment=df_test['treatment_group_key'].values)\n\n auuc_metrics = pd.DataFrame(\n np.c_[y_pred, df_test['treatment_group_key'].values, df_test[CONVERSION].values],\n columns=['y_pred', 'W', CONVERSION])\n\n cumgain = get_cumgain(auuc_metrics,\n outcome_col=CONVERSION,\n treatment_col='W',\n steps=15)\n\n # Check if the cumulative gain when using the model's prediction is\n # higher than it would be under random targeting\n assert cumgain['y_pred'].sum() > cumgain['Random'].sum()\n\n\ndef test_BaseTClassifier(generate_classification_data):\n\n np.random.seed(RANDOM_SEED)\n\n df, x_names = generate_classification_data()\n\n df['treatment_group_key'] = np.where(df['treatment_group_key'] == CONTROL_NAME, 0, 1)\n\n df_train, df_test = train_test_split(df,\n test_size=0.2,\n random_state=RANDOM_SEED)\n\n uplift_model = BaseTClassifier(learner=LogisticRegression())\n\n uplift_model.fit(X=df_train[x_names].values,\n treatment=df_train['treatment_group_key'].values,\n y=df_train[CONVERSION].values)\n\n y_pred = uplift_model.predict(X=df_test[x_names].values,\n treatment=df_test['treatment_group_key'].values)\n\n auuc_metrics = pd.DataFrame(\n np.c_[y_pred, df_test['treatment_group_key'].values, df_test[CONVERSION].values],\n columns=['y_pred', 'W', CONVERSION])\n\n cumgain = get_cumgain(auuc_metrics,\n outcome_col=CONVERSION,\n treatment_col='W',\n steps=15)\n\n # Check if the cumulative gain when using the model's prediction is\n # higher than it would be under random targeting\n assert cumgain['y_pred'].sum() > cumgain['Random'].sum()\n\n\ndef test_BaseXClassifier(generate_classification_data):\n\n np.random.seed(RANDOM_SEED)\n\n df, x_names = generate_classification_data()\n\n df['treatment_group_key'] = np.where(df['treatment_group_key'] == CONTROL_NAME, 0, 1)\n\n propensity_model = LogisticRegression()\n propensity_model.fit(X=df[x_names].values, y=df['treatment_group_key'].values)\n df['propensity_score'] = propensity_model.predict_proba(df[x_names].values)[:, 1]\n\n df_train, df_test = train_test_split(df,\n test_size=0.2,\n 
random_state=RANDOM_SEED)\n\n uplift_model = BaseXClassifier(control_outcome_learner=XGBClassifier(),\n control_effect_learner=XGBRegressor(),\n treatment_outcome_learner=XGBClassifier(),\n treatment_effect_learner=XGBRegressor())\n\n uplift_model.fit(X=df_train[x_names].values,\n treatment=df_train['treatment_group_key'].values,\n y=df_train[CONVERSION].values)\n\n y_pred = uplift_model.predict(X=df_test[x_names].values,\n p=df_test['propensity_score'].values)\n\n auuc_metrics = pd.DataFrame(\n np.c_[y_pred, df_test['treatment_group_key'].values, df_test[CONVERSION].values],\n columns=['y_pred', 'W', CONVERSION])\n\n cumgain = get_cumgain(auuc_metrics,\n outcome_col=CONVERSION,\n treatment_col='W',\n steps=15)\n\n # Check if the cumulative gain when using the model's prediction is\n # higher than it would be under random targeting\n assert cumgain['y_pred'].sum() > cumgain['Random'].sum()\n\n\ndef test_BaseRClassifier(generate_classification_data):\n\n np.random.seed(RANDOM_SEED)\n\n df, x_names = generate_classification_data()\n\n df['treatment_group_key'] = np.where(df['treatment_group_key'] == CONTROL_NAME, 0, 1)\n\n propensity_model = LogisticRegression()\n propensity_model.fit(X=df[x_names].values, y=df['treatment_group_key'].values)\n df['propensity_score'] = propensity_model.predict_proba(df[x_names].values)[:, 1]\n\n df_train, df_test = train_test_split(df,\n test_size=0.2,\n random_state=RANDOM_SEED)\n\n uplift_model = BaseRClassifier(outcome_learner=XGBClassifier(),\n effect_learner=XGBRegressor())\n\n uplift_model.fit(X=df_train[x_names].values,\n p=df_train['propensity_score'].values,\n treatment=df_train['treatment_group_key'].values,\n y=df_train[CONVERSION].values)\n\n y_pred = uplift_model.predict(X=df_test[x_names].values)\n\n auuc_metrics = pd.DataFrame(\n np.c_[y_pred, df_test['treatment_group_key'].values, df_test[CONVERSION].values],\n columns=['y_pred', 'W', CONVERSION])\n\n cumgain = get_cumgain(auuc_metrics,\n outcome_col=CONVERSION,\n treatment_col='W',\n steps=15)\n\n # Check if the cumulative gain when using the model's prediction is\n # higher than it would be under random targeting\n assert cumgain['y_pred'].sum() > cumgain['Random'].sum()\n"
] |
[
[
"sklearn.linear_model.LogisticRegression",
"numpy.random.seed",
"sklearn.model_selection.train_test_split",
"pandas.DataFrame",
"sklearn.linear_model.LinearRegression",
"numpy.where"
]
] |
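
The tests in the `causalml` row above repeat two checks for every learner: the estimated ATE must land inside its confidence interval with a small absolute percentage error, and the cumulative gain under model-ranked targeting must beat random targeting. A rough, simplified sketch of both checks on NumPy arrays; these are stand-ins for illustration, not causalml's own `ape` / `get_cumgain` implementations, and the helper names are made up:

```python
import numpy as np

def ape(actual, predicted):
    """Absolute percentage error between the true and estimated ATE."""
    return np.abs(predicted - actual) / np.abs(actual)

def model_beats_random_targeting(y, w, score, steps=15, seed=0):
    """Accumulate the naive treated-minus-control outcome lift over the top-k
    units ranked by `score`; the model ranking should out-gain a random one."""
    rng = np.random.default_rng(seed)

    def total_gain(order):
        gain = 0.0
        for k in np.linspace(len(y) / steps, len(y), steps, dtype=int):
            top = order[:k]
            treated, control = y[top][w[top] == 1], y[top][w[top] == 0]
            if treated.size and control.size:
                gain += (treated.mean() - control.mean()) * k
        return gain

    return total_gain(np.argsort(-score)) > total_gain(rng.permutation(len(y)))
```
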
ahmetcik/ML-and-LA-from-scratch
|
[
"9718ccb68c228be6cdcb0b14b97f9d0acdfc28ca"
] |
[
"logistic_regression.py"
] |
[
"import numpy as np\n\nclass LogisticRegression(object):\n \n def __init__(self, intercept=True, alpha=0.01, max_steps=2000,):\n self.intercept = intercept\n self.alpha = alpha # learning rate\n self.max_steps = max_steps\n\n def fit(self, X, Y): \n if self.intercept:\n X = np.insert(X, -1, 1., axis=1)\n \n coefs = np.zeros(X.shape[1])\n for _ in range(self.max_steps):\n pred = self.sigmoid(X @ coefs)\n gradient = np.dot(X.T, (pred - Y)) \n coefs -= self.alpha * gradient\n \n self.coefs = coefs\n def predict(self, X): \n if self.intercept:\n X = np.insert(X, -1, 1., axis=1)\n return self.sigmoid(X @ self.coefs).round().astype(int)\n\n def sigmoid(self, x):\n return 1. / (1 + np.exp(-x))\n\nif __name__ == \"__main__\":\n from itertools import product\n import matplotlib.pyplot as plt \n X = [[0,1.2],\n [1,1],\n [1,0],\n [2,0],\n [0,2],\n [1,2],\n [2,3],\n [0,1],\n ]\n X = np.array(X)\n Y = np.array([0, 0, 0, 0, 1,1,1,1])\n\n log = LogisticRegression()\n log.fit(X, Y)\n y_pred = log.predict(X)\n \n\n X_pred = list(product(np.linspace(0, 2, 100), np.linspace(0, 3, 100)))\n X_pred = np.array(X_pred)\n Y_pred = log.predict(X_pred)\n\n plt.scatter(X_pred[:, 0], X_pred[:,1], c=Y_pred, alpha=.3, linewidths=0.)\n plt.scatter(X[:, 0], X[:,1], c=Y, edgecolors='k')\n plt.show()\n\n"
] |
[
[
"numpy.dot",
"matplotlib.pyplot.scatter",
"numpy.linspace",
"numpy.insert",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.show"
]
] |
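The class above trains by plain gradient descent on the log-loss, adding an intercept column via `np.insert(X, -1, 1., axis=1)`. A tiny standalone sketch of the same update rule, assuming only numpy and a made-up two-point toy problem:

```python
# Illustrative sketch of the gradient-descent update used by the
# from-scratch LogisticRegression above, on a toy 2-point problem.
import numpy as np

X = np.array([[0.0, 1.0], [2.0, 0.0]])
y = np.array([1, 0])
X_b = np.insert(X, -1, 1.0, axis=1)  # same intercept handling as the class above

coefs = np.zeros(X_b.shape[1])
for _ in range(2000):
    pred = 1.0 / (1.0 + np.exp(-(X_b @ coefs)))  # sigmoid
    coefs -= 0.01 * (X_b.T @ (pred - y))         # gradient of the log-loss

print(coefs)
print((1.0 / (1.0 + np.exp(-(X_b @ coefs)))).round())  # recovers [1, 0]
```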
HumanCompatibleAI/seals
|
[
"08dc04e1b21e4bd2d83e0447e1d9e40902471961"
] |
[
"src/seals/diagnostics/init_shift.py"
] |
[
"\"\"\"Environment with shift in initial state distribution.\"\"\"\n\nimport functools\nimport itertools\n\nimport numpy as np\n\nfrom seals import base_envs\n\n\nclass InitShiftEnv(base_envs.TabularModelMDP):\n \"\"\"Tests for robustness to initial state shift.\n\n Many LfH algorithms learn from expert demonstrations. This can be\n problematic when the environment the demonstrations were gathered in\n differs even slightly from the learner's environment.\n\n This task illustrates this problem. We have a depth-2 full binary tree\n where the agent moves left or right until reaching a leaf. The expert\n starts at the root s_0, whereas the learner starts at the left branch s_1\n and so can only reach leaves s_3 and s_4. Reward is only given at the\n leaves.\n\n The expert always move to the highest reward leaf s_6, so any algorithm\n that relies on demonstrations will not know whether it is better to go to\n s_3 or s_4. By contrast, feedback such as preference comparison can\n disambiguate this case.\n \"\"\"\n\n def __init__(self, initial_state: int):\n \"\"\"Constructs environment.\n\n Args:\n initial_state: fixed initial state.\n\n Raises:\n ValueError: `initial_state` not in [0,6].\n \"\"\"\n nS = 7\n nA = 2\n\n if not 0 <= initial_state < nS:\n raise ValueError(f\"Initial state {initial_state} must lie in [0,{nS})\")\n\n self._initial_state = initial_state\n\n non_leaves = np.arange(3)\n leaves = np.arange(3, 7)\n\n transition_matrix = np.zeros((nS, nA, nS))\n\n for state, action in itertools.product(non_leaves, range(nA)):\n next_state = 2 * state + 1 + action\n transition_matrix[state, action, next_state] = 1.0\n\n transition_matrix[leaves, :, leaves] = 1.0\n\n reward_matrix = np.zeros((nS,))\n reward_matrix[leaves] = [1, -1, -1, 2]\n\n super().__init__(\n transition_matrix=transition_matrix,\n reward_matrix=reward_matrix,\n )\n\n def initial_state(self) -> int:\n \"\"\"Returns initial state defined in constructor.\"\"\"\n return self._initial_state\n\n\nInitShiftTrainEnv = functools.partial(InitShiftEnv, initial_state=0)\nInitShiftTestEnv = functools.partial(InitShiftEnv, initial_state=1)\n"
] |
[
[
"numpy.arange",
"numpy.zeros"
]
] |
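`InitShiftEnv` above encodes a depth-2 full binary tree: each non-leaf state moves to child `2*state + 1 + action`, leaves are absorbing, and reward lives only at the leaves. A numpy-only sketch of that tabular construction, without the seals dependency:

```python
# Illustrative sketch of the tabular dynamics built by InitShiftEnv above.
import itertools
import numpy as np

nS, nA = 7, 2
non_leaves, leaves = np.arange(3), np.arange(3, 7)

transition_matrix = np.zeros((nS, nA, nS))
for state, action in itertools.product(non_leaves, range(nA)):
    transition_matrix[state, action, 2 * state + 1 + action] = 1.0  # move to child
transition_matrix[leaves, :, leaves] = 1.0                          # leaves are absorbing

reward = np.zeros(nS)
reward[leaves] = [1, -1, -1, 2]  # same leaf rewards as the source

# every (state, action) row is a valid probability distribution
assert transition_matrix.sum(axis=-1).min() == 1.0
```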
nkalupahana/onshape-to-robot
|
[
"fd0f8897131611935ad7e7cd90c3ed60b2bb31f2"
] |
[
"onshape_to_robot/stl_combine.py"
] |
[
"import numpy as np\nimport shutil\nimport math\nimport subprocess\nimport stl\nimport os\nfrom stl import mesh\nfrom colorama import Fore, Back, Style\n\n\ndef load_mesh(stl_file):\n return mesh.Mesh.from_file(stl_file)\n\n\ndef save_mesh(mesh, stl_file):\n mesh.save(stl_file, mode=stl.Mode.BINARY)\n\n\ndef combine_meshes(m1, m2):\n return mesh.Mesh(np.concatenate([m1.data, m2.data]))\n\n\ndef apply_matrix(mesh, matrix):\n rotation = matrix[0:3, 0:3]\n translation = matrix[0:3, 3:4].T.tolist()\n\n def transform(points):\n return (rotation*np.matrix(points).T).T + translation*len(points)\n\n mesh.v0 = transform(mesh.v0)\n mesh.v1 = transform(mesh.v1)\n mesh.v2 = transform(mesh.v2)\n mesh.normals = transform(mesh.normals)\n\n\n# Script taken from doing the needed operation\n# (Filters > Remeshing, Simplification and Reconstruction >\n# Quadric Edge Collapse Decimation, with parameters:\n# 0.9 percentage reduction (10%), 0.3 Quality threshold (70%)\n# Target number of faces is ignored with those parameters\n# conserving face normals, planar simplification and\n# post-simplimfication cleaning)\n# And going to Filter > Show current filter script\nfilter_script_mlx = \"\"\"<!DOCTYPE FilterScript>\n<FilterScript>\n <filter name=\"Quadric Edge Collapse Decimation\">\n <Param type=\"RichFloat\" value=\"%reduction%\" name=\"TargetPerc\"/>\n <Param type=\"RichFloat\" value=\"0.3\" name=\"QualityThr\"/>\n <Param type=\"RichBool\" value=\"false\" name=\"PreserveBoundary\"/>\n <Param type=\"RichFloat\" value=\"1\" name=\"BoundaryWeight\"/>\n <Param type=\"RichBool\" value=\"false\" name=\"PreserveNormal\"/>\n <Param type=\"RichBool\" value=\"false\" name=\"PreserveTopology\"/>\n <Param type=\"RichBool\" value=\"false\" name=\"OptimalPlacement\"/>\n <Param type=\"RichBool\" value=\"true\" name=\"PlanarQuadric\"/>\n <Param type=\"RichBool\" value=\"false\" name=\"QualityWeight\"/>\n <Param type=\"RichBool\" value=\"true\" name=\"AutoClean\"/>\n <Param type=\"RichBool\" value=\"false\" name=\"Selected\"/>\n </filter>\n</FilterScript>\n\"\"\"\n\n\ndef create_tmp_filter_file(filename='filter_file_tmp.mlx', reduction=0.9):\n with open('/tmp/' + filename, 'w') as f:\n f.write(filter_script_mlx.replace('%reduction%', str(reduction)))\n return '/tmp/' + filename\n\n\ndef reduce_faces(in_file, out_file, reduction=0.5):\n filter_script_path = create_tmp_filter_file(reduction=reduction)\n # Add input mesh\n command = \"meshlabserver -i \" + in_file\n # Add the filter script\n command += \" -s \" + filter_script_path\n # Add the output filename and output flags\n command += \" -o \" + out_file + \" -om vn fn\"\n command += \" > /tmp/meshlab.log 2>&1\"\n # Execute command\n # print(\"Going to execute: \" + command)\n output = subprocess.check_output(command, shell=True)\n # last_line = output.splitlines()[-1]\n # print(\"Done:\")\n #print(in_file + \" > \" + out_file + \": \" + last_line)\n\n\ndef simplify_stl(stl_file, max_size=3):\n size_M = os.path.getsize(stl_file)/(1024*1024)\n\n if size_M > max_size:\n print(Fore.BLUE + '+ '+os.path.basename(stl_file) +\n (' is %.2f M, running mesh simplification' % size_M))\n shutil.copyfile(stl_file, '/tmp/simplify.stl')\n reduce_faces('/tmp/simplify.stl', stl_file, max_size / size_M)\n"
] |
[
[
"numpy.concatenate",
"numpy.matrix"
]
] |
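`apply_matrix()` above splits a 4x4 homogeneous transform into a rotation block and a translation row, then applies them to each vertex array. A small sketch of the same arithmetic (including the original's `np.matrix` usage) on a made-up translation, assuming only numpy:

```python
# Illustrative sketch of the transform arithmetic in apply_matrix() above.
import numpy as np

matrix = np.eye(4)
matrix[0:3, 3] = [0.1, 0.0, -0.2]          # made-up translation for the example

rotation = matrix[0:3, 0:3]
translation = matrix[0:3, 3:4].T.tolist()   # 1x3 row, repeated once per point

points = np.array([[0.0, 0.0, 0.0],
                   [1.0, 2.0, 3.0]])
transformed = (rotation * np.matrix(points).T).T + translation * len(points)
print(transformed)  # each point shifted by (0.1, 0.0, -0.2)
```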
mshaikh2/sagan
|
[
"c978d18b0400eddecde303e30900107093029876"
] |
[
"PyTorch-cSAWGAN/Discriminators.py"
] |
[
"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.autograd as autograd\nimport torch.optim as optim\nimport torchvision\nimport torchvision.transforms as transforms\nimport Components\n\nclass cSAWGANDiscriminatorV1(nn.Module):\n def __init__(self, in_channels, num_classes):\n super(cSAWGANDiscriminatorV1, self).__init__()\n # (N, in_channels, 32, 32)\n self.l1 = nn.Sequential(\n nn.utils.spectral_norm(nn.Conv2d(in_channels=in_channels, out_channels=256, kernel_size=4, stride=2, padding=1)),\n nn.BatchNorm2d(num_features=256),\n nn.LeakyReLU(0.2)\n )\n\n self.attn = Components.SelfAttention(in_channels=256)\n\n # (N, 256, 16, 16)\n self.l2 = nn.Sequential(\n nn.utils.spectral_norm(nn.Conv2d(in_channels=256, out_channels=512, kernel_size=4, stride=2, padding=1)),\n nn.BatchNorm2d(num_features=512),\n nn.LeakyReLU(0.2)\n )\n\n # (N, 512, 32, 32)\n self.l3 = nn.Sequential(\n nn.utils.spectral_norm(nn.Conv2d(in_channels=512, out_channels=1024, kernel_size=4, stride=2, padding=1)),\n nn.BatchNorm2d(num_features=1024),\n nn.LeakyReLU(0.2)\n )\n\n # (N, 1024, 4, 4)\n self.l4 = nn.Sequential(\n nn.utils.spectral_norm(nn.Conv2d(in_channels=1024, out_channels=1024, kernel_size=4, stride=1, padding=0)),\n nn.BatchNorm2d(num_features=1024),\n nn.LeakyReLU(0.2)\n )\n\n # (N, 1024, 1, 1)\n self.l5 = nn.Sequential(\n nn.utils.spectral_norm(nn.Linear(in_features=1024+num_classes, out_features=1))\n )\n\n\n def forward(self, x, y):\n res = self.l1(x)\n res, attn_map = self.attn(res)\n res = self.l2(res)\n res = self.l3(res)\n res = self.l4(res)\n res = self.l5(torch.cat([res.view(-1, 1024), y], dim=1))\n #return res, attn_map\n return res"
] |
[
[
"torch.nn.Linear",
"torch.nn.Conv2d",
"torch.nn.LeakyReLU",
"torch.nn.BatchNorm2d"
]
] |
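Each block in the discriminator above wraps a strided `Conv2d` in `nn.utils.spectral_norm`, followed by batch norm and LeakyReLU, halving the spatial resolution. A minimal sketch of one such block, assuming PyTorch is installed and a 3-channel input (the source leaves `in_channels` as a parameter):

```python
# Illustrative sketch of one spectrally-normalized block like `l1` above.
import torch
import torch.nn as nn

block = nn.Sequential(
    nn.utils.spectral_norm(
        nn.Conv2d(3, 256, kernel_size=4, stride=2, padding=1)  # 3 channels assumed
    ),
    nn.BatchNorm2d(256),
    nn.LeakyReLU(0.2),
)

x = torch.randn(8, 3, 32, 32)
print(block(x).shape)  # torch.Size([8, 256, 16, 16]) -- spatial dims halved
```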
bicycleman15/pytorch-classification
|
[
"01e480dad9ea1e9bbf0810b35c1103dd76e06510"
] |
[
"train.py"
] |
[
"import argparse\nimport os\nimport shutil\nimport random\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n\nfrom utils import Logger, AverageMeter, accuracy, mkdir_p, parse_args\nfrom utils import get_lr, save_checkpoint\nfrom tqdm import tqdm\n\nfrom solver.runners import train, test\nfrom solver import loss_dict\n\nfrom calibration_library.ece_loss import ECELoss\nfrom calibration_library.cce_loss import CCELossFast\n\nfrom models import model_dict\nfrom data_loaders import dataloader_dict, dataset_nclasses_dict\n\nimport logging\n\ndef create_loss_save_str(args):\n loss_name = args.lossname\n\n save_str = loss_name\n\n if \"LS\" in loss_name:\n save_str += f\"_alpha={args.alpha}\"\n \n if \"DCA\" in loss_name:\n save_str += f\"_beta={args.beta}\"\n \n if \"FL\" in loss_name:\n save_str += f\"_gamma={args.gamma}\"\n \n return save_str\n\nif __name__ == \"__main__\":\n \n torch.manual_seed(1)\n random.seed(1)\n \n args = parse_args()\n\n assert args.dataset in dataloader_dict\n assert args.model in model_dict\n assert args.lossname in loss_dict\n assert args.dataset in dataset_nclasses_dict\n\n loss_save_string = create_loss_save_str(args)\n orig_loss_string = loss_save_string\n\n if len(args.prefix):\n loss_save_string = args.prefix + \"-\" + loss_save_string\n\n # prepare save path\n model_save_pth = f\"{args.checkpoint}/{args.dataset}/{args.model}/{loss_save_string}\"\n if not os.path.isdir(model_save_pth):\n mkdir_p(model_save_pth)\n\n logging.basicConfig(level=logging.DEBUG, \n format=\"%(levelname)s: %(message)s\",\n handlers=[\n logging.FileHandler(filename=os.path.join(model_save_pth, \"train.log\")),\n logging.StreamHandler()\n ])\n logging.info(f\"Setting up logging folder : {model_save_pth}\")\n\n num_classes = dataset_nclasses_dict[args.dataset]\n criterion = loss_dict[args.lossname](alpha=args.alpha, beta=args.beta, gamma=args.gamma, n_classes=num_classes)\n\n logging.info(f\"Using loss function : {orig_loss_string}\")\n \n # prepare model\n logging.info(f\"Using model : {args.model}\")\n model = model_dict[args.model](num_classes=num_classes)\n model.cuda()\n\n # set up dataset\n logging.info(f\"Using dataset : {args.dataset}\")\n trainloader, testloader = dataloader_dict[args.dataset](args)\n\n\n # set up metrics\n ece_evaluator = ECELoss(n_classes = num_classes) \n fastcce_evaluator = CCELossFast(n_classes = num_classes)\n\n logging.info(f\"Setting up optimizer : {args.optimizer}\")\n\n if args.optimizer == \"sgd\":\n optimizer = optim.SGD(model.parameters(), \n lr=args.lr, \n momentum=args.momentum, \n weight_decay=args.weight_decay)\n\n elif args.optimizer == \"adam\":\n optimizer = optim.Adam(model.parameters(),\n lr=args.learning_rate,\n weight_decay=args.weight_decay)\n \n logging.info(f\"Step sizes : {args.schedule_steps} | lr-decay-factor : {args.lr_decay_factor}\")\n scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=args.schedule_steps, gamma=args.lr_decay_factor)\n\n start_epoch = args.start_epoch\n\n if args.resume:\n # Load checkpoint.\n\n logging.info(f'Resuming from saved checkpoint: {args.resume}')\n\n assert os.path.isfile(args.resume)\n args.checkpoint = os.path.dirname(args.resume)\n\n saved_model_dict = torch.load(args.resume)\n start_epoch = saved_model_dict['epoch']\n model.load_state_dict(saved_model_dict['state_dict'])\n optimizer.load_state_dict(saved_model_dict['optimizer'])\n scheduler.load_state_dict(saved_model_dict['scheduler'])\n \n model.cuda()\n \n best_acc = 0.\n\n # set up loggers\n logger = 
Logger(os.path.join(model_save_pth, 'train_metrics.txt')) \n logger.set_names(['lr', 'train_loss', 'val_loss', 'top1_train', 'top1', 'top3', 'top5', 'SCE', 'ECE'])\n\n for epoch in range(start_epoch, args.epochs):\n\n logging.info('Epoch: [%d | %d] LR: %f' % (epoch + 1, args.epochs, get_lr(optimizer)))\n\n train_loss, top1_train, _, _ = train(trainloader, model, criterion, optimizer)\n test_loss, top1, top3, top5, cce_score, ece_score = test(testloader, model, criterion, ece_evaluator, fastcce_evaluator)\n\n scheduler.step()\n\n # append logger file\n logger.append([get_lr(optimizer), train_loss, test_loss, top1_train, top1, top3, top5, cce_score, ece_score])\n\n logging.info(\"End of epoch {} stats: train_loss : {:.4f} | val_loss : {:.4f} | top1_train : {:.4f} | top1 : {:.4f} | ECE : {:.5f} | SCE : {:.5f}\".format(\n epoch+1,\n train_loss,\n test_loss,\n top1_train,\n top1,\n ece_score,\n cce_score\n ))\n\n # save model\n is_best = top1 > best_acc\n best_acc = max(top1, best_acc)\n\n save_checkpoint({\n 'epoch': epoch + 1,\n 'state_dict': model.state_dict(),\n 'acc': top1,\n 'best_acc': best_acc,\n 'optimizer' : optimizer.state_dict(),\n 'scheduler' : scheduler.state_dict(),\n 'dataset' : args.dataset,\n 'model' : args.model\n }, is_best, checkpoint=model_save_pth)\n\n # DO UMAP T_SNE ....\n logger.close()\n\n logging.info('Best accuracy obtained: {}'.format(best_acc))"
] |
[
[
"torch.optim.lr_scheduler.MultiStepLR",
"torch.manual_seed",
"torch.load"
]
] |
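The training script above decays the learning rate with `MultiStepLR(optimizer, milestones=args.schedule_steps, gamma=args.lr_decay_factor)`. A short sketch of how that schedule behaves, using made-up milestones and a dummy parameter, assuming PyTorch:

```python
# Illustrative sketch of the MultiStepLR schedule configured above.
# Milestones and the single dummy parameter are invented for the example.
import torch

param = torch.nn.Parameter(torch.zeros(1))
optimizer = torch.optim.SGD([param], lr=0.1, momentum=0.9, weight_decay=5e-4)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[3, 6], gamma=0.1)

for epoch in range(8):
    optimizer.step()   # stand-in for one epoch of training
    scheduler.step()
    # lr: 0.1 -> 0.01 -> 0.001 as the milestones are crossed
    print(epoch, optimizer.param_groups[0]["lr"])
```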
keerthanss/Synthesia-to-Sheet
|
[
"2a74db9673cfd9fbdce53eaf1ac65f60820c59ab"
] |
[
"synthesia_to_sheet/Song.py"
] |
[
"import cv2\nimport numpy as np\n\nfrom sklearn.cluster import KMeans\n\nfrom ClassDefinitions.Hand import Hand\n\nclass Song:\n \"\"\"docstring for Song\"\"\"\n\n def __init__(self, piano):\n self.piano = piano\n self.key_presses = [] # list of key presses for each frame\n self.length_of_song = 0 # stores the length of the key_presses\n\n self._kmeans = None\n self._color_map = dict()\n self._BLACK = 100\n self._WHITE = 200\n return\n\n def _store_presses_in_key(self, frame_index, key_presses_in_frame, labels):\n key_presses_in_frame = set(key_presses_in_frame)\n for i,k in enumerate(self.piano.keys):\n if k.IsPressed:\n if i in key_presses_in_frame:\n continue\n else:\n k.IsPressed = False\n k.Presses[-1][-1] = frame_index\n else:\n if i in key_presses_in_frame:\n k.IsPressed = True\n new_press = [self._color_map[labels[i]], frame_index, None] # hand, start_frame, end_frame\n k.Presses.append(new_press)\n else:\n continue\n return\n\n def _identify_key_presses_in_frame(self, frame):\n frame_colors = [ frame[ k.Location[1] ][ k.Location[0] ] for k in self.piano.keys ]\n labels = self._kmeans.predict(frame_colors)\n key_presses_in_frame = [ i for (i, _label) in enumerate(labels) if self._color_map[_label] not in [self._BLACK, self._WHITE] ]\n self.key_presses.append( key_presses_in_frame )\n self.length_of_song += 1\n self._store_presses_in_key(self.length_of_song, key_presses_in_frame, labels)\n return\n\n def _train_kmeans(self, list_of_frames, k=4, train_size=500):\n colors = []\n for frame in list_of_frames[0:train_size]:\n for key in self.piano.keys:\n y, x = key.Location\n current_color = frame[x][y]\n colors.append(current_color)\n self._kmeans = KMeans(n_clusters=k).fit(colors)\n cluster_centers = self._kmeans.cluster_centers_\n cluster_centers_1D = np.sqrt(np.sum(np.square(cluster_centers), axis=1))\n self._init_color_map(list_of_frames, cluster_centers_1D)\n return\n\n def _init_color_map(self, list_of_frames, cluster_centers_1D):\n k = cluster_centers_1D.shape[0]\n white_index = np.argmax(cluster_centers_1D)\n black_index = np.argmin(cluster_centers_1D)\n remaining_colors = [ i for (i, _) in enumerate(cluster_centers_1D) if i != white_index and i != black_index ]\n if k == 3:\n self._color_map[white_index] = self._WHITE\n self._color_map[black_index] = self._BLACK\n self._color_map[remaining_colors[0]] = Hand.Generic\n return\n\n # For k = 4, we need to map hand to color\n # first we need a reference frame in which both hand presses are present\n # then we categorise based on the location\n left_index, right_index = 0, 0\n\n for frame in list_of_frames:\n frame_colors = [ frame[ k.Location[1] ][ k.Location[0] ] for k in self.piano.keys ]\n number_of_distinct_colors = np.unique(self._kmeans.predict(frame_colors)).shape[0]\n if number_of_distinct_colors == 4:\n reference_frame = frame\n break\n\n frame_colors = [ reference_frame[ k.Location[1] ][ k.Location[0] ] for k in self.piano.keys ]\n labels = self._kmeans.predict(frame_colors)\n temp_idx1, temp_idx2 = 0, 0\n for l in labels:\n if temp_idx1 == 0:\n if l == remaining_colors[0]:\n temp_idx1 = l\n elif temp_idx2 == 0:\n if l == remaining_colors[1]:\n temp_idx2 = l\n else:\n break\n\n y1, _ = self.piano.keys[temp_idx1].Location\n y2, _ = self.piano.keys[temp_idx2].Location\n\n left_index = remaining_colors[ 0 if y1 < y2 else 1 ]\n right_index = filter(lambda x : x not in [left_index, white_index, black_index], range(4))[0]\n\n self._color_map[white_index] = self._WHITE\n self._color_map[black_index] = self._BLACK\n 
self._color_map[left_index] = Hand.Left\n self._color_map[right_index] = Hand.Right\n return\n\n def process_video(self, list_of_frames):\n self._train_kmeans(list_of_frames, k=4, train_size=500) # NOTE: k=4 if both hands are shown in different colors, else k=3.\n map(self._identify_key_presses_in_frame, list_of_frames)\n self._store_presses_in_key(self.length_of_song, [], []) # mark the ending in case some keys are pressed until the last frame\n\n # Debug helper - Plis test and tell :P\n # for i, frame in enumerate(list_of_frames):\n # for idx in self.key_presses[i]:\n # key = self.piano.keys[idx]\n # cv2.circle(frame, key.Location, 3, (0,0,255), -1)\n # cv2.imshow('frame',frame)\n # if cv2.waitKey(1) & 0xFF == ord('q'):\n # break\n #\n # for i, k in enumerate(self.piano.keys):\n # if k.Presses:\n # print i, k.Presses\n return\n"
] |
[
[
"numpy.square",
"numpy.argmax",
"numpy.argmin",
"sklearn.cluster.KMeans"
]
] |
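`_train_kmeans()` and `_init_color_map()` above cluster sampled key colors with KMeans and tag the brightest and darkest cluster centers as white and black keys. A self-contained sketch of that idea on synthetic colors (the three color groups are invented for illustration):

```python
# Illustrative sketch of the brightest/darkest cluster mapping used above.
import numpy as np
from sklearn.cluster import KMeans

rng = np.random.default_rng(0)
white = rng.normal(250, 3, size=(50, 3))              # bright "white key" pixels
black = rng.normal(10, 3, size=(50, 3))               # dark "black key" pixels
green = rng.normal((40, 200, 40), 5, size=(50, 3))    # a highlighted (pressed) key color
colors = np.vstack([white, black, green])

kmeans = KMeans(n_clusters=3, n_init=10).fit(colors)
centers_1d = np.sqrt(np.sum(np.square(kmeans.cluster_centers_), axis=1))
white_index, black_index = np.argmax(centers_1d), np.argmin(centers_1d)
print("white cluster:", white_index, "black cluster:", black_index)
```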
windysavage/dfdc_deepfake_challenge
|
[
"d10b54cf933282366157a031954b046d87d57009"
] |
[
"training/zoo/classifiers.py"
] |
[
"from functools import partial\n\nimport numpy as np\nimport torch\nfrom timm.models.efficientnet import tf_efficientnet_b4_ns, tf_efficientnet_b3_ns, \\\n tf_efficientnet_b5_ns, tf_efficientnet_b2_ns, tf_efficientnet_b6_ns, tf_efficientnet_b7_ns\nfrom torch import nn\nfrom torch.nn.modules.dropout import Dropout\nfrom torch.nn.modules.linear import Linear\nfrom torch.nn.modules.pooling import AdaptiveAvgPool2d\n\nencoder_params = {\n \"tf_efficientnet_b3_ns\": {\n \"features\": 1536,\n \"init_op\": partial(tf_efficientnet_b3_ns, pretrained=True, drop_path_rate=0.2)\n },\n \"tf_efficientnet_b2_ns\": {\n \"features\": 1408,\n \"init_op\": partial(tf_efficientnet_b2_ns, pretrained=False, drop_path_rate=0.2)\n },\n \"tf_efficientnet_b4_ns\": {\n \"features\": 1792,\n \"init_op\": partial(tf_efficientnet_b4_ns, pretrained=True, drop_path_rate=0.5)\n },\n \"tf_efficientnet_b5_ns\": {\n \"features\": 2048,\n \"init_op\": partial(tf_efficientnet_b5_ns, pretrained=True, drop_path_rate=0.2)\n },\n \"tf_efficientnet_b4_ns_03d\": {\n \"features\": 1792,\n \"init_op\": partial(tf_efficientnet_b4_ns, pretrained=True, drop_path_rate=0.3)\n },\n \"tf_efficientnet_b5_ns_03d\": {\n \"features\": 2048,\n \"init_op\": partial(tf_efficientnet_b5_ns, pretrained=True, drop_path_rate=0.3)\n },\n \"tf_efficientnet_b5_ns_04d\": {\n \"features\": 2048,\n \"init_op\": partial(tf_efficientnet_b5_ns, pretrained=True, drop_path_rate=0.4)\n },\n \"tf_efficientnet_b6_ns\": {\n \"features\": 2304,\n \"init_op\": partial(tf_efficientnet_b6_ns, pretrained=True, drop_path_rate=0.2)\n },\n \"tf_efficientnet_b7_ns\": {\n \"features\": 2560,\n \"init_op\": partial(tf_efficientnet_b7_ns, pretrained=False, drop_path_rate=0.2)\n },\n \"tf_efficientnet_b6_ns_04d\": {\n \"features\": 2304,\n \"init_op\": partial(tf_efficientnet_b6_ns, pretrained=True, drop_path_rate=0.4)\n },\n}\n\n\ndef setup_srm_weights(input_channels: int = 3) -> torch.Tensor:\n \"\"\"Creates the SRM kernels for noise analysis.\"\"\"\n # note: values taken from Zhou et al., \"Learning Rich Features for Image Manipulation Detection\", CVPR2018\n srm_kernel = torch.from_numpy(np.array([\n [ # srm 1/2 horiz\n [0., 0., 0., 0., 0.], # noqa: E241,E201\n [0., 0., 0., 0., 0.], # noqa: E241,E201\n [0., 1., -2., 1., 0.], # noqa: E241,E201\n [0., 0., 0., 0., 0.], # noqa: E241,E201\n [0., 0., 0., 0., 0.], # noqa: E241,E201\n ], [ # srm 1/4\n [0., 0., 0., 0., 0.], # noqa: E241,E201\n [0., -1., 2., -1., 0.], # noqa: E241,E201\n [0., 2., -4., 2., 0.], # noqa: E241,E201\n [0., -1., 2., -1., 0.], # noqa: E241,E201\n [0., 0., 0., 0., 0.], # noqa: E241,E201\n ], [ # srm 1/12\n [-1., 2., -2., 2., -1.], # noqa: E241,E201\n [2., -6., 8., -6., 2.], # noqa: E241,E201\n [-2., 8., -12., 8., -2.], # noqa: E241,E201\n [2., -6., 8., -6., 2.], # noqa: E241,E201\n [-1., 2., -2., 2., -1.], # noqa: E241,E201\n ]\n ])).float()\n srm_kernel[0] /= 2\n srm_kernel[1] /= 4\n srm_kernel[2] /= 12\n return srm_kernel.view(3, 1, 5, 5).repeat(1, input_channels, 1, 1)\n\n\ndef setup_srm_layer(input_channels: int = 3) -> torch.nn.Module:\n \"\"\"Creates a SRM convolution layer for noise analysis.\"\"\"\n weights = setup_srm_weights(input_channels)\n conv = torch.nn.Conv2d(input_channels, out_channels=3,\n kernel_size=5, stride=1, padding=2, bias=False)\n with torch.no_grad():\n conv.weight = torch.nn.Parameter(weights, requires_grad=False)\n return conv\n\n\nclass DeepFakeClassifierSRM(nn.Module):\n def __init__(self, encoder, dropout_rate=0.5) -> None:\n super().__init__()\n self.encoder = 
encoder_params[encoder][\"init_op\"]()\n self.encoder.load_state_dict(\n torch.load(\"model/tf_efficientnet_b7_ns.pth\"))\n\n self.avg_pool = AdaptiveAvgPool2d((1, 1))\n self.srm_conv = setup_srm_layer(3)\n self.dropout = Dropout(dropout_rate)\n self.fc = Linear(encoder_params[encoder][\"features\"], 1)\n\n def forward(self, x):\n noise = self.srm_conv(x)\n x = self.encoder.forward_features(noise)\n x = self.avg_pool(x).flatten(1)\n x = self.dropout(x)\n x = self.fc(x)\n return x\n\n\nclass GlobalWeightedAvgPool2d(nn.Module):\n \"\"\"\n Global Weighted Average Pooling from paper \"Global Weighted Average\n Pooling Bridges Pixel-level Localization and Image-level Classification\"\n \"\"\"\n\n def __init__(self, features: int, flatten=False):\n super().__init__()\n self.conv = nn.Conv2d(features, 1, kernel_size=1, bias=True)\n self.flatten = flatten\n\n def fscore(self, x):\n m = self.conv(x)\n m = m.sigmoid().exp()\n return m\n\n def norm(self, x: torch.Tensor):\n return x / x.sum(dim=[2, 3], keepdim=True)\n\n def forward(self, x):\n input_x = x\n x = self.fscore(x)\n x = self.norm(x)\n x = x * input_x\n x = x.sum(dim=[2, 3], keepdim=not self.flatten)\n return x\n\n\nclass DeepFakeClassifier(nn.Module):\n def __init__(self, encoder, dropout_rate=0.0) -> None:\n super().__init__()\n self.encoder = encoder_params[encoder][\"init_op\"]()\n self.avg_pool = AdaptiveAvgPool2d((1, 1))\n self.dropout = Dropout(dropout_rate)\n self.fc = Linear(encoder_params[encoder][\"features\"], 1)\n\n def forward(self, x):\n x = self.encoder.forward_features(x)\n x = self.avg_pool(x).flatten(1)\n x = self.dropout(x)\n x = self.fc(x)\n return x\n\n\nclass DeepFakeClassifierGWAP(nn.Module):\n def __init__(self, encoder, dropout_rate=0.5) -> None:\n super().__init__()\n self.encoder = encoder_params[encoder][\"init_op\"]()\n self.avg_pool = GlobalWeightedAvgPool2d(\n encoder_params[encoder][\"features\"])\n self.dropout = Dropout(dropout_rate)\n self.fc = Linear(encoder_params[encoder][\"features\"], 1)\n\n def forward(self, x):\n x = self.encoder.forward_features(x)\n x = self.avg_pool(x).flatten(1)\n x = self.dropout(x)\n x = self.fc(x)\n return x\n"
] |
[
[
"torch.nn.Parameter",
"torch.nn.modules.linear.Linear",
"torch.load",
"torch.nn.Conv2d",
"torch.nn.modules.pooling.AdaptiveAvgPool2d",
"torch.no_grad",
"torch.nn.modules.dropout.Dropout",
"numpy.array"
]
] |
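`setup_srm_weights()` above builds three fixed SRM high-pass kernels and divides them by 2, 4 and 12. A numpy-only sketch of the same kernels and their normalization (the original returns them as a torch tensor reshaped for a conv layer):

```python
# Illustrative sketch of the SRM noise kernels used by setup_srm_weights() above.
import numpy as np

srm = np.array([
    # srm 1/2 (horizontal second difference)
    [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 1, -2, 1, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]],
    # srm 1/4
    [[0, 0, 0, 0, 0], [0, -1, 2, -1, 0], [0, 2, -4, 2, 0], [0, -1, 2, -1, 0], [0, 0, 0, 0, 0]],
    # srm 1/12
    [[-1, 2, -2, 2, -1], [2, -6, 8, -6, 2], [-2, 8, -12, 8, -2], [2, -6, 8, -6, 2], [-1, 2, -2, 2, -1]],
], dtype=np.float64)

srm /= np.array([2.0, 4.0, 12.0])[:, None, None]  # same per-kernel normalization
print(srm.sum(axis=(1, 2)))  # each high-pass kernel sums to 0
```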
jens-k/sleap
|
[
"4e99ed037f1f7f41d9f15e2efaac638fc7e12b09"
] |
[
"sleap/nn/training.py"
] |
[
"\"\"\"Training functionality and high level APIs.\"\"\"\n\nimport os\nimport re\nfrom datetime import datetime\nfrom time import time\nimport logging\n\nimport tensorflow as tf\nimport numpy as np\n\nimport attr\nfrom typing import Optional, Callable, List, Union, Text, TypeVar\nfrom abc import ABC, abstractmethod\n\nimport cattr\nimport json\nimport copy\n\nimport sleap\nfrom sleap.util import get_package_file\n\n# Config\nfrom sleap.nn.config import (\n TrainingJobConfig,\n SingleInstanceConfmapsHeadConfig,\n CentroidsHeadConfig,\n CenteredInstanceConfmapsHeadConfig,\n MultiInstanceConfig,\n)\n\n# Model\nfrom sleap.nn.model import Model\n\n# Data\nfrom sleap.nn.config import LabelsConfig\nfrom sleap.nn.data.pipelines import LabelsReader\nfrom sleap.nn.data.pipelines import (\n Pipeline,\n SingleInstanceConfmapsPipeline,\n CentroidConfmapsPipeline,\n TopdownConfmapsPipeline,\n BottomUpPipeline,\n KeyMapper,\n)\nfrom sleap.nn.data.training import split_labels\n\n# Optimization\nfrom sleap.nn.config import OptimizationConfig\nfrom sleap.nn.losses import OHKMLoss, PartLoss\nfrom tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping\n\n# Outputs\nfrom sleap.nn.config import (\n OutputsConfig,\n ZMQConfig,\n TensorBoardConfig,\n CheckpointingConfig,\n)\nfrom sleap.nn.callbacks import (\n TrainingControllerZMQ,\n ProgressReporterZMQ,\n ModelCheckpointOnEvent,\n)\nfrom tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint, CSVLogger\n\n# Visualization\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom sleap.nn.callbacks import TensorBoardMatplotlibWriter, MatplotlibSaver\nfrom sleap.nn.viz import plot_img, plot_confmaps, plot_peaks, plot_pafs\n\n\nlogger = logging.getLogger(__name__)\n\n\n@attr.s(auto_attribs=True)\nclass DataReaders:\n \"\"\"Container class for SLEAP labels that serve as training data sources.\n\n Attributes:\n training_labels_reader: LabelsReader pipeline provider for a training data from\n a sleap.Labels instance.\n validation_labels_reader: LabelsReader pipeline provider for a validation data\n from a sleap.Labels instance.\n test_labels_reader: LabelsReader pipeline provider for a test set data from a\n sleap.Labels instance. 
This is not necessary for training.\n \"\"\"\n\n training_labels_reader: LabelsReader\n validation_labels_reader: LabelsReader\n test_labels_reader: Optional[LabelsReader] = None\n\n @classmethod\n def from_config(\n cls,\n labels_config: LabelsConfig,\n training: Union[Text, sleap.Labels],\n validation: Union[Text, sleap.Labels, float],\n test: Optional[Union[Text, sleap.Labels]] = None,\n video_search_paths: Optional[List[Text]] = None,\n update_config: bool = False,\n ) -> \"DataReaders\":\n \"\"\"Create data readers from a (possibly incomplete) configuration.\"\"\"\n # Use config values if not provided in the arguments.\n if training is None:\n training = labels_config.training_labels\n if validation is None:\n if labels_config.validation_labels is not None:\n validation = labels_config.validation_labels\n else:\n validation = labels_config.validation_fraction\n if test is None:\n test = labels_config.test_labels\n\n # Update the config fields with arguments (if not a full sleap.Labels instance).\n if update_config:\n if isinstance(training, Text):\n labels_config.training_labels = training\n if isinstance(validation, Text):\n labels_config.validation_labels = validation\n elif isinstance(validation, float):\n validation_labels = labels_config.validation_fraction\n if isinstance(test, Text):\n labels_config.test_labels = test\n\n # Build class.\n # TODO: use labels_config.search_path_hints for loading\n return cls.from_labels(\n training=training,\n validation=validation,\n test=test,\n video_search_paths=video_search_paths,\n )\n\n @classmethod\n def from_labels(\n cls,\n training: Union[Text, sleap.Labels],\n validation: Union[Text, sleap.Labels, float],\n test: Optional[Union[Text, sleap.Labels]] = None,\n video_search_paths: Optional[List[Text]] = None,\n ) -> \"DataReaders\":\n \"\"\"Create data readers from sleap.Labels datasets as data providers.\"\"\"\n\n if isinstance(training, str):\n print(\"video search paths: \", video_search_paths)\n training = sleap.Labels.load_file(training, video_search=video_search_paths)\n print(training.videos)\n\n if isinstance(validation, str):\n validation = sleap.Labels.load_file(\n validation, video_search=video_search_paths\n )\n elif isinstance(validation, float):\n training, validation = split_labels(training, [-1, validation])\n\n if isinstance(test, str):\n test = sleap.Labels.load_file(test, video_search=video_search_paths)\n\n test_reader = None\n if test is not None:\n test_reader = LabelsReader.from_user_instances(test)\n\n return cls(\n training_labels_reader=LabelsReader.from_user_instances(training),\n validation_labels_reader=LabelsReader.from_user_instances(validation),\n test_labels_reader=test_reader,\n )\n\n @property\n def training_labels(self) -> sleap.Labels:\n \"\"\"Return the sleap.Labels underlying the training data reader.\"\"\"\n return self.training_labels_reader.labels\n\n @property\n def validation_labels(self) -> sleap.Labels:\n \"\"\"Return the sleap.Labels underlying the validation data reader.\"\"\"\n return self.validation_labels_reader.labels\n\n @property\n def test_labels(self) -> sleap.Labels:\n \"\"\"Return the sleap.Labels underlying the test data reader.\"\"\"\n if self.test_labels_reader is None:\n raise ValueError(\"No test labels provided to data reader.\")\n return self.test_labels_reader.labels\n\n\ndef setup_optimizer(config: OptimizationConfig) -> tf.keras.optimizers.Optimizer:\n \"\"\"Set up model optimizer from config.\"\"\"\n if config.optimizer == \"adam\":\n optimizer = 
tf.keras.optimizers.Adam(\n learning_rate=config.initial_learning_rate, amsgrad=True\n )\n else:\n # TODO: explicit lookup\n optimizer = config.optimizer\n return optimizer\n\n\ndef setup_losses(config: OptimizationConfig) -> Callable[[tf.Tensor], tf.Tensor]:\n \"\"\"Set up model loss function from config.\"\"\"\n losses = [tf.keras.losses.MeanSquaredError()]\n\n if config.hard_keypoint_mining.online_mining:\n losses.append(OHKMLoss.from_config(config.hard_keypoint_mining))\n logging.info(f\" OHKM enabled: {config.hard_keypoint_mining}\")\n\n def loss_fn(y_gt, y_pr):\n loss = 0\n for loss_fn in losses:\n loss += loss_fn(y_gt, y_pr)\n return loss\n\n return loss_fn\n\n\ndef setup_metrics(\n config: OptimizationConfig, part_names: Optional[List[Text]] = None\n) -> List[Union[tf.keras.losses.Loss, tf.keras.metrics.Metric]]:\n \"\"\"Set up training metrics from config.\"\"\"\n metrics = []\n\n if config.hard_keypoint_mining.online_mining:\n metrics.append(OHKMLoss.from_config(config.hard_keypoint_mining))\n\n if part_names is not None:\n for channel_ind, part_name in enumerate(part_names):\n metrics.append(PartLoss(channel_ind=channel_ind, name=part_name))\n\n return metrics\n\n\ndef setup_optimization_callbacks(\n config: OptimizationConfig,\n) -> List[tf.keras.callbacks.Callback]:\n \"\"\"Set up optimization callbacks from config.\"\"\"\n callbacks = []\n if config.learning_rate_schedule.reduce_on_plateau:\n callbacks.append(\n ReduceLROnPlateau(\n monitor=\"val_loss\",\n mode=\"min\",\n factor=config.learning_rate_schedule.reduction_factor,\n patience=config.learning_rate_schedule.plateau_patience,\n min_delta=config.learning_rate_schedule.plateau_min_delta,\n cooldown=config.learning_rate_schedule.plateau_cooldown,\n min_lr=config.learning_rate_schedule.min_learning_rate,\n verbose=1,\n )\n )\n logging.info(f\" Learning rate schedule: {config.learning_rate_schedule}\")\n\n if config.early_stopping.stop_training_on_plateau:\n callbacks.append(\n EarlyStopping(\n monitor=\"val_loss\",\n mode=\"min\",\n patience=config.early_stopping.plateau_patience,\n min_delta=config.early_stopping.plateau_min_delta,\n verbose=1,\n )\n )\n logging.info(f\" Early stopping: {config.early_stopping}\")\n\n return callbacks\n\n\ndef get_timestamp() -> Text:\n \"\"\"Return the date and time as a string.\"\"\"\n return datetime.now().strftime(\"%y%m%d_%H%M%S\")\n\n\ndef setup_new_run_folder(\n config: OutputsConfig, base_run_name: Optional[Text] = None\n) -> Text:\n \"\"\"Create a new run folder from config.\"\"\"\n run_path = None\n if config.save_outputs:\n # Auto-generate run name.\n if config.run_name is None:\n config.run_name = get_timestamp()\n if isinstance(base_run_name, str):\n config.run_name = config.run_name + \".\" + base_run_name\n\n # Find new run name suffix if needed.\n if config.run_name_suffix is None:\n config.run_name_suffix = \"\"\n run_path = os.path.join(\n config.runs_folder, f\"{config.run_name_prefix}{config.run_name}\"\n )\n i = 0\n while os.path.exists(run_path):\n i += 1\n config.run_name_suffix = f\"_{i}\"\n run_path = os.path.join(\n config.runs_folder,\n f\"{config.run_name_prefix}{config.run_name}{config.run_name_suffix}\",\n )\n\n # Build run path.\n run_path = config.run_path\n\n return run_path\n\n\ndef setup_zmq_callbacks(zmq_config: ZMQConfig) -> List[tf.keras.callbacks.Callback]:\n \"\"\"Set up ZeroMQ callbacks from config.\"\"\"\n callbacks = []\n\n if zmq_config.subscribe_to_controller:\n callbacks.append(\n TrainingControllerZMQ(\n 
address=zmq_config.controller_address,\n poll_timeout=zmq_config.controller_polling_timeout,\n )\n )\n logger.info(f\" ZMQ controller subcribed to: {zmq_config.controller_address}\")\n if zmq_config.publish_updates:\n callbacks.append(ProgressReporterZMQ(address=zmq_config.publish_address))\n logger.info(f\" ZMQ progress reporter publish on: {zmq_config.publish_address}\")\n\n return callbacks\n\n\ndef setup_checkpointing(\n config: CheckpointingConfig, run_path: Text\n) -> List[tf.keras.callbacks.Callback]:\n \"\"\"Set up model checkpointing callbacks from config.\"\"\"\n callbacks = []\n if config.initial_model:\n callbacks.append(\n ModelCheckpointOnEvent(\n filepath=os.path.join(run_path, \"initial_model.h5\"), event=\"train_begin\"\n )\n )\n\n if config.best_model:\n callbacks.append(\n ModelCheckpoint(\n filepath=os.path.join(run_path, \"best_model.h5\"),\n monitor=\"val_loss\",\n save_best_only=True,\n save_weights_only=False,\n save_freq=\"epoch\",\n verbose=0,\n )\n )\n\n if config.every_epoch:\n callbacks.append(\n ModelCheckpointOnEvent(\n filepath=os.path.join(run_path, \"model.epoch%04d.h5\"), event=\"epoch_end\"\n )\n )\n\n if config.latest_model:\n callbacks.append(\n ModelCheckpointOnEvent(\n filepath=os.path.join(run_path, \"latest_model.h5\"), event=\"epoch_end\"\n )\n )\n\n if config.final_model:\n callbacks.append(\n ModelCheckpointOnEvent(\n filepath=os.path.join(run_path, \"final_model.h5\"), event=\"train_end\"\n )\n )\n\n return callbacks\n\n\ndef setup_tensorboard(\n config: TensorBoardConfig, run_path: Text\n) -> List[tf.keras.callbacks.Callback]:\n \"\"\"Set up TensorBoard callbacks from config.\"\"\"\n callbacks = []\n if config.write_logs:\n callbacks.append(\n TensorBoard(\n log_dir=run_path,\n histogram_freq=0,\n write_graph=config.architecture_graph,\n update_freq=config.loss_frequency,\n profile_batch=2 if config.profile_graph else 0,\n embeddings_freq=0,\n embeddings_metadata=None,\n )\n )\n\n return callbacks\n\n\ndef setup_output_callbacks(\n config: OutputsConfig, run_path: Optional[Text] = None\n) -> List[tf.keras.callbacks.Callback]:\n \"\"\"Set up training outputs callbacks from config.\"\"\"\n callbacks = []\n if config.save_outputs and run_path is not None:\n callbacks.extend(setup_checkpointing(config.checkpointing, run_path))\n callbacks.extend(setup_tensorboard(config.tensorboard, run_path))\n\n if config.log_to_csv:\n callbacks.append(\n CSVLogger(filename=os.path.join(run_path, \"training_log.csv\"))\n )\n callbacks.extend(setup_zmq_callbacks(config.zmq))\n return callbacks\n\n\ndef setup_visualization(\n config: OutputsConfig,\n run_path: Text,\n viz_fn: Callable[[], matplotlib.figure.Figure],\n name: Text,\n) -> List[tf.keras.callbacks.Callback]:\n \"\"\"Set up visualization callbacks from config.\"\"\"\n callbacks = []\n\n try:\n matplotlib.use(\"Qt5Agg\")\n except ImportError:\n print(\n \"Unable to use Qt backend for matplotlib. 
\"\n \"This probably means Qt is running headless.\"\n )\n\n if config.save_visualizations and config.save_outputs:\n callbacks.append(\n MatplotlibSaver(\n save_folder=os.path.join(run_path, \"viz\"), plot_fn=viz_fn, prefix=name\n )\n )\n\n if (\n config.tensorboard.write_logs\n and config.tensorboard.visualizations\n and config.save_outputs\n ):\n callbacks.append(\n TensorBoardMatplotlibWriter(\n log_dir=os.path.join(run_path, name), plot_fn=viz_fn, tag=name\n )\n )\n\n return callbacks\n\n\ndef sanitize_scope_name(name: Text) -> Text:\n \"\"\"Sanitizes string which will be used as TensorFlow scope name.\"\"\"\n # Add \".\" to beginning if first character isn't acceptable\n name = re.sub(\"^([^A-Za-z0-9.])\", \".\\\\1\", name)\n # Replace invalid characters with \"_\"\n name = re.sub(\"([^A-Za-z0-9._])\", \"_\", name)\n return name\n\n\nPipelineBuilder = TypeVar(\n \"PipelineBuilder\",\n CentroidConfmapsPipeline,\n TopdownConfmapsPipeline,\n BottomUpPipeline,\n SingleInstanceConfmapsPipeline,\n)\n\n\n@attr.s(auto_attribs=True)\nclass Trainer(ABC):\n \"\"\"Base trainer class that provides general model training functionality.\n\n This class is intended to be instantiated using the `from_config()` class method,\n which will return the appropriate subclass based on the input configuration.\n\n This class should not be used directly. It is intended to be subclassed by a model\n output type-specific trainer that provides more specific functionality.\n\n Attributes:\n data_readers: A `DataReaders` instance that contains training data providers.\n model: A `Model` instance describing the SLEAP model to train.\n config: A `TrainingJobConfig` that describes the training parameters.\n initial_config: This attribute will contain a copy of the input configuration\n before any attributes are updated in `config`.\n pipeline_builder: A model output type-specific data pipeline builder to create\n pipelines that generate data used for training. 
This must be specified in\n subclasses.\n training_pipeline: The data pipeline that generates examples from the training\n set for optimization.\n validation_pipeline: The data pipeline that generates examples from the\n validation set for optimization.\n training_viz_pipeline: The data pipeline that generates examples from the\n training set for visualization.\n validation_viz_pipeline: The data pipeline that generates examples from the\n validation set for visualization.\n optimization_callbacks: Keras callbacks related to optimization.\n output_callbacks: Keras callbacks related to outputs.\n visualization_callbacks: Keras callbacks related to visualization.\n run_path: The path to the run folder that will contain training results, if any.\n \"\"\"\n\n data_readers: DataReaders\n model: Model\n config: TrainingJobConfig\n initial_config: Optional[TrainingJobConfig] = None\n\n pipeline_builder: PipelineBuilder = attr.ib(init=False)\n training_pipeline: Pipeline = attr.ib(init=False)\n validation_pipeline: Pipeline = attr.ib(init=False)\n training_viz_pipeline: Pipeline = attr.ib(init=False)\n validation_viz_pipeline: Pipeline = attr.ib(init=False)\n\n optimization_callbacks: List[tf.keras.callbacks.Callback] = attr.ib(\n factory=list, init=False\n )\n output_callbacks: List[tf.keras.callbacks.Callback] = attr.ib(\n factory=list, init=False\n )\n visualization_callbacks: List[tf.keras.callbacks.Callback] = attr.ib(\n factory=list, init=False\n )\n\n run_path: Optional[Text] = attr.ib(default=None, init=False)\n\n @classmethod\n def from_config(\n cls,\n config: TrainingJobConfig,\n training_labels: Optional[Union[Text, sleap.Labels]] = None,\n validation_labels: Optional[Union[Text, sleap.Labels, float]] = None,\n test_labels: Optional[Union[Text, sleap.Labels]] = None,\n video_search_paths: Optional[List[Text]] = None,\n ) -> \"Trainer\":\n \"\"\"Initialize the trainer from a training job configuration.\n \n Args:\n config: A `TrainingJobConfig` instance.\n training_labels: Training labels to use instead of the ones in the config,\n if any. If a path is specified, it will overwrite the one in the config.\n validation_labels: Validation labels to use instead of the ones in the\n config, if any. 
If a path is specified, it will overwrite the one in\n the config.\n test_labels: Teset labels to use instead of the ones in the config, if any.\n If a path is specified, it will overwrite the one in the config.\n \"\"\"\n # Copy input config before we make any changes.\n initial_config = copy.deepcopy(config)\n\n # Create data readers and store loaded skeleton.\n data_readers = DataReaders.from_config(\n config.data.labels,\n training=training_labels,\n validation=validation_labels,\n test=test_labels,\n video_search_paths=video_search_paths,\n update_config=True,\n )\n config.data.labels.skeletons = data_readers.training_labels.skeletons\n\n # Create model.\n model = Model.from_config(\n config.model, skeleton=config.data.labels.skeletons[0], update_config=True\n )\n\n # Determine output type to create type-specific model trainer.\n head_config = config.model.heads.which_oneof()\n trainer_cls = None\n if isinstance(head_config, CentroidsHeadConfig):\n trainer_cls = CentroidConfmapsModelTrainer\n elif isinstance(head_config, CenteredInstanceConfmapsHeadConfig):\n trainer_cls = TopdownConfmapsModelTrainer\n elif isinstance(head_config, MultiInstanceConfig):\n trainer_cls = BottomUpModelTrainer\n elif isinstance(head_config, SingleInstanceConfmapsHeadConfig):\n trainer_cls = SingleInstanceModelTrainer\n else:\n raise ValueError(\n \"Model head not specified or configured. Check the config.model.heads\"\n \" setting.\"\n )\n\n return trainer_cls(\n config=config,\n initial_config=initial_config,\n data_readers=data_readers,\n model=model,\n )\n\n @abstractmethod\n def _update_config(self):\n \"\"\"Implement in subclasses.\"\"\"\n pass\n\n @abstractmethod\n def _setup_pipeline_builder(self):\n \"\"\"Implement in subclasses.\"\"\"\n pass\n\n @property\n @abstractmethod\n def input_keys(self):\n \"\"\"Implement in subclasses.\"\"\"\n pass\n\n @property\n @abstractmethod\n def output_keys(self):\n \"\"\"Implement in subclasses.\"\"\"\n pass\n\n @abstractmethod\n def _setup_visualization(self):\n \"\"\"Implement in subclasses.\"\"\"\n pass\n\n def _setup_model(self):\n \"\"\"Set up the keras model.\"\"\"\n # Infer the input shape by evaluating the data pipeline.\n logger.info(\"Building test pipeline...\")\n t0 = time()\n base_pipeline = self.pipeline_builder.make_base_pipeline(\n self.data_readers.training_labels_reader\n )\n base_example = next(iter(base_pipeline.make_dataset()))\n input_shape = base_example[self.input_keys[0]].shape\n # TODO: extend input shape determination for multi-input\n logger.info(f\"Loaded test example. 
[{time() - t0:.3f}s]\")\n logger.info(f\" Input shape: {input_shape}\")\n\n # Create the tf.keras.Model instance.\n self.model.make_model(input_shape)\n logger.info(\"Created Keras model.\")\n logger.info(f\" Backbone: {self.model.backbone}\")\n logger.info(f\" Max stride: {self.model.maximum_stride}\")\n logger.info(f\" Parameters: {self.model.keras_model.count_params():3,d}\")\n logger.info(\" Heads: \")\n for i, head in enumerate(self.model.heads):\n logger.info(f\" heads[{i}] = {head}\")\n\n @property\n def keras_model(self) -> tf.keras.Model:\n \"\"\"Alias for `self.model.keras_model`.\"\"\"\n return self.model.keras_model\n\n def _setup_pipelines(self):\n \"\"\"Set up training data pipelines for consumption by the keras model.\"\"\"\n # Create the training and validation pipelines with appropriate tensor names.\n key_mapper = KeyMapper(\n [\n {\n input_key: input_name\n for input_key, input_name in zip(\n self.input_keys, self.keras_model.input_names\n )\n },\n {\n output_key: output_name\n for output_key, output_name in zip(\n self.output_keys, self.keras_model.output_names\n )\n },\n ]\n )\n self.training_pipeline = (\n self.pipeline_builder.make_training_pipeline(\n self.data_readers.training_labels_reader\n )\n + key_mapper\n )\n logger.info(f\"Training set: n = {len(self.data_readers.training_labels)}\")\n self.validation_pipeline = (\n self.pipeline_builder.make_training_pipeline(\n self.data_readers.validation_labels_reader\n )\n + key_mapper\n )\n logger.info(f\"Validation set: n = {len(self.data_readers.validation_labels)}\")\n\n def _setup_optimization(self):\n \"\"\"Set up optimizer, loss functions and compile the model.\"\"\"\n optimizer = setup_optimizer(self.config.optimization)\n loss_fn = setup_losses(self.config.optimization)\n\n # TODO: Implement general part loss reporting.\n part_names = None\n if isinstance(self.pipeline_builder, TopdownConfmapsPipeline):\n part_names = [\n sanitize_scope_name(name) for name in self.model.heads[0].part_names\n ]\n metrics = setup_metrics(self.config.optimization, part_names=part_names)\n\n self.optimization_callbacks = setup_optimization_callbacks(\n self.config.optimization\n )\n\n self.keras_model.compile(\n optimizer=optimizer,\n loss=loss_fn,\n metrics=metrics,\n loss_weights={\n output_name: head.loss_weight\n for output_name, head in zip(\n self.keras_model.output_names, self.model.heads\n )\n },\n )\n\n def _setup_outputs(self):\n \"\"\"Set up output-related functionality.\"\"\"\n if self.config.outputs.save_outputs:\n # Build path to run folder.\n self.run_path = setup_new_run_folder(\n self.config.outputs, base_run_name=type(self.model.backbone).__name__\n )\n\n # Setup output callbacks.\n self.output_callbacks = setup_output_callbacks(\n self.config.outputs, run_path=self.run_path\n )\n\n if self.run_path is not None and self.config.outputs.save_outputs:\n # Create run directory.\n os.makedirs(self.run_path, exist_ok=True)\n logger.info(f\"Created run path: {self.run_path}\")\n\n # Save configs.\n if self.initial_config is not None:\n self.initial_config.save_json(\n os.path.join(self.run_path, \"initial_config.json\")\n )\n\n self.config.save_json(os.path.join(self.run_path, \"training_config.json\"))\n\n # Save input (ground truth) labels.\n sleap.Labels.save_file(\n self.data_readers.training_labels_reader.labels,\n os.path.join(self.run_path, \"labels_gt.train.slp\"),\n )\n sleap.Labels.save_file(\n self.data_readers.validation_labels_reader.labels,\n os.path.join(self.run_path, \"labels_gt.val.slp\"),\n )\n if 
self.data_readers.test_labels_reader is not None:\n sleap.Labels.save_file(\n self.data_readers.test_labels_reader.labels,\n os.path.join(self.run_path, \"labels_gt.test.slp\"),\n )\n\n @property\n def callbacks(self) -> List[tf.keras.callbacks.Callback]:\n \"\"\"Return all callbacks currently configured.\"\"\"\n callbacks = (\n self.optimization_callbacks\n + self.visualization_callbacks\n + self.output_callbacks\n )\n\n # Some callbacks should be called after all previous ones since they depend on\n # the state of some shared objects (e.g., tf.keras.Model).\n final_callbacks = []\n for callback in callbacks[::-1]:\n if isinstance(callback, tf.keras.callbacks.EarlyStopping):\n final_callbacks.append(callback)\n callbacks.remove(callback)\n\n return callbacks + final_callbacks\n\n def setup(self):\n \"\"\"Set up data pipeline and model for training.\"\"\"\n logger.info(f\"Setting up for training...\")\n t0 = time()\n self._update_config()\n logger.info(f\"Setting up pipeline builders...\")\n self._setup_pipeline_builder()\n logger.info(f\"Setting up model...\")\n self._setup_model()\n logger.info(f\"Setting up data pipelines...\")\n self._setup_pipelines()\n logger.info(f\"Setting up optimization...\")\n self._setup_optimization()\n logger.info(f\"Setting up outputs...\")\n self._setup_outputs()\n logger.info(f\"Setting up visualization...\")\n self._setup_visualization()\n logger.info(f\"Finished trainer set up. [{time() - t0:.1f}s]\")\n\n def train(self):\n \"\"\"Execute the optimization loop to train the model.\"\"\"\n if self.keras_model is None:\n self.setup()\n\n logger.info(f\"Creating tf.data.Datasets for training data generation...\")\n t0 = time()\n training_ds = self.training_pipeline.make_dataset()\n validation_ds = self.validation_pipeline.make_dataset()\n logger.info(f\"Finished creating training datasets. [{time() - t0:.1f}s]\")\n\n logger.info(f\"Starting training loop...\")\n t0 = time()\n history = self.keras_model.fit(\n training_ds,\n epochs=self.config.optimization.epochs,\n validation_data=validation_ds,\n steps_per_epoch=self.config.optimization.batches_per_epoch,\n validation_steps=self.config.optimization.val_batches_per_epoch,\n callbacks=self.callbacks,\n verbose=2,\n )\n logger.info(f\"Finished training loop. 
[{(time() - t0) / 60:.1f} min]\")\n\n\n@attr.s(auto_attribs=True)\nclass CentroidConfmapsModelTrainer(Trainer):\n \"\"\"Trainer for models that output centroid confidence maps.\"\"\"\n\n pipeline_builder: CentroidConfmapsPipeline = attr.ib(init=False)\n\n def _update_config(self):\n \"\"\"Update the configuration with inferred values.\"\"\"\n if self.config.data.preprocessing.pad_to_stride is None:\n self.config.data.preprocessing.pad_to_stride = self.model.maximum_stride\n\n if self.config.optimization.batches_per_epoch is None:\n n_training_examples = len(self.data_readers.training_labels)\n n_training_batches = (\n n_training_examples // self.config.optimization.batch_size\n )\n self.config.optimization.batches_per_epoch = max(\n self.config.optimization.min_batches_per_epoch, n_training_batches\n )\n\n if self.config.optimization.val_batches_per_epoch is None:\n n_validation_examples = len(self.data_readers.validation_labels)\n n_validation_batches = (\n n_validation_examples // self.config.optimization.batch_size\n )\n self.config.optimization.val_batches_per_epoch = max(\n self.config.optimization.min_val_batches_per_epoch, n_validation_batches\n )\n\n def _setup_pipeline_builder(self):\n \"\"\"Initialize pipeline builder.\"\"\"\n self.pipeline_builder = CentroidConfmapsPipeline(\n data_config=self.config.data,\n optimization_config=self.config.optimization,\n centroid_confmap_head=self.model.heads[0],\n )\n\n @property\n def input_keys(self) -> List[Text]:\n \"\"\"Return example keys to be mapped to model inputs.\"\"\"\n return [\"image\"]\n\n @property\n def output_keys(self) -> List[Text]:\n \"\"\"Return example keys to be mapped to model outputs.\"\"\"\n return [\"centroid_confidence_maps\"]\n\n def _setup_visualization(self):\n \"\"\"Set up visualization pipelines and callbacks.\"\"\"\n # Create visualization/inference pipelines.\n self.training_viz_pipeline = self.pipeline_builder.make_viz_pipeline(\n self.data_readers.training_labels_reader, self.keras_model\n )\n self.validation_viz_pipeline = self.pipeline_builder.make_viz_pipeline(\n self.data_readers.validation_labels_reader, self.keras_model\n )\n\n # Create static iterators.\n training_viz_ds_iter = iter(self.training_viz_pipeline.make_dataset())\n validation_viz_ds_iter = iter(self.validation_viz_pipeline.make_dataset())\n\n def visualize_example(example):\n img = example[\"image\"].numpy()\n cms = example[\"predicted_centroid_confidence_maps\"].numpy()\n pts_gt = example[\"centroids\"].numpy()\n pts_pr = example[\"predicted_centroids\"].numpy()\n\n scale = 1.0\n if img.shape[0] < 512:\n scale = 2.0\n if img.shape[0] < 256:\n scale = 4.0\n fig = plot_img(img, dpi=72 * scale, scale=scale)\n plot_confmaps(cms, output_scale=cms.shape[0] / img.shape[0])\n plot_peaks(pts_gt, pts_pr, paired=False)\n return fig\n\n self.visualization_callbacks.extend(\n setup_visualization(\n self.config.outputs,\n run_path=self.run_path,\n viz_fn=lambda: visualize_example(next(training_viz_ds_iter)),\n name=f\"train\",\n )\n )\n self.visualization_callbacks.extend(\n setup_visualization(\n self.config.outputs,\n run_path=self.run_path,\n viz_fn=lambda: visualize_example(next(validation_viz_ds_iter)),\n name=f\"validation\",\n )\n )\n\n\n@attr.s(auto_attribs=True)\nclass SingleInstanceModelTrainer(Trainer):\n \"\"\"Trainer for models that output single-instance confidence maps.\"\"\"\n\n pipeline_builder: SingleInstanceConfmapsPipeline = attr.ib(init=False)\n\n def _update_config(self):\n \"\"\"Update the configuration with inferred 
values.\"\"\"\n if self.config.data.preprocessing.pad_to_stride is None:\n self.config.data.preprocessing.pad_to_stride = self.model.maximum_stride\n\n if self.config.optimization.batches_per_epoch is None:\n n_training_examples = len(\n self.data_readers.training_labels_reader.labels.user_instances\n )\n n_training_batches = (\n n_training_examples // self.config.optimization.batch_size\n )\n self.config.optimization.batches_per_epoch = max(\n self.config.optimization.min_batches_per_epoch, n_training_batches\n )\n\n if self.config.optimization.val_batches_per_epoch is None:\n n_validation_examples = len(\n self.data_readers.validation_labels_reader.labels.user_instances\n )\n n_validation_batches = (\n n_validation_examples // self.config.optimization.batch_size\n )\n self.config.optimization.val_batches_per_epoch = max(\n self.config.optimization.min_val_batches_per_epoch, n_validation_batches\n )\n\n def _setup_pipeline_builder(self):\n # Initialize pipeline builder.\n self.pipeline_builder = SingleInstanceConfmapsPipeline(\n data_config=self.config.data,\n optimization_config=self.config.optimization,\n single_instance_confmap_head=self.model.heads[0],\n )\n\n @property\n def input_keys(self) -> List[Text]:\n \"\"\"Return example keys to be mapped to model inputs.\"\"\"\n return [\"image\"]\n\n @property\n def output_keys(self) -> List[Text]:\n \"\"\"Return example keys to be mapped to model outputs.\"\"\"\n return [\"confidence_maps\"]\n\n def _setup_visualization(self):\n \"\"\"Set up visualization pipelines and callbacks.\"\"\"\n # Create visualization/inference pipelines.\n self.training_viz_pipeline = self.pipeline_builder.make_viz_pipeline(\n self.data_readers.training_labels_reader, self.keras_model\n )\n self.validation_viz_pipeline = self.pipeline_builder.make_viz_pipeline(\n self.data_readers.validation_labels_reader, self.keras_model\n )\n\n # Create static iterators.\n training_viz_ds_iter = iter(self.training_viz_pipeline.make_dataset())\n validation_viz_ds_iter = iter(self.validation_viz_pipeline.make_dataset())\n\n def visualize_example(example):\n img = example[\"image\"].numpy()\n cms = example[\"predicted_confidence_maps\"].numpy()\n pts_gt = example[\"instances\"].numpy()[0]\n pts_pr = example[\"predicted_points\"].numpy()\n\n scale = 1.0\n if img.shape[0] < 512:\n scale = 2.0\n if img.shape[0] < 256:\n scale = 4.0\n fig = plot_img(img, dpi=72 * scale, scale=scale)\n plot_confmaps(cms, output_scale=cms.shape[0] / img.shape[0])\n plot_peaks(pts_gt, pts_pr, paired=True)\n return fig\n\n self.visualization_callbacks.extend(\n setup_visualization(\n self.config.outputs,\n run_path=self.run_path,\n viz_fn=lambda: visualize_example(next(training_viz_ds_iter)),\n name=f\"train\",\n )\n )\n self.visualization_callbacks.extend(\n setup_visualization(\n self.config.outputs,\n run_path=self.run_path,\n viz_fn=lambda: visualize_example(next(validation_viz_ds_iter)),\n name=f\"validation\",\n )\n )\n\n\n@attr.s(auto_attribs=True)\nclass TopdownConfmapsModelTrainer(Trainer):\n \"\"\"Trainer for models that output instance centered confidence maps.\"\"\"\n\n pipeline_builder: TopdownConfmapsPipeline = attr.ib(init=False)\n\n def _update_config(self):\n \"\"\"Update the configuration with inferred values.\"\"\"\n if self.config.data.preprocessing.pad_to_stride is None:\n self.config.data.preprocessing.pad_to_stride = 1\n\n if self.config.data.instance_cropping.crop_size is None:\n self.config.data.instance_cropping.crop_size = 
sleap.nn.data.instance_cropping.find_instance_crop_size(\n self.data_readers.training_labels,\n padding=self.config.data.instance_cropping.crop_size_detection_padding,\n maximum_stride=self.model.maximum_stride,\n input_scaling=self.config.data.preprocessing.input_scaling,\n )\n\n if self.config.optimization.batches_per_epoch is None:\n n_training_examples = len(\n self.data_readers.training_labels_reader.labels.user_instances\n )\n n_training_batches = (\n n_training_examples // self.config.optimization.batch_size\n )\n self.config.optimization.batches_per_epoch = max(\n self.config.optimization.min_batches_per_epoch, n_training_batches\n )\n\n if self.config.optimization.val_batches_per_epoch is None:\n n_validation_examples = len(\n self.data_readers.validation_labels_reader.labels.user_instances\n )\n n_validation_batches = (\n n_validation_examples // self.config.optimization.batch_size\n )\n self.config.optimization.val_batches_per_epoch = max(\n self.config.optimization.min_val_batches_per_epoch, n_validation_batches\n )\n\n def _setup_pipeline_builder(self):\n # Initialize pipeline builder.\n self.pipeline_builder = TopdownConfmapsPipeline(\n data_config=self.config.data,\n optimization_config=self.config.optimization,\n instance_confmap_head=self.model.heads[0],\n )\n\n @property\n def input_keys(self) -> List[Text]:\n \"\"\"Return example keys to be mapped to model inputs.\"\"\"\n return [\"instance_image\"]\n\n @property\n def output_keys(self) -> List[Text]:\n \"\"\"Return example keys to be mapped to model outputs.\"\"\"\n return [\"instance_confidence_maps\"]\n\n def _setup_visualization(self):\n \"\"\"Set up visualization pipelines and callbacks.\"\"\"\n # Create visualization/inference pipelines.\n self.training_viz_pipeline = self.pipeline_builder.make_viz_pipeline(\n self.data_readers.training_labels_reader, self.keras_model\n )\n self.validation_viz_pipeline = self.pipeline_builder.make_viz_pipeline(\n self.data_readers.validation_labels_reader, self.keras_model\n )\n\n # Create static iterators.\n training_viz_ds_iter = iter(self.training_viz_pipeline.make_dataset())\n validation_viz_ds_iter = iter(self.validation_viz_pipeline.make_dataset())\n\n def visualize_example(example):\n img = example[\"instance_image\"].numpy()\n cms = example[\"predicted_instance_confidence_maps\"].numpy()\n pts_gt = example[\"center_instance\"].numpy()\n pts_pr = example[\"predicted_center_instance_points\"].numpy()\n\n scale = 1.0\n if img.shape[0] < 512:\n scale = 2.0\n if img.shape[0] < 256:\n scale = 4.0\n fig = plot_img(img, dpi=72 * scale, scale=scale)\n plot_confmaps(cms, output_scale=cms.shape[0] / img.shape[0])\n plot_peaks(pts_gt, pts_pr, paired=True)\n return fig\n\n self.visualization_callbacks.extend(\n setup_visualization(\n self.config.outputs,\n run_path=self.run_path,\n viz_fn=lambda: visualize_example(next(training_viz_ds_iter)),\n name=f\"train\",\n )\n )\n self.visualization_callbacks.extend(\n setup_visualization(\n self.config.outputs,\n run_path=self.run_path,\n viz_fn=lambda: visualize_example(next(validation_viz_ds_iter)),\n name=f\"validation\",\n )\n )\n\n\n@attr.s(auto_attribs=True)\nclass BottomUpModelTrainer(Trainer):\n \"\"\"Trainer for models that output multi-instance confidence maps and PAFs.\"\"\"\n\n pipeline_builder: BottomUpPipeline = attr.ib(init=False)\n\n def _update_config(self):\n \"\"\"Update the configuration with inferred values.\"\"\"\n if self.config.data.preprocessing.pad_to_stride is None:\n self.config.data.preprocessing.pad_to_stride = 
self.model.maximum_stride\n\n if self.config.optimization.batches_per_epoch is None:\n n_training_examples = len(self.data_readers.training_labels)\n n_training_batches = (\n n_training_examples // self.config.optimization.batch_size\n )\n self.config.optimization.batches_per_epoch = max(\n self.config.optimization.min_batches_per_epoch, n_training_batches\n )\n\n if self.config.optimization.val_batches_per_epoch is None:\n n_validation_examples = len(self.data_readers.validation_labels)\n n_validation_batches = (\n n_validation_examples // self.config.optimization.batch_size\n )\n self.config.optimization.val_batches_per_epoch = max(\n self.config.optimization.min_val_batches_per_epoch, n_validation_batches\n )\n\n def _setup_pipeline_builder(self):\n # Initialize pipeline builder.\n self.pipeline_builder = BottomUpPipeline(\n data_config=self.config.data,\n optimization_config=self.config.optimization,\n confmaps_head=self.model.heads[0],\n pafs_head=self.model.heads[1],\n )\n\n @property\n def input_keys(self) -> List[Text]:\n \"\"\"Return example keys to be mapped to model inputs.\"\"\"\n return [\"image\"]\n\n @property\n def output_keys(self) -> List[Text]:\n \"\"\"Return example keys to be mapped to model outputs.\"\"\"\n return [\"confidence_maps\", \"part_affinity_fields\"]\n\n def _setup_visualization(self):\n \"\"\"Set up visualization pipelines and callbacks.\"\"\"\n # Create visualization/inference pipelines.\n self.training_viz_pipeline = self.pipeline_builder.make_viz_pipeline(\n self.data_readers.training_labels_reader, self.keras_model\n )\n self.validation_viz_pipeline = self.pipeline_builder.make_viz_pipeline(\n self.data_readers.validation_labels_reader, self.keras_model\n )\n\n # Create static iterators.\n training_viz_ds_iter = iter(self.training_viz_pipeline.make_dataset())\n validation_viz_ds_iter = iter(self.validation_viz_pipeline.make_dataset())\n\n def visualize_confmaps_example(example):\n img = example[\"image\"].numpy()\n cms = example[\"predicted_confidence_maps\"].numpy()\n pts_gt = example[\"instances\"].numpy()\n pts_pr = example[\"predicted_peaks\"].numpy()\n\n scale = 1.0\n if img.shape[0] < 512:\n scale = 2.0\n if img.shape[0] < 256:\n scale = 4.0\n fig = plot_img(img, dpi=72 * scale, scale=scale)\n plot_confmaps(cms, output_scale=cms.shape[0] / img.shape[0])\n plot_peaks(pts_gt, pts_pr, paired=False)\n return fig\n\n def visualize_pafs_example(example):\n img = example[\"image\"].numpy()\n pafs = example[\"predicted_part_affinity_fields\"].numpy()\n\n scale = 1.0\n if img.shape[0] < 512:\n scale = 2.0\n if img.shape[0] < 256:\n scale = 4.0\n fig = plot_img(img, dpi=72 * scale, scale=scale)\n plot_pafs(\n pafs,\n output_scale=pafs.shape[0] / img.shape[0],\n stride=1,\n scale=8.0,\n width=1.0,\n )\n return fig\n\n self.visualization_callbacks.extend(\n setup_visualization(\n self.config.outputs,\n run_path=self.run_path,\n viz_fn=lambda: visualize_confmaps_example(next(training_viz_ds_iter)),\n name=f\"train\",\n )\n )\n self.visualization_callbacks.extend(\n setup_visualization(\n self.config.outputs,\n run_path=self.run_path,\n viz_fn=lambda: visualize_confmaps_example(next(validation_viz_ds_iter)),\n name=f\"validation\",\n )\n )\n\n # Memory leak:\n # self.visualization_callbacks.extend(\n # setup_visualization(\n # self.config.outputs,\n # run_path=self.run_path,\n # viz_fn=lambda: visualize_pafs_example(next(training_viz_ds_iter)),\n # name=f\"train_pafs\",\n # )\n # )\n # self.visualization_callbacks.extend(\n # setup_visualization(\n # 
self.config.outputs,\n # run_path=self.run_path,\n # viz_fn=lambda: visualize_pafs_example(next(validation_viz_ds_iter)),\n # name=f\"validation_pafs\",\n # )\n # )\n\n\ndef main():\n \"\"\"Create CLI for training and run.\"\"\"\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"training_job_path\", help=\"Path to training job profile JSON file.\"\n )\n parser.add_argument(\"labels_path\", help=\"Path to labels file to use for training.\")\n parser.add_argument(\n \"--video-paths\",\n type=str,\n default=\"\",\n help=\"List of paths for finding videos in case paths inside labels file need fixing.\",\n )\n parser.add_argument(\n \"--val_labels\",\n \"--val\",\n help=\"Path to labels file to use for validation (overrides training job path if set).\",\n )\n parser.add_argument(\n \"--test_labels\",\n \"--test\",\n help=\"Path to labels file to use for test (overrides training job path if set).\",\n )\n parser.add_argument(\n \"--tensorboard\",\n action=\"store_true\",\n help=\"Enables TensorBoard logging to the run path.\",\n )\n parser.add_argument(\n \"--save_viz\",\n action=\"store_true\",\n help=\"Enables saving of prediction visualizations to the run folder.\",\n )\n parser.add_argument(\n \"--zmq\", action=\"store_true\", help=\"Enables ZMQ logging (for GUI).\"\n )\n parser.add_argument(\n \"--run_name\",\n default=\"\",\n help=\"Run name to use when saving file, overrides other run name settings.\",\n )\n parser.add_argument(\"--prefix\", default=\"\", help=\"Prefix to prepend to run name.\")\n parser.add_argument(\"--suffix\", default=\"\", help=\"Suffix to append to run name.\")\n\n args, _ = parser.parse_known_args()\n\n # Find job configuration file.\n job_filename = args.training_job_path\n if not os.path.exists(job_filename):\n profile_dir = get_package_file(\"sleap/training_profiles\")\n\n if os.path.exists(os.path.join(profile_dir, job_filename)):\n job_filename = os.path.join(profile_dir, job_filename)\n else:\n raise FileNotFoundError(f\"Could not find training profile: {job_filename}\")\n\n # Load job configuration.\n job_config = TrainingJobConfig.load_json(job_filename)\n\n # Override config settings for CLI-based training.\n job_config.outputs.save_outputs = True\n job_config.outputs.tensorboard.write_logs = args.tensorboard\n job_config.outputs.zmq.publish_updates = args.zmq\n job_config.outputs.zmq.subscribe_to_controller = args.zmq\n if args.run_name != \"\":\n job_config.outputs.run_name = args.run_name\n if args.prefix != \"\":\n job_config.outputs.run_name_prefix = args.prefix\n if args.suffix != \"\":\n job_config.outputs.run_name_suffix = args.suffix\n job_config.outputs.save_visualizations = args.save_viz\n\n logger.info(f\"Training labels file: {args.labels_path}\")\n logger.info(f\"Training profile: {job_filename}\")\n logger.info(\"\")\n\n # Log configuration to console.\n logger.info(\"Arguments:\")\n logger.info(json.dumps(vars(args), indent=4))\n logger.info(\"\")\n logger.info(\"Training job:\")\n logger.info(job_config.to_json())\n logger.info(\"\")\n\n logger.info(\"Initializing trainer...\")\n # Create a trainer and run!\n trainer = Trainer.from_config(\n job_config,\n training_labels=args.labels_path,\n validation_labels=args.val_labels,\n test_labels=args.test_labels,\n video_search_paths=args.video_paths.split(\",\"),\n )\n trainer.train()\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"tensorflow.keras.losses.MeanSquaredError",
"matplotlib.use",
"tensorflow.keras.callbacks.ReduceLROnPlateau",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.callbacks.TensorBoard",
"tensorflow.keras.callbacks.EarlyStopping"
]
] |
yogendras843/Edureka-Mastering-Python-Assignments
|
[
"b47e926d753732a8fd697baa60df2d7c2a6c9f02"
] |
[
"Module 6/Module_6_Problem_2.py"
] |
[
"\"\"\"\n@author: Maneesh D\n@date: 29-May-17\n@intepreter: Python 3.6\n\nCreate random vector of size 50 and replace the maximum value by 0 and minimum value by 100.\n\"\"\"\nimport numpy as np\n\n\nrand_vector = np.random.randint(low=1, high=100, size=50)\nprint(rand_vector)\n\nprint(\"-\" * 12 + \"Replacing the min value with 0 and max value with 100\" + \"-\" * 12)\n\n# Sorting the array. array[0] will have the min value and array[len-1] will have max value.\nrand_vector.sort()\n\nmin_val = rand_vector[0]\nmax_val = rand_vector[49]\nprint(\"Minimum Value Elemnet=\", min_val, \" Maximum Value Element=\", max_val)\n\nfor index, val in enumerate(rand_vector):\n if val == min_val:\n rand_vector[index] = 0\n elif val == max_val:\n rand_vector[index] = 100\n\nprint(rand_vector)\n"
] |
[
[
"numpy.random.randint"
]
] |
Qiskit/monodromy
|
[
"97829c1289c8a79b6ae3ab29eb7f4ad1ea79d5ab"
] |
[
"monodromy/coordinates.py"
] |
[
"\"\"\"\nmonodromy/coordinates.py\n\nThis file contains routines for converting from one system to another. There are\nseveral common coordinate systems / choices of Weyl alcove in pu_4:\n\n+ (Balanced) canonical parameters: These are the (a, b, c) in the exponential\n\n exp(-i(a XX + b YY + c ZZ))\n\n with a, b, c further chosen to satisfy pi/4 ≥ a ≥ b ≥ |c|. One may further\n _normalize_ these coordinates by dividing by pi, so that the polytope becomes\n integrally specified.\n\n+ (Positive) canonical parameters: These are the (a, b, c) in the exponential\n\n exp(-i(a XX + b YY + c ZZ))\n\n with a, b, c further chosen to satisfy a ≥ b ≥ c ≥ 0 and pi - a ≥ b. One may\n further _normalize_ these coordinates by dividing by pi, so that the polytope\n becomes integrally specified.\n\n+ Monodromy coordinates: These are the a, b, c in the formula\n\n exp(diag(2 pi i a, 2 pi i b, 2 pi i c, -2 pi i(a + b + c)),\n\n with a, b, c further chosen to satisfy a ≥ b ≥ c ≥ -(a + b + c) ≥ a - 1 and\n b + c ≥ 0. These coordinates are always taken to be \"normalized\".\n\n NOTE: In the Monodromy Polytope paper, we used the condition c + 1/2 ≥ a\n instead. This is mathematically legal, but the resulting alcove (a\n triangular prism) is related to the canonical alcove (a tetrahedron) by\n a \"scissors congruence\" rather than by a linear transformation. For\n this reason, we prefer this alternative normalization condition.\n\nNOTE: Working in pu_4 rather than su_4 adds one extra inequality, corresponding\n to a preference for one of the half-spaces picked out by reflection\n through the `rho` operator.\n\"\"\"\n\nfrom copy import copy\nfrom functools import reduce\nimport math\nfrom typing import List, Optional\n\nimport numpy as np\n\nfrom .polytopes import ConvexPolytope, make_convex_polytope, Polytope\nfrom .utilities import clear_memoization, epsilon\n\n\npositive_canonical_alcove = make_convex_polytope([\n [1, -1, 0, 0], # 1 >= c1\n [0, 1, -1, 0], # c1 >= c2\n [0, 0, 1, -1], # c2 >= c3\n [1, -1, -1, 0], # 1 - c1 >= c2\n [0, 0, 1, 1], # c3 >= -c2\n], name=\"A_SU(4)^can\")\n\"\"\"\nInequalities defining the standard choice of fundamental Weyl alcove in\nnormalized positive canonical coordinates for SU(4).\n\ncf. Eqn 6 of Entropy.\n\"\"\"\n\n\npositive_canonical_alcove_c2 = make_convex_polytope([\n *positive_canonical_alcove.convex_subpolytopes[0].inequalities,\n [0, 0, 0, 1], # c3 >= 0, the C2 inequality\n], name=\"A_PU(4)^can\")\n\"\"\"\nInequalities defining the standard choice of fundamental Weyl alcove in\nnormalized positive canonical coordinates for PU(4).\n\ncf. 
Eqn 6 of Entropy.\n\"\"\"\n\n\nmonodromy_alcove = make_convex_polytope([\n [0, 1, -1, 0, ], # a1 - a2 >= 0\n [0, 0, 1, -1, ], # a2 - a3 >= 0\n [0, 1, 1, 2, ], # a3 - a4 >= 0\n [1, -2, -1, -1, ], # a4 - (a1 - 1) >= 0\n], name=\"A_SU(4)^monodromy\")\n\"\"\"\nInequalities defining the fundamental Weyl alcove used in monodromy polytope\ncalculations for SU(4).\n\"\"\"\n\n\nmonodromy_alcove_c2 = make_convex_polytope([\n [1, -2, -1, -1],\n [0, 0, 1, 1],\n [0, 1, -1, 0],\n [0, 0, 1, -1]\n], name=\"A_PU(4)^monodromy\")\n# monodromy_alcove_c2 = make_convex_polytope([\n# *monodromy_alcove.convex_subpolytopes[0].inequalities,\n# [0, 0, 1, 1, ], # a2 + a3 >= 0 , the C2 inequality\n# ]).reduce()\n\"\"\"\nInequalities defining the fundamental Weyl alcove used in monodromy polytope\ncalculations for PU(4).\n\"\"\"\n\n\nmonodromy_alcove_c2_pcs = make_convex_polytope([\n *monodromy_alcove.convex_subpolytopes[0].inequalities,\n [1, -2, 0, 2, ], # a3 + 1/2 >= a1 , the C2 inequality\n], name=\"A_PU(4)^PCS\")\n\"\"\"\nInequalities defining the fundamental Weyl alcove used in monodromy polytope\ncalculations for PU(4) according to Peterson-Crooks-Smith.\n\nIMPORTANT NOTE: This is only scissors-congruent to alcove_c2, which is itself\n linearly equivalent to positive_canonical_convex_polytope. We advise _not_\n using these coordinates.\n\"\"\"\n\n\ndef normalize_logspec_A(coordinate):\n \"\"\"\n Utility function for rotating a LogSpec tuple (targeting A)\n into its normal form.\n\n Expects a sorted input in the range [0, 1].\n \"\"\"\n total = sum(coordinate)\n if total > epsilon:\n return normalize_logspec_A([*coordinate[1:], coordinate[0] - 1])\n elif total < -epsilon:\n raise ValueError(\n f\"Over-rotated: {total}. I don't think this should happen.\"\n )\n else:\n return coordinate\n\n\ndef normalize_logspec_AC2(coordinate):\n \"\"\"\n Utility function for rotating a LogSpec tuple (targeting A_{C_2})\n into its normal form.\n\n Expects a sorted input in the range [0, 1].\n \"\"\"\n partially_normalized_coordinate = normalize_logspec_A(coordinate)\n if partially_normalized_coordinate[1] >= \\\n -partially_normalized_coordinate[2]:\n return partially_normalized_coordinate\n else:\n return [partially_normalized_coordinate[2] + 1 / 2,\n partially_normalized_coordinate[3] + 1 / 2,\n partially_normalized_coordinate[0] - 1 / 2,\n partially_normalized_coordinate[1] - 1 / 2]\n\n\ndef unitary_to_monodromy_coordinate(unitary):\n \"\"\"\n Given a unitary matrix, produces its alcove coordinate.\n \"\"\"\n unitary = unitary * np.linalg.det(unitary) ** (-1 / 4)\n sysy = np.array([\n [0, 0, 0, 1],\n [0, 0, -1, 0],\n [0, -1, 0, 0],\n [1, 0, 0, 0],\n ], dtype=complex)\n gammaQ = reduce(np.dot, [unitary, sysy, unitary.T, sysy])\n logspec_coordinate = np.real(\n np.log(np.linalg.eigvals(gammaQ)) / (2 * np.pi * 1j))\n return normalize_logspec_AC2(\n sorted(np.mod(logspec_coordinate, 1.0), reverse=True)\n )\n\n\ndef monodromy_to_positive_canonical_coordinate(x, y, z):\n \"\"\"\n Given a monodromy alcove coordinate, produces its image as an unnormalized\n positive canonical coordinate.\n \"\"\"\n normalizing_factor = np.pi\n return (\n (x + y) / 2 * normalizing_factor,\n (z + x) / 2 * normalizing_factor,\n (y + z) / 2 * normalizing_factor\n )\n\n\ndef positive_canonical_to_monodromy_coordinate(x, y, z):\n \"\"\"\n Converts an unnormalized positive canonical coordinate to a monodromy\n coordinate.\n \"\"\"\n normalizing_factor = np.pi\n\n return ((x + y - z) / normalizing_factor,\n (x - y + z) / normalizing_factor,\n (-x + y + z) / 
normalizing_factor)\n\n\ndef positive_to_balanced_canonical_coordinate(x, y, z):\n \"\"\"\n Converts a balanced canonical coordinate to a positive one.\n \"\"\"\n if x <= np.pi / 4:\n return x, y, z\n else:\n return tuple(sorted([np.pi / 2 - x, y, -z], reverse=True))\n\n\ndef monodromy_to_positive_canonical_polytope(\n monodromy_polytope: Polytope,\n coordinates: Optional[List[int]] = None\n) -> Polytope:\n \"\"\"\n Converts a Polytope in monodromy coordinates to a Polytope in positive\n canonical coordinates, normalized so that CX lies at (1/2, 0, 0).\n \"\"\"\n coordinates = [0, 1, 2, 3] if coordinates is None else coordinates\n\n canonical_convex_subpolytopes = []\n for convex_subpolytope in monodromy_polytope.convex_subpolytopes:\n inequalities, equalities = [], []\n for inequality in convex_subpolytope.inequalities:\n new_inequality = copy(inequality)\n k, x, y, z = (inequality[c] for c in coordinates)\n for c, v in zip(coordinates, [2 * k, x + y - z, x - y + z, -x + y + z]):\n new_inequality[c] = v\n for c in range(len(new_inequality)):\n if c not in coordinates:\n new_inequality[c] *= 2\n inequalities.append(new_inequality)\n\n for equality in convex_subpolytope.equalities:\n new_equality = copy(equality)\n k, x, y, z = (equality[c] for c in coordinates)\n for c, v in zip(coordinates, [2 * k, x + y - z, x - y + z, -x + y + z]):\n new_equality[c] = v\n for c in range(len(new_equality)):\n if c not in coordinates:\n new_equality[c] *= 2\n equalities.append(new_equality)\n\n canonical_convex_subpolytopes.append(ConvexPolytope(\n inequalities=inequalities,\n equalities=equalities,\n ))\n\n canonical_polytope = copy(monodromy_polytope)\n clear_memoization(canonical_polytope)\n\n canonical_polytope.convex_subpolytopes = canonical_convex_subpolytopes\n return canonical_polytope\n\n\ndef average_infidelity(p, q):\n \"\"\"\n Computes the infidelity distance between two points p, q expressed in\n positive canonical coordinates.\n \"\"\"\n\n a, b, c = p\n d, e, f = q\n\n return 1 - 1 / 20 * (4 + 16 * (\n math.cos(a - d) ** 2 * math.cos(b - e) ** 2 * math.cos(c - f) ** 2 +\n math.sin(a - d) ** 2 * math.sin(b - e) ** 2 * math.sin(c - f) ** 2\n ))\n\n\ndef rho_reflect(polytope, coordinates=None):\n \"\"\"\n Applies rho-reflection to the indicated `coordinates` of `polytope`.\n If `coordinates` is not supplied, uses the final three coordinates.\n \"\"\"\n\n if coordinates is None:\n coordinates = [0, -3, -2, -1]\n\n # an inequality\n # d + x a1 + y a2 + z a3 >= 0\n # induces on rho-application\n # d + x (a3 + 1/2) + y (a4 + 1/2) + z (a1 - 1/2) >= 0, or\n # (d + 1/2 x + 1/2 y - 1/2 z) + (z - y) a1 + (-y) a2 + (x - y) a3 >= 0.\n rho_subpolytopes = []\n for convex_subpolytope in polytope.convex_subpolytopes:\n rotated_equalities, rotated_inequalities = [], []\n for inequality in convex_subpolytope.inequalities:\n d = inequality[coordinates[0]]\n x = inequality[coordinates[1]]\n y = inequality[coordinates[2]]\n z = inequality[coordinates[3]]\n\n new_inequality = [2 * x for x in inequality]\n new_inequality[coordinates[0]] = 2 * d + x + y - z\n new_inequality[coordinates[1]] = 2 * z - 2 * y\n new_inequality[coordinates[2]] = 2 * 0 - 2 * y\n new_inequality[coordinates[3]] = 2 * x - 2 * y\n\n rotated_inequalities.append(new_inequality)\n\n for equality in convex_subpolytope.equalities:\n d = equality[coordinates[0]]\n x = equality[coordinates[1]]\n y = equality[coordinates[2]]\n z = equality[coordinates[3]]\n\n new_equality = [2 * x for x in equality]\n new_equality[coordinates[0]] = 2 * d + x + y - z\n 
new_equality[coordinates[1]] = 2 * z - 2 * y\n new_equality[coordinates[2]] = 2 * 0 - 2 * y\n new_equality[coordinates[3]] = 2 * x - 2 * y\n\n rotated_equalities.append(new_equality)\n\n rho_subpolytopes.append(ConvexPolytope(\n inequalities=rotated_inequalities,\n equalities=rotated_equalities,\n ))\n\n return Polytope(convex_subpolytopes=rho_subpolytopes)\n\n\ndef mirror_positive_canonical_coordinate(coordinate):\n \"\"\"\n Produces the SWAP-mirror of an unnormalized positive canonical coordinate.\n \"\"\"\n alpha, beta, gamma = coordinate\n if alpha >= np.pi / 4 + epsilon:\n return [np.pi / 4 - gamma, np.pi / 4 - beta, np.pi / 4 + alpha]\n else:\n return [np.pi / 4 + gamma, np.pi / 4 - beta, np.pi / 4 - alpha]\n\n\ndef mirror_monodromy_polytope(polytope):\n \"\"\"\n Calculates the mirror of a polytope expressed in monodromy coordinates.\n \"\"\"\n low_polytope = copy(polytope)\n low_polytope.convex_subpolytopes = []\n for cp in polytope.convex_subpolytopes:\n # for a < 1/2, get (1/2 + c, 1/2 - b, 1/2 - a)\n mirror_cp = ConvexPolytope(\n inequalities=[[1, -2, 0, 0]],\n equalities=[],\n name=cp.name\n )\n # k + c1 a + c2 b + c3 c ≥ 0 becomes\n # (2 k + c1 + c2 + c3) + -2 c3 a + -2 c2 b + 2 c1 c ≥ 0\n for ineq in cp.inequalities:\n k, c1, c2, c3, = ineq\n mirror_cp.inequalities.append([\n 2*k + c1 + c2 + c3, -2 * c3, -2 * c2, 2 * c1\n ])\n for eq in cp.equalities:\n k, c1, c2, c3, = eq\n mirror_cp.equalities.append([\n 2 * k + c1 + c2 + c3, -2 * c3, -2 * c2, 2 * c1\n ])\n low_polytope.convex_subpolytopes.append(mirror_cp)\n\n high_polytope = copy(polytope)\n high_polytope.convex_subpolytopes = []\n for cp in polytope.convex_subpolytopes:\n # for a > 1/2, get (1/2 - c, 1/2 - b, a - 1/2).\n mirror_cp = ConvexPolytope(\n inequalities=[[1, -2, 0, 0]],\n equalities=[],\n name=cp.name\n )\n # k + c1 a + c2 b + c3 c ≥ 0 becomes\n # (2 k + c1 + c2 - c3) + 2 c3 a - 2 c2 b - 2 c1 c ≥ 0\n for ineq in cp.inequalities:\n k, c1, c2, c3, = ineq\n mirror_cp.inequalities.append([\n 2 * k + c1 + c2 - c3, 2 * c3, -2 * c2, -2 * c1\n ])\n for eq in cp.equalities:\n k, c1, c2, c3, = eq\n mirror_cp.equalities.append([\n 2 * k + c1 + c2 - c3, 2 * c3, -2 * c2, -2 * c1\n ])\n high_polytope.convex_subpolytopes.append(mirror_cp)\n\n return (\n low_polytope\n .union(high_polytope)\n .intersect(positive_canonical_alcove_c2)\n )\n\n\ndef monodromy_to_monodromy_pcs_polytope(polytope):\n \"\"\"\n Converts a `polytope`, expressed in monodromy coordinates with the standard\n rho-normalization condition, to a polytope in monodromy coordinates with the\n Peterson-Crooks-Smith normalization condition instead.\n \"\"\"\n\n return polytope.intersect(monodromy_alcove_c2_pcs).union(\n rho_reflect(polytope).intersect(monodromy_alcove_c2_pcs)\n )\n"
] |
[
[
"numpy.linalg.det",
"numpy.array",
"numpy.linalg.eigvals",
"numpy.mod"
]
] |
bitbacchus/agent_based_COVID_SEIRX
|
[
"9957b443c06f86f33794c8404d8e6a2a4d964fa6"
] |
[
"src/scseirx/model_SEIRX.py"
] |
[
"import numpy as np\nimport networkx as nx\nfrom math import gamma\nfrom scipy.optimize import root_scalar\n\nfrom mesa import Model\nfrom mesa.time import RandomActivation, SimultaneousActivation\nfrom mesa.datacollection import DataCollector\n\nfrom scseirx.testing_strategy import Testing\n\n## data collection functions ##\ndef get_N_diagnostic_tests(model):\n return model.number_of_diagnostic_tests\n\n\ndef get_N_preventive_screening_tests(model):\n return model.number_of_preventive_screening_tests\n\n\ndef get_infection_state(agent):\n if agent.exposed == True: return 'exposed'\n elif agent.infectious == True: return 'infectious'\n elif agent.recovered == True: return 'recovered'\n else: return 'susceptible'\n\ndef get_quarantine_state(agent):\n if agent.quarantined == True: return True\n else: return False\n\n\ndef get_undetected_infections(model):\n return model.undetected_infections\n\n\ndef get_predetected_infections(model):\n return model.predetected_infections\n\n\ndef get_pending_test_infections(model):\n return model.pending_test_infections\n\n\ndef get_diagnostic_test_detected_infections_student(model):\n return model.positive_tests[model.Testing.diagnostic_test_type]['student']\ndef get_diagnostic_test_detected_infections_teacher(model):\n return model.positive_tests[model.Testing.diagnostic_test_type]['teacher']\ndef get_diagnostic_test_detected_infections_family_member(model):\n return model.positive_tests[model.Testing.diagnostic_test_type]['family_member']\ndef get_diagnostic_test_detected_infections_resident(model):\n return model.positive_tests[model.Testing.diagnostic_test_type]['resident']\ndef get_diagnostic_test_detected_infections_employee(model):\n return model.positive_tests[model.Testing.diagnostic_test_type]['employee']\ndef get_diagnostic_test_detected_infections_unistudent(model):\n return model.positive_tests[model.Testing.diagnostic_test_type]['unistudent']\ndef get_diagnostic_test_detected_infections_lecturer(model):\n return model.positive_tests[model.Testing.diagnostic_test_type]['lecturer']\n\ndiagnostic_test_detected_infections_funcs = {\n 'student':get_diagnostic_test_detected_infections_student,\n 'teacher':get_diagnostic_test_detected_infections_teacher,\n 'family_member':get_diagnostic_test_detected_infections_family_member,\n 'resident':get_diagnostic_test_detected_infections_resident,\n 'employee':get_diagnostic_test_detected_infections_employee,\n 'unistudent':get_diagnostic_test_detected_infections_unistudent,\n 'lecturer':get_diagnostic_test_detected_infections_lecturer\n}\n\ndef get_preventive_test_detected_infections_student(model):\n return model.positive_tests[model.Testing.preventive_screening_test_type]['student']\ndef get_preventive_test_detected_infections_teacher(model):\n return model.positive_tests[model.Testing.preventive_screening_test_type]['teacher']\ndef get_preventive_test_detected_infections_family_member(model):\n return model.positive_tests[model.Testing.preventive_screening_test_type]['family_member']\ndef get_preventive_test_detected_infections_resident(model):\n return model.positive_tests[model.Testing.preventive_screening_test_type]['resident']\ndef get_preventive_test_detected_infections_employee(model):\n return model.positive_tests[model.Testing.preventive_screening_test_type]['employee']\ndef get_preventive_test_detected_infections_unistudent(model):\n return model.positive_tests[model.Testing.preventive_screening_test_type]['unistudent']\ndef get_preventive_test_detected_infections_lecturer(model):\n return 
model.positive_tests[model.Testing.preventive_screening_test_type]['lecturer']\n\npreventive_test_detected_infections_funcs = {\n 'student':get_preventive_test_detected_infections_student,\n 'teacher':get_preventive_test_detected_infections_teacher,\n 'family_member':get_preventive_test_detected_infections_family_member,\n 'resident':get_preventive_test_detected_infections_resident,\n 'employee':get_preventive_test_detected_infections_employee,\n 'unistudent':get_preventive_test_detected_infections_unistudent,\n 'lecturer':get_preventive_test_detected_infections_lecturer\n}\n\n\n# parameter sanity check functions\n\n\ndef check_positive(var):\n\tassert var >= 0, 'negative number'\n\treturn var\n\n\ndef check_bool(var):\n\tassert type(var) == bool, 'not a bool'\n\treturn var\n\n\ndef check_positive_int(var):\n if var == None:\n return var\n assert type(var) == int, 'not an integer'\n assert var >= 0, 'negative number'\n return var\n\n\ndef check_contact_type_dict(var):\n\tassert type(var) == dict, 'not a dictionary'\n\tassert set(var.keys()).issubset({'very_far', 'far', 'intermediate', 'close'}), \\\n\t\t'does not contain the correct contact types (has to be very_far, far, intermediate or close)'\n\tassert all((isinstance(i, int) or isinstance(i, float)) for i in var.values()), \\\n\t\t'contact type weights are not numeric'\n\n\treturn var\n\n\ndef check_K1_contact_types(var):\n for area in var:\n assert area in ['very_far', 'far', 'intermediate',\n 'close'], 'K1 contact type not recognised'\n return var\n\n\ndef check_testing(var):\n assert var in ['diagnostic', 'background', 'preventive',\n 'background+preventive', False], \\\n 'unknown testing mode: {}'.format(var)\n\n return var\n\n\n\ndef check_probability(var):\n\tassert (type(var) == float) or (var == 0) or (var == 1), \\\n\t\t '{} not a float'.format(var)\n\tassert var >= 0, 'probability negative'\n\tassert var <= 1, 'probability larger than 1'\n\treturn var\n\n\ndef check_graph(var):\n assert type(var) in [nx.Graph, nx.MultiGraph], 'not a networkx graph'\n assert len(var.nodes) > 0, 'graph has no nodes'\n assert len(var.edges) > 0, 'graph has no edges'\n areas = [e[2]['contact_type'] for e in var.edges(data=True)]\n areas = set(areas)\n for a in areas:\n assert a in {'very_far', 'far', 'intermediate',\n 'close'}, 'contact type {} not recognised'.format(a)\n return var\n\n\ndef check_index_case(var, agent_types):\n\tallowed_strings = agent_types[:]\n\tallowed_strings.extend(['continuous'])\n\tassert var in allowed_strings, 'unknown index case mode'\n\treturn var\n\n\ndef check_discount(var):\n if var['slope'] != None:\n assert var['slope'] <= 0, 'slope needs to be <= 0 or None'\n assert np.abs(var['slope']) <= 1, 'absolute value of slope needs to be <= 1'\n assert var['intercept'], 'intercept needs to be positive'\n assert var['intercept'], 'intercept needs to be <= 1'\n return var\n\n\ndef get_weibull_shape(k, mu, var):\n '''\n Calculates the shape parameter of a Weibull distribution, given its mean\n mu and its variance var\n '''\n return var / mu**2 - gamma(1 + 2/k) / gamma(1+1/k)**2 + 1\n\n\n\ndef get_weibull_scale(mu, k):\n '''\n Calculates the scale parameter of a Weibull distribution, given its mean\n mu and its shape parameter k\n '''\n return mu / gamma(1 + 1/k)\n\n\ndef weibull_two_param(shape, scale):\n '''\n A two-parameter Weibull distribution, based on numpy ramdon's single\n parameter distribution. 
We use this distribution in the simulation to draw\n random epidemiological parameters for agents from the given distribution\n See https://numpy.org/doc/stable/reference/random/generated/numpy.random.weibull.html\n '''\n return scale * np.random.weibull(shape)\n\n\nclass SEIRX(Model):\n '''\n A model with a number of different agents that reproduces\n the SEIRX dynamics of pandemic spread in a facility. Note:\n all times are set to correspond to days\n\n G : networkx undirected graph\n Interaction graph between agents. Edges have to have edge the edge \n attribute 'contact_type' specifying the closeness of contacts, which can\n be ['very far', 'far', 'intermediate' and 'close']. Nodes have to have \n the node attribute 'type' which specifies the agent type of the given \n node (for example 'student' or 'teacher' in a school scenario).\n In addition, nodes can have the attribute 'unit', which assigns them to \n a unit in space (for example a 'class' in a school scenario).\n\n verbosity : integer in [0, 1, 2]\n Controls text output to std out to track simulation progress and \n transmission dynamics. Default = 0.\n\n base_transmission_risk : float\n Base probability to transmit an infection durig a contact of type \n \"close\" if no other measures or biological parameters influence the\n infection risk.\n\n testing : str\n Determines the testing strategy of the model. Default: 'diagnostic'.\n 'diagnostic': only diagnostic tests for symptomatic agents\n 'background': adds background screens of all agents after a positive\n diagnostic test\n 'preventive': adds preventive screens of agent groups to diagnostic\n testing. Screens happen in time intervals specified \n separately for each agent group in the variable \n 'screening_interval'.\n 'background+preventive': preventive screens AND background screens on\n top of diagnostic testing.\n\n infection_duration : int or [float, float] \n Parameter determining the infection duration. Default: 11 \n NOTE: includes the time an agent is exposed but not yet infectious at \n the beginning of an infection.\n positive integer: mean or median of the infection duration in days\n list of two floats: mean and standard deviation of a distribution\n specifying the infection duration in days. These\n numbers will be used to construct a Weibull\n distribution from which the infection duration will\n be drawn for every agent individually\n\n exposure_duration : int or [float, float] \n Parameter determining the exposure duration. Default: 4. \n Sets the time from transmission to becoming infectious\n positive integer: mean or median of the exposure duration in days\n list of two floats: mean and standard deviation of a distribution\n specifying the exposure duration in days. These\n numbers will be used to construct a Weibull\n distributoin from which the exposure duration will\n be drawn for every agent individually.\n\n time_until_symptoms : int or [float, float] \n Sets the time from transmission to (potentially) developing symptoms. \n Default: 6. Symptom probability has to be set for each agent group \n individually using the parameter 'symptom_probability'\n positive integer: mean or median of the time until symptoms in days\n list of two floats: mean and standard deviation of a distribution\n specifying the time until symptoms in days. 
These\n numbers will be used to construct a Weibull\n distribution from which the time until symptoms will\n be drawn for every agent individually.\n\n quarantine_duration : int\n Positive integer, sets the time a positively tested agent is quarantined\n in days. Default: 14. \n\n subclinical_modifier : float\n Modifies the infectiousness of asymptomatic cases. Example: if \n subclinical_modifier = 0.5, the infectiousness of an asymptomatic case \n will be reduced to 50%. Default: 1.0.\n\n infection_risk_contact_type_weights: dictionary \n Has to have the form\n {\n 'very_far': float, \n 'far': float, \n 'intermediate':float, \n 'close': float\n }\n Sets transmission risk multipliers for different contact types of agents \n specified in the contact network G. Default: {'very_far': 0.1,\n 'far': 0.5, 'intermediate': 1, 'close': 3}\n\n\n K1_contact_types : list of strings\n Definition of contact types for which agents are considered \"K1 contact \n persons\" if they had contact to a positively tested person with a \n specified contact intensity. The list may only include strings from the\n list ['very_far', 'far', 'intermediate', 'close'], i.e. the keys of the\n infection_risk_contact_type_weights dictionary. Default = ['close'].\n\n diagnostic_test_type : str \n Specifies the test technology and test result turnover time used for \n diagnostic testing. For example 'same_day_antigen' or 'two_day_PCR'. \n See module \"Testing\" for different implemented testing techologies.\n Default: 'one_day_PCR'. \n\n preventive_screening_test_type : str\n Specifies the test technology and test result turnover time used for \n preventive sreening. For example 'same_day_antigen' or 'two_day_PCR'. \n See module \"Testing\" for different implemented testing techologies.\n Default: 'one_day_PCR'.\n\n follow_up_testing_interval : int or None\n Sets the time a follow-up screen (background screen) is initiated after \n an initial screen triggered by a positive test result. Only applies if \n the testing strategy is 'background' or 'preventive'. If \"None\" is \n chosen, no background screens are performed. Default: None. \n\n liberating_testing : bool \n Flag that specifies, whether or not an agent is released from quarantine \n after returning a negative test result. Default: False.\n\n\tindex_case: str\n Specifies how infections are introduced into the facility. Can bei \n either a scenario-specific agent type (for example \"student\" or \n \"teacher\" in the school scenario) or \"continuous\". \n agent_type: If an agent type is specified, a single randomly chosen \n agent from this agent group will become the index case \n and no further index cases will be introduced into the \n scenario.\n 'continuous': In this case, agents have a continuous risk to become\n index cases in every simulation step. The risk has to\n be specified for every agent group individually, using\n the 'index_probability' parameter. If only a single\n agent group has a non-zero index probability, then only\n agents from this group can become index cases.\n Default = 'employee' (nursing home scenario), 'teacher' (school \n scenario), 'lecturer' (university scenario). \n\n\n agent_types: dictionary of dictionaries\n Specifies the parameters 'screening_interval', 'index_probability', \n 'mask' and 'vaccination_probability' individually for every agent group\n in the model. 
The dictionary therefore needs to have an entry for every\n agent type and every entry needs to have the following form:\n {\n agent_type:\n {\n screening_interval : int\n Number of days between each preventive screen in this agent \n group.\n index_probability : float \n Probability to become an index case in each time step. Needs to\n be in in the range [0, 1]. \n mask : bool\n Whether or not the agent type is wearing a mask.\n vaccination_probability : float\n Probability of an agent in the agent group to be vaccinated.\n Needs to be in the range [0, 1].\n }\n }\n The dictionary's keys are the names of the agent types which have to\n correspond to the node attributes in the contact graph. \n\n age_transmission_risk_discount : dictionary\n Specifies the linear relationship between agent age and transmission \n risk. Applies both to the risk of transmitting and the risk of receiving \n an infection. This is only used in scenarios in which agents have an age\n attribute (school). The dictionary needs to have the following fields:\n 'slope' : float\n The slope of the linear relationship.\n 'intercept' : float\n The intercept of the linear relationship.\n Default: {'slope':-0.02, 'intercept:1'}\n\n\n age_symptom_modification : dictionary\n Specifies the linear relationship between agent age and the probability\n to develop symptoms. This is only used in scenarios in which agents have\n an age attribute (school). The dictionary needs to have the following\n fields:\n 'slope' : float\n The slope of the linear relationship.\n 'intercept' : float\n The intercept of the linear relationship.\n Default: {'slope':-0.02545, 'intercept:0.854545'}\n\n mask_filter_efficiency : dictionary\n Specifies the effectiveness of masks to prevent the transmission and\n reception of an infection. The dictionary needs to have the following\n fields:\n 'inhale' : float\n Reduction of the risk to become infected if the receiving agent\n wears a mask.\n 'exhale' : float\n Reduction of the risk to transmit an infection if the \n transmitting agent wears a mask.\n Default: {'inhale':0, 'exhale':0}\n\n transmission_risk_ventilation_modifier : float\n Reduction of the transmission risk due to ventilation. Default: 0.\n\n transmission_risk_vaccination_modifier : dictionary\n Reduction of the transmission and reception risk due to vaccination.\n The dictionary needs to have the following fields:\n 'reception' : float\n Reduction of the probability to get infected if the infectee is\n vaccinated.\n 'transmission' : float\n Reduction of the probability to transmit the infection if the\n infected agent is vaccinated.\n Default: {'reception':1, 'transmission':0}\n\n N_days_in_network : int\n Number of distinct days the graph object contains. If the simulation\n duration (in days) surpasses the number of days in the graph, the\n simulation loops back to the first day etc. Default: 7.\n\n seed : int\n Fixes the seed of the simulation to enable repeatable simulation runs. 
\n If seed = None, the simulation will be initialized at random.\n Default: None.\n '''\n\n def __init__(self, G,\n verbosity = 0,\n base_transmission_risk = 0.05,\n testing='diagnostic',\n exposure_duration = [5.0, 1.9],\n time_until_symptoms = [6.4, 0.8],\n infection_duration = [10.91, 3.95],\n quarantine_duration = 10,\n subclinical_modifier = 0.6,\n infection_risk_contact_type_weights = {\n 'very_far': 0.1,\n 'far': 0.25,\n 'intermediate': 0.5,\n 'close': 1},\n K1_contact_types = ['close'],\n diagnostic_test_type = 'one_day_PCR',\n preventive_screening_test_type = 'same_day_antigen',\n follow_up_testing_interval = None,\n liberating_testing = False,\n index_case = 'teacher',\n agent_types = {\n 'teacher': {'screening_interval': None,\n 'index_probability': 0,\n 'mask':False,\n 'vaccination_ratio': 0},\n 'student': {'screening_interval': None,\n 'index_probability': 0,\n 'mask':False,\n 'vaccination_ratio': 0},\n 'family_member':{'screening_interval': None,\n 'index_probability': 0,\n 'mask':False,\n 'vaccination_ratio': 0}},\n age_transmission_risk_discount = \\\n {'slope':-0.02,\n 'intercept':1},\n age_symptom_modification = \\\n {'slope':-0.02545,\n 'intercept':0.854545},\n mask_filter_efficiency = {'exhale':0, 'inhale':0},\n transmission_risk_ventilation_modifier = 0,\n transmission_risk_vaccination_modifier = {\n 'reception':1,\n 'transmission':0},\n N_days_in_network = 7,\n seed = None):\n\n # mesa models already implement fixed seeds through their own random\n # number generations. Sadly, we need to use the Weibull distribution\n # here, which is not implemented in mesa's random number generation\n # module. Therefore, we need to initialize the numpy random number\n # generator with the given seed as well\n if seed != None:\n np.random.seed(seed)\n\n # sets the (daily) transmission risk for a household contact without\n # any precautions. Target infection ratios are taken from literature\n # and the value of the base_transmission_risk is calibrated such that\n # the simulation produces the correct infection ratios in a household\n # setting with the given distributions for epidemiological parameters\n # of agents\n self.base_transmission_risk = base_transmission_risk\n \t# sets the level of detail of text output to stdout (0 = no output)\n self.verbosity = check_positive_int(verbosity)\n # flag to turn off the testing & tracing strategy\n self.testing = check_testing(testing)\n self.running = True # needed for the batch runner implemented by mesa\n # set the interaction mode to simultaneous activation\n self.schedule = SimultaneousActivation(self)\n\n\n # internal step counter used to launch screening tests\n self.Nstep = 0\n\n # since we may have day-specific contact networks, we need\n # to keep track of the day of the week. 
Since the index case\n # per default is introduced at step 0 in index case mode, we\n # need to offset the starting day by a random number of days\n # to prevent artifacts from always starting on the same day of the week\n self.N_days_in_network = N_days_in_network\n self.day_offset = self.random.randint(1, 8)\n self.day = self.Nstep + self.day_offset\n\n ## epidemiological parameters: can be either a single integer or the\n # mean and standard deviation of a distribution\n self.epi_params = {}\n # counter to track the number of pathological parameter combinations\n # that had to be re-rolled (only here for debugging and control reasons)\n self.param_rerolls = 0\n\n for param, param_name in zip([exposure_duration, time_until_symptoms,\n infection_duration],['exposure_duration', 'time_until_symptoms',\n 'infection_duration']):\n\n if isinstance(param, int):\n self.epi_params[param_name] = check_positive_int(param)\n\n elif isinstance(param, list) and len(param) == 2:\n\n mu = check_positive(param[0])\n var = check_positive(param[1]**2)\n shape = root_scalar(get_weibull_shape, args=(mu, var),\n method='toms748', bracket=[0.2, 500]).root\n scale = get_weibull_scale(mu, shape)\n\n self.epi_params[param_name] = [shape, scale]\n else:\n print('{} format not recognized, should be either a single '+\\\n 'int or a tuple of two positive numbers'.format(param_name))\n\n\n # duration of quarantine\n self.quarantine_duration = check_positive_int(quarantine_duration)\n\n self.infection_risk_area_weights = check_contact_type_dict(\n infection_risk_contact_type_weights)\n\n # modifier for infectiosness for asymptomatic cases\n self.subclinical_modifier = check_positive(subclinical_modifier)\n # modifiers for the infection risk, depending on contact type\n self.infection_risk_contact_type_weights = infection_risk_contact_type_weights\n\n # modifications for age-dependent transmission and reception risks and\n # symptom probabilities\n self.age_transmission_risk_discount = \\\n check_discount(age_transmission_risk_discount)\n\n self.age_symptom_modification = age_symptom_modification\n #check_discount(age_symptom_modification)\n\n self.mask_filter_efficiency = mask_filter_efficiency\n self.transmission_risk_ventilation_modifier = \\\n transmission_risk_ventilation_modifier\n self.transmission_risk_vaccination_modifier = \\\n transmission_risk_vaccination_modifier\n ## agents and their interactions\n # interaction graph of agents\n self.G = check_graph(G)\n # add weights as edge attributes so they can be visualised easily\n if type(self.G) == nx.MultiGraph:\n for (u, v, key, contact_type) in self.G.edges(keys=True,\n data='contact_type'):\n self.G[u][v][key]['weight'] = \\\n self.infection_risk_contact_type_weights[contact_type]\n else:\n for e in G.edges(data=True):\n G[e[0]][e[1]]['weight'] = self.infection_risk_contact_type_weights\\\n \t[G[e[0]][e[1]]['contact_type']]\n\n # extract the different agent types from the contact graph\n self.agent_types = list(agent_types.keys())\n # dictionary of available agent classes with agent types and classes\n self.agent_classes = {}\n if 'resident' in agent_types:\n from scseirx.agent_resident import resident\n self.agent_classes['resident'] = resident\n if 'employee' in agent_types:\n from scseirx.agent_employee import employee\n self.agent_classes['employee'] = employee\n if 'student' in agent_types:\n from scseirx.agent_student import student\n self.agent_classes['student'] = student\n if 'teacher' in agent_types:\n from scseirx.agent_teacher import teacher\n 
self.agent_classes['teacher'] = teacher\n if 'family_member' in agent_types:\n from scseirx.agent_family_member import family_member\n self.agent_classes['family_member'] = family_member\n if 'lecturer' in agent_types:\n from scseirx.agent_lecturer import lecturer\n self.agent_classes['lecturer'] = lecturer\n if 'unistudent' in agent_types:\n from scseirx.agent_unistudent import unistudent\n self.agent_classes['unistudent'] = unistudent\n\n ## set agent characteristics for all agent groups\n # list of agent characteristics\n params = ['screening_interval','index_probability', 'mask' ,'vaccination_ratio',\n 'voluntary_testing_rate']\n\n # default values that are used in case a characteristic is not specified\n # for an agent group\n defaults = {'screening_interval':None,\n 'index_probability':0,\n 'mask':False,\n 'vaccination_ratio':0,\n 'voluntary_testing_rate':1\n }\n\n # sanity checks that are applied to parameters passed to the class\n # constructor to make sure they conform to model expectations\n check_funcs = [check_positive_int, check_probability, check_bool,\n check_probability, check_probability]\n\n # member dicts that store the parameter values for each agent group\n self.screening_intervals = {}\n self.index_probabilities = {}\n self.masks = {}\n self.vaccination_probabilities = {}\n self.voluntary_testing_rates = {}\n\n\n param_dicts = [self.screening_intervals, self.index_probabilities,\n self.masks, self.vaccination_probabilities, self.voluntary_testing_rates]\n\n # iterate over all possible agent parameters and agent groups: set the\n # respective value to the value passed through the constructor or to\n # the default value if no value has been passed\n for param,param_dict,check_func in zip(params,param_dicts,check_funcs):\n for at in self.agent_types:\n try:\n param_dict.update({at:check_func(agent_types[at][param])})\n except KeyError:\n param_dict.update({at:defaults[param]})\n\n # pass all parameters relevant for the testing strategy to the testing\n # class. 
NOTE: this separation is not a strictly necessary design\n # decision but I like to keep the parameters related to testing and\n # tracing in a separate place\n self.Testing = Testing(self, diagnostic_test_type,\n preventive_screening_test_type,\n check_positive_int(follow_up_testing_interval),\n self.screening_intervals,\n check_bool(liberating_testing),\n check_K1_contact_types(K1_contact_types),\n verbosity)\n\n\n # specifies either continuous probability for index cases in agent\n # groups based on the 'index_probability' for each agent group, or a\n # single (randomly chosen) index case in the passed agent group\n self.index_case = check_index_case(index_case, self.agent_types)\n\n self.num_agents = {}\n\n ## add agents\n # extract the agent nodes from the graph and add them to the scheduler\n for agent_type in self.agent_types:\n IDs = [x for x,y in G.nodes(data=True) if y['type'] == agent_type]\n self.num_agents.update({agent_type:len(IDs)})\n\n # get the agent locations (units) from the graph node attributes\n units = [self.G.nodes[ID]['unit'] for ID in IDs]\n\n # determine the agents that will be vaccinated, given the \n # vaccination ratio of the respective agent group\n vaccination_status = np.asarray([False] * len(IDs))\n if self.vaccination_probabilities[agent_type] > 0:\n n = round(self.vaccination_probabilities[agent_type] * len(IDs))\n idx = list(range(len(IDs)))\n rnd_idx = np.asarray(self.random.sample(idx, n))\n vaccination_status[rnd_idx] = True\n\n\n for ID, unit, vaccinated in zip(IDs, units, vaccination_status):\n\n tmp_epi_params = {}\n # for each of the three epidemiological parameters, check if\n # the parameter is an integer (if yes, pass it directly to the\n # agent constructor), or if it is specified by the shape and\n # scale parameters of a Weibull distribution. 
In the latter\n # case, draw a new number for every agent from the distribution\n # NOTE: parameters drawn from the distribution are rounded to\n # the nearest integer\n while True:\n for param_name, param in self.epi_params.items():\n if isinstance(param, int):\n tmp_epi_params[param_name] = param\n\n else:\n tmp_epi_params[param_name] = \\\n round(weibull_two_param(param[0], param[1]))\n\n if tmp_epi_params['exposure_duration'] > 0 and \\\n tmp_epi_params['time_until_symptoms'] >= \\\n tmp_epi_params['exposure_duration'] and\\\n tmp_epi_params['infection_duration'] > \\\n tmp_epi_params['exposure_duration']:\n break\n else:\n self.param_rerolls += 1\n if verbosity > 1:\n print('pathological epi-param case found!')\n print(tmp_epi_params)\n\n # check if the agent participates in voluntary testing\n p = self.voluntary_testing_rates[agent_type]\n voluntary_testing = np.random.choice([True, False],\n p=[p, 1-p])\n\n # construct the agent object\n a = self.agent_classes[agent_type](ID, unit, self,\n tmp_epi_params['exposure_duration'],\n tmp_epi_params['time_until_symptoms'],\n tmp_epi_params['infection_duration'],\n vaccinated,\n voluntary_testing,\n verbosity)\n self.schedule.add(a)\n\n\n\t\t# infect the first agent in single index case mode\n if self.index_case != 'continuous':\n infection_targets = [\n a for a in self.schedule.agents if a.type == index_case]\n # pick a random agent to infect in the selected agent group\n target = self.random.randint(0, len(infection_targets) - 1)\n infection_targets[target].exposed = True\n if self.verbosity > 0:\n print('{} exposed: {}'.format(index_case,\n infection_targets[target].ID))\n\n\n # list of agents that were tested positive this turn\n self.newly_positive_agents = []\n # flag that indicates if there were new positive tests this turn\n self.new_positive_tests = False\n # dictionary of flags that indicate whether a given agent group has\n # been creened this turn\n self.screened_agents= {\n 'reactive':{agent_type: False for agent_type in self.agent_types},\n 'follow_up':{agent_type: False for agent_type in self.agent_types},\n 'preventive':{agent_type: False for agent_type in self.agent_types}}\n\n\n # dictionary of counters that count the days since a given agent group\n # was screened. Initialized differently for different index case modes\n if (self.index_case == 'continuous') or \\\n \t (not np.any(list(self.Testing.screening_intervals.values()))):\n \tself.days_since_last_agent_screen = {agent_type: 0 for agent_type in\n \tself.agent_types}\n # NOTE: if we initialize these variables with 0 in the case of a single\n # index case, we introduce a bias since in 'single index case mode' the\n # first index case will always become exposed in step 0. 
To realize\n # random states of the preventive sceening procedure with respect to the\n # incidence of the index case, we have to randomly pick the days since\n # the last screen for the agent group from which the index case is\n else:\n \tself.days_since_last_agent_screen = {}\n \tfor agent_type in self.agent_types:\n \t\tif self.Testing.screening_intervals[agent_type] != None:\n \t\t\tself.days_since_last_agent_screen.update({\n \t\t\t\tagent_type: self.random.choice(range(0,\n \t\t\t\t self.Testing.screening_intervals[agent_type] + 1))})\n \t\telse:\n \t\t\tself.days_since_last_agent_screen.update({agent_type: 0})\n\n # dictionary of flags that indicates whether a follow-up screen for a\n # given agent group is scheduled\n self.scheduled_follow_up_screen = {agent_type: False for agent_type in\n \tself.agent_types}\n\n # counters\n self.number_of_diagnostic_tests = 0\n self.number_of_preventive_screening_tests = 0\n self.positive_tests = {self.Testing.preventive_screening_test_type:\n {agent_type:0 for agent_type in self.agent_types},\n self.Testing.diagnostic_test_type:\n {agent_type:0 for agent_type in self.agent_types}}\n\n self.undetected_infections = 0\n self.predetected_infections = 0\n self.pending_test_infections = 0\n self.quarantine_counters = {agent_type:0 for agent_type in agent_types.keys()}\n self.false_negative = 0\n\n # data collectors to save population counts and agent states every\n # time step\n\n model_reporters = {\n 'N_diagnostic_tests':get_N_diagnostic_tests,\n 'N_preventive_screening_tests':get_N_preventive_screening_tests,\n 'undetected_infections':get_undetected_infections,\n 'predetected_infections':get_predetected_infections,\n 'pending_test_infections':get_pending_test_infections\n }\n\n for agent_type in self.agent_types:\n model_reporters.update({\n 'diagnostic_test_detected_infections_{}'.format(agent_type):\\\n diagnostic_test_detected_infections_funcs[agent_type]\n })\n model_reporters.update({\n 'preventive_test_detected_infections_{}'.format(agent_type):\\\n preventive_test_detected_infections_funcs[agent_type]\n })\n\n\n self.datacollector = DataCollector(\n model_reporters=model_reporters,\n agent_reporters=\n \t{\n \t'infection_state': get_infection_state,\n 'quarantine_state': get_quarantine_state\n })\n\n\n ## transmission risk modifiers\n def get_transmission_risk_contact_type_modifier(self, source, target):\n # construct the edge key as combination between agent IDs and day\n n1 = source.ID\n n2 = target.ID\n tmp = [n1, n2]\n tmp.sort()\n n1, n2 = tmp\n key = '{}{}d{}'.format(n1, n2, self.day)\n contact_weight = self.G.get_edge_data(n1, n2, key)['weight']\n\n # the link weight is a multiplicative modifier of the link strength.\n # contacts of type \"close\" have, by definition, a weight of 1. Contacts\n # of type intermediate, far or very far have a weight < 1 and therefore\n # are less likely to transmit an infection. For example, if the contact\n # type far has a weight of 0.2, a contact of type far has only a 20%\n # chance of transmitting an infection, when compared to a contact of\n # type close. To calculate the probability of success p in the Bernoulli\n # trial, we need to reduce the base risk (or base probability of success)\n # by the modifications introduced by preventive measures. These\n # modifications are formulated in terms of \"probability of failure\", or\n # \"q\". 
A low contact weight has a high probability of failure, therefore\n # we return q = 1 - contact_weight here.\n q1 = 1 - contact_weight\n\n return q1\n\n\n def get_transmission_risk_age_modifier_transmission(self, source):\n '''linear function such that at age 18 the risk is that of an adult (=1).\n The slope of the line needs to be calibrated.\n '''\n age = source.age\n max_age = 18\n if age <= max_age:\n age_weight = self.age_transmission_risk_discount['slope'] * \\\n np.abs(age - max_age) + self.age_transmission_risk_discount['intercept']\n\n # The age weight can be interpreted as multiplicative factor that\n # reduces the chance for transmission with decreasing age. The slope\n # of the age_transmission_discount function is the decrease (in % of\n # the transmission risk for an 18 year old or above) of transmission\n # risk with every year a person is younger than 18 (the intercept is\n # 1 by definition).\n # To calculate the probability of success p in the Bernoulli\n # trial, we need to reduce the base risk (or base probability of \n # success) by the modifications introduced by preventive measures. \n # These modifications are formulated in terms of \"probability of \n # failure\", or \"q\". A low age weight has a high probability of \n # failure, therefore we return q = 1 - age_weight here.\n q2 = 1 - age_weight\n else:\n q2 = 0\n\n return q2\n\n\n def get_transmission_risk_age_modifier_reception(self, target):\n '''linear function such that at age 18 the risk is that of an adult (=1).\n The slope of the line needs to be calibrated.\n '''\n age = target.age\n max_age = 18\n if age <= max_age:\n age_weight = self.age_transmission_risk_discount['slope'] * \\\n np.abs(age - max_age) + self.age_transmission_risk_discount['intercept']\n # see description in get_transmission_risk_age_modifier_transmission\n q3 = 1 - age_weight\n else:\n q3 = 0\n\n return q3\n\n\n # infectiousness is constant and high until symptom onset and then\n # decreases monotonically until agents are not infectious anymore\n # at the end of the infection_duration\n def get_transmission_risk_progression_modifier(self, source):\n if source.days_since_exposure < source.exposure_duration:\n progression_weight = 0\n elif source.days_since_exposure <= source.time_until_symptoms:\n progression_weight = 1\n elif source.days_since_exposure > source.time_until_symptoms and \\\n source.days_since_exposure <= source.infection_duration:\n # we add 1 in the denominator, such that the source is also\n # (slightly) infectious on the last day of the infection_duration\n progression_weight = \\\n (source.days_since_exposure - source.time_until_symptoms) / \\\n (source.infection_duration - source.time_until_symptoms + 1)\n else:\n progression_weight = 0\n # see description in get_transmission_risk_age_modifier_transmission\n q4 = 1 - progression_weight\n\n return q4\n\n def get_transmission_risk_subclinical_modifier(self, source):\n if source.symptomatic_course == False:\n subclinical_weight = self.subclinical_modifier\n else:\n subclinical_weight = 1\n # see description in get_transmission_risk_age_modifier_transmission\n q5 = 1 - subclinical_weight\n return q5\n\n def get_transmission_risk_exhale_modifier(self, source):\n if source.mask:\n exhale_weight = self.mask_filter_efficiency['exhale']\n else:\n exhale_weight = 1\n # see description in get_transmission_risk_age_modifier_transmission\n q6 = 1 - exhale_weight\n return q6\n\n\n def get_transmission_risk_inhale_modifier(self, target):\n if target.mask:\n inhale_weight = 
self.mask_filter_efficiency['inhale']\n else:\n inhale_weight = 1\n # see description in get_transmission_risk_age_modifier_transmission\n q7 = 1 - inhale_weight\n return q7\n\n\n def get_transmission_risk_ventilation_modifier(self):\n ventilation_weight = self.transmission_risk_ventilation_modifier\n # see description in get_transmission_risk_age_modifier_transmission\n q8 = 1 - ventilation_weight\n return q8\n\n def get_transmission_risk_vaccination_modifier_reception(self, a):\n if a.vaccinated:\n q9 = self.transmission_risk_vaccination_modifier['reception']\n else:\n q9 = 0\n return q9\n\n def get_transmission_risk_vaccination_modifier_transmission(self, a):\n if a.vaccinated:\n q10 = self.transmission_risk_vaccination_modifier['transmission']\n else:\n q10 = 0\n return q10\n\n def test_agent(self, a, test_type):\n a.tested = True\n a.pending_test = test_type\n if test_type == self.Testing.diagnostic_test_type:\n self.number_of_diagnostic_tests += 1\n else:\n self.number_of_preventive_screening_tests += 1\n\n if a.exposed:\n # tests that happen in the period of time in which the agent is\n # exposed but not yet infectious. \n # Note: tests[test_type]['time_until_testable'] is negative for\n # tests that can detect an infection before agents become infectious\n if a.days_since_exposure >= a.exposure_duration + \\\n self.Testing.tests[test_type]['time_until_testable']:\n \n if self.verbosity > 1:\n print('{} {} sent positive sample (even though not infectious yet)'\n .format(a.type, a.ID))\n a.sample = 'positive'\n self.predetected_infections += 1\n self.positive_tests[test_type][a.type] += 1\n else:\n if self.verbosity > 1: print('{} {} sent negative sample'\n .format(a.type, a.ID))\n a.sample = 'negative'\n\n elif a.infectious:\n # tests that happen in the period of time in which the agent is\n # infectious and the infection is detectable by a given test\n # Note: tests[test_type]['time_until_testable'] is negative for \n # tests that can detect an infection before agents become \n # infectious. 
tests[test_type]['time_testable'] is negative for\n # tests that cease to detect an infection before agents stop being\n # infectious\n if a.days_since_exposure >= a.exposure_duration + \\\n self.Testing.tests[test_type]['time_until_testable'] and \\\n a.days_since_exposure <= a.infection_duration + \\\n self.Testing.tests[test_type]['time_testable']:\n if self.verbosity > 1:\n print('{} {} sent positive sample'.format(a.type, a.ID))\n a.sample = 'positive'\n self.positive_tests[test_type][a.type] += 1\n\n # track the undetected infections to assess how important they are\n # for infection spread\n else:\n if self.verbosity > 1:\n print('{} {} sent negative sample (even though infectious)'\n .format(a.type, a.ID))\n a.sample = 'negative'\n self.undetected_infections += 1\n\n else:\n if self.verbosity > 1: print('{} {} sent negative sample'\n .format(a.type, a.ID))\n a.sample = 'negative'\n\n # for same-day testing, immediately act on the results of the test\n if a.days_since_tested >= self.Testing.tests[test_type]['time_until_test_result']:\n a.act_on_test_result()\n\n def screen_agents(self, agent_group, test_type, screen_type):\n # only test agents that have not been tested already in this simulation\n # step and that are not already known positive cases\n\n if self.verbosity > 0:\n print('initiating {} {} screen'\\\n .format(screen_type, agent_group))\n\n untested_agents = [a for a in self.schedule.agents if\n (a.tested == False and a.known_positive == False\n and a.type == agent_group)]\n\n if len(untested_agents) > 0:\n self.screened_agents[screen_type][agent_group] = True\n self.days_since_last_agent_screen[agent_group] = 0\n\n # only test agents if they participate in voluntary testing\n if screen_type == 'preventive':\n for a in untested_agents:\n if a.voluntary_testing:\n self.test_agent(a, test_type)\n else:\n if self.verbosity > 1:\n print('not testing {} {}, not participating in voluntary testing'\\\n .format(agent_group, a.ID))\n else:\n for a in untested_agents:\n self.test_agent(a, test_type)\n\n if self.verbosity > 0:\n print()\n else:\n if self.verbosity > 0:\n print('no agents tested because all agents have already been tested')\n\n # the type of the test used in the pending test result is stored in the\n # variable pending_test\n\n def collect_test_results(self):\n agents_with_test_results = [a for a in self.schedule.agents if\n (a.pending_test and\n a.days_since_tested >= self.Testing.tests[a.pending_test]['time_until_test_result'])]\n\n return agents_with_test_results\n\n def trace_contacts(self, a):\n if a.quarantined == False:\n a.quarantined = True\n a.quarantine_start = self.Nstep\n\n if self.verbosity > 0:\n print('qurantined {} {}'.format(a.type, a.ID))\n\n # find all agents that share edges with the agent\n # that are classified as K1 contact types in the testing\n # strategy\n if a in self.G.nodes():\n K1_contacts = [e[1] for e in self.G.edges(a.ID, data=True) if\n e[2]['contact_type'] in self.Testing.K1_contact_types]\n K1_contacts = [a for a in self.schedule.agents if a.ID in K1_contacts]\n\n for K1_contact in K1_contacts:\n if self.verbosity > 0:\n print('quarantined {} {} (K1 contact of {} {})'\n .format(K1_contact.type, K1_contact.ID, a.type, a.ID))\n K1_contact.quarantined = True\n K1_contact.quarantine_start = self.Nstep\n\n def test_symptomatic_agents(self):\n # find symptomatic agents that have not been tested yet and are not\n # in quarantine and test them\n newly_symptomatic_agents = np.asarray([a for a in self.schedule.agents\n if (a.symptoms == 
True and a.tested == False and a.quarantined == False)])\n\n for a in newly_symptomatic_agents:\n # all symptomatic agents are quarantined by default\n if self.verbosity > 0:\n print('quarantined: {} {}'.format(a.type, a.ID))\n a.quarantined = True\n a.quarantine_start = self.Nstep\n\n self.test_agent(a, self.Testing.diagnostic_test_type)\n\n def quarantine_contacts(self):\n # trace and quarantine contacts of newly positive agents\n if len(self.newly_positive_agents) > 0:\n if self.verbosity > 0: print('new positive test(s) from {}'\n .format([a.ID for a in self.newly_positive_agents]))\n\n # send all K1 contacts of positive agents into quarantine\n for a in self.newly_positive_agents:\n self.trace_contacts(a)\n\n # indicate that a screen should happen because there are new\n # positive test results\n self.new_positive_tests = True\n self.newly_positive_agents = []\n\n else:\n self.new_positive_tests = False\n\n\n def step(self):\n self.day = (self.Nstep + self.day_offset) % self.N_days_in_network + 1\n # if the connection graph is time-resloved, set the graph that is\n # used to determine connections in this step to the sub-graph corres-\n # ponding to the current day of the week\n if self.dynamic_connections:\n self.G = self.day_connections[self.day]\n\n if self.verbosity > 0:\n print('day {}'.format(self.day))\n\n if self.testing:\n for agent_type in self.agent_types:\n for screen_type in ['reactive', 'follow_up', 'preventive']:\n self.screened_agents[screen_type][agent_type] = False\n\n if self.verbosity > 0:\n print('* testing and tracing *')\n\n self.test_symptomatic_agents()\n\n\n # collect and act on new test results\n agents_with_test_results = self.collect_test_results()\n for a in agents_with_test_results:\n a.act_on_test_result()\n\n self.quarantine_contacts()\n\n # screening:\n # a screen should take place if\n # (a) there are new positive test results\n # (b) as a follow-up screen for a screen that was initiated because\n # of new positive cases\n # (c) if there is a preventive screening policy and it is time for\n # a preventive screen in a given agent group\n\n # (a)\n if (self.testing == 'background' or self.testing == 'background+preventive')\\\n and self.new_positive_tests == True:\n for agent_type in self.screening_agents:\n self.screen_agents(\n agent_type, self.Testing.diagnostic_test_type, 'reactive')\n self.scheduled_follow_up_screen[agent_type] = True\n\n # (b)\n elif (self.testing == 'background' or self.testing == 'background+preventive') and \\\n self.Testing.follow_up_testing_interval != None and \\\n sum(list(self.scheduled_follow_up_screen.values())) > 0:\n for agent_type in self.screening_agents:\n if self.scheduled_follow_up_screen[agent_type] and\\\n self.days_since_last_agent_screen[agent_type] >=\\\n self.Testing.follow_up_testing_interval:\n self.screen_agents(\n agent_type, self.Testing.diagnostic_test_type, 'follow_up')\n else:\n if self.verbosity > 0:\n print('not initiating {} follow-up screen (last screen too close)'\\\n .format(agent_type))\n\n # (c) \n elif (self.testing == 'preventive' or self.testing == 'background+preventive')and \\\n np.any(list(self.Testing.screening_intervals.values())):\n\n for agent_type in self.screening_agents:\n interval = self.Testing.screening_intervals[agent_type]\n assert interval in [7, 3, 2, None], \\\n 'testing interval {} for agent type {} not supported!'\\\n .format(interval, agent_type)\n\n # (c.1) testing every 7 days = testing on Mondays\n if interval == 7 and self.day % interval == 1:\n 
self.screen_agents(agent_type,\n self.Testing.preventive_screening_test_type,\\\n 'preventive')\n # (c.2) testing every 3 days = testing on Mo & Turs\n elif interval == 3 and self.day % interval in [1, 4]:\n self.screen_agents(agent_type,\n self.Testing.preventive_screening_test_type,\\\n 'preventive')\n # (c.3) testing every 2 days = testing on Mo, Wed & Fri\n elif interval == 2 and self.day % interval in [1, 3, 5]:\n self.screen_agents(agent_type,\n self.Testing.preventive_screening_test_type,\\\n 'preventive')\n # No interval specified = no testing, even if testing\n # mode == preventive\n elif interval == None:\n pass\n else:\n if self.verbosity > 0:\n print('not initiating {} preventive screen (wrong day)'\\\n .format(agent_type))\n else:\n # do nothing\n pass\n\n for agent_type in self.agent_types:\n if not (self.screened_agents['reactive'][agent_type] or \\\n self.screened_agents['follow_up'][agent_type] or \\\n self.screened_agents['preventive'][agent_type]):\n self.days_since_last_agent_screen[agent_type] += 1\n\n\n if self.verbosity > 0: print('* agent interaction *')\n self.datacollector.collect(self)\n self.schedule.step()\n self.Nstep += 1\n"
] |
[
[
"numpy.random.weibull",
"numpy.abs",
"numpy.random.seed",
"numpy.random.choice",
"numpy.asarray",
"scipy.optimize.root_scalar"
]
] |
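The transmission-risk code in the row above returns each preventive measure as a "probability of failure" (q1 .. q10). Below is a minimal standalone sketch, separate from that row, of how such failure probabilities are typically folded into a single Bernoulli transmission trial; the helper name and the base_risk parameter are illustrative assumptions, not taken from that model.

import numpy as np

def combined_transmission_probability(base_risk, failure_probs, rng=None):
    # Multiply the base transmission risk by (1 - q) for every modifier q,
    # where each q is a "probability of failure" in [0, 1], then draw one
    # Bernoulli trial. (Hypothetical helper: the factors mirror the q1..q10
    # modifiers returned in the row above, but this combination step is an
    # assumption, not copied from that model.)
    rng = rng or np.random.default_rng()
    p = base_risk
    for q in failure_probs:
        p *= (1.0 - q)
    return bool(rng.random() < p)

# Example: base risk 5%; an exhale mask blocking 50% gives q = 0.5, and a
# "far" contact with weight 0.2 gives q = 1 - 0.2 = 0.8.
print(combined_transmission_probability(0.05, [0.5, 0.8]))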
Tarpelite/nConvSA
|
[
"15f71895cebf4f980e888d7405db0a07d80bcdf9"
] |
[
"examples/run_nConv_adv.py"
] |
[
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa, Albert, XLM-RoBERTa).\"\"\"\n\n\nimport argparse\nimport glob\nimport json\nimport logging\nimport os\nimport random\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset\nfrom torch.utils.data.distributed import DistributedSampler\nfrom tqdm import tqdm, trange\n\n\n\nfrom transformers import (\n WEIGHTS_NAME,\n AdamW,\n AlbertConfig,\n AlbertForSequenceClassification,\n AlbertTokenizer,\n BertConfig,\n BertForSequenceClassification,\n BertTokenizer,\n DistilBertConfig,\n DistilBertForSequenceClassification,\n DistilBertTokenizer,\n FlaubertConfig,\n FlaubertForSequenceClassification,\n FlaubertTokenizer,\n RobertaConfig,\n RobertaForSequenceClassification,\n RobertaTokenizer,\n XLMConfig,\n XLMForSequenceClassification,\n XLMRobertaConfig,\n XLMRobertaForSequenceClassification,\n XLMRobertaTokenizer,\n XLMTokenizer,\n XLNetConfig,\n XLNetForSequenceClassification,\n XLNetTokenizer,\n get_linear_schedule_with_warmup,\n)\n\nfrom utils_nConv import nConv_convert_examples_to_features as convert_examples_to_features\nfrom utils_nConv import nConvProcessor\nimport csv\n\n\n\ntry:\n from torch.utils.tensorboard import SummaryWriter\nexcept ImportError:\n from tensorboardX import SummaryWriter\n\nfrom sklearn.metrics import f1_score\n\n# from pudb import set_trace\n# set_trace()\n\nlogger = logging.getLogger(__name__)\n\n\n\nALL_MODELS = sum(\n (\n tuple(conf.pretrained_config_archive_map.keys())\n for conf in (\n BertConfig,\n XLNetConfig,\n XLMConfig,\n RobertaConfig,\n DistilBertConfig,\n AlbertConfig,\n XLMRobertaConfig,\n FlaubertConfig,\n )\n ),\n (),\n)\n\nMODEL_CLASSES = {\n \"bert\": (BertConfig, BertForSequenceClassification, BertTokenizer),\n \"xlnet\": (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer),\n \"xlm\": (XLMConfig, XLMForSequenceClassification, XLMTokenizer),\n \"roberta\": (RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer),\n \"distilbert\": (DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer),\n \"albert\": (AlbertConfig, AlbertForSequenceClassification, AlbertTokenizer),\n \"xlmroberta\": (XLMRobertaConfig, XLMRobertaForSequenceClassification, XLMRobertaTokenizer),\n \"flaubert\": (FlaubertConfig, FlaubertForSequenceClassification, FlaubertTokenizer),\n}\n\ndef compute_metrics(preds, labels):\n res = {}\n res[\"f1\"] = f1_score(labels, preds, average=\"macro\")\n\n return res\n\n\ndef set_seed(args):\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if args.n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n\n\ndef train(args, train_dataset, model, tokenizer):\n \"\"\" Train the model \"\"\"\n if 
args.local_rank in [-1, 0]:\n tb_writer = SummaryWriter()\n\n args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)\n train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)\n train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)\n\n if args.max_steps > 0:\n t_total = args.max_steps\n args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1\n else:\n t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs\n\n # Prepare optimizer and schedule (linear warmup and decay)\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": args.weight_decay,\n },\n {\"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], \"weight_decay\": 0.0},\n ]\n\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n scheduler = get_linear_schedule_with_warmup(\n optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total\n )\n\n # Check if saved optimizer or scheduler states exist\n if os.path.isfile(os.path.join(args.model_name_or_path, \"optimizer.pt\")) and os.path.isfile(\n os.path.join(args.model_name_or_path, \"scheduler.pt\")\n ):\n # Load in optimizer and scheduler states\n optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, \"optimizer.pt\")))\n scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, \"scheduler.pt\")))\n\n if args.fp16:\n try:\n from apex import amp\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)\n\n # multi-gpu training (should be after apex fp16 initialization)\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Distributed training (should be after apex fp16 initialization)\n if args.local_rank != -1:\n model = torch.nn.parallel.DistributedDataParallel(\n model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True,\n )\n\n # Train!\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_dataset))\n logger.info(\" Num Epochs = %d\", args.num_train_epochs)\n logger.info(\" Instantaneous batch size per GPU = %d\", args.per_gpu_train_batch_size)\n logger.info(\n \" Total train batch size (w. 
parallel, distributed & accumulation) = %d\",\n args.train_batch_size\n * args.gradient_accumulation_steps\n * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),\n )\n logger.info(\" Gradient Accumulation steps = %d\", args.gradient_accumulation_steps)\n logger.info(\" Total optimization steps = %d\", t_total)\n\n global_step = 0\n epochs_trained = 0\n steps_trained_in_current_epoch = 0\n # Check if continuing training from a checkpoint\n # if os.path.exists(args.model_name_or_path):\n # # set global_step to gobal_step of last saved checkpoint from model path\n # global_step = int(args.model_name_or_path.split(\"-\")[-1].split(\"/\")[0])\n # epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)\n # steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)\n\n # logger.info(\" Continuing training from checkpoint, will skip to saved global_step\")\n # logger.info(\" Continuing training from epoch %d\", epochs_trained)\n # logger.info(\" Continuing training from global step %d\", global_step)\n # logger.info(\" Will skip the first %d steps in the first epoch\", steps_trained_in_current_epoch)\n\n tr_loss, logging_loss = 0.0, 0.0\n model.zero_grad()\n train_iterator = trange(\n epochs_trained, int(args.num_train_epochs), desc=\"Epoch\", disable=args.local_rank not in [-1, 0],\n )\n set_seed(args) # Added here for reproductibility\n for _ in train_iterator:\n # epoch_iterator = tqdm(train_dataloader, desc=\"Iteration\", disable=args.local_rank not in [-1, 0])\n iter_bar = tqdm(train_dataloader, desc=\"Iter(loss=X.XXX)\")\n for step, batch in enumerate(iter_bar):\n\n # Skip past any already trained steps if resuming training\n if steps_trained_in_current_epoch > 0:\n steps_trained_in_current_epoch -= 1\n continue\n\n model.train()\n batch = tuple(t.to(args.device) for t in batch)\n inputs = {\"input_ids\": batch[0], \"attention_mask\": batch[1], \"labels\": batch[3]}\n if args.model_type != \"distilbert\":\n inputs[\"token_type_ids\"] = (\n batch[2] if args.model_type in [\"bert\", \"xlnet\", \"albert\"] else None\n ) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids\n \n\n # ======================= Code for adversarial training ========================\n input_ids = inputs[\"input_ids\"]\n input_mask = inputs[\"attention_mask\"]\n if isinstance(model, torch.nn.DataParallel):\n embeds_init = model.module.bert.embeddings.word_embeddings(input_ids)\n else:\n embeds_init = model.bert.embeddings.word_embeddings(input_ids)\n \n if args.adv_init_mag > 0:\n input_mask = input_mask.to(embeds_init)\n input_lengths = torch.sum(input_mask, 1)\n\n if args.norm_type == \"l2\":\n delta = torch.zeros_like(embeds_init).uniform_(-1, 1) * input_mask.unsqueeze(2)\n dims = input_lengths * embeds_init.size(-1)\n mag = args.adv_init_mag / torch.sqrt(dims)\n delta = (delta*mag.view(-1, 1, 1)).detach()\n elif args.norm_type == \"linf\":\n delta = torch.zeros_like(embeds_init).uniform_(-args.adv_init_mag, args.adv_init_mag) * input_mask.unsqueeze(2)\n else:\n delta = torch.zeros_like(embeds_init)\n \n for astep in range(args.adv_steps):\n delta.requires_grad_()\n inputs[\"inputs_embeds\"] = delta + embeds_init\n inputs[\"input_ids\"] = None\n \n outputs = model(**inputs)\n loss = outputs[0]\n\n # (1) backward\n if args.n_gpu > 1:\n loss = loss.mean()\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gredient_accumulation_steps\n \n loss = loss / args.adv_steps\n\n if args.fp16:\n with 
amp.scale_loss(loss, optimizer) as scale_loss:\n scale_loss.backward()\n else:\n loss.backward()\n \n\n # (2) get gradient on delta\n delta_grad = delta.grad.clone().detach()\n\n # (3) update and clip\n if args.norm_type == \"l2\":\n denorm = torch.norm(delta_grad.view(delta_grad.size(0), -1), dim=1).view(-1, 1, 1)\n denorm = torch.clamp(denorm, min=1e-8)\n delta = (delta + args.adv_lr*delta_grad / denorm).detach()\n if args.adv_max_norm > 0:\n delta_norm = torch.norm(delta.view(delta.size(0), -1).float(), p=2, dim=1).detach()\n exceed_mask = (delta_norm > args.adv_max_norm).to(embeds_init)\n reweights = (args.adv_max_norm /delta_norm * exceed_mask \\\n + (1 - exceed_mask)).view(-1, 1 ,1)\n \n delta = (delta * reweights).detach()\n elif args.norm_type == \"linf\":\n denorm = torch.norm(delta_grad.view(delta_grad.size(0), -1), dim=1, p=float(\"inf\")).view(-1, 1, 1)\n denorm = torch.clamp(denorm, min=1e-8)\n delta = (delta + args.adv_lr * delta_grad / denorm).detach()\n if args.adv_max_norm > 0:\n delta = torch.clamp(delta, -args.adv_max_norm, args.adv_max_norm).detach()\n else:\n\n print(\"Norm type {} not specified.\".format(args.norm_type))\n exit()\n \n if isinstance(model, torch.nn.DataParallel):\n embeds_init = model.module.bert.embeddings.word_embeddings(input_ids)\n else:\n embeds_init = model.bert.embeddings.word_embeddings(input_ids)\n \n # ===================== End Adv training ======================\n\n\n tr_loss += loss.item()\n iter_bar.set_description(\"Iter (loss=%5.3f)\" % loss.item())\n if (step + 1) % args.gradient_accumulation_steps == 0:\n if args.fp16:\n torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)\n else:\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n\n optimizer.step()\n scheduler.step() # Update learning rate schedule\n model.zero_grad()\n global_step += 1\n\n if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:\n logs = {}\n if (\n args.local_rank == -1 and args.evaluate_during_training\n ): # Only evaluate when single GPU otherwise metrics may not average well\n results = evaluate(args, model, tokenizer)\n for key, value in results.items():\n eval_key = \"eval_{}\".format(key)\n logs[eval_key] = value\n\n loss_scalar = (tr_loss - logging_loss) / args.logging_steps\n learning_rate_scalar = scheduler.get_lr()[0]\n logs[\"learning_rate\"] = learning_rate_scalar\n logs[\"loss\"] = loss_scalar\n logging_loss = tr_loss\n\n for key, value in logs.items():\n tb_writer.add_scalar(key, value, global_step)\n print(json.dumps({**logs, **{\"step\": global_step}}))\n\n if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:\n # Save model checkpoint\n output_dir = os.path.join(args.output_dir, \"checkpoint-{}\".format(global_step))\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n model_to_save = (\n model.module if hasattr(model, \"module\") else model\n ) # Take care of distributed/parallel training\n model_to_save.save_pretrained(output_dir)\n tokenizer.save_pretrained(output_dir)\n\n torch.save(args, os.path.join(output_dir, \"training_args.bin\"))\n logger.info(\"Saving model checkpoint to %s\", output_dir)\n\n torch.save(optimizer.state_dict(), os.path.join(output_dir, \"optimizer.pt\"))\n torch.save(scheduler.state_dict(), os.path.join(output_dir, \"scheduler.pt\"))\n logger.info(\"Saving optimizer and scheduler states to %s\", output_dir)\n\n if args.max_steps > 0 and global_step > args.max_steps:\n 
iter_bar.close()\n break\n if args.max_steps > 0 and global_step > args.max_steps:\n train_iterator.close()\n break\n\n if args.local_rank in [-1, 0]:\n tb_writer.close()\n\n return global_step, tr_loss / global_step\n\n\ndef evaluate(args, model, tokenizer, prefix=\"\"):\n # Loop to handle MNLI double evaluation (matched, mis-matched)\n eval_task_names = (\"mnli\", \"mnli-mm\") if args.task_name == \"mnli\" else (args.task_name,)\n eval_outputs_dirs = (args.output_dir, args.output_dir + \"-MM\") if args.task_name == \"mnli\" else (args.output_dir,)\n\n results = {}\n for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):\n eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True)\n\n if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:\n os.makedirs(eval_output_dir)\n\n args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)\n # Note that DistributedSampler samples randomly\n eval_sampler = SequentialSampler(eval_dataset)\n eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)\n\n # multi-gpu eval\n if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):\n model = torch.nn.DataParallel(model)\n\n # Eval!\n logger.info(\"***** Running evaluation {} *****\".format(prefix))\n logger.info(\" Num examples = %d\", len(eval_dataset))\n logger.info(\" Batch size = %d\", args.eval_batch_size)\n eval_loss = 0.0\n nb_eval_steps = 0\n preds = None\n out_label_ids = None\n \n for batch in tqdm(eval_dataloader, desc=\"Evaluating\"):\n model.eval()\n batch = tuple(t.to(args.device) for t in batch)\n\n with torch.no_grad():\n inputs = {\"input_ids\": batch[0], \"attention_mask\": batch[1], \"labels\": batch[3]}\n if args.model_type != \"distilbert\":\n inputs[\"token_type_ids\"] = (\n batch[2] if args.model_type in [\"bert\", \"xlnet\", \"albert\"] else None\n ) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids\n outputs = model(**inputs)\n tmp_eval_loss, logits = outputs[:2]\n\n eval_loss += tmp_eval_loss.mean().item()\n nb_eval_steps += 1\n if preds is None:\n preds = logits.detach().cpu().numpy()\n out_label_ids = inputs[\"labels\"].detach().cpu().numpy()\n else:\n preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)\n out_label_ids = np.append(out_label_ids, inputs[\"labels\"].detach().cpu().numpy(), axis=0)\n\n eval_loss = eval_loss / nb_eval_steps\n if args.output_mode == \"classification\":\n preds = np.argmax(preds, axis=1)\n elif args.output_mode == \"regression\":\n preds = np.squeeze(preds)\n result = compute_metrics(preds, out_label_ids)\n results.update(result)\n\n output_eval_file = os.path.join(eval_output_dir, prefix, \"eval_results.txt\")\n with open(output_eval_file, \"w\") as writer:\n logger.info(\"***** Eval results {} *****\".format(prefix))\n for key in sorted(result.keys()):\n logger.info(\" %s = %s\", key, str(result[key]))\n writer.write(\"%s = %s\\n\" % (key, str(result[key])))\n\n return results\n\n\ndef test(args, model, tokenizer, prefix=\"\"):\n # Loop to handle MNLI double evaluation (matched, mis-matched)\n eval_task_names = (\"mnli\", \"mnli-mm\") if args.task_name == \"mnli\" else (args.task_name,)\n eval_outputs_dirs = (args.output_dir, args.output_dir + \"-MM\") if args.task_name == \"mnli\" else (args.output_dir,)\n\n results = {}\n for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):\n eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True, test=True)\n\n if 
not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:\n os.makedirs(eval_output_dir)\n\n args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)\n # Note that DistributedSampler samples randomly\n eval_sampler = SequentialSampler(eval_dataset)\n eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)\n\n # multi-gpu eval\n if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):\n model = torch.nn.DataParallel(model)\n\n # Eval!\n logger.info(\"***** Running prediction {} *****\".format(prefix))\n logger.info(\" Num examples = %d\", len(eval_dataset))\n logger.info(\" Batch size = %d\", args.eval_batch_size)\n eval_loss = 0.0\n nb_eval_steps = 0\n preds = None\n out_label_ids = None\n \n for batch in tqdm(eval_dataloader, desc=\"Prediction\"):\n model.eval()\n batch = tuple(t.to(args.device) for t in batch)\n\n with torch.no_grad():\n inputs = {\"input_ids\": batch[0], \"attention_mask\": batch[1], \"labels\": batch[3]}\n if args.model_type != \"distilbert\":\n inputs[\"token_type_ids\"] = (\n batch[2] if args.model_type in [\"bert\", \"xlnet\", \"albert\"] else None\n ) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids\n outputs = model(**inputs)\n tmp_eval_loss, logits = outputs[:2]\n\n eval_loss += tmp_eval_loss.mean().item()\n nb_eval_steps += 1\n if preds is None:\n preds = logits.detach().cpu().numpy()\n out_label_ids = inputs[\"labels\"].detach().cpu().numpy()\n else:\n preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)\n out_label_ids = np.append(out_label_ids, inputs[\"labels\"].detach().cpu().numpy(), axis=0)\n\n eval_loss = eval_loss / nb_eval_steps\n if args.output_mode == \"classification\":\n preds = np.argmax(preds, axis=1)\n elif args.output_mode == \"regression\":\n preds = np.squeeze(preds)\n result = compute_metrics(preds, out_label_ids)\n results.update(result)\n\n test_file = os.path.join(args.data_dir, \"test.tsv\")\n\n\n if not args.do_save_all:\n\n with open(test_file, \"r\", encoding=\"utf-8\") as f:\n all_test_ids = [x[0] for x in list(csv.reader(f, delimiter=\"\\t\", quotechar=None))]\n \n assert len(all_test_ids) == len(preds)\n\n result_file = os.path.join(args.output_dir, \"result.csv\")\n with open(result_file, \"w+\", encoding=\"utf-8\") as f:\n line = \",\".join(['id', 'y']) + \"\\n\"\n f.write(line)\n for id_, y in zip(all_test_ids, preds):\n y = int(y) - 1\n line=\",\".join([str(id_).strip(), str(y)]) + \"\\n\"\n f.write(line)\n \n else:\n with open(test_file, \"r\", encoding=\"utf-8\") as f:\n all_test_data = list(csv.reader(f, delimiter=\"\\t\", quotechar=None))\n \n result_file = os.path.join(args.output_dir, \"result.csv\")\n logger.info(\"writing predictions to {}\".format(result_file))\n with open(result_file, \"w+\", encoding=\"uft-8\") as f:\n for record, y in zip(all_test_data, preds):\n y = int(y) -1\n record = record + [str(y)]\n line = \"\\t\".join(record) + \"\\n\"\n f.write(line)\n\n return results\n\n\n\n\ndef load_and_cache_examples(args, task, tokenizer, evaluate=False, test=False):\n if args.local_rank not in [-1, 0] and not evaluate:\n torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache\n\n processor = nConvProcessor()\n output_mode = \"classification\"\n # Load data features from cache or dataset file\n part = \"train\"\n get_func = processor.get_train_examples\n if evaluate:\n part = \"evaluate\"\n get_func=processor.get_dev_examples\n 
if test:\n part = \"test\"\n get_func = processor.get_test_examples\n cached_features_file = os.path.join(\n args.data_dir,\n \"cached_{}_{}_{}_{}\".format(\n part,\n list(filter(None, args.model_name_or_path.split(\"/\"))).pop(),\n str(args.max_seq_length),\n str(task),\n ),\n )\n if os.path.exists(cached_features_file) and not args.overwrite_cache:\n logger.info(\"Loading features from cached file %s\", cached_features_file)\n features = torch.load(cached_features_file)\n else:\n logger.info(\"Creating features from dataset file at %s\", args.data_dir)\n label_list = processor.get_labels()\n if task in [\"mnli\", \"mnli-mm\"] and args.model_type in [\"roberta\", \"xlmroberta\"]:\n # HACK(label indices are swapped in RoBERTa pretrained model)\n label_list[1], label_list[2] = label_list[2], label_list[1]\n\n examples = get_func(args.data_dir) \n features = convert_examples_to_features(\n examples,\n tokenizer,\n label_list=label_list,\n max_length=args.max_seq_length,\n output_mode=output_mode,\n pad_on_left=bool(args.model_type in [\"xlnet\"]), # pad on the left for xlnet\n pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],\n pad_token_segment_id=4 if args.model_type in [\"xlnet\"] else 0,\n )\n if args.local_rank in [-1, 0]:\n logger.info(\"Saving features into cached file %s\", cached_features_file)\n torch.save(features, cached_features_file)\n\n if args.local_rank == 0 and not evaluate:\n torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache\n\n # Convert to Tensors and build dataset\n all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)\n all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)\n all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)\n if output_mode == \"classification\":\n all_labels = torch.tensor([f.label for f in features], dtype=torch.long)\n elif output_mode == \"regression\":\n all_labels = torch.tensor([f.label for f in features], dtype=torch.float)\n\n dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)\n return dataset\n\n\ndef main():\n parser = argparse.ArgumentParser()\n\n # Required parameters\n parser.add_argument(\n \"--data_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The input data dir. 
Should contain the .tsv files (or other data files) for the task.\",\n )\n parser.add_argument(\n \"--model_type\",\n default=None,\n type=str,\n required=True,\n help=\"Model type selected in the list: \" + \", \".join(MODEL_CLASSES.keys()),\n )\n parser.add_argument(\n \"--model_name_or_path\",\n default=None,\n type=str,\n required=True,\n help=\"Path to pre-trained model or shortcut name selected in the list: \" + \", \".join(ALL_MODELS),\n )\n parser.add_argument(\n \"--task_name\",\n default=\"nConv\",\n type=str\n )\n parser.add_argument(\n \"--output_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The output directory where the model predictions and checkpoints will be written.\",\n )\n\n # Other parameters\n parser.add_argument(\n \"--config_name\", default=\"\", type=str, help=\"Pretrained config name or path if not the same as model_name\",\n )\n parser.add_argument(\n \"--tokenizer_name\",\n default=\"\",\n type=str,\n help=\"Pretrained tokenizer name or path if not the same as model_name\",\n )\n parser.add_argument(\n \"--cache_dir\",\n default=\"\",\n type=str,\n help=\"Where do you want to store the pre-trained models downloaded from s3\",\n )\n parser.add_argument(\n \"--max_seq_length\",\n default=128,\n type=int,\n help=\"The maximum total input sequence length after tokenization. Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\",\n )\n parser.add_argument(\"--do_train\", action=\"store_true\", help=\"Whether to run training.\")\n parser.add_argument(\"--do_eval\", action=\"store_true\", help=\"Whether to run eval on the dev set.\")\n parser.add_argument(\n \"--evaluate_during_training\", action=\"store_true\", help=\"Run evaluation during training at each logging step.\",\n )\n parser.add_argument(\n \"--do_lower_case\", action=\"store_true\", help=\"Set this flag if you are using an uncased model.\",\n )\n\n parser.add_argument(\n \"--per_gpu_train_batch_size\", default=8, type=int, help=\"Batch size per GPU/CPU for training.\",\n )\n parser.add_argument(\n \"--per_gpu_eval_batch_size\", default=8, type=int, help=\"Batch size per GPU/CPU for evaluation.\",\n )\n parser.add_argument(\n \"--gradient_accumulation_steps\",\n type=int,\n default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\",\n )\n parser.add_argument(\"--learning_rate\", default=5e-5, type=float, help=\"The initial learning rate for Adam.\")\n parser.add_argument(\"--weight_decay\", default=0.0, type=float, help=\"Weight decay if we apply some.\")\n parser.add_argument(\"--adam_epsilon\", default=1e-8, type=float, help=\"Epsilon for Adam optimizer.\")\n parser.add_argument(\"--max_grad_norm\", default=1.0, type=float, help=\"Max gradient norm.\")\n parser.add_argument(\n \"--num_train_epochs\", default=3.0, type=float, help=\"Total number of training epochs to perform.\",\n )\n parser.add_argument(\n \"--max_steps\",\n default=-1,\n type=int,\n help=\"If > 0: set total number of training steps to perform. 
Override num_train_epochs.\",\n )\n parser.add_argument(\"--warmup_steps\", default=0, type=int, help=\"Linear warmup over warmup_steps.\")\n\n parser.add_argument(\"--logging_steps\", type=int, default=500, help=\"Log every X updates steps.\")\n parser.add_argument(\"--save_steps\", type=int, default=500, help=\"Save checkpoint every X updates steps.\")\n parser.add_argument(\n \"--eval_all_checkpoints\",\n action=\"store_true\",\n help=\"Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number\",\n )\n parser.add_argument(\"--no_cuda\", action=\"store_true\", help=\"Avoid using CUDA when available\")\n parser.add_argument(\n \"--overwrite_output_dir\", action=\"store_true\", help=\"Overwrite the content of the output directory\",\n )\n parser.add_argument(\n \"--overwrite_cache\", action=\"store_true\", help=\"Overwrite the cached training and evaluation sets\",\n )\n parser.add_argument(\"--seed\", type=int, default=42, help=\"random seed for initialization\")\n\n parser.add_argument(\n \"--fp16\",\n action=\"store_true\",\n help=\"Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit\",\n )\n parser.add_argument(\n \"--fp16_opt_level\",\n type=str,\n default=\"O1\",\n help=\"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].\"\n \"See details at https://nvidia.github.io/apex/amp.html\",\n )\n parser.add_argument(\"--local_rank\", type=int, default=-1, help=\"For distributed training: local_rank\")\n parser.add_argument(\"--server_ip\", type=str, default=\"\", help=\"For distant debugging.\")\n parser.add_argument(\"--server_port\", type=str, default=\"\", help=\"For distant debugging.\")\n parser.add_argument(\"--do_test\", action=\"store_true\")\n parser.add_argument(\"--do_save_all\", action=\"store_true\")\n\n parser.add_argument('--adv_lr', type=float, default=0)\n parser.add_argument('--adv_steps', type=int, default=1, help=\"should be at least 1\")\n parser.add_argument('--adv_init_mag', type=float, default=0)\n parser.add_argument('--norm_type', type=str, default=\"l2\", choices=[\"l2\", \"linf\"])\n parser.add_argument('--adv_max_norm', type=float, default=0, help=\"set to 0 to be unlimited\")\n\n args = parser.parse_args()\n\n if (\n os.path.exists(args.output_dir)\n and os.listdir(args.output_dir)\n and args.do_train\n and not args.overwrite_output_dir\n ):\n raise ValueError(\n \"Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.\".format(\n args.output_dir\n )\n )\n\n # Setup distant debugging if needed\n if args.server_ip and args.server_port:\n # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script\n import ptvsd\n\n print(\"Waiting for debugger attach\")\n ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)\n ptvsd.wait_for_attach()\n\n # Setup CUDA, GPU & distributed training\n if args.local_rank == -1 or args.no_cuda:\n device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n args.n_gpu = torch.cuda.device_count()\n else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n torch.cuda.set_device(args.local_rank)\n device = torch.device(\"cuda\", args.local_rank)\n torch.distributed.init_process_group(backend=\"nccl\")\n args.n_gpu = 1\n args.device = device\n\n # Setup logging\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,\n )\n logger.warning(\n \"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s\",\n args.local_rank,\n device,\n args.n_gpu,\n bool(args.local_rank != -1),\n args.fp16,\n )\n\n # Set seed\n set_seed(args)\n\n # Prepare GLUE task\n args.task_name = args.task_name.lower()\n \n processor = nConvProcessor()\n args.output_mode = \"classification\"\n label_list = processor.get_labels()\n num_labels = len(label_list)\n\n # Load pretrained model and tokenizer\n if args.local_rank not in [-1, 0]:\n torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab\n\n args.model_type = args.model_type.lower()\n config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]\n config = config_class.from_pretrained(\n args.config_name if args.config_name else args.model_name_or_path,\n num_labels=num_labels,\n finetuning_task=args.task_name,\n cache_dir=args.cache_dir if args.cache_dir else None,\n )\n tokenizer = tokenizer_class.from_pretrained(\n args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,\n do_lower_case=args.do_lower_case,\n cache_dir=args.cache_dir if args.cache_dir else None,\n )\n model = model_class.from_pretrained(\n args.model_name_or_path,\n from_tf=bool(\".ckpt\" in args.model_name_or_path),\n config=config,\n cache_dir=args.cache_dir if args.cache_dir else None,\n )\n\n if args.local_rank == 0:\n torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab\n\n model.to(args.device)\n\n logger.info(\"Training/evaluation parameters %s\", args)\n\n # Training\n if args.do_train:\n train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False)\n global_step, tr_loss = train(args, train_dataset, model, tokenizer)\n logger.info(\" global_step = %s, average loss = %s\", global_step, tr_loss)\n\n # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()\n if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):\n # Create output directory if needed\n if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:\n os.makedirs(args.output_dir)\n\n logger.info(\"Saving model checkpoint to %s\", args.output_dir)\n # Save a trained model, configuration and tokenizer using 
`save_pretrained()`.\n # They can then be reloaded using `from_pretrained()`\n model_to_save = (\n model.module if hasattr(model, \"module\") else model\n ) # Take care of distributed/parallel training\n model_to_save.save_pretrained(args.output_dir)\n tokenizer.save_pretrained(args.output_dir)\n\n # Good practice: save your training arguments together with the trained model\n torch.save(args, os.path.join(args.output_dir, \"training_args.bin\"))\n\n # Load a trained model and vocabulary that you have fine-tuned\n model = model_class.from_pretrained(args.output_dir)\n tokenizer = tokenizer_class.from_pretrained(args.output_dir)\n model.to(args.device)\n\n # Evaluation\n results = {}\n if args.do_eval and args.local_rank in [-1, 0]:\n tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)\n checkpoints = [args.output_dir]\n if args.eval_all_checkpoints:\n checkpoints = list(\n os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + \"/**/\" + WEIGHTS_NAME, recursive=True))\n )\n logging.getLogger(\"transformers.modeling_utils\").setLevel(logging.WARN) # Reduce logging\n logger.info(\"Evaluate the following checkpoints: %s\", checkpoints)\n for checkpoint in checkpoints:\n global_step = checkpoint.split(\"-\")[-1] if len(checkpoints) > 1 else \"\"\n prefix = checkpoint.split(\"/\")[-1] if checkpoint.find(\"checkpoint\") != -1 else \"\"\n\n model = model_class.from_pretrained(checkpoint)\n model.to(args.device)\n result = evaluate(args, model, tokenizer, prefix=prefix)\n result = dict((k + \"_{}\".format(global_step), v) for k, v in result.items())\n results.update(result)\n\n if args.do_test and args.local_rank in [-1, 0]:\n\n tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)\n checkpoints = [args.output_dir]\n if args.eval_all_checkpoints:\n checkpoints = list(\n os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + \"/**/\" + WEIGHTS_NAME, recursive=True))\n )\n logging.getLogger(\"transformers.modeling_utils\").setLevel(logging.WARN) # Reduce logging\n logger.info(\"Evaluate the following checkpoints: %s\", checkpoints)\n for checkpoint in checkpoints:\n global_step = checkpoint.split(\"-\")[-1] if len(checkpoints) > 1 else \"\"\n prefix = checkpoint.split(\"/\")[-1] if checkpoint.find(\"checkpoint\") != -1 else \"\"\n\n model = model_class.from_pretrained(checkpoint)\n model.to(args.device)\n result = test(args, model, tokenizer, prefix=prefix)\n\n return results\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"torch.load",
"numpy.squeeze",
"torch.utils.data.DataLoader",
"torch.sum",
"torch.no_grad",
"torch.cuda.manual_seed_all",
"torch.cuda.is_available",
"torch.device",
"torch.distributed.get_rank",
"sklearn.metrics.f1_score",
"torch.save",
"torch.clamp",
"torch.distributed.init_process_group",
"torch.utils.data.distributed.DistributedSampler",
"torch.sqrt",
"torch.utils.data.TensorDataset",
"torch.distributed.barrier",
"torch.tensor",
"numpy.argmax",
"torch.zeros_like",
"torch.cuda.device_count",
"torch.distributed.get_world_size",
"torch.nn.parallel.DistributedDataParallel",
"numpy.random.seed",
"torch.cuda.set_device",
"torch.manual_seed",
"torch.utils.data.SequentialSampler",
"torch.utils.data.RandomSampler",
"torch.nn.DataParallel"
]
] |
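The training loop in the row above perturbs the word embeddings and, for the "l2" norm type, rescales the perturbation whenever it leaves the ball of radius adv_max_norm. The following is a small self-contained sketch of that projection step, assuming perturbations shaped [batch, seq_len, hidden]; project_l2 is a hypothetical helper, not a function from run_nConv_adv.py.

import torch

def project_l2(delta, max_norm):
    # Rescale each example's perturbation so its L2 norm is at most max_norm,
    # leaving perturbations already inside the ball untouched. Assumes delta
    # has shape [batch, seq_len, hidden].
    delta_norm = torch.norm(delta.view(delta.size(0), -1), p=2, dim=1)
    delta_norm = torch.clamp(delta_norm, min=1e-8)
    exceed = (delta_norm > max_norm).float()
    scale = (max_norm / delta_norm) * exceed + (1.0 - exceed)
    return delta * scale.view(-1, 1, 1)

delta = torch.randn(2, 4, 8)
clipped = project_l2(delta, 0.1)
print(torch.norm(clipped.view(2, -1), dim=1))  # each norm is at most 0.1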
530824679/2D_ObjectDetect
|
[
"cdae94bfbcb3d6ef9123e05dca3bf4f4b8501d62"
] |
[
"evaluate.py"
] |
[
"import numpy as np\nimport tensorflow as tf\nfrom collections import Counter\nfrom utils.process_utils import calculate_iou, non_maximum_suppression\n\ndef evaluate(y_pred, y_true, num_classes, score_thresh=0.5, iou_thresh=0.5):\n\n num_images = y_true[0].shape[0]\n true_labels_dict = {i:0 for i in range(num_classes)} # {class: count}\n pred_labels_dict = {i:0 for i in range(num_classes)}\n true_positive_dict = {i:0 for i in range(num_classes)}\n\n for i in range(num_images):\n true_labels_list, true_boxes_list = [], []\n for j in range(3): # three feature maps\n true_probs_temp = y_true[j][i][...,5: ]\n true_boxes_temp = y_true[j][i][...,0:4]\n\n object_mask = true_probs_temp.sum(axis=-1) > 0\n\n true_probs_temp = true_probs_temp[object_mask]\n true_boxes_temp = true_boxes_temp[object_mask]\n\n true_labels_list += np.argmax(true_probs_temp, axis=-1).tolist()\n true_boxes_list += true_boxes_temp.tolist()\n\n if len(true_labels_list) != 0:\n for cls, count in Counter(true_labels_list).items(): true_labels_dict[cls] += count\n\n pred_boxes = y_pred[0][i:i+1]\n pred_confs = y_pred[1][i:i+1]\n pred_probs = y_pred[2][i:i+1]\n\n pred_boxes, pred_confs, pred_labels = non_maximum_suppression(pred_boxes, pred_confs, pred_probs)\n\n true_boxes = np.array(true_boxes_list)\n box_centers, box_sizes = true_boxes[:,0:2], true_boxes[:,2:4]\n\n true_boxes[:,0:2] = box_centers - box_sizes / 2.\n true_boxes[:,2:4] = true_boxes[:,0:2] + box_sizes\n\n pred_labels_list = [] if pred_labels is None else pred_labels.tolist()\n if pred_labels_list == []: continue\n\n detected = []\n for k in range(len(true_labels_list)):\n # compute iou between predicted box and ground_truth boxes\n iou = calculate_iou(true_boxes[k:k+1], pred_boxes)\n m = np.argmax(iou) # Extract index of largest overlap\n if iou[m] >= iou_thresh and true_labels_list[k] == pred_labels_list[m] and m not in detected:\n pred_labels_dict[true_labels_list[k]] += 1\n detected.append(m)\n pred_labels_list = [pred_labels_list[m] for m in detected]\n\n for c in range(num_classes):\n t = true_labels_list.count(c)\n p = pred_labels_list.count(c)\n true_positive_dict[c] += p if t >= p else t\n\n recall = sum(true_positive_dict.values()) / (sum(true_labels_dict.values()) + 1e-6)\n precision = sum(true_positive_dict.values()) / (sum(pred_labels_dict.values()) + 1e-6)\n avg_prec = [true_positive_dict[i] / (true_labels_dict[i] + 1e-6) for i in range(num_classes)]\n mAP = sum(avg_prec) / (sum([avg_prec[i] != 0 for i in range(num_classes)]) + 1e-6)\n\n return recall, precision, mAP\n\n"
] |
[
[
"numpy.array",
"numpy.argmax"
]
] |
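evaluate.py above converts ground-truth boxes to corner format and matches predictions by IoU via calculate_iou from utils.process_utils. As a standalone illustration (not that utility itself), here is a corner-format IoU between one box and an array of boxes; the function name is hypothetical.

import numpy as np

def iou_xyxy(box, boxes):
    # IoU between a single box and an array of boxes, all given as
    # (x1, y1, x2, y2) corners -- the format the row above converts its
    # ground-truth boxes into before matching predictions.
    x1 = np.maximum(box[0], boxes[:, 0])
    y1 = np.maximum(box[1], boxes[:, 1])
    x2 = np.minimum(box[2], boxes[:, 2])
    y2 = np.minimum(box[3], boxes[:, 3])
    inter = np.maximum(x2 - x1, 0) * np.maximum(y2 - y1, 0)
    area_a = (box[2] - box[0]) * (box[3] - box[1])
    area_b = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    return inter / (area_a + area_b - inter + 1e-6)

print(iou_xyxy(np.array([0, 0, 2, 2]),
               np.array([[1, 1, 3, 3], [0, 0, 2, 2]])))  # ~[0.143, 1.0]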
zeffon/logparser
|
[
"4ef7bf0c63652a44dc6dd583b1e15526d5036b32"
] |
[
"logparser/Drain/Drain.py"
] |
[
"\"\"\"\r\nDescription : This file implements the Drain algorithm for log parsing\r\nAuthor : LogPAI team\r\nLicense : MIT\r\n\"\"\"\r\n\r\nimport re\r\nimport os\r\nfrom sys import version_info\r\nimport numpy as np\r\nimport pandas as pd\r\nimport hashlib\r\nfrom datetime import datetime\r\n\r\n\r\nclass Logcluster:\r\n def __init__(self, logTemplate='', logIDL=None):\r\n self.logTemplate = logTemplate\r\n if logIDL is None:\r\n logIDL = []\r\n self.logIDL = logIDL\r\n\r\n\r\nclass Node:\r\n def __init__(self, childD=None, depth=0, digitOrtoken=None):\r\n if childD is None:\r\n childD = dict()\r\n self.childD = childD\r\n self.depth = depth\r\n self.digitOrtoken = digitOrtoken\r\n\r\n\r\nclass LogParser:\r\n def __init__(self, log_format, indir='./', outdir='./result/', depth=4, st=0.4, \r\n maxChild=100, rex=[], keep_para=True):\r\n \"\"\"\r\n Attributes\r\n ----------\r\n rex : regular expressions used in preprocessing (step1)\r\n path : the input path stores the input log file name\r\n depth : depth of all leaf nodes\r\n st : similarity threshold\r\n maxChild : max number of children of an internal node\r\n logName : the name of the input file containing raw log messages\r\n savePath : the output path stores the file containing structured logs\r\n \"\"\"\r\n self.path = indir\r\n self.depth = depth - 2\r\n self.st = st\r\n self.maxChild = maxChild\r\n self.logName = None\r\n self.savePath = outdir\r\n self.df_log = None\r\n self.log_format = log_format\r\n self.rex = rex\r\n self.keep_para = keep_para\r\n\r\n def hasNumbers(self, s):\r\n return any(char.isdigit() for char in s)\r\n\r\n def treeSearch(self, rn, seq):\r\n retLogClust = None\r\n\r\n seqLen = len(seq)\r\n if seqLen not in rn.childD:\r\n return retLogClust\r\n\r\n parentn = rn.childD[seqLen]\r\n\r\n currentDepth = 1\r\n for token in seq:\r\n if currentDepth >= self.depth or currentDepth > seqLen:\r\n break\r\n\r\n if token in parentn.childD:\r\n parentn = parentn.childD[token]\r\n elif '<*>' in parentn.childD:\r\n parentn = parentn.childD['<*>']\r\n else:\r\n return retLogClust\r\n currentDepth += 1\r\n\r\n logClustL = parentn.childD\r\n\r\n retLogClust = self.fastMatch(logClustL, seq)\r\n\r\n return retLogClust\r\n\r\n def addSeqToPrefixTree(self, rn, logClust):\r\n seqLen = len(logClust.logTemplate)\r\n if seqLen not in rn.childD:\r\n firtLayerNode = Node(depth=1, digitOrtoken=seqLen)\r\n rn.childD[seqLen] = firtLayerNode\r\n else:\r\n firtLayerNode = rn.childD[seqLen]\r\n\r\n parentn = firtLayerNode\r\n\r\n currentDepth = 1\r\n for token in logClust.logTemplate:\r\n\r\n #Add current log cluster to the leaf node\r\n if currentDepth >= self.depth or currentDepth > seqLen:\r\n if len(parentn.childD) == 0:\r\n parentn.childD = [logClust]\r\n else:\r\n parentn.childD.append(logClust)\r\n break\r\n\r\n #If token not matched in this layer of existing tree. 
\r\n if token not in parentn.childD:\r\n if not self.hasNumbers(token):\r\n if '<*>' in parentn.childD:\r\n if len(parentn.childD) < self.maxChild:\r\n newNode = Node(depth=currentDepth + 1, digitOrtoken=token)\r\n parentn.childD[token] = newNode\r\n parentn = newNode\r\n else:\r\n parentn = parentn.childD['<*>']\r\n else:\r\n if len(parentn.childD)+1 < self.maxChild:\r\n newNode = Node(depth=currentDepth+1, digitOrtoken=token)\r\n parentn.childD[token] = newNode\r\n parentn = newNode\r\n elif len(parentn.childD)+1 == self.maxChild:\r\n newNode = Node(depth=currentDepth+1, digitOrtoken='<*>')\r\n parentn.childD['<*>'] = newNode\r\n parentn = newNode\r\n else:\r\n parentn = parentn.childD['<*>']\r\n \r\n else:\r\n if '<*>' not in parentn.childD:\r\n newNode = Node(depth=currentDepth+1, digitOrtoken='<*>')\r\n parentn.childD['<*>'] = newNode\r\n parentn = newNode\r\n else:\r\n parentn = parentn.childD['<*>']\r\n\r\n #If the token is matched\r\n else:\r\n parentn = parentn.childD[token]\r\n\r\n currentDepth += 1\r\n\r\n #seq1 is template\r\n def seqDist(self, seq1, seq2):\r\n assert len(seq1) == len(seq2)\r\n simTokens = 0\r\n numOfPar = 0\r\n\r\n for token1, token2 in zip(seq1, seq2):\r\n if token1 == '<*>':\r\n numOfPar += 1\r\n continue\r\n if token1 == token2:\r\n simTokens += 1 \r\n\r\n retVal = float(simTokens) / len(seq1)\r\n\r\n return retVal, numOfPar\r\n\r\n\r\n def fastMatch(self, logClustL, seq):\r\n retLogClust = None\r\n\r\n maxSim = -1\r\n maxNumOfPara = -1\r\n maxClust = None\r\n\r\n for logClust in logClustL:\r\n curSim, curNumOfPara = self.seqDist(logClust.logTemplate, seq)\r\n if curSim>maxSim or (curSim==maxSim and curNumOfPara>maxNumOfPara):\r\n maxSim = curSim\r\n maxNumOfPara = curNumOfPara\r\n maxClust = logClust\r\n\r\n if maxSim >= self.st:\r\n retLogClust = maxClust \r\n\r\n return retLogClust\r\n\r\n def getTemplate(self, seq1, seq2):\r\n assert len(seq1) == len(seq2)\r\n retVal = []\r\n\r\n i = 0\r\n for word in seq1:\r\n if word == seq2[i]:\r\n retVal.append(word)\r\n else:\r\n retVal.append('<*>')\r\n\r\n i += 1\r\n\r\n return retVal\r\n\r\n def outputResult(self, logClustL):\r\n log_templates = [0] * self.df_log.shape[0]\r\n log_templateids = [0] * self.df_log.shape[0]\r\n df_events = []\r\n for logClust in logClustL:\r\n template_str = ' '.join(logClust.logTemplate)\r\n occurrence = len(logClust.logIDL)\r\n template_id = hashlib.md5(template_str.encode('utf-8')).hexdigest()[0:8]\r\n for logID in logClust.logIDL:\r\n logID -= 1\r\n log_templates[logID] = template_str\r\n log_templateids[logID] = template_id\r\n df_events.append([template_id, template_str, occurrence])\r\n\r\n df_event = pd.DataFrame(df_events, columns=['EventId', 'EventTemplate', 'Occurrences'])\r\n self.df_log['EventId'] = log_templateids\r\n self.df_log['EventTemplate'] = log_templates\r\n\r\n if self.keep_para:\r\n self.df_log[\"ParameterList\"] = self.df_log.apply(self.get_parameter_list, axis=1) \r\n self.df_log.to_csv(os.path.join(self.savePath, self.logName + '_structured.csv'), index=False)\r\n\r\n\r\n occ_dict = dict(self.df_log['EventTemplate'].value_counts())\r\n df_event = pd.DataFrame()\r\n df_event['EventTemplate'] = self.df_log['EventTemplate'].unique()\r\n df_event['EventId'] = df_event['EventTemplate'].map(lambda x: hashlib.md5(x.encode('utf-8')).hexdigest()[0:8])\r\n df_event['Occurrences'] = df_event['EventTemplate'].map(occ_dict)\r\n df_event.to_csv(os.path.join(self.savePath, self.logName + '_templates.csv'), index=False, columns=[\"EventId\", \"EventTemplate\", 
\"Occurrences\"])\r\n\r\n\r\n def printTree(self, node, dep):\r\n pStr = '' \r\n for i in range(dep):\r\n pStr += '\\t'\r\n\r\n if node.depth == 0:\r\n pStr += 'Root'\r\n elif node.depth == 1:\r\n pStr += '<' + str(node.digitOrtoken) + '>'\r\n else:\r\n pStr += node.digitOrtoken\r\n\r\n print(pStr)\r\n\r\n if node.depth == self.depth:\r\n return 1\r\n for child in node.childD:\r\n self.printTree(node.childD[child], dep+1)\r\n\r\n\r\n def parse(self, logName):\r\n print('Parsing file: ' + os.path.join(self.path, logName))\r\n start_time = datetime.now()\r\n self.logName = logName\r\n rootNode = Node()\r\n logCluL = []\r\n\r\n self.load_data()\r\n\r\n count = 0\r\n for idx, line in self.df_log.iterrows():\r\n logID = line['LineId']\r\n logmessageL = self.preprocess(line['Content']).strip().split()\r\n # logmessageL = filter(lambda x: x != '', re.split('[\\s=:,]', self.preprocess(line['Content'])))\r\n matchCluster = self.treeSearch(rootNode, logmessageL)\r\n\r\n #Match no existing log cluster\r\n if matchCluster is None:\r\n newCluster = Logcluster(logTemplate=logmessageL, logIDL=[logID])\r\n logCluL.append(newCluster)\r\n self.addSeqToPrefixTree(rootNode, newCluster)\r\n\r\n #Add the new log message to the existing cluster\r\n else:\r\n newTemplate = self.getTemplate(logmessageL, matchCluster.logTemplate)\r\n matchCluster.logIDL.append(logID)\r\n if ' '.join(newTemplate) != ' '.join(matchCluster.logTemplate): \r\n matchCluster.logTemplate = newTemplate\r\n\r\n count += 1\r\n if count % 1000 == 0 or count == len(self.df_log):\r\n print('Processed {0:.1f}% of log lines.'.format(count * 100.0 / len(self.df_log)))\r\n\r\n\r\n if not os.path.exists(self.savePath):\r\n os.makedirs(self.savePath)\r\n\r\n self.outputResult(logCluL)\r\n\r\n print('Parsing done. 
[Time taken: {!s}]'.format(datetime.now() - start_time))\r\n\r\n def load_data(self):\r\n headers, regex = self.generate_logformat_regex(self.log_format)\r\n self.df_log = self.log_to_dataframe(os.path.join(self.path, self.logName), regex, headers, self.log_format)\r\n\r\n def preprocess(self, line):\r\n for currentRex in self.rex:\r\n line = re.sub(currentRex, '<*>', line)\r\n return line\r\n\r\n def log_to_dataframe(self, log_file, regex, headers, logformat):\r\n \"\"\" Function to transform log file to dataframe \r\n \"\"\"\r\n log_messages = []\r\n linecount = 0\r\n with open(log_file, 'r') as fin:\r\n for line in fin.readlines():\r\n try:\r\n match = regex.search(line.strip())\r\n message = [match.group(header) for header in headers]\r\n log_messages.append(message)\r\n linecount += 1\r\n except Exception as e:\r\n pass\r\n logdf = pd.DataFrame(log_messages, columns=headers)\r\n logdf.insert(0, 'LineId', None)\r\n logdf['LineId'] = [i + 1 for i in range(linecount)]\r\n return logdf\r\n\r\n\r\n def generate_logformat_regex(self, logformat):\r\n \"\"\" Function to generate regular expression to split log messages\r\n \"\"\"\r\n headers = []\r\n splitters = re.split(r'(<[^<>]+>)', logformat)\r\n regex = ''\r\n for k in range(len(splitters)):\r\n if k % 2 == 0:\r\n splitter = re.sub(' +', '\\\\\\s+', splitters[k])\r\n regex += splitter\r\n else:\r\n header = splitters[k].strip('<').strip('>')\r\n regex += '(?P<%s>.*?)' % header\r\n headers.append(header)\r\n regex = re.compile('^' + regex + '$')\r\n return headers, regex\r\n\r\n def get_parameter_list(self, row):\r\n template_regex = re.sub(r\"<.{1,5}>\", \"<*>\", row[\"EventTemplate\"])\r\n if \"<*>\" not in template_regex: return []\r\n template_regex = re.sub(r'([^A-Za-z0-9])', r'\\\\\\1', template_regex)\r\n if version_info.major == 2: # 判断python主版本\r\n template_regex = re.sub(r'\\\\ +', r'\\s+', template_regex)\r\n else:\r\n template_regex = re.sub(r'\\\\ ', r' ', template_regex)\r\n template_regex = \"^\" + template_regex.replace(\"\\<\\*\\>\", \"(.*?)\") + \"$\"\r\n parameter_list = re.findall(template_regex, row[\"Content\"])\r\n parameter_list = parameter_list[0] if parameter_list else ()\r\n parameter_list = list(parameter_list) if isinstance(parameter_list, tuple) else [parameter_list]\r\n return parameter_list"
] |
[
[
"pandas.DataFrame"
]
] |
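A minimal, illustrative sketch of the lone `pandas.DataFrame` call recorded in the `apis` column above, mirroring how `log_to_dataframe` turns regex-matched log lines into a table. The two-field log format and the sample lines are assumptions, not part of the record:

```python
import re
import pandas as pd

# Assumed log format "<Level> <Content>", in the spirit of generate_logformat_regex.
regex = re.compile(r'^(?P<Level>\S+)\s+(?P<Content>.*)$')
lines = ["INFO started worker 3", "WARN retrying connection"]

rows = [m.groupdict() for m in (regex.search(l) for l in lines) if m]
df = pd.DataFrame(rows, columns=["Level", "Content"])
df.insert(0, "LineId", list(range(1, len(df) + 1)))  # same LineId convention as the parser
print(df)
```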
rloganiv/meercat-aux
|
[
"4d9006095e9fb91034f8dae0baaa81a1567f6606"
] |
[
"meercat/cluster_w_grinch.py"
] |
[
"\"\"\"\nCluster embeddings using GRINCH.\n\"\"\"\nimport argparse\nimport csv\nimport logging\n\nimport numpy as np\n\nfrom meercat.grinch_alg import Grinch\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef find_threshold(grinch, target, max_iters=100):\n logger.info(f'Finding threshold. Target # of clusts: {target}.')\n bounds = [0.0, 1.0]\n n_clusters = -1\n epsilon = grinch.points.shape[0] / 1000.0\n logger.info(f'Epsilon: {epsilon}')\n i = 0\n while abs(n_clusters - target) > epsilon and i < max_iters:\n i += 1\n threshold = (bounds[0] + bounds[1]) / 2\n clusters = grinch.flat_clustering(threshold)\n n_clusters = len(np.unique(clusters))\n logger.info(f'Threshold: {threshold}, # of clusts: {n_clusters}')\n if n_clusters < target:\n bounds[0] = threshold\n else:\n bounds[1] = threshold\n return clusters\n\n\ndef main(args):\n # Load embeddings\n logger.info('Loading embeddings')\n entity_vocab = {}\n entity_ids = []\n embeddings = []\n with open(args.input, 'r') as f:\n reader = csv.reader(f, delimiter='\\t')\n for line in reader:\n uid, entity, *embedding = line\n embedding = [float(x) for x in embedding]\n embeddings.append(embedding)\n if entity not in entity_vocab:\n entity_vocab[entity] = len(entity_vocab)\n entity_id = entity_vocab[entity]\n entity_ids.append(entity_id)\n embeddings = np.array(embeddings, dtype=np.float32)\n\n grinch = Grinch(points=embeddings, active_leaf_limit=args.limit, pruning_strategy=args.strategy)\n grinch.build_dendrogram()\n\n if args.threshold is not None:\n clusters = grinch.flat_clustering(args.threshold)\n else:\n target = len(entity_vocab)\n clusters = find_threshold(grinch, target)\n\n with open(args.output, 'w') as g:\n for t, p in zip(entity_ids, clusters):\n g.write('%i, %i\\n' % (t, p))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--input', type=str, required=True)\n parser.add_argument('--output', type=str, required=True)\n parser.add_argument('--threshold', type=float, default=None)\n parser.add_argument('--limit', type=int, default=None)\n parser.add_argument('--strategy', type=str, default='similarity')\n args = parser.parse_args()\n \n logging.basicConfig(level=logging.INFO)\n\n main(args)\n"
] |
[
[
"numpy.array",
"numpy.unique"
]
] |
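The `find_threshold` routine in this record bisects a flat-clustering threshold and counts clusters with `numpy.unique`. A tiny sketch of just the counting step, with toy embeddings and a hard-coded flat clustering standing in for the GRINCH objects (both are assumptions):

```python
import numpy as np

embeddings = np.array([[0.10, 0.20], [0.11, 0.19], [0.90, 0.80]], dtype=np.float32)
clusters = np.array([0, 0, 1])            # stand-in for grinch.flat_clustering(threshold)

n_clusters = len(np.unique(clusters))     # the count compared against `target`
print(embeddings.shape, n_clusters)       # (3, 2) 2
```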
georgeRenard/CrackingTheCodingInterview
|
[
"ba9866e8e7a8c9942464d76b13af08ea6b15f3f9"
] |
[
"linked-lists/removeDups.py"
] |
[
"import sys\nimport numpy\n\n\ndef problem():\n \"\"\"\n Remove Dups! Write code to remove duplicates from an unsorted linked list.\n FOLLOW UP\n How would you solve this problem if a temporary buffer is not allowed? \n \"\"\"\n pass\n\n\nclass Node():\n \n\n # If it is so important to remove all the duplicates we can do some tricky case optimizations\n # We can chain specifically duplicate nodes together and keep all the distinct items in a map\n # This way we can reduce the overall time to O(d) where d is the number of duplicates\n def __init__(self, item):\n self.item = item \n self.next = None\n self.next_alike = None\n self.prev = None\n self.distinct_map = {item: self}\n self.count = 0\n \n \n def add(self, item):\n \n if item == None:\n raise Exception(\"You cannot add a 'None' value\")\n\n current = self \n\n while current.next is not None:\n current = current.next\n\n current.next = Node(item)\n self.count += 1\n \n\n def __add(self, node):\n \n current = self\n while current.next is not None:\n current = current.next\n \n current.next = node\n node.prev = current\n self.count += 1\n\n\n def add_memory_heavy(self, item):\n \n if item == None:\n raise Exception(\"You cannot add a 'None' value\")\n \n node = Node(item) \n \n if item in self.distinct_map:\n n = self.distinct_map[item]\n while n.next_alike is not None:\n n = n.next_alike \n n.next_alike = node\n else:\n self.distinct_map[item] = node\n\n self.__add(node)\n\n \n # In my opinion, the best conceivable runtime is O(n), but in reality you could do some optimizations by adding more\n # pointers and even using more memory to do even better\n def remove_duplicates(self):\n self.__remove_duplicates_buffer_free()\n\n\n def __remove_duplicates_fast_memory_heavy(self):\n \"\"\"\n This function will require a pointer backwards and it will be able to perform in O(d * duplicate_count) runtime where d is the \n number of distinct elements in the map and in O(2n) additional memory\n We can speed up things by introducing a little bit more caching\n \"\"\"\n for (item, start_node) in self.distinct_map.items():\n current = start_node.next_alike\n while current is not None:\n self.__remove_node(current) \n current = current.next_alike \n \n\n def __remove_duplicates_buffer_free(self):\n \n bit_vector = 0 \n\n current = self\n \n nodes_to_remove = []\n\n while current is not None:\n \n mask = 1 << current.item\n if mask & bit_vector == 0:\n bit_vector = bit_vector | mask\n else:\n nodes_to_remove.append(current)\n\n current = current.next\n \n for node in nodes_to_remove:\n self.__remove_node(node)\n \n \n \n def __remove_duplicates_memory_heavy(self):\n \"\"\"\n This function removes all duplicates from the singly-linked list in O(n) time and O(n) memory\n \"\"\"\n m = {self.item: 1} \n current = self.next\n prev = self\n nodes_to_remove = []\n\n while current is not None:\n\n if current.item not in m:\n m[current.item] = 1\n else:\n nodes_to_remove.append((prev, current))\n\n prev = current \n current = current.next\n \n for i in range(len(nodes_to_remove) - 1):\n this = nodes_to_remove[i]\n nx = nodes_to_remove[i + 1] \n if this[1] == nx[0]:\n nodes_to_remove[i + 1] = (this[0], nx[1]) \n continue\n \n self.__remove(this) \n\n if len(nodes_to_remove) != 0:\n self.__remove(nodes_to_remove[-1])\n \n\n\n def remove(self, item):\n\n parent = self.find_parent_node(item)\n\n if parent is None:\n raise Exception(\"Item {0} is not present in the list\".format(item))\n\n\n def __remove(self, parent_child_pair):\n\n parent = parent_child_pair[0]\n 
child = parent_child_pair[1]\n\n parent.next = child.next\n self.count -= 1 \n\n def __remove_node(self, node):\n \n parent = node.prev\n nx = node.next\n \n parent.next = nx\n if nx is not None:\n nx.prev = parent\n\n\n def find_parent_node(self, item):\n pass\n\n \n def print(self):\n\n current = self\n items = []\n\n while current is not None:\n items.append(current.item)\n current = current.next\n \n return items\n\n \n\nif __name__ == '__main__':\n \n args = sys.argv[1:]\n linked_list = Node(1)\n integers = [numpy.random.randint(50) for i in range(int(args[0]))]\n [linked_list.add_memory_heavy(item) for item in integers]\n integers.append(1)\n linked_list.remove_duplicates()\n\n s1 = set(integers)\n s2 = linked_list.print()\n\n s1 = sorted(s1)\n s2 = sorted(s2)\n \n print(s1)\n print(s2)\n for (l, r) in zip(s1, s2):\n if l != r:\n print(\"Sets are not equal\")\n exit() \n\n print(\"Congratulations... Sets are equal\") \n \n\n"
] |
[
[
"numpy.random.randint"
]
] |
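Besides the `numpy.random.randint` test data, the notable part of this record is the buffer-free duplicate removal that packs seen values into one integer used as a bit vector. A sketch of that idea on a plain Python list (the helper function below is hypothetical, not the record's `Node` class):

```python
import numpy

values = [int(numpy.random.randint(50)) for _ in range(20)]   # same test-data recipe

def remove_dups_bitvector(items):
    # One bit per non-negative int seen, as in __remove_duplicates_buffer_free.
    seen, out = 0, []
    for x in items:
        mask = 1 << x
        if seen & mask == 0:
            seen |= mask
            out.append(x)
    return out

print(sorted(set(values)) == sorted(remove_dups_bitvector(values)))   # True
```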
AlgTUDelft/WCSAC
|
[
"6536929bb3a167090752305373bf16e726994305"
] |
[
"wc_sac/sac/saclag.py"
] |
[
"#Portions of the code are adapted from Safety Starter Agents and Spinning Up, released by OpenAI under the MIT license.\n#!/usr/bin/env python\n\nfrom functools import partial\nimport numpy as np\nimport tensorflow as tf\nimport gym\nimport time\nfrom wc_sac.utils.logx import EpochLogger\nfrom wc_sac.utils.mpi_tf import sync_all_params, MpiAdamOptimizer\nfrom wc_sac.utils.mpi_tools import mpi_fork, mpi_sum, proc_id, mpi_statistics_scalar, num_procs\nfrom safety_gym.envs.engine import Engine\nfrom gym.envs.registration import register\n\nEPS = 1e-8\n\n'''\nconfig1 = {\n 'placements_extents': [-1.5, -1.5, 1.5, 1.5],\n 'robot_base': 'xmls/point.xml',\n 'task': 'goal',\n 'goal_size': 0.3,\n 'goal_keepout': 0.305,\n 'goal_locations': [(1.1, 1.1)],\n 'observe_goal_lidar': True,\n 'observe_hazards': True,\n 'constrain_hazards': True,\n 'lidar_max_dist': 3,\n 'lidar_num_bins': 16,\n 'hazards_num': 1,\n 'hazards_size': 0.7,\n 'hazards_keepout': 0.705,\n 'hazards_locations': [(0, 0)]\n }\n\n\nregister(id='StaticEnv-v0',\n entry_point='safety_gym.envs.mujoco:Engine',\n kwargs={'config': config1})\n\nconfig2 = {\n 'placements_extents': [-1.5, -1.5, 1.5, 1.5],\n 'robot_base': 'xmls/point.xml',\n 'task': 'goal',\n 'goal_size': 0.3,\n 'goal_keepout': 0.305,\n 'observe_goal_lidar': True,\n 'observe_hazards': True,\n 'constrain_hazards': True,\n 'lidar_max_dist': 3,\n 'lidar_num_bins': 16,\n 'hazards_num': 3,\n 'hazards_size': 0.3,\n 'hazards_keepout': 0.305\n }\n\nregister(id='DynamicEnv-v0',\n entry_point='safety_gym.envs.mujoco:Engine',\n kwargs={'config': config2})\n'''\n\n\ndef placeholder(dim=None):\n return tf.placeholder(dtype=tf.float32, shape=(None,dim) if dim else (None,))\n\ndef placeholders(*args):\n return [placeholder(dim) for dim in args]\n\ndef mlp(x, hidden_sizes=(64,), activation=tf.tanh, output_activation=None):\n for h in hidden_sizes[:-1]:\n x = tf.layers.dense(x, units=h, activation=activation)\n return tf.layers.dense(x, units=hidden_sizes[-1], activation=output_activation)\n\ndef get_vars(scope):\n return [x for x in tf.global_variables() if scope in x.name]\n\ndef count_vars(scope):\n v = get_vars(scope)\n return sum([np.prod(var.shape.as_list()) for var in v])\n\ndef gaussian_likelihood(x, mu, log_std):\n pre_sum = -0.5 * (((x-mu)/(tf.exp(log_std)+EPS))**2 + 2*log_std + np.log(2*np.pi))\n return tf.reduce_sum(pre_sum, axis=1)\n\ndef get_target_update(main_name, target_name, polyak):\n ''' Get a tensorflow op to update target variables based on main variables '''\n main_vars = {x.name: x for x in get_vars(main_name)}\n targ_vars = {x.name: x for x in get_vars(target_name)}\n assign_ops = []\n for v_targ in targ_vars:\n assert v_targ.startswith(target_name), f'bad var name {v_targ} for {target_name}'\n v_main = v_targ.replace(target_name, main_name, 1)\n assert v_main in main_vars, f'missing var name {v_main}'\n assign_op = tf.assign(targ_vars[v_targ], polyak*targ_vars[v_targ] + (1-polyak)*main_vars[v_main])\n assign_ops.append(assign_op)\n return tf.group(assign_ops)\n\n\n\"\"\"\nPolicies\n\"\"\"\n\nLOG_STD_MAX = 2\nLOG_STD_MIN = -20\n\ndef mlp_gaussian_policy(x, a, hidden_sizes, activation, output_activation):\n act_dim = a.shape.as_list()[-1]\n net = mlp(x, list(hidden_sizes), activation, activation)\n mu = tf.layers.dense(net, act_dim, activation=output_activation)\n log_std = tf.layers.dense(net, act_dim, activation=None)\n log_std = tf.clip_by_value(log_std, LOG_STD_MIN, LOG_STD_MAX)\n\n std = tf.exp(log_std)\n pi = mu + tf.random_normal(tf.shape(mu)) * std\n logp_pi = 
gaussian_likelihood(pi, mu, log_std)\n return mu, pi, logp_pi\n\ndef apply_squashing_func(mu, pi, logp_pi):\n # Adjustment to log prob\n '''\n '''\n logp_pi -= tf.reduce_sum(2*(np.log(2) - pi - tf.nn.softplus(-2*pi)), axis=1)\n\n # Squash those unbounded actions!\n mu = tf.tanh(mu)\n pi = tf.tanh(pi)\n return mu, pi, logp_pi\n\n\n\"\"\"\nActors and Critics\n\"\"\"\ndef mlp_actor(x, a, name='pi', hidden_sizes=(64,64), activation=tf.nn.relu,\n output_activation=None, policy=mlp_gaussian_policy, action_space=None):\n # policy\n with tf.variable_scope(name):\n mu, pi, logp_pi = policy(x, a, hidden_sizes, activation, output_activation)\n mu, pi, logp_pi = apply_squashing_func(mu, pi, logp_pi)\n\n # make sure actions are in correct range\n action_scale = action_space.high[0]\n mu *= action_scale\n pi *= action_scale\n\n return mu, pi, logp_pi\n\n\ndef mlp_critic(x, a, pi, name, hidden_sizes=(64,64), activation=tf.nn.relu,\n output_activation=None, policy=mlp_gaussian_policy, action_space=None):\n\n fn_mlp = lambda x : tf.squeeze(mlp(x=x,\n hidden_sizes=list(hidden_sizes)+[1],\n activation=activation,\n output_activation=None),\n axis=1)\n with tf.variable_scope(name):\n critic = fn_mlp(tf.concat([x,a], axis=-1))\n\n with tf.variable_scope(name, reuse=True):\n critic_pi = fn_mlp(tf.concat([x,pi], axis=-1))\n\n return critic, critic_pi\n\n\nclass ReplayBuffer:\n \"\"\"\n A simple FIFO experience replay buffer for SAC agents.\n \"\"\"\n\n def __init__(self, obs_dim, act_dim, size):\n self.obs1_buf = np.zeros([size, obs_dim], dtype=np.float32)\n self.obs2_buf = np.zeros([size, obs_dim], dtype=np.float32)\n self.acts_buf = np.zeros([size, act_dim], dtype=np.float32)\n self.rews_buf = np.zeros(size, dtype=np.float32)\n self.costs_buf = np.zeros(size, dtype=np.float32)\n self.done_buf = np.zeros(size, dtype=np.float32)\n self.ptr, self.size, self.max_size = 0, 0, size\n\n def store(self, obs, act, rew, next_obs, done, cost):\n self.obs1_buf[self.ptr] = obs\n self.obs2_buf[self.ptr] = next_obs\n self.acts_buf[self.ptr] = act\n self.rews_buf[self.ptr] = rew\n self.costs_buf[self.ptr] = cost\n self.done_buf[self.ptr] = done\n self.ptr = (self.ptr+1) % self.max_size\n self.size = min(self.size+1, self.max_size)\n\n def sample_batch(self, batch_size=32):\n idxs = np.random.randint(0, self.size, size=batch_size)\n return dict(obs1=self.obs1_buf[idxs],\n obs2=self.obs2_buf[idxs],\n acts=self.acts_buf[idxs],\n rews=self.rews_buf[idxs],\n costs=self.costs_buf[idxs],\n done=self.done_buf[idxs])\n\n\n\"\"\"\nSoft Actor-Critic\n\"\"\"\ndef sac(env_fn, actor_fn=mlp_actor, critic_fn=mlp_critic, ac_kwargs=dict(), seed=0,\n steps_per_epoch=1000, epochs=100, replay_size=int(1e6), gamma=0.99,\n polyak=0.995, lr=1e-4, batch_size=1024, local_start_steps=int(1e3),\n max_ep_len=1000, logger_kwargs=dict(), save_freq=10, local_update_after=int(1e3),\n update_freq=1, render=False, \n fixed_entropy_bonus=None, entropy_constraint=-1.0,\n fixed_cost_penalty=None, cost_constraint=None, cost_lim=None,\n reward_scale=1, lr_scale = 1, damp_scale = 0,\n ):\n \"\"\"\n\n Args:\n env_fn : A function which creates a copy of the environment.\n The environment must satisfy the OpenAI Gym API.\n\n actor_fn: A function which takes in placeholder symbols\n for state, ``x_ph``, and action, ``a_ph``, and returns the actor\n outputs from the agent's Tensorflow computation graph:\n\n =========== ================ ======================================\n Symbol Shape Description\n =========== ================ ======================================\n 
``mu`` (batch, act_dim) | Computes mean actions from policy\n | given states.\n ``pi`` (batch, act_dim) | Samples actions from policy given\n | states.\n ``logp_pi`` (batch,) | Gives log probability, according to\n | the policy, of the action sampled by\n | ``pi``. Critical: must be differentiable\n | with respect to policy parameters all\n | the way through action sampling.\n =========== ================ ======================================\n\n critic_fn: A function which takes in placeholder symbols\n for state, ``x_ph``, action, ``a_ph``, and policy ``pi``,\n and returns the critic outputs from the agent's Tensorflow computation graph:\n\n =========== ================ ======================================\n Symbol Shape Description\n =========== ================ ======================================\n ``critic`` (batch,) | Gives one estimate of Q* for\n | states in ``x_ph`` and actions in\n | ``a_ph``.\n ``critic_pi`` (batch,) | Gives another estimate of Q* for\n | states in ``x_ph`` and actions in\n | ``a_ph``.\n =========== ================ ======================================\n\n ac_kwargs (dict): Any kwargs appropriate for the actor_fn / critic_fn\n function you provided to SAC.\n\n seed (int): Seed for random number generators.\n\n steps_per_epoch (int): Number of steps of interaction (state-action pairs)\n for the agent and the environment in each epoch.\n\n epochs (int): Number of epochs to run and train agent.\n\n replay_size (int): Maximum length of replay buffer.\n\n gamma (float): Discount factor. (Always between 0 and 1.)\n\n polyak (float): Interpolation factor in polyak averaging for target\n networks. Target networks are updated towards main networks\n according to:\n\n .. math:: \\\\theta_{\\\\text{targ}} \\\\leftarrow\n \\\\rho \\\\theta_{\\\\text{targ}} + (1-\\\\rho) \\\\theta\n\n where :math:`\\\\rho` is polyak. (Always between 0 and 1, usually\n close to 1.)\n\n lr (float): Learning rate (used for both policy and value learning).\n\n batch_size (int): Minibatch size for SGD.\n\n local_start_steps (int): Number of steps for uniform-random action selection,\n before running real policy. 
Helps exploration.\n\n max_ep_len (int): Maximum length of trajectory / episode / rollout.\n\n logger_kwargs (dict): Keyword args for EpochLogger.\n\n save_freq (int): How often (in terms of gap between epochs) to save\n the current policy and value function.\n\n fixed_entropy_bonus (float or None): Fixed bonus to reward for entropy.\n Units are (points of discounted sum of future reward) / (nats of policy entropy).\n If None, use ``entropy_constraint`` to set bonus value instead.\n\n entropy_constraint (float): If ``fixed_entropy_bonus`` is None,\n Adjust entropy bonus to maintain at least this much entropy.\n Actual constraint value is multiplied by the dimensions of the action space.\n Units are (nats of policy entropy) / (action dimenson).\n\n fixed_cost_penalty (float or None): Fixed penalty to reward for cost.\n Units are (points of discounted sum of future reward) / (points of discounted sum of future costs).\n If None, use ``cost_constraint`` to set penalty value instead.\n\n cost_constraint (float or None): If ``fixed_cost_penalty`` is None,\n Adjust cost penalty to maintain at most this much cost.\n Units are (points of discounted sum of future costs).\n Note: to get an approximate cost_constraint from a cost_lim (undiscounted sum of costs),\n multiply cost_lim by (1 - gamma ** episode_len) / (1 - gamma).\n If None, use cost_lim to calculate constraint.\n\n cost_lim (float or None): If ``cost_constraint`` is None,\n calculate an approximate constraint cost from this cost limit.\n Units are (expectation of undiscounted sum of costs in a single episode).\n If None, cost_lim is not used, and if no cost constraints are used, do naive optimization.\n \"\"\"\n use_costs = fixed_cost_penalty or cost_constraint or cost_lim\n\n logger = EpochLogger(**logger_kwargs)\n logger.save_config(locals())\n\n # Env instantiation\n env, test_env = env_fn(), env_fn()\n \n obs_dim = env.observation_space.shape[0]\n act_dim = env.action_space.shape[0]\n\n # Setting seeds\n seed += 10000 * proc_id()\n tf.set_random_seed(seed)\n np.random.seed(seed)\n env.seed(seed)\n test_env.seed(seed)\n\n # Action limit for clamping: critically, assumes all dimensions share the same bound!\n act_limit = env.action_space.high[0]\n\n # Share information about action space with policy architecture\n ac_kwargs['action_space'] = env.action_space\n\n # Inputs to computation graph\n x_ph, a_ph, x2_ph, r_ph, d_ph, c_ph = placeholders(obs_dim, act_dim, obs_dim, None, None, None)\n\n # Main outputs from computation graph\n with tf.variable_scope('main'):\n mu, pi, logp_pi = actor_fn(x_ph, a_ph, **ac_kwargs)\n qr1, qr1_pi = critic_fn(x_ph, a_ph, pi, name='qr1', **ac_kwargs)\n qr2, qr2_pi = critic_fn(x_ph, a_ph, pi, name='qr2', **ac_kwargs)\n qc, qc_pi = critic_fn(x_ph, a_ph, pi, name='qc', **ac_kwargs)\n \n\n with tf.variable_scope('main', reuse=True):\n # Additional policy output from a different observation placeholder\n # This lets us do separate optimization updates (actor, critics, etc)\n # in a single tensorflow op.\n _, pi2, logp_pi2 = actor_fn(x2_ph, a_ph, **ac_kwargs)\n\n # Target value network\n with tf.variable_scope('target'):\n _, qr1_pi_targ = critic_fn(x2_ph, a_ph, pi2, name='qr1', **ac_kwargs)\n _, qr2_pi_targ = critic_fn(x2_ph, a_ph, pi2, name='qr2', **ac_kwargs)\n _, qc_pi_targ = critic_fn(x2_ph, a_ph, pi2, name='qc', **ac_kwargs)\n\n # Entropy bonus\n if fixed_entropy_bonus is None:\n with tf.variable_scope('entreg'):\n soft_alpha = tf.get_variable('soft_alpha',\n initializer=0.0,\n trainable=True,\n 
dtype=tf.float32)\n alpha = tf.nn.softplus(soft_alpha)\n else:\n alpha = tf.constant(fixed_entropy_bonus)\n log_alpha = tf.log(alpha)\n\n # Cost penalty\n if use_costs:\n if fixed_cost_penalty is None:\n with tf.variable_scope('costpen'):\n soft_beta = tf.get_variable('soft_beta',\n initializer=0.0,\n trainable=True,\n dtype=tf.float32)\n beta = tf.nn.softplus(soft_beta)\n log_beta = tf.log(beta)\n else:\n beta = tf.constant(fixed_cost_penalty)\n log_beta = tf.log(beta)\n else:\n beta = 0.0 # costs do not contribute to policy optimization\n print('Not using costs')\n\n # Experience buffer\n replay_buffer = ReplayBuffer(obs_dim=obs_dim, act_dim=act_dim, size=replay_size)\n\n # Count variables\n if proc_id()==0:\n var_counts = tuple(count_vars(scope) for scope in \n ['main/pi', 'main/qr1', 'main/qr2', 'main/qc', 'main'])\n print(('\\nNumber of parameters: \\t pi: %d, \\t qr1: %d, \\t qr2: %d, \\t qc: %d, \\t total: %d\\n')%var_counts)\n\n # Min Double-Q:\n min_q_pi = tf.minimum(qr1_pi, qr2_pi)\n min_q_pi_targ = tf.minimum(qr1_pi_targ, qr2_pi_targ)\n\n # Targets for Q and V regression\n q_backup = tf.stop_gradient(r_ph + gamma*(1-d_ph)*(min_q_pi_targ - alpha * logp_pi2))\n qc_backup = tf.stop_gradient(c_ph + gamma*(1-d_ph)*qc_pi_targ)\n \n cost_constraint = cost_lim * (1 - gamma ** max_ep_len) / (1 - gamma) / max_ep_len\n damp = damp_scale * tf.reduce_mean(cost_constraint - qc)\n \n # Soft actor-critic losses\n pi_loss = tf.reduce_mean(alpha * logp_pi - min_q_pi + (beta - damp) * qc_pi)\n qr1_loss = 0.5 * tf.reduce_mean((q_backup - qr1)**2)\n qr2_loss = 0.5 * tf.reduce_mean((q_backup - qr2)**2)\n qc_loss = 0.5 * tf.reduce_mean((qc_backup - qc)**2)\n q_loss = qr1_loss + qr2_loss + qc_loss\n\n # Loss for alpha\n entropy_constraint *= act_dim\n pi_entropy = -tf.reduce_mean(logp_pi)\n # alpha_loss = - soft_alpha * (entropy_constraint - pi_entropy)\n alpha_loss = - alpha * (entropy_constraint - pi_entropy)\n print('using entropy constraint', entropy_constraint)\n\n # Loss for beta\n if use_costs:\n if cost_constraint is None:\n # Convert assuming equal cost accumulated each step\n # Note this isn't the case, since the early in episode doesn't usually have cost,\n # but since our algorithm optimizes the discounted infinite horizon from each entry\n # in the replay buffer, we should be approximately correct here.\n # It's worth checking empirical total undiscounted costs to see if they match.\n cost_constraint = cost_lim * (1 - gamma ** max_ep_len) / (1 - gamma) / max_ep_len\n print('using cost constraint', cost_constraint)\n beta_loss = beta * (cost_constraint - qc)\n\n # Policy train op\n # (has to be separate from value train op, because qr1_pi appears in pi_loss)\n train_pi_op = MpiAdamOptimizer(learning_rate=lr).minimize(pi_loss, var_list=get_vars('main/pi'), name='train_pi')\n\n # Value train op\n with tf.control_dependencies([train_pi_op]):\n train_q_op = MpiAdamOptimizer(learning_rate=lr).minimize(q_loss, var_list=get_vars('main/q'), name='train_q')\n\n if fixed_entropy_bonus is None:\n entreg_optimizer = MpiAdamOptimizer(learning_rate=lr)\n with tf.control_dependencies([train_q_op]):\n train_entreg_op = entreg_optimizer.minimize(alpha_loss, var_list=get_vars('entreg'))\n\n if use_costs and fixed_cost_penalty is None:\n costpen_optimizer = MpiAdamOptimizer(learning_rate=lr*lr_scale)\n if fixed_entropy_bonus is None:\n with tf.control_dependencies([train_entreg_op]):\n train_costpen_op = costpen_optimizer.minimize(beta_loss, var_list=get_vars('costpen'))\n else:\n with 
tf.control_dependencies([train_q_op]):\n train_costpen_op = costpen_optimizer.minimize(beta_loss, var_list=get_vars('costpen'))\n \n\n # Polyak averaging for target variables\n target_update = get_target_update('main', 'target', polyak)\n\n # Single monolithic update with explicit control dependencies\n with tf.control_dependencies([train_pi_op]):\n with tf.control_dependencies([train_q_op]):\n grouped_update = tf.group([target_update])\n\n if fixed_entropy_bonus is None:\n grouped_update = tf.group([grouped_update, train_entreg_op])\n if use_costs and fixed_cost_penalty is None:\n grouped_update = tf.group([grouped_update, train_costpen_op])\n\n # Initializing targets to match main variables\n # As a shortcut, use our exponential moving average update w/ coefficient zero\n target_init = get_target_update('main', 'target', 0.0)\n\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n sess.run(target_init)\n\n # Sync params across processes\n sess.run(sync_all_params())\n\n # Setup model saving\n logger.setup_tf_saver(sess, inputs={'x': x_ph, 'a': a_ph},\n outputs={'mu': mu, 'pi': pi, 'qr1': qr1, 'qr2': qr2, 'qc': qc})\n\n def get_action(o, deterministic=False):\n act_op = mu if deterministic else pi\n return sess.run(act_op, feed_dict={x_ph: o.reshape(1,-1)})[0]\n \n def test_agent(n=10):\n for j in range(n):\n o, r, d, ep_ret, ep_cost, ep_len, ep_goals, = test_env.reset(), 0, False, 0, 0, 0, 0\n while not(d or (ep_len == max_ep_len)):\n # Take deterministic actions at test time\n o, r, d, info = test_env.step(get_action(o, True))\n if render and proc_id() == 0 and j == 0:\n test_env.render()\n ep_ret += r\n ep_cost += info.get('cost', 0)\n ep_len += 1\n ep_goals += 1 if info.get('goal_met', False) else 0\n logger.store(TestEpRet=ep_ret, TestEpCost=ep_cost, TestEpLen=ep_len, TestEpGoals=ep_goals)\n\n start_time = time.time()\n o, r, d, ep_ret, ep_cost, ep_len, ep_goals = env.reset(), 0, False, 0, 0, 0, 0\n total_steps = steps_per_epoch * epochs\n\n # variables to measure in an update\n vars_to_get = dict(LossPi=pi_loss, LossQR1=qr1_loss, LossQR2=qr2_loss, LossQC=qc_loss,\n QR1Vals=qr1, QR2Vals=qr2, QCVals = qc, LogPi=logp_pi, PiEntropy=pi_entropy,\n Alpha=alpha, LogAlpha=log_alpha, LossAlpha=alpha_loss)\n if use_costs:\n vars_to_get.update(dict(Beta=beta, LogBeta=log_beta, LossBeta=beta_loss))\n\n print('starting training', proc_id())\n\n # Main loop: collect experience in env and update/log each epoch\n number_model = 0\n cum_cost = 0\n local_steps = 0\n local_steps_per_epoch = steps_per_epoch // num_procs()\n local_batch_size = batch_size // num_procs()\n epoch_start_time = time.time()\n for t in range(total_steps // num_procs()):\n \"\"\"\n Until local_start_steps have elapsed, randomly sample actions\n from a uniform distribution for better exploration. 
Afterwards,\n use the learned policy.\n \"\"\"\n if t > local_start_steps:\n a = get_action(o)\n else:\n a = env.action_space.sample()\n\n # Step the env\n o2, r, d, info = env.step(a)\n r *= reward_scale # yee-haw\n c = info.get('cost', 0)\n ep_ret += r\n ep_cost += c\n ep_len += 1\n ep_goals += 1 if info.get('goal_met', False) else 0\n local_steps += 1\n \n # Track cumulative cost over training\n cum_cost += c\n\n # Ignore the \"done\" signal if it comes from hitting the time\n # horizon (that is, when it's an artificial terminal signal\n # that isn't based on the agent's state)\n d = False if ep_len==max_ep_len else d\n\n # Store experience to replay buffer\n replay_buffer.store(o, a, r, o2, d, c)\n\n # Super critical, easy to overlook step: make sure to update\n # most recent observation!\n o = o2\n\n if d or (ep_len == max_ep_len):\n logger.store(EpRet=ep_ret, EpCost=ep_cost, EpLen=ep_len, EpGoals=ep_goals)\n o, r, d, ep_ret, ep_cost, ep_len, ep_goals = env.reset(), 0, False, 0, 0, 0, 0\n\n if t > 0 and t % update_freq == 0:\n #if index_risk < 0:\n # index_risk = 0\n \n for j in range(update_freq):\n batch = replay_buffer.sample_batch(local_batch_size)\n feed_dict = {x_ph: batch['obs1'],\n x2_ph: batch['obs2'],\n a_ph: batch['acts'],\n r_ph: batch['rews'],\n c_ph: batch['costs'],\n d_ph: batch['done'],\n }\n if t < local_update_after:\n logger.store(**sess.run(vars_to_get, feed_dict))\n else:\n values, _ = sess.run([vars_to_get, grouped_update], feed_dict)\n logger.store(**values)\n\n # End of epoch wrap-up\n if t > 0 and t % local_steps_per_epoch == 0:\n epoch = t // local_steps_per_epoch\n \n #=====================================================================#\n # Cumulative cost calculations #\n #=====================================================================#\n cumulative_cost = mpi_sum(cum_cost)\n cost_rate = cumulative_cost / ((epoch+1)*steps_per_epoch)\n \n #if index_risk > 0:\n # index_risk = index_risk - 1/300\n\n # Save model\n if (epoch % save_freq == 0) or (epoch == epochs-1):\n logger.save_state({'env': env}, number_model)\n number_model += 1\n\n # Test the performance of the deterministic version of the agent.\n test_start_time = time.time()\n test_agent()\n logger.store(TestTime=time.time() - test_start_time)\n\n logger.store(EpochTime=time.time() - epoch_start_time)\n epoch_start_time = time.time()\n\n # Log info about epoch\n logger.log_tabular('Epoch', epoch)\n logger.log_tabular('EpRet', with_min_and_max=True)\n logger.log_tabular('TestEpRet', with_min_and_max=True)\n logger.log_tabular('EpCost', with_min_and_max=True)\n logger.log_tabular('TestEpCost', with_min_and_max=True)\n logger.log_tabular('EpLen', average_only=True)\n logger.log_tabular('TestEpLen', average_only=True)\n logger.log_tabular('EpGoals', average_only=True)\n logger.log_tabular('TestEpGoals', average_only=True)\n logger.log_tabular('CumulativeCost', cumulative_cost)\n logger.log_tabular('CostRate', cost_rate)\n logger.log_tabular('TotalEnvInteracts', mpi_sum(local_steps))\n logger.log_tabular('QR1Vals', with_min_and_max=True)\n logger.log_tabular('QR2Vals', with_min_and_max=True)\n logger.log_tabular('QCVals', with_min_and_max=True)\n logger.log_tabular('LogPi', with_min_and_max=True)\n logger.log_tabular('LossPi', average_only=True)\n logger.log_tabular('LossQR1', average_only=True)\n logger.log_tabular('LossQR2', average_only=True)\n logger.log_tabular('LossQC', average_only=True)\n logger.log_tabular('LossAlpha', average_only=True)\n logger.log_tabular('LogAlpha', average_only=True)\n 
logger.log_tabular('Alpha', average_only=True)\n if use_costs:\n logger.log_tabular('LossBeta', average_only=True)\n logger.log_tabular('LogBeta', average_only=True)\n logger.log_tabular('Beta', average_only=True)\n logger.log_tabular('PiEntropy', average_only=True)\n logger.log_tabular('TestTime', average_only=True)\n logger.log_tabular('EpochTime', average_only=True)\n logger.log_tabular('TotalTime', time.time()-start_time)\n logger.dump_tabular()\n\nif __name__ == '__main__':\n import json\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--env', type=str, default='Safexp-PointGoal1-v0')\n parser.add_argument('--hid', type=int, default=256)\n parser.add_argument('--l', type=int, default=2)\n parser.add_argument('--gamma', type=float, default=0.99)\n parser.add_argument('--lr', type=float, default=1e-3)\n parser.add_argument('--seed', '-s', type=int, default=0)\n parser.add_argument('--epochs', type=int, default=100)\n parser.add_argument('--exp_name', type=str, default='sac')\n parser.add_argument('--steps_per_epoch', type=int, default=30000)\n parser.add_argument('--update_freq', type=int, default=100)\n parser.add_argument('--cpu', type=int, default=4)\n parser.add_argument('--render', default=False, action='store_true')\n parser.add_argument('--local_start_steps', default=500, type=int)\n parser.add_argument('--local_update_after', default=500, type=int)\n parser.add_argument('--batch_size', default=256, type=int)\n parser.add_argument('--fixed_entropy_bonus', default=None, type=float)\n parser.add_argument('--entropy_constraint', type=float, default= -1)\n parser.add_argument('--fixed_cost_penalty', default=None, type=float)\n parser.add_argument('--cost_constraint', type=float, default=None)\n parser.add_argument('--cost_lim', type=float, default=None)\n parser.add_argument('--lr_s', type=int, default=50)\n parser.add_argument('--damp_s', type=int, default=10)\n parser.add_argument('--logger_kwargs_str', type=json.loads, default='{\"output_dir\": \"./data\"}')\n args = parser.parse_args()\n\n try:\n import safety_gym\n except:\n print('Make sure to install Safety Gym to use constrained RL environments.')\n\n mpi_fork(args.cpu)\n\n from wc_sac.utils.run_utils import setup_logger_kwargs\n \n logger_kwargs = setup_logger_kwargs(args.exp_name, args.seed)\n logger_kwargs= args.logger_kwargs_str\n\n sac(lambda : gym.make(args.env), actor_fn=mlp_actor, critic_fn=mlp_critic,\n ac_kwargs=dict(hidden_sizes=[args.hid]*args.l),\n gamma=args.gamma, seed=args.seed, epochs=args.epochs, batch_size=args.batch_size,\n logger_kwargs=logger_kwargs, steps_per_epoch=args.steps_per_epoch,\n update_freq=args.update_freq, lr=args.lr, render=args.render,\n local_start_steps=args.local_start_steps, local_update_after=args.local_update_after,\n fixed_entropy_bonus=args.fixed_entropy_bonus, entropy_constraint=args.entropy_constraint,\n fixed_cost_penalty=args.fixed_cost_penalty, cost_constraint=args.cost_constraint, cost_lim = args.cost_lim, lr_scale = args.lr_s, damp_scale = args.damp_s,\n )\n"
] |
[
[
"tensorflow.get_variable",
"tensorflow.concat",
"tensorflow.control_dependencies",
"tensorflow.reduce_sum",
"tensorflow.minimum",
"tensorflow.global_variables",
"tensorflow.tanh",
"tensorflow.group",
"numpy.random.randint",
"tensorflow.layers.dense",
"tensorflow.stop_gradient",
"tensorflow.Session",
"numpy.zeros",
"numpy.log",
"tensorflow.shape",
"tensorflow.placeholder",
"tensorflow.exp",
"tensorflow.global_variables_initializer",
"tensorflow.set_random_seed",
"tensorflow.clip_by_value",
"tensorflow.constant",
"numpy.random.seed",
"tensorflow.reduce_mean",
"tensorflow.assign",
"tensorflow.log",
"tensorflow.variable_scope",
"tensorflow.nn.softplus"
]
] |
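This record is TensorFlow 1.x graph code end to end, so a faithful runnable excerpt would need the full session setup. Two framework-free pieces of its logic can instead be sketched with plain numpy: the polyak target update built by `get_target_update`, and the conversion from an undiscounted episode cost limit to the discounted `cost_constraint` (the hyperparameter values below are assumed):

```python
import numpy as np

# Polyak averaging, as wired up by get_target_update (numpy arrays stand in
# for tf.Variables): targ <- polyak * targ + (1 - polyak) * main.
polyak = 0.995
main = np.array([1.0, 2.0, 3.0])
targ = np.zeros_like(main)
for _ in range(3):
    targ = polyak * targ + (1 - polyak) * main
print(targ)

# Discounted per-step cost constraint, exactly the formula in the record:
# cost_lim * (1 - gamma**T) / (1 - gamma) / T.
gamma, max_ep_len, cost_lim = 0.99, 1000, 25.0
print(cost_lim * (1 - gamma ** max_ep_len) / (1 - gamma) / max_ep_len)
```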
yamathcy/motif
|
[
"3f43568e59f0879fbab5ef278e9e687b7cac3dd6"
] |
[
"tests/feature_extractors/test_cesium.py"
] |
[
"\"\"\"Test motif.features.cesium\n\"\"\"\nimport unittest\nimport numpy as np\n\nfrom motif.feature_extractors import cesium\n\n\ndef array_equal(array1, array2):\n return np.all(np.isclose(array1, array2, atol=1e-7))\n\n\nclass TestCesiumFeatures(unittest.TestCase):\n\n def setUp(self):\n self.ftr = cesium.CesiumFeatures()\n\n def test_get_feature_vector(self):\n times = np.linspace(0, 1, 2000)\n freqs_hz = 440.0 * np.ones((2000, ))\n salience = 0.5 * np.ones((2000, ))\n sample_rate = 2000\n with self.assertRaises(NotImplementedError):\n self.ftr.get_feature_vector(\n times, freqs_hz, salience, sample_rate\n )\n\n def test_feature_names(self):\n expected = range(80)\n actual = self.ftr.feature_names\n self.assertEqual(expected, actual)\n\n def test_get_id(self):\n expected = 'cesium'\n actual = self.ftr.get_id()\n self.assertEqual(expected, actual)\n"
] |
[
[
"numpy.ones",
"numpy.linspace",
"numpy.isclose"
]
] |
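The fixture here is small enough to restate on its own: `numpy.linspace` and `numpy.ones` build a one-second, 2000-sample pitch contour, and `numpy.isclose` backs the tolerance-based array comparison. A standalone sketch:

```python
import numpy as np

times = np.linspace(0, 1, 2000)           # 2000 evenly spaced time stamps
freqs_hz = 440.0 * np.ones((2000,))       # constant 440 Hz contour
salience = 0.5 * np.ones((2000,))

def array_equal(array1, array2):
    # Same tolerance-based comparison as the helper in the record.
    return np.all(np.isclose(array1, array2, atol=1e-7))

print(array_equal(freqs_hz, np.full(2000, 440.0)))   # True
```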
mittagessen/cutout
|
[
"c5bb68b4d71fe3625c89c6e005dad333143d052d"
] |
[
"cutout/model.py"
] |
[
"import torch\n\nfrom torchvision import models\nimport torch.nn.functional as F\nfrom torch import autograd\n\nfrom torch import nn\n\n\ndef _wi(m):\n if isinstance(m, torch.nn.Linear):\n torch.nn.init.xavier_uniform_(m.weight.data)\n torch.nn.init.constant_(m.bias.data, 0)\n elif isinstance(m, torch.nn.LSTM):\n for p in m.parameters():\n # weights\n if p.data.dim() == 2:\n torch.nn.init.orthogonal_(p.data)\n # initialize biases to 1 (jozefowicz 2015)\n else:\n torch.nn.init.constant_(p.data[len(p)//4:len(p)//2], 1.0)\n elif isinstance(m, torch.nn.GRU):\n for p in m.parameters():\n torch.nn.init.orthogonal_(p.data)\n elif isinstance(m, torch.nn.Conv2d):\n torch.nn.init.kaiming_uniform_(m.weight, nonlinearity='relu')\n torch.nn.init.constant_(m.bias, 0)\n\nclass ClassificationNet(nn.Module):\n \"\"\"\n ResNet-152\n \"\"\"\n def __init__(self, refine_features=False):\n super(ClassificationNet, self).__init__()\n self.resnet = models.resnet152(pretrained=True)\n self.refine_features(refine_features)\n self.fc = nn.Linear(self.resnet.fc.in_features, 1)\n self.adamaxpool = nn.AdaptiveMaxPool2d(1)\n\n def refine_features(self, refine_features):\n if not refine_features:\n for param in self.resnet.parameters():\n param.requires_grad = False\n\n def forward(self, x):\n x = self.resnet.conv1(x)\n x = self.resnet.bn1(x)\n x = self.resnet.relu(x)\n x = self.resnet.maxpool(x)\n\n x = self.resnet.layer1(x)\n x = self.resnet.layer2(x)\n x = self.resnet.layer3(x)\n x = self.resnet.layer4(x)\n\n x = self.resnet.avgpool(x)\n x = x.permute(0, 2, 3, 1)\n x = self.fc(x).permute(0, 3, 1, 2)\n x = self.adamaxpool(x)\n return x.squeeze().unsqueeze(0)\n\n def init_weights(self):\n self.fc.apply(_wi)\n"
] |
[
[
"torch.nn.AdaptiveMaxPool2d",
"torch.nn.init.constant_",
"torch.nn.init.kaiming_uniform_",
"torch.nn.Linear",
"torch.nn.init.orthogonal_",
"torch.nn.init.xavier_uniform_"
]
] |
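The `_wi` initializer in this record dispatches on layer type. A minimal sketch of just the `nn.Linear` branch, applied through `Module.apply` the way `init_weights` does; the layer size is an assumption chosen to match a ResNet-152 head:

```python
import torch
from torch import nn

def init_linear(m):
    # Same pattern as the nn.Linear branch of _wi in the record.
    if isinstance(m, nn.Linear):
        nn.init.xavier_uniform_(m.weight.data)
        nn.init.constant_(m.bias.data, 0)

head = nn.Linear(2048, 1)     # assumed in_features, mirroring resnet.fc.in_features
head.apply(init_linear)
print(head.bias.data)         # tensor([0.])
```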
siddharthjoshi01/Banknote-Authentication-Project
|
[
"842974648742318c3d0831020ce30efc23d54015"
] |
[
"scripts/proj1_B.py"
] |
[
"############################################################################################\n# The following program deals with the problem 2 of project 1. The program implements the #\n# machine learning algorithms: perceptron, logistic regression, support vector machines, #\n# decision trees, random forests and k-nearest neighbor. #\n# With the model trained on subsequent algorithms, the accuracy of each was compared. #\n############################################################################################\n\nimport numpy as np \nimport pandas as pd \nfrom sklearn.metrics import accuracy_score\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.linear_model import Perceptron\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\n\nfrom sklearn.model_selection import train_test_split\n\n\n# function to determine the data metrics to be displayed\ndef analysis(y_test, y_pred, y_combined, y_combined_pred):\n \n print('Incorrectly classified training samples: %d' % (y_test != y_pred).sum())\n print('Accuracy: %.4f' % accuracy_score(y_test, y_pred))\n print('Combined (train & test) incorrectly classified samples: %d' % (y_combined != y_combined_pred).sum())\n print('Combined (train & test) Accuracy: %.4f' % accuracy_score(y_combined, y_combined_pred))\n\n\n# function to standardize testing and training data\ndef standardize(xtrain, xtest):\n \n scalar = StandardScaler() # defining a standard scalar\n scalar.fit(xtrain)\n std_xtrain = scalar.transform(xtrain) # standardizing training data\n std_xtest = scalar.transform(xtest) # standardizing testing data\n std_xComb = np.vstack((std_xtrain, std_xtest))\n xComb = np.vstack((xtrain, xtest))\n\n return std_xtrain, std_xtest, std_xComb, xComb\n\n\n# function to implement perceptron algorithm\ndef perceptron(xtrain, xtest, ytrain, ytest):\n \n std_xtrain, std_xtest, std_xComb, xComb = standardize(xtrain, xtest)\n perceptron = Perceptron(max_iter=10, tol=1e-2, eta0=1e-3, fit_intercept=True, random_state=0, verbose=False)\n perceptron.fit(std_xtrain, ytrain.values.ravel()) # training step\n ypredict = perceptron.predict(std_xtest) # prediction step\n predict_yComb = perceptron.predict(std_xComb)\n\n return ypredict, predict_yComb\n\n\n# function to implement logistic regression algorithm\ndef logistic_regression(xtrain,xtest,ytrain,ytest):\n\n std_xtrain, std_xtest, std_xComb, xComb = standardize(xtrain, xtest)\n loreg = LogisticRegression(C=10.0, solver='lbfgs', multi_class='ovr', random_state=1)\n loreg.fit(std_xtrain, ytrain) # training step\n ypredict = loreg.predict(std_xtest) # prediction step\n predict_yComb = loreg.predict(std_xComb)\n \n return ypredict, predict_yComb\n\n\n# function to implement support vector machines algorithm\ndef supportvectormachine(xtrain,xtest,ytrain,ytest):\n \n std_xtrain, std_xtest, std_xComb, xComb = standardize(xtrain, xtest)\n svm = SVC(kernel='linear', C=3.0, random_state=0)\n svm.fit(std_xtrain, ytrain) # training step\n ypredict = svm.predict(std_xtest) # prediction step\n predict_yComb = svm.predict(std_xComb)\n\n return ypredict, predict_yComb\n\n\n# function to implement decision tree algorithm\ndef decisiontree(xtrain,xtest,ytrain,ytest):\n \n std_xtrain, std_xtest, std_xComb, xComb = standardize(xtrain, xtest)\n tree = DecisionTreeClassifier(criterion='entropy', max_depth=5, random_state=0)\n 
tree.fit(xtrain, ytrain) # training step\n ypredict = tree.predict(xtest) # prediction step\n predict_yComb = tree.predict(xComb)\n \n return ypredict, predict_yComb\n\n\n# function to implement random forests algorithm\ndef randomforest(xtrain, xtest, ytrain, ytest):\n \n std_xtrain, std_xtest, std_xComb, xComb = standardize(xtrain, xtest)\n frst = RandomForestClassifier(criterion='entropy', n_estimators=10,random_state=1,min_samples_split = 5, n_jobs=2)\n frst.fit(xtrain, ytrain) # training step\n ypredict = frst.predict(xtest) # prediction step\n predict_yComb = frst.predict(xComb)\n \n return ypredict, predict_yComb\n\n\n# function to implement k nearest neighbor algorithm\ndef knearestneighbor(xtrain, xtest, ytrain, ytest):\n\n std_xtrain, std_xtest, std_xComb, xComb = standardize(xtrain, xtest)\n knn = KNeighborsClassifier(n_neighbors=20,algorithm='auto', p=2, metric='minkowski')\n knn.fit(std_xtrain, ytrain) # training step\n ypredict = knn.predict(std_xtest) # prediction step\n predict_yComb = knn.predict(std_xComb)\n \n return ypredict, predict_yComb\n\n\ndef algorithms(algo, xtrain, xtest, ytrain, ytest):\n\n if algo == 'perceptron':\n print('PERCEPTRON')\n ypredict, predict_yComb = perceptron(xtrain, xtest, ytrain, ytest)\n\n elif algo == 'logisticregression':\n print('LOGISTIC REGRESSION')\n ypredict, predict_yComb = logistic_regression(xtrain, xtest, ytrain, ytest)\n\n elif algo == 'supportvectormachine':\n print('SUPPORT VECTOR MACHINES')\n ypredict, predict_yComb = supportvectormachine(xtrain, xtest, ytrain, ytest)\n\n elif algo == 'decisiontrees':\n print('DECISION TREES')\n ypredict, predict_yComb = decisiontree(xtrain, xtest, ytrain, ytest)\n\n elif algo == 'randomforests':\n print('RANDOM FORESTS')\n ypredict, predict_yComb = randomforest(xtrain, xtest, ytrain, ytest)\n\n elif algo == 'knearestneighbor':\n print('K NEAREST NEIGHBORS')\n ypredict, predict_yComb = knearestneighbor(xtrain, xtest, ytrain, ytest)\n\n else:\n print('Out of Scope')\n \n return ypredict, predict_yComb\n\n\nfile = '..\\\\resources\\\\data_banknote_authentication.txt'\ndat = pd.read_csv(file)\ndf = pd.DataFrame(dat)\nX = df.iloc[:, 0:3] # considering all features\nY = df.iloc[:, 4]\n\n\nxtrain, xtest, ytrain, ytest = train_test_split(X, Y, test_size=0.3, random_state=0) #splitting the data into train and test\nmethods = ['perceptron','logisticregression','supportvectormachine','decisiontrees','randomforests','knearestneighbor']\n\nfor method in methods:\n ypredict, predict_yComb = algorithms(method,xtrain, xtest, ytrain, ytest)\n yComb = np.hstack((ytrain, ytest))\n analysis(ytest, ypredict, yComb, predict_yComb)\n print()\n"
] |
[
[
"numpy.hstack",
"pandas.read_csv",
"sklearn.linear_model.LogisticRegression",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.metrics.accuracy_score",
"sklearn.model_selection.train_test_split",
"pandas.DataFrame",
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.svm.SVC",
"sklearn.preprocessing.StandardScaler",
"numpy.vstack",
"sklearn.linear_model.Perceptron"
]
] |
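The script above assumes the banknote text file is on disk, so an excerpt cannot run as-is. A minimal end-to-end sketch of one branch (standardize, split, fit a `Perceptron`, score with `accuracy_score`) on synthetic two-class data, where the data itself is an assumption:

```python
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import Perceptron
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

rng = np.random.RandomState(0)
X = np.vstack([rng.normal(0, 1, (100, 4)), rng.normal(3, 1, (100, 4))])
y = np.hstack([np.zeros(100), np.ones(100)])

xtrain, xtest, ytrain, ytest = train_test_split(X, y, test_size=0.3, random_state=0)
scaler = StandardScaler().fit(xtrain)                     # standardize on train only
clf = Perceptron(max_iter=10, tol=1e-2, eta0=1e-3, random_state=0)
clf.fit(scaler.transform(xtrain), ytrain)
print('Accuracy: %.4f' % accuracy_score(ytest, clf.predict(scaler.transform(xtest))))
```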
yangyuethz/pyEPR
|
[
"39d914587938cd7aa7fc546444c5bd4572a922dc"
] |
[
"pyEPR/project_info.py"
] |
[
"\"\"\"\nMain interface module to use pyEPR.\n\nContains code to conenct to Ansys and to analyze HFSS files using the EPR method.\n\nThis module handles the micowave part of the analysis and conenction to\n\nFurther contains code to be able to do autogenerated reports,\n\nCopyright Zlatko Minev, Zaki Leghtas, and the pyEPR team\n2015, 2016, 2017, 2018, 2019, 2020\n\"\"\"\n\nfrom __future__ import print_function # Python 2.7 and 3 compatibility\n\nimport sys\nfrom pathlib import Path\n\nimport pandas as pd\n\nfrom . import Dict, ansys, config, logger\nfrom .toolbox.pythonic import get_instance_vars\n\n\ndiss_opt = ['dielectrics_bulk', 'dielectric_surfaces', 'resistive_surfaces', 'seams']\n\nclass ProjectInfo(object):\n \"\"\"\n Primary class to store interface information between ``pyEPR`` and ``Ansys``.\n\n * **Ansys:** stores and provides easy access to the ansys interface classes :py:class:`pyEPR.ansys.HfssApp`,\n :py:class:`pyEPR.ansys.HfssDesktop`, :py:class:`pyEPR.ansys.HfssProject`, :py:class:`pyEPR.ansys.HfssDesign`,\n :py:class:`pyEPR.ansys.HfssSetup` (which, if present could nbe a subclass, such as a driven modal setup\n :py:class:`pyEPR.ansys.HfssDMSetup`, eigenmode :py:class:`pyEPR.ansys.HfssEMSetup`, or Q3D :py:class:`pyEPR.ansys.AnsysQ3DSetup`),\n the 3D modeler to design geometry :py:class:`pyEPR.ansys.HfssModeler`.\n * **Junctions:** The class stores params about the design that the user puts will use, such as the names and\n properties of the junctions, such as whihc rectangle and line is associated with which junction.\n\n\n Note:\n\n **Junction parameters.**\n The junction parameters are stored in the ``self.junctions`` ordered dictionary\n\n A Josephson tunnel junction has to have its parameters specified here for the analysis.\n Each junction is given a name and is specified by a dictionary.\n It has the following properties:\n\n * ``Lj_variable`` (str):\n Name of HFSS variable that specifies junction inductance Lj defined\n on the boundary condition in HFSS.\n WARNING: DO NOT USE Global names that start with $.\n * ``rect`` (str):\n String of Ansys name of the rectangle on which the lumped boundary condition is defined.\n * ``line`` (str):\n Name of HFSS polyline which spans the length of the recntalge.\n Used to define the voltage across the junction.\n Used to define the current orientation for each junction.\n Used to define sign of ZPF.\n * ``length`` (str):\n Length in HFSS of the junction rectangle and line (specified in meters).\n To create, you can use :code:`epr.parse_units('100um')`.\n * ``Cj_variable`` (str, optional) [experimental]:\n Name of HFSS variable that specifies junction inductance Cj defined\n on the boundary condition in HFSS. DO NOT USE Global names that start with ``$``.\n\n Warning:\n\n To define junctions, do **NOT** use global names!\n I.e., do not use names in ansys that start with ``$``.\n\n\n Note:\n\n **Junction parameters example .** To define junction parameters, see the following example\n\n .. code-block:: python\n :linenos:\n\n # Create project infor class\n pinfo = ProjectInfo()\n\n # Now, let us add a junction called `j1`, with the following properties\n pinfo.junctions['j1'] = {\n 'Lj_variable' : 'Lj_1', # name of Lj variable in Ansys\n 'rect' : 'jj_rect_1',\n 'line' : 'jj_line_1',\n #'Cj' : 'Cj_1' # name of Cj variable in Ansys - optional\n }\n\n To extend to define 5 junctions in bulk, we could use the following script\n\n .. 
code-block:: python\n :linenos:\n\n n_junctions = 5\n for i in range(1, n_junctions + 1):\n pinfo.junctions[f'j{i}'] = {'Lj_variable' : f'Lj_{i}',\n 'rect' : f'jj_rect_{i}',\n 'line' : f'jj_line_{i}'}\n\n\n .. _Google Python Style Guide:\n http://google.github.io/styleguide/pyguide.html\n\n \"\"\"\n\n class _Dissipative:\n \"\"\"\n Deprecating the _Dissipative class and turning it into a dictionary.\n This is used to message people on the deprecation so they could change their scripts.\n \"\"\"\n def __init__(self):\n self['pinfo'] = None\n for opt in diss_opt:\n self[opt] = None\n\n def __setitem__(self, key, value):\n # --- check valid inputs ---\n if not (key in diss_opt or key == 'pinfo'):\n raise ValueError(f\"No such parameter {key}\")\n if key != 'pinfo' and (not isinstance(value, list) or \\\n not all(isinstance(x, str) for x in value)) and (value != None):\n raise ValueError(f'dissipative[\\'{key}\\'] must be a list of strings ' \\\n 'containing names of models in the project!')\n if key != 'pinfo' and hasattr(self['pinfo'], 'design'):\n for x in value:\n if x not in self['pinfo'].get_all_object_names():\n raise ValueError(\n f'\\'{x}\\' is not an object in the HFSS project')\n super().__setattr__(key, value)\n\n def __getitem__(self, attr):\n if not (attr in diss_opt or attr == 'pinfo'):\n raise AttributeError(f'dissipitive has no attribute \"{attr}\". '\\\n f'The possible attributes are:\\n {str(diss_opt)}')\n return super().__getattribute__(attr)\n\n def __setattr__(self, attr, value):\n logger.warning(\n f\"DEPRECATED!! use pinfo.dissipative['{attr}'] = {value} instead!\")\n self[attr] = value\n\n def __getattr__(self, attr):\n raise AttributeError(f'dissipitive has no attribute \"{attr}\". '\\\n f'The possible attributes are:\\n {str(diss_opt)}')\n\n def __getattribute__(self, attr):\n if attr in diss_opt:\n logger.warning(\n f\"DEPRECATED!! use pinfo.dissipative['{attr}'] instead!\")\n return super().__getattribute__(attr)\n\n def __repr__(self):\n return str(self.data())\n\n def data(self):\n \"\"\"Return dissipatvie as dictionary\"\"\"\n return {str(opt): self[opt] for opt in diss_opt}\n\n def __init__(self, project_path: str = None, project_name: str = None, design_name: str = None,\n setup_name: str = None, do_connect: bool = True):\n \"\"\"\n Keyword Arguments:\n\n project_path (str) : Directory path to the hfss project file.\n Should be the directory, not the file.\n Defaults to ``None``; i.e., assumes the project is open, and thus gets the project based\n on `project_name`.\n project_name (str) : Name of the project within the project_path.\n Defaults to ``None``, which will get the current active one.\n design_name (str) : Name of the design within the project.\n Defaults to ``None``, which will get the current active one.\n setup_name (str) : Name of the setup within the design.\n Defaults to ``None``, which will get the current active one.\n\n do_connect (bool) [additional]: Do create connection to Ansys or not? 
Defaults to ``True``.\n\n \"\"\"\n\n # Path: format path correctly to system convention\n self.project_path = str(Path(project_path)) \\\n if not (project_path is None) else None\n self.project_name = project_name\n self.design_name = design_name\n self.setup_name = setup_name\n\n # HFSS desgin: describe junction parameters\n # TODO: introduce modal labels\n self.junctions = Dict() # See above for help\n self.ports = Dict()\n\n # Dissipative HFSS volumes and surfaces\n self.dissipative = self._Dissipative()\n self.options = config.ansys\n\n # Conected to HFSS variable\n self.app = None\n self.desktop = None\n self.project = None\n self.design = None\n self.setup = None\n\n if do_connect:\n self.connect()\n self.dissipative['pinfo'] = self\n\n _Forbidden = ['app', 'design', 'desktop', 'project',\n 'dissipative', 'setup', '_Forbidden', 'junctions']\n\n def save(self):\n '''\n Return all the data in a dectionary form that can be used to be saved\n '''\n return dict(\n pinfo=pd.Series(get_instance_vars(self, self._Forbidden)),\n dissip=pd.Series(self.dissipative.data()),\n options=pd.Series(get_instance_vars(self.options)),\n junctions=pd.DataFrame(self.junctions),\n ports=pd.DataFrame(self.ports),\n )\n\n def connect(self):\n \"\"\"\n Do establihs connection to Ansys desktop.\n \"\"\"\n logger.info('Connecting to Ansys Desktop API...')\n\n self.app, self.desktop, self.project = ansys.load_ansys_project(\n self.project_name, self.project_path)\n self.project_name = self.project.name\n self.project_path = self.project.get_path()\n\n # Design\n if self.design_name is None:\n self.design = self.project.get_active_design()\n self.design_name = self.design.name\n logger.info(f'\\tOpened active design\\n\\\n\\tDesign: {self.design_name} [Solution type: {self.design.solution_type}]')\n else:\n\n try:\n self.design = self.project.get_design(self.design_name)\n logger.info(f'\\tOpened active design\\n\\\n\\tDesign: {self.design_name} [Solution type: {self.design.solution_type}]')\n\n except Exception as e:\n _traceback = sys.exc_info()[2]\n logger.error(f\"Original error \\N{loudly crying face}: {e}\\n\")\n raise(Exception(' Did you provide the correct design name?\\\n Failed to pull up design. \\N{loudly crying face}').with_traceback(_traceback))\n\n # Setup\n try:\n setup_names = self.design.get_setup_names()\n\n if len(setup_names) == 0:\n logger.warning('\\tNo design setup detected.')\n if self.design.solution_type == 'Eigenmode':\n logger.warning('\\tCreating eigenmode default setup one.')\n setup = self.design.create_em_setup()\n self.setup_name = setup.name\n elif self.design.solution_type == 'DrivenModal':\n setup = self.design.create_dm_setup() # adding a driven modal design\n self.setup_name = setup.name\n else:\n self.setup_name = setup_names[0]\n\n # get the actual setup if there is one\n self.get_setup(self.setup_name)\n\n except Exception as e:\n\n _traceback = sys.exc_info()[2]\n logger.error(f\"Original error \\N{loudly crying face}: {e}\\n\")\n raise Exception(' Did you provide the correct setup name?\\\n Failed to pull up setup. \\N{loudly crying face}').with_traceback(_traceback)\n\n # Finalize\n self.project_name = self.project.name\n self.design_name = self.design.name\n\n logger.info(\n '\\tConnection to Ansys established successfully. 
\\N{grinning face} \\n')\n\n return self\n\n def get_setup(self, name: str):\n \"\"\"\n Connects to a specific setup for the design.\n Sets self.setup and self.setup_name.\n\n Args:\n name (str): Name of the setup.\n If the setup does not exist, then throws a loggger error.\n Defaults to ``None``, in which case returns None\n\n \"\"\"\n if name is None:\n return None\n else:\n self.setup = self.design.get_setup(name=self.setup_name)\n if self.setup is None:\n logger.error(f\"Could not retrieve setup: {self.setup_name}\\n \\\n Did you give the right name? Does it exist?\")\n\n self.setup_name = self.setup.name\n logger.info(\n f'\\tOpened setup `{self.setup_name}` ({type(self.setup)})')\n return self.setup\n\n def check_connected(self):\n \"\"\"\n Checks if fully connected including setup.\n \"\"\"\n return\\\n (self.setup is not None) and\\\n (self.design is not None) and\\\n (self.project is not None) and\\\n (self.desktop is not None) and\\\n (self.app is not None)\n\n def disconnect(self):\n '''\n Disconnect from existing HFSS design.\n '''\n assert self.check_connected() is True,\\\n \"It does not appear that you have connected to HFSS yet.\\\n Use the connect() method. \\N{nauseated face}\"\n self.project.release()\n self.desktop.release()\n self.app.release()\n ansys.release()\n\n # UTILITY FUNCTIONS\n\n def get_dm(self):\n '''\n Utility shortcut function to get the design and modeler.\n\n .. code-block:: python\n\n oDesign, oModeler = pinfo.get_dm()\n\n '''\n return self.design, self.design.modeler\n\n def get_all_variables_names(self):\n \"\"\"Returns array of all project and local design names.\"\"\"\n return self.project.get_variable_names() + self.design.get_variable_names()\n\n def get_all_object_names(self):\n \"\"\"Returns array of strings\"\"\"\n o_objects = []\n for s in [\"Non Model\", \"Solids\", \"Unclassified\", \"Sheets\", \"Lines\"]:\n o_objects += self.design.modeler.get_objects_in_group(s)\n return o_objects\n\n def validate_junction_info(self):\n \"\"\"Validate that the user has put in the junction info correctly.\n Do not also forget to check the length of the rectangles/line of\n the junction if you change it.\n \"\"\"\n\n all_variables_names = self.get_all_variables_names()\n all_object_names = self.get_all_object_names()\n\n for jjnm, jj in self.junctions.items():\n\n assert jj['Lj_variable'] in all_variables_names,\\\n \"\"\"pyEPR ProjectInfo user error found \\N{face with medical mask}:\n Seems like for junction `%s` you specified a design or project\n variable for `Lj_variable` that does not exist in HFSS by the name:\n `%s` \"\"\" % (jjnm, jj['Lj_variable'])\n\n for name in ['rect', 'line']:\n\n assert jj[name] in all_object_names, \\\n \"\"\"pyEPR ProjectInfo user error found \\N{face with medical mask}:\n Seems like for junction `%s` you specified a %s that does not exist\n in HFSS by the name: `%s` \"\"\" % (jjnm, name, jj[name])\n \n def __del__(self):\n logger.info('Disconnected from Ansys HFSS')\n self.disconnect()\n"
] |
[
[
"pandas.DataFrame"
]
] |
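The only pandas call recorded for this file is in `save()`, which packs the junction dictionaries into a `pandas.DataFrame`. A standalone sketch with made-up junction entries shaped like the docstring's example:

```python
import pandas as pd

junctions = {   # hypothetical entries, shaped like pinfo.junctions in the record
    'j1': {'Lj_variable': 'Lj_1', 'rect': 'jj_rect_1', 'line': 'jj_line_1'},
    'j2': {'Lj_variable': 'Lj_2', 'rect': 'jj_rect_2', 'line': 'jj_line_2'},
}
print(pd.DataFrame(junctions))   # junction names become columns, fields become rows
```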
isaacatwork/aws-py-lambda-container
|
[
"f9e500674c13aa658cd263725b4c515a9bd0a5c7"
] |
[
"anniversary/data_generator.py"
] |
[
"import argparse\nimport datetime\nimport itertools\nimport logging\nimport os\nimport random\n\nimport boto3\nimport pandas as pd\nfrom botocore.exceptions import ClientError\nfrom faker import Faker\n\n\ndef date_generator():\n \"\"\"\n Simple random date generator\n\n Returns datetime.date - a valid random date\n\n Example:\n >>> r_date = date_generator()\n >>> isinstance(r_date, datetime.date)\n True\n \"\"\"\n year = random.randint(1990, 2021) # assuming company started in 1990\n month = random.randint(1, 12)\n day = random.randint(1, 28)\n return datetime.date(year=year, month=month, day=day)\n\n\ndef create_bucket(bucket_name: str) -> bool:\n \"\"\"Create an S3 bucket\n\n Args:\n bucket_name(str): Bucket to create\n\n Returns:\n bool - True if bucket created, else False\n \"\"\"\n\n # Create bucket\n try:\n s3_client = boto3.client(\n \"s3\",\n endpoint_url=\"http://minio:9000\",\n aws_access_key_id=os.environ[\"AWS_ACCESS_KEY_ID\"],\n aws_secret_access_key=os.environ[\"AWS_SECRET_ACCESS_KEY\"],\n region_name=\"us-east-1\",\n )\n s3_client.create_bucket(Bucket=bucket_name)\n return True\n except KeyError as e:\n logging.error(f\"Key not found: {e}\")\n except ClientError as e:\n logging.error(e)\n return False\n\n\ndef employee_data_generator(bucket_name: str, num_of_rows: int = 1_000_000) -> bool:\n \"\"\"\n Generates a dateframe with 4 columns - employee_name, start_date, department and title.\n Writes generated data in 3 formats - csv, parquet, json into s3 bucket for lambda container playground\n\n Args:\n bucket_name(str): name of bucket to write sample data.\n num_of_rows(int): number of rows of data to generate. Max 1_000_000 rows. Defaults to 1_000_000\n\n Returns:\n Tr: genereted data\n\n Raises:\n ValueError: when num_of_rows is lager than 1000000\n\n Example:\n >>> employee_data_generator(bucket_name=test_bucket, num_of_rows=500)\n True\n \"\"\"\n if num_of_rows > 1_000_000:\n raise ValueError(\"Row number too large, Try less than 1 million rows\")\n\n client_kwargs = {\"client_kwargs\": {\"endpoint_url\": \"http://minio:9000\"}}\n\n fake = Faker(\"en_US\")\n sample_names = [fake.name() for _ in range(num_of_rows)]\n sample_roles = random.choices(\n [\n \"Data Engineer\",\n \"Data Scientist\",\n \"Engineering Manager\",\n \"Software Engineer\",\n \"Team Lead\",\n ],\n k=num_of_rows,\n )\n\n sample_department = random.choices(\n [\"R&D\", \"Sales\", \"Product\", \"Marketing\"], k=num_of_rows\n )\n\n generated_date = [date_generator() for _ in range(0, num_of_rows)]\n\n try:\n\n df = pd.DataFrame(\n {\n \"employee_name\": sample_names,\n \"start_date\": generated_date,\n \"department\": sample_department,\n \"role\": sample_roles,\n }\n )\n df.loc[:, \"start_date\"] = pd.to_datetime(df.start_date)\n\n df.to_parquet(\n f\"s3://{bucket_name}/sample.parquet\",\n engine=\"pyarrow\",\n compression=\"snappy\",\n storage_options=client_kwargs,\n )\n\n return True\n except Exception as e:\n logging.error(e)\n return False\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-n\", \"--num_of_rows\", help=\"Number of rows to generate\", required=False\n )\n parser.add_argument(\n \"-b\", \"--bucket\", help=\"Name of bucket to save generated files\", required=True\n )\n args = vars(parser.parse_args())\n\n num_of_rows = int(args.get(\"num_of_rows\", 1_000_00))\n bucket_name = args.get(\"bucket\")\n create_bucket(bucket_name=bucket_name)\n employee_data_generator(bucket_name=bucket_name, num_of_rows=num_of_rows)\n print(\"Data generation 
complete\")"
] |
[
[
"pandas.to_datetime",
"pandas.DataFrame"
]
] |
jleinonen/downscaling-rnn-gan
|
[
"3c5e819ab994352519a71df6eb69def1e95163f2"
] |
[
"dsrnngan/data.py"
] |
[
"import numpy as np\nfrom scipy.ndimage import convolve\n\n\nclass Smoothener(object):\n def __init__(self):\n (x,y) = np.mgrid[-2:3,-2:3]\n self.smoothing_kernel = np.exp(-0.5*(x**2+y**2)/(0.65**2))\n self.smoothing_kernel /= self.smoothing_kernel.sum()\n self.edge_shapes = {}\n\n def smoothen(self, img):\n img_shape = tuple(img.shape[2:4])\n if img_shape not in self.edge_shapes:\n s = convolve(np.ones(img_shape, dtype=np.float32),\n self.smoothing_kernel, mode=\"constant\")\n s = 1.0/s\n self.edge_shapes[img_shape] = s\n else:\n s = self.edge_shapes[img_shape]\n\n img_smooth = np.empty_like(img)\n for i in range(img.shape[0]):\n for j in range(img.shape[1]):\n for k in range(img.shape[-1]):\n img_smooth[i,j,:,:,k] = convolve(img[i,j,:,:,k],\n self.smoothing_kernel, mode=\"constant\") * s\n\n return img_smooth\n\n\nclass BatchGenerator(object):\n \n def __init__(self, sequences, decoder, downsampler, batch_size=32,\n random_seed=None, augment=True, smoothen_image=True, zeros_frac=0.0):\n\n self.batch_size = batch_size\n self.sequences = sequences\n self.N = self.sequences.shape[0]\n self.img_shape = tuple(self.sequences.shape[2:4])\n self.num_frames = self.sequences.shape[1]\n self.decoder = decoder\n self.downsampler = downsampler\n self.augment = augment\n self.smoothen_image = smoothen_image\n self.zeros_frac = zeros_frac\n self.smoothener = Smoothener()\n self.reset()\n\n def __iter__(self):\n return self\n\n def reset(self, random_seed=None):\n self.prng = np.random.RandomState(seed=random_seed)\n self.next_ind = np.array([], dtype=int)\n\n def next_indices(self):\n while len(self.next_ind) < self.batch_size:\n ind = np.arange(self.N, dtype=int)\n self.prng.shuffle(ind)\n self.next_ind = np.concatenate([self.next_ind, ind])\n return self.next_ind[:self.batch_size]\n\n def __next__(self):\n ind = self.next_indices()\n self.next_ind = self.next_ind[self.batch_size:]\n\n X = self.sequences[ind,...]\n\n X = self.decoder(X)\n if self.augment:\n X = self.augment_sequence_batch(X)\n Y = self.downsampler(X)\n X = self.decoder.normalize(X)\n Y = self.decoder.normalize(Y)\n if self.smoothen_image:\n X = self.smoothener.smoothen(X)\n\n if self.zeros_frac > 0.0:\n set_zero = (self.prng.rand(X.shape[0]) < self.zeros_frac)\n X[set_zero,...] = 0.0\n Y[set_zero,...] = 0.0\n\n return (X,Y)\n\n def augment_sequence(self, sequence):\n seq = sequence.copy()\n\n # mirror\n if bool(self.prng.randint(2)):\n seq = np.flip(seq, axis=1)\n if bool(self.prng.randint(2)):\n seq = np.flip(seq, axis=2)\n\n # rotate\n num_rot = self.prng.randint(4)\n if num_rot > 0:\n seq = np.rot90(seq, k=num_rot, axes=(1,2))\n\n return seq\n\n def augment_sequence_batch(self, sequences):\n sequences = sequences.copy()\n for i in range(sequences.shape[0]):\n sequences[i,...] 
= self.augment_sequence(sequences[i,...])\n return sequences\n\n\nclass RainRateDecoder(object):\n def __init__(self, scaling_fn, value_range=(np.log10(0.1), np.log10(100)),\n below_val=np.nan, normalize=False):\n\n self.logR = np.log10(np.load(scaling_fn))\n self.logR[0] = np.nan\n #self.x = np.arange(len(self.logR))\n self.value_range = value_range\n self.below_val = below_val\n self.normalize_output = normalize\n\n def __call__(self, img):\n valid = (img != 0)\n img_dec = np.full(img.shape, np.nan, dtype=np.float32)\n img_dec[valid] = self.logR[img[valid]]\n img_dec[img_dec<self.value_range[0]] = self.below_val\n img_dec.clip(max=self.value_range[1], out=img_dec)\n if self.normalize_output:\n img_dec = self.normalize(img_dec)\n return img_dec\n\n def normalize(self, img):\n return (img-self.below_val) / \\\n (self.value_range[1]-self.below_val) \n\n def denormalize(self, img, set_nan=True):\n img = img*(self.value_range[1]-self.below_val) + self.below_val\n img[img < self.value_range[0]] = self.below_val\n if set_nan:\n img[img == self.below_val] = np.nan\n return img\n\n\nclass CODDecoder(RainRateDecoder):\n def __init__(self,\n value_range=(np.log(1.19), np.log(158.48865)),\n below_val=np.nan, normalize=False,\n scale_factor=158.48865/(2**16-2)):\n\n self.value_range = value_range\n self.below_val = below_val\n self.normalize_output = normalize\n self.scale_factor = scale_factor\n\n def __call__(self, img):\n valid = (img != 0)\n img_dec = np.full(img.shape, np.nan, dtype=np.float32)\n img_dec[valid] = np.log(img[valid]*self.scale_factor)\n img_dec[(img_dec<self.value_range[0]) | ~valid] = self.below_val\n img_dec.clip(max=self.value_range[1], out=img_dec)\n if self.normalize_output:\n img_dec = self.normalize(img_dec)\n return img_dec\n\n\nclass LogDownsampler(object):\n def __init__(self, pool_size=16, min_val=np.nan, threshold_val=None):\n self.pool_size = pool_size \n self.min_val = min_val\n self.threshold_val = threshold_val\n\n def __call__(self, log_R):\n R = 10**log_R\n R[~np.isfinite(R)] = 0.0\n lores_shape = (log_R.shape[0], log_R.shape[1], \n log_R.shape[2]//self.pool_size, log_R.shape[3]//self.pool_size,\n log_R.shape[4])\n R_ds = np.zeros(lores_shape, dtype=np.float32)\n for (il,ih) in enumerate(range(0,log_R.shape[2],self.pool_size)):\n for (jl,jh) in enumerate(range(0,log_R.shape[3],self.pool_size)):\n R_ds[:,:,il,jl,:] = R[:,:,ih:ih+self.pool_size,\n jh:jh+self.pool_size,:].mean(axis=(2,3))\n log_R_ds = np.log10(R_ds)\n min_mask = ~np.isfinite(log_R_ds)\n if self.threshold_val is not None:\n min_mask |= (log_R_ds < self.threshold_val)\n log_R_ds[min_mask] = self.min_val\n return log_R_ds\n"
] |
[
[
"numpy.rot90",
"numpy.log",
"numpy.isfinite",
"numpy.empty_like",
"numpy.arange",
"scipy.ndimage.convolve",
"numpy.full",
"numpy.concatenate",
"numpy.ones",
"numpy.log10",
"numpy.flip",
"numpy.exp",
"numpy.load",
"numpy.array",
"numpy.zeros",
"numpy.random.RandomState"
]
] |
aGIToz/semi-supervised-segmentation-on-graphs
|
[
"cffdd6c75850c6cdc8e7752d60fbd372f6e61f34"
] |
[
"graph/graph.py"
] |
[
"\"\"\"\nA simple knn graph creation with exponential weights.\n\ntodo:\npython decorator for elapsed time?\nDo if for patches.\nGraph Symmetrization.\n\"\"\"\n\nfrom time import time\nimport numpy\nfrom bufferkdtree.neighbors import NearestNeighbors\nfrom utilities import roll, unroll\nimport pyopencl as cl\nimport numpy as np\nimport os\nimport sys\npwd = os.getcwd()\nbase_d = os.path.dirname(pwd)\nbool_1 = False\nprint(base_d) if bool_1 else print()\nprint(pwd) if bool_1 else print()\nmywf = os.path.join(pwd,\"graph/expo.cl\")\n\nclass initialParams:\n \"\"\" A basic structure to group all intial params for a graph creation.\n params:\n ------\n position: This can also be patches !\n signal: can be a texture or patches in case of some images\n k:\n sigma:\n \"\"\" \n\n def __init__(self, position, signal, k, sigma):\n \"\"\"\n >>>>NOTE<<<<: azad mar. 04 févr. 2020 10:31:52 CET\n Often the signal used to calculate the weights if not \n the same as signal used in evolving a pde which can be\n often be the initial distance seeds, or the rgb values\n or the xyz values.\n \"\"\"\n \n \"\"\"\n {{{OPTIMIZATION}}}: azad mar. 04 févr. 2020 10:36:29 CET\n The code can be more faster if one is creating the patch\n based graph as buffer KD tree already calculates the euc-\n lidean distance, right now you are calculating the euclidean\n distance in the kernel.\n \"\"\"\n \n self.position = position\n self.signal = signal \n self.k = k\n self.sigma = sigma\n\nclass Graph:\n \"\"\" A graph data structure which shall be returned \n params:\n ------\n wgts:\n ngbrs:\n k:\n \"\"\" \n\n def __init__(self, wgts, ngbrs, k):\n self.wgts =wgts \n self.ngbrs = ngbrs\n self.k = k\n\ndef buildGraph(ip, dev=0):\n \"\"\"Builds the knn grap with intial params.\n params:\n ------\n ip: initial params\n\n return: \n ------\n graph: graph object of Graph \n \"\"\"\n\n start = time()\n nbrs = NearestNeighbors(n_neighbors = ip.k + 1, algorithm=\"buffer_kd_tree\", tree_depth=9, plat_dev_ids={0:[0]}) \n nbrs.fit(ip.position)\n dists, inds = nbrs.kneighbors(ip.position)\n print(\"success\") if bool_1 else print()\n\n # now build the graph using those nns using gpu\n platform = cl.get_platforms()[0]\n print(platform)\n device = platform.get_devices()[dev]\n print(device)\n context = cl.Context([device])\n print(context)\n program = cl.Program(context, open(mywf).read()).build()\n print(program)\n queue = cl.CommandQueue(context)\n print(queue)\n \n # define the input here which is the ndbrs gpu\n ngbrs_gpu = inds\n ngbrs_gpu = ngbrs_gpu[0:,1:]\n ngbrs_gpu = unroll(ngbrs_gpu)\n ngbrs_gpu = ngbrs_gpu.astype('int32')\n \n # define the second input here which is the signal levels\n signal = ip.signal\n n, chnl = signal.shape\n signal = np.reshape(signal,(n*chnl,),order='F')\n signal = signal.astype('float32')\n print(\"signal\",signal.shape) if bool_1 else print()\n k = ip.k\n print(\"n is :\", n) if bool_1 else print()\n scale = ip.sigma\n \n # create the buffers on the device, intensity, nbgrs, weights\n mem_flags = cl.mem_flags\n ngbrs_buf = cl.Buffer(context, mem_flags.READ_ONLY | mem_flags.COPY_HOST_PTR,hostbuf=ngbrs_gpu)\n signal_buf = cl.Buffer(context, mem_flags.READ_ONLY | mem_flags.COPY_HOST_PTR, hostbuf=signal)\n weight_vec = np.ndarray(shape=(n*k,), dtype=np.float32)\n weight_buf = cl.Buffer(context, mem_flags.WRITE_ONLY, weight_vec.nbytes)\n \n # run the kernel to compute the weights\n program.compute_weights(queue, (n,), None, signal_buf, ngbrs_buf, weight_buf, np.int32(k), np.float32(scale), np.int32(chnl))\n \n 
queue.finish() #OT\n \n # copy the weihts to the host memory\n cl.enqueue_copy(queue, weight_vec, weight_buf)\n end = time() - start\n \n print('total time taken by the gpu python:', end) if bool_1 else print()\n # save the graph\n graph = Graph(weight_vec,ngbrs_gpu,k)\n return graph\n"
] |
[
[
"numpy.reshape",
"numpy.int32",
"numpy.ndarray",
"numpy.float32"
]
] |
kevinko1788/CarND-Behavioral-Cloning-P3
|
[
"f8b17260d98a1aae66c340efcbebf9db1b752039"
] |
[
"model.py"
] |
[
"import csv\nimport cv2\nimport numpy as np\nimport tensorflow\nimport pdb\n\nfrom keras.models import Sequential\nfrom keras.layers import Flatten, Dense, Lambda, Conv2D, Cropping2D, Activation\nfrom keras.layers import Dropout, ELU, MaxPooling2D\nfrom keras.optimizers import Adam\nfrom keras.callbacks import ModelCheckpoint\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.utils import shuffle\n\ndef preprocess_image(image):\n#will handle cropping in conv layer\n#only apply blur and cvt color to RGB, since image received with cv2.imread(), BGR2RGB\n preprocess_image = cv2.GaussianBlur(image, (3,3),0)\n preprocess_image = cv2.cvtColor(preprocess_image, cv2.COLOR_BGR2RGB)\n return preprocess_image\n \ndef flip_image(image):\n flipped_image = cv2.flip(image,1)\n return flipped_image\n\ndef generator(samples, batch_size=1024):\n while True:\n shuffle(samples)\n\n images = []\n angles = []\n\n batch_samples = samples[0:batch_size]\n\n for image_path, angle in batch_samples: \n image = cv2.imread(image_path)\n argumented_image = preprocess_image(image) \n images.append(argumented_image)\n angles.append(angle)\n # To improve: should handle flip earlier and shuffle to avoid bias\n images.append(flip_image(argumented_image))\n angles.append(angle*-1.0)\n print(len(images))\n yield shuffle(np.array(images),np.array(angles))\n\n# Load lines from CSV\nlines = []\ndataPath = './data'\nwith open(dataPath + '/driving_log.csv') as csvFile:\n reader = csv.reader(csvFile)\n next(reader, None)\n for line in reader:\n lines.append(line)\n\nimages_path = []\nangles = []\n\nfor line in lines:\n for i in range(3):\n source_path = line[i]\n filename = source_path.split('/')[-1]\n current_path= './data/IMG/' + filename\n images_path.append(current_path)\n angle = float(line[3])\n if i == 0:\n angles.append(angle)\n elif i == 1:\n angles.append(angle + 0.20)\n else:\n angles.append(angle - 0.20)\n\n#split train and validation data 0.2\nsamples = list(zip(images_path,angles))\ntrain_samples, validation_samples = train_test_split(samples, test_size=0.2)\n\nprint('Train samples:', len(train_samples))\nprint('Validation samples:', len(validation_samples))\n\n#generate images\nbatch_size = 1024\ntrain_generator = generator(train_samples, batch_size = batch_size)\nvalidation_generator = generator(validation_samples,batch_size = batch_size)\n\n#create model\nmodel = Sequential()\n\n# Preprocess incoming data, centered around zero with small standard deviation \nmodel.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(160,320,3))) \n\n# # trim image to only see section with road\nmodel.add(Cropping2D(cropping=((70,25),(0,0))))\n\n#layer 1- Convolution, no of filters- 24, filter size= 5x5, stride= 2x2\nmodel.add(Conv2D(24,(5,5), strides=(2,2)))\nmodel.add(Activation('elu'))\n\n#layer 2- Convolution, no of filters- 36, filter size= 5x5, stride= 2x2\nmodel.add(Conv2D(36,(5,5), strides=(2,2)))\nmodel.add(Activation('elu'))\n\n#layer 3- Convolution, no of filters- 48, filter size= 5x5, stride= 2x2\nmodel.add(Conv2D(48,(5,5), strides=(2,2)))\nmodel.add(Activation('elu'))\n\n#layer 4- Convolution, no of filters- 64, filter size= 3x3, stride= 1x1\nmodel.add(Conv2D(64,(3,3)))\nmodel.add(Activation('elu'))\n\n#layer 5- Convolution, no of filters- 64, filter size= 3x3, stride= 1x1\nmodel.add(Conv2D(64,(3,3)))\nmodel.add(Activation('elu'))\n\n#flatten image from 2D to side by side\nmodel.add(Flatten())\n\n#layer 6- fully connected layer 1\nmodel.add(Dense(100))\nmodel.add(Activation('elu'))\n\n#Adding a dropout 
layer to avoid overfitting. Here we are have given the dropout rate as 25% after first fully connected layer\nmodel.add(Dropout(0.25))\n\n#layer 7- fully connected layer 1\nmodel.add(Dense(50))\nmodel.add(Activation('elu'))\n\n\n#layer 8- fully connected layer 1\nmodel.add(Dense(10))\nmodel.add(Activation('elu'))\n\n#layer 9- fully connected layer 1\nmodel.add(Dense(1)) #here the final layer will contain one value as this is a regression problem and not classification\n\n# Compile and train the model\n# model.compile(loss='mse',optimizer='adam')\nmodel.compile(optimizer=Adam(lr=1e-4), loss='mse')\n\n\nfile_name = 'model.h5'\nprint('checkpointer')\ncheckpointer = ModelCheckpoint(file_name, monitor='val_loss', verbose = 1, save_best_only = True)\nprint('fit_generator')\n\nmodel.fit_generator(train_generator, steps_per_epoch= len(train_samples)//batch_size, validation_data=validation_generator, validation_steps=len(validation_samples)//batch_size, epochs=3, verbose=1, callbacks=[checkpointer])\n\nmodel.save(file_name)\nprint('model saved!')\n\nmodel.summary()\n"
] |
[
[
"sklearn.utils.shuffle",
"numpy.array",
"sklearn.model_selection.train_test_split"
]
] |
kingsaint/ExplainableEntityLinking
|
[
"2f26602a0187d8785214e639ccb8dc87f4ca2302"
] |
[
"src/rl/graph_search/pg.py"
] |
[
"\"\"\"\n Copyright (c) 2018, salesforce.com, inc.\n All rights reserved.\n SPDX-License-Identifier: BSD-3-Clause\n For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause\n \n Policy gradient (REINFORCE algorithm) training and inference.\n\"\"\"\n\nimport torch\n\nfrom src.learn_framework import LFramework\nimport src.rl.graph_search.beam_search as search\nimport src.utils.ops as ops\nfrom src.utils.ops import int_fill_var_cuda, var_cuda, zeros_var_cuda\n\n\nclass PolicyGradient(LFramework):\n def __init__(self, args, kg, pn):\n super(PolicyGradient, self).__init__(args, kg, pn)\n\n # Training hyperparameters\n self.relation_only = args.relation_only\n self.use_action_space_bucketing = args.use_action_space_bucketing\n self.num_rollouts = args.num_rollouts\n self.num_rollout_steps = args.num_rollout_steps\n self.baseline = args.baseline\n self.beta = args.beta # entropy regularization parameter\n self.gamma = args.gamma # shrinking factor\n self.action_dropout_rate = args.action_dropout_rate\n self.action_dropout_anneal_factor = args.action_dropout_anneal_factor\n self.action_dropout_anneal_interval = args.action_dropout_anneal_interval\n\n # Inference hyperparameters\n self.beam_size = args.beam_size\n\n # Analysis\n self.path_types = dict()\n self.num_path_types = 0\n\n def reward_fun(self, e1, r, e2, pred_e2):\n return (pred_e2 == e2).float()\n\n def loss(self, mini_batch):\n \n def stablize_reward(r):\n r_2D = r.view(-1, self.num_rollouts)\n if self.baseline == 'avg_reward':\n stabled_r_2D = r_2D - r_2D.mean(dim=1, keepdim=True)\n elif self.baseline == 'avg_reward_normalized':\n stabled_r_2D = (r_2D - r_2D.mean(dim=1, keepdim=True)) / (r_2D.std(dim=1, keepdim=True) + ops.EPSILON)\n else:\n raise ValueError('Unrecognized baseline function: {}'.format(self.baseline))\n stabled_r = stabled_r_2D.view(-1)\n return stabled_r\n \n e1, e2, r = self.format_batch(mini_batch, num_tiles=self.num_rollouts)\n output = self.rollout(e1, r, e2, num_steps=self.num_rollout_steps)\n\n # Compute policy gradient loss\n pred_e2 = output['pred_e2']\n log_action_probs = output['log_action_probs']\n action_entropy = output['action_entropy']\n\n # Compute discounted reward\n final_reward = self.reward_fun(e1, r, e2, pred_e2)\n if self.baseline != 'n/a':\n final_reward = stablize_reward(final_reward)\n cum_discounted_rewards = [0] * self.num_rollout_steps\n cum_discounted_rewards[-1] = final_reward\n R = 0\n for i in range(self.num_rollout_steps - 1, -1, -1):\n R = self.gamma * R + cum_discounted_rewards[i]\n cum_discounted_rewards[i] = R\n\n # Compute policy gradient\n pg_loss, pt_loss = 0, 0\n for i in range(self.num_rollout_steps):\n log_action_prob = log_action_probs[i]\n pg_loss += -cum_discounted_rewards[i] * log_action_prob\n pt_loss += -cum_discounted_rewards[i] * torch.exp(log_action_prob)\n\n # Entropy regularization\n entropy = torch.cat([x.unsqueeze(1) for x in action_entropy], dim=1).mean(dim=1)\n pg_loss = (pg_loss - entropy * self.beta).mean()\n pt_loss = (pt_loss - entropy * self.beta).mean()\n\n loss_dict = {}\n loss_dict['model_loss'] = pg_loss\n loss_dict['print_loss'] = float(pt_loss)\n loss_dict['reward'] = final_reward\n loss_dict['entropy'] = float(entropy.mean())\n if self.run_analysis:\n fn = torch.zeros(final_reward.size())\n for i in range(len(final_reward)):\n if not final_reward[i]:\n if int(pred_e2[i]) in self.kg.all_objects[int(e1[i])][int(r[i])]:\n fn[i] = 1\n loss_dict['fn'] = fn\n\n return loss_dict\n\n def rollout(self, e_s, 
q, e_t, num_steps, visualize_action_probs=False):\n \"\"\"\n Perform multi-step rollout from the source entity conditioned on the query relation.\n :param pn: Policy network.\n :param e_s: (Variable:batch) source entity indices.\n :param q: (Variable:batch) query relation indices.\n :param e_t: (Variable:batch) target entity indices.\n :param kg: Knowledge graph environment.\n :param num_steps: Number of rollout steps.\n :param visualize_action_probs: If set, save action probabilities for visualization.\n :return pred_e2: Target entities reached at the end of rollout.\n :return log_path_prob: Log probability of the sampled path.\n :return action_entropy: Entropy regularization term.\n \"\"\"\n assert (num_steps > 0)\n kg, pn = self.kg, self.mdl\n\n # Initialization\n log_action_probs = []\n action_entropy = []\n r_s = int_fill_var_cuda(e_s.size(), kg.dummy_start_r)\n seen_nodes = int_fill_var_cuda(e_s.size(), kg.dummy_e).unsqueeze(1)\n path_components = []\n\n path_trace = [(r_s, e_s)]\n emb_e_s = pn.initialize_path((r_s, e_s), q, kg, 'train')\n\n for t in range(num_steps):\n last_r, e = path_trace[-1]\n obs = [e_s, emb_e_s, q, e_t, t==0 ,t==(num_steps-1), last_r, seen_nodes]\n db_outcomes, inv_offset, policy_entropy = pn.transit(\n e, obs, kg, 'train', use_action_space_bucketing=self.use_action_space_bucketing)\n sample_outcome = self.sample_action(db_outcomes, inv_offset)\n action = sample_outcome['action_sample']\n pn.update_path(action, kg)\n action_prob = sample_outcome['action_prob']\n log_action_probs.append(ops.safe_log(action_prob))\n action_entropy.append(policy_entropy)\n seen_nodes = torch.cat([seen_nodes, e.unsqueeze(1)], dim=1)\n path_trace.append(action)\n\n if visualize_action_probs:\n top_k_action = sample_outcome['top_actions']\n top_k_action_prob = sample_outcome['top_action_probs']\n path_components.append((e, top_k_action, top_k_action_prob))\n\n pred_e2 = path_trace[-1][1]\n self.record_path_trace(path_trace)\n\n return {\n 'pred_e2': pred_e2,\n 'log_action_probs': log_action_probs,\n 'action_entropy': action_entropy,\n 'path_trace': path_trace,\n 'path_components': path_components\n }\n\n def sample_action(self, db_outcomes, inv_offset=None):\n \"\"\"\n Sample an action based on current policy.\n :param db_outcomes (((r_space, e_space), action_mask), action_dist):\n r_space: (Variable:batch) relation space\n e_space: (Variable:batch) target entity space\n action_mask: (Variable:batch) binary mask indicating padding actions.\n action_dist: (Variable:batch) action distribution of the current step based on set_policy\n network parameters\n :param inv_offset: Indexes for restoring original order in a batch.\n :return next_action (next_r, next_e): Sampled next action.\n :return action_prob: Probability of the sampled action.\n \"\"\"\n\n def apply_action_dropout_mask(action_dist, action_mask):\n if self.action_dropout_rate > 0:\n rand = torch.rand(action_dist.size())\n action_keep_mask = var_cuda(rand > self.action_dropout_rate).float()\n # There is a small chance that that action_keep_mask is accidentally set to zero.\n # When this happen, we take a random sample from the available actions.\n # sample_action_dist = action_dist * (action_keep_mask + ops.EPSILON)\n sample_action_dist = \\\n action_dist * action_keep_mask + ops.EPSILON * (1 - action_keep_mask) * action_mask\n return sample_action_dist\n else:\n return action_dist\n\n def sample(action_space, action_dist):\n sample_outcome = {}\n ((r_space, e_space), action_mask) = action_space\n sample_action_dist = 
apply_action_dropout_mask(action_dist, action_mask)\n idx = torch.multinomial(sample_action_dist, 1, replacement=True)\n next_r = ops.batch_lookup(r_space, idx)\n next_e = ops.batch_lookup(e_space, idx)\n action_prob = ops.batch_lookup(action_dist, idx)\n sample_outcome['action_sample'] = (next_r, next_e)\n sample_outcome['action_prob'] = action_prob\n return sample_outcome\n\n if inv_offset is not None:\n next_r_list = []\n next_e_list = []\n action_dist_list = []\n action_prob_list = []\n for action_space, action_dist in db_outcomes:\n sample_outcome = sample(action_space, action_dist)\n next_r_list.append(sample_outcome['action_sample'][0])\n next_e_list.append(sample_outcome['action_sample'][1])\n action_prob_list.append(sample_outcome['action_prob'])\n action_dist_list.append(action_dist)\n next_r = torch.cat(next_r_list, dim=0)[inv_offset]\n next_e = torch.cat(next_e_list, dim=0)[inv_offset]\n action_sample = (next_r, next_e)\n action_prob = torch.cat(action_prob_list, dim=0)[inv_offset]\n sample_outcome = {}\n sample_outcome['action_sample'] = action_sample\n sample_outcome['action_prob'] = action_prob\n else:\n sample_outcome = sample(db_outcomes[0][0], db_outcomes[0][1])\n\n return sample_outcome\n\n def predict(self, mini_batch, verbose=False):\n kg, pn = self.kg, self.mdl\n e1, e2, r = self.format_batch(mini_batch)\n beam_search_output = search.beam_search(\n pn, e1, r, e2, kg, self.num_rollout_steps, self.beam_size)\n pred_e2s = beam_search_output['pred_e2s']\n pred_e2_scores = beam_search_output['pred_e2_scores']\n if verbose:\n # print inference paths\n search_traces = beam_search_output['search_traces']\n output_beam_size = min(self.beam_size, pred_e2_scores.shape[1])\n for i in range(len(e1)):\n for j in range(output_beam_size):\n ind = i * output_beam_size + j\n if pred_e2s[i][j] == kg.dummy_e:\n break\n search_trace = []\n for k in range(len(search_traces)):\n search_trace.append((int(search_traces[k][0][ind]), int(search_traces[k][1][ind])))\n print('e1 = {},r = {},e2 = {},beam {},score = {},<PATH> {}'.format(kg.id2entity_aug[int(e1[i].item())], kg.id2relation[int(r[i].item())], kg.id2entity_aug[int(e2[i].item())],\n j, float(pred_e2_scores[i][j]), ops.format_path(search_trace, kg)))\n with torch.no_grad():\n pred_scores = zeros_var_cuda([len(e1), kg.num_entities])\n for i in range(len(e1)):\n pred_scores[i][pred_e2s[i]] = torch.exp(pred_e2_scores[i])\n return pred_scores\n\n def record_path_trace(self, path_trace):\n path_length = len(path_trace)\n flattened_path_trace = [x for t in path_trace for x in t]\n path_trace_mat = torch.cat(flattened_path_trace).reshape(-1, path_length)\n path_trace_mat = path_trace_mat.data.cpu().numpy()\n\n for i in range(path_trace_mat.shape[0]):\n path_recorder = self.path_types\n for j in range(path_trace_mat.shape[1]):\n e = path_trace_mat[i, j]\n if not e in path_recorder:\n if j == path_trace_mat.shape[1] - 1:\n path_recorder[e] = 1\n self.num_path_types += 1\n else:\n path_recorder[e] = {}\n else:\n if j == path_trace_mat.shape[1] - 1:\n path_recorder[e] += 1\n path_recorder = path_recorder[e]\n"
] |
[
[
"torch.exp",
"torch.no_grad",
"torch.multinomial",
"torch.cat"
]
] |
mnthnx64/All-about-faces
|
[
"860eb760286fcb5d04fd797e16842509467f462f"
] |
[
"gender-emotion/video_emotion_color_demo.py"
] |
[
"from statistics import mode\n\nimport cv2\nfrom keras.models import load_model\nimport numpy as np\n\nfrom utils.datasets import get_labels\nfrom utils.inference import detect_faces\nfrom utils.inference import draw_text\nfrom utils.inference import draw_bounding_box\nfrom utils.inference import apply_offsets\nfrom utils.inference import load_detection_model\nfrom utils.preprocessor import preprocess_input\n\n# parameters for loading data and images\ndetection_model_path = './trained_models/detection_models/haarcascade_frontalface_default.xml'\nemotion_model_path = './trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'\nemotion_labels = get_labels('fer2013')\n\n# hyper-parameters for bounding boxes shape\nframe_window = 10\nemotion_offsets = (20, 40)\n\n# loading models\nface_detection = load_detection_model(detection_model_path)\nemotion_classifier = load_model(emotion_model_path, compile=False)\n\n# getting input model shapes for inference\nemotion_target_size = emotion_classifier.input_shape[1:3]\n\n# starting lists for calculating modes\nemotion_window = []\n\n# starting video streaming\ncv2.namedWindow('window_frame')\nvideo_capture = cv2.VideoCapture(0)\nwhile True:\n bgr_image = video_capture.read()[1]\n gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)\n rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)\n faces = detect_faces(face_detection, gray_image)\n\n for face_coordinates in faces:\n\n x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)\n gray_face = gray_image[y1:y2, x1:x2]\n try:\n gray_face = cv2.resize(gray_face, (emotion_target_size))\n except:\n continue\n\n gray_face = preprocess_input(gray_face, True)\n gray_face = np.expand_dims(gray_face, 0)\n gray_face = np.expand_dims(gray_face, -1)\n emotion_prediction = emotion_classifier.predict(gray_face)\n emotion_probability = np.max(emotion_prediction)\n emotion_label_arg = np.argmax(emotion_prediction)\n emotion_text = emotion_labels[emotion_label_arg]\n emotion_window.append(emotion_text)\n\n if len(emotion_window) > frame_window:\n emotion_window.pop(0)\n try:\n emotion_mode = mode(emotion_window)\n except:\n continue\n\n if emotion_text == 'angry':\n color = emotion_probability * np.asarray((255, 0, 0))\n elif emotion_text == 'sad':\n color = emotion_probability * np.asarray((0, 0, 255))\n elif emotion_text == 'happy':\n color = emotion_probability * np.asarray((255, 255, 0))\n elif emotion_text == 'surprise':\n color = emotion_probability * np.asarray((0, 255, 255))\n else:\n color = emotion_probability * np.asarray((0, 255, 0))\n\n color = color.astype(int)\n color = color.tolist()\n\n draw_bounding_box(face_coordinates, rgb_image, color)\n draw_text(face_coordinates, rgb_image, emotion_mode,\n color, 0, -45, 1, 1)\n\n bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)\n cv2.imshow('window_frame', bgr_image)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\nvideo_capture.release()\ncv2.destroyAllWindows()\n"
] |
[
[
"numpy.asarray",
"numpy.max",
"numpy.expand_dims",
"numpy.argmax"
]
] |
YOULLNEVERWA/PyTorchVideoCompression
|
[
"48b57298c86557d151627dc3ef8a2db8ab613654"
] |
[
"DVC/subnet/flowlib.py"
] |
[
"\"\"\"\n# ==============================\n# flowlib.py\n# library for optical flow processing\n# Author: Ruoteng Li\n# Date: 6th Aug 2016\n# ==============================\n\"\"\"\nimport png\nimport numpy as np\nimport matplotlib.colors as cl\nimport matplotlib.pyplot as plt\nfrom PIL import Image\n\n\nUNKNOWN_FLOW_THRESH = 1e7\nSMALLFLOW = 0.0\nLARGEFLOW = 1e8\n\n\"\"\"\n=============\nFlow Section\n=============\n\"\"\"\n\n\ndef show_flow(filename):\n \"\"\"\n visualize optical flow map using matplotlib\n :param filename: optical flow file\n :return: None\n \"\"\"\n flow = read_flow(filename)\n img = flow_to_image(flow)\n plt.imshow(img)\n plt.show()\n\n\ndef visualize_flow(flow, mode='Y'):\n \"\"\"\n this function visualize the input flow\n :param flow: input flow in array\n :param mode: choose which color mode to visualize the flow (Y: Ccbcr, RGB: RGB color)\n :return: None\n \"\"\"\n if mode == 'Y':\n # Ccbcr color wheel\n img = flow_to_image(flow)\n plt.imshow(img)\n plt.show()\n elif mode == 'RGB':\n (h, w) = flow.shape[0:2]\n du = flow[:, :, 0]\n dv = flow[:, :, 1]\n valid = flow[:, :, 2]\n max_flow = max(np.max(du), np.max(dv))\n img = np.zeros((h, w, 3), dtype=np.float64)\n # angle layer\n img[:, :, 0] = np.arctan2(dv, du) / (2 * np.pi)\n # magnitude layer, normalized to 1\n img[:, :, 1] = np.sqrt(du * du + dv * dv) * 8 / max_flow\n # phase layer\n img[:, :, 2] = 8 - img[:, :, 1]\n # clip to [0,1]\n small_idx = img[:, :, 0:3] < 0\n large_idx = img[:, :, 0:3] > 1\n img[small_idx] = 0\n img[large_idx] = 1\n # convert to rgb\n img = cl.hsv_to_rgb(img)\n # remove invalid point\n img[:, :, 0] = img[:, :, 0] * valid\n img[:, :, 1] = img[:, :, 1] * valid\n img[:, :, 2] = img[:, :, 2] * valid\n # show\n plt.imshow(img)\n plt.show()\n\n return None\n\n\ndef read_flow(filename):\n \"\"\"\n read optical flow from Middlebury .flo file\n :param filename: name of the flow file\n :return: optical flow data in matrix\n \"\"\"\n f = open(filename, 'rb')\n try:\n magic = np.fromfile(f, np.float32, count=1)[0] # For Python3.x\n except:\n magic = np.fromfile(f, np.float32, count=1) # For Python2.x\n data2d = None\n\n if 202021.25 != magic:\n print('Magic number incorrect. 
Invalid .flo file')\n else:\n w = np.fromfile(f, np.int32, count=1)\n h = np.fromfile(f, np.int32, count=1)\n # print(\"Reading %d x %d flo file\" % (h, w))\n data2d = np.fromfile(f, np.float32, count=2 * w[0] * h[0])\n # reshape data into 3D array (columns, rows, channels)\n data2d = np.resize(data2d, (h[0], w[0], 2))\n f.close()\n return data2d\n\ndef read_flow_png(flow_file):\n \"\"\"\n Read optical flow from KITTI .png file\n :param flow_file: name of the flow file\n :return: optical flow data in matrix\n \"\"\"\n flow_object = png.Reader(filename=flow_file)\n flow_direct = flow_object.asDirect()\n flow_data = list(flow_direct[2])\n (w, h) = flow_direct[3]['size']\n flow = np.zeros((h, w, 3), dtype=np.float64)\n for i in range(len(flow_data)):\n flow[i, :, 0] = flow_data[i][0::3]\n flow[i, :, 1] = flow_data[i][1::3]\n flow[i, :, 2] = flow_data[i][2::3]\n\n invalid_idx = (flow[:, :, 2] == 0)\n flow[:, :, 0:2] = (flow[:, :, 0:2] - 2 ** 15) / 64.0\n flow[invalid_idx, 0] = 0\n flow[invalid_idx, 1] = 0\n return flow\n\n\ndef write_flow(flow, filename):\n \"\"\"\n write optical flow in Middlebury .flo format\n :param flow: optical flow map\n :param filename: optical flow file path to be saved\n :return: None\n \"\"\"\n f = open(filename, 'wb')\n magic = np.array([202021.25], dtype=np.float32)\n (height, width) = flow.shape[0:2]\n w = np.array([width], dtype=np.int32)\n h = np.array([height], dtype=np.int32)\n magic.tofile(f)\n w.tofile(f)\n h.tofile(f)\n flow.tofile(f)\n f.close()\n\n\ndef segment_flow(flow):\n h = flow.shape[0]\n w = flow.shape[1]\n u = flow[:, :, 0]\n v = flow[:, :, 1]\n\n idx = ((abs(u) > LARGEFLOW) | (abs(v) > LARGEFLOW))\n idx2 = (abs(u) == SMALLFLOW)\n class0 = (v == 0) & (u == 0)\n u[idx2] = 0.00001\n tan_value = v / u\n\n class1 = (tan_value < 1) & (tan_value >= 0) & (u > 0) & (v >= 0)\n class2 = (tan_value >= 1) & (u >= 0) & (v >= 0)\n class3 = (tan_value < -1) & (u <= 0) & (v >= 0)\n class4 = (tan_value < 0) & (tan_value >= -1) & (u < 0) & (v >= 0)\n class8 = (tan_value >= -1) & (tan_value < 0) & (u > 0) & (v <= 0)\n class7 = (tan_value < -1) & (u >= 0) & (v <= 0)\n class6 = (tan_value >= 1) & (u <= 0) & (v <= 0)\n class5 = (tan_value >= 0) & (tan_value < 1) & (u < 0) & (v <= 0)\n\n seg = np.zeros((h, w))\n\n seg[class1] = 1\n seg[class2] = 2\n seg[class3] = 3\n seg[class4] = 4\n seg[class5] = 5\n seg[class6] = 6\n seg[class7] = 7\n seg[class8] = 8\n seg[class0] = 0\n seg[idx] = 0\n\n return seg\n\n\ndef flow_error(tu, tv, u, v):\n \"\"\"\n Calculate average end point error\n :param tu: ground-truth horizontal flow map\n :param tv: ground-truth vertical flow map\n :param u: estimated horizontal flow map\n :param v: estimated vertical flow map\n :return: End point error of the estimated flow\n \"\"\"\n smallflow = 0.0\n '''\n stu = tu[bord+1:end-bord,bord+1:end-bord]\n stv = tv[bord+1:end-bord,bord+1:end-bord]\n su = u[bord+1:end-bord,bord+1:end-bord]\n sv = v[bord+1:end-bord,bord+1:end-bord]\n '''\n stu = tu[:]\n stv = tv[:]\n su = u[:]\n sv = v[:]\n\n idxUnknow = (abs(stu) > UNKNOWN_FLOW_THRESH) | (abs(stv) > UNKNOWN_FLOW_THRESH)\n stu[idxUnknow] = 0\n stv[idxUnknow] = 0\n su[idxUnknow] = 0\n sv[idxUnknow] = 0\n\n ind2 = [(np.absolute(stu) > smallflow) | (np.absolute(stv) > smallflow)]\n index_su = su[ind2]\n index_sv = sv[ind2]\n an = 1.0 / np.sqrt(index_su ** 2 + index_sv ** 2 + 1)\n un = index_su * an\n vn = index_sv * an\n\n index_stu = stu[ind2]\n index_stv = stv[ind2]\n tn = 1.0 / np.sqrt(index_stu ** 2 + index_stv ** 2 + 1)\n tun = index_stu * tn\n tvn = 
index_stv * tn\n\n '''\n angle = un * tun + vn * tvn + (an * tn)\n index = [angle == 1.0]\n angle[index] = 0.999\n ang = np.arccos(angle)\n mang = np.mean(ang)\n mang = mang * 180 / np.pi\n '''\n\n epe = np.sqrt((stu - su) ** 2 + (stv - sv) ** 2)\n epe = epe[ind2]\n mepe = np.mean(epe)\n return mepe\n\n\ndef flow_to_image(flow, display=False, maxrad = None):\n \"\"\"\n Convert flow into middlebury color code image\n :param flow: optical flow map\n :return: optical flow image in middlebury color\n \"\"\"\n u = flow[0, :, :]\n v = flow[1, :, :]\n\n maxu = -999.\n maxv = -999.\n minu = 999.\n minv = 999.\n\n idxUnknow = (abs(u) > UNKNOWN_FLOW_THRESH) | (abs(v) > UNKNOWN_FLOW_THRESH)\n u[idxUnknow] = 0\n v[idxUnknow] = 0\n\n maxu = max(maxu, np.max(u))\n minu = min(minu, np.min(u))\n\n maxv = max(maxv, np.max(v))\n minv = min(minv, np.min(v))\n\n rad = np.sqrt(u ** 2 + v ** 2)\n if maxrad == None:\n maxrad = max(-1, np.max(rad))\n\n if display:\n print(\"max flow: %.4f\\nflow range:\\nu = %.3f .. %.3f\\nv = %.3f .. %.3f\" % (maxrad, minu,maxu, minv, maxv))\n\n u = u/(maxrad + np.finfo(float).eps)\n v = v/(maxrad + np.finfo(float).eps)\n\n img = compute_color(u, v)\n\n idx = np.repeat(idxUnknow[:, :, np.newaxis], 3, axis=2)\n img[idx] = 0\n\n return np.uint8(img), maxrad\n\n\ndef evaluate_flow_file(gt, pred):\n \"\"\"\n evaluate the estimated optical flow end point error according to ground truth provided\n :param gt: ground truth file path\n :param pred: estimated optical flow file path\n :return: end point error, float32\n \"\"\"\n # Read flow files and calculate the errors\n gt_flow = read_flow(gt) # ground truth flow\n eva_flow = read_flow(pred) # predicted flow\n # Calculate errors\n average_pe = flow_error(gt_flow[:, :, 0], gt_flow[:, :, 1], eva_flow[:, :, 0], eva_flow[:, :, 1])\n return average_pe\n\n\ndef evaluate_flow(gt_flow, pred_flow):\n \"\"\"\n gt: ground-truth flow\n pred: estimated flow\n \"\"\"\n average_pe = flow_error(gt_flow[:, :, 0], gt_flow[:, :, 1], pred_flow[:, :, 0], pred_flow[:, :, 1])\n return average_pe\n\n\n\"\"\"\n==============\nDisparity Section\n==============\n\"\"\"\n\n\ndef read_disp_png(file_name):\n \"\"\"\n Read optical flow from KITTI .png file\n :param file_name: name of the flow file\n :return: optical flow data in matrix\n \"\"\"\n image_object = png.Reader(filename=file_name)\n image_direct = image_object.asDirect()\n image_data = list(image_direct[2])\n (w, h) = image_direct[3]['size']\n channel = len(image_data[0]) / w\n flow = np.zeros((h, w, channel), dtype=np.uint16)\n for i in range(len(image_data)):\n for j in range(channel):\n flow[i, :, j] = image_data[i][j::channel]\n return flow[:, :, 0] / 256\n\n\ndef disp_to_flowfile(disp, filename):\n \"\"\"\n Read KITTI disparity file in png format\n :param disp: disparity matrix\n :param filename: the flow file name to save\n :return: None\n \"\"\"\n f = open(filename, 'wb')\n magic = np.array([202021.25], dtype=np.float32)\n (height, width) = disp.shape[0:2]\n w = np.array([width], dtype=np.int32)\n h = np.array([height], dtype=np.int32)\n empty_map = np.zeros((height, width), dtype=np.float32)\n data = np.dstack((disp, empty_map))\n magic.tofile(f)\n w.tofile(f)\n h.tofile(f)\n data.tofile(f)\n f.close()\n\n\n\"\"\"\n==============\nImage Section\n==============\n\"\"\"\n\n\ndef read_image(filename):\n \"\"\"\n Read normal image of any format\n :param filename: name of the image file\n :return: image data in matrix uint8 type\n \"\"\"\n img = Image.open(filename)\n im = np.array(img)\n return 
im\n\n\ndef warp_image(im, flow):\n \"\"\"\n Use optical flow to warp image to the next\n :param im: image to warp\n :param flow: optical flow\n :return: warped image\n \"\"\"\n from scipy import interpolate\n image_height = im.shape[0]\n image_width = im.shape[1]\n flow_height = flow.shape[0]\n flow_width = flow.shape[1]\n n = image_height * image_width\n (iy, ix) = np.mgrid[0:image_height, 0:image_width]\n (fy, fx) = np.mgrid[0:flow_height, 0:flow_width]\n fx += flow[:,:,0]\n fy += flow[:,:,1]\n mask = np.logical_or(fx <0 , fx > flow_width)\n mask = np.logical_or(mask, fy < 0)\n mask = np.logical_or(mask, fy > flow_height)\n fx = np.minimum(np.maximum(fx, 0), flow_width)\n fy = np.minimum(np.maximum(fy, 0), flow_height)\n points = np.concatenate((ix.reshape(n,1), iy.reshape(n,1)), axis=1)\n xi = np.concatenate((fx.reshape(n, 1), fy.reshape(n,1)), axis=1)\n warp = np.zeros((image_height, image_width, im.shape[2]))\n for i in range(im.shape[2]):\n channel = im[:, :, i]\n plt.imshow(channel, cmap='gray')\n values = channel.reshape(n, 1)\n new_channel = interpolate.griddata(points, values, xi, method='cubic')\n new_channel = np.reshape(new_channel, [flow_height, flow_width])\n new_channel[mask] = 1\n warp[:, :, i] = new_channel.astype(np.uint8)\n\n return warp.astype(np.uint8)\n\n\n\"\"\"\n==============\nOthers\n==============\n\"\"\"\n\ndef scale_image(image, new_range):\n \"\"\"\n Linearly scale the image into desired range\n :param image: input image\n :param new_range: the new range to be aligned\n :return: image normalized in new range\n \"\"\"\n min_val = np.min(image).astype(np.float32)\n max_val = np.max(image).astype(np.float32)\n min_val_new = np.array(min(new_range), dtype=np.float32)\n max_val_new = np.array(max(new_range), dtype=np.float32)\n scaled_image = (image - min_val) / (max_val - min_val) * (max_val_new - min_val_new) + min_val_new\n return scaled_image.astype(np.uint8)\n\n\ndef compute_color(u, v):\n \"\"\"\n compute optical flow color map\n :param u: optical flow horizontal map\n :param v: optical flow vertical map\n :return: optical flow in color code\n \"\"\"\n [h, w] = u.shape\n img = np.zeros([h, w, 3])\n nanIdx = np.isnan(u) | np.isnan(v)\n u[nanIdx] = 0\n v[nanIdx] = 0\n\n colorwheel = make_color_wheel()\n ncols = np.size(colorwheel, 0)\n\n rad = np.sqrt(u**2+v**2)\n\n a = np.arctan2(-v, -u) / np.pi\n\n fk = (a+1) / 2 * (ncols - 1) + 1\n\n k0 = np.floor(fk).astype(int)\n\n k1 = k0 + 1\n k1[k1 == ncols+1] = 1\n f = fk - k0\n\n for i in range(0, np.size(colorwheel,1)):\n tmp = colorwheel[:, i]\n col0 = tmp[k0-1] / 255\n col1 = tmp[k1-1] / 255\n col = (1-f) * col0 + f * col1\n\n idx = rad <= 1\n col[idx] = 1-rad[idx]*(1-col[idx])\n notidx = np.logical_not(idx)\n\n col[notidx] *= 0.75\n img[:, :, i] = np.uint8(np.floor(255 * col*(1-nanIdx)))\n\n return img\n\n\ndef make_color_wheel():\n \"\"\"\n Generate color wheel according Middlebury color code\n :return: Color wheel\n \"\"\"\n RY = 15\n YG = 6\n GC = 4\n CB = 11\n BM = 13\n MR = 6\n\n ncols = RY + YG + GC + CB + BM + MR\n\n colorwheel = np.zeros([ncols, 3])\n\n col = 0\n\n # RY\n colorwheel[0:RY, 0] = 255\n colorwheel[0:RY, 1] = np.transpose(np.floor(255*np.arange(0, RY) / RY))\n col += RY\n\n # YG\n colorwheel[col:col+YG, 0] = 255 - np.transpose(np.floor(255*np.arange(0, YG) / YG))\n colorwheel[col:col+YG, 1] = 255\n col += YG\n\n # GC\n colorwheel[col:col+GC, 1] = 255\n colorwheel[col:col+GC, 2] = np.transpose(np.floor(255*np.arange(0, GC) / GC))\n col += GC\n\n # CB\n colorwheel[col:col+CB, 1] = 255 - 
np.transpose(np.floor(255*np.arange(0, CB) / CB))\n colorwheel[col:col+CB, 2] = 255\n col += CB\n\n # BM\n colorwheel[col:col+BM, 2] = 255\n colorwheel[col:col+BM, 0] = np.transpose(np.floor(255*np.arange(0, BM) / BM))\n col += + BM\n\n # MR\n colorwheel[col:col+MR, 2] = 255 - np.transpose(np.floor(255 * np.arange(0, MR) / MR))\n colorwheel[col:col+MR, 0] = 255\n\n return colorwheel\n \ndef save_flow_image(flow, image_file):\n \"\"\"\n save flow visualization into image file\n :param flow: optical flow data\n :param flow_fil\n :return: None\n \"\"\"\n flow_img = flow_to_image(flow)\n img_out = Image.fromarray(flow_img)\n img_out.save(image_file)"
] |
[
[
"matplotlib.pyplot.imshow",
"numpy.resize",
"numpy.sqrt",
"matplotlib.colors.hsv_to_rgb",
"numpy.arctan2",
"numpy.max",
"numpy.mean",
"scipy.interpolate.griddata",
"numpy.reshape",
"numpy.uint8",
"numpy.arange",
"numpy.finfo",
"numpy.size",
"numpy.repeat",
"numpy.zeros",
"numpy.logical_not",
"numpy.min",
"numpy.isnan",
"numpy.logical_or",
"numpy.floor",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.fromfile",
"numpy.maximum",
"numpy.absolute",
"numpy.dstack"
]
] |
wesleyktatum/bms_kaggle
|
[
"679c23b53ecbcf12ebd84a84a8619adc5f6a4ab2"
] |
[
"eval_distributed.py"
] |
[
"import os\nimport json\nimport argparse\nimport numpy as np\nimport pandas as pd\nfrom time import perf_counter\n\nfrom util import *\nfrom dataloader import MoleculeDataset\nfrom models.sasa import ResNet26, ResNet38, ResNet50\nfrom models.axial import axial18s, axial18srpe, axial26s, axial50s, axial50m, axial50l\nfrom models.resnet import resnet18, resnet34, resnet50\nfrom models.bilstm import biLSTM512\nfrom models.transformer import trans128_4x, trans256_4x, trans512_4x\nfrom models.caption import CaptionModel\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport torch.multiprocessing as mp\nimport torch.distributed as dist\n\nimport Levenshtein as lev\n\nDEVICE = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\ndef main(gpu, args, shard_id):\n rank = gpu\n dist.init_process_group(backend='nccl', init_method='env://',\n world_size=args.n_gpus, rank=rank)\n ckpt, ckpt_args, _ = load_model_from_ckpt(args.checkpoint_fn)\n\n if ckpt_args.encoder == 'resnet18':\n encoder = resnet18(pretrained=False, finetune=True)\n d_enc = 512\n elif ckpt_args.encoder == 'resnet34':\n encoder = resnet34(pretrained=False, finetune=True)\n d_enc = 512\n elif ckpt_args.encoder == 'resnet50':\n encoder = resnet50(pretrained=False, finetune=True)\n d_enc = 2048\n if ckpt_args.decoder == 'bilstm':\n decoder = biLSTM512(vocab_size=args.vocab_size, device=DEVICE, d_enc=d_enc)\n elif ckpt_args.decoder == 'trans128_4x':\n decoder = trans128_4x(vocab_size=args.vocab_size, d_enc=d_enc, N=ckpt_args.n_decoder_layers,\n device=DEVICE, teacher_force=False)\n elif ckpt_args.decoder == 'trans256_4x':\n decoder = trans256_4x(vocab_size=args.vocab_size, d_enc=d_enc, N=ckpt_args.n_decoder_layers,\n device=DEVICE, teacher_force=False)\n elif ckpt_args.decoder == 'trans512_4x':\n decoder = trans512_4x(vocab_size=args.vocab_size, d_enc=d_enc, N=ckpt_args.n_decoder_layers,\n device=DEVICE, teacher_force=False)\n model = CaptionModel(encoder, decoder)\n model.load_state_dict(ckpt['model_state_dict'])\n torch.cuda.set_device(gpu)\n model.cuda(gpu)\n model = nn.parallel.DistributedDataParallel(model, device_ids=[gpu])\n model.eval()\n\n if args.unrotated:\n write_fn = os.path.join(args.write_dir, '{}_{}_{}_unrotated_predictions{}.txt'.format(args.checkpoint_fn.split('/')[-1].split('.')[0], args.mode, args.search_mode, gpu))\n else:\n write_fn = os.path.join(args.write_dir, '{}_{}_{}_rotated_predictions{}.txt'.format(args.checkpoint_fn.split('/')[-1].split('.')[0], args.mode, args.search_mode, gpu))\n try:\n f = open(write_fn, 'r')\n f.close()\n already_wrote = True\n except FileNotFoundError:\n already_wrote = False\n if not already_wrote:\n log_file = open(write_fn, 'a')\n log_file.write('image_id\\tInChI\\n')\n log_file.close()\n\n print('loading shard {}...'.format(shard_id))\n mol_data = MoleculeDataset(args.mode, shard_id, args.imgs_dir, ckpt_args.img_size, unrotated=args.unrotated, rotate=False)\n img_ids = pd.read_csv(os.path.join(args.imgs_dir, '{}_shards'.format(args.mode), 'img_id_shard{}.csv'.format(shard_id))).image_id.values\n\n data_sampler = torch.utils.data.distributed.DistributedSampler(mol_data,\n num_replicas=args.n_gpus,\n rank=rank,\n shuffle=False)\n\n data_loader = torch.utils.data.DataLoader(mol_data, batch_size=args.batch_size,\n shuffle=False, num_workers=0,\n pin_memory=False, drop_last=False,\n sampler=data_sampler)\n\n for i, (batch_imgs, batch_img_id_idxs) in enumerate(data_loader):\n batch_imgs = batch_imgs.cuda(non_blocking=True)\n batch_size = 
batch_imgs.shape[0]\n final_batch = False\n if batch_size != args.batch_size:\n final_batch = True\n final_batch_size = batch_size\n full_chunks = int(final_batch_size / args.chunk_size)\n final_chunk = final_batch_size % args.chunk_size\n if final_batch:\n for j in range(full_chunks+1):\n if j == full_chunks:\n imgs = batch_imgs[j*args.chunk_size:(j*args.chunk_size)+final_chunk,:,:,:]\n img_id_idxs = batch_img_id_idxs[j*args.chunk_size:(j*args.chunk_size)+final_chunk]\n decoded = model.module.predict(imgs, search_mode=args.search_mode, width=args.beam_width,\n device=DEVICE)\n for k, img_id_idx in enumerate(img_id_idxs):\n pred_inchi = decode_inchi(decoded[k,:], args.ord_dict)\n img_id = img_ids[img_id_idx]\n log_file = open(write_fn, 'a')\n log_file.write('{}\\t{}\\n'.format(img_id, pred_inchi))\n log_file.close()\n else:\n imgs = batch_imgs[j*args.chunk_size:(j+1)*args.chunk_size,:,:,:]\n img_id_idxs = batch_img_id_idxs[j*args.chunk_size:(j+1)*args.chunk_size]\n decoded = model.module.predict(imgs, search_mode=args.search_mode, width=args.beam_width,\n device=DEVICE)\n for k, img_id_idx in enumerate(img_id_idxs):\n pred_inchi = decode_inchi(decoded[k,:], args.ord_dict)\n img_id = img_ids[img_id_idx]\n log_file = open(write_fn, 'a')\n log_file.write('{}\\t{}\\n'.format(img_id, pred_inchi))\n log_file.close()\n else:\n for j in range(args.batch_chunks):\n imgs = batch_imgs[j*args.chunk_size:(j+1)*args.chunk_size,:,:,:]\n img_id_idxs = batch_img_id_idxs[j*args.chunk_size:(j+1)*args.chunk_size]\n decoded = model.module.predict(imgs, search_mode=args.search_mode, width=args.beam_width,\n device=DEVICE)\n for k, img_id_idx in enumerate(img_id_idxs):\n pred_inchi = decode_inchi(decoded[k,:], args.ord_dict)\n img_id = img_ids[img_id_idx]\n log_file = open(write_fn, 'a')\n log_file.write('{}\\t{}\\n'.format(img_id, pred_inchi))\n log_file.close()\n del mol_data, data_loader\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--imgs_dir', type=str, default='/gscratch/pfaendtner/orion/mol_translation/data')\n parser.add_argument('--data_dir', type=str, default='data')\n parser.add_argument('--eval_dir', type=str, default='eval')\n parser.add_argument('--search_mode', choices=['greedy', 'beam'], default='greedy')\n parser.add_argument('--beam_width', type=int, default=4)\n parser.add_argument('--checkpoint_fn', type=str, default=None)\n parser.add_argument('--batch_size', type=int, default=256)\n parser.add_argument('--batch_chunks', type=int, default=8)\n parser.add_argument('--n_samples', type=int, default=10000)\n parser.add_argument('--unrotated', default=False, action='store_true')\n args = parser.parse_args()\n os.environ['MASTER_ADDR'] = 'localhost'\n os.environ['MASTER_PORT'] = '12355'\n\n args.mode = 'eval'\n shards_dir = os.path.join(args.imgs_dir, '{}_shards'.format(args.mode))\n with open('{}/char_dict.json'.format(args.data_dir), 'r') as f:\n args.char_dict = json.load(f)\n with open('{}/ord_dict.json'.format(args.data_dir), 'r') as f:\n args.ord_dict = json.load(f)\n args.vocab_size = len(args.char_dict.keys())\n args.chunk_size = args.batch_size // args.batch_chunks\n\n os.makedirs(args.eval_dir, exist_ok=True)\n args.n_gpus = torch.cuda.device_count()\n\n if args.unrotated:\n args.write_dir = os.path.join(args.eval_dir, '{}_{}_{}_unrotated_predictions/'.format(args.checkpoint_fn.split('/')[-1].split('.')[0], args.mode, args.search_mode))\n else:\n args.write_dir = os.path.join(args.eval_dir, 
'{}_{}_{}_rotated_predictions/'.format(args.checkpoint_fn.split('/')[-1].split('.')[0], args.mode, args.search_mode))\n os.makedirs(args.write_dir, exist_ok=True)\n\n n_shards = get_n_shards(shards_dir)\n for shard_id in range(n_shards):\n print(shard_id)\n print('crafting spawns...')\n\n mp.spawn(main, nprocs=args.n_gpus, args=(args, shard_id,))\n"
] |
[
[
"torch.distributed.init_process_group",
"torch.utils.data.distributed.DistributedSampler",
"torch.cuda.set_device",
"torch.multiprocessing.spawn",
"torch.utils.data.DataLoader",
"torch.cuda.is_available",
"torch.cuda.device_count",
"torch.nn.parallel.DistributedDataParallel"
]
] |
chenxyzj/car-color-classifier-yolo3-python
|
[
"013649f2e6219a28fd945a46e74d939d58c1ee17"
] |
[
"classifier.py"
] |
[
"# Copyright © 2019 by Spectrico\r\n# Licensed under the MIT License\r\n\r\nimport numpy as np\r\nimport json\r\nimport tensorflow as tf\r\nfrom PIL import Image, ImageOps\r\nimport cv2\r\nimport io\r\nimport config\r\n\r\nmodel_file = config.model_file\r\nlabel_file = config.label_file\r\ninput_layer = config.input_layer\r\noutput_layer = config.output_layer\r\nclassifier_input_size = config.classifier_input_size\r\n\r\ndef load_graph(model_file):\r\n graph = tf.Graph()\r\n graph_def = tf.GraphDef()\r\n\r\n with open(model_file, \"rb\") as f:\r\n graph_def.ParseFromString(f.read())\r\n with graph.as_default():\r\n tf.import_graph_def(graph_def)\r\n\r\n return graph\r\n\r\ndef load_labels(label_file):\r\n label = []\r\n with open(label_file, \"r\", encoding='cp1251') as ins:\r\n for line in ins:\r\n label.append(line.rstrip())\r\n\r\n return label\r\n\r\nclass Classifier():\r\n def __init__(self):\r\n # uncomment the next 3 lines if you want to use CPU instead of GPU\r\n #import os\r\n #os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\r\n #os.environ['CUDA_VISIBLE_DEVICES'] = '-1'\r\n\r\n self.graph = load_graph(model_file)\r\n self.labels = load_labels(label_file)\r\n\r\n input_name = \"import/\" + input_layer\r\n output_name = \"import/\" + output_layer\r\n self.input_operation = self.graph.get_operation_by_name(input_name)\r\n self.output_operation = self.graph.get_operation_by_name(output_name)\r\n\r\n self.sess = tf.Session(graph=self.graph)\r\n self.sess.graph.finalize() # Graph is read-only after this statement.\r\n\r\n def predict(self, img):\r\n img = img[:, :, ::-1]\r\n h, w = img.shape[:2]\r\n center_crop_size = min(w, h)\r\n x = int((w - center_crop_size) / 2)\r\n y = int((h - center_crop_size) / 2)\r\n img = img[y:y + center_crop_size, x:x + center_crop_size]\r\n img = cv2.resize(img, classifier_input_size)\r\n\r\n # Add a forth dimension since Tensorflow expects a list of images\r\n img = np.expand_dims(img, axis=0)\r\n\r\n # Scale the input image to the range used in the trained network\r\n img = img.astype(np.float32)\r\n img /= 127.5\r\n img -= 1.\r\n\r\n results = self.sess.run(self.output_operation.outputs[0], {\r\n self.input_operation.outputs[0]: img\r\n })\r\n results = np.squeeze(results)\r\n\r\n top = 3\r\n top_indices = results.argsort()[-top:][::-1]\r\n classes = []\r\n for ix in top_indices:\r\n classes.append({\"color\": self.labels[ix], \"prob\": str(results[ix])})\r\n return(classes)\r\n"
] |
[
[
"tensorflow.Graph",
"tensorflow.import_graph_def",
"numpy.expand_dims",
"numpy.squeeze",
"tensorflow.Session",
"tensorflow.GraphDef"
]
] |
damiafuentes/TelloSDKPy
|
[
"156b61e374150cf97a396973208b765f6dcec0c0"
] |
[
"djitellopy/tello.py"
] |
[
"\"\"\"Library for interacting with DJI Ryze Tello drones.\n\"\"\"\n\n# coding=utf-8\nimport logging\nimport socket\nimport time\nfrom threading import Thread\nfrom typing import Optional, Union, Type, Dict\n\nfrom .enforce_types import enforce_types\n\nimport av\nimport numpy as np\n\n\nthreads_initialized = False\ndrones: Optional[dict] = {}\nclient_socket: socket.socket\n\n\nclass TelloException(Exception):\n pass\n\n\n@enforce_types\nclass Tello:\n \"\"\"Python wrapper to interact with the Ryze Tello drone using the official Tello api.\n Tello API documentation:\n [1.3](https://dl-cdn.ryzerobotics.com/downloads/tello/20180910/Tello%20SDK%20Documentation%20EN_1.3.pdf),\n [2.0 with EDU-only commands](https://dl-cdn.ryzerobotics.com/downloads/Tello/Tello%20SDK%202.0%20User%20Guide.pdf)\n \"\"\"\n # Send and receive commands, client socket\n RESPONSE_TIMEOUT = 7 # in seconds\n TAKEOFF_TIMEOUT = 20 # in seconds\n FRAME_GRAB_TIMEOUT = 5\n TIME_BTW_COMMANDS = 0.1 # in seconds\n TIME_BTW_RC_CONTROL_COMMANDS = 0.001 # in seconds\n RETRY_COUNT = 3 # number of retries after a failed command\n TELLO_IP = '192.168.10.1' # Tello IP address\n\n # Video stream, server socket\n VS_UDP_IP = '0.0.0.0'\n VS_UDP_PORT = 11111\n\n CONTROL_UDP_PORT = 8889\n STATE_UDP_PORT = 8890\n\n # Constants for video settings\n BITRATE_AUTO = 0\n BITRATE_1MBPS = 1\n BITRATE_2MBPS = 2\n BITRATE_3MBPS = 3\n BITRATE_4MBPS = 4\n BITRATE_5MBPS = 5\n RESOLUTION_480P = 'low'\n RESOLUTION_720P = 'high'\n FPS_5 = 'low'\n FPS_15 = 'middle'\n FPS_30 = 'high'\n CAMERA_FORWARD = 0\n CAMERA_DOWNWARD = 1\n\n # Set up logger\n HANDLER = logging.StreamHandler()\n FORMATTER = logging.Formatter('[%(levelname)s] %(filename)s - %(lineno)d - %(message)s')\n HANDLER.setFormatter(FORMATTER)\n\n LOGGER = logging.getLogger('djitellopy')\n LOGGER.addHandler(HANDLER)\n LOGGER.setLevel(logging.INFO)\n # Use Tello.LOGGER.setLevel(logging.<LEVEL>) in YOUR CODE\n # to only receive logs of the desired level and higher\n\n # Conversion functions for state protocol fields\n INT_STATE_FIELDS = (\n # Tello EDU with mission pads enabled only\n 'mid', 'x', 'y', 'z',\n # 'mpry': (custom format 'x,y,z')\n # Common entries\n 'pitch', 'roll', 'yaw',\n 'vgx', 'vgy', 'vgz',\n 'templ', 'temph',\n 'tof', 'h', 'bat', 'time'\n )\n FLOAT_STATE_FIELDS = ('baro', 'agx', 'agy', 'agz')\n\n state_field_converters: Dict[str, Union[Type[int], Type[float]]]\n state_field_converters = {key : int for key in INT_STATE_FIELDS}\n state_field_converters.update({key : float for key in FLOAT_STATE_FIELDS})\n\n # VideoCapture object\n background_frame_read: Optional['BackgroundFrameRead'] = None\n\n stream_on = False\n is_flying = False\n\n def __init__(self,\n host=TELLO_IP,\n retry_count=RETRY_COUNT):\n\n global threads_initialized, client_socket, drones\n\n self.address = (host, Tello.CONTROL_UDP_PORT)\n self.stream_on = False\n self.retry_count = retry_count\n self.last_received_command_timestamp = time.time()\n self.last_rc_control_timestamp = time.time()\n\n if not threads_initialized:\n # Run Tello command responses UDP receiver on background\n client_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n response_receiver_thread = Thread(target=Tello.udp_response_receiver)\n response_receiver_thread.daemon = True\n response_receiver_thread.start()\n\n # Run state UDP receiver on background\n state_receiver_thread = Thread(target=Tello.udp_state_receiver)\n state_receiver_thread.daemon = True\n state_receiver_thread.start()\n\n threads_initialized = True\n\n drones[host] = 
{'responses': [], 'state': {}}\n\n self.LOGGER.info(\"Tello instance was initialized. Host: '{}'. Port: '{}'.\".format(host, Tello.CONTROL_UDP_PORT))\n\n def get_own_udp_object(self):\n \"\"\"Get own object from the global drones dict. This object is filled\n with responses and state information by the receiver threads.\n Internal method, you normally wouldn't call this yourself.\n \"\"\"\n global drones\n\n host = self.address[0]\n return drones[host]\n\n @staticmethod\n def udp_response_receiver():\n \"\"\"Setup drone UDP receiver. This method listens for responses of Tello.\n Must be run from a background thread in order to not block the main thread.\n Internal method, you normally wouldn't call this yourself.\n \"\"\"\n while True:\n try:\n data, address = client_socket.recvfrom(1024)\n\n address = address[0]\n Tello.LOGGER.debug('Data received from {} at client_socket'.format(address))\n\n if address not in drones:\n continue\n\n drones[address]['responses'].append(data)\n\n except Exception as e:\n Tello.LOGGER.error(e)\n break\n\n @staticmethod\n def udp_state_receiver():\n \"\"\"Setup state UDP receiver. This method listens for state information from\n Tello. Must be run from a background thread in order to not block\n the main thread.\n Internal method, you normally wouldn't call this yourself.\n \"\"\"\n state_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n state_socket.bind((\"\", Tello.STATE_UDP_PORT))\n\n while True:\n try:\n data, address = state_socket.recvfrom(1024)\n\n address = address[0]\n Tello.LOGGER.debug('Data received from {} at state_socket'.format(address))\n\n if address not in drones:\n continue\n\n data = data.decode('ASCII')\n drones[address]['state'] = Tello.parse_state(data)\n\n except Exception as e:\n Tello.LOGGER.error(e)\n break\n\n @staticmethod\n def parse_state(state: str) -> Dict[str, Union[int, float, str]]:\n \"\"\"Parse a state line to a dictionary\n Internal method, you normally wouldn't call this yourself.\n \"\"\"\n state = state.strip()\n Tello.LOGGER.debug('Raw state data: {}'.format(state))\n\n if state == 'ok':\n return {}\n\n state_dict = {}\n for field in state.split(';'):\n split = field.split(':')\n if len(split) < 2:\n continue\n\n key = split[0]\n value: Union[int, float, str] = split[1]\n\n if key in Tello.state_field_converters:\n num_type = Tello.state_field_converters[key]\n try:\n value = num_type(value)\n except ValueError as e:\n Tello.LOGGER.debug('Error parsing state value for {}: {} to {}'\n .format(key, value, num_type))\n Tello.LOGGER.error(e)\n continue\n\n state_dict[key] = value\n\n return state_dict\n\n def get_current_state(self) -> dict:\n \"\"\"Call this function to attain the state of the Tello. 
Returns a dict\n with all fields.\n Internal method, you normally wouldn't call this yourself.\n \"\"\"\n return self.get_own_udp_object()['state']\n\n def get_state_field(self, key: str):\n \"\"\"Get a specific sate field by name.\n Internal method, you normally wouldn't call this yourself.\n \"\"\"\n state = self.get_current_state()\n\n if key in state:\n return state[key]\n else:\n raise TelloException('Could not get state property: {}'.format(key))\n\n def get_mission_pad_id(self) -> int:\n \"\"\"Mission pad ID of the currently detected mission pad\n Only available on Tello EDUs after calling enable_mission_pads\n Returns:\n int: -1 if none is detected, else 1-8\n \"\"\"\n return self.get_state_field('mid')\n\n def get_mission_pad_distance_x(self) -> int:\n \"\"\"X distance to current mission pad\n Only available on Tello EDUs after calling enable_mission_pads\n Returns:\n int: distance in cm\n \"\"\"\n return self.get_state_field('x')\n\n def get_mission_pad_distance_y(self) -> int:\n \"\"\"Y distance to current mission pad\n Only available on Tello EDUs after calling enable_mission_pads\n Returns:\n int: distance in cm\n \"\"\"\n return self.get_state_field('y')\n\n def get_mission_pad_distance_z(self) -> int:\n \"\"\"Z distance to current mission pad\n Only available on Tello EDUs after calling enable_mission_pads\n Returns:\n int: distance in cm\n \"\"\"\n return self.get_state_field('z')\n\n def get_pitch(self) -> int:\n \"\"\"Get pitch in degree\n Returns:\n int: pitch in degree\n \"\"\"\n return self.get_state_field('pitch')\n\n def get_roll(self) -> int:\n \"\"\"Get roll in degree\n Returns:\n int: roll in degree\n \"\"\"\n return self.get_state_field('roll')\n\n def get_yaw(self) -> int:\n \"\"\"Get yaw in degree\n Returns:\n int: yaw in degree\n \"\"\"\n return self.get_state_field('yaw')\n\n def get_speed_x(self) -> int:\n \"\"\"X-Axis Speed\n Returns:\n int: speed\n \"\"\"\n return self.get_state_field('vgx')\n\n def get_speed_y(self) -> int:\n \"\"\"Y-Axis Speed\n Returns:\n int: speed\n \"\"\"\n return self.get_state_field('vgy')\n\n def get_speed_z(self) -> int:\n \"\"\"Z-Axis Speed\n Returns:\n int: speed\n \"\"\"\n return self.get_state_field('vgz')\n\n def get_acceleration_x(self) -> float:\n \"\"\"X-Axis Acceleration\n Returns:\n float: acceleration\n \"\"\"\n return self.get_state_field('agx')\n\n def get_acceleration_y(self) -> float:\n \"\"\"Y-Axis Acceleration\n Returns:\n float: acceleration\n \"\"\"\n return self.get_state_field('agy')\n\n def get_acceleration_z(self) -> float:\n \"\"\"Z-Axis Acceleration\n Returns:\n float: acceleration\n \"\"\"\n return self.get_state_field('agz')\n\n def get_lowest_temperature(self) -> int:\n \"\"\"Get lowest temperature\n Returns:\n int: lowest temperature (°C)\n \"\"\"\n return self.get_state_field('templ')\n\n def get_highest_temperature(self) -> int:\n \"\"\"Get highest temperature\n Returns:\n float: highest temperature (°C)\n \"\"\"\n return self.get_state_field('temph')\n\n def get_temperature(self) -> float:\n \"\"\"Get average temperature\n Returns:\n float: average temperature (°C)\n \"\"\"\n templ = self.get_lowest_temperature()\n temph = self.get_highest_temperature()\n return (templ + temph) / 2\n\n def get_height(self) -> int:\n \"\"\"Get current height in cm\n Returns:\n int: height in cm\n \"\"\"\n return self.get_state_field('h')\n\n def get_distance_tof(self) -> int:\n \"\"\"Get current distance value from TOF in cm\n Returns:\n int: TOF distance in cm\n \"\"\"\n return self.get_state_field('tof')\n\n def 
get_barometer(self) -> int:\n \"\"\"Get current barometer measurement in cm\n This resembles the absolute height.\n See https://en.wikipedia.org/wiki/Altimeter\n Returns:\n int: barometer measurement in cm\n \"\"\"\n return self.get_state_field('baro') * 100\n\n def get_flight_time(self) -> int:\n \"\"\"Get the time the motors have been active in seconds\n Returns:\n int: flight time in s\n \"\"\"\n return self.get_state_field('time')\n\n def get_battery(self) -> int:\n \"\"\"Get current battery percentage\n Returns:\n int: 0-100\n \"\"\"\n return self.get_state_field('bat')\n\n def get_udp_video_address(self) -> str:\n \"\"\"Internal method, you normally wouldn't call this youself.\n \"\"\"\n address_schema = 'udp://{ip}:{port}' # + '?overrun_nonfatal=1&fifo_size=5000'\n address = address_schema.format(ip=self.VS_UDP_IP, port=self.VS_UDP_PORT)\n return address\n\n def get_frame_read(self) -> 'BackgroundFrameRead':\n \"\"\"Get the BackgroundFrameRead object from the camera drone. Then, you just need to call\n backgroundFrameRead.frame to get the actual frame received by the drone.\n Returns:\n BackgroundFrameRead\n \"\"\"\n if self.background_frame_read is None:\n address = self.get_udp_video_address()\n self.background_frame_read = BackgroundFrameRead(self, address)\n self.background_frame_read.start()\n return self.background_frame_read\n\n def send_command_with_return(self, command: str, timeout: int = RESPONSE_TIMEOUT) -> str:\n \"\"\"Send command to Tello and wait for its response.\n Internal method, you normally wouldn't call this yourself.\n Return:\n bool/str: str with response text on success, False when unsuccessfull.\n \"\"\"\n # Commands very consecutive makes the drone not respond to them.\n # So wait at least self.TIME_BTW_COMMANDS seconds\n diff = time.time() - self.last_received_command_timestamp\n if diff < self.TIME_BTW_COMMANDS:\n self.LOGGER.debug('Waiting {} seconds to execute command: {}...'.format(diff, command))\n time.sleep(diff)\n\n self.LOGGER.info(\"Send command: '{}'\".format(command))\n timestamp = time.time()\n\n client_socket.sendto(command.encode('utf-8'), self.address)\n\n responses = self.get_own_udp_object()['responses']\n\n while not responses:\n if time.time() - timestamp > timeout:\n message = \"Aborting command '{}'. Did not receive a response after {} seconds\".format(command, timeout)\n self.LOGGER.warning(message)\n return message\n time.sleep(0.1) # Sleep during send command\n\n self.last_received_command_timestamp = time.time()\n\n first_response = responses.pop(0) # first datum from socket\n try:\n response = first_response.decode(\"utf-8\")\n except UnicodeDecodeError as e:\n self.LOGGER.error(e)\n return \"response decode error\"\n response = response.rstrip(\"\\r\\n\")\n\n self.LOGGER.info(\"Response {}: '{}'\".format(command, response))\n return response\n\n def send_command_without_return(self, command: str):\n \"\"\"Send command to Tello without expecting a response.\n Internal method, you normally wouldn't call this yourself.\n \"\"\"\n # Commands very consecutive makes the drone not respond to them. 
So wait at least self.TIME_BTW_COMMANDS seconds\n\n self.LOGGER.info(\"Send command (no response expected): '{}'\".format(command))\n client_socket.sendto(command.encode('utf-8'), self.address)\n\n def send_control_command(self, command: str, timeout: int = RESPONSE_TIMEOUT) -> bool:\n \"\"\"Send control command to Tello and wait for its response.\n Internal method, you normally wouldn't call this yourself.\n \"\"\"\n response = \"max retries exceeded\"\n for i in range(0, self.retry_count):\n response = self.send_command_with_return(command, timeout=timeout)\n\n if 'ok' in response.lower():\n return True\n\n self.LOGGER.debug(\"Command attempt #{} failed for command: '{}'\".format(i, command))\n\n self.raise_result_error(command, response)\n return False # never reached\n\n def send_read_command(self, command: str) -> str:\n \"\"\"Send given command to Tello and wait for its response.\n Internal method, you normally wouldn't call this yourself.\n \"\"\"\n\n response = self.send_command_with_return(command)\n\n try:\n response = str(response)\n except TypeError as e:\n self.LOGGER.error(e)\n\n if any(word in response for word in ('error', 'ERROR', 'False')):\n self.raise_result_error(command, response)\n return \"Error: this code should never be reached\"\n\n return response\n\n def send_read_command_int(self, command: str) -> int:\n \"\"\"Send given command to Tello and wait for its response.\n Parses the response to an integer\n Internal method, you normally wouldn't call this yourself.\n \"\"\"\n response = self.send_read_command(command)\n return int(response)\n\n def send_read_command_float(self, command: str) -> float:\n \"\"\"Send given command to Tello and wait for its response.\n Parses the response to an integer\n Internal method, you normally wouldn't call this yourself.\n \"\"\"\n response = self.send_read_command(command)\n return float(response)\n\n def raise_result_error(self, command: str, response: str) -> bool:\n \"\"\"Used to reaise an error after an unsuccessful command\n Internal method, you normally wouldn't call this yourself.\n \"\"\"\n tries = 1 + self.retry_count\n raise TelloException(\"Command '{}' was unsuccessful for {} tries. Latest response:\\t'{}'\"\n .format(command, tries, response))\n\n def connect(self, wait_for_state=True):\n \"\"\"Enter SDK mode. 
Call this before any of the control functions.\n \"\"\"\n self.send_control_command(\"command\")\n\n if wait_for_state:\n REPS = 20\n for i in range(REPS):\n if self.get_current_state():\n t = i / REPS # in seconds\n Tello.LOGGER.debug(\"'.connect()' received first state packet after {} seconds\".format(t))\n break\n time.sleep(1 / REPS)\n\n if not self.get_current_state():\n raise TelloException('Did not receive a state packet from the Tello')\n\n def send_keepalive(self):\n \"\"\"Send a keepalive packet to prevent the drone from landing after 15s\n \"\"\"\n self.send_control_command(\"keepalive\")\n\n def turn_motor_on(self):\n \"\"\"Turn on motors without flying (mainly for cooling)\n \"\"\"\n self.send_control_command(\"motoron\")\n\n def turn_motor_off(self):\n \"\"\"Turns off the motor cooling mode\n \"\"\"\n self.send_control_command(\"motoroff\")\n\n def initiate_throw_takeoff(self):\n \"\"\"Allows you to take off by throwing your drone within 5 seconds of this command\n \"\"\"\n self.send_control_command(\"throwfly\")\n self.is_flying = True\n\n def takeoff(self):\n \"\"\"Automatic takeoff.\n \"\"\"\n # Something it takes a looooot of time to take off and return a succesful takeoff.\n # So we better wait. Otherwise, it would give us an error on the following calls.\n self.send_control_command(\"takeoff\", timeout=Tello.TAKEOFF_TIMEOUT)\n self.is_flying = True\n\n def land(self):\n \"\"\"Automatic landing.\n \"\"\"\n self.send_control_command(\"land\")\n self.is_flying = False\n\n def streamon(self):\n \"\"\"Turn on video streaming. Use `tello.get_frame_read` afterwards.\n Video Streaming is supported on all tellos when in AP mode (i.e.\n when your computer is connected to Tello-XXXXXX WiFi ntwork).\n Currently Tello EDUs do not support video streaming while connected\n to a WiFi-network.\n\n !!! Note:\n If the response is 'Unknown command' you have to update the Tello\n firmware. 
This can be done using the official Tello app.\n \"\"\"\n self.send_control_command(\"streamon\")\n self.stream_on = True\n\n def streamoff(self):\n \"\"\"Turn off video streaming.\n \"\"\"\n self.send_control_command(\"streamoff\")\n self.stream_on = False\n\n def emergency(self):\n \"\"\"Stop all motors immediately.\n \"\"\"\n self.send_command_without_return(\"emergency\")\n self.is_flying = False\n\n def move(self, direction: str, x: int):\n \"\"\"Tello fly up, down, left, right, forward or back with distance x cm.\n Users would normally call one of the move_x functions instead.\n Arguments:\n direction: up, down, left, right, forward or back\n x: 20-500\n \"\"\"\n self.send_control_command(\"{} {}\".format(direction, x))\n\n def move_up(self, x: int):\n \"\"\"Fly x cm up.\n Arguments:\n x: 20-500\n \"\"\"\n self.move(\"up\", x)\n\n def move_down(self, x: int):\n \"\"\"Fly x cm down.\n Arguments:\n x: 20-500\n \"\"\"\n self.move(\"down\", x)\n\n def move_left(self, x: int):\n \"\"\"Fly x cm left.\n Arguments:\n x: 20-500\n \"\"\"\n self.move(\"left\", x)\n\n def move_right(self, x: int):\n \"\"\"Fly x cm right.\n Arguments:\n x: 20-500\n \"\"\"\n self.move(\"right\", x)\n\n def move_forward(self, x: int):\n \"\"\"Fly x cm forward.\n Arguments:\n x: 20-500\n \"\"\"\n self.move(\"forward\", x)\n\n def move_back(self, x: int):\n \"\"\"Fly x cm backwards.\n Arguments:\n x: 20-500\n \"\"\"\n self.move(\"back\", x)\n\n def rotate_clockwise(self, x: int):\n \"\"\"Rotate x degree clockwise.\n Arguments:\n x: 1-360\n \"\"\"\n self.send_control_command(\"cw {}\".format(x))\n\n def rotate_counter_clockwise(self, x: int):\n \"\"\"Rotate x degree counter-clockwise.\n Arguments:\n x: 1-3600\n \"\"\"\n self.send_control_command(\"ccw {}\".format(x))\n\n def flip(self, direction: str):\n \"\"\"Do a flip maneuver.\n Users would normally call one of the flip_x functions instead.\n Arguments:\n direction: l (left), r (right), f (forward) or b (back)\n \"\"\"\n self.send_control_command(\"flip {}\".format(direction))\n\n def flip_left(self):\n \"\"\"Flip to the left.\n \"\"\"\n self.flip(\"l\")\n\n def flip_right(self):\n \"\"\"Flip to the right.\n \"\"\"\n self.flip(\"r\")\n\n def flip_forward(self):\n \"\"\"Flip forward.\n \"\"\"\n self.flip(\"f\")\n\n def flip_back(self):\n \"\"\"Flip backwards.\n \"\"\"\n self.flip(\"b\")\n\n def go_xyz_speed(self, x: int, y: int, z: int, speed: int):\n \"\"\"Fly to x y z relative to the current position.\n Speed defines the traveling speed in cm/s.\n Arguments:\n x: -500-500\n y: -500-500\n z: -500-500\n speed: 10-100\n \"\"\"\n cmd = 'go {} {} {} {}'.format(x, y, z, speed)\n self.send_control_command(cmd)\n\n def curve_xyz_speed(self, x1: int, y1: int, z1: int, x2: int, y2: int, z2: int, speed: int):\n \"\"\"Fly to x2 y2 z2 in a curve via x2 y2 z2. 
Speed defines the traveling speed in cm/s.\n\n - Both points are relative to the current position\n - The current position and both points must form a circle arc.\n - If the arc radius is not within the range of 0.5-10 meters, it raises an Exception\n - x1/x2, y1/y2, z1/z2 can't both be between -20-20 at the same time, but can both be 0.\n\n Arguments:\n x1: -500-500\n x2: -500-500\n y1: -500-500\n y2: -500-500\n z1: -500-500\n z2: -500-500\n speed: 10-60\n \"\"\"\n cmd = 'curve {} {} {} {} {} {} {}'.format(x1, y1, z1, x2, y2, z2, speed)\n self.send_control_command(cmd)\n\n def go_xyz_speed_mid(self, x: int, y: int, z: int, speed: int, mid: int):\n \"\"\"Fly to x y z relative to the mission pad with id mid.\n Speed defines the traveling speed in cm/s.\n Arguments:\n x: -500-500\n y: -500-500\n z: -500-500\n speed: 10-100\n mid: 1-8\n \"\"\"\n cmd = 'go {} {} {} {} m{}'.format(x, y, z, speed, mid)\n self.send_control_command(cmd)\n\n def curve_xyz_speed_mid(self, x1: int, y1: int, z1: int, x2: int, y2: int, z2: int, speed: int, mid: int):\n \"\"\"Fly to x2 y2 z2 in a curve via x2 y2 z2. Speed defines the traveling speed in cm/s.\n\n - Both points are relative to the mission pad with id mid.\n - The current position and both points must form a circle arc.\n - If the arc radius is not within the range of 0.5-10 meters, it raises an Exception\n - x1/x2, y1/y2, z1/z2 can't both be between -20-20 at the same time, but can both be 0.\n\n Arguments:\n x1: -500-500\n y1: -500-500\n z1: -500-500\n x2: -500-500\n y2: -500-500\n z2: -500-500\n speed: 10-60\n mid: 1-8\n \"\"\"\n cmd = 'curve {} {} {} {} {} {} {} m{}'.format(x1, y1, z1, x2, y2, z2, speed, mid)\n self.send_control_command(cmd)\n\n def go_xyz_speed_yaw_mid(self, x: int, y: int, z: int, speed: int, yaw: int, mid1: int, mid2: int):\n \"\"\"Fly to x y z relative to mid1.\n Then fly to 0 0 z over mid2 and rotate to yaw relative to mid2's rotation.\n Speed defines the traveling speed in cm/s.\n Arguments:\n x: -500-500\n y: -500-500\n z: -500-500\n speed: 10-100\n yaw: -360-360\n mid1: 1-8\n mid2: 1-8\n \"\"\"\n cmd = 'jump {} {} {} {} {} m{} m{}'.format(x, y, z, speed, yaw, mid1, mid2)\n self.send_control_command(cmd)\n\n def enable_mission_pads(self):\n \"\"\"Enable mission pad detection\n \"\"\"\n self.send_control_command(\"mon\")\n\n def disable_mission_pads(self):\n \"\"\"Disable mission pad detection\n \"\"\"\n self.send_control_command(\"moff\")\n\n def set_mission_pad_detection_direction(self, x):\n \"\"\"Set mission pad detection direction. enable_mission_pads needs to be\n called first. When detecting both directions detecting frequency is 10Hz,\n otherwise the detection frequency is 20Hz.\n Arguments:\n x: 0 downwards only, 1 forwards only, 2 both directions\n \"\"\"\n self.send_control_command(\"mdirection {}\".format(x))\n\n def set_speed(self, x: int):\n \"\"\"Set speed to x cm/s.\n Arguments:\n x: 10-100\n \"\"\"\n self.send_control_command(\"speed {}\".format(x))\n\n def send_rc_control(self, left_right_velocity: int, forward_backward_velocity: int, up_down_velocity: int,\n yaw_velocity: int):\n \"\"\"Send RC control via four channels. 
Command is sent every self.TIME_BTW_RC_CONTROL_COMMANDS seconds.\n Arguments:\n left_right_velocity: -100~100 (left/right)\n forward_backward_velocity: -100~100 (forward/backward)\n up_down_velocity: -100~100 (up/down)\n yaw_velocity: -100~100 (yaw)\n \"\"\"\n def clamp100(x: int) -> int:\n return max(-100, min(100, x))\n\n if time.time() - self.last_rc_control_timestamp > self.TIME_BTW_RC_CONTROL_COMMANDS:\n self.last_rc_control_timestamp = time.time()\n cmd = 'rc {} {} {} {}'.format(\n clamp100(left_right_velocity),\n clamp100(forward_backward_velocity),\n clamp100(up_down_velocity),\n clamp100(yaw_velocity)\n )\n self.send_command_without_return(cmd)\n\n def set_wifi_credentials(self, ssid: str, password: str):\n \"\"\"Set the Wi-Fi SSID and password. The Tello will reboot afterwords.\n \"\"\"\n cmd = 'wifi {} {}'.format(ssid, password)\n self.send_control_command(cmd)\n\n def connect_to_wifi(self, ssid: str, password: str):\n \"\"\"Connects to the Wi-Fi with SSID and password.\n After this command the tello will reboot.\n Only works with Tello EDUs.\n \"\"\"\n cmd = 'ap {} {}'.format(ssid, password)\n self.send_control_command(cmd)\n\n def set_network_ports(self, state_packet_port: int, video_stream_port: int):\n \"\"\"Sets the ports for state packets and video streaming\n While you can use this command to reconfigure the Tello this library currently does not support\n non-default ports (TODO!)\n \"\"\"\n cmd = 'port {} {}'.format(state_packet_port, video_stream_port)\n self.send_control_command(cmd)\n\n def reboot(self):\n \"\"\"Reboots the drone\n \"\"\"\n self.send_command_without_return('reboot')\n\n def set_video_bitrate(self, bitrate: int):\n \"\"\"Sets the bitrate of the video stream\n Use one of the following for the bitrate argument:\n Tello.BITRATE_AUTO\n Tello.BITRATE_1MBPS\n Tello.BITRATE_2MBPS\n Tello.BITRATE_3MBPS\n Tello.BITRATE_4MBPS\n Tello.BITRATE_5MBPS\n \"\"\"\n cmd = 'setbitrate {}'.format(bitrate)\n self.send_control_command(cmd)\n\n def set_video_resolution(self, resolution: str):\n \"\"\"Sets the resolution of the video stream\n Use one of the following for the resolution argument:\n Tello.RESOLUTION_480P\n Tello.RESOLUTION_720P\n \"\"\"\n cmd = 'setresolution {}'.format(resolution)\n self.send_control_command(cmd)\n\n def set_video_fps(self, fps: str):\n \"\"\"Sets the frames per second of the video stream\n Use one of the following for the fps argument:\n Tello.FPS_5\n Tello.FPS_15\n Tello.FPS_30\n \"\"\"\n cmd = 'setfps {}'.format(fps)\n self.send_control_command(cmd)\n\n def set_video_direction(self, direction: int):\n \"\"\"Selects one of the two cameras for video streaming\n The forward camera is the regular 1080x720 color camera\n The downward camera is a grey-only 320x240 IR-sensitive camera\n Use one of the following for the direction argument:\n Tello.CAMERA_FORWARD\n Tello.CAMERA_DOWNWARD\n \"\"\"\n cmd = 'downvision {}'.format(direction)\n self.send_control_command(cmd)\n\n def send_expansion_command(self, expansion_cmd: str):\n \"\"\"Sends a command to the ESP32 expansion board connected to a Tello Talent\n Use e.g. 
tello.send_expansion_command(\"led 255 0 0\") to turn the top led red.\n \"\"\"\n cmd = 'EXT {}'.format(expansion_cmd)\n self.send_control_command(cmd)\n\n def query_speed(self) -> int:\n \"\"\"Query speed setting (cm/s)\n Returns:\n int: 1-100\n \"\"\"\n return self.send_read_command_int('speed?')\n\n def query_battery(self) -> int:\n \"\"\"Get current battery percentage via a query command\n Using get_battery is usually faster\n Returns:\n int: 0-100 in %\n \"\"\"\n return self.send_read_command_int('battery?')\n\n def query_flight_time(self) -> int:\n \"\"\"Query current fly time (s).\n Using get_flight_time is usually faster.\n Returns:\n int: Seconds elapsed during flight.\n \"\"\"\n return self.send_read_command_int('time?')\n\n def query_height(self) -> int:\n \"\"\"Get height in cm via a query command.\n Using get_height is usually faster\n Returns:\n int: 0-3000\n \"\"\"\n return self.send_read_command_int('height?')\n\n def query_temperature(self) -> int:\n \"\"\"Query temperature (°C).\n Using get_temperature is usually faster.\n Returns:\n int: 0-90\n \"\"\"\n return self.send_read_command_int('temp?')\n\n def query_attitude(self) -> dict:\n \"\"\"Query IMU attitude data.\n Using get_pitch, get_roll and get_yaw is usually faster.\n Returns:\n {'pitch': int, 'roll': int, 'yaw': int}\n \"\"\"\n response = self.send_read_command('attitude?')\n return Tello.parse_state(response)\n\n def query_barometer(self) -> int:\n \"\"\"Get barometer value (cm)\n Using get_barometer is usually faster.\n Returns:\n int: 0-100\n \"\"\"\n baro = self.send_read_command_int('baro?')\n return baro * 100\n\n def query_distance_tof(self) -> float:\n \"\"\"Get distance value from TOF (cm)\n Using get_distance_tof is usually faster.\n Returns:\n float: 30-1000\n \"\"\"\n # example response: 801mm\n tof = self.send_read_command('tof?')\n return int(tof[:-2]) / 10\n\n def query_wifi_signal_noise_ratio(self) -> str:\n \"\"\"Get Wi-Fi SNR\n Returns:\n str: snr\n \"\"\"\n return self.send_read_command('wifi?')\n\n def query_sdk_version(self) -> str:\n \"\"\"Get SDK Version\n Returns:\n str: SDK Version\n \"\"\"\n return self.send_read_command('sdk?')\n\n def query_serial_number(self) -> str:\n \"\"\"Get Serial Number\n Returns:\n str: Serial Number\n \"\"\"\n return self.send_read_command('sn?')\n\n def query_active(self) -> str:\n \"\"\"Get the active status\n Returns:\n str\n \"\"\"\n return self.send_read_command('active?')\n\n def end(self):\n \"\"\"Call this method when you want to end the tello object\n \"\"\"\n try:\n if self.is_flying:\n self.land()\n if self.stream_on:\n self.streamoff()\n except TelloException:\n pass\n\n if self.background_frame_read is not None:\n self.background_frame_read.stop()\n\n host = self.address[0]\n if host in drones:\n del drones[host]\n\n def __del__(self):\n self.end()\n\n\nclass BackgroundFrameRead:\n \"\"\"\n This class read frames using PyAV in background. 
Use\n backgroundFrameRead.frame to get the current frame.\n \"\"\"\n\n def __init__(self, tello, address):\n self.address = address\n self.frame = np.zeros([300, 400, 3], dtype=np.uint8)\n\n # Try grabbing frame with PyAV\n # According to issue #90 the decoder might need some time\n # https://github.com/damiafuentes/DJITelloPy/issues/90#issuecomment-855458905\n try:\n Tello.LOGGER.debug('trying to grab video frames...')\n self.container = av.open(self.address, timeout=(Tello.FRAME_GRAB_TIMEOUT, None))\n except av.error.ExitError:\n raise TelloException('Failed to grab video frames from video stream')\n\n self.stopped = False\n self.worker = Thread(target=self.update_frame, args=(), daemon=True)\n\n def start(self):\n \"\"\"Start the frame update worker\n Internal method, you normally wouldn't call this yourself.\n \"\"\"\n self.worker.start()\n\n def update_frame(self):\n \"\"\"Thread worker function to retrieve frames using PyAV\n Internal method, you normally wouldn't call this yourself.\n \"\"\"\n try:\n for frame in self.container.decode(video=0):\n self.frame = np.array(frame.to_image())\n if self.stopped:\n self.container.close()\n break\n except av.error.ExitError:\n raise TelloException('Do not have enough frames for decoding, please try again or increase video fps before get_frame_read()')\n\n def stop(self):\n \"\"\"Stop the frame update worker\n Internal method, you normally wouldn't call this yourself.\n \"\"\"\n self.stopped = True\n"
] | [ [ "numpy.zeros" ] ] |
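In the DJITelloPy source above, the only recorded API is `numpy.zeros`: `BackgroundFrameRead.__init__` seeds `self.frame` with `np.zeros([300, 400, 3], dtype=np.uint8)` so callers always get a valid (all-black) image before the first decoded frame arrives. A minimal standalone sketch of that placeholder-frame pattern (variable names here are illustrative, not from the repo):

```python
import numpy as np

# Placeholder frame in the spirit of BackgroundFrameRead.__init__ above:
# a black 300x400 3-channel image that consumers can read before the
# decoder has produced any real frames.
placeholder_frame = np.zeros([300, 400, 3], dtype=np.uint8)

assert placeholder_frame.shape == (300, 400, 3)
assert placeholder_frame.dtype == np.uint8
assert placeholder_frame.max() == 0  # stays all-black until a worker overwrites it
```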
GiuppoUni/warInspector | [ "c0b06ba355d481e2260f9476b38524270948808a" ] | [ "application.py" ] | [
"\"\"\"\r\n==================================================\r\n\r\n WarInspector web app\r\n \r\n==================================================\r\n\r\n\r\n\"\"\"\r\n\r\nprint(__doc__)\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom numpy import dot\r\nfrom numpy.linalg import norm\r\n\r\nfrom flask import Flask, flash, redirect, render_template, request, session, abort,send_from_directory,send_file,jsonify\r\n\r\nimport json\r\n\r\nfrom operator import itemgetter\r\n\r\n\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.decomposition import PCA\r\nimport matplotlib.pyplot as plt\r\nfrom selenium import webdriver\r\nfrom bs4 import BeautifulSoup\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.common.exceptions import TimeoutException\r\n\r\n\r\nPCA_WIDTH=\"300\"\r\nPCA_HEIGHT=\"250\"\r\n\r\nfolder=\"static/data/\"\r\nop = webdriver.ChromeOptions()\r\nop.add_argument('headless')\r\ndriver = webdriver.Chrome('/usr/bin/chromedriver',options=op)\r\n#1. Declare application\r\n# application= Flask(__name__)\r\napplication= Flask(__name__,root_path=\".\",template_folder='.')\r\n\r\n#2. Declare data stores\r\nclass DataStore():\r\n CountryName=None\r\n Year1=None\r\n Year2=None\r\ndata=DataStore()\r\n\r\n'''\r\ndef MDSMain(year,country):\r\n\r\n data = pd.io.parsers.read_csv(\r\n 'static/data/importNumbers1989-2018.csv' \r\n )\r\n print(data.columns, data)\r\n if year!=None:\r\n d=data[str(year)] \r\n else:\r\n d=data[\"2018\"] \r\n d=d.fillna(0)\r\n\r\n print( \"\\n\\n\",np.all(np.isfinite(d)), not np.any(np.isnan(d)))\r\n\r\n countries=data[\"Country\"]\r\n\r\n print(d,type(d),countries)\r\n\r\n dissM1=np.zeros((len(data),len(data))) #creates a zeros dissM\r\n dissM2=np.zeros((len(data),len(data))) #creates a zeros dissM\r\n dissM3=np.zeros((len(data),len(data))) #creates a zeros dissM\r\n\r\n for i in range(len(d)):\r\n for j in range (len(d)):\r\n dissM1[i][j]= abs(d[i]-d[j])\r\n d_i=d[i] \r\n d_j=d[j]\r\n if(d[i]+d[j]!= 0):\r\n dissM2[i][j]= abs (d[i]-d[j])/(d[i]+d[j])\r\n else:\r\n dissM2[i][j]= abs (d[i]-d[j])\r\n\r\n if( (d[i]+d[j]) !=0 ):\r\n dissM3[i][j]= abs (d[i]-d[j])/( (d[i]+d[j])/2 )\r\n else:\r\n dissM3[i][j]= abs (d[i]-d[j])\r\n\r\n \r\n\r\n\r\n mds = manifold.MDS(n_components=2, max_iter=300, eps=1e-9,\r\n dissimilarity=\"precomputed\")\r\n pos1 = mds.fit(dissM1).embedding_\r\n stress1 = mds.fit(dissM1).stress_\r\n pos2 = mds.fit(dissM2).embedding_\r\n stress2 = mds.fit(dissM2).stress_\r\n pos3 = mds.fit(dissM3).embedding_\r\n stress3 = mds.fit(dissM3).stress_\r\n\r\n s = 50\r\n\r\n fig1=plt.figure()\r\n plt.scatter(pos1[:, 0], pos1[:, 1], color='red',s=s, lw=0, label='d[i]-d[j]')\r\n for label, x, y in zip(countries, pos1[:, 0], pos1[:, 1]):\r\n plt.title(\"Absolute distance\",color=\"white\")\r\n plt.annotate(\r\n label,\r\n xy = (x, y), xytext = (-20, 20),\r\n textcoords = 'offset points', ha = 'right', va = 'bottom',\r\n bbox = dict(boxstyle = 'round,pad=0.3', fc = 'yellow', alpha = 0.5),\r\n arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0'))\r\n \r\n # fig, ax = plt.subplots(1)\r\n # ax.tick_params(axis='x', colors=\"white\")\r\n # ax.tick_params(axis='y', colors=\"white\")\r\n plt.legend() \r\n #plt.show()\r\n print(\"Stress 1:\",stress1)\r\n\r\n fig2=plt.figure()\r\n plt.scatter(pos2[:, 0], pos2[:, 1], color='red',s=s, lw=0, label='d[i]-d[j]/(d[i]+d[j])')\r\n for label, x, y in 
zip(countries, pos2[:, 0], pos2[:, 1]):\r\n plt.annotate(\r\n label,\r\n xy = (x, y), xytext = (-20, 20),\r\n textcoords = 'offset points', ha = 'right', va = 'bottom',\r\n bbox = dict(boxstyle = 'round,pad=0.3', fc = 'yellow', alpha = 0.5),\r\n arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0'))\r\n plt.legend() \r\n #plt.show()\r\n print(\"Stress 2:\",stress2)\r\n\r\n fig3=plt.figure() \r\n plt.scatter(pos3[:, 0], pos3[:, 1], color='red',s=s, lw=0, label='abs (d[i]-d[j])/( (d[i]+d[j])/2 )')\r\n for label, x, y in zip(countries, pos3[:, 0], pos3[:, 1]):\r\n plt.annotate(\r\n label,\r\n xy = (x, y), xytext = (-20, 20),\r\n textcoords = 'offset points', ha = 'right', va = 'bottom',\r\n bbox = dict(boxstyle = 'round,pad=0.3', fc = 'yellow', alpha = 0.5),\r\n arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0'))\r\n plt.legend() \r\n #plt.show()\r\n print(\"Stress 3:\",stress3)\r\n\r\n fig1InHtml=mpld3.fig_to_html(fig1)\r\n fig2InHtml=mpld3.fig_to_html(fig2)\r\n fig3InHtml=mpld3.fig_to_html(fig3)\r\n # fig3InHtml=\"\"\r\n\r\n return fig1InHtml + fig2InHtml + fig3InHtml\r\n'''\r\n\r\n'''\r\n=========\r\nPCA\r\n=========\r\n'''\r\n\r\ndef createDf(year1,year2,countries,features = ['IMPORT_TOTAL',\"EXPORT_TOTAL\", 'ARMY_TOTAL','REF_TOTAL', 'GDP_TOTAL', 'POP_TOTAL']):\r\n \r\n #refugees\r\n df_ref = pd.read_csv(folder+\"ref.csv\", header=4 )\r\n df_ref = df_ref.rename(columns=lambda x: x.strip())\r\n\r\n #population\r\n df_pop = pd.read_csv(folder + \"pop.csv\", header=4)\r\n df_pop = df_pop.rename(columns=lambda x: x.strip())\r\n\r\n #army\r\n df_arm = pd.read_csv(folder + \"army-dimensions.csv\", header=4)\r\n df_arm = df_arm.rename(columns=lambda x: x.strip())\r\n\r\n #gdp\r\n df_gdp = pd.read_csv(folder + \"gdp.csv\", header=4)\r\n df_gdp = df_gdp.rename(columns=lambda x: x.strip())\r\n\r\n \r\n #countries list\r\n # df_countries = pd.read_csv(folder + \"countriesAlpha3.csv\")\r\n # df_countries[\"name\"] = df_countries[\"name\"].apply(lambda x: x.strip() )\r\n\r\n year_range=[year1]\r\n if(year2!=year1):\r\n year_range = list(range(year1,year2+1))\r\n year_range_str = [\"Country Name\",\"Country Code\"] + [str(x) for x in year_range]\r\n print(year_range_str)\r\n\r\n\r\n\r\n\r\n df_imp = pd.read_csv(folder + \"df_imp_clean.csv\")\r\n \r\n print(df_imp) \r\n df_mrgd_imp = df_imp.iloc[:,[0,-1,-2,-3,-4]] \r\n df_mrgd_imp.rename(columns={\"code3\":\"Country Code\"}, inplace=True)\r\n # NOTE\r\n # Compute Mean for range of years \r\n df_mrgd_imp[\"IMPORT_TOTAL\"] = df_imp[year_range_str[2:]].mean(axis=1)\r\n df_arm[\"ARMY_TOTAL\"] = df_arm[year_range_str[2:]].mean(axis=1)\r\n df_ref[\"REF_TOTAL\"] = df_ref[year_range_str[2:]].mean(axis=1)\r\n df_gdp[\"GDP_TOTAL\"] = df_gdp[year_range_str[2:]].mean(axis=1)\r\n df_pop[\"POP_TOTAL\"] = df_pop[year_range_str[2:]].mean(axis=1)\r\n # print(df_arm)\r\n # print(df_ref)\r\n # print(df_gdp)\r\n\r\n # print(\"->\",df_mrgd_imp)\r\n \r\n # Merging for final IMP\r\n df_mrgd_imp = pd.merge(df_mrgd_imp,\r\n df_mrgd_imp[[\"Country Name\",\"Country Code\",\"IMPORT_TOTAL\"]],on=[\"Country Name\",\"Country Code\"])\r\n\r\n df_mrgd_imp['IMPORT_TOTAL'] = df_mrgd_imp.groupby(['Country Code'])['IMPORT_TOTAL_x'].transform('sum')\r\n df_mrgd_imp = df_mrgd_imp.drop_duplicates(subset=[\"Country Name\",\"Country Code\"])\r\n\r\n\r\n df_mrgd_imp = pd.merge(df_mrgd_imp,\r\n df_arm[[\"Country Code\",\"ARMY_TOTAL\"]],on=\"Country Code\")\r\n df_mrgd_imp['ARMY_TOTAL'] = df_mrgd_imp.groupby(['Country Code'])['ARMY_TOTAL'].transform('sum')\r\n 
df_mrgd_imp = df_mrgd_imp.drop_duplicates(subset=[\"Country Name\",\"Country Code\"])\r\n\r\n df_mrgd_imp = pd.merge(df_mrgd_imp,\r\n df_ref[[\"Country Code\",\"REF_TOTAL\"]],on=\"Country Code\")\r\n df_mrgd_imp['REF_TOTAL'] = df_mrgd_imp.groupby(['Country Code'])['REF_TOTAL'].transform('sum')\r\n df_mrgd_imp = df_mrgd_imp.drop_duplicates(subset=[\"Country Name\",\"Country Code\"])\r\n\r\n df_mrgd_imp = pd.merge(df_mrgd_imp,\r\n df_gdp[[\"Country Code\",\"GDP_TOTAL\"]],on=\"Country Code\")\r\n df_mrgd_imp['GDP_TOTAL'] = df_mrgd_imp.groupby(['Country Code'])['GDP_TOTAL'].transform('sum')\r\n df_mrgd_imp = df_mrgd_imp.drop_duplicates(subset=[\"Country Name\",\"Country Code\"])\r\n\r\n df_mrgd_imp = pd.merge(df_mrgd_imp,\r\n df_pop[[\"Country Code\",\"POP_TOTAL\"]],on=\"Country Code\")\r\n df_mrgd_imp['POP_TOTAL'] = df_mrgd_imp.groupby(['Country Code'])['POP_TOTAL'].transform('sum')\r\n df_mrgd_imp = df_mrgd_imp.drop_duplicates(subset=[\"Country Name\",\"Country Code\"])\r\n\r\n # Merging for final EXP\r\n\r\n df_exp = pd.read_csv(folder + \"df_exp_clean.csv\")\r\n\r\n df_mrgd_imp[\"EXPORT_TOTAL\"] = df_exp[year_range_str[2:]].mean(axis=1)\r\n\r\n df_mrgd_imp = pd.merge(df_mrgd_imp,\r\n df_mrgd_imp[[\"Country Name\",\"Country Code\",\"EXPORT_TOTAL\"]],on=[\"Country Name\",\"Country Code\"])\r\n\r\n\r\n df_mrgd_imp['EXPORT_TOTAL'] = df_mrgd_imp.groupby(['Country Code'])['EXPORT_TOTAL_x'].transform('sum')\r\n df_mrgd_imp = df_mrgd_imp.drop_duplicates(subset=[\"Country Name\",\"Country Code\"])\r\n\r\n # print(df_mrgd_imp)\r\n return df_mrgd_imp\r\n\r\n\r\ndef pcaMain(year1=2016,year2=2019,countries=[\"ITA\"],features = ['IMPORT_TOTAL',\"EXPORT_TOTAL\", 'ARMY_TOTAL','REF_TOTAL', 'GDP_TOTAL', 'POP_TOTAL']):\r\n\r\n\r\n df_mrgd = createDf(year1,year2,countries)\r\n print(df_mrgd)\r\n print(\"Plotting\",countries)\r\n def pca_plot(df):\r\n # df['target']= df[\"Country Name\"].apply(lambda x: \"selected\" if x.strip() in selected\r\n # else \"not selected\")\r\n # print(df)\r\n df = df.rename(columns={\"Country Code\":\"target\"})\r\n\r\n # Separating out the features\r\n x = df.loc[:, features].values\r\n # Separating out the target\r\n y = df.loc[:,['target']].values\r\n # Standardizing the features\r\n x = StandardScaler().fit_transform(x)\r\n\r\n \r\n pca = PCA(n_components=2)\r\n principalComponents = pca.fit_transform(x)\r\n principalDf = pd.DataFrame(data = principalComponents\r\n , columns = ['principal component 1', 'principal component 2'])\r\n \r\n\r\n finalDf = pd.concat([principalDf, df[['target']]], axis = 1)\r\n finalDf = pd.concat([finalDf, df[['Country Name']]], axis = 1)\r\n finalDf = pd.concat([finalDf, df[['IMPORT_TOTAL']]], axis = 1)\r\n finalDf = pd.concat([finalDf, df[['EXPORT_TOTAL']]], axis = 1)\r\n\r\n print(finalDf)\r\n\r\n\r\n fig = plt.figure(figsize = (8,8))\r\n ax = fig.add_subplot(1,1,1) \r\n # ax.set_xlabel('Principal Component 1', fontsize = 15)\r\n # ax.set_ylabel('Principal Component 2', fontsize = 15)\r\n # ax.set_title('2 component PCA', fontsize = 20)\r\n colors = ['y', 'b']\r\n\r\n indicesToKeep = finalDf[\"target\"].apply(lambda x: x in countries)\r\n\r\n\r\n\r\n indicesToKeep2 = ~ indicesToKeep\r\n pc1 = finalDf.loc[indicesToKeep2, 'principal component 1'].to_list()\r\n pc2 = finalDf.loc[indicesToKeep2, 'principal component 2'].to_list()\r\n names = finalDf.loc[indicesToKeep2, \"target\"].to_list()\r\n names2 = finalDf.loc[indicesToKeep2, \"Country Name\"].to_list()\r\n not_selected_labels = [False]*len(names)\r\n imp = finalDf.loc[indicesToKeep2, 
\"IMPORT_TOTAL\"].to_list()\r\n exp = finalDf.loc[indicesToKeep2, \"EXPORT_TOTAL\"].to_list()\r\n\r\n\r\n # print(finalDf)\r\n # No grid\r\n plt.grid(b=None)\r\n\r\n ax.scatter( pc1, pc2 \r\n , c = \"b\"\r\n , s = 10)\r\n\r\n ax.grid()\r\n # non targets:\r\n for i,r in enumerate(zip(names, pc1, pc2)):\r\n if( pc1[i] > 0.2 and pc2[i] > 0.2):\r\n ax.annotate(r[0], (pc1[i]+0.1, pc2[i]+0.1 ))\r\n\r\n pc1Sel = finalDf.loc[indicesToKeep, 'principal component 1'].to_list()\r\n pc2Sel = finalDf.loc[indicesToKeep, 'principal component 2'].to_list()\r\n namesSel = finalDf.loc[indicesToKeep, \"target\"].to_list()\r\n namesSel2 = finalDf.loc[indicesToKeep, \"Country Name\"].to_list()\r\n selected_labels = [True]*len(namesSel)\r\n impSel = finalDf.loc[indicesToKeep, \"IMPORT_TOTAL\"].to_list()\r\n expSel = finalDf.loc[indicesToKeep, \"EXPORT_TOTAL\"].to_list()\r\n \r\n print(\"MAX\",finalDf['IMPORT_TOTAL'].max(),finalDf['EXPORT_TOTAL'].max())\r\n \r\n scatter = ax.scatter( pc1Sel, pc2Sel \r\n , c = \"y\"\r\n , s = 10,edgecolors='r')\r\n for i,r in enumerate(zip(namesSel, pc1Sel, pc2Sel)):\r\n ax.annotate(r[0], (pc1Sel[i]+0.1, pc2Sel[i]+0.1 ))\r\n\r\n # ax.legend( scatter,title=\"Selected:\", labels=countries)\r\n \r\n print(namesSel)\r\n # plt.show()\r\n\r\n print(\"pca.explained_variance_ratio_\",pca.explained_variance_ratio_)\r\n\r\n # Prepare data for d3\r\n\r\n return [list(zip(pc1,pc2,names,names2,not_selected_labels,imp,exp))\r\n +list(zip(pc1Sel,pc2Sel,namesSel,namesSel2,selected_labels,impSel,expSel))]\r\n\r\n data_imp = pca_plot(df_mrgd)\r\n # print(\"data_imp\",data_imp)\r\n data_imp = [sorted( data_imp[0], key=itemgetter(2))]\r\n print(\"data_imp\",data_imp)\r\n return data_imp\r\n\r\n\r\n\r\n\r\n# /**********\r\n# * ROUTES *\r\n# **********/\r\n \r\n\r\n@application.route(\"/main\",methods=[\"GET\",\"POST\"])\r\n\r\n#3. 
Define main code\r\n@application.route(\"/\",methods=[\"GET\",\"POST\"])\r\ndef homepage():\r\n print(\"--------------- In python doing stuff (1.5s)...\")\r\n # time.sleep(1.5)\r\n print(\"--------------- Rendering index\")\r\n\r\n # CountryName = request.form.get('Country_field',DEFAULT_STATE)\r\n # Year1 = request.form.get('Year_field1', DEFAULT_YEARS[0])\r\n # Year2 = request.form.get('Year_field2', DEFAULT_YEARS[1])\r\n\r\n # data.CountryName=CountryName\r\n # data.Year1=Year1\r\n # data.Year2=Year2\r\n \r\n return render_template(\"index.html\")\r\n # return render_template(\"myIndex.html\")\r\n\r\n\r\n@application.route(\"/get-data\",methods=[\"POST\"])\r\ndef returnPCAData():\r\n\r\n print(\"[S] Received request for pca\")\r\n json_data=request.data \r\n # print(json_data)\r\n parsed_json = (json.loads(json_data))\r\n # print(parsed_json[\"country\"])\r\n\r\n country = parsed_json['country']\r\n year1 = parsed_json['year1']\r\n year2 = parsed_json['year2']\r\n features = parsed_json[\"features\"]\r\n print(\"[S] Params:\",year1,year2,country)\r\n\r\n\r\n\r\n #s = MDSMain(year1,None) \r\n # s1,s2 = PCAMain(year1,year2, country )\r\n\r\n # old_dimensions_string = '\"width\": 800.0, \"height\": 800.0'\r\n # new_dimensions_string = '\"width\":' + PCA_WIDTH + ',\"height\":' + PCA_HEIGHT\r\n \r\n # s1 = s1.replace(\"<style>\",\"\").replace(\"</style>\",\"\")\\\r\n # .replace(old_dimensions_string,new_dimensions_string)\r\n \r\n # s2 = s2.replace(\"<style>\",\"\").replace(\"</style>\",\"\")\\\r\n # .replace(old_dimensions_string,new_dimensions_string)\r\n\r\n # #print(s)\r\n # return jsonify(data.CountryName,data.Year1,data.Year2,s1,s2)\r\n\r\n data_to_d3 = pcaMain(year1,year2,country,features)\r\n \r\n \r\n # link = driver.find_element_by_xpath('//*[@class=\"gct-alerts__latest-alert\"]')\r\n # link.click()\r\n # alerts = []\r\n # dates = []\r\n # for a in soup.findAll('div', attrs={'class':'gct-alerts__alert-body gct-alerts__alert-body--all'}):\r\n # alerts.append(a)\r\n # for d in soup.findAll('span', attrs={'class':'gct-alerts__alert-date gct-alerts__alert-date--all'}):\r\n # dates.append(d)\r\n\r\n return jsonify(data.CountryName,data.Year1,data.Year2,data_to_d3 )\r\n\r\n@application.route(\"/get-news\",methods=[\"GET\"])\r\ndef returnNews():\r\n # oldlink = \"https://www.cfr.org/global-conflict-tracker/?category=us&conflictType=1099&vm=grid\"\r\n link = \"https://www.crisisgroup.org/crisiswatch\"\r\n driver.get(link)\r\n content = driver.page_source\r\n soup = BeautifulSoup(content,\"html.parser\")\r\n # alert = soup.find('span', attrs={'class':'gct-alerts__latest-alert'})\r\n alert_parent = soup.find('p', attrs={'class':'[ u-mar-b0 u-mar-t15 ] [ u-fs13 u-lh15 u-fwl u-c-white:link ]'})\r\n alert = alert_parent.findAll('a', attrs={'class':'js-scrollTo'})\r\n # if(alert.text==\"...\"):\r\n # driver.get(link)\r\n # content = driver.page_source\r\n # soup = BeautifulSoup(content,\"html.parser\")\r\n\r\n last_update_parent = soup.find('h3', attrs={'class':'[ u-fs12 ]'})\r\n last_update = last_update_parent.find('span')\r\n country_array = [a.text for a in alert] \r\n print(alert,last_update)\r\n alert = str(alert).replace(\",\",\", \").replace(\"[\",\"\").replace(\"]\",\"\").replace('href=\"/crisiswatch#','href=\"https://www.crisisgroup.org/crisiswatch#')\r\n return jsonify(alert,str(\"Updated on: \"+last_update.text),str(country_array))\r\n\r\n# Other routes\r\n@application.route('/about')\r\ndef about():\r\n return 
render_template('static/html/about.html')\r\n\r\n@application.route('/weapons')\r\ndef weapons():\r\n years = request.args.get('years')\r\n if(years!=None):\r\n years = years.split(\",\") #if key doesn't exist, returns None\r\n countries = request.args.get('countries')\r\n if(countries != None):\r\n countries = countries.split(\",\") #if key doesn't exist, returns None\r\n countries = \",\".join(countries)\r\n print(\"---------\",years,jsonify(str(countries)))\r\n return render_template('static/html/weapons.html',years=years,countries=countries)\r\n\r\n@application.route('/countries')\r\ndef countries():\r\n return render_template('static/html/countries.html')\r\n\r\n\r\nif __name__ == \"__main__\":\r\n application.run(debug=True)\r\n\r\n\r\n\r\n"
] | [ [ "pandas.merge", "pandas.read_csv", "pandas.concat", "pandas.DataFrame", "matplotlib.pyplot.grid", "sklearn.preprocessing.StandardScaler", "sklearn.decomposition.PCA", "matplotlib.pyplot.figure" ] ] |
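The `pca_plot` helper in `application.py` above standardizes the selected feature columns with `StandardScaler` and projects them onto two axes with `PCA(n_components=2)` before handing the coordinates to the front end. A minimal sketch of that standardize-then-project step on synthetic data (the country rows and numbers below are invented for illustration, not taken from the app's CSV files):

```python
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA

# Synthetic stand-in for the merged country dataframe (values are illustrative only).
df = pd.DataFrame({
    "target": ["ITA", "FRA", "DEU", "ESP"],
    "IMPORT_TOTAL": [120.0, 95.0, 210.0, 60.0],
    "EXPORT_TOTAL": [80.0, 150.0, 310.0, 40.0],
})

features = ["IMPORT_TOTAL", "EXPORT_TOTAL"]
x = StandardScaler().fit_transform(df[features].values)  # zero mean, unit variance per column

pca = PCA(n_components=2)
components = pca.fit_transform(x)  # shape (4, 2): one (PC1, PC2) pair per country

principal_df = pd.DataFrame(
    components, columns=["principal component 1", "principal component 2"]
)
final_df = pd.concat([principal_df, df[["target"]]], axis=1)
print(final_df)
print("explained variance ratio:", pca.explained_variance_ratio_)
```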
Wong-Lab/deepcell-label | [ "b07f93bed342b470d0662c898e4c2fc8ccb5e9c2" ] | [ "deepcell_label/conftest.py" ] | [
"\"\"\"Tests for the DeepCell Label Flask App.\"\"\"\n\nimport os\n\nfrom flask_sqlalchemy import SQLAlchemy\nimport numpy as np\nimport pytest\nfrom pytest_lazyfixture import lazy_fixture\nfrom unittest.mock import MagicMock\n\nfrom deepcell_label import create_app # pylint: disable=C0413\nfrom deepcell_label.loaders import Loader\n\n\n# flask-sqlalchemy fixtures from http://alexmic.net/flask-sqlalchemy-pytest/\n\n\nTESTDB_PATH = '/tmp/test_project.db'\nTEST_DATABASE_URI = 'sqlite:///{}'.format(TESTDB_PATH)\n\n\n# TODO: Could this become a fixture?\nclass DummyLoader(Loader):\n def __init__(self, raw=None, labels=None, cell_info=None, path='test.npz'):\n super().__init__()\n\n if raw is None:\n raw = np.zeros((1, 1, 1, 1))\n\n if labels is None:\n labels = np.zeros(raw.shape)\n elif labels.shape != raw.shape:\n raw = np.zeros(labels.shape)\n\n self.path = path\n self.raw_array = raw\n self.label_array = labels\n self.add_semantic_labels() # computes cell_ids\n if cell_info is not None:\n self.cell_info = cell_info\n\n\n@pytest.fixture(scope='session')\ndef app():\n \"\"\"Session-wide test `Flask` application.\"\"\"\n\n if os.path.exists(TESTDB_PATH):\n os.unlink(TESTDB_PATH)\n\n yield create_app(\n TESTING=True,\n SQLALCHEMY_DATABASE_URI=TEST_DATABASE_URI,\n )\n\n os.unlink(TESTDB_PATH)\n\n\n@pytest.fixture(scope='session')\ndef _db(app):\n \"\"\"\n Provide the transactional fixtures with access to the database via a Flask-SQLAlchemy\n database connection.\n\n https://pypi.org/project/pytest-flask-sqlalchemy/\n \"\"\"\n db = SQLAlchemy(app=app)\n return db\n"
] | [ [ "numpy.zeros" ] ] |
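`DummyLoader` in the conftest above relies on `numpy.zeros` for its defaults and forces `raw` and `labels` to end up with one shared shape before handing them to the real `Loader` machinery. A small sketch of just that fallback logic pulled out into a free function (the function name is ours, not part of the repo):

```python
import numpy as np

def default_arrays(raw=None, labels=None):
    """Mirror DummyLoader's zero-filled defaults: raw and labels always share a shape."""
    if raw is None:
        raw = np.zeros((1, 1, 1, 1))
    if labels is None:
        labels = np.zeros(raw.shape)
    elif labels.shape != raw.shape:
        raw = np.zeros(labels.shape)
    return raw, labels

raw, labels = default_arrays(labels=np.zeros((1, 5, 5, 2)))
assert raw.shape == labels.shape == (1, 5, 5, 2)
```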
ericchen12377/Leetcode-Algorithm-Python | [ "eb58cd4f01d9b8006b7d1a725fc48910aad7f192" ] | [ "1stRound/Medium/973-K Closest Points to Origin/Argsort.py" ] | [
"import numpy as np\n\n\nclass Solution:\n def kClosest(self, points: List[List[int]], K: int) -> List[List[int]]:\n res = []\n for p in points:\n d = np.sqrt(p[0]**2 + p[1]**2)\n res.append(d)\n idx = np.argsort(res)[:K]\n print(idx)\n return [points[i] for i in idx]\n"
] | [ [ "numpy.argsort", "numpy.sqrt" ] ] |
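`Argsort.py` above computes each point's Euclidean distance to the origin with `np.sqrt` and keeps the `K` smallest via `np.argsort`; note it assumes LeetCode's runtime, where `List` from `typing` is pre-imported. A self-contained sketch of the same argsort idea as a standalone function:

```python
from typing import List

import numpy as np

def k_closest(points: List[List[int]], k: int) -> List[List[int]]:
    # Distance of each point from the origin; squared distances would rank
    # identically since sqrt is monotonic.
    dists = [np.sqrt(x ** 2 + y ** 2) for x, y in points]
    idx = np.argsort(dists)[:k]  # indices of the k smallest distances
    return [points[i] for i in idx]

print(k_closest([[1, 3], [-2, 2]], 1))           # [[-2, 2]]
print(k_closest([[3, 3], [5, -1], [-2, 4]], 2))  # [[3, 3], [-2, 4]]
```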
temuller/PISCoLA | [ "e380603155991c267c26c4c93dfd650b9777b6b9" ] | [ "src/piscola/filter_utils.py" ] | [
"import os\nimport numpy as np\nimport piscola\n\ndef integrate_filter(spectrum_wave, spectrum_flux, filter_wave, filter_response, response_type='photon'):\n \"\"\"Calcultes the flux density of an SED given a filter response.\n\n Parameters\n ----------\n spectrum_wave : array\n Spectrum's wavelength range.\n spectrum_flux : array\n Spectrum's flux density distribution.\n filter_wave : array\n Filter's wavelength range.\n filter_response : array\n Filter's response function.\n response_type : str, default ``photon``\n Filter's response type. Either ``photon`` or ``energy``.\n\n Returns\n -------\n flux_filter : float\n Flux density.\n\n \"\"\"\n\n # truncate the filter at both sides by the same amount if it is not not cover by most of the spectrum range\n imax = np.argmin(np.abs(spectrum_wave.max() - filter_wave)) + 1\n imin = np.argmin(np.abs(spectrum_wave.min() - filter_wave))\n\n if imax != len(filter_wave):\n imin = len(filter_wave) - imax\n elif imin != 0:\n imax = len(filter_wave) - imin\n\n min_index, max_index = filter_effective_range(filter_response)\n assert (imin <= min_index) and (imax >= max_index), 'The spectrum does not cover enough range of the filter used.'\n\n filter_wave = filter_wave[imin:imax]\n filter_response = filter_response[imin:imax]\n\n #check filter response type\n if response_type == 'energy':\n filter_response = filter_response.copy()/filter_wave\n\n interp_response = np.interp(spectrum_wave, filter_wave, filter_response, left=0.0, right=0.0)\n I1 = np.trapz(spectrum_flux*interp_response*spectrum_wave, spectrum_wave)\n I2 = np.trapz(filter_response*filter_wave, filter_wave)\n flux_filter = I1/I2\n\n return flux_filter\n\n\ndef calc_eff_wave(spectrum_wave, spectrum_flux, filter_wave, filter_response, response_type='photon'):\n \"\"\"Calcultes the effective wavelength of the filter given an SED.\n\n Parameters\n ----------\n spectrum_wave : array\n Spectrum's wavelength range.\n spectrum_flux : array\n Spectrum's flux density distribution.\n filter_wave : array\n Filter's wavelength range.\n filter_response : array\n Filter's response function.\n response_type : str, default ``photon``\n Filter's response type. Either ``photon`` or ``energy``.\n\n Returns\n -------\n eff_wave : float\n Filter's effective wavelength.\n\n \"\"\"\n\n #check filter response type\n if response_type == 'energy':\n filter_response = filter_response/filter_wave\n\n interp_response = np.interp(spectrum_wave, filter_wave, filter_response, left=0.0, right=0.0)\n I1 = np.trapz((spectrum_wave**2)*interp_response*spectrum_flux, spectrum_wave)\n I2 = np.trapz(spectrum_wave*interp_response*spectrum_flux, spectrum_wave)\n eff_wave = I1/I2\n\n return eff_wave\n\n\ndef calc_pivot_wave(filter_wave, filter_response, response_type='photon'):\n \"\"\"Calcultes the pivot wavelength for the given filter.\n\n Parameters\n ----------\n filter_wave : array\n Filter's wavelength range.\n filter_response : array\n Filter's response function.\n response_type : str, default ``photon``\n Filter's response type. 
Either ``photon`` or ``energy``.\n\n Returns\n -------\n pivot_wave : float\n Filter's pivot wavelength.\n\n \"\"\"\n\n #check filter response type\n if response_type == 'energy':\n filter_response = filter_response/filter_wave\n\n I1 = np.trapz(filter_response*filter_wave, filter_wave)\n I2 = np.trapz(filter_response/filter_wave, filter_wave)\n pivot_wave = np.sqrt(I1/I2)\n\n return pivot_wave\n\n\ndef calc_zp(filter_wave, filter_response, response_type, mag_sys, filter_name):\n \"\"\"Calculates the zero point in the AB, Vega or BD17 magnitude systems.\n\n Parameters\n ----------\n filter_wave : array\n Filter's wavelength range.\n filter_response : array\n Filter's response function.\n response_type : str, default ``photon``\n Filter's response type. Either ``photon`` or ``energy``.\n mag_sys : str\n Magnitude system. For example, ``AB``, ``BD17`` or ``Vega``.\n filter_name : str\n Filter name.\n\n Returns\n -------\n zp : float\n Zero-point in the given natural magnitude system.\n\n \"\"\"\n\n path = piscola.__path__[0]\n mag_sys_dict = {}\n mag_sys_file_path = os.path.join(path, 'standards/magnitude_systems.txt')\n with open(mag_sys_file_path) as mag_sys_file:\n for line in mag_sys_file:\n (key, val) = line.split() # key:magnitude system name, val: file with natural system values\n mag_sys_dict[key] = val\n\n assert mag_sys.upper() in mag_sys_dict.keys(), f\"magnitude system '{mag_sys.upper()}' not found in '{mag_sys_file_path}'\"\n\n file_path = os.path.join(path, 'standards', mag_sys_dict[mag_sys.upper()])\n\n if ('ab' in mag_sys.split('_')) or ('AB' in mag_sys.split('_')):\n c = 2.99792458e18 # speed of light in [Angstroms/s]\n ab_wave = np.arange(1000, 250000, 5)\n ab_flux = 3631e-23*c/ab_wave**2 # in [erg s^-1 cm^-2 A^-1]\n f_ab = integrate_filter(ab_wave, ab_flux, filter_wave, filter_response, response_type)\n\n # get ZP offsets\n with open(file_path, 'rt') as ab_sys_file:\n ab_mag = [line.split() for line in ab_sys_file if filter_name in line.split()]\n if ab_mag:\n zp = 2.5*np.log10(f_ab) + eval(ab_mag[0][1])\n else:\n raise ValueError(f'Could not find \"{filter_name}\" filter in {file_path}')\n\n elif 'vega' in mag_sys.lower():\n vega_sed_file = os.path.join(path, 'standards/alpha_lyr_stis_005.dat')\n spectrum_wave, spectrum_flux = np.loadtxt(vega_sed_file).T\n f_vega = integrate_filter(spectrum_wave, spectrum_flux, filter_wave, filter_response, response_type)\n zp = 2.5*np.log10(f_vega)\n\n elif 'bd17' in mag_sys.lower():\n # get ZP offsets\n with open(file_path, 'rt') as bd17_sys_file:\n standard_sed = [line.split()[1] for line in bd17_sys_file if 'standard_sed:' in line.split()][0]\n\n bd17_sed_file = os.path.join(path, 'standards', standard_sed)\n spectrum_wave, spectrum_flux = np.loadtxt(bd17_sed_file).T\n f_bd17 = integrate_filter(spectrum_wave, spectrum_flux, filter_wave, filter_response, response_type)\n\n with open(file_path, 'rt') as bd17_sys_file:\n bd17_mag = [line.split() for line in bd17_sys_file if filter_name in line.split()]\n\n if bd17_mag:\n zp = 2.5*np.log10(f_bd17) + eval(bd17_mag[0][1])\n else:\n raise ValueError(f'Could not find \"{filter_name}\" filter in {file_path}')\n else:\n raise ValueError(f'Could not find \"{mag_sys}\" magnitude system in the implemented systems of the code')\n\n return zp\n\n\ndef filter_effective_range(filter_response, percent=99.0):\n \"\"\"Finds the min and max indexes which contain at least the desire percentage of the filter's\n response-function area.\n\n **Note:** each index contains the wanted area independently from the 
other.\n\n Parameters\n ----------\n filter_response : array\n Filter's response function.\n percent : float, default ``99.0``\n Percentage of the filter's area that wants to be kept.\n\n Returns\n -------\n min_index : int\n Minimum index containing the wanted area of the filter.\n max_index : int\n Maximum index containing the wanted area of the filter.\n\n \"\"\"\n\n for min_index in range(len(filter_response)):\n max_index = len(filter_response) - min_index\n area = 100*np.trapz(filter_response[min_index:max_index])/np.trapz(filter_response)\n if area < percent:\n break\n\n # to prevent going beyond the edges of the array\n if min_index == 0:\n min_index += 1\n max_index -= 1\n\n min_index -=1\n max_index += 1\n\n return min_index, max_index\n"
] | [ [ "numpy.sqrt", "numpy.arange", "numpy.log10", "numpy.interp", "numpy.trapz", "numpy.loadtxt" ] ] |
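`integrate_filter` in `filter_utils.py` above interpolates the filter response onto the spectrum's wavelength grid with `np.interp`, then takes the ratio of two `np.trapz` integrals (flux times response times wavelength, over response times wavelength) for a photon-counting response. A toy sketch of that ratio using a flat spectrum and a boxcar filter (both arrays are invented for illustration):

```python
import numpy as np

# Toy SED: flat flux density over 4000-9000 Angstrom (values are illustrative).
spectrum_wave = np.arange(4000.0, 9000.0, 5.0)
spectrum_flux = np.full_like(spectrum_wave, 1e-17)  # erg s^-1 cm^-2 A^-1

# Toy boxcar "filter" covering 5000-6000 Angstrom.
filter_wave = np.arange(5000.0, 6000.0, 5.0)
filter_response = np.ones_like(filter_wave)

# Same photon-counting integral pattern as integrate_filter: put R(lambda) on
# the spectrum grid, then take the wavelength-weighted trapz ratio.
interp_response = np.interp(spectrum_wave, filter_wave, filter_response, left=0.0, right=0.0)
I1 = np.trapz(spectrum_flux * interp_response * spectrum_wave, spectrum_wave)
I2 = np.trapz(filter_response * filter_wave, filter_wave)
flux_filter = I1 / I2
print(flux_filter)  # ~1e-17 for a flat input spectrum
```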
bycxw/ETC4Rec | [ "8f3074949a0f3731eac32e0cec36a5d29d97201f" ] | [ "etclayers/transformer.py" ] | [
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Transformer layers for ETC.\"\"\"\n\nimport functools\nfrom typing import List, Optional, Text\n\nimport tensorflow.compat.v1 as tf\n\nimport tensor_utils\nfrom etclayers import attention\nfrom etclayers import recompute_grad as recompute_grad_lib\nfrom etclayers import wrappers\n\n\nclass GlobalLocalTransformerLayers(tf.keras.layers.Layer):\n \"\"\"A sequence of Transformer layers with factorized attention for long inputs.\n\n These layers should accommodate inputs much larger than standard Transformer\n (using full self-attention). The input is divided between a large \"long\"\n input and small \"global\" input. \"Long-to-global\", \"global-to-global\",\n and \"global-to-long\" attention are all full attention, while \"long-to-long\"\n attention uses local self-attention. As a result, computational complexity\n scales linearly with the \"long\" input length rather than quadratically as in\n standard Transformer.\n\n The configuration is very similar to `RelativeTransformerLayers` apart\n from the separate \"global\" and \"long\" settings.\n\n See the ETC paper for more details: https://arxiv.org/abs/2004.08483\n \"\"\"\n\n def __init__(self,\n long_hidden_size: int,\n global_hidden_size: int,\n num_hidden_layers: int,\n num_attention_heads: int,\n local_radius: int,\n att_size_per_head: Optional[int] = None,\n long_intermediate_size: Optional[int] = None,\n global_intermediate_size: Optional[int] = None,\n hidden_act=tensor_utils.get_activation('gelu'),\n hidden_dropout_prob: float = 0.1,\n attention_probs_dropout_prob: float = 0.1,\n initializer_range: float = 0.02,\n relative_vocab_size: Optional[int] = None,\n share_feed_forward_params: bool = True,\n share_kv_projections: bool = False,\n share_qkv_projections: bool = True,\n share_att_output_projection: bool = False,\n use_pre_activation_order: bool = False,\n use_one_hot_lookup: bool = False,\n grad_checkpointing_period: int = 0,\n name: Text = 'global_local_transformer_layers',\n **kwargs):\n \"\"\"Init.\n\n Args:\n long_hidden_size: Size of the long input hidden dimension.\n global_hidden_size: Size of the global input hidden dimension. If this is\n different from `long_hidden_size`, you must turn off parameter sharing\n between long and global operations. In particular, the following\n sharing options which default to True must be set to False instead:\n `share_feed_forward_params`\n `share_qkv_projections`\n num_hidden_layers: Number of Transformer layers. Each layer includes both\n an attention sublayer and a feed-forward sublayer.\n num_attention_heads: Number of attention heads for global-local attention.\n Must evenly divide both `global_hidden_size` and `long_hidden_size`\n unless `att_size_per_head` is specified.\n local_radius: How many tokens to the left/right for long input tokens to\n locally self-attend to. 
For example, a value of 1 would allow each token\n to only attend to 1 token to the left and 1 token to the right of it.\n att_size_per_head: Size of attention query/key/value vectors per head.\n By default this will be `long_hidden_size / num_attention_heads`, so\n `num_attention_heads` must evenly divide `long_hidden_size` in this\n case.\n long_intermediate_size: The size of the \"intermediate\" (i.e. feed-forward)\n layers for long input. Defaults to 4 * long_hidden_size.\n global_intermediate_size: The size of the \"intermediate\" (i.e.\n feed-forward) layers for global input. Defaults to 4 *\n global_hidden_size. Must not be different from `long_intermediate_size`\n if `share_feed_forward_params` is True (the default).\n hidden_act: The non-linear activation function in the intermediate layers.\n hidden_dropout_prob: The dropout probability for the attention and\n feed-forward residual blocks. Must be between 0.0 and 1.0.\n attention_probs_dropout_prob: Dropout probability for attention\n probabilities. Must be between 0.0 and 1.0.\n initializer_range: The standard deviation of the truncated normal\n initializer for initializing all weight matrices.\n relative_vocab_size: Size of relative position vocabulary. If left\n unspecified, relative positions will be ignored for attention.\n share_feed_forward_params: If True (the default), we share the same\n fully connected feed-forward parameters for the long and global inputs.\n share_kv_projections: If True, key and value projections will be shared\n between long-to-long and long-to-global components, as well as between\n global-to-global and global-to-long components. This results in 2 key\n projections per layer instead of 4 (and similarly for value\n projections). Note that if `share_qkv_projections` is True, then\n `share_kv_projections` is completely ignored since the former results\n in even more sharing.\n share_qkv_projections: If True (the default), all 4 attention operations\n (long-to-long, global-to-global, long-to-global, and global-to-long)\n will share the same query, key, and value projections. The 3 projections\n will still be different from each other and different per layer.\n share_att_output_projection: If True, all 4 attention operations\n (long-to-long, global-to-global, long-to-global, and global-to-long)\n will share the same output projection per layer.\n use_pre_activation_order: If True, use \"pre-activation\" order for residual\n blocks (see ResidualBlock docstring).\n use_one_hot_lookup: Whether to use tf.one_hot for embedding lookup instead\n of tf.gather. Default is False, but setting to True may be more\n efficient on TPUs for vocab sizes that aren't too large. Currently this\n is only used during lookup of relative position embeddings.\n grad_checkpointing_period: How often to checkpoint activations. The\n default of 0 stores all activations. If greater than 0, activations are\n recomputed as necessary when calculating gradients to save memory. As an\n optimization, we avoid recomputing the last `grad_checkpointing_period`\n layers, so larger values result in less computational overhead but\n reduced memory savings. 
Using a value of `1` results in potentially the\n greatest memory savings but with the highest recompute cost.\n name: Name of the layer.\n **kwargs: Forwarded to super.\n \"\"\"\n super(GlobalLocalTransformerLayers, self).__init__(name=name, **kwargs)\n\n if long_intermediate_size is None:\n long_intermediate_size = 4 * long_hidden_size\n if global_intermediate_size is None:\n global_intermediate_size = 4 * global_hidden_size\n\n (att_size_per_head, long_total_att_size,\n global_total_att_size) = self._resolve_att_sizes(\n att_size_per_head=att_size_per_head,\n long_hidden_size=long_hidden_size,\n global_hidden_size=global_hidden_size,\n num_attention_heads=num_attention_heads)\n\n self.long_hidden_size = long_hidden_size\n self.global_hidden_size = global_hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.local_radius = local_radius\n self.att_size_per_head = att_size_per_head\n self.long_intermediate_size = long_intermediate_size\n self.global_intermediate_size = global_intermediate_size\n self.hidden_act = hidden_act\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.initializer_range = initializer_range\n self.relative_vocab_size = relative_vocab_size\n self.share_feed_forward_params = share_feed_forward_params\n self.share_kv_projections = share_kv_projections\n self.share_qkv_projections = share_qkv_projections\n self.share_att_output_projection = share_att_output_projection\n self.use_pre_activation_order = use_pre_activation_order\n self.use_one_hot_lookup = use_one_hot_lookup\n self.grad_checkpointing_period = grad_checkpointing_period\n\n self._long_total_att_size = long_total_att_size\n self._global_total_att_size = global_total_att_size\n\n self._validate_init_parameters()\n\n # TODO(jainslie): When using pre-activation order, the recommendation\n # from https://arxiv.org/abs/1904.10509 is to scale some of the\n # initialization by 1 / sqrt(2 * num_hidden_layers). 
Add logic\n # to do this scaling (maybe within ResidualBlock rather than through\n # initialization).\n self.initializer = tf.keras.initializers.TruncatedNormal(\n stddev=initializer_range)\n\n self.fused_att_layers = []\n self.long_feed_forward_layers = []\n self.global_feed_forward_layers = []\n\n for i in range(num_hidden_layers):\n normalization_layers = [\n tf.keras.layers.LayerNormalization(\n axis=-1, epsilon=1e-12, name='layer_norm_0'),\n tf.keras.layers.LayerNormalization(\n axis=-1, epsilon=1e-12, name='layer_norm_1')\n ]\n self.fused_att_layers.append(\n wrappers.ResidualBlock(\n inner_layer=attention.FusedGlobalLocalAttention(\n long_hidden_size=long_hidden_size,\n global_hidden_size=global_hidden_size,\n num_heads=num_attention_heads,\n local_radius=local_radius,\n long_total_att_size=long_total_att_size,\n global_total_att_size=global_total_att_size,\n relative_vocab_size=relative_vocab_size,\n att_dropout_prob=attention_probs_dropout_prob,\n initializer=self.initializer,\n share_kv_projections=share_kv_projections,\n share_qkv_projections=share_qkv_projections,\n share_att_output_projection=share_att_output_projection,\n use_one_hot_lookup=use_one_hot_lookup),\n normalization_layer=normalization_layers,\n dropout_probability=self.hidden_dropout_prob,\n use_pre_activation_order=self.use_pre_activation_order,\n name='fused_att_layer_%d' % i))\n\n if share_feed_forward_params:\n feed_forward_layer = wrappers.ResidualBlock(\n dropout_probability=hidden_dropout_prob,\n use_pre_activation_order=use_pre_activation_order,\n inner_intermediate_size=long_intermediate_size,\n inner_activation=hidden_act,\n inner_kernel_initializer=self.initializer,\n name='feed_forward_layer_%d' % i)\n feed_forward_layer.build(tf.TensorShape([None, long_hidden_size]))\n self.long_feed_forward_layers.append(feed_forward_layer)\n # Create separate layer to generate a new dropout seed.\n self.global_feed_forward_layers.append(\n wrappers.ResidualBlock(\n dropout_probability=hidden_dropout_prob,\n use_pre_activation_order=use_pre_activation_order,\n inner_layer=feed_forward_layer.inner_layer,\n normalization_layer=feed_forward_layer.normalization_layers,\n name='global_feed_forward_layer_%d' % i))\n else:\n self.long_feed_forward_layers.append(\n wrappers.ResidualBlock(\n dropout_probability=hidden_dropout_prob,\n use_pre_activation_order=use_pre_activation_order,\n inner_intermediate_size=long_intermediate_size,\n inner_activation=hidden_act,\n inner_kernel_initializer=self.initializer,\n name='long_feed_forward_layer_%d' % i))\n self.global_feed_forward_layers.append(\n wrappers.ResidualBlock(\n dropout_probability=hidden_dropout_prob,\n use_pre_activation_order=use_pre_activation_order,\n inner_intermediate_size=global_intermediate_size,\n inner_activation=hidden_act,\n inner_kernel_initializer=self.initializer,\n name='global_feed_forward_layer_%d' % i))\n\n def call(self,\n long_input: tf.Tensor,\n global_input: tf.Tensor,\n l2l_att_mask: Optional[tf.Tensor] = None,\n g2g_att_mask: Optional[tf.Tensor] = None,\n l2g_att_mask: Optional[tf.Tensor] = None,\n g2l_att_mask: Optional[tf.Tensor] = None,\n l2l_relative_att_ids: Optional[tf.Tensor] = None,\n g2g_relative_att_ids: Optional[tf.Tensor] = None,\n l2g_relative_att_ids: Optional[tf.Tensor] = None,\n g2l_relative_att_ids: Optional[tf.Tensor] = None,\n att_implementation: Text = 'sparse',\n training=None,\n ) -> List[tf.Tensor]:\n \"\"\"Calls the layer.\n\n We use abbreviations like \"l2g\" to mean \"long-to-global\".\n\n Args:\n long_input: 
<float32>[batch_size, long_seq_len, long_hidden_size].\n global_input: <float32>[batch_size, global_seq_len, global_hidden_size].\n l2l_att_mask: <int32>[batch_size, long_seq_len, 2*local_radius + 1]\n long-to-long attention mask for local attention. Should have only 0 and\n 1 values, with 0 for entries that should be masked and 1 otherwise.\n Leave as None to allow all long elements to attend to all other long\n elements within the local radius.\n g2g_att_mask: <int32>[batch_size, global_seq_len, global_seq_len]\n global-to-global attention mask. Should have only 0 and 1 values, with 0\n for entries that should be masked and 1 otherwise. Leave as None to\n allow all global elements to attend to all other global elements within\n each example.\n l2g_att_mask: <int32>[batch_size, long_seq_len, global_seq_len]\n long-to-global attention mask. Should have only 0 and 1 values, with 0\n for entries that should be masked and 1 otherwise. Leave as None to\n allow all long elements to attend to all global elements within each\n example.\n g2l_att_mask: <int32>[batch_size, global_seq_len, long_seq_len]\n global-to-long attention mask. Should have only 0 and 1 values, with 0\n for entries that should be masked and 1 otherwise. Leave as None to\n allow all global elements to attend to all long elements within each\n example.\n l2l_relative_att_ids: <int32>[batch_size, long_seq_len, 2*local_radius+1]\n long-to-long relative local self-attention ids. Leave as None to skip\n the relative portion of l2l attention.\n g2g_relative_att_ids: <int32>[batch_size, global_seq_len, global_seq_len]\n global-to-global relative attention ids. Leave as None to skip the\n relative portion of g2g attention.\n l2g_relative_att_ids: <int32>[batch_size, long_seq_len, global_seq_len]\n long-to-global relative attention ids. Leave as None to skip the\n relative portion of l2g attention.\n g2l_relative_att_ids: <int32>[batch_size, global_seq_len, long_seq_len]\n global-to-long relative attention ids. Leave as None to skip the\n relative portion of g2l attention.\n att_implementation: String representing which internal attention\n implementation to use. Valid values include 'auto' (the default),\n 'sparse', and 'full'. 'sparse' is preferred for sequences longer than\n about 1k tokens, but 'full' may be faster for sequences shorter than\n this. 'auto' attempts to automatically decide when to use full\n attention. 
See `QkvRelativeLocalAttention` for more details.\n training: For Keras, optional boolean scalar tensor or Python boolean\n indicating whether the call is meant for training or inference.\n\n Returns:\n A list of Tensors, [long_output, global_output]:\n long_output: <float32>[batch_size, long_seq_len, long_hidden_size]\n global_output: <float32>[batch_size, global_seq_len, global_hidden_size]\n \"\"\"\n long_output = long_input\n global_output = global_input\n\n def make_layer_fn(index: int):\n \"\"\"Makes a function that runs the entire `index` layer.\"\"\"\n\n def layer_fn(long_input, global_input):\n \"\"\"A function for an entire layer.\"\"\"\n long_output = long_input\n global_output = global_input\n\n long_output, global_output = self.fused_att_layers[index](\n [long_output, global_output],\n l2l_att_mask=l2l_att_mask,\n g2g_att_mask=g2g_att_mask,\n l2g_att_mask=l2g_att_mask,\n g2l_att_mask=g2l_att_mask,\n l2l_relative_att_ids=l2l_relative_att_ids,\n g2g_relative_att_ids=g2g_relative_att_ids,\n l2g_relative_att_ids=l2g_relative_att_ids,\n g2l_relative_att_ids=g2l_relative_att_ids,\n att_implementation=att_implementation,\n training=training)\n\n # Long and global feed-forward\n long_output = self.long_feed_forward_layers[index](\n long_output, training=training)\n global_output = self.global_feed_forward_layers[index](\n global_output, training=training)\n\n return (long_output, global_output)\n\n return layer_fn\n\n # If `grad_checkpointing_period` is 0 or greater than or equal to the\n # number of layers, no checkpointing will be used.\n stride = (\n self.num_hidden_layers if self.grad_checkpointing_period <= 0 else min(\n self.grad_checkpointing_period, self.num_hidden_layers))\n # Split layers into chains of size `stride`. Put remainder at the beginning.\n for split in range(stride - (-self.num_hidden_layers % stride),\n self.num_hidden_layers + 1, stride):\n # Chain layers together with max length `stride`.\n layer_fn = functools.partial(\n functools.reduce, lambda outputs, f: f(*outputs),\n list(map(make_layer_fn, range(max(0, split - stride), split))))\n # Destructure arguments for compatibility with `recompute_grad`.\n layer_fn = functools.partial(lambda f, *args: f(args), layer_fn)\n # Skip the last block. 
Store activations for gradient computation.\n if split < self.num_hidden_layers:\n layer_fn = recompute_grad_lib.recompute_grad(layer_fn)\n long_output, global_output = layer_fn(long_output, global_output)\n\n return [long_output, global_output]\n\n def _resolve_att_sizes(self, att_size_per_head, long_hidden_size,\n global_hidden_size, num_attention_heads):\n if att_size_per_head is None:\n if long_hidden_size % num_attention_heads != 0:\n raise ValueError(\n '`long_hidden_size` must be a multiple of `num_attention_heads` '\n 'when `att_size_per_head` is None.')\n if global_hidden_size % num_attention_heads != 0:\n raise ValueError(\n '`global_hidden_size` must be a multiple of `num_attention_heads` '\n 'when `att_size_per_head` is None.')\n att_size_per_head = long_hidden_size // num_attention_heads\n long_total_att_size = long_hidden_size\n global_total_att_size = global_hidden_size\n else:\n long_total_att_size = att_size_per_head * num_attention_heads\n global_total_att_size = long_total_att_size\n\n return (att_size_per_head, long_total_att_size, global_total_att_size)\n\n def _validate_init_parameters(self) -> None:\n if self.share_feed_forward_params:\n if self.long_hidden_size != self.global_hidden_size:\n raise ValueError(\n '`long_hidden_size` must equal `global_hidden_size` when '\n '`share_feed_forward_params` is True.')\n if self.long_intermediate_size != self.global_intermediate_size:\n raise ValueError(\n '`long_intermediate_size` must equal `global_intermediate_size` '\n 'when `share_feed_forward_params` is True.')\n if (self.share_qkv_projections and\n self.long_hidden_size != self.global_hidden_size):\n raise ValueError(\n '`long_hidden_size` must equal `global_hidden_size` when '\n '`share_qkv_projections` is True.')\n if (self.share_kv_projections and\n self.long_hidden_size != self.global_hidden_size):\n raise ValueError(\n '`long_hidden_size` must equal `global_hidden_size` when '\n '`share_kv_projections` is True.')\n if (self.share_att_output_projection and\n self.long_hidden_size != self.global_hidden_size):\n raise ValueError(\n '`long_hidden_size` must equal `global_hidden_size` when '\n '`share_att_output_projection` is True.')\n\n\nclass RelativeTransformerLayers(tf.keras.layers.Layer):\n \"\"\"A sequence of Transformer encoder layers with optional relative attention.\n\n Just like the original Transformer, this layer uses full attention and scales\n quadratically with the input length. To efficiently handle large inputs,\n ETC uses `GlobalLocalTransformerLayers` instead. We just include this layer\n as a convenience since it contains the efficient relative attention\n implementation used by ETC and may be useful for applications with shorter\n graph-like inputs.\n\n See the ETC paper (https://arxiv.org/abs/2004.08483) Appendix A for a\n description of the relative attention implementation.\n \"\"\"\n\n def __init__(self,\n hidden_size: int,\n num_hidden_layers: int,\n num_attention_heads: int,\n intermediate_size: Optional[int] = None,\n hidden_act=tensor_utils.get_activation('gelu'),\n hidden_dropout_prob: float = 0.1,\n attention_probs_dropout_prob: float = 0.1,\n initializer_range: float = 0.02,\n relative_vocab_size: Optional[int] = None,\n use_pre_activation_order: bool = False,\n use_one_hot_lookup: bool = False,\n name: Text = 'relative_transformer_layers',\n **kwargs):\n \"\"\"Init.\n\n Args:\n hidden_size: Size of the output hidden dimension. Must match the input\n hidden dimension size.\n num_hidden_layers: Number of Transformer layers. 
Each layer includes both\n an attention sublayer and a feed-forward sublayer.\n num_attention_heads: Number of attention heads. Must evenly divide\n `hidden_size`.\n intermediate_size: The size of the \"intermediate\" (i.e. feed-forward)\n layers. Defaults to 4 * hidden_size.\n hidden_act: The non-linear activation function in the intermediate layers.\n hidden_dropout_prob: The dropout probability for the attention and\n feed-forward residual blocks. Must be between 0.0 and 1.0.\n attention_probs_dropout_prob: Dropout probability for attention\n probabilities. Must be between 0.0 and 1.0.\n initializer_range: The standard deviation of the truncated normal\n initializer for initializing weight matrices.\n relative_vocab_size: Size of relative position vocabulary. If left\n unspecified, relative positions will be ignored for attention.\n use_pre_activation_order: If True, use \"pre-activation\" order for residual\n blocks (see ResidualBlock docstring).\n use_one_hot_lookup: Whether to use tf.one_hot for embedding lookup instead\n of tf.gather. Default is False, but setting to True may be more\n efficient on TPUs for vocab sizes that aren't too large. Currently this\n is only used during lookup of relative position embeddings.\n name: Name of the layer.\n **kwargs: Forwarded to super.\n \"\"\"\n super(RelativeTransformerLayers, self).__init__(name=name, **kwargs)\n\n if intermediate_size is None:\n intermediate_size = 4 * hidden_size\n\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.intermediate_size = intermediate_size\n self.hidden_act = hidden_act\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.initializer_range = initializer_range\n self.relative_vocab_size = relative_vocab_size\n self.use_pre_activation_order = use_pre_activation_order\n self.use_one_hot_lookup = use_one_hot_lookup\n\n # TODO(jainslie): When using pre-activation order, the recommendation\n # from https://arxiv.org/abs/1904.10509 is to scale some of the\n # initialization by 1 / sqrt(2 * num_hidden_layers). Add logic\n # to do this scaling (maybe within ResidualBlock rather than through\n # initialization).\n self.initializer = tf.keras.initializers.TruncatedNormal(\n stddev=initializer_range)\n\n self.attention_layers = []\n self.feed_forward_layers = []\n for i in range(num_hidden_layers):\n self.attention_layers.append(\n wrappers.ResidualBlock(\n inner_layer=attention.RelativeAttention(\n hidden_size=hidden_size,\n num_heads=num_attention_heads,\n relative_vocab_size=relative_vocab_size,\n att_dropout_prob=attention_probs_dropout_prob,\n initializer=self.initializer,\n use_one_hot_lookup=use_one_hot_lookup),\n dropout_probability=hidden_dropout_prob,\n use_pre_activation_order=use_pre_activation_order,\n name='attention_layer_%d' % i))\n self.feed_forward_layers.append(\n wrappers.ResidualBlock(\n dropout_probability=hidden_dropout_prob,\n use_pre_activation_order=use_pre_activation_order,\n inner_intermediate_size=intermediate_size,\n inner_activation=hidden_act,\n inner_kernel_initializer=self.initializer,\n name='feed_forward_layer_%d' % i))\n\n def call(self,\n inputs: tf.Tensor,\n att_mask: Optional[tf.Tensor] = None,\n relative_att_ids: Optional[tf.Tensor] = None,\n training=None) -> tf.Tensor:\n \"\"\"Calls the layer.\n\n Args:\n inputs: <float32>[batch_size, seq_len, hidden_size].\n att_mask: <int32>[batch_size, seq_len, seq_len]. 
Should have only 0 and 1\n values, with 0 for entries that should be masked and 1 otherwise. Leave\n as None to allow all elements to attend to all other elements within\n each example.\n relative_att_ids: <int32>[batch_size, seq_len, seq_len]. Leave as None to\n skip the relative portion of attention.\n training: For Keras, optional boolean scalar tensor or Python boolean\n indicating whether the call is meant for training or inference.\n\n Returns:\n <float32>[batch_size, seq_len, hidden_size].\n \"\"\"\n output_tensor = inputs\n\n for i in range(self.num_hidden_layers):\n output_tensor = self.attention_layers[i](\n output_tensor,\n training=training,\n att_mask=att_mask,\n relative_att_ids=relative_att_ids)\n output_tensor = self.feed_forward_layers[i](\n output_tensor, training=training)\n\n return output_tensor\n"
] |
[
[
"tensorflow.compat.v1.keras.layers.LayerNormalization",
"tensorflow.compat.v1.TensorShape",
"tensorflow.compat.v1.keras.initializers.TruncatedNormal"
]
] |
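The gradient-checkpointing loop in `GlobalLocalTransformerLayers.call` above is easy to misread, so here is a minimal, framework-free sketch of how its `stride`/`split` arithmetic partitions layers into chains. The helper name `checkpoint_blocks` is my own, not part of the ETC code; it mirrors only the index arithmetic, not the `recompute_grad` wrapping itself.

```python
def checkpoint_blocks(num_hidden_layers: int, grad_checkpointing_period: int):
    """Return (layer_indices, is_recomputed) for each chained block."""
    stride = (num_hidden_layers if grad_checkpointing_period <= 0
              else min(grad_checkpointing_period, num_hidden_layers))
    blocks = []
    # The remainder block (if any) comes first, then full blocks of size `stride`.
    for split in range(stride - (-num_hidden_layers % stride),
                       num_hidden_layers + 1, stride):
        layers = list(range(max(0, split - stride), split))
        # Every block except the last is recomputed during the backward pass.
        blocks.append((layers, split < num_hidden_layers))
    return blocks


if __name__ == "__main__":
    for layers, recomputed in checkpoint_blocks(7, 3):
        print(layers, "recompute_grad" if recomputed else "stored")
```

With 7 layers and `grad_checkpointing_period=3` this yields the blocks `[0]`, `[1, 2, 3]`, `[4, 5, 6]`, and only the final block keeps its stored activations, matching the "skip the last block" comment in the source.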
SwiftieH/SpGAT
|
[
"b9fdd1a326e28d4d4dcd922cdebaedd764783cf6"
] |
[
"inits.py"
] |
[
"import tensorflow as tf\nimport numpy as np\n\n\ndef uniform(shape, scale=1.0, name=None):\n \"\"\"Uniform init.\"\"\"\n initial = tf.random_uniform(shape, minval=0.0, maxval=scale, dtype=tf.float32)\n return tf.Variable(initial, name=name)\n\n\ndef glorot(shape, name=None):\n \"\"\"Glorot & Bengio (AISTATS 2010) init.\"\"\"\n init_range = np.sqrt(6.0/(shape[0]+shape[1]))\n initial = tf.random_uniform(shape, minval=-init_range, maxval=init_range, dtype=tf.float32)\n return tf.Variable(initial, name=name)\n\n\ndef zeros(shape, name=None):\n \"\"\"All zeros.\"\"\"\n initial = tf.zeros(shape, dtype=tf.float32)\n return tf.Variable(initial, name=name)\n\n\ndef ones(shape, name=None):\n \"\"\"All ones.\"\"\"\n initial = tf.ones(shape, dtype=tf.float32)\n return tf.Variable(initial, name=name)\n\ndef ones_fix(shape, name=None):\n \"\"\"All ones.\"\"\"\n initial = tf.ones(shape, dtype=tf.float32)\n return tf.Variable(initial, name=name, trainable=False)\n\n\n\n\n"
] |
[
[
"numpy.sqrt",
"tensorflow.Variable",
"tensorflow.zeros",
"tensorflow.ones",
"tensorflow.random_uniform"
]
] |
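The `glorot` initializer in `inits.py` above samples uniformly from ±sqrt(6 / (fan_in + fan_out)), following Glorot & Bengio (AISTATS 2010). Since `tf.random_uniform` is a TF1-era API, here is a NumPy-only sketch of the same range calculation for quick inspection outside a TensorFlow session; the function name `glorot_numpy` is illustrative and not part of the repo.

```python
import numpy as np

def glorot_numpy(shape, rng=None):
    """Glorot/Xavier uniform sample with the same range as `glorot` above."""
    rng = np.random.default_rng() if rng is None else rng
    init_range = np.sqrt(6.0 / (shape[0] + shape[1]))
    return rng.uniform(-init_range, init_range, size=shape).astype(np.float32)

W = glorot_numpy((128, 64))
bound = np.sqrt(6.0 / (128 + 64))
print(W.shape, W.min() >= -bound, W.max() <= bound)  # (128, 64) True True
```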
DuncDennis/rescomp
|
[
"796d8db1aa0b21c909097f6642d3408e04bf8271"
] |
[
"test/test_measures.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\" Tests if the rescomp.measures module works as it should \"\"\"\n\nimport unittest\nimport numpy as np\nfrom rescomp import measures\nimport pytest\n\n\nclass testMeasures(unittest.TestCase):\n def setUp(self):\n np.random.seed(0)\n\n def tearDown(self):\n np.random.seed(None)\n\n def test_rmse(self):\n length = np.random.randint(10, 100)\n dim = np.random.randint(10, 100)\n\n pred = np.random.random((length, dim))\n meas = np.random.random((length, dim))\n\n rmse_desired = np.sqrt(((pred - meas) ** 2).sum() / meas.shape[0])\n\n rmse = measures.rmse(pred, meas)\n\n # results not exactly equal due to numpy optimizations\n np.testing.assert_allclose(rmse, rmse_desired, rtol=1e-15)\n\n def test_rmse_normalization_mean(self):\n length = np.random.randint(10, 100)\n dim = np.random.randint(10, 100)\n\n pred = np.random.random((length, dim))\n meas = np.random.random((length, dim))\n\n nrmse_desired = \\\n np.sqrt(((pred - meas) ** 2).sum() / meas.shape[0]) / np.mean(meas)\n\n nrmse = measures.rmse(pred, meas, normalization=\"mean\")\n\n # results not exactly equal due to numpy optimizations\n np.testing.assert_allclose(nrmse, nrmse_desired, rtol=1e-15)\n\n def test_rmse_normalization_std_over_time(self):\n length = np.random.randint(10, 100)\n dim = np.random.randint(10, 100)\n\n pred = np.random.random((length, dim))\n meas = np.random.random((length, dim))\n\n std = np.std(meas, axis=0)\n mean_std = np.mean(std)\n nrmse_desired = \\\n np.sqrt(((pred - meas) ** 2).sum() / meas.shape[0]) / mean_std\n\n nrmse = measures.rmse(pred, meas, normalization=\"std_over_time\")\n\n # results not exactly equal due to numpy optimizations\n np.testing.assert_allclose(nrmse, nrmse_desired, rtol=1e-15)\n\n def test_rmse_over_time(self):\n length = 1\n dim = np.random.randint(10, 100)\n\n pred = np.random.random((length, dim))\n meas = np.random.random((length, dim))\n\n rmse_desired = measures.rmse(pred, meas)\n rmse = measures.rmse_over_time(pred, meas)\n\n np.testing.assert_equal(rmse, rmse_desired)\n\n def test_divergence_time(self):\n pred = np.array([[i, i + 1] for i in range(10)])\n meas = np.array([[i * 2, i + 1] for i in range(10)])\n\n epsilon = 5\n\n div_time_desired = 6\n div_time = measures.divergence_time(pred, meas, epsilon)\n\n np.testing.assert_equal(div_time, div_time_desired)\n\n @pytest.mark.skip(reason='measures.error_over_time not yet implemented')\n def test_error_over_time_same_as_rmse_over_time(self):\n length = np.random.randint(1, 100)\n dim = np.random.randint(1, 100)\n # some norm\n norm = \"maxmin\"\n pred = np.random.random((length, dim))\n meas = np.random.random((length, dim))\n rmse = measures.rmse_over_time(pred, meas, normalization=norm)\n error = measures.error_over_time(pred, meas, distance_measure=\"rmse\", normalization=norm)\n np.testing.assert_almost_equal(rmse, error, decimal=15)\n\n @pytest.mark.skip(reason='measures.error_over_time not yet implemented')\n def test_error_over_time_custom_function(self):\n length = np.random.randint(1, 100)\n dim = np.random.randint(1, 100)\n # some norm\n norm = None\n pred = np.random.random((length, dim))\n meas = np.random.random((length, dim))\n error_str_distance_measure = measures.error_over_time(pred, meas, distance_measure=\"L2\", normalization=norm)\n\n def L2_function(delta):\n return np.linalg.norm(delta, axis=1)\n\n error_fct_distance_measure = measures.error_over_time(pred, meas, distance_measure=L2_function,\n normalization=norm)\n 
np.testing.assert_almost_equal(error_str_distance_measure, error_fct_distance_measure, decimal=15)\n\n @pytest.mark.skip(reason='measures.error_over_time not yet implemented')\n def test_error_over_time_special_norm(self):\n length = np.random.randint(1, 100)\n dim = np.random.randint(1, 100)\n # some norm\n norm = \"root_of_avg_of_spacedist_squared\"\n pred = np.random.random((length, dim))\n meas = np.random.random((length, dim))\n error = measures.error_over_time(pred, meas, distance_measure=\"L2\", normalization=norm)\n error_manually = np.linalg.norm(pred - meas, axis=1) / np.sqrt(np.mean(np.linalg.norm(meas, axis=1) ** 2))\n np.testing.assert_almost_equal(error, error_manually, decimal=15)\n\n @pytest.mark.skip(reason='measures.valid_time_index not yet implemented')\n def test_valid_time_index(self):\n error_series = np.linspace(0, 10, 11)\n epsilon = 5\n desired_valid_time_index = 6\n measured_valid_time_index = measures.valid_time_index(error_series, epsilon)\n np.testing.assert_equal(desired_valid_time_index, measured_valid_time_index)\n\n @pytest.mark.skip(reason='measures.valid_time_index not yet implemented')\n def test_valid_times_zero_error(self):\n error_series = np.zeros(5)\n epsilon = 0\n desired_valid_time_index = 4\n measured_valid_time_index = measures.valid_time_index(error_series, epsilon)\n np.testing.assert_equal(desired_valid_time_index, measured_valid_time_index)\n\n\nif __name__ == \"__main__\":\n unittest.main(verbosity=2)\n"
] |
[
[
"numpy.testing.assert_equal",
"numpy.random.random",
"numpy.random.seed",
"numpy.linspace",
"numpy.linalg.norm",
"numpy.testing.assert_almost_equal",
"numpy.std",
"numpy.mean",
"numpy.testing.assert_allclose",
"numpy.zeros",
"numpy.random.randint"
]
] |
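The expected values in these tests spell the error measure out directly: a root-mean-square error over time steps, optionally divided by the mean of the measurement or by its mean standard deviation over time. The sketch below mirrors those `rmse_desired`/`nrmse_desired` expressions as a standalone function; the name `rmse_like` is mine, and this is not rescomp's own implementation.

```python
import numpy as np

def rmse_like(pred, meas, normalization=None):
    """RMSE as asserted in the tests above, with the same normalization modes."""
    err = np.sqrt(((pred - meas) ** 2).sum() / meas.shape[0])
    if normalization is None:
        return err
    if normalization == "mean":
        return err / np.mean(meas)
    if normalization == "std_over_time":
        return err / np.mean(np.std(meas, axis=0))
    raise ValueError(f"unknown normalization: {normalization}")

rng = np.random.default_rng(0)
pred, meas = rng.random((50, 3)), rng.random((50, 3))
print(rmse_like(pred, meas), rmse_like(pred, meas, "std_over_time"))
```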
HirotakaNakagame/feature-enginerring
|
[
"f5800337fd37dc487fc78f39e0d475e8f8178360"
] |
[
"WeightofEvidenceEncoder/WeightofEvidenceEncoder.py"
] |
[
"# Authors: Hirotaka Nakagame <hirotaka.nakagame@gmail.com>\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\n\r\nclass WeightofEvidenceEncoder:\r\n \"\"\"\r\n Calculate weight of evidence for categorical features\r\n\r\n Parameters\r\n ----------\r\n feats: list type\r\n list of features to be transformed\r\n drop_original: bool, default=False\r\n Will drop original columns if set True else keep the original columns\r\n fillna: float type, default=None\r\n what missing values will be filled with\r\n prefix: str type, default=\"woe_\"\r\n prefix to add to the new columns\r\n suffix: str type, default=\"\"\r\n suffix to add to the new columns\r\n \"\"\"\r\n\r\n def __init__(self, feats, drop_original=False, fillna=None, prefix=\"woe_\", suffix=\"\"):\r\n self.feats = feats\r\n self.drop_original = drop_original\r\n self.fillna = fillna\r\n self.prefix = prefix\r\n self.suffix = suffix\r\n self.transform_dict = {\"woe\": {}, \"iv\": {}}\r\n self.is_fitted = False\r\n\r\n @staticmethod\r\n def __calc_perc(x, y):\r\n \"\"\"\r\n Calculates % of events and non events\r\n\r\n Parameters\r\n ----------\r\n x : pd.Series\r\n a feature variable\r\n y : pd.Series\r\n a target variable\r\n\r\n Returns\r\n -------\r\n p_event : pd.Series\r\n % of events\r\n p_non_event : pd.Series\r\n % of non events\r\n \"\"\"\r\n X_temp = pd.DataFrame({\"feat\": x.values, \"target\": y.values})\r\n n_event = y.value_counts()\r\n p_event = X_temp.groupby(\"target\")[\"feat\"].value_counts()[1] / n_event[1]\r\n p_non_event = X_temp.groupby(\"target\")[\"feat\"].value_counts()[0] / n_event[0]\r\n return p_event, p_non_event\r\n\r\n def fit(self, X, y):\r\n \"\"\"\r\n Fit Weight of Evidence Encoder\r\n\r\n Parameters\r\n ----------\r\n X : pd.DataFrame\r\n a\r\n y : pd.Series\r\n a\r\n \"\"\"\r\n for feat in self.feats:\r\n p_event, p_non_event = self.__calc_perc(X[feat], y)\r\n woe = np.log(p_non_event / p_event)\r\n woe.fillna(0, inplace=True)\r\n self.transform_dict[\"woe\"][feat] = woe.to_dict()\r\n information_value = (p_non_event - p_event) * woe\r\n if self.fillna is not None:\r\n information_value.fillna(self.fillna, inplace=True)\r\n self.transform_dict[\"iv\"][feat] = information_value\r\n self.is_fitted = True\r\n\r\n def transofrm(self, X):\r\n \"\"\"\r\n Transform X using Weight of Evidence Encoder\r\n\r\n Parameters\r\n ----------\r\n X : pd.DataFrame\r\n The data to encode\r\n Returns\r\n -------\r\n X : pd.DataFrame\r\n Transformed input\r\n \"\"\"\r\n assert self.is_fitted, \"Fot the encoder first\"\r\n for feat in self.feats:\r\n new_col_name = self.prefix + feat + self.suffix\r\n X[new_col_name] = X[feat].map(\r\n self.transform_dict[\"woe\"][feat], na_action=self.fillna\r\n )\r\n if (self.drop_original is True) & (self.prefix + self.suffix != \"\"):\r\n X.drop([feat], axis=1, inplace=True)\r\n return X\r\n\r\n @staticmethod\r\n def __add_description(information_value):\r\n \"\"\"\r\n Convert Information Value to Description.\r\n\r\n Parameters\r\n ----------\r\n information_value : float\r\n Information Value\r\n Returns\r\n -------\r\n desc: str type\r\n Description of Information Value\r\n \"\"\"\r\n predictive_power = {\r\n (0, 0.02): \"Not useful for prediction\",\r\n (0.02, 0.1): \"Weak predictive power\",\r\n (0.1, 0.3): \"Medium predictive power\",\r\n (0.3, 0.5): \"Strong predictive power\",\r\n (0.5, np.inf): \"Too good to be true!\",\r\n }\r\n for bounds, desc in predictive_power.items():\r\n if bounds[0] <= information_value < bounds[1]:\r\n return desc\r\n return \"Too good to be 
true!\"\r\n\r\n def information_values(self, add_description=True):\r\n \"\"\"\r\n The information-value-based feature importances\r\n\r\n The higher, the more important the feature.\r\n The information value is calculated as\r\n sigma (% of non events - % of events) * weight of evidence\r\n\r\n Parameters\r\n ----------\r\n add_description : bool, default=True\r\n Will add a column to describe information value if True\r\n\r\n Returns\r\n -------\r\n X_iv : pd.DataFrame\r\n index is feature names and column is information value\r\n \"\"\"\r\n assert self.is_fitted, \"Fot the encoder first\"\r\n X_iv = pd.DataFrame(index=self.feats, columns=[\"Information Value\"])\r\n for feat in self.feats:\r\n X_iv.loc[feat, \"Information Value\"] = self.transform_dict[\"iv\"][feat].sum()\r\n if add_description:\r\n X_iv[\"Predictive Power Description\"] = X_iv[\"Information Value\"].apply(\r\n self.__add_description\r\n )\r\n\r\n return X_iv\r\n"
] |
[
[
"numpy.log",
"pandas.DataFrame"
]
] |
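The encoder above reduces to two quantities per category: the share of events and of non-events, whose log-ratio is the weight of evidence and whose weighted difference sums to the information value. Below is a compact pandas sketch of that arithmetic for a single made-up column; the `colour`/`target` data and variable names are illustrative only and do not come from the repository.

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({
    "colour": ["red", "red", "blue", "blue", "blue", "green", "green", "red"],
    "target": [1, 0, 0, 0, 1, 1, 0, 1],
})

n_event = df["target"].value_counts()                  # counts of 1s and 0s
grouped = df.groupby("target")["colour"].value_counts()
p_event = grouped[1] / n_event[1]                      # share of events per category
p_non_event = grouped[0] / n_event[0]                  # share of non-events per category

woe = np.log(p_non_event / p_event).fillna(0)          # weight of evidence
iv = ((p_non_event - p_event) * woe).sum()             # information value

print(woe.to_dict())
print(f"information value = {iv:.3f}")
```

An information value of roughly 0.35 would fall into the "strong predictive power" band described in `__add_description`.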
tbmihailov/OBQA
|
[
"653c5c64ae7eb164bde0b381813afe5f664dcf67"
] |
[
"ir/run_sts.py"
] |
[
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"BERT finetuning runner.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport csv\nimport os\nimport logging\nimport argparse\nimport random\nfrom tqdm import tqdm, trange\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\nfrom torch.utils.data.distributed import DistributedSampler\n\nfrom torch.nn import CrossEntropyLoss\nfrom pytorch_pretrained_bert.tokenization import BertTokenizer\nfrom pytorch_pretrained_bert.modeling import BertForSequenceClassification\nfrom pytorch_pretrained_bert.optimization import BertAdam\nfrom pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE\n\nlogging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt = '%m/%d/%Y %H:%M:%S',\n level = logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\nclass InputExample(object):\n \"\"\"A single training/test example for simple sequence classification.\"\"\"\n\n def __init__(self, guid, text_a, text_b=None, label=None):\n \"\"\"Constructs a InputExample.\n\n Args:\n guid: Unique id for the example.\n text_a: string. The untokenized text of the first sequence. For single\n sequence tasks, only this sequence must be specified.\n text_b: (Optional) string. The untokenized text of the second sequence.\n Only must be specified for sequence pair tasks.\n label: (Optional) string. The label of the example. 
This should be\n specified for train and dev examples, but not for test examples.\n \"\"\"\n self.guid = guid\n self.text_a = text_a\n self.text_b = text_b\n self.label = label\n\n\nclass InputFeatures(object):\n \"\"\"A single set of features of data.\"\"\"\n\n def __init__(self, input_ids, input_mask, segment_ids, label_id):\n self.input_ids = input_ids\n self.input_mask = input_mask\n self.segment_ids = segment_ids\n self.label_id = label_id\n\n\nclass DataProcessor(object):\n \"\"\"Base class for data converters for sequence classification data sets.\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the train set.\"\"\"\n raise NotImplementedError()\n\n def get_dev_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the dev set.\"\"\"\n raise NotImplementedError()\n\n def get_labels(self):\n \"\"\"Gets the list of labels for this data set.\"\"\"\n raise NotImplementedError()\n\n @classmethod\n def _read_tsv(cls, input_file, quotechar=None):\n \"\"\"Reads a tab separated value file.\"\"\"\n with open(input_file, \"r\", encoding='utf-8') as f:\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for line in reader:\n lines.append(line)\n return lines\n\nclass StsProcessor(DataProcessor): \n \"\"\"Processor for the STS-B data set (GLUE version).\"\"\" \n \n def get_train_examples(self, data_dir): \n \"\"\"See base class.\"\"\" \n return self._create_examples( \n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\") \n \n def get_dev_examples(self, data_dir): \n \"\"\"See base class.\"\"\" \n return self._create_examples( \n self._read_tsv(os.path.join(data_dir, \"dev_matched.tsv\")), \n \"dev_matched\") \n \n def get_labels(self): \n \"\"\"See base class.\"\"\" \n return [\"contradiction\", \"entailment\", \"neutral\"] \n \n def _create_examples(self, lines, set_type): \n \"\"\"Creates examples for the training and dev sets.\"\"\" \n examples = [] \n for (i, line) in enumerate(lines): \n if i == 0: \n continue \n guid = \"%s-%s\" % (set_type, line[0]) \n text_a = line[7] \n text_b = line[8] \n label = float(line[-1]) \n examples.append( \n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples \n\nclass OBQAProcessor(DataProcessor):\n \"\"\"Processor for the MRPC data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n logger.info(\"LOOKING AT {}\".format(os.path.join(data_dir, \"train.tsv\")))\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n guid = line[0]\n text_a = line[1]\n text_b = line[2]\n label = float(line[3])\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples\n \nclass MrpcProcessor(DataProcessor):\n \"\"\"Processor for the MRPC data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n logger.info(\"LOOKING AT {}\".format(os.path.join(data_dir, \"train.tsv\")))\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, 
\"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, i)\n text_a = line[3]\n text_b = line[4]\n label = line[0]\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples\n\n\nclass MnliProcessor(DataProcessor):\n \"\"\"Processor for the MultiNLI data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev_matched.tsv\")),\n \"dev_matched\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"contradiction\", \"entailment\", \"neutral\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, line[0])\n text_a = line[8]\n text_b = line[9]\n label = line[-1]\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples\n\nclass SnliProcessor(DataProcessor): \n \"\"\"Processor for the SNLI data set (GLUE version).\"\"\" \n \n def get_train_examples(self, data_dir): \n \"\"\"See base class.\"\"\" \n return self._create_examples( \n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\") \n \n def get_dev_examples(self, data_dir): \n \"\"\"See base class.\"\"\" \n return self._create_examples( \n self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \n \"dev\") \n \n def get_labels(self): \n \"\"\"See base class.\"\"\" \n return [\"contradiction\", \"entailment\", \"neutral\"] \n \n def _create_examples(self, lines, set_type): \n \"\"\"Creates examples for the training and dev sets.\"\"\" \n examples = [] \n for (i, line) in enumerate(lines): \n if i == 0: \n continue \n guid = \"%s-%s\" % (set_type, line[0]) \n text_a = line[7] \n text_b = line[8] \n label = line[-1] \n examples.append( \n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples \n\nclass ColaProcessor(DataProcessor):\n \"\"\"Processor for the CoLA data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in tqdm(enumerate(lines)):\n guid = \"%s-%s\" % (set_type, i)\n text_a = line[3]\n label = line[1]\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples\n\n\ndef convert_examples_to_features(examples, label_list, 
max_seq_length, tokenizer):\n \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n\n #label_map = {label : i for i, label in enumerate(label_list)}\n\n features = []\n for (ex_index, example) in tqdm(enumerate(examples)):\n tokens_a = tokenizer.tokenize(example.text_a)\n\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[:(max_seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambigiously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\n segment_ids = [0] * len(tokens)\n\n if tokens_b:\n tokens += tokens_b + [\"[SEP]\"]\n segment_ids += [1] * (len(tokens_b) + 1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding = [0] * (max_seq_length - len(input_ids))\n input_ids += padding\n input_mask += padding\n segment_ids += padding\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n #label_id = label_map[example.label]\n label_id = example.label\n if ex_index < 5:\n logger.info(\"*** Example ***\")\n logger.info(\"guid: %s\" % (example.guid))\n logger.info(\"tokens: %s\" % \" \".join(\n [str(x) for x in tokens]))\n logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n logger.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n logger.info(\n \"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n logger.info(\"label: %s (id = %d)\" % (example.label, label_id))\n\n features.append(\n InputFeatures(input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id))\n return features\n\n\ndef _truncate_seq_pair(tokens_a, tokens_b, max_length):\n \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\"\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. 
This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()\n\ndef accuracy(out, labels):\n outputs = np.argmax(out, axis=1)\n return np.sum(outputs == labels)\n\ndef average(x):\n assert len(x) > 0\n return float(sum(x)) / len(x)\n\nimport math\ndef pearson(x, y):\n xx = []\n for val in x:\n xx.append(val[0])\n x = xx\n assert len(x) == len(y)\n n = len(x)\n assert n > 0\n avg_x = average(x)\n avg_y = average(y)\n diffprod = 0\n xdiff2 = 0\n ydiff2 = 0\n for idx in range(n):\n xdiff = x[idx] - avg_x\n ydiff = y[idx] - avg_y\n diffprod += xdiff * ydiff\n xdiff2 += xdiff * xdiff\n ydiff2 += ydiff * ydiff\n\n return diffprod / math.sqrt(xdiff2 * ydiff2)\n\ndef warmup_linear(x, warmup=0.002):\n if x < warmup:\n return x/warmup\n return 1.0 - x\n\ndef main():\n parser = argparse.ArgumentParser()\n\n ## Required parameters\n parser.add_argument(\"--data_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The input data dir. Should contain the .tsv files (or other data files) for the task.\")\n parser.add_argument(\"--bert_model\", default=None, type=str, required=True,\n help=\"Bert pre-trained model selected in the list: bert-base-uncased, \"\n \"bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, \"\n \"bert-base-multilingual-cased, bert-base-chinese.\")\n parser.add_argument(\"--task_name\",\n default=None,\n type=str,\n required=True,\n help=\"The name of the task to train.\")\n parser.add_argument(\"--output_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The output directory where the model predictions and checkpoints will be written.\")\n\n ## Other parameters\n parser.add_argument(\"--max_seq_length\",\n default=128,\n type=int,\n help=\"The maximum total input sequence length after WordPiece tokenization. \\n\"\n \"Sequences longer than this will be truncated, and sequences shorter \\n\"\n \"than this will be padded.\")\n parser.add_argument(\"--do_train\",\n action='store_true',\n help=\"Whether to run training.\")\n parser.add_argument(\"--do_eval\",\n action='store_true',\n help=\"Whether to run eval on the dev set.\")\n parser.add_argument(\"--do_lower_case\",\n action='store_true',\n help=\"Set this flag if you are using an uncased model.\")\n parser.add_argument(\"--train_batch_size\",\n default=32,\n type=int,\n help=\"Total batch size for training.\")\n parser.add_argument(\"--eval_batch_size\",\n default=8,\n type=int,\n help=\"Total batch size for eval.\")\n parser.add_argument(\"--learning_rate\",\n default=5e-5,\n type=float,\n help=\"The initial learning rate for Adam.\")\n parser.add_argument(\"--num_train_epochs\",\n default=3.0,\n type=float,\n help=\"Total number of training epochs to perform.\")\n parser.add_argument(\"--warmup_proportion\",\n default=0.1,\n type=float,\n help=\"Proportion of training to perform linear learning rate warmup for. 
\"\n \"E.g., 0.1 = 10%% of training.\")\n parser.add_argument(\"--no_cuda\",\n action='store_true',\n help=\"Whether not to use CUDA when available\")\n parser.add_argument(\"--local_rank\",\n type=int,\n default=-1,\n help=\"local_rank for distributed training on gpus\")\n parser.add_argument('--seed',\n type=int,\n default=42,\n help=\"random seed for initialization\")\n parser.add_argument('--gradient_accumulation_steps',\n type=int,\n default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\")\n parser.add_argument('--fp16',\n action='store_true',\n help=\"Whether to use 16-bit float precision instead of 32-bit\")\n parser.add_argument('--loss_scale',\n type=float, default=0,\n help=\"Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\\n\"\n \"0 (default value): dynamic loss scaling.\\n\"\n \"Positive power of 2: static loss scaling value.\\n\")\n\n args = parser.parse_args()\n\n processors = {\n \"obqa\": OBQAProcessor, \n \"cola\": ColaProcessor,\n \"mnli\": MnliProcessor,\n \"mrpc\": MrpcProcessor,\n \"snli\": SnliProcessor,\n \"stsb\": StsProcessor,\n }\n\n num_labels_task = {\n \"obqa\": 1,\n \"cola\": 2,\n \"mnli\": 3,\n \"mrpc\": 2,\n \"snli\": 3,\n \"stsb\": 1\n }\n\n if args.local_rank == -1 or args.no_cuda:\n device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n n_gpu = torch.cuda.device_count()\n else:\n torch.cuda.set_device(args.local_rank)\n device = torch.device(\"cuda\", args.local_rank)\n n_gpu = 1\n # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n torch.distributed.init_process_group(backend='nccl')\n logger.info(\"device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}\".format(\n device, n_gpu, bool(args.local_rank != -1), args.fp16))\n\n if args.gradient_accumulation_steps < 1:\n raise ValueError(\"Invalid gradient_accumulation_steps parameter: {}, should be >= 1\".format(\n args.gradient_accumulation_steps))\n\n args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n\n if not args.do_train and not args.do_eval:\n raise ValueError(\"At least one of `do_train` or `do_eval` must be True.\")\n\n if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:\n raise ValueError(\"Output directory ({}) already exists and is not empty.\".format(args.output_dir))\n os.makedirs(args.output_dir, exist_ok=True)\n\n task_name = args.task_name.lower()\n\n if task_name not in processors:\n raise ValueError(\"Task not found: %s\" % (task_name))\n\n processor = processors[task_name]()\n num_labels = num_labels_task[task_name]\n label_list = None # processor.get_labels()\n\n tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)\n\n train_examples = None\n num_train_steps = None\n if args.do_train:\n train_examples = processor.get_train_examples(args.data_dir)\n num_train_steps = int(\n len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs)\n\n # Prepare model\n model = BertForSequenceClassification.from_pretrained(args.bert_model,\n cache_dir=PYTORCH_PRETRAINED_BERT_CACHE / 'distributed_{}'.format(args.local_rank),\n num_labels = num_labels)\n if args.fp16:\n model.half()\n model.to(device)\n if args.local_rank != -1:\n try:\n from 
apex.parallel import DistributedDataParallel as DDP\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.\")\n\n model = DDP(model)\n elif n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Prepare optimizer\n param_optimizer = list(model.named_parameters())\n no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},\n {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ]\n t_total = num_train_steps\n if args.local_rank != -1:\n t_total = t_total // torch.distributed.get_world_size()\n if args.fp16:\n try:\n from apex.optimizers import FP16_Optimizer\n from apex.optimizers import FusedAdam\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.\")\n\n optimizer = FusedAdam(optimizer_grouped_parameters,\n lr=args.learning_rate,\n bias_correction=False,\n max_grad_norm=1.0)\n if args.loss_scale == 0:\n optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)\n else:\n optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)\n\n else:\n optimizer = BertAdam(optimizer_grouped_parameters,\n lr=args.learning_rate,\n warmup=args.warmup_proportion,\n t_total=t_total)\n\n global_step = 0\n nb_tr_steps = 0\n tr_loss = 0\n if args.do_train:\n train_features = convert_examples_to_features(\n train_examples, label_list, args.max_seq_length, tokenizer)\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_examples))\n logger.info(\" Batch size = %d\", args.train_batch_size)\n logger.info(\" Num steps = %d\", num_train_steps)\n all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)\n all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)\n all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)\n all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.float)\n train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)\n if args.local_rank == -1:\n train_sampler = RandomSampler(train_data)\n else:\n train_sampler = DistributedSampler(train_data)\n train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)\n\n model.train()\n ep = 0\n output_model_file = \"dummy\"\n #loss_fct = CrossEntropyLoss()\n from torch.nn import MSELoss\n loss_fct = MSELoss()\n for _ in trange(int(args.num_train_epochs), desc=\"Epoch\"):\n tr_loss = 0\n nb_tr_examples, nb_tr_steps = 0, 0\n ep += 1\n tq = tqdm(train_dataloader, desc=\"Iteration\")\n acc = 0\n for step, batch in enumerate(tq):\n batch = tuple(t.to(device) for t in batch)\n input_ids, input_mask, segment_ids, label_ids = batch\n logits = model(input_ids, segment_ids, input_mask)\n #print(logits.view(-1, num_labels).size(),label_ids.view(-1).size())\n loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1, num_labels))\n \n logits = logits.detach().cpu().numpy()\n label_ids = label_ids.to('cpu').numpy()\n tmp_accuracy = pearson(logits, label_ids)\n acc += tmp_accuracy\n \n if n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu.\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n\n if 
args.fp16:\n optimizer.backward(loss)\n else:\n loss.backward()\n\n tr_loss += loss.item()\n nb_tr_examples += input_ids.size(0)\n nb_tr_steps += 1\n \n if (step + 1) % args.gradient_accumulation_steps == 0:\n # modify learning rate with special warm up BERT uses\n lr_this_step = args.learning_rate * warmup_linear(global_step/t_total, args.warmup_proportion)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr_this_step\n optimizer.step()\n optimizer.zero_grad()\n global_step += 1\n \n tq.set_description(\"Loss:\"+str(tr_loss/nb_tr_steps)+\",Pearson:\"+str(acc/nb_tr_steps)) \n model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self\n output_model_file = os.path.join(args.output_dir, \"pytorch_model.bin.\" + str(ep))\n torch.save(model_to_save.state_dict(), output_model_file)\n \n\nif __name__ == \"__main__\":\n main()\n \n"
] |
[
[
"torch.nn.MSELoss",
"torch.distributed.init_process_group",
"numpy.random.seed",
"torch.cuda.set_device",
"torch.utils.data.distributed.DistributedSampler",
"torch.manual_seed",
"torch.utils.data.TensorDataset",
"torch.utils.data.DataLoader",
"torch.utils.data.RandomSampler",
"torch.tensor",
"torch.nn.DataParallel",
"numpy.argmax",
"torch.cuda.manual_seed_all",
"torch.cuda.is_available",
"torch.device",
"torch.cuda.device_count",
"numpy.sum",
"torch.distributed.get_world_size"
]
] |
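Besides the BERT plumbing, `run_sts.py` tracks training quality with a hand-rolled `pearson` helper that unwraps the `[n, 1]` logits column before computing the correlation. The sketch below re-implements that helper in condensed form and checks it against `np.corrcoef` on synthetic data; the `logits`/`labels` arrays here are made up purely for illustration.

```python
import math
import numpy as np

def pearson(x, y):
    """Condensed version of the `pearson` helper in run_sts.py above."""
    x = [row[0] for row in x]                 # unwrap the [n, 1] logits column
    n = len(x)
    avg_x, avg_y = sum(x) / n, sum(y) / n
    diffprod = xdiff2 = ydiff2 = 0.0
    for xi, yi in zip(x, y):
        xdiff, ydiff = xi - avg_x, yi - avg_y
        diffprod += xdiff * ydiff
        xdiff2 += xdiff * xdiff
        ydiff2 += ydiff * ydiff
    return diffprod / math.sqrt(xdiff2 * ydiff2)

rng = np.random.default_rng(42)
logits = rng.normal(size=(16, 1))
labels = 0.8 * logits[:, 0] + rng.normal(scale=0.3, size=16)

print(pearson(logits, labels))                   # hand-rolled, as in the script
print(np.corrcoef(logits[:, 0], labels)[0, 1])   # reference value, should match
```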
Aewaterhouse/dhitools
|
[
"da3a2244d44802dc871debcc6ce06bf78dfc85a8"
] |
[
"dhitools/dfs.py"
] |
[
"\"\"\"DHI MIKE21 dfs0/1/2 functions\n\"\"\"\n\nimport numpy as np\nimport datetime as dt\nimport pandas as pd\nfrom . import _utils\nimport os\n\n# Import .NET libraries\nimport DHI.Generic.MikeZero.DFS as dfs\n\n\nclass _Dfs(object):\n \"\"\"\n Base class for dfs0/1/2\n \"\"\"\n\n def __init__(self, dfs_object):\n self.items = self.dfs_info(dfs_object)\n\n self.time = self.dfs_time()\n\n def dfs_info(self, dfs_object):\n \"\"\"\n Make a dictionary with .dfs items and other attributes.\n\n See class attributes\n \"\"\"\n items = {}\n\n time_obj = dfs_object.FileInfo.TimeAxis\n dt_start_obj = time_obj.StartDateTime\n\n self.number_tstep = time_obj.NumberOfTimeSteps\n self.num_items = len(dfs_object.ItemInfo)\n self.timestep = time_obj.TimeStep\n self.start_datetime = dt.datetime(\n year=dt_start_obj.Year,\n month=dt_start_obj.Month,\n day=dt_start_obj.Day,\n hour=dt_start_obj.Hour,\n minute=dt_start_obj.Minute,\n second=dt_start_obj.Second,\n )\n self.end_datetime = self.start_datetime + dt.timedelta(\n seconds=self.timestep * self.number_tstep\n )\n\n itemnames = [\n [\n n.Name,\n n.Quantity.UnitAbbreviation,\n n.Quantity.ItemDescription,\n n.Quantity.Item,\n n.Quantity.Unit,\n ]\n for n in dfs_object.ItemInfo\n ]\n\n for ind, it in enumerate(itemnames):\n # Create key from itemname and add to dictionary\n itemName = str(it[0])\n items[itemName] = {}\n items[itemName][\"unit_abr\"] = str(it[1])\n items[itemName][\"item_type\"] = str(it[2])\n items[itemName][\"eum_item_DHI\"] = str(it[3])\n items[itemName][\"eum_unit_DHI\"] = str(it[4])\n items[itemName][\"index\"] = ind\n\n return items\n\n def dfs_time(self):\n \"\"\" Create a time sequency between start and end datetime \"\"\"\n time = np.arange(\n self.start_datetime, self.end_datetime, dt.timedelta(seconds=self.timestep)\n ).astype(dt.datetime)\n return time\n\n def summary(self):\n \"\"\"\n Prints a summary of the dfs\n \"\"\"\n print(\"Input file: {}\".format(self.filename))\n print(\n \"Time start = {}\".format(\n dt.datetime.strftime(self.start_datetime, \"%d/%m/%Y %H:%M:%S\")\n )\n )\n print(\"Number of timesteps = {}\".format(self.number_tstep))\n print(\"Timestep = {}\".format(self.timestep))\n print(\"Number of items = {}\".format(self.num_items))\n\n # Dfs1 specific\n if self.filename.endswith(\".dfs1\"):\n print(\"number of profile points = {}\".format(self.num_points))\n\n # Dfs2 specific\n if self.filename.endswith(\".dfs2\"):\n print(\"\")\n print(\"Projection = \\n {}\".format(self.projection))\n print(\"\")\n print(\"Grid:\")\n print(\"(num_X, num_Y) = ({}, {})\".format(self.x_count, self.y_count))\n print(\"(del_X, del_Y) = ({}, {})\".format(self.del_x, self.del_y))\n print(\"(X_min, Y_min) = ({}, {})\".format(self.x_min, self.y_min))\n print(\"(X_max, Y_max) = ({}, {})\".format(self.x_max, self.y_max))\n print(\"\")\n\n print(\"Items:\")\n for n in self.items.keys():\n print(\n \"{}, unit = {}, index = {}\".format(\n n, self.items[n][\"unit_abr\"], self.items[n][\"index\"]\n )\n )\n\n\nclass Dfs0(_Dfs):\n \"\"\"\n MIKE21 dfs0 class. Contains many attributes read in from the input `.dfs0`\n file.\n\n Parameters\n ----------\n filename : str\n Path to .dfs0\n\n Attributes\n -------\n filename : str\n Path to .dfs0\n data : pandas.DataFrame, shape (num_timesteps, num_items)\n Pandas DataFrame containing .dfs0 item data.\n Indexed by time. Columns are each .dfs0 item name.\n num_items : int\n Total number of .dfs0 items\n items : dict\n List .dfs0 items (ie. 
surface elevation, current speed), item names,\n item indexto lookup in .dfs0, item units and counts of elements, nodes\n and time steps.\n start_datetime : datetime\n Start datetime (datetime object)\n end_datetime : datetime\n End datetime (datetime object)\n timestep : float\n Timestep delta in seconds\n number_tstep : int\n Total number of timesteps\n time : ndarray, shape (number_tstep,)\n Sequence of datetimes between start and end datetime at delta timestep\n \"\"\"\n\n def __init__(self, filename):\n self.filename = filename\n dfs0_object = dfs.DfsFileFactory.DfsGenericOpen(self.filename)\n super(Dfs0, self).__init__(dfs0_object)\n self.data = self._read_dfs0(dfs0_object)\n\n def _read_dfs0(self, dfs0_object, close=True):\n \"\"\"\n Read in .dfs0 file\n \"\"\"\n out_arr = np.zeros((self.number_tstep, self.num_items))\n\n for i in range(self.number_tstep):\n for j in range(self.num_items):\n item_data = dfs0_object.ReadItemTimeStep(j + 1, i)\n out_arr[i, j] = item_data.Data[0]\n\n out_df = pd.DataFrame(data=out_arr, columns=self.items.keys(), index=self.time)\n\n if close:\n dfs0_object.Close()\n\n return out_df\n\n\nclass Dfs1(_Dfs):\n \"\"\"\n MIKE21 dfs1 class. Contains many attributes read in from the input `.dfs1`\n file.\n\n Parameters\n ----------\n filename : str\n Path to .dfs1\n\n Attributes\n -------\n filename : str\n Path to .dfs1\n num_items : int\n Total number of .dfs1 items\n num_points : int\n Total number of .dfs1 profile points within each item\n items : dict\n List .dfs1 items (ie. surface elevation, current speed), item names,\n item indexto lookup in .dfs1, item units and counts of elements, nodes\n and time steps. Contains item data, accessed by dict key `item_name`.\n This is more easily accessed by :func:`item_data()`.\n start_datetime : datetime\n Start datetime (datetime object)\n end_datetime : datetime\n End datetime (datetime object)\n timestep : float\n Timestep delta in seconds\n number_tstep : int\n Total number of timesteps\n time : ndarray, shape (number_tstep,)\n Sequence of datetimes between start and end datetime at delta timestep\n \"\"\"\n\n def __init__(self, filename):\n self.filename = filename\n dfs1_object = dfs.DfsFileFactory.Dfs1FileOpen(self.filename)\n self.num_points = len(dfs1_object.ReadItemTimeStep(1, 0).Data)\n super(Dfs1, self).__init__(dfs1_object)\n self._read_dfs1(dfs1_object)\n\n def _read_dfs1(self, dfs1_object, close=True):\n \"\"\"\n Read in .dfs1 file\n \"\"\"\n for itemname in self.items.keys():\n item_idx = self.items[itemname][\"index\"] + 1\n out_arr = np.zeros((self.number_tstep, self.num_points))\n\n for i in range(self.number_tstep):\n out_arr[i, :] = _utils.dotnet_arr_to_ndarr(\n dfs1_object.ReadItemTimeStep(item_idx, i).Data\n )\n out_df = pd.DataFrame(data=out_arr, index=self.time)\n\n self.items[itemname][\"data\"] = out_df\n\n if close:\n dfs1_object.Close()\n\n def item_data(self, item_name):\n \"\"\"\n Return pandas DataFrame of `dfs1` item data.\n\n Parameters\n ----------\n item_name : str\n Specified item to return element data. Item names can be found in\n :class:`items <dhitools.dfs.Dfs1>` attribute or by\n :func:`summary()`.\n\n Returns\n -------\n data : pandas.DataFrame, shape (num_timesteps, num_points)\n Pandas DataFrame containing .dfs1 item data.\n Indexed by time. Columns are each of the profile points.\n \"\"\"\n\n return self.items[item_name][\"data\"]\n\n\nclass Dfs2(_Dfs):\n \"\"\"\n MIKE21 dfs2 class. 
Contains many attributes read in from the input `.dfs2`\n file.\n\n Parameters\n ----------\n filename : str\n Path to .dfs2\n\n Attributes\n -------\n filename : str\n Path to .dfs2\n num_items : int\n Total number of .dfs2 items\n num_points : int\n Total number of .dfs2 profile points within each item\n items : dict\n List .dfs2 items (ie. surface elevation, current speed), item names,\n item indexto lookup in .dfs2, item units and counts of elements, nodes\n and time steps. Contains item data, accessed by dict key `item_name`.\n This is more easily accessed by :func:`item_data()`.\n start_datetime : datetime\n Start datetime (datetime object)\n end_datetime : datetime\n End datetime (datetime object)\n timestep : float\n Timestep delta in seconds\n number_tstep : int\n Total number of timesteps\n time : ndarray, shape (number_tstep,)\n Sequence of datetimes between start and end datetime at delta timestep\n projection : str\n .mesh spatial projection string in WKT format\n X : ndarray, shape (y_count, x_count)\n X meshgrid\n Y : ndarray, shape (y_count, x_count)\n Y meshgrid\n gridshape : tuple, length 2\n .dfs2 grid shape\n x_count : int\n Number of x points\n y_count : int\n Number of y points\n del_x : int\n X grid step\n del_y : int\n Y grid step\n x_max : int\n Max x value\n x_min : int\n Min x value\n y_max : int\n Max y value\n y_min : int\n Min y value\n nodata_float : float\n Nodata value for type float data\n nodata_double : float\n Nodata value for type double data\n nodata_int : int\n Nodata value for type int data\n \"\"\"\n\n def __init__(self, filename):\n self.filename = filename\n dfs2_object = dfs.DfsFileFactory.Dfs2FileOpen(self.filename)\n self.num_points = len(dfs2_object.ReadItemTimeStep(1, 0).Data)\n super(Dfs2, self).__init__(dfs2_object)\n self._read_dfs2(dfs2_object)\n\n def _read_dfs2(self, dfs2_object, close=True):\n \"\"\"\n Read in .dfs2 file\n \"\"\"\n sa = dfs2_object.SpatialAxis\n fi = dfs2_object.FileInfo\n self.projection = str(fi.Projection.WKTString)\n self.x_min = sa.X0\n self.del_x = sa.Dx\n self.x_count = sa.XCount\n self.x_max = self.x_min + (self.del_x * self.x_count)\n self.y_min = sa.Y0\n self.del_y = sa.Dy\n self.y_count = sa.YCount\n self.y_max = self.y_min + (self.del_y * self.y_count)\n self.gridshape = (self.y_count, self.x_count)\n\n self.X, self.Y = np.meshgrid(\n np.arange(self.x_min, self.x_max, self.del_x),\n np.arange(self.y_min, self.y_max, self.del_y),\n )\n\n # No data values\n self.nodata_float = fi.DeleteValueFloat\n self.nodata_double = fi.DeleteValueDouble\n self.nodata_int = fi.DeleteValueInt\n\n if close:\n dfs2_object.Close()\n\n def item_data(self, item_name, tstep_start=None, tstep_end=None):\n \"\"\"\n Function description...\n\n Parameters\n ----------\n item_name : str\n Specified item to return data. Item names are found in\n the `Dfs2.items` attribute.\n tstep_start : int or None, optional\n Specify time step for element data. Timesteps begin from 0.\n If `None`, returns data from 0 time step.\n tstep_end : int or None, optional\n Specify last time step for element data. 
Allows for range of time\n steps to be returned, where `tstep_end` is included.Must be\n positive int <= number of timesteps\n If `None`, returns single time step specified by `tstep_start`\n If `-1`, returns all time steps from `tstep_start`:end\n\n Returns\n -------\n item_data : ndarray, shape (y_count, x_count, [tstep_end-tstep_start])\n Data for specified item and time steps.\n\n \"\"\"\n dfs2_object = dfs.DfsFileFactory.Dfs2FileOpen(self.filename)\n data = _item_data(\n dfs2_object=dfs2_object,\n item_name=item_name,\n item_info=self.items,\n tstep_start=tstep_start,\n tstep_end=tstep_end,\n gridshape=self.gridshape,\n )\n dfs2_object.Close()\n\n return data\n\n\ndef _item_data(\n dfs2_object, item_name, item_info, gridshape, tstep_start=None, tstep_end=None\n):\n \"\"\" Read specified item_name dfs2 data \"\"\"\n\n item_idx = item_info[item_name][\"index\"] + 1\n if tstep_start is None:\n tstep_start = 0\n\n if tstep_end is None:\n # Only get one tstep specified by tstep_start\n tstep_end = tstep_start + 1\n elif tstep_end == -1:\n # Get from tstep_start to the end\n tstep_end = dfs2_object.FileInfo.TimeAxis.NumberOfTimeSteps\n else:\n # Add one to include tstep_end in output\n tstep_end += 1\n\n t_range = range(tstep_start, tstep_end)\n ndshape = gridshape + (len(t_range),)\n data = np.zeros(shape=(ndshape))\n for i, t in enumerate(t_range):\n data[:, :, i] = _utils.dotnet_arr_to_ndarr(\n dfs2_object.ReadItemTimeStep(item_idx, t).Data\n ).reshape(gridshape)\n\n return data\n"
] |
[
[
"numpy.arange",
"numpy.zeros",
"pandas.DataFrame"
]
] |
Fangyh09/pysteps
|
[
"9eb7f4ead0a946d98b7504d1bd66b18dc405ed51"
] |
[
"pysteps/tests/test_motion.py"
] |
[
"# coding: utf-8\n\n\"\"\"\nTest the convergence of the optical flow methods available in\npySTEPS using idealized motion fields.\n\nTo test the convergence, using an example precipitation field we will:\n\n- Read precipitation field from a file\n- Morph the precipitation field using a given motion field (linear or rotor) to\n generate a sequence of moving precipitation patterns.\n- Using the available optical flow methods, retrieve the motion field from the\n precipitation time sequence (synthetic precipitation observations).\n\nThis tests check that the retrieved motion fields are within reasonable values.\nAlso, they will fail if any modification on the code decrease the quality of\nthe retrieval.\n\"\"\"\n\nimport numpy as np\nimport pytest\nfrom scipy.ndimage import uniform_filter\n\nimport pysteps as stp\nfrom pysteps import motion\nfrom pysteps.motion.vet import morph\nfrom pysteps.tests.helpers import get_precipitation_fields, smart_assert\n\nreference_field = get_precipitation_fields(num_prev_files=0)\n\n\ndef _create_motion_field(input_precip, motion_type):\n \"\"\"\n Create idealized motion fields to be applied to the reference image.\n\n Parameters\n ----------\n\n input_precip: numpy array (lat, lon)\n\n motion_type : str\n The supported motion fields are:\n\n - linear_x: (u=2, v=0)\n - linear_y: (u=0, v=2)\n - rotor: rotor field\n\n Returns\n -------\n ideal_motion : numpy array (u, v)\n \"\"\"\n\n # Create an imaginary grid on the image and create a motion field to be\n # applied to the image.\n ny, nx = input_precip.shape\n\n ideal_motion = np.zeros((2, nx, ny))\n\n if motion_type == \"linear_x\":\n ideal_motion[0, :] = 2 # Motion along x\n elif motion_type == \"linear_y\":\n ideal_motion[1, :] = 2 # Motion along y\n else:\n raise ValueError(\"motion_type not supported.\")\n\n # We need to swap the axes because the optical flow methods expect\n # (lat, lon) or (y,x) indexing convention.\n ideal_motion = ideal_motion.swapaxes(1, 2)\n return ideal_motion\n\n\ndef _create_observations(input_precip, motion_type, num_times=9):\n \"\"\"\n Create synthetic precipitation observations by displacing the input field\n using an ideal motion field.\n\n Parameters\n ----------\n\n input_precip: numpy array (lat, lon)\n Input precipitation field.\n\n motion_type : str\n The supported motion fields are:\n\n - linear_x: (u=2, v=0)\n - linear_y: (u=0, v=2)\n\n num_times: int, optional\n Length of the observations sequence.\n\n\n Returns\n -------\n synthetic_observations : numpy array\n Sequence of observations\n \"\"\"\n\n ideal_motion = _create_motion_field(input_precip, motion_type)\n\n # The morph function expects (lon, lat) or (x, y) dimensions.\n # Hence, we need to swap the lat,lon axes.\n\n # NOTE: The motion field passed to the morph function can't have any NaNs.\n # Otherwise, it can produce a segmentation fault.\n morphed_field, mask = morph(input_precip.swapaxes(0, 1),\n ideal_motion.swapaxes(1, 2))\n\n mask = np.array(mask, dtype=bool)\n\n synthetic_observations = np.ma.MaskedArray(morphed_field, mask=mask)\n synthetic_observations = synthetic_observations[np.newaxis, :]\n\n for t in range(1, num_times):\n morphed_field, mask = morph(synthetic_observations[t - 1],\n ideal_motion.swapaxes(1, 2))\n mask = np.array(mask, dtype=bool)\n\n morphed_field = np.ma.MaskedArray(morphed_field[np.newaxis, :],\n mask=mask[np.newaxis, :])\n\n synthetic_observations = np.ma.concatenate([synthetic_observations,\n morphed_field],\n axis=0)\n\n # Swap back to (lat, lon)\n synthetic_observations = 
synthetic_observations.swapaxes(1, 2)\n\n synthetic_observations = np.ma.masked_invalid(synthetic_observations)\n\n synthetic_observations.data[np.ma.getmaskarray(synthetic_observations)] = 0\n\n return ideal_motion, synthetic_observations\n\n\nconvergence_arg_names = (\"input_precip, optflow_method_name, motion_type, \"\n \"num_times, max_rel_rmse\")\n\nconvergence_arg_values = [(reference_field, 'lk', 'linear_x', 2, 0.1),\n (reference_field, 'lk', 'linear_y', 2, 0.1),\n (reference_field, 'lk', 'linear_x', 3, 0.1),\n (reference_field, 'lk', 'linear_y', 3, 0.1),\n (reference_field, 'vet', 'linear_x', 2, 9),\n #(reference_field, 'vet', 'linear_y', 2, 9),\n #(reference_field, 'vet', 'linear_x', 3, 9),\n #(reference_field, 'vet', 'linear_y', 3, 9),\n (reference_field, 'darts', 'linear_x', 9, 25),\n (reference_field, 'darts', 'linear_y', 9, 25)]\n\n\n@pytest.mark.parametrize(convergence_arg_names, convergence_arg_values)\ndef test_optflow_method_convergence(input_precip, optflow_method_name,\n motion_type, num_times, max_rel_rmse):\n \"\"\"\n Test the convergence to the actual solution of the optical flow method used.\n\n We measure the error in the retrieved field by using the\n Relative RMSE = Rel_RMSE = sqrt(Relative MSE)\n\n - Rel_RMSE = 0%: no error\n - Rel_RMSE = 100%: The retrieved motion field has an average error\n equal in magnitude to the motion field.\n\n Relative RMSE is computed only un a region surrounding the precipitation\n field, were we have enough information to retrieve the motion field.\n The precipitation region includes the precipitation pattern plus a margin\n of approximately 20 grid points.\n\n\n Parameters\n ----------\n\n input_precip: numpy array (lat, lon)\n Input precipitation field.\n\n optflow_method_name: str\n Optical flow method name\n\n motion_type : str\n The supported motion fields are:\n\n - linear_x: (u=2, v=0)\n - linear_y: (u=0, v=2)\n \"\"\"\n\n ideal_motion, precip_obs = _create_observations(input_precip.copy(),\n motion_type,\n num_times=num_times)\n\n oflow_method = motion.get_method(optflow_method_name)\n\n if optflow_method_name == 'vet':\n # By default, the maximum number of iteration in the VET minimization\n # is maxiter=100.\n # To increase the stability of the tests to we increase this value to\n # maxiter=150.\n computed_motion = oflow_method(precip_obs, verbose=False,\n options=dict(maxiter=150))\n else:\n\n computed_motion = oflow_method(precip_obs, verbose=False)\n\n precip_data, _ = stp.utils.dB_transform(precip_obs.max(axis=0),\n inverse=True)\n precip_data.data[precip_data.mask] = 0\n\n precip_mask = ((uniform_filter(precip_data, size=20) > 0.1)\n & ~precip_obs.mask.any(axis=0))\n\n # To evaluate the accuracy of the computed_motion vectors, we will use\n # a relative RMSE measure.\n # Relative MSE = < (expected_motion - computed_motion)^2 > / <expected_motion^2 >\n # Relative RMSE = sqrt(Relative MSE)\n\n mse = ((ideal_motion - computed_motion)[:, precip_mask] ** 2).mean()\n\n rel_mse = mse / (ideal_motion[:, precip_mask] ** 2).mean()\n rel_rmse = np.sqrt(rel_mse) * 100\n print(f\"method:{optflow_method_name} ; \"\n f\"motion:{motion_type} ; times: {num_times} ; \"\n f\"rel_rmse:{rel_rmse:.2f}%\")\n assert rel_rmse < max_rel_rmse\n\n\nno_precip_args_names = (\"optflow_method_name, num_times\")\nno_precip_args_values = [('lk', 2), ('lk', 3),\n ('vet', 2), ('vet', 3),\n ('darts', 9)]\n\n\n@pytest.mark.parametrize(no_precip_args_names, no_precip_args_values)\ndef test_no_precipitation(optflow_method_name, num_times):\n \"\"\"\n Test 
that the motion methods work well with a zero precipitation in the\n domain.\n\n The expected result is a zero motion vector.\n\n Parameters\n ----------\n\n optflow_method_name: str\n Optical flow method name\n\n num_times : int\n Number of precipitation frames (times) used as input for the optical\n flow methods.\n \"\"\"\n zero_precip = np.zeros((num_times,) + reference_field.shape)\n motion_method = motion.get_method(optflow_method_name)\n uv_motion = motion_method(zero_precip, verbose=False)\n\n assert np.abs(uv_motion).max() < 0.01\n"
] |
[
[
"numpy.ma.MaskedArray",
"numpy.sqrt",
"numpy.ma.getmaskarray",
"numpy.abs",
"scipy.ndimage.uniform_filter",
"numpy.ma.concatenate",
"numpy.ma.masked_invalid",
"numpy.array",
"numpy.zeros"
]
] |
seungguini/DialoGPT
|
[
"6b89677496cc674fd5cf951c179e94d330292437"
] |
[
"LSP_train.py"
] |
[
"# Copyright (c) Microsoft Corporation. \n# Licensed under the MIT license. \n'''\n * @Desc: train GPT2 from scratch/ fine tuning.\n Modified based on Huggingface GPT-2 implementation\n'''\n\nimport json\nimport os\nimport sys\nimport argparse\nimport logging\nimport time\nimport tqdm\nimport datetime\nimport torch\n\nimport numpy as np\n\nfrom os.path import join\nfrom torch.distributed import get_rank, get_world_size\n\nfrom lsp_model import GPT2LMHeadModel, GPT2Tokenizer, GPT2Config, Adam\nfrom gpt2_training.train_utils import load_model, boolean_string, set_lr, get_eval_list_same_length\nfrom gpt2_training.eval_utils import eval_model_loss\n\nfrom data_loader import BucketingDataLoader, DynamicBatchingLoader, DistributedBucketingDataLoader\n\n\nfrom gpt2_training.distributed import all_reduce_and_rescale_tensors, all_gather_list\n\n\nlogging.basicConfig(\n format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\nINF = 100000000\nCACHE_EMPTY_STEP = 10000\nEVAL_STEP = 100000\n\n#########################################################################\n# Prepare Parser\n##########################################################################\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--model_name_or_path', type=str,\n help='pretrained model name or path to local checkpoint')\nparser.add_argument(\"--seed\", type=int, default=42)\nparser.add_argument(\"--max_seq_length\", type=int, default=128)\n\nparser.add_argument(\"--skip_eval\", action='store_true',\n help='If true, skip evaluation.')\nparser.add_argument(\"--init_checkpoint\", type=str)\nparser.add_argument(\"--train_input_file\", type=str)\nparser.add_argument(\"--eval_input_file\", type=str)\nparser.add_argument(\"--continue_from\", type=int, default=0)\n\nparser.add_argument(\"--train_batch_size\", type=int, default=4,\n help=\"batch size now means per GPU per step\")\nparser.add_argument(\"--gradient_accumulation_steps\", type=int, default=2,\n help=\"to increase effective batch size \"\n \"and reduce synchronization\")\nparser.add_argument(\"--eval_batch_size\", type=int, default=4)\nparser.add_argument(\"--learning_rate\", type=float, default=1e-5)\nparser.add_argument(\"--num_optim_steps\", type=int, default=1000000,\n help=\"new API specifies num update steps\")\nparser.add_argument(\"--valid_step\", type=int, default=10000,\n help=\"how many optim steps between validations\")\nparser.add_argument(\"--warmup_proportion\", type=float, default=0.1)\nparser.add_argument(\"--warmup_steps\", type=int, default=16000)\n\nparser.add_argument(\"--normalize_data\", type=boolean_string, default=True)\nparser.add_argument(\"--fp16\", type=boolean_string, default=True)\nparser.add_argument(\"--lr_schedule\", type=str,\n choices=['noam', 'noamwd', 'BERT', 'None'], default='noam')\nparser.add_argument(\"--loss_scale\", type=float, default=0)\nparser.add_argument(\"--no_token_id\", type=boolean_string, default=True)\n\nparser.add_argument(\"--output_dir\", type=str)\nparser.add_argument(\"--log_dir\", type=str)\nparser.add_argument('--pbar', type=boolean_string, default=True, help='turn on progress bar')\n\n# distributed\nparser.add_argument('--local_rank', type=int, default=-1,\n help='for torch.distributed')\nparser.add_argument('--config', help='JSON config file')\n\n\n# do normal parsing\nargs = parser.parse_args()\n\nif args.config is not None:\n # override argparse defaults by config JSON\n opts = 
json.load(open(args.config))\n for k, v in opts.items():\n if isinstance(v, str):\n # PHILLY ENV special cases\n if 'PHILLY_JOB_DIRECTORY' in v:\n v = v.replace('PHILLY_JOB_DIRECTORY',\n os.environ['PHILLY_JOB_DIRECTORY'])\n elif 'PHILLY_LOG_DIRECTORY' in v:\n v = v.replace('PHILLY_LOG_DIRECTORY',\n os.environ['PHILLY_LOG_DIRECTORY'])\n setattr(args, k, v)\n\n # command line should override config JSON\n argv = sys.argv[1:]\n overrides, _ = parser.parse_known_args(argv)\n for k, v in vars(overrides).items():\n if '--{k}' in argv:\n setattr(args, k, v)\n setattr(args, 'local_rank', overrides.local_rank)\n\n\nassert args.train_batch_size % args.gradient_accumulation_steps == 0, \\\n 'batch size % gradient accumulation steps != 0!'\nargs.train_batch_size = (args.train_batch_size\n // args.gradient_accumulation_steps)\nlogger.info('train batch size = {}, '\n 'new train batch size (after gradient accumulation) = {}'.format(\n args.train_batch_size*args.gradient_accumulation_steps,\n args.train_batch_size))\n\n\nif args.local_rank == -1:\n logger.info('CUDA available? {}'.format(str(torch.cuda.is_available())))\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n n_gpu = torch.cuda.device_count()\n args.device, args.n_gpu = device, n_gpu\nelse:\n # distributed training\n torch.cuda.set_device(args.local_rank)\n device = torch.device(\"cuda\", args.local_rank)\n # Initializes the distributed backend which will take care of\n # sychronizing nodes/GPUs\n torch.distributed.init_process_group(backend='nccl')\n n_gpu = torch.distributed.get_world_size()\n args.device, args.n_gpu = device, 1\n logger.info(\"device: {} n_gpu: {}, distributed training: {}, \"\n \"16-bits training: {}\".format(\n device, n_gpu, bool(args.local_rank != -1), args.fp16))\n\nnp.random.seed(args.seed)\ntorch.random.manual_seed(args.seed)\ntorch.cuda.manual_seed(args.seed)\nif n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n\ntimestamp = datetime.datetime.now().strftime('%Y-%m-%d%H%M%S')\n#print(args.output_dir)\noutput_dir = join(args.output_dir,\n 'GPT2.{}.{}.{}gpu.{}'.format(args.learning_rate,\n args.train_batch_size, n_gpu, timestamp))\nlog_dir = args.log_dir if args.log_dir is not None and len(args.log_dir) > 0 else output_dir\nif args.local_rank == -1 or get_rank() == 0:\n os.makedirs(output_dir, exist_ok=True)\n\nlogger.info('Input Argument Information')\nargs_dict = vars(args)\nfor a in args_dict:\n logger.info('%-28s %s' % (a, args_dict[a]))\n\n\n#########################################################################\n# Prepare Data Set\n##########################################################################\nenc = GPT2Tokenizer.from_pretrained(args.model_name_or_path)\n\nconfig = GPT2Config.from_json_file(\n join(args.model_name_or_path, 'config.json'))\n\nif args.local_rank == -1:\n train_dataloader = BucketingDataLoader(args.train_input_file,\n args.train_batch_size,\n args.max_seq_length)\nelse:\n train_dataloader = DistributedBucketingDataLoader(\n get_rank(), get_world_size(),\n args.train_input_file, args.train_batch_size,\n args.max_seq_length)\n\neval_dataloader_loss = DynamicBatchingLoader(\n args.eval_input_file, enc, args.normalize_data,\n args.eval_batch_size, args.max_seq_length)\n\neval_dataloader_gen = get_eval_list_same_length(\n args.eval_input_file, enc, args.eval_batch_size, True)\n\n\n#########################################################################\n# Prepare Model and 
Optimizer\n##########################################################################\nmodel = load_model(GPT2LMHeadModel(config), args.init_checkpoint,\n args, verbose=True)\nif args.local_rank != -1:\n # when from scratch make sure initial models are the same\n params = [p.data for p in model.parameters()]\n all_reduce_and_rescale_tensors(\n params, float(torch.distributed.get_world_size()))\n\nmodel_parameters = filter(lambda p: p.requires_grad, model.parameters())\ntotal_params = sum([np.prod(p.size()) for p in model_parameters])\nlogger.info('Number of parameter = {}'.format(total_params))\n\nparam_optimizer = list(model.named_parameters())\nno_decay = ['bias', 'ln'] # no decay for bias and LayerNorm (ln)\noptimizer_grouped_parameters = [\n {'params': [p for n, p in param_optimizer\n if not any(nd in n for nd in no_decay)],\n 'weight_decay': 0.01},\n {'params': [p for n, p in param_optimizer\n if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n]\n\nif args.fp16:\n logger.info('in fp16, using FusedAdam')\n try:\n from apex.optimizers import FP16_Optimizer\n from apex.optimizers import FusedAdam\n except ImportError:\n raise ImportError(\n \"Please install apex from https://www.github.com/nvidia/apex \"\n \"to use distributed and fp16 training.\")\n\n optimizer = FusedAdam(optimizer_grouped_parameters,\n lr=args.learning_rate,\n bias_correction=False,\n max_grad_norm=1.0)\n if args.loss_scale == 0:\n optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True,\n verbose=False)\n else:\n optimizer = FP16_Optimizer(optimizer,\n static_loss_scale=args.loss_scale,\n verbose=False)\nelse:\n optimizer = Adam(optimizer_grouped_parameters, args.learning_rate,\n max_grad_norm=1.0)\n\n#########################################################################\n# Training !\n##########################################################################\n\nif args.local_rank == -1 or get_rank() == 0:\n train_logger = open(join(log_dir, 'train_log.txt'), 'a+', buffering=1)\n eval_logger = open(join(log_dir, 'eval_log.txt'), 'a+', buffering=1)\n print('epoch,global_step,step,mean_loss,mean_ppl,n_token_real, n_token_total,epoch_time', file=train_logger)\n print('epoch,global_step,step,eval_loss,eval_ppl', file=eval_logger)\n\nglobal_step = 0\nstep = 0\nepoch = 0\n\nif args.continue_from:\n global_step = args.continue_from\n step = global_step*2 - 1\n\n\nif args.local_rank != -1:\n n_gpu = 1\nif args.local_rank == -1 or get_rank() == 0:\n if args.pbar:\n pbar = tqdm.tqdm(total=args.num_optim_steps, desc=f\"training\")\n else:\n pbar = None\n\nwhile True:\n model.train()\n (tr_loss, tr_ppl, mean_ppl, nb_tr_examples, nb_tr_steps) = 0.0, 0.0, 0.0, 0, 0\n n_token_real, n_token_total = 0, 0\n train_start_time_epoch = time.time()\n for batch in train_dataloader:\n # activate new training mode\n seq_len = batch[0].shape[1]\n batch = tuple(t.to(device) for t in batch)\n input_ids, position_ids, token_ids, label_ids, *_ = batch\n if args.no_token_id:\n token_ids = None\n loss, ppl = model(input_ids, position_ids, token_ids, label_ids)\n\n if n_gpu > 1:\n loss = loss.mean()\n ppl = ppl.mean()\n loss = loss / (args.train_batch_size / input_ids.shape[0])\n if args.fp16:\n optimizer.backward(loss)\n else:\n loss.backward()\n\n tr_loss += float(loss.item()) * (args.train_batch_size / input_ids.shape[0])\n nb_tr_examples += input_ids.size(0)\n nb_tr_steps += 1\n mean_loss = tr_loss / nb_tr_steps\n if ppl.item() < INF:\n tr_ppl += ppl.item()\n else:\n tr_ppl += mean_ppl\n mean_ppl = tr_ppl / nb_tr_steps\n\n 
n_token_total += input_ids.shape[0] * input_ids.shape[1]\n n_token_real += (input_ids != 0).sum().item()\n\n # gradient update\n step += 1\n if step % args.gradient_accumulation_steps == 0:\n set_lr(optimizer, global_step,\n args.lr_schedule, args.learning_rate,\n args.warmup_steps, args.warmup_proportion,\n config.n_embd, args.num_optim_steps)\n\n if args.local_rank != -1:\n grads = [p.grad.data for p in model.parameters()\n if p.requires_grad and p.grad is not None]\n all_reduce_and_rescale_tensors(grads, float(1))\n\n optimizer.step()\n optimizer.zero_grad()\n global_step += 1\n\n # Print log info to file\n if args.local_rank != -1:\n mean_loss = sum(all_gather_list(mean_loss)) / get_world_size()\n mean_ppl = sum(all_gather_list(mean_ppl)) / get_world_size()\n n_token_real_all_proc = sum(all_gather_list(n_token_real))\n n_token_total_all_proc = sum(all_gather_list(n_token_total))\n else:\n n_token_real_all_proc = n_token_real\n n_token_total_all_proc = n_token_total\n\n if args.local_rank == -1 or get_rank() == 0:\n epoch_time = time.time() - train_start_time_epoch\n if pbar is not None:\n pbar.set_postfix_str(\n f\"tok/s: {n_token_real_all_proc//epoch_time//1000}k \"\n f\"ppl: {mean_ppl:.2f} epoch: {epoch}\")\n pbar.update(1)\n print('{},{},{},{},{},{},{},{}'.format(\n epoch+1, global_step+1, step+1, mean_loss, mean_ppl,\n n_token_real_all_proc, n_token_total_all_proc, epoch_time),\n file=train_logger)\n\n if global_step % args.valid_step == 0:\n if args.local_rank == -1 or get_rank() == 0:\n # only rank 0 process evaluate\n torch.save(\n {k: (v.cpu() if v is not None else None) # save to cpu tensors\n for k, v in model.state_dict().items()},\n join(output_dir,\n f'GP2-pretrain-step-{global_step}.pkl'))\n\n eval_loss, eval_ppl = eval_model_loss(\n model, enc, eval_dataloader_loss, epoch, args)\n # enable generation step evaluation for now\n # gen_response = eval_model_generation(\n # model, enc, eval_dataloader_gen, epoch, args)\n '''\n # probably use beam search only for test set\n if False:\n gen_response_beam = eval_model_generation(\n model, enc, eval_dataloader_gen, epoch, args,\n use_beam_search=True, beam_width=3)\n '''\n print('{},{},{},{},{}'.format(\n epoch+1, global_step+1, step+1, eval_loss, eval_ppl),\n file=eval_logger)\n logger.info('current learning rate: '\n + str(optimizer.param_groups[0]['lr']))\n model.train()\n if global_step >= args.num_optim_steps:\n break\n\n if (step+1) % CACHE_EMPTY_STEP == 0:\n torch.cuda.empty_cache()\n\n if global_step >= args.num_optim_steps:\n break\n epoch += 1\n\n\nif args.local_rank == -1 or get_rank() == 0:\n if pbar is not None:\n pbar.close()\n train_logger.close()\n eval_logger.close()\n"
] |
[
[
"torch.distributed.init_process_group",
"torch.cuda.manual_seed",
"torch.cuda.set_device",
"numpy.random.seed",
"torch.random.manual_seed",
"torch.cuda.empty_cache",
"torch.cuda.manual_seed_all",
"torch.cuda.is_available",
"torch.device",
"torch.distributed.get_rank",
"torch.cuda.device_count",
"torch.distributed.get_world_size"
]
] |
BNIA/VitalSigns
|
[
"1c06284a7423fb837890b5d4b42567e8f14bf278"
] |
[
"VitalSigns/closecrawl.py"
] |
[
"# AUTOGENERATED! DO NOT EDIT! File to edit: notebooks/01_Close Crawl.ipynb (unless otherwise specified).\n\n\nfrom __future__ import absolute_import, print_function, unicode_literals\n\n\n\nfrom __future__ import absolute_import, print_function, unicode_literals\n\n\n\nfrom __future__ import absolute_import, print_function, unicode_literals\n\n\n\nfrom __future__ import absolute_import, print_function, unicode_literals\n\n\n\nfrom __future__ import absolute_import, print_function, unicode_literals\n\n\n\nfrom __future__ import absolute_import, print_function, unicode_literals\n\n\n__all__ = ['casetype', 'year', 'lowerbound', 'upperbound', 'output', 'description', 'HEADER', 'URL', 'CASE_PAT',\n 'CASE_TYPES', 'HTML_DIR', 'HTML_FILE', 'CHECKPOINT', 'NO_CASE', 'FEATURES', 'FIELDS', 'INTERNAL_FIELDS',\n 'description', 'Session', 'Spider', 'description', 'filter_addr', 'PUNCTUATION', 'street_address',\n 'TITLE_SPLIT_PAT', 'ZIP_STR', 'ZIP_PAT', 'MONEY_STR', 'MONEY_PAT', 'NULL_ADDR', 'STRIP_ADDR', 'Miner',\n 'FEATURES', 'FIELDS', 'INTERNAL_FIELDS', 'temp_output', 'description', 'Cleaner', 'description',\n 'close_crawl']\n\n# Cell\ncasetype = 'O'\nyear = 20\nlowerbound = 0000\nupperbound = 9000\noutput = 'outputfile.csv'\n\n# Cell\nimport mechanicalsoup\nmechanicalsoup.__version__\n\n# Cell\ndescription = \"\"\"settings\n\nConfiguration settings and global variables for the entire project. This bit\nis intended to only be used as a non-executable script.\n\"\"\"\n\nfrom os import path\n\n# browser settings\nHEADER = (\"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1)\"\n \" Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1\")\nURL = \"http://casesearch.courts.state.md.us/casesearch/\"\nCASE_PAT = \"24{type}{year}00{num}\"\n\n# scraping parameters\nCASE_TYPES = ('O', 'C')\n\n# temporary directory settings\nHTML_DIR = \"responses\"\nHTML_FILE = path.join(HTML_DIR, \"{case}\")\n\n# log file settings\nCHECKPOINT = \"checkpoint.json\"\nNO_CASE = \"no_case.json\"\n\n# data mining settings\nFEATURES = [\n \"Filing Date\",\n \"Case Number\",\n \"Case Type\",\n \"Title\",\n \"Plaintiff\",\n \"Defendant\",\n \"Address\",\n \"Business or Organization Name\",\n \"Party Type\",\n]\nFIELDS = FEATURES + [ \"Zip Code\", \"Partial Cost\" ]\nINTERNAL_FIELDS = [ \"Business or Organization Name\", \"Party Type\"]\n\n# Cell\ndescription = \"\"\"local_browser\n\nNOTICE: Close Crawl formerly ran its browser form submissions through Mechanize.\nThe module, however, is deprecated and does not support Python 3. The more\nstable and maintained Mechanize and BeautifulSoup wrapper, MechanicalSoup,\nhas since replaced the Mechanize methods to support Python 3.\n\nThis module contains the configurations and settings for the browser used for\ncrawling and scraping through the pages in Close Crawl. 
The script contains the\nimplementation of the Session class which inherits attributes from the classobj\nmechanize.Browser()\n\nThe script worked as an internal module for original Close Crawl executable, and could be imported\nas a module for testing purposes.\n\"\"\"\n\n# Cell\n#nbdev_comment from __future__ import absolute_import, print_function, unicode_literals\n# import cookielib\nimport http.cookiejar as cookielib # for Python3\nimport warnings\nfrom urllib.request import urlopen\n# from urllib import urlopen urllib.request\n\n## from mechanize import Browser, _http\nimport mechanicalsoup\n\n# from settings import HEADER, URL\n\nwarnings.filterwarnings(\"ignore\", category=UserWarning)\n\n# Cell\nclass Session(object):\n\n def __init__(self):\n \"\"\"Constructor\n\n Args:\n None\n\n Attributes:\n browser (`mechanize._mechanize.Browser`): browser object in session\n \"\"\"\n\n self.browser = mechanicalsoup.StatefulBrowser()\n\n # set error and debug handlers for the browser\n\n # cookie jar\n self.browser.set_cookiejar(cookielib.LWPCookieJar())\n\n # browser options\n # self.browser.set_handle_equiv(True)\n # self.browser.set_handle_gzip(True)\n # self.browser.set_handle_redirect(True)\n # self.browser.set_handle_referer(True)\n # self.browser.set_handle_robots(False)\n\n # follows refresh 0 but doesn't hang on refresh > 0\n #self.browser.set_handle_refresh( _http.HTTPRefreshProcessor(), max_time=1 )\n\n # user-Agent\n # self.browser.addheaders = [(\"User-agent\", HEADER)]\n\n def close(self):\n \"\"\"Destructor for Session. Closes current browser session\n\n Args:\n None\n\n Returns:\n None\n \"\"\"\n self.browser.close()\n\n def case_id_form(self, case):\n \"\"\"Grabs the form in the case searching page, and inputs the\n case number to return the response.\n\n Args:\n case (`str`): case ID to be scraped\n\n Returns:\n response (`str`): HTML response\n \"\"\"\n\n # iterate through the forms to find the correct one\n #for form in self.browser.forms():\n # if form.attrs[\"name\"] == \"inquiryFormByCaseNum\":\n # self.browser.form = form\n # break\n\n self.browser.select_form('form[action=\"/casesearch/inquiryByCaseNum.jis\"]')\n\n # submit case ID and return the response\n self.browser[\"caseId\"] = case\n response = self.browser.submit_selected()\n response = response.text\n # if any( case_type in response.upper() for case_type in (\"FORECLOSURE\", \"FORECLOSURE RIGHTS OF REDEMPTION\", \"MOTOR TORT\") ): print (response.upper)\n\n self.browser.open(\"http://casesearch.courts.state.md.us/casesearch/inquiryByCaseNum.jis\")\n # , \"MOTOR TORT\"\n return response if any(\n case_type in response.upper() for case_type in\n (\"FORECLOSURE\", \"FORECLOSURE RIGHTS OF REDEMPTION\")\n ) else False\n\n def disclaimer_form(self):\n \"\"\"Navigates to the URL to proceed to the case searching page\n\n Args:\n None\n\n Returns:\n None\n \"\"\"\n\n # visit the site\n print(URL)\n self.browser.open(\"http://casesearch.courts.state.md.us/casesearch/\")\n\n # select the only form on the page\n self.browser.select_form('form')\n\n with open(\"output1.html\", \"w\") as file:\n file.write(str( self.browser.page ))\n chkbxid = self.browser.page.find('input',{'name':'disclaimer'})['value']\n\n # select the checkbox\n self.browser[\"disclaimer\"] = [chkbxid]\n\n # submit the form\n self.browser.submit_selected()\n\n @staticmethod\n def server_running():\n \"\"\"Checks the status of the Casesearch servers\n\n Args:\n None\n\n Returns:\n `True` if server is up, `False` otherwise\n \"\"\"\n return 
urlopen(URL).getcode() == 200\n\n\n# Cell\n#nbdev_comment from __future__ import absolute_import, print_function, unicode_literals\nfrom json import dumps, load\nfrom os import path, makedirs\nfrom random import uniform\nimport sys\nfrom time import sleep\n\nfrom tqdm import trange\n\n# from local_browser import Session\n# from settings import CASE_PAT, CHECKPOINT, HTML_DIR, HTML_FILE\n\n# Cell\nclass Spider(object):\n\n def __init__(self, case_type, year, bounds=range(1, 6), gui=False):\n\n # initial disclaimer page for terms and agreements\n self.browser = Session()\n\n if not self.browser.server_running():\n sys.exit(\"Server is unavailable at the moment\")\n\n print (self.browser)\n\n self.browser.disclaimer_form()\n\n self.WAITING_TIME = 0\n self.case_type = case_type\n self.year = year\n self.bounds = bounds\n\n if not path.exists(HTML_DIR):\n makedirs(HTML_DIR)\n\n def save_response(self):\n\n case_range = trange(\n len(self.bounds), desc=\"Crawling\", leave=True\n )\n\n for case_num in case_range:\n\n if case_num and not case_num % 500:\n print(\"500 CASES SCRAPED. SCRIPT WILL WAIT 5 MINUTES TO RESUME\")\n\n for i in range(150, 0, -1):\n sleep(1)\n sys.stdout.write('\\r' + \"%02d:%02d\" % divmod(i, 60))\n sys.stdout.flush()\n\n case = CASE_PAT.format(\n type=self.case_type,\n year=self.year,\n num=\"{:04d}\".format(int(str(self.bounds[case_num])[-4:]))\n )\n\n try:\n\n wait = uniform(0.0, 0.5)\n sleep(wait)\n\n self.WAITING_TIME += wait\n\n case_range.set_description(\"Crawling {}\".format(case))\n\n stripped_html = self.browser.case_id_form(case)\n # print('returend this ' , stripped_html)\n\n if stripped_html:\n with open(\n HTML_FILE.format(case=case) + \".html\", 'w'\n ) as case_file:\n case_file.write(str(stripped_html))\n\n # pause process\n except KeyboardInterrupt:\n\n self.dump_json({\n \"error_case\":\n \"{:04d}\".format(int(str(self.bounds[case_num])[-4:])),\n \"year\": self.year,\n \"type\": self.type\n })\n print(\"Crawling paused at\", case)\n break\n\n # case does not exist\n except IndexError:\n\n self.dump_json({\"error_case\": case})\n print(case, \"does not exist\")\n break\n\n # close browser and end session\n self.close_sesh()\n\n @staticmethod\n def dump_json(data):\n\n with open(CHECKPOINT, \"r+\") as checkpoint:\n checkpoint_data = load(checkpoint)\n\n for key, val in data.items():\n checkpoint_data[key] = val\n\n checkpoint_data[key] = data\n checkpoint.seek(0)\n checkpoint.write(dumps(checkpoint_data))\n checkpoint.truncate()\n\n def close_sesh(self):\n self.browser.close()\n\n\n# Cell\ndescription = \"\"\"Patterns\n\nRegular expression patterns and string filtering functions implemented in the\nproject. 
This file is intended to only be used as a non-executable script.\n\n(\\d{1,4}\\s[\\w\\s]{1,20}((?:st(reet)?|ln|lane|ave(nue)?|r(?:oa)?d|highway|hwy|\ndr(?:ive)?|sq(uare)?|tr(?:ai)l|c(?:our)?t|parkway|pkwy|cir(cle)?|ter(?:race)?|\nboulevard|blvd|pl(?:ace)?)\\W?(?=\\s|$))(\\s(apt|block|unit)\\W?([A-Z]|\\d+))?)\n\"\"\"\n\n# Cell\nfrom re import compile as re_compile\nfrom re import I as IGNORECASE\nfrom string import punctuation\n\n# Cell\nPUNCTUATION = punctuation.replace('#', '') # all punctuations except '#'\n\nstreet_address = re_compile(\n \"(\" # begin regex group\n \"\\d{1,4}\\s\" # house number\n \"[\\w\\s]{1,20}\" # street name\n \"(\" # start street type group\n \"(?:st(reet)?|ln|lane|ave(nue)?\" # (st)reet, lane, ln, (ave)nue\n \"|r(?:oa)?d|highway|hwy|dr(?:ive)?\" # rd, road, hwy, highway, (dr)ive\n \"|sq(uare)?|tr(?:ai)l|c(?:our)?t\" # (sq)uare, (tr)ail, ct, court\n \"|parkway|pkwy|cir(cle)?|ter(?:race)?\" # parkway, pkwy, (cir)cle, (ter)race\n \"|boulevard|blvd|pl(?:ace)?\" # boulevard, bvld, (pl)ace\n \"\\W?(?=\\s|$))\" # look ahead for whitespace or end of string\n \")\" # end street type group\n \"(\\s(apt|block|unit)(\\W|#)?([\\d|\\D|#-|\\W])+)?\" # apt, block, unit number\n \")\", # end regex group\n IGNORECASE # case insensitive flag\n)\n\n# case insensitive delimiter for Titles\nTITLE_SPLIT_PAT = re_compile(\" vs \", IGNORECASE)\n\n# pattern for Baltimore zip codes\nZIP_STR = \"2\\d{4}\"\nZIP_PAT = re_compile(ZIP_STR)\n\n# regex pattern to capture monetary values between $0.00 and $999,999,999.99\n# punctuation insensitive\nMONEY_STR = \"\\$\\d{,3},?\\d{,3},?\\d{,3}\\.?\\d{2}\"\nMONEY_PAT = re_compile(MONEY_STR)\n\nNULL_ADDR = re_compile(\n \"^(\"\n \"(\" + MONEY_STR + \")\"\n \"|(\" + ZIP_STR + \")\"\n \"|(\\d+)\"\n \"|(\" + ZIP_STR + \".*\" + MONEY_STR + \")\"\n \")$\",\n IGNORECASE\n)\n\nSTRIP_ADDR = re_compile(\n \"(balto|\" + ZIP_STR + \"|md|\" + MONEY_STR + \").*\",\n IGNORECASE\n)\n\n\ndef filter_addr(address):\n\n try:\n return ''.join(\n street_address.search(\n address.translate( str.maketrans('','', PUNCTUATION) )\n ).group(0)\n )\n except AttributeError:\n return ''\n\n\n# Cell\n\"\"\"Miner\"\"\"\n#nbdev_comment from __future__ import absolute_import, print_function, unicode_literals\nfrom csv import DictWriter\nfrom json import dump, dumps, load\nfrom os import path\n\nfrom bs4 import BeautifulSoup\nfrom tqdm import trange\n\n# from patterns import MONEY_PAT, TITLE_SPLIT_PAT, ZIP_PAT, filter_addr\n# from settings import HTML_FILE, NO_CASE\n\n# Cell\n# data mining settings\nFEATURES = [\n \"Filing Date\",\n \"Case Number\",\n \"Case Type\",\n \"Title\",\n \"Plaintiff\",\n \"Defendant\",\n \"Address\",\n \"Business or Organization Name\",\n \"Party Type\",\n]\nFIELDS = FEATURES + [ \"Zip Code\", \"Partial Cost\" ]\nINTERNAL_FIELDS = [ \"Business or Organization Name\", \"Party Type\"]\n\nclass Miner(object):\n\n def __init__(self, responses, output, debug=False):\n\n self.responses = responses\n self.output = output\n self.debug = debug\n self.dataset = []\n self.maybe_tax = False\n self.features = [i + ':' for i in FEATURES]\n\n def scan_files(self):\n\n case_range = trange(len(self.responses), desc=\"Mining\", leave=True) \\\n if not self.debug else range(len(self.responses))\n\n for file_name in case_range:\n with open(\n HTML_FILE.format(case=self.responses[file_name]), 'r'\n ) as html_src:\n\n if not self.debug:\n case_range.set_description(\n \"Mining {}\".format(self.responses[file_name])\n )\n\n feature_list = self.scrape(html_src.read())\n row = 
self.distribute(feature_list)\n\n if not row:\n\n if not path.isfile(NO_CASE):\n with open(NO_CASE, 'w') as no_case_file:\n dump([], no_case_file)\n\n with open(NO_CASE, \"r+\") as no_case_file:\n no_case_data = load(no_case_file)\n no_case_data.append(str(self.responses[file_name][:-5]))\n no_case_file.seek(0)\n no_case_file.write(dumps(sorted(set(no_case_data))))\n no_case_file.truncate()\n\n self.dataset.extend(row)\n\n def export(self):\n\n file_exists = path.isfile(self.output)\n\n with open(self.output, 'a') as csv_file:\n writer = DictWriter(\n csv_file,\n fieldnames=[\n col for col in FIELDS if col not in INTERNAL_FIELDS\n ]\n )\n\n if not file_exists:\n writer.writeheader()\n\n for row in self.dataset:\n writer.writerow(row)\n\n def scrape(self, html_data):\n \"\"\"Scrapes the desired features\n\n Args:\n html_data: <str>, source HTML\n\n Returns:\n scraped_features: <dict>, features scraped and mapped from content\n \"\"\"\n\n soup = BeautifulSoup(html_data, \"html.parser\")\n\n # Search for the word 'tax in the document'\n if \"tax\" in soup.text.lower():\n self.maybe_tax = True\n\n # Create an array from all TR's with the inner HTML for each TR\n # Data we want is stored inside an arbitrary # of 'span' tags inside the TR's.\\\n tr_list = soup.find_all(\"tr\")\n\n # This will create an array for each TR with an array of SPAN values inside.\n feature_list = []\n for tag in tr_list:\n try:\n # Create an innerhtml array for all spans within a single TR\n tag = [j.string for j in tag.findAll(\"span\")]\n if set(tuple(tag)) & set(self.features):\n try:\n # Save the spans inner HTML if its not a header label\n tag = [i for i in tag if \"(each\" not in i.lower()]\n except AttributeError:\n continue\n feature_list.append(tag)\n\n except IndexError:\n continue\n\n # feature_list is an array [tr] of arrays [spans]. 
we want this flattened.\n # [tr1span1KEY, tr1span1VALUE, tr1span2KEY, tr1span2VALUE, tr2span1KEY, tr2span1VALUE, ]\n try:\n # flatten multidimensional list\n feature_list = [\n item.replace(':', '')\n for sublist in feature_list for item in sublist\n ]\n\n except AttributeError:\n pass\n\n return feature_list\n\n def distribute(self, feature_list):\n\n # feature_list ~= [html][tr][spans].innterHTML\n # [tr1span1KEY, tr1span1VALUE, tr1span2KEY, tr1span2VALUE, tr2span1KEY, tr2span1VALUE, ]\n def __pair(list_type):\n\n # break up elements with n-tuples greater than 2\n def __raw_business(i): return any( x in feature_list[i:i + 2][0] for x in INTERNAL_FIELDS )\n def __feature_list(i): return feature_list[i:i + 2][0] in FEATURES\n condition = __raw_business if list_type else __feature_list\n\n # then convert list of tuples to dict for faster lookup\n return [ tuple(feature_list[i:i + 2]) for i in range(0, len(feature_list), 2) if condition(i) ]\n\n raw_business = __pair(1) # [(x1,y1),(x2,y2),(x3,y3)] => INTERNAL_FIELDS\n feature_list = dict(__pair(0)) # FEATURES\n filtered_business = []\n\n # Party_Type = 'property address' not 'plaintiff' or 'defendant'\n # Input exists 'Business or Org Name' and == an Address\n for label, value in enumerate(raw_business):\n try:\n party_type = value[1].upper()\n section = raw_business[label + 1][0].upper()\n flag1 = party_type == \"PROPERTY ADDRESS\"\n flag2 = section == \"BUSINESS OR ORGANIZATION NAME\"\n if flag1 and flag2: filtered_business.append(raw_business[label + 1])\n\n except IndexError:\n print(\"Party Type issue at Case\", feature_list[\"Case Number\"])\n\n scraped_features = []\n\n for address in filtered_business:\n\n str_address = filter_addr(str(address[-1]))\n\n temp_features = {\n key: value for key, value in feature_list.items()\n if key in [\"Title\", \"Case Type\", \"Case Number\", \"Filing Date\"]\n }\n\n\n if temp_features[\"Case Type\"].upper() == \"FORECLOSURE\":\n temp_features[\"Case Type\"] = \"Mortgage\"\n\n elif temp_features[\"Case Type\"].upper() == \\\n \"FORECLOSURE RIGHTS OF REDEMPTION\" and self.maybe_tax:\n temp_features[\"Case Type\"] = \"Tax\"\n\n else:\n # break out of the rest of the loop if case type is neither\n continue\n\n if 'Title' not in temp_features:\n # print('feature_list');\n # print(feature_list);\n # print('\\n \\n raw_business');\n # print(raw_business);\n continue\n\n # break up Title feature into Plaintiff and Defendant\n try:\n temp_features[\"Plaintiff\"], temp_features[\"Defendant\"] = \\\n TITLE_SPLIT_PAT.split(temp_features[\"Title\"])\n\n except ValueError:\n temp_features[\"Plaintiff\"], temp_features[\"Defendant\"] = \\\n (\", \")\n\n temp_features[\"Address\"] = \\\n str_address if str_address else address[-1]\n\n temp_features[\"Zip Code\"] = ''.join(ZIP_PAT.findall(address[-1]))\n\n temp_features[\"Partial Cost\"] = ''.join(\n MONEY_PAT.findall(address[-1])\n )\n\n scraped_features.append(temp_features)\n temp_features = {}\n\n return scraped_features\n\n\n# Cell\n# from settings import CHECKPOINT, HTML_DIR\nfrom os import path, remove, walk\n\ntemp_output = \"temp_data.csv\"\n\n# Cell\ndescription = \"\"\"Cleaner\n\nThis module implements post-scraping cleaning processes on the raw initial\ndataset. 
Processes include stripping excess strings off Address values,\nremoving Zip Code and Partial Cost values mislabeled as Address, and merging\nrows containing blank values in alternating features.\n\nThe script works as an internal module for Close Crawl, but can be executed\nas a standalone to manually process datasets:\n\n $ python cleaner.py <path/to/old/dataset> <path/of/new/dataset>\n\n\"\"\"\n\n# Cell\n#nbdev_comment from __future__ import absolute_import, print_function, unicode_literals\nfrom pandas import DataFrame, concat, read_csv, to_datetime\n# from patterns import NULL_ADDR, STRIP_ADDR, filter_addr, punctuation\n\n# Cell\nclass Cleaner(object):\n \"\"\"Class object for cleaning the raw dataset extracted after the initial\n scraping\n \"\"\"\n\n def __init__(self, path):\n \"\"\"Constructor for Cleaner\n\n Args:\n path (`str`): path to input CSV dataset\n\n Attributes:\n df (`pandas.core.frame.DataFrame`): initial DataFrame\n columns (`list` of `str`): columns of the DataFrame\n clean_df (`pandas.core.frame.DataFrame`): final DataFrame to be\n outputted\n \"\"\"\n\n self.df = self.prettify(read_csv(path))\n\n self.columns = list(self.df)\n self.clean_df = []\n\n @staticmethod\n def prettify(df, internal=True):\n \"\"\"Drops duplicates, sorts and fills missing values in the DataFrame\n to make it manageable.\n\n Args:\n df (`pandas.core.frame.DataFrame`): DataFrame to be managed\n internal (`bool`, optional): flag for determining state of\n DataFrame\n\n Returns:\n df (`pandas.core.frame.DataFrame`): organized DataFrame\n \"\"\"\n\n df.drop_duplicates(inplace=True, keep='last', subset=[\"Case Number\"] )\n df[\"Filing Date\"] = to_datetime(df[\"Filing Date\"])\n\n df.sort_values(\n [\"Filing Date\", \"Case Number\", \"Address\"],\n ascending=[True] * 3,\n inplace=True\n )\n\n if internal:\n df[\"Zip Code\"] = df[\"Zip Code\"].fillna(0.0).astype(int)\n df[\"Zip Code\"] = df[\"Zip Code\"].replace(0, '')\n return df\n\n def clean_addr(self):\n \"\"\"Cleans excess strings off Address values and removes Zip Code and\n Partial Cost values mislabeled as Address.\n\n Args:\n None\n\n Returns:\n None\n \"\"\"\n\n def clean_string(addr):\n \"\"\"Applies regular expressions and other filters on Address\n values\n\n Args:\n addr (`str`): Address value to be filtered\n\n Returns:\n addr (`str`): filtered Address value\n \"\"\"\n\n # if value does not match the street_address pattern\n if not filter_addr(addr): # patterns.filter_addr\n\n if NULL_ADDR.sub('', addr): # value may contain valid Address\n return str(\n STRIP_ADDR.sub(\n '', addr) # strip off Zip Code and Partial Cost\n ).translate(\n {ord(c): None for c in punctuation}\n ).strip() # strip off punctuations\n\n return addr\n\n print(\"Cleaning addresses...\", end=\" \")\n\n self.df[\"Address\"] = self.df[\"Address\"].apply(\n lambda x: clean_string(x)\n )\n self.df[\"Address\"] = self.df[\"Address\"].apply(\n lambda x: NULL_ADDR.sub('', x)\n )\n\n # replace empty string values with NULL\n self.df[\"Zip Code\"] = self.df[\"Zip Code\"].replace('', float(\"nan\"))\n self.df[\"Address\"] = self.df[\"Address\"].replace('', float(\"nan\"))\n\n print(\"Done\")\n\n @staticmethod\n def combine_rows(row):\n \"\"\"Merges rows after filtering out common values\n\n Args:\n row (`list` of `list` of `str`): groupby(\"Case Number\") rows\n\n Returns:\n (`list` of `str`): merged row\n \"\"\"\n\n def __filter_tuple(col):\n \"\"\"Filters common values from rows\n\n Args:\n col (`tuple` of `str`): values per column\n\n Returns:\n value (`str`): common 
value found per mergeable rows\n \"\"\"\n\n for value in set(col):\n if value == value: # equivalent to value != NaN\n return value\n\n return [__filter_tuple(x) for x in zip(*row)]\n\n @staticmethod\n def mergeable(bool_vec):\n \"\"\"Determines if groupby(\"Case Number\") rows are mergeable\n\n Example:\n bool_vec = [\n [True, True, True, True, True, True, False, True, True],\n [True, True, True, True, True, True, True, False, False],\n [True, True, True, True, True, True, False, False, False]\n ]\n\n __sum_col(bool_vec) -> [3, 3, 3, 3, 3, 3, 1, 1, 1]\n\n __bool_pat(__sum_col(bool_vec)) -> True\n\n Args:\n bool_vec (`list` of `bool`): represents non-NULL values\n\n Returns:\n (`bool`): True if rows are mergeable\n \"\"\"\n\n def __sum_col():\n \"\"\"Sums columns\n\n Args:\n None\n\n Returns:\n (`list` of `int`): sum of columns\n \"\"\"\n return [sum(x) for x in zip(*bool_vec)]\n\n def __bool_pat(row):\n \"\"\"Determines mergeability\n\n Args:\n None\n\n Returns:\n (`bool`): True if rows are mergeable\n \"\"\"\n return set(row[-3:]) == set([1]) and set(row[:-3]) != set([1])\n\n return True if __bool_pat(__sum_col()) else False\n\n def merge_nulls(self):\n \"\"\"Splits DataFrames into those with NULL values to be merged, and then\n later merged with the original DataFrame\n\n Args:\n None\n\n Returns:\n None\n \"\"\"\n\n print(\"Merging rows...\", end=\" \")\n print(self.df)\n\n # filter out rows with any NULL values\n origin_df = self.df.dropna()\n\n # filter out rows only with NULL values\n null_df = self.df[self.df.isnull().any(axis=1)]\n\n # boolean representation of the DataFrame with NULL values\n bool_df = null_df.notnull()\n\n # (`list` of `dict` of `str` : `str`) to be converted to a DataFrame\n new_df = []\n\n for i in null_df[\"Case Number\"].unique():\n bool_row = bool_df[null_df[\"Case Number\"] == i]\n new_row = null_df[null_df[\"Case Number\"] == i]\n\n # if the rows are mergeable, combine them\n if self.mergeable(bool_row.values):\n new_row = self.combine_rows(new_row.values.tolist())\n\n new_df.append(\n {\n feature: value\n for feature, value in zip(self.columns, new_row)\n }\n )\n\n # else, treat them individually\n else:\n new_row = new_row.values.tolist()\n\n for row in new_row:\n new_df.append(\n {\n feature: value\n for feature, value in zip(self.columns, row)\n }\n )\n\n # merge the DataFrames back\n self.clean_df = concat(\n [origin_df, DataFrame(new_df)]\n ).reset_index(drop=True)\n\n # prettify the new DataFrame\n self.clean_df = self.prettify(\n self.clean_df[self.columns], internal=False\n )\n\n print(\"Done\")\n\n def init_clean(self):\n \"\"\"Initializes cleaning process\n\n Args:\n None\n\n Returns:\n None\n \"\"\"\n print(self.df)\n self.clean_addr()\n print(self.df)\n self.merge_nulls()\n\n def download(self, output_name):\n \"\"\"Downloads the cleaned and manipulated DataFrame into a CSV file\n\n Args:\n output_name (`str`): path of the new output file\n\n Returns:\n None\n \"\"\"\n self.clean_df.rename(columns={\"Address\": \"address\"}, inplace=True)\n self.clean_df.to_csv(output_name, index=False)\n\n\n# Cell\ndescription = \"\"\"main\n\nThe main executable script for Close Crawl. 
This file manages types, flags\nand constraints for the case type, year and output data file.\n\nUsage:\n $ python main.py <case_type> <case_year> <path/of/new/dataset>\n <opt: lower_bound> <opt: upper_bound> <opt: debug>\n\nExample usage:\n $ python main.py O 2015 test_set.csv -l=300 -u=600 -d=1\n\n\"\"\"\n\n# Cell\n#nbdev_comment from __future__ import absolute_import, print_function, unicode_literals\nfrom json import dump, dumps, load\nfrom os import path, remove, walk\nfrom shutil import rmtree\nfrom time import time\n\n# Cell\ndef close_crawl(case_type, case_year, output, cases='', lower_bound=0,\n upper_bound=0, debug=False, scrape=True, mine=True,\n clean=True):\n \"\"\"Main function for Close Crawl.\n\n Args:\n case_type (`str`): type of foreclosure case, options are 'O' and 'C'\n case_year (`str`): year of foreclosure cases\n output (`str`): path of the output CSV file, along with the valid\n extension (.csv)\n lower_bound (`int`, optional): lower bound of range of cases\n upper_bound (`int`, optional): upper bound of range of cases\n debug (`bool`, optional): option for switching between debug mode.\n Default -> True\n\n Returns:\n None\n \"\"\"\n\n temp_output = \"temp_data.csv\"\n wait = 0\n case_list = []\n\n print('checkpoint')\n if not path.isfile(CHECKPOINT):\n print(\"Initializing project...\")\n with open(CHECKPOINT, \"w\") as checkpoint:\n dump(\n {\n \"last_case\": \"{:04d}\".format(int(str(lower_bound)[-4:])),\n \"type\": case_type,\n \"year\": case_year[-2:],\n \"error_case\": '',\n },\n checkpoint\n )\n\n print('cases')\n if not cases:\n\n with open(CHECKPOINT) as checkpoint:\n prev_bound = int(load(checkpoint)[\"last_case\"])\n if not lower_bound:\n lower_bound = prev_bound\n upper_bound = upper_bound if int(upper_bound) > int(lower_bound) \\\n else str(lower_bound + 5)\n\n case_list = range(int(lower_bound), int(upper_bound) + 1)\n\n else:\n\n with open(cases) as manual_cases:\n case_list = sorted(list(set(load(manual_cases))))\n\n print('scrape')\n if scrape:\n spider = Spider(\n case_type=case_type, year=case_year[-2:],\n bounds=case_list, gui=False\n )\n\n spider.save_response()\n\n wait = spider.WAITING_TIME\n\n print('HTML_DIR', HTML_DIR)\n file_array = [filenames for (dirpath, dirnames, filenames)\n in walk(HTML_DIR)][0]\n\n start_mine = time()\n print('mine')\n if mine:\n miner = Miner(file_array, temp_output)\n miner.scan_files()\n miner.export()\n\n\n print('clean')\n if clean:\n df_obj = Cleaner(temp_output)\n\n df_obj.init_clean()\n df_obj.download(output)\n\n print('save')\n\n with open(CHECKPOINT, \"r+\") as checkpoint:\n checkpoint_data = load(checkpoint)\n print(\"checkpoint_data\")\n print(checkpoint_data)\n checkpoint_data[\"last_case\"] = sorted(file_array)[-1].split('.')[0][-4:]\n checkpoint.seek(0)\n checkpoint.write(dumps(checkpoint_data))\n checkpoint.truncate()\n\n \"\"\"\n with open(CHECKPOINT, \"r+\") as checkpoint:\n checkpoint_data = load(checkpoint)\n\n for key, val in data.items():\n checkpoint_data[key] = val\n\n checkpoint_data[key] = data\n checkpoint.seek(0)\n checkpoint.write(dumps(checkpoint_data))\n checkpoint.truncate()\n \"\"\"\n\n # print(\"Crawling runtime: {0:.2f} s\".format((end_crawl - start_crawl)))\n # print(\"Downloading runtime: {0:.2f} s\".format( ((end_crawl - start_crawl) - wait)) )\n # print(\"Mining runtime: {0:.2f} s\".format((end_mine - start_mine)))\n # print(\"Program runtime: {0:.2f} s\".format((end - start)))\n print(\"------------ SCRAPING COMPLETED ------------\")\n\n\n# Cell\n#nbdev_comment from 
__future__ import absolute_import, print_function, unicode_literals\nimport sys\nfrom textwrap import dedent"
] |
[
[
"pandas.to_datetime",
"pandas.read_csv",
"pandas.DataFrame"
]
] |
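A minimal, self-contained sketch of how the three pandas entry points tagged for the record above (pandas.read_csv, pandas.to_datetime, pandas.DataFrame) fit the clean-and-merge step shown in its code field; the file name, column names and values below are illustrative assumptions, not taken from the record.

import pandas as pd

# Build a tiny stand-in for the miner's intermediate CSV (pandas.DataFrame).
rows = [
    {"Case Number": "24O15000001", "Filing Date": "2015-03-02", "Address": None},
    {"Case Number": "24O15000001", "Filing Date": None, "Address": "123 Main St"},
]
pd.DataFrame(rows).to_csv("temp_data.csv", index=False)

df = pd.read_csv("temp_data.csv")                                        # pandas.read_csv
df["Filing Date"] = pd.to_datetime(df["Filing Date"], errors="coerce")   # pandas.to_datetime

# groupby().first() keeps the first non-null value per column within each group,
# the same idea as the row merging done by combine_rows()/merge_nulls() above.
print(df.groupby("Case Number", as_index=False).first())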
sujavarghese/data-validator
|
[
"e0c5d94da797cb43b17d6ee193d337cbcb602f49"
] |
[
"tests/test_helper.py"
] |
[
"import re\nimport numpy\nimport pandas\nfrom tests import TestCase\nfrom file_validator.helper.utils import (\n FLOAT_RE, FIRST_CAP_RE, ALL_CAP_RE, clean_value, is_empty, to_snake, to_int, to_str, to_float, dollar_str_to_float,\n)\n\n\nclass TestUtils(TestCase):\n def test_should_match_float_re(self):\n self.assertEqual(True, True if re.match(FLOAT_RE, '2.0') else False)\n self.assertEqual(True, True if re.match(FLOAT_RE, '2.00') else False)\n self.assertEqual(True, True if re.match(FLOAT_RE, '02.00') else False)\n self.assertEqual(True, True if re.match(FLOAT_RE, '02') else False)\n self.assertEqual(True, True if re.match(FLOAT_RE, '2') else False)\n self.assertEqual(False, True if re.match(FLOAT_RE, 'True') else False)\n self.assertEqual(False, True if re.match(FLOAT_RE, '[1,2]') else False)\n\n def test_should_verify_first_cap_re(self):\n self.assertEqual(False, True if re.match(FIRST_CAP_RE, '. Some text') else False) # True?\n self.assertEqual(True, True if re.match(FIRST_CAP_RE, '.Some text') else False) # True?\n self.assertEqual(False, True if re.match(FIRST_CAP_RE, '. another text') else False)\n self.assertEqual(False, True if re.match(FIRST_CAP_RE, '.another text') else False)\n self.assertEqual(False, True if re.match(FIRST_CAP_RE, 'More text') else False)\n self.assertEqual(True, True if re.match(FIRST_CAP_RE, ' More text') else False)\n self.assertEqual(True, True if re.match(FIRST_CAP_RE, ' EvenMoreText') else False) # True?\n self.assertEqual(False, True if re.match(FIRST_CAP_RE, ' evenMore') else False) # True?\n self.assertEqual(True, True if re.match(FIRST_CAP_RE, '.EvenMoreText') else False) # True?\n self.assertEqual(True, True if re.match(FIRST_CAP_RE, ' Even more text... ') else False) # True\n\n def test_should_verify_all_cap_re(self):\n self.assertEqual(True, True if re.match(ALL_CAP_RE, '1W') else False)\n self.assertEqual(True, True if re.match(ALL_CAP_RE, '1West') else False)\n self.assertEqual(True, True if re.match(ALL_CAP_RE, '1West north') else False)\n self.assertEqual(True, True if re.match(ALL_CAP_RE, '1West north') else False)\n\n self.assertEqual(False, True if re.match(ALL_CAP_RE, 'oneWest north') else False)\n self.assertEqual(False, True if re.match(ALL_CAP_RE, 'oneWest') else False)\n self.assertEqual(True, True if re.match(ALL_CAP_RE, 'eW') else False)\n self.assertEqual(False, True if re.match(ALL_CAP_RE, ' eW') else False)\n self.assertEqual(False, True if re.match(ALL_CAP_RE, '.eW') else False)\n self.assertEqual(False, True if re.match(ALL_CAP_RE, '. 
eW') else False)\n self.assertEqual(False, True if re.match(ALL_CAP_RE, ' eW ') else False)\n\n def test_should_clean_value(self):\n self.assertEqual(' some text ', clean_value(' some text ')) # No change\n self.assertEqual(dict(), clean_value(dict())) # No change\n self.assertEqual(1, clean_value(pandas.Series([1, 2])))\n self.assertEqual({\"key1\": 1, \"key2\": 2}, clean_value(pandas.Series([{\"key1\": 1, \"key2\": 2}, {\"key3\": 3, \"key4\": 4}])))\n self.assertEqual(('key1', 1), clean_value(pandas.Series([('key1', 1), ('key2', 2)])))\n\n def test_should_verify_is_empty(self):\n self.assertEqual(False, is_empty(' some text '))\n self.assertEqual(False, is_empty(dict()))\n self.assertEqual(False, is_empty(list()))\n self.assertEqual(False, is_empty(tuple()))\n self.assertEqual(False, is_empty(set()))\n self.assertEqual(False, is_empty(0))\n self.assertEqual(False, is_empty(0.0))\n self.assertEqual(False, is_empty(\" \"))\n self.assertEqual(True, is_empty(str()))\n self.assertEqual(True, is_empty(u''))\n self.assertEqual(True, is_empty(\"\"))\n self.assertEqual(True, is_empty(None))\n self.assertEqual(True, is_empty(numpy.nan))\n\n def test_should_verify_to_snake(self):\n self.assertEqual(' some text ', to_snake(' some text ')) # no change\n self.assertEqual('some text', to_snake('some text')) # no change\n self.assertEqual('sometext', to_snake('sometext')) # no change\n self.assertEqual('some_text', to_snake('SomeText'))\n self.assertEqual('._some_text', to_snake('.SomeText'))\n self.assertEqual('some_text', to_snake('someText'))\n self.assertEqual(' some_text', to_snake(' someText'))\n self.assertEqual(' _some _text', to_snake(' Some Text'))\n\n def test_should_verify_to_int(self):\n self.assertEqual(1, to_int(1)) # no change\n self.assertEqual(1, to_int(1.0))\n self.assertEqual(1, to_int('1'))\n self.assertEqual(1, to_int('1.0'))\n self.assertEqual(None, to_int('(1.0,)'))\n self.assertEqual(None, to_int('test'))\n self.assertEqual(None, to_int(dict()))\n self.assertEqual(None, to_int(list()))\n self.assertEqual(None, to_int(set()))\n self.assertEqual(None, to_int(tuple()))\n self.assertEqual(None, to_int(pandas.Series([])))\n\n def test_should_verify_to_str(self):\n self.assertEqual('1', to_str(1))\n self.assertEqual('1.0', to_str(1.0))\n self.assertEqual('1', to_str('1')) # no change\n self.assertEqual('1.0', to_str('1.0')) # no change\n self.assertEqual('(1.0,)', to_str('(1.0,)')) # no change\n self.assertEqual('test', to_str('test')) # no change\n self.assertEqual('{}', to_str(dict()))\n self.assertEqual('[]', to_str(list()))\n self.assertEqual('set()', to_str(set()))\n self.assertEqual('()', to_str(tuple()))\n self.assertEqual('Series([], dtype: float64)', to_str(pandas.Series([])))\n self.assertEqual(None, to_str(str()))\n self.assertEqual(None, to_str(None))\n self.assertEqual(None, to_str(None))\n\n def test_should_verify_to_float(self):\n self.assertEqual(1.0, to_float(1))\n self.assertEqual(1.0, to_float(1.0)) # no change\n self.assertEqual(1.0, to_float('1'))\n self.assertEqual(1.0, to_float('1.0'))\n self.assertEqual(None, to_float('(1.0,)'))\n self.assertEqual(None, to_float('test'))\n self.assertEqual(None, to_float(dict()))\n self.assertEqual(None, to_float(list()))\n self.assertEqual(None, to_float(set()))\n self.assertEqual(None, to_float(tuple()))\n self.assertEqual(None, to_float(pandas.Series([])))\n self.assertEqual(None, to_float(str()))\n self.assertEqual(None, to_float(None))\n self.assertEqual(None, to_float(None))\n\n def 
test_should_verify_dollar_str_to_float(self):\n self.assertEqual(None, dollar_str_to_float(1))\n self.assertEqual(None, dollar_str_to_float(1.0))\n self.assertEqual(1.0, dollar_str_to_float('1'))\n self.assertEqual(1.0, dollar_str_to_float('1.0')) # no change\n self.assertEqual(-1.0, dollar_str_to_float('(1.0,)'))\n self.assertEqual(None, dollar_str_to_float('test'))\n self.assertEqual(None, dollar_str_to_float(dict()))\n self.assertEqual(None, dollar_str_to_float(list()))\n self.assertEqual(None, dollar_str_to_float(set()))\n self.assertEqual(None, dollar_str_to_float(tuple()))\n self.assertEqual(None, dollar_str_to_float(pandas.Series([])))\n self.assertEqual(None, dollar_str_to_float(str()))\n self.assertEqual(None, dollar_str_to_float(None))\n self.assertEqual(None, dollar_str_to_float(None))\n\n self.assertEqual(1.0, dollar_str_to_float('$1.0'))\n self.assertEqual(1.0, dollar_str_to_float('$$1.0'))\n self.assertEqual(None, dollar_str_to_float('$1. 0'))\n self.assertEqual(None, dollar_str_to_float('$1 .0'))\n self.assertEqual(None, dollar_str_to_float('$1 . 0'))\n self.assertEqual(1.0, dollar_str_to_float('$ 1.0'))\n self.assertEqual(None, dollar_str_to_float('&1.0'))\n self.assertEqual(None, dollar_str_to_float('&100,000.0'))\n self.assertEqual(100000.0, dollar_str_to_float('$100,000.0'))\n self.assertEqual(1000000.0, dollar_str_to_float('$$1,000,000.0'))\n"
] |
[
[
"pandas.Series"
]
] |
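A minimal sketch of the single pandas API tagged for the record above (pandas.Series), in the spirit of the clean_value()/is_empty()/to_int() tests in its code field; the values are illustrative assumptions only.

import pandas as pd

# clean_value() in the tests above is expected to pick the first element out
# of a Series, so s.iloc[0] reproduces the asserted behaviour here.
s = pd.Series([{"key1": 1, "key2": 2}, {"key3": 3, "key4": 4}])
print(s.iloc[0])            # {'key1': 1, 'key2': 2}

# An empty Series is what helpers such as to_int()/to_float() above are
# asserted to reject (returning None).
empty = pd.Series([], dtype="object")
print(len(empty))           # 0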
pnuehrenberg/Mask_RCNN
|
[
"b98dd80842c95487ad842c216b8868cac5c9a073"
] |
[
"mrcnn/model.py"
] |
[
"\"\"\"\nMask R-CNN\nThe main Mask R-CNN model implementation.\n\nCopyright (c) 2017 Matterport, Inc.\nLicensed under the MIT License (see LICENSE for details)\nWritten by Waleed Abdulla\n\"\"\"\n\nimport os\nimport random\nimport datetime\nimport re\nimport math\nimport logging\nfrom collections import OrderedDict\nimport multiprocessing\nimport numpy as np\nimport tensorflow as tf\nimport keras\nimport keras.backend as K\nimport keras.layers as KL\nimport keras.engine as KE\nimport keras.models as KM\n\nfrom mrcnn import utils\n\n# Requires TensorFlow 1.3+ and Keras 2.0.8+.\nfrom distutils.version import LooseVersion\nassert LooseVersion(tf.__version__) >= LooseVersion(\"1.3\")\nassert LooseVersion(keras.__version__) >= LooseVersion('2.0.8')\n\n# # Required for cudnn after changing resolution\n# config = tf.ConfigProto()\n# config.gpu_options.allow_growth = True\n# session = tf.Session(config=config)\n\n############################################################\n# Utility Functions\n############################################################\n\ndef log(text, array=None):\n \"\"\"Prints a text message. And, optionally, if a Numpy array is provided it\n prints it's shape, min, and max values.\n \"\"\"\n if array is not None:\n text = text.ljust(25)\n text += (\"shape: {:20} \".format(str(array.shape)))\n if array.size:\n text += (\"min: {:10.5f} max: {:10.5f}\".format(array.min(),array.max()))\n else:\n text += (\"min: {:10} max: {:10}\".format(\"\",\"\"))\n text += \" {}\".format(array.dtype)\n print(text)\n\n\nclass BatchNorm(KL.BatchNormalization):\n \"\"\"Extends the Keras BatchNormalization class to allow a central place\n to make changes if needed.\n\n Batch normalization has a negative effect on training if batches are small\n so this layer is often frozen (via setting in Config class) and functions\n as linear layer.\n \"\"\"\n def call(self, inputs, training=None):\n \"\"\"\n Note about training values:\n None: Train BN layers. This is the normal mode\n False: Freeze BN layers. Good when batch size is small\n True: (don't use). Set layer in training mode even when making inferences\n \"\"\"\n return super(self.__class__, self).call(inputs, training=training)\n\n\ndef compute_backbone_shapes(config, image_shape):\n \"\"\"Computes the width and height of each stage of the backbone network.\n\n Returns:\n [N, (height, width)]. Where N is the number of stages\n \"\"\"\n if callable(config.BACKBONE):\n return config.COMPUTE_BACKBONE_SHAPE(image_shape)\n\n # Currently supports ResNet only\n assert config.BACKBONE in [\"resnet50\", \"resnet101\"]\n return np.array(\n [[int(math.ceil(image_shape[0] / stride)),\n int(math.ceil(image_shape[1] / stride))]\n for stride in config.BACKBONE_STRIDES])\n\n\n############################################################\n# Resnet Graph\n############################################################\n\n# Code adopted from:\n# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py\n\ndef identity_block(input_tensor, kernel_size, filters, stage, block,\n use_bias=True, train_bn=True):\n \"\"\"The identity_block is the block that has no conv layer at shortcut\n # Arguments\n input_tensor: input tensor\n kernel_size: default 3, the kernel size of middle conv layer at main path\n filters: list of integers, the nb_filters of 3 conv layer at main path\n stage: integer, current stage label, used for generating layer names\n block: 'a','b'..., current block label, used for generating layer names\n use_bias: Boolean. 
To use or not use a bias in conv layers.\n train_bn: Boolean. Train or freeze Batch Norm layers\n \"\"\"\n nb_filter1, nb_filter2, nb_filter3 = filters\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = KL.Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a',\n use_bias=use_bias)(input_tensor)\n x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',\n name=conv_name_base + '2b', use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c',\n use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)\n\n x = KL.Add()([x, input_tensor])\n x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)\n return x\n\n\ndef conv_block(input_tensor, kernel_size, filters, stage, block,\n strides=(2, 2), use_bias=True, train_bn=True):\n \"\"\"conv_block is the block that has a conv layer at shortcut\n # Arguments\n input_tensor: input tensor\n kernel_size: default 3, the kernel size of middle conv layer at main path\n filters: list of integers, the nb_filters of 3 conv layer at main path\n stage: integer, current stage label, used for generating layer names\n block: 'a','b'..., current block label, used for generating layer names\n use_bias: Boolean. To use or not use a bias in conv layers.\n train_bn: Boolean. Train or freeze Batch Norm layers\n Note that from stage 3, the first conv layer at main path is with subsample=(2,2)\n And the shortcut should have subsample=(2,2) as well\n \"\"\"\n nb_filter1, nb_filter2, nb_filter3 = filters\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = KL.Conv2D(nb_filter1, (1, 1), strides=strides,\n name=conv_name_base + '2a', use_bias=use_bias)(input_tensor)\n x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',\n name=conv_name_base + '2b', use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base +\n '2c', use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)\n\n shortcut = KL.Conv2D(nb_filter3, (1, 1), strides=strides,\n name=conv_name_base + '1', use_bias=use_bias)(input_tensor)\n shortcut = BatchNorm(name=bn_name_base + '1')(shortcut, training=train_bn)\n\n x = KL.Add()([x, shortcut])\n x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)\n return x\n\n\ndef resnet_graph(input_image, architecture, stage5=False, train_bn=True):\n \"\"\"Build a ResNet graph.\n architecture: Can be resnet50 or resnet101\n stage5: Boolean. If False, stage5 of the network is not created\n train_bn: Boolean. 
Train or freeze Batch Norm layers\n \"\"\"\n assert architecture in [\"resnet50\", \"resnet101\"]\n # Stage 1\n x = KL.ZeroPadding2D((3, 3))(input_image)\n x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)\n x = BatchNorm(name='bn_conv1')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding=\"same\")(x)\n # Stage 2\n x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), train_bn=train_bn)\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', train_bn=train_bn)\n C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', train_bn=train_bn)\n # Stage 3\n x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', train_bn=train_bn)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', train_bn=train_bn)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', train_bn=train_bn)\n C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', train_bn=train_bn)\n # Stage 4\n x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', train_bn=train_bn)\n block_count = {\"resnet50\": 5, \"resnet101\": 22}[architecture]\n for i in range(block_count):\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i), train_bn=train_bn)\n C4 = x\n # Stage 5\n if stage5:\n x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a', train_bn=train_bn)\n x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b', train_bn=train_bn)\n C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c', train_bn=train_bn)\n else:\n C5 = None\n return [C1, C2, C3, C4, C5]\n\n\n############################################################\n# Proposal Layer\n############################################################\n\ndef apply_box_deltas_graph(boxes, deltas):\n \"\"\"Applies the given deltas to the given boxes.\n boxes: [N, (y1, x1, y2, x2)] boxes to update\n deltas: [N, (dy, dx, log(dh), log(dw))] refinements to apply\n \"\"\"\n # Convert to y, x, h, w\n height = boxes[:, 2] - boxes[:, 0]\n width = boxes[:, 3] - boxes[:, 1]\n center_y = boxes[:, 0] + 0.5 * height\n center_x = boxes[:, 1] + 0.5 * width\n # Apply deltas\n center_y += deltas[:, 0] * height\n center_x += deltas[:, 1] * width\n height *= tf.exp(deltas[:, 2])\n width *= tf.exp(deltas[:, 3])\n # Convert back to y1, x1, y2, x2\n y1 = center_y - 0.5 * height\n x1 = center_x - 0.5 * width\n y2 = y1 + height\n x2 = x1 + width\n result = tf.stack([y1, x1, y2, x2], axis=1, name=\"apply_box_deltas_out\")\n return result\n\n\ndef clip_boxes_graph(boxes, window):\n \"\"\"\n boxes: [N, (y1, x1, y2, x2)]\n window: [4] in the form y1, x1, y2, x2\n \"\"\"\n # Split\n wy1, wx1, wy2, wx2 = tf.split(window, 4)\n y1, x1, y2, x2 = tf.split(boxes, 4, axis=1)\n # Clip\n y1 = tf.maximum(tf.minimum(y1, wy2), wy1)\n x1 = tf.maximum(tf.minimum(x1, wx2), wx1)\n y2 = tf.maximum(tf.minimum(y2, wy2), wy1)\n x2 = tf.maximum(tf.minimum(x2, wx2), wx1)\n clipped = tf.concat([y1, x1, y2, x2], axis=1, name=\"clipped_boxes\")\n clipped.set_shape((clipped.shape[0], 4))\n return clipped\n\n\nclass ProposalLayer(KE.Layer):\n \"\"\"Receives anchor scores and selects a subset to pass as proposals\n to the second stage. Filtering is done based on anchor scores and\n non-max suppression to remove overlaps. 
It also applies bounding\n box refinement deltas to anchors.\n\n Inputs:\n rpn_probs: [batch, num_anchors, (bg prob, fg prob)]\n rpn_bbox: [batch, num_anchors, (dy, dx, log(dh), log(dw))]\n anchors: [batch, num_anchors, (y1, x1, y2, x2)] anchors in normalized coordinates\n\n Returns:\n Proposals in normalized coordinates [batch, rois, (y1, x1, y2, x2)]\n \"\"\"\n\n def __init__(self, proposal_count, nms_threshold, config=None, **kwargs):\n super(ProposalLayer, self).__init__(**kwargs)\n self.config = config\n self.proposal_count = proposal_count\n self.nms_threshold = nms_threshold\n\n def call(self, inputs):\n # Box Scores. Use the foreground class confidence. [Batch, num_rois, 1]\n scores = inputs[0][:, :, 1]\n # Box deltas [batch, num_rois, 4]\n deltas = inputs[1]\n deltas = deltas * np.reshape(self.config.RPN_BBOX_STD_DEV, [1, 1, 4])\n # Anchors\n anchors = inputs[2]\n\n # Improve performance by trimming to top anchors by score\n # and doing the rest on the smaller subset.\n pre_nms_limit = tf.minimum(self.config.PRE_NMS_LIMIT, tf.shape(anchors)[1])\n ix = tf.nn.top_k(scores, pre_nms_limit, sorted=True,\n name=\"top_anchors\").indices\n scores = utils.batch_slice([scores, ix], lambda x, y: tf.gather(x, y),\n self.config.IMAGES_PER_GPU)\n deltas = utils.batch_slice([deltas, ix], lambda x, y: tf.gather(x, y),\n self.config.IMAGES_PER_GPU)\n pre_nms_anchors = utils.batch_slice([anchors, ix], lambda a, x: tf.gather(a, x),\n self.config.IMAGES_PER_GPU,\n names=[\"pre_nms_anchors\"])\n\n # Apply deltas to anchors to get refined anchors.\n # [batch, N, (y1, x1, y2, x2)]\n boxes = utils.batch_slice([pre_nms_anchors, deltas],\n lambda x, y: apply_box_deltas_graph(x, y),\n self.config.IMAGES_PER_GPU,\n names=[\"refined_anchors\"])\n\n # Clip to image boundaries. Since we're in normalized coordinates,\n # clip to 0..1 range. [batch, N, (y1, x1, y2, x2)]\n window = np.array([0, 0, 1, 1], dtype=np.float32)\n boxes = utils.batch_slice(boxes,\n lambda x: clip_boxes_graph(x, window),\n self.config.IMAGES_PER_GPU,\n names=[\"refined_anchors_clipped\"])\n\n # Filter out small boxes\n # According to Xinlei Chen's paper, this reduces detection accuracy\n # for small objects, so we're skipping it.\n\n # Non-max suppression\n def nms(boxes, scores):\n indices = tf.image.non_max_suppression(\n boxes, scores, self.proposal_count,\n self.nms_threshold, name=\"rpn_non_max_suppression\")\n proposals = tf.gather(boxes, indices)\n # Pad if needed\n padding = tf.maximum(self.proposal_count - tf.shape(proposals)[0], 0)\n proposals = tf.pad(proposals, [(0, padding), (0, 0)])\n return proposals\n proposals = utils.batch_slice([boxes, scores], nms,\n self.config.IMAGES_PER_GPU)\n return proposals\n\n def compute_output_shape(self, input_shape):\n return (None, self.proposal_count, 4)\n\n\n############################################################\n# ROIAlign Layer\n############################################################\n\ndef log2_graph(x):\n \"\"\"Implementation of Log2. TF doesn't have a native implementation.\"\"\"\n return tf.log(x) / tf.log(2.0)\n\n\nclass PyramidROIAlign(KE.Layer):\n \"\"\"Implements ROI Pooling on multiple levels of the feature pyramid.\n\n Params:\n - pool_shape: [pool_height, pool_width] of the output pooled regions. Usually [7, 7]\n\n Inputs:\n - boxes: [batch, num_boxes, (y1, x1, y2, x2)] in normalized\n coordinates. Possibly padded with zeros if not enough\n boxes to fill the array.\n - image_meta: [batch, (meta data)] Image details. 
See compose_image_meta()\n - feature_maps: List of feature maps from different levels of the pyramid.\n Each is [batch, height, width, channels]\n\n Output:\n Pooled regions in the shape: [batch, num_boxes, pool_height, pool_width, channels].\n The width and height are those specific in the pool_shape in the layer\n constructor.\n \"\"\"\n\n def __init__(self, pool_shape, **kwargs):\n super(PyramidROIAlign, self).__init__(**kwargs)\n self.pool_shape = tuple(pool_shape)\n\n def call(self, inputs):\n # Crop boxes [batch, num_boxes, (y1, x1, y2, x2)] in normalized coords\n boxes = inputs[0]\n\n # Image meta\n # Holds details about the image. See compose_image_meta()\n image_meta = inputs[1]\n\n # Feature Maps. List of feature maps from different level of the\n # feature pyramid. Each is [batch, height, width, channels]\n feature_maps = inputs[2:]\n\n # Assign each ROI to a level in the pyramid based on the ROI area.\n y1, x1, y2, x2 = tf.split(boxes, 4, axis=2)\n h = y2 - y1\n w = x2 - x1\n # Use shape of first image. Images in a batch must have the same size.\n image_shape = parse_image_meta_graph(image_meta)['image_shape'][0]\n # Equation 1 in the Feature Pyramid Networks paper. Account for\n # the fact that our coordinates are normalized here.\n # e.g. a 224x224 ROI (in pixels) maps to P4\n image_area = tf.cast(image_shape[0] * image_shape[1], tf.float32)\n roi_level = log2_graph(tf.sqrt(h * w) / (224.0 / tf.sqrt(image_area)))\n roi_level = tf.minimum(5, tf.maximum(\n 2, 4 + tf.cast(tf.round(roi_level), tf.int32)))\n roi_level = tf.squeeze(roi_level, 2)\n\n # Loop through levels and apply ROI pooling to each. P2 to P5.\n pooled = []\n box_to_level = []\n for i, level in enumerate(range(2, 6)):\n ix = tf.where(tf.equal(roi_level, level))\n level_boxes = tf.gather_nd(boxes, ix)\n\n # Box indices for crop_and_resize.\n box_indices = tf.cast(ix[:, 0], tf.int32)\n\n # Keep track of which box is mapped to which level\n box_to_level.append(ix)\n\n # Stop gradient propogation to ROI proposals\n level_boxes = tf.stop_gradient(level_boxes)\n box_indices = tf.stop_gradient(box_indices)\n\n # Crop and Resize\n # From Mask R-CNN paper: \"We sample four regular locations, so\n # that we can evaluate either max or average pooling. 
In fact,\n # interpolating only a single value at each bin center (without\n # pooling) is nearly as effective.\"\n #\n # Here we use the simplified approach of a single value per bin,\n # which is how it's done in tf.crop_and_resize()\n # Result: [batch * num_boxes, pool_height, pool_width, channels]\n pooled.append(tf.image.crop_and_resize(\n feature_maps[i], level_boxes, box_indices, self.pool_shape,\n method=\"bilinear\"))\n\n # Pack pooled features into one tensor\n pooled = tf.concat(pooled, axis=0)\n\n # Pack box_to_level mapping into one array and add another\n # column representing the order of pooled boxes\n box_to_level = tf.concat(box_to_level, axis=0)\n box_range = tf.expand_dims(tf.range(tf.shape(box_to_level)[0]), 1)\n box_to_level = tf.concat([tf.cast(box_to_level, tf.int32), box_range],\n axis=1)\n\n # Rearrange pooled features to match the order of the original boxes\n # Sort box_to_level by batch then box index\n # TF doesn't have a way to sort by two columns, so merge them and sort.\n sorting_tensor = box_to_level[:, 0] * 100000 + box_to_level[:, 1]\n ix = tf.nn.top_k(sorting_tensor, k=tf.shape(\n box_to_level)[0]).indices[::-1]\n ix = tf.gather(box_to_level[:, 2], ix)\n pooled = tf.gather(pooled, ix)\n\n # Re-add the batch dimension\n shape = tf.concat([tf.shape(boxes)[:2], tf.shape(pooled)[1:]], axis=0)\n pooled = tf.reshape(pooled, shape)\n return pooled\n\n def compute_output_shape(self, input_shape):\n return input_shape[0][:2] + self.pool_shape + (input_shape[2][-1], )\n\n\n############################################################\n# Detection Target Layer\n############################################################\n\ndef overlaps_graph(boxes1, boxes2):\n \"\"\"Computes IoU overlaps between two sets of boxes.\n boxes1, boxes2: [N, (y1, x1, y2, x2)].\n \"\"\"\n # 1. Tile boxes2 and repeat boxes1. This allows us to compare\n # every boxes1 against every boxes2 without loops.\n # TF doesn't have an equivalent to np.repeat() so simulate it\n # using tf.tile() and tf.reshape.\n b1 = tf.reshape(tf.tile(tf.expand_dims(boxes1, 1),\n [1, 1, tf.shape(boxes2)[0]]), [-1, 4])\n b2 = tf.tile(boxes2, [tf.shape(boxes1)[0], 1])\n # 2. Compute intersections\n b1_y1, b1_x1, b1_y2, b1_x2 = tf.split(b1, 4, axis=1)\n b2_y1, b2_x1, b2_y2, b2_x2 = tf.split(b2, 4, axis=1)\n y1 = tf.maximum(b1_y1, b2_y1)\n x1 = tf.maximum(b1_x1, b2_x1)\n y2 = tf.minimum(b1_y2, b2_y2)\n x2 = tf.minimum(b1_x2, b2_x2)\n intersection = tf.maximum(x2 - x1, 0) * tf.maximum(y2 - y1, 0)\n # 3. Compute unions\n b1_area = (b1_y2 - b1_y1) * (b1_x2 - b1_x1)\n b2_area = (b2_y2 - b2_y1) * (b2_x2 - b2_x1)\n union = b1_area + b2_area - intersection\n # 4. Compute IoU and reshape to [boxes1, boxes2]\n iou = intersection / union\n overlaps = tf.reshape(iou, [tf.shape(boxes1)[0], tf.shape(boxes2)[0]])\n return overlaps\n\n\ndef detection_targets_graph(proposals, gt_class_ids, gt_boxes, gt_masks, config):\n \"\"\"Generates detection targets for one image. Subsamples proposals and\n generates target class IDs, bounding box deltas, and masks for each.\n\n Inputs:\n proposals: [POST_NMS_ROIS_TRAINING, (y1, x1, y2, x2)] in normalized coordinates. 
Might\n be zero padded if there are not enough proposals.\n gt_class_ids: [MAX_GT_INSTANCES] int class IDs\n gt_boxes: [MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized coordinates.\n gt_masks: [height, width, MAX_GT_INSTANCES] of boolean type.\n\n Returns: Target ROIs and corresponding class IDs, bounding box shifts,\n and masks.\n rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized coordinates\n class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs. Zero padded.\n deltas: [TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw))]\n masks: [TRAIN_ROIS_PER_IMAGE, height, width]. Masks cropped to bbox\n boundaries and resized to neural network output size.\n\n Note: Returned arrays might be zero padded if not enough target ROIs.\n \"\"\"\n # Assertions\n asserts = [\n tf.Assert(tf.greater(tf.shape(proposals)[0], 0), [proposals],\n name=\"roi_assertion\"),\n ]\n with tf.control_dependencies(asserts):\n proposals = tf.identity(proposals)\n\n # Remove zero padding\n proposals, _ = trim_zeros_graph(proposals, name=\"trim_proposals\")\n gt_boxes, non_zeros = trim_zeros_graph(gt_boxes, name=\"trim_gt_boxes\")\n gt_class_ids = tf.boolean_mask(gt_class_ids, non_zeros,\n name=\"trim_gt_class_ids\")\n gt_masks = tf.gather(gt_masks, tf.where(non_zeros)[:, 0], axis=2,\n name=\"trim_gt_masks\")\n\n # Handle COCO crowds\n # A crowd box in COCO is a bounding box around several instances. Exclude\n # them from training. A crowd box is given a negative class ID.\n crowd_ix = tf.where(gt_class_ids < 0)[:, 0]\n non_crowd_ix = tf.where(gt_class_ids > 0)[:, 0]\n crowd_boxes = tf.gather(gt_boxes, crowd_ix)\n gt_class_ids = tf.gather(gt_class_ids, non_crowd_ix)\n gt_boxes = tf.gather(gt_boxes, non_crowd_ix)\n gt_masks = tf.gather(gt_masks, non_crowd_ix, axis=2)\n\n # Compute overlaps matrix [proposals, gt_boxes]\n overlaps = overlaps_graph(proposals, gt_boxes)\n\n # Compute overlaps with crowd boxes [proposals, crowd_boxes]\n crowd_overlaps = overlaps_graph(proposals, crowd_boxes)\n crowd_iou_max = tf.reduce_max(crowd_overlaps, axis=1)\n no_crowd_bool = (crowd_iou_max < 0.001)\n\n # Determine positive and negative ROIs\n roi_iou_max = tf.reduce_max(overlaps, axis=1)\n # 1. Positive ROIs are those with >= 0.5 IoU with a GT box\n positive_roi_bool = (roi_iou_max >= 0.5)\n positive_indices = tf.where(positive_roi_bool)[:, 0]\n # 2. Negative ROIs are those with < 0.5 with every GT box. Skip crowds.\n negative_indices = tf.where(tf.logical_and(roi_iou_max < 0.5, no_crowd_bool))[:, 0]\n\n # Subsample ROIs. Aim for 33% positive\n # Positive ROIs\n positive_count = int(config.TRAIN_ROIS_PER_IMAGE *\n config.ROI_POSITIVE_RATIO)\n positive_indices = tf.random_shuffle(positive_indices)[:positive_count]\n positive_count = tf.shape(positive_indices)[0]\n # Negative ROIs. 
Add enough to maintain positive:negative ratio.\n r = 1.0 / config.ROI_POSITIVE_RATIO\n negative_count = tf.cast(r * tf.cast(positive_count, tf.float32), tf.int32) - positive_count\n negative_indices = tf.random_shuffle(negative_indices)[:negative_count]\n # Gather selected ROIs\n positive_rois = tf.gather(proposals, positive_indices)\n negative_rois = tf.gather(proposals, negative_indices)\n\n # Assign positive ROIs to GT boxes.\n positive_overlaps = tf.gather(overlaps, positive_indices)\n roi_gt_box_assignment = tf.cond(\n tf.greater(tf.shape(positive_overlaps)[1], 0),\n true_fn = lambda: tf.argmax(positive_overlaps, axis=1),\n false_fn = lambda: tf.cast(tf.constant([]),tf.int64)\n )\n roi_gt_boxes = tf.gather(gt_boxes, roi_gt_box_assignment)\n roi_gt_class_ids = tf.gather(gt_class_ids, roi_gt_box_assignment)\n\n # Compute bbox refinement for positive ROIs\n deltas = utils.box_refinement_graph(positive_rois, roi_gt_boxes)\n deltas /= config.BBOX_STD_DEV\n\n # Assign positive ROIs to GT masks\n # Permute masks to [N, height, width, 1]\n transposed_masks = tf.expand_dims(tf.transpose(gt_masks, [2, 0, 1]), -1)\n # Pick the right mask for each ROI\n roi_masks = tf.gather(transposed_masks, roi_gt_box_assignment)\n\n # Compute mask targets\n boxes = positive_rois\n if config.USE_MINI_MASK:\n # Transform ROI coordinates from normalized image space\n # to normalized mini-mask space.\n y1, x1, y2, x2 = tf.split(positive_rois, 4, axis=1)\n gt_y1, gt_x1, gt_y2, gt_x2 = tf.split(roi_gt_boxes, 4, axis=1)\n gt_h = gt_y2 - gt_y1\n gt_w = gt_x2 - gt_x1\n y1 = (y1 - gt_y1) / gt_h\n x1 = (x1 - gt_x1) / gt_w\n y2 = (y2 - gt_y1) / gt_h\n x2 = (x2 - gt_x1) / gt_w\n boxes = tf.concat([y1, x1, y2, x2], 1)\n box_ids = tf.range(0, tf.shape(roi_masks)[0])\n masks = tf.image.crop_and_resize(tf.cast(roi_masks, tf.float32), boxes,\n box_ids,\n config.MASK_SHAPE)\n # Remove the extra dimension from masks.\n masks = tf.squeeze(masks, axis=3)\n\n # Threshold mask pixels at 0.5 to have GT masks be 0 or 1 to use with\n # binary cross entropy loss.\n masks = tf.round(masks)\n\n # Append negative ROIs and pad bbox deltas and masks that\n # are not used for negative ROIs with zeros.\n rois = tf.concat([positive_rois, negative_rois], axis=0)\n N = tf.shape(negative_rois)[0]\n P = tf.maximum(config.TRAIN_ROIS_PER_IMAGE - tf.shape(rois)[0], 0)\n rois = tf.pad(rois, [(0, P), (0, 0)])\n roi_gt_boxes = tf.pad(roi_gt_boxes, [(0, N + P), (0, 0)])\n roi_gt_class_ids = tf.pad(roi_gt_class_ids, [(0, N + P)])\n deltas = tf.pad(deltas, [(0, N + P), (0, 0)])\n masks = tf.pad(masks, [[0, N + P], (0, 0), (0, 0)])\n\n return rois, roi_gt_class_ids, deltas, masks\n\n\nclass DetectionTargetLayer(KE.Layer):\n \"\"\"Subsamples proposals and generates target box refinement, class_ids,\n and masks for each.\n\n Inputs:\n proposals: [batch, N, (y1, x1, y2, x2)] in normalized coordinates. Might\n be zero padded if there are not enough proposals.\n gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs.\n gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized\n coordinates.\n gt_masks: [batch, height, width, MAX_GT_INSTANCES] of boolean type\n\n Returns: Target ROIs and corresponding class IDs, bounding box shifts,\n and masks.\n rois: [batch, TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized\n coordinates\n target_class_ids: [batch, TRAIN_ROIS_PER_IMAGE]. 
Integer class IDs.\n target_deltas: [batch, TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw)]\n target_mask: [batch, TRAIN_ROIS_PER_IMAGE, height, width]\n Masks cropped to bbox boundaries and resized to neural\n network output size.\n\n Note: Returned arrays might be zero padded if not enough target ROIs.\n \"\"\"\n\n def __init__(self, config, **kwargs):\n super(DetectionTargetLayer, self).__init__(**kwargs)\n self.config = config\n\n def call(self, inputs):\n proposals = inputs[0]\n gt_class_ids = inputs[1]\n gt_boxes = inputs[2]\n gt_masks = inputs[3]\n\n # Slice the batch and run a graph for each slice\n # TODO: Rename target_bbox to target_deltas for clarity\n names = [\"rois\", \"target_class_ids\", \"target_bbox\", \"target_mask\"]\n outputs = utils.batch_slice(\n [proposals, gt_class_ids, gt_boxes, gt_masks],\n lambda w, x, y, z: detection_targets_graph(\n w, x, y, z, self.config),\n self.config.IMAGES_PER_GPU, names=names)\n return outputs\n\n def compute_output_shape(self, input_shape):\n return [\n (None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # rois\n (None, self.config.TRAIN_ROIS_PER_IMAGE), # class_ids\n (None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # deltas\n (None, self.config.TRAIN_ROIS_PER_IMAGE, self.config.MASK_SHAPE[0],\n self.config.MASK_SHAPE[1]) # masks\n ]\n\n def compute_mask(self, inputs, mask=None):\n return [None, None, None, None]\n\n\n############################################################\n# Detection Layer\n############################################################\n\ndef refine_detections_graph(rois, probs, deltas, window, config):\n \"\"\"Refine classified proposals and filter overlaps and return final\n detections.\n\n Inputs:\n rois: [N, (y1, x1, y2, x2)] in normalized coordinates\n probs: [N, num_classes]. Class probabilities.\n deltas: [N, num_classes, (dy, dx, log(dh), log(dw))]. Class-specific\n bounding box deltas.\n window: (y1, x1, y2, x2) in normalized coordinates. The part of the image\n that contains the image excluding the padding.\n\n Returns detections shaped: [num_detections, (y1, x1, y2, x2, class_id, score)] where\n coordinates are normalized.\n \"\"\"\n # Class IDs per ROI\n class_ids = tf.argmax(probs, axis=1, output_type=tf.int32)\n # Class probability of the top class of each ROI\n indices = tf.stack([tf.range(probs.shape[0]), class_ids], axis=1)\n class_scores = tf.gather_nd(probs, indices)\n # Class-specific bounding box deltas\n deltas_specific = tf.gather_nd(deltas, indices)\n # Apply bounding box deltas\n # Shape: [boxes, (y1, x1, y2, x2)] in normalized coordinates\n refined_rois = apply_box_deltas_graph(\n rois, deltas_specific * config.BBOX_STD_DEV)\n # Clip boxes to image window\n refined_rois = clip_boxes_graph(refined_rois, window)\n\n # TODO: Filter out boxes with zero area\n\n # Filter out background boxes\n keep = tf.where(class_ids > 0)[:, 0]\n # Filter out low confidence boxes\n if config.DETECTION_MIN_CONFIDENCE:\n conf_keep = tf.where(class_scores >= config.DETECTION_MIN_CONFIDENCE)[:, 0]\n keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),\n tf.expand_dims(conf_keep, 0))\n keep = tf.sparse_tensor_to_dense(keep)[0]\n\n # Apply per-class NMS\n # 1. 
Prepare variables\n pre_nms_class_ids = tf.gather(class_ids, keep)\n pre_nms_scores = tf.gather(class_scores, keep)\n pre_nms_rois = tf.gather(refined_rois, keep)\n unique_pre_nms_class_ids = tf.unique(pre_nms_class_ids)[0]\n\n def nms_keep_map(class_id):\n \"\"\"Apply Non-Maximum Suppression on ROIs of the given class.\"\"\"\n # Indices of ROIs of the given class\n ixs = tf.where(tf.equal(pre_nms_class_ids, class_id))[:, 0]\n # Apply NMS\n class_keep = tf.image.non_max_suppression(\n tf.gather(pre_nms_rois, ixs),\n tf.gather(pre_nms_scores, ixs),\n max_output_size=config.DETECTION_MAX_INSTANCES,\n iou_threshold=config.DETECTION_NMS_THRESHOLD)\n # Map indices\n class_keep = tf.gather(keep, tf.gather(ixs, class_keep))\n # Pad with -1 so returned tensors have the same shape\n gap = config.DETECTION_MAX_INSTANCES - tf.shape(class_keep)[0]\n class_keep = tf.pad(class_keep, [(0, gap)],\n mode='CONSTANT', constant_values=-1)\n # Set shape so map_fn() can infer result shape\n class_keep.set_shape([config.DETECTION_MAX_INSTANCES])\n return class_keep\n\n # 2. Map over class IDs\n nms_keep = tf.map_fn(nms_keep_map, unique_pre_nms_class_ids,\n dtype=tf.int64)\n # 3. Merge results into one list, and remove -1 padding\n nms_keep = tf.reshape(nms_keep, [-1])\n nms_keep = tf.gather(nms_keep, tf.where(nms_keep > -1)[:, 0])\n # 4. Compute intersection between keep and nms_keep\n keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),\n tf.expand_dims(nms_keep, 0))\n keep = tf.sparse_tensor_to_dense(keep)[0]\n # Keep top detections\n roi_count = config.DETECTION_MAX_INSTANCES\n class_scores_keep = tf.gather(class_scores, keep)\n num_keep = tf.minimum(tf.shape(class_scores_keep)[0], roi_count)\n top_ids = tf.nn.top_k(class_scores_keep, k=num_keep, sorted=True)[1]\n keep = tf.gather(keep, top_ids)\n\n # Arrange output as [N, (y1, x1, y2, x2, class_id, score)]\n # Coordinates are normalized.\n detections = tf.concat([\n tf.gather(refined_rois, keep),\n tf.to_float(tf.gather(class_ids, keep))[..., tf.newaxis],\n tf.gather(class_scores, keep)[..., tf.newaxis]\n ], axis=1)\n\n # Pad with zeros if detections < DETECTION_MAX_INSTANCES\n gap = config.DETECTION_MAX_INSTANCES - tf.shape(detections)[0]\n detections = tf.pad(detections, [(0, gap), (0, 0)], \"CONSTANT\")\n return detections\n\n\nclass DetectionLayer(KE.Layer):\n \"\"\"Takes classified proposal boxes and their bounding box deltas and\n returns the final detection boxes.\n\n Returns:\n [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] where\n coordinates are normalized.\n \"\"\"\n\n def __init__(self, config=None, **kwargs):\n super(DetectionLayer, self).__init__(**kwargs)\n self.config = config\n\n def call(self, inputs):\n rois = inputs[0]\n mrcnn_class = inputs[1]\n mrcnn_bbox = inputs[2]\n image_meta = inputs[3]\n\n # Get windows of images in normalized coordinates. 
Windows are the area\n # in the image that excludes the padding.\n # Use the shape of the first image in the batch to normalize the window\n # because we know that all images get resized to the same size.\n m = parse_image_meta_graph(image_meta)\n image_shape = m['image_shape'][0]\n window = norm_boxes_graph(m['window'], image_shape[:2])\n\n # Run detection refinement graph on each item in the batch\n detections_batch = utils.batch_slice(\n [rois, mrcnn_class, mrcnn_bbox, window],\n lambda x, y, w, z: refine_detections_graph(x, y, w, z, self.config),\n self.config.IMAGES_PER_GPU)\n\n # Reshape output\n # [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] in\n # normalized coordinates\n return tf.reshape(\n detections_batch,\n [self.config.BATCH_SIZE, self.config.DETECTION_MAX_INSTANCES, 6])\n\n def compute_output_shape(self, input_shape):\n return (None, self.config.DETECTION_MAX_INSTANCES, 6)\n\n\n############################################################\n# Region Proposal Network (RPN)\n############################################################\n\ndef rpn_graph(feature_map, anchors_per_location, anchor_stride):\n \"\"\"Builds the computation graph of Region Proposal Network.\n\n feature_map: backbone features [batch, height, width, depth]\n anchors_per_location: number of anchors per pixel in the feature map\n anchor_stride: Controls the density of anchors. Typically 1 (anchors for\n every pixel in the feature map), or 2 (every other pixel).\n\n Returns:\n rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax)\n rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities.\n rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be\n applied to anchors.\n \"\"\"\n # TODO: check if stride of 2 causes alignment issues if the feature map\n # is not even.\n # Shared convolutional base of the RPN\n shared = KL.Conv2D(512, (3, 3), padding='same', activation='relu',\n strides=anchor_stride,\n name='rpn_conv_shared')(feature_map)\n\n # Anchor Score. [batch, height, width, anchors per location * 2].\n x = KL.Conv2D(2 * anchors_per_location, (1, 1), padding='valid',\n activation='linear', name='rpn_class_raw')(shared)\n\n # Reshape to [batch, anchors, 2]\n rpn_class_logits = KL.Lambda(\n lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 2]))(x)\n\n # Softmax on last dimension of BG/FG.\n rpn_probs = KL.Activation(\n \"softmax\", name=\"rpn_class_xxx\")(rpn_class_logits)\n\n # Bounding box refinement. [batch, H, W, anchors per location * depth]\n # where depth is [x, y, log(w), log(h)]\n x = KL.Conv2D(anchors_per_location * 4, (1, 1), padding=\"valid\",\n activation='linear', name='rpn_bbox_pred')(shared)\n\n # Reshape to [batch, anchors, 4]\n rpn_bbox = KL.Lambda(lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 4]))(x)\n\n return [rpn_class_logits, rpn_probs, rpn_bbox]\n\n\ndef build_rpn_model(anchor_stride, anchors_per_location, depth):\n \"\"\"Builds a Keras model of the Region Proposal Network.\n It wraps the RPN graph so it can be used multiple times with shared\n weights.\n\n anchors_per_location: number of anchors per pixel in the feature map\n anchor_stride: Controls the density of anchors. Typically 1 (anchors for\n every pixel in the feature map), or 2 (every other pixel).\n depth: Depth of the backbone feature map.\n\n Returns a Keras Model object. 
The model outputs, when called, are:\n rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax)\n rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities.\n rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be\n applied to anchors.\n \"\"\"\n input_feature_map = KL.Input(shape=[None, None, depth],\n name=\"input_rpn_feature_map\")\n outputs = rpn_graph(input_feature_map, anchors_per_location, anchor_stride)\n return KM.Model([input_feature_map], outputs, name=\"rpn_model\")\n\n\n############################################################\n# Feature Pyramid Network Heads\n############################################################\n\ndef fpn_classifier_graph(rois, feature_maps, image_meta,\n pool_size, num_classes, train_bn=True,\n fc_layers_size=1024):\n \"\"\"Builds the computation graph of the feature pyramid network classifier\n and regressor heads.\n\n rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized\n coordinates.\n feature_maps: List of feature maps from different layers of the pyramid,\n [P2, P3, P4, P5]. Each has a different resolution.\n image_meta: [batch, (meta data)] Image details. See compose_image_meta()\n pool_size: The width of the square feature map generated from ROI Pooling.\n num_classes: number of classes, which determines the depth of the results\n train_bn: Boolean. Train or freeze Batch Norm layers\n fc_layers_size: Size of the 2 FC layers\n\n Returns:\n logits: [batch, num_rois, NUM_CLASSES] classifier logits (before softmax)\n probs: [batch, num_rois, NUM_CLASSES] classifier probabilities\n bbox_deltas: [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))] Deltas to apply to\n proposal boxes\n \"\"\"\n # ROI Pooling\n # Shape: [batch, num_rois, POOL_SIZE, POOL_SIZE, channels]\n x = PyramidROIAlign([pool_size, pool_size],\n name=\"roi_align_classifier\")([rois, image_meta] + feature_maps)\n # Two 1024 FC layers (implemented with Conv2D for consistency)\n x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (pool_size, pool_size), padding=\"valid\"),\n name=\"mrcnn_class_conv1\")(x)\n x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn1')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (1, 1)),\n name=\"mrcnn_class_conv2\")(x)\n x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn2')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n shared = KL.Lambda(lambda x: K.squeeze(K.squeeze(x, 3), 2),\n name=\"pool_squeeze\")(x)\n\n # Classifier head\n mrcnn_class_logits = KL.TimeDistributed(KL.Dense(num_classes),\n name='mrcnn_class_logits')(shared)\n mrcnn_probs = KL.TimeDistributed(KL.Activation(\"softmax\"),\n name=\"mrcnn_class\")(mrcnn_class_logits)\n\n # BBox head\n # [batch, num_rois, NUM_CLASSES * (dy, dx, log(dh), log(dw))]\n x = KL.TimeDistributed(KL.Dense(num_classes * 4, activation='linear'),\n name='mrcnn_bbox_fc')(shared)\n # Reshape to [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))]\n s = K.int_shape(x)\n mrcnn_bbox = KL.Reshape((s[1], num_classes, 4), name=\"mrcnn_bbox\")(x)\n\n return mrcnn_class_logits, mrcnn_probs, mrcnn_bbox\n\n\ndef build_fpn_mask_graph(rois, feature_maps, image_meta,\n pool_size, num_classes, train_bn=True):\n \"\"\"Builds the computation graph of the mask head of Feature Pyramid Network.\n\n rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized\n coordinates.\n feature_maps: List of feature maps from 
different layers of the pyramid,\n [P2, P3, P4, P5]. Each has a different resolution.\n image_meta: [batch, (meta data)] Image details. See compose_image_meta()\n pool_size: The width of the square feature map generated from ROI Pooling.\n num_classes: number of classes, which determines the depth of the results\n train_bn: Boolean. Train or freeze Batch Norm layers\n\n Returns: Masks [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, NUM_CLASSES]\n \"\"\"\n # ROI Pooling\n # Shape: [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, channels]\n x = PyramidROIAlign([pool_size, pool_size],\n name=\"roi_align_mask\")([rois, image_meta] + feature_maps)\n\n # Conv layers\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv1\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn1')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv2\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn2')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv3\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn3')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv4\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn4')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation=\"relu\"),\n name=\"mrcnn_mask_deconv\")(x)\n x = KL.TimeDistributed(KL.Conv2D(num_classes, (1, 1), strides=1, activation=\"sigmoid\"),\n name=\"mrcnn_mask\")(x)\n return x\n\n\n############################################################\n# Loss Functions\n############################################################\n\ndef smooth_l1_loss(y_true, y_pred):\n \"\"\"Implements Smooth-L1 loss.\n y_true and y_pred are typically: [N, 4], but could be any shape.\n \"\"\"\n diff = K.abs(y_true - y_pred)\n less_than_one = K.cast(K.less(diff, 1.0), \"float32\")\n loss = (less_than_one * 0.5 * diff**2) + (1 - less_than_one) * (diff - 0.5)\n return loss\n\n\ndef rpn_class_loss_graph(rpn_match, rpn_class_logits):\n \"\"\"RPN anchor classifier loss.\n\n rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,\n -1=negative, 0=neutral anchor.\n rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for BG/FG.\n \"\"\"\n # Squeeze last dim to simplify\n rpn_match = tf.squeeze(rpn_match, -1)\n # Get anchor classes. 
Convert the -1/+1 match to 0/1 values.\n anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)\n # Positive and Negative anchors contribute to the loss,\n # but neutral anchors (match value = 0) don't.\n indices = tf.where(K.not_equal(rpn_match, 0))\n # Pick rows that contribute to the loss and filter out the rest.\n rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)\n anchor_class = tf.gather_nd(anchor_class, indices)\n # Cross entropy loss\n loss = K.sparse_categorical_crossentropy(target=anchor_class,\n output=rpn_class_logits,\n from_logits=True)\n loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))\n return loss\n\n\ndef rpn_bbox_loss_graph(config, target_bbox, rpn_match, rpn_bbox):\n \"\"\"Return the RPN bounding box loss graph.\n\n config: the model config object.\n target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))].\n Uses 0 padding to fill in unsed bbox deltas.\n rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,\n -1=negative, 0=neutral anchor.\n rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]\n \"\"\"\n # Positive anchors contribute to the loss, but negative and\n # neutral anchors (match value of 0 or -1) don't.\n rpn_match = K.squeeze(rpn_match, -1)\n indices = tf.where(K.equal(rpn_match, 1))\n\n # Pick bbox deltas that contribute to the loss\n rpn_bbox = tf.gather_nd(rpn_bbox, indices)\n\n # Trim target bounding box deltas to the same length as rpn_bbox.\n batch_counts = K.sum(K.cast(K.equal(rpn_match, 1), tf.int32), axis=1)\n target_bbox = batch_pack_graph(target_bbox, batch_counts,\n config.IMAGES_PER_GPU)\n\n loss = smooth_l1_loss(target_bbox, rpn_bbox)\n\n loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))\n return loss\n\n\ndef mrcnn_class_loss_graph(target_class_ids, pred_class_logits,\n active_class_ids):\n \"\"\"Loss for the classifier head of Mask RCNN.\n\n target_class_ids: [batch, num_rois]. Integer class IDs. Uses zero\n padding to fill in the array.\n pred_class_logits: [batch, num_rois, num_classes]\n active_class_ids: [batch, num_classes]. Has a value of 1 for\n classes that are in the dataset of the image, and 0\n for classes that are not in the dataset.\n \"\"\"\n # During model building, Keras calls this function with\n # target_class_ids of type float32. Unclear why. Cast it\n # to int to get around it.\n target_class_ids = tf.cast(target_class_ids, 'int64')\n\n # Find predictions of classes that are not in the dataset.\n pred_class_ids = tf.argmax(pred_class_logits, axis=2)\n # TODO: Update this line to work with batch > 1. Right now it assumes all\n # images in a batch have the same active_class_ids\n pred_active = tf.gather(active_class_ids[0], pred_class_ids)\n\n # Loss\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=target_class_ids, logits=pred_class_logits)\n\n # Erase losses of predictions of classes that are not in the active\n # classes of the image.\n loss = loss * pred_active\n\n # Computer loss mean. Use only predictions that contribute\n # to the loss to get a correct mean.\n loss = tf.reduce_sum(loss) / tf.reduce_sum(pred_active)\n return loss\n\n\ndef mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox):\n \"\"\"Loss for Mask R-CNN bounding box refinement.\n\n target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]\n target_class_ids: [batch, num_rois]. 
Integer class IDs.\n pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]\n \"\"\"\n # Reshape to merge batch and roi dimensions for simplicity.\n target_class_ids = K.reshape(target_class_ids, (-1,))\n target_bbox = K.reshape(target_bbox, (-1, 4))\n pred_bbox = K.reshape(pred_bbox, (-1, K.int_shape(pred_bbox)[2], 4))\n\n # Only positive ROIs contribute to the loss. And only\n # the right class_id of each ROI. Get their indices.\n positive_roi_ix = tf.where(target_class_ids > 0)[:, 0]\n positive_roi_class_ids = tf.cast(\n tf.gather(target_class_ids, positive_roi_ix), tf.int64)\n indices = tf.stack([positive_roi_ix, positive_roi_class_ids], axis=1)\n\n # Gather the deltas (predicted and true) that contribute to loss\n target_bbox = tf.gather(target_bbox, positive_roi_ix)\n pred_bbox = tf.gather_nd(pred_bbox, indices)\n\n # Smooth-L1 Loss\n loss = K.switch(tf.size(target_bbox) > 0,\n smooth_l1_loss(y_true=target_bbox, y_pred=pred_bbox),\n tf.constant(0.0))\n loss = K.mean(loss)\n return loss\n\n\ndef mrcnn_mask_loss_graph(target_masks, target_class_ids, pred_masks):\n \"\"\"Mask binary cross-entropy loss for the masks head.\n\n target_masks: [batch, num_rois, height, width].\n A float32 tensor of values 0 or 1. Uses zero padding to fill array.\n target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded.\n pred_masks: [batch, proposals, height, width, num_classes] float32 tensor\n with values from 0 to 1.\n \"\"\"\n # Reshape for simplicity. Merge first two dimensions into one.\n target_class_ids = K.reshape(target_class_ids, (-1,))\n mask_shape = tf.shape(target_masks)\n target_masks = K.reshape(target_masks, (-1, mask_shape[2], mask_shape[3]))\n pred_shape = tf.shape(pred_masks)\n pred_masks = K.reshape(pred_masks,\n (-1, pred_shape[2], pred_shape[3], pred_shape[4]))\n # Permute predicted masks to [N, num_classes, height, width]\n pred_masks = tf.transpose(pred_masks, [0, 3, 1, 2])\n\n # Only positive ROIs contribute to the loss. And only\n # the class specific mask of each ROI.\n positive_ix = tf.where(target_class_ids > 0)[:, 0]\n positive_class_ids = tf.cast(\n tf.gather(target_class_ids, positive_ix), tf.int64)\n indices = tf.stack([positive_ix, positive_class_ids], axis=1)\n\n # Gather the masks (predicted and true) that contribute to loss\n y_true = tf.gather(target_masks, positive_ix)\n y_pred = tf.gather_nd(pred_masks, indices)\n\n # Compute binary cross entropy. If no positive ROIs, then return 0.\n # shape: [batch, roi, num_classes]\n loss = K.switch(tf.size(y_true) > 0,\n K.binary_crossentropy(target=y_true, output=y_pred),\n tf.constant(0.0))\n loss = K.mean(loss)\n return loss\n\n\n############################################################\n# Data Generator\n############################################################\n\ndef load_image_gt(dataset, config, image_id, augment=False, augmentation=None,\n use_mini_mask=False):\n \"\"\"Load and return ground truth data for an image (image, mask, bounding boxes).\n\n augment: (deprecated. Use augmentation instead). If true, apply random\n image augmentation. Currently, only horizontal flipping is offered.\n augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.\n For example, passing imgaug.augmenters.Fliplr(0.5) flips images\n right/left 50% of the time.\n use_mini_mask: If False, returns full-size masks that are the same height\n and width as the original image. These can be big, for example\n 1024x1024x100 (for 100 instances). 
Mini masks are smaller, typically,\n 224x224 and are generated by extracting the bounding box of the\n object and resizing it to MINI_MASK_SHAPE.\n\n Returns:\n image: [height, width, 3]\n shape: the original shape of the image before resizing and cropping.\n class_ids: [instance_count] Integer class IDs\n bbox: [instance_count, (y1, x1, y2, x2)]\n mask: [height, width, instance_count]. The height and width are those\n of the image unless use_mini_mask is True, in which case they are\n defined in MINI_MASK_SHAPE.\n \"\"\"\n # Load image and mask\n image = dataset.load_image(image_id)\n mask, class_ids = dataset.load_mask(image_id)\n original_shape = image.shape\n image, window, scale, padding, crop = utils.resize_image(\n image,\n min_dim=config.IMAGE_MIN_DIM,\n min_scale=config.IMAGE_MIN_SCALE,\n max_dim=config.IMAGE_MAX_DIM,\n mode=config.IMAGE_RESIZE_MODE)\n mask = utils.resize_mask(mask, scale, padding, crop)\n\n # Random horizontal flips.\n # TODO: will be removed in a future update in favor of augmentation\n if augment:\n logging.warning(\"'augment' is deprecated. Use 'augmentation' instead.\")\n if random.randint(0, 1):\n image = np.fliplr(image)\n mask = np.fliplr(mask)\n\n # Augmentation\n # This requires the imgaug lib (https://github.com/aleju/imgaug)\n if augmentation:\n import imgaug\n\n # Augmenters that are safe to apply to masks\n # Some, such as Affine, have settings that make them unsafe, so always\n # test your augmentation on masks\n MASK_AUGMENTERS = [\"Sequential\", \"SomeOf\", \"OneOf\", \"Sometimes\",\n \"Fliplr\", \"Flipud\", \"CropAndPad\",\n \"Affine\", \"PiecewiseAffine\"]\n\n def hook(images, augmenter, parents, default):\n \"\"\"Determines which augmenters to apply to masks.\"\"\"\n return augmenter.__class__.__name__ in MASK_AUGMENTERS\n\n # Store shapes before augmentation to compare\n image_shape = image.shape\n mask_shape = mask.shape\n # Make augmenters deterministic to apply similarly to images and masks\n det = augmentation.to_deterministic()\n image = det.augment_image(image)\n # Change mask to np.uint8 because imgaug doesn't support np.bool\n mask = det.augment_image(mask.astype(np.uint8),\n hooks=imgaug.HooksImages(activator=hook))\n # Verify that shapes didn't change\n assert image.shape == image_shape, \"Augmentation shouldn't change image size\"\n assert mask.shape == mask_shape, \"Augmentation shouldn't change mask size\"\n # Change mask back to bool\n mask = mask.astype(np.bool)\n\n # Note that some boxes might be all zeros if the corresponding mask got cropped out.\n # and here is to filter them out\n _idx = np.sum(mask, axis=(0, 1)) > 0\n mask = mask[:, :, _idx]\n class_ids = class_ids[_idx]\n # Bounding boxes. 
Note that some boxes might be all zeros\n # if the corresponding mask got cropped out.\n # bbox: [num_instances, (y1, x1, y2, x2)]\n bbox = utils.extract_bboxes(mask)\n\n # Active classes\n # Different datasets have different classes, so track the\n # classes supported in the dataset of this image.\n active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32)\n source_class_ids = dataset.source_class_ids[dataset.image_info[image_id][\"source\"]]\n active_class_ids[source_class_ids] = 1\n\n # Resize masks to smaller size to reduce memory usage\n if use_mini_mask:\n mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE)\n\n # Image meta data\n image_meta = compose_image_meta(image_id, original_shape, image.shape,\n window, scale, active_class_ids)\n\n return image, image_meta, class_ids, bbox, mask\n\n\ndef build_detection_targets(rpn_rois, gt_class_ids, gt_boxes, gt_masks, config):\n \"\"\"Generate targets for training Stage 2 classifier and mask heads.\n This is not used in normal training. It's useful for debugging or to train\n the Mask RCNN heads without using the RPN head.\n\n Inputs:\n rpn_rois: [N, (y1, x1, y2, x2)] proposal boxes.\n gt_class_ids: [instance count] Integer class IDs\n gt_boxes: [instance count, (y1, x1, y2, x2)]\n gt_masks: [height, width, instance count] Ground truth masks. Can be full\n size or mini-masks.\n\n Returns:\n rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)]\n class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs.\n bboxes: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (y, x, log(h), log(w))]. Class-specific\n bbox refinements.\n masks: [TRAIN_ROIS_PER_IMAGE, height, width, NUM_CLASSES). Class specific masks cropped\n to bbox boundaries and resized to neural network output size.\n \"\"\"\n assert rpn_rois.shape[0] > 0\n assert gt_class_ids.dtype == np.int32, \"Expected int but got {}\".format(\n gt_class_ids.dtype)\n assert gt_boxes.dtype == np.int32, \"Expected int but got {}\".format(\n gt_boxes.dtype)\n assert gt_masks.dtype == np.bool_, \"Expected bool but got {}\".format(\n gt_masks.dtype)\n\n # It's common to add GT Boxes to ROIs but we don't do that here because\n # according to XinLei Chen's paper, it doesn't help.\n\n # Trim empty padding in gt_boxes and gt_masks parts\n instance_ids = np.where(gt_class_ids > 0)[0]\n assert instance_ids.shape[0] > 0, \"Image must contain instances.\"\n gt_class_ids = gt_class_ids[instance_ids]\n gt_boxes = gt_boxes[instance_ids]\n gt_masks = gt_masks[:, :, instance_ids]\n\n # Compute areas of ROIs and ground truth boxes.\n rpn_roi_area = (rpn_rois[:, 2] - rpn_rois[:, 0]) * \\\n (rpn_rois[:, 3] - rpn_rois[:, 1])\n gt_box_area = (gt_boxes[:, 2] - gt_boxes[:, 0]) * \\\n (gt_boxes[:, 3] - gt_boxes[:, 1])\n\n # Compute overlaps [rpn_rois, gt_boxes]\n overlaps = np.zeros((rpn_rois.shape[0], gt_boxes.shape[0]))\n for i in range(overlaps.shape[1]):\n gt = gt_boxes[i]\n overlaps[:, i] = utils.compute_iou(\n gt, rpn_rois, gt_box_area[i], rpn_roi_area)\n\n # Assign ROIs to GT boxes\n rpn_roi_iou_argmax = np.argmax(overlaps, axis=1)\n rpn_roi_iou_max = overlaps[np.arange(\n overlaps.shape[0]), rpn_roi_iou_argmax]\n # GT box assigned to each ROI\n rpn_roi_gt_boxes = gt_boxes[rpn_roi_iou_argmax]\n rpn_roi_gt_class_ids = gt_class_ids[rpn_roi_iou_argmax]\n\n # Positive ROIs are those with >= 0.5 IoU with a GT box.\n fg_ids = np.where(rpn_roi_iou_max > 0.5)[0]\n\n # Negative ROIs are those with max IoU 0.1-0.5 (hard example mining)\n # TODO: To hard example mine or not to hard example mine, that's the question\n # bg_ids = 
np.where((rpn_roi_iou_max >= 0.1) & (rpn_roi_iou_max < 0.5))[0]\n bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]\n\n # Subsample ROIs. Aim for 33% foreground.\n # FG\n fg_roi_count = int(config.TRAIN_ROIS_PER_IMAGE * config.ROI_POSITIVE_RATIO)\n if fg_ids.shape[0] > fg_roi_count:\n keep_fg_ids = np.random.choice(fg_ids, fg_roi_count, replace=False)\n else:\n keep_fg_ids = fg_ids\n # BG\n remaining = config.TRAIN_ROIS_PER_IMAGE - keep_fg_ids.shape[0]\n if bg_ids.shape[0] > remaining:\n keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)\n else:\n keep_bg_ids = bg_ids\n # Combine indices of ROIs to keep\n keep = np.concatenate([keep_fg_ids, keep_bg_ids])\n # Need more?\n remaining = config.TRAIN_ROIS_PER_IMAGE - keep.shape[0]\n if remaining > 0:\n # Looks like we don't have enough samples to maintain the desired\n # balance. Reduce requirements and fill in the rest. This is\n # likely different from the Mask RCNN paper.\n\n # There is a small chance we have neither fg nor bg samples.\n if keep.shape[0] == 0:\n # Pick bg regions with easier IoU threshold\n bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]\n assert bg_ids.shape[0] >= remaining\n keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)\n assert keep_bg_ids.shape[0] == remaining\n keep = np.concatenate([keep, keep_bg_ids])\n else:\n # Fill the rest with repeated bg rois.\n keep_extra_ids = np.random.choice(\n keep_bg_ids, remaining, replace=True)\n keep = np.concatenate([keep, keep_extra_ids])\n assert keep.shape[0] == config.TRAIN_ROIS_PER_IMAGE, \\\n \"keep doesn't match ROI batch size {}, {}\".format(\n keep.shape[0], config.TRAIN_ROIS_PER_IMAGE)\n\n # Reset the gt boxes assigned to BG ROIs.\n rpn_roi_gt_boxes[keep_bg_ids, :] = 0\n rpn_roi_gt_class_ids[keep_bg_ids] = 0\n\n # For each kept ROI, assign a class_id, and for FG ROIs also add bbox refinement.\n rois = rpn_rois[keep]\n roi_gt_boxes = rpn_roi_gt_boxes[keep]\n roi_gt_class_ids = rpn_roi_gt_class_ids[keep]\n roi_gt_assignment = rpn_roi_iou_argmax[keep]\n\n # Class-aware bbox deltas. 
[y, x, log(h), log(w)]\n bboxes = np.zeros((config.TRAIN_ROIS_PER_IMAGE,\n config.NUM_CLASSES, 4), dtype=np.float32)\n pos_ids = np.where(roi_gt_class_ids > 0)[0]\n bboxes[pos_ids, roi_gt_class_ids[pos_ids]] = utils.box_refinement(\n rois[pos_ids], roi_gt_boxes[pos_ids, :4])\n # Normalize bbox refinements\n bboxes /= config.BBOX_STD_DEV\n\n # Generate class-specific target masks\n masks = np.zeros((config.TRAIN_ROIS_PER_IMAGE, config.MASK_SHAPE[0], config.MASK_SHAPE[1], config.NUM_CLASSES),\n dtype=np.float32)\n for i in pos_ids:\n class_id = roi_gt_class_ids[i]\n assert class_id > 0, \"class id must be greater than 0\"\n gt_id = roi_gt_assignment[i]\n class_mask = gt_masks[:, :, gt_id]\n\n if config.USE_MINI_MASK:\n # Create a mask placeholder, the size of the image\n placeholder = np.zeros(config.IMAGE_SHAPE[:2], dtype=bool)\n # GT box\n gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[gt_id]\n gt_w = gt_x2 - gt_x1\n gt_h = gt_y2 - gt_y1\n # Resize mini mask to size of GT box\n placeholder[gt_y1:gt_y2, gt_x1:gt_x2] = \\\n np.round(utils.resize(class_mask, (gt_h, gt_w))).astype(bool)\n # Place the mini batch in the placeholder\n class_mask = placeholder\n\n # Pick part of the mask and resize it\n y1, x1, y2, x2 = rois[i].astype(np.int32)\n m = class_mask[y1:y2, x1:x2]\n mask = utils.resize(m, config.MASK_SHAPE)\n masks[i, :, :, class_id] = mask\n\n return rois, roi_gt_class_ids, bboxes, masks\n\n\ndef build_rpn_targets(image_shape, anchors, gt_class_ids, gt_boxes, config):\n \"\"\"Given the anchors and GT boxes, compute overlaps and identify positive\n anchors and deltas to refine them to match their corresponding GT boxes.\n\n anchors: [num_anchors, (y1, x1, y2, x2)]\n gt_class_ids: [num_gt_boxes] Integer class IDs.\n gt_boxes: [num_gt_boxes, (y1, x1, y2, x2)]\n\n Returns:\n rpn_match: [N] (int32) matches between anchors and GT boxes.\n 1 = positive anchor, -1 = negative anchor, 0 = neutral\n rpn_bbox: [N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.\n \"\"\"\n # RPN Match: 1 = positive anchor, -1 = negative anchor, 0 = neutral\n rpn_match = np.zeros([anchors.shape[0]], dtype=np.int32)\n # RPN bounding boxes: [max anchors per image, (dy, dx, log(dh), log(dw))]\n rpn_bbox = np.zeros((config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4))\n\n # Handle COCO crowds\n # A crowd box in COCO is a bounding box around several instances. Exclude\n # them from training. A crowd box is given a negative class ID.\n crowd_ix = np.where(gt_class_ids < 0)[0]\n if crowd_ix.shape[0] > 0:\n # Filter out crowds from ground truth class IDs and boxes\n non_crowd_ix = np.where(gt_class_ids > 0)[0]\n crowd_boxes = gt_boxes[crowd_ix]\n gt_class_ids = gt_class_ids[non_crowd_ix]\n gt_boxes = gt_boxes[non_crowd_ix]\n # Compute overlaps with crowd boxes [anchors, crowds]\n crowd_overlaps = utils.compute_overlaps(anchors, crowd_boxes)\n crowd_iou_max = np.amax(crowd_overlaps, axis=1)\n no_crowd_bool = (crowd_iou_max < 0.001)\n else:\n # All anchors don't intersect a crowd\n no_crowd_bool = np.ones([anchors.shape[0]], dtype=bool)\n\n # Compute overlaps [num_anchors, num_gt_boxes]\n overlaps = utils.compute_overlaps(anchors, gt_boxes)\n\n # Match anchors to GT Boxes\n # If an anchor overlaps a GT box with IoU >= 0.7 then it's positive.\n # If an anchor overlaps a GT box with IoU < 0.3 then it's negative.\n # Neutral anchors are those that don't match the conditions above,\n # and they don't influence the loss function.\n # However, don't keep any GT box unmatched (rare, but happens). 
Instead,\n # match it to the closest anchor (even if its max IoU is < 0.3).\n #\n # 1. Set negative anchors first. They get overwritten below if a GT box is\n # matched to them. Skip boxes in crowd areas.\n anchor_iou_argmax = np.argmax(overlaps, axis=1)\n anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax]\n rpn_match[(anchor_iou_max < 0.3) & (no_crowd_bool)] = -1\n # 2. Set an anchor for each GT box (regardless of IoU value).\n # If multiple anchors have the same IoU match all of them\n gt_iou_argmax = np.argwhere(overlaps == np.max(overlaps, axis=0))[:,0]\n rpn_match[gt_iou_argmax] = 1\n # 3. Set anchors with high overlap as positive.\n rpn_match[anchor_iou_max >= 0.7] = 1\n\n # Subsample to balance positive and negative anchors\n # Don't let positives be more than half the anchors\n ids = np.where(rpn_match == 1)[0]\n extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE // 2)\n if extra > 0:\n # Reset the extra ones to neutral\n ids = np.random.choice(ids, extra, replace=False)\n rpn_match[ids] = 0\n # Same for negative proposals\n ids = np.where(rpn_match == -1)[0]\n extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE -\n np.sum(rpn_match == 1))\n if extra > 0:\n # Rest the extra ones to neutral\n ids = np.random.choice(ids, extra, replace=False)\n rpn_match[ids] = 0\n\n # For positive anchors, compute shift and scale needed to transform them\n # to match the corresponding GT boxes.\n ids = np.where(rpn_match == 1)[0]\n ix = 0 # index into rpn_bbox\n # TODO: use box_refinement() rather than duplicating the code here\n for i, a in zip(ids, anchors[ids]):\n # Closest gt box (it might have IoU < 0.7)\n gt = gt_boxes[anchor_iou_argmax[i]]\n\n # Convert coordinates to center plus width/height.\n # GT Box\n gt_h = gt[2] - gt[0]\n gt_w = gt[3] - gt[1]\n gt_center_y = gt[0] + 0.5 * gt_h\n gt_center_x = gt[1] + 0.5 * gt_w\n # Anchor\n a_h = a[2] - a[0]\n a_w = a[3] - a[1]\n a_center_y = a[0] + 0.5 * a_h\n a_center_x = a[1] + 0.5 * a_w\n\n # Compute the bbox refinement that the RPN should predict.\n rpn_bbox[ix] = [\n (gt_center_y - a_center_y) / a_h,\n (gt_center_x - a_center_x) / a_w,\n np.log(gt_h / a_h),\n np.log(gt_w / a_w),\n ]\n # Normalize\n rpn_bbox[ix] /= config.RPN_BBOX_STD_DEV\n ix += 1\n\n return rpn_match, rpn_bbox\n\n\ndef generate_random_rois(image_shape, count, gt_class_ids, gt_boxes):\n \"\"\"Generates ROI proposals similar to what a region proposal network\n would generate.\n\n image_shape: [Height, Width, Depth]\n count: Number of ROIs to generate\n gt_class_ids: [N] Integer ground truth class IDs\n gt_boxes: [N, (y1, x1, y2, x2)] Ground truth boxes in pixels.\n\n Returns: [count, (y1, x1, y2, x2)] ROI boxes in pixels.\n \"\"\"\n # placeholder\n rois = np.zeros((count, 4), dtype=np.int32)\n\n # Generate random ROIs around GT boxes (90% of count)\n rois_per_box = int(0.9 * count / gt_boxes.shape[0])\n for i in range(gt_boxes.shape[0]):\n gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[i]\n h = gt_y2 - gt_y1\n w = gt_x2 - gt_x1\n # random boundaries\n r_y1 = max(gt_y1 - h, 0)\n r_y2 = min(gt_y2 + h, image_shape[0])\n r_x1 = max(gt_x1 - w, 0)\n r_x2 = min(gt_x2 + w, image_shape[1])\n\n # To avoid generating boxes with zero area, we generate double what\n # we need and filter out the extra. 
If we get fewer valid boxes\n # than we need, we loop and try again.\n while True:\n y1y2 = np.random.randint(r_y1, r_y2, (rois_per_box * 2, 2))\n x1x2 = np.random.randint(r_x1, r_x2, (rois_per_box * 2, 2))\n # Filter out zero area boxes\n threshold = 1\n y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=\n threshold][:rois_per_box]\n x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=\n threshold][:rois_per_box]\n if y1y2.shape[0] == rois_per_box and x1x2.shape[0] == rois_per_box:\n break\n\n # Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape\n # into x1, y1, x2, y2 order\n x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)\n y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)\n box_rois = np.hstack([y1, x1, y2, x2])\n rois[rois_per_box * i:rois_per_box * (i + 1)] = box_rois\n\n # Generate random ROIs anywhere in the image (10% of count)\n remaining_count = count - (rois_per_box * gt_boxes.shape[0])\n # To avoid generating boxes with zero area, we generate double what\n # we need and filter out the extra. If we get fewer valid boxes\n # than we need, we loop and try again.\n while True:\n y1y2 = np.random.randint(0, image_shape[0], (remaining_count * 2, 2))\n x1x2 = np.random.randint(0, image_shape[1], (remaining_count * 2, 2))\n # Filter out zero area boxes\n threshold = 1\n y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=\n threshold][:remaining_count]\n x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=\n threshold][:remaining_count]\n if y1y2.shape[0] == remaining_count and x1x2.shape[0] == remaining_count:\n break\n\n # Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape\n # into x1, y1, x2, y2 order\n x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)\n y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)\n global_rois = np.hstack([y1, x1, y2, x2])\n rois[-remaining_count:] = global_rois\n return rois\n\nclass DataGenerator(keras.utils.Sequence):\n \"\"\"A generator that returns images and corresponding target class ids,\n bounding box deltas, and masks.\n\n dataset: The Dataset object to pick data from\n config: The model config object\n shuffle: If True, shuffles the samples before every epoch\n augment: (deprecated. Use augmentation instead). If true, apply random\n image augmentation. Currently, only horizontal flipping is offered.\n augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.\n For example, passing imgaug.augmenters.Fliplr(0.5) flips images\n right/left 50% of the time.\n random_rois: If > 0 then generate proposals to be used to train the\n network classifier and mask heads. Useful if training\n the Mask RCNN part without the RPN.\n batch_size: How many images to return in each call\n detection_targets: If True, generate detection targets (class IDs, bbox\n deltas, and masks). Typically for debugging or visualizations because\n in trainig detection targets are generated by DetectionTargetLayer.\n no_augmentation_sources: Optional. List of sources to exclude for\n augmentation. A source is string that identifies a dataset and is\n defined in the Dataset class.\n\n Returns a Python generator. Upon calling next() on it, the\n generator returns two lists, inputs and outputs. The contents\n of the lists differs depending on the received arguments:\n inputs list:\n - images: [batch, H, W, C]\n - image_meta: [batch, (meta data)] Image details. 
See compose_image_meta()\n - rpn_match: [batch, N] Integer (1=positive anchor, -1=negative, 0=neutral)\n - rpn_bbox: [batch, N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.\n - gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs\n - gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)]\n - gt_masks: [batch, height, width, MAX_GT_INSTANCES]. The height and width\n are those of the image unless use_mini_mask is True, in which\n case they are defined in MINI_MASK_SHAPE.\n\n outputs list: Usually empty in regular training. But if detection_targets\n is True then the outputs list contains target class_ids, bbox deltas,\n and masks.\n \"\"\"\n\n def __init__(self, dataset, config, shuffle=True, augment=False, augmentation=None,\n random_rois=0, batch_size=1, detection_targets=False,\n no_augmentation_sources=None):\n\n self.image_ids = np.copy(dataset.image_ids)\n self.dataset = dataset\n self.config = config\n self.error_count = 0\n\n # Anchors\n # [anchor_count, (y1, x1, y2, x2)]\n self.backbone_shapes = compute_backbone_shapes(config, config.IMAGE_SHAPE)\n self.anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,\n config.RPN_ANCHOR_RATIOS,\n self.backbone_shapes,\n config.BACKBONE_STRIDES,\n config.RPN_ANCHOR_STRIDE)\n\n self.shuffle = shuffle\n self.augment = augment\n self.augmentation = augmentation\n self.random_rois = random_rois\n self.batch_size = batch_size\n self.detection_targets = detection_targets\n self.no_augmentation_sources = no_augmentation_sources or []\n\n\n\n def __len__(self):\n return int(np.ceil(len(self.image_ids) / float(self.batch_size)))\n\n def __getitem__(self, idx):\n return self.data_generator( self.image_ids[idx*self.batch_size:(idx+1)*self.batch_size] )\n\n def data_generator(self, image_ids):\n b=0\n while b < self.batch_size and b < image_ids.shape[0]:\n try:\n # Get GT bounding boxes and masks for image.\n image_id = image_ids[b]\n\n # If the image source is not to be augmented pass None as augmentation\n if self.dataset.image_info[image_id]['source'] in self.no_augmentation_sources:\n image, image_meta, gt_class_ids, gt_boxes, gt_masks = \\\n load_image_gt(self.dataset, self.config, image_id, augment=self.augment,\n augmentation=None,\n use_mini_mask=self.config.USE_MINI_MASK)\n else:\n image, image_meta, gt_class_ids, gt_boxes, gt_masks = \\\n load_image_gt(self.dataset, self.config, image_id, augment=self.augment,\n augmentation=self.augmentation,\n use_mini_mask=self.config.USE_MINI_MASK)\n\n # Skip images that have no instances. 
This can happen in cases\n # where we train on a subset of classes and the image doesn't\n # have any of the classes we care about.\n if not np.any(gt_class_ids > 0):\n continue\n\n # RPN Targets\n rpn_match, rpn_bbox = build_rpn_targets(image.shape, self.anchors,\n gt_class_ids, gt_boxes, self.config)\n\n # Mask R-CNN Targets\n if self.random_rois:\n rpn_rois = generate_random_rois(\n image.shape, self.random_rois, gt_class_ids, gt_boxes)\n if self.detection_targets:\n rois, mrcnn_class_ids, mrcnn_bbox, mrcnn_mask =\\\n build_detection_targets(\n rpn_rois, gt_class_ids, gt_boxes, gt_masks, self.config)\n\n # Init batch arrays\n if b == 0:\n batch_image_meta = np.zeros(\n (self.batch_size,) + image_meta.shape, dtype=image_meta.dtype)\n batch_rpn_match = np.zeros(\n [self.batch_size, self.anchors.shape[0], 1], dtype=rpn_match.dtype)\n batch_rpn_bbox = np.zeros(\n [self.batch_size, self.config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4], dtype=rpn_bbox.dtype)\n batch_images = np.zeros(\n (self.batch_size,) + image.shape, dtype=np.float32)\n batch_gt_class_ids = np.zeros(\n (self.batch_size, self.config.MAX_GT_INSTANCES), dtype=np.int32)\n batch_gt_boxes = np.zeros(\n (self.batch_size, self.config.MAX_GT_INSTANCES, 4), dtype=np.int32)\n batch_gt_masks = np.zeros(\n (self.batch_size, gt_masks.shape[0], gt_masks.shape[1],\n self.config.MAX_GT_INSTANCES), dtype=gt_masks.dtype)\n if self.random_rois:\n batch_rpn_rois = np.zeros(\n (self.batch_size, rpn_rois.shape[0], 4), dtype=rpn_rois.dtype)\n if self.detection_targets:\n batch_rois = np.zeros(\n (self.batch_size,) + rois.shape, dtype=rois.dtype)\n batch_mrcnn_class_ids = np.zeros(\n (self.batch_size,) + mrcnn_class_ids.shape, dtype=mrcnn_class_ids.dtype)\n batch_mrcnn_bbox = np.zeros(\n (self.batch_size,) + mrcnn_bbox.shape, dtype=mrcnn_bbox.dtype)\n batch_mrcnn_mask = np.zeros(\n (self.batch_size,) + mrcnn_mask.shape, dtype=mrcnn_mask.dtype)\n\n # If more instances than fits in the array, sub-sample from them.\n if gt_boxes.shape[0] > self.config.MAX_GT_INSTANCES:\n ids = np.random.choice(\n np.arange(gt_boxes.shape[0]), self.config.MAX_GT_INSTANCES, replace=False)\n gt_class_ids = gt_class_ids[ids]\n gt_boxes = gt_boxes[ids]\n gt_masks = gt_masks[:, :, ids]\n\n # Add to batch\n batch_image_meta[b] = image_meta\n batch_rpn_match[b] = rpn_match[:, np.newaxis]\n batch_rpn_bbox[b] = rpn_bbox\n batch_images[b] = mold_image(image.astype(np.float32), self.config)\n batch_gt_class_ids[b, :gt_class_ids.shape[0]] = gt_class_ids\n batch_gt_boxes[b, :gt_boxes.shape[0]] = gt_boxes\n batch_gt_masks[b, :, :, :gt_masks.shape[-1]] = gt_masks\n if self.random_rois:\n batch_rpn_rois[b] = rpn_rois\n if self.detection_targets:\n batch_rois[b] = rois\n batch_mrcnn_class_ids[b] = mrcnn_class_ids\n batch_mrcnn_bbox[b] = mrcnn_bbox\n batch_mrcnn_mask[b] = mrcnn_mask\n b += 1\n\n # Batch full?\n if b >= self.batch_size:\n inputs = [batch_images, batch_image_meta, batch_rpn_match, batch_rpn_bbox,\n batch_gt_class_ids, batch_gt_boxes, batch_gt_masks]\n outputs = []\n\n if self.random_rois:\n inputs.extend([batch_rpn_rois])\n if self.detection_targets:\n inputs.extend([batch_rois])\n # Keras requires that output and targets have the same number of dimensions\n batch_mrcnn_class_ids = np.expand_dims(\n batch_mrcnn_class_ids, -1)\n outputs.extend(\n [batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_mask])\n\n return inputs, outputs\n\n except (GeneratorExit, KeyboardInterrupt):\n raise\n except:\n # Log it and skip the image\n logging.exception(\"Error processing image 
{}\".format(\n self.dataset.image_info[image_id]))\n self.error_count += 1\n if self.error_count > 5:\n raise\n\n def on_epoch_end(self):\n if self.shuffle == True:\n np.random.shuffle(self.image_ids)\n\n\n############################################################\n# MaskRCNN Class\n############################################################\n\nclass MaskRCNN():\n \"\"\"Encapsulates the Mask RCNN model functionality.\n\n The actual Keras model is in the keras_model property.\n \"\"\"\n\n def __init__(self, mode, config, model_dir):\n \"\"\"\n mode: Either \"training\" or \"inference\"\n config: A Sub-class of the Config class\n model_dir: Directory to save training logs and trained weights\n \"\"\"\n assert mode in ['training', 'inference']\n self.mode = mode\n self.config = config\n self.model_dir = model_dir\n self.set_log_dir()\n self.keras_model = self.build(mode=mode, config=config)\n\n def build(self, mode, config):\n \"\"\"Build Mask R-CNN architecture.\n input_shape: The shape of the input image.\n mode: Either \"training\" or \"inference\". The inputs and\n outputs of the model differ accordingly.\n \"\"\"\n assert mode in ['training', 'inference']\n\n # Image size must be dividable by 2 multiple times\n h, w = config.IMAGE_SHAPE[:2]\n if h / 2**6 != int(h / 2**6) or w / 2**6 != int(w / 2**6):\n raise Exception(\"Image size must be dividable by 2 at least 6 times \"\n \"to avoid fractions when downscaling and upscaling.\"\n \"For example, use 256, 320, 384, 448, 512, ... etc. \")\n\n # Inputs\n input_image = KL.Input(\n shape=[None, None, config.IMAGE_SHAPE[2]], name=\"input_image\")\n input_image_meta = KL.Input(shape=[config.IMAGE_META_SIZE],\n name=\"input_image_meta\")\n if mode == \"training\":\n # RPN GT\n input_rpn_match = KL.Input(\n shape=[None, 1], name=\"input_rpn_match\", dtype=tf.int32)\n input_rpn_bbox = KL.Input(\n shape=[None, 4], name=\"input_rpn_bbox\", dtype=tf.float32)\n\n # Detection GT (class IDs, bounding boxes, and masks)\n # 1. GT Class IDs (zero padded)\n input_gt_class_ids = KL.Input(\n shape=[None], name=\"input_gt_class_ids\", dtype=tf.int32)\n # 2. GT Boxes in pixels (zero padded)\n # [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in image coordinates\n input_gt_boxes = KL.Input(\n shape=[None, 4], name=\"input_gt_boxes\", dtype=tf.float32)\n # Normalize coordinates\n gt_boxes = KL.Lambda(lambda x: norm_boxes_graph(\n x, K.shape(input_image)[1:3]))(input_gt_boxes)\n # 3. 
GT Masks (zero padded)\n # [batch, height, width, MAX_GT_INSTANCES]\n if config.USE_MINI_MASK:\n input_gt_masks = KL.Input(\n shape=[config.MINI_MASK_SHAPE[0],\n config.MINI_MASK_SHAPE[1], None],\n name=\"input_gt_masks\", dtype=bool)\n else:\n input_gt_masks = KL.Input(\n shape=[config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1], None],\n name=\"input_gt_masks\", dtype=bool)\n elif mode == \"inference\":\n # Anchors in normalized coordinates\n input_anchors = KL.Input(shape=[None, 4], name=\"input_anchors\")\n\n # Build the shared convolutional layers.\n # Bottom-up Layers\n # Returns a list of the last layers of each stage, 5 in total.\n # Don't create the thead (stage 5), so we pick the 4th item in the list.\n if callable(config.BACKBONE):\n _, C2, C3, C4, C5 = config.BACKBONE(input_image, stage5=True,\n train_bn=config.TRAIN_BN)\n else:\n _, C2, C3, C4, C5 = resnet_graph(input_image, config.BACKBONE,\n stage5=True, train_bn=config.TRAIN_BN)\n # Top-down Layers\n # TODO: add assert to varify feature map sizes match what's in config\n P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c5p5')(C5)\n P4 = KL.Add(name=\"fpn_p4add\")([\n KL.UpSampling2D(size=(2, 2), name=\"fpn_p5upsampled\")(P5),\n KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c4p4')(C4)])\n P3 = KL.Add(name=\"fpn_p3add\")([\n KL.UpSampling2D(size=(2, 2), name=\"fpn_p4upsampled\")(P4),\n KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c3p3')(C3)])\n P2 = KL.Add(name=\"fpn_p2add\")([\n KL.UpSampling2D(size=(2, 2), name=\"fpn_p3upsampled\")(P3),\n KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c2p2')(C2)])\n # Attach 3x3 conv to all P layers to get the final feature maps.\n P2 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding=\"SAME\", name=\"fpn_p2\")(P2)\n P3 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding=\"SAME\", name=\"fpn_p3\")(P3)\n P4 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding=\"SAME\", name=\"fpn_p4\")(P4)\n P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding=\"SAME\", name=\"fpn_p5\")(P5)\n # P6 is used for the 5th anchor scale in RPN. Generated by\n # subsampling from P5 with stride of 2.\n P6 = KL.MaxPooling2D(pool_size=(1, 1), strides=2, name=\"fpn_p6\")(P5)\n\n # Note that P6 is used in RPN, but not in the classifier heads.\n rpn_feature_maps = [P2, P3, P4, P5, P6]\n mrcnn_feature_maps = [P2, P3, P4, P5]\n\n # Anchors\n if mode == \"training\":\n anchors = self.get_anchors(config.IMAGE_SHAPE)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(anchors, (config.BATCH_SIZE,) + anchors.shape)\n # A hack to get around Keras's bad support for constants\n anchors = KL.Lambda(lambda x: tf.Variable(anchors), name=\"anchors\")(input_image)\n else:\n anchors = input_anchors\n\n # RPN Model\n rpn = build_rpn_model(config.RPN_ANCHOR_STRIDE,\n len(config.RPN_ANCHOR_RATIOS), config.TOP_DOWN_PYRAMID_SIZE)\n # Loop through pyramid layers\n layer_outputs = [] # list of lists\n for p in rpn_feature_maps:\n layer_outputs.append(rpn([p]))\n # Concatenate layer outputs\n # Convert from list of lists of level outputs to list of lists\n # of outputs across levels.\n # e.g. 
[[a1, b1, c1], [a2, b2, c2]] => [[a1, a2], [b1, b2], [c1, c2]]\n output_names = [\"rpn_class_logits\", \"rpn_class\", \"rpn_bbox\"]\n outputs = list(zip(*layer_outputs))\n outputs = [KL.Concatenate(axis=1, name=n)(list(o))\n for o, n in zip(outputs, output_names)]\n\n rpn_class_logits, rpn_class, rpn_bbox = outputs\n\n # Generate proposals\n # Proposals are [batch, N, (y1, x1, y2, x2)] in normalized coordinates\n # and zero padded.\n proposal_count = config.POST_NMS_ROIS_TRAINING if mode == \"training\"\\\n else config.POST_NMS_ROIS_INFERENCE\n rpn_rois = ProposalLayer(\n proposal_count=proposal_count,\n nms_threshold=config.RPN_NMS_THRESHOLD,\n name=\"ROI\",\n config=config)([rpn_class, rpn_bbox, anchors])\n\n if mode == \"training\":\n # Class ID mask to mark class IDs supported by the dataset the image\n # came from.\n active_class_ids = KL.Lambda(\n lambda x: parse_image_meta_graph(x)[\"active_class_ids\"]\n )(input_image_meta)\n\n if not config.USE_RPN_ROIS:\n # Ignore predicted ROIs and use ROIs provided as an input.\n input_rois = KL.Input(shape=[config.POST_NMS_ROIS_TRAINING, 4],\n name=\"input_roi\", dtype=np.int32)\n # Normalize coordinates\n target_rois = KL.Lambda(lambda x: norm_boxes_graph(\n x, K.shape(input_image)[1:3]))(input_rois)\n else:\n target_rois = rpn_rois\n\n # Generate detection targets\n # Subsamples proposals and generates target outputs for training\n # Note that proposal class IDs, gt_boxes, and gt_masks are zero\n # padded. Equally, returned rois and targets are zero padded.\n rois, target_class_ids, target_bbox, target_mask =\\\n DetectionTargetLayer(config, name=\"proposal_targets\")([\n target_rois, input_gt_class_ids, gt_boxes, input_gt_masks])\n\n # Network Heads\n # TODO: verify that this handles zero padded ROIs\n mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\\\n fpn_classifier_graph(rois, mrcnn_feature_maps, input_image_meta,\n config.POOL_SIZE, config.NUM_CLASSES,\n train_bn=config.TRAIN_BN,\n fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)\n\n mrcnn_mask = build_fpn_mask_graph(rois, mrcnn_feature_maps,\n input_image_meta,\n config.MASK_POOL_SIZE,\n config.NUM_CLASSES,\n train_bn=config.TRAIN_BN)\n\n # TODO: clean up (use tf.identify if necessary)\n output_rois = KL.Lambda(lambda x: x * 1, name=\"output_rois\")(rois)\n\n # Losses\n rpn_class_loss = KL.Lambda(lambda x: rpn_class_loss_graph(*x), name=\"rpn_class_loss\")(\n [input_rpn_match, rpn_class_logits])\n rpn_bbox_loss = KL.Lambda(lambda x: rpn_bbox_loss_graph(config, *x), name=\"rpn_bbox_loss\")(\n [input_rpn_bbox, input_rpn_match, rpn_bbox])\n class_loss = KL.Lambda(lambda x: mrcnn_class_loss_graph(*x), name=\"mrcnn_class_loss\")(\n [target_class_ids, mrcnn_class_logits, active_class_ids])\n bbox_loss = KL.Lambda(lambda x: mrcnn_bbox_loss_graph(*x), name=\"mrcnn_bbox_loss\")(\n [target_bbox, target_class_ids, mrcnn_bbox])\n mask_loss = KL.Lambda(lambda x: mrcnn_mask_loss_graph(*x), name=\"mrcnn_mask_loss\")(\n [target_mask, target_class_ids, mrcnn_mask])\n\n # Model\n inputs = [input_image, input_image_meta,\n input_rpn_match, input_rpn_bbox, input_gt_class_ids, input_gt_boxes, input_gt_masks]\n if not config.USE_RPN_ROIS:\n inputs.append(input_rois)\n outputs = [rpn_class_logits, rpn_class, rpn_bbox,\n mrcnn_class_logits, mrcnn_class, mrcnn_bbox, mrcnn_mask,\n rpn_rois, output_rois,\n rpn_class_loss, rpn_bbox_loss, class_loss, bbox_loss, mask_loss]\n model = KM.Model(inputs, outputs, name='mask_rcnn')\n else:\n # Network Heads\n # Proposal classifier and BBox regressor heads\n 
mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\\\n fpn_classifier_graph(rpn_rois, mrcnn_feature_maps, input_image_meta,\n config.POOL_SIZE, config.NUM_CLASSES,\n train_bn=config.TRAIN_BN,\n fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)\n\n # Detections\n # output is [batch, num_detections, (y1, x1, y2, x2, class_id, score)] in\n # normalized coordinates\n detections = DetectionLayer(config, name=\"mrcnn_detection\")(\n [rpn_rois, mrcnn_class, mrcnn_bbox, input_image_meta])\n\n # Create masks for detections\n detection_boxes = KL.Lambda(lambda x: x[..., :4])(detections)\n mrcnn_mask = build_fpn_mask_graph(detection_boxes, mrcnn_feature_maps,\n input_image_meta,\n config.MASK_POOL_SIZE,\n config.NUM_CLASSES,\n train_bn=config.TRAIN_BN)\n\n model = KM.Model([input_image, input_image_meta, input_anchors],\n [detections, mrcnn_class, mrcnn_bbox,\n mrcnn_mask, rpn_rois, rpn_class, rpn_bbox],\n name='mask_rcnn')\n\n # Add multi-GPU support.\n if config.GPU_COUNT > 1:\n from mrcnn.parallel_model import ParallelModel\n model = ParallelModel(model, config.GPU_COUNT)\n\n return model\n\n def find_last(self):\n \"\"\"Finds the last checkpoint file of the last trained model in the\n model directory.\n Returns:\n The path of the last checkpoint file\n \"\"\"\n # Get directory names. Each directory corresponds to a model\n dir_names = next(os.walk(self.model_dir))[1]\n key = self.config.NAME.lower()\n dir_names = filter(lambda f: f.startswith(key), dir_names)\n dir_names = sorted(dir_names)\n if not dir_names:\n import errno\n raise FileNotFoundError(\n errno.ENOENT,\n \"Could not find model directory under {}\".format(self.model_dir))\n # Pick last directory\n dir_name = os.path.join(self.model_dir, dir_names[-1])\n # Find the last checkpoint\n checkpoints = next(os.walk(dir_name))[2]\n checkpoints = filter(lambda f: f.startswith(\"mask_rcnn\"), checkpoints)\n checkpoints = sorted(checkpoints)\n if not checkpoints:\n import errno\n raise FileNotFoundError(\n errno.ENOENT, \"Could not find weight files in {}\".format(dir_name))\n checkpoint = os.path.join(dir_name, checkpoints[-1])\n return checkpoint\n\n def load_weights(self, filepath, by_name=False, exclude=None):\n \"\"\"Modified version of the corresponding Keras function with\n the addition of multi-GPU support and the ability to exclude\n some layers from loading.\n exclude: list of layer names to exclude\n \"\"\"\n import h5py\n # Conditional import to support versions of Keras before 2.2\n # TODO: remove in about 6 months (end of 2018)\n try:\n from keras.engine import saving\n except ImportError:\n # Keras before 2.2 used the 'topology' namespace.\n from keras.engine import topology as saving\n\n if exclude:\n by_name = True\n\n if h5py is None:\n raise ImportError('`load_weights` requires h5py.')\n f = h5py.File(filepath, mode='r')\n if 'layer_names' not in f.attrs and 'model_weights' in f:\n f = f['model_weights']\n\n # In multi-GPU training, we wrap the model. 
Get layers\n # of the inner model because they have the weights.\n keras_model = self.keras_model\n layers = keras_model.inner_model.layers if hasattr(keras_model, \"inner_model\")\\\n else keras_model.layers\n\n # Exclude some layers\n if exclude:\n layers = filter(lambda l: l.name not in exclude, layers)\n\n if by_name:\n saving.load_weights_from_hdf5_group_by_name(f, layers)\n else:\n saving.load_weights_from_hdf5_group(f, layers)\n if hasattr(f, 'close'):\n f.close()\n\n # Update the log directory\n self.set_log_dir(filepath)\n\n def get_imagenet_weights(self):\n \"\"\"Downloads ImageNet trained weights from Keras.\n Returns path to weights file.\n \"\"\"\n from keras.utils.data_utils import get_file\n TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/'\\\n 'releases/download/v0.2/'\\\n 'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'\n weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',\n TF_WEIGHTS_PATH_NO_TOP,\n cache_subdir='models',\n md5_hash='a268eb855778b3df3c7506639542a6af')\n return weights_path\n\n def compile(self, learning_rate, momentum):\n \"\"\"Gets the model ready for training. Adds losses, regularization, and\n metrics. Then calls the Keras compile() function.\n \"\"\"\n # Optimizer object\n optimizer = keras.optimizers.SGD(\n lr=learning_rate, momentum=momentum,\n clipnorm=self.config.GRADIENT_CLIP_NORM)\n # Add Losses\n # First, clear previously set losses to avoid duplication\n self.keras_model._losses = []\n self.keras_model._per_input_losses = {}\n loss_names = [\n \"rpn_class_loss\", \"rpn_bbox_loss\",\n \"mrcnn_class_loss\", \"mrcnn_bbox_loss\", \"mrcnn_mask_loss\"]\n for name in loss_names:\n layer = self.keras_model.get_layer(name)\n if layer.output in self.keras_model.losses:\n continue\n loss = (\n tf.reduce_mean(layer.output, keepdims=True)\n * self.config.LOSS_WEIGHTS.get(name, 1.))\n self.keras_model.add_loss(loss)\n\n # Add L2 Regularization\n # Skip gamma and beta weights of batch normalization layers.\n reg_losses = [\n keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)\n for w in self.keras_model.trainable_weights\n if 'gamma' not in w.name and 'beta' not in w.name]\n self.keras_model.add_loss(tf.add_n(reg_losses))\n\n # Compile\n self.keras_model.compile(\n optimizer=optimizer,\n loss=[None] * len(self.keras_model.outputs))\n\n # Add metrics for losses\n for name in loss_names:\n if name in self.keras_model.metrics_names:\n continue\n layer = self.keras_model.get_layer(name)\n self.keras_model.metrics_names.append(name)\n loss = (\n tf.reduce_mean(layer.output, keepdims=True)\n * self.config.LOSS_WEIGHTS.get(name, 1.))\n self.keras_model.metrics_tensors.append(loss)\n\n def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1):\n \"\"\"Sets model layers as trainable if their names match\n the given regular expression.\n \"\"\"\n # Print message on the first call (but not on recursive calls)\n if verbose > 0 and keras_model is None:\n log(\"Selecting layers to train\")\n\n keras_model = keras_model or self.keras_model\n\n # In multi-GPU training, we wrap the model. 
Get layers\n # of the inner model because they have the weights.\n layers = keras_model.inner_model.layers if hasattr(keras_model, \"inner_model\")\\\n else keras_model.layers\n\n for layer in layers:\n # Is the layer a model?\n if layer.__class__.__name__ == 'Model':\n print(\"In model: \", layer.name)\n self.set_trainable(\n layer_regex, keras_model=layer, indent=indent + 4)\n continue\n\n if not layer.weights:\n continue\n # Is it trainable?\n trainable = bool(re.fullmatch(layer_regex, layer.name))\n # Update layer. If layer is a container, update inner layer.\n if layer.__class__.__name__ == 'TimeDistributed':\n layer.layer.trainable = trainable\n else:\n layer.trainable = trainable\n # Print trainable layer names\n if trainable and verbose > 0:\n log(\"{}{:20} ({})\".format(\" \" * indent, layer.name,\n layer.__class__.__name__))\n\n def set_log_dir(self, model_path=None):\n \"\"\"Sets the model log directory and epoch counter.\n\n model_path: If None, or a format different from what this code uses\n then set a new log directory and start epochs from 0. Otherwise,\n extract the log directory and the epoch counter from the file\n name.\n \"\"\"\n # Set date and epoch counter as if starting a new model\n self.epoch = 0\n now = datetime.datetime.now()\n\n # If we have a model path with date and epochs use them\n if model_path:\n # Continue from we left of. Get epoch and date from the file name\n # A sample model path might look like:\n # \\path\\to\\logs\\coco20171029T2315\\mask_rcnn_coco_0001.h5 (Windows)\n # /path/to/logs/coco20171029T2315/mask_rcnn_coco_0001.h5 (Linux)\n regex = r\".*[/\\\\][\\w-]+(\\d{4})(\\d{2})(\\d{2})T(\\d{2})(\\d{2})[/\\\\]mask\\_rcnn\\_[\\w-]+(\\d{4})\\.h5\"\n m = re.match(regex, model_path)\n if m:\n now = datetime.datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)),\n int(m.group(4)), int(m.group(5)))\n # Epoch number in file is 1-based, and in Keras code it's 0-based.\n # So, adjust for that then increment by one to start from the next epoch\n self.epoch = int(m.group(6)) - 1 + 1\n print('Re-starting from epoch %d' % self.epoch)\n\n # Directory for training logs\n self.log_dir = os.path.join(self.model_dir, \"{}{:%Y%m%dT%H%M}\".format(\n self.config.NAME.lower(), now))\n\n # Path to save after each epoch. Include placeholders that get filled by Keras.\n self.checkpoint_path = os.path.join(self.log_dir, \"mask_rcnn_{}_*epoch*.h5\".format(\n self.config.NAME.lower()))\n self.checkpoint_path = self.checkpoint_path.replace(\n \"*epoch*\", \"{epoch:04d}\")\n\n def train(self, train_dataset, val_dataset, learning_rate, epochs, layers,\n augmentation=None, custom_callbacks=None, no_augmentation_sources=None):\n \"\"\"Train the model.\n train_dataset, val_dataset: Training and validation Dataset objects.\n learning_rate: The learning rate to train with\n epochs: Number of training epochs. Note that previous training epochs\n are considered to be done alreay, so this actually determines\n the epochs to train in total rather than in this particaular\n call.\n layers: Allows selecting wich layers to train. It can be:\n - A regular expression to match layer names to train\n - One of these predefined values:\n heads: The RPN, classifier and mask heads of the network\n all: All the layers\n 3+: Train Resnet stage 3 and up\n 4+: Train Resnet stage 4 and up\n 5+: Train Resnet stage 5 and up\n augmentation: Optional. An imgaug (https://github.com/aleju/imgaug)\n augmentation. For example, passing imgaug.augmenters.Fliplr(0.5)\n flips images right/left 50% of the time. 
You can pass complex\n augmentations as well. This augmentation applies 50% of the\n time, and when it does it flips images right/left half the time\n and adds a Gaussian blur with a random sigma in range 0 to 5.\n\n augmentation = imgaug.augmenters.Sometimes(0.5, [\n imgaug.augmenters.Fliplr(0.5),\n imgaug.augmenters.GaussianBlur(sigma=(0.0, 5.0))\n ])\n\t custom_callbacks: Optional. Add custom callbacks to be called\n\t with the keras fit_generator method. Must be list of type keras.callbacks.\n no_augmentation_sources: Optional. List of sources to exclude for\n augmentation. A source is string that identifies a dataset and is\n defined in the Dataset class.\n \"\"\"\n assert self.mode == \"training\", \"Create model in training mode.\"\n\n # Pre-defined layer regular expressions\n layer_regex = {\n # all layers but the backbone\n \"heads\": r\"(mrcnn\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\n # From a specific Resnet stage and up\n \"3+\": r\"(res3.*)|(bn3.*)|(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\n \"4+\": r\"(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\n \"5+\": r\"(res5.*)|(bn5.*)|(mrcnn\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\n # All layers\n \"all\": \".*\",\n }\n if layers in layer_regex.keys():\n layers = layer_regex[layers]\n\n # Data generators\n train_generator = DataGenerator(train_dataset, self.config, shuffle=True,\n augmentation=augmentation,\n batch_size=self.config.BATCH_SIZE,\n no_augmentation_sources=no_augmentation_sources)\n val_generator = DataGenerator(val_dataset, self.config, shuffle=True,\n batch_size=self.config.BATCH_SIZE)\n\n # Create log_dir if it does not exist\n if not os.path.exists(self.log_dir):\n os.makedirs(self.log_dir)\n\n # Callbacks\n callbacks = [\n keras.callbacks.TensorBoard(log_dir=self.log_dir,\n histogram_freq=0, write_graph=True, write_images=False),\n keras.callbacks.ModelCheckpoint(self.checkpoint_path,\n verbose=0, save_weights_only=True),\n ]\n\n # Add custom callbacks to the list\n if custom_callbacks:\n callbacks += custom_callbacks\n\n # Train\n log(\"\\nStarting at epoch {}. LR={}\\n\".format(self.epoch, learning_rate))\n log(\"Checkpoint Path: {}\".format(self.checkpoint_path))\n self.set_trainable(layers)\n self.compile(learning_rate, self.config.LEARNING_MOMENTUM)\n\n # Work-around for Windows: Keras fails on Windows when using\n # multiprocessing workers. See discussion here:\n # https://github.com/matterport/Mask_RCNN/issues/13#issuecomment-353124009\n if os.name is 'nt':\n workers = 0\n else:\n workers = multiprocessing.cpu_count()\n\n self.keras_model.fit_generator(\n train_generator,\n initial_epoch=self.epoch,\n epochs=epochs,\n steps_per_epoch=self.config.STEPS_PER_EPOCH,\n callbacks=callbacks,\n validation_data=val_generator,\n validation_steps=self.config.VALIDATION_STEPS,\n max_queue_size=100,\n workers=workers,\n use_multiprocessing=True,\n )\n self.epoch = max(self.epoch, epochs)\n\n def mold_inputs(self, images):\n \"\"\"Takes a list of images and modifies them to the format expected\n as an input to the neural network.\n images: List of image matrices [height,width,depth]. Images can have\n different sizes.\n\n Returns 3 Numpy matrices:\n molded_images: [N, h, w, 3]. Images resized and normalized.\n image_metas: [N, length of meta data]. Details about each image.\n windows: [N, (y1, x1, y2, x2)]. 
The portion of the image that has the\n original image (padding excluded).\n \"\"\"\n molded_images = []\n image_metas = []\n windows = []\n for image in images:\n # Resize image\n # TODO: move resizing to mold_image()\n molded_image, window, scale, padding, crop = utils.resize_image(\n image,\n min_dim=self.config.IMAGE_MIN_DIM,\n min_scale=self.config.IMAGE_MIN_SCALE,\n max_dim=self.config.IMAGE_MAX_DIM,\n mode=self.config.IMAGE_RESIZE_MODE)\n molded_image = mold_image(molded_image, self.config)\n # Build image_meta\n image_meta = compose_image_meta(\n 0, image.shape, molded_image.shape, window, scale,\n np.zeros([self.config.NUM_CLASSES], dtype=np.int32))\n # Append\n molded_images.append(molded_image)\n windows.append(window)\n image_metas.append(image_meta)\n # Pack into arrays\n molded_images = np.stack(molded_images)\n image_metas = np.stack(image_metas)\n windows = np.stack(windows)\n return molded_images, image_metas, windows\n\n def unmold_detections(self, detections, mrcnn_mask, original_image_shape,\n image_shape, window):\n \"\"\"Reformats the detections of one image from the format of the neural\n network output to a format suitable for use in the rest of the\n application.\n\n detections: [N, (y1, x1, y2, x2, class_id, score)] in normalized coordinates\n mrcnn_mask: [N, height, width, num_classes]\n original_image_shape: [H, W, C] Original image shape before resizing\n image_shape: [H, W, C] Shape of the image after resizing and padding\n window: [y1, x1, y2, x2] Pixel coordinates of box in the image where the real\n image is excluding the padding.\n\n Returns:\n boxes: [N, (y1, x1, y2, x2)] Bounding boxes in pixels\n class_ids: [N] Integer class IDs for each bounding box\n scores: [N] Float probability scores of the class_id\n masks: [height, width, num_instances] Instance masks\n \"\"\"\n # How many detections do we have?\n # Detections array is padded with zeros. Find the first class_id == 0.\n zero_ix = np.where(detections[:, 4] == 0)[0]\n N = zero_ix[0] if zero_ix.shape[0] > 0 else detections.shape[0]\n\n # Extract boxes, class_ids, scores, and class-specific masks\n boxes = detections[:N, :4]\n class_ids = detections[:N, 4].astype(np.int32)\n scores = detections[:N, 5]\n masks = mrcnn_mask[np.arange(N), :, :, class_ids]\n\n # Translate normalized coordinates in the resized image to pixel\n # coordinates in the original image before resizing\n window = utils.norm_boxes(window, image_shape[:2])\n wy1, wx1, wy2, wx2 = window\n shift = np.array([wy1, wx1, wy1, wx1])\n wh = wy2 - wy1 # window height\n ww = wx2 - wx1 # window width\n scale = np.array([wh, ww, wh, ww])\n # Convert boxes to normalized coordinates on the window\n boxes = np.divide(boxes - shift, scale)\n # Convert boxes to pixel coordinates on the original image\n boxes = utils.denorm_boxes(boxes, original_image_shape[:2])\n\n # Filter out detections with zero area. 
Happens in early training when\n # network weights are still random\n exclude_ix = np.where(\n (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)[0]\n if exclude_ix.shape[0] > 0:\n boxes = np.delete(boxes, exclude_ix, axis=0)\n class_ids = np.delete(class_ids, exclude_ix, axis=0)\n scores = np.delete(scores, exclude_ix, axis=0)\n masks = np.delete(masks, exclude_ix, axis=0)\n N = class_ids.shape[0]\n\n # Resize masks to original image size and set boundary threshold.\n full_masks = []\n for i in range(N):\n # Convert neural network mask to full size mask\n full_mask = utils.unmold_mask(masks[i], boxes[i], original_image_shape)\n full_masks.append(full_mask)\n full_masks = np.stack(full_masks, axis=-1)\\\n if full_masks else np.empty(original_image_shape[:2] + (0,))\n\n return boxes, class_ids, scores, full_masks\n\n def detect(self, images, verbose=0):\n \"\"\"Runs the detection pipeline.\n\n images: List of images, potentially of different sizes.\n\n Returns a list of dicts, one dict per image. The dict contains:\n rois: [N, (y1, x1, y2, x2)] detection bounding boxes\n class_ids: [N] int class IDs\n scores: [N] float probability scores for the class IDs\n masks: [H, W, N] instance binary masks\n \"\"\"\n assert self.mode == \"inference\", \"Create model in inference mode.\"\n assert len(\n images) == self.config.BATCH_SIZE, \"len(images) must be equal to BATCH_SIZE\"\n\n if verbose:\n log(\"Processing {} images\".format(len(images)))\n for image in images:\n log(\"image\", image)\n\n # Mold inputs to format expected by the neural network\n molded_images, image_metas, windows = self.mold_inputs(images)\n\n # Validate image sizes\n # All images in a batch MUST be of the same size\n image_shape = molded_images[0].shape\n for g in molded_images[1:]:\n assert g.shape == image_shape,\\\n \"After resizing, all images must have the same size. Check IMAGE_RESIZE_MODE and image sizes.\"\n\n # Anchors\n anchors = self.get_anchors(image_shape)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)\n\n if verbose:\n log(\"molded_images\", molded_images)\n log(\"image_metas\", image_metas)\n log(\"anchors\", anchors)\n # Run object detection\n detections, _, _, mrcnn_mask, _, _, _ =\\\n self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)\n # Process detections\n results = []\n for i, image in enumerate(images):\n final_rois, final_class_ids, final_scores, final_masks =\\\n self.unmold_detections(detections[i], mrcnn_mask[i],\n image.shape, molded_images[i].shape,\n windows[i])\n results.append({\n \"rois\": final_rois,\n \"class_ids\": final_class_ids,\n \"scores\": final_scores,\n \"masks\": final_masks,\n })\n return results\n\n def detect_molded(self, molded_images, image_metas, verbose=0):\n \"\"\"Runs the detection pipeline, but expect inputs that are\n molded already. Used mostly for debugging and inspecting\n the model.\n\n molded_images: List of images loaded using load_image_gt()\n image_metas: image meta data, also returned by load_image_gt()\n\n Returns a list of dicts, one dict per image. 
The dict contains:\n rois: [N, (y1, x1, y2, x2)] detection bounding boxes\n class_ids: [N] int class IDs\n scores: [N] float probability scores for the class IDs\n masks: [H, W, N] instance binary masks\n \"\"\"\n assert self.mode == \"inference\", \"Create model in inference mode.\"\n assert len(molded_images) == self.config.BATCH_SIZE,\\\n \"Number of images must be equal to BATCH_SIZE\"\n\n if verbose:\n log(\"Processing {} images\".format(len(molded_images)))\n for image in molded_images:\n log(\"image\", image)\n\n # Validate image sizes\n # All images in a batch MUST be of the same size\n image_shape = molded_images[0].shape\n for g in molded_images[1:]:\n assert g.shape == image_shape, \"Images must have the same size\"\n\n # Anchors\n anchors = self.get_anchors(image_shape)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)\n\n if verbose:\n log(\"molded_images\", molded_images)\n log(\"image_metas\", image_metas)\n log(\"anchors\", anchors)\n # Run object detection\n detections, _, _, mrcnn_mask, _, _, _ =\\\n self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)\n # Process detections\n results = []\n for i, image in enumerate(molded_images):\n window = [0, 0, image.shape[0], image.shape[1]]\n final_rois, final_class_ids, final_scores, final_masks =\\\n self.unmold_detections(detections[i], mrcnn_mask[i],\n image.shape, molded_images[i].shape,\n window)\n results.append({\n \"rois\": final_rois,\n \"class_ids\": final_class_ids,\n \"scores\": final_scores,\n \"masks\": final_masks,\n })\n return results\n\n def get_anchors(self, image_shape):\n \"\"\"Returns anchor pyramid for the given image size.\"\"\"\n backbone_shapes = compute_backbone_shapes(self.config, image_shape)\n # Cache anchors and reuse if image shape is the same\n if not hasattr(self, \"_anchor_cache\"):\n self._anchor_cache = {}\n if not tuple(image_shape) in self._anchor_cache:\n # Generate Anchors\n a = utils.generate_pyramid_anchors(\n self.config.RPN_ANCHOR_SCALES,\n self.config.RPN_ANCHOR_RATIOS,\n backbone_shapes,\n self.config.BACKBONE_STRIDES,\n self.config.RPN_ANCHOR_STRIDE)\n # Keep a copy of the latest anchors in pixel coordinates because\n # it's used in inspect_model notebooks.\n # TODO: Remove this after the notebook are refactored to not use it\n self.anchors = a\n # Normalize coordinates\n self._anchor_cache[tuple(image_shape)] = utils.norm_boxes(a, image_shape[:2])\n return self._anchor_cache[tuple(image_shape)]\n\n def ancestor(self, tensor, name, checked=None):\n \"\"\"Finds the ancestor of a TF tensor in the computation graph.\n tensor: TensorFlow symbolic tensor.\n name: Name of ancestor tensor to find\n checked: For internal use. 
A list of tensors that were already\n searched to avoid loops in traversing the graph.\n \"\"\"\n checked = checked if checked is not None else []\n # Put a limit on how deep we go to avoid very long loops\n if len(checked) > 500:\n return None\n # Convert name to a regex and allow matching a number prefix\n # because Keras adds them automatically\n if isinstance(name, str):\n name = re.compile(name.replace(\"/\", r\"(\\_\\d+)*/\"))\n\n parents = tensor.op.inputs\n for p in parents:\n if p in checked:\n continue\n if bool(re.fullmatch(name, p.name)):\n return p\n checked.append(p)\n a = self.ancestor(p, name, checked)\n if a is not None:\n return a\n return None\n\n def find_trainable_layer(self, layer):\n \"\"\"If a layer is encapsulated by another layer, this function\n digs through the encapsulation and returns the layer that holds\n the weights.\n \"\"\"\n if layer.__class__.__name__ == 'TimeDistributed':\n return self.find_trainable_layer(layer.layer)\n return layer\n\n def get_trainable_layers(self):\n \"\"\"Returns a list of layers that have weights.\"\"\"\n layers = []\n # Loop through all layers\n for l in self.keras_model.layers:\n # If layer is a wrapper, find inner trainable layer\n l = self.find_trainable_layer(l)\n # Include layer if it has weights\n if l.get_weights():\n layers.append(l)\n return layers\n\n def run_graph(self, images, outputs, image_metas=None):\n \"\"\"Runs a sub-set of the computation graph that computes the given\n outputs.\n\n image_metas: If provided, the images are assumed to be already\n molded (i.e. resized, padded, and normalized)\n\n outputs: List of tuples (name, tensor) to compute. The tensors are\n symbolic TensorFlow tensors and the names are for easy tracking.\n\n Returns an ordered dict of results. Keys are the names received in the\n input and values are Numpy arrays.\n \"\"\"\n model = self.keras_model\n\n # Organize desired outputs into an ordered dict\n outputs = OrderedDict(outputs)\n for o in outputs.values():\n assert o is not None\n\n # Build a Keras function to run parts of the computation graph\n inputs = model.inputs\n if model.uses_learning_phase and not isinstance(K.learning_phase(), int):\n inputs += [K.learning_phase()]\n kf = K.function(model.inputs, list(outputs.values()))\n\n # Prepare inputs\n if image_metas is None:\n molded_images, image_metas, _ = self.mold_inputs(images)\n else:\n molded_images = images\n image_shape = molded_images[0].shape\n # Anchors\n anchors = self.get_anchors(image_shape)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)\n model_in = [molded_images, image_metas, anchors]\n\n # Run inference\n if model.uses_learning_phase and not isinstance(K.learning_phase(), int):\n model_in.append(0.)\n outputs_np = kf(model_in)\n\n # Pack the generated Numpy arrays into a a dict and log the results.\n outputs_np = OrderedDict([(k, v)\n for k, v in zip(outputs.keys(), outputs_np)])\n for k, v in outputs_np.items():\n log(k, v)\n return outputs_np\n\n\n############################################################\n# Data Formatting\n############################################################\n\ndef compose_image_meta(image_id, original_image_shape, image_shape,\n window, scale, active_class_ids):\n \"\"\"Takes attributes of an image and puts them in one 1D array.\n\n image_id: An int ID of the image. 
Useful for debugging.\n original_image_shape: [H, W, C] before resizing or padding.\n image_shape: [H, W, C] after resizing and padding\n window: (y1, x1, y2, x2) in pixels. The area of the image where the real\n image is (excluding the padding)\n scale: The scaling factor applied to the original image (float32)\n active_class_ids: List of class_ids available in the dataset from which\n the image came. Useful if training on images from multiple datasets\n where not all classes are present in all datasets.\n \"\"\"\n meta = np.array(\n [image_id] + # size=1\n list(original_image_shape) + # size=3\n list(image_shape) + # size=3\n list(window) + # size=4 (y1, x1, y2, x2) in image cooredinates\n [scale] + # size=1\n list(active_class_ids) # size=num_classes\n )\n return meta\n\n\ndef parse_image_meta(meta):\n \"\"\"Parses an array that contains image attributes to its components.\n See compose_image_meta() for more details.\n\n meta: [batch, meta length] where meta length depends on NUM_CLASSES\n\n Returns a dict of the parsed values.\n \"\"\"\n image_id = meta[:, 0]\n original_image_shape = meta[:, 1:4]\n image_shape = meta[:, 4:7]\n window = meta[:, 7:11] # (y1, x1, y2, x2) window of image in in pixels\n scale = meta[:, 11]\n active_class_ids = meta[:, 12:]\n return {\n \"image_id\": image_id.astype(np.int32),\n \"original_image_shape\": original_image_shape.astype(np.int32),\n \"image_shape\": image_shape.astype(np.int32),\n \"window\": window.astype(np.int32),\n \"scale\": scale.astype(np.float32),\n \"active_class_ids\": active_class_ids.astype(np.int32),\n }\n\n\ndef parse_image_meta_graph(meta):\n \"\"\"Parses a tensor that contains image attributes to its components.\n See compose_image_meta() for more details.\n\n meta: [batch, meta length] where meta length depends on NUM_CLASSES\n\n Returns a dict of the parsed tensors.\n \"\"\"\n image_id = meta[:, 0]\n original_image_shape = meta[:, 1:4]\n image_shape = meta[:, 4:7]\n window = meta[:, 7:11] # (y1, x1, y2, x2) window of image in in pixels\n scale = meta[:, 11]\n active_class_ids = meta[:, 12:]\n return {\n \"image_id\": image_id,\n \"original_image_shape\": original_image_shape,\n \"image_shape\": image_shape,\n \"window\": window,\n \"scale\": scale,\n \"active_class_ids\": active_class_ids,\n }\n\n\ndef mold_image(images, config):\n \"\"\"Expects an RGB image (or array of images) and subtracts\n the mean pixel and converts it to float. Expects image\n colors in RGB order.\n \"\"\"\n return images.astype(np.float32) - config.MEAN_PIXEL\n\n\ndef unmold_image(normalized_images, config):\n \"\"\"Takes a image normalized with mold() and returns the original.\"\"\"\n return (normalized_images + config.MEAN_PIXEL).astype(np.uint8)\n\n\n############################################################\n# Miscellenous Graph Functions\n############################################################\n\ndef trim_zeros_graph(boxes, name='trim_zeros'):\n \"\"\"Often boxes are represented with matrices of shape [N, 4] and\n are padded with zeros. 
This removes zero boxes.\n\n boxes: [N, 4] matrix of boxes.\n non_zeros: [N] a 1D boolean mask identifying the rows to keep\n \"\"\"\n non_zeros = tf.cast(tf.reduce_sum(tf.abs(boxes), axis=1), tf.bool)\n boxes = tf.boolean_mask(boxes, non_zeros, name=name)\n return boxes, non_zeros\n\n\ndef batch_pack_graph(x, counts, num_rows):\n \"\"\"Picks different number of values from each row\n in x depending on the values in counts.\n \"\"\"\n outputs = []\n for i in range(num_rows):\n outputs.append(x[i, :counts[i]])\n return tf.concat(outputs, axis=0)\n\n\ndef norm_boxes_graph(boxes, shape):\n \"\"\"Converts boxes from pixel coordinates to normalized coordinates.\n boxes: [..., (y1, x1, y2, x2)] in pixel coordinates\n shape: [..., (height, width)] in pixels\n\n Note: In pixel coordinates (y2, x2) is outside the box. But in normalized\n coordinates it's inside the box.\n\n Returns:\n [..., (y1, x1, y2, x2)] in normalized coordinates\n \"\"\"\n h, w = tf.split(tf.cast(shape, tf.float32), 2)\n scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)\n shift = tf.constant([0., 0., 1., 1.])\n return tf.divide(boxes - shift, scale)\n\n\ndef denorm_boxes_graph(boxes, shape):\n \"\"\"Converts boxes from normalized coordinates to pixel coordinates.\n boxes: [..., (y1, x1, y2, x2)] in normalized coordinates\n shape: [..., (height, width)] in pixels\n\n Note: In pixel coordinates (y2, x2) is outside the box. But in normalized\n coordinates it's inside the box.\n\n Returns:\n [..., (y1, x1, y2, x2)] in pixel coordinates\n \"\"\"\n h, w = tf.split(tf.cast(shape, tf.float32), 2)\n scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)\n shift = tf.constant([0., 0., 1., 1.])\n return tf.cast(tf.round(tf.multiply(boxes, scale) + shift), tf.int32)\n"
] |
[
[
"numpy.amax",
"numpy.expand_dims",
"tensorflow.concat",
"tensorflow.control_dependencies",
"tensorflow.stack",
"tensorflow.reduce_sum",
"tensorflow.minimum",
"tensorflow.cast",
"tensorflow.image.non_max_suppression",
"tensorflow.equal",
"tensorflow.image.crop_and_resize",
"numpy.concatenate",
"numpy.max",
"tensorflow.abs",
"tensorflow.map_fn",
"numpy.any",
"tensorflow.pad",
"tensorflow.where",
"tensorflow.random_shuffle",
"numpy.where",
"tensorflow.add_n",
"numpy.divide",
"numpy.random.randint",
"tensorflow.boolean_mask",
"numpy.hstack",
"tensorflow.Variable",
"numpy.reshape",
"numpy.fliplr",
"numpy.arange",
"tensorflow.squeeze",
"numpy.stack",
"tensorflow.divide",
"tensorflow.stop_gradient",
"tensorflow.gather",
"tensorflow.nn.top_k",
"numpy.argmax",
"numpy.copy",
"tensorflow.argmax",
"numpy.zeros",
"numpy.log",
"tensorflow.gather_nd",
"tensorflow.unique",
"tensorflow.shape",
"numpy.random.choice",
"tensorflow.identity",
"tensorflow.exp",
"tensorflow.sparse_tensor_to_dense",
"numpy.delete",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.split",
"tensorflow.round",
"numpy.array",
"numpy.sum",
"tensorflow.size",
"tensorflow.reduce_max",
"tensorflow.multiply",
"tensorflow.transpose",
"tensorflow.constant",
"tensorflow.range",
"tensorflow.reduce_mean",
"numpy.abs",
"tensorflow.maximum",
"tensorflow.reshape",
"tensorflow.expand_dims",
"numpy.sort",
"numpy.ones",
"numpy.random.shuffle",
"tensorflow.log",
"numpy.broadcast_to",
"tensorflow.sqrt",
"numpy.empty",
"tensorflow.logical_and"
]
] |
bparazin/skyportal
|
[
"c160610ca0cc28eef9f36c2d11cc15bd9bcbfe56"
] |
[
"skyportal/models/localization.py"
] |
[
"__all__ = ['Localization']\n\nimport sqlalchemy as sa\nfrom sqlalchemy.orm import relationship, deferred\nfrom sqlalchemy.dialects.postgresql import JSONB\nfrom sqlalchemy.ext.hybrid import hybrid_property\n\nfrom astropy.table import Table\nimport numpy as np\nimport ligo.skymap.bayestar as ligo_bayestar\nimport healpy\n\nfrom baselayer.app.models import Base, AccessibleIfUserMatches\n\n\nclass Localization(Base):\n \"\"\"Localization information, including the localization ID, event ID, right\n ascension, declination, error radius (if applicable), and the healpix\n map.\"\"\"\n\n update = delete = AccessibleIfUserMatches('sent_by')\n\n sent_by_id = sa.Column(\n sa.ForeignKey('users.id', ondelete='CASCADE'),\n nullable=False,\n index=True,\n doc=\"The ID of the User who created this Localization.\",\n )\n\n sent_by = relationship(\n \"User\",\n foreign_keys=sent_by_id,\n back_populates=\"localizations\",\n doc=\"The user that saved this Localization\",\n )\n\n nside = 512\n # HEALPix resolution used for flat (non-multiresolution) operations.\n\n dateobs = sa.Column(\n sa.ForeignKey('gcnevents.dateobs', ondelete=\"CASCADE\"),\n nullable=False,\n index=True,\n doc='UTC event timestamp',\n )\n\n localization_name = sa.Column(sa.String, doc='Localization name', index=True)\n\n uniq = deferred(\n sa.Column(\n sa.ARRAY(sa.BigInteger),\n nullable=False,\n doc='Multiresolution HEALPix UNIQ pixel index array',\n )\n )\n\n probdensity = deferred(\n sa.Column(\n sa.ARRAY(sa.Float),\n nullable=False,\n doc='Multiresolution HEALPix probability density array',\n )\n )\n\n distmu = deferred(\n sa.Column(sa.ARRAY(sa.Float), doc='Multiresolution HEALPix distance mu array')\n )\n\n distsigma = deferred(\n sa.Column(\n sa.ARRAY(sa.Float), doc='Multiresolution HEALPix distance sigma array'\n )\n )\n\n distnorm = deferred(\n sa.Column(\n sa.ARRAY(sa.Float),\n doc='Multiresolution HEALPix distance normalization array',\n )\n )\n\n contour = deferred(sa.Column(JSONB, doc='GeoJSON contours'))\n\n @hybrid_property\n def is_3d(self):\n return (\n self.distmu is not None\n and self.distsigma is not None\n and self.distnorm is not None\n )\n\n @is_3d.expression\n def is_3d(cls):\n return sa.and_(\n cls.distmu.isnot(None),\n cls.distsigma.isnot(None),\n cls.distnorm.isnot(None),\n )\n\n @property\n def table_2d(self):\n \"\"\"Get multiresolution HEALPix dataset, probability density only.\"\"\"\n return Table(\n [np.asarray(self.uniq, dtype=np.int64), self.probdensity],\n names=['UNIQ', 'PROBDENSITY'],\n )\n\n @property\n def table(self):\n \"\"\"Get multiresolution HEALPix dataset, probability density and\n distance.\"\"\"\n if self.is_3d:\n return Table(\n [\n np.asarray(self.uniq, dtype=np.int64),\n self.probdensity,\n self.distmu,\n self.distsigma,\n self.distnorm,\n ],\n names=['UNIQ', 'PROBDENSITY', 'DISTMU', 'DISTSIGMA', 'DISTNORM'],\n )\n else:\n return self.table_2d\n\n @property\n def flat_2d(self):\n \"\"\"Get flat resolution HEALPix dataset, probability density only.\"\"\"\n order = healpy.nside2order(Localization.nside)\n result = ligo_bayestar.rasterize(self.table_2d, order)['PROB']\n return healpy.reorder(result, 'NESTED', 'RING')\n\n @property\n def flat(self):\n \"\"\"Get flat resolution HEALPix dataset, probability density and\n distance.\"\"\"\n if self.is_3d:\n order = healpy.nside2order(Localization.nside)\n t = ligo_bayestar.rasterize(self.table, order)\n result = t['PROB'], t['DISTMU'], t['DISTSIGMA'], t['DISTNORM']\n return healpy.reorder(result, 'NESTED', 'RING')\n else:\n return 
(self.flat_2d,)\n"
] |
[
[
"numpy.asarray"
]
] |
brown170/fudge
|
[
"4f818b0e0b0de52bc127dd77285b20ce3568c97a",
"4f818b0e0b0de52bc127dd77285b20ce3568c97a"
] |
[
"brownies/legacy/toENDF6/covariances/modelParameters.py",
"examples/compareCrossSections.py"
] |
[
"# <<BEGIN-copyright>>\n# Copyright 2021, Lawrence Livermore National Security, LLC.\n# See the top-level COPYRIGHT file for details.\n# \n# SPDX-License-Identifier: BSD-3-Clause\n# <<END-copyright>>\n\nimport numpy\n\nfrom fudge.resonances import scatteringRadius as scatteringRadiusModule, resolved as resolvedModule\n\nfrom pqu import PQU as PQUModule\n\nfrom fudge.covariances import covarianceSuite as covarianceSuiteModule\nfrom fudge.covariances import modelParameters as modelParametersModule\n\nfrom .. import endfFormats as endfFormatsModule\nfrom .. import gndsToENDF6 as gndsToENDF6Module\nfrom ..resonances import resolved as resonancesRewriteModule\n\n#\n# helper methods:\n#\ndef writeLCOMP2( matrix, NDIGIT, NNN ):\n import numpy\n nints = 56 // (NDIGIT + 1) # how many numbers fit on each line?\n if NDIGIT == 3: nints = 13 # special case\n rsd = numpy.sqrt(matrix.diagonal())\n rsd[rsd == 0] = 1\n corr_mat = matrix / numpy.outer(rsd, rsd)\n corr_mat *= 10**NDIGIT\n corr_mat[ corr_mat > 0 ] -= 0.5\n corr_mat[ corr_mat < 0 ] += 0.5\n corr_mat = numpy.rint(corr_mat) # rint: round to nearest int\n # write lower-diagonal as sparse matrix using INTG format:\n endfCorrMat = []\n for i in range(len(corr_mat)):\n vals = corr_mat[i, :i]\n j = 0\n while j < i:\n if vals[j] != 0:\n endfCorrMat.append(endfFormatsModule.writeEndfINTG(\n i + 1, j + 1, list(vals[j:j + nints]), NDIGIT))\n j += nints\n else:\n j += 1\n NM = len(endfCorrMat)\n endf = [endfFormatsModule.endfHeadLine(0, 0, NDIGIT, NNN, NM, 0)]\n endf += endfCorrMat\n return endf\n\ndef toENDF6(self, endfMFList, flags, targetInfo, verbosityIndent=''):\n \"\"\"\n Write all unresolved covariance data to ENDF6\n \"\"\"\n\n resolved, unresolved = [],[]\n for section_ in self:\n if isinstance(section_, modelParametersModule.averageParameterCovariance):\n unresolved.append(section_)\n else:\n resolved.append(section_)\n\n NER = len(resolved)\n if unresolved: NER += 1\n\n res = targetInfo['reactionSuite'].resonances\n if resolved:\n if isinstance(res.resolved.evaluated, resolvedModule.BreitWigner):\n LFW = res.resolved.evaluated.resonanceParameters.table.getColumn('fissionWidth') is not None\n elif isinstance(res.resolved.evaluated, resolvedModule.RMatrix):\n LFW = any( [RR.reactionLink.link.isFission() for RR in res.resolved.evaluated.resonanceReactions])\n else:\n LFW = any( [RR.reactionLink.link.isFission() for RR in res.unresolved.evaluated.resonanceReactions])\n\n # MF32 header information:\n ZAM, AWT = targetInfo['ZA'], targetInfo['mass']\n NIS, ABN, ZAI = 1, 1.0, ZAM # assuming only one isotope per file\n endf = [endfFormatsModule.endfHeadLine(ZAM, AWT, 0, 0, NIS, 0)]\n endf.append(endfFormatsModule.endfHeadLine(ZAI, ABN, 0, LFW, NER, 0))\n\n endfMFList[32][151] = endf\n\n for section_ in resolved:\n gndsToENDF6Module.getForm( targetInfo['style'], section_ ).toENDF6(endfMFList, flags, targetInfo, verbosityIndent)\n if unresolved: # these need to all be done together\n averageParametersToENDF6(unresolved, endfMFList, flags, targetInfo, verbosityIndent)\n\n endfMFList[32][151].append( endfFormatsModule.endfSENDLineNumber() )\n\ncovarianceSuiteModule.parameterCovariances.toENDF6 = toENDF6\n\n\ndef toENDF6(self, endfMFList, flags, targetInfo, verbosityIndent=''):\n \"\"\"\n Translate resolved resonance covariance back to ENDF-6\n \"\"\"\n import numpy\n\n def swaprows( matrix, i1, i2, nrows ):\n # may need to rearrange parameters: ENDF often sorts first by L rather than by energy\n rows = matrix[i1:i1+nrows].copy()\n matrix[i1:i1+nrows] = 
matrix[i2:i2+nrows]; matrix[i2:i2+nrows] = rows\n cols = matrix[:,i1:i1+nrows].copy()\n matrix[:,i1:i1+nrows] = matrix[:,i2:i2+nrows]; matrix[:,i2:i2+nrows] = cols\n\n # need the resonance parameters as well as covariance matrix:\n res = targetInfo['reactionSuite'].resonances\n conversionFlags = targetInfo['ENDFconversionFlags'].get(self,\"\")\n\n LRF = 7\n if isinstance(res.resolved.evaluated, resolvedModule.BreitWigner):\n LRF = {\n resolvedModule.BreitWigner.singleLevel: 1,\n resolvedModule.BreitWigner.multiLevel: 2\n }[ res.resolved.evaluated.approximation ]\n elif 'LRF3' in conversionFlags:\n LRF = 3\n\n endf = []\n EL, EH = res.resolved.domainMin, res.resolved.domainMax\n AWT, LRU, NRO = targetInfo['mass'], 1, 0\n NAPS = not res.resolved.evaluated.calculateChannelRadius\n endf.append(endfFormatsModule.endfHeadLine(EL, EH, LRU, LRF, NRO, NAPS))\n\n LCOMP = 1\n if 'LCOMP=0' in conversionFlags: LCOMP = 0\n elif 'LCOMP=2' in conversionFlags: LCOMP = 2\n\n if LRF in (1,2):\n RPs = res.resolved.evaluated.resonanceParameters.table\n NRes = len(RPs)\n\n SPI = targetInfo['spin']\n AP = res.resolved.evaluated.scatteringRadius.getValueAs('10*fm')\n\n sortByL = \"sortByL\" in conversionFlags\n Ls = RPs.getColumn('L')\n NLS = len(set(Ls))\n LAD = 0\n if LCOMP in (1,2) or not sortByL: NLS = 0\n ISR = any( [isinstance(parameter.link, scatteringRadiusModule.scatteringRadius) for parameter in self.parameters] )\n endf.append( endfFormatsModule.endfHeadLine( SPI,AP,LAD,LCOMP,NLS,ISR ) )\n MLS = 0\n if ISR:\n MLS = 1 # currently don't handle energy-dependent DAP\n DAP = PQUModule.PQU( numpy.sqrt( self.matrix.constructArray()[0,0] ),\n self.parameters[0].link.form.axes[0].unit ).getValueAs('10*fm')\n endf.append( endfFormatsModule.endfContLine( 0,DAP,0,0,0,0 ) )\n\n # MF32 repeats the resonance parameter information.\n # Extract that info from reactionSuite.resonances:\n table = [RPs.getColumn('L'), RPs.getColumn('energy',unit='eV'), RPs.getColumn('J'),\n RPs.getColumn('totalWidth',unit='eV') or [0]*NRes,\n RPs.getColumn('neutronWidth',unit='eV'), RPs.getColumn('captureWidth',unit='eV'),\n RPs.getColumn('fissionWidth') or [0]*NRes]\n CS = RPs.getColumn('channelSpin')\n if CS is not None: # ENDF hack: J<0 -> use lower available channel spin\n CS = [2*(cs-SPI) for cs in CS]\n Js = [v[0]*v[1] for v in zip(table[2],CS)]\n table[2] = Js\n table = list( zip(*table) )\n matrix = self.matrix.constructArray()[MLS:,MLS:]\n # toss out extra rows/columns of zeros (for column 'L')\n MPAR2 = len(matrix) // len(table)\n index = []\n for idx in range(len(table)):\n index += [idx*MPAR2+1, idx*MPAR2+2, idx*MPAR2+3] # indices to remove\n keep = numpy.array( sorted( set(range(len(matrix))).difference(index)) )\n matrix = matrix[keep.reshape(-1,1),keep]\n MPAR = len(matrix) // len(table)\n\n if sortByL:\n # reorder resonances, sorting first by L and second by energy:\n table.sort()\n\n elist1 = [(lis[1],lis[4],lis[5]) for lis in table]\n elist2 = list( zip( RPs.getColumn('energy',unit='eV'),\n RPs.getColumn('neutronWidth',unit='eV'),\n RPs.getColumn('captureWidth',unit='eV') ) )\n\n for i in range(len(elist1)):\n i2 = elist2.index( elist1[i] )\n if i2!=i:\n swaprows( matrix, MPAR*i, MPAR*elist2.index( elist1[i] ), MPAR )\n val = elist2[i]\n elist2[i] = elist2[i2]; elist2[i2] = val\n\n if LCOMP==0:\n tableIndex = 0\n for L in set( Ls ):\n NRS = Ls.count(L)\n endf.append( endfFormatsModule.endfHeadLine( AWT, 0, L, 0, 18*NRS, NRS ) )\n for i in range(tableIndex, len(table)):\n if table[i][0]!=L: break\n endf.append( 
endfFormatsModule.endfDataLine( table[i][1:7] ) )\n block = matrix[MPAR*i:MPAR*(i+1), MPAR*i:MPAR*(i+1)]\n lis = [block[0,0], block[1,1], block[2,1], block[2,2]]\n if MPAR==4:\n lis += [block[3,1],block[3,2],block[3,3],0,0,0,0,0]\n else:\n lis += [0,0,0,0,0,0,0,0]\n endf += endfFormatsModule.endfDataList( lis )\n tableIndex += NRS\n\n\n if LCOMP==1:\n NSRS, NLRS = 1,0 # short-range correlations only\n endf.append( endfFormatsModule.endfHeadLine( AWT, 0, 0, 0, NSRS, NLRS ) )\n NRB = NRes\n NVS = (NRB*MPAR)*(NRB*MPAR+1)//2 # length of the upper diagonal matrix\n endf.append( endfFormatsModule.endfHeadLine( 0,0, MPAR, 0, NVS+6*NRB, NRB ) )\n\n for res in table:\n endf.append( endfFormatsModule.endfDataLine( res[1:7] ) )\n\n dataList = []\n for i in range(len(matrix)): dataList.extend( list( matrix[i][i:] ) )\n endf += endfFormatsModule.endfDataList( dataList )\n\n elif LCOMP==2:\n QX, LRX = 0, 0 # haven't encountered any competitive widths yet\n dat = numpy.sqrt( matrix.diagonal() )\n NRes = 0\n matrixDat = []\n omitResonance = []\n for i in range(len(table)):\n params = table[i][1:7]\n uncerts = [dat[MPAR*i],0,0,dat[MPAR*i+1],dat[MPAR*i+2],0]\n if MPAR==4: uncerts[-1] = dat[MPAR*i+3]\n if not any(uncerts):\n omitResonance.extend([True]*MPAR) # resonance is not included in MF=32\n continue\n omitResonance.extend([False]*MPAR)\n matrixDat += endfFormatsModule.endfDataList( params )\n matrixDat += endfFormatsModule.endfDataList( uncerts )\n NRes += 1\n\n endf.append(endfFormatsModule.endfHeadLine(AWT, QX, 0, LRX, 12 * NRes, NRes))\n endf += matrixDat\n\n # correlation matrix:\n if any(omitResonance): # see Pt192\n omitResonance = numpy.array(omitResonance)\n matrix = matrix[~omitResonance][:,~omitResonance]\n NDIGIT = [a for a in conversionFlags.split(',') if a.startswith('NDIGIT')]\n NDIGIT = int( NDIGIT[0][-1] )\n endf += writeLCOMP2( matrix, NDIGIT, NRes*MPAR )\n\n elif LRF==3:\n conversionDetails = targetInfo['LRF3conversion'] # useful info saved when writing MF=2 back to ENDF6\n\n table = conversionDetails['table']\n sortedTable = conversionDetails['sortedTable']\n NRes = len(table['energies'])\n\n SPI = targetInfo['spin']\n AP = conversionDetails['AP']\n\n sortByL = \"sortByL\" in conversionFlags\n Ls = table['Ls']\n NLS = len(set(Ls))\n LAD = 0\n if LCOMP==2 and res.resolved.evaluated.supportsAngularReconstruction: LAD=1\n if LCOMP==2 or not sortByL: NLS = 0\n ISR = any( [isinstance(parameter.link, scatteringRadiusModule.scatteringRadius) for parameter in self.parameters] )\n endf.append( endfFormatsModule.endfHeadLine( SPI,AP,LAD,LCOMP,NLS,ISR ) )\n MLS = 0\n matrix = self.matrix.constructArray()\n if ISR:\n MLS = 1 # currently don't handle energy-dependent DAP\n DAP = PQUModule.PQU( numpy.sqrt(matrix[0,0]), self.parameters[0].link.form.axes[0].unit ).getValueAs('10*fm')\n endf.append( endfFormatsModule.endfHeadLine( 0,0,0,0,MLS,1 ) )\n endf.append( endfFormatsModule.endfDataLine( [DAP] ) )\n\n matrix = matrix[MLS:,MLS:]\n MPAR = len(matrix) // NRes\n\n if not sortByL:\n sortedTable.sort(key=lambda foo: foo[1]) # sort by resonance energy. 
Otherwise sorted first by L, then E\n\n elist1 = [(lis[1],lis[3],lis[4]) for lis in sortedTable]\n elist2 = list(zip( table['energies'], table['elastic'], table['capture'] ))\n\n for i in range(len(elist1)):\n i2 = elist2.index( elist1[i] )\n if i2!=i:\n swaprows( matrix, MPAR*i, MPAR*elist2.index( elist1[i] ), MPAR )\n val = elist2[i]\n elist2[i] = elist2[i2]; elist2[i2] = val\n\n for i1 in range(len(elist1)): # switch order of elastic and capture widths\n swaprows(matrix, MPAR * i1 + 1, MPAR * i1 + 2, 1)\n\n omitRow = numpy.sum(matrix, axis=1) == 0\n\n # check for empty rows/columns:\n if any(omitRow):\n omitResonance = []\n for idx in range(len(sortedTable)):\n if numpy.all(omitRow[MPAR*idx:MPAR*(idx+1)]):\n omitResonance.append(idx)\n else:\n omitRow[MPAR*idx:MPAR*(idx+1)] = 0 # only omit of all parameters are zero for this resonance\n for idx in omitResonance[::-1]:\n sortedTable.pop(idx)\n NRes = len(sortedTable)\n matrix = matrix[~omitRow][:, ~omitRow]\n\n if LCOMP==0:\n tableIndex = 0\n for L in set( Ls ):\n NRS = Ls.count(L)\n endf.append( endfFormatsModule.endfHeadLine( AWT, 0, L, 0, 18*NRS, NRS ) )\n for i in range(tableIndex, len(sortedTable)):\n if sortedTable[i][0]!=L: break\n endf.append( endfFormatsModule.endfDataLine( sortedTable[i][1:7] ) )\n block = matrix[MPAR*i:MPAR*(i+1), MPAR*i:MPAR*(i+1)]\n lis = [block[0,0], block[1,1], block[2,1], block[2,2]]\n if MPAR==4:\n lis += [block[3,1],block[3,2],block[3,3],0,0,0,0,0]\n else:\n lis += [0,0,0,0,0,0,0,0]\n endf += endfFormatsModule.endfDataList( lis )\n tableIndex += NRS\n\n elif LCOMP==1:\n NSRS, NLRS = 1,0 # short-range correlations only\n endf.append( endfFormatsModule.endfHeadLine( AWT, 0, 0, 0, NSRS, NLRS ) )\n NRB = NRes\n NVS = (NRB*MPAR)*(NRB*MPAR+1)//2 # length of the upper diagonal matrix\n endf.append( endfFormatsModule.endfHeadLine( 0,0, MPAR, 0, NVS+6*NRB, NRB ) )\n\n for res in sortedTable:\n endf.append( endfFormatsModule.endfDataLine( res[1:] ) )\n\n dataList = []\n for i in range(len(matrix)): dataList.extend( list( matrix[i][i:] ) )\n endf += endfFormatsModule.endfDataList( dataList )\n\n elif LCOMP==2:\n QX, LRX = 0, 0 # haven't encountered any competitive widths yet\n dat = numpy.sqrt( matrix.diagonal() )\n NRes = 0\n matrixDat = []\n for i in range(len(sortedTable)):\n params = sortedTable[i][1:]\n uncerts = [dat[MPAR*i],0,dat[MPAR*i+1],dat[MPAR*i+2],0,0]\n if MPAR==5: uncerts[-2:] = [dat[MPAR*i+3], dat[MPAR*i+4]]\n if not any(uncerts): continue # Some resonances may not be included in MF=32\n matrixDat += endfFormatsModule.endfDataList( params )\n matrixDat += endfFormatsModule.endfDataList( uncerts )\n NRes += 1\n\n endf.append(endfFormatsModule.endfHeadLine(AWT, QX, 0, LRX, 12 * NRes, NRes))\n endf += matrixDat\n\n # correlation matrix:\n NDIGIT = [a for a in conversionFlags.split(',') if a.startswith('NDIGIT')]\n NDIGIT = int( NDIGIT[0][-1] )\n endf += writeLCOMP2( matrix, NDIGIT, NRes*MPAR )\n\n else: # LRF = 7\n import numpy\n matrix = self.matrix.constructArray()\n RML = res.resolved.evaluated\n\n IFG = int( RML.reducedWidthAmplitudes )\n NJS = len( self.parameters )\n ISR = 0 # FIXME: scattering radius uncertainty not yet handled\n\n if LCOMP==1:\n AWRI, NSRS, NLRS = 0, 1, 0 # FIXME: hard-coded\n NJSX = len( RML.spinGroups )\n NPARB = 0\n\n endf.append(endfFormatsModule.endfContLine(0, 0, 0, LCOMP, 0, ISR))\n endf.append( endfFormatsModule.endfContLine(AWRI, 0, 0, 0, NSRS, NLRS) )\n endf.append( endfFormatsModule.endfContLine(0,0,NJSX,0,0,0) )\n for spingrp in RML.spinGroups:\n NCH = len( 
spingrp.channels )\n NRB = len( spingrp.resonanceParameters.table )\n NX = (NCH//6 + 1)*NRB\n endf.append( endfFormatsModule.endfContLine(0,0,NCH,NRB,6*NX,NX) )\n for res in spingrp.resonanceParameters.table:\n for jidx in range(NCH // 6 + 1):\n endfLine = res[jidx * 6:jidx * 6 + 6]\n while len(endfLine) < 6: endfLine.append(0)\n endf.append(endfFormatsModule.endfDataLine(endfLine))\n if NRB == 0:\n endf.append(endfFormatsModule.endfDataLine([0, 0, 0, 0, 0, 0]))\n NPARB += NRB * (NCH+1)\n\n # matrix header\n N = (NPARB * (NPARB+1))//2\n endf.append( endfFormatsModule.endfContLine(0,0,0,0,N,NPARB))\n dataList = []\n for i in range(len(matrix)): dataList.extend( list( matrix[i][i:] ) ) # upper-diagonal\n endf += endfFormatsModule.endfDataList( dataList )\n\n elif LCOMP==2:\n uncertainties = list(numpy.sqrt(matrix.diagonal()))\n endf.append(endfFormatsModule.endfContLine(0, 0, IFG, LCOMP, NJS, ISR))\n endf.extend( resonancesRewriteModule.writeRMatrixParticlePairs(RML, targetInfo) )\n\n uidx = 0\n NNN = 0\n for spingrp in RML.spinGroups:\n NRSA = len(spingrp.resonanceParameters.table)\n if NRSA==0: continue\n NCH, spinGroupHeader = resonancesRewriteModule.writeRMatrixSpinGroupHeader(RML, spingrp, targetInfo)\n NNN += NRSA * (NCH + 1) # +1 for resonance energy\n endf.extend( spinGroupHeader )\n\n # write resonance parameters (redundant with MF=2), followed by uncertainties\n NX = (NCH//6 + 1)*NRSA\n endf.append( endfFormatsModule.endfHeadLine( 0,0,0,NRSA,12*NX,NX ) )\n for res in spingrp.resonanceParameters.table:\n for jidx in range(NCH//6+1):\n endfLine = res[jidx*6:jidx*6+6]\n while len(endfLine)<6: endfLine.append(0)\n endf.append( endfFormatsModule.endfDataLine( endfLine ) )\n\n uncertaintiesThisResonance = uncertainties[uidx:uidx + len(res)]\n for jidx in range(NCH//6+1):\n endfLine = uncertaintiesThisResonance[jidx*6:max(jidx*6+6, NCH)]\n while len(endfLine)<6: endfLine.append(0)\n endf.append( endfFormatsModule.endfDataLine( endfLine ) )\n uidx += len(res)\n\n if NRSA==0:\n endf.append( endfFormatsModule.endfDataLine( [0,0,0,0,0,0] ) )\n\n # correlation matrix:\n NDIGIT = [a for a in conversionFlags.split(',') if a.startswith('NDIGIT')]\n NDIGIT = int(NDIGIT[0][-1])\n endf += writeLCOMP2(matrix, NDIGIT, NNN)\n else:\n raise NotImplementedError(\"MF32 LRF7 with LCOMP=%d\" % LCOMP)\n\n endfMFList[32][151] += endf\n\nmodelParametersModule.parameterCovarianceMatrix.toENDF6 = toENDF6\n\n\ndef averageParametersToENDF6( averageParameterSections, endfMFList, flags, targetInfo, verbosityIndent):\n \"\"\"\n Unresolved resonance parameters need to be converted from multiple sections back into a single\n covariance matrix for ENDF-6\n \"\"\"\n\n endf = []\n res = targetInfo['reactionSuite'].resonances\n URR = res.unresolved\n EL,EH = URR.domainMin, URR.domainMax\n LRU,LRF,NRO,NAPS = 2,1,0,0\n endf.append(endfFormatsModule.endfContLine(EL,EH,LRU,LRF,NRO,NAPS))\n\n SPI = targetInfo['spin']\n if res.unresolved.evaluated.scatteringRadius is not None:\n AP = res.unresolved.evaluated.scatteringRadius.getValueAs('10*fm')\n else:\n AP = res.scatteringRadius.getValueAs('10*fm')\n\n NLS = len(URR.evaluated.Ls)\n AWRI = targetInfo['mass']\n endf.append(endfFormatsModule.endfHeadLine(SPI, AP, 0, 0, NLS, 0))\n\n MPARs, diagonal, diagonalPointers = [], [], []\n for lsection in URR.evaluated.Ls:\n NJS = len( lsection.Js )\n endf.append(endfFormatsModule.endfHeadLine(AWRI,0,lsection.L,0,6*NJS,NJS))\n\n for jsection in lsection.Js:\n\n params = {'D':0, 'AJ':jsection.J, 'GNO':0, 'GG':0, 'GF':0, 'GX':0}\n\n MPAR = 
0\n for parameter in [jsection.levelSpacing] + list(jsection.widths):\n if parameter.data.uncertainty is None: break\n covarianceMatrix = parameter.data.uncertainty.data.link\n\n savedParams = targetInfo['ENDFconversionFlags'].get(covarianceMatrix,\"\").split(',')\n for p in savedParams:\n key, value = p.split('=')\n params[key] = float(value)\n\n if covarianceMatrix.matrix.array.shape != (1,1):\n raise NotImplementedError(\"ENDF-6 format does not support energy-dependent unresolved covariances\")\n diagonal.append( covarianceMatrix.matrix.array.values[0] )\n diagonalPointers.append( parameter )\n MPAR += 1\n\n MPARs.append( MPAR )\n\n endf.append(endfFormatsModule.endfDataLine([params[key] for key in ('D','AJ','GNO','GG','GF','GX')]))\n\n assert len(set(MPARs)) == 1 # same number of widths for each L/J section\n MPAR = MPARs[0]\n NPAR = len(diagonal)\n\n endf.append(endfFormatsModule.endfContLine(0,0,MPAR,0,(NPAR*(NPAR+1))//2,NPAR))\n matrix = numpy.identity(NPAR) * diagonal\n\n crossTerms = [section for section in averageParameterSections if section.crossTerm]\n for crossTerm in crossTerms:\n ridx = diagonalPointers.index( crossTerm.rowData.link )\n cidx = diagonalPointers.index( crossTerm.columnData.link )\n matrix[ridx,cidx] = matrix[cidx,ridx] = gndsToENDF6Module.getForm( targetInfo['style'], crossTerm ).matrix.array.values[0]\n\n datalist = []\n for idx,row in enumerate(matrix):\n datalist += row[idx:].tolist()\n endf += endfFormatsModule.endfDataList(datalist)\n\n endfMFList[32][151] += endf\n",
"#! /usr/bin/env python\n# <<BEGIN-copyright>>\n# Copyright 2021, Lawrence Livermore National Security, LLC.\n# See the top-level COPYRIGHT file for details.\n# \n# SPDX-License-Identifier: BSD-3-Clause\n# <<END-copyright>>\n\n\"\"\"\ncompareCrossSections.py: compare the cross section for given MT number from two different evaluated files.\n\"\"\"\nimport sys, traceback\n\ndef compare_plot( xsc1, xsc2, title=\"comparison plot\", legend1=\"first file\", legend2=\"second file\",\n saveFile=None, legendXY = (0.05, 0.95) ):\n \"\"\" starting with XYs data for xsc1 and xsc2, draw a comparison plot \"\"\"\n from fudge.vis.matplotlib import plot2d\n import matplotlib.pyplot as plt\n\n if xsc1.domain() != xsc2.domain():\n xsc1, xsc2 = xsc1.mutualify( 1e-8, 1e-8, 0, xsc2, 1e-8, 1e-8, 0 )\n diff = xsc1 - xsc2\n mean = (xsc1 + xsc2) / 2.0\n\n import numpy\n x1,y1 = list( map( numpy.array, diff.copyDataToXsAndYs() ) )\n x2,y2 = list( map( numpy.array, mean.copyDataToXsAndYs() ) )\n y2[ (y2==0)*(y1==0) ] = 1.0 # silence zero/zero division warnings\n relative_diff = list( zip( x1, y1 / y2 * 100 ) )\n\n \"\"\" # XYs division can take a long time, unnecessary in this case\n mean.nf_pointwiseXY.setSafeDivide( True ) # control divide/0 errors\n relative_diff = (xsc1 - xsc2) / mean * 100\n \"\"\"\n\n plot1 = plot2d.DataSet2d( xsc1, legend=legend1, symbol=\"+\" )\n plot2 = plot2d.DataSet2d( xsc2, legend=legend2, lineStyle=\"--\", symbol=\"+\", color=\"red\" )\n reldiff_plot = plot2d.DataSet2d( relative_diff, legend=\"percent difference\" )\n\n xAxisSettings = plot2d.AxisSettings( label=\"\", isLog=True )\n yUnit = args.yUnit or 'barn'\n yAxisSettings = plot2d.AxisSettings( label=\"Cross Section (%s)\" % yUnit, isLog=True )\n\n fig = plt.figure( figsize=(10,8) )\n fig.subplots_adjust( top=0.88, bottom=0.12, wspace=0.4 )\n\n ax1 = plt.subplot2grid((4,1), (0,0), rowspan=3)\n mplot = plot2d.__makePlot2d( [plot1, plot2], xAxisSettings, yAxisSettings,\n legendOn=True, legendXY=legendXY, thePlot = ax1, minY=0 )\n plt.setp( ax1.get_xticklabels(), visible=False )\n plt.setp( ax1.get_label(), visible=False )\n\n # also plot the relative difference (needs different y-axis):\n xUnit = args.xUnit or 'eV'\n xAxisSettings = plot2d.AxisSettings( label=\"$E_n$ (%s)\" % xUnit, isLog=True )\n yAxisSettings = plot2d.AxisSettings( label=\"% diff\" )\n\n ax2 = plt.subplot2grid((4,1), (3,0), sharex=ax1)\n plot2d.__makePlot2d( [reldiff_plot], xAxisSettings, yAxisSettings,\n legendOn=False, thePlot = ax2, minY=0)\n # tick marks may be too dense on this y-axis:\n #ax2.get_yaxis().set_ticks( [-0.2,0,0.2] )\n\n plt.suptitle( title, fontsize=24, fontweight='bold' )\n if saveFile: plt.savefig( saveFile )\n else: plt.show()\n\ndef process_args():\n from argparse import ArgumentParser\n parser = ArgumentParser(\n description = \"\"\"Compare the same cross section in two different evaluations,\n or compare a summed cross section from one evaluation with the sum of its parts\n (using option --summed).\"\"\",\n epilog = \"\"\"Input files can be in GNDS or ENDF format.\n Resonances will be reconstructed ONLY for input files in GNDS format.\n For ENDF-6 files, reconstruction should be done before-hand using RECENT or another tool.\"\"\",\n )\n parser.add_argument( \"mt\", type=int, help=\"ENDF MT of reaction to compare\" )\n parser.add_argument( \"file1\", type=str, help=\"First file\" )\n parser.add_argument( \"file2\", type=str, nargs=\"?\",\n help=\"Second file (required unless option --summed supplied)\" )\n parser.add_argument( \"-t\", 
\"--tolerance\", type=float, default=0.001,\n help=\"specify tolerance for reconstruction. 0.001 => 0.1%%\" )\n parser.add_argument( \"-l\", \"--legend\", default=None, nargs=2,\n help=\"legend for each evaluation. Requires two legends\" )\n parser.add_argument( \"-L\", \"--legendLocation\", default='ul',\n help=\"legend location: 'ul', 'ur', 'll' or 'lr'\" )\n parser.add_argument( \"-T\", \"--title\", default=None,\n help=\"specify plot title\" )\n parser.add_argument( \"-o\", \"--outfile\", default=None, help=\"Output file name\")\n parser.add_argument( \"--xUnit\", type=str, help=\"Convert x-axes to this unit (e.g. MeV)\" )\n parser.add_argument( \"--yUnit\", type=str, help=\"Convert y-axes to this unit (e.g. mb)\" )\n parser.add_argument( \"-S\", \"--summed\", action='store_true', default=False,\n help=\"For a single evaluation, compare a summed cross section (e.g. total, inelastic) with the sum of its parts. Only one input file needed\" )\n return parser.parse_args()\n\nif __name__ == '__main__':\n from fudge import reactionSuite as reactionSuiteModule, styles as stylesModule\n from fudge.reactionData import crossSection\n from brownies.legacy.converting import endfFileToGNDS\n\n args = process_args()\n\n reconstructedStyleName = 'tmp_reconstructed'\n\n def getReactionSuite( filename, singleMTOnly = None ):\n try:\n RS = reactionSuiteModule.readXML( filename )\n except:\n try:\n rce = endfFileToGNDS.endfFileToGNDS( filename, singleMTOnly = singleMTOnly,\n skipBadData = True, continuumSpectraFix = True,\n parseCrossSectionOnly = True)\n RS, c = rce['reactionSuite'], rce['covarianceSuite']\n except Exception as excep:\n print(\"Exception raised:\", excep)\n print(\"File %s doesn't seem to be a legal ENDF or GNDS file!\" % filename)\n traceback.print_exc(file=sys.stdout)\n sys.exit()\n RS.originalFile = filename\n return RS\n\n def getXS( reactionSuite, MT, sumsOnly = False ):\n allReacs = list( reactionSuite.sums.crossSectionSums )\n if not sumsOnly:\n allReacs += list(reactionSuite.reactions)\n reac = [r for r in allReacs if r.ENDF_MT == MT]\n if len(reac) != 1:\n print(\"Couldn't find unique reaction for MT%d in %s\" % (MT, reactionSuite.originalFile))\n xsc = reac[0].crossSection\n if isinstance( xsc.evaluated, crossSection.resonancesWithBackground ):\n evalStyle = reactionSuite.styles.getEvaluatedStyle()\n reconstructedStyle = stylesModule.crossSectionReconstructed( reconstructedStyleName, derivedFrom=evalStyle.label )\n reactionSuite.reconstructResonances( reconstructedStyle, accuracy=args.tolerance )\n pwxs = xsc[ reconstructedStyleName ]\n else:\n pwxs = xsc.toPointwise_withLinearXYs( accuracy = 1e-3, lowerEps = 1e-8 )\n return pwxs.convertAxisToUnit(1,'eV').convertAxisToUnit(0,'b')\n\n if args.summed:\n RS = getReactionSuite( args.file1 )\n xs1 = getXS(RS, args.mt, sumsOnly = True)\n summedReac = [r for r in (RS.sums.crossSectionSums) if int( r.ENDF_MT ) == args.mt]\n if len(summedReac) != 1:\n print(\"Couldn't find unique summed reaction for MT%d in %s\" % (args.mt, RS.originalFile))\n sys.exit(1)\n summedReac = summedReac[0]\n if reconstructedStyleName in summedReac.summands[0].link:\n summedXsc = summedReac.summands[0].link[ reconstructedStyleName ]\n else:\n summedXsc = summedReac.summands[0].link.toPointwise_withLinearXYs( accuracy = 1e-3, lowerEps = 1e-8 )\n for summand in summedReac.summands[1:]:\n if reconstructedStyleName in summand.link:\n newXsc = summand.link[ reconstructedStyleName ]\n else:\n newXsc = summand.link.toPointwise_withLinearXYs( accuracy = 1e-3, 
lowerEps = 1e-8 )\n summedXsc, newXsc = summedXsc.mutualify( 1e-8,1e-8,0, newXsc, 1e-8,1e-8,0 )\n summedXsc += newXsc\n xs2 = summedXsc.convertAxisToUnit(1,'eV').convertAxisToUnit(0,'b')\n l1,l2 = ('tabulated sum','calculated sum')\n else:\n rs1 = getReactionSuite(args.file1, singleMTOnly=args.mt)\n xs1 = getXS( rs1, args.mt )\n rs2 = getReactionSuite(args.file2, singleMTOnly=args.mt)\n xs2 = getXS( rs2, args.mt )\n l1,l2 = args.file1, args.file2\n\n if args.xUnit:\n for xs in (xs1,xs2):\n xs.convertUnits( {xs.axes[1].unit: args.xUnit } )\n if args.yUnit:\n for xs in (xs1,xs2):\n xs.convertUnits( {xs.axes[0].unit: args.yUnit } )\n\n if args.legend: l1,l2 = args.legend\n if args.title: title = args.title\n else: title=\"MT%i xsc comparison\" % args.mt\n\n legendXY = {'ul': (0.05, 0.95), 'ur': (0.75, 0.95),\n 'll': (0.05, 0.2), 'lr': (0.75, 0.2)}.get( args.legendLocation )\n\n compare_plot( xs1, xs2, title=title, legend1=l1, legend2=l2, legendXY=legendXY, saveFile=args.outfile )\n"
] |
[
[
"numpy.sqrt",
"numpy.rint",
"numpy.all",
"numpy.identity",
"numpy.outer",
"numpy.array",
"numpy.sum"
],
[
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplot2grid",
"matplotlib.pyplot.figure"
]
] |
danieldiezmallo/euBERTus
|
[
"056c36166362074cb08734ca9fb4c6e6b42f3aaf"
] |
[
"01 Crawling/crawler/crawler/spiders/eu_data_spider.py"
] |
[
"import scrapy\nfrom datetime import datetime\nimport pandas as pd\nimport os\nfrom shutil import rmtree\n\nclass EuDataSpiderSpider(scrapy.Spider):\n name = \"eu_data_spider\" \n def start_requests(self):\n '''\n Select the URLs to extract from\n ''' \n # Get the output dir for the data from the args\n self.output_directory = getattr(self, 'output_directory', 'output/') \n # Delete the output dir if exists and create anew\n try:\n if os.path.isdir(self.output_directory):\n rmtree(self.output_directory)\n os.makedirs(self.output_directory) \n except Exception as e:\n print(e)\n # Process the urls and other data from the file\n path_to_file = getattr(self, 'path_to_file', 'data.csv')\n\n for index, row in pd.read_csv(path_to_file).iterrows():\n if isinstance(row.html_to_download, str):\n yield scrapy.Request(\n url = row.html_to_download, \n callback = self.parse, \n meta = {'id':row.id, 'title':row.title_, 'authors':row.authors, 'date':row.date_document, 'celex':row.celex,'Full_OJ':row. Full_OJ}\n )\n\n def parse(self, response):\n print(f\"Processing.. {response.url}\")\n \n # Find the relevant paragraphs in the html documents\n paragraphs = response.xpath('//p/text()|//span/text()|//h1/text()|//h2/text()|//h3/text()').extract()\n with open(f\"{self.output_directory}{response.meta['id']}\", 'wb') as f:\n for paragraph in paragraphs:\n try:\n f.write(f'{paragraph}\\n'.encode(\"UTF-8\"))\n except Exception as e:\n # If a paragraph encounters an error, ignore it\n pass\n \n yield dict(\n id = response.meta['id'],\n title = response.meta['title'],\n url = response.url,\n authors = response.meta['authors'], \n date = response.meta['date'], \n celex = response.meta['celex'], \n Full_OJ = response.meta['Full_OJ'],\n timeStamp = datetime.now().strftime(\"%m/%d/%Y %H:%M:%S\"),\n )\n"
] |
[
[
"pandas.read_csv"
]
] |
hirnimeshrampuresoftware/quaternion
|
[
"dac0c5cd5ea286e3583b71e1a1ee1620e7e7a03d"
] |
[
"src/quaternion/calculus.py"
] |
[
"# Copyright (c) 2019, Michael Boyle\n# See LICENSE file for details: <https://github.com/moble/quaternion/blob/main/LICENSE>\n\nfrom __future__ import division, print_function, absolute_import\nimport numpy as np\nfrom quaternion.numba_wrapper import njit, jit, xrange\n\n\ndef fd_derivative(f, t):\n \"\"\"Fourth-order finite-differencing with non-uniform time steps\n\n The formula for this finite difference comes from Eq. (A 5b) of \"Derivative formulas and errors for non-uniformly\n spaced points\" by M. K. Bowen and Ronald Smith. As explained in their Eqs. (B 9b) and (B 10b), this is a\n fourth-order formula -- though that's a squishy concept with non-uniform time steps.\n\n TODO: If there are fewer than five points, the function should revert to simpler (lower-order) formulas.\n\n \"\"\"\n dfdt = np.empty_like(f)\n if (f.ndim == 1):\n _derivative(f, t, dfdt)\n elif (f.ndim == 2):\n _derivative_2d(f, t, dfdt)\n elif (f.ndim == 3):\n _derivative_3d(f, t, dfdt)\n else:\n raise NotImplementedError(\"Taking derivatives of {0}-dimensional arrays is not yet implemented\".format(f.ndim))\n return dfdt\n\n\n@njit\ndef _derivative(f, t, dfdt):\n for i in xrange(2):\n t_i = t[i]\n t1 = t[0]\n t2 = t[1]\n t3 = t[2]\n t4 = t[3]\n t5 = t[4]\n h1 = t1 - t_i\n h2 = t2 - t_i\n h3 = t3 - t_i\n h4 = t4 - t_i\n h5 = t5 - t_i\n h12 = t1 - t2\n h13 = t1 - t3\n h14 = t1 - t4\n h15 = t1 - t5\n h23 = t2 - t3\n h24 = t2 - t4\n h25 = t2 - t5\n h34 = t3 - t4\n h35 = t3 - t5\n h45 = t4 - t5\n dfdt[i] = (-((h2 * h3 * h4 + h2 * h3 * h5 + h2 * h4 * h5 + h3 * h4 * h5) / (h12 * h13 * h14 * h15)) * f[0]\n + ((h1 * h3 * h4 + h1 * h3 * h5 + h1 * h4 * h5 + h3 * h4 * h5) / (h12 * h23 * h24 * h25)) * f[1]\n - ((h1 * h2 * h4 + h1 * h2 * h5 + h1 * h4 * h5 + h2 * h4 * h5) / (h13 * h23 * h34 * h35)) * f[2]\n + ((h1 * h2 * h3 + h1 * h2 * h5 + h1 * h3 * h5 + h2 * h3 * h5) / (h14 * h24 * h34 * h45)) * f[3]\n - ((h1 * h2 * h3 + h1 * h2 * h4 + h1 * h3 * h4 + h2 * h3 * h4) / (h15 * h25 * h35 * h45)) * f[4])\n\n for i in xrange(2, len(t) - 2):\n t1 = t[i - 2]\n t2 = t[i - 1]\n t3 = t[i]\n t4 = t[i + 1]\n t5 = t[i + 2]\n h1 = t1 - t3\n h2 = t2 - t3\n h4 = t4 - t3\n h5 = t5 - t3\n h12 = t1 - t2\n h13 = t1 - t3\n h14 = t1 - t4\n h15 = t1 - t5\n h23 = t2 - t3\n h24 = t2 - t4\n h25 = t2 - t5\n h34 = t3 - t4\n h35 = t3 - t5\n h45 = t4 - t5\n dfdt[i] = (-((h2 * h4 * h5) / (h12 * h13 * h14 * h15)) * f[i - 2]\n + ((h1 * h4 * h5) / (h12 * h23 * h24 * h25)) * f[i - 1]\n - ((h1 * h2 * h4 + h1 * h2 * h5 + h1 * h4 * h5 + h2 * h4 * h5) / (h13 * h23 * h34 * h35)) * f[i]\n + ((h1 * h2 * h5) / (h14 * h24 * h34 * h45)) * f[i + 1]\n - ((h1 * h2 * h4) / (h15 * h25 * h35 * h45)) * f[i + 2])\n\n for i in xrange(len(t) - 2, len(t)):\n t_i = t[i]\n t1 = t[-5]\n t2 = t[-4]\n t3 = t[-3]\n t4 = t[-2]\n t5 = t[-1]\n h1 = t1 - t_i\n h2 = t2 - t_i\n h3 = t3 - t_i\n h4 = t4 - t_i\n h5 = t5 - t_i\n h12 = t1 - t2\n h13 = t1 - t3\n h14 = t1 - t4\n h15 = t1 - t5\n h23 = t2 - t3\n h24 = t2 - t4\n h25 = t2 - t5\n h34 = t3 - t4\n h35 = t3 - t5\n h45 = t4 - t5\n dfdt[i] = (-((h2 * h3 * h4 + h2 * h3 * h5 + h2 * h4 * h5 + h3 * h4 * h5) / (h12 * h13 * h14 * h15)) * f[-5]\n + ((h1 * h3 * h4 + h1 * h3 * h5 + h1 * h4 * h5 + h3 * h4 * h5) / (h12 * h23 * h24 * h25)) * f[-4]\n - ((h1 * h2 * h4 + h1 * h2 * h5 + h1 * h4 * h5 + h2 * h4 * h5) / (h13 * h23 * h34 * h35)) * f[-3]\n + ((h1 * h2 * h3 + h1 * h2 * h5 + h1 * h3 * h5 + h2 * h3 * h5) / (h14 * h24 * h34 * h45)) * f[-2]\n - ((h1 * h2 * h3 + h1 * h2 * h4 + h1 * h3 * h4 + h2 * h3 * h4) / (h15 * h25 * h35 * h45)) * f[-1])\n\n 
return\n\n\n@njit\ndef _derivative_2d(f, t, dfdt):\n for i in xrange(2):\n t_i = t[i]\n t1 = t[0]\n t2 = t[1]\n t3 = t[2]\n t4 = t[3]\n t5 = t[4]\n h1 = t1 - t_i\n h2 = t2 - t_i\n h3 = t3 - t_i\n h4 = t4 - t_i\n h5 = t5 - t_i\n h12 = t1 - t2\n h13 = t1 - t3\n h14 = t1 - t4\n h15 = t1 - t5\n h23 = t2 - t3\n h24 = t2 - t4\n h25 = t2 - t5\n h34 = t3 - t4\n h35 = t3 - t5\n h45 = t4 - t5\n for k in xrange(f.shape[1]):\n dfdt[i, k] = (\n -((h2 * h3 * h4 + h2 * h3 * h5 + h2 * h4 * h5 + h3 * h4 * h5) / (h12 * h13 * h14 * h15)) * f[0, k]\n + ((h1 * h3 * h4 + h1 * h3 * h5 + h1 * h4 * h5 + h3 * h4 * h5) / (h12 * h23 * h24 * h25)) * f[1, k]\n - ((h1 * h2 * h4 + h1 * h2 * h5 + h1 * h4 * h5 + h2 * h4 * h5) / (h13 * h23 * h34 * h35)) * f[2, k]\n + ((h1 * h2 * h3 + h1 * h2 * h5 + h1 * h3 * h5 + h2 * h3 * h5) / (h14 * h24 * h34 * h45)) * f[3, k]\n - ((h1 * h2 * h3 + h1 * h2 * h4 + h1 * h3 * h4 + h2 * h3 * h4) / (h15 * h25 * h35 * h45)) * f[4, k])\n\n for i in xrange(2, len(t) - 2):\n t1 = t[i - 2]\n t2 = t[i - 1]\n t3 = t[i]\n t4 = t[i + 1]\n t5 = t[i + 2]\n h1 = t1 - t3\n h2 = t2 - t3\n h4 = t4 - t3\n h5 = t5 - t3\n h12 = t1 - t2\n h13 = t1 - t3\n h14 = t1 - t4\n h15 = t1 - t5\n h23 = t2 - t3\n h24 = t2 - t4\n h25 = t2 - t5\n h34 = t3 - t4\n h35 = t3 - t5\n h45 = t4 - t5\n for k in xrange(f.shape[1]):\n dfdt[i, k] = (-((h2 * h4 * h5) / (h12 * h13 * h14 * h15)) * f[i - 2, k]\n + ((h1 * h4 * h5) / (h12 * h23 * h24 * h25)) * f[i - 1, k]\n - ((h1 * h2 * h4 + h1 * h2 * h5 + h1 * h4 * h5 + h2 * h4 * h5) / (h13 * h23 * h34 * h35))\n * f[i, k]\n + ((h1 * h2 * h5) / (h14 * h24 * h34 * h45)) * f[i + 1, k]\n - ((h1 * h2 * h4) / (h15 * h25 * h35 * h45)) * f[i + 2, k])\n\n for i in xrange(len(t) - 2, len(t)):\n t_i = t[i]\n t1 = t[-5]\n t2 = t[-4]\n t3 = t[-3]\n t4 = t[-2]\n t5 = t[-1]\n h1 = t1 - t_i\n h2 = t2 - t_i\n h3 = t3 - t_i\n h4 = t4 - t_i\n h5 = t5 - t_i\n h12 = t1 - t2\n h13 = t1 - t3\n h14 = t1 - t4\n h15 = t1 - t5\n h23 = t2 - t3\n h24 = t2 - t4\n h25 = t2 - t5\n h34 = t3 - t4\n h35 = t3 - t5\n h45 = t4 - t5\n for k in xrange(f.shape[1]):\n dfdt[i, k] = (\n -((h2 * h3 * h4 + h2 * h3 * h5 + h2 * h4 * h5 + h3 * h4 * h5) / (h12 * h13 * h14 * h15)) * f[-5, k]\n + ((h1 * h3 * h4 + h1 * h3 * h5 + h1 * h4 * h5 + h3 * h4 * h5) / (h12 * h23 * h24 * h25)) * f[-4, k]\n - ((h1 * h2 * h4 + h1 * h2 * h5 + h1 * h4 * h5 + h2 * h4 * h5) / (h13 * h23 * h34 * h35)) * f[-3, k]\n + ((h1 * h2 * h3 + h1 * h2 * h5 + h1 * h3 * h5 + h2 * h3 * h5) / (h14 * h24 * h34 * h45)) * f[-2, k]\n - ((h1 * h2 * h3 + h1 * h2 * h4 + h1 * h3 * h4 + h2 * h3 * h4) / (h15 * h25 * h35 * h45)) * f[-1, k])\n\n return\n\n\n@njit\ndef _derivative_3d(f, t, dfdt):\n for i in xrange(2):\n t_i = t[i]\n t1 = t[0]\n t2 = t[1]\n t3 = t[2]\n t4 = t[3]\n t5 = t[4]\n h1 = t1 - t_i\n h2 = t2 - t_i\n h3 = t3 - t_i\n h4 = t4 - t_i\n h5 = t5 - t_i\n h12 = t1 - t2\n h13 = t1 - t3\n h14 = t1 - t4\n h15 = t1 - t5\n h23 = t2 - t3\n h24 = t2 - t4\n h25 = t2 - t5\n h34 = t3 - t4\n h35 = t3 - t5\n h45 = t4 - t5\n for k in xrange(f.shape[1]):\n for m in xrange(f.shape[1]):\n dfdt[i, k, m] = (\n -((h2 * h3 * h4 + h2 * h3 * h5 + h2 * h4 * h5 + h3 * h4 * h5) / (h12 * h13 * h14 * h15)) * f[0, k, m]\n + ((h1 * h3 * h4 + h1 * h3 * h5 + h1 * h4 * h5 + h3 * h4 * h5) / (h12 * h23 * h24 * h25)) * f[1, k, m]\n - ((h1 * h2 * h4 + h1 * h2 * h5 + h1 * h4 * h5 + h2 * h4 * h5) / (h13 * h23 * h34 * h35)) * f[2, k, m]\n + ((h1 * h2 * h3 + h1 * h2 * h5 + h1 * h3 * h5 + h2 * h3 * h5) / (h14 * h24 * h34 * h45)) * f[3, k, m]\n - ((h1 * h2 * h3 + h1 * h2 * h4 + h1 * h3 * h4 + h2 * h3 * h4) / (h15 
* h25 * h35 * h45)) * f[4, k, m])\n\n for i in xrange(2, len(t) - 2):\n t1 = t[i - 2]\n t2 = t[i - 1]\n t3 = t[i]\n t4 = t[i + 1]\n t5 = t[i + 2]\n h1 = t1 - t3\n h2 = t2 - t3\n h4 = t4 - t3\n h5 = t5 - t3\n h12 = t1 - t2\n h13 = t1 - t3\n h14 = t1 - t4\n h15 = t1 - t5\n h23 = t2 - t3\n h24 = t2 - t4\n h25 = t2 - t5\n h34 = t3 - t4\n h35 = t3 - t5\n h45 = t4 - t5\n for k in xrange(f.shape[1]):\n for m in xrange(f.shape[1]):\n dfdt[i, k, m] = (-((h2 * h4 * h5) / (h12 * h13 * h14 * h15)) * f[i - 2, k, m]\n + ((h1 * h4 * h5) / (h12 * h23 * h24 * h25)) * f[i - 1, k, m]\n - ((h1 * h2 * h4 + h1 * h2 * h5 + h1 * h4 * h5 + h2 * h4 * h5) / (h13 * h23 * h34 * h35))\n * f[i, k, m]\n + ((h1 * h2 * h5) / (h14 * h24 * h34 * h45)) * f[i + 1, k, m]\n - ((h1 * h2 * h4) / (h15 * h25 * h35 * h45)) * f[i + 2, k, m])\n\n for i in xrange(len(t) - 2, len(t)):\n t_i = t[i]\n t1 = t[-5]\n t2 = t[-4]\n t3 = t[-3]\n t4 = t[-2]\n t5 = t[-1]\n h1 = t1 - t_i\n h2 = t2 - t_i\n h3 = t3 - t_i\n h4 = t4 - t_i\n h5 = t5 - t_i\n h12 = t1 - t2\n h13 = t1 - t3\n h14 = t1 - t4\n h15 = t1 - t5\n h23 = t2 - t3\n h24 = t2 - t4\n h25 = t2 - t5\n h34 = t3 - t4\n h35 = t3 - t5\n h45 = t4 - t5\n for k in xrange(f.shape[1]):\n for m in xrange(f.shape[1]):\n dfdt[i, k, m] = (\n -((h2 * h3 * h4 + h2 * h3 * h5 + h2 * h4 * h5 + h3 * h4 * h5) / (h12 * h13 * h14 * h15)) * f[-5, k, m]\n + ((h1 * h3 * h4 + h1 * h3 * h5 + h1 * h4 * h5 + h3 * h4 * h5) / (h12 * h23 * h24 * h25)) * f[-4, k, m]\n - ((h1 * h2 * h4 + h1 * h2 * h5 + h1 * h4 * h5 + h2 * h4 * h5) / (h13 * h23 * h34 * h35)) * f[-3, k, m]\n + ((h1 * h2 * h3 + h1 * h2 * h5 + h1 * h3 * h5 + h2 * h3 * h5) / (h14 * h24 * h34 * h45)) * f[-2, k, m]\n - ((h1 * h2 * h3 + h1 * h2 * h4 + h1 * h3 * h4 + h2 * h3 * h4) / (h15 * h25 * h35 * h45)) * f[-1, k, m])\n\n return\n\n\n@jit\ndef fd_indefinite_integral(f, t):\n Sfdt = np.empty_like(f)\n Sfdt[0] = 0.0\n for i in xrange(1, len(t)):\n for j in xrange(f.shape[1]):\n Sfdt[i, j] = Sfdt[i - 1, j] + (f[i, j] + f[i - 1, j]) * ((t[i] - t[i - 1]) / 2.0)\n return Sfdt\n\n\ndef fd_definite_integral(f, t):\n Sfdt = np.zeros_like(f)\n Sfdt[1:, ...] = (f[1:, ...] + f[:-1, ...]) * ((t[1:] - t[:-1]) / 2.0).reshape((-1,) + (1,)*(f.ndim-1))\n return np.sum(Sfdt, axis=0)\n\n\ndef spline_evaluation(f, t, t_out=None, axis=None, spline_degree=3,\n derivative_order=0, definite_integral_bounds=None):\n \"\"\"Approximate input data using a spline and evaluate\n\n Note that this function is somewhat more general than it needs to be, so that it can be reused\n for closely related functions involving derivatives, antiderivatives, and integrals.\n\n Parameters\n ==========\n f : (..., N, ...) array_like\n Real or complex function values to be interpolated.\n\n t : (N,) array_like\n An N-D array of increasing real values. The length of f along the interpolation axis must be\n equal to the length of t. The number of data points must be larger than the spline degree.\n\n t_out : None or (M,) array_like [defaults to None]\n The new values of `t` on which to evaluate the result. If None, it is assumed that some\n other feature of the data is needed, like a derivative or antiderivative, which are then\n output using the same `t` values as the input.\n\n axis : None or int [defaults to None]\n The axis of `f` with length equal to the length of `t`. 
If None, this function searches for\n an axis of equal length in reverse order -- that is, starting from the last axis of `f`.\n Note that this feature is helpful when `f` is one-dimensional or will always satisfy that\n criterion, but is dangerous otherwise. Caveat emptor.\n\n spline_degree : int [defaults to 3]\n Degree of the interpolating spline. Must be 1 <= spline_degree <= 5.\n\n derivative_order : int [defaults to 0]\n The order of the derivative to apply to the data. Note that this may be negative, in which\n case the corresponding antiderivative is returned.\n\n definite_integral_bounds : None or (2,) array_like [defaults to None]\n If this is not None, the `t_out` and `derivative_order` parameters are ignored, and the\n returned values are just the (first) definite integrals of the splines between these limits,\n along each remaining axis.\n\n \"\"\"\n import numpy as np\n from scipy.interpolate import InterpolatedUnivariateSpline\n\n # Process input arguments and get data into correct shape\n if not 1 <= spline_degree <= 5:\n raise ValueError('The spline degree must be between 1 and 5 (inclusive); it is {0}.'.format(spline_degree))\n t = np.asarray(t, dtype=float, order='C')\n if t.ndim != 1:\n raise ValueError('Input t values must be a one-dimensional array; this input has {0}.'.format(t.ndim))\n n = t.size\n if spline_degree >= n:\n raise ValueError('The spline degree ({0}) must be less than the number of data points ({1}).'.format(spline_degree, n))\n f = np.asanyarray(f)\n if axis is None:\n try:\n axis = f.ndim - 1 - list(reversed(f.shape)).index(n)\n except ValueError:\n axis = None\n if axis is None or f.shape[axis] != n:\n raise ValueError((\n \"Input function values `f` [shape {0}] should have at least one \"\n \"axis with the same length as input `t` [{1}], or bad axis input.\"\n ).format(f.shape, n))\n shape = list(f.shape)\n if definite_integral_bounds is not None:\n shape[axis] = 1 # We'll keep this axis for now (set to length 1) for uniform treatment, and remove it before returning\n definite_integral_bounds = np.array(definite_integral_bounds, dtype=float)\n if definite_integral_bounds.shape != (2,):\n raise ValueError(\"Expected exactly two bounds for the definite integral; got {0}.\".format(definite_integral_bounds.shape))\n f_out = np.empty(shape, dtype=f.dtype)\n t_a, t_b = definite_integral_bounds\n def evaluator(s):\n return s.integral(t_a, t_b)\n axis_slice = slice(max(0, np.argmin(np.abs(t-t_a))-10), min(n, np.argmin(np.abs(t-t_b))+11))\n else:\n if t_out is None:\n t_out = t\n axis_slice = slice(None)\n else:\n axis_slice = slice(max(0, np.argmin(np.abs(t-t_out[0]))-10), min(n, np.argmin(np.abs(t-t_out[-1]))+11))\n shape[axis] = t_out.size\n if derivative_order != 0 and derivative_order > spline_degree:\n raise ValueError(\"Order of derivative ({0}) must not be greater than degree of spline ({1})\".format(derivative_order, spline_degree))\n f_out = np.empty(shape, dtype=f.dtype)\n if derivative_order < 0:\n def evaluator(s):\n return s.antiderivative(n=-derivative_order)(t_out)\n elif derivative_order > 0:\n def evaluator(s):\n return s.derivative(n=derivative_order)(t_out)\n else:\n def evaluator(s):\n return s(t_out)\n def spline(f, t):\n return InterpolatedUnivariateSpline(t[axis_slice], f[axis_slice], k=spline_degree)\n\n # Move the axis to the end so that we can just iterate over all but the last index\n if axis != -1 and axis != n-1:\n f = np.moveaxis(f, axis, -1)\n f_out = np.moveaxis(f_out, axis, -1)\n\n # Iterate over all extra axes and 
evaluate\n complex_valued = np.iscomplexobj(f)\n for index in np.ndindex(f.shape[:-1]):\n if complex_valued:\n f_out[index] = evaluator(spline(f[index].real, t)) + 1j * evaluator(spline(f[index].imag, t))\n else:\n f_out[index] = evaluator(spline(f[index], t))\n\n # Undo the axis move we did previously to the output (input doesn't matter any more)\n if axis != -1 and axis != n-1:\n f_out = np.moveaxis(f_out, -1, axis)\n\n # If this is a definite integral, remove that extraneous axis\n if definite_integral_bounds is not None:\n f_out = np.squeeze(f_out, axis=axis)\n\n return f_out\n\n\ndef spline_derivative(f, t, derivative_order=1, axis=0):\n return spline_evaluation(f, t, axis=axis, derivative_order=derivative_order)\n\n\ndef spline_indefinite_integral(f, t, integral_order=1, axis=0):\n return spline_evaluation(f, t, axis=axis, derivative_order=-integral_order)\n\n\ndef spline_definite_integral(f, t, t1=None, t2=None, axis=0):\n if t1 is None:\n t1 = t[0]\n if t2 is None:\n t2 = t[-1]\n return spline_evaluation(f, t, axis=axis, definite_integral_bounds=(t1, t2))\n\n\ntry:\n from scipy.interpolate import InterpolatedUnivariateSpline\n spline = spline_evaluation\n derivative = spline_derivative\n antiderivative = spline_indefinite_integral\n indefinite_integral = spline_indefinite_integral\n definite_integral = spline_definite_integral\nexcept ImportError:\n import warnings\n warning_text = (\n \"\\n\\n\" + \"!\" * 57 + \"\\n\" +\n \"Could not import from scipy, which means that derivatives\\n\" +\n \"and integrals will use less accurate finite-differencing\\n\" +\n \"techniques. You may want to install scipy.\" +\n \"\\n\" + \"!\" * 57 + \"\\n\"\n )\n warnings.warn(warning_text)\n derivative = fd_derivative\n antiderivative = fd_indefinite_integral\n indefinite_integral = fd_indefinite_integral\n definite_integral = fd_definite_integral\n"
] |
[
[
"scipy.interpolate.InterpolatedUnivariateSpline",
"numpy.abs",
"numpy.asarray",
"numpy.empty_like",
"numpy.squeeze",
"numpy.asanyarray",
"numpy.zeros_like",
"numpy.iscomplexobj",
"numpy.ndindex",
"numpy.array",
"numpy.moveaxis",
"numpy.sum",
"numpy.empty"
]
] |
TheEagerLearner/Hacktoberfest2020
|
[
"a244d49c0f7959e05bbcdad39ee49b559b32e4a2"
] |
[
"Machine_Learning/Regression/Data_Preprocessing/dataPreprocessing.py"
] |
[
"import numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\n#reading data from the csv file\r\ndataset=pd.read_csv(\"Data.csv\")\r\n\r\n#Seperating dataset into dependent and independent variables\r\nX=dataset.iloc[:,:-1].values\r\nY=dataset.iloc[:,-1].values\r\n\r\n#removing NULL values from the dataset\r\n\r\nfrom sklearn.impute import SimpleImputer\r\n\r\nimputer=SimpleImputer(\r\n missing_values=np.nan,\r\n strategy=\"mean\" \r\n )\r\nX[:,1:]=imputer.fit_transform(X[:,1:])\r\n\r\n#Labeling categorical variables\r\n\r\nfrom sklearn.preprocessing import LabelEncoder,OneHotEncoder\r\nfrom sklearn.compose import ColumnTransformer\r\n\r\nLEncoder=LabelEncoder()\r\n\r\nY=LEncoder.fit_transform(Y)\r\n\r\nct=ColumnTransformer(transformers=[(\"encoder\",OneHotEncoder(),[0])],remainder=\"passthrough\")\r\n\r\nX=np.array(ct.fit_transform(X))\r\n\r\n#Spliting data into training and testing dataset\r\nfrom sklearn.model_selection import train_test_split\r\n\r\nX_train,X_test,Y_train,Y_test=train_test_split(X,Y,test_size=0.2,random_state=1)\r\n\r\n#Applying feature scaling to non categorical values\r\n#using Standardization\r\n\r\nfrom sklearn.preprocessing import StandardScaler\r\n\r\nss=StandardScaler()\r\n\r\nX_train[:,3:]=ss.fit_transform(X_train[:,3:])\r\nX_test[:,3:]=ss.fit_transform(X_test[:,3:])\r\n\r\n#Data processing completed\r\n\r\n\r\n"
] |
[
[
"pandas.read_csv",
"sklearn.preprocessing.OneHotEncoder",
"sklearn.impute.SimpleImputer",
"sklearn.model_selection.train_test_split",
"sklearn.preprocessing.StandardScaler",
"sklearn.preprocessing.LabelEncoder"
]
] |
GUO-Liping/Work_excel
|
[
"6c74dd28552a8dd7de32a93dab3f17e717c37458"
] |
[
"work_with_excel.py"
] |
[
"# -*- coding utf-8 -*-\nimport pandas as pd\na = pd.read_excel('data_a.xlsx', index_col=[0], header=[0])\nb = pd.read_excel('data_b.xlsx', sheet_name=None, index_col=None, header=None, skiprows=0, na_values=['NA']) # it's a dictionary\n# where index_col=[0] means that using the first column as the index rows\n# index_col=None means that nothing was set as the index rows\n# header=[2] means deleting the first and second rows and using the third row as the index columns\n# header=None means that nothing was set as the index columns\n\nb_sheet1 = b['Sheet1'] # it is a DataFrame\nb_sheet1[3] = b_sheet1[2] - b_sheet1[1]\nb_sheet2 = b_sheet1[2] - b_sheet1[1]\n\nb_sheet2.to_excel('data_b2.xlsx',sheet_name='Sheet1')\n\n##############################################################################################################\n'''\n# 18 Pandas Functions to Replace Excel with Python (and be happy forever)\n# Import Package\nimport pandas as pd\n\n# Work With Excel Files\ndf = pd.read_excel('filename.xlsx') # Read Excel File\ndf.to_excel('filename.xlsx', index=Fales) # Save Excel File\n# pandas.read_excel(io, sheet_name=0, header=0, names=None, index_col=None, usecols=None, squeeze=False, dtype=None, engine=None, converters=None, true_values=None, false_values=None, skiprows=None, nrows=None, na_values=None, keep_default_na=True, na_filter=True, verbose=False, parse_dates=False, date_parser=None, thousands=None, comment=None, skipfooter=0, convert_float=None, mangle_dupe_cols=True, storage_options=None)\n# DataFrame.to_excel(excel_writer, sheet_name='Sheet1', na_rep='', float_format=None, columns=None, header=True, index=True, index_label=None, startrow=0, startcol=0, engine=None, merge_cells=True, encoding=None, inf_rep='inf', verbose=True, freeze_panes=None, storage_options=None)\n\n# Work With CSV Files\ndf.read_csv('filename.csv') # Read CSV File\ndf.to_csv('filename.csv') # Save CSV File\n\n# Preview DataFrame\ndf.head()\ndf.tail()\ndf.shape()\n\n# Selection DataFrame\ndf1 = pd.DataFrame(np.random.randn(6,4), index=list('abcdef'), columns=list('ABCD'))\ndf1.loc[['a', 'b', 'd'], :]\ndf1.loc['d':, 'A':'C']\ndf1.loc['a']\ndf1.loc['a'] > 0\ndf1.loc[:, df1.loc['a'] > 0]\ndf1.loc['a', 'A']\ndf1.loc[lambda df: df.A > 0, :]\ndf1.loc[:, lambda df: ['A', 'B']]\ndf1.iloc[:, lambda df: [0, 1]]\ndf1[lambda df: df.columns[0]]\ndf1.A.loc[lambda s: s > 0]\ndf2 = pd.DataFrame(np.random.randn(6,4), index=list(range(0,12,2)), columns=list(range(0,8,2)))\ndf2.iloc[:3]\ndf2.iloc[1:5, 2:4]\ndf2.iloc[[1, 3, 5], [1, 3]]\ndf2.iloc[1:3, :]\ndf2.iloc[:, 1:3]\ndf2.iloc[1, 1]\n\n# Convert to numpy array\ndf = pd.DataFrame([[21, 72, 67], [23, 78, 69], [32, 74, 56], [52, 54, 76]],\tcolumns=['a', 'b', 'c'])\narr = df.to_numpy() # Note that the recommended approach is df.to_numpy().\narr = df.values\n\n# Get Statistics\ndf.count() # Count Rows\ndf.describe() # Get general statistics (min,max,mean,std,...)\ndf['col_name'].value_counts() # Get unique value count\n\n# Work with DataFrame\ndf['col_name'] # Select one column\ndf.fillna(0) # Replace Null values\ndf.dropna() # Remove Null values\ndf[df['col_name'] == 0] # Filter DataFrame\ndf.drop_duplicates() # Remove duplicates\ndf = pd.util.testing.makeDataFrame() # or # pd.get_dummies(pd.Series(list('abcaa'))) # Create dummy dataframe\n\n# Replace Vlookup With Pandas Join\ndf.join(df2,on='col_name') # vlookup\n\n# Pandas .replace() vs Excel Find and Replace\ndf.replace('to_replace', 'new_value) # Find and Replace\ndf.replace(regex=r'^ba.$', value='new') # Allows regex\n\n# 
Replace Pivot Tables with GroupBy\ndf.groupby(['col1','col2']).sum() # Pivot/Groupby\ndf.groupby(['col1','col2']).agg({'col1':'sum','col2':'count'})\n\n# Plot Your DataFrame\ndf.plot() # Plot your DataFrame\n'''\n##############################################################################################################"
] |
[
[
"pandas.read_excel"
]
] |
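The row above calls pandas.read_excel with sheet_name=None, which returns a dict of DataFrames keyed by sheet name, then does positional column arithmetic and writes the result back out. A minimal standalone sketch of that pattern, reusing the row's file names but assuming the workbook actually exists with a numeric sheet called 'Sheet1':

import pandas as pd

# sheet_name=None reads every sheet at once and returns {sheet_name: DataFrame}.
# 'data_b.xlsx' is taken from the row above and is assumed to exist locally.
sheets = pd.read_excel('data_b.xlsx', sheet_name=None, header=None, index_col=None)

sheet1 = sheets['Sheet1']        # pick one DataFrame out of the dict
diff = sheet1[2] - sheet1[1]     # with header=None the columns are the integers 0, 1, 2, ...

# A Series can be written straight back out to a new workbook.
diff.to_excel('data_b2.xlsx', sheet_name='Sheet1')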
ZhuangLab/Chromatin_Analysis_2020_cell
|
[
"ecc0d3e92e8b9cb0dcf970c29440f91404055da6"
] |
[
"sequential_tracing/source/domain_tools/DomainAnalysis.py"
] |
[
"import matplotlib.pylab as plt\nimport numpy as np\nimport pickle,os\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom scipy.spatial.distance import pdist,cdist,squareform\nfrom sklearn.cluster import DBSCAN\ndef rg_med(zxy):\n zxy_ = np.array(zxy)\n zxy_ = zxy_[~np.isnan(zxy_[:,0])]\n zxy_ = zxy_ - np.median(zxy_,0)\n return np.median(np.linalg.norm(zxy_,axis=-1))\ndef partition_map(list_,map_,mapU=None,return_map=False):\n \"\"\"\n Inputs\n takes a list [e1,e2,e3,e4,e5,e6] and a map (a list of indices [0,0,1,0,1,2]). map can be a list of symbols too. ['aa','aa','bb','aa','bb','cc']\n Output\n returns a sorted list of lists, e.g. [[e1, e2,e4],[e3,e5],[e6]]\n \"\"\"\n list__=np.array(list_)\n map__=np.array(map_)\n if mapU is None:\n mapU = np.unique(map__)\n if type(mapU)==str:\n if mapU=='ordered':\n mapU=get_map(map_)\n if return_map:\n return [list(list__[map__==element]) for element in mapU],list(mapU)\n return [list(list__[map__==element]) for element in mapU]\n\ndef resize(im__,scale_percent = 100):\n import cv2\n width = int(im__.shape[1] * scale_percent / 100)\n height = int(im__.shape[0] * scale_percent / 100)\n dim = (width, height)\n resized = cv2.resize(im__, dim, interpolation = cv2.INTER_NEAREST)\n return resized\ndef rotate_bound(image, angle):\n import cv2\n # grab the dimensions of the image and then determine the\n # center\n (h, w) = image.shape[:2]\n (cX, cY) = (w // 2, h // 2)\n \n # grab the rotation matrix (applying the negative of the\n # angle to rotate clockwise), then grab the sine and cosine\n # (i.e., the rotation components of the matrix)\n M = cv2.getRotationMatrix2D((cX, cY), -angle, 1.0)\n cos = np.abs(M[0, 0])\n sin = np.abs(M[0, 1])\n \n # compute the new bounding dimensions of the image\n nW = int((h * sin) + (w * cos))\n nH = int((h * cos) + (w * sin))\n \n # adjust the rotation matrix to take into account translation\n M[0, 2] += (nW / 2) - cX\n M[1, 2] += (nH / 2) - cY\n \n # perform the actual rotation and return the image\n return cv2.warpAffine(image, M, (nW, nH),cv2.INTER_NEAREST)\ndef get_boundaries_old(im,su=5,sl=5,valley=5,cutoff_max=1.,plt_val=False):\n \"\"\"Bintu et al 2018 candidate boundary calling\"\"\"\n im_=np.array(im)\n ratio,ration,center,centern=[],[],[],[]\n for i in range(len(im)):\n x_im_l,y_im_l = [],[]\n x_im_r,y_im_r = [],[]\n\n xn_im_l,yn_im_l = [],[]\n xn_im_r,yn_im_r = [],[]\n\n for j in range(sl):\n xn_im_l.extend(i+j-np.arange(su)-1)\n yn_im_l.extend([i+j]*su)\n xn_im_r.extend(i+j+sl-np.arange(su)-1)\n yn_im_r.extend([i+j+sl]*su)\n \n x_im_l.extend(i+j+np.arange(su)+1)\n y_im_l.extend([i+j]*su)\n x_im_r.extend(i+j+sl+np.arange(su)+1)\n y_im_r.extend([i+j+sl]*su)\n \n\n x_im_l,y_im_l,x_im_r,y_im_r = map(np.array,[x_im_l,y_im_l,x_im_r,y_im_r])\n xn_im_l,yn_im_l,xn_im_r,yn_im_r = map(np.array,[xn_im_l,yn_im_l,xn_im_r,yn_im_r])\n\n in_image = np.all(x_im_l>=0) and np.all(x_im_r>=0) and np.all(y_im_l>=0) and np.all(y_im_r>=0)\n in_image = in_image and np.all(x_im_l<len(im)) and np.all(x_im_r<len(im)) and np.all(y_im_l<len(im)) and np.all(y_im_r<len(im))\n\n in_imagen = np.all(xn_im_l>=0) and np.all(xn_im_r>=0) and np.all(yn_im_l>=0) and np.all(yn_im_r>=0)\n in_imagen = in_imagen and np.all(xn_im_l<len(im)) and np.all(xn_im_r<len(im)) and np.all(yn_im_l<len(im)) and np.all(yn_im_r<len(im))\n if in_image:\n val_l,val_r = np.nanmean(im_[x_im_l,y_im_l]),np.nanmean(im_[x_im_r,y_im_r])\n ratio.append(val_l/val_r)\n center.append(i+sl)\n if in_imagen:\n val_l,val_r = np.nanmean(im_[xn_im_l,yn_im_l]),np.nanmean(im_[xn_im_r,yn_im_r])\n 
ration.append(val_r/val_l)\n centern.append(i+sl)\n if False:#i==9:\n plt.figure(figsize=(20,20))\n plt.plot(xn_im_l,yn_im_l,'mo')\n plt.plot(xn_im_r,yn_im_r,'go')\n plt.plot(x_im_l,y_im_l,'ro')\n plt.plot(x_im_r,y_im_r,'bo')\n plt.imshow(im,interpolation='nearest',cmap='seismic_r',vmax=1000)\n plt.show()\n #print x_im_l,y_im_l,x_im_r,y_im_r\n\n center,ratio=np.array(center),np.array(ratio)\n centern,ration=np.array(centern),np.array(ration)\n max_ratio = np.zeros(len(im))+np.nan\n max_ratio[center]=ratio\n max_ratio[centern]=np.nanmax([max_ratio[centern],ration],axis=0)\n \n local_max_good = get_ind_loc_max(max_ratio,cutoff_max=cutoff_max,valley=valley)\n #local_max_goodn = get_ind_loc_max(ration,cutoff_max=cutoff_max,valley=valley)\n ###Plotting\n if plt_val:\n #plt.close('all')\n plt.figure(figsize=(12,7))\n #print local_max_good,local_max_goodn\n plt.plot(center,np.log(ratio),'o-')\n plt.plot(centern,np.log(ration),'o-')\n plt.plot(np.log(max_ratio),'k-')\n if len(local_max_good)>0:\n plt.plot(local_max_good,np.log(max_ratio[local_max_good]),'o')\n \n plt.show()\n fig, ax = plt.subplots(figsize=(12,7))\n if len(local_max_good)>0:\n ax.plot(local_max_good[:],local_max_good[:],'go',ms=10,mec='k',mew=2)\n #cax = ax.imshow(set_diag(img,np.nanmax(img)),interpolation='nearest',cmap='bwr')#,vmax=1000,vmin=0)\n cax = ax.imshow(im,interpolation='nearest',cmap='seismic')#,vmax=1000,vmin=0)\n cbar = fig.colorbar(cax)\n plt.show()\n return max_ratio,local_max_good\ndef ArrowHead_Feature(_arrowmap, _make_plot=True, _verbose=True):\n \"\"\"From Arrowhead map generate three feature matrices\n Inputs:\n _arrowmap: input arrowhead map, 2d-array\n _make_plot: whether make plot for arrowhead result, bool\n _verbose: say something!, bool\n Outputs:\n _S_sign: sum of signs of upper triangle - lower triangle, 2d matrix\n _S_sum: sum of values of upper triangle - lower triangle, 2d matrix\n _S_var: variance among values in upper and lower triangle, 2d matrix\"\"\"\n # get shape\n _arrow_shape = np.shape(_arrowmap)\n _dim = _arrow_shape[0]\n # initialize three feature matrices\n _S_sign = np.zeros(_arrow_shape)\n _S_sum = np.zeros(_arrow_shape)\n _S_var = np.zeros(_arrow_shape)\n # loop through entries\n for _i in range(1, _dim):\n #for _j in range(_i+1, min(int(np.ceil((_dim+2*_i)/3)), int(_i*2))):\n for _j in range(_i+1, _dim):\n _crop_dim = _j - _i\n # get limits\n _ulim = max(_i-_crop_dim,0) # upper and left limits\n _rlim = min(_j+2*_crop_dim, _dim) # right limit\n # if not cropped as a whole, crop again:\n if _j-_ulim != 2*_crop_dim or _rlim-_ulim != 4*_crop_dim:\n _crop_dim = min(_i, int((_dim-_j)/2))\n if _crop_dim < 1:\n continue\n else:\n _crop = np.copy(_arrowmap[_i-_crop_dim:_i+_crop_dim, _j-2*_crop_dim:_j+2*_crop_dim])\n else:\n # crop feature triangles\n _crop = np.copy(_arrowmap[_ulim:_j, _ulim:_rlim])\n for _c in range(2*_crop_dim):\n _crop[np.ceil(_c/2).astype(np.int):,_c] = np.nan # remove lower-left triangle\n _crop[:_crop_dim+int((_c+1)/2), _c+_crop.shape[0]] = np.nan # remote upper-right triangle\n # get sign sum var for this (i,j) pair\n _sign = np.nansum(_crop[:, :_crop.shape[0]]>0) - np.nansum(_crop[:, :_crop.shape[0]]<0) \\\n - np.nansum(_crop[:, _crop.shape[0]:]>0) + np.nansum(_crop[:, _crop.shape[0]:]<0)\n _sum = np.nansum(_crop[:, :_crop.shape[0]]) - np.nansum(_crop[:, _crop.shape[0]:])\n _num_elem = _crop[:, :_crop.shape[0]]\n _var = np.nanvar(_crop)\n # save\n _S_sign[_i,_j] = _sign\n _S_sum[_i,_j] = _sum\n _S_var[_i,_j] = _var\n _S_sign[_j,_i] = _S_sign[_i,_j]\n _S_sum[_j,_i] = 
_S_sum[_i,_j] \n _S_var[_j,_i] = _S_var[_i,_j]\n \n if _make_plot:\n plt.figure()\n plt.imshow(_S_sign, cmap='seismic')\n plt.colorbar()\n plt.title(\"sign\")\n plt.show()\n plt.figure()\n plt.imshow(_S_sum, cmap='seismic')\n plt.colorbar()\n plt.title(\"sum\")\n plt.show()\n plt.figure()\n plt.imshow(_S_var, cmap='seismic')\n plt.colorbar()\n plt.title(\"var\")\n plt.show()\n \n return _S_sign, _S_sum, _S_var\n\ndef interp1dnan(A):\n A_=np.array(A)\n ok = np.isnan(A)==False\n xp = ok.nonzero()[0]\n fp = A[ok]\n x = np.isnan(A).nonzero()[0]\n A_[np.isnan(A)] = np.interp(x, xp, fp)\n return A_\ndef chromosome_segment_RG(_chr, _group):\n \"\"\"Calculate radius of gyration given chr coordinates and selected segment group\"\"\"\n _segment = _chr[_group]\n return np.nanmean(np.nanvar(_segment, 0))\ndef interpolate_chr(_chr):\n \"\"\"linear interpolate chromosome coordinates\"\"\"\n _new_chr = np.array(_chr)\n for i in range(_new_chr.shape[-1]):\n _new_chr[:,i]=interp1dnan(_new_chr[:,i])\n return _new_chr\n\ndef Find_Boundaries(distmap, S_features, gaussian_size=0.25, lower_ind_thres=-5, make_plot=True):\n \"\"\"Primary algorithm to find domain boundaries\n Inputs:\n distmap: distance map for a chromosome, 2d-array\n S_features: tuple or list of features, list or tuple of 2d-array\n gaussian_size: sigma for gaussian filter applied to features to better call local maximum, float\n lower_ind_thres: lower boundary for accepted indices along off-diagonal lines, int\n make_plot: whether make plots, bool\n Outputs:\n selected_pk_coords: selected peaks in feature maps, which corresponds to domain boundaries, 1d-array\n \"\"\"\n from scipy.ndimage.interpolation import map_coordinates\n from scipy.signal import find_peaks\n #from astropy.convolution import Gaussian2DKernel,convolve\n \n dim = np.shape(distmap)[0]\n # genrate coordinates for line i+x, i+x/2 which arrow edges align:\n start_ind = np.arange(-int(dim/2),dim)\n coord_list = [np.stack([np.arange(np.abs(i),dim), max(0,i)/2+np.arange(max(0,i),dim+min(0,i))/2]) for i in start_ind] \n # set gaussian kernel\n #kernel = Gaussian2DKernel(x_stddev=gaussian_size)\n # initialize feature ids\n feature_list = []\n for feature_id in range(2):\n # gaussian filter this map\n if gaussian_size:\n feature_map = convolve(S_features[feature_id], kernel)\n else:\n feature_map = S_features[feature_id]\n # extract arrow lines\n arrow_lines = [map_coordinates(feature_map, _coords) for _coords in coord_list]\n # calculate mean to find local maximum\n arrow_line_means = np.array([np.mean(arrline) for arrline in arrow_lines])\n # calculate peaks for meean behavior line \n feature_line_ids = find_peaks(arrow_line_means, distance=3, width=2)[0] # this step is better to be more rigorious\n feature_line_ids = feature_line_ids[start_ind[feature_line_ids]>lower_ind_thres]\n feature_list.append(feature_line_ids)\n # plot selected lines\n #plt.figure()\n #plt.plot(start_ind, arrow_line_means)\n #plt.plot(start_ind[feature_line_ids], arrow_line_means[feature_line_ids], 'ro')\n #plt.show()\n # select shared feature_ids\n selected_ids = []\n for _id in feature_list[0]:\n if sum(np.abs(feature_list[1]-_id) <= 1) > 0:\n _local_ids = feature_list[1][np.abs(feature_list[1]-_id) <= 1]\n _local_ids = np.concatenate([[_id], _local_ids])\n selected_ids.append(np.min(_local_ids))\n selected_ids = np.array(selected_ids)\n if len(selected_ids) == 0:\n return np.array([])\n # selected ids plus +-1 lines\n feature_map = convolve(S_features[1], kernel)\n selected_coords = [coord_list[_i] for _i 
in np.unique([selected_ids, selected_ids-1, selected_ids+1])]\n selected_lines = [map_coordinates(feature_map, _coords) for _coords in selected_coords]\n # call peaks\n pks = [find_peaks(_line, distance=2, width=2)[0] for _line in selected_lines]\n pk_coords = np.sort(np.concatenate([_coord[0,_pk] for _coord, _pk in zip(selected_coords, pks)]))\n # select into connected groups\n selected_groups = []\n _group = []\n for _i,_c in enumerate(pk_coords):\n if len(_group) == 0:\n _group.append(_c)\n elif sum(np.abs(np.array(_group)-_c)<=1) >= 1:\n _group.append(_c)\n np.delete(pk_coords, _i)\n else:\n if len(_group) > 1:\n selected_groups.append(_group)\n _group = []\n # pick from connected groups\n group_size_th = 2\n selected_pk_coords = np.sort([int(np.round(np.mean(_group))) for _group in selected_groups if len(_group) >= group_size_th])\n if make_plot:\n plt.figure()\n plt.imshow(distmap, cmap='seismic_r', vmin=0, vmax=1000)\n plt.colorbar()\n plt.title(\"input distance map\")\n edges = [0] + list(selected_pk_coords)+[dim]\n for _i,_c in enumerate(edges[:-1]):\n plt.plot(np.arange(_c, edges[_i+1]), np.ones(edges[_i+1]-_c)*_c, color='y', linewidth=3.0)\n plt.plot(np.ones(edges[_i+1]-_c)*edges[_i+1], np.arange(_c, edges[_i+1]), color='y', linewidth=3.0)\n plt.xlim([0,dim])\n plt.show()\n \n return selected_pk_coords\n\n\ndef Generate_ArrowHead(distmap, _make_plot=True, _normalization=False, _scale=[200,1000], _gaussian_size=0):\n \"\"\"Function to transfer normal distance map to arrow head map\n Inputs:\n distmap: n-by-n array for pair-wise distance, 2d-array\n _make_plot: whether make plot for arrowhead result, bool\n _normalization: whether do normalization for orignial iamage, bool\n _scale: if _normalization is given, give lower and upper limits which will be transformed into 0 and 1, list of 2\n _gaussian_size: sigma for gaussian blur the original distance map, int > 0\n Outputs:\n _arrowmap: arrowhead map for given distance map, 2d-array\"\"\"\n _distmap_shape = distmap.shape\n _arrowmap = np.zeros(_distmap_shape)\n ## normalization\n if _normalization:\n _normed_map = (distmap-min(_scale))/(max(_scale)-min(_scale))\n _normed_map[_normed_map>1] = 1\n _normed_map[_normed_map<0] = 0\n else:\n _normed_map = distmap\n ## gaussian convolve\n if _gaussian_size > 0:\n pass\n ## Do arrowhead transformation here\n for i in range(_distmap_shape[0]):\n for j in range(1, _distmap_shape[1]-i):\n _arrowmap[i, i+j] = (_normed_map[i,i-j]-_normed_map[i,i+j])/(_normed_map[i,i-j]+_normed_map[i,i+j])\n _arrowmap[i+j, i] = _arrowmap[i, i+j]\n if _gaussian_size > 0:\n #_arrowmap = convolve(_arrowmap, _kernel)\n pass\n\n if _make_plot:\n plt.figure()\n plt.imshow(_normed_map, cmap='seismic')\n plt.colorbar()\n plt.title(\"input distance map\")\n plt.show()\n plt.figure()\n plt.imshow(_arrowmap, cmap='seismic_r')\n plt.colorbar()\n plt.title(\"Arrowhead\")\n plt.show()\n return _arrowmap\n\ndef get_domain_fusion_rg(zxy,dom_starts_f,percR=90):\n \"\"\" zxy is Nx3 in nm (with nans) and dom_starts_f are the edges of all domains\n This algoritm checks to see if any two domains \"fuse\" by checking when applying \n DBSCAN whether they are primarily in one class\n \"\"\"\n #zxy = pts[ichr]\n #dom_starts_f = dom_starts_fs[ichr]\n bad = np.isnan(zxy[:,0])\n zxy_ = zxy[~bad]\n #mat=squareform(pdist(zxy_))\n\n dom_starts = np.zeros(len(zxy))\n dom_starts[dom_starts_f[:-1]]=1\n dom_starts = list(np.where(dom_starts[~bad])[0])+[len(zxy_)]\n dices = []\n dices_f = []\n for i in range(len(dom_starts)-1):\n for j in 
range(i):\n zxy1 = zxy_[dom_starts[i]:dom_starts[i+1]]\n zxy2 = zxy_[dom_starts[j]:dom_starts[j+1]]\n cm1 = np.median(zxy1,0)\n cm2 = np.median(zxy2,0)\n \n r1 = np.percentile(np.linalg.norm(zxy1-cm1,axis = -1),percR)\n r2 = np.percentile(np.linalg.norm(zxy2-cm2,axis = -1),percR)\n d12 = np.linalg.norm(cm1-cm2)\n dice = d12<(r1+r2)\n if dice:\n dices.append([dom_starts[i],dom_starts[i+1],dom_starts[j],dom_starts[j+1],dice])\n dices_f.append([dom_starts_f[i],dom_starts_f[i+1],dom_starts_f[j],dom_starts_f[j+1]])\n dices_f = np.array(dices_f)\n return dices_f\ndef get_domain_fusion(zxy,dom_starts_f,thglob=0.5,eps=500,min_samples=2):\n \"\"\" zxy is Nx3 in nm (with nans) and dom_starts_f are the edges of all domains\n This algoritm checks to see if any two domains \"fuse\" by checking when applying \n DBSCAN whether they are primarily in one class\n \"\"\"\n #zxy = pts[ichr]\n #dom_starts_f = dom_starts_fs[ichr]\n bad = np.isnan(zxy[:,0])\n zxy_ = zxy[~bad]\n #mat=squareform(pdist(zxy_))\n\n dom_starts = np.zeros(len(zxy))\n dom_starts[dom_starts_f[:-1]]=1\n dom_starts = list(np.where(dom_starts[~bad])[0])+[len(zxy_)]\n dices = []\n dices_f = []\n for i in range(len(dom_starts)-1):\n for j in range(i):\n zxy1 = zxy_[dom_starts[i]:dom_starts[i+1]]\n zxy2 = zxy_[dom_starts[j]:dom_starts[j+1]]\n\n X = np.concatenate([zxy1,zxy2])\n #X = zxy_\n db = DBSCAN(eps=eps, min_samples=min_samples).fit(X)\n #db = SpectralClustering(n_clusters=2).fit(X)\n lab1,lab2 = db.labels_[:len(zxy1)],db.labels_[len(zxy1):]\n\n lab1 = lab1[lab1!=-1]\n lab2 = lab2[lab2!=-1]\n dice = False\n if len(lab1)>0 and len(lab2)>0:\n e1,cts1 = np.unique(lab1,return_counts=True)\n e1=e1[np.argmax(cts1)]\n e2,cts2 = np.unique(lab2,return_counts=True)\n e2=e2[np.argmax(cts2)]\n #dice = 1.*(np.sum(np.in1d(lab1,lab2))+np.sum(np.in1d(lab2,lab1)))/(len(lab1)+len(lab2))\n dice = (e1==e2) and (np.max(cts1)/len(lab1)>thglob) and (np.max(cts2)/len(lab2)>thglob)\n if dice:\n dices.append([dom_starts[i],dom_starts[i+1],dom_starts[j],dom_starts[j+1]])\n dices_f.append([dom_starts_f[i],dom_starts_f[i+1],dom_starts_f[j],dom_starts_f[j+1]])\n dices_f = np.array(dices_f)\n return dices_f\n \ndef get_ind_loc_max(ratio,cutoff_max=1.,valley=3):\n \"\"\"get local maximum within valley size bigger than cut-off\"\"\"\n local_max_good_ = []\n for id_ in range(len(ratio)):\n l = np.max([0,id_-valley])\n r = np.min([len(ratio),id_+valley])\n if ratio[id_]==np.nanmax(ratio[l:r]) and ratio[id_]>cutoff_max:\n local_max_good_.append(id_)\n return np.array(local_max_good_)\ndef calc_sep_cor(mat,a,b,c,context=75):\n #a,b,c = dom_starts[i],dom_starts[i+1],dom_starts[i+2]\n bst,bend = b-context,b+context\n if bst<0:bst=0\n if bend>len(mat):bend=len(mat)\n dom1_cont = mat[a:b,bst:bend]\n dom2_cont = mat[b:c,bst:bend]\n func = np.mean\n dom1_cont = func(dom1_cont,0)\n dom2_cont = func(dom2_cont,0)\n dom1_cont-=func(dom1_cont)\n dom2_cont-=func(dom2_cont)\n #cor_coef = np.median(dom1_cont*dom2_cont)/np.sqrt(np.median(dom1_cont*dom1_cont)*np.median(dom2_cont*dom2_cont))\n cor_coef = func(dom1_cont*dom2_cont)/np.sqrt(func(dom1_cont*dom1_cont)*func(dom2_cont*dom2_cont))\n return cor_coef\ndef calc_seps_cor(mat,dom_starts,context=75):\n return np.array([calc_sep_cor(mat,dom_starts[i],dom_starts[i+1],dom_starts[i+2],context=context) \n for i in range(len(dom_starts)-2)])\ndef get_dom_starts_cor(zxy,dom_sz=5,context=1000,cut_off=0.66,dist_cutoff=750):\n \"\"\"\n This is the main function for domain calling.\n dom_sz is the minimum domains size - this is usesd to caclulate 
candidate boundaries.\n context is how much of the off diagonal to consider for correlation between domains.\n Prior to computing the correlations to decide whtehr domains fuse, dist_cutoff is used to threshold\n the distance matrix.\n Use as:\n #zxy,zxy_,mat,dom_starts_,dom_starts,dom_starts_f= \n get_dom_starts_cor(pts[99],dom_sz=5,context=1000,cut_off=0.66,dist_cutoff=750)\n \"\"\"\n\n #throw away nans\n zxy_ = np.array(zxy)\n bad = np.isnan(zxy_[:,0])\n zxy_ = zxy_[~bad]\n \n \n #get candidate boundaries\n dists = []\n \n for i in range(len(zxy_)):\n cm1= np.nanmean(zxy_[max(i-dom_sz,0):i],axis=0)\n cm2= np.nanmean(zxy_[i:i+dom_sz],axis=0)\n dist = np.linalg.norm(cm1-cm2)\n dists.append(dist)\n\n bds_candidates = get_ind_loc_max(dists,cutoff_max=0,valley=dom_sz)\n dom_starts= [0]+[dm for dm in bds_candidates if dm>dom_sz and dm<len(zxy_)-dom_sz]+[len(zxy_)]\n \n mat = squareform(pdist(zxy_))\n mat_ = mat<dist_cutoff\n #mat_ = np.exp(-mat*mat/(2*dist_cutoff**2))\n dom_starts_ = list(dom_starts)\n dom_starts = list(dom_starts)\n \n while len(dom_starts)>2:\n seps = calc_seps_cor(mat_,dom_starts,context=context)\n imin = np.argmax(seps)\n if seps[imin]>cut_off:\n dom_starts.pop(imin+1)\n seps = list(seps)\n seps.pop(imin)\n else:\n break\n \n dom_starts_f = np.concatenate([np.arange(len(zxy))[~bad],[len(zxy)]])\n dom_starts_f = dom_starts_f[dom_starts]#\n return zxy,zxy_,mat,dom_starts_,dom_starts,dom_starts_f\n"
] |
[
[
"numpy.nanmax",
"scipy.signal.find_peaks",
"sklearn.cluster.DBSCAN",
"scipy.ndimage.interpolation.map_coordinates",
"numpy.all",
"numpy.max",
"numpy.concatenate",
"numpy.mean",
"numpy.nanmean",
"numpy.where",
"numpy.unique",
"matplotlib.pylab.xlim",
"numpy.arange",
"numpy.nanvar",
"numpy.ceil",
"numpy.copy",
"numpy.argmax",
"matplotlib.pylab.figure",
"numpy.interp",
"numpy.nansum",
"matplotlib.pylab.plot",
"numpy.zeros",
"numpy.log",
"numpy.min",
"numpy.isnan",
"numpy.median",
"numpy.delete",
"matplotlib.pylab.imshow",
"matplotlib.pylab.colorbar",
"numpy.array",
"matplotlib.pylab.show",
"numpy.abs",
"numpy.linalg.norm",
"matplotlib.pylab.title",
"numpy.ones",
"scipy.spatial.distance.pdist",
"numpy.shape",
"matplotlib.pylab.subplots"
]
] |
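Generate_ArrowHead in the file above converts a pairwise distance map into an arrowhead map via (d[i,i-j] - d[i,i+j]) / (d[i,i-j] + d[i,i+j]). A numpy-only sketch of that transform on toy coordinates, with the normalisation, blurring, and plotting options left out:

import numpy as np

def arrowhead(distmap):
    # A[i, i+j] compares distances on either side of position i, as in Generate_ArrowHead.
    n = distmap.shape[0]
    arrow = np.zeros_like(distmap, dtype=float)
    for i in range(n):
        for j in range(1, n - i):
            left, right = distmap[i, i - j], distmap[i, i + j]
            if left + right != 0:
                arrow[i, i + j] = (left - right) / (left + right)
                arrow[i + j, i] = arrow[i, i + j]   # keep the map symmetric
    return arrow

rng = np.random.default_rng(0)
pts = rng.normal(size=(20, 3))                      # toy 3D coordinates, not real tracing data
dist = np.linalg.norm(pts[:, None] - pts[None, :], axis=-1)
print(arrowhead(dist).shape)                        # (20, 20)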
rboman/progs
|
[
"c60b4e0487d01ccd007bcba79d1548ebe1685655",
"c60b4e0487d01ccd007bcba79d1548ebe1685655"
] |
[
"sandbox/qt/codingtrain/cc23_supershape2d.py",
"sandbox/fortranpython/navaro/fortranarrays.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# from \"SuperShape2D\" (Daniel Shiffman)\n# Video: https://youtu.be/ksRoh-10lak\n\n# supershapes: http://paulbourke.net/geometry/supershape/\n\nimport sys, os\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\nimport math\nimport numpy as np\n\n\ndef mapFromTo(x, a, b, c, d):\n \"\"\"map() function of javascript\"\"\"\n y = (float(x) - float(a))/(float(b) - float(a)) * \\\n (float(d) - float(c)) + float(c)\n return y\n\n\nclass SuperShape(QWidget):\n def __init__(self, parent=None, nstars=500):\n QWidget.__init__(self, parent)\n self.myTimerId = None\n\n self.setWindowTitle(\"Coding Train - Supershape2D\")\n self.setFixedSize(400, 400)\n\n # black background\n p = self.palette()\n p.setColor(self.backgroundRole(), Qt.black)\n self.setAutoFillBackground(True)\n self.setPalette(p)\n\n # parameters\n self.n1 = 0.3\n self.n2 = 0.3\n self.n3 = 0.3\n self.m = 5\n self.a = 1\n self.b = 1\n\n self.radius = 100\n\n def paintEvent(self, event):\n painter = QPainter(self)\n\n painter.translate(self.width()/2, self.height()/2)\n\n painter.setPen(Qt.white)\n #painter.setBrush(Qt.NoBrush)\n painter.setBrush(Qt.darkGray)\n\n total = 200\n increment = 2 * math.pi/total\n\n points = []\n for angle in np.arange(0, 2 * math.pi, increment):\n r = self.supershape(angle)\n x = self.radius * r * math.cos(angle)\n y = self.radius * r * math.sin(angle)\n points.append(QPoint(x, y))\n\n painter.drawPolygon(QPolygon(points))\n\n # write some info \n painter.resetTransform() \n font = painter.font()\n font.setPixelSize(10)\n painter.setFont(font)\n\n text=''\n for var in ['m','a','b','n1','n2','n3']:\n text += '%s = %f\\n' % (var, getattr(self,var))\n\n rectangle = painter.viewport().adjusted(10,10,-20,-20)\n boundingRect = painter.drawText(rectangle, 0, text)\n\n\n\n def supershape(self, theta):\n part1 = (1.0 / self.a) * math.cos(theta * self.m / 4.0)\n part1 = abs(part1)\n part1 = math.pow(part1, self.n2)\n\n part2 = (1.0 / self.b) * math.sin(theta * self.m / 4.0)\n part2 = abs(part2)\n part2 = math.pow(part2, self.n3)\n\n part3 = math.pow(part1 + part2, 1/self.n1)\n\n if part3 == 0.0:\n return 0.0\n return 1.0 / part3\n\n\nclass Window(QWidget):\n\n def __init__(self):\n QWidget.__init__(self)\n\n self.initUI()\n\n def buildSlider(self, widget, rmin, rmax, stp, name):\n slider = QSlider(Qt.Horizontal)\n slider.setMinimumWidth(200)\n slider.setRange(0, stp)\n slider.setValue( float(getattr(widget, name) -rmin) /(rmax-rmin) * stp )\n slider.valueChanged.connect(lambda x: setattr(widget, name, rmin+x*float(rmax-rmin)/stp))\n slider.valueChanged.connect(lambda x: widget.repaint()) \n return slider\n\n def initUI(self):\n\n iconfile = os.path.join(os.path.dirname(__file__), 'coding_train_icon.png')\n self.setWindowIcon(QIcon(iconfile))\n\n widget = SuperShape()\n\n vbox = QFormLayout()\n vbox.addRow(\"m\", self.buildSlider(widget, rmin=0, rmax=10, stp=100, name='m'))\n vbox.addRow(\"a\", self.buildSlider(widget, rmin=1, rmax=10, stp=100, name='a'))\n vbox.addRow(\"b\", self.buildSlider(widget, rmin=1, rmax=10, stp=100, name='b'))\n vbox.addRow(\"n1\", self.buildSlider(widget, rmin=0.1, rmax=1, stp=100, name='n1'))\n vbox.addRow(\"n2\", self.buildSlider(widget, rmin=0.1, rmax=1, stp=100, name='n2'))\n vbox.addRow(\"n3\", self.buildSlider(widget, rmin=0.1, rmax=1, stp=100, name='n3'))\n vbox.addRow(\"radius\", self.buildSlider(widget, rmin=1, rmax=500, stp=500, name='radius'))\n\n hbox = QHBoxLayout()\n\n hbox.addWidget(widget)\n 
hbox.addLayout(vbox)\n\n self.setLayout(hbox)\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = Window()\n ex.show()\n sys.exit(app.exec_())\n",
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport numpy as np \nimport navaro as na\n\nvitesses = np.array([[0, 1, 2], [0, 3, 2], [0, 1, 3]], dtype=np.float64)\npositions = np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]], dtype=np.float64)\nprint('type(pos) =', positions.dtype)\nna.move(positions, vitesses, 0.1)\nprint('p=', positions) #le tableau n'est pas mis a jour, stockage C\n\npositions = np.array(positions, dtype=np.int8, order='F')\nprint('type(pos) =', positions.dtype)\nna.move(positions, vitesses, 0.1)\nprint('p=', positions) #le tableau n'est pas mis a jour, mauvais type!\n\npositions = np.array(positions, dtype=np.float64, order='F')\nprint('type(pos) =', positions.dtype)\nna.move(positions, vitesses, 0.1)\nprint('p=', positions) #le tableau est modifie, stockage Fortran\n\n##########\n\nprint('A=', na.create_array(5))\n\n########\n\nA = np.array([[0, 1, 2], [2, 3, 4]], dtype=np.float64)\nB = np.array([[2, 1], [3, 2], [1, 9]], dtype=np.float64)\nC = na.mult_array(A,B)\nprint(\"C=\", C)\nprint(\"C=\", np.dot(A,B))\n\n"
] |
[
[
"numpy.arange"
],
[
"numpy.dot",
"numpy.array"
]
] |
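The supershape method in cc23_supershape2d.py evaluates the superformula r(theta) = (|cos(m*theta/4)/a|**n2 + |sin(m*theta/4)/b|**n3)**(-1/n1). A Qt-free sketch of the same radius function with the defaults from the widget's __init__, printing a few polygon points:

import math
import numpy as np

def supershape(theta, m=5, a=1.0, b=1.0, n1=0.3, n2=0.3, n3=0.3):
    # Same formula as SuperShape.supershape; defaults taken from SuperShape.__init__.
    part1 = abs(math.cos(theta * m / 4.0) / a) ** n2
    part2 = abs(math.sin(theta * m / 4.0) / b) ** n3
    total = (part1 + part2) ** (1.0 / n1)
    return 0.0 if total == 0.0 else 1.0 / total

radius = 100
for angle in np.arange(0.0, 2.0 * math.pi, 2.0 * math.pi / 8):   # 8 samples instead of 200
    r = supershape(angle)
    print(round(radius * r * math.cos(angle), 2), round(radius * r * math.sin(angle), 2))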
anthonyhu/domain-adapt
|
[
"8c390becdf7093e1cdb8e61c6be0f35e387db898"
] |
[
"keras_model/utils.py"
] |
[
"import numpy as np\nimport tensorflow as tf\nimport tensorflow.keras.backend as K\n\n\ndef preprocess_image(image, out_size):\n \"\"\" Tensorflow image preprocessing: bilinear resizing, and normalising to [-1, 1]\n\n Parameters\n ----------\n image: tf.Tensor\n output from tf.read_file\n\n outsize: tuple(int, int)\n defined as (height, width)\n\n Returns\n resized_image: tf.Tensor\n \"\"\"\n # Original image is (720, 1280)\n image = tf.image.decode_jpeg(image, channels=3)\n # Resize shortest side to 512\n new_size = (512, 910)\n image = tf.image.resize_images(image, new_size, method=tf.image.ResizeMethod.BILINEAR)\n image = tf.image.random_crop(image, (*out_size, 3))\n image = tf.image.random_flip_left_right(image)\n\n image = (2 * image - 255.0) / 255.0 # normalise to [-1, 1] range\n\n return image\n\n\ndef load_and_preprocess_image(path, out_size):\n image = tf.read_file(path)\n return preprocess_image(image, out_size)\n\n\ndef preprocess_vgg(x):\n x = 255 * (x + 1) / 2 # [-1,1] to [0, 255]\n x = x[..., ::-1] # RGB to BGR\n # Substract mean\n mean = K.constant([103.939, 116.779, 123.680])\n mean = K.reshape(mean, (1, 1, 1, 3))\n return x - mean\n\n\ndef convert_to_uint8(img):\n \"\"\" Convert image from floating point [-1, 1] to np.uint8 [0, 255]\"\"\"\n return np.uint8(255 * (img + 1) / 2)\n\n\ndef write_log(callback, names, logs, batch_number):\n \"\"\" Write logs to tensorboard\n\n Parameters\n ----------\n callback: tf.keras.callbacks.TensorBoard\n names: list<str>\n names of the scalars to save\n logs: list<float>\n values of the scalars to save\n batch_number: int\n \"\"\"\n for name, value in zip(names, logs):\n summary = tf.Summary()\n summary_value = summary.value.add()\n summary_value.simple_value = value\n summary_value.tag = name\n callback.writer.add_summary(summary, batch_number)\n callback.writer.flush()\n"
] |
[
[
"tensorflow.image.random_flip_left_right",
"tensorflow.read_file",
"tensorflow.image.resize_images",
"numpy.uint8",
"tensorflow.image.random_crop",
"tensorflow.keras.backend.reshape",
"tensorflow.keras.backend.constant",
"tensorflow.Summary",
"tensorflow.image.decode_jpeg"
]
] |
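utils.py above normalises images to [-1, 1] with (2*x - 255)/255 and converts back to uint8 with 255*(x + 1)/2. A small numpy round-trip check of those two mappings; no TensorFlow is needed for the arithmetic itself:

import numpy as np

def to_unit_range(img_uint8):
    # Same normalisation as preprocess_image: [0, 255] -> [-1, 1].
    return (2.0 * img_uint8.astype(np.float32) - 255.0) / 255.0

def convert_to_uint8(img):
    # Same as utils.convert_to_uint8: [-1, 1] -> np.uint8 in [0, 255].
    return np.uint8(255 * (img + 1) / 2)

original = np.random.randint(0, 256, size=(4, 4, 3), dtype=np.uint8)
restored = convert_to_uint8(to_unit_range(original))
print(np.max(np.abs(original.astype(int) - restored.astype(int))))  # off by at most 1 from float truncation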
TorchDrift/TorchDrift
|
[
"bda529a7a26c1a9b14d8d878a75122ece22b95d6"
] |
[
"torchdrift/detectors/mmd.py"
] |
[
"from typing import Optional\n\nimport torch\n\nfrom . import Detector\nimport torchdrift.utils\n\n\nclass Kernel:\n \"\"\"Base class for kernels\n\n Unless otherwise noted, all kernels implementing lengthscale detection\n use the median of pairwise distances as the lengthscale.\"\"\"\n\n pass\n\n\nclass GaussianKernel(Kernel):\n r\"\"\"Unnormalized gaussian kernel\n\n .. math::\n k(|x-y|) = \\exp(-|x-y|^2/(2\\ell^2))\n\n where :math:`\\ell` is the `lengthscale` (autodetected or given).\"\"\"\n\n def __init__(self, lengthscale=None):\n super().__init__()\n self.lengthscale = lengthscale\n\n def __call__(self, dists):\n # note that lengthscale should be squared in the RBF to match the Gretton et al heuristic\n if self.lengthscale is not None:\n lengthscale = self.lengthscale\n else:\n lengthscale = dists.median()\n return torch.exp((-0.5 / lengthscale ** 2) * dists ** 2)\n\n\nclass ExpKernel(Kernel):\n r\"\"\"Unnormalized exponential kernel\n\n .. math::\n k(|x-y|) = \\exp(-|x-y|/\\ell)\n\n where :math:`\\ell` is the `lengthscale` (autodetected or given).\"\"\"\n\n def __init__(self, lengthscale=None):\n super().__init__()\n self.lengthscale = lengthscale\n\n def __call__(self, dists):\n if self.lengthscale is not None:\n lengthscale = self.lengthscale\n else:\n lengthscale = dists.median()\n return torch.exp((-1 / lengthscale) * dists)\n\n\nclass RationalQuadraticKernel(Kernel):\n r\"\"\"Unnormalized rational quadratic kernel\n\n .. math::\n k(|x-y|) = (1+|x-y|^2/(2 \\alpha \\ell^2))^{-\\alpha}\n\n where :math:`\\ell` is the `lengthscale` (autodetected or given).\"\"\"\n\n def __init__(self, lengthscale=None, alpha=1.0):\n super().__init__()\n self.alpha = alpha\n self.lengthscale = lengthscale\n\n def __call__(self, dists):\n if self.lengthscale is not None:\n lengthscale = self.lengthscale\n else:\n lengthscale = dists.median()\n return torch.pow(\n 1 + (1 / (2 * self.alpha * lengthscale ** 2)) * dists ** 2, -self.alpha\n )\n\n\ndef kernel_mmd(x, y, n_perm=1000, kernel=GaussianKernel()):\n \"\"\"Implements the kernel MMD two-sample test.\n\n It is modelled after the kernel MMD paper and code:\n A. 
Gretton et al.: A kernel two-sample test, JMLR 13 (2012)\n http://www.gatsby.ucl.ac.uk/~gretton/mmd/mmd.htm\n\n The arguments `x` and `y` should be two-dimensional tensors.\n The first is the batch dimension (which may differ), the second\n the features (which must be the same on both `x` and `y`).\n\n `n_perm` is number of bootstrap permutations to get p-value, pass `None` to not get p-value.\n \"\"\"\n\n n, d = x.shape\n m, d2 = y.shape\n torchdrift.utils.check(d == d2, \"feature dimension mismatch\")\n xy = torch.cat([x.detach(), y.detach()], dim=0)\n dists = torch.cdist(xy, xy, p=2.0)\n # we are a bit sloppy here as we just keep the diagonal and everything twice\n k = kernel(dists)\n k_x = k[:n, :n]\n k_y = k[n:, n:]\n k_xy = k[:n, n:]\n # The diagonals are always 1 (up to numerical error, this is (3) in Gretton et al.)\n # note that their code uses the biased (and differently scaled mmd)\n mmd = (\n k_x.sum() / (n * (n - 1)) + k_y.sum() / (m * (m - 1)) - 2 * k_xy.sum() / (n * m)\n )\n if n_perm is None:\n return mmd\n mmd_0s = []\n count = 0\n for i in range(n_perm):\n # this isn't efficient, it would be lovely to generate a cuda kernel or C++ for loop and do the\n # permutation on the fly...\n pi = torch.randperm(n + m, device=x.device)\n k = k[pi][:, pi]\n k_x = k[:n, :n]\n k_y = k[n:, n:]\n k_xy = k[:n, n:]\n # The diagonals are always 1 (up to numerical error, this is (3) in Gretton et al.)\n mmd_0 = (\n k_x.sum() / (n * (n - 1))\n + k_y.sum() / (m * (m - 1))\n - 2 * k_xy.sum() / (n * m)\n )\n mmd_0s.append(mmd_0)\n count = count + (mmd_0 > mmd)\n # pyplot.hist(torch.stack(mmd_0s, dim=0).tolist(), bins=50)\n # true_divide: torch 1.6 compat replace with \"/\" after October 2021\n p_val = torch.true_divide(count, n_perm)\n\n return mmd, p_val\n\n\nclass KernelMMDDriftDetector(Detector):\n \"\"\"Drift detector based on the kernel Maximum Mean Discrepancy (MMD) test.\n\n This is modelled after the MMD drift detection in\n S. Rabanser et al: *Failing Loudly: An Empirical Study of Methods for Detecting Dataset Shift* (NeurIPS), 2019.\n\n Note that our heuristic choice of the kernel bandwith is more closely aligned with that of the original MMD paper and code than S. Rabanser's.\n\n The default kernel is the unnormalized Gaussian (or Squared Exponential) kernel.\n \"\"\"\n\n def __init__(\n self, *, return_p_value=False, n_perm: int = 1000, kernel=GaussianKernel()\n ):\n super().__init__(return_p_value=return_p_value)\n self.n_perm = n_perm\n self.kernel = kernel\n self.n_test = None\n self.scores = None\n\n def fit(self, x: torch.Tensor, n_test=None):\n \"\"\"Record a sample as the reference distribution\n\n Args:\n x: The reference data\n n_test: If an int is specified, the last n_test datapoints\n will not be considered part of the reference data. 
Instead,\n bootstrappin using permutations will be used to determine\n the distribution under the null hypothesis at fit time.\n Future testing must then always be done with n_test elements\n to get p-values.\n\"\"\"\n x = x.detach()\n if n_test is None:\n self.base_outputs = x\n else:\n torchdrift.utils.check(0 < n_test < x.size(0), \"n_test must be strictly between 0 and the number of samples\")\n self.n_test = n_test\n self.base_outputs = x[:-n_test]\n\n n_ref = x.size(0) - n_test\n\n scores = []\n for i in range(self.n_perm):\n slicing = torch.randperm(x.size(0))\n scores.append(kernel_mmd(\n x[slicing[:-n_test]], x[slicing[-n_test:]], n_perm=None, kernel=self.kernel))\n scores = torch.stack(scores)\n\n # limited smallish sample sizes, the MMD appears to exhibit a nonzero offset\n # which vanishes in the limit we adapt. After correcting this, the gamma distribution\n # approximation suggested by Gretton et al seems very good.\n self.dist_min = scores.min().double()\n mean = scores.mean() - self.dist_min\n var = scores.var().double()\n self.dist_alpha = mean**2 / var\n self.dist_beta = mean / var\n self.scores = scores\n\n def predict_shift_from_features(\n self,\n base_outputs: torch.Tensor,\n outputs: torch.Tensor,\n compute_score: bool,\n compute_p_value: bool,\n individual_samples: bool = False,\n ):\n torchdrift.utils.check(\n not individual_samples, \"Individual samples not supported by MMD detector\"\n )\n if not compute_p_value:\n ood_score = kernel_mmd(\n base_outputs, outputs, n_perm=None, kernel=self.kernel\n )\n p_value = None\n elif self.n_test is None:\n ood_score, p_value = kernel_mmd(\n base_outputs, outputs, n_perm=self.n_perm, kernel=self.kernel\n )\n else:\n torchdrift.utils.check(self.n_test == outputs.size(0),\n \"number of test samples does not match calibrated number\")\n ood_score = kernel_mmd(\n base_outputs, outputs, n_perm=None, kernel=self.kernel\n )\n p_value = torch.igammac(self.dist_alpha, self.dist_beta * (ood_score - self.dist_min).clamp(min=0)) # needs PyTorch >=1.8\n\n return ood_score, p_value\n"
] |
[
[
"torch.true_divide",
"torch.randperm",
"torch.exp",
"torch.cdist",
"torch.stack",
"torch.pow"
]
] |
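kernel_mmd above computes the MMD statistic with torch.cdist and a Gaussian kernel whose lengthscale defaults to the median pairwise distance. A condensed, p-value-free sketch of that statistic, mirroring the sums in the file:

import torch

def mmd_gaussian(x, y):
    # x: (n, d), y: (m, d); lengthscale = median pairwise distance, as in GaussianKernel.
    n, m = x.size(0), y.size(0)
    xy = torch.cat([x, y], dim=0)
    dists = torch.cdist(xy, xy, p=2.0)
    lengthscale = dists.median()
    k = torch.exp((-0.5 / lengthscale ** 2) * dists ** 2)
    k_x, k_y, k_xy = k[:n, :n], k[n:, n:], k[:n, n:]
    # Same (slightly sloppy, diagonal-included) estimator as kernel_mmd.
    return k_x.sum() / (n * (n - 1)) + k_y.sum() / (m * (m - 1)) - 2 * k_xy.sum() / (n * m)

torch.manual_seed(0)
same = mmd_gaussian(torch.randn(100, 5), torch.randn(100, 5))
shifted = mmd_gaussian(torch.randn(100, 5), torch.randn(100, 5) + 1.0)
print(float(same), float(shifted))  # the shifted pair should score noticeably higher

Under drift the statistic grows because the cross term k_xy shrinks relative to the within-sample terms.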
enricovian/GraphSAGE
|
[
"0cdda29dbc075fb8f3441c15638d1b06de992a57"
] |
[
"graphsage/layers.py"
] |
[
"from __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom graphsage.inits import zeros\n\nflags = tf.app.flags\nFLAGS = flags.FLAGS\n\n# DISCLAIMER:\n# Boilerplate parts of this code file were originally forked from\n# https://github.com/tkipf/gcn\n# which itself was very inspired by the keras package\n\n# global unique layer ID dictionary for layer name assignment\n_LAYER_UIDS = {}\n\ndef get_layer_uid(layer_name=''):\n \"\"\"Helper function, assigns unique layer IDs.\"\"\"\n if layer_name not in _LAYER_UIDS:\n _LAYER_UIDS[layer_name] = 1\n return 1\n else:\n _LAYER_UIDS[layer_name] += 1\n return _LAYER_UIDS[layer_name]\n\nclass Layer(object):\n \"\"\"Base layer class. Defines basic API for all layer objects.\n Implementation inspired by keras (http://keras.io).\n # Properties\n name: String, defines the variable scope of the layer.\n logging: Boolean, switches Tensorflow histogram logging on/off\n\n # Methods\n _call(inputs): Defines computation graph of layer\n (i.e. takes input, returns output)\n __call__(inputs): Wrapper for _call()\n _log_vars(): Log all variables\n \"\"\"\n\n def __init__(self, **kwargs):\n allowed_kwargs = {'name', 'logging', 'model_size'}\n for kwarg in kwargs.keys():\n assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg\n name = kwargs.get('name')\n if not name:\n layer = self.__class__.__name__.lower()\n name = layer + '_' + str(get_layer_uid(layer))\n self.name = name\n self.vars = {}\n logging = kwargs.get('logging', False)\n self.logging = logging\n self.sparse_inputs = False\n\n def _call(self, inputs):\n return inputs\n\n def __call__(self, inputs):\n with tf.name_scope(self.name):\n if self.logging and not self.sparse_inputs:\n tf.summary.histogram(self.name + '/inputs', inputs)\n outputs = self._call(inputs)\n if self.logging:\n tf.summary.histogram(self.name + '/outputs', outputs)\n return outputs\n\n def _log_vars(self):\n for var in self.vars:\n tf.summary.histogram(self.name + '/vars/' + var, self.vars[var])\n\n\nclass Dense(Layer):\n \"\"\"Dense layer.\"\"\"\n def __init__(self, input_dim, output_dim, dropout=0.,\n act=tf.nn.relu, placeholders=None, bias=True, featureless=False,\n sparse_inputs=False, **kwargs):\n super(Dense, self).__init__(**kwargs)\n\n self.dropout = dropout\n\n self.act = act\n self.featureless = featureless\n self.bias = bias\n self.input_dim = input_dim\n self.output_dim = output_dim\n\n # helper variable for sparse dropout\n self.sparse_inputs = sparse_inputs\n if sparse_inputs:\n self.num_features_nonzero = placeholders['num_features_nonzero']\n\n with tf.variable_scope(self.name + '_vars'):\n self.vars['weights'] = tf.get_variable('weights', shape=(input_dim, output_dim),\n dtype=tf.float32,\n initializer=tf.contrib.layers.xavier_initializer(),\n regularizer=tf.contrib.layers.l2_regularizer(FLAGS.weight_decay))\n if self.bias:\n self.vars['bias'] = zeros([output_dim], name='bias')\n\n if self.logging:\n self._log_vars()\n\n def _call(self, inputs):\n x = inputs\n\n x = tf.nn.dropout(x, 1-self.dropout)\n\n # transform\n output = tf.matmul(x, self.vars['weights'])\n\n # bias\n if self.bias:\n output += self.vars['bias']\n\n return self.act(output)\n"
] |
[
[
"tensorflow.matmul",
"tensorflow.name_scope",
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.contrib.layers.l2_regularizer",
"tensorflow.variable_scope",
"tensorflow.nn.dropout",
"tensorflow.summary.histogram"
]
] |
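get_layer_uid in layers.py hands out a per-class counter so every Layer instance gets a unique default name. A dependency-free sketch of that naming scheme, with the TensorFlow 1.x specifics (tf.contrib initializers, summary histograms) deliberately left out:

_LAYER_UIDS = {}

def get_layer_uid(layer_name=''):
    # Start or increment the counter kept for this layer class name.
    _LAYER_UIDS[layer_name] = _LAYER_UIDS.get(layer_name, 0) + 1
    return _LAYER_UIDS[layer_name]

class Layer:
    # Simplified version of the base class: only the default-name logic is kept.
    def __init__(self, name=None):
        if not name:
            cls = self.__class__.__name__.lower()
            name = cls + '_' + str(get_layer_uid(cls))
        self.name = name

class Dense(Layer):
    pass

print(Dense().name, Dense().name)  # dense_1 dense_2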
sindura-sriram/biobert
|
[
"a296da725d8631483a083cc18af14145653b4c58"
] |
[
"extract_features.py"
] |
[
"# Databricks notebook source\n# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Extract pre-computed feature vectors from BERT.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport codecs\nimport collections\nimport json\nimport re\n\nimport modeling\nimport tokenization\nimport tensorflow as tf\n\nflags = tf.flags\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\"input_file\", None, \"\")\n\nflags.DEFINE_string(\"output_file\", None, \"\")\n\nflags.DEFINE_string(\"layers\", \"-1,-2,-3,-4\", \"\")\n\nflags.DEFINE_string(\n \"bert_config_file\", None,\n \"The config json file corresponding to the pre-trained BERT model. \"\n \"This specifies the model architecture.\")\n\nflags.DEFINE_integer(\n \"max_seq_length\", 128,\n \"The maximum total input sequence length after WordPiece tokenization. \"\n \"Sequences longer than this will be truncated, and sequences shorter \"\n \"than this will be padded.\")\n\nflags.DEFINE_string(\n \"init_checkpoint\", None,\n \"Initial checkpoint (usually from a pre-trained BERT model).\")\n\nflags.DEFINE_string(\"vocab_file\", None,\n \"The vocabulary file that the BERT model was trained on.\")\n\nflags.DEFINE_bool(\n \"do_lower_case\", True,\n \"Whether to lower case the input text. Should be True for uncased \"\n \"models and False for cased models.\")\n\nflags.DEFINE_integer(\"batch_size\", 32, \"Batch size for predictions.\")\n\nflags.DEFINE_bool(\"use_tpu\", False, \"Whether to use TPU or GPU/CPU.\")\n\nflags.DEFINE_string(\"master\", None,\n \"If using a TPU, the address of the master.\")\n\nflags.DEFINE_integer(\n \"num_tpu_cores\", 8,\n \"Only used if `use_tpu` is True. Total number of TPU cores to use.\")\n\nflags.DEFINE_bool(\n \"use_one_hot_embeddings\", False,\n \"If True, tf.one_hot will be used for embedding lookups, otherwise \"\n \"tf.nn.embedding_lookup will be used. 
On TPUs, this should be True \"\n \"since it is much faster.\")\n\n\nclass InputExample(object):\n\n def __init__(self, unique_id, text_a, text_b):\n self.unique_id = unique_id\n self.text_a = text_a\n self.text_b = text_b\n\n\nclass InputFeatures(object):\n \"\"\"A single set of features of data.\"\"\"\n\n def __init__(self, unique_id, tokens, input_ids, input_mask, input_type_ids):\n self.unique_id = unique_id\n self.tokens = tokens\n self.input_ids = input_ids\n self.input_mask = input_mask\n self.input_type_ids = input_type_ids\n\n\ndef input_fn_builder(features, seq_length):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n all_unique_ids = []\n all_input_ids = []\n all_input_mask = []\n all_input_type_ids = []\n\n for feature in features:\n all_unique_ids.append(feature.unique_id)\n all_input_ids.append(feature.input_ids)\n all_input_mask.append(feature.input_mask)\n all_input_type_ids.append(feature.input_type_ids)\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n num_examples = len(features)\n\n # This is for demo purposes and does NOT scale to large data sets. We do\n # not use Dataset.from_generator() because that uses tf.py_func which is\n # not TPU compatible. The right way to load data is with TFRecordReader.\n d = tf.data.Dataset.from_tensor_slices({\n \"unique_ids\":\n tf.constant(all_unique_ids, shape=[num_examples], dtype=tf.int32),\n \"input_ids\":\n tf.constant(\n all_input_ids, shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_mask\":\n tf.constant(\n all_input_mask,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_type_ids\":\n tf.constant(\n all_input_type_ids,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n })\n\n d = d.batch(batch_size=batch_size, drop_remainder=False)\n return d\n\n return input_fn\n\n\ndef model_fn_builder(bert_config, init_checkpoint, layer_indexes, use_tpu,\n use_one_hot_embeddings):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n unique_ids = features[\"unique_ids\"]\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n input_type_ids = features[\"input_type_ids\"]\n\n model = modeling.BertModel(\n config=bert_config,\n is_training=False,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=input_type_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n if mode != tf.estimator.ModeKeys.PREDICT:\n raise ValueError(\"Only PREDICT modes are supported: %s\" % (mode))\n\n tvars = tf.trainable_variables()\n scaffold_fn = None\n (assignment_map,\n initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(\n tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n all_layers = model.get_all_encoder_layers()\n\n predictions = {\n \"unique_id\": unique_ids,\n }\n\n for (i, layer_index) in enumerate(layer_indexes):\n predictions[\"layer_output_%d\" % i] = 
all_layers[layer_index]\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)\n return output_spec\n\n return model_fn\n\n\ndef convert_examples_to_features(examples, seq_length, tokenizer):\n \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n\n features = []\n for (ex_index, example) in enumerate(examples):\n tokens_a = tokenizer.tokenize(example.text_a)\n\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n\n if tokens_b:\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > seq_length - 2:\n tokens_a = tokens_a[0:(seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambiguously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = []\n input_type_ids = []\n tokens.append(\"[CLS]\")\n input_type_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n input_type_ids.append(0)\n tokens.append(\"[SEP]\")\n input_type_ids.append(0)\n\n if tokens_b:\n for token in tokens_b:\n tokens.append(token)\n input_type_ids.append(1)\n tokens.append(\"[SEP]\")\n input_type_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < seq_length:\n input_ids.append(0)\n input_mask.append(0)\n input_type_ids.append(0)\n\n assert len(input_ids) == seq_length\n assert len(input_mask) == seq_length\n assert len(input_type_ids) == seq_length\n\n if ex_index < 5:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"unique_id: %s\" % (example.unique_id))\n tf.logging.info(\"tokens: %s\" % \" \".join(\n [tokenization.printable_text(x) for x in tokens]))\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n tf.logging.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n tf.logging.info(\n \"input_type_ids: %s\" % \" \".join([str(x) for x in input_type_ids]))\n\n features.append(\n InputFeatures(\n unique_id=example.unique_id,\n tokens=tokens,\n input_ids=input_ids,\n input_mask=input_mask,\n input_type_ids=input_type_ids))\n return features\n\n\ndef _truncate_seq_pair(tokens_a, tokens_b, max_length):\n \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\"\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. 
This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()\n\n\ndef read_examples(input_file):\n \"\"\"Read a list of `InputExample`s from an input file.\"\"\"\n examples = []\n unique_id = 0\n with tf.gfile.GFile(input_file, \"r\") as reader:\n while True:\n line = tokenization.convert_to_unicode(reader.readline())\n if not line:\n break\n line = line.strip()\n text_a = None\n text_b = None\n m = re.match(r\"^(.*) \\|\\|\\| (.*)$\", line)\n if m is None:\n text_a = line\n else:\n text_a = m.group(1)\n text_b = m.group(2)\n examples.append(\n InputExample(unique_id=unique_id, text_a=text_a, text_b=text_b))\n unique_id += 1\n return examples\n\n\ndef main(_):\n tf.logging.set_verbosity(tf.logging.INFO)\n\n layer_indexes = [int(x) for x in FLAGS.layers.split(\",\")]\n\n bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)\n\n tokenizer = tokenization.FullTokenizer(\n vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)\n\n is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2\n run_config = tf.contrib.tpu.RunConfig(\n master=FLAGS.master,\n tpu_config=tf.contrib.tpu.TPUConfig(\n num_shards=FLAGS.num_tpu_cores,\n per_host_input_for_training=is_per_host))\n\n examples = read_examples(FLAGS.input_file)\n\n features = convert_examples_to_features(\n examples=examples, seq_length=FLAGS.max_seq_length, tokenizer=tokenizer)\n\n unique_id_to_feature = {}\n for feature in features:\n unique_id_to_feature[feature.unique_id] = feature\n\n model_fn = model_fn_builder(\n bert_config=bert_config,\n init_checkpoint=FLAGS.init_checkpoint,\n layer_indexes=layer_indexes,\n use_tpu=FLAGS.use_tpu,\n use_one_hot_embeddings=FLAGS.use_one_hot_embeddings)\n\n # If TPU is not available, this will fall back to normal Estimator on CPU\n # or GPU.\n estimator = tf.contrib.tpu.TPUEstimator(\n use_tpu=FLAGS.use_tpu,\n model_fn=model_fn,\n config=run_config,\n predict_batch_size=FLAGS.batch_size)\n\n input_fn = input_fn_builder(\n features=features, seq_length=FLAGS.max_seq_length)\n\n with codecs.getwriter(\"utf-8\")(tf.gfile.Open(FLAGS.output_file,\n \"w\")) as writer:\n for result in estimator.predict(input_fn, yield_single_examples=True):\n unique_id = int(result[\"unique_id\"])\n feature = unique_id_to_feature[unique_id]\n output_json = collections.OrderedDict()\n output_json[\"linex_index\"] = unique_id\n all_features = []\n for (i, token) in enumerate(feature.tokens):\n all_layers = []\n for (j, layer_index) in enumerate(layer_indexes):\n layer_output = result[\"layer_output_%d\" % j]\n layers = collections.OrderedDict()\n layers[\"index\"] = layer_index\n layers[\"values\"] = [\n round(float(x), 6) for x in layer_output[i:(i + 1)].flat\n ]\n all_layers.append(layers)\n features = collections.OrderedDict()\n features[\"token\"] = token\n features[\"layers\"] = all_layers\n all_features.append(features)\n output_json[\"features\"] = all_features\n writer.write(json.dumps(output_json) + \"\\n\")\n\n\nif __name__ == \"__main__\":\n flags.mark_flag_as_required(\"input_file\")\n flags.mark_flag_as_required(\"vocab_file\")\n flags.mark_flag_as_required(\"bert_config_file\")\n flags.mark_flag_as_required(\"init_checkpoint\")\n 
flags.mark_flag_as_required(\"output_file\")\n tf.app.run()\n"
] |
[
[
"tensorflow.contrib.tpu.TPUEstimator",
"tensorflow.train.Scaffold",
"tensorflow.constant",
"tensorflow.gfile.Open",
"tensorflow.gfile.GFile",
"tensorflow.train.init_from_checkpoint",
"tensorflow.logging.info",
"tensorflow.logging.set_verbosity",
"tensorflow.contrib.tpu.TPUConfig",
"tensorflow.trainable_variables",
"tensorflow.contrib.tpu.TPUEstimatorSpec",
"tensorflow.app.run"
]
] |
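_truncate_seq_pair in extract_features.py trims the longer of two token lists one token at a time until the pair fits the sequence budget (max_seq_length minus the [CLS]/[SEP] markers). A standalone illustration of that heuristic, using the token example from the file's own comments:

def truncate_seq_pair(tokens_a, tokens_b, max_length):
    # Always shorten whichever list is currently longer, as in _truncate_seq_pair.
    while len(tokens_a) + len(tokens_b) > max_length:
        if len(tokens_a) > len(tokens_b):
            tokens_a.pop()
        else:
            tokens_b.pop()

a = ['is', 'this', 'jack', '##son', '##ville', '?']
b = ['no', 'it', 'is', 'not', '.']
truncate_seq_pair(a, b, 8)   # budget of 8 tokens, e.g. max_seq_length=11 minus the 3 markers
print(a, b)                  # ['is', 'this', 'jack', '##son'] ['no', 'it', 'is', 'not']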
intuinno/visinsight
|
[
"94ef3f9f7921e272c59a4e503fde897414d9adab"
] |
[
"sample.py"
] |
[
"import torch\r\nimport matplotlib.pyplot as plt\r\nimport argparse\r\nimport pickle \r\nfrom torch.autograd import Variable \r\nfrom torchvision import transforms \r\nfrom data_loader import build_vocab \r\nfrom model import EncoderCNN, DecoderRNN\r\nfrom model import ResNet, ResidualBlock\r\nfrom attn_model import ResidualBlock, AttnEncoder, AttnDecoderRnn\r\nfrom PIL import Image\r\n\r\n\r\ndef to_var(x, volatile=False):\r\n if torch.cuda.is_available():\r\n x = x.cuda()\r\n return Variable(x, volatile=volatile)\r\n\r\ndef load_image(image_path, transform):\r\n image = Image.open(image_path).convert('RGB')\r\n image = image.resize([64, 64], Image.LANCZOS)\r\n \r\n if transform is not None:\r\n image = transform(image).unsqueeze(0)\r\n \r\n return image\r\n \r\ndef main(args):\r\n # Image preprocessing\r\n transform = transforms.Compose([ \r\n transforms.ToTensor(), \r\n transforms.Normalize((0.033, 0.032, 0.033), \r\n (0.027, 0.027, 0.027))])\r\n\r\n # Load vocabulary wrapper\r\n with open(args.vocab_path, 'rb') as f:\r\n vocab = pickle.load(f)\r\n len_vocab = vocab.idx \r\n\r\n # Build Models\r\n encoder = ResNet(ResidualBlock, [3, 3, 3], len_vocab)\r\n encoder.eval() # evaluation mode (BN uses moving mean/variance)\r\n\r\n decoder = DecoderRNN(len_vocab, args.hidden_size, \r\n len(vocab), args.num_layers)\r\n \r\n attn_encoder = AttnEncoder(ResidualBlock, [3, 3, 3])\r\n attn_encoder.eval()\r\n attn_decoder = AttnDecoderRnn(args.feature_size, args.hidden_size, \r\n len(vocab), args.num_layers)\r\n\r\n # Load the trained model parameters\r\n attn_encoder.load_state_dict(torch.load(args.encoder_path))\r\n attn_decoder.load_state_dict(torch.load(args.decoder_path))\r\n\r\n\r\n # Prepare Image\r\n image = load_image(args.image, transform)\r\n image_tensor = to_var(image, volatile=True)\r\n\r\n # If use gpu\r\n if torch.cuda.is_available():\r\n attn_encoder.cuda()\r\n attn_decoder.cuda()\r\n \r\n # Generate caption from image\r\n feature = attn_encoder(image_tensor)\r\n sampled_ids = attn_decoder.sample(feature)\r\n ids_arr = []\r\n for element in sampled_ids: \r\n temp = element.cpu().data.numpy()\r\n ids_arr.append(int(temp))\r\n\r\n \r\n # Decode word_ids to words\r\n sampled_caption = []\r\n for word_id in ids_arr:\r\n word = vocab.idx2word[word_id]\r\n sampled_caption.append(word)\r\n if word == '<end>':\r\n break\r\n sentence = ' '.join(sampled_caption)\r\n \r\n # Print out image and generated caption.\r\n print (sentence)\r\n #image = Image.open(args.image)\r\n #plt.imshow(np.asarray(image))\r\n \r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--image', type=str, required=True,\r\n help='input image for generating caption')\r\n parser.add_argument('--encoder_path', type=str, default='./models/3object/encoder-50-150.pkl',\r\n help='path for trained encoder')\r\n parser.add_argument('--decoder_path', type=str, default='./models/3object/decoder-50-150.pkl',\r\n help='path for trained decoder')\r\n parser.add_argument('--vocab_path', type=str, default='./data/vocab_3object.pkl',\r\n help='path for vocabulary wrapper')\r\n parser.add_argument('--root_path', type=str, default='data/3object/',\r\n help='path for root')\r\n \r\n # Model parameters (should be same as paramters in train.py)\r\n parser.add_argument('--embed_size', type=int , default=256,\r\n help='dimension of word embedding vectors')\r\n parser.add_argument('--feature_size', type=int , default=256,\r\n help='dimension of word embedding vectors')\r\n 
parser.add_argument('--hidden_size', type=int , default=512,\r\n help='dimension of lstm hidden states')\r\n parser.add_argument('--num_layers', type=int , default=1 ,\r\n help='number of layers in lstm')\r\n args = parser.parse_args()\r\n main(args)\r\n"
] |
[
[
"torch.load",
"torch.cuda.is_available",
"torch.autograd.Variable"
]
] |
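sample.py above turns the decoder's sampled word ids back into text by looking each id up in vocab.idx2word and stopping at '<end>'. A toy, model-free sketch of that decoding step; the vocabulary here is invented for illustration, not loaded from the project's pickle:

# Hypothetical stand-in for the pickled vocabulary wrapper's idx2word mapping.
idx2word = {0: '<start>', 1: 'a', 2: 'red', 3: 'circle', 4: '<end>', 5: '<pad>'}
sampled_ids = [0, 1, 2, 3, 4, 5, 5]

caption = []
for word_id in sampled_ids:
    word = idx2word[word_id]
    caption.append(word)
    if word == '<end>':
        break               # everything after <end> is ignored, as in main()
print(' '.join(caption))    # <start> a red circle <end>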
CongBao/ImageEnhancer
|
[
"0f1b3131a2a573f9d00ec9cb6bb0ce69dac169f2"
] |
[
"ie/utilities/image_io.py"
] |
[
"\"\"\" Load images from file \"\"\"\n\nfrom __future__ import division\n\nimport os\nimport random\n\nimport numpy as np\nfrom scipy import misc\nfrom tqdm import tqdm\nfrom joblib import Parallel, delayed\n\n__author__ = 'Cong Bao'\n\ndef _load_img(img_list, img_dir, i):\n return misc.imread(img_dir + img_list[i])\n\ndef load_img(img_dir, shape, ratio=(0.7, 0.15, 0.15), thread=2):\n \"\"\" load images from file system\n :param img_dir: the directory of images\n :param shape: the width, height, and channel of each image\n :param ratio: ratio to separate train, validation, and test sets, default (0.7, 0.15, 0.15), if None, no separation\n :param thread: number of threads to be used, default 2\n :return: separated data sets in a tuple: (training set, validation set, test set), or a single numpy array if ratio is None\n \"\"\"\n fmt = 'Loading {part} dataset: {{percentage:3.0f}}% {{r_bar}}'\n width, height, channel = shape\n # all images path\n img_list = os.listdir(img_dir)\n # number of each part\n total_num = len(img_list)\n # do not separate\n if ratio is None:\n data_set = []\n data_set.extend(Parallel(n_jobs=thread)(delayed(_load_img)(img_list, img_dir, i) for i in tqdm(range(total_num), bar_format=fmt.format(part='total'))))\n data_set = np.asarray(data_set, 'uint8').reshape((total_num, width, height, channel))\n return data_set\n # separate train, valid, and test\n train_num = int(total_num * ratio[0])\n valid_num = int(total_num * ratio[1])\n test_num = total_num - train_num - valid_num\n # load train set\n train_list = random.sample(img_list, train_num)\n train_set = []\n train_set.extend(Parallel(n_jobs=thread)(delayed(_load_img)(train_list, img_dir, i) for i in tqdm(range(train_num), bar_format=fmt.format(part='train'))))\n # load validation set\n valid_list = random.sample(set(img_list) - set(train_list), valid_num)\n valid_set = []\n valid_set.extend(Parallel(n_jobs=thread)(delayed(_load_img)(valid_list, img_dir, i) for i in tqdm(range(valid_num), bar_format=fmt.format(part='validation'))))\n # load test set\n test_list = list(set(img_list) - set(train_list) - set(valid_list))\n test_set = []\n test_set.extend(Parallel(n_jobs=thread)(delayed(_load_img)(test_list, img_dir, i) for i in tqdm(range(test_num), bar_format=fmt.format(part='test'))))\n # transfer to numpy array\n train_set = np.asarray(train_set, 'uint8').reshape((train_num, width, height, channel))\n valid_set = np.asarray(valid_set, 'uint8').reshape((valid_num, width, height, channel))\n test_set = np.asarray(test_set, 'uint8').reshape((test_num, width, height, channel))\n return (train_set, valid_set, test_set)\n\ndef save_img(img_dir, img_set):\n \"\"\" save images to file system\n :param img_dir: the directory to store\n :param img_set: an numpy array in the shape of (batch, width, height, channel)\n \"\"\"\n fmt = 'Saving processed images: {percentage:3.0f}% {r_bar}'\n for i, img in tqdm(enumerate(img_set), bar_format=fmt, total=np.shape(img_set)[0]):\n misc.imsave(img_dir + 'processed.' + str(i) + '.png', img)\n"
] |
[
[
"numpy.asarray",
"numpy.shape",
"scipy.misc.imread"
]
] |
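`scipy.misc.imread`, used in the loader above, has since been removed from SciPy. A minimal sketch of the same parallel-loading idea with `imageio` swapped in as the reader; the directory path and thread count are placeholders, and it assumes all images share one shape, as the original reshape does.

```python
import os
import imageio
import numpy as np
from joblib import Parallel, delayed

def load_images(img_dir: str, threads: int = 2) -> np.ndarray:
    """Read every image in img_dir in parallel and stack them into one uint8 array."""
    paths = [os.path.join(img_dir, name) for name in sorted(os.listdir(img_dir))]
    images = Parallel(n_jobs=threads)(delayed(imageio.imread)(p) for p in paths)
    return np.asarray(images, dtype="uint8")
```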
saeedm31/ACTIN
|
[
"44c3d4d729b5173cf1f5b0a5fd1e7a8659c6636e"
] |
[
"actin/actin_files/ac_read_data.py"
] |
[
"#!/usr/bin/env python\n\n\n# compatibility with python 2/3:\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport sys, os\nimport numpy as np\nimport glob\n\nimport astropy.io.fits as pyfits\nimport matplotlib.pylab as plt\n\n# ACTIN modules:\nimport ac_tools\nimport ac_settings as ac_set\n\n\ndef check_for_calib_files(e2ds_header, file_type, folder, dif_time_max=1.0, plot_spec=False):\n \"\"\"\n Check for calibration files (wave or blaze) in the working directory\n in case of predetermined files are not present and choses the ones with\n less than 1 day difference from the original e2ds file.\n\n Parameters:\n -----------\n e2ds_header : fits header\n Fits header of e2ds fits file.\n file_type : str\n File type to be searched, either 'wave' or 'blaze'.\n folder : str\n Path to the fits files.\n dif_time_max : float (optional)\n Maximum difference in time between e2ds and\n the chosen calib file [days].\n\n Returns:\n --------\n calib_pfile : {str, None}\n Selected calibration file with path included, None if\n file not found.\n \"\"\"\n\n print(\"Executing: check_for_calib_files\")\n\n mjd_e2ds = e2ds_header['MJD-OBS']\n\n filename = os.path.join(folder, '*{}_A.fits'.format(file_type))\n calib_pfiles = glob.glob(filename)\n if len(calib_pfiles) > 0:\n\n time_dif = []\n\n for k in range(len(calib_pfiles)):\n w = pyfits.open(calib_pfiles[k])\n mjd = w[0].header['MJD-OBS']\n w.close()\n time_dif.append(abs(mjd_e2ds-mjd))\n\n if min(time_dif) < dif_time_max:\n index = time_dif.index(min(time_dif))\n calib_pfile = calib_pfiles[index]\n calib_file = os.path.split(calib_pfile)[-1]\n\n print(\"New {} file used:\\t{}\".format(file_type, calib_file))\n print(\"{} file time difference to e2ds = {:.2f} days\".format(file_type, min(time_dif)))\n return calib_pfile\n else:\n print(\"*** WARNING: Closest {} file was produced longer than {} day(s)\".format(file_type, dif_time_max))\n return None\n else:\n print(\"*** WARNING: No more {} files in folder\".format(file_type))\n return None\n\n\ndef calc_wave(e2ds_pfile, obs):\n \"\"\"\n Compute wavelength from e2ds headers.\n\n Parameters:\n -----------\n e2ds_pfile : str\n e2ds file name with path to its location.\n obs : str\n Code related to instrument to be used in fits headers.\n\n Returns:\n --------\n wave : list of lists\n 2d wavelength list where len(wave) is number of orders [angstroms].\n \"\"\"\n\n print(\"Executing: calc_wave\")\n\n e2ds = pyfits.open(e2ds_pfile)\n\n deg = e2ds[0].header['HIERARCH {} DRS CAL TH DEG LL'.format(obs)]\n\n ll_coeff = np.zeros((len(e2ds[0].data), deg + 1))\n\n # Read coefficients\n for i in range(len(e2ds[0].data)):\n for j in range(deg + 1):\n ll_coeff[i, j] = e2ds[0].header['HIERARCH {} DRS CAL TH COEFF '\n 'LL{}'.format(obs, (j + (deg + 1)*i))]\n\n # Evaluate polynomials\n x = np.arange(e2ds[0].data.shape[1]) # Pixel array\n wave = np.zeros(e2ds[0].data.shape) # Wavelength 2D array\n for i in range(len(wave)):\n wave[i] = np.poly1d(ll_coeff[i][::-1])(x)\n\n e2ds.close()\n\n return wave\n\n\ndef read_data_rdb(file):\n \"\"\"\n Read spectrum from rdb file with the following headers:\n 'obj', 'obs_date', 'bjd', 'wave', 'flux', 'error_pixel' (optional)\n \"\"\"\n print(\"Reading file:\\t{}\".format(file))\n\n try: data, hdr = ac_tools.read_rdb(file)\n except:\n print(\"*** ERROR: Cannot read {}\".format(file))\n sys.exit()\n\n print(\"Object:\\t\\t{}\".format(data['obj'][0]))\n\n data['wave'] = np.asarray(data['wave'])\n data['flux'] = np.asarray(data['flux'])\n data['tel'] = 
\"unknown\"\n data['instr'] = \"unknown\"\n data['obj'] = data['obj'][0]\n data['obs_date'] = data['obs_date'][0]\n data['bjd'] = data['bjd'][0]\n data['blaze'] = np.ones(len(data['flux']))\n data['snr'] = None\n data['median_snr'] = None\n data['ccf_noise'] = 0.0\n\n return data\n\n\ndef read_data(pfile, rv_in=None, obj_name=None, force_calc_wave=False, plot_spec=False):\n \"\"\"\n Reads data from 'S2D', 'S1D', 'e2ds', 's1d', 's1d_*_rv', 'ADP', and 'rdb' files.\n - force_calc_wave is for testing purposes.\n - plot_spec is for testing purposes.\n \"\"\"\n\n print()\n print(\"READING DATA FROM FILE:\")\n print(\"-----------------------\")\n\n flg = None\n\n # Get file type\n file_type = ac_tools.get_file_type(pfile)\n\n # Read rdb file and return data\n if file_type == 'rdb':\n data = read_data_rdb(pfile)\n data['file_type'] = file_type\n return data\n\n folder, file = os.path.split(pfile)\n file_info = file.split('_')[0]\n\n print(\"Working folder:\\t{}{}\".format(folder, os.path.sep))\n print(\"Reading file:\\t{}\".format(file))\n\n hdu = pyfits.open(pfile)\n hdr = hdu[0].header\n\n # Reading headers\n tel = hdr['TELESCOP']\n instr = hdr['INSTRUME']\n date_obs = hdr['DATE-OBS']\n\n print(\"Telescope:\\t{}\".format(tel))\n print(\"Instrument:\\t{}\".format(instr))\n\n if instr == 'HARPS':\n obs = 'ESO'\n ords = 72\n elif instr == 'HARPN':\n obs = 'TNG'\n ords = 69\n elif instr == 'ESPRESSO':\n obs = 'ESO'\n ords = 170\n else:\n sys.exit(\"*** ERROR: Instrument not recognized. ACTIN only accepts HARPS, HARPS-N, and ESPRESSO fits files. If using another instrument make an '.rdb' table with the headers 'obj', 'obs_date', 'bjd', 'wave', 'flux', 'error_pixel' (optional) and run ACTIN on that file.\")\n\n # get target\n obj = ac_tools.get_target(pfile)\n\n print(\"Object:\\t\\t{}\".format(obj))\n\n if obj in ('WAVE,WAVE,THAR1','WAVE,WAVE,THAR2'):\n print('*** WARNING: File is ThAr flux.')\n print(\"*** ACTION: Ignoring Measurement.\")\n return\n\n # Override object name with name given in obj_name option\n if obj_name:\n obj = ac_tools.override_obj(obj, obj_name)\n print(\"Object name changed to:\", obj)\n\n if instr in ('ESPRESSO'):\n if file_type == \"S2D\":\n flux = hdu[1].data\n flux_err = hdu[2].data\n wave_raw = hdu[5].data # wavelength air\n if file_type == \"S1D\":\n data = hdu[1].data\n flux = data['flux']\n flux_err = data['error']\n wave_raw = data['wavelength_air']\n bjd = hdr['HIERARCH {} QC BJD'.format(obs)]\n snr = [hdr['HIERARCH {} QC ORDER{} SNR'.format(obs,k+1)] for k in range(ords)]\n snr = np.asarray(snr)\n median_snr = np.median(snr)\n try: bv = hdr['HIERARCH {} OCS OBJ BV'.format(obs)]\n except: bv = None\n #try: airmass_end = hdr['HIERARCH {} TEL3 AIRM END'.format(obs)]\n #except: airmass_end = None\n try: airmass = hdr['AIRMASS']\n except KeyError: airmass = None\n try: exptime = hdr['EXPTIME']\n except KeyError: exptime = None\n\n if instr in ('HARPS', 'HARPN'):\n bjd = hdr['HIERARCH {} DRS BJD'.format(obs)]\n berv = hdr['HIERARCH {} DRS BERV'.format(obs)]\n try: bv = hdr['HIERARCH {} OCS OBJ BV'.format(obs)]\n except: bv = None\n #try: airmass_end = hdr['HIERARCH {} TEL AIRM END'.format(obs)]\n #except: airmass_end = None\n try: airmass = hdr['AIRMASS']\n except KeyError: airmass = None\n try: exptime = hdr['EXPTIME']\n except KeyError: exptime = None\n if file_type in (\"s1d\", \"e2ds\"):\n flux = hdu[0].data\n flux_err = None\n hdu.close()\n snr = [hdr['HIERARCH {} DRS SPE EXT SN{}'.format(obs, k)] for k in range(ords)]\n snr = np.asarray(snr)\n median_snr = 
np.median(snr)\n if file_type == 'ADP':\n flux = hdu[1].data[0][1]\n flux_err = None\n wave_raw = hdu[1].data[0][0]\n hdu.close()\n median_snr = hdr['SNR']\n blaze = np.ones(len(flux))\n snr = None\n\n if file_type == 's1d':\n # dwave_raw is 0.01 ang\n wave_raw = hdr['CRVAL1'] + hdr['CDELT1']*np.arange(hdr['NAXIS1'])\n blaze = np.ones(len(flux))\n\n if file_type in ('e2ds', 'ADP'):\n if file_type != 'ADP':\n # Reading data from wave file\n wave_file = hdr['HIERARCH {} DRS CAL TH FILE'.format(obs)]\n print(\"Wave file:\\t{}\".format(wave_file))\n wave_pfile = os.path.join(folder, wave_file)\n if force_calc_wave == False:\n try:\n wave_hdu = pyfits.open(wave_pfile)\n except:\n print(\"*** INFO: Could not open\", wave_pfile)\n wave_pfile = check_for_calib_files(hdr, 'wave', folder)\n try:\n wave_hdu = pyfits.open(wave_pfile)\n except:\n print(\"*** INFO: Could not open:\")\n print(\"***\", wave_pfile)\n print(\"*** ACTION: Computing wavelength from\", file_type)\n wave_raw = calc_wave(pfile, obs)\n wave_raw = np.asarray(wave_raw)\n else:\n wave_raw = wave_hdu[0].data\n wave_hdu.close()\n else:\n wave_raw = wave_hdu[0].data\n wave_hdu.close()\n if force_calc_wave == True:\n print(\"*** ACTION: Computing wavelength from\", file_type)\n wave_raw = calc_wave(pfile, obs)\n wave_raw = np.asarray(wave_raw)\n\n # Reading data from blaze file\n blaze_file = hdr['HIERARCH {} DRS BLAZE FILE'.format(obs)]\n print(\"Blaze file:\\t{}\".format(blaze_file))\n blaze_pfile = os.path.join(folder, blaze_file)\n try:\n blaze_hdu = pyfits.open(blaze_pfile)\n except:\n print(\"*** WARNING: The blaze file associated with this e2ds is not present.\")\n print(\"*** Looking for other blaze files in the folder...\")\n blaze_pfile = check_for_calib_files(hdr,'blaze',folder)\n try:\n blaze_hdu = pyfits.open(blaze_pfile)\n except:\n print(\"*** WARNING: Flux not deblazed. 
This can introduce artificial variations in the indices values.\")\n if file_type == 'e2ds':\n blaze = np.ones([len(flux),len(flux[0])])\n if file_type == 'ADP':\n blaze = np.ones(len(flux))\n flg = 'noDeblazed'\n else:\n blaze = blaze_hdu[0].data\n blaze_hdu.close()\n else:\n blaze = blaze_hdu[0].data\n blaze_hdu.close()\n\n if instr in ('ESPRESSO'):\n rv = hdr['HIERARCH {} QC CCF RV'.format(obs)] # [km/s]\n rv_err = hdr['HIERARCH {} QC CCF RV ERROR'.format(obs)] # [km/s]\n fwhm = hdr['HIERARCH {} QC CCF FWHM'.format(obs)] # [km/s]\n fwhm_err = hdr['HIERARCH {} QC CCF FWHM ERROR'.format(obs)] # [km/s]\n cont = hdr['HIERARCH {} QC CCF CONTRAST'.format(obs)] # [%]\n cont_err = hdr['HIERARCH {} QC CCF CONTRAST ERROR'.format(obs)] # [%]\n try: bv = hdr['HIERARCH {} OCS OBJ BV'.format(obs)] # B-V\n except: bv = None\n berv = hdr['HIERARCH {} QC BERV'.format(obs)] # [km/s] barycentric Earth radial velocity\n\n rv = rv * 1000 # convert to m/s\n rv_err = rv_err * 1000\n berv = berv * 1000 # convert to m/s\n fwhm = fwhm * 1000 # convert to m/s\n fwhm_err = fwhm_err * 1000\n bis = None\n bis_err = None\n ccf_noise = 0.0\n\n if file_type == 'S2D': blaze = np.ones([len(flux),len(flux[0])])\n if file_type == 'S1D': blaze = np.ones([len(flux)])\n\n if instr in ('HARPS', 'HARPN'):\n # Reading data from CCF file\n\n if file_type == 'ADP':\n ccf_search = \"{}.{}*ccf_*_A.fits\".format(instr, date_obs[:-2])\n else:\n ccf_search = \"{}*_ccf_*_A.fits\".format(file_info[:-2])\n ccf_search = os.path.join(folder, ccf_search)\n try:\n ccf_pfile = glob.glob(ccf_search)[0]\n ccf_hdu = pyfits.open(ccf_pfile)\n except:\n print(\"*** WARNING: Could not find or open:\")\n print(\"***\", ccf_search)\n print(\"*** WARNING: No CCF data available.\")\n rv = None; rv_err = None; fwhm = None; fwhm_err = None; cont = None; cont_err = None; ccf_noise = None; berv = None\n else:\n ccf_file = os.path.split(ccf_pfile)[-1]\n print(\"CCF file:\\t{}\".format(ccf_file))\n ccf_hdr = ccf_hdu[0].header\n ccf_hdu.close()\n\n try:\n rv = ccf_hdr['HIERARCH {} DRS CCF RVC'.format(obs)] # [km/s]\n rv_err = ccf_hdr['HIERARCH {} DRS DVRMS'.format(obs)] # [m/s]\n except KeyError as err:\n print(\"*** ERROR: {}, Ignoring measurement.\".format(err))\n return\n\n ccf_noise = ccf_hdr['HIERARCH {} DRS CCF NOISE'.format(obs)] # [km/s]\n fwhm = ccf_hdr['HIERARCH {} DRS CCF FWHM'.format(obs)] # [km/s]\n fwhm_err = None\n cont = ccf_hdr['HIERARCH {} DRS CCF CONTRAST'.format(obs)] # [%]\n cont_err = None\n\n rv = rv * 1000 # convert to m/s\n berv = berv * 1000 # convert to m/s\n fwhm = fwhm * 1000 # convert to m/s\n ccf_noise = ccf_noise * 1000 # convert to m/s\n\n # Reading data from BIS file\n if file_type == 'ADP':\n bis_search = \"{}.{}*bis_*_A.fits\".format(instr, date_obs[:-2])\n else:\n bis_search = \"{}*_bis_*_A.fits\".format(file_info[:-2])\n bis_search = os.path.join(folder, bis_search)\n try:\n bis_pfile = glob.glob(bis_search)[0]\n bis_hdu = pyfits.open(bis_pfile)\n except:\n print(\"*** WARNING: Could not find or open:\")\n print(\"***\", bis_search)\n print(\"*** WARNING: No BIS data available.\")\n bis = None\n bis_err = None\n else:\n bis_file = os.path.split(bis_pfile)[-1]\n print(\"BIS file:\\t{}\".format(bis_file))\n bis_hdr = bis_hdu[0].header\n bis_hdu.close()\n bis = bis_hdr['HIERARCH {} DRS BIS SPAN'.format(obs)] # [km/s]\n bis = bis * 1000 # convert to m/s\n bis_err = None\n\n # Wavelength calibration:\n c = 299792458.0 # [m/s]\n\n # receiving rv from input (must be in km/s)\n if rv_in is not None: rv = rv_in * 1000\n\n if instr 
in ('ESPRESSO'):\n wave = wave_raw - rv * wave_raw / c\n\n if instr in ('HARPS', 'HARPN'):\n if file_type in ac_set.ftypes['1d']:\n # To know if reading s1a_A_rv files:\n rest_frame = file.split('_')[-1].split('.')[0]\n\n # If using s1d need rv to calibrate wavelength\n if not rv and rest_frame != 'rv':\n print(\"*** ERROR: No rv data available to calibrate wavelength.\")\n return\n\n # If using s1d_*_rv the wavelength is already at rest frame\n if rest_frame == 'rv': wave = wave_raw\n\n # If using s1d use rv to calibrate wavelength\n if rv and rest_frame != 'rv':\n dwave = rv * wave_raw / c\n wave = wave_raw - dwave\n\n if file_type in ac_set.ftypes['2d']:\n if rv:\n dwave = (rv - berv) * wave_raw / c\n wave = wave_raw - dwave\n else:\n print(\"*** ERROR: No rv data available to calibrate wavelength.\")\n return\n\n # Test plot\n plot_spec = False\n\n if plot_spec == True:\n ord = 6\n dif_wave = np.diff(wave_raw)\n wave = wave_raw[1:]\n if type(wave[0]) in (list, np.ndarray):\n wave = wave[ord]\n plt.xlabel(\"Wave_raw ord {} [Ang]\".format(ord))\n plt.ylabel(\"diff_wave_raw ord {} [Ang]\".format(ord))\n else:\n plt.xlabel(\"Wave_raw [Ang]\")\n plt.ylabel(\"diff_wave_raw\")\n\n dif_wave = np.diff(wave)\n wave = wave[1:]\n\n plt.plot(wave, dif_wave, 'k.')\n\n #plt.axvline(7877.08, c='b',ls='-')\n plt.show()\n\n\n data = {}\n data['filename'] = os.path.basename(pfile)\n data['flux'] = flux\n data['flux_err'] = flux_err\n data['wave'] = wave\n data['blaze'] = blaze\n data['obs_date'] = date_obs\n data['obj'] = obj\n data['bjd'] = bjd\n data['median_snr'] = median_snr\n data['snr'] = snr # 2d\n data['rv'] = rv # m/s\n data['rv_err'] = rv_err # m/s\n data['fwhm'] = fwhm # m/s\n data['fwhm_err'] = fwhm_err\n data['cont'] = cont\n data['cont_err'] = cont_err\n data['bis'] = bis\n data['bis_err'] = bis_err\n data['ccf_noise'] = ccf_noise # m/s\n data['airmass'] = airmass\n data['exptime'] = exptime\n data['bv'] = bv\n data['tel'] = tel\n data['instr'] = instr\n data['file_type'] = file_type\n data['data_flg'] = flg\n\n return data\n"
] |
[
[
"matplotlib.pylab.show",
"numpy.poly1d",
"numpy.asarray",
"numpy.arange",
"numpy.median",
"numpy.diff",
"matplotlib.pylab.ylabel",
"matplotlib.pylab.plot",
"matplotlib.pylab.xlabel",
"numpy.zeros"
]
] |
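The `calc_wave` routine above rebuilds a 2-D wavelength grid from per-order polynomial coefficients stored in the DRS header. A minimal standalone sketch of just that evaluation step, with the coefficient array and pixel count as assumed inputs:

```python
import numpy as np

def wave_from_coeffs(ll_coeff: np.ndarray, n_pix: int) -> np.ndarray:
    """Evaluate one wavelength polynomial per spectral order.

    ll_coeff has shape (n_orders, deg + 1) with coefficients in increasing
    power order, as read from the header keywords in the module above.
    """
    x = np.arange(n_pix)                       # pixel axis
    wave = np.zeros((ll_coeff.shape[0], n_pix))
    for order, coeffs in enumerate(ll_coeff):
        # np.poly1d expects the highest power first, hence the reversal
        wave[order] = np.poly1d(coeffs[::-1])(x)
    return wave
```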
mvoran/iTunesAppReviewScraper
|
[
"f51ab0575dc567e2f3e76f35c6a26b2d344abcd2"
] |
[
"scraper/scraper.py"
] |
[
"import requests\nimport xml.etree.ElementTree as ET\nimport re\nimport dateparser\nimport pandas as pd\n\nfrom itunes_app_review_scraper.config import Config\n\nbasepath = None\n\nclass iTunesScraper(object):\n @classmethod\n def _request_itunes(cls, app_id, store_id, page):\n headers = {'X-Apple-Store-Front': Config.STORE_FRONT.format(store_id=store_id), 'User-Agent': Config.USER_AGENT}\n\n r = requests.get(Config.URL.format(app_id=app_id, page=page), headers=headers)\n\n if r.status_code != 200:\n raise RuntimeError('Failed to get data for {}: {}'.format(store_id, r.status_code))\n\n return r.content\n\n @classmethod\n def _parse_itunes_xml(cls, data):\n reviews = []\n\n try:\n root = ET.fromstring(data)\n except:\n return reviews\n\n for node in root.findall(Config.REVIEW_NODE, Config.NAMESPACE):\n review = {}\n\n star_data = node.find(Config.STAR_CHILD, Config.NAMESPACE)\n text_data = node.find(Config.TEXT_CHILD, Config.NAMESPACE)\n review_metadata = node.findall(Config.METADATA_CHILD, Config.NAMESPACE)\n\n review['title'] = review_metadata[0][0].text\n \n #in most country storefronts the stars are listed as numbers, e.g., \"5 stars\"\n #but in SE, DE and AT the numbers are spelled out, e.g., \"zwei stahlen\"\n try:\n review['stars'] = re.search(r'\\d', star_data.get('alt')).group()\n except:\n word = re.search(r'[\\w]{2,4}',star_data.get('alt'), re.UNICODE).group()\n review['stars'] = Config.NUMBERS[word]\n \n review['text'] = text_data.text\n\n #usernames are usually in the first group (review_metadata[1][0][0])\n #but some CJK usernames are in the second group (review_metadata[1][0])\n try:\n review['username'] = review_metadata[1][0][0].text.strip()\n except:\n try:\n review['username'] = review_metadata[1][0].text.strip()\n except:\n review['username'] = 'UNPARSABLE'\n \n\n version_date_data = review_metadata[1][0].tail\n\n version = Config.VERSION_RE.search(version_date_data)\n\n if version:\n review['version'] = version.group()\n else:\n review['version'] = None\n\n #lots of date formats to manage \n date = Config.DATE_RE.search(version_date_data)\n date_euro = Config.DATE_EURO_RE.search(version_date_data)\n date_nl = Config.DATE_NL_RE.search(version_date_data)\n date_br = Config.DATE_BR_RE.search(version_date_data)\n date_gb = Config.DATE_GB_RE.search(version_date_data)\n date_ru = Config.DATE_RU_RE.search(version_date_data)\n date_kr = Config.DATE_KR_RE.search(version_date_data)\n\n if date:\n review['date'] = dateparser.parse(date.group()) #since months are spelled out dateparser can handle dates without specifying order\n elif date_euro:\n review['date'] = dateparser.parse(date_euro.group()) #since months are spelled out dateparser can handle dates without specifying order\n elif date_nl:\n review['date'] = dateparser.parse(date_nl.group(), settings={'DATE_ORDER': 'DMY'})\n elif date_br:\n review['date'] = dateparser.parse(date_br.group(), settings={'DATE_ORDER': 'DMY'})\n elif date_gb:\n review['date'] = dateparser.parse(date_gb.group()) #since months are spelled out dateparser can handle dates without specifying order\n elif date_ru:\n review['date'] = dateparser.parse(date_ru.group(), settings={'DATE_ORDER': 'DMY'})\n elif date_kr:\n review['date'] = dateparser.parse(date_kr.group(), settings={'DATE_ORDER': 'YMD'})\n else:\n review['date'] = review_metadata[1][0].tail\n\n reviews.append(review)\n\n return reviews\n\n @classmethod\n def _get_all_pages(cls, app_id, store_id):\n reviews = []\n page = 0\n\n while 1:\n reviews_xml = cls._request_itunes(app_id, store_id, 
page)\n reviews_parsed = cls._parse_itunes_xml(reviews_xml)\n\n if not reviews_parsed:\n break\n\n reviews += reviews_parsed\n page += 1\n\n global basepath\n if not basepath is None:\n filename = str(store_id) + '_' + str(page).zfill(4) + '.csv'\n pdwrite = pd.DataFrame(reviews)\n pdwrite['text'] = pdwrite['text'].str.replace('\\r\\n', '')\n pdwrite.to_csv(basepath + filename, sep='|', quotechar='^', line_terminator='\\n', encoding='utf-8')\n\n masterfilename = 'AppStoreReviews_' + str(app_id) + '_PagesCopied.txt'\n ofile = open(basepath + masterfilename, 'a')\n ofile.write(str(store_id) + '\\t' + str(page).zfill(4) + '\\n')\n ofile.close()\n \n reviews=[] #reset the reviews list so that each page is written to only one csv file\n\n return reviews\n\n @classmethod\n def _get_all_countries(cls, app_id):\n reviews = {}\n\n for country, store_id in Config.COUNTRIES.items():\n reviews[country] = cls._get_all_pages(app_id, store_id)\n\n return reviews\n\n @classmethod\n def _get_reviews_country(cls, app_id, country='US'):\n if len(country) != 2 and len(country) != 6:\n raise ValueError('Either use a country code or a store id')\n\n if len(country) == 2:\n if country.upper() not in Config.COUNTRIES:\n raise ValueError('{} is not a valid country'.format(country))\n\n country = Config.COUNTRIES[country]\n\n if len(country) == 6:\n if country not in Config.COUNTRIES.values():\n raise ValueError('{} is not a valid store id'.format(country))\n\n reviews = cls._get_all_pages(app_id, country)\n\n return reviews\n\n @classmethod\n def get_reviews(cls, app_id, base_path=None, country=None):\n global basepath\n basepath = base_path\n if country is None:\n return cls._get_all_countries(app_id)\n\n return cls._get_reviews_country(app_id, country)\n"
] |
[
[
"pandas.DataFrame"
]
] |
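A minimal sketch of the per-page CSV dump performed in `_get_all_pages`, with a hypothetical two-review list standing in for parsed data; the column set and output path are placeholders rather than the scraper's real values.

```python
import pandas as pd

# Placeholder reviews shaped like the dicts built in _parse_itunes_xml.
reviews = [
    {"title": "Great", "stars": "5", "text": "Works well", "username": "a", "date": "2020-04-03", "version": "1.2"},
    {"title": "Meh", "stars": "2", "text": "Crashes\r\noften", "username": "b", "date": "2020-04-04", "version": "1.2"},
]

frame = pd.DataFrame(reviews)
# Strip embedded Windows newlines before writing, as the scraper does.
frame["text"] = frame["text"].str.replace("\r\n", "", regex=False)
frame.to_csv("reviews.csv", sep="|", quotechar="^", encoding="utf-8")
```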
februarysea/machinelearning
|
[
"c82f62acf16aef66b44e5f06f9b288be44c3cb64"
] |
[
"week3/algorithm4.py"
] |
[
"# algorithm4.py\n# logistic regression\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\ndef logistic_regression(x, y):\n cost_functions = []\n\n # the number of sample\n m = len(x)\n # the number of fearture\n n = len(x[0])\n # learning rate\n alpha = 0.001\n # initialization theta\n theta = np.zeros([n, 1])\n\n k = 1\n while k<5001:\n # caculate cost function\n cost_total = 0\n for i in range(0, m):\n thetaT_multiply_x = np.matmul(theta.T, x[i])\n h_x = 1 / (1 + np.exp(-thetaT_multiply_x))\n cost_total = cost_total + y[i] * np.log(h_x) + (1-y[i]) * np.log(1-h_x)\n cost_function = - cost_total / m\n cost_functions.append(cost_function)\n\n # gradient descent\n temp_theta = np.empty([n, 1])\n descent_total = 0\n for j in range(0, n):\n for i in range(0, m):\n thetaT_multiply_x = np.matmul(theta.T, x[i])\n h_x = 1 / (1 + np.exp(-thetaT_multiply_x))\n descent_total = descent_total + (h_x - y[i]) * x[i][j]\n temp_theta[j] = theta[j] - alpha / m * descent_total\n theta = temp_theta\n print(f\"time:{k}, cost function:{cost_function[0]}\")\n k = k + 1\n\n plot_figure(cost_functions)\n return theta\n\ndef test_model(theta, x, y):\n # 1 means right\n # 0 means wrong\n accuracy =[]\n m = len(x)\n for i in range(0, m):\n thetaT_multiply_x = np.matmul(theta.T, x[i])\n h_x = 1 / (1 + np.exp(-thetaT_multiply_x))\n if h_x>=0.5:\n if y[i]==1:\n accuracy.append(1)\n else:\n accuracy.append(0)\n else:\n if y[i]==0:\n accuracy.append(1)\n else:\n accuracy.append(0)\n print(f\"accuracy:{round(accuracy.count(1) / len(accuracy), 2)}\")\n\ndef plot_figure(cost_functions):\n x = np.arange(1, len(cost_functions)+1)\n y = np.array(cost_functions)\n plt.title(\"cost function tendency\")\n plt.xlabel(\"times\") \n plt.ylabel(\"cost function\")\n plt.plot(x, y)\n plt.show()\n\nif __name__ == \"__main__\":\n with open(\"data/divorce.csv\", 'r') as f:\n data = f.read()\n data = data.split(\"\\n\")\n for i in range(0,len(data)):\n data[i] = data[i].split(';')\n for k in range(0, len(data[i])):\n data[i][k] = int(data[i][k])\n # add x0 = 1\n data[i] = [0] + data[i]\n \n data = np.array(data)\n \n m = len(data)\n n = len(data[0])\n x = np.empty(shape=[m, n-1], dtype=int)\n y = np.empty(shape=[m, 1], dtype=int)\n for i in range(0,m):\n x[i] = data[i][0:-1]\n y[i] = data[i][-1]\n \n # training set and test set\n training_x = x[0:len(data)-20]\n training_y = y[0:len(data)-20]\n test_x = x[len(data)-20:]\n test_y = y[len(data)-20:]\n\n theta = logistic_regression(x=training_x, y=training_y)\n test_model(theta=theta, x=test_x, y=test_y)"
] |
[
[
"numpy.log",
"matplotlib.pyplot.title",
"numpy.matmul",
"numpy.empty",
"matplotlib.pyplot.plot",
"numpy.exp",
"matplotlib.pyplot.xlabel",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
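The gradient-descent loop above recomputes the sigmoid per sample and per feature, and its `descent_total` accumulator does not appear to be reset between features. A vectorised sketch of the same batch update, assuming `x` already carries a bias column and `y` is an (m, 1) column of 0/1 labels:

```python
import numpy as np

def logistic_regression_vec(x: np.ndarray, y: np.ndarray, alpha: float = 0.001, iters: int = 5000):
    """Vectorised batch gradient descent for logistic regression."""
    m, n = x.shape
    theta = np.zeros((n, 1))
    costs = []
    for _ in range(iters):
        h = 1.0 / (1.0 + np.exp(-x @ theta))             # (m, 1) predicted probabilities
        costs.append(float(-np.mean(y * np.log(h) + (1 - y) * np.log(1 - h))))
        theta = theta - (alpha / m) * (x.T @ (h - y))    # one gradient step over all features
    return theta, costs
```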
vishalbelsare/grblas
|
[
"d181ac4cb495d1bc806253137f53a42d65693b5e"
] |
[
"grblas/_ss/prefix_scan.py"
] |
[
"from math import ceil, log2\n\nimport numba\nimport numpy as np\n\nimport grblas as gb\n\nfrom .. import binary\nfrom ..operator import get_semiring, get_typed_op\n\n\n@numba.njit\ndef compact_indices(indptr, size): # pragma: no cover\n \"\"\"Given indptr from hypercsr, create a new col_indices array that is compact.\n\n That is, for each row with degree N, the column indices will be 0..N-1.\n \"\"\"\n indptr = indptr.view(np.int64) # so `diff` below is an integer\n col_indices = np.empty(size, dtype=np.uint64)\n start = 0\n ncols = 0\n for i in range(1, indptr.size):\n end = indptr[i]\n diff = end - start\n if diff > ncols:\n ncols = diff\n for j in range(diff):\n col_indices[start + j] = j\n start = end\n return col_indices, ncols\n\n\n# By default, scans on matrices are done along rows.\n# To perform scans along columns, pass a transposed matrix.\ndef prefix_scan(A, monoid, *, name=None, within):\n from .. import Matrix, Vector\n from ..matrix import TransposedMatrix\n\n monoid = get_typed_op(monoid, A.dtype, kind=\"binary\")\n A._expect_op(monoid, (\"BinaryOp\", \"Monoid\"), argname=\"op\", within=within)\n if monoid.opclass == \"BinaryOp\":\n if monoid.monoid is not None:\n monoid = monoid.monoid\n else:\n A._expect_op(monoid, \"Monoid\", argname=\"op\", within=within)\n semiring = get_semiring(monoid, binary.first)\n binaryop = semiring.monoid.binaryop\n\n is_transposed = type(A) is TransposedMatrix\n N_orig = A.shape[-1]\n if N_orig < 2:\n if is_transposed:\n return A.T.dup()\n return A.dup()\n\n # Compactify all the elements\n is_vector = type(A) is Vector\n if is_vector:\n info = A.ss.export(\"sparse\", sort=True)\n N_cols = len(info[\"indices\"])\n compact_info = dict(info, indices=np.arange(N_cols, dtype=np.uint64), size=N_cols)\n elif is_transposed:\n info = A.T.ss.export(\"hypercsc\", sort=True)\n row_indices, N_cols = compact_indices(info[\"indptr\"], info[\"row_indices\"].size)\n compact_info = dict(\n info,\n col_indices=row_indices,\n ncols=N_cols,\n nrows=info[\"ncols\"],\n rows=info[\"cols\"],\n format=\"hypercsr\",\n sorted_cols=True,\n )\n del compact_info[\"cols\"]\n del compact_info[\"row_indices\"]\n del compact_info[\"sorted_rows\"]\n else:\n info = A.ss.export(\"hypercsr\", sort=True)\n col_indices, N_cols = compact_indices(info[\"indptr\"], info[\"col_indices\"].size)\n compact_info = dict(info, col_indices=col_indices, ncols=N_cols)\n\n if N_cols < 2:\n if is_transposed:\n return A.T.dup()\n return A.dup()\n N_half = N_cols // 2\n val_t = np.int8\n index_t = np.uint64\n index = 1\n if is_vector:\n A = Vector.ss.import_sparse(**compact_info)\n else:\n A = Matrix.ss.import_hypercsr(**compact_info)\n\n # First iteration\n S = Matrix.ss.import_csc(\n nrows=N_cols,\n ncols=N_half,\n indptr=np.arange(0, 2 * N_half + 2, 2, dtype=index_t),\n row_indices=np.arange(2 * N_half, dtype=np.uint64),\n values=np.ones(1, dtype=val_t), # 2 * N_half\n is_iso=True,\n sorted_rows=True,\n take_ownership=True,\n name=\"Up_0\",\n )\n B = semiring(A @ S).new(name=\"B\")\n if is_vector:\n mask = None\n else:\n mask = B.S\n\n # Upsweep\n stride = 1\n stride2 = 2\n while stride2 <= N_half:\n k = (N_half - stride2) // stride2 + 1\n cols = np.arange(stride2 - 1, N_half, stride2, dtype=index_t)\n # assert k == cols.size\n S = Matrix.ss.import_hypercsc(\n nrows=N_half,\n ncols=N_half,\n indptr=np.arange(k + 1, dtype=index_t),\n cols=cols,\n row_indices=cols - stride,\n values=np.ones(1, dtype=val_t), # k\n is_iso=True,\n sorted_rows=True,\n take_ownership=True,\n name=f\"Up_{index}\",\n )\n 
B(binaryop, mask=mask) << semiring(B @ S)\n index += 1\n stride = stride2\n stride2 *= 2\n\n # Downsweep\n index = 0\n if N_half > 2:\n stride2 = max(2, 2 ** ceil(log2(N_half // 2)))\n stride = stride2 // 2\n while stride > 0:\n k = (N_half - stride2 - stride) // stride2 + 1\n if k == 0:\n stride2 = stride\n stride //= 2\n continue\n cols = np.arange(stride2 + stride - 1, N_half, stride2, dtype=index_t)\n # assert k == cols.size\n S = Matrix.ss.import_hypercsc(\n nrows=N_half,\n ncols=N_half,\n indptr=np.arange(k + 1, dtype=index_t),\n cols=cols,\n row_indices=cols - stride,\n values=np.ones(1, dtype=val_t), # k\n is_iso=True,\n sorted_rows=True,\n take_ownership=True,\n name=f\"Down_{index}\",\n )\n B(binaryop, mask=mask) << semiring(B @ S)\n index += 1\n stride2 = stride\n stride //= 2\n\n # Last iteration\n indptr = np.arange(0, 2 * N_half + 2, 2)\n indptr[-1] = N_cols - 1\n col_indices = np.arange(1, N_cols, dtype=index_t)\n S = Matrix.ss.import_csr(\n nrows=N_half,\n ncols=N_cols,\n indptr=indptr,\n col_indices=col_indices,\n values=np.ones(1, dtype=val_t), # N_cols - 1\n is_iso=True,\n sorted_cols=True,\n take_ownership=True,\n name=f\"Down_{index}\",\n )\n RV = semiring(B @ S).new(mask=A.S, name=\"RV\")\n\n indices = np.arange(0, N_cols, 2, dtype=index_t)\n d = Vector.ss.import_sparse(\n size=N_cols,\n indices=indices,\n values=np.ones(1, dtype=val_t), # (N_cols + 1) // 2\n is_iso=True,\n sorted_index=True,\n take_ownership=True,\n name=\"d\",\n )\n D = gb.ss.diag(d, name=\"D\")\n RV(binaryop) << semiring(A @ D)\n # De-compactify into final result\n if is_vector:\n rv_info = RV.ss.export(\"sparse\", sort=True, give_ownership=True)\n RV = Vector.ss.import_sparse(name=name, **dict(info, values=rv_info[\"values\"]))\n elif is_transposed:\n rv_info = RV.ss.export(\"hypercsr\", sort=True, give_ownership=True)\n RV = Matrix.ss.import_hypercsc(name=name, **dict(info, values=rv_info[\"values\"]))\n else:\n rv_info = RV.ss.export(\"hypercsr\", sort=True, give_ownership=True)\n RV = Matrix.ss.import_hypercsr(name=name, **dict(info, values=rv_info[\"values\"]))\n return RV\n"
] |
[
[
"numpy.arange",
"numpy.empty",
"numpy.ones"
]
] |
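For reference alongside the blocked GraphBLAS scan above, here is a dense NumPy analogue of an inclusive prefix scan. It uses a Hillis–Steele doubling pass rather than the up-sweep/down-sweep scheme in `prefix_scan`, so it is only an illustration of the result, not of that algorithm; `op` must be an associative binary operation (a monoid).

```python
import numpy as np

def inclusive_scan(values: np.ndarray, op=np.add) -> np.ndarray:
    """Hillis-Steele doubling scan: out[i] = values[0] op ... op values[i]."""
    out = np.asarray(values).copy()
    stride = 1
    while stride < out.size:
        # Fold in the partial result sitting `stride` positions to the left;
        # the right-hand side is fully evaluated before the assignment.
        out[stride:] = op(out[stride:], out[:-stride])
        stride *= 2
    return out

# e.g. inclusive_scan(np.array([1, 2, 3, 4])) -> array([ 1,  3,  6, 10])
```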
keisuke-umezawa/ATRank
|
[
"391452ee1deacaa600f6cfc405afbed5fb389b87"
] |
[
"atrank/train.py"
] |
[
"import logging\nimport os\nimport time\nimport json\nimport joblib\nimport pickle\nimport random\nimport numpy as np\nfrom collections import OrderedDict, defaultdict\n\nimport torch\nimport torch.nn.functional as F\nfrom torchviz import make_dot\n\nfrom input import DataInput, DataInputTest\nfrom model import Model\n\n# For reproducibility\nrandom.seed(1234)\nnp.random.seed(1234)\ntorch.manual_seed(1234)\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\n\n\n_logger = logging.getLogger(__name__)\n\n\nweight_dir = \"./\"\n\n\ndef create_model(config, cate_list, device):\n _logger.info(json.dumps(config, indent=4))\n model = Model(cate_list, device=device, **config)\n\n return model.to(device)\n\n\ndef create_optimizer(model, config):\n return torch.optim.SGD(model.parameters(), lr=config[\"learning_rate\"])\n\n\ndef train_model(train_set, test_set, model, optimizer, criteria, config, device):\n\n random.shuffle(train_set)\n\n batch_per_epoch = (len(train_set) // config[\"traing_batch_size\"]) + 1\n batch_per_epoch_tst = (len(test_set) // config[\"test_batch_size\"]) + 1\n\n _logger.info(\"--------start----------\")\n\n history = defaultdict(list)\n for e in range(config[\"num_epochs\"]):\n # Train\n model.train()\n cum_loss = 0.0\n for i, uij in DataInput(train_set, config[\"traing_batch_size\"]):\n y = torch.Tensor(uij[2]).to(device)\n kwargs = dict(\n u=torch.IntTensor(uij[0]).to(device),\n i=torch.LongTensor(uij[1]).to(device),\n hist_i=torch.LongTensor(uij[3]).to(device),\n hist_t=torch.LongTensor(uij[4]).to(device),\n sl=torch.IntTensor(uij[5]).to(device),\n )\n\n if e == 0 and i == 1:\n _logger.debug(f\"Label: {y}\")\n _logger.debug(f\"kwargs: {kwargs}\")\n\n # Initialize\n optimizer.zero_grad()\n\n # Forward\n logits, norm = model(**kwargs)\n probs = torch.sigmoid(logits)\n loss = criteria(probs, y) + config[\"regulation_rate\"] * norm\n\n if e == 0 and i == 1:\n make_dot(probs, params=dict(model.named_parameters())).render(\"graph\")\n\n # Backward\n loss.backward()\n optimizer.step()\n cum_loss += loss.item()\n\n if i % 100 == 0:\n _logger.debug(f\"{logits}\")\n _logger.info(\n f\"Epoch {e + 1:02d} batch {i + 1:04d} / {batch_per_epoch}, \"\n + f\"trn_loss: {cum_loss / (i + 1):.04f}\"\n )\n\n history[\"trn_loss\"].append(cum_loss / batch_per_epoch)\n\n # Test\n model.eval()\n cum_loss = 0.0\n cum_auc = 0.0\n with torch.no_grad():\n for i, uij in DataInputTest(test_set, config[\"test_batch_size\"]):\n kwargs = dict(\n u=torch.IntTensor(uij[0]).to(device),\n i=torch.LongTensor(uij[1]).to(device),\n hist_i=torch.LongTensor(uij[3]).to(device),\n hist_t=torch.LongTensor(uij[4]).to(device),\n sl=torch.IntTensor(uij[5]).to(device),\n )\n logits, norm = model(**kwargs)\n probs_pos = torch.sigmoid(logits)\n cum_loss += criteria(probs_pos, torch.ones_like(probs_pos)).item() * 0.5\n\n kwargs = dict(\n u=torch.IntTensor(uij[0]).to(device),\n i=torch.LongTensor(uij[2]).to(device),\n hist_i=torch.LongTensor(uij[3]).to(device),\n hist_t=torch.LongTensor(uij[4]).to(device),\n sl=torch.IntTensor(uij[5]).to(device),\n )\n logits, norm = model(**kwargs)\n probs_neg = torch.sigmoid(logits)\n cum_loss += (\n criteria(probs_neg, torch.zeros_like(probs_neg)).item() * 0.5\n )\n\n cum_auc += np.mean((probs_pos > probs_neg).detach().cpu().numpy())\n\n history[\"tst_auc\"].append(cum_auc / batch_per_epoch_tst)\n history[\"tst_loss\"].append(cum_loss / batch_per_epoch_tst)\n\n for key in [\"tst_loss\", \"tst_auc\"]:\n _logger.info(f\"{key:5}: {history[key][-1]:.5f}\")\n\n # 
Saving\n torch.save(model.state_dict(), f\"./{e:03d}.pytorch\")\n joblib.dump(history, f\"{weight_dir}/history\")\n\n\ndef train():\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n _logger.info(\"Device: {}\".format(device))\n\n start_time = time.time()\n with open(\"dataset.pkl\", \"rb\") as f:\n train_set = pickle.load(f)\n test_set = pickle.load(f)\n cate_list = pickle.load(f)\n user_count, item_count, cate_count = pickle.load(f)\n\n # Build Config\n config = OrderedDict()\n config[\"item_count\"] = item_count\n config[\"cate_count\"] = cate_count\n config[\"itemid_embedding_size\"] = 64\n config[\"cateid_embedding_size\"] = 64\n config[\"num_blocks\"] = 1\n config[\"num_heads\"] = 8\n config[\"learning_rate\"] = 0.05\n config[\"num_epochs\"] = 50\n config[\"traing_batch_size\"] = 32\n config[\"test_batch_size\"] = 128\n config[\"regulation_rate\"] = 0.00005\n\n model = create_model(config, cate_list, device)\n optimizer = create_optimizer(model, config)\n criteria = F.binary_cross_entropy\n train_model(train_set, test_set, model, optimizer, criteria, config, device)\n\n\ndef main():\n train()\n\n\nif __name__ == \"__main__\":\n level = logging.INFO\n handler = logging.StreamHandler()\n handler.setLevel(level)\n _logger.setLevel(level)\n _logger.addHandler(handler)\n _logger.propagate = False\n main()\n"
] |
[
[
"torch.sigmoid",
"torch.LongTensor",
"numpy.random.seed",
"torch.Tensor",
"torch.manual_seed",
"torch.zeros_like",
"torch.no_grad",
"torch.cuda.is_available",
"torch.IntTensor",
"torch.ones_like"
]
] |
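The block of seed calls and cuDNN flags at the top of `train.py` is a common reproducibility recipe; a minimal sketch of it bundled into one helper (the default seed simply mirrors the value used above, and determinism comes at some speed cost):

```python
import random

import numpy as np
import torch

def seed_everything(seed: int = 1234) -> None:
    """Seed every RNG the training loop above touches and make cuDNN deterministic."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
```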
csbhr/OpenUtility
|
[
"0db908d5598275e0cf73b5bafb7e5be32c51a81e"
] |
[
"utils/PerceptualSimilarity/models/__init__.py"
] |
[
"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nfrom skimage.measure import compare_ssim\nimport torch\n\nfrom utils.PerceptualSimilarity.models import dist_model\n\nclass PerceptualLoss(torch.nn.Module):\n def __init__(self, model='net-lin', net='alex', colorspace='rgb', spatial=False, use_gpu=True, gpu_ids=[0], version='0.1'): # VGG using our perceptually-learned weights (LPIPS metric)\n # def __init__(self, model='net', net='vgg', use_gpu=True): # \"default\" way of using VGG as a perceptual loss\n super(PerceptualLoss, self).__init__()\n print('Setting up Perceptual loss...')\n self.use_gpu = use_gpu\n self.spatial = spatial\n self.gpu_ids = gpu_ids\n self.model = dist_model.DistModel()\n self.model.initialize(model=model, net=net, use_gpu=use_gpu, colorspace=colorspace, spatial=self.spatial, gpu_ids=gpu_ids, version=version)\n print('...[%s] initialized'%self.model.name())\n print('...Done')\n\n def forward(self, pred, target, normalize=False):\n \"\"\"\n Pred and target are Variables.\n If normalize is True, assumes the images are between [0,1] and then scales them between [-1,+1]\n If normalize is False, assumes the images are already between [-1,+1]\n\n Inputs pred and target are Nx3xHxW\n Output pytorch Variable N long\n \"\"\"\n\n if normalize:\n target = 2 * target - 1\n pred = 2 * pred - 1\n\n return self.model.forward(target, pred)\n\ndef normalize_tensor(in_feat,eps=1e-10):\n norm_factor = torch.sqrt(torch.sum(in_feat**2,dim=1,keepdim=True))\n return in_feat/(norm_factor+eps)\n\ndef l2(p0, p1, range=255.):\n return .5*np.mean((p0 / range - p1 / range)**2)\n\ndef psnr(p0, p1, peak=255.):\n return 10*np.log10(peak**2/np.mean((1.*p0-1.*p1)**2))\n\ndef dssim(p0, p1, range=255.):\n return (1 - compare_ssim(p0, p1, data_range=range, multichannel=True)) / 2.\n\ndef rgb2lab(in_img,mean_cent=False):\n from skimage import color\n img_lab = color.rgb2lab(in_img)\n if(mean_cent):\n img_lab[:,:,0] = img_lab[:,:,0]-50\n return img_lab\n\ndef tensor2np(tensor_obj):\n # change dimension of a tensor object into a numpy array\n return tensor_obj[0].cpu().float().numpy().transpose((1,2,0))\n\ndef np2tensor(np_obj):\n # change dimenion of np array into tensor array\n return torch.Tensor(np_obj[:, :, :, np.newaxis].transpose((3, 2, 0, 1)))\n\ndef tensor2tensorlab(image_tensor,to_norm=True,mc_only=False):\n # image tensor to lab tensor\n from skimage import color\n\n img = tensor2im(image_tensor)\n img_lab = color.rgb2lab(img)\n if(mc_only):\n img_lab[:,:,0] = img_lab[:,:,0]-50\n if(to_norm and not mc_only):\n img_lab[:,:,0] = img_lab[:,:,0]-50\n img_lab = img_lab/100.\n\n return np2tensor(img_lab)\n\ndef tensorlab2tensor(lab_tensor,return_inbnd=False):\n from skimage import color\n import warnings\n warnings.filterwarnings(\"ignore\")\n\n lab = tensor2np(lab_tensor)*100.\n lab[:,:,0] = lab[:,:,0]+50\n\n rgb_back = 255.*np.clip(color.lab2rgb(lab.astype('float')),0,1)\n if(return_inbnd):\n # convert back to lab, see if we match\n lab_back = color.rgb2lab(rgb_back.astype('uint8'))\n mask = 1.*np.isclose(lab_back,lab,atol=2.)\n mask = np2tensor(np.prod(mask,axis=2)[:,:,np.newaxis])\n return (im2tensor(rgb_back),mask)\n else:\n return im2tensor(rgb_back)\n\ndef rgb2lab(input):\n from skimage import color\n return color.rgb2lab(input / 255.)\n\ndef tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=255./2.):\n image_numpy = image_tensor[0].cpu().float().numpy()\n image_numpy = (np.transpose(image_numpy, (1, 
2, 0)) + cent) * factor\n return image_numpy.astype(imtype)\n\ndef im2tensor(image, imtype=np.uint8, cent=1., factor=255./2.):\n return torch.Tensor((image / factor - cent)\n [:, :, :, np.newaxis].transpose((3, 2, 0, 1)))\n\ndef tensor2vec(vector_tensor):\n return vector_tensor.data.cpu().numpy()[:, :, 0, 0]\n\ndef voc_ap(rec, prec, use_07_metric=False):\n \"\"\" ap = voc_ap(rec, prec, [use_07_metric])\n Compute VOC AP given precision and recall.\n If use_07_metric is true, uses the\n VOC 07 11 point method (default:False).\n \"\"\"\n if use_07_metric:\n # 11 point metric\n ap = 0.\n for t in np.arange(0., 1.1, 0.1):\n if np.sum(rec >= t) == 0:\n p = 0\n else:\n p = np.max(prec[rec >= t])\n ap = ap + p / 11.\n else:\n # correct AP calculation\n # first append sentinel values at the end\n mrec = np.concatenate(([0.], rec, [1.]))\n mpre = np.concatenate(([0.], prec, [0.]))\n\n # compute the precision envelope\n for i in range(mpre.size - 1, 0, -1):\n mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])\n\n # to calculate area under PR curve, look for points\n # where X axis (recall) changes value\n i = np.where(mrec[1:] != mrec[:-1])[0]\n\n # and sum (\\Delta recall) * prec\n ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n return ap\n\ndef tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=255./2.):\n# def tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=1.):\n image_numpy = image_tensor[0].cpu().float().numpy()\n image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + cent) * factor\n return image_numpy.astype(imtype)\n\ndef im2tensor(image, imtype=np.uint8, cent=1., factor=255./2.):\n# def im2tensor(image, imtype=np.uint8, cent=1., factor=1.):\n return torch.Tensor((image / factor - cent)\n [:, :, :, np.newaxis].transpose((3, 2, 0, 1)))\n"
] |
[
[
"numpy.maximum",
"numpy.arange",
"torch.sum",
"numpy.concatenate",
"numpy.max",
"numpy.mean",
"numpy.prod",
"numpy.transpose",
"numpy.where",
"numpy.sum",
"numpy.isclose"
]
] |
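Two notes on the metrics module above: `skimage.measure.compare_ssim` has since moved to `skimage.metrics.structural_similarity` in newer scikit-image releases, and the PSNR helper follows the standard peak-over-MSE form. A minimal standalone sketch of that PSNR computation in float64, assuming uint8-range inputs by default:

```python
import numpy as np

def psnr(reference: np.ndarray, test: np.ndarray, peak: float = 255.0) -> float:
    """Peak signal-to-noise ratio in dB: 10 * log10(peak^2 / MSE)."""
    mse = np.mean((reference.astype(np.float64) - test.astype(np.float64)) ** 2)
    return float(10.0 * np.log10(peak ** 2 / mse))
```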
jonholdship/UCLCHEM
|
[
"a946815b2e6ab275f658ed64463dcd4e32fcdc17"
] |
[
"scripts/example_plotting_script.py"
] |
[
"\"\"\"\nA script largely intended for people unfamiliar with Python.\nIf you run run_uclchem_tests.py, you'll produce several example model outputs.\nThis script then uses those outputs to demonstrate some simple plotting.\n\"\"\"\n\nimport uclchem\nimport matplotlib.pyplot as plt\n#pick species, any number is fine\nspecies_list=[\"#H2O\",\"#CO\",\"#CH3OH\",\"CO\",\"CH3OH\",\"H2O\"]\ninput_file=\"examples/example-output/phase1-full.dat\"\ninput_file=\"output/test.csv\"\n\n\n#call read_uclchem. \nmodel_data=uclchem.analysis.read_output_file(input_file)\n\n#create_abundance_plot will return pyplot figure and axis objects where the axis\n#contains a plot of the species abundance through time for all species in species_list\n#optionally, save it to plot_file\nfig,ax=uclchem.analysis.create_abundance_plot(model_data,species_list,plot_file=\"examples/example_plot.png\")\n\n\n\n\n\n#alternatively, we may already have an axis we'd like to plot to\n#in which case, plot_species() may be more helpful\n\nfig,ax=plt.subplots()\nax=uclchem.analysis.plot_species(ax,model_data,species_list)\n\n#the returned object lets us make some edits\n#lets plot the temperature on a second y axis \nax2=ax.twinx()\nax2.plot(model_data[\"Time\"],model_data[\"gasTemp\"],color=\"black\")\nax2.set(ylabel=\"Temperature / K\")\n#and make some slight adjustments to the plot before saving again\nax.set(xscale='log',xlim=(1,1e7),ylim=(9e-31,5e-4))\nax.set_title('This is a Test Plot')\nfig.savefig(\"examples/improved_example_plot.png\")\n\n\n#plot_species allows us to do more complex things such as subplots\nfig,axes=plt.subplots(2,2,figsize=(16,9))\naxes=axes.flatten() #turn [2,2] array into [4]\n\nfor i,species in enumerate([[\"CO\",\"#CO\"],[\"H2O\",\"#H2O\"],[\"CH3OH\",\"#CH3OH\"],[\"CO2\",\"#CO2\"]]):\n\taxes[i]=uclchem.plot_species(axes[i],model_data,species)\nfig.savefig(\"examples/multiplot_example.png\")"
] |
[
[
"matplotlib.pyplot.subplots"
]
] |
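A minimal self-contained sketch of the twin-axis pattern the plotting script demonstrates, with synthetic arrays standing in for the UCLCHEM model output; the curves, labels, and output filename are placeholders.

```python
import matplotlib.pyplot as plt
import numpy as np

# Synthetic stand-ins for the model_data columns used in the script above.
time = np.logspace(0, 7, 200)
abundance = 1e-4 * np.exp(-time / 1e6)
temperature = 10 + 90 * (1 - np.exp(-time / 1e5))

fig, ax = plt.subplots()
ax.plot(time, abundance, label="species")
ax.set(xscale="log", xlabel="Time / yr", ylabel="Abundance")

ax2 = ax.twinx()                      # second y-axis sharing the same x-axis
ax2.plot(time, temperature, color="black")
ax2.set(ylabel="Temperature / K")
fig.savefig("twinx_example.png")
```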