Columns: repo_name (string, length 6-130) | hexsha (list of strings) | file_path (list of strings) | code (list of strings) | apis (list of lists of strings)
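
Each record pairs one repository with four index-aligned lists: a commit SHA, a file path, the raw source blob, and the API calls extracted from that file. A minimal sketch of how such records can be walked, assuming they have been exported as JSON Lines (the file name records.jsonl and the loading step are illustrative, not part of the dataset itself):

import json

# Assumption: records.jsonl holds one record per line with the schema above
# (repo_name: str; hexsha, file_path, code, apis: parallel lists).
with open("records.jsonl", "r", encoding="utf-8") as f:
    records = [json.loads(line) for line in f]

for rec in records:
    # The four list columns are index-aligned: entry i of each list
    # describes the same file at the same commit.
    for sha, path, code, apis in zip(rec["hexsha"], rec["file_path"],
                                     rec["code"], rec["apis"]):
        print(f"{rec['repo_name']} @ {sha[:8]} :: {path}")
        print(f"  {len(code)} chars of source; APIs: {', '.join(apis)}")
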
ani-poroorkara/jpas
[ "0409bbe2f9de39089ee72342558be8fc25d8ac97" ]
[ "backend/scripts/utility/liveData.py" ]
[ "import pymongo\nimport json\nimport os\nimport pandas as pd\nfrom linkedInutility import get_db_connection\nimport pymongo\nimport logging\ndef livedata(cfg):\n logging.info(\"Creating Database connection\")\n db_connection = get_db_connection(cfg)\n dblist=db_connection.list_database_names()\n if \"LinkedInJob\" in dblist:\n mydb = db_connection[\"LinkedInJob\"]\n db_cm = mydb[\"Staging_raw_data\"]\n filename = os.path.abspath(\"/Users/vijetavamannayak/LinkedInProject/rawData/data.csv\")\n #file_res = os.path.join(cdir, filepath)\n\n data = pd.read_csv(filename)\n #print(data)\n data_json = json.loads(data.to_json(orient='records'))\n db_cm.insert_many(data_json)\n\n #mycol = mydb[\"Staging\"]\n logging.info(\"The data is inserted into the database\")\n\n" ]
[ [ "pandas.read_csv" ] ]
softbankrobotics-research/NeurIPS19-SBDRL
[ "9ea43a256da1fbb7fa32e07fa5e97700abb4d47a", "9ea43a256da1fbb7fa32e07fa5e97700abb4d47a" ]
[ "code/symmetry_based_disentanglement_a_la_WM/flatland/flat_game/generate_data_small_ball.py", "code/symmetry_based_disentanglement_a_la_WM/vae/arch_torch.py" ]
[ "from env import Env\nimport numpy as np\nfrom tqdm import trange\nimport time\nimport math\nimport pickle as pk\nimport random\nimport pymunk\nfrom constants import *\nimport torch\n\n\nprint(pymunk.version)\n\nagent_parameters = {\n 'radius': 8,\n 'speed': 10,\n 'rotation_speed' : math.pi/8,\n 'living_penalty': 0,\n 'position': (50,50),\n 'angle': 0,\n 'sensors': [\n \n {\n 'nameSensor' : 'proximity_test',\n 'typeSensor': 'proximity',\n 'fovResolution': 64,\n 'fovRange': 300,\n 'fovAngle': math.pi ,\n 'bodyAnchor': 'body',\n 'd_r': 0,\n 'd_theta': 0,\n 'd_relativeOrientation': 0,\n 'display': False,\n }\n \n \n ],\n 'actions': ['forward', 'turn_left', 'turn_right', 'left', 'right', 'backward'],\n 'measurements': ['health', 'poisons', 'fruits'],\n 'texture': {\n 'type': 'color',\n 'c': (255, 255, 255)\n },\n 'normalize_measurements': False,\n 'normalize_states': False,\n 'normalize_rewards': False\n}\n\nenv_parameters = {\n 'map':False,\n 'n_rooms': 2,\n 'display': True,\n 'horizon': 10001,\n 'shape': (104, 104),\n 'mode': 'time',\n 'poisons': {\n 'number': 0,\n 'positions': 'random',\n 'size': 10,\n 'reward': -10,\n 'respawn': True,\n 'texture': {\n 'type': 'color',\n 'c': (255, 255, 255),\n }\n },\n 'fruits': {\n 'number': 0,\n 'positions': 'random',\n 'size': 10,\n 'reward': 10,\n 'respawn': True,\n 'texture': {\n 'type': 'color',\n 'c': (255, 150, 0),\n }\n },\n 'obstacles': [\n \n ],\n 'walls_texture': {\n 'type': 'color',\n 'c': (255, 0, 0)\n },\n 'agent': agent_parameters\n}\n\n\nenv = Env(**env_parameters)\nn = len(agent_parameters['actions'])\nmeas, sens = None, None\nprev_sens = None\n\ndataset = []\n\naction = {}\n\nlongtrans = 0\nlattrans = 0\nrot = 0\n\naction_save = []\nstart = time.time()\ndone = False\nfor i in trange(1):\n while not done:\n if sens is not None:\n prev_sens = sens.copy()\n \n if np.random.binomial(1, 0.5, size=None) == 0:\n\n action_binary = (2*np.random.binomial(1, 0.5, size=None) -1)\n action_tosave = action_binary+1\n\n #print('lateral', action_tosave)\n\n action['longitudinal_velocity'] = 0\n action['lateral_velocity'] = action_binary*1\n action['angular_velocity'] = 0\n\n else:\n\n action_binary = (2*np.random.binomial(1, 0.5, size=None) -1)\n action_tosave = action_binary+2\n\n #print('longitudinal', action_tosave)\n\n\n action['longitudinal_velocity'] = action_binary*1\n action['lateral_velocity'] = 0\n action['angular_velocity'] = 0\n\n action_save.append(action_tosave)\n\n \n sens, r, done, meas = env.step(action)\n\n \n env.reset()\n done = False\nend = time.time()\nprint(end - start)\n\ndef translate(action):\n if action == 1:\n res = 'up'\n if action == -1:\n res = 'down'\n if action == 0:\n res = 'left'\n if action == 2:\n res = 'right'\n\n return res\n\n\nfrom PIL import Image\nimport os\ninputs = []\nfor i,elem in enumerate(os.listdir('images/')):\n im = np.asarray(Image.open('images/' + str(i+1)+'.png').convert('RGB'))\n inputs.append(im)\ninputs = np.array(inputs).reshape(-1,64,64,3)\ninputs = inputs.astype('float32') / 255.\n\naction_save = action_save[1:]\n\n\nfrom vae.arch_torch import VAE\nvae = torch.load('vae/15483481760431166_disentangled/saved_models/epoch_10_env_0', map_location={'cuda:0': 'cpu'})\n\ninputs_z = []\nfor i,elem in enumerate(inputs):\n z = vae.forward(torch.Tensor(elem.reshape(-1,64,64,3).transpose((0,3,1,2))), encode=True, mean=True)\n inputs_z.append(z.detach().numpy().reshape(1,2))\n #if i!=len(inputs)-1:\n #print(translate(action_save[i]))\ninputs_z = np.array(inputs_z).reshape(-1,2)\n\n#print(inputs_z[:-1], 
'inputs')\n#print(action_save, 'actions')\n#print(inputs_z[1:], 'targets')\n\nnp.save('inputs', inputs_z[:-1])\nnp.save('actions', np.array(action_save))\nnp.save('targets', inputs_z[1:])\n\n\n\n", "import torch.nn as nn\nimport torch\nimport torch.nn.functional as F\nfrom constants import *\nimport numpy as np\nfrom torch.utils.data import Dataset\nimport os\nfrom PIL import Image\n\nclass VAE(nn.Module):\n\tdef __init__(self):\n\t\tsuper(VAE, self).__init__()\n\n\t\t# Encoder layers\n\t\t\n\t\tself.conv1 = nn.Conv2d(3, 32, 4, stride=2, padding=1)\n\t\tself.conv2 = nn.Conv2d(32, 32, 4, stride=2, padding=1)\n\t\tself.conv3 = nn.Conv2d(32, 32, 4, stride=2, padding=1)\n\t\tself.conv4 = nn.Conv2d(32, 32, 4, stride=2, padding=1)\n\n\n\t\tself.fc1 = nn.Linear(512, 256)\n\t\tself.fc2 = nn.Linear(256, 256)\n\t\tself.fc3 = nn.Linear(256, 2*Z_DIM)\n\n\t\t# Decoder layers\n\n\t\tself.fc4 = nn.Linear(Z_DIM, 256)\n\t\tself.fc5 = nn.Linear(256, 512)\n\n\t\tself.deconv1 = nn.ConvTranspose2d(32, 32, 4, stride=2, padding=1)\n\t\tself.deconv2 = nn.ConvTranspose2d(32, 32, 4, stride=2, padding=1)\n\t\tself.deconv3 = nn.ConvTranspose2d(32, 32, 4, stride=2, padding=1)\n\t\tself.deconv4 = nn.ConvTranspose2d(32, 3, 4, stride=2, padding=1)\n\n\tdef encode(self, x):\n\t\th = F.selu(self.conv1(x))\n\t\th = F.selu(self.conv2(h))\n\t\th = F.selu(self.conv3(h))\n\t\th = F.selu(self.conv4(h))\n\t\tself.h_shape = h.shape\n\t\th = h.view(-1, h.shape[1]*h.shape[2]*h.shape[3])\n\t\th = F.selu(self.fc1(h))\n\t\th = F.selu(self.fc2(h))\n\t\th = F.selu(self.fc3(h))\n\t\treturn h\n\n\tdef reparameterize(self, mu_and_logvar):\n\t\tmu = torch.split(mu_and_logvar,int(mu_and_logvar.shape[1]/2),dim=1)[0]\n\t\tlogvar = torch.split(mu_and_logvar,int(mu_and_logvar.shape[1]/2),dim=1)[1]\n\t\tstd = torch.exp(logvar)\n\t\teps = torch.randn_like(std)\n\t\treturn eps * std + mu\n\n\n\tdef decode(self, z):\n\t\th = F.selu(self.fc4(z))\n\t\th = F.selu(self.fc5(h).reshape(-1, self.h_shape[1], self.h_shape[2], self.h_shape[3]))\n\t\th = F.selu(self.deconv1(h))\n\t\th = F.selu(self.deconv2(h))\n\t\th = F.selu(self.deconv3(h))\n\t\th = F.sigmoid(self.deconv4(h))\n\t\treturn h\n\n\n\tdef forward(self, x, encode=False, mean=False, decode=False):\n\t\tif decode:\n\t\t\treturn self.decode(x)\n\t\tmu_and_logvar = self.encode(x)\n\t\tz = self.reparameterize(mu_and_logvar)\n\t\tif encode:\n\t\t\tif mean:\n\t\t\t\tmu = torch.split(mu_and_logvar,int(mu_and_logvar.shape[1]/2),dim=1)[0]\n\t\t\t\treturn mu\n\t\t\treturn z\n\t\treturn self.decode(z), mu_and_logvar\n\n\n\tdef generate_reconstructed_data(self, obs_data, filename):\n\n\t\timages_input = torch.from_numpy(np.array(obs_data)).float().cuda()\n\t\timages_output = []\n\t\t\n\t\timages_output = self.forward(images_input)[0].cpu().detach().numpy()\n\n\t\timages_input = np.array(images_input.cpu().detach()).transpose((0,2,3,1))\n\t\timages_output = np.array(images_output).transpose((0,2,3,1))\n\n\t\tout = np.array([images_input, images_output])\n\n\t\tnp.save(filename, out)\n\n\t\treturn \n\n\tdef linear_interpolation(self, image_origin, image_destination, number_frames):\n\n\t\tres = []\n\t\tres.append(image_origin.reshape(1,3,64,64))\n\n\t\torigin_z = self.forward(np.array(image_origin).reshape((1,3,64,64)), encode=True)\n\t\tfinal_z = self.forward(np.array(image_destination).reshape((1,3,64,64)), encode=True)\n\n\n\t\tfor i in range(0, number_frames+1):\n\t\t\ti /= number_frames\n\t\t\tprint(i)\n\t\t\ttranslat_img = ((1 - i) * origin_z) + (i * 
final_z)\n\t\t\tres.append(self.forward(np.array(translat_img), decode=True))\n\n\t\tres.append(image_destination.reshape(1,3,64,64))\n\n\t\treturn np.array(res)\n\n\tdef generate_rnn_data(self, obs_data, action_data):\n\n\t\trnn_input = []\n\t\trnn_output = []\n\n\t\tfor i, j in zip(obs_data, action_data): \n\t\t\trnn_z_input = self.forward(torch.tensor(np.array(i).transpose((0,3,1,2))).cuda(), encode=True).detach().cpu().numpy()\n\t\t\tconc = [np.append(x,y) for x, y in zip(rnn_z_input, j.reshape((300,1)))]\n\t\t\trnn_input.append(conc[:-1])\n\t\t\trnn_output.append(np.array(rnn_z_input[1:]))\n\n\t\trnn_input = np.array(rnn_input)\n\t\trnn_output = np.array(rnn_output)\n\n\t\treturn (rnn_input, rnn_output)\n\n\nclass CustomDataset(Dataset):\n\tdef __init__(self, path_input):\n\t\tself.inputs = []\n\t\tfor i,elem in enumerate(os.listdir(path_input)):\n\t\t\ttry:\n\t\t\t\tself.inputs.append(np.asarray(Image.open(path_input + elem).convert('RGB')))\n\t\t\texcept OSError:\n\t\t\t\tpass\n\t\tself.inputs = np.array(self.inputs).reshape(-1,64,64,3)\n\t\tself.inputs = self.inputs.astype('float32') / 255.\n\t\tprint(self.inputs.shape)\n\t\t#self.inputs = np.load('/media/looka/bc6982c9-5dc3-4761-93f3-40a4eafda3ec/phd/flatland/WorldModels/flatland_topview/data/PGMRL_validate_obs_data.npy').reshape(300,64,64,3).transpose((0,3,1,2))\n\t\tself.inputs = self.inputs.transpose((0,3,1,2))\n\t\tprint(self.inputs.shape)\n\n\tdef __getitem__(self, index):\n\t\tinput_batch = self.inputs[index]\n\t\treturn input_batch\n\n\tdef __len__(self):\n\t\tcount = len(self.inputs)\n\t\treturn count # of how many examples(images?) you have\n\n" ]
[ [ "numpy.array", "numpy.random.binomial", "numpy.save", "torch.load" ], [ "torch.nn.Linear", "numpy.array", "torch.nn.ConvTranspose2d", "numpy.save", "torch.nn.Conv2d", "torch.randn_like", "numpy.append", "torch.exp" ] ]
HDUAIS/MARL_Bench
[ "f592d20ddbcb2039453cf56221083d4ac64dee46" ]
[ "src/learners/dmaq_qatten_learner.py" ]
[ "# From https://github.com/wjh720/QPLEX/, added here for convenience.\nimport copy\nfrom components.episode_buffer import EpisodeBatch\nfrom modules.mixers.dmaq_general import DMAQer\n# from modules.mixers.dmaq_qatten import DMAQ_QattenMixer\nimport torch.nn.functional as F\nimport torch as th\nfrom torch.optim import RMSprop\nimport numpy as np\n\n\nclass DMAQ_qattenLearner:\n def __init__(self, mac, scheme, logger, args):\n self.args = args\n self.mac = mac\n self.logger = logger\n\n self.params = list(mac.parameters())\n\n self.last_target_update_episode = 0\n\n self.mixer = None\n if args.mixer is not None:\n if args.mixer == \"dmaq\":\n self.mixer = DMAQer(args)\n # elif args.mixer == 'dmaq_qatten':\n # self.mixer = DMAQ_QattenMixer(args)\n else:\n raise ValueError(\"Mixer {} not recognised.\".format(args.mixer))\n self.params += list(self.mixer.parameters())\n self.target_mixer = copy.deepcopy(self.mixer)\n\n self.optimiser = RMSprop(params=self.params, lr=args.lr, alpha=args.optim_alpha, eps=args.optim_eps)\n\n # a little wasteful to deepcopy (e.g. duplicates action selector), but should work for any MAC\n self.target_mac = copy.deepcopy(mac)\n\n self.log_stats_t = -self.args.learner_log_interval - 1\n\n self.n_actions = self.args.n_actions\n\n def sub_train(self, batch: EpisodeBatch, t_env: int, episode_num: int, mac, mixer, optimiser, params,\n show_demo=False, save_data=None):\n # Get the relevant quantities\n rewards = batch[\"reward\"][:, :-1]\n actions = batch[\"actions\"][:, :-1]\n terminated = batch[\"terminated\"][:, :-1].float()\n mask = batch[\"filled\"][:, :-1].float()\n mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])\n avail_actions = batch[\"avail_actions\"]\n actions_onehot = batch[\"actions_onehot\"][:, :-1]\n\n # Calculate estimated Q-Values\n mac_out = []\n mac.init_hidden(batch.batch_size)\n for t in range(batch.max_seq_length):\n agent_outs = mac.forward(batch, t=t)\n mac_out.append(agent_outs)\n mac_out = th.stack(mac_out, dim=1) # Concat over time\n\n # Pick the Q-Values for the actions taken by each agent\n chosen_action_qvals = th.gather(mac_out[:, :-1], dim=3, index=actions).squeeze(3) # Remove the last dim\n\n x_mac_out = mac_out.clone().detach()\n x_mac_out[avail_actions == 0] = -9999999\n max_action_qvals, max_action_index = x_mac_out[:, :-1].max(dim=3)\n\n max_action_index = max_action_index.detach().unsqueeze(3)\n is_max_action = (max_action_index == actions).int().float()\n\n if show_demo:\n q_i_data = chosen_action_qvals.detach().cpu().numpy()\n q_data = (max_action_qvals - chosen_action_qvals).detach().cpu().numpy()\n # self.logger.log_stat('agent_1_%d_q_1' % save_data[0], np.squeeze(q_data)[0], t_env)\n # self.logger.log_stat('agent_2_%d_q_2' % save_data[1], np.squeeze(q_data)[1], t_env)\n\n # Calculate the Q-Values necessary for the target\n target_mac_out = []\n self.target_mac.init_hidden(batch.batch_size)\n for t in range(batch.max_seq_length):\n target_agent_outs = self.target_mac.forward(batch, t=t)\n target_mac_out.append(target_agent_outs)\n\n # We don't need the first timesteps Q-Value estimate for calculating targets\n target_mac_out = th.stack(target_mac_out[1:], dim=1) # Concat across time\n\n # Mask out unavailable actions\n target_mac_out[avail_actions[:, 1:] == 0] = -9999999\n\n # Max over target Q-Values\n if self.args.double_q:\n # Get actions that maximise live Q (for double q-learning)\n mac_out_detach = mac_out.clone().detach()\n mac_out_detach[avail_actions == 0] = -9999999\n cur_max_actions = mac_out_detach[:, 
1:].max(dim=3, keepdim=True)[1]\n target_chosen_qvals = th.gather(target_mac_out, 3, cur_max_actions).squeeze(3)\n target_max_qvals = target_mac_out.max(dim=3)[0]\n target_next_actions = cur_max_actions.detach()\n\n cur_max_actions_onehot = th.zeros(cur_max_actions.squeeze(3).shape + (self.n_actions,))\n cur_max_actions_onehot = cur_max_actions_onehot.scatter_(3, cur_max_actions, 1)\n else:\n # Calculate the Q-Values necessary for the target\n target_mac_out = []\n self.target_mac.init_hidden(batch.batch_size)\n for t in range(batch.max_seq_length):\n target_agent_outs = self.target_mac.forward(batch, t=t)\n target_mac_out.append(target_agent_outs)\n # We don't need the first timesteps Q-Value estimate for calculating targets\n target_mac_out = th.stack(target_mac_out[1:], dim=1) # Concat across time\n target_max_qvals = target_mac_out.max(dim=3)[0]\n\n # Mix\n if mixer is not None:\n if self.args.mixer == \"dmaq_qatten\":\n ans_chosen, q_attend_regs, head_entropies = \\\n mixer(chosen_action_qvals, batch[\"state\"][:, :-1], is_v=True)\n ans_adv, _, _ = mixer(chosen_action_qvals, batch[\"state\"][:, :-1], actions=actions_onehot,\n max_q_i=max_action_qvals, is_v=False)\n chosen_action_qvals = ans_chosen + ans_adv\n else:\n ans_chosen = mixer(chosen_action_qvals, batch[\"state\"][:, :-1], is_v=True)\n ans_adv = mixer(chosen_action_qvals, batch[\"state\"][:, :-1], actions=actions_onehot,\n max_q_i=max_action_qvals, is_v=False)\n chosen_action_qvals = ans_chosen + ans_adv\n\n if self.args.double_q:\n if self.args.mixer == \"dmaq_qatten\":\n target_chosen, _, _ = self.target_mixer(target_chosen_qvals, batch[\"state\"][:, 1:], is_v=True)\n target_adv, _, _ = self.target_mixer(target_chosen_qvals, batch[\"state\"][:, 1:],\n actions=cur_max_actions_onehot,\n max_q_i=target_max_qvals, is_v=False)\n target_max_qvals = target_chosen + target_adv\n else:\n target_chosen = self.target_mixer(target_chosen_qvals, batch[\"state\"][:, 1:], is_v=True)\n target_adv = self.target_mixer(target_chosen_qvals, batch[\"state\"][:, 1:],\n actions=cur_max_actions_onehot,\n max_q_i=target_max_qvals, is_v=False)\n target_max_qvals = target_chosen + target_adv\n else:\n target_max_qvals = self.target_mixer(target_max_qvals, batch[\"state\"][:, 1:], is_v=True)\n\n # Calculate 1-step Q-Learning targets\n targets = rewards + self.args.gamma * (1 - terminated) * target_max_qvals\n\n if show_demo:\n tot_q_data = chosen_action_qvals.detach().cpu().numpy()\n tot_target = targets.detach().cpu().numpy()\n print('action_pair_%d_%d' % (save_data[0], save_data[1]), np.squeeze(q_data[:, 0]),\n np.squeeze(q_i_data[:, 0]), np.squeeze(tot_q_data[:, 0]), np.squeeze(tot_target[:, 0]))\n self.logger.log_stat('action_pair_%d_%d' % (save_data[0], save_data[1]),\n np.squeeze(tot_q_data[:, 0]), t_env)\n return\n\n # Td-error\n td_error = (chosen_action_qvals - targets.detach())\n\n mask = mask.expand_as(td_error)\n\n # 0-out the targets that came from padded data\n masked_td_error = td_error * mask\n\n # Normal L2 loss, take mean over actual data\n if self.args.mixer == \"dmaq_qatten\":\n loss = (masked_td_error ** 2).sum() / mask.sum() + q_attend_regs\n else:\n loss = (masked_td_error ** 2).sum() / mask.sum()\n\n masked_hit_prob = th.mean(is_max_action, dim=2) * mask\n hit_prob = masked_hit_prob.sum() / mask.sum()\n\n # Optimise\n optimiser.zero_grad()\n loss.backward()\n grad_norm = th.nn.utils.clip_grad_norm_(params, self.args.grad_norm_clip)\n optimiser.step()\n\n if t_env - self.log_stats_t >= self.args.learner_log_interval:\n 
self.logger.log_stat(\"loss\", loss.item(), t_env)\n self.logger.log_stat(\"hit_prob\", hit_prob.item(), t_env)\n self.logger.log_stat(\"grad_norm\", grad_norm, t_env)\n mask_elems = mask.sum().item()\n self.logger.log_stat(\"td_error_abs\", (masked_td_error.abs().sum().item() / mask_elems), t_env)\n self.logger.log_stat(\"q_taken_mean\",\n (chosen_action_qvals * mask).sum().item() / (mask_elems * self.args.n_agents), t_env)\n self.logger.log_stat(\"target_mean\", (targets * mask).sum().item() / (mask_elems * self.args.n_agents),\n t_env)\n self.log_stats_t = t_env\n\n def train(self, batch: EpisodeBatch, t_env: int, episode_num: int, show_demo=False, save_data=None):\n self.sub_train(batch, t_env, episode_num, self.mac, self.mixer, self.optimiser, self.params,\n show_demo=show_demo, save_data=save_data)\n if (episode_num - self.last_target_update_episode) / self.args.target_update_interval >= 1.0:\n self._update_targets()\n self.last_target_update_episode = episode_num\n\n def _update_targets(self):\n self.target_mac.load_state(self.mac)\n if self.mixer is not None:\n self.target_mixer.load_state_dict(self.mixer.state_dict())\n self.logger.console_logger.info(\"Updated target network\")\n\n def cuda(self):\n self.mac.cuda()\n self.target_mac.cuda()\n if self.mixer is not None:\n self.mixer.cuda()\n self.target_mixer.cuda()\n\n def save_models(self, path):\n self.mac.save_models(path)\n if self.mixer is not None:\n th.save(self.mixer.state_dict(), \"{}/mixer.th\".format(path))\n th.save(self.optimiser.state_dict(), \"{}/opt.th\".format(path))\n\n def load_models(self, path):\n self.mac.load_models(path)\n # Not quite right but I don't want to save target networks\n self.target_mac.load_models(path)\n if self.mixer is not None:\n self.mixer.load_state_dict(th.load(\"{}/mixer.th\".format(path), map_location=lambda storage, loc: storage))\n self.target_mixer.load_state_dict(th.load(\"{}/mixer.th\".format(path),\n map_location=lambda storage, loc: storage))\n self.optimiser.load_state_dict(th.load(\"{}/opt.th\".format(path), map_location=lambda storage, loc: storage))\n" ]
[ [ "torch.stack", "torch.optim.RMSprop", "torch.gather", "torch.nn.utils.clip_grad_norm_", "numpy.squeeze", "torch.mean" ] ]
XTJ21/ArknightsAutoHelper
[ "74ea7b232d7eed6cc783b27762197cdb68c368f9" ]
[ "imgreco/itemdb.py" ]
[ "from dataclasses import dataclass\nimport os\nimport logging\nimport json\nfrom functools import lru_cache\n\nimport cv2\nimport numpy as np\n\nimport app\nfrom util import cvimage as Image\n\nlogger = logging.getLogger(__name__)\n\nnet_file = app.cache_path / 'ark_material.onnx'\nindex_file = app.cache_path / 'index_itemid_relation.json'\nmodel_timestamp = 0\n\n@dataclass\nclass DnnItemRecord:\n class_id: int\n item_id: str\n item_name: str\n item_type: str\n\ndnn_items_by_class : dict[int, DnnItemRecord] = {}\ndnn_items_by_item_id : dict[str, DnnItemRecord] = {}\ndnn_items_by_item_name : dict[str, DnnItemRecord] = {}\n\n@lru_cache(1)\ndef load_net():\n update_index_info()\n with open(net_file, 'rb') as f:\n data = f.read()\n net = cv2.dnn.readNetFromONNX(data)\n return net\n\n\n@lru_cache(1)\ndef _update_index_info():\n with open(index_file, 'r', encoding='utf-8') as f:\n data = json.load(f)\n global model_timestamp\n model_timestamp = data['time']\n idx2id, id2idx, idx2name, idx2type = data['idx2id'], data['id2idx'], data['idx2name'], data['idx2type']\n dnn_items_by_class.clear()\n dnn_items_by_item_id.clear()\n dnn_items_by_item_name.clear()\n for index, item_id in enumerate(idx2id):\n record = DnnItemRecord(index, item_id, idx2name[index], idx2type[index])\n dnn_items_by_class[index] = record\n dnn_items_by_item_id[item_id] = record\n dnn_items_by_item_name[idx2name[index]] = record\n\ndef update_index_info():\n update_net()\n return _update_index_info()\n\ndef retry_get(url, max_retry=5, timeout=3):\n import requests\n c = 0\n ex = None\n while c < max_retry:\n try:\n return requests.get(url, timeout=timeout)\n except Exception as e:\n c += 1\n ex = e\n raise ex\n\n\ndef update_net():\n local_cache_time = 0\n import time\n os.makedirs(os.path.dirname(index_file), exist_ok=True)\n try:\n stat = os.stat(index_file)\n cache_mtime = stat.st_mtime\n with open(index_file, 'r', encoding='utf-8') as f:\n local_rel = json.load(f)\n model_gen_time = local_rel['time'] / 1000\n now = time.time()\n logger.debug(f'{cache_mtime=} {now=} {model_gen_time=}')\n if cache_mtime > model_gen_time and now - cache_mtime < 60 * 60 * 8:\n return\n except:\n pass\n logger.info('检查物品识别模型更新')\n resp = retry_get('https://cdn.jsdelivr.net/gh/triwinds/arknights-ml@latest/inventory/index_itemid_relation.json')\n remote_relation = resp.json()\n if remote_relation['time'] > local_cache_time:\n from datetime import datetime\n logger.info(f'更新物品识别模型, 模型生成时间: {datetime.fromtimestamp(remote_relation[\"time\"]/1000).strftime(\"%Y-%m-%d %H:%M:%S\")}')\n with open(index_file, 'w', encoding='utf-8') as f:\n json.dump(remote_relation, f, ensure_ascii=False)\n resp = retry_get('https://cdn.jsdelivr.net/gh/triwinds/arknights-ml@latest/inventory/ark_material.onnx')\n with open(net_file, 'wb') as f:\n f.write(resp.content)\n _update_index_info.cache_clear()\n else:\n os.utime(index_file, None)\n\n\ndef _update_mat_collection(collection, name, img):\n global itemmask\n if img.size != (48, 48):\n img = img.resize((48, 48), Image.BILINEAR)\n mat = np.array(img)\n mat[itemmask] = 0\n collection[name] = mat\n\n\nresources_known_items = {}\n\n\ndef load():\n from . import resources\n from . 
import minireco\n resource_files = [(x[:-4], resources.resolve('items/' + x)) for x in resources.get_entries('items')[1] if x.endswith('.png')]\n global resources_itemmats, num_recognizer, itemmask, resources_known_items\n resources_itemmats = {}\n itemmask = np.asarray(resources.load_image('common/itemmask.png', '1'))\n for name, index in resource_files:\n img = resources.load_image(index, 'RGB')\n _update_mat_collection(resources_itemmats, name, img)\n\n model = resources.load_pickle('minireco/NotoSansCJKsc-DemiLight-nums.dat')\n reco = minireco.MiniRecognizer(model, minireco.compare_ccoeff)\n num_recognizer=reco\n\n for prefix in ['items', 'items/archive', 'items/not-loot']:\n _, files = resources.get_entries(prefix)\n for filename in files:\n itemname = filename[:-4] if filename.endswith('.png') else filename\n path = prefix + '/' + filename\n resources_known_items[itemname] = resources.resolve(path)\n update_extra_items()\n\n\ndef update_extra_items():\n import app\n\n new_mtime = os.path.getmtime(app.extra_items_path)\n\n if new_mtime <= update_extra_items.old_mtime:\n return\n from . import resources\n from glob import glob\n extra_files = [(os.path.basename(x)[:-4], resources.FileSystemIndex(x)) for x in glob(os.path.join(\n app.extra_items_path, '*.png'))]\n extra_known_items = {}\n extra_itemmats = {}\n for key, value in extra_files:\n for name, index in extra_files:\n img = resources.load_image(index, 'RGB')\n _update_mat_collection(extra_itemmats, name, img)\n extra_known_items[key] = value\n global itemmats\n itemmats = {}\n itemmats.update(resources_itemmats)\n itemmats.update(extra_itemmats)\n global all_known_items\n all_known_items = {}\n all_known_items.update(resources_known_items)\n all_known_items.update(extra_known_items)\n update_extra_items.old_mtime = new_mtime\n\nupdate_extra_items.old_mtime = 0\n\ndef add_item(image) -> str:\n import os\n import time\n import app\n date = time.strftime('%Y-%m-%d')\n index = add_item.last_index + 1\n while True:\n name = '未知物品-%s-%d' % (date, index)\n filename = app.extra_items_path.joinpath(name + '.png')\n if not os.path.exists(filename):\n break\n index += 1\n add_item.last_index = index\n image.save(filename)\n update_extra_items()\n return name\n\nadd_item.last_index = 0\n\nload()\n" ]
[ [ "numpy.array" ] ]
evangibson/lights_in_space
[ "de84d9e4f80c1a3fc4471afd370d69a9d78edc06" ]
[ "support/ga.py" ]
[ "# Import libraries\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport warnings\nimport copy\nimport pandas as pd\n\n\nclass dna:\n \"\"\"Makes up the components of a population\"\"\"\n def __init__(self,\n chromosome_length=9,\n number_of_genes=2,\n upper_range=6,\n lower_range=-6):\n self.L = chromosome_length\n self.G = number_of_genes\n self.b = upper_range\n self.a = lower_range\n\n self.full = None # Stand in for the genome that can be instantiated in spawn or otherwise\n\n self.precision = (self.b - self.a) / ((2 ** self.L) - 1) # Can be calculated up front to save function calling\n\n self.score = None\n\n def spawn(self):\n \"\"\"Generates a new random set of chromosomes\"\"\"\n self.full = np.random.randint(2, # Sets 0,1 binary randomization\n size=(self.G,\n self.L)) # creates chromosones of length L for the number of genes specified\n\n def bit_summation(self):\n \"\"\"Performs left to right summations for each chromosome\"\"\"\n\n x = lambda b, i: b * (2 ** i) # Calculator for single bit\n\n self.full_summations = dict()\n\n for ix in range(0, self.G): # need to happen for every gene\n power_list = list()\n bitplaceholder = 0\n for g in self.full[ix]: # Enter the bits in a gene\n power_list.append(x(g, bitplaceholder))\n bitplaceholder += 1\n\n self.full_summations.update({ix: sum(power_list)})\n\n def decode_chromosomes(self):\n \"\"\"Generates decoded values for input into fitness function\"\"\"\n self.gene_decoded = dict()\n\n self.eval_values = list()\n\n for s in list(self.full_summations.keys()):\n de = self.full_summations[s] * self.precision + self.a\n\n # For advanced mapping and tracing\n self.gene_decoded.update({s: de})\n\n # For brute force evaluation using \"evaluate\"\n self.eval_values.append(de)\n\n def evaluate(self, evaluation_function):\n \"\"\"Evaluation function should be passed as a lambda function that tolerates the decoded chromosome values\n Only accepts one argument. Will need multi item lambda function to handle more than one value\"\"\"\n # Arguments will be passed to eval function from left to right. Fitness function will need to index a list\n self.fitness = evaluation_function(self.eval_values)\n\n def mutate(self,\n chromosome_to_call,\n bit_to_flip):\n \"\"\"Flips a bit to its opposite. Will not determine randomness of procedure\"\"\"\n if self.full[chromosome_to_call][bit_to_flip] == 0:\n self.full[chromosome_to_call][bit_to_flip] = 1\n else:\n self.full[chromosome_to_call][bit_to_flip] = 0\n\n\nclass population:\n \"\"\"Container for dna objects\"\"\"\n def __init__(self,\n fitness_function, # A lambda function that tolerates length G\n dictionary_of_dna_objects=None,\n # pass an existing population of dna objects if you don't want to generate a new one\n start_index=None,\n max_pop_size=100, # can be set to the length of the dictionary of_dna_objects that are passed\n asc=True, # If true, will place low scores as winners\n **kwargs): # used to pass dna argument\n\n self.asc = asc\n if start_index is None:\n self.start = 0\n else:\n self.start = start_index\n\n if dictionary_of_dna_objects is None:\n print(\"Generate new population of size {}\".format(max_pop_size))\n\n self.pool = dict()\n for i in range(self.start, self.start + max_pop_size):\n try:\n temp_member = dna(**kwargs)\n\n except:\n warnings.warn(\"Problem instantiating dna child objects. 
Reverting to default parameters\")\n temp_member = dna()\n\n # Stabilize DNA\n temp_member.spawn()\n temp_member.bit_summation()\n temp_member.decode_chromosomes()\n temp_member.evaluate(fitness_function)\n\n # The ids of each random dna object will be the index of the range\n self.pool.update({i: temp_member})\n\n\n else:\n # Keep in mind, ID will have to be assigned if population not generated from scratch\n self.pool = dictionary_of_dna_objects\n\n # Children pool\n try:\n self.child1 = dna(**kwargs)\n self.child1.spawn()\n self.child1.bit_summation()\n self.child1.decode_chromosomes()\n self.child1.evaluate(fitness_function)\n\n self.child2 = dna(**kwargs)\n self.child2.spawn()\n self.child2.bit_summation()\n self.child2.decode_chromosomes()\n self.child1.evaluate(fitness_function)\n\n except:\n warnings.warn(\"Problem instantiating dna child objects. Reverting to default parameters\")\n self.child1 = dna()\n self.child2 = dna()\n\n self.fit_func = fitness_function\n self.max_pop_size = max_pop_size\n\n def rank_population(self):\n \"\"\"Pulls fitness values out of existing pool and casts to a dataframe\"\"\"\n # Generate ordered lists of fitness and ids\n ids = list()\n fits = list()\n for m in list(self.pool.keys()):\n fits.append(self.pool[m].fitness)\n ids.append(m)\n\n self.ranks = pd.DataFrame({\"ID\": ids, \"Score\": fits}).sort_values(\"Score\")\n\n # Generate a ranking column\n self.ranks[\"Placement\"] = self.ranks['Score'].rank(ascending=self.asc)\n\n return self.ranks\n\n def determine_survival(self, clone_function):\n \"\"\"The clone function will act against the variable rank for each member of the population.\n The function should return a 0 or 1:\n 1 - Survive\n 0 - Did not survive to next generation\"\"\"\n self.ranks['Survive'] = self.ranks[\"Placement\"].map(clone_function)\n\n def crossover_breed(self,\n parent_1_id,\n parent_2_id,\n swap_range=None,\n random_cutoff_point=False,\n chromosomes_to_cross=False, # If False, will crossover all during breeding\n crossover_start_point=0,\n crossover_end_point=3):\n\n \"\"\"Produces children as objects. Does not replace any population members\"\"\"\n\n # For each child, instantiate it as a copy of a parent and then overwrite a part of the chromosome with the adjacent parent\n self.child1 = copy.deepcopy(self.pool[parent_1_id])\n self.child2 = copy.deepcopy(self.pool[parent_2_id])\n\n # Loops over all chromosomes in the parent at the same points\n if chromosomes_to_cross is False:\n for chrom_index in range(0, self.child1.G): # Arbitrary selection param\n self.child1.full[chrom_index][crossover_start_point:crossover_end_point] = self.pool[parent_2_id].full[chrom_index][crossover_start_point:crossover_end_point]\n self.child2.full[chrom_index][crossover_start_point:crossover_end_point] = self.pool[parent_1_id].full[chrom_index][crossover_start_point:crossover_end_point]\n\n self.child1.bit_summation()\n self.child1.decode_chromosomes()\n self.child1.evaluate(self.fit_func)\n\n self.child2.bit_summation()\n self.child2.decode_chromosomes()\n self.child2.evaluate(self.fit_func)\n\n else:\n print(\"Breeding aborted.\")\n\n def fill_empty_population(self, **kwargs):\n \"\"\"If population pool < max population, will fill space with new dna members\"\"\"\n return None" ]
[ [ "pandas.DataFrame", "numpy.random.randint" ] ]
Julio-Yanes/NiMARE
[ "36bb05034041998519814b55fe402489147fdd63", "36bb05034041998519814b55fe402489147fdd63" ]
[ "nimare/parcellate/mapbot.py", "nimare/annotate/topic/lda.py" ]
[ "\"\"\"\nMeta-analytic parcellation based on text (MAPBOT).\n\"\"\"\nimport numpy as np\nimport pandas as pd\nfrom sklearn.decomposition import NMF\nfrom scipy.spatial.distance import cdist\nfrom nilearn.masking import apply_mask, unmask\n\nfrom ..base import Parcellator\nfrom ..due import due\nfrom .. import references\n\n\n@due.dcite(references.MAPBOT, description='Introduces the MAPBOT algorithm.')\nclass MAPBOT(Parcellator):\n \"\"\"\n Meta-analytic parcellation based on text (MAPBOT) [1]_.\n\n Parameters\n ----------\n tfidf_df : :obj:`pandas.DataFrame`\n A DataFrame with feature counts for the model. The index is 'id',\n used for identifying studies. Other columns are features (e.g.,\n unigrams and bigrams from Neurosynth), where each value is the number\n of times the feature is found in a given article.\n coordinates_df : :obj:`pandas.DataFrame`\n A DataFrame with a list of foci in the dataset. The index is 'id',\n used for identifying studies. Additional columns include 'i', 'j' and\n 'k' (the matrix indices of the foci in standard space).\n mask : :obj:`str` or :obj:`nibabel.Nifti1.Nifti1Image`\n Mask file or image.\n\n Notes\n -----\n MAPBOT uses both the reported foci for studies, as well as associated term\n weights.\n Here are the steps:\n 1. For each voxel in the mask, identify studies in dataset\n corresponding to that voxel. Selection criteria can be either\n based on a distance threshold (e.g., all studies with foci\n within 5mm of voxel) or based on a minimum number of studies\n (e.g., the 50 studies reporting foci closest to the voxel).\n 2. For each voxel, compute average frequency of each term across\n selected studies. This results in an n_voxels X n_terms frequency\n matrix F.\n 3. Compute n_voxels X n_voxels value matrix V:\n - D = (F.T * F) * ones(F)\n - V = F * D^-.5\n 4. Perform non-negative matrix factorization on value matrix.\n\n Warnings\n --------\n This method is not yet implemented.\n\n References\n ----------\n .. [1] Yuan, Rui, et al. \"MAPBOT: Meta-analytic parcellation based on text,\n and its application to the human thalamus.\" NeuroImage 157 (2017):\n 716-732. https://doi.org/10.1016/j.neuroimage.2017.06.032\n \"\"\"\n def __init__(self, tfidf_df, coordinates_df, mask):\n self.mask = mask\n self.tfidf_df = tfidf_df\n self.coordinates = coordinates_df\n\n def fit(self, target_mask, method='min_distance', r=5, n_exps=50,\n n_parcels=2):\n \"\"\"\n Run MAPBOT parcellation.\n\n Parameters\n ----------\n region_name : :obj:`str`\n Name of region for parcellation.\n n_parcels : :obj:`int`, optional\n Number of parcels to generate for ROI. 
If array_like, each parcel\n number will be evaluated and results for all will be returned.\n Default is 2.\n \"\"\"\n if not isinstance(n_parcels):\n n_parcels = [n_parcels]\n\n # Step 1: Build correlation matrix\n target_data = apply_mask(target_mask, self.mask)\n target_map = unmask(target_data, self.mask)\n target_data = target_map.get_data()\n mask_idx = np.vstack(np.where(target_data))\n n_voxels = mask_idx.shape[1]\n voxel_arr = np.zeros((n_voxels, np.sum(self.mask)))\n del voxel_arr # currently unused\n\n ijk = self.coordinates[['i', 'j', 'k']].values\n temp_df = self.coordinates.copy()\n term_df = pd.DataFrame(columns=self.tfidf_df.columns,\n index=range(n_voxels))\n for i_voxel in range(n_voxels):\n voxel = mask_idx[:, i_voxel]\n temp_df['distance'] = cdist(ijk, voxel)\n\n if method == 'min_studies':\n # number of studies\n temp_df2 = temp_df.groupby('id')[['distance']].min()\n temp_df2 = temp_df2.sort_values(by='distance')\n sel_ids = temp_df2.iloc[:n_exps].index.values\n elif method == 'min_distance':\n # minimum distance\n temp_df2 = temp_df.groupby('id')[['distance']].min()\n sel_ids = temp_df2.loc[temp_df2['distance'] < r].index.values\n\n # Build DT matrix\n voxel_df = self.tfidf_df.loc[self.tfidf_df.index.isin(sel_ids)]\n term_df.loc[i_voxel] = voxel_df.mean(axis=0)\n values = term_df.values\n d = np.dot(np.dot(values.T, values), np.ones((values.shape[0], 1)))\n values_prime = np.dot(values, d**-.5)\n for i_parc in n_parcels:\n model = NMF(n_components=i_parc, init='nndsvd', random_state=0)\n W = model.fit_transform(values_prime)\n H = model.components_\n del W, H # not sure what's next\n", "\"\"\"\nTopic modeling with latent Dirichlet allocation via MALLET.\n\"\"\"\nimport os\nimport os.path as op\nimport shutil\nimport logging\nimport subprocess\n\nimport pandas as pd\n\nfrom ...base import AnnotationModel\nfrom ...utils import get_resource_path\nfrom ...due import due\nfrom ... import references\n\nLGR = logging.getLogger(__name__)\n\n\n@due.dcite(references.LDA, description='Introduces LDA.')\n@due.dcite(references.MALLET, description='Citation for MALLET toolbox')\n@due.dcite(references.LDAMODEL,\n description='First use of LDA for automated annotation of '\n 'neuroimaging literature.')\nclass LDAModel(AnnotationModel):\n \"\"\"\n Perform topic modeling using Latent Dirichlet Allocation [1]_ with the\n Java toolbox MALLET [2]_, as performed in [3]_.\n\n Parameters\n ----------\n text_df : :obj:`pandas.DataFrame`\n A pandas DataFrame with two columns ('id' and 'text') containing\n article text_df.\n n_topics : :obj:`int`, optional\n Number of topics to generate. Default=50.\n n_words : :obj:`int`, optional\n Number of top words to return for each topic. Default=31, based on\n Poldrack et al. (2012). Not used.\n n_iters : :obj:`int`, optional\n Number of iterations to run in training topic model. Default=1000.\n alpha : :obj:`float`, optional\n The Dirichlet prior on the per-document topic distributions.\n Default: 50 / n_topics, based on Poldrack et al. (2012).\n beta : :obj:`float`, optional\n The Dirichlet prior on the per-topic word distribution. Default: 0.001,\n based on Poldrack et al. (2012).\n\n References\n ----------\n .. [1] Blei, David M., Andrew Y. Ng, and Michael I. Jordan. \"Latent\n dirichlet allocation.\" Journal of machine Learning research 3.Jan\n (2003): 993-1022.\n .. [2] McCallum, Andrew Kachites. \"Mallet: A machine learning for language\n toolkit.\" (2002).\n .. [3] Poldrack, Russell A., et al. 
\"Discovering relations between mind,\n brain, and mental disorders using topic mapping.\" PLoS computational\n biology 8.10 (2012): e1002707.\n https://doi.org/10.1371/journal.pcbi.1002707\n \"\"\"\n def __init__(self, text_df, n_topics=50, n_iters=1000, alpha='auto',\n beta=0.001):\n resdir = op.abspath(get_resource_path())\n tempdir = op.join(resdir, 'topic_models')\n text_dir = op.join(tempdir, 'texts')\n if not op.isdir(tempdir):\n os.mkdir(tempdir)\n\n if alpha == 'auto':\n alpha = 50. / n_topics\n elif not isinstance(alpha, float):\n raise ValueError('Argument alpha must be float or \"auto\"')\n\n self.params = {\n 'n_topics': n_topics,\n 'n_iters': n_iters,\n 'alpha': alpha,\n 'beta': beta,\n }\n\n # Check for presence of text files and convert if necessary\n if not op.isdir(text_dir):\n LGR.info('Texts folder not found. Creating text files...')\n os.mkdir(text_dir)\n for id_ in text_df.index.values:\n text = text_df.loc[id_]['text']\n with open(op.join(text_dir, str(id_) + '.txt'), 'w') as fo:\n fo.write(text)\n\n # Run MALLET topic modeling\n LGR.info('Generating topics...')\n mallet_bin = op.join(op.dirname(op.dirname(__file__)),\n 'resources/mallet/bin/mallet')\n import_str = ('{mallet} import-dir '\n '--input {text_dir} '\n '--output {outdir}/topic-input.mallet '\n '--keep-sequence '\n '--remove-stopwords').format(mallet=mallet_bin,\n text_dir=text_dir,\n outdir=tempdir)\n\n train_str = ('{mallet} train-topics '\n '--input {out}/topic-input.mallet '\n '--num-topics {n_topics} '\n '--output-doc-topics {out}/doc_topics.txt '\n '--topic-word-weights-file {out}/topic_word_weights.txt '\n '--num-iterations {n_iters} '\n '--output-model {out}/saved_model.mallet '\n '--random-seed 1 '\n '--alpha {alpha} '\n '--beta {beta}').format(mallet=mallet_bin, out=tempdir,\n n_topics=self.params['n_topics'],\n n_iters=self.params['n_iters'],\n alpha=self.params['alpha'],\n beta=self.params['beta'])\n\n subprocess.call(import_str, shell=True)\n subprocess.call(train_str, shell=True)\n\n # Read in and convert doc_topics and topic_keys.\n topic_names = ['topic_{0:03d}'.format(i) for i in range(self.params['n_topics'])]\n\n # doc_topics: Topic weights for each paper.\n # The conversion here is pretty ugly at the moment.\n # First row should be dropped. First column is row number and can be used\n # as the index.\n # Second column is 'file: /full/path/to/id.txt' <-- Parse to get id.\n # After that, odd columns are topic numbers and even columns are the\n # weights for the topics in the preceding column. 
These columns are sorted\n # on an individual id basis by the weights.\n n_cols = (2 * self.params['n_topics']) + 1\n dt_df = pd.read_csv(op.join(tempdir, 'doc_topics.txt'),\n delimiter='\\t', skiprows=1, header=None,\n index_col=0)\n dt_df = dt_df[dt_df.columns[:n_cols]]\n\n # Get ids from filenames\n dt_df[1] = dt_df[1].apply(self._clean_str)\n\n # Put weights (even cols) and topics (odd cols) into separate dfs.\n weights_df = dt_df[dt_df.columns[2::2]]\n weights_df.index = dt_df[1]\n weights_df.columns = range(self.params['n_topics'])\n\n topics_df = dt_df[dt_df.columns[1::2]]\n topics_df.index = dt_df[1]\n topics_df.columns = range(self.params['n_topics'])\n\n # Sort columns in weights_df separately for each row using topics_df.\n sorters_df = topics_df.apply(self._get_sort, axis=1)\n weights = weights_df.as_matrix()\n sorters = sorters_df.as_matrix()\n # there has to be a better way to do this.\n for i in range(sorters.shape[0]):\n weights[i, :] = weights[i, sorters[i, :]]\n\n # Define topic names (e.g., topic_000)\n p_topic_g_doc_df = pd.DataFrame(columns=topic_names, data=weights,\n index=dt_df[1])\n p_topic_g_doc_df.index.name = 'id'\n self.p_topic_g_doc = p_topic_g_doc_df.values\n\n # Topic word weights\n p_word_g_topic_df = pd.read_csv(op.join(tempdir, 'topic_word_weights.txt'),\n dtype=str, keep_default_na=False,\n na_values=[], sep='\\t', header=None,\n names=['topic', 'word', 'weight'])\n p_word_g_topic_df['weight'] = p_word_g_topic_df['weight'].astype(float)\n p_word_g_topic_df['topic'] = p_word_g_topic_df['topic'].astype(int)\n p_word_g_topic_df = p_word_g_topic_df.pivot(index='topic',\n columns='word',\n values='weight')\n p_word_g_topic_df = p_word_g_topic_df.div(p_word_g_topic_df.sum(axis=1),\n axis=0)\n self.p_word_g_topic = p_word_g_topic_df.values\n\n # Remove all temporary files (text files, model, and outputs).\n shutil.rmtree(tempdir)\n\n def _clean_str(self, string):\n return op.basename(op.splitext(string)[0])\n\n def _get_sort(self, lst):\n return [i[0] for i in sorted(enumerate(lst), key=lambda x: x[1])]\n" ]
[ [ "numpy.dot", "numpy.sum", "numpy.ones", "numpy.where", "sklearn.decomposition.NMF", "scipy.spatial.distance.cdist" ], [ "pandas.DataFrame" ] ]
TheCodez/pytorch-LiLaNet
[ "8a39326ad8c2150ac5cb4dfa9b7ae2a4ef4a91e8" ]
[ "train_kitti.py" ]
[ "import os\nimport warnings\nfrom argparse import ArgumentParser\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom ignite.contrib.handlers import ProgressBar\nfrom ignite.contrib.handlers.tensorboard_logger import *\nfrom ignite.engine import Events, Engine\nfrom ignite.metrics import RunningAverage, Loss, ConfusionMatrix, IoU\nfrom ignite.utils import convert_tensor\nfrom torch.utils.data import DataLoader\n\nfrom lilanet.datasets import KITTI, Normalize, Compose, RandomHorizontalFlip\nfrom lilanet.datasets.transforms import ToTensor\nfrom lilanet.model import LiLaNet\nfrom lilanet.utils import save\n\n\ndef get_data_loaders(data_dir, batch_size, val_batch_size, num_workers):\n normalize = Normalize(mean=KITTI.mean(), std=KITTI.std())\n transforms = Compose([\n RandomHorizontalFlip(),\n ToTensor(),\n normalize\n ])\n\n val_transforms = Compose([\n ToTensor(),\n normalize\n ])\n\n train_loader = DataLoader(KITTI(root=data_dir, split='train', transform=transforms),\n batch_size=batch_size, shuffle=True, num_workers=num_workers, pin_memory=True)\n\n val_loader = DataLoader(KITTI(root=data_dir, split='val', transform=val_transforms),\n batch_size=val_batch_size, shuffle=False, num_workers=num_workers, pin_memory=True)\n\n return train_loader, val_loader\n\n\ndef run(args):\n if args.seed is not None:\n torch.manual_seed(args.seed)\n\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n num_classes = KITTI.num_classes()\n model = LiLaNet(num_classes)\n\n device_count = torch.cuda.device_count()\n if device_count > 1:\n print(\"Using %d GPU(s)\" % device_count)\n model = nn.DataParallel(model)\n args.batch_size = device_count * args.batch_size\n args.val_batch_size = device_count * args.val_batch_size\n\n model = model.to(device)\n\n train_loader, val_loader = get_data_loaders(args.dataset_dir, args.batch_size, args.val_batch_size,\n args.num_workers)\n\n criterion = nn.CrossEntropyLoss(weight=KITTI.class_weights()).to(device)\n optimizer = optim.Adam(model.parameters(), lr=args.lr)\n\n if args.resume:\n if os.path.isfile(args.resume):\n print(\"Loading checkpoint '{}'\".format(args.resume))\n checkpoint = torch.load(args.resume)\n args.start_epoch = checkpoint['epoch']\n model.load_state_dict(checkpoint['model'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n print(\"Loaded checkpoint '{}' (Epoch {})\".format(args.resume, checkpoint['epoch']))\n else:\n print(\"No checkpoint found at '{}'\".format(args.resume))\n\n def _prepare_batch(batch, non_blocking=True):\n distance, reflectivity, target = batch\n\n return (convert_tensor(distance, device=device, non_blocking=non_blocking),\n convert_tensor(reflectivity, device=device, non_blocking=non_blocking),\n convert_tensor(target, device=device, non_blocking=non_blocking))\n\n def _update(engine, batch):\n model.train()\n\n if engine.state.iteration % args.grad_accum == 0:\n optimizer.zero_grad()\n distance, reflectivity, target = _prepare_batch(batch)\n pred = model(distance, reflectivity)\n loss = criterion(pred, target) / args.grad_accum\n loss.backward()\n if engine.state.iteration % args.grad_accum == 0:\n optimizer.step()\n\n return loss.item()\n\n trainer = Engine(_update)\n\n # attach running average metrics\n RunningAverage(output_transform=lambda x: x).attach(trainer, 'loss')\n\n # attach progress bar\n pbar = ProgressBar(persist=True)\n pbar.attach(trainer, metric_names=['loss'])\n\n def _inference(engine, batch):\n model.eval()\n with torch.no_grad():\n distance, reflectivity, 
target = _prepare_batch(batch)\n pred = model(distance, reflectivity)\n\n return pred, target\n\n evaluator = Engine(_inference)\n cm = ConfusionMatrix(num_classes)\n IoU(cm, ignore_index=0).attach(evaluator, 'IoU')\n Loss(criterion).attach(evaluator, 'loss')\n\n pbar2 = ProgressBar(persist=True, desc='Eval Epoch')\n pbar2.attach(evaluator)\n\n def _global_step_transform(engine, event_name):\n if trainer.state is not None:\n return trainer.state.iteration\n else:\n return 1\n\n tb_logger = TensorboardLogger(args.log_dir)\n tb_logger.attach(trainer,\n log_handler=OutputHandler(tag='training',\n metric_names=['loss']),\n event_name=Events.ITERATION_COMPLETED)\n\n tb_logger.attach(evaluator,\n log_handler=OutputHandler(tag='validation',\n metric_names=['loss', 'IoU'],\n global_step_transform=_global_step_transform),\n event_name=Events.EPOCH_COMPLETED)\n\n @trainer.on(Events.STARTED)\n def initialize(engine):\n if args.resume:\n engine.state.epoch = args.start_epoch\n\n @evaluator.on(Events.EPOCH_COMPLETED)\n def save_checkpoint(engine):\n epoch = trainer.state.epoch if trainer.state is not None else 1\n iou = engine.state.metrics['IoU'] * 100.0\n mean_iou = iou.mean()\n\n name = 'epoch{}_mIoU={:.1f}.pth'.format(epoch, mean_iou)\n file = {'model': model.state_dict(), 'epoch': epoch, 'optimizer': optimizer.state_dict(),\n 'args': args}\n\n save(file, args.output_dir, 'checkpoint_{}'.format(name))\n save(model.state_dict(), args.output_dir, 'model_{}'.format(name))\n\n @trainer.on(Events.EPOCH_COMPLETED)\n def run_validation(engine):\n pbar.log_message(\"Start Validation - Epoch: [{}/{}]\".format(engine.state.epoch, engine.state.max_epochs))\n evaluator.run(val_loader)\n metrics = evaluator.state.metrics\n loss = metrics['loss']\n iou = metrics['IoU'] * 100.0\n mean_iou = iou.mean()\n\n iou_text = ', '.join(['{}: {:.1f}'.format(KITTI.classes[i + 1].name, v) for i, v in enumerate(iou.tolist())])\n pbar.log_message(\"Validation results - Epoch: [{}/{}]: Loss: {:.2e}\\n IoU: {}\\n mIoU: {:.1f}\"\n .format(engine.state.epoch, engine.state.max_epochs, loss, iou_text, mean_iou))\n\n @trainer.on(Events.EXCEPTION_RAISED)\n def handle_exception(engine, e):\n if isinstance(e, KeyboardInterrupt) and (engine.state.iteration > 1):\n engine.terminate()\n warnings.warn(\"KeyboardInterrupt caught. 
Exiting gracefully.\")\n\n name = 'epoch{}_exception.pth'.format(trainer.state.epoch)\n file = {'model': model.state_dict(), 'epoch': trainer.state.epoch, 'optimizer': optimizer.state_dict(),\n 'args': args}\n\n save(file, args.output_dir, 'checkpoint_{}'.format(name))\n save(model.state_dict(), args.output_dir, 'model_{}'.format(name))\n else:\n raise e\n\n if args.eval_on_start:\n print(\"Start validation\")\n evaluator.run(val_loader, max_epochs=1)\n\n print(\"Start training\")\n trainer.run(train_loader, max_epochs=args.epochs)\n tb_logger.close()\n\n\nif __name__ == '__main__':\n parser = ArgumentParser('LiLaNet with PyTorch')\n parser.add_argument('--batch-size', type=int, default=10,\n help='input batch size for training')\n parser.add_argument('--val-batch-size', type=int, default=10,\n help='input batch size for validation')\n parser.add_argument('--num-workers', type=int, default=4,\n help='number of workers')\n parser.add_argument('--epochs', type=int, default=200,\n help='number of epochs to train')\n parser.add_argument('--lr', type=float, default=1e-3,\n help='learning rate')\n parser.add_argument('--seed', type=int, default=123,\n help='manual seed')\n parser.add_argument('--output-dir', default='checkpoints',\n help='directory to save model checkpoints')\n parser.add_argument('--resume', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\n parser.add_argument('--start-epoch', default=0, type=int, metavar='N',\n help='manual epoch number (useful on restarts)')\n parser.add_argument('--log-interval', type=int, default=10,\n help='how many batches to wait before logging training status')\n parser.add_argument(\"--log-dir\", type=str, default=\"logs\",\n help=\"log directory for Tensorboard log output\")\n parser.add_argument(\"--dataset-dir\", type=str, default=\"data/kitti\",\n help=\"location of the dataset\")\n parser.add_argument(\"--eval-on-start\", type=bool, default=False,\n help=\"evaluate before training\")\n parser.add_argument('--grad-accum', type=int, default=1,\n help='grad accumulation')\n\n run(parser.parse_args())\n" ]
[ [ "torch.no_grad", "torch.cuda.device_count", "torch.manual_seed", "torch.cuda.is_available", "torch.load", "torch.nn.DataParallel" ] ]
jichilen/frcon
[ "f8646d39eae2fc648c7359aed63e89c85fee81d6" ]
[ "lib/roi_data_layer/minibatch.py" ]
[ "# --------------------------------------------------------\n# Fast R-CNN\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ross Girshick and Xinlei Chen\n# --------------------------------------------------------\n\n\"\"\"Compute minibatch blobs for training a Fast R-CNN network.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport numpy.random as npr\nimport cv2\nfrom model.config import cfg\nfrom utils.blob import prep_im_for_blob, im_list_to_blob\n\ndef get_minibatch(roidb, num_classes):\n \"\"\"Given a roidb, construct a minibatch sampled from it.\"\"\"\n num_images = len(roidb)\n # Sample random scales to use for each image in this batch\n random_scale_inds = npr.randint(0, high=len(cfg.TRAIN.SCALES),\n size=num_images)\n assert(cfg.TRAIN.BATCH_SIZE % num_images == 0), \\\n 'num_images ({}) must divide BATCH_SIZE ({})'. \\\n format(num_images, cfg.TRAIN.BATCH_SIZE)\n\n # Get the input image blob, formatted for caffe\n im_blob, im_scales = _get_image_blob(roidb, random_scale_inds)\n\n blobs = {'data': im_blob}\n\n assert len(im_scales) == 1, \"Single batch only\"\n assert len(roidb) == 1, \"Single batch only\"\n \n # gt boxes: (x1, y1, x2, y2, cls)\n ingt_inds = np.where(roidb[0]['ingt_classes'] != 0)[0]\n ingt_boxes = np.empty((len(ingt_inds), 5), dtype=np.float32)\n ingt_boxes[:, 0] = roidb[0]['ingt_classes'][ingt_inds]\n ingt_boxes[:, 1:5] = roidb[0]['inboxes'][ingt_inds, :] * im_scales[0]\n blobs['ingt_boxes'] = ingt_boxes\n if cfg.TRAIN.USE_ALL_GT:#Ture\n # Include all ground truth boxes\n gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]\n else:\n # For the COCO ground truth boxes, exclude the ones that are ''iscrowd'' \n gt_inds = np.where(roidb[0]['gt_classes'] != 0 & np.all(roidb[0]['gt_overlaps'].toarray() > -1.0, axis=1))[0]\n gt_boxes = np.empty((len(gt_inds), 5), dtype=np.float32)\n gt_boxes[:, 0:4] = roidb[0]['boxes'][gt_inds, :] * im_scales[0]\n gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]\n blobs['gt_boxes'] = gt_boxes\n blobs['im_info'] = np.array(\n [im_blob.shape[1], im_blob.shape[2], im_scales[0]],\n dtype=np.float32)\n\n return blobs\n\ndef _get_image_blob(roidb, scale_inds):\n \"\"\"Builds an input blob from the images in the roidb at the specified\n scales.\n \"\"\"\n num_images = len(roidb)\n processed_ims = []\n im_scales = []\n for i in range(num_images):\n im = cv2.imread(roidb[i]['image'])\n if roidb[i]['flipped']:\n im = im[:, ::-1, :]\n target_size = cfg.TRAIN.SCALES[scale_inds[i]]\n im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size,\n cfg.TRAIN.MAX_SIZE)\n im_scales.append(im_scale)\n processed_ims.append(im)\n\n # Create a blob to hold the input images\n blob = im_list_to_blob(processed_ims)\n\n return blob, im_scales\n" ]
[ [ "numpy.where", "numpy.array" ] ]
marcodemutti/SofAr-project
[ "6e807072f8567490cf5b4ebcf81d37ab5e8a4109" ]
[ "Math/math_pkg/scripts/J_computations.py" ]
[ "import numpy as np\n\ndef geometric_vectors(T_abs):\n \"\"\"!\n Computes the vectors needed to compute geometric jacobian.\n @param Tabs: the transformation matrices from joint to 0 frame in current configuration\n @return geom_v: geometric vectors exctracted from Tabs that allow to compute the jacobian.\n \"\"\"\n r = []\n k = []\n geom_v = []\n\n n_matrices = len(T_abs)\n\n for i in range(n_matrices-1):\n tmp_k = np.array([[T_abs[i][0][2], T_abs[i][1][2], T_abs[i][2][2]]])\n tmp_k = np.transpose(tmp_k)\n k.append(tmp_k)\n\n tmp_r = np.array([[T_abs[n_matrices-1][0][3] - T_abs[i][0][3], T_abs[n_matrices-1][1][3] - T_abs[i][1][3], T_abs[n_matrices-1][2][3] - T_abs[i][2][3]]])\n tmp_r = np.transpose(tmp_r)\n r.append(tmp_r)\n\n geom_v.append(k)\n geom_v.append(r)\n\n return geom_v\n\ndef jacob(k, r, n_joints, info):\n \"\"\"!\n Computes the jacobian matrix given the geometric vectors, number of joints and info.\n @param k: versors of axis z of the joints projected on 0.\n @param r: distance between joints and e.e. projected on 0.\n @param n_joints: explains it self.\n @param info: 1->revolute, 0->prismatic. In case there is a change in the serial chain the algorithm still works.\n @return J: jacobian matrix.\n \"\"\"\n \n Ja = np.array([[],\n [],\n []])\n Jl = np.array([[],\n [],\n []])\n\n for i in range(n_joints):\n if info[i] == 1:\n Ja = np.concatenate((Ja, k[i]), axis = 1)\n kx = k[i][0][0]\n ky = k[i][1][0]\n kz = k[i][2][0]\n k_skew = np.array([[0, -kz, ky],\n [kz, 0, -kx],\n [-ky, kx, 0]])\n l_column = np.dot(k_skew, r[i])\n Jl = np.concatenate((Jl, l_column), axis = 1)\n\n## else:\n## zero = np.array([[0],\n## [0],\n## [0]])\n## Ja = np.concatenate((Ja, zero), axis = 1)\n## Jl = np.concatenate((Jl, k[i]), axis = 1)\n J = np.concatenate((Jl, Ja), axis = 0)\n\n return J\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.dot", "numpy.transpose" ] ]
sbelenki/fastMRI
[ "9a359ffe340e9265491744e381d92241b36a6455", "9a359ffe340e9265491744e381d92241b36a6455" ]
[ "banding_removal/fastmri/model/classifiers/resnet_r1_simple.py", "banding_removal/fastmri/transforms/kspace.py" ]
[ "\"\"\"\nCopyright (c) Facebook, Inc. and its affiliates.\n\nThis source code is licensed under the MIT license found in the\nLICENSE file in the root directory of this source tree.\n\"\"\"\n\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torch.autograd import Variable\nimport torch.utils.data\nimport torch.utils.data.distributed\nimport numpy as np\nimport pdb\n\nkernel_size = 3\n\nclass SimpleDiscriminator(nn.Module):\n \"\"\"\n Known to work well as a GAN discriminator\n \n \"\"\"\n def __init__(self, num_classes=1, args=None):\n super().__init__()\n nf = self.nf = 128\n\n # Submodules\n nlayers = 0\n self.nf0 = nf\n\n blocks = [\n ResnetBlock(nf, nf),\n ResnetBlock(nf, nf),\n ]\n\n # Initial up-channeling conv\n self.conv_img = nn.Conv2d(3, 1*nf, kernel_size=kernel_size, padding=kernel_size//2)\n\n self.resnet = nn.Sequential(*blocks)\n\n # Final stage is standard avg-pool followed by linear\n self.pool = nn.AdaptiveAvgPool2d((1,1))\n self.fc = nn.Linear(self.nf0, num_classes)\n #self.norm = nn.GroupNorm(1, 1, affine=False, eps=0.0)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='relu')\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n if m.weight is not None:\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n def forward(self, x):\n batch_size = x.size(0)\n out = x\n \n #out = self.norm(out)\n #pdb.set_trace()\n out = self.conv_img(out)\n out = self.resnet(out)\n out = self.pool(out)\n out = out.view(batch_size, self.nf0)\n out = self.fc(actvn(out))\n \n return out\n\n\nclass ResnetBlock(nn.Module):\n def __init__(self, fin, fout, fhidden=None):\n super().__init__()\n # Attributes\n self.learned_shortcut = (fin != fout)\n self.fin = fin\n self.fout = fout\n if fhidden is None:\n self.fhidden = min(fin, fout)\n else:\n self.fhidden = fhidden\n\n # Submodules\n self.norm_0 = nn.GroupNorm(self.fin//32, self.fin)\n\n self.conv_0 = nn.Conv2d(self.fin, self.fhidden, \n kernel_size, stride=1, padding=kernel_size//2, bias=False)\n\n self.norm_1 = nn.GroupNorm(self.fhidden//32, self.fhidden)\n\n self.conv_1 = nn.Conv2d(self.fhidden, self.fout, \n kernel_size, stride=1, padding=kernel_size//2, bias=False)\n\n if self.learned_shortcut:\n self.conv_s = nn.Conv2d(self.fin, self.fout, \n 1, stride=1, padding=0, bias=False)\n\n def forward(self, x):\n x_s = self._shortcut(x)\n dx = self.conv_0(actvn(self.norm_0(x)))\n dx = self.conv_1(actvn(self.norm_1(dx)))\n out = x_s + dx\n\n return out\n\n def _shortcut(self, x):\n if self.learned_shortcut:\n x_s = self.conv_s(x)\n else:\n x_s = x\n return x_s\n\n\ndef actvn(x):\n return F.relu(x)\n #return F.leaky_relu(x, 2e-1)\n", "\"\"\"\nCopyright (c) Facebook, Inc. 
and its affiliates.\n\nThis source code is licensed under the MIT license found in the\nLICENSE file in the root directory of this source tree.\n\"\"\"\n\nimport h5py\nimport torch\nfrom collections import OrderedDict\nfrom fastmri.data import transforms\n\nimport numpy as np\nimport random\nimport pdb\n\ndef est_sens_maps(kspace, start, end, apodize_hori=0.07):\n num_coils, height, width = kspace.shape\n mask = np.zeros(width, dtype=kspace.dtype)\n mask[start:end] = 1\n kspace = np.where(mask, kspace, 0)\n mask = np.exp(-(np.linspace(-1, 1, width) / apodize_hori) ** 2, dtype=kspace.dtype)\n kspace = kspace * mask\n sens_maps = np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(kspace), norm='ortho'))\n sens_maps /= np.sqrt(np.sum(np.abs(sens_maps) ** 2, axis=0, keepdims=True))\n return sens_maps\n\n\nclass KSpaceDataTransform(object):\n def __init__(self, args, mask_func, partition, use_seed=True):\n self.args = args\n self.mask_func = mask_func\n self.partition = partition\n self.use_seed = use_seed\n\n def __call__(self, target_ksp, target_im, attrs, fname, slice):\n kspace_np = target_ksp\n target_im = transforms.to_tensor(target_im)\n target_ksp = transforms.to_tensor(target_ksp)\n\n if self.args.coil_compress_coils:\n target_ksp = transforms.coil_compress(target_ksp, self.args.coil_compress_coils)\n\n if self.args.calculate_offsets_directly:\n krow = kspace_np.sum(axis=(0,1)) # flatten to a single row\n width = len(krow)\n offset = (krow != 0).argmax()\n acq_start = offset\n acq_end = width - (krow[::-1] != 0).argmax() #exclusive\n else:\n offset = None # Mask will pick randomly\n if self.partition == 'val' and 'mask_offset' in attrs:\n offset = attrs['mask_offset']\n\n acq_start = attrs['padding_left']\n acq_end = attrs['padding_right']\n\n #pdb.set_trace()\n\n seed = None if not self.use_seed else tuple(map(ord, fname))\n input_ksp, mask, num_lf = transforms.apply_mask(\n target_ksp, self.mask_func, \n seed, offset,\n (acq_start, acq_end))\n\n #pdb.set_trace()\n\n sens_map = torch.Tensor(0)\n if self.args.compute_sensitivities:\n start_of_center_mask = (kspace_np.shape[-1] - num_lf + 1) // 2\n end_of_center_mask = start_of_center_mask + num_lf\n sens_map = est_sens_maps(kspace_np, start_of_center_mask, end_of_center_mask)\n sens_map = transforms.to_tensor(sens_map)\n\n if self.args.grappa_input:\n with h5py.File(self.args.grappa_input_path / self.partition / fname, 'r') as hf:\n kernel = transforms.to_tensor(hf['kernel'][slice])\n input_ksp = transforms.apply_grappa(input_ksp, kernel, target_ksp, mask)\n\n grappa_kernel = torch.Tensor(0)\n if self.args.grappa_path is not None:\n with h5py.File(self.args.grappa_path / self.partition / fname, 'r') as hf:\n grappa_kernel = transforms.to_tensor(hf['kernel'][slice])\n\n if self.args.grappa_target:\n with h5py.File(self.args.grappa_target_path / self.partition / fname, 'r') as hf:\n kernel = transforms.to_tensor(hf['kernel'][slice])\n target_ksp = transforms.apply_grappa(target_ksp.clone(), kernel, target_ksp, mask, sample_accel=2)\n target_im = transforms.root_sum_of_squares(transforms.complex_abs(transforms.ifft2(target_ksp)))\n\n input_im = transforms.ifft2(input_ksp)\n if not self.args.scale_inputs:\n scale = torch.Tensor([1.])\n else:\n abs_input = transforms.complex_abs(input_im)\n if self.args.scale_type == 'max':\n scale = torch.max(abs_input)\n else:\n scale = torch.mean(abs_input)\n\n input_ksp /= scale\n target_ksp /= scale\n target_im /= scale\n\n scale = scale.view([1, 1, 1])\n attrs_dict = dict(**attrs)\n\n return OrderedDict(\n 
input = input_ksp,\n target = target_ksp,\n target_im = target_im,\n mask = mask,\n grappa_kernel = grappa_kernel,\n scale = scale,\n attrs_dict = attrs_dict,\n fname = fname,\n slice = slice,\n num_lf = num_lf,\n sens_map = sens_map,\n )\n" ]
[ [ "torch.nn.Linear", "torch.nn.AdaptiveAvgPool2d", "torch.nn.init.constant_", "torch.nn.Sequential", "torch.nn.init.kaiming_normal_", "torch.nn.GroupNorm", "torch.nn.Conv2d", "torch.nn.functional.relu" ], [ "numpy.zeros", "torch.max", "numpy.fft.ifftshift", "numpy.where", "numpy.abs", "numpy.linspace", "torch.Tensor", "torch.mean" ] ]
mlazzarin/qibo
[ "e82bc3e27c5182be7b6f0b23bd20bc1057e31701", "e82bc3e27c5182be7b6f0b23bd20bc1057e31701" ]
[ "src/qibo/tests/test_core_callbacks.py", "src/qibo/tests/test_core_hamiltonians_trotter.py" ]
[ "\"\"\"Test methods defined in `qibo/core/callbacks.py`.\"\"\"\nimport pytest\nimport numpy as np\nfrom qibo.models import Circuit, AdiabaticEvolution\nfrom qibo import gates, callbacks, K\nfrom qibo.config import EIGVAL_CUTOFF\n\n\n# Absolute testing tolerance for the cases of zero entanglement entropy\n_atol = 1e-8\n\n\ndef test_getitem_bad_indexing(backend):\n entropy = callbacks.EntanglementEntropy([0])\n c = Circuit(2)\n c.add(gates.RY(0, 0.1234))\n c.add(gates.CNOT(0, 1))\n c.add(gates.CallbackGate(entropy))\n final_state = c()\n entropy[0]\n with pytest.raises(IndexError):\n entropy[1]\n with pytest.raises(IndexError):\n entropy[\"a\"]\n\n\ndef test_entropy_product_state(backend):\n \"\"\"Check that the |++> state has zero entropy.\"\"\"\n entropy = callbacks.EntanglementEntropy()\n state = K.ones(4) / 2.0\n\n result = entropy(state)\n K.assert_allclose(result, 0, atol=_atol)\n\n\ndef test_entropy_singlet_state(backend):\n \"\"\"Check that the singlet state has maximum entropy.\"\"\"\n from qibo import K\n entropy = callbacks.EntanglementEntropy([0])\n state = np.zeros(4)\n state[0], state[-1] = 1, 1\n state = K.cast(state / np.sqrt(2))\n result = entropy(state)\n K.assert_allclose(result, 1.0)\n\n\ndef test_entropy_bad_state_type(backend):\n entropy = callbacks.EntanglementEntropy([0])\n with pytest.raises(TypeError):\n _ = entropy(\"test\")\n\n\ndef test_entropy_random_state(backend):\n \"\"\"Check that entropy calculation agrees with numpy.\"\"\"\n # Generate a random positive and hermitian density matrix\n rho = np.random.random((8, 8)) + 1j * np.random.random((8, 8))\n rho = rho + rho.conj().T\n _, u = np.linalg.eigh(rho)\n s = 5 * np.random.random(8)\n s = s / s.sum()\n rho = u.dot(np.diag(s)).dot(u.conj().T)\n\n callback = callbacks.EntanglementEntropy(compute_spectrum=True)\n result = callback.entropy(K.cast(rho))\n target = - (s * np.log2(s)).sum()\n K.assert_allclose(result, target)\n\n ref_eigvals = np.linalg.eigvalsh(rho)\n masked_eigvals = ref_eigvals[np.where(ref_eigvals > EIGVAL_CUTOFF)]\n ref_spectrum = - np.log(masked_eigvals)\n K.assert_allclose(callback.spectrum[0], ref_spectrum)\n\n\ndef test_entropy_switch_partition(backend):\n \"\"\"Check that partition is switched to the largest counterpart.\"\"\"\n entropy = callbacks.EntanglementEntropy([0])\n # Prepare ghz state of 5 qubits\n state = np.zeros(2 ** 5)\n state[0], state[-1] = 1, 1\n state = state / np.sqrt(2)\n\n result = entropy(K.cast(state))\n K.assert_allclose(result, 1.0)\n\n\ndef test_entropy_numerical(backend):\n \"\"\"Check that entropy calculation does not fail for tiny eigenvalues.\"\"\"\n from qibo import K\n eigvals = np.array([-1e-10, -1e-15, -2e-17, -1e-18, -5e-60, 1e-48, 4e-32,\n 5e-14, 1e-14, 9.9e-13, 9e-13, 5e-13, 1e-13, 1e-12,\n 1e-11, 1e-10, 1e-9, 1e-7, 1, 4, 10])\n rho = K.cast(np.diag(eigvals))\n callback = callbacks.EntanglementEntropy()\n result = callback.entropy(rho)\n\n mask = eigvals > 0\n target = - (eigvals[mask] * np.log2(eigvals[mask])).sum()\n K.assert_allclose(result, target)\n\n\n@pytest.mark.parametrize(\"density_matrix\", [False, True])\ndef test_entropy_in_circuit(backend, density_matrix):\n \"\"\"Check that entropy calculation works in circuit.\"\"\"\n entropy = callbacks.EntanglementEntropy([0], compute_spectrum=True)\n c = Circuit(2, density_matrix=density_matrix)\n c.add(gates.CallbackGate(entropy))\n c.add(gates.H(0))\n c.add(gates.CallbackGate(entropy))\n c.add(gates.CNOT(0, 1))\n c.add(gates.CallbackGate(entropy))\n state = c()\n\n target = [0, 0, 1.0]\n 
K.assert_allclose(entropy[:], target, atol=_atol)\n\n target_spectrum = [0, 0, np.log(2), np.log(2)]\n entropy_spectrum = np.concatenate(entropy.spectrum).ravel().tolist()\n K.assert_allclose(entropy_spectrum, target_spectrum, atol=_atol)\n\n\n@pytest.mark.parametrize(\"gateconf,target_entropy\",\n [([\"H\", \"CNOT\", \"entropy\"], [1.0]),\n ([\"H\", \"entropy\", \"CNOT\"], [0.0]),\n ([\"entropy\", \"H\", \"CNOT\"], [0.0]),\n ([\"entropy\", \"H\", \"CNOT\", \"entropy\"], [0.0, 1.0]),\n ([\"H\", \"entropy\", \"CNOT\", \"entropy\"], [0.0, 1.0]),\n ([\"entropy\", \"H\", \"entropy\", \"CNOT\"], [0.0, 0.0])])\ndef test_entropy_in_distributed_circuit(backend, accelerators, gateconf, target_entropy):\n \"\"\"Check that various entropy configurations work in distributed circuit.\"\"\"\n target_c = Circuit(4)\n target_c.add([gates.H(0), gates.CNOT(0, 1)])\n target_state = target_c()\n\n entropy = callbacks.EntanglementEntropy([0])\n c = Circuit(4, accelerators)\n for gate in gateconf:\n if gate == \"H\":\n c.add(gates.H(0))\n elif gate == \"CNOT\":\n c.add(gates.CNOT(0, 1))\n elif gate == \"entropy\":\n c.add(gates.CallbackGate(entropy))\n final_state = c()\n K.assert_allclose(final_state, target_state)\n K.assert_allclose(entropy[:], target_entropy, atol=_atol)\n\n\ndef test_entropy_in_compiled_circuit(backend):\n \"\"\"Check that entropy calculation works when circuit is compiled.\"\"\"\n from qibo import get_backend\n entropy = callbacks.EntanglementEntropy([0])\n c = Circuit(2)\n c.add(gates.CallbackGate(entropy))\n c.add(gates.H(0))\n c.add(gates.CallbackGate(entropy))\n c.add(gates.CNOT(0, 1))\n c.add(gates.CallbackGate(entropy))\n c.compile()\n final_state = c()\n K.assert_allclose(entropy[:], [0, 0, 1.0], atol=_atol)\n\n\ndef test_entropy_multiple_executions(backend, accelerators):\n \"\"\"Check entropy calculation when the callback is used in multiple executions.\"\"\"\n target_c = Circuit(4)\n target_c.add([gates.RY(0, 0.1234), gates.CNOT(0, 1)])\n target_state = target_c()\n\n entropy = callbacks.EntanglementEntropy([0])\n c = Circuit(4, accelerators)\n c.add(gates.RY(0, 0.1234))\n c.add(gates.CallbackGate(entropy))\n c.add(gates.CNOT(0, 1))\n c.add(gates.CallbackGate(entropy))\n state = c()\n K.assert_allclose(state, target_state)\n\n target_c = Circuit(4)\n target_c.add([gates.RY(0, 0.4321), gates.CNOT(0, 1)])\n target_state = target_c()\n\n c = Circuit(4, accelerators)\n c.add(gates.RY(0, 0.4321))\n c.add(gates.CallbackGate(entropy))\n c.add(gates.CNOT(0, 1))\n c.add(gates.CallbackGate(entropy))\n state = c()\n K.assert_allclose(state, target_state)\n\n def target_entropy(t):\n cos = np.cos(t / 2.0) ** 2\n sin = np.sin(t / 2.0) ** 2\n return - cos * np.log2(cos) - sin * np.log2(sin)\n\n target = [0, target_entropy(0.1234), 0, target_entropy(0.4321)]\n K.assert_allclose(entropy[:], target, atol=_atol)\n\n c = Circuit(8, accelerators)\n with pytest.raises(RuntimeError):\n c.add(gates.CallbackGate(entropy))\n state = c()\n\n\ndef test_entropy_large_circuit(backend, accelerators):\n \"\"\"Check that entropy calculation works for variational like circuit.\"\"\"\n thetas = np.pi * np.random.random((3, 8))\n target_entropy = callbacks.EntanglementEntropy([0, 2, 4, 5])\n c1 = Circuit(8)\n c1.add((gates.RY(i, thetas[0, i]) for i in range(8)))\n c1.add((gates.CZ(i, i + 1) for i in range(0, 7, 2)))\n state1 = c1()\n e1 = K.to_numpy(target_entropy(state1))\n\n c2 = Circuit(8)\n c2.add((gates.RY(i, thetas[1, i]) for i in range(8)))\n c2.add((gates.CZ(i, i + 1) for i in range(1, 7, 2)))\n 
c2.add(gates.CZ(0, 7))\n state2 = (c1 + c2)()\n e2 = K.to_numpy(target_entropy(state2))\n\n c3 = Circuit(8)\n c3.add((gates.RY(i, thetas[2, i]) for i in range(8)))\n c3.add((gates.CZ(i, i + 1) for i in range(0, 7, 2)))\n state3 = (c1 + c2 + c3)()\n e3 = K.to_numpy(target_entropy(state3))\n\n entropy = callbacks.EntanglementEntropy([0, 2, 4, 5])\n c = Circuit(8, accelerators)\n c.add(gates.CallbackGate(entropy))\n c.add((gates.RY(i, thetas[0, i]) for i in range(8)))\n c.add((gates.CZ(i, i + 1) for i in range(0, 7, 2)))\n c.add(gates.CallbackGate(entropy))\n c.add((gates.RY(i, thetas[1, i]) for i in range(8)))\n c.add((gates.CZ(i, i + 1) for i in range(1, 7, 2)))\n c.add(gates.CZ(0, 7))\n c.add(gates.CallbackGate(entropy))\n c.add((gates.RY(i, thetas[2, i]) for i in range(8)))\n c.add((gates.CZ(i, i + 1) for i in range(0, 7, 2)))\n c.add(gates.CallbackGate(entropy))\n state = c()\n\n K.assert_allclose(state3, state)\n K.assert_allclose(entropy[:], [0, e1, e2, e3])\n\n\ndef test_entropy_density_matrix(backend):\n from qibo.tests.utils import random_density_matrix\n rho = random_density_matrix(4)\n # this rho is not always positive. Make rho positive for this application\n _, u = np.linalg.eigh(rho)\n rho = u.dot(np.diag(5 * np.random.random(u.shape[0]))).dot(u.conj().T)\n # this is a positive rho\n\n entropy = callbacks.EntanglementEntropy([1, 3])\n entropy.density_matrix = True\n final_ent = entropy(rho)\n\n rho = rho.reshape(8 * (2,))\n reduced_rho = np.einsum(\"abcdafch->bdfh\", rho).reshape((4, 4))\n eigvals = np.linalg.eigvalsh(reduced_rho).real\n # assert that all eigenvalues are non-negative\n assert (eigvals >= 0).prod()\n mask = eigvals > 0\n target_ent = - (eigvals[mask] * np.log2(eigvals[mask])).sum()\n K.assert_allclose(final_ent, target_ent)\n\n\n@pytest.mark.parametrize(\"density_matrix\", [False, True])\ndef test_norm(backend, density_matrix):\n norm = callbacks.Norm()\n if density_matrix:\n norm.density_matrix = True\n state = np.random.random((2, 2)) + 1j * np.random.random((2, 2))\n target_norm = np.trace(state)\n else:\n state = np.random.random(4) + 1j * np.random.random(4)\n target_norm = np.sqrt((np.abs(state) ** 2).sum())\n\n K.assert_allclose(norm(K.cast(state)), target_norm)\n\n\n@pytest.mark.parametrize(\"density_matrix\", [False, True])\ndef test_overlap(backend, density_matrix):\n state0 = np.random.random(4) + 1j * np.random.random(4)\n state1 = np.random.random(4) + 1j * np.random.random(4)\n overlap = callbacks.Overlap(state0)\n if density_matrix:\n overlap.density_matrix = True\n with pytest.raises(NotImplementedError):\n overlap(state1)\n else:\n target_overlap = np.abs((state0.conj() * state1).sum())\n K.assert_allclose(overlap(K.cast(state1)), target_overlap)\n\n\n@pytest.mark.parametrize(\"density_matrix\", [False, True])\ndef test_energy(backend, density_matrix):\n from qibo import hamiltonians\n ham = hamiltonians.TFIM(4, h=1.0)\n energy = callbacks.Energy(ham)\n matrix = K.to_numpy(ham.matrix)\n if density_matrix:\n energy.density_matrix = True\n state = np.random.random((16, 16)) + 1j * np.random.random((16, 16))\n target_energy = np.trace(matrix.dot(state))\n else:\n state = np.random.random(16) + 1j * np.random.random(16)\n target_energy = state.conj().dot(matrix.dot(state))\n K.assert_allclose(energy(K.cast(state)), target_energy)\n\n\n@pytest.mark.parametrize(\"dense\", [False, True])\n@pytest.mark.parametrize(\"check_degenerate\", [False, True])\ndef test_gap(backend, dense, check_degenerate):\n from qibo import hamiltonians\n h0 = 
hamiltonians.X(4, dense=dense)\n if check_degenerate:\n # use h=0 to make this Hamiltonian degenerate\n h1 = hamiltonians.TFIM(4, h=0, dense=dense)\n else:\n h1 = hamiltonians.TFIM(4, h=1, dense=dense)\n\n ham = lambda t: (1 - t) * h0.matrix + t * h1.matrix\n targets = {\"ground\": [], \"excited\": [], \"gap\": []}\n for t in np.linspace(0, 1, 11):\n eigvals = np.linalg.eigvalsh(ham(t)).real\n targets[\"ground\"].append(eigvals[0])\n targets[\"excited\"].append(eigvals[1])\n targets[\"gap\"].append(eigvals[1] - eigvals[0])\n if check_degenerate:\n targets[\"gap\"][-1] = eigvals[3] - eigvals[0]\n\n gap = callbacks.Gap(check_degenerate=check_degenerate)\n ground = callbacks.Gap(0)\n excited = callbacks.Gap(1)\n evolution = AdiabaticEvolution(h0, h1, lambda t: t, dt=1e-1,\n callbacks=[gap, ground, excited])\n final_state = evolution(final_time=1.0)\n targets = {k: K.stack(v) for k, v in targets.items()}\n K.assert_allclose(ground[:], targets[\"ground\"])\n K.assert_allclose(excited[:], targets[\"excited\"])\n K.assert_allclose(gap[:], targets[\"gap\"])\n\n\ndef test_gap_errors():\n \"\"\"Check errors in gap callback instantiation.\"\"\"\n # invalid string ``mode``\n with pytest.raises(ValueError):\n gap = callbacks.Gap(\"test\")\n # invalid ``mode`` type\n with pytest.raises(TypeError):\n gap = callbacks.Gap([])\n\n gap = callbacks.Gap()\n # invalid evolution model type\n with pytest.raises(TypeError):\n gap.evolution = \"test\"\n # call before setting evolution model\n with pytest.raises(ValueError):\n gap(np.ones(4))\n # not implemented for density matrices\n gap.density_matrix = True\n with pytest.raises(NotImplementedError):\n gap(np.zeros(8))\n # for coverage\n _ = gap.density_matrix\n gap.density_matrix = False\n", "\"\"\"Test Trotter Hamiltonian methods from `qibo/core/hamiltonians.py`.\"\"\"\nimport pytest\nimport numpy as np\nimport qibo\nfrom qibo import hamiltonians, K\nfrom qibo.tests.utils import random_state, random_complex, random_hermitian\n\n\n@pytest.mark.parametrize(\"nqubits\", [3, 4])\n@pytest.mark.parametrize(\"model\", [\"TFIM\", \"XXZ\", \"Y\", \"MaxCut\"])\ndef test_trotter_hamiltonian_to_dense(backend, nqubits, model):\n \"\"\"Test that Trotter Hamiltonian dense form agrees with normal Hamiltonian.\"\"\"\n local_ham = getattr(hamiltonians, model)(nqubits, dense=False)\n target_ham = getattr(hamiltonians, model)(nqubits)\n final_ham = local_ham.dense\n K.assert_allclose(final_ham.matrix, target_ham.matrix, atol=1e-15)\n\n\ndef test_trotter_hamiltonian_scalar_mul(nqubits=3):\n \"\"\"Test multiplication of Trotter Hamiltonian with scalar.\"\"\"\n local_ham = hamiltonians.TFIM(nqubits, h=1.0, dense=False)\n target_ham = 2 * hamiltonians.TFIM(nqubits, h=1.0)\n local_dense = (2 * local_ham).dense\n K.assert_allclose(local_dense.matrix, target_ham.matrix)\n\n local_ham = hamiltonians.TFIM(nqubits, h=1.0, dense=False)\n local_dense = (local_ham * 2).dense\n K.assert_allclose(local_dense.matrix, target_ham.matrix)\n\n\ndef test_trotter_hamiltonian_scalar_add(nqubits=4):\n \"\"\"Test addition of Trotter Hamiltonian with scalar.\"\"\"\n local_ham = hamiltonians.TFIM(nqubits, h=1.0, dense=False)\n target_ham = 2 + hamiltonians.TFIM(nqubits, h=1.0)\n local_dense = (2 + local_ham).dense\n K.assert_allclose(local_dense.matrix, target_ham.matrix)\n\n local_ham = hamiltonians.TFIM(nqubits, h=1.0, dense=False)\n local_dense = (local_ham + 2).dense\n K.assert_allclose(local_dense.matrix, target_ham.matrix)\n\n\ndef test_trotter_hamiltonian_scalar_sub(nqubits=3):\n \"\"\"Test 
subtraction of Trotter Hamiltonian with scalar.\"\"\"\n local_ham = hamiltonians.TFIM(nqubits, h=1.0, dense=False)\n target_ham = 2 - hamiltonians.TFIM(nqubits, h=1.0)\n local_dense = (2 - local_ham).dense\n K.assert_allclose(local_dense.matrix, target_ham.matrix)\n\n target_ham = hamiltonians.TFIM(nqubits, h=1.0) - 2\n local_ham = hamiltonians.TFIM(nqubits, h=1.0, dense=False)\n local_dense = (local_ham - 2).dense\n K.assert_allclose(local_dense.matrix, target_ham.matrix)\n\n\ndef test_trotter_hamiltonian_operator_add_and_sub(nqubits=3):\n \"\"\"Test addition and subtraction between Trotter Hamiltonians.\"\"\"\n local_ham1 = hamiltonians.TFIM(nqubits, h=1.0, dense=False)\n local_ham2 = hamiltonians.TFIM(nqubits, h=0.5, dense=False)\n\n local_ham = local_ham1 + local_ham2\n target_ham = (hamiltonians.TFIM(nqubits, h=1.0) +\n hamiltonians.TFIM(nqubits, h=0.5))\n dense = local_ham.dense\n K.assert_allclose(dense.matrix, target_ham.matrix)\n\n local_ham = local_ham1 - local_ham2\n target_ham = (hamiltonians.TFIM(nqubits, h=1.0) -\n hamiltonians.TFIM(nqubits, h=0.5))\n dense = local_ham.dense\n K.assert_allclose(dense.matrix, target_ham.matrix)\n\n\n@pytest.mark.parametrize(\"nqubits,normalize\", [(3, False), (4, False)])\ndef test_trotter_hamiltonian_matmul(nqubits, normalize):\n \"\"\"Test Trotter Hamiltonian expectation value.\"\"\"\n local_ham = hamiltonians.TFIM(nqubits, h=1.0, dense=False)\n dense_ham = hamiltonians.TFIM(nqubits, h=1.0)\n\n state = K.cast(random_complex((2 ** nqubits,)))\n trotter_ev = local_ham.expectation(state, normalize)\n target_ev = dense_ham.expectation(state, normalize)\n K.assert_allclose(trotter_ev, target_ev)\n\n state = random_complex((2 ** nqubits,))\n trotter_ev = local_ham.expectation(state, normalize)\n target_ev = dense_ham.expectation(state, normalize)\n K.assert_allclose(trotter_ev, target_ev)\n\n from qibo.core.states import VectorState\n state = VectorState.from_tensor(state)\n trotter_matmul = local_ham @ state\n target_matmul = dense_ham @ state\n K.assert_allclose(trotter_matmul, target_matmul)\n\n\ndef test_trotter_hamiltonian_three_qubit_term(backend):\n \"\"\"Test creating ``TrotterHamiltonian`` with three qubit term.\"\"\"\n from scipy.linalg import expm\n from qibo.core.terms import HamiltonianTerm\n m1 = random_hermitian(3)\n m2 = random_hermitian(2)\n m3 = random_hermitian(1)\n\n terms = [HamiltonianTerm(m1, 0, 1, 2), HamiltonianTerm(m2, 2, 3),\n HamiltonianTerm(m3, 1)]\n ham = hamiltonians.SymbolicHamiltonian()\n ham.terms = terms\n\n # Test that the `TrotterHamiltonian` dense matrix is correct\n eye = np.eye(2, dtype=m1.dtype)\n mm1 = np.kron(m1, eye)\n mm2 = np.kron(np.kron(eye, eye), m2)\n mm3 = np.kron(np.kron(eye, m3), np.kron(eye, eye))\n target_ham = hamiltonians.Hamiltonian(4, mm1 + mm2 + mm3)\n K.assert_allclose(ham.matrix, target_ham.matrix)\n\n dt = 1e-2\n initial_state = random_state(4)\n if K.op is not None:\n with pytest.raises(NotImplementedError):\n circuit = ham.circuit(dt=dt)\n else:\n circuit = ham.circuit(dt=dt)\n final_state = circuit(np.copy(initial_state))\n u = [expm(-0.5j * dt * (mm1 + mm3)), expm(-0.5j * dt * mm2)]\n target_state = u[1].dot(u[0].dot(initial_state))\n target_state = u[0].dot(u[1].dot(target_state))\n K.assert_allclose(final_state, target_state)\n\n\ndef test_old_trotter_hamiltonian_errors():\n \"\"\"Check errors when creating the deprecated ``TrotterHamiltonian`` object.\"\"\"\n with pytest.raises(NotImplementedError):\n h = hamiltonians.TrotterHamiltonian()\n with 
pytest.raises(NotImplementedError):\n h = hamiltonians.TrotterHamiltonian.from_symbolic(0, 1)\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.sin", "numpy.trace", "numpy.zeros", "numpy.log", "numpy.linalg.eigh", "numpy.ones", "numpy.where", "numpy.linalg.eigvalsh", "numpy.einsum", "numpy.sqrt", "numpy.cos", "numpy.abs", "numpy.random.random", "numpy.linspace", "numpy.diag", "numpy.log2" ], [ "scipy.linalg.expm", "numpy.copy", "numpy.kron", "numpy.eye" ] ]
marcia-marques/metroclima-crds
[ "eec46982a0a83a211ee8bace333fb1cb1e691d3b" ]
[ "src/pycrds/datafile.py" ]
[ "import os\nimport glob\nimport pandas as pd\nimport dask.dataframe as dd\nimport pathlib\nfrom datetime import datetime, timedelta\n\n\ndef read_data(dir_name, usecols, dtype, date_range=None):\n \"\"\"\n Return a dataframe with concatenated data.\n Set timestamp as index.\n\n Parameters:\n dir_name (str): directory name\n usecols (list-like): selected columns\n dtype (dict): data type for columns\n date_range (list of str): list with initial and final date 'yyyy/mm/dd'\n \"\"\"\n\n filenames = [filename for filename in glob.iglob(dir_name, recursive=True)]\n filenames.sort()\n if date_range:\n idx0 = filenames.index([x for x in filenames if date_range[0] in x][0])\n if idx0 != 0:\n idx0 -= 1\n idx1 = filenames.index([x for x in filenames if date_range[-1] in x][-1]) + 1\n filenames = filenames[idx0:idx1]\n df = dd.read_csv(filenames,\n sep=r'\\s+',\n usecols=usecols,\n dtype=dtype)\n df = df.compute()\n df['DATE_TIME'] = pd.to_datetime(df['DATE'] + ' ' + df['TIME'])\n df = df.set_index('DATE_TIME')\n df = df.drop(['DATE', 'TIME'], axis=1)\n\n if date_range:\n return df.loc[(df.index >= date_range[0]) &\n (df.index < datetime.strptime(date_range[-1], \"%Y/%m/%d\") + timedelta(days=1))]\n else:\n return df\n\n\ndef save_24h(df, path, file_id, level):\n \"\"\"\n Save 24-hour files\n\n Parameters:\n df (pandas DataFrame): dataframe\n path (str): path to save output files\n file_id (str): analyzer serial number\n level (str): data processing level\n \"\"\"\n for day in df.index.dayofyear.unique():\n df_24h = df[(df.index.dayofyear == day)]\n year = str(df_24h.index[0].strftime('%Y'))\n month = str(df_24h.index[0].strftime('%m'))\n full_path = path + '/' + year + '/' + month\n pathlib.Path(full_path).mkdir(parents=True, exist_ok=True)\n file_name = full_path + \\\n '/' + file_id + '-' + \\\n df_24h.index[0].strftime('%Y%m%d') + \\\n 'Z-DataLog_User_' + level + '.csv'\n df_24h.to_csv(file_name)\n\n\ndef resample_data(df, t, my_cols):\n \"\"\"\n Returns a dataframe with resampled data [mean, std, count].\n\n Parameters:\n df (pandas DataFrame): dataframe\n t ('T', 'H', 'D') : minute, hour or day\n my_cols (list-like): selected columns\n \"\"\"\n df_mean = df[my_cols].resample(t).mean()\n df_std = df[my_cols].resample(t).std()\n df_count = df[my_cols].resample(t).count()\n return df_mean.join(df_std, rsuffix='_std').join(df_count, rsuffix='_count')\n\n\ndef gantt_data(path, var, pos):\n \"\"\"\n Returns a dataframe with data availability info.\n\n Parameters:\n path (str): file name\n var (str): selected variable\n pos (int): position in the graph (from bottom to top)\n \"\"\"\n df = pd.read_csv(path)\n df = df.set_index('DATE_TIME')\n df.index = pd.to_datetime(df.index)\n df['avail'] = df[var].isnull() # look for null values\n df['avail'] = df['avail'].map({False: pos}) # poputlate with graph position\n return df\n" ]
[ [ "pandas.to_datetime", "pandas.read_csv" ] ]
gyfastas/CS7319E1G16
[ "03126af04766abcb269d0c8db481c96c856d21ef" ]
[ "datasets/debug_dataset.py" ]
[ "import torch\n\nfrom .casia_dataset import CasiaDataset\n\n\nclass DebugDataset(CasiaDataset):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._debug_image = torch.rand(3, 120, 120)\n self._debug_label = torch.tensor(1)\n\n def __getitem__(self, idx):\n return {\n \"data\": {str(i):self._debug_image for i in range(len(self.scale_size)+1)},\n \"label\": self._debug_label,\n 'image_path': \"fuck\"\n }\n\n def __len__(self):\n return 200" ]
[ [ "torch.rand", "torch.tensor" ] ]
yotamitai/Highway_Disagreements
[ "18dccaa67b238700691c0f89b9fc2dfc2dab6751" ]
[ "highway_disagreements/configs/reward_functions.py" ]
[ "import math\n\nimport numpy as np\n\nfrom highway_env.envs import HighwayEnv, Action\nfrom gym.envs.registration import register\n\nfrom highway_env.utils import lmap\nfrom highway_env.vehicle.controller import ControlledVehicle\n\n\nclass ParallelDriver(HighwayEnv):\n \"\"\"rewarded for driving in parallel to a car\"\"\"\n\n def _reward(self, action: Action) -> float:\n obs = self.observation_type.observe()\n other_cars = obs[1:]\n # closest car in front that is not in same lane\n cars_x_dist = [car[1] for car in other_cars if car[1] > 0 and abs(car[2]) > 0.2]\n closest_car = lmap(cars_x_dist[0], [0, 0.3], [0, 1]) if cars_x_dist \\\n else 0\n\n # safety distance from car in same lane\n dist_closest_car_in_lane = [x[1] for x in other_cars if x[1] > 0 and abs(x[2]) <= 0.05]\n if not dist_closest_car_in_lane or dist_closest_car_in_lane[0] > 0.01:\n keeping_distance = 0\n else:\n keeping_distance = -1\n\n reward = \\\n + self.config[\"parallel_distance_reward\"] * (1 - np.clip(closest_car,0,1)) \\\n + self.config[\"collision_reward\"] * self.vehicle.crashed \\\n + self.config[\"keep_distance_reward\"] * keeping_distance\n reward = -1 if not self.vehicle.on_road else reward\n return reward\n\n\nregister(\n id='ParallelDriver-v0',\n entry_point='highway_disagreements.configs.reward_functions:ParallelDriver',\n)\n\n\nclass SocialDistance(HighwayEnv):\n \"\"\"rewarded for keeping as much distance from all cars\"\"\"\n\n def _reward(self, action: Action) -> float:\n other_cars = self.observation_type.observe()[1:]\n # distance from all cars\n dist = 0\n max_dist = len(other_cars) * math.sqrt(0.4 ** 2 + 0.75 ** 2) # max in x and y coords relative to agent\n for i, car in enumerate(other_cars):\n dist += math.sqrt(abs(car[1]) ** 2 + abs(car[2]) ** 2)\n scaled_dist = lmap(dist, [0, 4 * max_dist], [0, 1])\n\n # safety distance from car in same lane\n dist_closest_car_in_lane = [x[1] for x in other_cars if x[1] > 0 and abs(x[2]) <= 0.05]\n if not dist_closest_car_in_lane or dist_closest_car_in_lane[0] > 0.01:\n keeping_distance = 1\n else:\n keeping_distance = -1\n\n reward = \\\n + self.config['distance_reward'] * np.clip(scaled_dist,0,1) \\\n + self.config['keep_distance_reward'] * keeping_distance \\\n + self.config[\"collision_reward\"] * self.vehicle.crashed\n reward = -1 if not self.vehicle.on_road else reward\n return reward\n\n\nregister(\n id='SocialDistance-v0',\n entry_point='highway_disagreements.configs.reward_functions:SocialDistance',\n)\n\n\nclass NoLaneChange(HighwayEnv):\n \"\"\"penalized for changing lanes, otherwise rewarded for speed\"\"\"\n\n def _reward(self, action: Action) -> float:\n obs = self.observation_type.observe()\n other_cars = obs[1:]\n # punish for changing lanes\n lane_change = action == 0 or action == 2\n # safety distance from car in same lane\n dist_closest_car_in_lane = [x[1] for x in other_cars if x[1] > 0 and abs(x[2]) <= 0.05]\n if not dist_closest_car_in_lane or dist_closest_car_in_lane[0] > 0.01:\n keeping_distance = 1\n else:\n keeping_distance = -1\n\n reward = \\\n + self.config[\"collision_reward\"] * self.vehicle.crashed \\\n + self.config[\"keep_distance_reward\"] * keeping_distance \\\n + self.config[\"lane_change_reward\"] * lane_change\n reward = -1 if not self.vehicle.on_road else reward\n return reward\n\n\nregister(\n id='NoLaneChange-v0',\n entry_point='highway_disagreements.configs.reward_functions:NoLaneChange',\n)\n\n\nclass ClearLane(HighwayEnv):\n\n def _reward(self, action: Action) -> float:\n \"\"\" if no cars in your lane - max 
reward,\n else reward based on how close agent is to a car in its lane\"\"\"\n obs = self.observation_type.observe()\n other_cars = obs[1:]\n dist_closest_car_in_lane = [x[1] for x in other_cars if x[1] > 0 and abs(x[2]) <= 0.05]\n closest_car = lmap(dist_closest_car_in_lane[0], [0, 0.4], [0, 1]) \\\n if dist_closest_car_in_lane else 1\n\n # safety distance from car in same lane\n if not dist_closest_car_in_lane or dist_closest_car_in_lane[0] > 0.01:\n keeping_distance = 0\n else:\n keeping_distance = -1\n\n reward = \\\n + self.config[\"distance_reward\"] * (1 - np.clip(closest_car, 0, 1)) \\\n + self.config[\"keep_distance_reward\"] * keeping_distance \\\n + self.config[\"collision_reward\"] * self.vehicle.crashed\n reward = -1 if not self.vehicle.on_road else reward\n return reward\n\n\nregister(\n id='ClearLane-v0',\n entry_point='highway_disagreements.configs.reward_functions:ClearLane',\n)\n\n\nclass FastRight(HighwayEnv):\n\n def _reward(self, action: Action) -> float:\n \"\"\"\n The reward is defined to foster driving at high speed, on the rightmost lanes, and to avoid collisions.\n :param action: the last action performed\n :return: the corresponding reward\n \"\"\"\n obs = self.observation_type.observe()\n neighbours = self.road.network.all_side_lanes(self.vehicle.lane_index)\n lane = self.vehicle.target_lane_index[2] if isinstance(self.vehicle, ControlledVehicle) \\\n else self.vehicle.lane_index[2]\n scaled_speed = lmap(self.vehicle.speed, self.config[\"reward_speed_range\"], [0, 1])\n\n # safety distance from car in same lane\n other_cars = obs[1:]\n dist_closest_car_in_lane = [x[1] for x in other_cars if x[1] > 0 and abs(x[2]) <= 0.05]\n if not dist_closest_car_in_lane or dist_closest_car_in_lane[0] > 0.01:\n keeping_distance = 1\n else:\n keeping_distance = -1\n\n reward = \\\n + self.config[\"collision_reward\"] * self.vehicle.crashed \\\n + self.config[\"right_lane_reward\"] * lane / max(len(neighbours) - 1, 1) \\\n + self.config[\"high_speed_reward\"] * np.clip(scaled_speed, 0, 1) \\\n + self.config[\"keep_distance_reward\"] * keeping_distance\n\n reward = -1 if not self.vehicle.on_road else reward\n return reward\n\n\nregister(\n id='FastRight-v0',\n entry_point='highway_disagreements.configs.reward_functions:FastRight',\n)\n" ]
[ [ "numpy.clip" ] ]
expertailab/ISAAQ
[ "133e25adbf5c219aceef6e7f38135de248371cb1", "133e25adbf5c219aceef6e7f38135de248371cb1" ]
[ "tqa_ndq_ensembler.py", "pretrainings/pretrainings_tmc.py" ]
[ "from transformers import RobertaTokenizer\nimport numpy as np\nimport json\nfrom tqdm import tqdm\nimport torch\nimport random\nimport sys\nimport argparse\n\nfrom aux_methods import get_data_ndq, process_data_ndq, validation_ndq, get_upper_bound, ensembler\n\ndef main(argv):\n parser = argparse.ArgumentParser(description='')\n parser.add_argument('-d', '--device', default='gpu', choices=['gpu', 'cpu'], help='device to train the model with. Options: cpu or gpu. Default: gpu')\n parser.add_argument('-p', '--pretrainingslist', default=[\"checkpoints/tmc_ndq_roberta_IR_e2.pth\", \"checkpoints/tmc_ndq_roberta_NSP_e2.pth\", \"checkpoints/tmc_ndq_roberta_NN_e3.pth\"], help='list of paths of the pretrainings model. They must be three. Default: checkpoints/tmc_ndq_roberta_IR_e2.pth, checkpoints/tmc_ndq_roberta_NSP_e2.pth, checkpoints/tmc_ndq_roberta_NN_e3.pth')\n parser.add_argument('-x', '--maxlen', default= 180, type=int, help='max sequence length. Default: 180')\n parser.add_argument('-b', '--batchsize', default= 32, type=int, help='size of the batches. Default: 512')\n args = parser.parse_args()\n print(args)\n \n models = [torch.load(args.pretrainingslist[0]), torch.load(args.pretrainingslist[1]), torch.load(args.pretrainingslist[2])]\n retrieval_solvers = [\"IR\", \"NSP\", \"NN\"]\n tokenizer = RobertaTokenizer.from_pretrained('roberta-large')\n \n max_len = args.maxlen\n batch_size = args.batchsize\n dataset_name = \"ndq\"\n \n feats_train = []\n feats_test = []\n for model, retrieval_solver in zip(models, retrieval_solvers):\n if args.device==\"gpu\":\n device = torch.device(\"cuda\")\n model.cuda()\n if args.device==\"cpu\":\n device = torch.device(\"cpu\") \n model.cpu()\n model.eval()\n print(\"\\n\")\n print(retrieval_solver)\n print(\"val\")\n raw_data_train = get_data_ndq(dataset_name, \"val\", retrieval_solver, tokenizer, max_len)\n train_dataloader = process_data_ndq(raw_data_train, batch_size, \"val\")\n feats_train.append(validation_ndq(model, train_dataloader, device))\n labels_train = raw_data_train[-1]\n \n print(\"test\")\n raw_data_test = get_data_ndq(dataset_name, \"test\", retrieval_solver, tokenizer, max_len)\n test_dataloader = process_data_ndq(raw_data_test, batch_size, \"test\")\n feats_test.append(validation_ndq(model, test_dataloader, device))\n labels_test = raw_data_test[-1]\n \n upper_bound_train = get_upper_bound(feats_train, labels_train)\n res = ensembler(feats_train, feats_test, labels_train, labels_test)\n print(\"\\nFINAL RESULTS:\")\n print(\"TEST SET: \")\n print(res)\n\n res = ensembler(feats_test, feats_train, labels_test, labels_train)\n print(\"VALIDATION SET: \")\n print(res)\n\nif __name__ == \"__main__\":\n # Set the seed value all over the place to make this reproducible.\n seed_val = 42\n random.seed(seed_val)\n np.random.seed(seed_val)\n torch.manual_seed(seed_val)\n torch.cuda.manual_seed_all(seed_val)\n \n main(sys.argv[1:])", "from transformers import AdamW, RobertaForMultipleChoice, RobertaTokenizer\nfrom transformers import get_linear_schedule_with_warmup\nimport numpy as np\nimport random\nimport torch\nimport sys\nimport argparse\n\nfrom aux_methods import get_data_pretrainings, process_data_ndq, training_ndq\n\ndef main(argv):\n parser = argparse.ArgumentParser(description='')\n parser.add_argument('-d', '--device', default='gpu', choices=['gpu', 'cpu'], help='device to train the model with. Options: cpu or gpu. 
Default: gpu')\n parser.add_argument('-p', '--pretrainings', default='../checkpoints/RACE_e1.pth', help='path to the pretrainings model. Default: ../checkpoints/RACE_e1.pth')\n parser.add_argument('-b', '--batchsize', default= 1, type=int, help='size of the batches. Default: 1')\n parser.add_argument('-x', '--maxlen', default= 256, type=int, help='max sequence length. Default: 256')\n parser.add_argument('-l', '--lr', default= 1e-5, type=float, help='learning rate. Default: 1e-5')\n parser.add_argument('-e', '--epochs', default= 4, type=int, help='number of epochs. Default: 4')\n parser.add_argument('-s', '--save', default=False, help='save model at the end of the training', action='store_true')\n args = parser.parse_args()\n print(args)\n \n if args.pretrainings == \"\":\n model = RobertaForMultipleChoice.from_pretrained(\"roberta-large\")\n else:\n model = torch.load(args.pretrainings)\n tokenizer = RobertaTokenizer.from_pretrained('roberta-large')\n \n if args.device==\"gpu\":\n device = torch.device(\"cuda\")\n model.cuda()\n if args.device==\"cpu\":\n device = torch.device(\"cpu\") \n model.cpu()\n \n model.zero_grad()\n \n batch_size = args.batchsize\n max_len = args.maxlen\n dataset_name = \"pretrainings\"\n lr = args.lr\n epochs = args.epochs\n save_model = args.save\n\n raw_data_train = get_data_pretrainings(dataset_name, \"train\", tokenizer, max_len)\n raw_data_val = get_data_pretrainings(dataset_name, \"val\", tokenizer, max_len) \n \n train_dataloader = process_data_ndq(raw_data_train, batch_size, \"train\")\n val_dataloader = process_data_ndq(raw_data_val, batch_size, \"val\")\n\n optimizer = AdamW(model.parameters(), lr = lr, eps = 1e-8)\n total_steps = len(train_dataloader) * epochs\n scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps = 0, num_training_steps = total_steps)\n \n training_ndq(model, train_dataloader, val_dataloader, optimizer, scheduler, epochs, device, save_model, dataset_name)\nif __name__ == \"__main__\":\n # Set the seed value all over the place to make this reproducible.\n seed_val = 42\n random.seed(seed_val)\n np.random.seed(seed_val)\n torch.manual_seed(seed_val)\n torch.cuda.manual_seed_all(seed_val)\n \n main(sys.argv[1:])" ]
[ [ "torch.device", "torch.cuda.manual_seed_all", "numpy.random.seed", "torch.manual_seed", "torch.load" ], [ "torch.device", "torch.cuda.manual_seed_all", "numpy.random.seed", "torch.manual_seed", "torch.load" ] ]
FurenDeng/cora
[ "c252b84626dadde5c49458258cfb9284a71a40f8" ]
[ "cora/util/sphfunc.py" ]
[ "\"\"\"Special functions for calculations on the sphere.\n\nThese routines require `pygsl` to be installed. Although these functions are\navailable in `scipy` they tend to be inaccurate at large values of `l`.\n\"\"\"\n# === Start Python 2/3 compatibility\nfrom __future__ import (absolute_import, division,\n print_function, unicode_literals)\nfrom future.builtins import * # noqa pylint: disable=W0401, W0614\nfrom future.builtins.disabled import * # noqa pylint: disable=W0401, W0614\n# === End Python 2/3 compatibility\n\n\nimport numpy as np\n\nimport pygsl.testing.sf as sf\n\n\ndef jl(l, z):\n \"\"\"A fast spherical bessel function using approximations at low and high z.\n\n Parameters\n ----------\n l, z : scalar or np.ndarray\n Spherical bessel function arguments.\n\n Returns\n -------\n jl : scalar or np.ndarray\n Spherical bessel functions.\n \"\"\"\n\n lt = l\n zt = z\n\n l = np.atleast_1d(l)\n z = np.atleast_1d(z)\n\n zca = np.logical_and(z == 0.0, l > 0)\n zza = np.where(zca)\n lca = np.logical_and(np.logical_or(np.logical_and(l > 20, z < (l / 5.0)), z < (l * 1e-6)), z > 0.0)\n lza = np.where(lca)\n hca = np.logical_and(l > 20, z > 10.0 * l)\n hza = np.where(hca)\n gza = np.where(np.logical_not(np.logical_or(np.logical_or(lca, hca), zca)))\n la, za = np.broadcast_arrays(l, z)\n\n jla = np.empty_like(la).astype(np.float64)\n\n jla[zza] = 0.0\n jla[lza] = _jl_approx_lowz(la[lza], za[lza])\n jla[hza] = _jl_approx_highz(la[hza], za[hza])\n jla[gza] = _jl_gsl(la[gza], za[gza])\n\n if isinstance(lt, np.ndarray) or isinstance(zt, np.ndarray):\n return jla\n else:\n return jla[0]\n\n\ndef jl_d(l, z):\n \"\"\"First derivative of jl.\n\n Parameters\n ----------\n l, z : scalar or np.ndarray\n Spherical bessel function arguments.\n\n Returns\n -------\n jl_d : scalar or np.ndarray\n Derivate of spherical bessel functions.\n \"\"\"\n jl0 = jl(l, z)\n jl1 = jl(l + 1, z)\n return -jl1 + (l / z) * jl0\n\n\ndef jl_d2(l, z):\n \"\"\"Second derivative of jl.\n\n Parameters\n ----------\n l, z : scalar or np.ndarray\n Spherical bessel function arguments.\n\n Returns\n -------\n jl_d2 : scalar or np.ndarray\n Second derivate of spherical bessel functions.\n \"\"\"\n jl0 = jl(l, z)\n jl1 = jl(l + 1, z)\n jl2 = jl(l + 2, z)\n return jl2 - (2 * l / z) * jl1 + ((l**2 - 1) / z**2) * jl0\n\n\ndef Ylm(l, m, theta, phi):\n \"\"\"Calculate the spherical harmonic functions.\n\n Parameters\n ----------\n l, m : int or array_like\n Multipoles to calculate.\n theta, phi : float or array_like\n Angular position.\n\n Returns\n -------\n ylm : float or array_like\n \"\"\"\n\n l = np.array(l).astype(np.int32)\n m = np.array(m).astype(np.int32)\n x = np.array(np.cos(theta)).astype(np.float64)\n\n return sf.legendre_sphPlm(l, m, x) * np.exp(1.0J * m * phi)\n\n\ndef Ylm_array(lmax, theta, phi):\n \"\"\"Calculate the spherical harmonics up to lmax.\n\n Parameters\n ----------\n lmax : integer\n Maximum multipole.\n theta, phi : float or array_like\n Angular position.\n\n Returns\n -------\n ylm : np.ndarray[..., (lmax + 1) * (lmax + 2) / 2]\n The spherical harmonics for each angular position. The output array is\n the same shape at `theta`, `phi` but with a new last axis for the\n multipoles.\n \"\"\"\n m, l = np.triu_indices(lmax + 1)\n\n return Ylm(l, m, np.array(theta)[..., np.newaxis], np.array(phi)[..., np.newaxis])\n\n\ndef Ylm_spin2(l, m, theta, phi):\n \"\"\"Evaluate the spin-2 spherical harmonics.\n\n Uses Eq. 14 from Wiaux et al. 
2007 (arXiv:astro-ph/0508514).\n\n\n Parameters\n ----------\n l, m : int or array_like\n Multipoles to calculate.\n theta, phi : float or array_like\n Angular position.\n\n Returns\n -------\n ylm_spin_plus2, ylm_spin_minus2 : float or array_like\n Two arrays of the +2 and -2 spin harmonics.\n \"\"\"\n\n def alpha(sign, l, m, theta):\n\n t = (2 * m**2 - l * (l + 1.0) -\n sign * 2 * m * (l - 1.0) * np.cos(theta) +\n l * (l - 1) * np.cos(theta)**2)\n\n return t / np.sin(theta)**2\n\n def beta(sign, l, m, theta):\n\n t = (2 * ((2.0 * l + 1.0) / (2.0 * l - 1.0) * (l**2 - m**2))**0.5 *\n (sign * m + np.cos(theta)))\n\n return t / np.sin(theta)**2\n\n y0 = Ylm(l, m, theta, phi)\n y1 = np.where(l <= m, 0.0, Ylm(l - 1, m, theta, phi))\n\n fac = (l - 1) * l * (l + 1) * (l + 2)\n fac = np.where(l < 2, 0.0, fac**-0.5)\n\n y2plus = fac * (alpha(1, l, m, theta) * y0 + beta(1, l, m, theta) * y1)\n y2minus = fac * (alpha(-1, l, m, theta) * y0 + beta(-1, l, m, theta) * y1)\n\n return y2plus, y2minus\n\n\ndef Ylm_spin2_array(lmax, theta, phi):\n \"\"\"Calculate the spin-2 spherical harmonics up to lmax.\n\n Parameters\n ----------\n lmax : integer\n Maximum multipole.\n theta, phi : float or array_like\n Angular position.\n\n Returns\n -------\n ylm_spin_plus2, ylm_spin_minus2 : np.ndarray[..., (lmax + 1) * (lmax + 2) / 2]\n The spin spherical harmonics for each angular position. The output array is\n the same shape as `theta`, `phi` but with a new last axis for the\n multipoles.\n \"\"\"\n m, l = np.triu_indices(lmax + 1)\n\n return Ylm_spin2(l, m, np.array(theta)[..., np.newaxis], np.array(phi)[..., np.newaxis])\n\n\ndef _jl_approx_lowz(l, z):\n \"\"\"Approximation of j_l for low z.\n\n From Gradshteyn and Ryzhik 8.452.\n \"\"\"\n\n nu = l + 0.5\n nutanha = (nu * nu - z * z)**0.5\n arg = nutanha - nu * np.arccosh(nu / z)\n\n return (np.exp(arg) * (1 + 1.0 / (8 * nutanha) - 5.0 * nu**2 / (24 * nutanha**3) +\n 9.0 / (128 * nutanha**2) - 231.0 * nu**2 / (576 * nutanha**4)) /\n (2 * (z * nutanha)**0.5))\n\n\ndef _jl_approx_highz(l, z):\n \"\"\"Approximation of j_l for large z (where z > l).\n\n From Gradshteyn and Ryzhik 8.453.\n \"\"\"\n\n nu = l + 0.5\n sinb = (1 - nu * nu / (z * z))**0.5\n cotb = nu / (z * sinb)\n arg = z * sinb - nu * (np.pi / 2 - np.arcsin(nu / z)) - np.pi / 4\n\n return (np.cos(arg) * (1 - 9.0 * cotb**2 / (128.0 * nu**2)) +\n np.sin(arg) * (cotb / (8 * nu))) / (z * sinb**0.5)\n\n\ndef _jl_gsl(l, z):\n \"\"\"Use GSL routines to calculate Spherical Bessel functions.\n\n Array arguments only.\n \"\"\"\n return sf.bessel_jl(l.astype(np.int32), z.astype(np.float64))\n" ]
[ [ "numpy.logical_or", "numpy.array", "numpy.sin", "numpy.triu_indices", "numpy.broadcast_arrays", "numpy.arcsin", "numpy.exp", "numpy.logical_and", "numpy.arccosh", "numpy.where", "numpy.atleast_1d", "numpy.cos", "numpy.empty_like" ] ]
shanglidan/under_water
[ "ed487c73666221762cdddd665d2e0330c0a67176" ]
[ "tools/visual_data.py" ]
[ "import matplotlib.pyplot as plt\r\nfrom matplotlib.pyplot import savefig\r\nimport os\r\nfrom PIL import Image\r\nimport json\r\nimport numpy as np\r\n\r\n\r\ndef show_boxes(im, bboxs, segs, img, color):\r\n\r\n # Display in largest to smallest order to reduce occlusion\r\n # min_area = 99999\r\n # bbox_min = [0,0,0,0]\r\n # for det in dets:\r\n # bbox = det[:4]\r\n # area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])\r\n # if area < min_area:\r\n # min_area = area\r\n # bbox_min = bbox.copy()\r\n # ax.add_patch(\r\n # plt.Rectangle((bbox_min[0], bbox_min[1]),\r\n # bbox_min[2] - bbox_min[0],\r\n # bbox_min[3] - bbox_min[1],\r\n # fill=False, edgecolor=color,\r\n # linewidth=2))\r\n for det in bboxs:\r\n bbox = np.array(det[:4]).astype(int)\r\n cv2.rectangle(im, (bbox[0],bbox[1]), (bbox[0]+bbox[2], bbox[1]+bbox[3]), (255,0,0), 1)\r\n # for det in segs:\r\n # # bbox = det[:4]\r\n # # cv2.rectangle(im, (bbox[0],bbox[1]), (bbox[0]+bbox[2], bbox[1]+bbox[3]), color, 2)\r\n # det = np.array(det)\r\n # det = det.reshape((-1,1,2))\r\n # im = cv2.polylines(im,[det],True,color, 2)\r\n # cv2.imwrite('train_draw/'+img, im)\r\n if im.shape[0] > 1000:\r\n im = cv2.resize(im, (int(0.5*im.shape[1]), int(0.5*im.shape[0])))\r\n cv2.imshow('img', im)\r\n cv2.waitKey(0)\r\n\r\n return im\r\n\r\n\r\nroot_dir = 'data/train/image/'\r\n# root_dir = 'submit/test_detection1'\r\nimages = [os.path.join(root_dir, f) for f in os.listdir(root_dir)]\r\nlabel_file = \"data/train/annotations/train.json\"\r\nlabels = json.load(open(label_file, 'r'))\r\nimgpath2id = {}\r\nfor img in labels[\"images\"]:\r\n imgpath2id[img[\"file_name\"]] = img[\"id\"]\r\n\r\nbboxs_imgid = {}\r\nsegs_imgid = {}\r\nfor anno in labels[\"annotations\"]:\r\n if anno[\"image_id\"] not in bboxs_imgid.keys():\r\n bboxs_imgid[anno[\"image_id\"]] = []\r\n segs_imgid[anno[\"image_id\"]] = []\r\n bboxs_imgid[anno[\"image_id\"]].append(anno[\"bbox\"])\r\n # segs_imgid[anno[\"image_id\"]].append(anno[\"minAreaRect\"])\r\n# print('201908302_1d4495a0b2ae68070201908300504594OK-2.jpg' in imgpath2id)\r\n# print(imgpath2id['201908302_1d4495a0b2ae68070201908300504594OK-2.jpg'])\r\n# print(bboxs_imgid[imgpath2id['201908302_1d4495a0b2ae68070201908300504594OK-2.jpg']])\r\nimport cv2\r\nfor img in images:\r\n if 'vertical_flips' in img:\r\n continue\r\n # if '-' not in img:\r\n # continue\r\n im = cv2.imread(img)\r\n assert im is not None\r\n # im = show_boxes(im, bboxs_imgid[imgpath2id[os.path.basename(img)]], segs_imgid[imgpath2id[os.path.basename(img)]], os.path.basename(img), color = (0, 0, 255))\r\n try:\r\n print(img)\r\n im = show_boxes(im, bboxs_imgid[imgpath2id[os.path.basename(img)]], segs_imgid, os.path.basename(img), color = (0, 0, 255))\r\n except:\r\n print(img+'err!!!')" ]
[ [ "numpy.array" ] ]
thesilenthero/reporting
[ "c1174765db01ae5ba9bffbd77a0fb133ae691c09" ]
[ "tools/report_readers.py" ]
[ "import csv\r\n\r\nimport os\r\nimport pandas as pd\r\nfrom datetime import datetime\r\nimport parsedatetime as pdt\r\nimport xlwings as xw\r\n\r\nfrom .prisma import MediaPlan\r\nfrom .config import dcm_report_path\r\nfrom api import run_and_download_report, Report\r\n\r\nimport warnings\r\n\r\n\r\ndef get_files_in_folder(path):\r\n files = next(os.walk(path))[-1]\r\n files = [os.path.join(path, file) for file in files]\r\n return files\r\n\r\n\r\ndef parse_datestr(datestr):\r\n cal = pdt.Calendar()\r\n return datetime(*cal.parse(datestr)[0][:6])\r\n\r\n\r\ndef redistribute_units(df, left_columns, right_column, weight_against='na'):\r\n \"\"\"\r\n When combining datasets, some columns might be duplicated. This is a common\r\n scenario when including planned costs in a DCM report that has more dimensions\r\n than just placement. DCM doesn't take into account repeated placements and\r\n the planned values are duplicated and thus inaccurate. This function divides\r\n a column evenly:\r\n Example:\r\n >>> df = DataFrame({'Col1': ['A', 'A', 'A', 'B', 'B'],\r\n 'Col2': [1000, 1000, 1000, 1000, 1000]})\r\n >>> df\r\n Col1 Col2\r\n 0 A 1000\r\n 1 A 1000\r\n 2 A 1000\r\n 3 B 2000\r\n 4 B 2000\r\n\r\n >>> redistribute_units(df, ['Col1'], 'Col2')\r\n >>>\r\n 0 333.333333\r\n 1 333.333333\r\n 2 333.333333\r\n 3 1000.000000\r\n 4 1000.000000\r\n dtype: float64\r\n \"\"\"\r\n\r\n if weight_against == \"na\":\r\n\r\n mapping = df.groupby(left_columns).count()\r\n counts = df.apply(lambda row: mapping.loc[tuple(row[left_columns])][right_column], axis=1)\r\n return df[right_column] / counts\r\n\r\n else:\r\n\r\n sums = df.groupby(left_columns)[weight_against].sum()\r\n weights = df.apply(lambda row: row[weight_against] / sums.loc[tuple(row[left_columns])], axis=1)\r\n\r\n return df[right_column] * weights\r\n\r\n\r\ndef load_from_csv(path):\r\n\r\n with open(path, \"r\") as f:\r\n reader = csv.reader(f)\r\n\r\n for i, row in enumerate(reader, 1):\r\n\r\n if \"Date/Time Generated\" in row:\r\n date_generated = pd.to_datetime(parse_datestr(row[1]))\r\n\r\n if \"Date Range\" in row:\r\n date_range = row[1]\r\n date_range = [x.strip() for x in date_range.split(\"-\")]\r\n date_range = [pd.to_datetime(parse_datestr(x)) for x in date_range]\r\n\r\n if \"Report Fields\" in row:\r\n skiprows = i\r\n break\r\n\r\n df = pd.read_csv(path, skiprows=skiprows, skipfooter=1, engine='python')\r\n\r\n warnings.filterwarnings('ignore')\r\n\r\n df.date_generated = date_generated\r\n df.date_range = date_range\r\n\r\n return df\r\n\r\n\r\ndef load_dcm(profileId, reportId, path=None, force_run=False):\r\n\r\n if path is None:\r\n path = os.path.join(dcm_report_path, Report(profileId, reportId).filename + '.csv')\r\n\r\n if not os.path.isfile(path) or force_run:\r\n run_and_download_report(profileId, reportId, path=path)\r\n\r\n df = load_from_csv(path)\r\n\r\n return df\r\n\r\n\r\ndef merge_with_prisma(df, plan_path, join_on=None):\r\n\r\n if join_on is None:\r\n join_on = [\"Campaign\", \"Placement\"]\r\n\r\n elif \"Placement\" not in join_on:\r\n raise ValueError(\"Reports must be merged by at least the placement level\")\r\n\r\n plan = MediaPlan(plan_path)\r\n plan.parse()\r\n\r\n plan_df = pd.DataFrame(plan.output, columns=[\"Campaign\", \"Placement\",\r\n \"Planned Units\", \"Planned Cost\",\r\n \"Rate\", \"Placement Start Date\",\r\n \"Placement End Date\"])\r\n\r\n df.sort_values(['Site (DCM)', 'Placement'], inplace=True)\r\n df = df.merge(plan_df.drop_duplicates(), how=\"left\", left_on=join_on,\r\n 
right_on=join_on)\r\n return df\r\n # df.fillna(0, inplace=True)\r\n\r\n\r\ndef write_to_spreadsheet(df, book_path, sheet, cellref=\"$A$1\", clear=True):\r\n\r\n df.index.name = \"Index\"\r\n book = xw.Book(book_path)\r\n sht = book.sheets(sheet)\r\n if clear:\r\n sht.clear_contents()\r\n\r\n # rnge = sht.range(cellref)\r\n # rnge.offset(1, 0).value = df.values\r\n # rnge.values = list(df.columns)\r\n sht.range(cellref).value = df\r\n" ]
[ [ "pandas.DataFrame", "pandas.read_csv" ] ]
akouminov/LipNet
[ "e61d270921707f49b10b932610e978bdeefd4935" ]
[ "evaluation/predict.py" ]
[ "from lipnet.lipreading.videos import Video\nfrom lipnet.lipreading.visualization import show_video_subtitle\nfrom lipnet.core.decoders import Decoder\nfrom lipnet.lipreading.helpers import labels_to_text\nfrom lipnet.utils.spell import Spell\nfrom lipnet.model2 import LipNet\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.compat.v1.keras import backend as K\nimport numpy as np\nimport sys\nimport os\n\nnp.random.seed(55)\n\nCURRENT_PATH = os.path.dirname(os.path.abspath(__file__))\n\nFACE_PREDICTOR_PATH = os.path.join(CURRENT_PATH,'..','common','predictors','shape_predictor_68_face_landmarks.dat')\n\nPREDICT_GREEDY = False\nPREDICT_BEAM_WIDTH = 200\nPREDICT_DICTIONARY = os.path.join(CURRENT_PATH,'..','common','dictionaries','grid.txt')\n\ndef predict(weight_path, video_path, absolute_max_string_len=32, output_size=28):\n print(\"\\nLoading data from disk...\")\n video = Video(vtype='face', face_predictor_path=FACE_PREDICTOR_PATH)\n if os.path.isfile(video_path):\n video.from_video(video_path)\n else:\n video.from_frames(video_path)\n print(\"Data loaded.\\n\")\n\n if K.image_data_format() == 'channels_first':\n img_c, frames_n, img_w, img_h = video.data.shape\n else:\n frames_n, img_w, img_h, img_c = video.data.shape\n\n\n lipnet = LipNet(img_c=img_c, img_w=img_w, img_h=img_h, frames_n=frames_n,\n absolute_max_string_len=absolute_max_string_len, output_size=output_size)\n\n adam = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)\n\n lipnet.model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=adam)\n lipnet.model.load_weights(weight_path)\n\n spell = Spell(path=PREDICT_DICTIONARY)\n decoder = Decoder(greedy=PREDICT_GREEDY, beam_width=PREDICT_BEAM_WIDTH,\n postprocessors=[labels_to_text, spell.sentence])\n\n X_data = np.array([video.data]).astype(np.float32) / 255\n input_length = np.array([len(video.data)])\n\n y_pred = lipnet.predict(X_data)\n result = decoder.decode(y_pred, input_length)[0]\n\n return (video, result)\n\nif __name__ == '__main__':\n if len(sys.argv) == 3:\n video, result = predict(sys.argv[1], sys.argv[2])\n elif len(sys.argv) == 4:\n video, result = predict(sys.argv[1], sys.argv[2], sys.argv[3])\n elif len(sys.argv) == 5:\n video, result = predict(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])\n else:\n video, result = None, \"\"\n\n if video is not None:\n show_video_subtitle(video.face, result)\n\n stripe = \"-\" * len(result)\n print( \"\")\n print( \" __ __ __ __ \")\n print( \"/\\\\ \\\\ __ /\\\\ \\\\/\\\\ \\\\ /\\\\ \\\\__ \")\n print( \"\\\\ \\\\ \\\\ /\\\\_\\\\ _____\\\\ \\\\ `\\\\\\\\ \\\\ __\\\\ \\\\ ,_\\\\ \")\n print( \" \\\\ \\\\ \\\\ __\\\\/\\\\ \\\\/\\\\ '__`\\\\ \\\\ , ` \\\\ /'__`\\\\ \\\\ \\\\/ \")\n print( \" \\\\ \\\\ \\\\L\\\\ \\\\\\\\ \\\\ \\\\ \\\\ \\\\L\\\\ \\\\ \\\\ \\\\`\\\\ \\\\/\\\\ __/\\\\ \\\\ \\\\_ \")\n print( \" \\\\ \\\\____/ \\\\ \\\\_\\\\ \\\\ ,__/\\\\ \\\\_\\\\ \\\\_\\\\ \\\\____\\\\\\\\ \\\\__\\\\\")\n print( \" \\\\/___/ \\\\/_/\\\\ \\\\ \\\\/ \\\\/_/\\\\/_/\\\\/____/ \\\\/__/\")\n print( \" \\\\ \\\\_\\\\ \")\n print( \" \\\\/_/ \")\n print( \"\")\n print( \" --{}- \".format(stripe))\n print( \"[ DECODED ] |> {} |\".format(result))\n print( \" --{}- \".format(stripe))\n" ]
[ [ "numpy.random.seed", "numpy.array", "tensorflow.compat.v1.keras.backend.image_data_format", "tensorflow.keras.optimizers.Adam" ] ]
HalmonLui/square-hackathon
[ "62d5be7a229f9e39e27a546c164facd779d28aa4" ]
[ "backend/discoverpage/discoverpage_metrics.py" ]
[ "import json\nfrom heapq import nlargest, nsmallest\nimport pandas as pd\nimport numpy as np\n# Recommended, Hot Deals, Trending Near you\n\n\"\"\"\nSo... current brute force approach:\nrecommendations ---> top 5 ratings (maybe incorporate distance(?))\ntrending near you ----> grab top 10 near you in distance and out of those 10 grab the top 5 with the most bookings (AND rating >4.0?). \nHot Deals ---> top 5 smallest price for some service\n\n3 trendings, 4 recs, 4 hot deals\n\"\"\"\n\ndef get_recommended_posts(data, date, n):\n # Brute Force recommendation based on ratings\n if date == 'n/a':\n topN = nlargest(n, data, key=lambda i: i[\"rating\"])\n else:\n topN = nlargest(n, data[date], key=lambda i: i[\"rating\"])\n\n return topN\n\ndef get_trending_posts(data, date, n):\n # grab top 10 near person in distance and out of those thosse grab top 1-9 with bookings\n # todo: rating filter >4\n # brute force\n if n > 9:\n raise ValueError(\"n must be 1 to 9\")\n if date == 'n/a':\n min10_dist = nsmallest(10, data, key=lambda i: i[\"distance\"])\n else:\n min10_dist = nsmallest(10, data[date], key=lambda i: i[\"distance\"])\n avail_pcts = []\n for d in min10_dist:\n num_1s = 0.\n num_0s = 0.\n for dd in d[\"stylists\"]:\n for key, val in dd[\"availability\"].items():\n if val == 1:\n num_1s += 1.\n else:\n num_0s += 1.\n avail_pcts.append(round(num_1s / (num_1s + num_0s), 3))\n \n arg_maxs = sorted(range(len(avail_pcts)), key=lambda i: avail_pcts[i])[-n:]\n\n output = [min10_dist[i] for i in arg_maxs]\n\n return output[::-1]\n\ndef get_hot_deals(data, date):\n salon_types = [\"Hair Salon\", \"Nail Salon\",\n \"Spa Center\", \"Piercing Parlor\"]\n \n if date == 'n/a':\n df = pd.DataFrame(data)\n else:\n df = pd.DataFrame(data[date])\n output = []\n for st in salon_types:\n salon_df = df[df.services == st]\n salon_df['sum_of_services'] = salon_df.apply(lambda row: sum(row.price.values()), axis=1)\n min_salon = salon_df.iloc[salon_df[\"sum_of_services\"].values.argmin()].to_dict()\n min_salon.pop('sum_of_services', None)\n output.append(min_salon)\n\n return output\n\n\nif __name__ == \"__main__\":\n f = open('/Users/sonamghosh/Desktop/square_hacks_2020/square-hackathon/backend/search/sample_names_data.json')\n data = json.load(f)\n\n #top5 = nlargest(5, data['2020-06-15'], key=lambda i: i[\"rating\"])\n #print(len(top5))\n #print([i['rating'] for i in top5])\n\n #print(get_trending_posts(data, '2020-06-15', 4))\n\n #df = pd.DataFrame(data['2020-06-15'])\n #print(df.head(10))\n\n #df2 = df[df.type == 'Hair Salon']\n\n #df2['sum_of_services'] = df2.apply(lambda row: sum(row.price.values()), axis=1)\n\n #print(df2.head(5))\n\n #import numpy as np\n #print(df2.iloc[df2[\"sum_of_services\"].values.argmin()].to_dict())\n #print(df.groupby('type').count())\n #print(get_hot_deals(data, '2020-06-15'))\n" ]
[ [ "pandas.DataFrame" ] ]
PySilentSubstitution/silent-sub
[ "ca1ec4c9f6dcd444a87149bbe6dfe00140f4c375" ]
[ "pysilsub/CIE.py" ]
[ "#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n'''\r\npysilsub.CIE\r\n============\r\n\r\nConvenience functions for accessing CIE standards.\r\n\r\nObtained from http://www.cvrl.org/\r\n\r\n@author: jtm\r\n\r\n'''\r\n\r\nfrom typing import Optional, Union\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\n\r\ndef get_CIE_2006_10_deg_CMF(\r\n binwidth: Optional[int] = 1,\r\n asdf: Optional[bool] = True,\r\n trim_visible: Optional[bool] = True\r\n) -> Union[np.ndarray, pd.DataFrame]:\r\n \"\"\"Get the CIE 2006 XYZ 10-deg physiologically relevant color matching\r\n functions.\r\n\r\n Parameters\r\n ----------\r\n asdf : bool, optional\r\n Whether to return the results as a pandas DataFrame. The default is\r\n False.\r\n binwidth : int, optional\r\n Width of the wavelength bins in nanometers (must be `1` or `5`). The\r\n default is `1`.\r\n trim_visible : bool, optional\r\n Whether to trim the CMFs to 380-780 nm. The default is True.\r\n\r\n Returns\r\n -------\r\n cmf : numpy.ndarray or pandas.DataFrame\r\n The CIE 2006 XYZ 10-deg physiologically relevant CMFs.\r\n\r\n \"\"\"\r\n # breakpoint()\r\n colnames = ['Wavelength', 'X', 'Y', 'Z']\r\n\r\n cmf = np.array([\r\n 390, 2.95E-03, 4.08E-04, 1.32E-02,\r\n 391, 3.58E-03, 4.98E-04, 1.60E-02,\r\n 392, 4.33E-03, 6.06E-04, 1.94E-02,\r\n 393, 5.24E-03, 7.37E-04, 2.34E-02,\r\n 394, 6.33E-03, 8.93E-04, 2.84E-02,\r\n 395, 7.64E-03, 1.08E-03, 3.42E-02,\r\n 396, 9.20E-03, 1.30E-03, 4.13E-02,\r\n 397, 1.10E-02, 1.55E-03, 4.97E-02,\r\n 398, 1.32E-02, 1.85E-03, 5.96E-02,\r\n 399, 1.58E-02, 2.20E-03, 7.13E-02,\r\n 400, 1.88E-02, 2.59E-03, 8.51E-02,\r\n 401, 2.23E-02, 3.04E-03, 1.01E-01,\r\n 402, 2.63E-02, 3.54E-03, 1.20E-01,\r\n 403, 3.09E-02, 4.11E-03, 1.41E-01,\r\n 404, 3.61E-02, 4.75E-03, 1.65E-01,\r\n 405, 4.20E-02, 5.47E-03, 1.93E-01,\r\n 406, 4.87E-02, 6.29E-03, 2.24E-01,\r\n 407, 5.61E-02, 7.19E-03, 2.58E-01,\r\n 408, 6.43E-02, 8.18E-03, 2.96E-01,\r\n 409, 7.32E-02, 9.26E-03, 3.38E-01,\r\n 410, 8.28E-02, 1.04E-02, 3.83E-01,\r\n 411, 9.30E-02, 1.16E-02, 4.32E-01,\r\n 412, 1.04E-01, 1.29E-02, 4.83E-01,\r\n 413, 1.15E-01, 1.42E-02, 5.38E-01,\r\n 414, 1.27E-01, 1.56E-02, 5.96E-01,\r\n 415, 1.40E-01, 1.71E-02, 6.57E-01,\r\n 416, 1.53E-01, 1.87E-02, 7.21E-01,\r\n 417, 1.66E-01, 2.04E-02, 7.88E-01,\r\n 418, 1.80E-01, 2.21E-02, 8.56E-01,\r\n 419, 1.94E-01, 2.39E-02, 9.25E-01,\r\n 420, 2.08E-01, 2.58E-02, 9.93E-01,\r\n 421, 2.21E-01, 2.76E-02, 1.06E+00,\r\n 422, 2.33E-01, 2.95E-02, 1.12E+00,\r\n 423, 2.45E-01, 3.13E-02, 1.18E+00,\r\n 424, 2.57E-01, 3.33E-02, 1.25E+00,\r\n 425, 2.69E-01, 3.53E-02, 1.31E+00,\r\n 426, 2.81E-01, 3.74E-02, 1.37E+00,\r\n 427, 2.93E-01, 3.97E-02, 1.44E+00,\r\n 428, 3.06E-01, 4.20E-02, 1.50E+00,\r\n 429, 3.17E-01, 4.45E-02, 1.57E+00,\r\n 430, 3.28E-01, 4.70E-02, 1.62E+00,\r\n 431, 3.38E-01, 4.96E-02, 1.68E+00,\r\n 432, 3.47E-01, 5.22E-02, 1.73E+00,\r\n 433, 3.54E-01, 5.49E-02, 1.78E+00,\r\n 434, 3.62E-01, 5.77E-02, 1.82E+00,\r\n 435, 3.69E-01, 6.05E-02, 1.87E+00,\r\n 436, 3.77E-01, 6.33E-02, 1.91E+00,\r\n 437, 3.85E-01, 6.62E-02, 1.96E+00,\r\n 438, 3.92E-01, 6.91E-02, 2.01E+00,\r\n 439, 3.98E-01, 7.19E-02, 2.04E+00,\r\n 440, 4.03E-01, 7.47E-02, 2.08E+00,\r\n 441, 4.05E-01, 7.74E-02, 2.10E+00,\r\n 442, 4.06E-01, 8.00E-02, 2.11E+00,\r\n 443, 4.06E-01, 8.27E-02, 2.12E+00,\r\n 444, 4.05E-01, 8.54E-02, 2.13E+00,\r\n 445, 4.04E-01, 8.82E-02, 2.13E+00,\r\n 446, 4.03E-01, 9.12E-02, 2.14E+00,\r\n 447, 4.03E-01, 9.43E-02, 2.14E+00,\r\n 448, 4.01E-01, 9.75E-02, 2.15E+00,\r\n 449, 3.98E-01, 1.01E-01, 2.14E+00,\r\n 450, 3.93E-01, 
1.04E-01, 2.13E+00,\r\n 451, 3.86E-01, 1.07E-01, 2.10E+00,\r\n 452, 3.78E-01, 1.10E-01, 2.07E+00,\r\n 453, 3.68E-01, 1.13E-01, 2.03E+00,\r\n 454, 3.58E-01, 1.16E-01, 1.99E+00,\r\n 455, 3.48E-01, 1.20E-01, 1.95E+00,\r\n 456, 3.38E-01, 1.23E-01, 1.91E+00,\r\n 457, 3.29E-01, 1.27E-01, 1.87E+00,\r\n 458, 3.19E-01, 1.32E-01, 1.84E+00,\r\n 459, 3.10E-01, 1.36E-01, 1.80E+00,\r\n 460, 3.01E-01, 1.41E-01, 1.77E+00,\r\n 461, 2.92E-01, 1.47E-01, 1.74E+00,\r\n 462, 2.83E-01, 1.52E-01, 1.70E+00,\r\n 463, 2.74E-01, 1.58E-01, 1.67E+00,\r\n 464, 2.64E-01, 1.64E-01, 1.63E+00,\r\n 465, 2.53E-01, 1.70E-01, 1.58E+00,\r\n 466, 2.42E-01, 1.76E-01, 1.53E+00,\r\n 467, 2.30E-01, 1.82E-01, 1.48E+00,\r\n 468, 2.17E-01, 1.88E-01, 1.43E+00,\r\n 469, 2.04E-01, 1.94E-01, 1.37E+00,\r\n 470, 1.91E-01, 2.00E-01, 1.31E+00,\r\n 471, 1.78E-01, 2.06E-01, 1.25E+00,\r\n 472, 1.65E-01, 2.12E-01, 1.19E+00,\r\n 473, 1.53E-01, 2.18E-01, 1.13E+00,\r\n 474, 1.40E-01, 2.25E-01, 1.07E+00,\r\n 475, 1.28E-01, 2.31E-01, 1.01E+00,\r\n 476, 1.17E-01, 2.38E-01, 9.54E-01,\r\n 477, 1.06E-01, 2.45E-01, 9.00E-01,\r\n 478, 9.51E-02, 2.53E-01, 8.47E-01,\r\n 479, 8.52E-02, 2.60E-01, 7.98E-01,\r\n 480, 7.59E-02, 2.68E-01, 7.52E-01,\r\n 481, 6.73E-02, 2.77E-01, 7.08E-01,\r\n 482, 5.93E-02, 2.85E-01, 6.67E-01,\r\n 483, 5.18E-02, 2.94E-01, 6.28E-01,\r\n 484, 4.49E-02, 3.02E-01, 5.91E-01,\r\n 485, 3.84E-02, 3.11E-01, 5.55E-01,\r\n 486, 3.24E-02, 3.19E-01, 5.20E-01,\r\n 487, 2.69E-02, 3.28E-01, 4.86E-01,\r\n 488, 2.20E-02, 3.37E-01, 4.55E-01,\r\n 489, 1.77E-02, 3.46E-01, 4.25E-01,\r\n 490, 1.40E-02, 3.55E-01, 3.98E-01,\r\n 491, 1.08E-02, 3.66E-01, 3.73E-01,\r\n 492, 8.17E-03, 3.78E-01, 3.50E-01,\r\n 493, 6.04E-03, 3.90E-01, 3.29E-01,\r\n 494, 4.46E-03, 4.02E-01, 3.09E-01,\r\n 495, 3.45E-03, 4.15E-01, 2.91E-01,\r\n 496, 3.01E-03, 4.27E-01, 2.73E-01,\r\n 497, 3.09E-03, 4.40E-01, 2.56E-01,\r\n 498, 3.61E-03, 4.52E-01, 2.39E-01,\r\n 499, 4.49E-03, 4.65E-01, 2.23E-01,\r\n 500, 5.65E-03, 4.78E-01, 2.08E-01,\r\n 501, 7.04E-03, 4.92E-01, 1.93E-01,\r\n 502, 8.67E-03, 5.05E-01, 1.79E-01,\r\n 503, 1.06E-02, 5.20E-01, 1.65E-01,\r\n 504, 1.29E-02, 5.34E-01, 1.52E-01,\r\n 505, 1.56E-02, 5.49E-01, 1.39E-01,\r\n 506, 1.88E-02, 5.64E-01, 1.28E-01,\r\n 507, 2.26E-02, 5.79E-01, 1.16E-01,\r\n 508, 2.69E-02, 5.94E-01, 1.06E-01,\r\n 509, 3.20E-02, 6.10E-01, 9.68E-02,\r\n 510, 3.78E-02, 6.25E-01, 8.85E-02,\r\n 511, 4.43E-02, 6.40E-01, 8.12E-02,\r\n 512, 5.15E-02, 6.55E-01, 7.46E-02,\r\n 513, 5.91E-02, 6.70E-01, 6.87E-02,\r\n 514, 6.71E-02, 6.86E-01, 6.33E-02,\r\n 515, 7.54E-02, 7.01E-01, 5.82E-02,\r\n 516, 8.38E-02, 7.17E-01, 5.35E-02,\r\n 517, 9.23E-02, 7.33E-01, 4.91E-02,\r\n 518, 1.01E-01, 7.49E-01, 4.51E-02,\r\n 519, 1.10E-01, 7.64E-01, 4.13E-02,\r\n 520, 1.20E-01, 7.79E-01, 3.78E-02,\r\n 521, 1.30E-01, 7.92E-01, 3.47E-02,\r\n 522, 1.41E-01, 8.05E-01, 3.18E-02,\r\n 523, 1.52E-01, 8.16E-01, 2.91E-02,\r\n 524, 1.64E-01, 8.27E-01, 2.66E-02,\r\n 525, 1.76E-01, 8.38E-01, 2.43E-02,\r\n 526, 1.88E-01, 8.47E-01, 2.22E-02,\r\n 527, 2.00E-01, 8.57E-01, 2.03E-02,\r\n 528, 2.13E-01, 8.66E-01, 1.85E-02,\r\n 529, 2.25E-01, 8.75E-01, 1.69E-02,\r\n 530, 2.38E-01, 8.83E-01, 1.54E-02,\r\n 531, 2.51E-01, 8.91E-01, 1.41E-02,\r\n 532, 2.64E-01, 8.99E-01, 1.28E-02,\r\n 533, 2.77E-01, 9.07E-01, 1.17E-02,\r\n 534, 2.90E-01, 9.15E-01, 1.07E-02,\r\n 535, 3.05E-01, 9.23E-01, 9.75E-03,\r\n 536, 3.20E-01, 9.32E-01, 8.89E-03,\r\n 537, 3.35E-01, 9.41E-01, 8.09E-03,\r\n 538, 3.51E-01, 9.50E-01, 7.36E-03,\r\n 539, 3.68E-01, 9.59E-01, 6.69E-03,\r\n 540, 3.84E-01, 9.67E-01, 6.08E-03,\r\n 541, 4.01E-01, 
9.73E-01, 5.53E-03,\r\n 542, 4.17E-01, 9.79E-01, 5.03E-03,\r\n 543, 4.33E-01, 9.83E-01, 4.57E-03,\r\n 544, 4.48E-01, 9.87E-01, 4.15E-03,\r\n 545, 4.63E-01, 9.89E-01, 3.77E-03,\r\n 546, 4.78E-01, 9.90E-01, 3.42E-03,\r\n 547, 4.93E-01, 9.90E-01, 3.11E-03,\r\n 548, 5.07E-01, 9.90E-01, 2.82E-03,\r\n 549, 5.22E-01, 9.90E-01, 2.56E-03,\r\n 550, 5.37E-01, 9.91E-01, 2.32E-03,\r\n 551, 5.53E-01, 9.92E-01, 2.11E-03,\r\n 552, 5.70E-01, 9.94E-01, 1.91E-03,\r\n 553, 5.87E-01, 9.97E-01, 1.73E-03,\r\n 554, 6.05E-01, 9.99E-01, 1.57E-03,\r\n 555, 6.23E-01, 1.00E+00, 1.43E-03,\r\n 556, 6.41E-01, 1.00E+00, 1.29E-03,\r\n 557, 6.59E-01, 9.99E-01, 1.17E-03,\r\n 558, 6.77E-01, 9.98E-01, 1.07E-03,\r\n 559, 6.95E-01, 9.96E-01, 9.67E-04,\r\n 560, 7.12E-01, 9.94E-01, 8.78E-04,\r\n 561, 7.30E-01, 9.93E-01, 7.97E-04,\r\n 562, 7.48E-01, 9.91E-01, 7.23E-04,\r\n 563, 7.65E-01, 9.89E-01, 6.56E-04,\r\n 564, 7.83E-01, 9.87E-01, 5.96E-04,\r\n 565, 8.02E-01, 9.85E-01, 5.41E-04,\r\n 566, 8.20E-01, 9.82E-01, 4.91E-04,\r\n 567, 8.39E-01, 9.78E-01, 4.46E-04,\r\n 568, 8.57E-01, 9.74E-01, 4.05E-04,\r\n 569, 8.75E-01, 9.69E-01, 3.68E-04,\r\n 570, 8.93E-01, 9.64E-01, 3.34E-04,\r\n 571, 9.11E-01, 9.58E-01, 3.04E-04,\r\n 572, 9.27E-01, 9.52E-01, 2.76E-04,\r\n 573, 9.43E-01, 9.45E-01, 2.51E-04,\r\n 574, 9.58E-01, 9.37E-01, 2.28E-04,\r\n 575, 9.72E-01, 9.29E-01, 2.08E-04,\r\n 576, 9.85E-01, 9.19E-01, 1.89E-04,\r\n 577, 9.97E-01, 9.08E-01, 1.72E-04,\r\n 578, 1.01E+00, 8.98E-01, 1.57E-04,\r\n 579, 1.02E+00, 8.87E-01, 1.43E-04,\r\n 580, 1.03E+00, 8.78E-01, 1.30E-04,\r\n 581, 1.05E+00, 8.69E-01, 1.18E-04,\r\n 582, 1.06E+00, 8.61E-01, 1.08E-04,\r\n 583, 1.08E+00, 8.53E-01, 9.83E-05,\r\n 584, 1.09E+00, 8.45E-01, 8.97E-05,\r\n 585, 1.11E+00, 8.37E-01, 8.18E-05,\r\n 586, 1.12E+00, 8.28E-01, 7.47E-05,\r\n 587, 1.13E+00, 8.19E-01, 6.82E-05,\r\n 588, 1.14E+00, 8.09E-01, 6.23E-05,\r\n 589, 1.14E+00, 7.98E-01, 5.70E-05,\r\n 590, 1.15E+00, 7.87E-01, 5.21E-05,\r\n 591, 1.15E+00, 7.76E-01, 4.76E-05,\r\n 592, 1.16E+00, 7.64E-01, 4.36E-05,\r\n 593, 1.16E+00, 7.52E-01, 3.99E-05,\r\n 594, 1.16E+00, 7.40E-01, 3.65E-05,\r\n 595, 1.16E+00, 7.27E-01, 3.35E-05,\r\n 596, 1.16E+00, 7.15E-01, 3.07E-05,\r\n 597, 1.16E+00, 7.02E-01, 2.81E-05,\r\n 598, 1.16E+00, 6.89E-01, 2.58E-05,\r\n 599, 1.15E+00, 6.76E-01, 2.37E-05,\r\n 600, 1.15E+00, 6.63E-01, 2.18E-05,\r\n 601, 1.14E+00, 6.50E-01, 2.00E-05,\r\n 602, 1.14E+00, 6.37E-01, 1.84E-05,\r\n 603, 1.13E+00, 6.24E-01, 1.69E-05,\r\n 604, 1.12E+00, 6.10E-01, 1.55E-05,\r\n 605, 1.11E+00, 5.97E-01, 1.43E-05,\r\n 606, 1.10E+00, 5.83E-01, 1.32E-05,\r\n 607, 1.09E+00, 5.70E-01, 1.21E-05,\r\n 608, 1.08E+00, 5.56E-01, 1.12E-05,\r\n 609, 1.06E+00, 5.42E-01, 1.03E-05,\r\n 610, 1.05E+00, 5.28E-01, 9.53E-06,\r\n 611, 1.03E+00, 5.14E-01, 8.80E-06,\r\n 612, 1.02E+00, 5.01E-01, 8.13E-06,\r\n 613, 9.98E-01, 4.87E-01, 7.51E-06,\r\n 614, 9.80E-01, 4.74E-01, 6.95E-06,\r\n 615, 9.62E-01, 4.60E-01, 6.43E-06,\r\n 616, 9.42E-01, 4.47E-01, 0.00E+00,\r\n 617, 9.23E-01, 4.33E-01, 0.00E+00,\r\n 618, 9.03E-01, 4.20E-01, 0.00E+00,\r\n 619, 8.83E-01, 4.08E-01, 0.00E+00,\r\n 620, 8.63E-01, 3.95E-01, 0.00E+00,\r\n 621, 8.43E-01, 3.83E-01, 0.00E+00,\r\n 622, 8.23E-01, 3.71E-01, 0.00E+00,\r\n 623, 8.03E-01, 3.59E-01, 0.00E+00,\r\n 624, 7.82E-01, 3.47E-01, 0.00E+00,\r\n 625, 7.60E-01, 3.35E-01, 0.00E+00,\r\n 626, 7.37E-01, 3.23E-01, 0.00E+00,\r\n 627, 7.14E-01, 3.11E-01, 0.00E+00,\r\n 628, 6.90E-01, 2.99E-01, 0.00E+00,\r\n 629, 6.65E-01, 2.87E-01, 0.00E+00,\r\n 630, 6.41E-01, 2.75E-01, 0.00E+00,\r\n 631, 6.18E-01, 2.64E-01, 0.00E+00,\r\n 632, 5.95E-01, 
2.53E-01, 0.00E+00,\r\n 633, 5.72E-01, 2.42E-01, 0.00E+00,\r\n 634, 5.50E-01, 2.32E-01, 0.00E+00,\r\n 635, 5.29E-01, 2.22E-01, 0.00E+00,\r\n 636, 5.08E-01, 2.12E-01, 0.00E+00,\r\n 637, 4.88E-01, 2.03E-01, 0.00E+00,\r\n 638, 4.69E-01, 1.94E-01, 0.00E+00,\r\n 639, 4.50E-01, 1.86E-01, 0.00E+00,\r\n 640, 4.32E-01, 1.78E-01, 0.00E+00,\r\n 641, 4.15E-01, 1.70E-01, 0.00E+00,\r\n 642, 3.98E-01, 1.62E-01, 0.00E+00,\r\n 643, 3.82E-01, 1.55E-01, 0.00E+00,\r\n 644, 3.66E-01, 1.48E-01, 0.00E+00,\r\n 645, 3.50E-01, 1.41E-01, 0.00E+00,\r\n 646, 3.33E-01, 1.34E-01, 0.00E+00,\r\n 647, 3.17E-01, 1.27E-01, 0.00E+00,\r\n 648, 3.02E-01, 1.21E-01, 0.00E+00,\r\n 649, 2.86E-01, 1.15E-01, 0.00E+00,\r\n 650, 2.71E-01, 1.08E-01, 0.00E+00,\r\n 651, 2.57E-01, 1.03E-01, 0.00E+00,\r\n 652, 2.43E-01, 9.68E-02, 0.00E+00,\r\n 653, 2.30E-01, 9.14E-02, 0.00E+00,\r\n 654, 2.18E-01, 8.63E-02, 0.00E+00,\r\n 655, 2.06E-01, 8.14E-02, 0.00E+00,\r\n 656, 1.94E-01, 7.67E-02, 0.00E+00,\r\n 657, 1.83E-01, 7.23E-02, 0.00E+00,\r\n 658, 1.73E-01, 6.81E-02, 0.00E+00,\r\n 659, 1.63E-01, 6.41E-02, 0.00E+00,\r\n 660, 1.54E-01, 6.03E-02, 0.00E+00,\r\n 661, 1.45E-01, 5.68E-02, 0.00E+00,\r\n 662, 1.36E-01, 5.34E-02, 0.00E+00,\r\n 663, 1.28E-01, 5.02E-02, 0.00E+00,\r\n 664, 1.21E-01, 4.71E-02, 0.00E+00,\r\n 665, 1.14E-01, 4.43E-02, 0.00E+00,\r\n 666, 1.07E-01, 4.15E-02, 0.00E+00,\r\n 667, 1.00E-01, 3.90E-02, 0.00E+00,\r\n 668, 9.41E-02, 3.66E-02, 0.00E+00,\r\n 669, 8.83E-02, 3.43E-02, 0.00E+00,\r\n 670, 8.28E-02, 3.21E-02, 0.00E+00,\r\n 671, 7.76E-02, 3.01E-02, 0.00E+00,\r\n 672, 7.27E-02, 2.82E-02, 0.00E+00,\r\n 673, 6.81E-02, 2.63E-02, 0.00E+00,\r\n 674, 6.37E-02, 2.46E-02, 0.00E+00,\r\n 675, 5.95E-02, 2.30E-02, 0.00E+00,\r\n 676, 5.56E-02, 2.15E-02, 0.00E+00,\r\n 677, 5.20E-02, 2.01E-02, 0.00E+00,\r\n 678, 4.85E-02, 1.87E-02, 0.00E+00,\r\n 679, 4.53E-02, 1.75E-02, 0.00E+00,\r\n 680, 4.22E-02, 1.63E-02, 0.00E+00,\r\n 681, 3.93E-02, 1.52E-02, 0.00E+00,\r\n 682, 3.67E-02, 1.41E-02, 0.00E+00,\r\n 683, 3.41E-02, 1.32E-02, 0.00E+00,\r\n 684, 3.17E-02, 1.22E-02, 0.00E+00,\r\n 685, 2.95E-02, 1.14E-02, 0.00E+00,\r\n 686, 2.74E-02, 1.05E-02, 0.00E+00,\r\n 687, 2.54E-02, 9.78E-03, 0.00E+00,\r\n 688, 2.35E-02, 9.06E-03, 0.00E+00,\r\n 689, 2.18E-02, 8.40E-03, 0.00E+00,\r\n 690, 2.03E-02, 7.80E-03, 0.00E+00,\r\n 691, 1.88E-02, 7.24E-03, 0.00E+00,\r\n 692, 1.75E-02, 6.73E-03, 0.00E+00,\r\n 693, 1.63E-02, 6.27E-03, 0.00E+00,\r\n 694, 1.52E-02, 5.83E-03, 0.00E+00,\r\n 695, 1.41E-02, 5.43E-03, 0.00E+00,\r\n 696, 1.31E-02, 5.05E-03, 0.00E+00,\r\n 697, 1.22E-02, 4.70E-03, 0.00E+00,\r\n 698, 1.14E-02, 4.37E-03, 0.00E+00,\r\n 699, 1.06E-02, 4.06E-03, 0.00E+00,\r\n 700, 9.82E-03, 3.78E-03, 0.00E+00,\r\n 701, 9.13E-03, 3.51E-03, 0.00E+00,\r\n 702, 8.49E-03, 3.27E-03, 0.00E+00,\r\n 703, 7.89E-03, 3.04E-03, 0.00E+00,\r\n 704, 7.33E-03, 2.82E-03, 0.00E+00,\r\n 705, 6.81E-03, 2.62E-03, 0.00E+00,\r\n 706, 6.32E-03, 2.43E-03, 0.00E+00,\r\n 707, 5.86E-03, 2.25E-03, 0.00E+00,\r\n 708, 5.43E-03, 2.09E-03, 0.00E+00,\r\n 709, 5.04E-03, 1.94E-03, 0.00E+00,\r\n 710, 4.67E-03, 1.80E-03, 0.00E+00,\r\n 711, 4.32E-03, 1.66E-03, 0.00E+00,\r\n 712, 4.01E-03, 1.54E-03, 0.00E+00,\r\n 713, 3.71E-03, 1.43E-03, 0.00E+00,\r\n 714, 3.44E-03, 1.33E-03, 0.00E+00,\r\n 715, 3.19E-03, 1.23E-03, 0.00E+00,\r\n 716, 2.96E-03, 1.14E-03, 0.00E+00,\r\n 717, 2.75E-03, 1.06E-03, 0.00E+00,\r\n 718, 2.56E-03, 9.85E-04, 0.00E+00,\r\n 719, 2.37E-03, 9.15E-04, 0.00E+00,\r\n 720, 2.21E-03, 8.50E-04, 0.00E+00,\r\n 721, 2.05E-03, 7.90E-04, 0.00E+00,\r\n 722, 1.90E-03, 7.33E-04, 0.00E+00,\r\n 723, 1.77E-03, 
6.81E-04, 0.00E+00,\r\n 724, 1.64E-03, 6.33E-04, 0.00E+00,\r\n 725, 1.52E-03, 5.88E-04, 0.00E+00,\r\n 726, 1.42E-03, 5.47E-04, 0.00E+00,\r\n 727, 1.32E-03, 5.09E-04, 0.00E+00,\r\n 728, 1.23E-03, 4.73E-04, 0.00E+00,\r\n 729, 1.14E-03, 4.40E-04, 0.00E+00,\r\n 730, 1.06E-03, 4.10E-04, 0.00E+00,\r\n 731, 9.88E-04, 3.82E-04, 0.00E+00,\r\n 732, 9.19E-04, 3.55E-04, 0.00E+00,\r\n 733, 8.55E-04, 3.30E-04, 0.00E+00,\r\n 734, 7.96E-04, 3.08E-04, 0.00E+00,\r\n 735, 7.40E-04, 2.86E-04, 0.00E+00,\r\n 736, 6.88E-04, 2.66E-04, 0.00E+00,\r\n 737, 6.40E-04, 2.47E-04, 0.00E+00,\r\n 738, 5.95E-04, 2.30E-04, 0.00E+00,\r\n 739, 5.54E-04, 2.14E-04, 0.00E+00,\r\n 740, 5.15E-04, 1.99E-04, 0.00E+00,\r\n 741, 4.80E-04, 1.86E-04, 0.00E+00,\r\n 742, 4.48E-04, 1.73E-04, 0.00E+00,\r\n 743, 4.17E-04, 1.62E-04, 0.00E+00,\r\n 744, 3.89E-04, 1.51E-04, 0.00E+00,\r\n 745, 3.63E-04, 1.41E-04, 0.00E+00,\r\n 746, 3.39E-04, 1.31E-04, 0.00E+00,\r\n 747, 3.16E-04, 1.22E-04, 0.00E+00,\r\n 748, 2.94E-04, 1.14E-04, 0.00E+00,\r\n 749, 2.74E-04, 1.06E-04, 0.00E+00,\r\n 750, 2.56E-04, 9.93E-05, 0.00E+00,\r\n 751, 2.38E-04, 9.27E-05, 0.00E+00,\r\n 752, 2.22E-04, 8.65E-05, 0.00E+00,\r\n 753, 2.08E-04, 8.07E-05, 0.00E+00,\r\n 754, 1.94E-04, 7.54E-05, 0.00E+00,\r\n 755, 1.81E-04, 7.04E-05, 0.00E+00,\r\n 756, 1.69E-04, 6.58E-05, 0.00E+00,\r\n 757, 1.58E-04, 6.15E-05, 0.00E+00,\r\n 758, 1.47E-04, 5.75E-05, 0.00E+00,\r\n 759, 1.38E-04, 5.37E-05, 0.00E+00,\r\n 760, 1.29E-04, 5.02E-05, 0.00E+00,\r\n 761, 1.20E-04, 4.69E-05, 0.00E+00,\r\n 762, 1.12E-04, 4.38E-05, 0.00E+00,\r\n 763, 1.05E-04, 4.10E-05, 0.00E+00,\r\n 764, 9.81E-05, 3.83E-05, 0.00E+00,\r\n 765, 9.17E-05, 3.58E-05, 0.00E+00,\r\n 766, 8.58E-05, 3.35E-05, 0.00E+00,\r\n 767, 8.03E-05, 3.14E-05, 0.00E+00,\r\n 768, 7.51E-05, 2.94E-05, 0.00E+00,\r\n 769, 7.03E-05, 2.75E-05, 0.00E+00,\r\n 770, 6.58E-05, 2.57E-05, 0.00E+00,\r\n 771, 6.15E-05, 2.41E-05, 0.00E+00,\r\n 772, 5.75E-05, 2.25E-05, 0.00E+00,\r\n 773, 5.38E-05, 2.11E-05, 0.00E+00,\r\n 774, 5.03E-05, 1.97E-05, 0.00E+00,\r\n 775, 4.71E-05, 1.85E-05, 0.00E+00,\r\n 776, 4.41E-05, 1.73E-05, 0.00E+00,\r\n 777, 4.13E-05, 1.62E-05, 0.00E+00,\r\n 778, 3.87E-05, 1.52E-05, 0.00E+00,\r\n 779, 3.63E-05, 1.43E-05, 0.00E+00,\r\n 780, 3.41E-05, 1.34E-05, 0.00E+00,\r\n 781, 3.20E-05, 1.26E-05, 0.00E+00,\r\n 782, 3.00E-05, 1.18E-05, 0.00E+00,\r\n 783, 2.81E-05, 1.10E-05, 0.00E+00,\r\n 784, 2.63E-05, 1.04E-05, 0.00E+00,\r\n 785, 2.47E-05, 9.72E-06, 0.00E+00,\r\n 786, 2.32E-05, 9.12E-06, 0.00E+00,\r\n 787, 2.17E-05, 8.56E-06, 0.00E+00,\r\n 788, 2.04E-05, 8.03E-06, 0.00E+00,\r\n 789, 1.91E-05, 7.54E-06, 0.00E+00,\r\n 790, 1.79E-05, 7.07E-06, 0.00E+00,\r\n 791, 1.68E-05, 6.64E-06, 0.00E+00,\r\n 792, 1.58E-05, 6.23E-06, 0.00E+00,\r\n 793, 1.48E-05, 5.85E-06, 0.00E+00,\r\n 794, 1.39E-05, 5.49E-06, 0.00E+00,\r\n 795, 1.31E-05, 5.16E-06, 0.00E+00,\r\n 796, 1.23E-05, 4.85E-06, 0.00E+00,\r\n 797, 1.15E-05, 4.56E-06, 0.00E+00,\r\n 798, 1.08E-05, 4.28E-06, 0.00E+00,\r\n 799, 1.02E-05, 4.03E-06, 0.00E+00,\r\n 800, 9.57E-06, 3.79E-06, 0.00E+00,\r\n 801, 9.00E-06, 3.56E-06, 0.00E+00,\r\n 802, 8.46E-06, 3.35E-06, 0.00E+00,\r\n 803, 7.96E-06, 3.16E-06, 0.00E+00,\r\n 804, 7.48E-06, 2.97E-06, 0.00E+00,\r\n 805, 7.04E-06, 2.79E-06, 0.00E+00,\r\n 806, 6.62E-06, 2.63E-06, 0.00E+00,\r\n 807, 6.22E-06, 2.47E-06, 0.00E+00,\r\n 808, 5.85E-06, 2.33E-06, 0.00E+00,\r\n 809, 5.50E-06, 2.19E-06, 0.00E+00,\r\n 810, 5.17E-06, 2.06E-06, 0.00E+00,\r\n 811, 4.86E-06, 1.94E-06, 0.00E+00,\r\n 812, 4.57E-06, 1.82E-06, 0.00E+00,\r\n 813, 4.30E-06, 1.72E-06, 0.00E+00,\r\n 814, 4.05E-06, 
1.62E-06, 0.00E+00,\r\n        815, 3.82E-06, 1.52E-06, 0.00E+00,\r\n        816, 3.59E-06, 1.44E-06, 0.00E+00,\r\n        817, 3.39E-06, 1.35E-06, 0.00E+00,\r\n        818, 3.19E-06, 1.28E-06, 0.00E+00,\r\n        819, 3.01E-06, 1.20E-06, 0.00E+00,\r\n        820, 2.84E-06, 1.14E-06, 0.00E+00,\r\n        821, 2.68E-06, 1.07E-06, 0.00E+00,\r\n        822, 2.52E-06, 1.01E-06, 0.00E+00,\r\n        823, 2.38E-06, 9.53E-07, 0.00E+00,\r\n        824, 2.24E-06, 8.99E-07, 0.00E+00,\r\n        825, 2.11E-06, 8.48E-07, 0.00E+00,\r\n        826, 1.99E-06, 8.00E-07, 0.00E+00,\r\n        827, 1.88E-06, 7.54E-07, 0.00E+00,\r\n        828, 1.77E-06, 7.12E-07, 0.00E+00,\r\n        829, 1.67E-06, 6.72E-07, 0.00E+00,\r\n        830, 1.58E-06, 6.35E-07, 0.00E+00,\r\n    ])\r\n    cmf = cmf.reshape(441, 4).astype(np.float64).T\r\n    if trim_visible:\r\n        prepend = np.array(\r\n            [[wl, 0.00E+00, 0.00E+00, 0.00E+00] for wl in range(380, 390)])\r\n        cmf = np.vstack((prepend, cmf.T))\r\n        cmf = cmf[cmf[:, 0] < 781]\r\n        cmf = cmf.T[:, ::binwidth].T\r\n    if asdf:\r\n        cmf = pd.DataFrame(data=cmf, columns=colnames)\r\n        cmf.set_index('Wavelength', inplace=True)\r\n        cmf.index = pd.Int64Index(cmf.index)\r\n    return cmf\r\n\r\n\r\ndef get_CIE_CMF(asdf: Optional[bool] = True, binwidth: Optional[int] = 1):\r\n    '''Get the CIE 1931 XYZ 2-deg color matching functions.\r\n\r\n    Parameters\r\n    ----------\r\n    asdf : bool, optional\r\n        Whether to return the results as a pandas DataFrame. The default is\r\n        True.\r\n    binwidth : int, optional\r\n        Width of the wavelength bins in nanometers (must be `1` or `5`). The\r\n        default is `1`.\r\n\r\n    Returns\r\n    -------\r\n    cmf : numpy.ndarray or pandas.DataFrame\r\n        The CIE 1931 XYZ 2-deg CMFs.\r\n\r\n    '''\r\n    colnames = ['Wavelength', 'X', 'Y', 'Z']\r\n\r\n    cmf = np.array([\r\n        380, 0.001368000000, 0.000039000000, 0.006450001000,\r\n        381, 0.001502050000, 0.000042826400, 0.007083216000,\r\n        382, 0.001642328000, 0.000046914600, 0.007745488000,\r\n        383, 0.001802382000, 0.000051589600, 0.008501152000,\r\n        384, 0.001995757000, 0.000057176400, 0.009414544000,\r\n        385, 0.002236000000, 0.000064000000, 0.010549990000,\r\n        386, 0.002535385000, 0.000072344210, 0.011965800000,\r\n        387, 0.002892603000, 0.000082212240, 0.013655870000,\r\n        388, 0.003300829000, 0.000093508160, 0.015588050000,\r\n        389, 0.003753236000, 0.000106136100, 0.017730150000,\r\n        390, 0.004243000000, 0.000120000000, 0.020050010000,\r\n        391, 0.004762389000, 0.000134984000, 0.022511360000,\r\n        392, 0.005330048000, 0.000151492000, 0.025202880000,\r\n        393, 0.005978712000, 0.000170208000, 0.028279720000,\r\n        394, 0.006741117000, 0.000191816000, 0.031897040000,\r\n        395, 0.007650000000, 0.000217000000, 0.036210000000,\r\n        396, 0.008751373000, 0.000246906700, 0.041437710000,\r\n        397, 0.010028880000, 0.000281240000, 0.047503720000,\r\n        398, 0.011421700000, 0.000318520000, 0.054119880000,\r\n        399, 0.012869010000, 0.000357266700, 0.060998030000,\r\n        400, 0.014310000000, 0.000396000000, 0.067850010000,\r\n        401, 0.015704430000, 0.000433714700, 0.074486320000,\r\n        402, 0.017147440000, 0.000473024000, 0.081361560000,\r\n        403, 0.018781220000, 0.000517876000, 0.089153640000,\r\n        404, 0.020748010000, 0.000572218700, 0.098540480000,\r\n        405, 0.023190000000, 0.000640000000, 0.110200000000,\r\n        406, 0.026207360000, 0.000724560000, 0.124613300000,\r\n        407, 0.029782480000, 0.000825500000, 0.141701700000,\r\n        408, 0.033880920000, 0.000941160000, 0.161303500000,\r\n        409, 0.038468240000, 0.001069880000, 0.183256800000,\r\n        410, 0.043510000000, 0.001210000000, 0.207400000000,\r\n        411, 0.048995600000, 0.001362091000, 0.233692100000,\r\n        412, 0.055022600000, 0.001530752000, 
0.262611400000,\r\n 413, 0.061718800000, 0.001720368000, 0.294774600000,\r\n 414, 0.069212000000, 0.001935323000, 0.330798500000,\r\n 415, 0.077630000000, 0.002180000000, 0.371300000000,\r\n 416, 0.086958110000, 0.002454800000, 0.416209100000,\r\n 417, 0.097176720000, 0.002764000000, 0.465464200000,\r\n 418, 0.108406300000, 0.003117800000, 0.519694800000,\r\n 419, 0.120767200000, 0.003526400000, 0.579530300000,\r\n 420, 0.134380000000, 0.004000000000, 0.645600000000,\r\n 421, 0.149358200000, 0.004546240000, 0.718483800000,\r\n 422, 0.165395700000, 0.005159320000, 0.796713300000,\r\n 423, 0.181983100000, 0.005829280000, 0.877845900000,\r\n 424, 0.198611000000, 0.006546160000, 0.959439000000,\r\n 425, 0.214770000000, 0.007300000000, 1.039050100000,\r\n 426, 0.230186800000, 0.008086507000, 1.115367300000,\r\n 427, 0.244879700000, 0.008908720000, 1.188497100000,\r\n 428, 0.258777300000, 0.009767680000, 1.258123300000,\r\n 429, 0.271807900000, 0.010664430000, 1.323929600000,\r\n 430, 0.283900000000, 0.011600000000, 1.385600000000,\r\n 431, 0.294943800000, 0.012573170000, 1.442635200000,\r\n 432, 0.304896500000, 0.013582720000, 1.494803500000,\r\n 433, 0.313787300000, 0.014629680000, 1.542190300000,\r\n 434, 0.321645400000, 0.015715090000, 1.584880700000,\r\n 435, 0.328500000000, 0.016840000000, 1.622960000000,\r\n 436, 0.334351300000, 0.018007360000, 1.656404800000,\r\n 437, 0.339210100000, 0.019214480000, 1.685295900000,\r\n 438, 0.343121300000, 0.020453920000, 1.709874500000,\r\n 439, 0.346129600000, 0.021718240000, 1.730382100000,\r\n 440, 0.348280000000, 0.023000000000, 1.747060000000,\r\n 441, 0.349599900000, 0.024294610000, 1.760044600000,\r\n 442, 0.350147400000, 0.025610240000, 1.769623300000,\r\n 443, 0.350013000000, 0.026958570000, 1.776263700000,\r\n 444, 0.349287000000, 0.028351250000, 1.780433400000,\r\n 445, 0.348060000000, 0.029800000000, 1.782600000000,\r\n 446, 0.346373300000, 0.031310830000, 1.782968200000,\r\n 447, 0.344262400000, 0.032883680000, 1.781699800000,\r\n 448, 0.341808800000, 0.034521120000, 1.779198200000,\r\n 449, 0.339094100000, 0.036225710000, 1.775867100000,\r\n 450, 0.336200000000, 0.038000000000, 1.772110000000,\r\n 451, 0.333197700000, 0.039846670000, 1.768258900000,\r\n 452, 0.330041100000, 0.041768000000, 1.764039000000,\r\n 453, 0.326635700000, 0.043766000000, 1.758943800000,\r\n 454, 0.322886800000, 0.045842670000, 1.752466300000,\r\n 455, 0.318700000000, 0.048000000000, 1.744100000000,\r\n 456, 0.314025100000, 0.050243680000, 1.733559500000,\r\n 457, 0.308884000000, 0.052573040000, 1.720858100000,\r\n 458, 0.303290400000, 0.054980560000, 1.705936900000,\r\n 459, 0.297257900000, 0.057458720000, 1.688737200000,\r\n 460, 0.290800000000, 0.060000000000, 1.669200000000,\r\n 461, 0.283970100000, 0.062601970000, 1.647528700000,\r\n 462, 0.276721400000, 0.065277520000, 1.623412700000,\r\n 463, 0.268917800000, 0.068042080000, 1.596022300000,\r\n 464, 0.260422700000, 0.070911090000, 1.564528000000,\r\n 465, 0.251100000000, 0.073900000000, 1.528100000000,\r\n 466, 0.240847500000, 0.077016000000, 1.486111400000,\r\n 467, 0.229851200000, 0.080266400000, 1.439521500000,\r\n 468, 0.218407200000, 0.083666800000, 1.389879900000,\r\n 469, 0.206811500000, 0.087232800000, 1.338736200000,\r\n 470, 0.195360000000, 0.090980000000, 1.287640000000,\r\n 471, 0.184213600000, 0.094917550000, 1.237422300000,\r\n 472, 0.173327300000, 0.099045840000, 1.187824300000,\r\n 473, 0.162688100000, 0.103367400000, 1.138761100000,\r\n 474, 0.152283300000, 0.107884600000, 1.090148000000,\r\n 
475, 0.142100000000, 0.112600000000, 1.041900000000,\r\n 476, 0.132178600000, 0.117532000000, 0.994197600000,\r\n 477, 0.122569600000, 0.122674400000, 0.947347300000,\r\n 478, 0.113275200000, 0.127992800000, 0.901453100000,\r\n 479, 0.104297900000, 0.133452800000, 0.856619300000,\r\n 480, 0.095640000000, 0.139020000000, 0.812950100000,\r\n 481, 0.087299550000, 0.144676400000, 0.770517300000,\r\n 482, 0.079308040000, 0.150469300000, 0.729444800000,\r\n 483, 0.071717760000, 0.156461900000, 0.689913600000,\r\n 484, 0.064580990000, 0.162717700000, 0.652104900000,\r\n 485, 0.057950010000, 0.169300000000, 0.616200000000,\r\n 486, 0.051862110000, 0.176243100000, 0.582328600000,\r\n 487, 0.046281520000, 0.183558100000, 0.550416200000,\r\n 488, 0.041150880000, 0.191273500000, 0.520337600000,\r\n 489, 0.036412830000, 0.199418000000, 0.491967300000,\r\n 490, 0.032010000000, 0.208020000000, 0.465180000000,\r\n 491, 0.027917200000, 0.217119900000, 0.439924600000,\r\n 492, 0.024144400000, 0.226734500000, 0.416183600000,\r\n 493, 0.020687000000, 0.236857100000, 0.393882200000,\r\n 494, 0.017540400000, 0.247481200000, 0.372945900000,\r\n 495, 0.014700000000, 0.258600000000, 0.353300000000,\r\n 496, 0.012161790000, 0.270184900000, 0.334857800000,\r\n 497, 0.009919960000, 0.282293900000, 0.317552100000,\r\n 498, 0.007967240000, 0.295050500000, 0.301337500000,\r\n 499, 0.006296346000, 0.308578000000, 0.286168600000,\r\n 500, 0.004900000000, 0.323000000000, 0.272000000000,\r\n 501, 0.003777173000, 0.338402100000, 0.258817100000,\r\n 502, 0.002945320000, 0.354685800000, 0.246483800000,\r\n 503, 0.002424880000, 0.371698600000, 0.234771800000,\r\n 504, 0.002236293000, 0.389287500000, 0.223453300000,\r\n 505, 0.002400000000, 0.407300000000, 0.212300000000,\r\n 506, 0.002925520000, 0.425629900000, 0.201169200000,\r\n 507, 0.003836560000, 0.444309600000, 0.190119600000,\r\n 508, 0.005174840000, 0.463394400000, 0.179225400000,\r\n 509, 0.006982080000, 0.482939500000, 0.168560800000,\r\n 510, 0.009300000000, 0.503000000000, 0.158200000000,\r\n 511, 0.012149490000, 0.523569300000, 0.148138300000,\r\n 512, 0.015535880000, 0.544512000000, 0.138375800000,\r\n 513, 0.019477520000, 0.565690000000, 0.128994200000,\r\n 514, 0.023992770000, 0.586965300000, 0.120075100000,\r\n 515, 0.029100000000, 0.608200000000, 0.111700000000,\r\n 516, 0.034814850000, 0.629345600000, 0.103904800000,\r\n 517, 0.041120160000, 0.650306800000, 0.096667480000,\r\n 518, 0.047985040000, 0.670875200000, 0.089982720000,\r\n 519, 0.055378610000, 0.690842400000, 0.083845310000,\r\n 520, 0.063270000000, 0.710000000000, 0.078249990000,\r\n 521, 0.071635010000, 0.728185200000, 0.073208990000,\r\n 522, 0.080462240000, 0.745463600000, 0.068678160000,\r\n 523, 0.089739960000, 0.761969400000, 0.064567840000,\r\n 524, 0.099456450000, 0.777836800000, 0.060788350000,\r\n 525, 0.109600000000, 0.793200000000, 0.057250010000,\r\n 526, 0.120167400000, 0.808110400000, 0.053904350000,\r\n 527, 0.131114500000, 0.822496200000, 0.050746640000,\r\n 528, 0.142367900000, 0.836306800000, 0.047752760000,\r\n 529, 0.153854200000, 0.849491600000, 0.044898590000,\r\n 530, 0.165500000000, 0.862000000000, 0.042160000000,\r\n 531, 0.177257100000, 0.873810800000, 0.039507280000,\r\n 532, 0.189140000000, 0.884962400000, 0.036935640000,\r\n 533, 0.201169400000, 0.895493600000, 0.034458360000,\r\n 534, 0.213365800000, 0.905443200000, 0.032088720000,\r\n 535, 0.225749900000, 0.914850100000, 0.029840000000,\r\n 536, 0.238320900000, 0.923734800000, 0.027711810000,\r\n 537, 0.251066800000, 
0.932092400000, 0.025694440000,\r\n 538, 0.263992200000, 0.939922600000, 0.023787160000,\r\n 539, 0.277101700000, 0.947225200000, 0.021989250000,\r\n 540, 0.290400000000, 0.954000000000, 0.020300000000,\r\n 541, 0.303891200000, 0.960256100000, 0.018718050000,\r\n 542, 0.317572600000, 0.966007400000, 0.017240360000,\r\n 543, 0.331438400000, 0.971260600000, 0.015863640000,\r\n 544, 0.345482800000, 0.976022500000, 0.014584610000,\r\n 545, 0.359700000000, 0.980300000000, 0.013400000000,\r\n 546, 0.374083900000, 0.984092400000, 0.012307230000,\r\n 547, 0.388639600000, 0.987418200000, 0.011301880000,\r\n 548, 0.403378400000, 0.990312800000, 0.010377920000,\r\n 549, 0.418311500000, 0.992811600000, 0.009529306000,\r\n 550, 0.433449900000, 0.994950100000, 0.008749999000,\r\n 551, 0.448795300000, 0.996710800000, 0.008035200000,\r\n 552, 0.464336000000, 0.998098300000, 0.007381600000,\r\n 553, 0.480064000000, 0.999112000000, 0.006785400000,\r\n 554, 0.495971300000, 0.999748200000, 0.006242800000,\r\n 555, 0.512050100000, 1.000000000000, 0.005749999000,\r\n 556, 0.528295900000, 0.999856700000, 0.005303600000,\r\n 557, 0.544691600000, 0.999304600000, 0.004899800000,\r\n 558, 0.561209400000, 0.998325500000, 0.004534200000,\r\n 559, 0.577821500000, 0.996898700000, 0.004202400000,\r\n 560, 0.594500000000, 0.995000000000, 0.003900000000,\r\n 561, 0.611220900000, 0.992600500000, 0.003623200000,\r\n 562, 0.627975800000, 0.989742600000, 0.003370600000,\r\n 563, 0.644760200000, 0.986444400000, 0.003141400000,\r\n 564, 0.661569700000, 0.982724100000, 0.002934800000,\r\n 565, 0.678400000000, 0.978600000000, 0.002749999000,\r\n 566, 0.695239200000, 0.974083700000, 0.002585200000,\r\n 567, 0.712058600000, 0.969171200000, 0.002438600000,\r\n 568, 0.728828400000, 0.963856800000, 0.002309400000,\r\n 569, 0.745518800000, 0.958134900000, 0.002196800000,\r\n 570, 0.762100000000, 0.952000000000, 0.002100000000,\r\n 571, 0.778543200000, 0.945450400000, 0.002017733000,\r\n 572, 0.794825600000, 0.938499200000, 0.001948200000,\r\n 573, 0.810926400000, 0.931162800000, 0.001889800000,\r\n 574, 0.826824800000, 0.923457600000, 0.001840933000,\r\n 575, 0.842500000000, 0.915400000000, 0.001800000000,\r\n 576, 0.857932500000, 0.907006400000, 0.001766267000,\r\n 577, 0.873081600000, 0.898277200000, 0.001737800000,\r\n 578, 0.887894400000, 0.889204800000, 0.001711200000,\r\n 579, 0.902318100000, 0.879781600000, 0.001683067000,\r\n 580, 0.916300000000, 0.870000000000, 0.001650001000,\r\n 581, 0.929799500000, 0.859861300000, 0.001610133000,\r\n 582, 0.942798400000, 0.849392000000, 0.001564400000,\r\n 583, 0.955277600000, 0.838622000000, 0.001513600000,\r\n 584, 0.967217900000, 0.827581300000, 0.001458533000,\r\n 585, 0.978600000000, 0.816300000000, 0.001400000000,\r\n 586, 0.989385600000, 0.804794700000, 0.001336667000,\r\n 587, 0.999548800000, 0.793082000000, 0.001270000000,\r\n 588, 1.009089200000, 0.781192000000, 0.001205000000,\r\n 589, 1.018006400000, 0.769154700000, 0.001146667000,\r\n 590, 1.026300000000, 0.757000000000, 0.001100000000,\r\n 591, 1.033982700000, 0.744754100000, 0.001068800000,\r\n 592, 1.040986000000, 0.732422400000, 0.001049400000,\r\n 593, 1.047188000000, 0.720003600000, 0.001035600000,\r\n 594, 1.052466700000, 0.707496500000, 0.001021200000,\r\n 595, 1.056700000000, 0.694900000000, 0.001000000000,\r\n 596, 1.059794400000, 0.682219200000, 0.000968640000,\r\n 597, 1.061799200000, 0.669471600000, 0.000929920000,\r\n 598, 1.062806800000, 0.656674400000, 0.000886880000,\r\n 599, 1.062909600000, 0.643844800000, 
0.000842560000,\r\n 600, 1.062200000000, 0.631000000000, 0.000800000000,\r\n 601, 1.060735200000, 0.618155500000, 0.000760960000,\r\n 602, 1.058443600000, 0.605314400000, 0.000723680000,\r\n 603, 1.055224400000, 0.592475600000, 0.000685920000,\r\n 604, 1.050976800000, 0.579637900000, 0.000645440000,\r\n 605, 1.045600000000, 0.566800000000, 0.000600000000,\r\n 606, 1.039036900000, 0.553961100000, 0.000547866700,\r\n 607, 1.031360800000, 0.541137200000, 0.000491600000,\r\n 608, 1.022666200000, 0.528352800000, 0.000435400000,\r\n 609, 1.013047700000, 0.515632300000, 0.000383466700,\r\n 610, 1.002600000000, 0.503000000000, 0.000340000000,\r\n 611, 0.991367500000, 0.490468800000, 0.000307253300,\r\n 612, 0.979331400000, 0.478030400000, 0.000283160000,\r\n 613, 0.966491600000, 0.465677600000, 0.000265440000,\r\n 614, 0.952847900000, 0.453403200000, 0.000251813300,\r\n 615, 0.938400000000, 0.441200000000, 0.000240000000,\r\n 616, 0.923194000000, 0.429080000000, 0.000229546700,\r\n 617, 0.907244000000, 0.417036000000, 0.000220640000,\r\n 618, 0.890502000000, 0.405032000000, 0.000211960000,\r\n 619, 0.872920000000, 0.393032000000, 0.000202186700,\r\n 620, 0.854449900000, 0.381000000000, 0.000190000000,\r\n 621, 0.835084000000, 0.368918400000, 0.000174213300,\r\n 622, 0.814946000000, 0.356827200000, 0.000155640000,\r\n 623, 0.794186000000, 0.344776800000, 0.000135960000,\r\n 624, 0.772954000000, 0.332817600000, 0.000116853300,\r\n 625, 0.751400000000, 0.321000000000, 0.000100000000,\r\n 626, 0.729583600000, 0.309338100000, 0.000086133330,\r\n 627, 0.707588800000, 0.297850400000, 0.000074600000,\r\n 628, 0.685602200000, 0.286593600000, 0.000065000000,\r\n 629, 0.663810400000, 0.275624500000, 0.000056933330,\r\n 630, 0.642400000000, 0.265000000000, 0.000049999990,\r\n 631, 0.621514900000, 0.254763200000, 0.000044160000,\r\n 632, 0.601113800000, 0.244889600000, 0.000039480000,\r\n 633, 0.581105200000, 0.235334400000, 0.000035720000,\r\n 634, 0.561397700000, 0.226052800000, 0.000032640000,\r\n 635, 0.541900000000, 0.217000000000, 0.000030000000,\r\n 636, 0.522599500000, 0.208161600000, 0.000027653330,\r\n 637, 0.503546400000, 0.199548800000, 0.000025560000,\r\n 638, 0.484743600000, 0.191155200000, 0.000023640000,\r\n 639, 0.466193900000, 0.182974400000, 0.000021813330,\r\n 640, 0.447900000000, 0.175000000000, 0.000020000000,\r\n 641, 0.429861300000, 0.167223500000, 0.000018133330,\r\n 642, 0.412098000000, 0.159646400000, 0.000016200000,\r\n 643, 0.394644000000, 0.152277600000, 0.000014200000,\r\n 644, 0.377533300000, 0.145125900000, 0.000012133330,\r\n 645, 0.360800000000, 0.138200000000, 0.000010000000,\r\n 646, 0.344456300000, 0.131500300000, 0.000007733333,\r\n 647, 0.328516800000, 0.125024800000, 0.000005400000,\r\n 648, 0.313019200000, 0.118779200000, 0.000003200000,\r\n 649, 0.298001100000, 0.112769100000, 0.000001333333,\r\n 650, 0.283500000000, 0.107000000000, 0.000000000000,\r\n 651, 0.269544800000, 0.101476200000, 0.000000000000,\r\n 652, 0.256118400000, 0.096188640000, 0.000000000000,\r\n 653, 0.243189600000, 0.091122960000, 0.000000000000,\r\n 654, 0.230727200000, 0.086264850000, 0.000000000000,\r\n 655, 0.218700000000, 0.081600000000, 0.000000000000,\r\n 656, 0.207097100000, 0.077120640000, 0.000000000000,\r\n 657, 0.195923200000, 0.072825520000, 0.000000000000,\r\n 658, 0.185170800000, 0.068710080000, 0.000000000000,\r\n 659, 0.174832300000, 0.064769760000, 0.000000000000,\r\n 660, 0.164900000000, 0.061000000000, 0.000000000000,\r\n 661, 0.155366700000, 0.057396210000, 0.000000000000,\r\n 
662, 0.146230000000, 0.053955040000, 0.000000000000,\r\n 663, 0.137490000000, 0.050673760000, 0.000000000000,\r\n 664, 0.129146700000, 0.047549650000, 0.000000000000,\r\n 665, 0.121200000000, 0.044580000000, 0.000000000000,\r\n 666, 0.113639700000, 0.041758720000, 0.000000000000,\r\n 667, 0.106465000000, 0.039084960000, 0.000000000000,\r\n 668, 0.099690440000, 0.036563840000, 0.000000000000,\r\n 669, 0.093330610000, 0.034200480000, 0.000000000000,\r\n 670, 0.087400000000, 0.032000000000, 0.000000000000,\r\n 671, 0.081900960000, 0.029962610000, 0.000000000000,\r\n 672, 0.076804280000, 0.028076640000, 0.000000000000,\r\n 673, 0.072077120000, 0.026329360000, 0.000000000000,\r\n 674, 0.067686640000, 0.024708050000, 0.000000000000,\r\n 675, 0.063600000000, 0.023200000000, 0.000000000000,\r\n 676, 0.059806850000, 0.021800770000, 0.000000000000,\r\n 677, 0.056282160000, 0.020501120000, 0.000000000000,\r\n 678, 0.052971040000, 0.019281080000, 0.000000000000,\r\n 679, 0.049818610000, 0.018120690000, 0.000000000000,\r\n 680, 0.046770000000, 0.017000000000, 0.000000000000,\r\n 681, 0.043784050000, 0.015903790000, 0.000000000000,\r\n 682, 0.040875360000, 0.014837180000, 0.000000000000,\r\n 683, 0.038072640000, 0.013810680000, 0.000000000000,\r\n 684, 0.035404610000, 0.012834780000, 0.000000000000,\r\n 685, 0.032900000000, 0.011920000000, 0.000000000000,\r\n 686, 0.030564190000, 0.011068310000, 0.000000000000,\r\n 687, 0.028380560000, 0.010273390000, 0.000000000000,\r\n 688, 0.026344840000, 0.009533311000, 0.000000000000,\r\n 689, 0.024452750000, 0.008846157000, 0.000000000000,\r\n 690, 0.022700000000, 0.008210000000, 0.000000000000,\r\n 691, 0.021084290000, 0.007623781000, 0.000000000000,\r\n 692, 0.019599880000, 0.007085424000, 0.000000000000,\r\n 693, 0.018237320000, 0.006591476000, 0.000000000000,\r\n 694, 0.016987170000, 0.006138485000, 0.000000000000,\r\n 695, 0.015840000000, 0.005723000000, 0.000000000000,\r\n 696, 0.014790640000, 0.005343059000, 0.000000000000,\r\n 697, 0.013831320000, 0.004995796000, 0.000000000000,\r\n 698, 0.012948680000, 0.004676404000, 0.000000000000,\r\n 699, 0.012129200000, 0.004380075000, 0.000000000000,\r\n 700, 0.011359160000, 0.004102000000, 0.000000000000,\r\n 701, 0.010629350000, 0.003838453000, 0.000000000000,\r\n 702, 0.009938846000, 0.003589099000, 0.000000000000,\r\n 703, 0.009288422000, 0.003354219000, 0.000000000000,\r\n 704, 0.008678854000, 0.003134093000, 0.000000000000,\r\n 705, 0.008110916000, 0.002929000000, 0.000000000000,\r\n 706, 0.007582388000, 0.002738139000, 0.000000000000,\r\n 707, 0.007088746000, 0.002559876000, 0.000000000000,\r\n 708, 0.006627313000, 0.002393244000, 0.000000000000,\r\n 709, 0.006195408000, 0.002237275000, 0.000000000000,\r\n 710, 0.005790346000, 0.002091000000, 0.000000000000,\r\n 711, 0.005409826000, 0.001953587000, 0.000000000000,\r\n 712, 0.005052583000, 0.001824580000, 0.000000000000,\r\n 713, 0.004717512000, 0.001703580000, 0.000000000000,\r\n 714, 0.004403507000, 0.001590187000, 0.000000000000,\r\n 715, 0.004109457000, 0.001484000000, 0.000000000000,\r\n 716, 0.003833913000, 0.001384496000, 0.000000000000,\r\n 717, 0.003575748000, 0.001291268000, 0.000000000000,\r\n 718, 0.003334342000, 0.001204092000, 0.000000000000,\r\n 719, 0.003109075000, 0.001122744000, 0.000000000000,\r\n 720, 0.002899327000, 0.001047000000, 0.000000000000,\r\n 721, 0.002704348000, 0.000976589600, 0.000000000000,\r\n 722, 0.002523020000, 0.000911108800, 0.000000000000,\r\n 723, 0.002354168000, 0.000850133200, 0.000000000000,\r\n 724, 0.002196616000, 
0.000793238400, 0.000000000000,\r\n 725, 0.002049190000, 0.000740000000, 0.000000000000,\r\n 726, 0.001910960000, 0.000690082700, 0.000000000000,\r\n 727, 0.001781438000, 0.000643310000, 0.000000000000,\r\n 728, 0.001660110000, 0.000599496000, 0.000000000000,\r\n 729, 0.001546459000, 0.000558454700, 0.000000000000,\r\n 730, 0.001439971000, 0.000520000000, 0.000000000000,\r\n 731, 0.001340042000, 0.000483913600, 0.000000000000,\r\n 732, 0.001246275000, 0.000450052800, 0.000000000000,\r\n 733, 0.001158471000, 0.000418345200, 0.000000000000,\r\n 734, 0.001076430000, 0.000388718400, 0.000000000000,\r\n 735, 0.000999949300, 0.000361100000, 0.000000000000,\r\n 736, 0.000928735800, 0.000335383500, 0.000000000000,\r\n 737, 0.000862433200, 0.000311440400, 0.000000000000,\r\n 738, 0.000800750300, 0.000289165600, 0.000000000000,\r\n 739, 0.000743396000, 0.000268453900, 0.000000000000,\r\n 740, 0.000690078600, 0.000249200000, 0.000000000000,\r\n 741, 0.000640515600, 0.000231301900, 0.000000000000,\r\n 742, 0.000594502100, 0.000214685600, 0.000000000000,\r\n 743, 0.000551864600, 0.000199288400, 0.000000000000,\r\n 744, 0.000512429000, 0.000185047500, 0.000000000000,\r\n 745, 0.000476021300, 0.000171900000, 0.000000000000,\r\n 746, 0.000442453600, 0.000159778100, 0.000000000000,\r\n 747, 0.000411511700, 0.000148604400, 0.000000000000,\r\n 748, 0.000382981400, 0.000138301600, 0.000000000000,\r\n 749, 0.000356649100, 0.000128792500, 0.000000000000,\r\n 750, 0.000332301100, 0.000120000000, 0.000000000000,\r\n 751, 0.000309758600, 0.000111859500, 0.000000000000,\r\n 752, 0.000288887100, 0.000104322400, 0.000000000000,\r\n 753, 0.000269539400, 0.000097335600, 0.000000000000,\r\n 754, 0.000251568200, 0.000090845870, 0.000000000000,\r\n 755, 0.000234826100, 0.000084800000, 0.000000000000,\r\n 756, 0.000219171000, 0.000079146670, 0.000000000000,\r\n 757, 0.000204525800, 0.000073858000, 0.000000000000,\r\n 758, 0.000190840500, 0.000068916000, 0.000000000000,\r\n 759, 0.000178065400, 0.000064302670, 0.000000000000,\r\n 760, 0.000166150500, 0.000060000000, 0.000000000000,\r\n 761, 0.000155023600, 0.000055981870, 0.000000000000,\r\n 762, 0.000144621900, 0.000052225600, 0.000000000000,\r\n 763, 0.000134909800, 0.000048718400, 0.000000000000,\r\n 764, 0.000125852000, 0.000045447470, 0.000000000000,\r\n 765, 0.000117413000, 0.000042400000, 0.000000000000,\r\n 766, 0.000109551500, 0.000039561040, 0.000000000000,\r\n 767, 0.000102224500, 0.000036915120, 0.000000000000,\r\n 768, 0.000095394450, 0.000034448680, 0.000000000000,\r\n 769, 0.000089023900, 0.000032148160, 0.000000000000,\r\n 770, 0.000083075270, 0.000030000000, 0.000000000000,\r\n 771, 0.000077512690, 0.000027991250, 0.000000000000,\r\n 772, 0.000072313040, 0.000026113560, 0.000000000000,\r\n 773, 0.000067457780, 0.000024360240, 0.000000000000,\r\n 774, 0.000062928440, 0.000022724610, 0.000000000000,\r\n 775, 0.000058706520, 0.000021200000, 0.000000000000,\r\n 776, 0.000054770280, 0.000019778550, 0.000000000000,\r\n 777, 0.000051099180, 0.000018452850, 0.000000000000,\r\n 778, 0.000047676540, 0.000017216870, 0.000000000000,\r\n 779, 0.000044485670, 0.000016064590, 0.000000000000,\r\n 780, 0.000041509940, 0.000014990000, 0.000000000000\r\n ])\r\n\r\n cmf = cmf.reshape(401, 4).astype(np.float64).T\r\n cmf = cmf[:, ::binwidth]\r\n if asdf:\r\n cmf = pd.DataFrame(data=cmf.T, columns=colnames)\r\n cmf.set_index('Wavelength', inplace=True)\r\n cmf.index = pd.Int64Index(cmf.index)\r\n return cmf\r\n\r\n\r\ndef get_CIES026(binwidth: Optional[int] = 1,\r\n fillna: 
Optional[bool] = True) -> pd.DataFrame:\r\n    '''Get the CIE026 spectral sensitivities.\r\n\r\n    Parameters\r\n    ----------\r\n    binwidth : int, optional\r\n        Width of the wavelength bins in nanometers (must be `1`, `2` or `5`).\r\n        The default is `1`.\r\n    fillna : bool, optional\r\n        Whether to replace nan values with zero. The default is True.\r\n\r\n    Returns\r\n    -------\r\n    sss : numpy.ndarray or pandas.DataFrame\r\n        CIES026 spectral sensitivities for s, m, l, rods, and melanopsin.\r\n\r\n    '''\r\n    if binwidth not in [1, 2, 5]:\r\n        raise ValueError('Must specify 1, 2 or 5 for binwidth')\r\n\r\n    colnames = ['Wavelength', 'S', 'M', 'L', 'R', 'I']\r\n\r\n    sss = np.array([\r\n        380, np.nan, np.nan, np.nan, 0.000589, 0.00091816,\r\n        381, np.nan, np.nan, np.nan, 0.000665, 0.0010456,\r\n        382, np.nan, np.nan, np.nan, 0.000752, 0.0011786,\r\n        383, np.nan, np.nan, np.nan, 0.000854, 0.0013228,\r\n        384, np.nan, np.nan, np.nan, 0.000972, 0.0014838,\r\n        385, np.nan, np.nan, np.nan, 0.001108, 0.0016672,\r\n        386, np.nan, np.nan, np.nan, 0.001268, 0.001881,\r\n        387, np.nan, np.nan, np.nan, 0.001453, 0.0021299,\r\n        388, np.nan, np.nan, np.nan, 0.001668, 0.0024146,\r\n        389, np.nan, np.nan, np.nan, 0.001918, 0.0027358,\r\n        390, 0.0061427, 0.00035823, 0.00040762, 0.002209, 0.0030944,\r\n        391, 0.0074428, 0.00043866, 0.00049707, 0.002547, 0.0035071,\r\n        392, 0.0090166, 0.00053623, 0.00060471, 0.002939, 0.0039908,\r\n        393, 0.010917, 0.00065406, 0.00073364, 0.003394, 0.0045468,\r\n        394, 0.013205, 0.00079565, 0.00088725, 0.003921, 0.0051763,\r\n        395, 0.015952, 0.00096483, 0.0010692, 0.00453, 0.0058804,\r\n        396, 0.019235, 0.0011657, 0.0012834, 0.00524, 0.0066933,\r\n        397, 0.023144, 0.0014026, 0.0015338, 0.00605, 0.007651,\r\n        398, 0.027775, 0.0016799, 0.0018244, 0.00698, 0.0087569,\r\n        399, 0.033234, 0.0020018, 0.002159, 0.00806, 0.010015,\r\n        400, 0.039631, 0.0023721, 0.0025407, 0.00929, 0.011428,\r\n        401, 0.04708, 0.0027943, 0.0029728, 0.0107, 0.013077,\r\n        402, 0.055701, 0.0032737, 0.0034599, 0.01231, 0.01504,\r\n        403, 0.065614, 0.0038166, 0.0040079, 0.01413, 0.017317,\r\n        404, 0.076932, 0.0044302, 0.0046237, 0.01619, 0.019907,\r\n        405, 0.089761, 0.0051232, 0.0053155, 0.01852, 0.022811,\r\n        406, 0.10419, 0.0059046, 0.0060914, 0.02113, 0.026319,\r\n        407, 0.12027, 0.0067801, 0.0069529, 0.02405, 0.030596,\r\n        408, 0.13804, 0.0077526, 0.0078963, 0.0273, 0.035454,\r\n        409, 0.15749, 0.0088229, 0.008913, 0.03089, 0.040703,\r\n        410, 0.17853, 0.0099884, 0.0099884, 0.03484, 0.046155,\r\n        411, 0.20108, 0.011245, 0.011105, 0.03916, 0.051782,\r\n        412, 0.22509, 0.012595, 0.012261, 0.0439, 0.05778,\r\n        413, 0.25057, 0.014042, 0.013458, 0.049, 0.064297,\r\n        414, 0.27751, 0.015594, 0.014704, 0.0545, 0.07148,\r\n        415, 0.30594, 0.01726, 0.016013, 0.0604, 0.079477,\r\n        416, 0.33586, 0.019047, 0.017396, 0.0668, 0.089181,\r\n        417, 0.36698, 0.020955, 0.018845, 0.0736, 0.10076,\r\n        418, 0.39888, 0.022976, 0.020344, 0.0808, 0.11326,\r\n        419, 0.431, 0.025102, 0.02187, 0.0885, 0.12573,\r\n        420, 0.46269, 0.027316, 0.023396, 0.0966, 0.13724,\r\n        421, 0.49336, 0.029606, 0.024896, 0.1052, 0.14745,\r\n        422, 0.52301, 0.031975, 0.026376, 0.1141, 0.15701,\r\n        423, 0.55194, 0.034433, 0.027854, 0.1235, 0.16646,\r\n        424, 0.5806, 0.036998, 0.029355, 0.1334, 0.17632,\r\n        425, 0.60957, 0.039693, 0.03091, 0.1436, 0.1871,\r\n        426, 0.63936, 0.04254, 0.03255, 0.1541, 0.19921,\r\n        427, 0.66965, 0.045547, 0.034271, 0.1651, 0.21241,\r\n        428, 0.69983, 0.048716, 0.036062, 0.1764, 0.22623,\r\n        429, 0.72918, 0.052047, 0.037905, 0.1879, 0.2402,\r\n        430, 0.75689, 0.055538, 0.039781, 0.1998, 
0.25387,\r\n 431, 0.78229, 0.059188, 0.04167, 0.2119, 0.26702,\r\n 432, 0.80567, 0.062982, 0.043573, 0.2243, 0.27998,\r\n 433, 0.8276, 0.066903, 0.045493, 0.2369, 0.29303,\r\n 434, 0.84878, 0.070929, 0.047439, 0.2496, 0.3065,\r\n 435, 0.86998, 0.07503, 0.049417, 0.2625, 0.32068,\r\n 436, 0.89176, 0.079177, 0.051434, 0.2755, 0.33602,\r\n 437, 0.91344, 0.083346, 0.053474, 0.2886, 0.35236,\r\n 438, 0.93398, 0.087516, 0.05551, 0.3017, 0.36913,\r\n 439, 0.95222, 0.091662, 0.057517, 0.3149, 0.38573,\r\n 440, 0.96696, 0.095761, 0.059462, 0.3281, 0.40159,\r\n 441, 0.97734, 0.099798, 0.061324, 0.3412, 0.41647,\r\n 442, 0.98403, 0.1038, 0.063129, 0.3543, 0.4308,\r\n 443, 0.98814, 0.10783, 0.064919, 0.3673, 0.44492,\r\n 444, 0.99085, 0.11195, 0.066742, 0.3803, 0.4592,\r\n 445, 0.99334, 0.11622, 0.068654, 0.3931, 0.474,\r\n 446, 0.99637, 0.12071, 0.070696, 0.406, 0.48952,\r\n 447, 0.99904, 0.12536, 0.072851, 0.418, 0.50552,\r\n 448, 0.99998, 0.13011, 0.075078, 0.431, 0.52174,\r\n 449, 0.99784, 0.13486, 0.077332, 0.443, 0.5379,\r\n 450, 0.99133, 0.13949, 0.079565, 0.455, 0.55371,\r\n 451, 0.97966, 0.14394, 0.081737, 0.467, 0.5691,\r\n 452, 0.96391, 0.14828, 0.083883, 0.479, 0.58424,\r\n 453, 0.94557, 0.15264, 0.08606, 0.49, 0.59928,\r\n 454, 0.92608, 0.15716, 0.088332, 0.502, 0.61437,\r\n 455, 0.90673, 0.16201, 0.09077, 0.513, 0.62965,\r\n 456, 0.88851, 0.16733, 0.09344, 0.524, 0.64519,\r\n 457, 0.87135, 0.17314, 0.096358, 0.535, 0.66089,\r\n 458, 0.855, 0.17942, 0.09953, 0.546, 0.67666,\r\n 459, 0.8392, 0.18612, 0.10296, 0.557, 0.69241,\r\n 460, 0.82373, 0.1932, 0.10666, 0.567, 0.70805,\r\n 461, 0.80831, 0.20062, 0.11063, 0.578, 0.72359,\r\n 462, 0.79243, 0.20832, 0.11483, 0.588, 0.73911,\r\n 463, 0.77557, 0.21621, 0.11922, 0.599, 0.75456,\r\n 464, 0.75724, 0.22423, 0.12374, 0.61, 0.76994,\r\n 465, 0.73704, 0.23228, 0.12834, 0.62, 0.78522,\r\n 466, 0.71473, 0.24026, 0.13295, 0.631, 0.80068,\r\n 467, 0.69056, 0.24816, 0.13757, 0.642, 0.81635,\r\n 468, 0.66489, 0.25599, 0.14222, 0.653, 0.8318,\r\n 469, 0.63808, 0.26374, 0.14691, 0.664, 0.84659,\r\n 470, 0.61046, 0.27144, 0.15165, 0.676, 0.86029,\r\n 471, 0.58235, 0.2791, 0.15648, 0.687, 0.87292,\r\n 472, 0.55407, 0.28675, 0.1614, 0.699, 0.88487,\r\n 473, 0.5259, 0.29448, 0.16646, 0.71, 0.89624,\r\n 474, 0.49811, 0.30232, 0.17169, 0.722, 0.90716,\r\n 475, 0.47089, 0.31037, 0.17712, 0.734, 0.91773,\r\n 476, 0.44445, 0.31869, 0.18278, 0.745, 0.92834,\r\n 477, 0.41899, 0.32731, 0.18868, 0.757, 0.93895,\r\n 478, 0.3947, 0.33623, 0.19484, 0.769, 0.94903,\r\n 479, 0.37171, 0.34548, 0.20126, 0.781, 0.95809,\r\n 480, 0.35011, 0.35507, 0.20794, 0.793, 0.96561,\r\n 481, 0.3299, 0.36499, 0.21488, 0.805, 0.97198,\r\n 482, 0.31086, 0.37514, 0.22202, 0.817, 0.97783,\r\n 483, 0.29274, 0.38541, 0.22932, 0.828, 0.98301,\r\n 484, 0.27534, 0.39565, 0.23668, 0.84, 0.98733,\r\n 485, 0.2585, 0.40569, 0.24405, 0.851, 0.99062,\r\n 486, 0.24216, 0.41543, 0.25135, 0.862, 0.99334,\r\n 487, 0.2265, 0.42506, 0.2587, 0.873, 0.99589,\r\n 488, 0.21173, 0.43485, 0.26625, 0.884, 0.99801,\r\n 489, 0.19796, 0.4451, 0.2742, 0.894, 0.99946,\r\n 490, 0.1853, 0.45614, 0.28275, 0.904, 1,\r\n 491, 0.17375, 0.46824, 0.29207, 0.914, 0.99956,\r\n 492, 0.16315, 0.48125, 0.30209, 0.923, 0.99836,\r\n 493, 0.15331, 0.49493, 0.31267, 0.932, 0.99659,\r\n 494, 0.14409, 0.50895, 0.32364, 0.941, 0.99442,\r\n 495, 0.13535, 0.52297, 0.33479, 0.949, 0.99202,\r\n 496, 0.12701, 0.5367, 0.34594, 0.957, 0.98879,\r\n 497, 0.11902, 0.55019, 0.35713, 0.964, 0.98422,\r\n 498, 0.11133, 0.56362, 0.36842, 0.97, 
0.97866,\r\n 499, 0.10393, 0.57715, 0.37991, 0.976, 0.97245,\r\n 500, 0.096799, 0.591, 0.39171, 0.982, 0.96595,\r\n 501, 0.089917, 0.60535, 0.40391, 0.986, 0.95884,\r\n 502, 0.083288, 0.62016, 0.4165, 0.99, 0.95072,\r\n 503, 0.076916, 0.63534, 0.42945, 0.994, 0.94178,\r\n 504, 0.070805, 0.6508, 0.44272, 0.997, 0.93224,\r\n 505, 0.064961, 0.6664, 0.45625, 0.998, 0.9223,\r\n 506, 0.059405, 0.68205, 0.47, 1, 0.91183,\r\n 507, 0.054208, 0.69767, 0.48393, 1, 0.9006,\r\n 508, 0.049428, 0.71319, 0.49801, 1, 0.88866,\r\n 509, 0.045099, 0.72853, 0.51223, 0.998, 0.87607,\r\n 510, 0.041234, 0.74361, 0.52654, 0.997, 0.86289,\r\n 511, 0.037814, 0.7584, 0.54092, 0.994, 0.8488,\r\n 512, 0.034763, 0.77297, 0.55541, 0.99, 0.83368,\r\n 513, 0.032003, 0.78746, 0.57003, 0.986, 0.81783,\r\n 514, 0.029475, 0.80202, 0.58483, 0.981, 0.80158,\r\n 515, 0.02713, 0.81681, 0.59987, 0.975, 0.78523,\r\n 516, 0.024938, 0.83192, 0.61516, 0.968, 0.76872,\r\n 517, 0.022893, 0.8471, 0.63057, 0.961, 0.75181,\r\n 518, 0.020996, 0.86197, 0.64588, 0.953, 0.73459,\r\n 519, 0.019243, 0.87615, 0.66088, 0.944, 0.71717,\r\n 520, 0.01763, 0.88921, 0.67531, 0.935, 0.69963,\r\n 521, 0.01615, 0.90081, 0.68898, 0.925, 0.68189,\r\n 522, 0.014791, 0.91101, 0.70189, 0.915, 0.66388,\r\n 523, 0.013541, 0.91997, 0.71414, 0.904, 0.64572,\r\n 524, 0.012388, 0.92789, 0.72584, 0.892, 0.62753,\r\n 525, 0.011325, 0.93498, 0.73711, 0.88, 0.60942,\r\n 526, 0.010344, 0.94141, 0.74805, 0.867, 0.59134,\r\n 527, 0.0094409, 0.94728, 0.75868, 0.854, 0.57321,\r\n 528, 0.0086137, 0.95262, 0.76903, 0.84, 0.5551,\r\n 529, 0.0078583, 0.9575, 0.7791, 0.826, 0.53711,\r\n 530, 0.0071709, 0.96196, 0.7889, 0.811, 0.51931,\r\n 531, 0.0065465, 0.96608, 0.79847, 0.796, 0.50165,\r\n 532, 0.0059778, 0.96997, 0.80795, 0.781, 0.48407,\r\n 533, 0.0054579, 0.97374, 0.81748, 0.765, 0.46664,\r\n 534, 0.0049812, 0.97754, 0.82724, 0.749, 0.44944,\r\n 535, 0.0045429, 0.98148, 0.8374, 0.733, 0.43253,\r\n 536, 0.0041391, 0.98563, 0.84808, 0.717, 0.41586,\r\n 537, 0.0037679, 0.98975, 0.85906, 0.7, 0.39937,\r\n 538, 0.0034278, 0.99355, 0.87007, 0.683, 0.38314,\r\n 539, 0.003117, 0.99671, 0.88078, 0.667, 0.36722,\r\n 540, 0.0028335, 0.99893, 0.89087, 0.65, 0.35171,\r\n 541, 0.0025756, 0.99994, 0.90006, 0.633, 0.33654,\r\n 542, 0.0023408, 0.99969, 0.90825, 0.616, 0.32165,\r\n 543, 0.0021272, 0.99818, 0.91543, 0.599, 0.30708,\r\n 544, 0.0019328, 0.9954, 0.92158, 0.581, 0.2929,\r\n 545, 0.0017557, 0.99138, 0.92666, 0.564, 0.27914,\r\n 546, 0.0015945, 0.9862, 0.93074, 0.548, 0.26574,\r\n 547, 0.0014478, 0.98023, 0.93416, 0.531, 0.25265,\r\n 548, 0.0013143, 0.97391, 0.93732, 0.514, 0.23992,\r\n 549, 0.0011928, 0.96765, 0.94063, 0.497, 0.22759,\r\n 550, 0.0010823, 0.96188, 0.94453, 0.481, 0.21572,\r\n 551, 0.00098182, 0.95682, 0.94929, 0.465, 0.20424,\r\n 552, 0.00089053, 0.95215, 0.95468, 0.448, 0.19307,\r\n 553, 0.00080769, 0.9474, 0.96031, 0.433, 0.18229,\r\n 554, 0.00073257, 0.9421, 0.96579, 0.417, 0.17193,\r\n 555, 0.00066451, 0.93583, 0.9707, 0.402, 0.16206,\r\n 556, 0.00060289, 0.92827, 0.97476, 0.3864, 0.1526,\r\n 557, 0.00054706, 0.91967, 0.97806, 0.3715, 0.14349,\r\n 558, 0.00049646, 0.91036, 0.98082, 0.3569, 0.13475,\r\n 559, 0.00045057, 0.90068, 0.98327, 0.3427, 0.12642,\r\n 560, 0.00040893, 0.89095, 0.98564, 0.3288, 0.11853,\r\n 561, 0.00037114, 0.88139, 0.98808, 0.3151, 0.11101,\r\n 562, 0.00033684, 0.87183, 0.99056, 0.3018, 0.10379,\r\n 563, 0.00030572, 0.86206, 0.99294, 0.2888, 0.096921,\r\n 564, 0.0002775, 0.85184, 0.99512, 0.2762, 0.090426,\r\n 565, 0.00025192, 0.84097, 
0.99698, 0.2639, 0.084346,\r\n 566, 0.00022872, 0.8293, 0.99841, 0.2519, 0.07862,\r\n 567, 0.0002077, 0.81691, 0.99939, 0.2403, 0.073175,\r\n 568, 0.00018864, 0.80391, 0.99991, 0.2291, 0.068029,\r\n 569, 0.00017136, 0.79041, 0.99996, 0.2182, 0.063198,\r\n 570, 0.00015569, 0.77653, 0.99954, 0.2076, 0.058701,\r\n 571, 0.00014148, 0.76231, 0.99862, 0.1974, 0.054483,\r\n 572, 0.0001286, 0.74767, 0.99705, 0.1876, 0.050489,\r\n 573, 0.00011691, 0.73248, 0.9947, 0.1782, 0.046734,\r\n 574, 0.00010632, 0.71662, 0.99142, 0.169, 0.043236,\r\n 575, 9.67E-05, 0.70001, 0.98706, 0.1602, 0.040009,\r\n 576, 8.80E-05, 0.68265, 0.9816, 0.1517, 0.03701,\r\n 577, 8.01E-05, 0.66482, 0.97545, 0.1436, 0.03419,\r\n 578, 7.29E-05, 0.64686, 0.96912, 0.1358, 0.031556,\r\n 579, 6.64E-05, 0.62907, 0.96309, 0.1284, 0.029115,\r\n 580, 6.05E-05, 0.61173, 0.95784, 0.1212, 0.026875,\r\n 581, 5.51E-05, 0.595, 0.95366, 0.1143, 0.024801,\r\n 582, 5.02E-05, 0.57878, 0.95024, 0.1078, 0.02286,\r\n 583, 4.58E-05, 0.56293, 0.94709, 0.1015, 0.021053,\r\n 584, 4.18E-05, 0.54732, 0.94375, 0.0956, 0.019386,\r\n 585, 3.81E-05, 0.53182, 0.93978, 0.0899, 0.017862,\r\n 586, 3.48E-05, 0.51635, 0.93483, 0.0845, 0.016458,\r\n 587, 3.18E-05, 0.50087, 0.92892, 0.0793, 0.015147,\r\n 588, 2.90E-05, 0.48535, 0.92218, 0.0745, 0.013931,\r\n 589, 2.65E-05, 0.46978, 0.91473, 0.0699, 0.012812,\r\n 590, 2.43E-05, 0.45414, 0.90669, 0.0655, 0.01179,\r\n 591, 2.22E-05, 0.43845, 0.89817, 0.0613, 0.010849,\r\n 592, 2.03E-05, 0.42278, 0.88919, 0.0574, 0.0099711,\r\n 593, 1.86E-05, 0.40719, 0.87976, 0.0537, 0.0091585,\r\n 594, 1.70E-05, 0.39175, 0.86989, 0.0502, 0.0084124,\r\n 595, 1.56E-05, 0.37653, 0.8596, 0.0469, 0.0077343,\r\n 596, 1.43E-05, 0.36156, 0.84891, 0.0438, 0.0071125,\r\n 597, 1.31E-05, 0.34686, 0.83786, 0.0409, 0.0065348,\r\n 598, 1.20E-05, 0.33242, 0.82652, 0.03816, 0.0060011,\r\n 599, 1.10E-05, 0.31826, 0.81494, 0.03558, 0.0055117,\r\n 600, 1.01E-05, 0.30438, 0.80317, 0.03315, 0.0050669,\r\n 601, 9.31E-06, 0.29078, 0.79125, 0.03087, 0.0046587,\r\n 602, 8.56E-06, 0.27751, 0.77912, 0.02874, 0.0042795,\r\n 603, 7.87E-06, 0.26458, 0.76669, 0.02674, 0.0039294,\r\n 604, 7.24E-06, 0.25201, 0.7539, 0.02487, 0.0036087,\r\n 605, 6.67E-06, 0.23984, 0.74068, 0.02312, 0.0033177,\r\n 606, 6.14E-06, 0.22806, 0.72699, 0.02147, 0.0030511,\r\n 607, 5.66E-06, 0.2167, 0.71291, 0.01994, 0.0028037,\r\n 608, 5.22E-06, 0.20575, 0.69849, 0.01851, 0.0025756,\r\n 609, 4.81E-06, 0.19522, 0.68383, 0.01718, 0.0023667,\r\n 610, 4.44E-06, 0.1851, 0.66899, 0.01593, 0.002177,\r\n 611, 4.10E-06, 0.1754, 0.65404, 0.01477, 0.0020032,\r\n 612, 3.79E-06, 0.16609, 0.63898, 0.01369, 0.0018419,\r\n 613, 3.50E-06, 0.15717, 0.62382, 0.01269, 0.0016932,\r\n 614, 3.24E-06, 0.14862, 0.60858, 0.01175, 0.0015569,\r\n 615, 2.99E-06, 0.14043, 0.59325, 0.01088, 0.0014331,\r\n 616, np.nan, 0.13259, 0.57786, 0.01007, 0.0013197,\r\n 617, np.nan, 0.12509, 0.56249, 0.00932, 0.0012145,\r\n 618, np.nan, 0.11793, 0.54725, 0.00862, 0.0011174,\r\n 619, np.nan, 0.11109, 0.53221, 0.00797, 0.0010284,\r\n 620, np.nan, 0.10457, 0.51745, 0.00737, 0.00094731,\r\n 621, np.nan, 0.098366, 0.50299, 0.00682, 0.00087281,\r\n 622, np.nan, 0.092468, 0.48869, 0.0063, 0.00080358,\r\n 623, np.nan, 0.086876, 0.47438, 0.00582, 0.00073962,\r\n 624, np.nan, 0.081583, 0.4599, 0.00538, 0.00068097,\r\n 625, np.nan, 0.076584, 0.44512, 0.00497, 0.00062765,\r\n 626, np.nan, 0.071868, 0.43001, 0.00459, 0.00057875,\r\n 627, np.nan, 0.067419, 0.41469, 0.00424, 0.00053336,\r\n 628, np.nan, 0.063218, 0.39934, 0.003913, 0.00049144,\r\n 
629, np.nan, 0.059249, 0.38412, 0.003613, 0.00045298,\r\n 630, np.nan, 0.055499, 0.36917, 0.003335, 0.00041796,\r\n 631, np.nan, 0.051955, 0.35458, 0.003079, 0.00038579,\r\n 632, np.nan, 0.04861, 0.34039, 0.002842, 0.00035591,\r\n 633, np.nan, 0.045459, 0.32661, 0.002623, 0.00032829,\r\n 634, np.nan, 0.042494, 0.31325, 0.002421, 0.00030293,\r\n 635, np.nan, 0.03971, 0.30032, 0.002235, 0.0002798,\r\n 636, np.nan, 0.037095, 0.28782, 0.002062, 0.00025854,\r\n 637, np.nan, 0.034635, 0.27576, 0.001903, 0.00023879,\r\n 638, np.nan, 0.032313, 0.26416, 0.001757, 0.00022051,\r\n 639, np.nan, 0.030115, 0.25301, 0.001621, 0.0002037,\r\n 640, np.nan, 0.028031, 0.24232, 0.001497, 0.00018834,\r\n 641, np.nan, 0.026056, 0.23206, 0.001382, 0.00017419,\r\n 642, np.nan, 0.024201, 0.22216, 0.001276, 0.00016102,\r\n 643, np.nan, 0.022476, 0.21252, 0.001178, 0.00014882,\r\n 644, np.nan, 0.020887, 0.20306, 0.001088, 0.00013759,\r\n 645, np.nan, 0.019437, 0.19373, 0.001005, 0.00012734,\r\n 646, np.nan, 0.01812, 0.18449, 0.000928, 0.00011789,\r\n 647, np.nan, 0.016915, 0.1754, 0.000857, 0.0001091,\r\n 648, np.nan, 0.015799, 0.16651, 0.000792, 0.00010095,\r\n 649, np.nan, 0.014754, 0.15787, 0.000732, 9.34E-05,\r\n 650, np.nan, 0.013766, 0.14951, 0.000677, 8.66E-05,\r\n 651, np.nan, 0.012825, 0.14147, 0.000626, 8.02E-05,\r\n 652, np.nan, 0.01193, 0.13376, 0.000579, 7.43E-05,\r\n 653, np.nan, 0.011085, 0.12638, 0.000536, 6.89E-05,\r\n 654, np.nan, 0.010289, 0.11934, 0.000496, 6.38E-05,\r\n 655, np.nan, 0.0095432, 0.11264, 0.000459, 5.92E-05,\r\n 656, np.nan, 0.0088461, 0.10626, 0.000425, 5.49E-05,\r\n 657, np.nan, 0.008196, 0.10021, 0.0003935, 5.09E-05,\r\n 658, np.nan, 0.0075906, 0.094456, 0.0003645, 4.72E-05,\r\n 659, np.nan, 0.0070275, 0.088993, 0.0003377, 4.38E-05,\r\n 660, np.nan, 0.0065045, 0.083808, 0.0003129, 4.07E-05,\r\n 661, np.nan, 0.0060195, 0.078886, 0.0002901, 3.78E-05,\r\n 662, np.nan, 0.0055709, 0.074219, 0.0002689, 3.51E-05,\r\n 663, np.nan, 0.0051573, 0.069795, 0.0002493, 3.26E-05,\r\n 664, np.nan, 0.0047769, 0.065605, 0.0002313, 3.03E-05,\r\n 665, np.nan, 0.0044279, 0.061638, 0.0002146, 2.81E-05,\r\n 666, np.nan, 0.0041083, 0.057886, 0.0001991, 2.62E-05,\r\n 667, np.nan, 0.0038147, 0.054337, 0.0001848, 2.43E-05,\r\n 668, np.nan, 0.0035439, 0.050981, 0.0001716, 2.26E-05,\r\n 669, np.nan, 0.0032933, 0.04781, 0.0001593, 2.10E-05,\r\n 670, np.nan, 0.0030605, 0.044813, 0.000148, 1.96E-05,\r\n 671, np.nan, 0.0028437, 0.041983, 0.0001375, 1.82E-05,\r\n 672, np.nan, 0.0026418, 0.039311, 0.0001277, 1.69E-05,\r\n 673, np.nan, 0.0024537, 0.036789, 0.0001187, 1.57E-05,\r\n 674, np.nan, 0.0022787, 0.03441, 0.0001104, 1.47E-05,\r\n 675, np.nan, 0.002116, 0.032166, 0.0001026, 1.36E-05,\r\n 676, np.nan, 0.0019646, 0.030051, 9.54E-05, 1.27E-05,\r\n 677, np.nan, 0.0018238, 0.028059, 8.88E-05, 1.18E-05,\r\n 678, np.nan, 0.0016929, 0.026186, 8.26E-05, 1.10E-05,\r\n 679, np.nan, 0.0015712, 0.024426, 7.69E-05, 1.03E-05,\r\n 680, np.nan, 0.001458, 0.022774, 7.15E-05, 9.58E-06,\r\n 681, np.nan, 0.0013527, 0.021224, 6.66E-05, 8.93E-06,\r\n 682, np.nan, 0.0012548, 0.019768, 6.20E-05, 8.33E-06,\r\n 683, np.nan, 0.0011634, 0.018399, 5.78E-05, 7.76E-06,\r\n 684, np.nan, 0.0010781, 0.017109, 5.38E-05, 7.24E-06,\r\n 685, np.nan, 0.00099842, 0.015894, 5.01E-05, 6.75E-06,\r\n 686, np.nan, 0.00092396, 0.014749, 4.67E-05, 6.31E-06,\r\n 687, np.nan, 0.00085471, 0.013677, 4.36E-05, 5.88E-06,\r\n 688, np.nan, 0.00079065, 0.01268, 4.06E-05, 5.49E-06,\r\n 689, np.nan, 0.00073168, 0.011759, 3.79E-05, 5.13E-06,\r\n 690, np.nan, 
0.00067765, 0.010912, 3.53E-05, 4.79E-06,\r\n 691, np.nan, 0.0006283, 0.010137, 3.30E-05, 4.47E-06,\r\n 692, np.nan, 0.00058311, 0.0094257, 3.08E-05, 4.18E-06,\r\n 693, np.nan, 0.00054158, 0.0087692, 2.87E-05, 3.90E-06,\r\n 694, np.nan, 0.00050329, 0.0081608, 2.68E-05, 3.65E-06,\r\n 695, np.nan, 0.00046787, 0.0075945, 2.50E-05, 3.41E-06,\r\n 696, np.nan, 0.00043501, 0.0070659, 2.34E-05, 3.19E-06,\r\n 697, np.nan, 0.00040449, 0.0065725, 2.18E-05, 2.98E-06,\r\n 698, np.nan, 0.00037614, 0.0061126, 2.04E-05, 2.79E-06,\r\n 699, np.nan, 0.00034978, 0.0056844, 1.91E-05, 2.61E-06,\r\n 700, np.nan, 0.00032528, 0.0052861, 1.78E-05, 2.44E-06,\r\n 701, np.nan, 0.00030248, 0.0049157, 1.66E-05, 2.28E-06,\r\n 702, np.nan, 0.00028124, 0.0045709, 1.56E-05, 2.14E-06,\r\n 703, np.nan, 0.00026143, 0.0042491, 1.45E-05, 2.00E-06,\r\n 704, np.nan, 0.00024293, 0.0039483, 1.36E-05, 1.87E-06,\r\n 705, np.nan, 0.00022564, 0.0036667, 1.27E-05, 1.75E-06,\r\n 706, np.nan, 0.00020948, 0.003403, 1.19E-05, 1.64E-06,\r\n 707, np.nan, 0.0001944, 0.0031563, 1.11E-05, 1.54E-06,\r\n 708, np.nan, 0.00018037, 0.0029262, 1.04E-05, 1.44E-06,\r\n 709, np.nan, 0.00016735, 0.0027121, 9.76E-06, 1.35E-06,\r\n 710, np.nan, 0.00015529, 0.0025133, 9.14E-06, 1.27E-06,\r\n 711, np.nan, 0.00014413, 0.002329, 8.56E-06, 1.19E-06,\r\n 712, np.nan, 0.00013383, 0.0021584, 8.02E-06, 1.11E-06,\r\n 713, np.nan, 0.00012431, 0.0020007, 7.51E-06, 1.04E-06,\r\n 714, np.nan, 0.00011551, 0.0018552, 7.04E-06, 9.78E-07,\r\n 715, np.nan, 0.00010739, 0.0017211, 6.60E-06, 9.18E-07,\r\n 716, np.nan, 9.99E-05, 0.0015975, 6.18E-06, 8.62E-07,\r\n 717, np.nan, 9.29E-05, 0.0014834, 5.80E-06, 8.09E-07,\r\n 718, np.nan, 8.65E-05, 0.0013779, 5.44E-06, 7.59E-07,\r\n 719, np.nan, 8.05E-05, 0.00128, 5.10E-06, 7.12E-07,\r\n 720, np.nan, 7.49E-05, 0.001189, 4.78E-06, 6.69E-07,\r\n 721, np.nan, 6.98E-05, 0.0011043, 4.49E-06, 6.28E-07,\r\n 722, np.nan, 6.49E-05, 0.0010256, 4.21E-06, 5.90E-07,\r\n 723, np.nan, 6.05E-05, 0.0009526, 3.95E-06, 5.54E-07,\r\n 724, np.nan, 5.63E-05, 0.00088496, 3.71E-06, 5.21E-07,\r\n 725, np.nan, 5.25E-05, 0.0008224, 3.48E-06, 4.90E-07,\r\n 726, np.nan, 4.89E-05, 0.00076459, 3.27E-06, 4.60E-07,\r\n 727, np.nan, 4.56E-05, 0.00071111, 3.07E-06, 4.33E-07,\r\n 728, np.nan, 4.25E-05, 0.00066157, 2.88E-06, 4.07E-07,\r\n 729, np.nan, 3.97E-05, 0.00061561, 2.71E-06, 3.82E-07,\r\n 730, np.nan, 3.70E-05, 0.00057292, 2.55E-06, 3.60E-07,\r\n 731, np.nan, 3.46E-05, 0.00053321, 2.39E-06, 3.39E-07,\r\n 732, np.nan, 3.23E-05, 0.00049623, 2.25E-06, 3.18E-07,\r\n 733, np.nan, 3.01E-05, 0.00046178, 2.12E-06, 3.00E-07,\r\n 734, np.nan, 2.81E-05, 0.00042965, 1.99E-06, 2.82E-07,\r\n 735, np.nan, 2.62E-05, 0.00039967, 1.87E-06, 2.65E-07,\r\n 736, np.nan, 2.45E-05, 0.00037169, 1.76E-06, 2.50E-07,\r\n 737, np.nan, 2.28E-05, 0.00034565, 1.66E-06, 2.35E-07,\r\n 738, np.nan, 2.13E-05, 0.00032149, 1.56E-06, 2.22E-07,\r\n 739, np.nan, 1.99E-05, 0.00029916, 1.47E-06, 2.09E-07,\r\n 740, np.nan, 1.86E-05, 0.00027855, 1.38E-06, 1.97E-07,\r\n 741, np.nan, 1.74E-05, 0.00025958, 1.30E-06, 1.85E-07,\r\n 742, np.nan, 1.63E-05, 0.00024206, 1.22E-06, 1.75E-07,\r\n 743, np.nan, 1.53E-05, 0.00022581, 1.15E-06, 1.65E-07,\r\n 744, np.nan, 1.43E-05, 0.00021067, 1.08E-06, 1.55E-07,\r\n 745, np.nan, 1.34E-05, 0.00019653, 1.02E-06, 1.46E-07,\r\n 746, np.nan, 1.25E-05, 0.00018327, 9.62E-07, 1.38E-07,\r\n 747, np.nan, 1.17E-05, 0.00017087, 9.07E-07, 1.30E-07,\r\n 748, np.nan, 1.10E-05, 0.00015929, 8.55E-07, 1.23E-07,\r\n 749, np.nan, 1.03E-05, 0.00014851, 8.06E-07, 1.16E-07,\r\n 750, np.nan, 
9.63E-06, 0.00013848, 7.60E-07, 1.09E-07,\r\n 751, np.nan, 9.02E-06, 0.00012918, 7.16E-07, 1.03E-07,\r\n 752, np.nan, 8.45E-06, 0.00012054, 6.75E-07, 9.74E-08,\r\n 753, np.nan, 7.92E-06, 0.00011252, 6.37E-07, 9.19E-08,\r\n 754, np.nan, 7.43E-06, 0.00010506, 6.01E-07, 8.68E-08,\r\n 755, np.nan, 6.97E-06, 9.81E-05, 5.67E-07, 8.20E-08,\r\n 756, np.nan, 6.53E-06, 9.17E-05, 5.35E-07, 7.74E-08,\r\n 757, np.nan, 6.13E-06, 8.56E-05, 5.05E-07, 7.31E-08,\r\n 758, np.nan, 5.75E-06, 8.00E-05, 4.77E-07, 6.91E-08,\r\n 759, np.nan, 5.40E-06, 7.48E-05, 4.50E-07, 6.53E-08,\r\n 760, np.nan, 5.07E-06, 6.99E-05, 4.25E-07, 6.17E-08,\r\n 761, np.nan, 4.75E-06, 6.53E-05, 4.01E-07, 5.83E-08,\r\n 762, np.nan, 4.46E-06, 6.10E-05, 3.79E-07, 5.51E-08,\r\n 763, np.nan, 4.18E-06, 5.70E-05, 3.58E-07, 5.21E-08,\r\n 764, np.nan, 3.93E-06, 5.33E-05, 3.38E-07, 4.93E-08,\r\n 765, np.nan, 3.69E-06, 4.98E-05, 3.20E-07, 4.66E-08,\r\n 766, np.nan, 3.46E-06, 4.66E-05, 3.02E-07, 4.41E-08,\r\n 767, np.nan, 3.25E-06, 4.36E-05, 2.86E-07, 4.17E-08,\r\n 768, np.nan, 3.05E-06, 4.09E-05, 2.70E-07, 3.94E-08,\r\n 769, np.nan, 2.87E-06, 3.82E-05, 2.55E-07, 3.73E-08,\r\n 770, np.nan, 2.70E-06, 3.58E-05, 2.41E-07, 3.53E-08,\r\n 771, np.nan, 2.53E-06, 3.35E-05, 2.28E-07, 3.34E-08,\r\n 772, np.nan, 2.38E-06, 3.13E-05, 2.16E-07, 3.17E-08,\r\n 773, np.nan, 2.23E-06, 2.93E-05, 2.04E-07, 3.00E-08,\r\n 774, np.nan, 2.09E-06, 2.74E-05, 1.93E-07, 2.84E-08,\r\n 775, np.nan, 1.97E-06, 2.56E-05, 1.83E-07, 2.69E-08,\r\n 776, np.nan, 1.85E-06, 2.40E-05, 1.73E-07, 2.55E-08,\r\n 777, np.nan, 1.74E-06, 2.25E-05, 1.64E-07, 2.42E-08,\r\n 778, np.nan, 1.64E-06, 2.11E-05, 1.55E-07, 2.29E-08,\r\n 779, np.nan, 1.55E-06, 1.98E-05, 1.47E-07, 2.17E-08,\r\n 780, np.nan, 1.46E-06, 1.86E-05, 1.39E-07, 2.05E-08\r\n ])\r\n\r\n sss = sss.reshape(401, 6).astype(np.float64).T\r\n sss = sss[:, ::binwidth]\r\n sss = pd.DataFrame(data=sss.T, columns=colnames)\r\n sss.set_index('Wavelength', inplace=True)\r\n sss.columns.name = 'Photoreceptor'\r\n sss.index = pd.Int64Index(sss.index)\r\n if fillna:\r\n sss = sss.fillna(0)\r\n return sss\r\n\r\n\r\ndef get_CIE_1924_photopic_vl(binwidth: Optional[int] = 1) -> pd.DataFrame:\r\n '''Get the CIE1924 photopic luminosity function.\r\n\r\n Parameters\r\n ----------\r\n binwidth : int, optional\r\n Width of the wavelength bins in nanometers (must be `1` or `5`). 
The\r\n default is `1`.\r\n\r\n Returns\r\n -------\r\n vl : pd.Series\r\n The CIE1924 photopic luminosity function.\r\n\r\n '''\r\n if binwidth not in [1, 5]:\r\n raise ValueError('Must specify 1 or 5 for binwidth')\r\n\r\n vl = np.array([\r\n 380, 0.0000390000000,\r\n 381, 0.0000428264000,\r\n 382, 0.0000469146000,\r\n 383, 0.0000515896000,\r\n 384, 0.0000571764000,\r\n 385, 0.0000640000000,\r\n 386, 0.0000723442100,\r\n 387, 0.0000822122400,\r\n 388, 0.0000935081600,\r\n 389, 0.0001061361000,\r\n 390, 0.0001200000000,\r\n 391, 0.0001349840000,\r\n 392, 0.0001514920000,\r\n 393, 0.0001702080000,\r\n 394, 0.0001918160000,\r\n 395, 0.0002170000000,\r\n 396, 0.0002469067000,\r\n 397, 0.0002812400000,\r\n 398, 0.0003185200000,\r\n 399, 0.0003572667000,\r\n 400, 0.0003960000000,\r\n 401, 0.0004337147000,\r\n 402, 0.0004730240000,\r\n 403, 0.0005178760000,\r\n 404, 0.0005722187000,\r\n 405, 0.0006400000000,\r\n 406, 0.0007245600000,\r\n 407, 0.0008255000000,\r\n 408, 0.0009411600000,\r\n 409, 0.0010698800000,\r\n 410, 0.0012100000000,\r\n 411, 0.0013620910000,\r\n 412, 0.0015307520000,\r\n 413, 0.0017203680000,\r\n 414, 0.0019353230000,\r\n 415, 0.0021800000000,\r\n 416, 0.0024548000000,\r\n 417, 0.0027640000000,\r\n 418, 0.0031178000000,\r\n 419, 0.0035264000000,\r\n 420, 0.0040000000000,\r\n 421, 0.0045462400000,\r\n 422, 0.0051593200000,\r\n 423, 0.0058292800000,\r\n 424, 0.0065461600000,\r\n 425, 0.0073000000000,\r\n 426, 0.0080865070000,\r\n 427, 0.0089087200000,\r\n 428, 0.0097676800000,\r\n 429, 0.0106644300000,\r\n 430, 0.0116000000000,\r\n 431, 0.0125731700000,\r\n 432, 0.0135827200000,\r\n 433, 0.0146296800000,\r\n 434, 0.0157150900000,\r\n 435, 0.0168400000000,\r\n 436, 0.0180073600000,\r\n 437, 0.0192144800000,\r\n 438, 0.0204539200000,\r\n 439, 0.0217182400000,\r\n 440, 0.0230000000000,\r\n 441, 0.0242946100000,\r\n 442, 0.0256102400000,\r\n 443, 0.0269585700000,\r\n 444, 0.0283512500000,\r\n 445, 0.0298000000000,\r\n 446, 0.0313108300000,\r\n 447, 0.0328836800000,\r\n 448, 0.0345211200000,\r\n 449, 0.0362257100000,\r\n 450, 0.0380000000000,\r\n 451, 0.0398466700000,\r\n 452, 0.0417680000000,\r\n 453, 0.0437660000000,\r\n 454, 0.0458426700000,\r\n 455, 0.0480000000000,\r\n 456, 0.0502436800000,\r\n 457, 0.0525730400000,\r\n 458, 0.0549805600000,\r\n 459, 0.0574587200000,\r\n 460, 0.0600000000000,\r\n 461, 0.0626019700000,\r\n 462, 0.0652775200000,\r\n 463, 0.0680420800000,\r\n 464, 0.0709110900000,\r\n 465, 0.0739000000000,\r\n 466, 0.0770160000000,\r\n 467, 0.0802664000000,\r\n 468, 0.0836668000000,\r\n 469, 0.0872328000000,\r\n 470, 0.0909800000000,\r\n 471, 0.0949175500000,\r\n 472, 0.0990458400000,\r\n 473, 0.1033674000000,\r\n 474, 0.1078846000000,\r\n 475, 0.1126000000000,\r\n 476, 0.1175320000000,\r\n 477, 0.1226744000000,\r\n 478, 0.1279928000000,\r\n 479, 0.1334528000000,\r\n 480, 0.1390200000000,\r\n 481, 0.1446764000000,\r\n 482, 0.1504693000000,\r\n 483, 0.1564619000000,\r\n 484, 0.1627177000000,\r\n 485, 0.1693000000000,\r\n 486, 0.1762431000000,\r\n 487, 0.1835581000000,\r\n 488, 0.1912735000000,\r\n 489, 0.1994180000000,\r\n 490, 0.2080200000000,\r\n 491, 0.2171199000000,\r\n 492, 0.2267345000000,\r\n 493, 0.2368571000000,\r\n 494, 0.2474812000000,\r\n 495, 0.2586000000000,\r\n 496, 0.2701849000000,\r\n 497, 0.2822939000000,\r\n 498, 0.2950505000000,\r\n 499, 0.3085780000000,\r\n 500, 0.3230000000000,\r\n 501, 0.3384021000000,\r\n 502, 0.3546858000000,\r\n 503, 0.3716986000000,\r\n 504, 0.3892875000000,\r\n 505, 0.4073000000000,\r\n 506, 0.4256299000000,\r\n 507, 
0.4443096000000,\r\n 508, 0.4633944000000,\r\n 509, 0.4829395000000,\r\n 510, 0.5030000000000,\r\n 511, 0.5235693000000,\r\n 512, 0.5445120000000,\r\n 513, 0.5656900000000,\r\n 514, 0.5869653000000,\r\n 515, 0.6082000000000,\r\n 516, 0.6293456000000,\r\n 517, 0.6503068000000,\r\n 518, 0.6708752000000,\r\n 519, 0.6908424000000,\r\n 520, 0.7100000000000,\r\n 521, 0.7281852000000,\r\n 522, 0.7454636000000,\r\n 523, 0.7619694000000,\r\n 524, 0.7778368000000,\r\n 525, 0.7932000000000,\r\n 526, 0.8081104000000,\r\n 527, 0.8224962000000,\r\n 528, 0.8363068000000,\r\n 529, 0.8494916000000,\r\n 530, 0.8620000000000,\r\n 531, 0.8738108000000,\r\n 532, 0.8849624000000,\r\n 533, 0.8954936000000,\r\n 534, 0.9054432000000,\r\n 535, 0.9148501000000,\r\n 536, 0.9237348000000,\r\n 537, 0.9320924000000,\r\n 538, 0.9399226000000,\r\n 539, 0.9472252000000,\r\n 540, 0.9540000000000,\r\n 541, 0.9602561000000,\r\n 542, 0.9660074000000,\r\n 543, 0.9712606000000,\r\n 544, 0.9760225000000,\r\n 545, 0.9803000000000,\r\n 546, 0.9840924000000,\r\n 547, 0.9874182000000,\r\n 548, 0.9903128000000,\r\n 549, 0.9928116000000,\r\n 550, 0.9949501000000,\r\n 551, 0.9967108000000,\r\n 552, 0.9980983000000,\r\n 553, 0.9991120000000,\r\n 554, 0.9997482000000,\r\n 555, 1.0000000000000,\r\n 556, 0.9998567000000,\r\n 557, 0.9993046000000,\r\n 558, 0.9983255000000,\r\n 559, 0.9968987000000,\r\n 560, 0.9950000000000,\r\n 561, 0.9926005000000,\r\n 562, 0.9897426000000,\r\n 563, 0.9864444000000,\r\n 564, 0.9827241000000,\r\n 565, 0.9786000000000,\r\n 566, 0.9740837000000,\r\n 567, 0.9691712000000,\r\n 568, 0.9638568000000,\r\n 569, 0.9581349000000,\r\n 570, 0.9520000000000,\r\n 571, 0.9454504000000,\r\n 572, 0.9384992000000,\r\n 573, 0.9311628000000,\r\n 574, 0.9234576000000,\r\n 575, 0.9154000000000,\r\n 576, 0.9070064000000,\r\n 577, 0.8982772000000,\r\n 578, 0.8892048000000,\r\n 579, 0.8797816000000,\r\n 580, 0.8700000000000,\r\n 581, 0.8598613000000,\r\n 582, 0.8493920000000,\r\n 583, 0.8386220000000,\r\n 584, 0.8275813000000,\r\n 585, 0.8163000000000,\r\n 586, 0.8047947000000,\r\n 587, 0.7930820000000,\r\n 588, 0.7811920000000,\r\n 589, 0.7691547000000,\r\n 590, 0.7570000000000,\r\n 591, 0.7447541000000,\r\n 592, 0.7324224000000,\r\n 593, 0.7200036000000,\r\n 594, 0.7074965000000,\r\n 595, 0.6949000000000,\r\n 596, 0.6822192000000,\r\n 597, 0.6694716000000,\r\n 598, 0.6566744000000,\r\n 599, 0.6438448000000,\r\n 600, 0.6310000000000,\r\n 601, 0.6181555000000,\r\n 602, 0.6053144000000,\r\n 603, 0.5924756000000,\r\n 604, 0.5796379000000,\r\n 605, 0.5668000000000,\r\n 606, 0.5539611000000,\r\n 607, 0.5411372000000,\r\n 608, 0.5283528000000,\r\n 609, 0.5156323000000,\r\n 610, 0.5030000000000,\r\n 611, 0.4904688000000,\r\n 612, 0.4780304000000,\r\n 613, 0.4656776000000,\r\n 614, 0.4534032000000,\r\n 615, 0.4412000000000,\r\n 616, 0.4290800000000,\r\n 617, 0.4170360000000,\r\n 618, 0.4050320000000,\r\n 619, 0.3930320000000,\r\n 620, 0.3810000000000,\r\n 621, 0.3689184000000,\r\n 622, 0.3568272000000,\r\n 623, 0.3447768000000,\r\n 624, 0.3328176000000,\r\n 625, 0.3210000000000,\r\n 626, 0.3093381000000,\r\n 627, 0.2978504000000,\r\n 628, 0.2865936000000,\r\n 629, 0.2756245000000,\r\n 630, 0.2650000000000,\r\n 631, 0.2547632000000,\r\n 632, 0.2448896000000,\r\n 633, 0.2353344000000,\r\n 634, 0.2260528000000,\r\n 635, 0.2170000000000,\r\n 636, 0.2081616000000,\r\n 637, 0.1995488000000,\r\n 638, 0.1911552000000,\r\n 639, 0.1829744000000,\r\n 640, 0.1750000000000,\r\n 641, 0.1672235000000,\r\n 642, 0.1596464000000,\r\n 643, 
0.1522776000000,\r\n 644, 0.1451259000000,\r\n 645, 0.1382000000000,\r\n 646, 0.1315003000000,\r\n 647, 0.1250248000000,\r\n 648, 0.1187792000000,\r\n 649, 0.1127691000000,\r\n 650, 0.1070000000000,\r\n 651, 0.1014762000000,\r\n 652, 0.0961886400000,\r\n 653, 0.0911229600000,\r\n 654, 0.0862648500000,\r\n 655, 0.0816000000000,\r\n 656, 0.0771206400000,\r\n 657, 0.0728255200000,\r\n 658, 0.0687100800000,\r\n 659, 0.0647697600000,\r\n 660, 0.0610000000000,\r\n 661, 0.0573962100000,\r\n 662, 0.0539550400000,\r\n 663, 0.0506737600000,\r\n 664, 0.0475496500000,\r\n 665, 0.0445800000000,\r\n 666, 0.0417587200000,\r\n 667, 0.0390849600000,\r\n 668, 0.0365638400000,\r\n 669, 0.0342004800000,\r\n 670, 0.0320000000000,\r\n 671, 0.0299626100000,\r\n 672, 0.0280766400000,\r\n 673, 0.0263293600000,\r\n 674, 0.0247080500000,\r\n 675, 0.0232000000000,\r\n 676, 0.0218007700000,\r\n 677, 0.0205011200000,\r\n 678, 0.0192810800000,\r\n 679, 0.0181206900000,\r\n 680, 0.0170000000000,\r\n 681, 0.0159037900000,\r\n 682, 0.0148371800000,\r\n 683, 0.0138106800000,\r\n 684, 0.0128347800000,\r\n 685, 0.0119200000000,\r\n 686, 0.0110683100000,\r\n 687, 0.0102733900000,\r\n 688, 0.0095333110000,\r\n 689, 0.0088461570000,\r\n 690, 0.0082100000000,\r\n 691, 0.0076237810000,\r\n 692, 0.0070854240000,\r\n 693, 0.0065914760000,\r\n 694, 0.0061384850000,\r\n 695, 0.0057230000000,\r\n 696, 0.0053430590000,\r\n 697, 0.0049957960000,\r\n 698, 0.0046764040000,\r\n 699, 0.0043800750000,\r\n 700, 0.0041020000000,\r\n 701, 0.0038384530000,\r\n 702, 0.0035890990000,\r\n 703, 0.0033542190000,\r\n 704, 0.0031340930000,\r\n 705, 0.0029290000000,\r\n 706, 0.0027381390000,\r\n 707, 0.0025598760000,\r\n 708, 0.0023932440000,\r\n 709, 0.0022372750000,\r\n 710, 0.0020910000000,\r\n 711, 0.0019535870000,\r\n 712, 0.0018245800000,\r\n 713, 0.0017035800000,\r\n 714, 0.0015901870000,\r\n 715, 0.0014840000000,\r\n 716, 0.0013844960000,\r\n 717, 0.0012912680000,\r\n 718, 0.0012040920000,\r\n 719, 0.0011227440000,\r\n 720, 0.0010470000000,\r\n 721, 0.0009765896000,\r\n 722, 0.0009111088000,\r\n 723, 0.0008501332000,\r\n 724, 0.0007932384000,\r\n 725, 0.0007400000000,\r\n 726, 0.0006900827000,\r\n 727, 0.0006433100000,\r\n 728, 0.0005994960000,\r\n 729, 0.0005584547000,\r\n 730, 0.0005200000000,\r\n 731, 0.0004839136000,\r\n 732, 0.0004500528000,\r\n 733, 0.0004183452000,\r\n 734, 0.0003887184000,\r\n 735, 0.0003611000000,\r\n 736, 0.0003353835000,\r\n 737, 0.0003114404000,\r\n 738, 0.0002891656000,\r\n 739, 0.0002684539000,\r\n 740, 0.0002492000000,\r\n 741, 0.0002313019000,\r\n 742, 0.0002146856000,\r\n 743, 0.0001992884000,\r\n 744, 0.0001850475000,\r\n 745, 0.0001719000000,\r\n 746, 0.0001597781000,\r\n 747, 0.0001486044000,\r\n 748, 0.0001383016000,\r\n 749, 0.0001287925000,\r\n 750, 0.0001200000000,\r\n 751, 0.0001118595000,\r\n 752, 0.0001043224000,\r\n 753, 0.0000973356000,\r\n 754, 0.0000908458700,\r\n 755, 0.0000848000000,\r\n 756, 0.0000791466700,\r\n 757, 0.0000738580000,\r\n 758, 0.0000689160000,\r\n 759, 0.0000643026700,\r\n 760, 0.0000600000000,\r\n 761, 0.0000559818700,\r\n 762, 0.0000522256000,\r\n 763, 0.0000487184000,\r\n 764, 0.0000454474700,\r\n 765, 0.0000424000000,\r\n 766, 0.0000395610400,\r\n 767, 0.0000369151200,\r\n 768, 0.0000344486800,\r\n 769, 0.0000321481600,\r\n 770, 0.0000300000000,\r\n 771, 0.0000279912500,\r\n 772, 0.0000261135600,\r\n 773, 0.0000243602400,\r\n 774, 0.0000227246100,\r\n 775, 0.0000212000000,\r\n 776, 0.0000197785500,\r\n 777, 0.0000184528500,\r\n 778, 0.0000172168700,\r\n 779, 
0.0000160645900,\r\n 780, 0.0000149900000\r\n ])\r\n\r\n vl = vl.reshape(401, 2).astype(np.float64).T\r\n vl = vl[:, ::binwidth]\r\n idx = pd.Int64Index(vl[0], name='Wavelength')\r\n return pd.Series(data=vl[1], index=idx, name='vl')\r\n\r\n\r\ndef get_matrix_LMStoXYZ():\r\n '''Get LMS to XYZ conversion matrix for 10 degree field size.\r\n\r\n Returns\r\n -------\r\n np.ndarray\r\n The matrix.\r\n\r\n '''\r\n return np.array([\r\n [1.93986443, -1.34664359, 0.43044935],\r\n [0.69283932, 0.34967567, 0],\r\n [0., 0., 2.14687945]\r\n ])\r\n\r\n\r\ndef get_CIE170_2_chromaticity_coordinates(\r\n binwidth: int = 1, connect: bool = True) -> pd.DataFrame():\r\n\r\n colnames = ['Wavelength', 'x', 'y', 'z']\r\n coord = np.array([\r\n 390, 0.17842, 0.02464, 0.79694,\r\n 391, 0.17838, 0.02482, 0.79679,\r\n 392, 0.17831, 0.02496, 0.79673,\r\n 393, 0.17819, 0.02505, 0.79676,\r\n 394, 0.17803, 0.02510, 0.79687,\r\n 395, 0.17784, 0.02509, 0.79706,\r\n 396, 0.17763, 0.02504, 0.79733,\r\n 397, 0.17738, 0.02494, 0.79769,\r\n 398, 0.17711, 0.02478, 0.79811,\r\n 399, 0.17682, 0.02458, 0.79860,\r\n 400, 0.17652, 0.02432, 0.79915,\r\n 401, 0.17621, 0.02403, 0.79976,\r\n 402, 0.17590, 0.02371, 0.80040,\r\n 403, 0.17559, 0.02338, 0.80103,\r\n 404, 0.17530, 0.02307, 0.80163,\r\n 405, 0.17504, 0.02279, 0.80217,\r\n 406, 0.17480, 0.02255, 0.80265,\r\n 407, 0.17457, 0.02236, 0.80308,\r\n 408, 0.17432, 0.02218, 0.80349,\r\n 409, 0.17405, 0.02202, 0.80393,\r\n 410, 0.17372, 0.02185, 0.80442,\r\n 411, 0.17333, 0.02168, 0.80499,\r\n 412, 0.17290, 0.02150, 0.80560,\r\n 413, 0.17243, 0.02133, 0.80623,\r\n 414, 0.17196, 0.02118, 0.80685,\r\n 415, 0.17151, 0.02106, 0.80744,\r\n 416, 0.17107, 0.02097, 0.80796,\r\n 417, 0.17065, 0.02092, 0.80844,\r\n 418, 0.17023, 0.02090, 0.80887,\r\n 419, 0.16980, 0.02093, 0.80927,\r\n 420, 0.16935, 0.02100, 0.80966,\r\n 421, 0.16886, 0.02111, 0.81004,\r\n 422, 0.16834, 0.02126, 0.81040,\r\n 423, 0.16780, 0.02144, 0.81076,\r\n 424, 0.16726, 0.02165, 0.81109,\r\n 425, 0.16672, 0.02188, 0.81140,\r\n 426, 0.16620, 0.02213, 0.81167,\r\n 427, 0.16569, 0.02240, 0.81190,\r\n 428, 0.16518, 0.02271, 0.81211,\r\n 429, 0.16465, 0.02307, 0.81228,\r\n 430, 0.16408, 0.02349, 0.81243,\r\n 431, 0.16346, 0.02398, 0.81255,\r\n 432, 0.16280, 0.02453, 0.81267,\r\n 433, 0.16212, 0.02512, 0.81276,\r\n 434, 0.16142, 0.02572, 0.81285,\r\n 435, 0.16074, 0.02632, 0.81294,\r\n 436, 0.16010, 0.02689, 0.81301,\r\n 437, 0.15949, 0.02744, 0.81306,\r\n 438, 0.15890, 0.02800, 0.81309,\r\n 439, 0.15831, 0.02860, 0.81309,\r\n 440, 0.15769, 0.02925, 0.81306,\r\n 441, 0.15703, 0.02998, 0.81299,\r\n 442, 0.15632, 0.03080, 0.81289,\r\n 443, 0.15557, 0.03168, 0.81275,\r\n 444, 0.15479, 0.03262, 0.81259,\r\n 445, 0.15400, 0.03360, 0.81240,\r\n 446, 0.15320, 0.03462, 0.81218,\r\n 447, 0.15238, 0.03570, 0.81192,\r\n 448, 0.15154, 0.03686, 0.81159,\r\n 449, 0.15068, 0.03814, 0.81118,\r\n 450, 0.14977, 0.03958, 0.81065,\r\n 451, 0.14882, 0.04119, 0.80999,\r\n 452, 0.14779, 0.04300, 0.80921,\r\n 453, 0.14670, 0.04499, 0.80831,\r\n 454, 0.14551, 0.04717, 0.80732,\r\n 455, 0.14423, 0.04951, 0.80626,\r\n 456, 0.14283, 0.05202, 0.80515,\r\n 457, 0.14132, 0.05471, 0.80397,\r\n 458, 0.13972, 0.05759, 0.80269,\r\n 459, 0.13802, 0.06067, 0.80130,\r\n 460, 0.13627, 0.06397, 0.79976,\r\n 461, 0.13446, 0.06751, 0.79803,\r\n 462, 0.13258, 0.07132, 0.79610,\r\n 463, 0.13062, 0.07543, 0.79394,\r\n 464, 0.12855, 0.07992, 0.79153,\r\n 465, 0.12634, 0.08482, 0.78884,\r\n 466, 0.12395, 0.09020, 0.78585,\r\n 467, 0.12137, 0.09611, 0.78252,\r\n 468, 0.11859, 
0.10260, 0.77881,\r\n 469, 0.11563, 0.10971, 0.77466,\r\n 470, 0.11247, 0.11750, 0.77003,\r\n 471, 0.10912, 0.12603, 0.76485,\r\n 472, 0.10557, 0.13535, 0.75908,\r\n 473, 0.10182, 0.14553, 0.75265,\r\n 474, 0.09784, 0.15663, 0.74553,\r\n 475, 0.09363, 0.16873, 0.73765,\r\n 476, 0.08916, 0.18188, 0.72896,\r\n 477, 0.08447, 0.19609, 0.71944,\r\n 478, 0.07957, 0.21135, 0.70908,\r\n 479, 0.07450, 0.22760, 0.69790,\r\n 480, 0.06929, 0.24478, 0.68593,\r\n 481, 0.06400, 0.26281, 0.67319,\r\n 482, 0.05863, 0.28170, 0.65966,\r\n 483, 0.05323, 0.30150, 0.64528,\r\n 484, 0.04781, 0.32221, 0.62998,\r\n 485, 0.04243, 0.34386, 0.61371,\r\n 486, 0.03714, 0.36644, 0.59642,\r\n 487, 0.03201, 0.38982, 0.57817,\r\n 488, 0.02711, 0.41387, 0.55902,\r\n 489, 0.02250, 0.43840, 0.53909,\r\n 490, 0.01826, 0.46323, 0.51851,\r\n 491, 0.01443, 0.48816, 0.49741,\r\n 492, 0.01110, 0.51301, 0.47589,\r\n 493, 0.00834, 0.53760, 0.45406,\r\n 494, 0.00623, 0.56173, 0.43204,\r\n 495, 0.00486, 0.58520, 0.40993,\r\n 496, 0.00428, 0.60787, 0.38785,\r\n 497, 0.00443, 0.62973, 0.36584,\r\n 498, 0.00520, 0.65087, 0.34393,\r\n 499, 0.00648, 0.67137, 0.32214,\r\n 500, 0.00817, 0.69130, 0.30052,\r\n 501, 0.01017, 0.71070, 0.27912,\r\n 502, 0.01251, 0.72943, 0.25806,\r\n 503, 0.01525, 0.74730, 0.23744,\r\n 504, 0.01846, 0.76414, 0.21740,\r\n 505, 0.02218, 0.77978, 0.19804,\r\n 506, 0.02648, 0.79401, 0.17951,\r\n 507, 0.03143, 0.80653, 0.16204,\r\n 508, 0.03704, 0.81710, 0.14587,\r\n 509, 0.04333, 0.82555, 0.13111,\r\n 510, 0.05030, 0.83185, 0.11785,\r\n 511, 0.05788, 0.83606, 0.10606,\r\n 512, 0.06588, 0.83858, 0.09554,\r\n 513, 0.07408, 0.83984, 0.08609,\r\n 514, 0.08228, 0.84018, 0.07754,\r\n 515, 0.09030, 0.83993, 0.06977,\r\n 516, 0.09804, 0.83930, 0.06266,\r\n 517, 0.10558, 0.83823, 0.05620,\r\n 518, 0.11304, 0.83660, 0.05035,\r\n 519, 0.12056, 0.83434, 0.04510,\r\n 520, 0.12825, 0.83134, 0.04040,\r\n 521, 0.13620, 0.82759, 0.03621,\r\n 522, 0.14434, 0.82318, 0.03248,\r\n 523, 0.15262, 0.81824, 0.02913,\r\n 524, 0.16097, 0.81290, 0.02613,\r\n 525, 0.16931, 0.80726, 0.02343,\r\n 526, 0.17760, 0.80140, 0.02100,\r\n 527, 0.18580, 0.79539, 0.01881,\r\n 528, 0.19386, 0.78929, 0.01686,\r\n 529, 0.20176, 0.78314, 0.01511,\r\n 530, 0.20946, 0.77699, 0.01355,\r\n 531, 0.21696, 0.77088, 0.01216,\r\n 532, 0.22432, 0.76476, 0.01092,\r\n 533, 0.23159, 0.75860, 0.00980,\r\n 534, 0.23885, 0.75235, 0.00879,\r\n 535, 0.24615, 0.74597, 0.00788,\r\n 536, 0.25353, 0.73942, 0.00705,\r\n 537, 0.26097, 0.73274, 0.00630,\r\n 538, 0.26841, 0.72597, 0.00562,\r\n 539, 0.27582, 0.71916, 0.00502,\r\n 540, 0.28316, 0.71236, 0.00448,\r\n 541, 0.29038, 0.70561, 0.00401,\r\n 542, 0.29751, 0.69891, 0.00359,\r\n 543, 0.30452, 0.69226, 0.00322,\r\n 544, 0.31144, 0.68568, 0.00288,\r\n 545, 0.31826, 0.67915, 0.00259,\r\n 546, 0.32499, 0.67269, 0.00233,\r\n 547, 0.33163, 0.66628, 0.00209,\r\n 548, 0.33819, 0.65992, 0.00188,\r\n 549, 0.34469, 0.65361, 0.00169,\r\n 550, 0.35114, 0.64734, 0.00152,\r\n 551, 0.35755, 0.64109, 0.00136,\r\n 552, 0.36397, 0.63481, 0.00122,\r\n 553, 0.37043, 0.62848, 0.00109,\r\n 554, 0.37696, 0.62206, 0.00098,\r\n 555, 0.38361, 0.61552, 0.00088,\r\n 556, 0.39036, 0.60885, 0.00079,\r\n 557, 0.39716, 0.60214, 0.00071,\r\n 558, 0.40393, 0.59543, 0.00064,\r\n 559, 0.41062, 0.58881, 0.00057,\r\n 560, 0.41716, 0.58232, 0.00051,\r\n 561, 0.42354, 0.57600, 0.00046,\r\n 562, 0.42980, 0.56979, 0.00042,\r\n 563, 0.43602, 0.56360, 0.00037,\r\n 564, 0.44227, 0.55740, 0.00034,\r\n 565, 0.44859, 0.55110, 0.00030,\r\n 566, 0.45503, 0.54470, 0.00027,\r\n 567, 
0.46153, 0.53823, 0.00025,\r\n 568, 0.46803, 0.53175, 0.00022,\r\n 569, 0.47450, 0.52530, 0.00020,\r\n 570, 0.48088, 0.51894, 0.00018,\r\n 571, 0.48714, 0.51269, 0.00016,\r\n 572, 0.49331, 0.50654, 0.00015,\r\n 573, 0.49939, 0.50048, 0.00013,\r\n 574, 0.50541, 0.49447, 0.00012,\r\n 575, 0.51138, 0.48851, 0.00011,\r\n 576, 0.51732, 0.48258, 0.00010,\r\n 577, 0.52323, 0.47668, 0.00009,\r\n 578, 0.52914, 0.47078, 0.00008,\r\n 579, 0.53505, 0.46488, 0.00007,\r\n 580, 0.54097, 0.45896, 0.00007,\r\n 581, 0.54689, 0.45305, 0.00006,\r\n 582, 0.55275, 0.44719, 0.00006,\r\n 583, 0.55849, 0.44146, 0.00005,\r\n 584, 0.56405, 0.43591, 0.00005,\r\n 585, 0.56937, 0.43059, 0.00004,\r\n 586, 0.57444, 0.42552, 0.00004,\r\n 587, 0.57930, 0.42066, 0.00004,\r\n 588, 0.58401, 0.41596, 0.00003,\r\n 589, 0.58860, 0.41137, 0.00003,\r\n 590, 0.59312, 0.40685, 0.00003,\r\n 591, 0.59760, 0.40238, 0.00002,\r\n 592, 0.60201, 0.39796, 0.00002,\r\n 593, 0.60636, 0.39362, 0.00002,\r\n 594, 0.61060, 0.38938, 0.00002,\r\n 595, 0.61474, 0.38524, 0.00002,\r\n 596, 0.61877, 0.38121, 0.00002,\r\n 597, 0.62269, 0.37729, 0.00002,\r\n 598, 0.62653, 0.37346, 0.00001,\r\n 599, 0.63028, 0.36971, 0.00001,\r\n 600, 0.63396, 0.36602, 0.00001,\r\n 601, 0.63758, 0.36241, 0.00001,\r\n 602, 0.64111, 0.35888, 0.00001,\r\n 603, 0.64454, 0.35545, 0.00001,\r\n 604, 0.64785, 0.35214, 0.00001,\r\n 605, 0.65103, 0.34896, 0.00001,\r\n 606, 0.65407, 0.34593, 0.00001,\r\n 607, 0.65697, 0.34302, 0.00001,\r\n 608, 0.65975, 0.34024, 0.00001,\r\n 609, 0.66242, 0.33758, 0.00001,\r\n 610, 0.66498, 0.33502, 0.00001,\r\n 611, 0.66744, 0.33256, 0.00001,\r\n 612, 0.66980, 0.33019, 0.00001,\r\n 613, 0.67208, 0.32791, 0.00001,\r\n 614, 0.67427, 0.32572, 0.00000,\r\n 615, 0.67638, 0.32361, 0.00000,\r\n 616, 0.67841, 0.32159, 0.00000,\r\n 617, 0.68038, 0.31962, 0.00000,\r\n 618, 0.68228, 0.31772, 0.00000,\r\n 619, 0.68414, 0.31586, 0.00000,\r\n 620, 0.68596, 0.31404, 0.00000,\r\n 621, 0.68773, 0.31227, 0.00000,\r\n 622, 0.68945, 0.31055, 0.00000,\r\n 623, 0.69109, 0.30891, 0.00000,\r\n 624, 0.69262, 0.30738, 0.00000,\r\n 625, 0.69405, 0.30595, 0.00000,\r\n 626, 0.69535, 0.30465, 0.00000,\r\n 627, 0.69656, 0.30344, 0.00000,\r\n 628, 0.69768, 0.30232, 0.00000,\r\n 629, 0.69875, 0.30125, 0.00000,\r\n 630, 0.69977, 0.30023, 0.00000,\r\n 631, 0.70077, 0.29923, 0.00000,\r\n 632, 0.70174, 0.29826, 0.00000,\r\n 633, 0.70268, 0.29732, 0.00000,\r\n 634, 0.70359, 0.29641, 0.00000,\r\n 635, 0.70447, 0.29553, 0.00000,\r\n 636, 0.70533, 0.29467, 0.00000,\r\n 637, 0.70618, 0.29382, 0.00000,\r\n 638, 0.70702, 0.29298, 0.00000,\r\n 639, 0.70786, 0.29214, 0.00000,\r\n 640, 0.70871, 0.29129, 0.00000,\r\n 641, 0.70957, 0.29043, 0.00000,\r\n 642, 0.71042, 0.28958, 0.00000,\r\n 643, 0.71122, 0.28878, 0.00000,\r\n 644, 0.71195, 0.28805, 0.00000,\r\n 645, 0.71259, 0.28741, 0.00000,\r\n 646, 0.71312, 0.28688, 0.00000,\r\n 647, 0.71356, 0.28644, 0.00000,\r\n 648, 0.71395, 0.28605, 0.00000,\r\n 649, 0.71431, 0.28569, 0.00000,\r\n 650, 0.71465, 0.28535, 0.00000,\r\n 651, 0.71501, 0.28499, 0.00000,\r\n 652, 0.71537, 0.28463, 0.00000,\r\n 653, 0.71574, 0.28426, 0.00000,\r\n 654, 0.71611, 0.28389, 0.00000,\r\n 655, 0.71648, 0.28352, 0.00000,\r\n 656, 0.71685, 0.28315, 0.00000,\r\n 657, 0.71721, 0.28279, 0.00000,\r\n 658, 0.71756, 0.28244, 0.00000,\r\n 659, 0.71791, 0.28209, 0.00000,\r\n 660, 0.71824, 0.28176, 0.00000,\r\n 661, 0.71857, 0.28143, 0.00000,\r\n 662, 0.71887, 0.28113, 0.00000,\r\n 663, 0.71916, 0.28084, 0.00000,\r\n 664, 0.71943, 0.28057, 0.00000,\r\n 665, 0.71967, 0.28033, 
0.00000,\r\n 666, 0.71988, 0.28012, 0.00000,\r\n 667, 0.72007, 0.27993, 0.00000,\r\n 668, 0.72024, 0.27976, 0.00000,\r\n 669, 0.72039, 0.27961, 0.00000,\r\n 670, 0.72054, 0.27946, 0.00000,\r\n 671, 0.72067, 0.27933, 0.00000,\r\n 672, 0.72080, 0.27920, 0.00000,\r\n 673, 0.72093, 0.27907, 0.00000,\r\n 674, 0.72104, 0.27896, 0.00000,\r\n 675, 0.72115, 0.27885, 0.00000,\r\n 676, 0.72125, 0.27875, 0.00000,\r\n 677, 0.72134, 0.27866, 0.00000,\r\n 678, 0.72143, 0.27857, 0.00000,\r\n 679, 0.72151, 0.27849, 0.00000,\r\n 680, 0.72158, 0.27842, 0.00000,\r\n 681, 0.72165, 0.27835, 0.00000,\r\n 682, 0.72171, 0.27829, 0.00000,\r\n 683, 0.72177, 0.27823, 0.00000,\r\n 684, 0.72183, 0.27817, 0.00000,\r\n 685, 0.72187, 0.27813, 0.00000,\r\n 686, 0.72192, 0.27808, 0.00000,\r\n 687, 0.72195, 0.27805, 0.00000,\r\n 688, 0.72199, 0.27801, 0.00000,\r\n 689, 0.72202, 0.27798, 0.00000,\r\n 690, 0.72205, 0.27795, 0.00000,\r\n 691, 0.72208, 0.27792, 0.00000,\r\n 692, 0.72211, 0.27789, 0.00000,\r\n 693, 0.72213, 0.27787, 0.00000,\r\n 694, 0.72215, 0.27785, 0.00000,\r\n 695, 0.72217, 0.27783, 0.00000,\r\n 696, 0.72218, 0.27782, 0.00000,\r\n 697, 0.72218, 0.27782, 0.00000,\r\n 698, 0.72219, 0.27781, 0.00000,\r\n 699, 0.72219, 0.27781, 0.00000,\r\n 700, 0.72219, 0.27781, 0.00000,\r\n 701, 0.72219, 0.27781, 0.00000,\r\n 702, 0.72219, 0.27781, 0.00000,\r\n 703, 0.72219, 0.27781, 0.00000,\r\n 704, 0.72219, 0.27781, 0.00000,\r\n 705, 0.72219, 0.27781, 0.00000,\r\n 706, 0.72218, 0.27782, 0.00000,\r\n 707, 0.72217, 0.27783, 0.00000,\r\n 708, 0.72216, 0.27784, 0.00000,\r\n 709, 0.72215, 0.27785, 0.00000,\r\n 710, 0.72213, 0.27787, 0.00000,\r\n 711, 0.72210, 0.27790, 0.00000,\r\n 712, 0.72207, 0.27793, 0.00000,\r\n 713, 0.72204, 0.27796, 0.00000,\r\n 714, 0.72201, 0.27799, 0.00000,\r\n 715, 0.72198, 0.27802, 0.00000,\r\n 716, 0.72195, 0.27805, 0.00000,\r\n 717, 0.72192, 0.27808, 0.00000,\r\n 718, 0.72189, 0.27811, 0.00000,\r\n 719, 0.72185, 0.27815, 0.00000,\r\n 720, 0.72182, 0.27818, 0.00000,\r\n 721, 0.72179, 0.27821, 0.00000,\r\n 722, 0.72175, 0.27825, 0.00000,\r\n 723, 0.72171, 0.27829, 0.00000,\r\n 724, 0.72167, 0.27833, 0.00000,\r\n 725, 0.72163, 0.27837, 0.00000,\r\n 726, 0.72159, 0.27841, 0.00000,\r\n 727, 0.72155, 0.27845, 0.00000,\r\n 728, 0.72151, 0.27849, 0.00000,\r\n 729, 0.72147, 0.27853, 0.00000,\r\n 730, 0.72142, 0.27858, 0.00000,\r\n 731, 0.72138, 0.27862, 0.00000,\r\n 732, 0.72134, 0.27866, 0.00000,\r\n 733, 0.72130, 0.27870, 0.00000,\r\n 734, 0.72125, 0.27875, 0.00000,\r\n 735, 0.72120, 0.27880, 0.00000,\r\n 736, 0.72115, 0.27885, 0.00000,\r\n 737, 0.72109, 0.27891, 0.00000,\r\n 738, 0.72104, 0.27896, 0.00000,\r\n 739, 0.72097, 0.27903, 0.00000,\r\n 740, 0.72091, 0.27909, 0.00000,\r\n 741, 0.72084, 0.27916, 0.00000,\r\n 742, 0.72078, 0.27922, 0.00000,\r\n 743, 0.72071, 0.27929, 0.00000,\r\n 744, 0.72064, 0.27936, 0.00000,\r\n 745, 0.72057, 0.27943, 0.00000,\r\n 746, 0.72050, 0.27950, 0.00000,\r\n 747, 0.72043, 0.27957, 0.00000,\r\n 748, 0.72036, 0.27964, 0.00000,\r\n 749, 0.72029, 0.27971, 0.00000,\r\n 750, 0.72022, 0.27978, 0.00000,\r\n 751, 0.72015, 0.27985, 0.00000,\r\n 752, 0.72009, 0.27991, 0.00000,\r\n 753, 0.72002, 0.27998, 0.00000,\r\n 754, 0.71995, 0.28005, 0.00000,\r\n 755, 0.71988, 0.28012, 0.00000,\r\n 756, 0.71980, 0.28020, 0.00000,\r\n 757, 0.71973, 0.28027, 0.00000,\r\n 758, 0.71965, 0.28035, 0.00000,\r\n 759, 0.71958, 0.28042, 0.00000,\r\n 760, 0.71950, 0.28050, 0.00000,\r\n 761, 0.71943, 0.28057, 0.00000,\r\n 762, 0.71935, 0.28065, 0.00000,\r\n 763, 0.71928, 0.28072, 0.00000,\r\n 764, 0.71921, 
0.28079, 0.00000,\r\n 765, 0.71915, 0.28085, 0.00000,\r\n 766, 0.71908, 0.28092, 0.00000,\r\n 767, 0.71901, 0.28099, 0.00000,\r\n 768, 0.71894, 0.28106, 0.00000,\r\n 769, 0.71888, 0.28112, 0.00000,\r\n 770, 0.71881, 0.28119, 0.00000,\r\n 771, 0.71874, 0.28126, 0.00000,\r\n 772, 0.71867, 0.28133, 0.00000,\r\n 773, 0.71860, 0.28140, 0.00000,\r\n 774, 0.71852, 0.28148, 0.00000,\r\n 775, 0.71845, 0.28155, 0.00000,\r\n 776, 0.71838, 0.28162, 0.00000,\r\n 777, 0.71830, 0.28170, 0.00000,\r\n 778, 0.71822, 0.28178, 0.00000,\r\n 779, 0.71814, 0.28186, 0.00000,\r\n 780, 0.71807, 0.28193, 0.00000,\r\n 781, 0.71799, 0.28201, 0.00000,\r\n 782, 0.71791, 0.28209, 0.00000,\r\n 783, 0.71783, 0.28217, 0.00000,\r\n 784, 0.71775, 0.28225, 0.00000,\r\n 785, 0.71766, 0.28234, 0.00000,\r\n 786, 0.71758, 0.28242, 0.00000,\r\n 787, 0.71750, 0.28250, 0.00000,\r\n 788, 0.71742, 0.28258, 0.00000,\r\n 789, 0.71733, 0.28267, 0.00000,\r\n 790, 0.71725, 0.28275, 0.00000,\r\n 791, 0.71716, 0.28284, 0.00000,\r\n 792, 0.71708, 0.28292, 0.00000,\r\n 793, 0.71699, 0.28301, 0.00000,\r\n 794, 0.71690, 0.28310, 0.00000,\r\n 795, 0.71681, 0.28319, 0.00000,\r\n 796, 0.71671, 0.28329, 0.00000,\r\n 797, 0.71661, 0.28339, 0.00000,\r\n 798, 0.71651, 0.28349, 0.00000,\r\n 799, 0.71641, 0.28359, 0.00000,\r\n 800, 0.71630, 0.28370, 0.00000,\r\n 801, 0.71619, 0.28381, 0.00000,\r\n 802, 0.71609, 0.28391, 0.00000,\r\n 803, 0.71598, 0.28402, 0.00000,\r\n 804, 0.71588, 0.28412, 0.00000,\r\n 805, 0.71577, 0.28423, 0.00000,\r\n 806, 0.71566, 0.28434, 0.00000,\r\n 807, 0.71556, 0.28444, 0.00000,\r\n 808, 0.71545, 0.28455, 0.00000,\r\n 809, 0.71534, 0.28466, 0.00000,\r\n 810, 0.71523, 0.28477, 0.00000,\r\n 811, 0.71513, 0.28487, 0.00000,\r\n 812, 0.71502, 0.28498, 0.00000,\r\n 813, 0.71491, 0.28509, 0.00000,\r\n 814, 0.71480, 0.28520, 0.00000,\r\n 815, 0.71469, 0.28531, 0.00000,\r\n 816, 0.71459, 0.28541, 0.00000,\r\n 817, 0.71449, 0.28551, 0.00000,\r\n 818, 0.71438, 0.28562, 0.00000,\r\n 819, 0.71428, 0.28572, 0.00000,\r\n 820, 0.71418, 0.28582, 0.00000,\r\n 821, 0.71409, 0.28591, 0.00000,\r\n 822, 0.71400, 0.28600, 0.00000,\r\n 823, 0.71391, 0.28609, 0.00000,\r\n 824, 0.71382, 0.28618, 0.00000,\r\n 825, 0.71373, 0.28627, 0.00000,\r\n 826, 0.71365, 0.28635, 0.00000,\r\n 827, 0.71358, 0.28642, 0.00000,\r\n 828, 0.71350, 0.28650, 0.00000,\r\n 829, 0.71343, 0.28657, 0.00000,\r\n 830, 0.71336, 0.28664, 0.00000\r\n ])\r\n\r\n coord = coord.reshape(441, 4).astype(np.float64).T\r\n coord = coord[:, ::binwidth]\r\n coord = pd.DataFrame(data=coord.T, columns=colnames)\r\n coord.set_index('Wavelength', inplace=True)\r\n coord.index = pd.Int64Index(coord.index)\r\n if connect:\r\n coord = coord.append(coord.iloc[0], ignore_index=True)\r\n return coord\r\n" ]
[ [ "numpy.array", "pandas.Int64Index", "pandas.DataFrame", "pandas.Series", "numpy.vstack" ] ]
ymouad/adapt
[ "899bb5ce9af093b6d98210e384daa49c96c3d203" ]
[ "adapt/metrics.py" ]
[ "import inspect\nimport copy\n\nimport numpy as np\nimport tensorflow as tf\nfrom scipy import linalg\nfrom sklearn.metrics import pairwise\nfrom sklearn.base import clone\nfrom sklearn.model_selection import train_test_split\nfrom adapt.utils import get_default_discriminator\nfrom tensorflow.keras.optimizers import Adam\n\nEPS = np.finfo(float).eps\n\n\ndef _fit_alpha(Xs, Xt, centers, sigma):\n \"\"\"\n Fit alpha coeficients to compute J-score\n \"\"\"\n A = pairwise.rbf_kernel(Xt, centers, sigma)\n b = np.mean(pairwise.rbf_kernel(centers, Xs, sigma), axis=1)\n b = b.reshape(-1, 1)\n\n alpha = np.ones((len(centers), 1)) / len(centers)\n previous_objective = -np.inf\n objective = np.mean(np.log(np.dot(A, alpha) + EPS))\n\n k = 0\n while k < 5000 and objective-previous_objective > 1e-6:\n previous_objective = objective\n alpha_p = np.copy(alpha)\n alpha += 1e-4 * np.dot(\n np.transpose(A), 1./(np.dot(A, alpha) + EPS)\n )\n alpha += b * ((((1-np.dot(np.transpose(b), alpha)) /\n (np.dot(np.transpose(b), b) + EPS))))\n alpha = np.maximum(0, alpha)\n alpha /= (np.dot(np.transpose(b), alpha) + EPS)\n objective = np.mean(np.log(np.dot(A, alpha) + EPS))\n k += 1\n return alpha\n\n\ndef cov_distance(Xs, Xt):\n \"\"\"\n Compute the mean absolute difference\n between the covariance matrixes of Xs and Xt\n \n Parameters\n ----------\n Xs : array\n Source array\n \n Xt : array\n Target array\n \n Returns\n -------\n score : float\n \n See also\n --------\n frechet_distance\n CORAL\n\n References\n ----------\n .. [1] `[1] <https://arxiv.org/pdf/1511.05547.pdf>`_ Sun B., Feng J., Saenko K. \\\n\"Return of frustratingly easy domain adaptation\". In AAAI, 2016.\n \"\"\"\n cov_Xs = np.cov(Xs, rowvar=False)\n cov_Xt = np.cov(Xt, rowvar=False)\n return np.mean(np.abs(cov_Xs-cov_Xt))\n\n\ndef frechet_distance(Xs, Xt):\n \"\"\"\n Compute the frechet distance\n between Xs and Xt.\n \n .. math::\n \n \\\\Delta = ||\\\\mu_S - \\\\mu_T||_2^2 + Tr\\\\left(\\\\Sigma_S + \\\\Sigma_T\n - 2 (\\\\Sigma_S \\\\cdot \\\\Sigma_T)^{\\\\frac{1}{2}} \\\\right)\n \n Where:\n \n - :math:`\\\\mu_S, \\\\mu_T` are the mean of Xs, Xt along first axis.\n - :math:`\\\\Sigma_S, \\\\Sigma_T` are the covariance matrix of Xs, Xt.\n \n Parameters\n ----------\n Xs : array\n Source array\n \n Xt : array\n Target array\n \n Returns\n -------\n score : float\n \n See also\n --------\n normalized_frechet_distance\n linear_discrepancy\n normalized_linear_discrepancy\n \n References\n ----------\n .. [1] `[1] <https://www.sciencedirect.com/science/article/pii/00\\\n47259X8290077X?via%3Dihub>`_ Dowson, D. C; Landau, B. V. \"The Fréchet \\\ndistance between multivariate normal distributions\". JMVA. 1982\n \"\"\"\n mu1 = np.mean(Xs, axis=0) \n sigma1 = np.cov(Xs, rowvar=False)\n mu2 = np.mean(Xt, axis=0)\n sigma2 = np.cov(Xt, rowvar=False)\n ssdiff = np.sum((mu1 - mu2)**2.0)\n product = np.array(sigma1.dot(sigma2))\n if product.ndim < 2:\n product = product.reshape(-1, 1)\n covmean = linalg.sqrtm(product)\n if np.iscomplexobj(covmean):\n covmean = covmean.real\n return ssdiff + np.trace(sigma1 + sigma2 - 2.0 * covmean)\n\n\ndef linear_discrepancy(Xs, Xt, power_method=False, n_iter=20):\n \"\"\"\n Compute the linear discrepancy\n between Xs and Xt.\n \n .. 
math::\n \n \\\\Delta = \\\\max_{u \\\\in \\\\mathbb{R}^p} u^T (X_S^T X_S - X_T^T X_T) u\n \n Where:\n \n - :math:`p` is the number of features of Xs and Xt.\n \n Parameters\n ----------\n Xs : array\n Source array\n \n Xt : array\n Target array\n \n power_method : bool (default=False)\n Weither to use the power method\n approximation or not.\n \n n_iter : int (default=20)\n Number of iteration for power method\n \n Returns\n -------\n score : float\n \n See also\n --------\n normalized_linear_discrepancy\n frechet_distance\n normalized_frechet_distance\n\n References\n ----------\n .. [1] `[1] <https://arxiv.org/pdf/0902.3430.pdf>`_ \\\nY. Mansour, M. Mohri, and A. Rostamizadeh. \"Domain \\\nadaptation: Learning bounds and algorithms\". In COLT, 2009.\n \"\"\"\n M = (1/len(Xs)) * np.dot(np.transpose(Xs), Xs) - (1/len(Xt)) * np.dot(np.transpose(Xt), Xt)\n if power_method:\n x = np.ones(len(M))\n for _ in range(n_iter):\n x = M.dot(x)\n x_max = np.max(np.abs(x))\n x = (1 / (x_max + EPS)) * x\n else:\n e, v = linalg.eig(M)\n x_max = np.max(np.abs(e))\n return x_max\n\n\ndef normalized_linear_discrepancy(Xs, Xt, power_method=False, n_iter=20):\n \"\"\"\n Compute the normalized linear discrepancy\n between Xs and Xt.\n \n Xs and Xt are first scaled by a factor\n ``(std(Xs) + std(Xt)) / 2``\n and centered around ``(mean(Xs) + mean(Xt)) / 2``\n \n Then, the linear discrepancy is computed and divided by the number\n of features.\n \n Parameters\n ----------\n Xs : array\n Source array\n \n Xt : array\n Target array\n \n Returns\n -------\n score : float\n \n See also\n --------\n linear_discrepancy\n frechet_distance\n normalized_frechet_distance\n\n References\n ----------\n .. [1] `[1] <https://arxiv.org/pdf/0902.3430.pdf>`_ \\\nY. Mansour, M. Mohri, and A. Rostamizadeh. \"Domain \\\nadaptation: Learning bounds and algorithms\". In COLT, 2009.\n \"\"\"\n std = (np.std(Xs) + np.std(Xt) + EPS)/2\n mu = (np.mean(Xs) + np.mean(Xt))/2\n x_max = linear_discrepancy((Xs-mu)/std, (Xt-mu)/std, power_method, n_iter)\n return x_max / Xs.shape[1]\n\n\ndef normalized_frechet_distance(Xs, Xt):\n \"\"\"\n Compute the normalized frechet distance\n between Xs and Xt.\n \n Xs and Xt are first scaled by a factor\n ``(std(Xs) + std(Xt)) / 2``\n and centered around ``(mean(Xs) + mean(Xt)) / 2``\n \n Then, the frechet distance is computed and divided by the number\n of features.\n \n Parameters\n ----------\n Xs : array\n Source array\n \n Xt : array\n Target array\n \n Returns\n -------\n score : float\n \n See also\n --------\n frechet_distance\n linear_discrepancy\n normalized_linear_discrepancy\n \n References\n ----------\n .. [1] `[1] <https://www.sciencedirect.com/science/article/pii/00\\\n47259X8290077X?via%3Dihub>`_ Dowson, D. C; Landau, B. V. \"The Fréchet \\\ndistance between multivariate normal distributions\". JMVA. 1982\n \"\"\"\n std = (np.std(Xs) + np.std(Xt) + EPS)/2\n mu = (np.mean(Xs) + np.mean(Xt))/2\n x_max = frechet_distance((Xs-mu)/std, (Xt-mu)/std)\n return x_max / Xs.shape[1]\n\n\ndef j_score(Xs, Xt, max_centers=100, sigma=None):\n \"\"\"\n Compute the negative J-score between Xs and Xt.\n \n .. math::\n \n \\\\Delta = -\\\\int_{\\\\mathcal{X}} P(X_T) \\\\log(P(X_T) / P(X_S))\n \n Where:\n \n - :math:`P(X_S), P(X_T)` are the probability density\n functions of Xs and Xt.\n \n The source and target probability density functions\n are approximated with a mixture of gaussian kernels\n of bandwith ``sigma`` and centered in ``max_centers``\n random points of Xt. 
The coefficient of the mixture\n are determined by solving a convex optimization (see [1])\n \n Parameters\n ----------\n Xs : array\n Source array\n \n Xt : array\n Target array\n \n max_centers : int (default=100)\n Maximum number of centers from Xt\n \n sigma : float (default=None)\n Kernel bandwidth. If ``None``, the mean\n of pairwise distances between data from\n Xt is used.\n \n Returns\n -------\n score : float\n \n See also\n --------\n KLIEP\n\n References\n ----------\n .. [1] `[1] <https://papers.nips.cc/paper/3248-direct-importance-estimation\\\n-with-model-selection-and-its-application-to-covariate-shift-adaptation.pdf>`_ \\\nM. Sugiyama, S. Nakajima, H. Kashima, P. von Bünau and M. Kawanabe. \\\n\"Direct importance estimation with model selection and its application \\\nto covariateshift adaptation\". In NIPS 2007\n \"\"\"\n if len(Xt) > max_centers:\n random_index = np.random.choice(\n len(Xt), size=max_centers, replace=False)\n centers = Xt[random_index]\n else:\n centers = Xt\n \n if sigma is None:\n sigma = pairwise.euclidean_distances(Xt, Xt).mean()\n \n alphas = _fit_alpha(Xs, Xt, centers, sigma)\n \n j_score_ = np.mean(np.log(np.dot(\n pairwise.rbf_kernel(Xt,\n centers,\n sigma),\n alphas) + EPS))\n return -j_score_\n\n\ndef domain_classifier(Xs, Xt, classifier=None, **fit_params):\n \"\"\"\n Return 1 minus the mean square error of a classifer\n disciminating between Xs and Xt.\n \n .. math::\n \n \\\\Delta = 1 - \\\\min_{h \\\\in H} || h(X_S) - 1 ||^2 +\n || h(X_T) ||^2\n \n Where:\n \n - :math:`H` is a class of classifier.\n \n Parameters\n ----------\n Xs : array\n Source array\n \n Xt : array\n Target array\n \n classifier : sklearn estimator or tensorflow Model instance\n Classifier\n \n fit_params : key, value arguments\n Parameters for the fit method of the classifier.\n \n Returns\n -------\n score : float\n \n See also\n --------\n reverse_validation\n DANN\n \n References\n ----------\n .. [1] `[1] <http://jmlr.org/papers/volume17/15-239/15-239.pdf>`_ Y. Ganin, \\\nE. Ustinova, H. Ajakan, P. Germain, H. Larochelle, F. Laviolette, M. Marchand, \\\nand V. Lempitsky. \"Domain-adversarial training of neural networks\". In JMLR, 2016.\n \"\"\"\n Xs_train, Xs_test = train_test_split(Xs, train_size=0.8)\n Xt_train, Xt_test = train_test_split(Xt, train_size=0.8)\n \n X_train = np.concatenate((Xs_train, Xt_train))\n y_train = np.concatenate((np.zeros(len(Xs_train)),\n np.ones(len(Xt_train))))\n X_test = np.concatenate((Xs_test, Xt_test))\n y_test = np.concatenate((np.zeros(len(Xs_test)),\n np.ones(len(Xt_test))))\n \n if classifier is None:\n classifier = get_default_discriminator()\n classifier.compile(optimizer=Adam(0.001), loss=\"bce\")\n if fit_params == {}:\n fit_params = dict(epochs=max(1, int(3000 * 64 / len(X_train))),\n batch_size=64,\n verbose=0)\n classifier.fit(X_train, y_train, **fit_params)\n \n y_pred = classifier.predict(X_test)\n return 1-np.mean(np.square(y_pred-y_test.reshape(y_pred.shape)))\n\n\ndef reverse_validation(model, Xs, ys, Xt, **fit_params):\n \"\"\"\n Reverse validation.\n \n The reverse validation score is computed as a source error\n by inversing the role of the source and the target domains.\n A clone of the model is trained to adapt from the target to\n the source using the model target predictions as\n pseudo target labels. 
Then the final score is computed between\n the source prediction of the cloned model and the groundtruth.\n \n Parameters\n ----------\n model : BaseAdapt instance\n Adaptation model\n \n Xs : array\n Source input array\n \n ys : array\n Source output array\n \n Xt : array\n Target input array\n \n fit_params : key, value arguments\n Parameters for the fit method of the cloned model.\n \n Returns\n -------\n score : float\n \n See also\n --------\n domain_classifier\n DANN\n \n References\n ----------\n .. [1] `[1] <http://jmlr.org/papers/volume17/15-239/15-239.pdf>`_ Y. Ganin, \\\nE. Ustinova, H. Ajakan, P. Germain, H. Larochelle, F. Laviolette, M. Marchand, \\\nand V. Lempitsky. \"Domain-adversarial training of neural networks\". In JMLR, 2016.\n \"\"\"\n yt = model.predict(Xt)\n \n if yt.ndim == 1 and ys.ndim > 1:\n yt = yt.reshape(-1, 1)\n \n if ys.ndim == 1 and yt.ndim > 1:\n yt = yt.ravel()\n \n clone_model = clone(model)\n clone_model.fit(Xt, yt, Xs, **fit_params)\n \n return clone_model.score_estimator(Xs, ys)" ]
[ [ "numpy.dot", "numpy.copy", "sklearn.metrics.pairwise.rbf_kernel", "numpy.mean", "numpy.finfo", "numpy.iscomplexobj", "sklearn.metrics.pairwise.euclidean_distances", "numpy.concatenate", "scipy.linalg.eig", "numpy.transpose", "sklearn.base.clone", "tensorflow.keras.optimizers.Adam", "numpy.std", "scipy.linalg.sqrtm", "sklearn.model_selection.train_test_split", "numpy.cov", "numpy.trace", "numpy.sum", "numpy.abs", "numpy.maximum" ] ]
Accenture/Docknet
[ "eb3cad13701471a7aaeea1d573bc5608855bab52" ]
[ "src/docknet/data_generator/island_data_generator.py" ]
[ "from typing import Tuple\n\nimport numpy as np\n\nfrom docknet.data_generator.data_generator import DataGenerator\nfrom docknet.util.geometry import polar_to_cartesian, random_to_polar\n\n\nclass IslandDataGenerator(DataGenerator):\n \"\"\"\n The chessboard data generator generates two classes (0 and 1) of 2D vectors distributed as follows:\n\n 111\n 1 1\n 1 0 1\n 1 1\n 111\n \"\"\"\n\n def island(self, x: np.array):\n \"\"\"\n Generator function of 2D vectors of class 0 (the island in the center)\n :param x: a 2D random generated vector\n :return: the corresponding individual of class 0\n \"\"\"\n cartesian = polar_to_cartesian(random_to_polar(x))\n f = cartesian * self.island_radius + self.island_origin\n return f\n\n def sea(self, x: np.array):\n \"\"\"\n Generator function of 2D vectors of class 0 (the ring around the island)\n :param x: a 2D random generated vector\n :return: the corresponding individual of class 0\n \"\"\"\n polar = random_to_polar(x)\n polar[0] = polar[0] * self.sea_width + self.sea_inner_diameter\n cartesian = polar_to_cartesian(polar) * self.sea_scale + self.island_origin\n return cartesian\n\n def __init__(self, x0_range: Tuple[float, float], x1_range: Tuple[float, float]):\n \"\"\"\n Initializes the island data data generator\n :param x0_range: tuple of minimum and maximum x values\n :param x1_range: tuple of minimum and maximum y values\n \"\"\"\n super().__init__((self.island, self.sea))\n x_center = (x0_range[1] + x0_range[0]) / 2\n y_center = (x1_range[1] + x1_range[0]) / 2\n x_length = x0_range[1] - x0_range[0]\n y_length = x1_range[1] - x1_range[0]\n self.island_origin = np.array([x_center, y_center])\n self.island_radius = np.array([x_length / 6, y_length / 6])\n self.sea_width = 1/3\n self.sea_inner_diameter = 2/3\n self.sea_scale = np.array([x_length / 2, y_length / 2])\n" ]
[ [ "numpy.array" ] ]
matarof/tpu
[ "d2e3b810134b200214f42cb004f20fe6b8e2cab4" ]
[ "models/official/unet3d/tpu_executor.py" ]
[ "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Interface to run unet model.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n#Standard imports\nfrom __future__ import print_function\n\nimport os\nfrom absl import flags\nfrom absl import logging\nimport numpy as np\nimport tensorflow.compat.v1 as tf\n\nfrom hyperparameters import params_dict\n\nFLAGS = flags.FLAGS\n\n\ndef define_tpu_flags():\n \"\"\"Define common flags for TPU.\"\"\"\n flags.DEFINE_string(\n 'tpu',\n default=None,\n help='The Cloud TPU to use for training. This should be either the name '\n 'used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 '\n 'url.')\n flags.DEFINE_string(\n 'gcp_project',\n default=None,\n help='Project name for the Cloud TPU-enabled project. If not specified, we '\n 'will attempt to automatically detect the GCE project from metadata.')\n flags.DEFINE_string(\n 'tpu_zone',\n default=None,\n help='GCE zone where the Cloud TPU is located in. If not specified, we '\n 'will attempt to automatically detect the GCE project from metadata.')\n flags.DEFINE_integer(\n 'num_cores', default=8, help='Number of TPU cores for training')\n flags.DEFINE_string(\n 'eval_master',\n default='',\n help='GRPC URL of the eval master. 
Set to an appropiate value when running '\n 'on CPU/GPU')\n flags.DEFINE_bool('use_tpu', True, 'Use TPUs rather than CPUs')\n flags.DEFINE_multi_integer(\n 'input_partition_dims', [1],\n 'A list that describes the partition dims for all the tensors.')\n flags.DEFINE_integer('iterations_per_loop', 8,\n 'Number of iterations per TPU training loop')\n\n\ndef get_tpu_flags():\n \"\"\"Get TPU config related FLAGS as dictionary.\"\"\"\n return {\n 'tpu': FLAGS.tpu,\n 'gcp_project': FLAGS.gcp_project,\n 'tpu_zone': FLAGS.tpu_zone,\n 'num_cores': FLAGS.num_cores,\n 'eval_master': FLAGS.eval_master,\n 'use_tpu': FLAGS.use_tpu,\n 'input_partition_dims': FLAGS.input_partition_dims,\n 'iterations_per_loop': FLAGS.iterations_per_loop,\n }\n\n\ndef write_summary(logs, summary_writer, current_step):\n \"\"\"Write out summaries of current training step for the checkpoint.\"\"\"\n with tf.Graph().as_default():\n summaries = [\n tf.Summary.Value(tag=tag, simple_value=value)\n for tag, value in logs.items()\n ]\n tf_summary = tf.Summary(value=summaries)\n summary_writer.add_summary(tf_summary, current_step)\n\n\nclass TPUEstimatorExecuter(object):\n \"\"\"An executor class for running jobs on TPUs.\"\"\"\n\n def __init__(self, model_fn, params, train_input_shapes, eval_input_shapes):\n self._model_dir = params.model_dir\n self._params = params\n self._train_input_shapes = train_input_shapes\n self._eval_input_shapes = eval_input_shapes\n\n if train_input_shapes:\n self._train_estimator = self._build_estimator(\n params.tpu_config, model_fn, params, train_input_shapes)\n if eval_input_shapes:\n self._eval_estimator = self._build_estimator(\n params.tpu_config, model_fn, params, eval_input_shapes)\n\n def _save_params(self):\n \"\"\"Save parameters to config files if model_dir is defined.\"\"\"\n\n model_dir = self._model_dir\n if model_dir is not None:\n if not tf.gfile.Exists(model_dir):\n tf.gfile.MakeDirs(model_dir)\n params_dict.save_params_dict_to_yaml(self._params,\n model_dir + '/params.yaml')\n\n def _build_estimator(self, tpu_flags, model_fn, params, input_shapes):\n \"\"\"Creates TPUEstimator/Estimator instance.\n\n Args:\n tpu_flags: FLAGS of TPU configs for constructing the TPUEstimator.\n model_fn: model function that returns (TPU)EstimatorSpec.\n params: A ParamsDict of TPU configs and dictionary to pass to Estimator\n `model_fn`.\n input_shapes: A nested tuple or list indicating the shape of each input.\n For example, ([128, 128, 128, 1], [128, 128, 128, 3]).\n\n Returns:\n TFEstimator or TPUEstimator instance.\n \"\"\"\n eval_master = tpu_flags.eval_master\n logging.info('debug tpu_flags %s', tpu_flags.as_dict())\n if tpu_flags.use_tpu:\n tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(\n tpu_flags.tpu, zone=tpu_flags.tpu_zone, project=tpu_flags.gcp_project)\n tpu_grpc_url = tpu_cluster_resolver.get_master()\n if not eval_master:\n eval_master = tpu_grpc_url\n tf.Session.reset(tpu_grpc_url)\n else:\n tpu_cluster_resolver = None\n\n dims_overridden = params.input_partition_dims\n if tpu_flags.input_partition_dims != [1]:\n dims_overridden = tpu_flags.input_partition_dims\n\n if dims_overridden and dims_overridden != [1]:\n feature_shape, label_shape = input_shapes\n # The input function may drop the last channel dimension. 
We need to do\n # the same for spatial partition dims as well.\n # Do not forget the batch dimension.\n feature_partition = dims_overridden[:1 + len(feature_shape)]\n label_partition = dims_overridden[:1 + len(label_shape)]\n input_partition_dims = [\n feature_partition,\n label_partition,\n ]\n num_cores_per_replica = int(np.prod(dims_overridden))\n num_shards = tpu_flags.num_cores // num_cores_per_replica\n else:\n num_cores_per_replica = None\n input_partition_dims = None\n num_shards = tpu_flags.num_cores\n\n # Sets up config for TPUEstimator.\n tpu_config = tf.estimator.tpu.TPUConfig(\n tpu_flags.iterations_per_loop,\n num_shards=num_shards,\n num_cores_per_replica=num_cores_per_replica,\n input_partition_dims=input_partition_dims,\n per_host_input_for_training=tf.estimator.tpu.InputPipelineConfig.PER_HOST_V2 # pylint: disable=line-too-long\n )\n\n run_config = tf.estimator.tpu.RunConfig(\n cluster=tpu_cluster_resolver,\n evaluation_master=eval_master,\n model_dir=self._model_dir,\n log_step_count_steps=tpu_flags.iterations_per_loop,\n tpu_config=tpu_config,\n )\n\n model_params = dict(\n params.as_dict(),\n use_tpu=tpu_flags.use_tpu,\n )\n\n return tf.estimator.tpu.TPUEstimator(\n model_fn=model_fn,\n use_tpu=tpu_flags.use_tpu,\n train_batch_size=params.train_batch_size,\n eval_batch_size=params.eval_batch_size,\n predict_batch_size=params.predict_batch_size,\n config=run_config,\n params=model_params)\n\n def train(self, input_fn):\n \"\"\"Training the model with training data and labels in input_fn.\"\"\"\n self._save_params()\n self._train_estimator.train(input_fn=input_fn,\n max_steps=self._params.train_steps)\n\n def evaluate(self, input_fn):\n \"\"\"Evaluating the model with data and labels in input_fn.\"\"\"\n output_dir = os.path.join(self._model_dir, 'eval')\n tf.gfile.MakeDirs(output_dir)\n\n # Summary writer writes out eval metrics.\n summary_writer = tf.summary.FileWriter(output_dir)\n\n def _terminate_eval():\n logging.info('Terminating eval after %d seconds of '\n 'no checkpoints', self._params.eval_timeout)\n return True\n\n eval_results = None\n # Run evaluation when there's a new checkpoint\n for ckpt in tf.train.checkpoints_iterator(\n self._model_dir,\n min_interval_secs=self._params.min_eval_interval,\n timeout=self._params.eval_timeout,\n timeout_fn=_terminate_eval):\n # Terminate eval job when final checkpoint is reached\n current_step = int(os.path.basename(ckpt).split('-')[1])\n\n logging.info('Starting to evaluate.')\n try:\n eval_results = self._eval_estimator.evaluate(\n input_fn=input_fn, steps=self._params.eval_steps)\n\n # Evaluation task could start before checkpoint is written,\n # get preempted, or faile to write checkpoint correctly.\n if eval_results is not None:\n write_summary(eval_results, summary_writer, current_step)\n\n if current_step >= self._params.train_steps:\n logging.info('Evaluation finished after training step %d',\n current_step)\n break\n except tf.errors.NotFoundError:\n # Since the coordinator is on a different job than the TPU worker,\n # sometimes the TPU worker does not finish initializing until long after\n # the CPU job tells it to start evaluating. 
In this case, the checkpoint\n # file could have been deleted already.\n logging.info('Checkpoint %s no longer exists, skipping checkpoint',\n ckpt)\n summary_writer.close()\n logging.info('Evaluation results %s.', eval_results)\n return eval_results\n\n def train_and_eval(self, train_input_fn, eval_input_fn):\n \"\"\"Run distributed train and eval on UNet model.\"\"\"\n\n self._save_params()\n output_dir = os.path.join(self._model_dir, 'eval')\n tf.gfile.MakeDirs(output_dir)\n summary_writer = tf.summary.FileWriter(output_dir)\n\n num_cycles = int(self._params.train_steps / self._params.num_steps_per_eval)\n for cycle in range(num_cycles):\n logging.info('Start training cycle %d.', cycle)\n self._train_estimator.train(\n input_fn=train_input_fn, steps=self._params.num_steps_per_eval)\n logging.info('Start evaluation cycle %d.', cycle)\n eval_results = self._eval_estimator.evaluate(\n input_fn=eval_input_fn, steps=self._params.eval_steps)\n\n current_step = int(cycle * self._params.num_steps_per_eval)\n write_summary(eval_results, summary_writer, current_step)\n\n logging.info('Starting training cycle %d.', num_cycles)\n self._train_estimator.train(\n input_fn=train_input_fn, steps=self._params.train_steps)\n eval_results = self._eval_estimator.evaluate(\n input_fn=eval_input_fn, steps=self._params.eval_steps)\n write_summary(eval_results, summary_writer, self._params.train_steps)\n summary_writer.close()\n logging.info('Evaluation results %s.', eval_results)\n return eval_results\n" ]
[ [ "tensorflow.compat.v1.distribute.cluster_resolver.TPUClusterResolver", "tensorflow.compat.v1.gfile.MakeDirs", "tensorflow.compat.v1.summary.FileWriter", "tensorflow.compat.v1.Session.reset", "tensorflow.compat.v1.Graph", "tensorflow.compat.v1.estimator.tpu.TPUEstimator", "tensorflow.compat.v1.gfile.Exists", "tensorflow.compat.v1.estimator.tpu.TPUConfig", "tensorflow.compat.v1.Summary", "numpy.prod", "tensorflow.compat.v1.train.checkpoints_iterator", "tensorflow.compat.v1.Summary.Value", "tensorflow.compat.v1.estimator.tpu.RunConfig" ] ]
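A minimal wiring sketch for the TPUEstimatorExecuter entry above. The names my_model_fn, my_train_input_fn, my_eval_input_fn and the params fields are hypothetical stand-ins; only the constructor and train_and_eval API shown in the entry are assumed.

# Hypothetical driver; see lead-in for assumptions.
executor = TPUEstimatorExecuter(
    model_fn=my_model_fn,   # returns a (TPU)EstimatorSpec
    params=params,          # ParamsDict carrying model_dir, tpu_config, batch sizes, train_steps, ...
    train_input_shapes=([128, 128, 128, 1], [128, 128, 128, 3]),
    eval_input_shapes=([128, 128, 128, 1], [128, 128, 128, 3]))
# Alternates num_steps_per_eval training steps with an eval pass, as in the entry.
eval_results = executor.train_and_eval(my_train_input_fn, my_eval_input_fn)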
KarrLab/wc_sim
[ "5b0ee03c3d19193fa67a3797d4258b753e6bc576" ]
[ "wc_sim/model_utilities.py" ]
[ "\"\"\" A set of static methods that help prepare Models for simulation.\n\n:Author: Arthur Goldberg <Arthur.Goldberg@mssm.edu>\n:Date: 2017-04-10\n:Copyright: 2016-2018, Karr Lab\n:License: MIT\n\"\"\"\n\nfrom enum import Enum\nfrom numpy.random import RandomState\nfrom scipy.constants import Avogadro\nimport collections\nimport numpy\nimport pint\nimport re\n\nfrom wc_lang import Species\nfrom wc_onto import onto\nfrom wc_sim.config import core as config_core_multialgorithm\nfrom wc_utils.util.list import difference\nfrom wc_utils.util.ontology import are_terms_equivalent\nfrom wc_utils.util.units import unit_registry\n\nconfig_multialgorithm = config_core_multialgorithm.get_config()['wc_sim']['multialgorithm']\nMEAN_TO_STD_DEV_RATIO = config_multialgorithm['mean_to_std_dev_ratio']\n\n\nclass ModelUtilities(object):\n \"\"\" A set of static methods that help prepare Models for simulation.\"\"\"\n\n @staticmethod\n def find_private_species(model, return_ids=False):\n \"\"\" Identify a model's species that are private to a submodel.\n\n Find the species in a model that are modeled privately by a single submodel. This analysis\n relies on the observation that a submodel can only access species that participate in\n reactions that occur in the submodel. (It might not actually access some of these species,\n e.g. if they're initialized with concentration=0 and the reactions in which they participate\n never fire; however, that cannot be determined statically.)\n\n Args:\n model (:obj:`Model`): a `Model` instance\n return_ids (:obj:`boolean`, optional): if set, return object ids rather than references\n\n Returns:\n :obj:`dict`: a dict that maps each submodel to a list containing the species\n modeled by only the submodel.\n \"\"\"\n species_to_submodels = collections.defaultdict(list)\n for submodel in model.get_submodels():\n for species in submodel.get_children(kind='submodel', __type=Species):\n species_to_submodels[species].append(submodel)\n\n private_species = dict()\n for submodel in model.get_submodels():\n private_species[submodel] = list()\n for species, submodels in species_to_submodels.items():\n if 1 == len(species_to_submodels[species]):\n submodel = species_to_submodels[species].pop()\n private_species[submodel].append(species)\n\n # TODO(Arthur): globally s/serialize()/id()/\n if return_ids:\n tmp_dict = {}\n for submodel, species in private_species.items():\n tmp_dict[submodel.get_primary_attribute()] = list([specie.serialize() for specie in species])\n return tmp_dict\n return private_species\n\n @staticmethod\n def find_shared_species(model, return_ids=False):\n \"\"\" Identify the model's species that are shared by multiple submodels.\n\n Find the species in a model that are modeled by multiple submodels.\n\n Args:\n model (:obj:`Model`): a `Model` instance\n return_ids (:obj:`boolean`, optional): if set, return object ids rather than references\n\n Returns:\n :obj:`set`: a set containing the shared species.\n \"\"\"\n all_species = model.get_species()\n\n private_species_dict = ModelUtilities.find_private_species(model)\n private_species = []\n for p in private_species_dict.values():\n private_species.extend(p)\n shared_species = difference(all_species, private_species)\n if return_ids:\n return set([shared_specie.serialize() for shared_specie in shared_species])\n return(shared_species)\n\n @staticmethod\n def sample_copy_num_from_concentration(species, volume, random_state):\n \"\"\" Provide the initial copy number of `species` from its specified value\n\n The initial copy 
number is sampled from a specified distribution whose mean is given\n in molecules or molarity.\n\n Args:\n species (:obj:`Species`): a `Species` instance; the `species.concentration.units` must\n be an instance of `unit_registry.Unit` and in `species.concentration.units.choices`\n volume (:obj:`float`): volume for calculating copy numbers\n random_state (:obj:`RandomState`): random state for sampling from distribution of initial\n concentrations\n\n Returns:\n :obj:`float`: the `species'` copy number\n\n Raises:\n :obj:`ValueError`: if the concentration uses illegal or unsupported units\n \"\"\"\n dist_conc = species.distribution_init_concentration\n if dist_conc is None:\n return 0\n else:\n if not are_terms_equivalent(dist_conc.distribution, onto['WC:normal_distribution']): # normal\n raise ValueError('Unsupported random distribution `{}`'.format(dist_conc.distribution.name))\n mean = dist_conc.mean\n std = dist_conc.std\n if numpy.isnan(std):\n std = mean / MEAN_TO_STD_DEV_RATIO\n conc = ModelUtilities.non_neg_normal_sample(random_state, mean, std)\n\n if not isinstance(dist_conc.units, unit_registry.Unit):\n raise ValueError('Unsupported unit type \"{}\"'.format(type(dist_conc.units)))\n units = unit_registry.parse_expression(str(dist_conc.units))\n\n try:\n scale = units.to(unit_registry.parse_units('molecule'))\n return scale.magnitude * conc\n except pint.DimensionalityError:\n pass\n \n try:\n scale = units.to(unit_registry.parse_units('M'))\n return scale.magnitude * conc * volume * Avogadro\n except pint.DimensionalityError as error:\n pass\n\n raise ValueError(\"Unsupported unit '{}'\".format(dist_conc.units))\n\n @staticmethod\n def get_species_types(species_ids):\n \"\"\" Get the specie types from an iterator that provides specie ids\n\n Deterministic -- that is, given a sequence of specie ids provided by `species_ids`\n will always return the same list of specie type ids\n\n Args:\n species_ids (:obj:`iterator`): an iterator that provides specie ids\n\n Returns:\n :obj:`list`: an iterator over the specie type ids in `species_ids`\n \"\"\"\n species_types = set()\n species_types_list = []\n for species_id in species_ids:\n species_type_id, _ = ModelUtilities.parse_species_id(species_id)\n if not species_type_id in species_types:\n species_types.add(species_type_id)\n species_types_list.append(species_type_id)\n return species_types_list\n\n @staticmethod\n def parse_species_id(species_id):\n \"\"\" Fast species id parser\n\n Args:\n species_id (:obj:`str`): species identifier\n\n Returns:\n :obj:`tuple` of (:obj:`str`, :obj:`str`): species type id, compartment id\n\n Raises:\n :obj:`ValueError`: if `species_id` is not of the form `species_type_id[compartment_id]`\n \"\"\"\n comp_start = species_id.find('[')\n if comp_start == -1 or comp_start == 0 or comp_start == len(species_id)-2 or species_id[-1] != ']':\n raise ValueError(f\"Species id format should be 'species_type_id[compartment_id]' but is '{species_id}'\")\n return species_id[0:comp_start], species_id[comp_start+1:-1]\n\n @staticmethod\n def non_neg_normal_sample(random_state, mean, std, max_iters=100):\n \"\"\" Obtain a non-negative sample from a normal distribution\n\n The distribution returned is 0 for x < 0 and normal for 0 <= x\n\n Args:\n random_state (:obj:`numpy.random.RandomState`): a random state\n mean (:obj:`float`): mean of the normal dist. to sample\n std (:obj:`float`): std of the normal dist. 
to sample\n max_iters (:obj:`int`, optional): maximum number of draws of the true normal distribution\n\n Returns:\n :obj:`float`: a normal sample that is not negative\n\n Raises:\n :obj:`ValueError`: if taking `max_iters` normal sample does not obtain one that is not negative\n \"\"\"\n iter = 0\n while True:\n sample = random_state.normal(mean, std)\n iter += 1\n if 0 <= sample:\n return sample\n if max_iters <= iter:\n raise ValueError(f\"{iter} draws of a normal dist. with mean {mean:.2E} and std {std:.2E} \"\n f\"fails to obtain a non-negative sample\")\n" ]
[ [ "numpy.isnan" ] ]
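A short usage sketch for the ModelUtilities helpers in the entry above; the species ids are illustrative, and only methods defined in the entry are used.

import numpy as np

# parse_species_id splits 'species_type_id[compartment_id]' into a tuple
assert ModelUtilities.parse_species_id('atp[c]') == ('atp', 'c')
# get_species_types de-duplicates type ids while preserving first-seen order
assert ModelUtilities.get_species_types(['atp[c]', 'adp[c]', 'atp[n]']) == ['atp', 'adp']
# non_neg_normal_sample redraws (up to max_iters times) until the sample is non-negative
rs = np.random.RandomState(0)
assert ModelUtilities.non_neg_normal_sample(rs, mean=10., std=2.) >= 0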
henchc/CLFL_2016
[ "b57fb2e532f218cc57b8434c0b0d8a702dcc35ed" ]
[ "CLFL_ngram.py" ]
[ "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom __future__ import division\nfrom __future__ import absolute_import\n\n# Created at UC Berkeley 2015\n# Authors: Christopher Hench\n# ==============================================================================\n'''This code trains and evaluates an n-gram tagger for MHG scansion based\non the paper presented at the NAACL-CLFL 2016 by Christopher Hench and\nAlex Estes.'''\n\nimport codecs\nimport itertools\nfrom itertools import chain\nimport nltk\nimport nltk.tag\nfrom nltk.tag import UnigramTagger\nfrom nltk.tag import BigramTagger\nfrom nltk.tag import TrigramTagger\nfrom nltk.tag import NgramTagger\nfrom CLFL_mdf_classification import classification_report, confusion_matrix\nfrom CLFL_mdf_classification import precision_recall_fscore_support\nfrom sklearn.preprocessing import LabelBinarizer\nimport sklearn\nimport pandas as pd\nimport re\nimport random\nimport numpy as np\n\n\ndef ngram_tagger(tagged_sents):\n # regex defaults: consonant + 'e' + consonant -> unstressed mora;\n # double vowels -> long; everything else -> stressed mora\n patterns = [\n (r'(b|c|d|f|g|h|j|k|l|m|n|p|q|r|s|t|v|w|x|z)e(b|c|d|f|g|h|j|k|l|m|n|p|q|r|s|t|v|w|x|z)',\n 'MORA'),\n (r'.*(a|e|i|o|u|ä|î|ô|ü)(a|e|i|o|u|ä|î|ô|ü)', 'DOPPEL'),\n (r'.*', 'MORA_HAUPT')] # default\n regex_tagger = nltk.RegexpTagger(patterns)\n\n tagger1 = UnigramTagger(tagged_sents, backoff=regex_tagger)\n # cutoff = 3, if necessary\n tagger2 = BigramTagger(tagged_sents, backoff=tagger1)\n tagger3 = TrigramTagger(tagged_sents, backoff=tagger2)\n\n return tagger3\n\n\nwith open(\"Data/CLFL_dev-data.txt\", 'r', encoding='utf-8') as f:\n tagged = f.read() # text must be clean\n\ntagged = tagged.split('\\n')\n\nnewlines = []\nfor line in tagged:\n nl = \"BEGL/BEGL WBY/WBY \" + line + \" WBY/WBY ENDL/ENDL\"\n newlines.append(nl)\n\nftuples = []\nfor line in newlines:\n news = [nltk.tag.str2tuple(t) for t in line.split()]\n if len(news) > 0:\n ftuples.append(news)\n\n\n# for 10 fold validation\nnum_folds = 10\nsubset_size = int(len(ftuples) / num_folds)\nrand_all = random.sample(range(0, len(ftuples)), len(ftuples))\ntest_inds = [rand_all[x:x + subset_size]\n for x in range(0, len(rand_all), subset_size)]\n\nfor i, inds in enumerate(test_inds):\n\n test_inds = inds\n train_inds = list(set(range(0, len(ftuples))) - set(test_inds))\n\n test_lines = []\n train_lines = []\n\n for x in test_inds:\n test_lines.append(ftuples[x])\n\n for x in train_inds:\n train_lines.append(ftuples[x])\n\n tagger = ngram_tagger(train_lines)\n\n # get report\n def bio_classification_report(y_true, y_pred):\n \"\"\"\n Classification report for a list of BIO-encoded sequences.\n It computes token-level metrics and discards \"O\" labels.\n\n Note that it requires scikit-learn 0.15+ (or a version from\n github master) to calculate averages properly!\n \"\"\"\n lb = LabelBinarizer()\n y_true_combined = lb.fit_transform(list(chain.from_iterable(y_true)))\n y_pred_combined = lb.transform(list(chain.from_iterable(y_pred)))\n\n tagset = set(lb.classes_) - {'O'}\n tagset = sorted(tagset, key=lambda tag: tag.split('-', 1)[::-1])\n class_indices = {cls: idx for idx, cls in enumerate(lb.classes_)}\n\n labs = [class_indices[cls] for cls in tagset]\n\n return((precision_recall_fscore_support(y_true_combined,\n y_pred_combined,\n labels=labs,\n average=None,\n sample_weight=None)),\n (classification_report(\n y_true_combined,\n y_pred_combined,\n labels=[class_indices[cls] for cls in tagset],\n 
target_names=tagset,\n )), labs)\n\n take_out = [\"BEGL\", \"ENDL\", \"WBY\"]\n\n def y_test_f(tagged_sents):\n return [[tag for (word, tag) in line if tag not in take_out]\n for line in tagged_sents] # list of all the tags\n\n def y_pred_f(tagger, corpus):\n # notice we first untag the sentence\n return [tagger.tag(nltk.tag.untag(sent)) for sent in corpus]\n\n y_test = y_test_f(test_lines)\n y_pred = y_test_f(y_pred_f(tagger, test_lines))\n\n bioc = bio_classification_report(y_test, y_pred)\n\n # to parse\n p, r, f1, s = bioc[0]\n\n tot_avgs = []\n\n for v in (np.average(p, weights=s),\n np.average(r, weights=s),\n np.average(f1, weights=s)):\n tot_avgs.append(v)\n\n toext = [0] * (len(s) - 3)\n tot_avgs.extend(toext)\n\n all_s = [sum(s)] * len(s)\n\n rep = bioc[1]\n all_labels = []\n\n for word in rep.split():\n if word.isupper():\n all_labels.append(word)\n\n ext_labels = [\n \"DOPPEL\",\n \"EL\",\n \"HALB\",\n \"HALB_HAUPT\",\n \"HALB_NEBEN\",\n \"MORA\",\n \"MORA_HAUPT\",\n \"MORA_NEBEN\"]\n abs_labels = [l for l in ext_labels if l not in all_labels]\n\n # print(bio_classification_report(y_test, y_pred)[1])\n\n data = {\n \"labels\": all_labels,\n \"precision\": p,\n \"recall\": r,\n \"f1\": f1,\n \"support\": s,\n \"tots\": tot_avgs,\n \"all_s\": all_s}\n\n df = pd.DataFrame(data)\n\n if len(abs_labels) > 0:\n if \"HALB_NEBEN\" in abs_labels:\n line = pd.DataFrame({\"labels\": \"HALB_NEBEN\",\n \"precision\": 0,\n \"recall\": 0,\n \"f1\": 0,\n \"support\": 0,\n \"tots\": 0,\n \"all_s\": 0},\n index=[4])\n df = pd.concat([df.ix[:3], line, df.ix[4:]]).reset_index(drop=True)\n if \"EL\" in abs_labels:\n line = pd.DataFrame({\"labels\": \"EL\",\n \"precision\": 0,\n \"recall\": 0,\n \"f1\": 0,\n \"support\": 0,\n \"tots\": 0,\n \"all_s\": 0},\n index=[1])\n df = pd.concat([df.ix[0], line, df.ix[1:]]).reset_index(drop=True)\n\n df[\"w_p\"] = df.precision * df.support\n df[\"w_r\"] = df.recall * df.support\n df[\"w_f1\"] = df.f1 * df.support\n df[\"w_tots\"] = df.tots * df.all_s\n\n # to add and average cross validation\n if i != 0:\n df_all = df_all.add(df, axis=\"labels\", fill_value=0)\n else:\n df_all = df\n\n print(\"Fold \" + str(i) + \" complete.\\n\")\n\n\ndf_all[\"p_AVG\"] = df_all.w_p / df_all.support\ndf_all[\"r_AVG\"] = df_all.w_r / df_all.support\ndf_all[\"f1_AVG\"] = df_all.w_f1 / df_all.support\ndf_all[\"tots_AVG\"] = df_all.w_tots / df_all.all_s\n\ndf_all = df_all.drop(\"f1\", 1)\ndf_all = df_all.drop(\"precision\", 1)\ndf_all = df_all.drop(\"recall\", 1)\ndf_all = df_all.drop(\"tots\", 1)\ndf_all = df_all.drop(\"w_p\", 1)\ndf_all = df_all.drop(\"w_r\", 1)\ndf_all = df_all.drop(\"w_f1\", 1)\ndf_all = df_all.drop(\"w_tots\", 1)\n\n\nprint(df_all)\n" ]
[ [ "numpy.average", "pandas.DataFrame", "sklearn.preprocessing.LabelBinarizer", "pandas.concat" ] ]
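The regex -> unigram -> bigram -> trigram backoff chain built by ngram_tagger in the entry above can be illustrated on toy data; the tokens and tags below are made up, and only standard nltk taggers are used.

import nltk
from nltk.tag import UnigramTagger, BigramTagger, TrigramTagger

train = [[('der', 'MORA'), ('vogel', 'MORA_HAUPT')]]        # toy tagged "line"
regex_tagger = nltk.RegexpTagger([(r'.*', 'MORA_HAUPT')])   # catch-all default
t1 = UnigramTagger(train, backoff=regex_tagger)
t3 = TrigramTagger(train, backoff=BigramTagger(train, backoff=t1))
# unseen tokens fall through trigram -> bigram -> unigram -> regex default
print(t3.tag(['der', 'unbekannt']))  # [('der', 'MORA'), ('unbekannt', 'MORA_HAUPT')]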
zhiyu1998/JavaStudyXmind
[ "80a10f52ee8aacc247bcf3e295777cac644528d4", "80a10f52ee8aacc247bcf3e295777cac644528d4" ]
[ "Deep-Learning-Notes/9 GoogleNet/predict.py", "Deep-Learning-Notes/18 Vision Transformer/model.py" ]
[ "import torch\nfrom model import GoogleNet\nfrom PIL import Image\nfrom torchvision import transforms\nimport matplotlib.pyplot as plt\nimport json\n\nimport os\nos.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\ndata_transform = transforms.Compose(\n [\n transforms.Resize((224, 224)),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ]\n)\n\n# load image\nimg = Image.open('./2.jpg')\nplt.imshow(img)\n# [N, C, H, W]\nimg = data_transform(img)\n# expand batch dimension\n# unsqueeze: Returns a new tensor with a dimension of size one inserted at the specified position.\nimg = torch.unsqueeze(img, dim=0)\n\ntry:\n json_file = open('./class_indices.json', 'r')\n class_indict = json.load(json_file)\nexcept Exception as e:\n print(e)\n exit(-1)\n\nmodel = GoogleNet(num_classes=5, aux_logits=False).to(device)\n\nmodel_weight_path = './GoogleNet.pth'\nmissing_keys, unexpected_keys = model.load_state_dict(torch.load(model_weight_path, map_location=device), strict=False)\nmodel.eval()\nwith torch.no_grad():\n # squeeze: Returns a tensor with all the dimensions of input of size 1 removed.\n output = torch.squeeze(model(img.to(device))).cpu()\n predict = torch.softmax(output, dim=0)\n # argmax: Returns the indices of the maximum value of all elements in the input tensor.\n predict_cla = torch.argmax(predict).numpy()\nprint(class_indict[str(predict_cla)], predict[predict_cla].item())\nplt.show()\n", "import torch\nimport torch.nn as nn\nimport collections.abc\n\nfrom itertools import repeat\nfrom functools import partial\nfrom collections import OrderedDict\n\ndef drop_path(x, drop_prob: float = 0., training: bool = False):\n \"\"\"Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).\n This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,\n the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...\n See discussion: https://github.com/tensorflow/tpu/issues/494 \n changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use\n 'survival rate' as the argument.\n \"\"\"\n if drop_prob == 0. 
or not training:\n return x\n keep_prob = 1 - drop_prob\n shape = (x.shape[0],) + (1,) * (x.ndim - 1) \n random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)\n random_tensor.floor_() \n output = x.div(keep_prob) * random_tensor\n return output\n\n\nclass DropPath(nn.Module):\n \"\"\"Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).\n \"\"\"\n\n def __init__(self, drop_prob=None):\n super(DropPath, self).__init__()\n self.drop_prob = drop_prob\n\n def forward(self, x):\n return drop_path(x, self.drop_prob, self.training)\n\n\ndef _ntuple(n):\n def parse(x):\n if isinstance(x, collections.abc.Iterable):\n return x\n return tuple(repeat(x, n))\n\n return parse\n\n\nto_2tuple = _ntuple(2)\n\n\nclass PatchEmbed(nn.Module):\n \"\"\" 2D Image to Patch Embedding\n \"\"\"\n\n def __init__(self, img_size=224, patch_size=16, in_c=3, embed_dim=768, norm_layer=None, flatten=True):\n super().__init__()\n # Example (flower dataset), passed in: [img_size: 224, patch_size: 16, in_c=3, embed_dim: 768, norm_layer: None, flatten: True]\n img_size = to_2tuple(img_size) # (224, 224)\n patch_size = to_2tuple(patch_size) # (16, 16)\n self.img_size = img_size\n self.patch_size = patch_size\n self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1]) # grid_size: (14, 14)\n self.num_patches = self.grid_size[0] * self.grid_size[1] # 196\n self.flatten = flatten\n\n self.proj = nn.Conv2d(in_c, embed_dim, kernel_size=patch_size, stride=patch_size) # Conv2d(3, 768, kernel_size=(16, 16), stride=(16, 16))\n self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()\n\n def forward(self, x):\n # x: Tensor(16, 3, 224, 224)\n # H: 224 W:224 C:3 B: 16\n B, C, H, W = x.shape\n assert H == self.img_size[0] and W == self.img_size[1], \\\n f\"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).\"\n\n x = self.proj(x) # Tensor (16, 768, 14, 14)\n if self.flatten:\n # flatten (flattens starting at the given dim) -> x: Tensor(16, 768, 196) [B, C, H, W] -> [B, C, HW]\n # transpose (swaps the two dims) -> x: Tensor(16, 196, 768) [B, C, HW] -> [B, HW, C]\n x = x.flatten(2).transpose(1, 2)\n x = self.norm(x)\n return x\n\n\nclass Attention(nn.Module):\n def __init__(self,\n dim, \n num_heads=8,\n qkv_bias=False,\n qk_scale=None,\n attn_drop=0.,\n proj_drop=0.):\n super().__init__()\n # Flower dataset example: dim: 768 num_heads: 12\n self.num_heads = num_heads # 12\n head_dim = dim // num_heads # 64\n self.scale = qk_scale or head_dim ** -0.5 # 0.125\n\n self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) # Linear(in_features=768, out_features=2304, bias=True)\n self.attn_drop = nn.Dropout(attn_drop)\n self.proj = nn.Linear(dim, dim) \n self.proj_drop = nn.Dropout(proj_drop)\n\n def forward(self, x):\n # [batch_size, num_patches + 1, total_embed_dim]\n B, N, C = x.shape\n\n # qkv(): -> [batch_size, num_patches + 1, 3 * total_embed_dim]\n # reshape: -> [batch_size, num_patches + 1, 3, num_heads, embed_dim_per_head]\n # permute: -> [3, batch_size, num_heads, num_patches + 1, embed_dim_per_head]\n qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\n # [batch_size, num_heads, num_patches + 1, embed_dim_per_head]\n q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)\n\n # transpose: -> [batch_size, num_heads, embed_dim_per_head, num_patches + 1]\n # @: multiply -> [batch_size, num_heads, num_patches + 1, num_patches + 1]\n attn = (q @ k.transpose(-2, -1)) * self.scale\n attn = attn.softmax(dim=-1)\n attn = 
self.attn_drop(attn)\n\n # @: multiply -> [batch_size, num_heads, num_patches + 1, embed_dim_per_head]\n # transpose: -> [batch_size, num_patches + 1, num_heads, embed_dim_per_head]\n # reshape: -> [batch_size, num_patches + 1, total_embed_dim]\n x = (attn @ v).transpose(1, 2).reshape(B, N, C)\n x = self.proj(x)\n x = self.proj_drop(x)\n return x\n\n# multi-layer perceptron\nclass Mlp(nn.Module):\n \"\"\" MLP as used in Vision Transformer, MLP-Mixer and related networks\n \"\"\"\n def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):\n super().__init__()\n out_features = out_features or in_features\n hidden_features = hidden_features or in_features\n self.fc1 = nn.Linear(in_features, hidden_features) # Linear(in_features=768, out_features=3072, bias=True)\n self.act = act_layer()\n self.fc2 = nn.Linear(hidden_features, out_features) # Linear(in_features=3072, out_features=768, bias=True)\n self.drop = nn.Dropout(drop)\n\n def forward(self, x):\n x = self.fc1(x) # (16, 197, 3072)\n x = self.act(x) # (16, 197, 3072)\n x = self.drop(x) # (16, 197, 3072)\n x = self.fc2(x) # (16, 197, 768)\n x = self.drop(x) # (16, 197, 768)\n return x\n\n# Encoder Block\nclass Block(nn.Module):\n\n def __init__(self,\n dim,\n num_heads,\n mlp_ratio=4.,\n qkv_bias=False,\n qk_scale=None,\n drop=0.,\n attn_drop=0.,\n drop_path=0.,\n act_layer=nn.GELU,\n norm_layer=nn.LayerNorm):\n super().__init__()\n # Flower images example: dim: 768 num_heads:12 mlp_ratio:4 qkv_bias:True qkv_scale:None drop:0.0 attn_drop:0.0 drop_path:0.0\n self.norm1 = norm_layer(dim)\n self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)\n \n self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()\n self.norm2 = norm_layer(dim)\n # feature -> 2304\n\n mlp_hidden_dim = int(dim * mlp_ratio) # 3072 (dim(768) * 4)\n self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)\n # feature -> 3072\n\n def forward(self, x):\n x = x + self.drop_path(self.attn(self.norm1(x)))\n x = x + self.drop_path(self.mlp(self.norm2(x)))\n return x\n\nclass VisionTransformer(nn.Module):\n def __init__(self, img_size=224,\n patch_size=16,\n in_c=3,\n num_classes=1000,\n embed_dim=768,\n depth=12,\n num_heads=12,\n mlp_ratio=4.0,\n qkv_bias=True,\n qk_scale=None,\n representation_size=None,\n distilled=False,\n drop_ratio=0.,\n attn_drop_ratio=0.,\n drop_path_ratio=0.,\n embed_layer=PatchEmbed,\n norm_layer=None,\n act_layer=None):\n \"\"\"\n Args:\n img_size (int, tuple): input image size\n patch_size (int, tuple): patch size\n in_c (int): number of input channels\n num_classes (int): number of classes for classification head\n embed_dim (int): embedding dimension\n depth (int): depth of transformer\n num_heads (int): number of attention heads\n mlp_ratio (int): ratio of mlp hidden dim to embedding dim\n qkv_bias (bool): enable bias for qkv if True\n qk_scale (float): override default qk scale of head_dim ** -0.5 if set\n representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set\n distilled (bool): model includes a distillation token and head as in DeiT models (kept only for compatibility with DeiT; can be ignored here)\n drop_ratio (float): dropout rate\n attn_drop_ratio (float): attention dropout rate\n drop_path_ratio (float): stochastic depth rate\n embed_layer (nn.Module): patch embedding layer\n norm_layer: (nn.Module): normalization layer\n \"\"\"\n super(VisionTransformer, self).__init__()\n 
self.num_classes = num_classes\n self.num_features = self.embed_dim = embed_dim \n self.num_tokens = 2 if distilled else 1 \n norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)\n act_layer = act_layer or nn.GELU\n\n self.patch_embed = embed_layer(img_size=img_size, patch_size=patch_size, in_c=in_c, embed_dim=embed_dim)\n num_patches = self.patch_embed.num_patches\n\n # cls_token_shape: [1, 1, 768]\n self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))\n self.dist_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if distilled else None\n self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim)) \n self.pos_drop = nn.Dropout(p=drop_ratio)\n\n dpr = [x.item() for x in torch.linspace(0, drop_path_ratio, depth)] \n self.blocks = nn.Sequential(*[\n Block(dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,\n drop=drop_ratio, attn_drop=attn_drop_ratio, drop_path=dpr[i],\n norm_layer=norm_layer, act_layer=act_layer)\n for i in range(depth)\n ])\n self.norm = norm_layer(embed_dim)\n\n \n if representation_size and not distilled:\n self.has_logits = True\n self.num_features = representation_size\n self.pre_logits = nn.Sequential(OrderedDict([\n (\"fc\", nn.Linear(embed_dim, representation_size)),\n (\"act\", nn.Tanh())\n ]))\n else:\n self.has_logits = False\n self.pre_logits = nn.Identity()\n\n \n self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()\n self.head_dist = None\n if distilled:\n self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()\n\n \n nn.init.trunc_normal_(self.pos_embed, std=0.02)\n if self.dist_token is not None:\n nn.init.trunc_normal_(self.dist_token, std=0.02)\n\n nn.init.trunc_normal_(self.cls_token, std=0.02)\n self.apply(_init_vit_weights)\n\n def forward_features(self, x):\n \n x = self.patch_embed(x) \n # x.shape[16, 196, 768]\n # cls_token [16, 1, 768]\n cls_token = self.cls_token.expand(x.shape[0], -1, -1)\n if self.dist_token is None:\n x = torch.cat((cls_token, x), dim=1) \n else:\n x = torch.cat((cls_token, self.dist_token.expand(x.shape[0], -1, -1), x), dim=1)\n # x.shape[16, 197, 768]\n x = self.pos_drop(x + self.pos_embed)\n x = self.blocks(x)\n x = self.norm(x)\n if self.dist_token is None:\n return self.pre_logits(x[:, 0])\n else:\n return x[:, 0], x[:, 1]\n\n def forward(self, x):\n x = self.forward_features(x)\n if self.head_dist is not None:\n x, x_dist = self.head(x[0]), self.head_dist(x[1])\n if self.training and not torch.jit.is_scripting():\n \n return x, x_dist\n else:\n return (x + x_dist) / 2\n else:\n x = self.head(x)\n return x\n\n\ndef _init_vit_weights(m):\n \"\"\"\n ViT weight initialization\n :param m: module\n \"\"\"\n if isinstance(m, nn.Linear):\n nn.init.trunc_normal_(m.weight, std=.01)\n if m.bias is not None:\n nn.init.zeros_(m.bias)\n elif isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode=\"fan_out\")\n if m.bias is not None:\n nn.init.zeros_(m.bias)\n elif isinstance(m, nn.LayerNorm):\n nn.init.zeros_(m.bias)\n nn.init.ones_(m.weight)\n\ndef vit_base_patch16_224_in21k(num_classes: int = 21843, has_logits: bool = True):\n \"\"\"\n ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).\n ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.\n weights ported from official Google JAX impl:\n 
https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_patch16_224_in21k-e5005f0a.pth\n \"\"\"\n model = VisionTransformer(img_size=224,\n patch_size=16,\n embed_dim=768,\n depth=12,\n num_heads=12,\n representation_size=768 if has_logits else None,\n num_classes=num_classes)\n return model\n\n\ndef vit_base_patch32_224_in21k(num_classes: int = 21843, has_logits: bool = True):\n \"\"\"\n ViT-Base model (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929).\n ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.\n weights ported from official Google JAX impl:\n https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_patch32_224_in21k-8db57226.pth\n \"\"\"\n model = VisionTransformer(img_size=224,\n patch_size=32,\n embed_dim=768,\n depth=12,\n num_heads=12,\n representation_size=768 if has_logits else None,\n num_classes=num_classes)\n return model\n\n\ndef vit_large_patch16_224_in21k(num_classes: int = 21843, has_logits: bool = True):\n \"\"\"\n ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929).\n ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.\n weights ported from official Google JAX impl:\n https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_patch16_224_in21k-606da67d.pth\n \"\"\"\n model = VisionTransformer(img_size=224,\n patch_size=16,\n embed_dim=1024,\n depth=24,\n num_heads=16,\n representation_size=1024 if has_logits else None,\n num_classes=num_classes)\n return model\n\n\ndef vit_large_patch32_224_in21k(num_classes: int = 21843, has_logits: bool = True):\n \"\"\"\n ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929).\n ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.\n weights ported from official Google JAX impl:\n https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_patch32_224_in21k-9046d2e7.pth\n \"\"\"\n model = VisionTransformer(img_size=224,\n patch_size=32,\n embed_dim=1024,\n depth=24,\n num_heads=16,\n representation_size=1024 if has_logits else None,\n num_classes=num_classes)\n return model\n\n\ndef vit_huge_patch14_224_in21k(num_classes: int = 21843, has_logits: bool = True):\n \"\"\"\n ViT-Huge model (ViT-H/14) from original paper (https://arxiv.org/abs/2010.11929).\n ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.\n NOTE: converted weights not currently available, too large for github release hosting.\n \"\"\"\n model = VisionTransformer(img_size=224,\n patch_size=14,\n embed_dim=1280,\n depth=32,\n num_heads=16,\n representation_size=1280 if has_logits else None,\n num_classes=num_classes)\n return model" ]
[ [ "torch.no_grad", "torch.softmax", "torch.unsqueeze", "torch.cuda.is_available", "torch.load", "matplotlib.pyplot.show", "torch.argmax", "matplotlib.pyplot.imshow" ], [ "torch.nn.Linear", "torch.rand", "torch.nn.Dropout", "torch.nn.Identity", "torch.zeros", "torch.cat", "torch.nn.init.trunc_normal_", "torch.nn.Tanh", "torch.nn.init.kaiming_normal_", "torch.linspace", "torch.nn.init.ones_", "torch.nn.Conv2d", "torch.jit.is_scripting", "torch.nn.init.zeros_" ] ]
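A quick shape check for the VisionTransformer entry above (weights are randomly initialized here; the builder vit_base_patch16_224_in21k is defined in the entry):

import torch

model = vit_base_patch16_224_in21k(num_classes=5, has_logits=False)
x = torch.rand(1, 3, 224, 224)   # 224/16 = 14, so 14*14 = 196 patches + 1 cls token
with torch.no_grad():
    print(model(x).shape)        # torch.Size([1, 5])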
hofaflo/mne-python
[ "041068d510cc746b0a994609a59213f279be0c81" ]
[ "mne/io/meas_info.py" ]
[ "# -*- coding: utf-8 -*-\n# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>\n# Matti Hämäläinen <msh@nmr.mgh.harvard.edu>\n# Teon Brooks <teon.brooks@gmail.com>\n# Stefan Appelhoff <stefan.appelhoff@mailbox.org>\n#\n# License: BSD-3-Clause\n\nfrom collections import Counter, OrderedDict\nimport contextlib\nfrom copy import deepcopy\nimport datetime\nfrom io import BytesIO\nimport operator\nfrom textwrap import shorten\n\nimport numpy as np\n\nfrom .pick import (channel_type, pick_channels, pick_info,\n get_channel_type_constants, pick_types)\nfrom .constants import FIFF, _coord_frame_named\nfrom .open import fiff_open\nfrom .tree import dir_tree_find\nfrom .tag import (read_tag, find_tag, _ch_coord_dict, _update_ch_info_named,\n _rename_list)\nfrom .proj import (_read_proj, _write_proj, _uniquify_projs, _normalize_proj,\n _proj_equal, Projection)\nfrom .ctf_comp import _read_ctf_comp, write_ctf_comp\nfrom .write import (start_file, end_file, start_block, end_block,\n write_string, write_dig_points, write_float, write_int,\n write_coord_trans, write_ch_info, write_name_list,\n write_julian, write_float_matrix, write_id, DATE_NONE)\nfrom .proc_history import _read_proc_history, _write_proc_history\nfrom ..transforms import invert_transform, Transform, _coord_frame_name\nfrom ..utils import (logger, verbose, warn, object_diff, _validate_type,\n _stamp_to_dt, _dt_to_stamp, _pl, _is_numeric,\n _check_option, _on_missing, _check_on_missing, fill_doc)\nfrom ._digitization import (_format_dig_points, _dig_kind_proper, DigPoint,\n _dig_kind_rev, _dig_kind_ints, _read_dig_fif)\nfrom ._digitization import write_dig as _dig_write_dig\nfrom .compensator import get_current_comp\nfrom ..data.html_templates import info_template\n\nb = bytes # alias\n\n_SCALAR_CH_KEYS = ('scanno', 'logno', 'kind', 'range', 'cal', 'coil_type',\n 'unit', 'unit_mul', 'coord_frame')\n_ALL_CH_KEYS_SET = set(_SCALAR_CH_KEYS + ('loc', 'ch_name'))\n# XXX we need to require these except when doing simplify_info\n_MIN_CH_KEYS_SET = set(('kind', 'cal', 'unit', 'loc', 'ch_name'))\n\n\ndef _get_valid_units():\n \"\"\"Get valid units according to the International System of Units (SI).\n\n The International System of Units (SI, :footcite:`WikipediaSI`) is the\n default system for describing units in the Brain Imaging Data Structure\n (BIDS). For more information, see the BIDS specification\n :footcite:`BIDSdocs` and the appendix \"Units\" therein.\n\n References\n ----------\n .. footbibliography::\n \"\"\"\n valid_prefix_names = ['yocto', 'zepto', 'atto', 'femto', 'pico', 'nano',\n 'micro', 'milli', 'centi', 'deci', 'deca', 'hecto',\n 'kilo', 'mega', 'giga', 'tera', 'peta', 'exa',\n 'zetta', 'yotta']\n valid_prefix_symbols = ['y', 'z', 'a', 'f', 'p', 'n', u'µ', 'm', 'c', 'd',\n 'da', 'h', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']\n valid_unit_names = ['metre', 'kilogram', 'second', 'ampere', 'kelvin',\n 'mole', 'candela', 'radian', 'steradian', 'hertz',\n 'newton', 'pascal', 'joule', 'watt', 'coulomb', 'volt',\n 'farad', 'ohm', 'siemens', 'weber', 'tesla', 'henry',\n 'degree Celsius', 'lumen', 'lux', 'becquerel', 'gray',\n 'sievert', 'katal']\n valid_unit_symbols = ['m', 'kg', 's', 'A', 'K', 'mol', 'cd', 'rad', 'sr',\n 'Hz', 'N', 'Pa', 'J', 'W', 'C', 'V', 'F', u'Ω', 'S',\n 'Wb', 'T', 'H', u'°C', 'lm', 'lx', 'Bq', 'Gy', 'Sv',\n 'kat']\n\n # Valid units are all possible combinations of either prefix name or prefix\n # symbol together with either unit name or unit symbol. 
E.g., nV for\n # nanovolt\n valid_units = []\n valid_units += ([''.join([prefix, unit]) for prefix in valid_prefix_names\n for unit in valid_unit_names])\n valid_units += ([''.join([prefix, unit]) for prefix in valid_prefix_names\n for unit in valid_unit_symbols])\n valid_units += ([''.join([prefix, unit]) for prefix in valid_prefix_symbols\n for unit in valid_unit_names])\n valid_units += ([''.join([prefix, unit]) for prefix in valid_prefix_symbols\n for unit in valid_unit_symbols])\n\n # units are also valid without a prefix\n valid_units += valid_unit_names\n valid_units += valid_unit_symbols\n\n # we also accept \"n/a\" as a unit, which is the default missing value in\n # BIDS\n valid_units += [\"n/a\"]\n\n return tuple(valid_units)\n\n\n@verbose\ndef _unique_channel_names(ch_names, max_length=None, verbose=None):\n \"\"\"Ensure unique channel names.\"\"\"\n if max_length is not None:\n ch_names[:] = [name[:max_length] for name in ch_names]\n unique_ids = np.unique(ch_names, return_index=True)[1]\n if len(unique_ids) != len(ch_names):\n dups = {ch_names[x]\n for x in np.setdiff1d(range(len(ch_names)), unique_ids)}\n warn('Channel names are not unique, found duplicates for: '\n '%s. Applying running numbers for duplicates.' % dups)\n for ch_stem in dups:\n overlaps = np.where(np.array(ch_names) == ch_stem)[0]\n # We need an extra character since we append '-'.\n # np.ceil(...) is the maximum number of appended digits.\n if max_length is not None:\n n_keep = (\n max_length - 1 - int(np.ceil(np.log10(len(overlaps)))))\n else:\n n_keep = np.inf\n n_keep = min(len(ch_stem), n_keep)\n ch_stem = ch_stem[:n_keep]\n for idx, ch_idx in enumerate(overlaps):\n ch_name = ch_stem + '-%s' % idx\n if ch_name not in ch_names:\n ch_names[ch_idx] = ch_name\n else:\n raise ValueError('Adding a running number for a '\n 'duplicate resulted in another '\n 'duplicate name %s' % ch_name)\n\n return ch_names\n\n\nclass MontageMixin(object):\n \"\"\"Mixin for Montage setting.\"\"\"\n\n @verbose\n def set_montage(self, montage, match_case=True, match_alias=False,\n on_missing='raise', verbose=None):\n \"\"\"Set %(montage_types)s channel positions and digitization points.\n\n Parameters\n ----------\n %(montage)s\n %(match_case)s\n %(match_alias)s\n %(on_missing_montage)s\n %(verbose_meth)s\n\n Returns\n -------\n inst : instance of Raw | Epochs | Evoked\n The instance.\n\n Notes\n -----\n Operates in place.\n\n .. warning::\n Only %(montage_types)s channels can have their positions set using\n a montage. 
Other channel types (e.g., MEG channels) should have\n their positions defined properly using their data reading\n functions.\n \"\"\"\n # How to set up a montage for an old named fif file (walk-through example)\n # https://gist.github.com/massich/f6a9f4799f1fbeb8f5e8f8bc7b07d3df\n\n from ..channels.montage import _set_montage\n info = self if isinstance(self, Info) else self.info\n _set_montage(info, montage, match_case, match_alias, on_missing)\n return self\n\n\ndef _format_trans(obj, key):\n try:\n t = obj[key]\n except KeyError:\n pass\n else:\n if t is not None:\n obj[key] = Transform(t['from'], t['to'], t['trans'])\n\n\ndef _check_ch_keys(ch, ci, name='info[\"chs\"]', check_min=True):\n ch_keys = set(ch)\n bad = sorted(ch_keys.difference(_ALL_CH_KEYS_SET))\n if bad:\n raise KeyError(\n f'key{_pl(bad)} errantly present for {name}[{ci}]: {bad}')\n if check_min:\n bad = sorted(_MIN_CH_KEYS_SET.difference(ch_keys))\n if bad:\n raise KeyError(\n f'key{_pl(bad)} missing for {name}[{ci}]: {bad}',)\n\n\nclass Info(dict, MontageMixin):\n \"\"\"Measurement information.\n\n This data structure behaves like a dictionary. It contains all metadata\n that is available for a recording. However, its keys are restricted to\n those provided by the\n `FIF format specification <https://github.com/mne-tools/fiff-constants>`__,\n so new entries should not be manually added.\n\n .. warning:: The only entries that should be manually changed by the user\n are ``info['bads']`` and ``info['description']``. All other\n entries should be considered read-only, though they can be\n modified by various MNE-Python functions or methods (which\n have safeguards to ensure all fields remain in sync).\n\n .. warning:: This class should not be instantiated directly. To create a\n measurement information structure, use\n :func:`mne.create_info`.\n\n Parameters\n ----------\n *args : list\n Arguments.\n **kwargs : dict\n Keyword arguments.\n\n Attributes\n ----------\n acq_pars : str | None\n MEG system acquisition parameters.\n See :class:`mne.AcqParserFIF` for details.\n acq_stim : str | None\n MEG system stimulus parameters.\n bads : list of str\n List of bad (noisy/broken) channels, by name. These channels will by\n default be ignored by many processing steps.\n ch_names : list of str\n The names of the channels.\n chs : list of dict\n A list of channel information dictionaries, one per channel.\n See Notes for more information.\n comps : list of dict\n CTF software gradient compensation data.\n See Notes for more information.\n ctf_head_t : dict | None\n The transformation from 4D/CTF head coordinates to Neuromag head\n coordinates. This is only present in 4D/CTF data.\n custom_ref_applied : int\n Whether a custom (=other than average) reference has been applied to\n the EEG data. This flag is checked by some algorithms that require an\n average reference to be set.\n description : str | None\n String description of the recording.\n dev_ctf_t : dict | None\n The transformation from device coordinates to 4D/CTF head coordinates.\n This is only present in 4D/CTF data.\n dev_head_t : dict | None\n The device to head transformation.\n dig : list of dict | None\n The Polhemus digitization data in head coordinates.\n See Notes for more information.\n events : list of dict\n Event list, sometimes extracted from the stim channels by Neuromag\n systems. 
In general this should not be used and\n :func:`mne.find_events` should be used for event processing.\n See Notes for more information.\n experimenter : str | None\n Name of the person who ran the experiment.\n file_id : dict | None\n The FIF globally unique ID. See Notes for more information.\n highpass : float\n Highpass corner frequency in Hertz. Zero indicates a DC recording.\n hpi_meas : list of dict\n HPI measurements that were taken at the start of the recording\n (e.g. coil frequencies).\n See Notes for details.\n hpi_results : list of dict\n Head position indicator (HPI) digitization points and fit information\n (e.g., the resulting transform).\n See Notes for details.\n hpi_subsystem : dict | None\n Information about the HPI subsystem that was used (e.g., event\n channel used for cHPI measurements).\n See Notes for details.\n line_freq : float | None\n Frequency of the power line in Hertz.\n gantry_angle : float | None\n Tilt angle of the gantry in degrees.\n lowpass : float\n Lowpass corner frequency in Hertz.\n It is automatically set to half the sampling rate if there is\n otherwise no low-pass applied to the data.\n meas_date : datetime\n The time (UTC) of the recording.\n\n .. versionchanged:: 0.20\n This is stored as a :class:`~python:datetime.datetime` object\n instead of a tuple of seconds/microseconds.\n utc_offset : str\n UTC offset of related meas_date (sHH:MM).\n\n .. versionadded:: 0.19\n meas_id : dict | None\n The ID assigned to this measurement by the acquisition system or\n during file conversion. Follows the same format as ``file_id``.\n nchan : int\n Number of channels.\n proc_history : list of dict\n The MaxFilter processing history.\n See Notes for details.\n proj_id : int | None\n ID number of the project the experiment belongs to.\n proj_name : str | None\n Name of the project the experiment belongs to.\n projs : list of Projection\n List of SSP operators that operate on the data.\n See :class:`mne.Projection` for details.\n sfreq : float\n Sampling frequency in Hertz.\n subject_info : dict | None\n Information about the subject.\n See Notes for details.\n device_info : dict | None\n Information about the acquisition device. See Notes for details.\n\n .. versionadded:: 0.19\n helium_info : dict | None\n Information about the device helium. See Notes for details.\n\n .. versionadded:: 0.19\n\n See Also\n --------\n mne.create_info\n\n Notes\n -----\n The following parameters have a nested structure.\n\n * ``chs`` list of dict:\n\n cal : float\n The calibration factor to bring the channels to physical\n units. Used in product with ``range`` to scale the data read\n from disk.\n ch_name : str\n The channel name.\n coil_type : int\n Coil type, e.g. ``FIFFV_COIL_MEG``.\n coord_frame : int\n The coordinate frame used, e.g. ``FIFFV_COORD_HEAD``.\n kind : int\n The kind of channel, e.g. ``FIFFV_EEG_CH``.\n loc : array, shape (12,)\n Channel location. For MEG this is the position plus the\n normal given by a 3x3 rotation matrix. 
For EEG this is the\n position followed by reference position (with 6 unused).\n The values are specified in device coordinates for MEG and in\n head coordinates for EEG channels, respectively.\n logno : int\n Logical channel number, conventions in the usage of this\n number vary.\n range : float\n The hardware-oriented part of the calibration factor.\n This should be only applied to the continuous raw data.\n Used in product with ``cal`` to scale data read from disk.\n scanno : int\n Scanning order number, starting from 1.\n unit : int\n The unit to use, e.g. ``FIFF_UNIT_T_M``.\n unit_mul : int\n Unit multipliers, most commonly ``FIFF_UNITM_NONE``.\n\n * ``comps`` list of dict:\n\n ctfkind : int\n CTF compensation grade.\n colcals : ndarray\n Column calibrations.\n mat : dict\n A named matrix dictionary (with entries \"data\", \"col_names\", etc.)\n containing the compensation matrix.\n rowcals : ndarray\n Row calibrations.\n save_calibrated : bool\n Were the compensation data saved in calibrated form.\n\n * ``dig`` list of dict:\n\n kind : int\n The kind of channel,\n e.g. ``FIFFV_POINT_EEG``, ``FIFFV_POINT_CARDINAL``.\n r : array, shape (3,)\n 3D position in m. and coord_frame.\n ident : int\n Number specifying the identity of the point.\n e.g. ``FIFFV_POINT_NASION`` if kind is ``FIFFV_POINT_CARDINAL``, or\n 42 if kind is ``FIFFV_POINT_EEG``.\n coord_frame : int\n The coordinate frame used, e.g. ``FIFFV_COORD_HEAD``.\n\n * ``events`` list of dict:\n\n channels : list of int\n Channel indices for the events.\n list : ndarray, shape (n_events * 3,)\n Events in triplets as number of samples, before, after.\n\n * ``file_id`` dict:\n\n version : int\n FIF format version, i.e. ``FIFFC_VERSION``.\n machid : ndarray, shape (2,)\n Unique machine ID, usually derived from the MAC address.\n secs : int\n Time in seconds.\n usecs : int\n Time in microseconds.\n\n * ``hpi_meas`` list of dict:\n\n creator : str\n Program that did the measurement.\n sfreq : float\n Sample rate.\n nchan : int\n Number of channels used.\n nave : int\n Number of averages used.\n ncoil : int\n Number of coils used.\n first_samp : int\n First sample used.\n last_samp : int\n Last sample used.\n hpi_coils : list of dict\n Coils, containing:\n\n number: int\n Coil number\n epoch : ndarray\n Buffer containing one epoch and channel.\n slopes : ndarray, shape (n_channels,)\n HPI data.\n corr_coeff : ndarray, shape (n_channels,)\n HPI curve fit correlations.\n coil_freq : float\n HPI coil excitation frequency\n\n * ``hpi_results`` list of dict:\n\n dig_points : list\n Digitization points (see ``dig`` definition) for the HPI coils.\n order : ndarray, shape (ncoil,)\n The determined digitization order.\n used : ndarray, shape (nused,)\n The indices of the used coils.\n moments : ndarray, shape (ncoil, 3)\n The coil moments.\n goodness : ndarray, shape (ncoil,)\n The goodness of fits.\n good_limit : float\n The goodness of fit limit.\n dist_limit : float\n The distance limit.\n accept : int\n Whether or not the fit was accepted.\n coord_trans : instance of Transformation\n The resulting MEG<->head transformation.\n\n * ``hpi_subsystem`` dict:\n\n ncoil : int\n The number of coils.\n event_channel : str\n The event channel used to encode cHPI status (e.g., STI201).\n hpi_coils : list of ndarray\n List of length ``ncoil``, each 4-element ndarray contains the\n event bits used on the event channel to indicate cHPI status\n (using the first element of these arrays is typically\n sufficient).\n\n * ``proc_history`` list of dict:\n\n 
block_id : dict\n See ``id`` above.\n date : ndarray, shape (2,)\n 2-element tuple of seconds and microseconds.\n experimenter : str\n Name of the person who ran the program.\n creator : str\n Program that did the processing.\n max_info : dict\n Maxwell filtering info, can contain:\n\n sss_info : dict\n SSS processing information.\n max_st : dict\n tSSS processing information.\n sss_ctc : dict\n Cross-talk processing information.\n sss_cal : dict\n Fine-calibration information.\n smartshield : dict\n MaxShield information. This dictionary is (always?) empty,\n but its presence implies that MaxShield was used during\n acquisition.\n\n * ``subject_info`` dict:\n\n id : int\n Integer subject identifier.\n his_id : str\n String subject identifier.\n last_name : str\n Last name.\n first_name : str\n First name.\n middle_name : str\n Middle name.\n birthday : tuple of int\n Birthday in (year, month, day) format.\n sex : int\n Subject sex (0=unknown, 1=male, 2=female).\n hand : int\n Handedness (1=right, 2=left, 3=ambidextrous).\n\n * ``device_info`` dict:\n\n type : str\n Device type.\n model : str\n Device model.\n serial : str\n Device serial.\n site : str\n Device site.\n\n * ``helium_info`` dict:\n\n he_level_raw : float\n Helium level (%) before position correction.\n helium_level : float\n Helium level (%) after position correction.\n orig_file_guid : str\n Original file GUID.\n meas_date : tuple of int\n The helium level meas date.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(Info, self).__init__(*args, **kwargs)\n # Deal with h5io writing things as dict\n for key in ('dev_head_t', 'ctf_head_t', 'dev_ctf_t'):\n _format_trans(self, key)\n for res in self.get('hpi_results', []):\n _format_trans(res, 'coord_trans')\n if self.get('dig', None) is not None and len(self['dig']):\n if isinstance(self['dig'], dict): # needs to be unpacked\n self['dig'] = _dict_unpack(self['dig'], _DIG_CAST)\n if not isinstance(self['dig'][0], DigPoint):\n self['dig'] = _format_dig_points(self['dig'])\n if isinstance(self.get('chs', None), dict):\n self['chs']['ch_name'] = [str(x) for x in np.char.decode(\n self['chs']['ch_name'], encoding='utf8')]\n self['chs'] = _dict_unpack(self['chs'], _CH_CAST)\n for pi, proj in enumerate(self.get('projs', [])):\n if not isinstance(proj, Projection):\n self['projs'][pi] = Projection(proj)\n # Old files could have meas_date as tuple instead of datetime\n try:\n meas_date = self['meas_date']\n except KeyError:\n pass\n else:\n self['meas_date'] = _ensure_meas_date_none_or_dt(meas_date)\n\n def copy(self):\n \"\"\"Copy the instance.\n\n Returns\n -------\n info : instance of Info\n The copied info.\n \"\"\"\n return deepcopy(self)\n\n def normalize_proj(self):\n \"\"\"(Re-)Normalize projection vectors after subselection.\n\n Applying projection after sub-selecting a set of channels that\n were originally used to compute the original projection vectors\n can be dangerous (e.g., if few channels remain, most power was\n in channels that are no longer picked, etc.). By default, mne\n will emit a warning when this is done.\n\n This function will re-normalize projectors to use only the\n remaining channels, thus avoiding that warning. 
Only use this\n function if you're confident that the projection vectors still\n adequately capture the original signal of interest.\n \"\"\"\n _normalize_proj(self)\n\n def __repr__(self):\n \"\"\"Summarize info instead of printing all.\"\"\"\n MAX_WIDTH = 68\n strs = ['<Info | %s non-empty values']\n non_empty = 0\n for k, v in self.items():\n if k == 'ch_names':\n if v:\n entr = shorten(', '.join(v), MAX_WIDTH, placeholder=' ...')\n else:\n entr = '[]' # always show\n non_empty -= 1 # don't count as non-empty\n elif k == 'bads':\n if v:\n entr = '{} items ('.format(len(v))\n entr += ', '.join(v)\n entr = shorten(entr, MAX_WIDTH, placeholder=' ...') + ')'\n else:\n entr = '[]' # always show\n non_empty -= 1 # don't count as non-empty\n elif k == 'projs':\n if v:\n entr = ', '.join(p['desc'] + ': o%s' %\n {0: 'ff', 1: 'n'}[p['active']] for p in v)\n entr = shorten(entr, MAX_WIDTH, placeholder=' ...')\n else:\n entr = '[]' # always show projs\n non_empty -= 1 # don't count as non-empty\n elif k == 'meas_date':\n if v is None:\n entr = 'unspecified'\n else:\n entr = v.strftime('%Y-%m-%d %H:%M:%S %Z')\n elif k == 'kit_system_id' and v is not None:\n from .kit.constants import KIT_SYSNAMES\n entr = '%i (%s)' % (v, KIT_SYSNAMES.get(v, 'unknown'))\n elif k == 'dig' and v is not None:\n counts = Counter(d['kind'] for d in v)\n counts = ['%d %s' % (counts[ii],\n _dig_kind_proper[_dig_kind_rev[ii]])\n for ii in _dig_kind_ints if ii in counts]\n counts = (' (%s)' % (', '.join(counts))) if len(counts) else ''\n entr = '%d item%s%s' % (len(v), _pl(len(v)), counts)\n elif isinstance(v, Transform):\n # show entry only for non-identity transform\n if not np.allclose(v[\"trans\"], np.eye(v[\"trans\"].shape[0])):\n frame1 = _coord_frame_name(v['from'])\n frame2 = _coord_frame_name(v['to'])\n entr = '%s -> %s transform' % (frame1, frame2)\n else:\n entr = ''\n elif k in ['sfreq', 'lowpass', 'highpass']:\n entr = '{:.1f} Hz'.format(v)\n elif isinstance(v, str):\n entr = shorten(v, MAX_WIDTH, placeholder=' ...')\n elif k == 'chs':\n ch_types = [channel_type(self, idx) for idx in range(len(v))]\n ch_counts = Counter(ch_types)\n entr = \"%s\" % ', '.join(\"%d %s\" % (count, ch_type.upper())\n for ch_type, count\n in ch_counts.items())\n elif k == 'custom_ref_applied':\n entr = str(bool(v))\n if not v:\n non_empty -= 1 # don't count if 0\n else:\n try:\n this_len = len(v)\n except TypeError:\n entr = '{}'.format(v) if v is not None else ''\n else:\n if this_len > 0:\n entr = ('%d item%s (%s)' % (this_len, _pl(this_len),\n type(v).__name__))\n else:\n entr = ''\n if entr != '':\n non_empty += 1\n strs.append('%s: %s' % (k, entr))\n st = '\\n '.join(sorted(strs))\n st += '\\n>'\n st %= non_empty\n return st\n\n def __deepcopy__(self, memodict):\n \"\"\"Make a deepcopy.\"\"\"\n result = Info.__new__(Info)\n for k, v in self.items():\n # chs is roughly half the time but most are immutable\n if k == 'chs':\n # dict shallow copy is fast, so use it then overwrite\n result[k] = list()\n for ch in v:\n ch = ch.copy() # shallow\n ch['loc'] = ch['loc'].copy()\n result[k].append(ch)\n elif k == 'ch_names':\n # we know it's list of str, shallow okay and saves ~100 µs\n result[k] = v.copy()\n elif k == 'hpi_meas':\n hms = list()\n for hm in v:\n hm = hm.copy()\n # the only mutable thing here is some entries in coils\n hm['hpi_coils'] = [coil.copy() for coil in hm['hpi_coils']]\n # There is a *tiny* risk here that someone could write\n # raw.info['hpi_meas'][0]['hpi_coils'][1]['epoch'] = ...\n # and assume that info.copy() 
will make an actual copy,\n # but copying these entries has a 2x slowdown penalty so\n # probably not worth it for such a deep corner case:\n # for coil in hpi_coils:\n # for key in ('epoch', 'slopes', 'corr_coeff'):\n # coil[key] = coil[key].copy()\n hms.append(hm)\n result[k] = hms\n else:\n result[k] = deepcopy(v, memodict)\n return result\n\n def _check_consistency(self, prepend_error=''):\n \"\"\"Do some self-consistency checks and datatype tweaks.\"\"\"\n missing = [bad for bad in self['bads'] if bad not in self['ch_names']]\n if len(missing) > 0:\n msg = '%sbad channel(s) %s marked do not exist in info'\n raise RuntimeError(msg % (prepend_error, missing,))\n meas_date = self.get('meas_date')\n if meas_date is not None:\n if (not isinstance(self['meas_date'], datetime.datetime) or\n self['meas_date'].tzinfo is None or\n self['meas_date'].tzinfo is not datetime.timezone.utc):\n raise RuntimeError('%sinfo[\"meas_date\"] must be a datetime '\n 'object in UTC or None, got %r'\n % (prepend_error, repr(self['meas_date']),))\n\n chs = [ch['ch_name'] for ch in self['chs']]\n if len(self['ch_names']) != len(chs) or any(\n ch_1 != ch_2 for ch_1, ch_2 in zip(self['ch_names'], chs)) or \\\n self['nchan'] != len(chs):\n raise RuntimeError('%sinfo channel name inconsistency detected, '\n 'please notify mne-python developers'\n % (prepend_error,))\n\n # make sure we have the proper datatypes\n for key in ('sfreq', 'highpass', 'lowpass'):\n if self.get(key) is not None:\n self[key] = float(self[key])\n\n # Ensure info['chs'] has immutable entries (copies much faster)\n for ci, ch in enumerate(self['chs']):\n _check_ch_keys(ch, ci)\n ch_name = ch['ch_name']\n if not isinstance(ch_name, str):\n raise TypeError(\n 'Bad info: info[\"chs\"][%d][\"ch_name\"] is not a string, '\n 'got type %s' % (ci, type(ch_name)))\n for key in _SCALAR_CH_KEYS:\n val = ch.get(key, 1)\n if not _is_numeric(val):\n raise TypeError(\n 'Bad info: info[\"chs\"][%d][%r] = %s is type %s, must '\n 'be float or int' % (ci, key, val, type(val)))\n loc = ch['loc']\n if not (isinstance(loc, np.ndarray) and loc.shape == (12,)):\n raise TypeError(\n 'Bad info: info[\"chs\"][%d][\"loc\"] must be ndarray with '\n '12 elements, got %r' % (ci, loc))\n\n # make sure channel names are unique\n self['ch_names'] = _unique_channel_names(self['ch_names'])\n for idx, ch_name in enumerate(self['ch_names']):\n self['chs'][idx]['ch_name'] = ch_name\n\n if 'filename' in self:\n warn('the \"filename\" key is misleading '\n 'and info should not have it')\n\n def _update_redundant(self):\n \"\"\"Update the redundant entries.\"\"\"\n self['ch_names'] = [ch['ch_name'] for ch in self['chs']]\n self['nchan'] = len(self['chs'])\n\n def pick_channels(self, ch_names, ordered=False):\n \"\"\"Pick channels from this Info object.\n\n Parameters\n ----------\n ch_names : list of str\n List of channels to keep. All other channels are dropped.\n ordered : bool\n If True (default False), ensure that the order of the channels\n matches the order of ``ch_names``.\n\n Returns\n -------\n info : instance of Info.\n The modified Info object.\n\n Notes\n -----\n Operates in-place.\n\n .. 
versionadded:: 0.20.0\n \"\"\"\n sel = pick_channels(self.ch_names, ch_names, exclude=[],\n ordered=ordered)\n return pick_info(self, sel, copy=False, verbose=False)\n\n @property\n def ch_names(self):\n return self['ch_names']\n\n def _repr_html_(self, caption=None):\n if isinstance(caption, str):\n html = f'<h4>{caption}</h4>'\n else:\n html = ''\n n_eeg = len(pick_types(self, meg=False, eeg=True))\n n_grad = len(pick_types(self, meg='grad'))\n n_mag = len(pick_types(self, meg='mag'))\n n_fnirs = len(pick_types(self, meg=False, eeg=False, fnirs=True))\n pick_eog = pick_types(self, meg=False, eog=True)\n if len(pick_eog) > 0:\n eog = ', '.join(np.array(self['ch_names'])[pick_eog])\n else:\n eog = 'Not available'\n pick_ecg = pick_types(self, meg=False, ecg=True)\n if len(pick_ecg) > 0:\n ecg = ', '.join(np.array(self['ch_names'])[pick_ecg])\n else:\n ecg = 'Not available'\n meas_date = self['meas_date']\n if meas_date is not None:\n meas_date = meas_date.strftime(\"%B %d, %Y %H:%M:%S\") + ' GMT'\n projs = self['projs']\n if projs:\n projs = '<br/>'.join(\n p['desc'] + ': o%s' % {0: 'ff', 1: 'n'}[p['active']]\n for p in projs\n )\n\n html += info_template.substitute(\n caption=caption, info=self, meas_date=meas_date, n_eeg=n_eeg,\n n_grad=n_grad, n_mag=n_mag, n_fnirs=n_fnirs, eog=eog, ecg=ecg,\n projs=projs)\n return html\n\n\ndef _simplify_info(info):\n \"\"\"Return a simplified info structure to speed up picking.\"\"\"\n chs = [{key: ch[key]\n for key in ('ch_name', 'kind', 'unit', 'coil_type', 'loc', 'cal')}\n for ch in info['chs']]\n sub_info = Info(chs=chs, bads=info['bads'], comps=info['comps'],\n projs=info['projs'],\n custom_ref_applied=info['custom_ref_applied'])\n sub_info._update_redundant()\n return sub_info\n\n\n@verbose\ndef read_fiducials(fname, verbose=None):\n \"\"\"Read fiducials from a fiff file.\n\n Parameters\n ----------\n fname : str\n The filename to read.\n %(verbose)s\n\n Returns\n -------\n pts : list of dicts\n List of digitizer points (each point in a dict).\n coord_frame : int\n The coordinate frame of the points (one of\n mne.io.constants.FIFF.FIFFV_COORD_...).\n \"\"\"\n fid, tree, _ = fiff_open(fname)\n with fid:\n isotrak = dir_tree_find(tree, FIFF.FIFFB_ISOTRAK)\n isotrak = isotrak[0]\n pts = []\n coord_frame = FIFF.FIFFV_COORD_HEAD\n for k in range(isotrak['nent']):\n kind = isotrak['directory'][k].kind\n pos = isotrak['directory'][k].pos\n if kind == FIFF.FIFF_DIG_POINT:\n tag = read_tag(fid, pos)\n pts.append(DigPoint(tag.data))\n elif kind == FIFF.FIFF_MNE_COORD_FRAME:\n tag = read_tag(fid, pos)\n coord_frame = tag.data[0]\n coord_frame = _coord_frame_named.get(coord_frame, coord_frame)\n\n # coord_frame is not stored in the tag\n for pt in pts:\n pt['coord_frame'] = coord_frame\n\n return pts, coord_frame\n\n\n@verbose\ndef write_fiducials(fname, pts, coord_frame=FIFF.FIFFV_COORD_UNKNOWN,\n verbose=None):\n \"\"\"Write fiducials to a fiff file.\n\n Parameters\n ----------\n fname : str\n Destination file name.\n pts : iterator of dict\n Iterator through digitizer points. Each point is a dictionary with\n the keys 'kind', 'ident' and 'r'.\n coord_frame : int\n The coordinate frame of the points (one of\n mne.io.constants.FIFF.FIFFV_COORD_...).\n %(verbose)s\n \"\"\"\n _dig_write_dig(fname, pts, coord_frame)\n\n\ndef write_dig(fname, pts, coord_frame=None):\n \"\"\"Write digitization data to a FIF file.\n\n Parameters\n ----------\n fname : str\n Destination file name.\n pts : iterator of dict\n Iterator through digitizer points. 
Each point is a dictionary with\n the keys 'kind', 'ident' and 'r'.\n coord_frame : int | str | None\n If all the points have the same coordinate frame, specify the type\n here. Can be None (default) if the points could have varying\n coordinate frames.\n \"\"\"\n return _dig_write_dig(fname, pts, coord_frame=coord_frame)\n\n\n@verbose\ndef read_info(fname, verbose=None):\n \"\"\"Read measurement info from a file.\n\n Parameters\n ----------\n fname : str\n File name.\n %(verbose)s\n\n Returns\n -------\n %(info_not_none)s\n \"\"\"\n f, tree, _ = fiff_open(fname)\n with f as fid:\n info = read_meas_info(fid, tree)[0]\n return info\n\n\ndef read_bad_channels(fid, node):\n \"\"\"Read bad channels.\n\n Parameters\n ----------\n fid : file\n The file descriptor.\n node : dict\n The node of the FIF tree that contains info on the bad channels.\n\n Returns\n -------\n bads : list\n A list of bad channels' names.\n \"\"\"\n return _read_bad_channels(fid, node, None)\n\n\ndef _read_bad_channels(fid, node, ch_names_mapping):\n ch_names_mapping = {} if ch_names_mapping is None else ch_names_mapping\n nodes = dir_tree_find(node, FIFF.FIFFB_MNE_BAD_CHANNELS)\n\n bads = []\n if len(nodes) > 0:\n for node in nodes:\n tag = find_tag(fid, node, FIFF.FIFF_MNE_CH_NAME_LIST)\n if tag is not None and tag.data is not None:\n bads = tag.data.split(':')\n bads[:] = _rename_list(bads, ch_names_mapping)\n return bads\n\n\n@verbose\ndef read_meas_info(fid, tree, clean_bads=False, verbose=None):\n \"\"\"Read the measurement info.\n\n Parameters\n ----------\n fid : file\n Open file descriptor.\n tree : tree\n FIF tree structure.\n clean_bads : bool\n If True, clean info['bads'] before running consistency check.\n Should only be needed for old files where we did not check bads\n before saving.\n %(verbose)s\n\n Returns\n -------\n %(info_not_none)s\n meas : dict\n Node in tree that contains the info.\n \"\"\"\n # Find the desired blocks\n meas = dir_tree_find(tree, FIFF.FIFFB_MEAS)\n if len(meas) == 0:\n raise ValueError('Could not find measurement data')\n if len(meas) > 1:\n raise ValueError('Cannot read more than 1 measurement data')\n meas = meas[0]\n\n meas_info = dir_tree_find(meas, FIFF.FIFFB_MEAS_INFO)\n if len(meas_info) == 0:\n raise ValueError('Could not find measurement info')\n if len(meas_info) > 1:\n raise ValueError('Cannot read more than 1 measurement info')\n meas_info = meas_info[0]\n\n # Read measurement info\n dev_head_t = None\n ctf_head_t = None\n dev_ctf_t = None\n meas_date = None\n utc_offset = None\n highpass = None\n lowpass = None\n nchan = None\n sfreq = None\n chs = []\n experimenter = None\n description = None\n proj_id = None\n proj_name = None\n line_freq = None\n gantry_angle = None\n custom_ref_applied = FIFF.FIFFV_MNE_CUSTOM_REF_OFF\n xplotter_layout = None\n kit_system_id = None\n for k in range(meas_info['nent']):\n kind = meas_info['directory'][k].kind\n pos = meas_info['directory'][k].pos\n if kind == FIFF.FIFF_NCHAN:\n tag = read_tag(fid, pos)\n nchan = int(tag.data)\n elif kind == FIFF.FIFF_SFREQ:\n tag = read_tag(fid, pos)\n sfreq = float(tag.data)\n elif kind == FIFF.FIFF_CH_INFO:\n tag = read_tag(fid, pos)\n chs.append(tag.data)\n elif kind == FIFF.FIFF_LOWPASS:\n tag = read_tag(fid, pos)\n if not np.isnan(tag.data):\n lowpass = float(tag.data)\n elif kind == FIFF.FIFF_HIGHPASS:\n tag = read_tag(fid, pos)\n if not np.isnan(tag.data):\n highpass = float(tag.data)\n elif kind == FIFF.FIFF_MEAS_DATE:\n tag = read_tag(fid, pos)\n meas_date = tuple(tag.data)\n if len(meas_date) == 1: 
# can happen from old C conversions\n meas_date = (meas_date[0], 0)\n elif kind == FIFF.FIFF_UTC_OFFSET:\n tag = read_tag(fid, pos)\n utc_offset = str(tag.data)\n elif kind == FIFF.FIFF_COORD_TRANS:\n tag = read_tag(fid, pos)\n cand = tag.data\n\n if cand['from'] == FIFF.FIFFV_COORD_DEVICE and \\\n cand['to'] == FIFF.FIFFV_COORD_HEAD:\n dev_head_t = cand\n elif cand['from'] == FIFF.FIFFV_COORD_HEAD and \\\n cand['to'] == FIFF.FIFFV_COORD_DEVICE:\n # this reversal can happen with BabyMEG data\n dev_head_t = invert_transform(cand)\n elif cand['from'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD and \\\n cand['to'] == FIFF.FIFFV_COORD_HEAD:\n ctf_head_t = cand\n elif cand['from'] == FIFF.FIFFV_MNE_COORD_CTF_DEVICE and \\\n cand['to'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD:\n dev_ctf_t = cand\n elif kind == FIFF.FIFF_EXPERIMENTER:\n tag = read_tag(fid, pos)\n experimenter = tag.data\n elif kind == FIFF.FIFF_DESCRIPTION:\n tag = read_tag(fid, pos)\n description = tag.data\n elif kind == FIFF.FIFF_PROJ_ID:\n tag = read_tag(fid, pos)\n proj_id = tag.data\n elif kind == FIFF.FIFF_PROJ_NAME:\n tag = read_tag(fid, pos)\n proj_name = tag.data\n elif kind == FIFF.FIFF_LINE_FREQ:\n tag = read_tag(fid, pos)\n line_freq = float(tag.data)\n elif kind == FIFF.FIFF_GANTRY_ANGLE:\n tag = read_tag(fid, pos)\n gantry_angle = float(tag.data)\n elif kind in [FIFF.FIFF_MNE_CUSTOM_REF, 236]: # 236 used before v0.11\n tag = read_tag(fid, pos)\n custom_ref_applied = int(tag.data)\n elif kind == FIFF.FIFF_XPLOTTER_LAYOUT:\n tag = read_tag(fid, pos)\n xplotter_layout = str(tag.data)\n elif kind == FIFF.FIFF_MNE_KIT_SYSTEM_ID:\n tag = read_tag(fid, pos)\n kit_system_id = int(tag.data)\n ch_names_mapping = _read_extended_ch_info(chs, meas_info, fid)\n\n # Check that we have everything we need\n if nchan is None:\n raise ValueError('Number of channels is not defined')\n\n if sfreq is None:\n raise ValueError('Sampling frequency is not defined')\n\n if len(chs) == 0:\n raise ValueError('Channel information not defined')\n\n if len(chs) != nchan:\n raise ValueError('Incorrect number of channel definitions found')\n\n if dev_head_t is None or ctf_head_t is None:\n hpi_result = dir_tree_find(meas_info, FIFF.FIFFB_HPI_RESULT)\n if len(hpi_result) == 1:\n hpi_result = hpi_result[0]\n for k in range(hpi_result['nent']):\n kind = hpi_result['directory'][k].kind\n pos = hpi_result['directory'][k].pos\n if kind == FIFF.FIFF_COORD_TRANS:\n tag = read_tag(fid, pos)\n cand = tag.data\n if (cand['from'] == FIFF.FIFFV_COORD_DEVICE and\n cand['to'] == FIFF.FIFFV_COORD_HEAD and\n dev_head_t is None):\n dev_head_t = cand\n elif (cand['from'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD and\n cand['to'] == FIFF.FIFFV_COORD_HEAD and\n ctf_head_t is None):\n ctf_head_t = cand\n\n # Locate the Polhemus data\n dig = _read_dig_fif(fid, meas_info)\n\n # Locate the acquisition information\n acqpars = dir_tree_find(meas_info, FIFF.FIFFB_DACQ_PARS)\n acq_pars = None\n acq_stim = None\n if len(acqpars) == 1:\n acqpars = acqpars[0]\n for k in range(acqpars['nent']):\n kind = acqpars['directory'][k].kind\n pos = acqpars['directory'][k].pos\n if kind == FIFF.FIFF_DACQ_PARS:\n tag = read_tag(fid, pos)\n acq_pars = tag.data\n elif kind == FIFF.FIFF_DACQ_STIM:\n tag = read_tag(fid, pos)\n acq_stim = tag.data\n\n # Load the SSP data\n projs = _read_proj(\n fid, meas_info, ch_names_mapping=ch_names_mapping)\n\n # Load the CTF compensation data\n comps = _read_ctf_comp(\n fid, meas_info, chs, ch_names_mapping=ch_names_mapping)\n\n # Load the bad channel list\n bads = 
_read_bad_channels(\n fid, meas_info, ch_names_mapping=ch_names_mapping)\n\n #\n # Put the data together\n #\n if tree['id'] is not None:\n info = Info(file_id=tree['id'])\n else:\n info = Info(file_id=None)\n\n # Locate events list\n events = dir_tree_find(meas_info, FIFF.FIFFB_EVENTS)\n evs = list()\n for event in events:\n ev = dict()\n for k in range(event['nent']):\n kind = event['directory'][k].kind\n pos = event['directory'][k].pos\n if kind == FIFF.FIFF_EVENT_CHANNELS:\n ev['channels'] = read_tag(fid, pos).data\n elif kind == FIFF.FIFF_EVENT_LIST:\n ev['list'] = read_tag(fid, pos).data\n evs.append(ev)\n info['events'] = evs\n\n # Locate HPI result\n hpi_results = dir_tree_find(meas_info, FIFF.FIFFB_HPI_RESULT)\n hrs = list()\n for hpi_result in hpi_results:\n hr = dict()\n hr['dig_points'] = []\n for k in range(hpi_result['nent']):\n kind = hpi_result['directory'][k].kind\n pos = hpi_result['directory'][k].pos\n if kind == FIFF.FIFF_DIG_POINT:\n hr['dig_points'].append(read_tag(fid, pos).data)\n elif kind == FIFF.FIFF_HPI_DIGITIZATION_ORDER:\n hr['order'] = read_tag(fid, pos).data\n elif kind == FIFF.FIFF_HPI_COILS_USED:\n hr['used'] = read_tag(fid, pos).data\n elif kind == FIFF.FIFF_HPI_COIL_MOMENTS:\n hr['moments'] = read_tag(fid, pos).data\n elif kind == FIFF.FIFF_HPI_FIT_GOODNESS:\n hr['goodness'] = read_tag(fid, pos).data\n elif kind == FIFF.FIFF_HPI_FIT_GOOD_LIMIT:\n hr['good_limit'] = float(read_tag(fid, pos).data)\n elif kind == FIFF.FIFF_HPI_FIT_DIST_LIMIT:\n hr['dist_limit'] = float(read_tag(fid, pos).data)\n elif kind == FIFF.FIFF_HPI_FIT_ACCEPT:\n hr['accept'] = int(read_tag(fid, pos).data)\n elif kind == FIFF.FIFF_COORD_TRANS:\n hr['coord_trans'] = read_tag(fid, pos).data\n hrs.append(hr)\n info['hpi_results'] = hrs\n\n # Locate HPI Measurement\n hpi_meass = dir_tree_find(meas_info, FIFF.FIFFB_HPI_MEAS)\n hms = list()\n for hpi_meas in hpi_meass:\n hm = dict()\n for k in range(hpi_meas['nent']):\n kind = hpi_meas['directory'][k].kind\n pos = hpi_meas['directory'][k].pos\n if kind == FIFF.FIFF_CREATOR:\n hm['creator'] = str(read_tag(fid, pos).data)\n elif kind == FIFF.FIFF_SFREQ:\n hm['sfreq'] = float(read_tag(fid, pos).data)\n elif kind == FIFF.FIFF_NCHAN:\n hm['nchan'] = int(read_tag(fid, pos).data)\n elif kind == FIFF.FIFF_NAVE:\n hm['nave'] = int(read_tag(fid, pos).data)\n elif kind == FIFF.FIFF_HPI_NCOIL:\n hm['ncoil'] = int(read_tag(fid, pos).data)\n elif kind == FIFF.FIFF_FIRST_SAMPLE:\n hm['first_samp'] = int(read_tag(fid, pos).data)\n elif kind == FIFF.FIFF_LAST_SAMPLE:\n hm['last_samp'] = int(read_tag(fid, pos).data)\n hpi_coils = dir_tree_find(hpi_meas, FIFF.FIFFB_HPI_COIL)\n hcs = []\n for hpi_coil in hpi_coils:\n hc = dict()\n for k in range(hpi_coil['nent']):\n kind = hpi_coil['directory'][k].kind\n pos = hpi_coil['directory'][k].pos\n if kind == FIFF.FIFF_HPI_COIL_NO:\n hc['number'] = int(read_tag(fid, pos).data)\n elif kind == FIFF.FIFF_EPOCH:\n hc['epoch'] = read_tag(fid, pos).data\n hc['epoch'].flags.writeable = False\n elif kind == FIFF.FIFF_HPI_SLOPES:\n hc['slopes'] = read_tag(fid, pos).data\n hc['slopes'].flags.writeable = False\n elif kind == FIFF.FIFF_HPI_CORR_COEFF:\n hc['corr_coeff'] = read_tag(fid, pos).data\n hc['corr_coeff'].flags.writeable = False\n elif kind == FIFF.FIFF_HPI_COIL_FREQ:\n hc['coil_freq'] = float(read_tag(fid, pos).data)\n hcs.append(hc)\n hm['hpi_coils'] = hcs\n hms.append(hm)\n info['hpi_meas'] = hms\n del hms\n\n subject_info = dir_tree_find(meas_info, FIFF.FIFFB_SUBJECT)\n si = None\n if len(subject_info) == 1:\n 
subject_info = subject_info[0]\n si = dict()\n for k in range(subject_info['nent']):\n kind = subject_info['directory'][k].kind\n pos = subject_info['directory'][k].pos\n if kind == FIFF.FIFF_SUBJ_ID:\n tag = read_tag(fid, pos)\n si['id'] = int(tag.data)\n elif kind == FIFF.FIFF_SUBJ_HIS_ID:\n tag = read_tag(fid, pos)\n si['his_id'] = str(tag.data)\n elif kind == FIFF.FIFF_SUBJ_LAST_NAME:\n tag = read_tag(fid, pos)\n si['last_name'] = str(tag.data)\n elif kind == FIFF.FIFF_SUBJ_FIRST_NAME:\n tag = read_tag(fid, pos)\n si['first_name'] = str(tag.data)\n elif kind == FIFF.FIFF_SUBJ_MIDDLE_NAME:\n tag = read_tag(fid, pos)\n si['middle_name'] = str(tag.data)\n elif kind == FIFF.FIFF_SUBJ_BIRTH_DAY:\n try:\n tag = read_tag(fid, pos)\n except OverflowError:\n warn('Encountered an error while trying to read the '\n 'birthday from the input data. No birthday will be '\n 'set. Please check the integrity of the birthday '\n 'information in the input data.')\n continue\n si['birthday'] = tag.data\n elif kind == FIFF.FIFF_SUBJ_SEX:\n tag = read_tag(fid, pos)\n si['sex'] = int(tag.data)\n elif kind == FIFF.FIFF_SUBJ_HAND:\n tag = read_tag(fid, pos)\n si['hand'] = int(tag.data)\n elif kind == FIFF.FIFF_SUBJ_WEIGHT:\n tag = read_tag(fid, pos)\n si['weight'] = tag.data\n elif kind == FIFF.FIFF_SUBJ_HEIGHT:\n tag = read_tag(fid, pos)\n si['height'] = tag.data\n info['subject_info'] = si\n del si\n\n device_info = dir_tree_find(meas_info, FIFF.FIFFB_DEVICE)\n di = None\n if len(device_info) == 1:\n device_info = device_info[0]\n di = dict()\n for k in range(device_info['nent']):\n kind = device_info['directory'][k].kind\n pos = device_info['directory'][k].pos\n if kind == FIFF.FIFF_DEVICE_TYPE:\n tag = read_tag(fid, pos)\n di['type'] = str(tag.data)\n elif kind == FIFF.FIFF_DEVICE_MODEL:\n tag = read_tag(fid, pos)\n di['model'] = str(tag.data)\n elif kind == FIFF.FIFF_DEVICE_SERIAL:\n tag = read_tag(fid, pos)\n di['serial'] = str(tag.data)\n elif kind == FIFF.FIFF_DEVICE_SITE:\n tag = read_tag(fid, pos)\n di['site'] = str(tag.data)\n info['device_info'] = di\n del di\n\n helium_info = dir_tree_find(meas_info, FIFF.FIFFB_HELIUM)\n hi = None\n if len(helium_info) == 1:\n helium_info = helium_info[0]\n hi = dict()\n for k in range(helium_info['nent']):\n kind = helium_info['directory'][k].kind\n pos = helium_info['directory'][k].pos\n if kind == FIFF.FIFF_HE_LEVEL_RAW:\n tag = read_tag(fid, pos)\n hi['he_level_raw'] = float(tag.data)\n elif kind == FIFF.FIFF_HELIUM_LEVEL:\n tag = read_tag(fid, pos)\n hi['helium_level'] = float(tag.data)\n elif kind == FIFF.FIFF_ORIG_FILE_GUID:\n tag = read_tag(fid, pos)\n hi['orig_file_guid'] = str(tag.data)\n elif kind == FIFF.FIFF_MEAS_DATE:\n tag = read_tag(fid, pos)\n hi['meas_date'] = tuple(int(t) for t in tag.data)\n info['helium_info'] = hi\n del hi\n\n hpi_subsystem = dir_tree_find(meas_info, FIFF.FIFFB_HPI_SUBSYSTEM)\n hs = None\n if len(hpi_subsystem) == 1:\n hpi_subsystem = hpi_subsystem[0]\n hs = dict()\n for k in range(hpi_subsystem['nent']):\n kind = hpi_subsystem['directory'][k].kind\n pos = hpi_subsystem['directory'][k].pos\n if kind == FIFF.FIFF_HPI_NCOIL:\n tag = read_tag(fid, pos)\n hs['ncoil'] = int(tag.data)\n elif kind == FIFF.FIFF_EVENT_CHANNEL:\n tag = read_tag(fid, pos)\n hs['event_channel'] = str(tag.data)\n hpi_coils = dir_tree_find(hpi_subsystem, FIFF.FIFFB_HPI_COIL)\n hc = []\n for coil in hpi_coils:\n this_coil = dict()\n for j in range(coil['nent']):\n kind = coil['directory'][j].kind\n pos = coil['directory'][j].pos\n if kind == 
FIFF.FIFF_EVENT_BITS:\n tag = read_tag(fid, pos)\n this_coil['event_bits'] = np.array(tag.data)\n hc.append(this_coil)\n hs['hpi_coils'] = hc\n info['hpi_subsystem'] = hs\n\n # Read processing history\n info['proc_history'] = _read_proc_history(fid, tree)\n\n # Make the most appropriate selection for the measurement id\n if meas_info['parent_id'] is None:\n if meas_info['id'] is None:\n if meas['id'] is None:\n if meas['parent_id'] is None:\n info['meas_id'] = info['file_id']\n else:\n info['meas_id'] = meas['parent_id']\n else:\n info['meas_id'] = meas['id']\n else:\n info['meas_id'] = meas_info['id']\n else:\n info['meas_id'] = meas_info['parent_id']\n info['experimenter'] = experimenter\n info['description'] = description\n info['proj_id'] = proj_id\n info['proj_name'] = proj_name\n if meas_date is None:\n meas_date = (info['meas_id']['secs'], info['meas_id']['usecs'])\n info['meas_date'] = _ensure_meas_date_none_or_dt(meas_date)\n info['utc_offset'] = utc_offset\n\n info['sfreq'] = sfreq\n info['highpass'] = highpass if highpass is not None else 0.\n info['lowpass'] = lowpass if lowpass is not None else info['sfreq'] / 2.0\n info['line_freq'] = line_freq\n info['gantry_angle'] = gantry_angle\n\n # Add the channel information and make a list of channel names\n # for convenience\n info['chs'] = chs\n\n #\n # Add the coordinate transformations\n #\n info['dev_head_t'] = dev_head_t\n info['ctf_head_t'] = ctf_head_t\n info['dev_ctf_t'] = dev_ctf_t\n if dev_head_t is not None and ctf_head_t is not None and dev_ctf_t is None:\n from ..transforms import Transform\n head_ctf_trans = np.linalg.inv(ctf_head_t['trans'])\n dev_ctf_trans = np.dot(head_ctf_trans, info['dev_head_t']['trans'])\n info['dev_ctf_t'] = Transform('meg', 'ctf_head', dev_ctf_trans)\n\n # All kinds of auxliary stuff\n info['dig'] = _format_dig_points(dig)\n info['bads'] = bads\n info._update_redundant()\n if clean_bads:\n info['bads'] = [b for b in bads if b in info['ch_names']]\n info['projs'] = projs\n info['comps'] = comps\n info['acq_pars'] = acq_pars\n info['acq_stim'] = acq_stim\n info['custom_ref_applied'] = custom_ref_applied\n info['xplotter_layout'] = xplotter_layout\n info['kit_system_id'] = kit_system_id\n info._check_consistency()\n return info, meas\n\n\ndef _read_extended_ch_info(chs, parent, fid):\n ch_infos = dir_tree_find(parent, FIFF.FIFFB_CH_INFO)\n if len(ch_infos) == 0:\n return\n _check_option('length of channel infos', len(ch_infos), [len(chs)])\n logger.info(' Reading extended channel information')\n\n # Here we assume that ``remap`` is in the same order as the channels\n # themselves, which is hopefully safe enough.\n ch_names_mapping = dict()\n for new, ch in zip(ch_infos, chs):\n for k in range(new['nent']):\n kind = new['directory'][k].kind\n try:\n key, cast = _CH_READ_MAP[kind]\n except KeyError:\n # This shouldn't happen if we're up to date with the FIFF\n # spec\n warn(f'Discarding extra channel information kind {kind}')\n continue\n assert key in ch\n data = read_tag(fid, new['directory'][k].pos).data\n if data is not None:\n data = cast(data)\n if key == 'ch_name':\n ch_names_mapping[ch[key]] = data\n ch[key] = data\n _update_ch_info_named(ch)\n # we need to return ch_names_mapping so that we can also rename the\n # bad channels\n return ch_names_mapping\n\n\ndef _rename_comps(comps, ch_names_mapping):\n if not (comps and ch_names_mapping):\n return\n for comp in comps:\n data = comp['data']\n for key in ('row_names', 'col_names'):\n data[key][:] = _rename_list(data[key], 
ch_names_mapping)\n\n\ndef _ensure_meas_date_none_or_dt(meas_date):\n if meas_date is None or np.array_equal(meas_date, DATE_NONE):\n meas_date = None\n elif not isinstance(meas_date, datetime.datetime):\n meas_date = _stamp_to_dt(meas_date)\n return meas_date\n\n\ndef _check_dates(info, prepend_error=''):\n \"\"\"Check dates before writing as fif files.\n\n It's needed because of the limited integer precision\n of the fix standard.\n \"\"\"\n for key in ('file_id', 'meas_id'):\n value = info.get(key)\n if value is not None:\n assert 'msecs' not in value\n for key_2 in ('secs', 'usecs'):\n if (value[key_2] < np.iinfo('>i4').min or\n value[key_2] > np.iinfo('>i4').max):\n raise RuntimeError('%sinfo[%s][%s] must be between '\n '\"%r\" and \"%r\", got \"%r\"'\n % (prepend_error, key, key_2,\n np.iinfo('>i4').min,\n np.iinfo('>i4').max,\n value[key_2]),)\n\n meas_date = info.get('meas_date')\n if meas_date is None:\n return\n\n meas_date_stamp = _dt_to_stamp(meas_date)\n if (meas_date_stamp[0] < np.iinfo('>i4').min or\n meas_date_stamp[0] > np.iinfo('>i4').max):\n raise RuntimeError(\n '%sinfo[\"meas_date\"] seconds must be between \"%r\" '\n 'and \"%r\", got \"%r\"'\n % (prepend_error, (np.iinfo('>i4').min, 0),\n (np.iinfo('>i4').max, 0), meas_date_stamp[0],))\n\n\n@fill_doc\ndef write_meas_info(fid, info, data_type=None, reset_range=True):\n \"\"\"Write measurement info into a file id (from a fif file).\n\n Parameters\n ----------\n fid : file\n Open file descriptor.\n %(info_not_none)s\n data_type : int\n The data_type in case it is necessary. Should be 4 (FIFFT_FLOAT),\n 5 (FIFFT_DOUBLE), or 16 (FIFFT_DAU_PACK16) for\n raw data.\n reset_range : bool\n If True, info['chs'][k]['range'] will be set to unity.\n\n Notes\n -----\n Tags are written in a particular order for compatibility with maxfilter.\n \"\"\"\n info._check_consistency()\n _check_dates(info)\n\n # Measurement info\n start_block(fid, FIFF.FIFFB_MEAS_INFO)\n\n # Add measurement id\n if info['meas_id'] is not None:\n write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, info['meas_id'])\n\n for event in info['events']:\n start_block(fid, FIFF.FIFFB_EVENTS)\n if event.get('channels') is not None:\n write_int(fid, FIFF.FIFF_EVENT_CHANNELS, event['channels'])\n if event.get('list') is not None:\n write_int(fid, FIFF.FIFF_EVENT_LIST, event['list'])\n end_block(fid, FIFF.FIFFB_EVENTS)\n\n # HPI Result\n for hpi_result in info['hpi_results']:\n start_block(fid, FIFF.FIFFB_HPI_RESULT)\n write_dig_points(fid, hpi_result['dig_points'])\n if 'order' in hpi_result:\n write_int(fid, FIFF.FIFF_HPI_DIGITIZATION_ORDER,\n hpi_result['order'])\n if 'used' in hpi_result:\n write_int(fid, FIFF.FIFF_HPI_COILS_USED, hpi_result['used'])\n if 'moments' in hpi_result:\n write_float_matrix(fid, FIFF.FIFF_HPI_COIL_MOMENTS,\n hpi_result['moments'])\n if 'goodness' in hpi_result:\n write_float(fid, FIFF.FIFF_HPI_FIT_GOODNESS,\n hpi_result['goodness'])\n if 'good_limit' in hpi_result:\n write_float(fid, FIFF.FIFF_HPI_FIT_GOOD_LIMIT,\n hpi_result['good_limit'])\n if 'dist_limit' in hpi_result:\n write_float(fid, FIFF.FIFF_HPI_FIT_DIST_LIMIT,\n hpi_result['dist_limit'])\n if 'accept' in hpi_result:\n write_int(fid, FIFF.FIFF_HPI_FIT_ACCEPT, hpi_result['accept'])\n if 'coord_trans' in hpi_result:\n write_coord_trans(fid, hpi_result['coord_trans'])\n end_block(fid, FIFF.FIFFB_HPI_RESULT)\n\n # HPI Measurement\n for hpi_meas in info['hpi_meas']:\n start_block(fid, FIFF.FIFFB_HPI_MEAS)\n if hpi_meas.get('creator') is not None:\n write_string(fid, FIFF.FIFF_CREATOR, 
hpi_meas['creator'])\n if hpi_meas.get('sfreq') is not None:\n write_float(fid, FIFF.FIFF_SFREQ, hpi_meas['sfreq'])\n if hpi_meas.get('nchan') is not None:\n write_int(fid, FIFF.FIFF_NCHAN, hpi_meas['nchan'])\n if hpi_meas.get('nave') is not None:\n write_int(fid, FIFF.FIFF_NAVE, hpi_meas['nave'])\n if hpi_meas.get('ncoil') is not None:\n write_int(fid, FIFF.FIFF_HPI_NCOIL, hpi_meas['ncoil'])\n if hpi_meas.get('first_samp') is not None:\n write_int(fid, FIFF.FIFF_FIRST_SAMPLE, hpi_meas['first_samp'])\n if hpi_meas.get('last_samp') is not None:\n write_int(fid, FIFF.FIFF_LAST_SAMPLE, hpi_meas['last_samp'])\n for hpi_coil in hpi_meas['hpi_coils']:\n start_block(fid, FIFF.FIFFB_HPI_COIL)\n if hpi_coil.get('number') is not None:\n write_int(fid, FIFF.FIFF_HPI_COIL_NO, hpi_coil['number'])\n if hpi_coil.get('epoch') is not None:\n write_float_matrix(fid, FIFF.FIFF_EPOCH, hpi_coil['epoch'])\n if hpi_coil.get('slopes') is not None:\n write_float(fid, FIFF.FIFF_HPI_SLOPES, hpi_coil['slopes'])\n if hpi_coil.get('corr_coeff') is not None:\n write_float(fid, FIFF.FIFF_HPI_CORR_COEFF,\n hpi_coil['corr_coeff'])\n if hpi_coil.get('coil_freq') is not None:\n write_float(fid, FIFF.FIFF_HPI_COIL_FREQ,\n hpi_coil['coil_freq'])\n end_block(fid, FIFF.FIFFB_HPI_COIL)\n end_block(fid, FIFF.FIFFB_HPI_MEAS)\n\n # Polhemus data\n write_dig_points(fid, info['dig'], block=True)\n\n # megacq parameters\n if info['acq_pars'] is not None or info['acq_stim'] is not None:\n start_block(fid, FIFF.FIFFB_DACQ_PARS)\n if info['acq_pars'] is not None:\n write_string(fid, FIFF.FIFF_DACQ_PARS, info['acq_pars'])\n\n if info['acq_stim'] is not None:\n write_string(fid, FIFF.FIFF_DACQ_STIM, info['acq_stim'])\n\n end_block(fid, FIFF.FIFFB_DACQ_PARS)\n\n # Coordinate transformations if the HPI result block was not there\n if info['dev_head_t'] is not None:\n write_coord_trans(fid, info['dev_head_t'])\n\n if info['ctf_head_t'] is not None:\n write_coord_trans(fid, info['ctf_head_t'])\n\n if info['dev_ctf_t'] is not None:\n write_coord_trans(fid, info['dev_ctf_t'])\n\n # Projectors\n ch_names_mapping = _make_ch_names_mapping(info['chs'])\n _write_proj(fid, info['projs'], ch_names_mapping=ch_names_mapping)\n\n # Bad channels\n if len(info['bads']) > 0:\n bads = _rename_list(info['bads'], ch_names_mapping)\n start_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)\n write_name_list(fid, FIFF.FIFF_MNE_CH_NAME_LIST, bads)\n end_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)\n\n # General\n if info.get('experimenter') is not None:\n write_string(fid, FIFF.FIFF_EXPERIMENTER, info['experimenter'])\n if info.get('description') is not None:\n write_string(fid, FIFF.FIFF_DESCRIPTION, info['description'])\n if info.get('proj_id') is not None:\n write_int(fid, FIFF.FIFF_PROJ_ID, info['proj_id'])\n if info.get('proj_name') is not None:\n write_string(fid, FIFF.FIFF_PROJ_NAME, info['proj_name'])\n if info.get('meas_date') is not None:\n write_int(fid, FIFF.FIFF_MEAS_DATE, _dt_to_stamp(info['meas_date']))\n if info.get('utc_offset') is not None:\n write_string(fid, FIFF.FIFF_UTC_OFFSET, info['utc_offset'])\n write_int(fid, FIFF.FIFF_NCHAN, info['nchan'])\n write_float(fid, FIFF.FIFF_SFREQ, info['sfreq'])\n if info['lowpass'] is not None:\n write_float(fid, FIFF.FIFF_LOWPASS, info['lowpass'])\n if info['highpass'] is not None:\n write_float(fid, FIFF.FIFF_HIGHPASS, info['highpass'])\n if info.get('line_freq') is not None:\n write_float(fid, FIFF.FIFF_LINE_FREQ, info['line_freq'])\n if info.get('gantry_angle') is not None:\n write_float(fid, FIFF.FIFF_GANTRY_ANGLE, 
info['gantry_angle'])\n if data_type is not None:\n write_int(fid, FIFF.FIFF_DATA_PACK, data_type)\n if info.get('custom_ref_applied'):\n write_int(fid, FIFF.FIFF_MNE_CUSTOM_REF, info['custom_ref_applied'])\n if info.get('xplotter_layout'):\n write_string(fid, FIFF.FIFF_XPLOTTER_LAYOUT, info['xplotter_layout'])\n\n # Channel information\n _write_ch_infos(fid, info['chs'], reset_range, ch_names_mapping)\n\n # Subject information\n if info.get('subject_info') is not None:\n start_block(fid, FIFF.FIFFB_SUBJECT)\n si = info['subject_info']\n if si.get('id') is not None:\n write_int(fid, FIFF.FIFF_SUBJ_ID, si['id'])\n if si.get('his_id') is not None:\n write_string(fid, FIFF.FIFF_SUBJ_HIS_ID, si['his_id'])\n if si.get('last_name') is not None:\n write_string(fid, FIFF.FIFF_SUBJ_LAST_NAME, si['last_name'])\n if si.get('first_name') is not None:\n write_string(fid, FIFF.FIFF_SUBJ_FIRST_NAME, si['first_name'])\n if si.get('middle_name') is not None:\n write_string(fid, FIFF.FIFF_SUBJ_MIDDLE_NAME, si['middle_name'])\n if si.get('birthday') is not None:\n write_julian(fid, FIFF.FIFF_SUBJ_BIRTH_DAY, si['birthday'])\n if si.get('sex') is not None:\n write_int(fid, FIFF.FIFF_SUBJ_SEX, si['sex'])\n if si.get('hand') is not None:\n write_int(fid, FIFF.FIFF_SUBJ_HAND, si['hand'])\n if si.get('weight') is not None:\n write_float(fid, FIFF.FIFF_SUBJ_WEIGHT, si['weight'])\n if si.get('height') is not None:\n write_float(fid, FIFF.FIFF_SUBJ_HEIGHT, si['height'])\n end_block(fid, FIFF.FIFFB_SUBJECT)\n del si\n\n if info.get('device_info') is not None:\n start_block(fid, FIFF.FIFFB_DEVICE)\n di = info['device_info']\n write_string(fid, FIFF.FIFF_DEVICE_TYPE, di['type'])\n for key in ('model', 'serial', 'site'):\n if di.get(key) is not None:\n write_string(fid, getattr(FIFF, 'FIFF_DEVICE_' + key.upper()),\n di[key])\n end_block(fid, FIFF.FIFFB_DEVICE)\n del di\n\n if info.get('helium_info') is not None:\n start_block(fid, FIFF.FIFFB_HELIUM)\n hi = info['helium_info']\n if hi.get('he_level_raw') is not None:\n write_float(fid, FIFF.FIFF_HE_LEVEL_RAW, hi['he_level_raw'])\n if hi.get('helium_level') is not None:\n write_float(fid, FIFF.FIFF_HELIUM_LEVEL, hi['helium_level'])\n if hi.get('orig_file_guid') is not None:\n write_string(fid, FIFF.FIFF_ORIG_FILE_GUID, hi['orig_file_guid'])\n write_int(fid, FIFF.FIFF_MEAS_DATE, hi['meas_date'])\n end_block(fid, FIFF.FIFFB_HELIUM)\n del hi\n\n if info.get('hpi_subsystem') is not None:\n hs = info['hpi_subsystem']\n start_block(fid, FIFF.FIFFB_HPI_SUBSYSTEM)\n if hs.get('ncoil') is not None:\n write_int(fid, FIFF.FIFF_HPI_NCOIL, hs['ncoil'])\n if hs.get('event_channel') is not None:\n write_string(fid, FIFF.FIFF_EVENT_CHANNEL, hs['event_channel'])\n if hs.get('hpi_coils') is not None:\n for coil in hs['hpi_coils']:\n start_block(fid, FIFF.FIFFB_HPI_COIL)\n if coil.get('event_bits') is not None:\n write_int(fid, FIFF.FIFF_EVENT_BITS,\n coil['event_bits'])\n end_block(fid, FIFF.FIFFB_HPI_COIL)\n end_block(fid, FIFF.FIFFB_HPI_SUBSYSTEM)\n del hs\n\n # CTF compensation info\n comps = info['comps']\n if ch_names_mapping:\n comps = deepcopy(comps)\n _rename_comps(comps, ch_names_mapping)\n write_ctf_comp(fid, comps)\n\n # KIT system ID\n if info.get('kit_system_id') is not None:\n write_int(fid, FIFF.FIFF_MNE_KIT_SYSTEM_ID, info['kit_system_id'])\n\n end_block(fid, FIFF.FIFFB_MEAS_INFO)\n\n # Processing history\n _write_proc_history(fid, info)\n\n\n@fill_doc\ndef write_info(fname, info, data_type=None, reset_range=True):\n \"\"\"Write measurement info in fif file.\n\n Parameters\n 
----------\n fname : str\n The name of the file. Should end with -info.fif.\n %(info_not_none)s\n data_type : int\n The data_type in case it is necessary. Should be 4 (FIFFT_FLOAT),\n 5 (FIFFT_DOUBLE), or 16 (FIFFT_DAU_PACK16) for\n raw data.\n reset_range : bool\n If True, info['chs'][k]['range'] will be set to unity.\n \"\"\"\n with start_file(fname) as fid:\n start_block(fid, FIFF.FIFFB_MEAS)\n write_meas_info(fid, info, data_type, reset_range)\n end_block(fid, FIFF.FIFFB_MEAS)\n end_file(fid)\n\n\n@verbose\ndef _merge_info_values(infos, key, verbose=None):\n \"\"\"Merge things together.\n\n Fork for {'dict', 'list', 'array', 'other'}\n and consider cases where one or all are of the same type.\n\n Does special things for \"projs\", \"bads\", and \"meas_date\".\n \"\"\"\n values = [d[key] for d in infos]\n msg = (\"Don't know how to merge '%s'. Make sure values are \"\n \"compatible, got types:\\n %s\"\n % (key, [type(v) for v in values]))\n\n def _flatten(lists):\n return [item for sublist in lists for item in sublist]\n\n def _check_isinstance(values, kind, func):\n return func([isinstance(v, kind) for v in values])\n\n def _where_isinstance(values, kind):\n \"\"\"Get indices of instances.\"\"\"\n return np.where([isinstance(v, kind) for v in values])[0]\n\n # list\n if _check_isinstance(values, list, all):\n lists = (d[key] for d in infos)\n if key == 'projs':\n return _uniquify_projs(_flatten(lists))\n elif key == 'bads':\n return sorted(set(_flatten(lists)))\n else:\n return _flatten(lists)\n elif _check_isinstance(values, list, any):\n idx = _where_isinstance(values, list)\n if len(idx) == 1:\n return values[int(idx)]\n elif len(idx) > 1:\n lists = (d[key] for d in infos if isinstance(d[key], list))\n return _flatten(lists)\n # dict\n elif _check_isinstance(values, dict, all):\n is_qual = all(object_diff(values[0], v) == '' for v in values[1:])\n if is_qual:\n return values[0]\n else:\n raise RuntimeError(msg)\n elif _check_isinstance(values, dict, any):\n idx = _where_isinstance(values, dict)\n if len(idx) == 1:\n return values[int(idx)]\n elif len(idx) > 1:\n raise RuntimeError(msg)\n # ndarray\n elif _check_isinstance(values, np.ndarray, all) or \\\n _check_isinstance(values, tuple, all):\n is_qual = all(np.array_equal(values[0], x) for x in values[1:])\n if is_qual:\n return values[0]\n elif key == 'meas_date':\n logger.info('Found multiple entries for %s. '\n 'Setting value to `None`' % key)\n return None\n else:\n raise RuntimeError(msg)\n elif _check_isinstance(values, (np.ndarray, tuple), any):\n idx = _where_isinstance(values, np.ndarray)\n if len(idx) == 1:\n return values[int(idx)]\n elif len(idx) > 1:\n raise RuntimeError(msg)\n # other\n else:\n unique_values = set(values)\n if len(unique_values) == 1:\n return list(values)[0]\n elif isinstance(list(unique_values)[0], BytesIO):\n logger.info('Found multiple StringIO instances. '\n 'Setting value to `None`')\n return None\n elif isinstance(list(unique_values)[0], str):\n logger.info('Found multiple filenames. 
'\n 'Setting value to `None`')\n return None\n else:\n raise RuntimeError(msg)\n\n\n@verbose\ndef _merge_info(infos, force_update_to_first=False, verbose=None):\n \"\"\"Merge multiple measurement info dictionaries.\n\n - Fields that are present in only one info object will be used in the\n merged info.\n - Fields that are present in multiple info objects and are the same\n will be used in the merged info.\n - Fields that are present in multiple info objects and are different\n will result in a None value in the merged info.\n - Channels will be concatenated. If multiple info objects contain\n channels with the same name, an exception is raised.\n\n Parameters\n ----------\n infos : list of instance of Info\n Info objects to merge into one info object.\n force_update_to_first : bool\n If True, the fields of objects in `infos` will be forcibly updated\n to match those in the first item. Use at your own risk, as this\n may overwrite important metadata.\n %(verbose)s\n\n Returns\n -------\n info : instance of Info\n The merged info object.\n \"\"\"\n for info in infos:\n info._check_consistency()\n if force_update_to_first is True:\n infos = deepcopy(infos)\n _force_update_info(infos[0], infos[1:])\n info = Info()\n info['chs'] = []\n for this_info in infos:\n info['chs'].extend(this_info['chs'])\n info._update_redundant()\n duplicates = {ch for ch in info['ch_names']\n if info['ch_names'].count(ch) > 1}\n if len(duplicates) > 0:\n msg = (\"The following channels are present in more than one input \"\n \"measurement info objects: %s\" % list(duplicates))\n raise ValueError(msg)\n\n transforms = ['ctf_head_t', 'dev_head_t', 'dev_ctf_t']\n for trans_name in transforms:\n trans = [i[trans_name] for i in infos if i[trans_name]]\n if len(trans) == 0:\n info[trans_name] = None\n elif len(trans) == 1:\n info[trans_name] = trans[0]\n elif all(np.all(trans[0]['trans'] == x['trans']) and\n trans[0]['from'] == x['from'] and\n trans[0]['to'] == x['to']\n for x in trans[1:]):\n info[trans_name] = trans[0]\n else:\n msg = (\"Measurement infos provide mutually inconsistent %s\" %\n trans_name)\n raise ValueError(msg)\n\n # KIT system-IDs\n kit_sys_ids = [i['kit_system_id'] for i in infos if i['kit_system_id']]\n if len(kit_sys_ids) == 0:\n info['kit_system_id'] = None\n elif len(set(kit_sys_ids)) == 1:\n info['kit_system_id'] = kit_sys_ids[0]\n else:\n raise ValueError(\"Trying to merge channels from different KIT systems\")\n\n # hpi infos and digitization data:\n fields = ['hpi_results', 'hpi_meas', 'dig']\n for k in fields:\n values = [i[k] for i in infos if i[k]]\n if len(values) == 0:\n info[k] = []\n elif len(values) == 1:\n info[k] = values[0]\n elif all(object_diff(values[0], v) == '' for v in values[1:]):\n info[k] = values[0]\n else:\n msg = (\"Measurement infos are inconsistent for %s\" % k)\n raise ValueError(msg)\n\n # other fields\n other_fields = ['acq_pars', 'acq_stim', 'bads',\n 'comps', 'custom_ref_applied', 'description',\n 'experimenter', 'file_id', 'highpass', 'utc_offset',\n 'hpi_subsystem', 'events', 'device_info', 'helium_info',\n 'line_freq', 'lowpass', 'meas_id',\n 'proj_id', 'proj_name', 'projs', 'sfreq', 'gantry_angle',\n 'subject_info', 'xplotter_layout', 'proc_history']\n for k in other_fields:\n info[k] = _merge_info_values(infos, k)\n\n info['meas_date'] = infos[0]['meas_date']\n info._check_consistency()\n return info\n\n\n@verbose\ndef create_info(ch_names, sfreq, ch_types='misc', verbose=None):\n \"\"\"Create a basic Info instance suitable for use with create_raw.\n\n 
Parameters\n ----------\n ch_names : list of str | int\n Channel names. If an int, a list of channel names will be created\n from ``range(ch_names)``.\n sfreq : float\n Sample rate of the data.\n ch_types : list of str | str\n Channel types, default is ``'misc'`` which is not a\n :term:`data channel <data channels>`.\n Currently supported fields are 'ecg', 'bio', 'stim', 'eog', 'misc',\n 'seeg', 'dbs', 'ecog', 'mag', 'eeg', 'ref_meg', 'grad', 'emg', 'hbr'\n or 'hbo'. If str, then all channels are assumed to be of the same type.\n %(verbose)s\n\n Returns\n -------\n %(info_not_none)s\n\n Notes\n -----\n The info dictionary will be sparsely populated to enable functionality\n within the rest of the package. Advanced functionality such as source\n localization can only be obtained through substantial, proper\n modifications of the info structure (not recommended).\n\n Note that the MEG device-to-head transform ``info['dev_head_t']`` will\n be initialized to the identity transform.\n\n Proper units of measure:\n * V: eeg, eog, seeg, dbs, emg, ecg, bio, ecog\n * T: mag\n * T/m: grad\n * M: hbo, hbr\n * Am: dipole\n * AU: misc\n \"\"\"\n try:\n ch_names = operator.index(ch_names) # int-like\n except TypeError:\n pass\n else:\n ch_names = list(np.arange(ch_names).astype(str))\n _validate_type(ch_names, (list, tuple), \"ch_names\",\n (\"list, tuple, or int\"))\n sfreq = float(sfreq)\n if sfreq <= 0:\n raise ValueError('sfreq must be positive')\n nchan = len(ch_names)\n if isinstance(ch_types, str):\n ch_types = [ch_types] * nchan\n ch_types = np.atleast_1d(np.array(ch_types, np.str_))\n if ch_types.ndim != 1 or len(ch_types) != nchan:\n raise ValueError('ch_types and ch_names must be the same length '\n '(%s != %s) for ch_types=%s'\n % (len(ch_types), nchan, ch_types))\n info = _empty_info(sfreq)\n ch_types_dict = get_channel_type_constants(include_defaults=True)\n for ci, (ch_name, ch_type) in enumerate(zip(ch_names, ch_types)):\n _validate_type(ch_name, 'str', \"each entry in ch_names\")\n _validate_type(ch_type, 'str', \"each entry in ch_types\")\n if ch_type not in ch_types_dict:\n raise KeyError(f'kind must be one of {list(ch_types_dict)}, '\n f'not {ch_type}')\n this_ch_dict = ch_types_dict[ch_type]\n kind = this_ch_dict['kind']\n # handle chpi, where kind is a *list* of FIFF constants:\n kind = kind[0] if isinstance(kind, (list, tuple)) else kind\n # mirror what tag.py does here\n coord_frame = _ch_coord_dict.get(kind, FIFF.FIFFV_COORD_UNKNOWN)\n coil_type = this_ch_dict.get('coil_type', FIFF.FIFFV_COIL_NONE)\n unit = this_ch_dict.get('unit', FIFF.FIFF_UNIT_NONE)\n chan_info = dict(loc=np.full(12, np.nan),\n unit_mul=FIFF.FIFF_UNITM_NONE, range=1., cal=1.,\n kind=kind, coil_type=coil_type, unit=unit,\n coord_frame=coord_frame, ch_name=str(ch_name),\n scanno=ci + 1, logno=ci + 1)\n info['chs'].append(chan_info)\n\n info._update_redundant()\n info._check_consistency()\n return info\n\n\nRAW_INFO_FIELDS = (\n 'acq_pars', 'acq_stim', 'bads', 'ch_names', 'chs',\n 'comps', 'ctf_head_t', 'custom_ref_applied', 'description', 'dev_ctf_t',\n 'dev_head_t', 'dig', 'experimenter', 'events', 'utc_offset', 'device_info',\n 'file_id', 'highpass', 'hpi_meas', 'hpi_results', 'helium_info',\n 'hpi_subsystem', 'kit_system_id', 'line_freq', 'lowpass', 'meas_date',\n 'meas_id', 'nchan', 'proj_id', 'proj_name', 'projs', 'sfreq',\n 'subject_info', 'xplotter_layout', 'proc_history', 'gantry_angle',\n)\n\n\ndef _empty_info(sfreq):\n \"\"\"Create an empty info dictionary.\"\"\"\n _none_keys = (\n 'acq_pars', 
'acq_stim', 'ctf_head_t', 'description',\n 'dev_ctf_t', 'dig', 'experimenter', 'utc_offset', 'device_info',\n 'file_id', 'highpass', 'hpi_subsystem', 'kit_system_id', 'helium_info',\n 'line_freq', 'lowpass', 'meas_date', 'meas_id', 'proj_id', 'proj_name',\n 'subject_info', 'xplotter_layout', 'gantry_angle',\n )\n _list_keys = ('bads', 'chs', 'comps', 'events', 'hpi_meas', 'hpi_results',\n 'projs', 'proc_history')\n info = Info()\n for k in _none_keys:\n info[k] = None\n for k in _list_keys:\n info[k] = list()\n info['custom_ref_applied'] = FIFF.FIFFV_MNE_CUSTOM_REF_OFF\n info['highpass'] = 0.\n info['sfreq'] = float(sfreq)\n info['lowpass'] = info['sfreq'] / 2.\n info['dev_head_t'] = Transform('meg', 'head')\n info._update_redundant()\n info._check_consistency()\n return info\n\n\ndef _force_update_info(info_base, info_target):\n \"\"\"Update target info objects with values from info base.\n\n Note that values in info_target will be overwritten by those in info_base.\n This will overwrite all fields except for: 'chs', 'ch_names', 'nchan'.\n\n Parameters\n ----------\n info_base : mne.Info\n The Info object you want to use for overwriting values\n in target Info objects.\n info_target : mne.Info | list of mne.Info\n The Info object(s) you wish to overwrite using info_base. These objects\n will be modified in-place.\n \"\"\"\n exclude_keys = ['chs', 'ch_names', 'nchan']\n info_target = np.atleast_1d(info_target).ravel()\n all_infos = np.hstack([info_base, info_target])\n for ii in all_infos:\n if not isinstance(ii, Info):\n raise ValueError('Inputs must be of type Info. '\n 'Found type %s' % type(ii))\n for key, val in info_base.items():\n if key in exclude_keys:\n continue\n for i_targ in info_target:\n i_targ[key] = val\n\n\ndef _add_timedelta_to_stamp(meas_date_stamp, delta_t):\n \"\"\"Add a timedelta to a meas_date tuple.\"\"\"\n if meas_date_stamp is not None:\n meas_date_stamp = _dt_to_stamp(_stamp_to_dt(meas_date_stamp) + delta_t)\n return meas_date_stamp\n\n\n@verbose\ndef anonymize_info(info, daysback=None, keep_his=False, verbose=None):\n \"\"\"Anonymize measurement information in place.\n\n .. warning:: If ``info`` is part of an object like\n :class:`raw.info <mne.io.Raw>`, you should directly use\n the method :meth:`raw.anonymize() <mne.io.Raw.anonymize>`\n to ensure that all parts of the data are anonymized and\n stay synchronized (e.g.,\n :class:`raw.annotations <mne.Annotations>`).\n\n Parameters\n ----------\n %(info_not_none)s\n %(anonymize_info_parameters)s\n %(verbose)s\n\n Returns\n -------\n info : instance of Info\n The anonymized measurement information.\n\n Notes\n -----\n %(anonymize_info_notes)s\n \"\"\"\n _validate_type(info, 'info', \"self\")\n\n default_anon_dos = datetime.datetime(2000, 1, 1, 0, 0, 0,\n tzinfo=datetime.timezone.utc)\n default_str = \"mne_anonymize\"\n default_subject_id = 0\n default_sex = 0\n default_desc = (\"Anonymized using a time shift\"\n \" to preserve age at acquisition\")\n\n none_meas_date = info['meas_date'] is None\n\n if none_meas_date:\n if daysback is not None:\n warn('Input info has \"meas_date\" set to None. 
'\n 'Removing all information from time/date structures, '\n '*NOT* performing any time shifts!')\n else:\n # compute timeshift delta\n if daysback is None:\n delta_t = info['meas_date'] - default_anon_dos\n else:\n delta_t = datetime.timedelta(days=daysback)\n info['meas_date'] = info['meas_date'] - delta_t\n\n # file_id and meas_id\n for key in ('file_id', 'meas_id'):\n value = info.get(key)\n if value is not None:\n assert 'msecs' not in value\n if (none_meas_date or\n ((value['secs'], value['usecs']) == DATE_NONE)):\n # Don't try to shift backwards in time when no measurement\n # date is available or when file_id is already a place holder\n tmp = DATE_NONE\n else:\n tmp = _add_timedelta_to_stamp(\n (value['secs'], value['usecs']), -delta_t)\n value['secs'] = tmp[0]\n value['usecs'] = tmp[1]\n # The following copy is needed for a test CTF dataset\n # otherwise value['machid'][:] = 0 would suffice\n _tmp = value['machid'].copy()\n _tmp[:] = 0\n value['machid'] = _tmp\n\n # subject info\n subject_info = info.get('subject_info')\n if subject_info is not None:\n if subject_info.get('id') is not None:\n subject_info['id'] = default_subject_id\n if keep_his:\n logger.info('Not fully anonymizing info - keeping '\n 'his_id, sex, and hand info')\n else:\n if subject_info.get('his_id') is not None:\n subject_info['his_id'] = str(default_subject_id)\n if subject_info.get('sex') is not None:\n subject_info['sex'] = default_sex\n if subject_info.get('hand') is not None:\n del subject_info['hand'] # there's no \"unknown\" setting\n\n for key in ('last_name', 'first_name', 'middle_name'):\n if subject_info.get(key) is not None:\n subject_info[key] = default_str\n\n # anonymize the subject birthday\n if none_meas_date:\n subject_info.pop('birthday', None)\n elif subject_info.get('birthday') is not None:\n dob = datetime.datetime(subject_info['birthday'][0],\n subject_info['birthday'][1],\n subject_info['birthday'][2])\n dob -= delta_t\n subject_info['birthday'] = dob.year, dob.month, dob.day\n\n for key in ('weight', 'height'):\n if subject_info.get(key) is not None:\n subject_info[key] = 0\n\n info['experimenter'] = default_str\n info['description'] = default_desc\n\n if info['proj_id'] is not None:\n info['proj_id'] = np.zeros_like(info['proj_id'])\n if info['proj_name'] is not None:\n info['proj_name'] = default_str\n if info['utc_offset'] is not None:\n info['utc_offset'] = None\n\n proc_hist = info.get('proc_history')\n if proc_hist is not None:\n for record in proc_hist:\n record['block_id']['machid'][:] = 0\n record['experimenter'] = default_str\n if none_meas_date:\n record['block_id']['secs'] = DATE_NONE[0]\n record['block_id']['usecs'] = DATE_NONE[1]\n record['date'] = DATE_NONE\n else:\n this_t0 = (record['block_id']['secs'],\n record['block_id']['usecs'])\n this_t1 = _add_timedelta_to_stamp(\n this_t0, -delta_t)\n record['block_id']['secs'] = this_t1[0]\n record['block_id']['usecs'] = this_t1[1]\n record['date'] = _add_timedelta_to_stamp(\n record['date'], -delta_t)\n\n hi = info.get('helium_info')\n if hi is not None:\n if hi.get('orig_file_guid') is not None:\n hi['orig_file_guid'] = default_str\n if none_meas_date and hi.get('meas_date') is not None:\n hi['meas_date'] = DATE_NONE\n elif hi.get('meas_date') is not None:\n hi['meas_date'] = _add_timedelta_to_stamp(\n hi['meas_date'], -delta_t)\n\n di = info.get('device_info')\n if di is not None:\n for k in ('serial', 'site'):\n if di.get(k) is not None:\n di[k] = default_str\n\n err_mesg = ('anonymize_info generated an inconsistent info 
object. '\n 'Underlying Error:\\n')\n info._check_consistency(prepend_error=err_mesg)\n err_mesg = ('anonymize_info generated an inconsistent info object. '\n 'daysback parameter was too large. '\n 'Underlying Error:\\n')\n _check_dates(info, prepend_error=err_mesg)\n\n return info\n\n\n@fill_doc\ndef _bad_chans_comp(info, ch_names):\n \"\"\"Check if channel names are consistent with current compensation status.\n\n Parameters\n ----------\n %(info_not_none)s\n\n ch_names : list of str\n The channel names to check.\n\n Returns\n -------\n status : bool\n True if compensation is *currently* in use but some compensation\n channels are not included in picks\n\n False if compensation is *currently* not being used\n or if compensation is being used and all compensation channels\n in info and included in picks.\n\n missing_ch_names: array-like of str, shape (n_missing,)\n The names of compensation channels not included in picks.\n Returns [] if no channels are missing.\n\n \"\"\"\n if 'comps' not in info:\n # should this be thought of as a bug?\n return False, []\n\n # only include compensation channels that would affect selected channels\n ch_names_s = set(ch_names)\n comp_names = []\n for comp in info['comps']:\n if len(ch_names_s.intersection(comp['data']['row_names'])) > 0:\n comp_names.extend(comp['data']['col_names'])\n comp_names = sorted(set(comp_names))\n\n missing_ch_names = sorted(set(comp_names).difference(ch_names))\n\n if get_current_comp(info) != 0 and len(missing_ch_names) > 0:\n return True, missing_ch_names\n\n return False, missing_ch_names\n\n\n_DIG_CAST = dict(\n kind=int, ident=int, r=lambda x: x, coord_frame=int)\n# key -> const, cast, write\n_CH_INFO_MAP = OrderedDict(\n scanno=(FIFF.FIFF_CH_SCAN_NO, int, write_int),\n logno=(FIFF.FIFF_CH_LOGICAL_NO, int, write_int),\n kind=(FIFF.FIFF_CH_KIND, int, write_int),\n range=(FIFF.FIFF_CH_RANGE, float, write_float),\n cal=(FIFF.FIFF_CH_CAL, float, write_float),\n coil_type=(FIFF.FIFF_CH_COIL_TYPE, int, write_int),\n loc=(FIFF.FIFF_CH_LOC, lambda x: x, write_float),\n unit=(FIFF.FIFF_CH_UNIT, int, write_int),\n unit_mul=(FIFF.FIFF_CH_UNIT_MUL, int, write_int),\n ch_name=(FIFF.FIFF_CH_DACQ_NAME, str, write_string),\n coord_frame=(FIFF.FIFF_CH_COORD_FRAME, int, write_int),\n)\n# key -> cast\n_CH_CAST = OrderedDict((key, val[1]) for key, val in _CH_INFO_MAP.items())\n# const -> key, cast\n_CH_READ_MAP = OrderedDict((val[0], (key, val[1]))\n for key, val in _CH_INFO_MAP.items())\n\n\n@contextlib.contextmanager\ndef _writing_info_hdf5(info):\n # Make info writing faster by packing chs and dig into numpy arrays\n orig_dig = info.get('dig', None)\n orig_chs = info['chs']\n try:\n if orig_dig is not None and len(orig_dig) > 0:\n info['dig'] = _dict_pack(info['dig'], _DIG_CAST)\n info['chs'] = _dict_pack(info['chs'], _CH_CAST)\n info['chs']['ch_name'] = np.char.encode(\n info['chs']['ch_name'], encoding='utf8')\n yield\n finally:\n if orig_dig is not None:\n info['dig'] = orig_dig\n info['chs'] = orig_chs\n\n\ndef _dict_pack(obj, casts):\n # pack a list of dict into dict of array\n return {key: np.array([o[key] for o in obj]) for key in casts}\n\n\ndef _dict_unpack(obj, casts):\n # unpack a dict of array into a list of dict\n n = len(obj[list(casts)[0]])\n return [{key: cast(obj[key][ii]) for key, cast in casts.items()}\n for ii in range(n)]\n\n\ndef _make_ch_names_mapping(chs):\n orig_ch_names = [c['ch_name'] for c in chs]\n ch_names = orig_ch_names.copy()\n _unique_channel_names(ch_names, max_length=15, verbose='error')\n 
ch_names_mapping = dict()\n if orig_ch_names != ch_names:\n ch_names_mapping.update(zip(orig_ch_names, ch_names))\n return ch_names_mapping\n\n\ndef _write_ch_infos(fid, chs, reset_range, ch_names_mapping):\n ch_names_mapping = dict() if ch_names_mapping is None else ch_names_mapping\n for k, c in enumerate(chs):\n # Scan numbers may have been messed up\n c = c.copy()\n c['ch_name'] = ch_names_mapping.get(c['ch_name'], c['ch_name'])\n assert len(c['ch_name']) <= 15\n c['scanno'] = k + 1\n # for float/double, the \"range\" param is unnecessary\n if reset_range:\n c['range'] = 1.0\n write_ch_info(fid, c)\n # only write new-style channel information if necessary\n if len(ch_names_mapping):\n logger.info(\n ' Writing channel names to FIF truncated to 15 characters '\n 'with remapping')\n for ch in chs:\n start_block(fid, FIFF.FIFFB_CH_INFO)\n assert set(ch) == set(_CH_INFO_MAP)\n for (key, (const, _, write)) in _CH_INFO_MAP.items():\n write(fid, const, ch[key])\n end_block(fid, FIFF.FIFFB_CH_INFO)\n\n\ndef _ensure_infos_match(info1, info2, name, *, on_mismatch='raise'):\n \"\"\"Check if infos match.\n\n Parameters\n ----------\n info1, info2 : instance of Info\n The infos to compare.\n name : str\n The name of the object appearing in the error message if the comparison\n fails.\n on_mismatch : 'raise' | 'warn' | 'ignore'\n What to do in case of a mismatch of ``dev_head_t`` between ``info1``\n and ``info2``.\n \"\"\"\n _check_on_missing(on_missing=on_mismatch, name='on_mismatch')\n\n info1._check_consistency()\n info2._check_consistency()\n\n if info1['nchan'] != info2['nchan']:\n raise ValueError(f'{name}.info[\\'nchan\\'] must match')\n if set(info1['bads']) != set(info2['bads']):\n raise ValueError(f'{name}.info[\\'bads\\'] must match')\n if info1['sfreq'] != info2['sfreq']:\n raise ValueError(f'{name}.info[\\'sfreq\\'] must match')\n if set(info1['ch_names']) != set(info2['ch_names']):\n raise ValueError(f'{name}.info[\\'ch_names\\'] must match')\n if len(info2['projs']) != len(info1['projs']):\n raise ValueError(f'SSP projectors in {name} must be the same')\n if any(not _proj_equal(p1, p2) for p1, p2 in\n zip(info2['projs'], info1['projs'])):\n raise ValueError(f'SSP projectors in {name} must be the same')\n if (info1['dev_head_t'] is None) != (info2['dev_head_t'] is None) or \\\n (info1['dev_head_t'] is not None and not\n np.allclose(info1['dev_head_t']['trans'],\n info2['dev_head_t']['trans'], rtol=1e-6)):\n msg = (f\"{name}.info['dev_head_t'] differs. The \"\n f\"instances probably come from different runs, and \"\n f\"are therefore associated with different head \"\n f\"positions. Manually change info['dev_head_t'] to \"\n f\"avoid this message but beware that this means the \"\n f\"MEG sensors will not be properly spatially aligned. \"\n f\"See mne.preprocessing.maxwell_filter to realign the \"\n f\"runs to a common head position.\")\n _on_missing(on_missing=on_mismatch, msg=msg,\n name='on_mismatch')\n" ]
[ [ "numpy.full", "numpy.array", "numpy.dot", "numpy.array_equal", "numpy.zeros_like", "numpy.isnan", "numpy.char.decode", "numpy.eye", "numpy.allclose", "numpy.atleast_1d", "numpy.arange", "numpy.all", "numpy.char.encode", "numpy.hstack", "numpy.iinfo", "numpy.linalg.inv", "numpy.unique" ] ]
macodroid/strawberry_disease_classification
[ "41900bc958e53077099370d0fb25f2a5ed7c99da" ]
[ "model.py" ]
[ "import torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass ConvNet(nn.Module):\n def __init__(self):\n super(ConvNet, self).__init__()\n # convolution layers\n self.conv1 = nn.Conv2d(in_channels=3, out_channels=64, kernel_size=(3, 3), stride=(2, 2), padding=1)\n self.bn1 = nn.BatchNorm2d(64)\n\n self.conv2 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=(3, 3), stride=(2, 2), padding=1)\n self.bn2 = nn.BatchNorm2d(128)\n\n self.conv3 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=(3, 3), stride=(2, 2), padding=1)\n self.bn3 = nn.BatchNorm2d(256)\n\n self.conv4 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=(3, 3), stride=(2, 2), padding=1)\n self.bn4 = nn.BatchNorm2d(512)\n\n self.conv5 = nn.Conv2d(in_channels=512, out_channels=1024, kernel_size=(3, 3), stride=(2, 2), padding=1)\n self.bn5 = nn.BatchNorm2d(1024)\n\n # pooling\n self.pool = nn.MaxPool2d(2, 2)\n\n # dropout\n self.dropout = nn.Dropout2d(0.5)\n self.dropoutConvLayer = nn.Dropout2d(0.1)\n\n # Fully connected layers\n self.fc1 = nn.Linear(in_features=1024 * 1 * 1, out_features=4096)\n self.fc2 = nn.Linear(in_features=4096, out_features=1024)\n self.fc3 = nn.Linear(in_features=1024, out_features=7)\n\n def forward(self, x):\n # Here we are connecting them\n\n # first layer\n x = self.conv1(x)\n x = self.dropoutConvLayer(x)\n x = self.bn1(x)\n x = F.relu(x)\n x = self.pool(x)\n\n # second layer\n x = self.conv2(x)\n x = self.dropoutConvLayer(x)\n x = self.bn2(x)\n x = F.relu(x)\n x = self.pool(x)\n\n # third layer\n x = self.conv3(x)\n x = self.dropoutConvLayer(x)\n x = self.bn3(x)\n x = F.relu(x)\n x = self.pool(x)\n\n # forth layer\n x = self.conv4(x)\n x = self.dropoutConvLayer(x)\n x = self.bn4(x)\n x = F.relu(x)\n x = self.pool(x)\n\n # fifth layer\n x = self.conv5(x)\n x = self.dropoutConvLayer(x)\n x = self.bn5(x)\n x = F.relu(x)\n\n x = x.view(-1, 1024 * 1 * 1)\n x = nn.functional.relu(self.fc1(self.dropout(x)))\n x = nn.functional.relu(self.fc2(self.dropout(x)))\n x = self.fc3(x)\n return x\n" ]
[ [ "torch.nn.Linear", "torch.nn.MaxPool2d", "torch.nn.BatchNorm2d", "torch.nn.Conv2d", "torch.nn.functional.relu", "torch.nn.Dropout2d" ] ]
renie26/CR_motion
[ "5e73f4e788b5c6345af8703918f3fc9dfa90faae" ]
[ "scripts/pre-prosessing/json2txt.py" ]
[ "# python ../json2txt.py face00103135 \"1\"\n\nimport os\nimport numpy as np\nimport pandas as pd\nimport sys\nimport pickle\n\nsearchterm = sys.argv[1] + '_keypoints.csv'\nmodel = int(sys.argv[2])\n\n# 1: upper body, 2: lower body, 3: left side, 4: right side, 5: legs and arms\nmodel_col_set = [['#head_JA', '#r_shoulder_JA', '#l_shoulder_JA', '#r_upleg_JA','#l_upleg_JA','#r_elbow_JA', '#l_elbow_JA', '#upbody_JA'],\n ['#waist_JA','#r_body_JA', '#l_body_JA', '#r_knee_JA', '#l_knee_JA'],\n ['#head_JA', '#l_shoulder_JA','#l_upleg_JA','#l_elbow_JA','#l_body_JA','#l_knee_JA'],\n [\"#head_JA\", '#r_shoulder_JA', '#r_upleg_JA','#r_elbow_JA', '#upbody_JA','#r_body_JA','#r_knee_JA'],\n ['#r_upleg_JA','#l_upleg_JA','#r_elbow_JA', '#l_elbow_JA','#r_knee_JA','#l_knee_JA']]\nmodel_col = []\nmodel_col = model_col_set[model -1]\n\nroot = os.getcwd()\n\ndirs = os.listdir(root)\n\n#data\njAngleSet = {}\ndataset_angles = dict()\n\ntempJointAngle = pd.read_csv(searchterm)\ntempJointAngle.columns = ['#head_JA', '#r_shoulder_JA', '#l_shoulder_JA', '#r_upleg_JA','#l_upleg_JA','#r_elbow_JA', '#l_elbow_JA', '#upbody_JA', '#waist_JA','#r_body_JA', '#l_body_JA', '#r_knee_JA', '#l_knee_JA']\nX = tempJointAngle[model_col] \ndataset_angles.update({searchterm:X})\n\ni = 1\nfor file in dirs:\n if file !=searchterm:\n if file.endswith((\".csv\")):\n tempJointAngle = pd.read_csv(file)\n tempJointAngle.columns = ['#head_JA', '#r_shoulder_JA', '#l_shoulder_JA', '#r_upleg_JA','#l_upleg_JA','#r_elbow_JA', '#l_elbow_JA', '#upbody_JA', '#waist_JA','#r_body_JA', '#l_body_JA', '#r_knee_JA', '#l_knee_JA']\n X = tempJointAngle[model_col] \n dataset_angles.update({file:X})\n i = i+1\n\nwith open('../outputFile_angles.txt','wb') as outputFile :\n pickle.dump(dataset_angles,outputFile)" ]
[ [ "pandas.read_csv" ] ]
eteters/Sampling-NCRF-BIP-Final
[ "59bf3d0bb1e571cfaa8dc70916bd54faff9d4e04" ]
[ "wsi/utils/make_tiff.py" ]
[ "import openslide\nfrom PIL import Image\n\nimport numpy as np\nimport json\nimport libtiff\nimport sys\nsys.setrecursionlimit(200000)\n\n\n\n\n\n\n\n\n# Work in progress\nfilename = \"Tumor_001.json\"\nreal_image_name = \"tumor_001.tif\"\nnew_image_name = \"tumor_mask_001.tiff\"\nlevel = 5\n\n\nwith open(filename, 'r') as f:\n tumor_dict = json.load(f)\n print(\"loaded json\")\n slide = openslide.OpenSlide(real_image_name)\n print(\"opened slide\")\n\n mask = np.zeros(slide.level_dimensions[level])\n print(slide.level_dimensions[level])\n print(\"closing slide\")\n scaley = slide.level_dimensions[0][0]/slide.level_dimensions[level][0]\n scalex = slide.level_dimensions[0][1]/slide.level_dimensions[level][1]\n slide.close()\n\n tumors = tumor_dict[\"positive\"]\n for tumor in tumors:\n vertices = tumor[\"vertices\"]\n x_u = 0\n y_u = 0\n i=0\n for vertex in vertices:\n x, y = vertex[0], vertex[1]\n x_u +=int(x/(scalex))\n y_u +=int(y/(scaley))\n print(int(x/(scalex)),int(y/(scaley)))\n mask[int(x/(scalex)),int(y/(scaley))] = 1\n mask[int(x/(scalex))+1,int(y/(scaley))+1] = 1\n mask[int(x/(scalex))+1,int(y/(scaley))-1] = 1\n mask[int(x/(scalex))-1,int(y/(scaley))+1] = 1\n mask[int(x/(scalex))-1,int(y/(scaley))-1] = 1\n mask[int(x/(scalex)) ,int(y/(scaley))+1] = 1\n mask[int(x/(scalex)) ,int(y/(scaley))-1] = 1\n mask[int(x/(scalex))+1,int(y/(scaley)) ] = 1\n mask[int(x/(scalex))-1,int(y/(scaley)) ] = 1\n mask[int(x/(scalex)),int(y/(scaley))] = 1\n mask[int(x/(scalex))+2,int(y/(scaley))+2] = 1\n mask[int(x/(scalex))+2,int(y/(scaley))-2] = 1\n mask[int(x/(scalex))-2,int(y/(scaley))+2] = 1\n mask[int(x/(scalex))-2,int(y/(scaley))-2] = 1\n mask[int(x/(scalex)) ,int(y/(scaley))+2] = 1\n mask[int(x/(scalex)) ,int(y/(scaley))-2] = 1\n mask[int(x/(scalex))+2,int(y/(scaley)) ] = 1\n mask[int(x/(scalex))-2,int(y/(scaley)) ] = 1\n\n i+=1\n def fill(x,y,n):\n print(x, y)\n mask[x,y] = 1\n if(n > 30000):\n return\n if mask[x,y+1]==0:\n fill(x,y+1,n+1)\n if mask[x,y-1]==0:\n fill(x,y-1,n+1)\n if mask[x+1,y]==0:\n fill(x+1,y,n+1)\n if mask[x-1,y]==0:\n fill(x-1,y,n+1)\n\n\n return\n fill(int(x_u/i), int(y_u/i),1)\n\n print(\"creating image from mask\")\n image = Image.fromarray(mask)\n print(\"saving image\")\n image.save(new_image_name, \"TIFF\")\n #imsave(new_image_name, mask)\n \n" ]
[ [ "numpy.zeros" ] ]
modsim/junn
[ "a40423b98c6a3739dd0b2ba02d546a5db91f9215" ]
[ "junn/networks/mixins/weighted_loss.py" ]
[ "\"\"\"Weightes loss mixin.\"\"\"\nimport tensorflow as tf\n\nfrom ...common.functions import convolve, get_gaussian_kernel\nfrom .tile_based_network import TilebasedNetwork\n\n\n@tf.function\ndef calculate_weightmap(\n image, sigma=3.5, overlap_ratio=2.5, inner_ratio=0.75, empty=0.25\n):\n \"\"\"\n Calculate a weight map by blurring the border regions.\n\n :param image:\n :param sigma:\n :param overlap_ratio:\n :param inner_ratio:\n :param empty:\n :return:\n \"\"\"\n blurred = convolve(image, get_gaussian_kernel(sigma))\n weightmap = (\n (1 - image) * overlap_ratio * blurred\n + inner_ratio * image\n + (empty * ((image - 1) / -1))\n )\n return weightmap\n\n\n@tf.function\ndef split_weight_off(raw_y_true, y_pred):\n \"\"\"\n Detach weights off a tensor again.\n\n :param raw_y_true:\n :param y_pred:\n :return:\n \"\"\"\n size = tf.shape(raw_y_true)[1]\n\n y_true = raw_y_true[:, : size // 2, :, :]\n y_weight = raw_y_true[:, size // 2 :, :, :]\n\n return y_true, y_pred, y_weight\n\n\nclass WeightedLoss(TilebasedNetwork, TilebasedNetwork.Virtual):\n \"\"\"WeightedLoss mixin for NeuralNetwork s.\"\"\"\n\n def get_training_fn(self, validation: bool = False): # noqa: D102\n parent_fn = super().get_training_fn(validation=validation)\n\n weighted_loss = (\n self.parameters['weighted_loss']\n if 'weighted_loss' in self.parameters\n else False\n )\n\n if weighted_loss:\n\n @tf.function\n def _inner(image, labels):\n image, labels = parent_fn(image, labels)\n weights = calculate_weightmap(labels)\n\n labels_and_weights = tf.concat([labels, weights], axis=0)\n\n return image, labels_and_weights\n\n return _inner\n else:\n return parent_fn\n\n def get_loss(self): # noqa: D102\n weighted_loss = (\n self.parameters['weighted_loss']\n if 'weighted_loss' in self.parameters\n else False\n )\n\n if weighted_loss:\n from ...common.losses import dice_index_weighted\n\n @tf.function\n def _inner(raw_y_true, y_pred):\n y_true, y_pred, y_weight = split_weight_off(raw_y_true, y_pred)\n return -dice_index_weighted(y_true, y_pred, y_weight)\n\n return _inner\n else:\n return super().get_loss()\n\n def get_metrics(self): # noqa: D102\n weighted_loss = (\n self.parameters['weighted_loss']\n if 'weighted_loss' in self.parameters\n else False\n )\n\n metrics = super().get_metrics()\n\n def _process(fun):\n @tf.function\n def _inner(raw_y_true, y_pred):\n y_true, y_pred, _ = split_weight_off(raw_y_true, y_pred)\n return fun(y_true, y_pred)\n\n _inner.__name__ = fun.__name__\n\n return _inner\n\n if weighted_loss:\n return [_process(metric) for metric in metrics]\n else:\n return metrics\n" ]
[ [ "tensorflow.shape", "tensorflow.concat" ] ]
UBC-Solar/Simulation
[ "296ce78bb24b3bd7849d07a62bc544f98f212db1" ]
[ "examples/max_distance_from_speed_using_arrays.py" ]
[ "import numpy as np\n\nimport simulation\nfrom simulation.common import helpers\nfrom simulation.optimization.bayesian import BayesianOptimization\nfrom simulation.optimization.random import RandomOptimization\nfrom simulation.utils.InputBounds import InputBounds\n\n\"\"\"\nDescription: Given an hourly driving speed, find the range at the speed\nbefore the battery runs out [speed -> distance].\n\"\"\"\n\n\n@helpers.timeit\ndef main():\n # indicates a constant speed of 35km/h throughout the simulation\n\n input_speed = np.array([50] * 24)\n\n \"\"\"\n Note: it no longer matters how many elements the input_speed array has, the simulation automatically\n reshapes the array depending on the simulation_length. \n\n Examples:\n If you want a constant speed for the entire simulation, insert a single element\n into the input_speed array. \n \n >>> input_speed = np.array([30]) <-- constant speed of 30km/h\n \n If you want 50km/h in the first half of the simulation and 60km/h in the second half,\n do the following:\n\n >>> input_speed = np.array([50, 60])\n \n This logic will apply for all subsequent array lengths (3, 4, 5, etc.)\n \n Keep in mind, however, that the condition len(input_speed) <= simulation_length must be true\n \"\"\"\n\n simulation_model = simulation.Simulation(race_type=\"FSGP\")\n distance_travelled = simulation_model.run_model(speed=input_speed, plot_results=True, verbose=False)\n\n bounds = InputBounds()\n bounds.add_bounds(8, 20, 60)\n optimization = BayesianOptimization(bounds, simulation_model.run_model)\n random_optimization = RandomOptimization(bounds, simulation_model.run_model)\n\n results = optimization.maximize(init_points=3, n_iter=1, kappa=10)\n optimized = simulation_model.run_model(speed=np.fromiter(results, dtype=float), plot_results=True, verbose=False)\n results_random = random_optimization.maximize(iterations=15)\n optimized_random = simulation_model.run_model(speed=np.fromiter(results_random, dtype=float), plot_results=True,\n verbose=False)\n\n print(f'Distance travelled: {distance_travelled}')\n print(f'Optimized results. Max traversable distance: {optimized}')\n print(f'Random results. Max traversable distance: {optimized_random}')\n print(f'Optimized Speeds array: {results}')\n print(f'Random Speeds array: {results_random}')\n\n return distance_travelled\n\n\nif __name__ == \"__main__\":\n # import cProfile\n # import pstats\n #\n # with cProfile.Profile() as pr:\n main()\n\n # stats = pstats.Stats(pr)\n # stats.sort_stats(pstats.SortKey.TIME)\n # stats.print_stats()\n" ]
[ [ "numpy.array", "numpy.fromiter" ] ]
alanhdu/pytorch-lightning
[ "b7a22ba046ba57072a71b12d16caff000e66f798", "b7a22ba046ba57072a71b12d16caff000e66f798" ]
[ "tests/checkpointing/test_checkpoint_callback_frequency.py", "tests/accelerators/test_accelerator_connector.py" ]
[ "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nfrom unittest import mock\n\nimport pytest\nimport torch\n\nfrom pytorch_lightning import callbacks, seed_everything, Trainer\nfrom tests.helpers import BoringModel\nfrom tests.helpers.runif import RunIf\n\n\n@mock.patch.dict(os.environ, {\"PL_DEV_DEBUG\": \"1\"})\ndef test_mc_called(tmpdir):\n seed_everything(1234)\n\n # -----------------\n # TRAIN LOOP ONLY\n # -----------------\n train_step_only_model = BoringModel()\n train_step_only_model.validation_step = None\n\n # no callback\n trainer = Trainer(max_epochs=3, checkpoint_callback=False)\n trainer.fit(train_step_only_model)\n assert len(trainer.dev_debugger.checkpoint_callback_history) == 0\n\n # -----------------\n # TRAIN + VAL LOOP ONLY\n # -----------------\n val_train_model = BoringModel()\n # no callback\n trainer = Trainer(max_epochs=3, checkpoint_callback=False)\n trainer.fit(val_train_model)\n assert len(trainer.dev_debugger.checkpoint_callback_history) == 0\n\n\n@mock.patch('torch.save')\n@pytest.mark.parametrize(\n ['epochs', 'val_check_interval', 'expected'],\n [(1, 1.0, 1), (2, 1.0, 2), (1, 0.25, 4), (2, 0.3, 7)],\n)\ndef test_default_checkpoint_freq(save_mock, tmpdir, epochs: int, val_check_interval: float, expected: int):\n\n model = BoringModel()\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=epochs,\n weights_summary=None,\n val_check_interval=val_check_interval,\n progress_bar_refresh_rate=0,\n )\n trainer.fit(model)\n\n # make sure types are correct\n assert save_mock.call_count == expected\n\n\n@mock.patch('torch.save')\n@pytest.mark.parametrize(['k', 'epochs', 'val_check_interval', 'expected'], [\n (1, 1, 1.0, 1),\n (2, 2, 1.0, 2),\n (2, 1, 0.25, 4),\n (2, 2, 0.3, 7),\n])\ndef test_top_k(save_mock, tmpdir, k: int, epochs: int, val_check_interval: float, expected: int):\n\n class TestModel(BoringModel):\n\n def __init__(self):\n super().__init__()\n self.last_coeff = 10.0\n\n def training_step(self, batch, batch_idx):\n loss = self.step(torch.ones(32))\n loss = loss / (loss + 0.0000001)\n loss += self.last_coeff\n self.log('my_loss', loss)\n self.last_coeff *= 0.999\n return loss\n\n model = TestModel()\n trainer = Trainer(\n callbacks=[callbacks.ModelCheckpoint(dirpath=tmpdir, monitor='my_loss', save_top_k=k)],\n default_root_dir=tmpdir,\n max_epochs=epochs,\n weights_summary=None,\n val_check_interval=val_check_interval\n )\n trainer.fit(model)\n\n # make sure types are correct\n assert save_mock.call_count == expected\n\n\n@mock.patch('torch.save')\n@RunIf(special=True, min_gpus=2)\n@pytest.mark.parametrize(['k', 'epochs', 'val_check_interval', 'expected'], [(1, 1, 1.0, 1), (2, 2, 0.3, 5)])\ndef test_top_k_ddp(save_mock, tmpdir, k, epochs, val_check_interval, expected):\n\n class TestModel(BoringModel):\n\n def training_step(self, batch, batch_idx):\n local_rank = int(os.getenv(\"LOCAL_RANK\"))\n self.log('my_loss', batch_idx * (1 + local_rank), on_epoch=True)\n return 
super().training_step(batch, batch_idx)\n\n def training_epoch_end(self, outputs) -> None:\n data = str(self.global_rank)\n obj = [[data], (data, ), set(data)]\n out = self.trainer.training_type_plugin.broadcast(obj)\n assert obj == [[str(self.global_rank)], (str(self.global_rank), ), set(str(self.global_rank))]\n assert out == [['0'], ('0', ), set('0')]\n\n model = TestModel()\n trainer = Trainer(\n callbacks=[callbacks.ModelCheckpoint(dirpath=tmpdir, monitor='my_loss_step', save_top_k=k, mode=\"max\")],\n default_root_dir=tmpdir,\n max_epochs=epochs,\n weights_summary=None,\n val_check_interval=val_check_interval,\n accelerator=\"ddp\",\n gpus=2,\n limit_train_batches=64,\n limit_val_batches=32,\n )\n if os.getenv(\"LOCAL_RANK\") == \"0\":\n with pytest.raises(UserWarning, match=\"The value associated to the key my_loss_epoch: [15.5, 31.0]\"):\n trainer.fit(model)\n assert save_mock.call_count == expected\n else:\n trainer.fit(model)\n", "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License\n\nimport os\nfrom typing import Optional\nfrom unittest import mock\n\nimport pytest\nimport torch\n\nfrom pytorch_lightning import Trainer\nfrom pytorch_lightning.accelerators.accelerator import Accelerator\nfrom pytorch_lightning.accelerators.cpu import CPUAccelerator\nfrom pytorch_lightning.accelerators.gpu import GPUAccelerator\nfrom pytorch_lightning.callbacks import Callback\nfrom pytorch_lightning.plugins import (\n DDP2Plugin,\n DDPPlugin,\n DDPShardedPlugin,\n DDPSpawnPlugin,\n DDPSpawnShardedPlugin,\n DeepSpeedPlugin,\n ParallelPlugin,\n PrecisionPlugin,\n SingleDevicePlugin,\n)\nfrom pytorch_lightning.plugins.environments import LightningEnvironment, SLURMEnvironment, TorchElasticEnvironment\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom tests.helpers.boring_model import BoringModel\nfrom tests.helpers.runif import RunIf\n\n\ndef test_accelerator_choice_cpu(tmpdir):\n trainer = Trainer(\n default_root_dir=tmpdir,\n fast_dev_run=True,\n )\n assert isinstance(trainer.accelerator, CPUAccelerator)\n assert isinstance(trainer.training_type_plugin, SingleDevicePlugin)\n\n\ndef test_accelerator_choice_ddp_cpu(tmpdir):\n trainer = Trainer(\n fast_dev_run=True,\n accelerator='ddp_cpu',\n )\n assert isinstance(trainer.accelerator, CPUAccelerator)\n assert isinstance(trainer.training_type_plugin, DDPSpawnPlugin)\n assert isinstance(trainer.training_type_plugin.cluster_environment, LightningEnvironment)\n\n\n@mock.patch.dict(os.environ, {\"CUDA_VISIBLE_DEVICES\": \"0,1\"})\n@mock.patch('torch.cuda.device_count', return_value=2)\n@mock.patch('torch.cuda.is_available', return_value=True)\ndef test_accelerator_choice_ddp(cuda_available_mock, device_count_mock):\n trainer = Trainer(\n fast_dev_run=True,\n accelerator='ddp',\n gpus=1,\n )\n assert isinstance(trainer.accelerator, GPUAccelerator)\n assert isinstance(trainer.training_type_plugin, DDPPlugin)\n assert isinstance(trainer.training_type_plugin.cluster_environment, 
LightningEnvironment)\n\n\n@mock.patch.dict(os.environ, {\"CUDA_VISIBLE_DEVICES\": \"0,1\"})\n@mock.patch('torch.cuda.device_count', return_value=2)\n@mock.patch('torch.cuda.is_available', return_value=True)\ndef test_accelerator_choice_ddp_spawn(cuda_available_mock, device_count_mock):\n trainer = Trainer(\n fast_dev_run=True,\n accelerator='ddp_spawn',\n gpus=1,\n )\n assert isinstance(trainer.accelerator, GPUAccelerator)\n assert isinstance(trainer.training_type_plugin, DDPSpawnPlugin)\n assert isinstance(trainer.training_type_plugin.cluster_environment, LightningEnvironment)\n\n\n@RunIf(min_gpus=2)\n@mock.patch.dict(\n os.environ, {\n \"CUDA_VISIBLE_DEVICES\": \"0,1\",\n \"SLURM_NTASKS\": \"2\",\n \"SLURM_JOB_NAME\": \"SOME_NAME\",\n \"SLURM_NODEID\": \"0\",\n \"SLURM_LOCALID\": \"10\"\n }\n)\n@mock.patch('pytorch_lightning.plugins.DDPPlugin.setup_distributed', autospec=True)\ndef test_accelerator_choice_ddp_slurm(setup_distributed_mock):\n\n class CB(Callback):\n\n def on_fit_start(self, trainer, pl_module):\n assert trainer.use_ddp\n assert trainer.accelerator_connector.is_slurm_managing_tasks\n assert isinstance(trainer.accelerator, GPUAccelerator)\n assert isinstance(trainer.training_type_plugin, DDPPlugin)\n assert isinstance(trainer.training_type_plugin.cluster_environment, SLURMEnvironment)\n assert trainer.training_type_plugin.cluster_environment.local_rank() == 10\n assert trainer.training_type_plugin.task_idx == 10\n raise SystemExit()\n\n model = BoringModel()\n trainer = Trainer(\n fast_dev_run=True,\n accelerator='ddp',\n gpus=2,\n callbacks=[CB()],\n )\n\n with pytest.raises(SystemExit):\n trainer.fit(model)\n\n\n@RunIf(min_gpus=1)\n@mock.patch.dict(\n os.environ, {\n \"CUDA_VISIBLE_DEVICES\": \"0,1\",\n \"SLURM_NTASKS\": \"2\",\n \"SLURM_JOB_NAME\": \"SOME_NAME\",\n \"SLURM_NODEID\": \"0\",\n \"LOCAL_RANK\": \"0\",\n \"SLURM_LOCALID\": \"10\"\n }\n)\n@mock.patch('torch.cuda.device_count', return_value=2)\n@mock.patch('pytorch_lightning.plugins.DDPPlugin.setup_distributed', autospec=True)\ndef test_accelerator_choice_ddp2_slurm(device_count_mock, setup_distributed_mock):\n\n class CB(Callback):\n\n def on_fit_start(self, trainer, pl_module):\n assert trainer.use_ddp2\n assert trainer.accelerator_connector.is_slurm_managing_tasks\n assert isinstance(trainer.accelerator, GPUAccelerator)\n assert isinstance(trainer.training_type_plugin, DDP2Plugin)\n assert isinstance(trainer.training_type_plugin.cluster_environment, SLURMEnvironment)\n assert trainer.training_type_plugin.cluster_environment.local_rank() == 10\n assert trainer.training_type_plugin.task_idx == 10\n raise SystemExit()\n\n model = BoringModel()\n trainer = Trainer(\n fast_dev_run=True,\n accelerator='ddp2',\n gpus=2,\n callbacks=[CB()],\n )\n\n with pytest.raises(SystemExit):\n trainer.fit(model)\n\n\n@RunIf(min_gpus=1)\n@mock.patch.dict(os.environ, {\"CUDA_VISIBLE_DEVICES\": \"0,1\", \"WORLD_SIZE\": \"2\", \"LOCAL_RANK\": \"10\", \"NODE_RANK\": \"0\"})\n@mock.patch('torch.cuda.device_count', return_value=2)\n@mock.patch('pytorch_lightning.plugins.DDPPlugin.setup_distributed', autospec=True)\ndef test_accelerator_choice_ddp_te(device_count_mock, setup_distributed_mock):\n\n class CB(Callback):\n\n def on_fit_start(self, trainer, pl_module):\n assert trainer.use_ddp\n assert isinstance(trainer.accelerator, GPUAccelerator)\n assert isinstance(trainer.training_type_plugin, DDPPlugin)\n assert isinstance(trainer.training_type_plugin.cluster_environment, TorchElasticEnvironment)\n assert 
trainer.training_type_plugin.cluster_environment.local_rank() == 10\n assert trainer.training_type_plugin.task_idx == 10\n raise SystemExit()\n\n model = BoringModel()\n trainer = Trainer(\n fast_dev_run=True,\n accelerator='ddp',\n gpus=2,\n callbacks=[CB()],\n )\n\n with pytest.raises(SystemExit):\n trainer.fit(model)\n\n\n@RunIf(min_gpus=1)\n@mock.patch.dict(os.environ, {\"CUDA_VISIBLE_DEVICES\": \"0,1\", \"WORLD_SIZE\": \"2\", \"LOCAL_RANK\": \"10\", \"NODE_RANK\": \"0\"})\n@mock.patch('torch.cuda.device_count', return_value=2)\n@mock.patch('pytorch_lightning.plugins.DDPPlugin.setup_distributed', autospec=True)\ndef test_accelerator_choice_ddp2_te(device_count_mock, setup_distributed_mock):\n\n class CB(Callback):\n\n def on_fit_start(self, trainer, pl_module):\n assert trainer.use_ddp2\n assert isinstance(trainer.accelerator, GPUAccelerator)\n assert isinstance(trainer.training_type_plugin, DDP2Plugin)\n assert isinstance(trainer.training_type_plugin.cluster_environment, TorchElasticEnvironment)\n assert trainer.training_type_plugin.cluster_environment.local_rank() == 10\n assert trainer.training_type_plugin.task_idx == 10\n raise SystemExit()\n\n model = BoringModel()\n trainer = Trainer(\n fast_dev_run=True,\n accelerator='ddp2',\n gpus=2,\n callbacks=[CB()],\n )\n\n with pytest.raises(SystemExit):\n trainer.fit(model)\n\n\n@mock.patch.dict(os.environ, {\n \"WORLD_SIZE\": \"1\",\n \"LOCAL_RANK\": \"10\",\n \"NODE_RANK\": \"0\",\n})\n@mock.patch('torch.cuda.device_count', return_value=0)\n@mock.patch('pytorch_lightning.plugins.DDPPlugin.setup_distributed', autospec=True)\ndef test_accelerator_choice_ddp_cpu_te(device_count_mock, setup_distributed_mock):\n\n class CB(Callback):\n\n def on_fit_start(self, trainer, pl_module):\n assert trainer.use_ddp\n assert isinstance(trainer.accelerator, CPUAccelerator)\n assert isinstance(trainer.training_type_plugin, DDPPlugin)\n assert isinstance(trainer.training_type_plugin.cluster_environment, TorchElasticEnvironment)\n assert trainer.training_type_plugin.cluster_environment.local_rank() == 10\n assert trainer.training_type_plugin.task_idx == 10\n raise SystemExit()\n\n model = BoringModel()\n trainer = Trainer(\n fast_dev_run=True,\n accelerator='ddp_cpu',\n num_processes=2,\n callbacks=[CB()],\n )\n\n with pytest.raises(SystemExit):\n trainer.fit(model)\n\n\n@mock.patch.dict(\n os.environ, {\n \"SLURM_NTASKS\": \"2\",\n \"SLURM_JOB_NAME\": \"SOME_NAME\",\n \"SLURM_NODEID\": \"0\",\n \"LOCAL_RANK\": \"0\",\n \"SLURM_LOCALID\": \"0\"\n }\n)\n@mock.patch('torch.cuda.device_count', return_value=0)\n@mock.patch('pytorch_lightning.plugins.DDPPlugin.setup_distributed', autospec=True)\ndef test_accelerator_choice_ddp_cpu_slurm(device_count_mock, setup_distributed_mock):\n\n class CB(Callback):\n\n def on_fit_start(self, trainer, pl_module):\n assert trainer.use_ddp\n assert trainer.accelerator_connector.is_slurm_managing_tasks\n assert isinstance(trainer.accelerator, CPUAccelerator)\n assert isinstance(trainer.training_type_plugin, DDPPlugin)\n assert isinstance(trainer.training_type_plugin.cluster_environment, SLURMEnvironment)\n assert trainer.training_type_plugin.task_idx == 0\n raise SystemExit()\n\n model = BoringModel()\n trainer = Trainer(\n fast_dev_run=True,\n accelerator='ddp_cpu',\n num_processes=2,\n callbacks=[CB()],\n )\n\n with pytest.raises(SystemExit):\n trainer.fit(model)\n\n\n@mock.patch.dict(\n os.environ, {\n \"SLURM_NTASKS\": \"2\",\n \"SLURM_JOB_NAME\": \"SOME_NAME\",\n \"SLURM_NODEID\": \"0\",\n \"LOCAL_RANK\": \"0\",\n 
\"SLURM_LOCALID\": \"0\"\n }\n)\n@mock.patch('torch.cuda.device_count', return_value=0)\n@mock.patch('pytorch_lightning.plugins.DDPPlugin.setup_distributed', autospec=True)\ndef test_accelerator_choice_ddp_cpu_custom_cluster(device_count_mock, setup_distributed_mock):\n \"\"\"\n Test that we choose the custom cluster even when SLURM or TE flags are around\n \"\"\"\n\n class CustomCluster(LightningEnvironment):\n\n def master_address(self):\n return 'asdf'\n\n def creates_children(self) -> bool:\n return True\n\n class CB(Callback):\n\n def on_fit_start(self, trainer, pl_module):\n assert trainer.use_ddp\n assert isinstance(trainer.accelerator, CPUAccelerator)\n assert isinstance(trainer.training_type_plugin, DDPPlugin)\n assert isinstance(trainer.training_type_plugin.cluster_environment, CustomCluster)\n raise SystemExit()\n\n model = BoringModel()\n trainer = Trainer(\n plugins=[CustomCluster()],\n fast_dev_run=True,\n accelerator='ddp_cpu',\n num_processes=2,\n callbacks=[CB()],\n )\n\n with pytest.raises(SystemExit):\n trainer.fit(model)\n\n\n@mock.patch.dict(\n os.environ, {\n \"SLURM_NTASKS\": \"2\",\n \"SLURM_JOB_NAME\": \"SOME_NAME\",\n \"SLURM_NODEID\": \"0\",\n \"LOCAL_RANK\": \"0\",\n \"SLURM_LOCALID\": \"0\"\n }\n)\n@mock.patch('torch.cuda.device_count', return_value=0)\n@mock.patch('pytorch_lightning.plugins.DDPPlugin.setup_distributed', autospec=True)\ndef test_custom_accelerator(device_count_mock, setup_distributed_mock):\n\n class Accel(Accelerator):\n pass\n\n class Prec(PrecisionPlugin):\n pass\n\n class TrainTypePlugin(SingleDevicePlugin):\n pass\n\n accelerator = Accel(\n training_type_plugin=TrainTypePlugin(device=torch.device(\"cpu\")),\n precision_plugin=Prec(),\n )\n trainer = Trainer(\n accelerator=accelerator,\n fast_dev_run=True,\n num_processes=2,\n )\n assert isinstance(trainer.accelerator, Accel)\n assert isinstance(trainer.training_type_plugin, TrainTypePlugin)\n assert isinstance(trainer.precision_plugin, Prec)\n\n\n@mock.patch.dict(\n os.environ, {\n \"SLURM_NTASKS\": \"2\",\n \"SLURM_JOB_NAME\": \"SOME_NAME\",\n \"SLURM_NODEID\": \"0\",\n \"LOCAL_RANK\": \"0\",\n \"SLURM_LOCALID\": \"0\"\n }\n)\n@mock.patch('torch.cuda.device_count', return_value=0)\n@mock.patch('pytorch_lightning.plugins.DDPPlugin.setup_distributed', autospec=True)\ndef test_dist_backend_accelerator_mapping(device_count_mock, setup_distributed_mock):\n\n class CB(Callback):\n\n def on_fit_start(self, trainer, pl_module):\n assert isinstance(trainer.accelerator, CPUAccelerator)\n assert isinstance(trainer.training_type_plugin, DDPPlugin)\n assert trainer.training_type_plugin.task_idx == 0\n raise SystemExit()\n\n model = BoringModel()\n trainer = Trainer(\n fast_dev_run=True,\n accelerator='ddp_cpu',\n num_processes=2,\n callbacks=[CB()],\n )\n\n with pytest.raises(SystemExit):\n trainer.fit(model)\n\n\n@mock.patch(\"pytorch_lightning.utilities._IS_INTERACTIVE\", return_value=True)\n@mock.patch('torch.cuda.device_count', return_value=2)\ndef test_ipython_incompatible_backend_error(*_):\n with pytest.raises(MisconfigurationException, match=\"backend ddp is not compatible\"):\n Trainer(accelerator=\"ddp\", gpus=2)\n\n with pytest.raises(MisconfigurationException, match=\"backend ddp is not compatible\"):\n Trainer(accelerator=\"ddp_cpu\", num_processes=2)\n\n with pytest.raises(MisconfigurationException, match=\"backend ddp2 is not compatible\"):\n Trainer(accelerator=\"ddp2\", gpus=2)\n\n\n@pytest.mark.parametrize(\n [\"accelerator\", \"plugin\"],\n [('ddp_spawn', 'ddp_sharded'), (None, 
'ddp_sharded')],\n)\ndef test_plugin_accelerator_choice(accelerator: Optional[str], plugin: str):\n \"\"\"Ensure that when a plugin and accelerator is passed in, that the plugin takes precedent.\"\"\"\n trainer = Trainer(accelerator=accelerator, plugins=plugin, num_processes=2)\n assert isinstance(trainer.accelerator.training_type_plugin, DDPShardedPlugin)\n\n trainer = Trainer(plugins=plugin, num_processes=2)\n assert isinstance(trainer.accelerator.training_type_plugin, DDPShardedPlugin)\n\n\n@pytest.mark.parametrize([\"accelerator\", \"plugin\"], [\n ('ddp', DDPPlugin),\n ('ddp_spawn', DDPSpawnPlugin),\n ('ddp_sharded', DDPShardedPlugin),\n ('ddp_sharded_spawn', DDPSpawnShardedPlugin),\n pytest.param('deepspeed', DeepSpeedPlugin, marks=RunIf(deepspeed=True)),\n])\n@mock.patch('torch.cuda.is_available', return_value=True)\n@mock.patch('torch.cuda.device_count', return_value=2)\ndef test_accelerator_choice_multi_node_gpu(\n mock_is_available, mock_device_count, tmpdir, accelerator: str, plugin: ParallelPlugin\n):\n trainer = Trainer(\n accelerator=accelerator,\n default_root_dir=tmpdir,\n num_nodes=2,\n gpus=2,\n )\n assert isinstance(trainer.training_type_plugin, plugin)\n" ]
[ [ "torch.ones" ], [ "torch.device" ] ]
taoxianpeng/pytorch-AutoEncoders
[ "cb0edd050c763f0fc29f8275c266b151f5d5dca9" ]
[ "src/StackedAutoEncoder/models.py" ]
[ "import torch\nfrom torch import nn, optim, functional, utils\nimport torchvision\nfrom torchvision import datasets, utils\n\nimport time, os\n\n\nclass AutoEncoderLayer(nn.Module):\n \"\"\"\n fully-connected linear layers for stacked autoencoders.\n This module can automatically be trained when training each layer is enabled\n Yes, this is much like the simplest auto-encoder\n \"\"\"\n\n def __init__(self, input_dim=None, output_dim=None, SelfTraining=False):\n super(AutoEncoderLayer, self).__init__()\n # if input_dim is None or output_dim is None:\n # raise ValueError\n self.in_features = input_dim\n self.out_features = output_dim\n self.is_training_self = SelfTraining # 指示是否进行逐层预训练,还是训练整个网络\n self.encoder = nn.Sequential(\n nn.Linear(self.in_features, self.out_features, bias=True),\n nn.Sigmoid() # 统一使用Sigmoid激活\n )\n self.decoder = nn.Sequential( # 此处decoder不使用encoder的转置, 并使用Sigmoid进行激活.\n nn.Linear(self.out_features, self.in_features, bias=True),\n nn.Sigmoid()\n )\n\n def forward(self, x):\n out = self.encoder(x)\n if self.is_training_self:\n return self.decoder(out)\n else:\n return out\n\n def lock_grad(self):\n for param in self.parameters():\n param.requires_grad = False\n\n def acquire_grad(self):\n for param in self.parameters():\n param.requires_grad = True\n\n @property\n def input_dim(self):\n return self.in_features\n\n @property\n def output_dim(self):\n return self.out_features\n\n @property\n def is_training_layer(self):\n return self.is_training_self\n\n @is_training_layer.setter\n def is_training_layer(self, other: bool):\n self.is_training_self = other\n\n\nclass StackedAutoEncoder(nn.Module):\n \"\"\"\n Construct the whole network with layers_list\n > 栈式自编码器的架构一般是关于中间隐层对称的\n \"\"\"\n\n def __init__(self, layers_list=None):\n super(StackedAutoEncoder, self).__init__()\n self.layers_list = layers_list\n self.initialize()\n self.encoder_1 = self.layers_list[0]\n self.encoder_2 = self.layers_list[1]\n self.encoder_3 = self.layers_list[2]\n self.encoder_4 = self.layers_list[3]\n\n def initialize(self):\n for layer in self.layers_list:\n # assert isinstance(layer, AutoEncoderLayer)\n layer.is_training_layer = False\n # for param in layer.parameters():\n # param.requires_grad = True\n\n def forward(self, x):\n out = x\n # for layer in self.layers_list:\n # out = layer(out)\n out = self.encoder_1(out)\n out = self.encoder_2(out)\n out = self.encoder_3(out)\n out = self.encoder_4(out)\n return out\n" ]
[ [ "torch.nn.Linear", "torch.nn.Sigmoid" ] ]
uvidyadharan/fmltc
[ "a35e3d3ff6b34e4a2b1084822e0546996d87d9bf" ]
[ "server/tflite_creator.py" ]
[ "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n__author__ = \"lizlooney@google.com (Liz Looney)\"\n\n# Python Standard Library\nimport logging\nimport os\nimport shutil\nimport uuid\n\n# Other Modules\nfrom google.protobuf import text_format\nfrom object_detection import export_tflite_graph_lib_tf2\nfrom object_detection.protos import pipeline_pb2\nimport tensorflow as tf\nfrom tflite_support.metadata_writers import object_detector\nfrom tflite_support.metadata_writers import writer_utils\n\n# My Modules\nimport action\nimport blob_storage\nimport exceptions\nimport storage\n\n\ndef trigger_create_tflite(team_uuid, model_uuid):\n action_parameters = action.create_action_parameters(\n team_uuid, action.ACTION_NAME_CREATE_TFLITE)\n action_parameters['team_uuid'] = team_uuid\n action_parameters['model_uuid'] = model_uuid\n action.trigger_action_via_blob(action_parameters)\n\ndef create_tflite(action_parameters):\n team_uuid = action_parameters['team_uuid']\n model_uuid = action_parameters['model_uuid']\n\n model_entity = storage.retrieve_model_entity(team_uuid, model_uuid)\n model_folder = model_entity['model_folder']\n\n # The following code is inspired by\n # https://colab.sandbox.google.com/github/tensorflow/models/blob/master/research/object_detection/colab_tutorials/convert_odt_model_to_TFLite.ipynb\n # and\n # https://github.com/tensorflow/models/blob/b3483b3942ab9bddc94fcbc5bd00fc790d1ddfcb/research/object_detection/export_tflite_graph_tf2.py\n\n if not blob_storage.tflite_saved_model_exists(model_folder):\n # Export TFLite inference graph.\n pipeline_config_path = blob_storage.get_pipeline_config_path(model_folder)\n pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()\n with tf.io.gfile.GFile(pipeline_config_path, 'r') as f:\n text_format.Parse(f.read(), pipeline_config)\n trained_checkpoint_path = model_entity['trained_checkpoint_path']\n if trained_checkpoint_path == '':\n message = 'Error: Trained checkpoint not found for model_uuid=%s.' 
% model_uuid\n logging.critical(message)\n raise exceptions.HttpErrorNotFound(message)\n trained_checkpoint_dir = trained_checkpoint_path[:trained_checkpoint_path.rindex('/')]\n output_directory = blob_storage.get_tflite_folder_path(model_folder)\n max_detections = 10 # This matches the default for TFObjectDetector.Parameters.maxNumDetections in the the FTC SDK.\n export_tflite_graph_lib_tf2.export_tflite_model(pipeline_config, trained_checkpoint_dir,\n output_directory, max_detections, use_regular_nms=False)\n\n action.retrigger_if_necessary(action_parameters)\n\n if not blob_storage.tflite_quantized_model_exists(model_folder):\n # Convert to a quantized tflite model\n saved_model_path = blob_storage.get_tflite_saved_model_path(model_folder)\n converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_path)\n converter.optimizations = [tf.lite.Optimize.DEFAULT] # DEFAULT means the tflite model will be quantized.\n tflite_quantized_model = converter.convert()\n blob_storage.store_tflite_quantized_model(model_folder, tflite_quantized_model)\n\n action.retrigger_if_necessary(action_parameters)\n\n if not blob_storage.tflite_label_map_txt_exists(model_folder):\n # Create the label map.\n blob_storage.store_tflite_label_map_txt(model_folder,\n '\\n'.join(model_entity['sorted_label_list']))\n\n action.retrigger_if_necessary(action_parameters)\n\n if not blob_storage.tflite_model_with_metadata_exists(model_folder):\n # Add Metadata\n # Make a temporary directory\n folder = '/tmp/tflite_creater/%s' % str(uuid.uuid4().hex)\n os.makedirs(folder, exist_ok=True)\n try:\n quantized_model_filename = '%s/quantized_model' % folder\n blob_storage.write_tflite_quantized_model_to_file(model_folder, quantized_model_filename)\n label_map_txt_filename = '%s/label_map.txt' % folder\n blob_storage.write_tflite_label_map_txt_to_file(model_folder, label_map_txt_filename)\n model_with_metadata_filename = '%s/model_with_metadata.tflite' % folder\n\n writer = object_detector.MetadataWriter.create_for_inference(\n writer_utils.load_file(quantized_model_filename),\n input_norm_mean=[127.5], input_norm_std=[127.5],\n label_file_paths=[label_map_txt_filename])\n writer_utils.save_file(writer.populate(), model_with_metadata_filename)\n\n blob_storage.store_tflite_model_with_metadata(model_folder, model_with_metadata_filename)\n finally:\n # Delete the temporary directory.\n shutil.rmtree(folder)\n" ]
[ [ "tensorflow.lite.TFLiteConverter.from_saved_model", "tensorflow.io.gfile.GFile" ] ]
elijahr/python-portaudio
[ "8434396cf7a9faa8934cab289749daf08b04d0b3" ]
[ "src/portaudio/stream_helpers.py" ]
[ "import asyncio\nfrom functools import reduce\nfrom operator import mul\n\nfrom typing import TYPE_CHECKING, Any, Union\nfrom ringbuf import Array, concatenate\n\nfrom . import exceptions, pa\n\ntry:\n import numpy as np\nexcept ImportError:\n np = None\n\nif TYPE_CHECKING:\n from . import streams\n\n\nasync def stream_read(\n self: 'streams.Stream',\n count: int,\n spin_wait: Union[int, float] = 0) -> Union[Array, None]:\n if not self.running or self.stopping:\n exceptions.check_error(pa.STREAM_IS_STOPPED)\n\n if not self.is_input:\n exceptions.check_error(pa.CAN_NOT_READ_FROM_AN_OUTPUT_ONLY_STREAM)\n\n buffer = self.buffer[0]\n parts = []\n count *= self.config[0].channels\n total = 0\n while True:\n # spin until all requested data has been popped, or timeout\n popped = buffer.pop(count - total)\n if popped is not None:\n total += len(popped)\n parts.append(popped)\n if total < count:\n await asyncio.sleep(spin_wait)\n if not self.running or self.stopping:\n exceptions.check_error(pa.STREAM_IS_STOPPED)\n else:\n break\n\n if len(parts) == 1:\n return parts[0]\n elif len(parts):\n return concatenate(*parts)\n return None\n\n\nasync def stream_write(\n self: 'streams.Stream',\n data: Any,\n spin_wait: Union[int, float] = 0) -> None:\n\n if not self.running or self.stopping:\n exceptions.check_error(pa.STREAM_IS_STOPPED)\n\n if not self.is_output:\n exceptions.check_error(pa.CAN_NOT_WRITE_TO_AN_INPUT_ONLY_STREAM)\n\n buffer = self.buffer[1]\n remaining = unshape(data, interleaved=self.config[1].interleaved)\n\n while True:\n # spin until all data has been pushed\n remaining = buffer.push(remaining)\n if remaining is not None:\n await asyncio.sleep(spin_wait)\n if not self.running:\n exceptions.check_error(pa.STREAM_IS_STOPPED)\n else:\n break\n\n while self.output_read_available > 0:\n # spin until all data has been consumed or stream is stopped\n await asyncio.sleep(spin_wait)\n if not self.running or self.stopping:\n exceptions.check_error(pa.STREAM_IS_STOPPED)\n\n\ndef unshape(data: Any, interleaved: bool) -> Any:\n memview = memoryview(data)\n try:\n if memview.ndim > 1:\n if np is None:\n raise ValueError('Only 1-dimensional buffers are supported without numpy')\n else:\n # Reshape, since ringbuf only accepts 1-d data\n shape = (reduce(mul, memview.shape, 1),)\n data = np.array(memview, dtype=memview.format)\n if not interleaved:\n data = data.T\n data = data.reshape(shape)\n data = np.ascontiguousarray(data)\n finally:\n memview.release()\n\n return data\n" ]
[ [ "numpy.ascontiguousarray", "numpy.array" ] ]
Devalent/aws-realtime-predictions
[ "2b8c34a9dc19ad789e38e3350c9ec5fed8522cc1" ]
[ "pipeline/preprocess.py" ]
[ "import argparse\nimport json\nimport logging\nimport pathlib\n\nimport boto3\nimport pandas as pd\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\nlogger.addHandler(logging.StreamHandler())\n\nfeature_columns_names = [\"country\", \"dos\", \"dtype\", \"dbrowser\"]\nclass_column_name = \"category\"\ny_column_name = \"class\"\n\nbase_dir = \"/opt/ml/processing\"\n# base_dir = \"temp\"\n\nif __name__ == \"__main__\":\n logger.debug(\"Starting preprocessing.\")\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--input-columns\", type=str, required=True)\n parser.add_argument(\"--input-classes\", type=str, required=True)\n args = parser.parse_args()\n\n pathlib.Path(f\"{base_dir}\").mkdir(parents=True, exist_ok=True)\n\n input_columns = args.input_columns\n bucket_columns = input_columns.split(\"/\")[2]\n prefix_columns = \"/\".join(input_columns.split(\"/\")[3:])\n print(input_columns)\n\n input_classes = args.input_classes\n bucket_classes = input_classes.split(\"/\")[2]\n prefix_classes = \"/\".join(input_classes.split(\"/\")[3:])\n print(input_classes)\n\n s3 = boto3.resource(\"s3\")\n s3client = boto3.client(\"s3\")\n\n response_columns = s3client.list_objects_v2(\n Bucket=bucket_columns,\n Prefix=prefix_columns,\n )\n key_columns = response_columns['Contents'][0]['Key']\n\n logger.info(\"Downloading columns data from bucket: %s, key: %s\", bucket_classes, key_columns)\n file_columns = f\"{base_dir}/columns.csv\"\n\n s3.Bucket(bucket_columns).download_file(key_columns, file_columns)\n\n response_classes = s3client.list_objects_v2(\n Bucket=bucket_classes,\n Prefix=prefix_classes,\n )\n key_classes = response_classes['Contents'][0]['Key']\n\n logger.info(\"Downloading classes data from bucket: %s, key: %s\", bucket_classes, key_classes)\n file_classes = f\"{base_dir}/classes.csv\"\n\n s3.Bucket(bucket_classes).download_file(key_classes, file_classes)\n\n logger.debug(\"Processing columns.\")\n\n pathlib.Path(f\"{base_dir}/columns\").mkdir(parents=True, exist_ok=True)\n \n df_columns = pd.read_csv(file_columns)\n\n with open(f\"{base_dir}/columns/columns.json\", 'w') as f:\n json.dump(list(df_columns.columns.values), f)\n\n logger.debug(\"Processing classes.\")\n\n pathlib.Path(f\"{base_dir}/classes\").mkdir(parents=True, exist_ok=True)\n\n df_classes = pd.read_csv(file_classes)\n dic_offer = {}\n\n for index, row in df_classes.iterrows():\n dic_offer[row['offer']] = int(row['category'])\n\n with open(f\"{base_dir}/classes/classes.json\", 'w') as f:\n json.dump({\n \"classes\": dic_offer,\n \"length\": len(dic_offer),\n \"length_str\": str(len(dic_offer)),\n }, f)\n" ]
[ [ "pandas.read_csv" ] ]
paroque28/tensorflow-yolov4-tflite-1
[ "ad582a20d74a97bda78cbbc78a180ea5c7afa335" ]
[ "convert_trt.py" ]
[ "import tensorflow as tf\nfrom absl import app, flags, logging\nfrom absl.flags import FLAGS\nimport numpy as np\nimport cv2\nfrom tensorflow.python.compiler.tensorrt import trt_convert as trt\nimport core.utils as utils\nfrom tensorflow.python.saved_model import signature_constants\nimport os\nfrom tensorflow.compat.v1 import ConfigProto\nfrom tensorflow.compat.v1 import InteractiveSession\n\nflags.DEFINE_string('weights', './checkpoints/yolov4-416', 'path to weights file')\nflags.DEFINE_string('output', './checkpoints/yolov4-trt-fp16-416', 'path to output')\nflags.DEFINE_integer('input_size', 416, 'path to output')\nflags.DEFINE_string('quantize_mode', 'float16', 'quantize mode (int8, float16)')\nflags.DEFINE_string('dataset', \"./coco_dataset/coco/5k.txt\", 'path to dataset')\nflags.DEFINE_integer('loop', 10, 'loop')\n\ndef representative_data_gen():\n fimage = open(FLAGS.dataset).read().split()\n for input_value in range(FLAGS.loop):\n if os.path.exists(fimage[input_value]):\n original_image=cv2.imread(fimage[input_value])\n original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)\n image_data = utils.image_preporcess(np.copy(original_image), [FLAGS.input_size, FLAGS.input_size])\n img_in = image_data[np.newaxis, ...].astype(np.float32)\n batched_input = tf.constant(img_in)\n print(input_value)\n yield (batched_input, )\n else:\n continue\n\ndef save_trt():\n if FLAGS.quantize_mode == 'int8':\n conversion_params = trt.DEFAULT_TRT_CONVERSION_PARAMS._replace(\n precision_mode=trt.TrtPrecisionMode.INT8,\n max_workspace_size_bytes=8000000000,\n use_calibration=True,\n max_batch_size=32)\n converter = trt.TrtGraphConverterV2(\n input_saved_model_dir=FLAGS.weights,\n conversion_params=conversion_params)\n converter.convert(calibration_input_fn=representative_data_gen)\n elif FLAGS.quantize_mode == 'float16':\n conversion_params = trt.DEFAULT_TRT_CONVERSION_PARAMS._replace(\n precision_mode=trt.TrtPrecisionMode.FP16,\n max_workspace_size_bytes=8000000000,\n max_batch_size=32)\n converter = trt.TrtGraphConverterV2(\n input_saved_model_dir=FLAGS.weights, conversion_params=conversion_params)\n converter.convert()\n else :\n conversion_params = trt.DEFAULT_TRT_CONVERSION_PARAMS._replace(\n precision_mode=trt.TrtPrecisionMode.FP32,\n max_workspace_size_bytes=8000000000,\n max_batch_size=32)\n converter = trt.TrtGraphConverterV2(\n input_saved_model_dir=FLAGS.weights, conversion_params=conversion_params)\n converter.convert()\n\n converter.build(input_fn=representative_data_gen)\n converter.save(output_saved_model_dir=FLAGS.output)\n print('Done Converting to TF-TRT')\n\n saved_model_loaded = tf.saved_model.load(FLAGS.output)\n graph_func = saved_model_loaded.signatures[\n signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]\n trt_graph = graph_func.graph.as_graph_def()\n for n in trt_graph.node:\n print(n.op)\n if n.op == \"TRTEngineOp\":\n print(\"Node: %s, %s\" % (n.op, n.name.replace(\"/\", \"_\")))\n else:\n print(\"Exclude Node: %s, %s\" % (n.op, n.name.replace(\"/\", \"_\")))\n logging.info(\"model saved to: {}\".format(FLAGS.output))\n\n trt_engine_nodes = len([1 for n in trt_graph.node if str(n.op) == 'TRTEngineOp'])\n print(\"numb. of trt_engine_nodes in TensorRT graph:\", trt_engine_nodes)\n all_nodes = len([1 for n in trt_graph.node])\n print(\"numb. 
of all_nodes in TensorRT graph:\", all_nodes)\n\ndef main(_argv):\n config = ConfigProto()\n config.gpu_options.allow_growth = True\n session = InteractiveSession(config=config)\n save_trt()\n\nif __name__ == '__main__':\n try:\n app.run(main)\n except SystemExit:\n pass\n\n\n" ]
[ [ "tensorflow.python.compiler.tensorrt.trt_convert.TrtGraphConverterV2", "tensorflow.compat.v1.ConfigProto", "tensorflow.compat.v1.InteractiveSession", "numpy.copy", "tensorflow.constant", "tensorflow.python.compiler.tensorrt.trt_convert.DEFAULT_TRT_CONVERSION_PARAMS._replace", "tensorflow.saved_model.load" ] ]
catalystneuro/hussaini-lab-to-nwb
[ "c3b2338fffb1d727a923bd361eac7215e93f31fe" ]
[ "hussaini_lab_to_nwb/tint_conversion/export_spike_waveforms.py" ]
[ "import os\nimport struct\nfrom pathlib import Path\n\nimport numpy as np\nimport spiketoolkit as st\n\nfrom .utils import get_group_property_name, assert_group_names_match\n\n\ndef parse_generic_header(filename):\n \"\"\"\n Given a binary file with phrases and line breaks, enters the\n first word of a phrase as dictionary key and the following\n string (without linebreaks) as value. Returns the dictionary.\n\n Parameters\n ----------\n filename : str or Path\n Full filename.\n \"\"\"\n header = {}\n with open(filename, 'rb') as f:\n for bin_line in f:\n if b'data_start' in bin_line:\n break\n line = bin_line.decode('cp1252').replace('\\r\\n', '').replace('\\r', '').strip()\n parts = line.split(' ')\n key = parts[0]\n value = ' '.join(parts[1:])\n header[key] = value\n\n return header\n\n\ndef get_set_header(set_file):\n \"\"\"\n Given a .set filename, extract the first few lines up until and\n including the line with `sw_version`.\n\n Parameters\n ----------\n set_file : str or Path\n Full filename of .set file\n\n ---\n Largely based on gebaSpike implementation by Geoff Barrett\n https://github.com/GeoffBarrett/gebaSpike\n \"\"\"\n header = ''\n with open(set_file, 'r+') as f:\n for line in f:\n header += line\n if 'sw_version' in line:\n break\n\n return header\n\n\ndef get_unit_group_ids(sorting):\n '''Get group ids.\n\n Parameters\n ----------\n sorting : SortingExtractor\n\n Returns\n -------\n group_ids : List\n List of groups ids for each Unit in `sorting`.\n '''\n group_property_name = get_group_property_name(sorting)\n\n unit_ids = sorting.get_unit_ids()\n group_ids = [sorting.get_unit_property(\n unit_id=unit_id, property_name=group_property_name) for unit_id in unit_ids\n ]\n\n return [int(group_id) for group_id in group_ids]\n\n\ndef get_waveforms(recording, sorting, unit_ids, header, waveforms_center):\n '''Get waveforms for specific tetrode.\n\n Parameters\n ----------\n recording : RecordingExtractor\n sorting : SortingExtractor\n unit_ids : List\n List of unit ids to extract waveforms\n header : dict\n maps parameters from .set file to their values (as strings).\n\n Returns\n -------\n waveforms : List\n List of np.array (n_spikes, n_channels, n_timepoints) with waveforms for each unit\n '''\n sampling_rate = recording.get_sampling_frequency()\n samples_before = int(50 * waveforms_center)\n samples_after = 50 - samples_before\n header['pretrigSamps'] = str(samples_before)\n header['spikeLockout'] = str(samples_after)\n\n ms_before = samples_before / (sampling_rate / 1000) + 0.001\n ms_after = samples_after / (sampling_rate / 1000) + 0.001\n\n group_property_name = get_group_property_name(sorting)\n \n waveforms = st.postprocessing.get_unit_waveforms(\n recording,\n sorting,\n unit_ids=unit_ids,\n max_spikes_per_unit=None,\n grouping_property=group_property_name,\n recompute_info=True,\n ms_before=ms_before,\n ms_after=ms_after,\n return_idxs=False,\n return_scaled=False,\n dtype=np.int8\n )\n\n return waveforms\n\n\ndef write_tetrode_file_header(tetrode_file, n_spikes_chan, Fs):\n ''' Generate and write header of tetrode file\n\n Parameters\n ----------\n tetrode_file : str or Path\n Full filename to write to\n n_spikes_chan : int\n Number of spikes to write to file\n Fs : int\n Sampling frequency of data\n '''\n path = Path(tetrode_file).parent\n filename = Path(tetrode_file).name\n basename = filename.split('.')[0]\n set_file = path / '{}.set'.format(basename)\n \n # We are enforcing the defaults from the file format manual\n header = get_set_header(set_file)\n 
to_write = [\n header,\n 'num_chans 4\\n',\n 'timebase {} hz\\n'.format(96000),\n 'bytes_per_timestamp {}\\n'.format(4),\n 'samples_per_spike {}\\n'.format(50),\n 'sample_rate {} hz\\n'.format(int(Fs)),\n 'bytes_per_sample {}\\n'.format(1),\n 'spike_format t,ch1,t,ch2,t,ch3,t,ch4\\n',\n 'num_spikes {}\\n'.format(n_spikes_chan),\n 'data_start'\n ]\n\n with open(tetrode_file, 'w') as f:\n f.writelines(to_write)\n\n\ndef write_tetrode_file_data(tetrode_file, all_spikes, all_waveforms, Fs):\n ''' Write binary data to tetrode file\n\n Parameters\n ----------\n tetrode_file : str or Path\n Full filename of tetrode file to write to\n all_spikes : np.array\n Array with all spike timestamps for tetrode (int64)\n all_waveforms : np.array\n Array with all corresponding waveforms (np.memmap) (int8)\n Fs : int\n Sampling frequency of data\n '''\n\n # create ordered spike times and waveforms from input dict\n spike_times = all_spikes\n spike_times = np.tile(spike_times, (4, 1))\n spike_times = spike_times.flatten(order='F')\n\n n_spikes = spike_times.shape[0]\n spike_values = all_waveforms\n spike_values = spike_values.reshape((n_spikes, 50))\n\n # re-adjust spike_times to reflect 96000 hz sampling rate\n spike_times *= 96000 // int(Fs)\n\n t_packed = struct.pack('>%di' % n_spikes, *spike_times)\n spike_data_pack = struct.pack('<%db' % (n_spikes * 50), *spike_values.flatten())\n\n # combine timestamps (4 bytes per sample) and waveforms (1 byte per sample)\n comb_list = [None] * (2 * n_spikes)\n comb_list[::2] = [t_packed[i:i + 4] for i in range(0, len(t_packed), 4)]\n comb_list[1::2] = [spike_data_pack[i:i + 50] for i in range(0, len(spike_data_pack), 50)]\n\n with open(tetrode_file, 'ab') as f:\n f.writelines(comb_list)\n f.writelines([bytes('\\r\\ndata_end\\r\\n', 'utf-8')])\n\n\ndef write_tetrode(tetrode_file, all_spikes, all_waveforms, Fs):\n ''' Write data to tetrode (`.X`) file\n\n Parameters\n ----------\n tetrode_file : str or Path\n Full filename of tetrode file to write to\n all_spikes : np.array\n Array with all spike timestamps for tetrode (int64)\n all_waveforms : np.array\n Array with all corresponding waveforms (np.memmap) (int8)\n Fs : int\n Sampling frequency of data\n '''\n write_tetrode_file_header(tetrode_file, len(all_spikes), Fs)\n write_tetrode_file_data(tetrode_file, all_spikes, all_waveforms, Fs)\n\n\ndef write_to_tetrode_files(recording, sorting, group_ids, set_file, waveforms_center=0.5):\n '''Get spike samples and waveforms for all tetrodes specified in\n `group_ids`. Note that `group_ids` is 0-indexed, whereas tetrodes are\n 1-indexed (so if you want tetrodes 1+2, specify group_ids=[0, 1]).\n\n Parameters\n ----------\n recording : RecordingExtractor\n sorting : SortingExtractor\n group_ids : array like\n Tetrodes to include, but 0-indexed (i.e. tetrodeID - 1)\n set_file : Path or str\n .set file location. Used to determine how many samples prior to and\n post spike sample should be cut out for each waveform. .X files will have\n the same base filename as the .set file. So if you do not want to overwrite\n existing .X files in your .set file directory, copy the .set file to a new\n folder and give its new location. The new .X files will appear there.\n waveforms_center: float\n Controls the waveform peak location in the 1ms TINT cutout (e.g. 
0.5: peak is at 0.5ms)\n '''\n\n assert_group_names_match(sorting, recording)\n\n sampling_rate = recording.get_sampling_frequency()\n group_ids = get_unit_group_ids(sorting)\n unit_ids = sorting.get_unit_ids()\n header = parse_generic_header(set_file)\n\n for group_id in np.unique(group_ids):\n\n # get spike samples and waveforms of this group / tetrode\n group_unit_ids = [unit_ids[i] for i, gid in enumerate(group_ids) if gid == group_id]\n group_waveforms = get_waveforms(\n recording, sorting, group_unit_ids, header, waveforms_center)\n group_spike_samples = sorting.get_units_spike_train(unit_ids=group_unit_ids)\n\n # concatenate all spikes and waveforms\n all_spikes = np.concatenate(group_spike_samples)\n all_waveforms = np.concatenate(group_waveforms)\n\n tetrode_filename = str(set_file).split('.')[0] + '.{}'.format(group_id + 1)\n print('Writing', Path(tetrode_filename).name)\n\n # write to tetrode file\n write_tetrode(tetrode_filename, all_spikes, all_waveforms, sampling_rate)\n" ]
[ [ "numpy.concatenate", "numpy.tile", "numpy.unique" ] ]
IINemo/isanlp_rst
[ "2d71b4fa874e6777aa437989024294bf9f6983c0" ]
[ "src/maintenance/utils/parse_rs3_rightbranch.py" ]
[ "\"\"\"\nScript to convert Rhetorical Structure Theory trees from .rs3 format\nto relationships examples pairs.\n\"\"\"\n\nfrom rs3_feature_extraction import ParsedToken\nimport re, sys, codecs, os, tempfile, subprocess, ntpath\nimport xml\nfrom xml.dom import minidom\nfrom xml.parsers.expat import ExpatError\nfrom argparse import ArgumentParser, FileType\nimport pandas as pd\nimport glob\nimport copy\nfrom file_reading import prepare_text, text_html_map\n\n\nOUT_PATH = 'data_right_branch'\n\n\nclass bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\n\nclass NODE:\n def __init__(self, id, left, right, parent, depth, kind, text, relname, relkind):\n\n self.id = id\n self.parent = parent\n self.left = left\n self.right = right\n self.depth = depth\n self.kind = kind\n self.text = text\n self.relname = relname\n self.relkind = relkind\n self.sortdepth = depth\n self.leftmost_child = \"\"\n self.children = []\n self.dep_parent = \"\"\n self.dep_rel = relname\n\n def to_row(self):\n return [self.id, self.text, self.dep_parent, self.dep_rel, self.kind]\n\n def __repr__(self):\n return \"\\t\".join([self.id, self.dep_parent, self.dep_rel, self.kind])\n\n\ndef get_left_right(node_id, nodes, min_left, max_right, rel_hash):\n \"\"\"\n Calculate leftmost and rightmost EDU covered by a NODE object. For EDUs this is the number of the EDU\n itself. For spans and multinucs, the leftmost and rightmost child dominated by the NODE is found recursively.\n \"\"\"\n if nodes[node_id].parent != \"0\" and node_id != \"0\":\n parent = nodes[nodes[node_id].parent]\n if min_left > nodes[node_id].left or min_left == 0:\n if nodes[node_id].left != 0:\n min_left = nodes[node_id].left\n if max_right < nodes[node_id].right or max_right == 0:\n max_right = nodes[node_id].right\n if nodes[node_id].relname == \"span\":\n if parent.left > min_left or parent.left == 0:\n parent.left = min_left\n if parent.right < max_right:\n parent.right = max_right\n elif nodes[node_id].relname in rel_hash:\n if parent.kind == \"multinuc\" and rel_hash[nodes[node_id].relname] == \"multinuc\":\n if parent.left > min_left or parent.left == 0:\n parent.left = min_left\n if parent.right < max_right:\n parent.right = max_right\n get_left_right(parent.id, nodes, min_left, max_right, rel_hash)\n\n\ndef get_depth(orig_node, probe_node, nodes):\n if probe_node.parent != \"0\":\n parent = nodes[probe_node.parent]\n if parent.kind != \"edu\" and (\n probe_node.relname == \"span\" or parent.kind == \"multinuc\" and probe_node.relkind == \"multinuc\"):\n orig_node.depth += 1\n orig_node.sortdepth += 1\n elif parent.kind == \"edu\":\n orig_node.sortdepth += 1\n get_depth(orig_node, parent, nodes)\n\n\ndef read_rst(filename, rel_hash):\n f = codecs.open(filename, \"r\", \"utf-8\")\n try:\n xmldoc = minidom.parseString(codecs.encode(f.read(), \"utf-8\"))\n except ExpatError:\n message = \"Invalid .rs3 file\"\n return message\n\n nodes = []\n ordered_id = {}\n schemas = []\n default_rst = \"\"\n\n # Get relation names and their types, append type suffix to disambiguate\n # relation names that can be both RST and multinuc\n item_list = xmldoc.getElementsByTagName(\"rel\")\n for rel in item_list:\n relname = re.sub(r\"[:;,]\", \"\", rel.attributes[\"name\"].value)\n if rel.hasAttribute(\"type\"):\n rel_hash[relname + \"_\" + rel.attributes[\"type\"].value[0:1]] = rel.attributes[\"type\"].value\n if 
rel.attributes[\"type\"].value == \"rst\" and default_rst == \"\":\n default_rst = relname + \"_\" + rel.attributes[\"type\"].value[0:1]\n else: # This is a schema relation\n schemas.append(relname)\n\n item_list = xmldoc.getElementsByTagName(\"segment\")\n if len(item_list) < 1:\n return '<div class=\"warn\">No segment elements found in .rs3 file</div>'\n\n id_counter = 0\n\n # Get hash to reorder EDUs and spans according to the order of appearance in .rs3 file\n for segment in item_list:\n id_counter += 1\n ordered_id[segment.attributes[\"id\"].value] = id_counter\n item_list = xmldoc.getElementsByTagName(\"group\")\n for group in item_list:\n id_counter += 1\n ordered_id[group.attributes[\"id\"].value] = id_counter\n ordered_id[\"0\"] = 0\n\n element_types = {}\n node_elements = xmldoc.getElementsByTagName(\"segment\")\n for element in node_elements:\n element_types[element.attributes[\"id\"].value] = \"edu\"\n node_elements = xmldoc.getElementsByTagName(\"group\")\n for element in node_elements:\n element_types[element.attributes[\"id\"].value] = element.attributes[\"type\"].value\n\n id_counter = 0\n item_list = xmldoc.getElementsByTagName(\"segment\")\n for segment in item_list:\n id_counter += 1\n if segment.hasAttribute(\"parent\"):\n parent = segment.attributes[\"parent\"].value\n else:\n parent = \"0\"\n if segment.hasAttribute(\"relname\"):\n relname = segment.attributes[\"relname\"].value\n else:\n relname = default_rst\n\n # Tolerate schemas, but no real support yet:\n if relname in schemas:\n relname = \"span\"\n\n relname = re.sub(r\"[:;,]\", \"\", relname) # remove characters used for undo logging, not allowed in rel names\n # Note that in RSTTool, a multinuc child with a multinuc compatible relation is always interpreted as multinuc\n if parent in element_types:\n if element_types[parent] == \"multinuc\" and relname + \"_m\" in rel_hash:\n relname = relname + \"_m\"\n elif relname != \"span\":\n relname = relname + \"_r\"\n else:\n if not relname.endswith(\"_r\") and len(relname) > 0:\n relname = relname + \"_r\"\n edu_id = segment.attributes[\"id\"].value\n if len(segment.childNodes):\n try:\n contents = segment.childNodes[0].data.strip()\n nodes.append(\n [str(ordered_id[edu_id]), id_counter, id_counter, str(ordered_id[parent]), 0, \"edu\", contents,\n relname])\n except KeyError as e:\n print(bcolors.FAIL + 'PARENT ID ERROR: ' + str(e) + bcolors.ENDC)\n\n item_list = xmldoc.getElementsByTagName(\"group\")\n for group in item_list:\n if group.attributes.length == 4:\n parent = group.attributes[\"parent\"].value\n else:\n parent = \"0\"\n if group.attributes.length == 4:\n relname = group.attributes[\"relname\"].value\n # Tolerate schemas by treating as spans\n if relname in schemas:\n relname = \"span\"\n\n relname = re.sub(r\"[:;,]\", \"\", relname) # remove characters used for undo logging, not allowed in rel names\n # Note that in RSTTool, a multinuc child with a multinuc compatible relation is always interpreted as multinuc\n if parent in element_types:\n if element_types[parent] == \"multinuc\" and relname + \"_m\" in rel_hash:\n relname = relname + \"_m\"\n elif relname != \"span\":\n relname = relname + \"_r\"\n else:\n relname = \"\"\n else:\n relname = \"\"\n group_id = group.attributes[\"id\"].value\n group_type = group.attributes[\"type\"].value\n contents = \"\"\n nodes.append([str(ordered_id[group_id]), 0, 0, str(ordered_id[parent]), 0, group_type, contents, relname])\n\n elements = {}\n for row in nodes:\n elements[row[0]] = NODE(row[0], row[1], row[2], 
row[3], row[4], row[5], row[6], row[7], \"\")\n\n for element in elements:\n if elements[element].kind == \"edu\":\n get_left_right(element, elements, 0, 0, rel_hash)\n\n for element in elements:\n node = elements[element]\n get_depth(node, node, elements)\n\n for nid in elements:\n node = elements[nid]\n if node.parent != \"0\":\n elements[node.parent].children.append(nid)\n if node.left == elements[node.parent].left:\n elements[node.parent].leftmost_child = nid\n\n # Ensure left most multinuc children are recognized even if there is an rst dependent further to the left\n for nid in elements:\n node = elements[nid]\n if node.kind == \"multinuc\" and node.leftmost_child == \"\":\n min_left = node.right\n leftmost = \"\"\n for child_id in node.children:\n child = elements[child_id]\n if child.relname.endswith(\"_m\"): # Using _m suffix to recognize multinuc relations\n\n if child.left < min_left:\n min_left = child.left\n leftmost = child_id\n node.leftmost_child = leftmost\n\n return elements\n\n\ndef seek_other_edu_child(nodes, source, exclude, block):\n \"\"\"\n Recursive function to find some child of a node which is an EDU and does not have the excluded ID\n :param nodes: dictionary of IDs to NODE objects\n :param source: the source node from which to traverse\n :param exclude: node ID to exclude as target child\n :param block: list of IDs for which children should not be traversed (multinuc right children)\n :return: the found child ID or None if none match\n \"\"\"\n\n if source == \"0\":\n return None\n else:\n # Check if this is already an EDU\n if nodes[source].kind == \"edu\" and source != exclude and source not in block:\n return source\n # Loop through children of this node\n children_to_search = [child for child in nodes[source].children if\n child not in nodes[exclude].children and child not in block]\n if len(children_to_search) > 0:\n if int(exclude) < int(children_to_search[0]):\n children_to_search.sort(key=lambda x: int(x))\n else:\n children_to_search.sort(key=lambda x: int(x), reverse=True)\n for child_id in children_to_search:\n # Found an EDU child which is not the original caller\n if nodes[child_id].kind == \"edu\" and child_id != exclude and (\n nodes[source].kind != \"span\" or nodes[child_id].relname == \"span\") and \\\n not (nodes[source].kind == \"multinuc\" and nodes[source].leftmost_child == exclude) and \\\n (nodes[nodes[child_id].parent].kind not in [\"span\", \"multinuc\"]):\n return child_id\n # Found a non-terminal child\n elif child_id != exclude:\n # If it's a span, check below it, following only span relation paths\n if nodes[source].kind == \"span\":\n if nodes[child_id].relname == \"span\":\n candidate = seek_other_edu_child(nodes, child_id, exclude, block)\n if candidate is not None:\n return candidate\n # If it's a multinuc, only consider the left most child as representing it topographically\n elif nodes[source].kind == \"multinuc\" and child_id == nodes[source].leftmost_child:\n candidate = seek_other_edu_child(nodes, child_id, exclude, block)\n if candidate is not None:\n return candidate\n return None\n\n\ndef find_dep_head(nodes, source, exclude, block):\n parent = nodes[source].parent\n if parent != \"0\":\n if nodes[parent].kind == \"multinuc\":\n if int(nodes[nodes[source].parent].left) == int(source):\n return None\n if nodes[source].parent == source:\n return None\n for child in nodes[parent].children:\n # Check whether exclude and child are under the same multinuc and exclude is further to the left\n if nodes[child].left > 
int(exclude) and nodes[child].left >= nodes[parent].left and int(exclude) >= nodes[parent].left:\n block.append(child)\n else:\n # Prevent EDU children of root from being dep head - only multinuc children possible at this point\n for child in nodes[source].children:\n if nodes[child].kind == \"edu\":\n block.append(child)\n candidate = seek_other_edu_child(nodes, nodes[source].parent, exclude, block)\n\n if candidate is not None:\n return candidate\n else:\n if parent == \"0\":\n return None\n else:\n if parent not in nodes:\n raise IOError(\"Node with id \" + source + \" has parent id \" + parent + \" which is not listed\\n\")\n return find_dep_head(nodes, parent, exclude, block)\n\n\ndef get_nonspan_rel(nodes, node):\n if node.parent == \"0\": # Reached the root\n return \"ROOT\"\n elif nodes[node.parent].kind == \"multinuc\" and nodes[node.parent].leftmost_child == node.id:\n return get_nonspan_rel(nodes, nodes[node.parent])\n elif nodes[node.parent].kind == \"multinuc\" and nodes[node.parent].leftmost_child != node.id:\n return node.relname\n elif nodes[node.parent].relname != \"span\":\n grandparent = nodes[node.parent].parent\n if grandparent == \"0\":\n return \"ROOT\"\n elif not (nodes[grandparent].kind == \"multinuc\" and nodes[node.parent].left == nodes[grandparent].left):\n return nodes[node.parent].relname\n else:\n return get_nonspan_rel(nodes, nodes[node.parent])\n else:\n if node.relname.endswith(\"_r\"):\n return node.relname\n else:\n return get_nonspan_rel(nodes, nodes[node.parent])\n\n\ndef get_pairs(df, text):\n pd.options.mode.chained_assignment = None\n \n df['id'] = df.index\n table = df.merge(df, left_on='dep_parent', right_on='id', how='inner', sort=False, right_index=True) \\\n .drop(columns=['dep_parent_y', 'dep_rel_y', 'dep_parent_x', 'kind_x', 'kind_y']) \\\n .rename(columns={\"dep_rel_x\": \"category_id\"})\n del df\n\n table = table[table.category_id != 'ROOT']\n table = table[table.category_id != 'span']\n \n for key in text_html_map.keys():\n #text = text.replace(key, text_html_map[key])\n table['snippet_x'].replace(key, text_html_map[key], regex=True, inplace=True)\n table['snippet_y'].replace(key, text_html_map[key], regex=True, inplace=True)\n\n def remove_prefix(text, prefix):\n if text.startswith(prefix):\n return text[len(prefix):]\n if text.endswith(prefix):\n return text[:-len(prefix)]\n return text\n\n table.snippet_x = table.apply(lambda row: remove_prefix(row.snippet_x.strip(), row.snippet_y.strip()), axis=1)\n table.snippet_y = table.apply(lambda row: remove_prefix(row.snippet_y.strip(), row.snippet_x.strip()), axis=1)\n table['snippet_x'] = table['snippet_x'].apply(lambda row: row.strip())\n table['snippet_y'] = table['snippet_y'].apply(lambda row: row.strip())\n \n def find_in_text(plain_text, x, y):\n cand_x = plain_text.find(x)\n cand_y = plain_text.find(y, cand_x + len(x))\n if cand_y - cand_x > len(x) + 3:\n cand_x = plain_text.find(x, cand_x)\n cand_y = plain_text.find(y, cand_x + len(x))\n return (cand_x, cand_y) \n \n locations = table.apply(lambda row: find_in_text(text, row.snippet_x.strip(), row.snippet_y.strip()), axis=1)\n table['loc_x'] = locations.map(lambda row: row[0])\n table['loc_y'] = locations.map(lambda row: row[1])\n\n def exact_order(row):\n \n if 'order' in row.keys():\n order = row.order\n \n if row.category_id[-2:] == '_m':\n order = 'NN'\n else:\n order = ''\n\n if row.loc_x < row.loc_y:\n order = 'SN'\n\n if row.loc_x > row.loc_y:\n order = 'NS'\n \n if row.loc_x == -1 and row.category_id == 'elaboration_r':\n order 
= 'NS'\n\n if row.loc_x == -1 and row.category_id == 'preparation_r':\n order = 'NS'\n \n return order\n \n table['order'] = table.apply(lambda row: exact_order(row), axis=1)\n \n ns = table[table.order == 'NS']\n sn = table[table.order == 'SN']\n\n ns = ns.rename(columns={\n 'snippet_x': 'snippet_y_',\n 'id_x': 'id_y_',\n 'id_y': 'id_x_',\n 'snippet_y': 'snippet_x_'\n })\n ns = ns.rename(columns={\n 'snippet_x_': 'snippet_x',\n 'id_x_': 'id_x',\n 'id_y_': 'id_y',\n 'snippet_y_': 'snippet_y'\n })\n\n table = pd.concat([sn, ns], ignore_index=True, sort=False)\n \n table.loc[table.category_id.str[-2:] == '_m', 'order'] = 'NN'\n table.snippet_y = table.apply(lambda row: remove_prefix(row.snippet_y.strip(), row.snippet_x.strip()), axis=1)\n\n locations = table.apply(lambda row: find_in_text(text, row.snippet_x.strip(), row.snippet_y.strip()), axis=1)\n table['loc_x'] = locations.map(lambda row: row[0])\n table['loc_y'] = locations.map(lambda row: row[1])\n \n table = table[table.loc_x != -1]\n table = table[table.loc_y != -1]\n \n def cut_middle_injections(row):\n if row.loc_x + len(row.snippet_x) > row.loc_y:\n row.snippet_x = row.snippet_x[:row.loc_y - row.loc_x]\n return row\n \n table = table.apply(lambda row: cut_middle_injections(row), axis=1)\n \n table.drop(columns=['id_x', 'id_y',\n #'loc_x', 'loc_y', 'loc_x+y',\n #'new_paragraph_x', 'new_paragraph_y',\n 'dep_parent',\n ], inplace=True)\n\n table.drop_duplicates(inplace=True)\n \n return table\n\n#######################################################################################################\n\ndesc = \"Usage example:\\n\\n\" + \\\n \"python rst2dep.py <INFILES>\"\nparser = ArgumentParser(description=desc)\nparser.add_argument('path', nargs='+', help='Path of a file or a folder of files.')\nparser.add_argument(\"-r\", \"--root\", action=\"store\", dest=\"root\", default=\"\",\n help=\"optional: path to corpus root folder containing a directory dep/ and \\n\" +\\\n \"a directory xml/ containing additional corpus formats\")\n\noptions = parser.parse_args()\nfull_paths = [os.path.join(os.getcwd(), path) for path in options.path]\nfiles = set()\nfor path in full_paths:\n if os.path.isfile(path):\n files.add(path)\n else:\n files |= set(glob.glob(path + '/*' + '.rs3'))\n\nfor rstfile in files:\n print('>>> read file', rstfile)\n \n out_file = rstfile.split('/')[-1]\n if out_file.endswith(\"rs3\"):\n out_file = out_file.replace(\"rs3\", \"json\")\n else:\n out_file = out_file + \".pkl\"\n \n out_file = os.path.join(OUT_PATH, out_file)\n\n\n ### 1. 
save edus in <filename>.edus ##############################################################\n \n try:\n xmldoc = minidom.parse(rstfile)\n except xml.parsers.expat.ExpatError as e:\n original = open(rstfile, 'r').read()\n \n mapping = {\n r'&amp;': r'&',\n r'&quot;': r'\"',\n r'&ndash;': r'–',\n r'&ouml;': r'o',\n r'&hellip;': r'...',\n r'&eacute;': r'e',\n r'&aacute;': r'a',\n r'&rsquo;': r\"'\",\n r'&lsquo;': r\"'\",\n r' & ': r' and ', #\n r'&id=': r'_id=',\n }\n \n mapped = original\n for key, value in mapping.items():\n mapped = mapped.replace(key, value)\n \n with open(rstfile, 'w') as buffer:\n buffer.write(mapped)\n \n try:\n xmldoc = minidom.parse(rstfile)\n except xml.parsers.expat.ExpatError as e: \n with open(rstfile, 'w') as f:\n f.write(original)\n \n print('Error occured in file:', rstfile)\n print(e)\n\n edus = xmldoc.getElementsByTagName('segment')\n with open(out_file.replace(\"json\", \"edus\"), 'w') as f:\n for edu in edus:\n #if edu.attributes['relname'].nodeValue != \"antithesis\":\n if len(edu.childNodes) > 0:\n f.write(edu.childNodes[0].nodeValue + '\\n')\n\n ### 2. save trees in <filename>.json #############################################################\n \n nodes = read_rst(rstfile, {})\n out_graph = []\n dep_root = options.root \n\n # Add tokens to terminal nodes\n if nodes == \"Invalid .rs3 file\":\n print(nodes)\n else:\n edus = list(nodes[nid] for nid in nodes if nodes[nid].kind == \"edu\")\n edus.sort(key=lambda x: int(x.id))\n token_reached = 0\n\n # Get each node with 'span' relation its nearest non-span relname\n for nid in nodes:\n node = nodes[nid]\n if nid == \"9\":\n pass\n new_rel = node.relname\n if node.parent == \"0\":\n new_rel = \"ROOT\"\n node.dep_rel = new_rel\n\n counter = 0\n joint_trees = []\n\n for nid in nodes:\n node = nodes[nid]\n\n if node.parent != '0' and nodes[node.parent].kind == \"span\" and (\n int(nodes[node.parent].left) - 1 == int(node.id) or int(nodes[node.parent].right) + 1 == int(node.id)):\n dummy_text = ''\n parent = nodes[node.parent]\n for node_id in range(parent.left, parent.right + 1):\n if nodes.get(str(node_id)):\n dummy_text += nodes[str(node_id)].text + \" \"\n if dummy_text:\n # print('1.', node.id, dummy_text)\n parent = copy.copy(parent)\n parent.text = dummy_text\n parent.children = []\n parent.dep_parent = '0'\n parent.dep_rel = \"ROOT\"\n node.dep_parent = parent.id\n #out_graph.append(parent)\n out_graph.append(node)\n\n elif nid != '0' and node.kind in [\"multinuc\", \"span\"]:\n if node.parent == '0' and node.kind == 'multinuc':\n dummy_text = ''\n for node_id in range(node.left, node.right + 1):\n if nodes.get(str(node_id)):\n dummy_text += nodes[str(node_id)].text + \" \"\n node.text = dummy_text\n node.children = []\n out_graph.append(node)\n\n elif node.parent == '0' and node.kind == 'span':\n dummy_text = ''\n for node_id in range(node.left, node.right + 1):\n if nodes.get(str(node_id)):\n dummy_text += nodes[str(node_id)].text + \" \"\n \n node.text = dummy_text\n node.children = []\n out_graph.append(node)\n\n elif node.parent != '0':\n dummy_text = ''\n for node_id in range(node.left, node.right + 1):\n if nodes.get(str(node_id)):\n dummy_text += nodes[str(node_id)].text + \" \"\n\n if dummy_text:\n if node.kind == \"multinuc\" and (node.left, node.right) != (\n nodes[node.parent].left, nodes[node.parent].right):\n node.dep_parent = node.parent\n node.text = dummy_text\n #print('3.', node.id, node.text)\n node.children = []\n out_graph.append(node)\n elif nodes[node.parent].kind == 'multinuc':\n 
node.dep_parent = node.parent\n node.text = dummy_text\n #print('4.', node.id, node.text)\n\n if node.dep_rel in ['joint_m', 'same-unit_m']:\n # if node.dep_rel == 'joint_m':\n # print('4.', node.id, node.text, node.dep_rel)\n children = nodes[node.parent].children\n if len(children) > 2 and children[0] != node.id and not children in joint_trees:\n # print('::', children)\n joint_trees.append(nodes[node.parent].children)\n\n #node.children = []\n out_graph.append(node)\n else:\n node.text = dummy_text\n # print('5.', node.id, node.text) #106\n node.dep_parent = node.parent\n node.children = []\n out_graph.append(node)\n\n elif node.kind == \"edu\":\n dep_parent = find_dep_head(nodes, nid, nid, [])\n if dep_parent is None:\n # This is the root\n node.dep_parent = \"0\"\n node.dep_rel = \"ROOT\"\n elif node.parent != '0' and nodes[node.parent].kind == 'span':\n node.dep_parent = \"0\"\n node.dep_rel = \"ROOT\"\n else:\n if node.dep_rel in ['joint_m', 'same-unit_m']:\n # if node.dep_rel == 'joint_m':\n # print('4.', node.id, node.text, node.dep_rel)\n children = nodes[node.parent].children\n if len(children) > 2 and children[0] != node.id and not children in joint_trees:\n # print('::', children)\n joint_trees.append(nodes[node.parent].children)\n node.dep_parent = node.parent\n out_graph.append(node)\n\n else:\n pass\n #print('>>>', nid, node.kind)\n\n out_graph.sort(key=lambda x: int(x.id))\n\n def get_node(id):\n return [i for i, x in enumerate(out_graph) if x.id == id][0]\n\n for joint_tree in joint_trees:\n nid = [get_node(id) for id in joint_tree]\n\n # news_44/108-115\n for i in range(len(nid)):\n if not out_graph[nid[i]].children:\n out_graph[nid[i]].children = [out_graph[nid[i]].id]\n\n for i in range(len(nid)):\n for k in range(0, len(nid)-i-1):\n if int(out_graph[nid[k]].children[0]) > int(out_graph[nid[k + 1]].children[0]):\n nid[k], nid[k + 1] = nid[k + 1], nid[k]\n \n for i in range(len(nid)-2, 1, -1):\n out_graph[nid[i]].dep_parent = out_graph[nid[i+1]].id\n out_graph[nid[i]].dep_rel = out_graph[nid[-2]].dep_rel\n out_graph[nid[i]].text = ' '.join([out_graph[nid[k]].text.strip() for k in range(i, i + 2)])\n out_graph[nid[i]].text.replace(out_graph[nid[i-1]].text, '')\n \n out_graph[nid[0]].dep_parent = out_graph[nid[-2]].id\n out_graph[nid[0]].dep_rel = out_graph[nid[-2]].dep_rel\n\n\n data = []\n\n for node in out_graph:\n data.append(node.to_row())\n\n filename = '.'.join(out_file.split('/')[-1].split('.')[:-1])\n textfile = '/'.join(rstfile.split('/')[:-1]).replace('rs3', 'txt') + '/' + filename + '.txt'\n\n with open(textfile, 'r') as f:\n text = prepare_text(f.read())\n \n df = pd.DataFrame(data, columns=['id', 'snippet', 'dep_parent', 'dep_rel', 'kind']).set_index('id')\n new_df = get_pairs(df, text)\n new_df['filename'] = filename\n new_df.to_json(out_file)\n" ]
[ [ "pandas.DataFrame", "pandas.concat" ] ]
NatalieP-J/spectralspace
[ "66c53878febaf6aee919cfc86c34b5dc21465343" ]
[ "spectralspace/examples/psm.py" ]
[ "\nimport numpy as np\n\n## Polynomial Model from Yuan-Sen Ting (Rix+ 2017) ##\n\ndatadir = '/geir_data/scr/price-jones/Code/synspec/data/'\n\npsminfo = np.load('{0}/kurucz_quadratic_psm.npz'.format(datadir))\ncoeff_array = psminfo['coeff_array']\n\n# a set of training labels\ntraining_labels = psminfo['training_labels']\nwavelength = psminfo['wavelength']\n\n# auxiliary arrays to reconstruct the spectrum (because we need to choose a reference point to \"Taylor-expand\"\ninds = psminfo['indices']\nreference_flux = psminfo['reference_flux']\nreference_point = psminfo['reference_point']\nTeff,logg,vturb,ch,nh,oh,nah,mgh,alh,sih,sh,kh,cah,tih,vh,mnh,nih,feh,c12c13 = reference_point\n\n#LABEL ORDER Teff [1000K], logg, vturb [km/s] (micro), ch, nh, oh, nah, mgh, alh, sih, sh, kh, cah, \n#tih, vh, mnh, nih, feh, log10(c12c13)\n\n#==================================================\n# make generate APOGEE spectrum\ndef generate_spectrum(labels=None,Teff=Teff,logg=logg,vturb=vturb,ch=ch,nh=nh,\n oh=oh,nah=nah,mgh=mgh,alh=alh,sih=sih,sh=sh,kh=kh,\n cah=cah,tih=tih,vh=vh,mnh=mnh,nih=nih,feh=feh,\n c12c13=c12c13,order=2):\n if not isinstance(labels,(list,np.ndarray)):\n labels = np.array([Teff,logg,vturb,ch,nh,oh,nah,mgh,alh,sih,sh,kh,cah,\n tih,vh,mnh,nih,feh,c12c13])\n \n # make quadratic labels\n linear_terms = np.array(labels) - reference_point\n if order == 1:\n lvec = np.hstack((linear_terms))\n # generate spectrum \n lin_coeff = coeff_array.T[:len(linear_terms)].T\n spec_generate = np.dot(lin_coeff,lvec) + reference_flux\n if order == 1.5:\n linind = 19\n t = linear_terms[0]\n g = linear_terms[1]\n f = linear_terms[17]\n fit_terms = np.array([t**2,t*g,t*f,g**2,g*f,f**2])\n lvec = np.hstack((linear_terms,fit_terms))\n coeffs = np.array([coeff_array[:,0+linind],coeff_array[:,1+linind],\n coeff_array[:,17+linind],\n coeff_array[:,19+linind],\n coeff_array[:,35+linind],\n coeff_array[:,187+linind]])\n coeffs = np.concatenate((coeff_array.T[:len(linear_terms)],\n coeffs)).T\n spec_generate = np.dot(coeffs,lvec) + reference_flux\n if order == 2:\n quadratic_terms = np.einsum('i,j->ij',linear_terms,\n linear_terms)[inds[:,0],inds[:,1]]\n lvec = np.hstack((linear_terms, quadratic_terms))\n # generate spectrum \n spec_generate = np.dot(coeff_array,lvec) + reference_flux\n \n return spec_generate\n\nlinind = 19\nlin_coeff = coeff_array.T[:linind].T\nquad_coeff = np.array([coeff_array[:,0+linind],coeff_array[:,19+linind],\n coeff_array[:,37+linind],coeff_array[:,54+linind],\n coeff_array[:,70+linind],coeff_array[:,85+linind],\n coeff_array[:,99+linind],coeff_array[:,112+linind],\n coeff_array[:,124+linind],coeff_array[:,135+linind],\n coeff_array[:,145+linind],coeff_array[:,154+linind],\n coeff_array[:,162+linind],coeff_array[:,169+linind],\n coeff_array[:,175+linind],coeff_array[:,180+linind],\n coeff_array[:,184+linind],coeff_array[:,187+linind],\n coeff_array[:,189+linind]]).T\n\ncross_inds = {0:np.arange(1,19)+linind, #Teff\n 1:np.append(np.arange(20,37),[1])+linind,#logg\n 2:np.append(np.arange(38,54),[2,20])+linind, #vturb\n 3:np.append(np.arange(55,70),[3,21,38])+linind, #ch\n 4:np.append(np.arange(71,85),[4,22,39,55])+linind, #nh\n 5:np.append(np.arange(86,99),[5,23,40,56,71])+linind, #oh\n 6:np.append(np.arange(100,112),[6,24,41,57,72,86])+linind, #nah\n 7:np.append(np.arange(113,124),[7,25,42,58,73,87,100])+linind, #mgh\n 8:np.append(np.arange(125,135),[8,26,43,59,74,88,101,113])+linind, #alh\n 9:np.append(np.arange(136,145),[9,27,44,60,75,89,102,114,125])+linind, #sih\n 
10:np.append(np.arange(146,154),[10,28,45,61,76,90,103,115,126,136])+linind, #sh\n 11:np.append(np.arange(155,162),[11,29,46,62,77,91,104,116,127,137,146])+linind, #kh\n 12:np.append(np.arange(163,169),[12,30,47,63,78,92,105,117,128,138,147,155])+linind, #cah\n 13:np.append(np.arange(170,175),[13,31,48,64,79,93,106,118,129,139,148,156,163])+linind, # tih\n 14:np.append(np.arange(176,180),[14,32,49,65,80,94,107,119,130,140,149,157,164,170])+linind, #vh\n 15:np.append(np.arange(181,184),[15,33,50,66,81,95,108,118,131,141,150,158,165,171,176])+linind, #mnh\n 16:np.append(np.arange(185,187),[16,34,51,67,82,96,109,119,132,142,151,159,166,172,177,181])+linind, #nih\n 17:np.append(np.arange(188,189),[17,35,52,68,83,97,110,120,133,143,152,160,167,173,178,182,185])+linind, #feh\n 18:np.array([18,36,53,69,84,98,111,121,134,144,153,161,168,174,179,183,186,188])+linind #c12c13\n }\n\ndef new_reference(labels=None,Teff=Teff,logg=logg,vturb=vturb,ch=ch,nh=nh,\n oh=oh,nah=nah,mgh=mgh,alh=alh,sih=sih,sh=sh,kh=kh,\n cah=cah,tih=tih,vh=vh,mnh=mnh,nih=nih,feh=feh,\n c12c13=c12c13,order=2,newref=reference_point):\n if not isinstance(labels,(list,np.ndarray)):\n labels = np.array([Teff,logg,vturb,ch,nh,oh,nah,mgh,alh,sih,sh,kh,cah,\n tih,vh,mnh,nih,feh,c12c13])\n refdiff = newref-reference_point\n newconst = generate_spectrum(labels=newref)\n linear_terms = np.array(labels) - newref\n quadratic_terms = np.einsum('i,j->ij',linear_terms,\n linear_terms)[inds[:,0],inds[:,1]]\n spec = newconst\n if order == 1:\n for l in range(linind):\n coeff = lin_coeff[:,l] + 2*refdiff[l]*quad_coeff[:,2]\n for c in range(linind-1):\n coeff += -coeff_array[:,cross_inds[l][c]]*refdiff[c]\n spec += linear_terms[l]*(coeff)\n\n if order == 1.5:\n for l in range(linind):\n coeff = lin_coeff[:,l] + 2*refdiff[l]*quad_coeff[:,2]\n for c in range(linind-1):\n coeff += -coeff_array[:,cross_inds[l][c]]*refdiff[c]\n spec += linear_terms[l]*(coeff)\n t = linear_terms[0]\n g = linear_terms[1]\n f = linear_terms[17]\n fit_terms = np.array([t**2,t*g,t*f,g**2,g*f,f**2])\n lvec = np.hstack((fit_terms))\n coeffs = np.array([coeff_array[:,0+linind],coeff_array[:,1+linind],\n coeff_array[:,17+linind],\n coeff_array[:,19+linind],\n coeff_array[:,35+linind],\n coeff_array[:,187+linind]]).T\n \n spec += np.dot(coeffs,lvec)\n \n if order == 2:\n for l in range(linind):\n coeff = lin_coeff[:,l] + 2*refdiff[l]*quad_coeff[:,2]\n for c in range(linind-1):\n coeff += -coeff_array[:,cross_inds[l][c]]*refdiff[c]\n spec += linear_terms[l]*coeff\n \n lvec = np.hstack((quadratic_terms))\n # generate spectrum \n spec += np.dot(coeff_array.T[linind:].T,lvec)\n \n return spec\n" ]
[ [ "numpy.array", "numpy.dot", "numpy.arange", "numpy.einsum", "numpy.hstack" ] ]
MTandHJ/PyTorch-Robust
[ "3f046fce515a7ed66ab34079329cd3496ca5087c" ]
[ "AVmixup/src/loadopts.py" ]
[ "\nfrom typing import Callable, Tuple\nimport numpy as np\nimport torch\nimport torchvision\nimport foolbox as fb\n\nimport time\nfrom tqdm import tqdm\n\n\nfrom models.base import AdversarialDefensiveModule\nfrom .base import AdversaryForValid\nfrom .config import *\nfrom .utils import getLogger, mkdirs\n\n\n\nclass ModelNotDefineError(Exception): pass\nclass LossNotDefineError(Exception): pass\nclass OptimNotIncludeError(Exception): pass\nclass AttackNotIncludeError(Exception): pass\nclass DatasetNotIncludeError(Exception): pass\n\n\n# return the num_classes of corresponding data set\ndef get_num_classes(dataset_type: str) -> int:\n if dataset_type in ('mnist', 'fashionmnist', 'svhn', 'cifar10'):\n return 10\n elif dataset_type in ('cifar100', ):\n return 100\n elif dataset_type in ('tinyimagenet', ):\n return 200\n else:\n raise DatasetNotIncludeError(\"Dataset {0} is not included.\" \\\n \"Refer to the following: {1}\".format(dataset_type, _dataset.__doc__))\n\n\ndef load_model(model_type: str) -> Callable[..., torch.nn.Module]:\n \"\"\"\n mnist: the model designed for MNIST dataset\n cifar: the model designed for CIFAR dataset\n resnet8|20|32|44|110|1202\n resnet18|34|50|101|50_32x4d\n preactresnet18|34|50|101\n wrn_28_10: depth-28, width-10\n wrn_34_10: depth-34, width-10\n wrn_34_20: depth-34, width-20\n \"\"\"\n resnets = ['resnet8', 'resnet20', 'resnet32', 'resnet44', \n 'resnet56', 'resnet110', 'resnet1202']\n srns = ['resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnext50_32x4d']\n prns = ['preactresnet18', 'preactresnet34', 'preactresnet50', 'preactresnet101']\n wrns = ['wrn_28_10', 'wrn_34_10', 'wrn_34_20']\n\n model: Callable[..., AdversarialDefensiveModule]\n if model_type == \"mnist\":\n from models.mnist import MNIST\n model = MNIST\n elif model_type == \"cifar\":\n from models.cifar import CIFAR\n model = CIFAR\n elif model_type in resnets:\n import models.resnet as resnet\n model = getattr(resnet, model_type)\n elif model_type in srns:\n import models.cifar_resnet as srn\n model = getattr(srn, model_type)\n elif model_type in prns:\n import models.preactresnet as prn\n model = getattr(prn, model_type)\n elif model_type in wrns:\n import models.wide_resnet as wrn\n model = getattr(wrn, model_type)\n else:\n raise ModelNotDefineError(f\"model {model_type} is not defined.\\n\" \\\n f\"Refer to the following: {load_model.__doc__}\\n\")\n return model\n\n\ndef load_loss_func(loss_type: str) -> Callable:\n \"\"\"\n cross_entropy: the cross entropy loss with logits\n cross_entropy_softmax: the cross entropy loss with probs\n kl_loss: kl divergence\n mse_loss: MSE\n \"\"\"\n loss_func: Callable[..., torch.Tensor]\n if loss_type == \"cross_entropy\":\n from .loss_zoo import cross_entropy\n loss_func = cross_entropy\n elif loss_type == \"cross_entropy_softmax\":\n from .loss_zoo import cross_entropy_softmax\n loss_func = cross_entropy_softmax\n elif loss_type == \"kl_loss\":\n from .loss_zoo import kl_divergence\n loss_func = kl_divergence\n elif loss_type == \"mse_loss\":\n from .loss_zoo import mse_loss\n loss_func = mse_loss\n else:\n raise LossNotDefineError(f\"Loss {loss_type} is not defined.\\n\" \\\n f\"Refer to the following: {load_loss_func.__doc__}\")\n return loss_func\n\n\ndef _dataset(\n dataset_type: str, \n train: bool = True\n) -> torch.utils.data.Dataset:\n \"\"\"\n Dataset:\n mnist: MNIST\n fashionmnist: FashionMNIST\n svhn: SVHN\n cifar10: CIFAR-10\n cifar100: CIFAR-100\n tinyimagenet: Tiny ImageNet 200\n \"\"\"\n if dataset_type == \"mnist\":\n dataset = 
torchvision.datasets.MNIST(\n root=ROOT, train=train, download=DOWNLOAD\n )\n elif dataset_type == \"fashionmnist\":\n dataset = torchvision.datasets.FashionMNIST(\n root=ROOT, train=train, download=DOWNLOAD\n )\n elif dataset_type == \"svhn\":\n split = 'train' if train else 'test'\n dataset = torchvision.datasets.SVHN(\n root=ROOT, split=split, download=DOWNLOAD\n )\n elif dataset_type == \"cifar10\":\n dataset = torchvision.datasets.CIFAR10(\n root=ROOT, train=train, download=DOWNLOAD\n )\n elif dataset_type == \"cifar100\":\n dataset = torchvision.datasets.CIFAR100(\n root=ROOT, train=train, download=DOWNLOAD\n )\n elif dataset_type == \"tinyimagenet\":\n from src.datasets import TinyImageNet\n split = 'train' if train else 'val'\n dataset = TinyImageNet(root=ROOT, split=split)\n else:\n raise DatasetNotIncludeError(\"Dataset {0} is not included.\" \\\n \"Refer to the following: {1}\".format(dataset_type, _dataset.__doc__))\n \n return dataset\n\n\ndef load_normalizer(dataset_type: str, ndim: int = 3) -> Tuple[torch.Tensor]:\n size = (-1,) + (1,) * (ndim - 1)\n mean = MEANS[dataset_type]\n std = STDS[dataset_type]\n mean = torch.tensor(mean).view(size)\n std = torch.tensor(std).view(size)\n return mean, std\n\n\ndef _split_dataset(\n dataset: torch.utils.data.Dataset,\n ratio: float = .1, seed: int = VALIDSEED,\n shuffle: bool = True\n) -> Tuple[torch.utils.data.Dataset]:\n from torch.utils.data import Subset\n datasize = len(dataset)\n indices = list(range(datasize))\n if shuffle:\n np.random.seed(seed)\n np.random.shuffle(indices)\n validsize = int(ratio * datasize)\n getLogger().info(f\"[Dataset] Split the dataset into trainset({datasize-validsize}) and validset({validsize}) ...\")\n train_indices, valid_indices = indices[validsize:], indices[:validsize]\n trainset = Subset(dataset, train_indices)\n validset = Subset(dataset, valid_indices)\n return trainset, validset\n\ndef load_dataset(\n dataset_type: str, \n transforms: str ='default', \n ratio: float = 0.1,\n seed: int = VALIDSEED,\n shuffle: bool = True,\n train: bool = True\n) -> torch.utils.data.Dataset:\n from .datasets import WrapperSet\n dataset = _dataset(dataset_type, train)\n if train:\n transforms = TRANSFORMS[dataset_type] if transforms == 'default' else transforms\n getLogger().info(f\"[Dataset] Apply transforms of '{transforms}' to trainset ...\")\n trainset, validset = _split_dataset(dataset, ratio, seed, shuffle)\n trainset = WrapperSet(trainset, transforms=transforms)\n validset = WrapperSet(validset, transforms=TRANSFORMS['validation'])\n return trainset, validset\n else:\n getLogger().info(f\"[Dataset] Apply transforms of '{transforms}' to testset ...\")\n testset = WrapperSet(dataset, transforms=transforms)\n return testset\n\n\nclass _TQDMDataLoader(torch.utils.data.DataLoader):\n def __iter__(self):\n return iter(\n tqdm(\n super(_TQDMDataLoader, self).__iter__(), \n leave=False, desc=\"վ'ᴗ' ի-\"\n )\n )\n\ndef load_dataloader(\n dataset: torch.utils.data.Dataset, \n batch_size: int, \n train: bool = True, \n show_progress: bool = False\n) -> torch.utils.data.DataLoader:\n\n dataloader = _TQDMDataLoader if show_progress else torch.utils.data.DataLoader\n if train:\n loader = dataloader(\n dataset, batch_size=batch_size, shuffle=True,\n num_workers=NUM_WORKERS, pin_memory=PIN_MEMORY\n )\n else:\n loader = dataloader(\n dataset, batch_size=batch_size, shuffle=False,\n num_workers=NUM_WORKERS, pin_memory=PIN_MEMORY\n )\n return loader\n\n\ndef load_optimizer(\n model: torch.nn.Module, \n optim_type: str, *,\n 
lr: float = 0.1, momentum: float = 0.9,\n betas: Tuple[float, float] = (0.9, 0.999),\n weight_decay: float = 1e-4,\n nesterov: bool = False,\n **kwargs: \"other hyper-parameters for optimizer\"\n) -> torch.optim.Optimizer:\n \"\"\"\n sgd: SGD\n adam: Adam\n \"\"\"\n try:\n cfg = OPTIMS[optim_type]\n except KeyError:\n raise OptimNotIncludeError(f\"Optim {optim_type} is not included.\\n\" \\\n f\"Refer to the following: {load_optimizer.__doc__}\")\n \n kwargs.update(lr=lr, momentum=momentum, betas=betas, \n weight_decay=weight_decay, nesterov=nesterov)\n \n cfg.update(**kwargs) # update the kwargs needed automatically\n logger = getLogger()\n logger.info(cfg)\n if optim_type == \"sgd\":\n optim = torch.optim.SGD(model.parameters(), **cfg)\n elif optim_type == \"adam\":\n optim = torch.optim.Adam(model.parameters(), **cfg)\n\n return optim\n\n\ndef load_learning_policy(\n optimizer: torch.optim.Optimizer,\n learning_policy_type: str,\n **kwargs: \"other hyper-parameters for learning scheduler\"\n) -> \"learning policy\":\n \"\"\"\n default: (100, 105), 110 epochs suggested\n null:\n STD: (82, 123), 164 epochs suggested\n STD-wrn: (60, 120, 160), 200 epochs suggested\n AT: (102, 154), 200 epochs suggested\n TRADES: (75, 90, 100), 76 epochs suggested\n TRADES-M: (55, 75, 90), 100 epochs suggested\n cosine: CosineAnnealingLR, kwargs: T_max, eta_min, last_epoch\n \"\"\"\n try:\n learning_policy_ = LEARNING_POLICY[learning_policy_type]\n except KeyError:\n raise NotImplementedError(f\"Learning_policy {learning_policy_type} is not defined.\\n\" \\\n f\"Refer to the following: {load_learning_policy.__doc__}\")\n\n lp_type = learning_policy_[0]\n lp_cfg = learning_policy_[1]\n lp_cfg.update(**kwargs) # update the kwargs needed automatically\n logger = getLogger()\n logger.info(f\"{lp_cfg} {lp_type}\")\n learning_policy = getattr(\n torch.optim.lr_scheduler, \n lp_type\n )(optimizer, **lp_cfg)\n \n return learning_policy\n\n\ndef load_fb_attack(attack_type: str, steps: int, stepsize: float) -> fb.attacks.Attack:\n \"\"\"\n pgd-linf: \\ell_{\\infty} rel_stepsize=stepsize, steps=steps;\n pgd-l1: \\ell_1 version;\n pgd-l2: \\ell_2 version;\n fgsm: no hyper-parameters;\n cw-l2: stepsize=stepsize, steps=steps;\n ead: initial_stepsize=stepsize, steps=steps;\n slide: \\ell_1 attack, rel_stepsize=stepsize, steps=steps;\n deepfool-linf: \\ell_{\\infty} version, overshoot=stepsize, steps=steps;\n deepfool-l2: \\ell_2 version;\n bba-inf: \\ell_{infty} version, lr=stepsize, steps=steps, overshott=1.1;\n bba-l1: \\ell_1 version;\n bba-l2: \\ell_2 version\n \"\"\"\n attack: fb.attacks.Attack\n if attack_type == \"pgd-linf\":\n attack = fb.attacks.LinfPGD(\n rel_stepsize=stepsize,\n steps=steps\n )\n elif attack_type == \"pgd-l2\":\n attack = fb.attacks.L2PGD(\n rel_stepsize=stepsize,\n steps=steps\n )\n elif attack_type == \"pgd-l1\":\n attack = fb.attacks.L1PGD(\n rel_stepsize=stepsize,\n steps=steps\n )\n elif attack_type == \"fgsm\":\n attack = fb.attacks.LinfFastGradientAttack(\n random_start=False\n )\n elif attack_type == \"cw-l2\":\n attack = fb.attacks.L2CarliniWagnerAttack(\n stepsize=stepsize,\n steps=steps\n )\n elif attack_type == \"ead\":\n attack = fb.attacks.EADAttack(\n initial_stepsize=stepsize,\n steps=steps\n )\n elif attack_type == \"slide\":\n attack = fb.attacks.SparseL1DescentAttack(\n rel_stepsize=stepsize,\n steps=steps\n )\n elif attack_type == \"deepfool-linf\":\n attack = fb.attacks.LinfDeepFoolAttack(\n overshoot=stepsize,\n steps=steps\n )\n elif attack_type == \"deepfool-l2\":\n 
attack = fb.attacks.L2DeepFoolAttack(\n overshoot=stepsize,\n steps=steps\n )\n elif attack_type == \"bba-linf\":\n attack = fb.attacks.LinfinityBrendelBethgeAttack(\n lr=stepsize,\n steps=steps\n )\n elif attack_type == \"bba-l2\":\n attack = fb.attacks.L2BrendelBethgeAttack(\n lr=stepsize,\n steps=steps\n )\n elif attack_type == \"bba-l1\":\n attack = fb.attacks.L1BrendelBethgeAttack(\n lr=stepsize,\n steps=steps\n )\n else:\n raise AttackNotIncludeError(f\"Attack {attack_type} is not included.\\n\" \\\n f\"Refer to the following: {load_fb_attack.__doc__}\")\n return attack\n\n\ndef load_attack(\n attack_type: str, epsilon: float, \n steps: int, stepsize: float,\n random_start: bool = True, bounds: Tuple[float] = BOUNDS\n) -> Callable:\n '''\n pgd-linf: \\ell_{\\infty};\n pgd-l2: \\ell_2 version;\n pgd-linf-kl: \\ell_{infty} with kl divergence\n pgd-l2l-kl: \\ell_2 with kl divergence\n '''\n if attack_type == 'pgd-linf':\n from .attacks import LinfPGD\n attack = LinfPGD\n elif attack_type == 'pgd-l2':\n from .attacks import L2PGD\n attack = L2PGD\n elif attack_type == 'pgd-linf-kl':\n from .attacks import LinfPGDKLdiv\n attack = LinfPGDKLdiv\n elif attack_type == 'pgd-l2-kl':\n from .attacks import L2PGDKLdiv\n attack = L2PGDKLdiv\n else:\n raise AttackNotIncludeError(f\"Attack {attack_type} is not included.\\n\" \\\n f\"Refer to the following: {load_attack.__doc__}\")\n attack = attack(\n epsilon=epsilon, steps=steps, stepsize=stepsize,\n random_start=random_start, bounds=bounds\n )\n return attack\n\n\ndef load_valider(\n model: torch.nn.Module, dataset_type: str, device: torch.device = DEVICE,\n) -> AdversaryForValid:\n cfg = VALIDER[dataset_type]\n attack = load_attack(**cfg)\n valider = AdversaryForValid(\n model=model, attacker=attack, device=device\n )\n return valider\n\n\ndef generate_path(\n method: str, dataset_type: str, model:str, description: str\n) -> Tuple[str, str]:\n info_path = INFO_PATH.format(\n method=method,\n dataset=dataset_type,\n model=model,\n description=description\n )\n log_path = LOG_PATH.format(\n method=method,\n dataset=dataset_type,\n model=model,\n description=description,\n time=time.strftime(TIMEFMT)\n )\n mkdirs(info_path, log_path)\n return info_path, log_path\n\n" ]
[ [ "torch.utils.data.Subset", "torch.tensor", "numpy.random.shuffle", "numpy.random.seed" ] ]
markmelnic/Carsen-Crawler
[ "6f5a7b7b7ba7cdad04c0dd30a53d17239dda6965" ]
[ "crawler/visualizer.py" ]
[ "import time\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\n\n# generate the live graph\ndef live_graph(crawler):\n plt.style.use(\"dark_background\")\n fig, (links_plot, perf_plot) = plt.subplots(2)\n fig.canvas.set_window_title(\"Crawler Activity Visualizer\")\n\n # timestamps = []\n # try:\n # timestamps.append(time.time() - timestamps[-1])\n # except IndexError:\n # timestamps.append(time.time())\n\n # performance plot data\n crawler.interval_processed = []\n\n # al - active links\n # pl - processed links\n # lu - listings rewrite_table_values\n crawler.al_history = []\n crawler.pl_history = []\n crawler.lu_history = []\n\n def animate(i):\n # links plot\n crawler.limit_size(crawler.al_history, len(crawler.active_links))\n crawler.limit_size(crawler.pl_history, len(crawler.processed_links))\n crawler.limit_size(crawler.lu_history, len(crawler.listings_links))\n\n links_plot.clear()\n links_plot.plot(\n crawler.pl_history,\n crawler.al_history,\n label=\"Active links\",\n color=\"#f4a261\",\n )\n links_plot.plot(\n crawler.pl_history,\n crawler.lu_history,\n label=\"Nr. of listings\",\n color=\"#2a9d8f\",\n )\n links_plot.set_title(\"\")\n links_plot.set_xlabel(\"Processed links\")\n links_plot.set_ylabel(\"Number of urls\")\n links_plot.legend()\n\n # performance plot\n try:\n crawler.limit_size(\n crawler.interval_processed,\n crawler.pl_history[-1] - crawler.pl_history[-2],\n )\n except IndexError:\n crawler.limit_size(crawler.interval_processed, 0)\n perf_plot.clear()\n perf_plot.plot(\n crawler.pl_history,\n crawler.interval_processed,\n label=\"Interval\",\n color=\"#e9c46a\",\n )\n perf_plot.set_title(\"Crawler performance\")\n perf_plot.set_xlabel(\"Number of processed links\")\n perf_plot.set_ylabel(\"Processed per iterations\")\n perf_plot.legend()\n\n anim = animation.FuncAnimation(fig, animate, interval=1000)\n plt.show()\n" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.subplots", "matplotlib.animation.FuncAnimation", "matplotlib.pyplot.style.use" ] ]
Joanna0123/QueryInst
[ "6f75240610439e92bca5398054e3f7adc37bfd53" ]
[ "mmdet/models/roi_heads/track_roi_head.py" ]
[ "import torch\nimport numpy as np\n\nfrom mmdet.core import bbox2result, bbox2roi, bbox_xyxy_to_cxcywh, bbox_flip\nfrom mmdet.core.bbox.samplers import PseudoSampler\nfrom ..builder import HEADS\nfrom .cascade_roi_head import CascadeRoIHead\n\nfrom mmcv.ops.nms import batched_nms\n\ndef mask2results(mask_preds, det_labels, num_classes):\n cls_segms = [[] for _ in range(num_classes)]\n for i in range(mask_preds.shape[0]):\n cls_segms[det_labels[i]].append(mask_preds[i])\n return cls_segms\n\n@HEADS.register_module()\nclass QueryRoIHead(CascadeRoIHead):\n r\"\"\"\n\n Args:\n num_stages (int): Number of stage whole iterative process.\n Defaults to 6.\n stage_loss_weights (Tuple[float]): The loss\n weight of each stage. By default all stages have\n the same weight 1.\n bbox_roi_extractor (dict): Config of box roi extractor.\n bbox_head (dict): Config of box head.\n train_cfg (dict, optional): Configuration information in train stage.\n Defaults to None.\n test_cfg (dict, optional): Configuration information in test stage.\n Defaults to None.\n pretrained (str, optional): model pretrained path. Default: None\n init_cfg (dict or list[dict], optional): Initialization config dict.\n Default: None\n\n \"\"\"\n\n def __init__(self,\n num_stages=6,\n stage_loss_weights=(1, 1, 1, 1, 1, 1),\n proposal_feature_channel=256,\n bbox_roi_extractor=dict(\n type='SingleRoIExtractor',\n roi_layer=dict(\n type='RoIAlign', output_size=7, sampling_ratio=2),\n out_channels=256,\n featmap_strides=[4, 8, 16, 32]),\n mask_roi_extractor=dict(\n type='SingleRoIExtractor',\n roi_layer=dict(\n type='RoIAlign', output_size=14, sampling_ratio=2),\n out_channels=256,\n featmap_strides=[4, 8, 16, 32]),\n track_roi_extractor=dict(\n type='SingleRoIExtractor',\n roi_layer=dict(\n type='RoIAlign', output_size=14, sampling_ratio=2),\n out_channels=256,\n featmap_strides=[4, 8, 16, 32]),\n bbox_head=dict(\n type='DIIHead',\n num_classes=80,\n num_fcs=2,\n num_heads=8,\n num_cls_fcs=1,\n num_reg_fcs=3,\n feedforward_channels=2048,\n hidden_channels=256,\n dropout=0.0,\n roi_feat_size=7,\n ffn_act_cfg=dict(type='ReLU', inplace=True)),\n mask_head=dict(\n type='DynamicMaskHead',\n dynamic_conv_cfg=dict(\n type='DynamicConv',\n in_channels=256,\n feat_channels=64,\n out_channels=256,\n input_feat_shape=14,\n with_proj=False,\n act_cfg=dict(type='ReLU', inplace=True),\n norm_cfg=dict(type='LN')),\n dropout=0.0,\n num_convs=4,\n roi_feat_size=14,\n in_channels=256,\n conv_kernel_size=3,\n conv_out_channels=256,\n class_agnostic=False,\n norm_cfg=dict(type='BN'),\n upsample_cfg=dict(type='deconv', scale_factor=2),\n loss_dice=dict(type='DiceLoss', loss_weight=8.0)),\n track_head=dict(\n type='TrackHead',\n\n ),\n train_cfg=None,\n test_cfg=None,\n pretrained=None,\n init_cfg=None):\n assert bbox_roi_extractor is not None\n assert mask_roi_extractor is not None\n assert track_roi_extractor is not None\n assert bbox_head is not None\n assert mask_head is not None\n assert track_head is not None\n assert len(stage_loss_weights) == num_stages\n self.num_stages = num_stages\n self.stage_loss_weights = stage_loss_weights\n self.proposal_feature_channel = proposal_feature_channel\n super(QueryRoIHead, self).__init__(\n num_stages,\n stage_loss_weights,\n bbox_roi_extractor=bbox_roi_extractor,\n mask_roi_extractor=mask_roi_extractor,\n track_roi_extractor=track_roi_extractor,\n bbox_head=bbox_head,\n mask_head=mask_head,\n track_head=track_head,\n train_cfg=train_cfg,\n test_cfg=test_cfg,\n pretrained=pretrained,\n init_cfg=init_cfg)\n # 
train_cfg would be None when run the test.py\n if train_cfg is not None:\n for stage in range(num_stages):\n assert isinstance(self.bbox_sampler[stage], PseudoSampler), \\\n 'QueryInst only support `PseudoSampler`'\n\n def _bbox_forward(self, stage, x, rois, object_feats, img_metas):\n \"\"\"Box head forward function used in both training and testing. Returns\n all regression, classification results and a intermediate feature.\n\n Args:\n stage (int): The index of current stage in\n iterative process.\n x (List[Tensor]): List of FPN features\n rois (Tensor): Rois in total batch. With shape (num_proposal, 5).\n the last dimension 5 represents (img_index, x1, y1, x2, y2).\n object_feats (Tensor): The object feature extracted from\n the previous stage.\n img_metas (dict): meta information of images.\n\n Returns:\n dict[str, Tensor]: a dictionary of bbox head outputs,\n Containing the following results:\n\n - cls_score (Tensor): The score of each class, has\n shape (batch_size, num_proposals, num_classes)\n when use focal loss or\n (batch_size, num_proposals, num_classes+1)\n otherwise.\n - decode_bbox_pred (Tensor): The regression results\n with shape (batch_size, num_proposal, 4).\n The last dimension 4 represents\n [tl_x, tl_y, br_x, br_y].\n - object_feats (Tensor): The object feature extracted\n from current stage\n - detach_cls_score_list (list[Tensor]): The detached\n classification results, length is batch_size, and\n each tensor has shape (num_proposal, num_classes).\n - detach_proposal_list (list[tensor]): The detached\n regression results, length is batch_size, and each\n tensor has shape (num_proposal, 4). The last\n dimension 4 represents [tl_x, tl_y, br_x, br_y].\n \"\"\"\n num_imgs = len(img_metas)\n bbox_roi_extractor = self.bbox_roi_extractor[stage]\n bbox_head = self.bbox_head[stage]\n bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs],\n rois)\n cls_score, bbox_pred, object_feats, attn_feats = bbox_head(bbox_feats,\n object_feats)\n proposal_list = self.bbox_head[stage].refine_bboxes(\n rois,\n rois.new_zeros(len(rois)), # dummy arg\n bbox_pred.view(-1, bbox_pred.size(-1)),\n [rois.new_zeros(object_feats.size(1)) for _ in range(num_imgs)],\n img_metas)\n bbox_results = dict(\n cls_score=cls_score,\n decode_bbox_pred=torch.cat(proposal_list),\n object_feats=object_feats,\n attn_feats=attn_feats,\n # detach then use it in label assign\n detach_cls_score_list=[\n cls_score[i].detach() for i in range(num_imgs)\n ],\n detach_proposal_list=[item.detach() for item in proposal_list])\n\n return bbox_results\n\n def _mask_forward(self, stage, x, rois, attn_feats):\n mask_roi_extractor = self.mask_roi_extractor[stage]\n mask_head = self.mask_head[stage]\n mask_feats = mask_roi_extractor(x[:mask_roi_extractor.num_inputs],\n rois)\n # do not support caffe_c4 model anymore\n mask_pred = mask_head(mask_feats, attn_feats)\n\n mask_results = dict(mask_pred=mask_pred)\n return mask_results\n\n def _mask_forward_train(self, stage, x, attn_feats, sampling_results, gt_masks, rcnn_train_cfg):\n\n if sum([len(gt_mask) for gt_mask in gt_masks])==0:\n print('Ground Truth Not Found!')\n loss_mask = sum([_.sum() for _ in self.mask_head[stage].parameters()]) * 0.\n return dict(loss_mask=loss_mask)\n pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])\n attn_feats = torch.cat([feats[res.pos_inds] for (feats, res) in zip(attn_feats, sampling_results)])\n mask_results = self._mask_forward(stage, x, pos_rois, attn_feats)\n\n mask_targets = self.mask_head[stage].get_targets(\n 
sampling_results, gt_masks, rcnn_train_cfg)\n\n pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])\n\n loss_mask = self.mask_head[stage].loss(mask_results['mask_pred'],\n mask_targets, pos_labels)\n mask_results.update(loss_mask)\n return mask_results\n \n def _track_forward_train(self,stage,x,rois,object_feats,attn_feats):\n\n track_roi_extractor = self.mask_roi_extractor[stage]\n \n track_feats = self.track_roi_extractor(x[:track_roi_extractor.num_inputs],rois)\n inst_emb = self.track_head[stage].get_emb(track_feats,attn_feats)\n match_score = self.track_head.score(inst_emb)\n loss_match = self.track_head.loss(match_score)\n\n return loss_match\n \n def forward_train(self,\n x,\n proposal_boxes,\n proposal_features,\n img_metas,\n gt_bboxes,\n gt_labels,\n gt_bboxes_ignore=None,\n imgs_whwh=None,\n gt_masks=None):\n \"\"\"Forward function in training stage.\n\n Args:\n x (list[Tensor]): list of multi-level img features.\n proposals (Tensor): Decoded proposal bboxes, has shape\n (batch_size, num_proposals, 4)\n proposal_features (Tensor): Expanded proposal\n features, has shape\n (batch_size, num_proposals, proposal_feature_channel)\n img_metas (list[dict]): list of image info dict where\n each dict has: 'img_shape', 'scale_factor', 'flip',\n and may also contain 'filename', 'ori_shape',\n 'pad_shape', and 'img_norm_cfg'. For details on the\n values of these keys see\n `mmdet/datasets/pipelines/formatting.py:Collect`.\n gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n gt_labels (list[Tensor]): class indices corresponding to each box\n gt_bboxes_ignore (None | list[Tensor]): specify which bounding\n boxes can be ignored when computing the loss.\n imgs_whwh (Tensor): Tensor with shape (batch_size, 4),\n the dimension means\n [img_width,img_height, img_width, img_height].\n gt_masks (None | Tensor) : true segmentation masks for each box\n used if the architecture supports a segmentation task.\n\n Returns:\n dict[str, Tensor]: a dictionary of loss components of all stage.\n \"\"\"\n\n num_imgs = len(img_metas)\n num_proposals = proposal_boxes.size(1)\n imgs_whwh = imgs_whwh.repeat(1, num_proposals, 1)\n all_stage_bbox_results = []\n proposal_list = [proposal_boxes[i] for i in range(len(proposal_boxes))]\n object_feats = proposal_features\n all_stage_loss = {}\n for stage in range(self.num_stages):\n rois = bbox2roi(proposal_list)\n bbox_results = self._bbox_forward(stage, x, rois, object_feats,\n img_metas)\n all_stage_bbox_results.append(bbox_results)\n \n if gt_bboxes_ignore is None:\n # TODO support ignore\n gt_bboxes_ignore = [None for _ in range(num_imgs)]\n sampling_results = []\n cls_pred_list = bbox_results['detach_cls_score_list']\n proposal_list = bbox_results['detach_proposal_list']\n for i in range(num_imgs):\n normolize_bbox_ccwh = bbox_xyxy_to_cxcywh(proposal_list[i] /\n imgs_whwh[i])\n assign_result = self.bbox_assigner[stage].assign(\n normolize_bbox_ccwh, cls_pred_list[i], gt_bboxes[i],\n gt_labels[i], img_metas[i])\n sampling_result = self.bbox_sampler[stage].sample(\n assign_result, proposal_list[i], gt_bboxes[i])\n sampling_results.append(sampling_result)\n bbox_targets = self.bbox_head[stage].get_targets(\n sampling_results, gt_bboxes, gt_labels, self.train_cfg[stage],\n True)\n cls_score = bbox_results['cls_score']\n decode_bbox_pred = bbox_results['decode_bbox_pred']\n object_feats = bbox_results['object_feats']\n\n single_stage_loss = self.bbox_head[stage].loss(\n 
cls_score.view(-1, cls_score.size(-1)),\n decode_bbox_pred.view(-1, 4),\n *bbox_targets,\n imgs_whwh=imgs_whwh)\n\n if self.with_mask:\n mask_results = self._mask_forward_train(stage, x, bbox_results['attn_feats'], \n sampling_results, gt_masks, self.train_cfg[stage])\n single_stage_loss['loss_mask'] = mask_results['loss_mask']\n\n if self.with_track:\n loss_match = self._track_forward_train(self,stage,x,rois,object_feats,attn_feats)\n single_stage_loss['loss_match'] = loss_match\n\n for key, value in single_stage_loss.items():\n all_stage_loss[f'stage{stage}_{key}'] = value * \\\n self.stage_loss_weights[stage]\n\n return all_stage_loss\n\n def simple_test(self,\n x,\n proposal_boxes,\n proposal_features,\n img_metas,\n imgs_whwh,\n rescale=False):\n \"\"\"Test without augmentation.\n\n Args:\n x (list[Tensor]): list of multi-level img features.\n proposal_boxes (Tensor): Decoded proposal bboxes, has shape\n (batch_size, num_proposals, 4)\n proposal_features (Tensor): Expanded proposal\n features, has shape\n (batch_size, num_proposals, proposal_feature_channel)\n img_metas (dict): meta information of images.\n imgs_whwh (Tensor): Tensor with shape (batch_size, 4),\n the dimension means\n [img_width,img_height, img_width, img_height].\n rescale (bool): If True, return boxes in original image\n space. Defaults to False.\n\n Returns:\n bbox_results (list[tuple[np.ndarray]]): \\\n [[cls1_det, cls2_det, ...], ...]. \\\n The outer list indicates images, and the inner \\\n list indicates per-class detected bboxes. The \\\n np.ndarray has shape (num_det, 5) and the last \\\n dimension 5 represents (x1, y1, x2, y2, score).\n \"\"\"\n assert self.with_bbox, 'Bbox head must be implemented.'\n assert self.with_track, 'Track head must be implemented'\n # Decode initial proposals\n num_imgs = len(img_metas)\n proposal_list = [proposal_boxes[i] for i in range(num_imgs)]\n ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)\n scale_factors = tuple(meta['scale_factor'] for meta in img_metas)\n # \"ms\" in variable names means multi-stage\n ms_bbox_result = {}\n ms_segm_result = {}\n\n object_feats = proposal_features\n for stage in range(self.num_stages):\n rois = bbox2roi(proposal_list)\n bbox_results = self._bbox_forward(stage, x, rois, object_feats,\n img_metas)\n object_feats = bbox_results['object_feats']\n cls_score = bbox_results['cls_score']\n proposal_list = bbox_results['detach_proposal_list']\n\n if self.with_mask:\n rois = bbox2roi(proposal_list)\n mask_results = self._mask_forward(stage, x, rois, bbox_results['attn_feats'])\n mask_results['mask_pred'] = mask_results['mask_pred'].reshape(\n num_imgs, -1, *mask_results['mask_pred'].size()[1:]\n )\n\n num_classes = self.bbox_head[-1].num_classes\n det_bboxes = []\n det_labels = []\n\n if self.bbox_head[-1].loss_cls.use_sigmoid:\n cls_score = cls_score.sigmoid()\n else:\n cls_score = cls_score.softmax(-1)[..., :-1]\n\n for img_id in range(num_imgs):\n cls_score_per_img = cls_score[img_id]\n scores_per_img, topk_indices = cls_score_per_img.flatten(\n 0, 1).topk(\n self.test_cfg.max_per_img, sorted=False)\n labels_per_img = topk_indices % num_classes\n bbox_pred_per_img = proposal_list[img_id][topk_indices //\n num_classes]\n if rescale:\n scale_factor = img_metas[img_id]['scale_factor']\n bbox_pred_per_img /= bbox_pred_per_img.new_tensor(scale_factor)\n det_bboxes.append(\n torch.cat([bbox_pred_per_img, scores_per_img[:, None]], dim=1))\n det_labels.append(labels_per_img)\n\n bbox_results = [\n bbox2result(det_bboxes[i], det_labels[i], 
num_classes)\n for i in range(num_imgs)\n ]\n ms_bbox_result['ensemble'] = bbox_results\n\n if self.with_mask:\n if rescale and not isinstance(scale_factors[0], float):\n scale_factors = [\n torch.from_numpy(scale_factor).to(det_bboxes[0].device)\n for scale_factor in scale_factors\n ]\n _bboxes = [\n det_bboxes[i][:, :4] *\n scale_factors[i] if rescale else det_bboxes[i][:, :4]\n for i in range(len(det_bboxes))\n ]\n segm_results = []\n mask_pred = mask_results['mask_pred']\n for img_id in range(num_imgs):\n mask_pred_per_img = mask_pred[img_id].flatten(0, 1)[topk_indices]\n mask_pred_per_img = mask_pred_per_img[:, None, ...].repeat(1, num_classes, 1, 1)\n segm_result = self.mask_head[-1].get_seg_masks(\n mask_pred_per_img, _bboxes[img_id], det_labels[img_id],\n self.test_cfg, ori_shapes[img_id], scale_factors[img_id],\n rescale)\n segm_results.append(segm_result)\n\n ms_segm_result['ensemble'] = segm_results\n \n if self.with_mask:\n results = list(\n zip(ms_bbox_result['ensemble'], ms_segm_result['ensemble']))\n else:\n results = ms_bbox_result['ensemble']\n\n return results\n\n def aug_test(self,\n aug_x,\n aug_proposal_boxes,\n aug_proposal_features,\n aug_img_metas,\n aug_imgs_whwh,\n rescale=False):\n \n samples_per_gpu = len(aug_img_metas[0])\n aug_det_bboxes = [[] for _ in range(samples_per_gpu)]\n aug_det_labels = [[] for _ in range(samples_per_gpu)]\n aug_mask_preds = [[] for _ in range(samples_per_gpu)]\n for x, proposal_boxes, proposal_features, img_metas, imgs_whwh in \\\n zip(aug_x, aug_proposal_boxes, aug_proposal_features, aug_img_metas, aug_imgs_whwh):\n \n\n num_imgs = len(img_metas)\n proposal_list = [proposal_boxes[i] for i in range(num_imgs)]\n ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)\n scale_factors = tuple(meta['scale_factor'] for meta in img_metas)\n\n object_feats = proposal_features\n for stage in range(self.num_stages):\n rois = bbox2roi(proposal_list)\n bbox_results = self._bbox_forward(stage, x, rois, object_feats,\n img_metas)\n object_feats = bbox_results['object_feats']\n cls_score = bbox_results['cls_score']\n proposal_list = bbox_results['detach_proposal_list']\n \n if self.with_mask:\n rois = bbox2roi(proposal_list)\n mask_results = self._mask_forward(stage, x, rois, bbox_results['attn_feats'])\n mask_results['mask_pred'] = mask_results['mask_pred'].reshape(\n num_imgs, -1, *mask_results['mask_pred'].size()[1:]\n )\n \n num_classes = self.bbox_head[-1].num_classes\n det_bboxes = []\n det_labels = []\n\n if self.bbox_head[-1].loss_cls.use_sigmoid:\n cls_score = cls_score.sigmoid()\n else:\n cls_score = cls_score.softmax(-1)[..., :-1]\n\n for img_id in range(num_imgs):\n cls_score_per_img = cls_score[img_id]\n scores_per_img, topk_indices = cls_score_per_img.flatten(\n 0, 1).topk(\n self.test_cfg.max_per_img, sorted=False)\n labels_per_img = topk_indices % num_classes\n bbox_pred_per_img = proposal_list[img_id][topk_indices //\n num_classes]\n if rescale:\n scale_factor = img_metas[img_id]['scale_factor']\n bbox_pred_per_img /= bbox_pred_per_img.new_tensor(scale_factor)\n aug_det_bboxes[img_id].append(\n torch.cat([bbox_pred_per_img, scores_per_img[:, None]], dim=1))\n det_bboxes.append(\n torch.cat([bbox_pred_per_img, scores_per_img[:, None]], dim=1))\n aug_det_labels[img_id].append(labels_per_img)\n det_labels.append(labels_per_img)\n \n if self.with_mask:\n if rescale and not isinstance(scale_factors[0], float):\n scale_factors = [\n torch.from_numpy(scale_factor).to(det_bboxes[0].device)\n for scale_factor in scale_factors\n ]\n 
_bboxes = [\n det_bboxes[i][:, :4] *\n scale_factors[i] if rescale else det_bboxes[i][:, :4]\n for i in range(len(det_bboxes))\n ]\n mask_pred = mask_results['mask_pred']\n for img_id in range(num_imgs):\n mask_pred_per_img = mask_pred[img_id].flatten(0, 1)[topk_indices]\n mask_pred_per_img = mask_pred_per_img[:, None, ...].repeat(1, num_classes, 1, 1)\n segm_result = self.mask_head[-1].get_seg_masks(\n mask_pred_per_img, _bboxes[img_id], det_labels[img_id],\n self.test_cfg, ori_shapes[img_id], scale_factors[img_id],\n rescale, format=False)\n aug_mask_preds[img_id].append(segm_result.detach().cpu().numpy())\n\n det_bboxes, det_labels, mask_preds = [], [], []\n\n for img_id in range(samples_per_gpu):\n for aug_id in range(len(aug_det_bboxes[img_id])):\n img_meta = aug_img_metas[aug_id][img_id]\n img_shape = img_meta['ori_shape']\n flip = img_meta['flip']\n flip_direction = img_meta['flip_direction']\n aug_det_bboxes[img_id][aug_id][:, :-1] = bbox_flip(aug_det_bboxes[img_id][aug_id][:, :-1],\n img_shape, flip_direction) if flip else aug_det_bboxes[img_id][aug_id][:, :-1]\n if flip:\n if flip_direction == 'horizontal':\n aug_mask_preds[img_id][aug_id] = aug_mask_preds[img_id][aug_id][:, :, ::-1]\n else:\n aug_mask_preds[img_id][aug_id] = aug_mask_preds[img_id][aug_id][:, ::-1, :]\n\n for img_id in range(samples_per_gpu):\n det_bboxes_per_im = torch.cat(aug_det_bboxes[img_id])\n det_labels_per_im = torch.cat(aug_det_labels[img_id])\n mask_preds_per_im = np.concatenate(aug_mask_preds[img_id])\n\n # TODO(vealocia): implement batched_nms here.\n det_bboxes_per_im, keep_inds = batched_nms(det_bboxes_per_im[:, :-1], det_bboxes_per_im[:, -1].contiguous(), det_labels_per_im, self.test_cfg.nms)\n det_bboxes_per_im = det_bboxes_per_im[:self.test_cfg.max_per_img, ...]\n det_labels_per_im = det_labels_per_im[keep_inds][:self.test_cfg.max_per_img, ...]\n mask_preds_per_im = mask_preds_per_im[keep_inds.detach().cpu().numpy()][:self.test_cfg.max_per_img, ...]\n det_bboxes.append(det_bboxes_per_im)\n det_labels.append(det_labels_per_im)\n mask_preds.append(mask_preds_per_im)\n \n ms_bbox_result = {}\n ms_segm_result = {}\n num_classes = self.bbox_head[-1].num_classes\n bbox_results = [\n bbox2result(det_bboxes[i], det_labels[i], num_classes)\n for i in range(samples_per_gpu)\n ]\n ms_bbox_result['ensemble'] = bbox_results\n mask_results = [\n mask2results(mask_preds[i], det_labels[i], num_classes)\n for i in range(samples_per_gpu)\n ]\n ms_segm_result['ensemble'] = mask_results\n\n if self.with_mask:\n results = list(\n zip(ms_bbox_result['ensemble'], ms_segm_result['ensemble']))\n else:\n results = ms_bbox_result['ensemble']\n return results\n\n def forward_dummy(self, x, proposal_boxes, proposal_features, img_metas):\n \"\"\"Dummy forward function when do the flops computing.\"\"\"\n all_stage_bbox_results = []\n proposal_list = [proposal_boxes[i] for i in range(len(proposal_boxes))]\n object_feats = proposal_features\n if self.with_bbox:\n for stage in range(self.num_stages):\n rois = bbox2roi(proposal_list)\n bbox_results = self._bbox_forward(stage, x, rois, object_feats,\n img_metas)\n\n all_stage_bbox_results.append(bbox_results)\n proposal_list = bbox_results['detach_proposal_list']\n object_feats = bbox_results['object_feats']\n return all_stage_bbox_results\n" ]
[ [ "numpy.concatenate", "torch.cat", "torch.from_numpy" ] ]
Site-Command/lightning-flash
[ "bfff08ded9cf193cce1cd16e7034d8005de172ae", "bfff08ded9cf193cce1cd16e7034d8005de172ae" ]
[ "flash/core/classification.py", "tests/core/test_finetuning.py" ]
[ "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Callable, List, Mapping, Optional, Sequence, Union\n\nimport torch\nimport torch.nn.functional as F\nimport torchmetrics\nfrom pytorch_lightning.utilities import rank_zero_warn\n\nfrom flash.core.data.data_source import LabelsState\nfrom flash.core.data.process import Serializer\nfrom flash.core.model import Task\n\n\ndef binary_cross_entropy_with_logits(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n \"\"\"Calls BCE with logits and cast the target one_hot (y) encoding to floating point precision.\"\"\"\n return F.binary_cross_entropy_with_logits(x, y.float())\n\n\nclass ClassificationTask(Task):\n\n def __init__(\n self,\n *args,\n loss_fn: Optional[Callable] = None,\n metrics: Union[torchmetrics.Metric, Mapping, Sequence, None] = None,\n multi_label: bool = False,\n serializer: Optional[Union[Serializer, Mapping[str, Serializer]]] = None,\n **kwargs,\n ) -> None:\n if metrics is None:\n metrics = torchmetrics.Accuracy(subset_accuracy=multi_label)\n\n if loss_fn is None:\n loss_fn = binary_cross_entropy_with_logits if multi_label else F.cross_entropy\n super().__init__(\n *args,\n loss_fn=loss_fn,\n metrics=metrics,\n serializer=serializer or Classes(multi_label=multi_label),\n **kwargs,\n )\n\n def to_metrics_format(self, x: torch.Tensor) -> torch.Tensor:\n if getattr(self.hparams, \"multi_label\", False):\n return torch.sigmoid(x)\n # we'll assume that the data always comes as `(B, C, ...)`\n return torch.softmax(x, dim=1)\n\n\nclass ClassificationSerializer(Serializer):\n \"\"\"A base class for classification serializers.\n\n Args:\n multi_label: If true, treats outputs as multi label logits.\n \"\"\"\n\n def __init__(self, multi_label: bool = False):\n super().__init__()\n\n self._mutli_label = multi_label\n\n @property\n def multi_label(self) -> bool:\n return self._mutli_label\n\n\nclass Logits(ClassificationSerializer):\n \"\"\"A :class:`.Serializer` which simply converts the model outputs (assumed to be logits) to a list.\"\"\"\n\n def serialize(self, sample: Any) -> Any:\n return sample.tolist()\n\n\nclass Probabilities(ClassificationSerializer):\n \"\"\"A :class:`.Serializer` which applies a softmax to the model outputs (assumed to be logits) and converts to a\n list.\"\"\"\n\n def serialize(self, sample: Any) -> Any:\n if self.multi_label:\n return torch.sigmoid(sample).tolist()\n return torch.softmax(sample, -1).tolist()\n\n\nclass Classes(ClassificationSerializer):\n \"\"\"A :class:`.Serializer` which applies an argmax to the model outputs (either logits or probabilities) and\n converts to a list.\n\n Args:\n multi_label: If true, treats outputs as multi label logits.\n\n threshold: The threshold to use for multi_label classification.\n \"\"\"\n\n def __init__(self, multi_label: bool = False, threshold: float = 0.5):\n super().__init__(multi_label)\n\n self.threshold = threshold\n\n def serialize(self, sample: Any) -> Union[int, List[int]]:\n if 
self.multi_label:\n one_hot = (sample.sigmoid() > self.threshold).int().tolist()\n result = []\n for index, value in enumerate(one_hot):\n if value == 1:\n result.append(index)\n return result\n return torch.argmax(sample, -1).tolist()\n\n\nclass Labels(Classes):\n \"\"\"A :class:`.Serializer` which converts the model outputs (either logits or probabilities) to the label of the\n argmax classification.\n\n Args:\n labels: A list of labels, assumed to map the class index to the label for that class. If ``labels`` is not\n provided, will attempt to get them from the :class:`.LabelsState`.\n\n multi_label: If true, treats outputs as multi label logits.\n\n threshold: The threshold to use for multi_label classification.\n \"\"\"\n\n def __init__(self, labels: Optional[List[str]] = None, multi_label: bool = False, threshold: float = 0.5):\n super().__init__(multi_label=multi_label, threshold=threshold)\n self._labels = labels\n\n if labels is not None:\n self.set_state(LabelsState(labels))\n\n def serialize(self, sample: Any) -> Union[int, List[int], str, List[str]]:\n labels = None\n\n if self._labels is not None:\n labels = self._labels\n else:\n state = self.get_state(LabelsState)\n if state is not None:\n labels = state.labels\n\n classes = super().serialize(sample)\n\n if labels is not None:\n if self.multi_label:\n return [labels[cls] for cls in classes]\n return labels[classes]\n else:\n rank_zero_warn(\"No LabelsState was found, this serializer will act as a Classes serializer.\", UserWarning)\n return classes\n", "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any\n\nimport pytest\nimport torch\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\n\nfrom flash import Trainer\nfrom flash.core.finetuning import NoFreeze\nfrom flash.core.utilities.imports import _TORCHVISION_AVAILABLE\nfrom flash.image.classification import ImageClassifier\n\n\nclass DummyDataset(torch.utils.data.Dataset):\n\n def __getitem__(self, index: int) -> Any:\n return {\"input\": torch.rand(3, 64, 64), \"target\": torch.randint(10, size=(1, )).item()}\n\n def __len__(self) -> int:\n return 100\n\n\n@pytest.mark.skipif(not _TORCHVISION_AVAILABLE, reason=\"torchvision isn't installed.\")\n@pytest.mark.parametrize(\n \"strategy\", ['no_freeze', 'freeze', 'freeze_unfreeze', 'unfreeze_milestones', None, 'cls', 'chocolat']\n)\ndef test_finetuning(tmpdir: str, strategy):\n train_dl = torch.utils.data.DataLoader(DummyDataset())\n val_dl = torch.utils.data.DataLoader(DummyDataset())\n task = ImageClassifier(10, backbone=\"resnet18\")\n trainer = Trainer(fast_dev_run=True, default_root_dir=tmpdir)\n if strategy == \"cls\":\n strategy = NoFreeze()\n if strategy == 'chocolat' or strategy is None:\n with pytest.raises(MisconfigurationException, match=\"strategy should be provided\"):\n trainer.finetune(task, train_dl, val_dl, strategy=strategy)\n else:\n trainer.finetune(task, train_dl, val_dl, strategy=strategy)\n" ]
[ [ "torch.sigmoid", "torch.argmax", "torch.softmax" ], [ "torch.rand", "torch.randint" ] ]
sophiaalthammer/parm
[ "ecf2dce5ee225b18e1ed3736a86696cc81e0797c" ]
[ "preprocessing/coliee21_task2_dpr.py" ]
[ "import os\r\nimport argparse\r\nimport jsonlines\r\nimport json\r\nimport pickle\r\nimport matplotlib.pyplot as plt\r\nimport csv\r\nfrom preprocessing.dpr_preprocessing import corpus_to_ctx_file\r\nfrom eval.eval_bm25_coliee2021 import read_label_file\r\nfrom preprocessing.coliee21_task2_bm25 import ranking_eval, eval_ranking_bm25\r\nfrom analysis.compare_bm25_dpr import evaluate_weight, sort_write_trec_output, read_in_aggregated_scores, \\\r\n compare_overlap_rel, analyze_score_distribution\r\n\r\n\r\ndef entailed_fragment_to_qa(train_dir):\r\n list_dir = [x for x in os.walk(train_dir)]\r\n for sub_dir in list_dir[0][1]:\r\n with open(os.path.join(train_dir, sub_dir, 'qa.csv'), 'wt') as tsv_file:\r\n writer = csv.writer(tsv_file, delimiter='\\t', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\r\n with open(os.path.join(train_dir, sub_dir, 'entailed_fragment.txt'), 'r') as entailed_fragment:\r\n query_text_lines = entailed_fragment.read().splitlines()\r\n query_text = ' '.join([text.strip().replace('\\n', '') for text in query_text_lines])\r\n writer.writerow([query_text, [str(sub_dir)]])\r\n\r\n\r\ndef read_run_whole_doc(pred_dir: str, scores='ranks'):\r\n # geh in den bm25 folder, lies in dokument und query: dann dict {query: {top 1000}}\r\n run = {}\r\n for root, dirs, files in os.walk(pred_dir):\r\n for file in files:\r\n with open(os.path.join(pred_dir, file), 'r') as json_file:\r\n pred = json.load(json_file)\r\n\r\n for question in pred:\r\n question_id = question.get('answers')[0].split('_')[0]\r\n pred_list = {}\r\n i = 0\r\n for predition in question.get('ctxs'):\r\n if scores == 'scores':\r\n pred_list.update({predition.get('id').split('_')[1]: float(predition.get('score'))})\r\n else:\r\n pred_list.update({predition.get('id').split('_')[1]: len(question.get('ctxs')) - i})\r\n i += 1\r\n run.update({question_id: pred_list})\r\n return run\r\n\r\n\r\ndef create_plot_data(measures):\r\n plotting_data = {}\r\n for key, value in measures.items():\r\n if not plotting_data.get(key.split('_')[1]):\r\n plotting_data.update({key.split('_')[1]: [0, 0]})\r\n if 'P' in key:\r\n plotting_data.get(key.split('_')[1])[1] = value\r\n if 'recall' in key:\r\n plotting_data.get(key.split('_')[1])[0] = value\r\n\r\n # order them:\r\n desired_order_list = [int(key) for key, value in plotting_data.items()]\r\n desired_order_list.sort()\r\n desired_order_list = [str(x) for x in desired_order_list]\r\n plotting_data_sorted = {k: plotting_data[k] for k in desired_order_list}\r\n return plotting_data_sorted\r\n\r\n\r\ndef plot_measures(measures, eval_dir, plot_file):\r\n plt.figure(figsize=(10, 8))\r\n plt.xlabel('recall', fontsize=15)\r\n plt.ylabel('precision', fontsize=15)\r\n for name, measure in measures.items():\r\n xs, ys = zip(*measure.values())\r\n labels = measure.keys()\r\n # display\r\n plt.scatter(xs, ys, marker='o')\r\n plt.plot(xs, ys, label=name)\r\n for label, x, y in zip(labels, xs, ys):\r\n plt.annotate(label, xy=(x, y))\r\n plt.legend(loc=\"upper right\")\r\n plt.savefig(os.path.join(eval_dir, plot_file))\r\n\r\n\r\ndef calculcate_f1_score(plotting_data, output_dir, output_file):\r\n with open(os.path.join(output_dir, output_file), 'w') as f:\r\n for name, measure in plotting_data.items():\r\n for key, value in measure.items():\r\n f1_score = 2*value[0]*value[1]/(value[0]+value[1])\r\n f.writelines(' '.join([name, key, str(f1_score)]) + '\\n')\r\n\r\n\r\ndef plot_f1_score(plotting_data, eval_dir, plot_file):\r\n f1_dict = {}\r\n for name, measure in plotting_data.items():\r\n 
f1_dict.update({name: {}})\r\n for key, value in measure.items():\r\n f1_score = 2 * value[0] * value[1] / (value[0] + value[1])\r\n f1_dict.get(name).update({key: [key, f1_score]})\r\n plt.figure(figsize=(10, 8))\r\n plt.xlabel('cut-off value', fontsize=15)\r\n plt.ylabel('f1-score', fontsize=15)\r\n for name, measure in f1_dict.items():\r\n xs, ys = zip(*measure.values())\r\n labels = measure.keys()\r\n plt.scatter(xs, ys, marker='o')\r\n plt.plot(xs, ys, label=name)\r\n for label, x, y in zip(labels, xs, ys):\r\n plt.annotate(label, xy=(x, y))\r\n plt.legend(loc=\"upper right\")\r\n plt.savefig(os.path.join(eval_dir, plot_file))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n mode = ['val', False]\r\n train_dir = '/mnt/c/Users/salthamm/Documents/phd/data/coliee2021/task2/{}'.format(mode[0])\r\n\r\n # create corpus for each sub_dir\r\n #list_dir = [x for x in os.walk(train_dir)]\r\n #for sub_dir in list_dir[0][1]:\r\n # jsonl_file = os.path.join(train_dir, sub_dir, 'candidates.jsonl')\r\n # out_file = os.path.join(train_dir, sub_dir, 'ctx_candidates.tsv')\r\n # corpus_to_ctx_file(jsonl_file, out_file)\r\n\r\n # create qa files for each sub_dir\r\n #entailed_fragment_to_qa(train_dir)\r\n\r\n # evaluate\r\n if mode[0] == 'train':\r\n label_file = '/mnt/c/Users/salthamm/Documents/phd/data/coliee2021/task2/task2_train_wo_val_labels_2021.json'\r\n elif mode[0] == 'val':\r\n label_file = '/mnt/c/Users/salthamm/Documents/phd/data/coliee2021/task2/task2_val_labels_2021.json'\r\n\r\n pred_dir = '/mnt/c/Users/salthamm/Documents/phd/data/coliee2021/task2/dpr/output/train_wo_val/{}'.format(mode[0])\r\n eval_dir = '/mnt/c/Users/salthamm/Documents/phd/data/coliee2021/task2/dpr/eval/train_wo_val/{}'.format(mode[0])\r\n output_file = 'eval_dpr_{}'.format(mode[0])\r\n\r\n # evaluate and plot the recall and precision\r\n qrels = read_label_file(label_file)\r\n run = read_run_whole_doc(pred_dir, 'scores')\r\n\r\n measures = ranking_eval(qrels, run, eval_dir, output_file)\r\n #plot_recall_precision(measures, eval_dir)\r\n\r\n\r\n # bm25 measures!\r\n train_dir = '/mnt/c/Users/salthamm/Documents/phd/data/coliee2021/task2/{}'.format(mode[0])\r\n top_n = 50\r\n bm25_folder = '/mnt/c/Users/salthamm/Documents/phd/data/coliee2021/task2/bm25/search/{}/whole_doc_{}'.format(\r\n mode[0], mode[1])\r\n\r\n # evaluate bm25\r\n eval_dir = '/mnt/c/Users/salthamm/Documents/phd/data/coliee2021/task2/bm25/eval/{}/whole_doc_{}'.format(mode[0], mode[1])\r\n output_file = 'eval_bm25_recall_{}_whole_doc_{}'.format(mode[0], mode[1])\r\n\r\n if mode[0] == 'train':\r\n label_file = '/mnt/c/Users/salthamm/Documents/phd/data/coliee2021/task2/task2_train_wo_val_labels_2021.json'\r\n elif mode[0] == 'val':\r\n label_file = '/mnt/c/Users/salthamm/Documents/phd/data/coliee2021/task2/task2_val_labels_2021.json'\r\n\r\n # evaluate ranking of bm25 as if it was for whole documents -> no aggregation needed\r\n measures_bm25 = eval_ranking_bm25(label_file, bm25_folder, eval_dir, output_file)\r\n\r\n plotting_data_dpr = create_plot_data(measures)\r\n plotting_data_bm25 = create_plot_data(measures_bm25)\r\n\r\n plotting_data = {'DPR':plotting_data_dpr, 'BM25':plotting_data_bm25}\r\n #plot_measures(plotting_data, eval_dir, 'prec_rec_bm25_dpr_comparison.svg')\r\n\r\n # also add the weigthed files to plot!\r\n mode = ['val', 'weighting_1_4', 'scores']\r\n dpr_file = '/mnt/c/Users/salthamm/Documents/phd/data/coliee2021/task2/dpr/aggregate/{}/search_{}_something_aggregation_scores.txt'.format(\r\n mode[0], mode[0])\r\n bm25_file = 
'/mnt/c/Users/salthamm/Documents/phd/data/coliee2021/task2/bm25/aggregate/{}/search_{}_something_aggregation_overlap_scores.txt'.format(\r\n mode[0], mode[0])\r\n output_dir = '/mnt/c/Users/salthamm/Documents/phd/data/coliee2021/task2/plots'\r\n\r\n dpr_dict = read_in_aggregated_scores(dpr_file, 50)\r\n bm25_dict = read_in_aggregated_scores(bm25_file, 50)\r\n\r\n bm25_dict_new = {}\r\n for key, value in bm25_dict.items():\r\n bm25_dict_new.update({key:{}})\r\n for key2, value2 in value.items():\r\n bm25_dict_new.get(key).update({key2.split('_')[1]:value2})\r\n\r\n # check if both dictionaries contain the same query ids\r\n dpr_keys = list(dpr_dict.keys())\r\n dpr_keys.sort()\r\n assert dpr_keys == list(bm25_dict_new.keys())\r\n\r\n # read in the label files\r\n label_file_val = '/mnt/c/Users/salthamm/Documents/phd/data/coliee2021/task2/task2_val_labels_2021.json'\r\n\r\n if mode[0] == 'val':\r\n qrels = read_label_file(label_file_val)\r\n\r\n #compare_overlap_rel(dpr_dict, bm25_dict_new, qrels)\r\n\r\n # analyze score distribution\r\n #analyze_score_distribution(dpr_dict, 'dpr', output_dir)\r\n #analyze_score_distribution(bm25_dict_new, 'bm25', output_dir)\r\n weights = [[1, 1], [1, 2], [1, 3], [1, 4]]\r\n for weight in weights:\r\n run, measures = evaluate_weight(dpr_dict, bm25_dict_new, qrels, mode, weight[0], weight[1])\r\n # write out in trec format the aggregated list with the combined weights!\r\n # best is: overlap_ranks, weight_dpr=1 weight_bm25=3\r\n output_dir = '/mnt/c/Users/salthamm/Documents/phd/data/coliee2021/task2/bm25_dpr/eval/{}/'.format(mode[0])\r\n sort_write_trec_output(run, output_dir, mode)\r\n\r\n # same plotting graph for dpr and bm25 for task1, same graph of f1 score for task1!\r\n output_dir = '/mnt/c/Users/salthamm/Documents/phd/data/coliee2021/task2/bm25_dpr/eval/{}/'.format(mode[0])\r\n\r\n plotting_data_weighted = {}\r\n for weight in weights:\r\n with open(os.path.join(output_dir, 'measures_bm25_dpr_weigting_{}_{}.json'.format(weight[0], weight[1])),\r\n 'r') as f:\r\n measures_weight = json.load(f)\r\n plotting_data_weighted.update({'BM25:{} DPR:{}'.format(weight[1], weight[0]):create_plot_data(measures_weight)})\r\n\r\n plotting_data_weighted.update({'DPR':plotting_data_dpr, 'BM25':plotting_data_bm25})\r\n plot_measures(plotting_data_weighted, output_dir, 'prec_rec_bm25_dpr_weighting3.svg')\r\n calculcate_f1_score(plotting_data_weighted, output_dir, 'f1_scores_bm25_dpr.txt')\r\n\r\n plot_f1_score(plotting_data_weighted, output_dir, 'f1_scores_bm25_dpr_weighting.svg')\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n" ]
[ [ "matplotlib.pyplot.annotate", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.pyplot.figure", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.scatter" ] ]
ludmila-kuncarova/matched_markets
[ "ded2469997e5bf5b87613f35c38aeb6d35c0ca6e" ]
[ "matched_markets/tests/test_tbr_iroas.py" ]
[ "# Copyright 2020 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for //ads/amt/geoexperiments/methodology/tbr_iroas.py.\"\"\"\n\nimport copy\nimport os\n\nfrom absl import flags\nfrom matched_markets.examples import salesandcost\nfrom matched_markets.methodology import semantics\nfrom matched_markets.methodology import tbr_iroas\nfrom matched_markets.methodology import utils\nimport numpy as np\n\nimport unittest\n\n\n\n\nclass TBRiROASTest(unittest.TestCase):\n\n def setUp(self):\n \"\"\"This method will be run before each of the test methods in the class.\"\"\"\n\n super(TBRiROASTest, self).setUp()\n\n # Load the salesandcost dataset.\n csv_path = 'matched_markets/csv/'\n csv_dir = os.path.join(\"\", csv_path)\n self.data = salesandcost.example_data_formatted(csv_dir)\n\n # Data frame names for the salesandcost example.\n self.key_response = 'sales'\n self.key_cost = 'cost'\n self.key_group = 'geo.group'\n self.key_period = 'period'\n self.key_geo = 'geo'\n self.key_date = 'date'\n\n # Semantics for groups and periods.\n self.groups = semantics.GroupSemantics()\n self.periods = semantics.PeriodSemantics()\n\n def testFixedCostIROASSummary(self):\n\n # Fully set up a TBR object.\n iroas_model = tbr_iroas.TBRiROAS(use_cooldown=False)\n iroas_model.fit(self.data,\n key_response=self.key_response,\n key_cost=self.key_cost,\n key_group=self.key_group,\n key_period=self.key_period,\n key_date=self.key_date)\n\n # Arguments for the type of tests to conduct.\n level = 0.9\n posterior_threshold = 0.0\n tails = 1\n\n # Summary values from R.\n r_estimate = 2.946742\n r_precision = 0.120548\n r_lower = 2.826194\n r_incr_resp = 147337.122\n r_incr_cost = 50000\n r_probability = 1.0\n\n # Summary values from python.\n iroas = iroas_model.summary(\n level=level, posterior_threshold=posterior_threshold, tails=tails)\n py_estimate = iroas['estimate'].iloc[0]\n py_precision = iroas['precision'].iloc[0]\n py_lower = iroas['lower'].iloc[0]\n py_incr_resp = iroas['incremental_response'].iloc[0]\n py_incr_cost = iroas['incremental_cost'].iloc[0]\n py_probability = iroas['probability'].iloc[0]\n\n # Must do it like this as the R value is given with lower number of dps.\n order_estimate = utils.float_order(r_estimate - py_estimate)\n order_precision = utils.float_order(r_precision - py_precision)\n order_lower = utils.float_order(r_lower - py_lower)\n order_iresp = utils.float_order(r_incr_resp - py_incr_resp)\n order_icost = utils.float_order(r_incr_cost - py_incr_cost)\n order_probability = utils.float_order(r_probability - py_probability)\n\n # Conduct the tests.\n self.assertLess(order_estimate, -5)\n self.assertLess(order_precision, -5)\n self.assertLess(order_lower, -5)\n self.assertLess(order_iresp, -2) # incremental_response is a larger number.\n self.assertLess(order_icost, -5)\n self.assertLess(order_probability, -5)\n\n def testVariableCostIROASSummary(self, seed=1234):\n\n # Make behaviour 
deterministic.\n np.random.seed(seed=seed)\n\n # Fully set up a TBR object.\n iroas_model = tbr_iroas.TBRiROAS(use_cooldown=False)\n data = copy.copy(self.data)\n data.cost += 0.00001*np.random.normal(size=data.shape[0])\n\n iroas_model.fit(data,\n key_response=self.key_response,\n key_cost=self.key_cost,\n key_group=self.key_group,\n key_period=self.key_period,\n key_date=self.key_date)\n\n # Arguments for the type of tests to conduct.\n level = 0.9\n posterior_threshold = 0.0\n tails = 1\n\n # Summary values from R, treated as constants.\n # pylint: disable=invalid-name\n R_ESTIMATE = 2.946742\n R_PRECISION = 0.120548\n R_LOWER = 2.826194\n R_INCR_RESP = 147337.122\n R_INCR_COST = 50000\n R_PROBABILITY = 1.0\n # pylint: enable=invalid-name\n\n # Summary values from python.\n iroas = iroas_model.summary(\n level=level, posterior_threshold=posterior_threshold, tails=tails)\n py_estimate = iroas['estimate'].iloc[0]\n py_precision = iroas['precision'].iloc[0]\n py_lower = iroas['lower'].iloc[0]\n py_incr_resp = iroas['incremental_response'].iloc[0]\n py_incr_cost = iroas['incremental_cost'].iloc[0]\n py_probability = iroas['probability'].iloc[0]\n\n # Must do it like this as the R value is given with lower number of dps.\n order_estimate = utils.float_order(R_ESTIMATE - py_estimate)\n order_precision = utils.float_order(R_PRECISION - py_precision)\n order_lower = utils.float_order(R_LOWER - py_lower)\n order_iresp = utils.float_order(R_INCR_RESP - py_incr_resp)\n order_icost = utils.float_order(R_INCR_COST - py_incr_cost)\n order_probability = utils.float_order(R_PROBABILITY - py_probability)\n\n # Conduct the tests. Easier threshold as we added some noise.\n self.assertLess(order_estimate, -2)\n self.assertLess(order_precision, -2)\n self.assertLess(order_lower, -2)\n self.assertLess(order_iresp, -2) # incremental_response is a larger number.\n self.assertLess(order_icost, -2)\n self.assertLess(order_probability, -2)\n\n def testVariableCostIROASSummaryTwoTails(self, seed=1234):\n\n # Make behaviour deterministic.\n np.random.seed(seed=seed)\n\n # Fully set up a TBR object.\n iroas_model = tbr_iroas.TBRiROAS(use_cooldown=False)\n data = copy.copy(self.data)\n data.cost += 0.00001*np.random.normal(size=data.shape[0])\n\n iroas_model.fit(data,\n key_response=self.key_response,\n key_cost=self.key_cost,\n key_group=self.key_group,\n key_period=self.key_period,\n key_date=self.key_date)\n\n # Arguments for the type of tests to conduct.\n level = 0.9\n posterior_threshold = 0.0\n tails = 2\n\n # Summary values from R, treated as constants.\n # pylint: disable=invalid-name\n R_ESTIMATE = 2.947012\n R_PRECISION = 0.1557932\n R_LOWER = 2.79135\n R_UPPER = 3.102936\n R_INCR_RESP = 147337.122\n R_INCR_COST = 50000\n R_PROBABILITY = 1.0\n # pylint: enable=invalid-name\n\n # Summary values from python.\n iroas = iroas_model.summary(\n level=level, posterior_threshold=posterior_threshold, tails=tails)\n py_estimate = iroas['estimate'].iloc[0]\n py_precision = iroas['precision'].iloc[0]\n py_lower = iroas['lower'].iloc[0]\n py_upper = iroas['upper'].iloc[0]\n py_incr_resp = iroas['incremental_response'].iloc[0]\n py_incr_cost = iroas['incremental_cost'].iloc[0]\n py_probability = iroas['probability'].iloc[0]\n\n # Must do it like this as the R value is given with lower number of dps.\n order_estimate = utils.float_order(R_ESTIMATE - py_estimate)\n order_precision = utils.float_order(R_PRECISION - py_precision)\n order_lower = utils.float_order(R_LOWER - py_lower)\n order_upper = utils.float_order(R_UPPER 
- py_upper)\n order_iresp = utils.float_order(R_INCR_RESP - py_incr_resp)\n order_icost = utils.float_order(R_INCR_COST - py_incr_cost)\n order_probability = utils.float_order(R_PROBABILITY - py_probability)\n\n # Conduct the tests. Easier threshold as we added some noise.\n print(py_lower)\n print(py_upper)\n self.assertLess(order_estimate, -2)\n self.assertLess(order_precision, -2)\n self.assertLess(order_lower, -2)\n self.assertLess(order_upper, -2)\n self.assertLess(order_iresp, -2) # incremental_response is a larger number.\n self.assertLess(order_icost, -2)\n self.assertLess(order_probability, -2)\n\n def testIROASSummaryWithCooldown(self):\n\n # Fully set up a TBR object.\n iroas_model = tbr_iroas.TBRiROAS(use_cooldown=True)\n iroas_model.fit(self.data,\n key_response=self.key_response,\n key_cost=self.key_cost,\n key_group=self.key_group,\n key_period=self.key_period,\n key_date=self.key_date)\n\n # Arguments for the type of tests to conduct.\n level = 0.9\n posterior_threshold = 0.0\n tails = 1\n\n # Summary values from R, treated as constants.\n r_estimate = 2.946742\n\n # Summary values from python.\n iroas = iroas_model.summary(\n level=level, posterior_threshold=posterior_threshold, tails=tails)\n py_estimate = iroas['estimate'].iloc[0]\n\n # Must do it like this as the R value is given with lower number of dps.\n order_estimate = utils.float_order(r_estimate - py_estimate)\n\n # Conduct the tests. Easier threshold as we added some noise.\n self.assertLessEqual(order_estimate, -2)\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.random.seed", "numpy.random.normal" ] ]
fredzett/datastack
[ "ece795599c33ccbefcaef32f89559188b0185d58", "ece795599c33ccbefcaef32f89559188b0185d58", "ece795599c33ccbefcaef32f89559188b0185d58" ]
[ "datastack/helper.py", "datastack/datacolumn.py", "tests/tests_verbs/test_order_by.py" ]
[ "from typing import Dict\nimport numpy as np\nfrom numpy.lib.arraysetops import isin\n\n# Checks if dicts in DataTable._data are equal\n# needed because nested dicts containing different types (e.g. np.ndarray)\ndef _dicts_equal(a: Dict, b: Dict) -> bool:\n # Check if labels / columns are equal\n if a.keys() != b.keys():\n return False\n\n # Check if arrays are equal\n checks = []\n for arr1, arr2 in zip(a.values(), b.values()):\n if not isinstance(arr1, np.ndarray): arr1 = np.asarray(arr1)\n if not isinstance(arr2, np.ndarray): arr2 = np.asarray(arr2)\n #if arr1.dtype.type != arr2.dtype.type:\n # return False\n if arr1.dtype.type is np.str_:\n checks.append(np.array_equal(arr1, arr2))\n else:\n \n checks.append(np.allclose(arr1, arr2, rtol=1e-08))\n if not all(checks):\n return False\n return True\n\n\ndef cols_are_equal(col1, col2):\n \"Returns true\"\n return all(col1 == col2)\n\n", "from __future__ import annotations\n\n\nimport numpy as np\nfrom numbers import Number\nfrom typing import Collection, Dict, Type, Union, Any\nimport datastack as dt\n\ndef unpack1ddict(d: Dict):\n label, data = list(d.keys())[0], list(d.values())[0]\n return label, data\n\nclass DataColumn:\n '''DataColumn is a one dimensional data structure\n containing data array and a label'''\n def __init__(self, *label_and_column):\n if len(label_and_column) != 2:\n raise TypeError(f\"DataColumn is created using two parameters (label, data). {len(label_and_column)} where provided.\")\n label, data = label_and_column # unpack parameters\n self._label, self._data = self._check_and_convert(label, data)\n self._dtype = self._data.dtype.type\n\n def _check_and_convert(self, label, data) -> Union[str, np.ndarray]:\n 'Ensures that parameters for DataColumn are of correct type and converts if not and possible'\n if not isinstance(label, str): \n if isinstance(label, Number): \n label = str(label)\n else:\n raise TypeError(f\"Label of type {type(label)} cannot be stored in Column\")\n if not isinstance(data, np.ndarray): \n if isinstance(data, Number) or isinstance(data, str):\n data = np.asarray([data])\n elif isinstance(data, Collection):\n data = np.asarray(data)\n else:\n raise TypeError(f\"Data of type {type(data)} cannot be stored in Column\")\n return label, data\n\n\n def __repr__(self):\n return str(self.__dict__)\n\n def __len__(self) -> int:\n return len(self._data)\n\n ########################\n # From methods\n ########################\n\n @staticmethod\n def from_dict(**d: Dict) -> DataColumn:\n if len(d) > 1: raise ValueError(\"Dict must have one key only\")\n label, data = unpack1ddict(d)\n return DataColumn(label, data)\n\n\n ########################\n # To methods\n ########################\n def to_dict(self):\n return {self._label:self._data}\n\n def to_array(self):\n return self._data\n\n ########################\n # Comparison operators\n ########################\n\n def __eq__(self, other) -> DataColumn:\n 'Checks elementwise if self(el) == other(el) and returns np.array with bools' \n if isinstance(other, self.__class__): \n if (self._dtype == np.str_) or (other._dtype == np.str_):\n out = [True if el1 == el2 else False for el1, el2 in zip(self._data, other._data)]\n else:\n out = self._data == other._data\n elif isinstance(other, Number):\n out = self._data == np.repeat(other, len(self))\n elif isinstance(other, str):\n out = np.array([True if el == other else False for el in self._data])\n else:\n raise TypeError(f\"DataColumn cannot be compared with {type(other)}\")\n\n return DataColumn(\"\", 
out)\n\n def __ne__(self, other) -> DataColumn:\n comp = self == other\n comp._data = ~comp._data\n return comp\n\n def __gt__(self, other) -> DataColumn:\n 'Checks elementwise if self(el) > other(el) and returns DataColumn with bools'\n if isinstance(other, self.__class__): \n if (self._dtype == np.str_) or (other._dtype == np.str_):\n raise TypeError(f\"DataColumn(s) must be of numeric type to apply > check\")\n else:\n out = self._data > other._data\n elif isinstance(other, Number):\n out = self._data > np.repeat(other, len(self))\n else:\n raise TypeError(f\"DataColumn cannot be compared with {type(other)}\")\n\n return DataColumn(\"\", out)\n\n def __ge__(self, other) -> DataColumn:\n 'Checks elementwise if self(el) >= other(el) and returns DataColumn with bools'\n if isinstance(other, self.__class__): \n if (self._dtype == np.str_) or (other._dtype == np.str_):\n raise TypeError(f\"DataColumn(s) must be of numeric type to apply >= check\")\n else:\n out = self._data >= other._data\n elif isinstance(other, Number):\n out = self._data >= np.repeat(other, len(self))\n else:\n raise TypeError(f\"DataColumn cannot be compared with {type(other)}\")\n\n return DataColumn(\"\", out)\n\n def __lt__(self, other) -> DataColumn:\n 'Checks elementwise if self(el) < other(el); implemented as the negation of >='\n comp = self >= other\n comp._data = ~comp._data\n return comp\n\n def __le__(self, other) -> DataColumn:\n 'Checks elementwise if self(el) <= other(el); implemented as the negation of >'\n comp = self > other\n comp._data = ~comp._data\n return comp\n\n\n\n ########################\n # Base math operators\n ########################\n\n\n def __add__(self, other):\n if isinstance(other, self.__class__):\n self._data = self._data + other._data\n\n elif isinstance(other, Number):\n self._data = self._data + other\n else:\n raise TypeError(f\"Cannot add type {type(other)} to DataColumn\")\n\n return self\n\n __radd__ = __add__\n\n def __sub__(self, other):\n if isinstance(other, self.__class__):\n self._data = self._data - other._data\n\n elif isinstance(other, Number):\n self._data = self._data - other\n else:\n raise TypeError(f\"Cannot subtract type {type(other)} from DataColumn\")\n\n return self\n\n def __rsub__(self, other):\n return (self - other) * -1\n\n def __mul__(self, other):\n if isinstance(other, self.__class__):\n self._data = self._data * other._data\n\n elif isinstance(other, Number):\n self._data = self._data * other\n else:\n raise TypeError(f\"Cannot multiply DataColumn with type {type(other)}\")\n\n return self\n\n def __rmul__(self, other):\n return self.__mul__(other)\n \n def __truediv__(self, other):\n if isinstance(other, self.__class__):\n self._data = self._data / other._data\n\n elif isinstance(other, Number):\n self._data = self._data / other\n else:\n raise TypeError(f\"Cannot divide DataColumn by type {type(other)}\")\n\n return self\n\n def __rtruediv__(self, other):\n if isinstance(other, self.__class__):\n self._data = other._data / self._data\n elif isinstance(other, Number):\n self._data = other / self._data\n else:\n raise TypeError(f\"Cannot divide type {type(other)} by DataColumn\")\n return self\n\n def __pow__(self, other):\n if isinstance(other, self.__class__):\n self._data = self._data ** other._data\n\n elif isinstance(other, Number):\n self._data = self._data ** other\n else:\n raise TypeError(f\"Cannot pow type {type(other)} to DataColumn\")\n\n return self\n\n\n def 
__rpow__(self, other):\n if isinstance(other, self.__class__):\n self._data = other._data ** self._data \n\n elif isinstance(other, Number):\n self._data = other ** self._data \n else:\n raise TypeError(f\"Cannot pow type {type(other)} to DataColumn\") \n\n return self\n\n\n\n\n\n ########################\n # Bind methods\n ########################\n def append(self, el: Any) -> Self:\n '''Appends an element to the end of the DataColumn\n \n Example:\n\n col = (DataColumn(\"H1\", (1,2,3))\n .append(34)\n )\n\n > DataColumn(\"H1\", (1,2,3,4))\n '''\n self._data = np.hstack((self._data, np.array([el])))\n return self\n\n def vstack(self, other: DataColumn) -> DataColumn:\n '''Appends a DataColumn to the end of the DataColumn'''\n if not isinstance(other, DataColumn): raise TypeError(f\"vstack requires a DataColumn as a parameter. You provided {type(other)}.\")\n data = np.hstack((self._data, other._data))\n return DataColumn(self._label, data)\n\n def hstack(self, other: DataColumn) -> dt.DataTable:\n if not len(self) == len(other): raise ValueError(\"Both DataColumns must have the same number of elements to be hstacked\")\n if not isinstance(other, self.__class__): raise TypeError(f\"You can only hstack DataColumns not {type(other)}\")\n \n if self._label == other._label: raise ValueError(\"Cannot hstack two datacolumns with same label\")\n\n new_dict = dict(self.to_dict(), **other.to_dict())\n return dt.DataTable.from_dict(new_dict) \n\n ########################\n # Iterator\n ########################\n def __iter__(self):\n return ColumnIterator(self)\n\n\n ########################\n # Other functions\n ########################\n def rename(self, name: str):\n self._label = name \n return self\n\n def sum(self, skip_nan=False) -> Number:\n if self._dtype == np.str_: raise TypeError(\"Cannot apply sum to data consisting of strings\")\n if skip_nan: \n return np.nansum(self._data)\n else:\n return np.sum(self._data)\n\n def cumsum(self) -> DataColumn:\n if self._dtype == np.str_: raise TypeError(\"Cannot apply sum to data consisting of strings\")\n return DataColumn(\"cumsum\", self._data.cumsum())\n\n def lag(self, n: int) -> DataColumn:\n \n replace = np.nan\n data = self._data\n\n if self._dtype == np.str_: \n replace = \"nan\"\n elif (self._dtype == np.int_) or (self._dtype == np.bool_):\n data = data.astype(float)\n\n\n if abs(n) > len(self): \n raise ValueError(f\"You can not calculate lag {n} for DataColumn with length {len(self)}\")\n else:\n if n > 0:\n shifted = np.pad(data, (n,0), constant_values=replace)[:-n]\n elif n < 0:\n shifted = np.pad(data, (0,abs(n)), constant_values=replace)[abs(n):]\n else:\n shifted = self._data\n n = str(n) if n > 0 else f\"neg{abs(n)}\"\n return DataColumn(\"lag_\" + n, shifted)\n\n\n##### Utility functions for DataColumm\n\ndef are_equal(*cs: DataColumn) -> bool:\n 'Checks if DataColumns are equal including label and returns True / False'\n first_col = cs[0]\n first_type = first_col._data.dtype.type\n for col in cs[1:]:\n if col._label == first_col._label:\n if (first_type == np.str_) or (col._data.dtype.type == np.str_):\n return np.array_equal(first_col._data, col._data)\n else:\n return np.allclose(first_col._data,col._data)\n return False\n\n\ndef are_not_equal(*cs: DataColumn) -> bool:\n return not are_equal(*cs)\n\nclass ColumnIterator:\n 'Iterator class used by DataColumn'\n def __init__(self, dc: DataColumn):\n self._datacolumn = dc\n self._idx = 0\n \n def __iter__(self):\n return self\n\n def __next__(self):\n if self._idx < 
len(self._datacolumn):\n result = self._datacolumn._data[self._idx]\n else:\n raise StopIteration\n self._idx += 1\n return result", "from datastack import DataTable, DataColumn, label, col, desc\nimport pytest \n\nimport numpy as np\n\ndef test_one():\n tbl = (DataTable(a=(1,2,1,2,3,1), b=(4,5,6,3,2,1),c=(6,7,8,1,2,3))\n .order_by(desc(label(\"b\")))\n )\n exp = DataTable(a=(1,2,1,2,3,1), b=(6,5,4,3,2,1), c=(8,7,6,1,2,3))\n assert tbl == exp\n\ndef test_one_str():\n tbl = (DataTable(a=(1,2,1,2,3,1), b=(4,5,6,3,2,1),c=list(\"abcdef\"))\n .order_by(label(\"b\"))\n )\n exp = DataTable(a=(1,3,2,1,2,1), b=(1,2,3,4,5,6), c=list(\"fedabc\"))\n assert tbl == exp\n\ndef test_two():\n tbl = (DataTable(b=(4,5,2,3,2,1),c=(6,7,8,1,2,3),a=(1,2,1,2,3,1))\n .order_by(label(\"b\"), desc(label(\"a\")), label(\"c\"), )\n )\n exp = DataTable( b=(1,2,2,3,4,5), c=(3,2,8,1,6,7),a=(1,3,1,2,1,2))\n assert tbl == exp\n\ndef test_two_asc():\n data = {\"col1\": np.array((1, 2, 3, 4, 5, 4, 3, 2, 1)),\n \"col2\": np.array(list(\"abcdeabcd\")),\n \"col3\": np.array((10, 11, 9, 8, 7, 2, 12, 100, 1))}\n\n tbl = (DataTable.from_dict(data)\n .order_by(label(\"col1\"), label(\"col2\"))\n )\n exp = DataTable.from_dict({'col1': np.array([1, 1, 2, 2, 3, 3, 4, 4, 5]),\n 'col2': np.array(['a', 'd', 'b', 'c', 'b', 'c', 'a', 'd', 'e']),\n 'col3': np.array([10, 1, 11, 100, 12, 9, 2, 8, 7])})\n assert tbl == exp\n\ndef test_two_asc_desc():\n data = {\"col1\": np.array((1, 2, 3, 4, 5, 4, 3, 2, 1)),\n \"col2\": np.array(list(\"abcdeabcd\")),\n \"col3\": np.array((10, 11, 9, 8, 7, 2, 12, 100, 1))}\n\n tbl = (DataTable.from_dict(data)\n .order_by(label(\"col1\"), desc(label(\"col2\")))\n )\n exp = DataTable.from_dict({'col1': np.array([1, 1, 2, 2, 3, 3, 4, 4, 5]),\n 'col2': np.array(['d', 'a', 'c', 'b', 'c', 'b', 'd', 'a', 'e']),\n 'col3': np.array([1, 10, 100, 11, 9, 12, 8, 2, 7])})\n assert tbl == exp\n\n\n " ]
[ [ "numpy.allclose", "numpy.array_equal", "numpy.asarray" ], [ "numpy.array", "numpy.pad", "numpy.array_equal", "numpy.asarray", "numpy.sum", "numpy.nansum", "numpy.allclose", "numpy.hstack" ], [ "numpy.array" ] ]
wm75/gemini2cbio
[ "1464f9799d6310a54cc5ff701d1358f9c0acaefd" ]
[ "converter_panda.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*- \n\nimport pandas as pd\n\nimport config\n\n\n#read input and save as df\ndf = pd.read_csv(config.inputfile, sep='\\t')\n\n\n#replace empty cells in columns impact and impact_so with Targeted Region \ndf['impact'] = df.impact.fillna('Targeted_Region')\ndf['impact_so'] = df.impact_so.fillna('Targeted_Region')\n\n\n#replace cells in column impact\ndf['impact'] = df.impact.replace({\n #VEP terms (uses SO by default)\n\t'splice_acceptor_variant' : 'Splice_Site',\n\t'splice_donor_variant' : 'Splice_Site',\n\t'stop_gained' : 'Nonsense_Mutation',\n\t'stop_lost' : 'Nonstop_Mutation',\n\t'frameshift_variant' : 'Frame_Shift_',\n\t'initiator_codon_variant' : 'Translation_Start_Site',\n\t'missense_variant' : 'Missense_Mutation',\n\t'inframe_insertion' : 'In_Frame_Ins',\n\t'inframe_deletion' : 'In_Frame_Del',\n\t'splice_region_variant' : 'Splice_Region',\n\t'mature_miRNA_variant' : 'RNA',\n\t'regulatory_region_variant' : 'IGR',\n\t'TF_binding_site_variant' : 'IGR',\n\t'regulatory_region_ablation' : '',\n\t'regulatory_region_amplification' : '',\n\t'TFBS_ablation' : '',\n\t'TFBS_amplification' : '',\n\t'stop_retained_variant' : 'Silent',\n\t'synonymous_variant' : 'Silent',\n\t'5_prime_UTR_variant' : \"5'UTR\",\n\t'3_prime_UTR_variant' : \"3'UTR\",\n\t'intron_variant' : 'Intron',\n\t'coding_sequence_variant' : 'Missense_Mutation',\n\t'upstream_gene_variant' : \"5'Flank\",\n\t'downstream_gene_variant' : \"3'Flank\",\n\t'intergenic_variant' : 'RNA',\n\t'nc_transcript_variant' : 'RNA',\n\t'NMD_transcript_variant' : 'Silent',\n\t'incomplete_terminal_codon_variant' : 'Silent',\n\t'non_coding_exon_variant' : 'RNA',\n\t'transcript_ablation' : 'Splice_Site',\n\t'transcript_amplification' : 'Intron',\n\t'feature_elongation' : '',\n\t'feature_truncation' : ''\n})\n\n\n#replace cells in column impact_so\ndf['impact_so'] = df.impact_so.replace({\n #snpEff terms\n 'SPLICE_SITE_ACCEPTOR' : 'Splice_Site',\n\t'SPLICE_SITE_DONOR' : 'Splice_Site', \n\t'STOP_GAINED' : 'Nonsense_Mutation',\n\t'STOP_LOST' : 'Nonstop_Mutation', \n\t'FRAME_SHIFT' : 'Frame_Shift_', \n\t'START_LOST' : '', \n\t'EXON_DELETED' : '',\n\t'NON_SYNONYMOUS_START' : '',\n\t'CHROMOSOME_LARGE_DELETION' : '',\n\t'RARE_AMINO_ACID' : '',\n\t'NON_SYNONYMOUS_CODING' : 'Missense_Mutation',\n\t'CODON_INSERTION' : 'In_Frame_Ins',\n\t'CODON_DELETION' : 'In_Frame_Del',\n\t'CODON_CHANGE' : '',\n\t'CODON_CHANGE_PLUS_CODON_DELETION' : '',\n\t'CODON_CHANGE_PLUS_CODON_INSERTION' : '',\n\t'UTR_5_DELETED' : '',\n\t'UTR_3_DELETED' : '',\n\t'SPLICE_SITE_REGION' : 'Splice_Region',\n\t'SYNONYMOUS_STOP' : 'Silent',\n\t'SYNONYMOUS_CODING' : 'Silent',\n\t'UTR_5_PRIME' : \"5'UTR\",\n\t'UTR_3_PRIME' : \"3'UTR\",\n\t'INTRON' : 'Intron',\n\t'CDS' : 'Missense_Mutation',\n\t'UPSTREAM' : \"5'Flank\",\n\t'DOWNSTREAM' : \"3'Flank\",\n\t'INTERGENIC' : 'RNA',\n\t'INTERGENIC_CONSERVED' : '',\n\t'INTRAGENIC' : 'Intron',\n\t'GENE' : '',\n\t'TRANSCRIPT' : '',\n\t'EXON' : 'RNA',\n\t'START_GAINED' : '',\n\t'SYNONYMOUS_START' : '',\n\t'INTRON_CONSERVED' : ''\n})\n\n\n#add missing columns\nif 'Mutation_Status' not in df:\n df['Mutation_Status'] = config.mutationStatus\n\nif 'Tumor_Sample_Barcode' not in df:\n df['Tumor_Sample_Barcode'] = config.tumorSampleBarcode\n\nif 'NCBI_Build' not in df:\n df['NCBI_Build'] = config.ncbiBuild\n\nif 'Center' not in df:\n df['Center'] = config.center\n\n\n#merge impact and impact_so to column impact (replace impact value with impact_so value in case that impact value is empty)\ndf.loc[df.impact == 
'Targeted_Region', 'impact'] = df.impact_so\n\n\n#add ins or del from sub_type to impact in case of Frame_Shift\ndf.loc[df.impact == 'Frame_Shift_', 'impact'] = df.impact.astype(str) + df.sub_type.astype(str).apply(lambda x: x.capitalize())\n\n\n#select columns and order\ndf1 = df[['gene', 'entrez_id', 'Center', 'NCBI_Build', 'start', 'end', 'strand', 'impact', 'type', 'ref', 'alt', 'Tumor_Sample_Barcode', 'Mutation_Status', 'aa_change', 'chrom']]\n\n\n#rename columns (gemini --> cBioPortal)\ndf2 = df1.rename(columns={\n 'chrom' : 'Chromosome',\n 'start' : 'Start_Position',\n\t'end' : 'End_Position',\n\t'ref' : 'Reference_Allele',\n\t'alt' : 'Tumor_Seq_Allele1',\n\t'type' : 'Variant_Type',\n\t'gene' : 'Hugo_Symbol',\n\t'aa_change' : 'HGVSp_Short',\n\t'impact' : 'Variant_Classification',\n\t'transcript' : 'Transcript_ID',\n\t'entrez_id' : 'Entrez_Gene_Id',\n\t'strand' : 'Strand'\n})\n\n\n#generate output\ndf2.to_csv(config.outputfile, sep='\\t', encoding='utf-8', index=False)\n" ]
[ [ "pandas.read_csv" ] ]
ReinFS/password-generator
[ "85635061723e66ae7a8aa141dd31b138834ad365" ]
[ "main.py" ]
[ "import numpy as np\nfrom random import randint\n\nhighAlphabet = np.array([\"A\",\"B\",\"C\",\"D\",\"E\",\"F\",\"G\",\"H\",\"I\",\"J\",\"K\",\"L\",\"M\",\"N\",\"O\",\"P\",\"Q\",\"R\",\"S\",\"T\",\"U\",\"V\",\"W\",\"X\",\"Y\",\"Z\"])\nlowAlphabet = np.array([\"a\",\"b\",\"c\",\"d\",\"e\",\"f\",\"g\",\"h\",\"i\",\"j\",\"k\",\"l\",\"m\",\"n\",\"o\",\"p\",\"q\",\"r\",\"s\",\"t\",\"u\",\"v\",\"w\",\"x\",\"y\",\"z\"])\nsymbols = np.array([\"!\",\"@\",\"#\",\"\\x24\", \"%\",\"^\",\"&\",\"*\",\"(\",\")\",\"-\",\"_\",\"+\",\"=\"])\n\ndef GetPassword(length):\n finalPass = ''\n\n if length < 12:\n print(\"Warning: passwords shorter than 12 characters are weak, but generating one anyway.\")\n\n def GetKey():\n typeIs = randint(1,4)\n\n if typeIs == 1: \n return highAlphabet[randint(0,25)]\n if typeIs == 2: \n return lowAlphabet[randint(0,25)]\n if typeIs == 3: \n return randint(0,9)\n if typeIs == 4: \n return symbols[randint(0,13)]\n\n while len(finalPass) < length:\n finalPass += str(GetKey())\n\n print(f'The Password is \"{finalPass}\"')\n\n def SaveToFile(number=1):\n # \"x\" mode raises FileExistsError if the file already exists,\n # so recurse with the next suffix until a free filename is found\n try:\n with open(f\"password{number}.txt\", \"x\") as f:\n f.write(finalPass)\n\n except FileExistsError:\n tries = number + 1\n SaveToFile(tries)\n\n SaveToFile()\n\ndef GetLength():\n try:\n pwdLen = int(input(\"Password Length? (numbers only) \"))\n GetPassword(pwdLen)\n\n except ValueError:\n print(\"Invalid input: the password length must be a number.\")\n GetLength()\n \nif __name__ == \"__main__\": \n GetLength()\n" ]
[ [ "numpy.array" ] ]
louisccc/code2vec
[ "125ab5ab7e4bda68a4ba6dbe4babe2854c7bca80" ]
[ "cores/code2vec.py" ]
[ "from pathlib import Path\nfrom tqdm import tqdm\nimport pickle\nimport numpy as np\nimport tensorflow as tf\n\n\nclass code2vec(tf.keras.Model): \n\n def __init__(self, config):\n super(code2vec, self).__init__()\n\n self.config = config \n\n self.def_parameters()\n\n def def_parameters(self): \n emb_initializer = tf.initializers.glorot_normal()\n self.ents_embeddings = tf.Variable(emb_initializer(shape=(self.config.num_of_words, self.config.embedding_size)), name='ents')\n self.path_embeddings = tf.Variable(emb_initializer(shape=(self.config.num_of_paths, self.config.embedding_size)), name='paths')\n self.tags_embeddings = tf.Variable(emb_initializer(shape=(self.config.num_of_tags, self.config.code_embedding_size)), name='tags')\n self.attention_param = tf.Variable(emb_initializer(shape=(self.config.code_embedding_size, 1)), name='attention_param')\n self.transform_matrix= tf.Variable(emb_initializer(shape=(3*self.config.embedding_size, self.config.code_embedding_size)), name='transform')\n\n def forward(self, e1, p, e2, train=True):\n # e1_e is [batch_size, max_contexts, embeddings size]\n # p_e is [batch_size, max_contexts, embeddings size]\n # e2_e is [batch_size, max_contexts, embeddings size]\n e1_e = tf.nn.embedding_lookup(params=self.ents_embeddings, ids=e1)\n p_e = tf.nn.embedding_lookup(params=self.path_embeddings, ids=p)\n e2_e = tf.nn.embedding_lookup(params=self.ents_embeddings, ids=e2)\n\n # context_emb = [batch_size, max_contexts, 3*embedding_size] \n context_e = tf.concat([e1_e, p_e, e2_e], axis=-1) \n\n # apply a dropout to context emb. \n if train:\n context_e = tf.nn.dropout(context_e, rate=1-self.config.dropout_factor)\n\n # flatten context embeddings => [batch_size*max_contexts, 3*embedding_size]\n context_e = tf.reshape(context_e, [-1, 3*self.config.embedding_size])\n\n # tranform context embeddings -> to [batch_size*max_contexts, code_embedding_size]\n flat_emb = tf.tanh(tf.matmul(context_e, self.transform_matrix))\n\n # calculate weights => to [batch_size*max_contexts, 1]\n contexts_weights = tf.matmul(flat_emb, self.attention_param)\n\n # reshapeing context weights => to [batch_size, max_contexts, 1]\n batched_contexts_weights = tf.reshape(contexts_weights, [-1, self.config.max_contexts, 1])\n\n # calculate softmax for attention weights. \n attention_weights = tf.nn.softmax(batched_contexts_weights, axis=1)\n\n # reshaping the embeddings => to [batch_size, max_contexts, code_embedding_size]\n batched_flat_emb = tf.reshape(flat_emb, [-1, self.config.max_contexts, self.config.code_embedding_size])\n\n # calculating the code vectors => to [batch_size, code_embedding_size]\n code_vectors = tf.reduce_sum(tf.multiply(batched_flat_emb, attention_weights), axis=1)\n\n return code_vectors, attention_weights" ]
[ [ "tensorflow.initializers.glorot_normal", "tensorflow.multiply", "tensorflow.concat", "tensorflow.matmul", "tensorflow.reshape", "tensorflow.nn.embedding_lookup", "tensorflow.nn.softmax", "tensorflow.nn.dropout" ] ]
donehower/pandas
[ "ed20822a54e3863b393554ecca801654af105555" ]
[ "pandas/tests/arithmetic/test_timedelta64.py" ]
[ "# Arithmetic tests for DataFrame/Series/Index/Array classes that should\n# behave identically.\nfrom datetime import datetime, timedelta\n\nimport numpy as np\nimport pytest\n\nfrom pandas.errors import NullFrequencyError, OutOfBoundsDatetime, PerformanceWarning\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n DatetimeIndex,\n NaT,\n Series,\n Timedelta,\n TimedeltaIndex,\n Timestamp,\n timedelta_range,\n)\nfrom pandas.tests.arithmetic.test_datetime64 import assert_invalid_comparison\nimport pandas.util.testing as tm\n\n\ndef get_upcast_box(box, vector):\n \"\"\"\n Given two box-types, find the one that takes priority\n \"\"\"\n if box is DataFrame or isinstance(vector, DataFrame):\n return DataFrame\n if box is Series or isinstance(vector, Series):\n return Series\n if box is pd.Index or isinstance(vector, pd.Index):\n return pd.Index\n return box\n\n\n# ------------------------------------------------------------------\n# Timedelta64[ns] dtype Comparisons\n\n\nclass TestTimedelta64ArrayLikeComparisons:\n # Comparison tests for timedelta64[ns] vectors fully parametrized over\n # DataFrame/Series/TimedeltaIndex/TimedeltaArray. Ideally all comparison\n # tests will eventually end up here.\n\n def test_compare_timedelta64_zerodim(self, box_with_array):\n # GH#26689 should unbox when comparing with zerodim array\n box = box_with_array\n xbox = box_with_array if box_with_array is not pd.Index else np.ndarray\n\n tdi = pd.timedelta_range(\"2H\", periods=4)\n other = np.array(tdi.to_numpy()[0])\n\n tdi = tm.box_expected(tdi, box)\n res = tdi <= other\n expected = np.array([True, False, False, False])\n expected = tm.box_expected(expected, xbox)\n tm.assert_equal(res, expected)\n\n with pytest.raises(TypeError):\n # zero-dim of wrong dtype should still raise\n tdi >= np.array(4)\n\n @pytest.mark.parametrize(\n \"td_scalar\",\n [timedelta(days=1), Timedelta(days=1), Timedelta(days=1).to_timedelta64()],\n )\n def test_compare_timedeltalike_scalar(self, box_with_array, td_scalar):\n # regression test for GH#5963\n box = box_with_array\n xbox = box if box is not pd.Index else np.ndarray\n ser = pd.Series([timedelta(days=1), timedelta(days=2)])\n ser = tm.box_expected(ser, box)\n actual = ser > td_scalar\n expected = pd.Series([False, True])\n expected = tm.box_expected(expected, xbox)\n tm.assert_equal(actual, expected)\n\n @pytest.mark.parametrize(\"invalid\", [345600000000000, \"a\"])\n def test_td64_comparisons_invalid(self, box_with_array, invalid):\n # GH#13624 for str\n box = box_with_array\n rng = timedelta_range(\"1 days\", periods=10)\n obj = tm.box_expected(rng, box)\n\n assert_invalid_comparison(obj, invalid, box)\n\n\nclass TestTimedelta64ArrayComparisons:\n # TODO: All of these need to be parametrized over box\n\n @pytest.mark.parametrize(\"dtype\", [None, object])\n def test_comp_nat(self, dtype):\n left = pd.TimedeltaIndex(\n [pd.Timedelta(\"1 days\"), pd.NaT, pd.Timedelta(\"3 days\")]\n )\n right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta(\"3 days\")])\n\n lhs, rhs = left, right\n if dtype is object:\n lhs, rhs = left.astype(object), right.astype(object)\n\n result = rhs == lhs\n expected = np.array([False, False, True])\n tm.assert_numpy_array_equal(result, expected)\n\n result = rhs != lhs\n expected = np.array([True, True, False])\n tm.assert_numpy_array_equal(result, expected)\n\n expected = np.array([False, False, False])\n tm.assert_numpy_array_equal(lhs == pd.NaT, expected)\n tm.assert_numpy_array_equal(pd.NaT == rhs, expected)\n\n expected = 
np.array([True, True, True])\n tm.assert_numpy_array_equal(lhs != pd.NaT, expected)\n tm.assert_numpy_array_equal(pd.NaT != lhs, expected)\n\n expected = np.array([False, False, False])\n tm.assert_numpy_array_equal(lhs < pd.NaT, expected)\n tm.assert_numpy_array_equal(pd.NaT > lhs, expected)\n\n def test_comparisons_nat(self):\n tdidx1 = pd.TimedeltaIndex(\n [\n \"1 day\",\n pd.NaT,\n \"1 day 00:00:01\",\n pd.NaT,\n \"1 day 00:00:01\",\n \"5 day 00:00:03\",\n ]\n )\n tdidx2 = pd.TimedeltaIndex(\n [\"2 day\", \"2 day\", pd.NaT, pd.NaT, \"1 day 00:00:02\", \"5 days 00:00:03\"]\n )\n tdarr = np.array(\n [\n np.timedelta64(2, \"D\"),\n np.timedelta64(2, \"D\"),\n np.timedelta64(\"nat\"),\n np.timedelta64(\"nat\"),\n np.timedelta64(1, \"D\") + np.timedelta64(2, \"s\"),\n np.timedelta64(5, \"D\") + np.timedelta64(3, \"s\"),\n ]\n )\n\n cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]\n\n # Check pd.NaT is handles as the same as np.nan\n for idx1, idx2 in cases:\n\n result = idx1 < idx2\n expected = np.array([True, False, False, False, True, False])\n tm.assert_numpy_array_equal(result, expected)\n\n result = idx2 > idx1\n expected = np.array([True, False, False, False, True, False])\n tm.assert_numpy_array_equal(result, expected)\n\n result = idx1 <= idx2\n expected = np.array([True, False, False, False, True, True])\n tm.assert_numpy_array_equal(result, expected)\n\n result = idx2 >= idx1\n expected = np.array([True, False, False, False, True, True])\n tm.assert_numpy_array_equal(result, expected)\n\n result = idx1 == idx2\n expected = np.array([False, False, False, False, False, True])\n tm.assert_numpy_array_equal(result, expected)\n\n result = idx1 != idx2\n expected = np.array([True, True, True, True, True, False])\n tm.assert_numpy_array_equal(result, expected)\n\n # TODO: better name\n def test_comparisons_coverage(self):\n rng = timedelta_range(\"1 days\", periods=10)\n\n result = rng < rng[3]\n expected = np.array([True, True, True] + [False] * 7)\n tm.assert_numpy_array_equal(result, expected)\n\n result = rng == list(rng)\n exp = rng == rng\n tm.assert_numpy_array_equal(result, exp)\n\n\n# ------------------------------------------------------------------\n# Timedelta64[ns] dtype Arithmetic Operations\n\n\nclass TestTimedelta64ArithmeticUnsorted:\n # Tests moved from type-specific test files but not\n # yet sorted/parametrized/de-duplicated\n\n def test_ufunc_coercions(self):\n # normal ops are also tested in tseries/test_timedeltas.py\n idx = TimedeltaIndex([\"2H\", \"4H\", \"6H\", \"8H\", \"10H\"], freq=\"2H\", name=\"x\")\n\n for result in [idx * 2, np.multiply(idx, 2)]:\n assert isinstance(result, TimedeltaIndex)\n exp = TimedeltaIndex([\"4H\", \"8H\", \"12H\", \"16H\", \"20H\"], freq=\"4H\", name=\"x\")\n tm.assert_index_equal(result, exp)\n assert result.freq == \"4H\"\n\n for result in [idx / 2, np.divide(idx, 2)]:\n assert isinstance(result, TimedeltaIndex)\n exp = TimedeltaIndex([\"1H\", \"2H\", \"3H\", \"4H\", \"5H\"], freq=\"H\", name=\"x\")\n tm.assert_index_equal(result, exp)\n assert result.freq == \"H\"\n\n idx = TimedeltaIndex([\"2H\", \"4H\", \"6H\", \"8H\", \"10H\"], freq=\"2H\", name=\"x\")\n for result in [-idx, np.negative(idx)]:\n assert isinstance(result, TimedeltaIndex)\n exp = TimedeltaIndex(\n [\"-2H\", \"-4H\", \"-6H\", \"-8H\", \"-10H\"], freq=\"-2H\", name=\"x\"\n )\n tm.assert_index_equal(result, exp)\n assert result.freq == \"-2H\"\n\n idx = TimedeltaIndex([\"-2H\", \"-1H\", \"0H\", \"1H\", \"2H\"], freq=\"H\", name=\"x\")\n for result in [abs(idx), 
np.absolute(idx)]:\n assert isinstance(result, TimedeltaIndex)\n exp = TimedeltaIndex([\"2H\", \"1H\", \"0H\", \"1H\", \"2H\"], freq=None, name=\"x\")\n tm.assert_index_equal(result, exp)\n assert result.freq is None\n\n def test_subtraction_ops(self):\n # with datetimes/timedelta and tdi/dti\n tdi = TimedeltaIndex([\"1 days\", pd.NaT, \"2 days\"], name=\"foo\")\n dti = pd.date_range(\"20130101\", periods=3, name=\"bar\")\n td = Timedelta(\"1 days\")\n dt = Timestamp(\"20130101\")\n\n msg = \"cannot subtract a datelike from a TimedeltaArray\"\n with pytest.raises(TypeError, match=msg):\n tdi - dt\n with pytest.raises(TypeError, match=msg):\n tdi - dti\n\n msg = r\"unsupported operand type\\(s\\) for -\"\n with pytest.raises(TypeError, match=msg):\n td - dt\n\n msg = \"(bad|unsupported) operand type for unary\"\n with pytest.raises(TypeError, match=msg):\n td - dti\n\n result = dt - dti\n expected = TimedeltaIndex([\"0 days\", \"-1 days\", \"-2 days\"], name=\"bar\")\n tm.assert_index_equal(result, expected)\n\n result = dti - dt\n expected = TimedeltaIndex([\"0 days\", \"1 days\", \"2 days\"], name=\"bar\")\n tm.assert_index_equal(result, expected)\n\n result = tdi - td\n expected = TimedeltaIndex([\"0 days\", pd.NaT, \"1 days\"], name=\"foo\")\n tm.assert_index_equal(result, expected, check_names=False)\n\n result = td - tdi\n expected = TimedeltaIndex([\"0 days\", pd.NaT, \"-1 days\"], name=\"foo\")\n tm.assert_index_equal(result, expected, check_names=False)\n\n result = dti - td\n expected = DatetimeIndex([\"20121231\", \"20130101\", \"20130102\"], name=\"bar\")\n tm.assert_index_equal(result, expected, check_names=False)\n\n result = dt - tdi\n expected = DatetimeIndex([\"20121231\", pd.NaT, \"20121230\"], name=\"foo\")\n tm.assert_index_equal(result, expected)\n\n def test_subtraction_ops_with_tz(self):\n\n # check that dt/dti subtraction ops with tz are validated\n dti = pd.date_range(\"20130101\", periods=3)\n ts = Timestamp(\"20130101\")\n dt = ts.to_pydatetime()\n dti_tz = pd.date_range(\"20130101\", periods=3).tz_localize(\"US/Eastern\")\n ts_tz = Timestamp(\"20130101\").tz_localize(\"US/Eastern\")\n ts_tz2 = Timestamp(\"20130101\").tz_localize(\"CET\")\n dt_tz = ts_tz.to_pydatetime()\n td = Timedelta(\"1 days\")\n\n def _check(result, expected):\n assert result == expected\n assert isinstance(result, Timedelta)\n\n # scalars\n result = ts - ts\n expected = Timedelta(\"0 days\")\n _check(result, expected)\n\n result = dt_tz - ts_tz\n expected = Timedelta(\"0 days\")\n _check(result, expected)\n\n result = ts_tz - dt_tz\n expected = Timedelta(\"0 days\")\n _check(result, expected)\n\n # tz mismatches\n msg = \"Timestamp subtraction must have the same timezones or no timezones\"\n with pytest.raises(TypeError, match=msg):\n dt_tz - ts\n msg = \"can't subtract offset-naive and offset-aware datetimes\"\n with pytest.raises(TypeError, match=msg):\n dt_tz - dt\n msg = \"Timestamp subtraction must have the same timezones or no timezones\"\n with pytest.raises(TypeError, match=msg):\n dt_tz - ts_tz2\n msg = \"can't subtract offset-naive and offset-aware datetimes\"\n with pytest.raises(TypeError, match=msg):\n dt - dt_tz\n msg = \"Timestamp subtraction must have the same timezones or no timezones\"\n with pytest.raises(TypeError, match=msg):\n ts - dt_tz\n with pytest.raises(TypeError, match=msg):\n ts_tz2 - ts\n with pytest.raises(TypeError, match=msg):\n ts_tz2 - dt\n with pytest.raises(TypeError, match=msg):\n ts_tz - ts_tz2\n\n # with dti\n with pytest.raises(TypeError, 
match=msg):\n dti - ts_tz\n with pytest.raises(TypeError, match=msg):\n dti_tz - ts\n with pytest.raises(TypeError, match=msg):\n dti_tz - ts_tz2\n\n result = dti_tz - dt_tz\n expected = TimedeltaIndex([\"0 days\", \"1 days\", \"2 days\"])\n tm.assert_index_equal(result, expected)\n\n result = dt_tz - dti_tz\n expected = TimedeltaIndex([\"0 days\", \"-1 days\", \"-2 days\"])\n tm.assert_index_equal(result, expected)\n\n result = dti_tz - ts_tz\n expected = TimedeltaIndex([\"0 days\", \"1 days\", \"2 days\"])\n tm.assert_index_equal(result, expected)\n\n result = ts_tz - dti_tz\n expected = TimedeltaIndex([\"0 days\", \"-1 days\", \"-2 days\"])\n tm.assert_index_equal(result, expected)\n\n result = td - td\n expected = Timedelta(\"0 days\")\n _check(result, expected)\n\n result = dti_tz - td\n expected = DatetimeIndex([\"20121231\", \"20130101\", \"20130102\"], tz=\"US/Eastern\")\n tm.assert_index_equal(result, expected)\n\n def test_dti_tdi_numeric_ops(self):\n # These are normally union/diff set-like ops\n tdi = TimedeltaIndex([\"1 days\", pd.NaT, \"2 days\"], name=\"foo\")\n dti = pd.date_range(\"20130101\", periods=3, name=\"bar\")\n\n # TODO(wesm): unused?\n # td = Timedelta('1 days')\n # dt = Timestamp('20130101')\n\n result = tdi - tdi\n expected = TimedeltaIndex([\"0 days\", pd.NaT, \"0 days\"], name=\"foo\")\n tm.assert_index_equal(result, expected)\n\n result = tdi + tdi\n expected = TimedeltaIndex([\"2 days\", pd.NaT, \"4 days\"], name=\"foo\")\n tm.assert_index_equal(result, expected)\n\n result = dti - tdi # name will be reset\n expected = DatetimeIndex([\"20121231\", pd.NaT, \"20130101\"])\n tm.assert_index_equal(result, expected)\n\n def test_addition_ops(self):\n # with datetimes/timedelta and tdi/dti\n tdi = TimedeltaIndex([\"1 days\", pd.NaT, \"2 days\"], name=\"foo\")\n dti = pd.date_range(\"20130101\", periods=3, name=\"bar\")\n td = Timedelta(\"1 days\")\n dt = Timestamp(\"20130101\")\n\n result = tdi + dt\n expected = DatetimeIndex([\"20130102\", pd.NaT, \"20130103\"], name=\"foo\")\n tm.assert_index_equal(result, expected)\n\n result = dt + tdi\n expected = DatetimeIndex([\"20130102\", pd.NaT, \"20130103\"], name=\"foo\")\n tm.assert_index_equal(result, expected)\n\n result = td + tdi\n expected = TimedeltaIndex([\"2 days\", pd.NaT, \"3 days\"], name=\"foo\")\n tm.assert_index_equal(result, expected)\n\n result = tdi + td\n expected = TimedeltaIndex([\"2 days\", pd.NaT, \"3 days\"], name=\"foo\")\n tm.assert_index_equal(result, expected)\n\n # unequal length\n msg = \"cannot add indices of unequal length\"\n with pytest.raises(ValueError, match=msg):\n tdi + dti[0:1]\n with pytest.raises(ValueError, match=msg):\n tdi[0:1] + dti\n\n # random indexes\n with pytest.raises(NullFrequencyError):\n tdi + pd.Int64Index([1, 2, 3])\n\n # this is a union!\n # pytest.raises(TypeError, lambda : Int64Index([1,2,3]) + tdi)\n\n result = tdi + dti # name will be reset\n expected = DatetimeIndex([\"20130102\", pd.NaT, \"20130105\"])\n tm.assert_index_equal(result, expected)\n\n result = dti + tdi # name will be reset\n expected = DatetimeIndex([\"20130102\", pd.NaT, \"20130105\"])\n tm.assert_index_equal(result, expected)\n\n result = dt + td\n expected = Timestamp(\"20130102\")\n assert result == expected\n\n result = td + dt\n expected = Timestamp(\"20130102\")\n assert result == expected\n\n # TODO: Needs more informative name, probably split up into\n # more targeted tests\n @pytest.mark.parametrize(\"freq\", [\"D\", \"B\"])\n def test_timedelta(self, freq):\n index = 
pd.date_range(\"1/1/2000\", periods=50, freq=freq)\n\n shifted = index + timedelta(1)\n back = shifted + timedelta(-1)\n tm.assert_index_equal(index, back)\n\n if freq == \"D\":\n expected = pd.tseries.offsets.Day(1)\n assert index.freq == expected\n assert shifted.freq == expected\n assert back.freq == expected\n else: # freq == 'B'\n assert index.freq == pd.tseries.offsets.BusinessDay(1)\n assert shifted.freq is None\n assert back.freq == pd.tseries.offsets.BusinessDay(1)\n\n result = index - timedelta(1)\n expected = index + timedelta(-1)\n tm.assert_index_equal(result, expected)\n\n # GH#4134, buggy with timedeltas\n rng = pd.date_range(\"2013\", \"2014\")\n s = Series(rng)\n result1 = rng - pd.offsets.Hour(1)\n result2 = DatetimeIndex(s - np.timedelta64(100000000))\n result3 = rng - np.timedelta64(100000000)\n result4 = DatetimeIndex(s - pd.offsets.Hour(1))\n tm.assert_index_equal(result1, result4)\n tm.assert_index_equal(result2, result3)\n\n def test_tda_add_sub_index(self):\n # Check that TimedeltaArray defers to Index on arithmetic ops\n tdi = TimedeltaIndex([\"1 days\", pd.NaT, \"2 days\"])\n tda = tdi.array\n\n dti = pd.date_range(\"1999-12-31\", periods=3, freq=\"D\")\n\n result = tda + dti\n expected = tdi + dti\n tm.assert_index_equal(result, expected)\n\n result = tda + tdi\n expected = tdi + tdi\n tm.assert_index_equal(result, expected)\n\n result = tda - tdi\n expected = tdi - tdi\n tm.assert_index_equal(result, expected)\n\n\nclass TestAddSubNaTMasking:\n # TODO: parametrize over boxes\n\n def test_tdi_add_timestamp_nat_masking(self):\n # GH#17991 checking for overflow-masking with NaT\n tdinat = pd.to_timedelta([\"24658 days 11:15:00\", \"NaT\"])\n\n tsneg = Timestamp(\"1950-01-01\")\n ts_neg_variants = [\n tsneg,\n tsneg.to_pydatetime(),\n tsneg.to_datetime64().astype(\"datetime64[ns]\"),\n tsneg.to_datetime64().astype(\"datetime64[D]\"),\n ]\n\n tspos = Timestamp(\"1980-01-01\")\n ts_pos_variants = [\n tspos,\n tspos.to_pydatetime(),\n tspos.to_datetime64().astype(\"datetime64[ns]\"),\n tspos.to_datetime64().astype(\"datetime64[D]\"),\n ]\n\n for variant in ts_neg_variants + ts_pos_variants:\n res = tdinat + variant\n assert res[1] is pd.NaT\n\n def test_tdi_add_overflow(self):\n # See GH#14068\n # preliminary test scalar analogue of vectorized tests below\n with pytest.raises(OutOfBoundsDatetime):\n pd.to_timedelta(106580, \"D\") + Timestamp(\"2000\")\n with pytest.raises(OutOfBoundsDatetime):\n Timestamp(\"2000\") + pd.to_timedelta(106580, \"D\")\n\n _NaT = int(pd.NaT) + 1\n msg = \"Overflow in int64 addition\"\n with pytest.raises(OverflowError, match=msg):\n pd.to_timedelta([106580], \"D\") + Timestamp(\"2000\")\n with pytest.raises(OverflowError, match=msg):\n Timestamp(\"2000\") + pd.to_timedelta([106580], \"D\")\n with pytest.raises(OverflowError, match=msg):\n pd.to_timedelta([_NaT]) - Timedelta(\"1 days\")\n with pytest.raises(OverflowError, match=msg):\n pd.to_timedelta([\"5 days\", _NaT]) - Timedelta(\"1 days\")\n with pytest.raises(OverflowError, match=msg):\n (\n pd.to_timedelta([_NaT, \"5 days\", \"1 hours\"])\n - pd.to_timedelta([\"7 seconds\", _NaT, \"4 hours\"])\n )\n\n # These should not overflow!\n exp = TimedeltaIndex([pd.NaT])\n result = pd.to_timedelta([pd.NaT]) - Timedelta(\"1 days\")\n tm.assert_index_equal(result, exp)\n\n exp = TimedeltaIndex([\"4 days\", pd.NaT])\n result = pd.to_timedelta([\"5 days\", pd.NaT]) - Timedelta(\"1 days\")\n tm.assert_index_equal(result, exp)\n\n exp = TimedeltaIndex([pd.NaT, pd.NaT, \"5 hours\"])\n result = 
pd.to_timedelta([pd.NaT, \"5 days\", \"1 hours\"]) + pd.to_timedelta(\n [\"7 seconds\", pd.NaT, \"4 hours\"]\n )\n tm.assert_index_equal(result, exp)\n\n\nclass TestTimedeltaArraylikeAddSubOps:\n # Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__\n\n # TODO: moved from frame tests; needs parametrization/de-duplication\n def test_td64_df_add_int_frame(self):\n # GH#22696 Check that we don't dispatch to numpy implementation,\n # which treats int64 as m8[ns]\n tdi = pd.timedelta_range(\"1\", periods=3)\n df = tdi.to_frame()\n other = pd.DataFrame([1, 2, 3], index=tdi) # indexed like `df`\n with pytest.raises(TypeError):\n df + other\n with pytest.raises(TypeError):\n other + df\n with pytest.raises(TypeError):\n df - other\n with pytest.raises(TypeError):\n other - df\n\n # TODO: moved from tests.indexes.timedeltas.test_arithmetic; needs\n # parametrization+de-duplication\n def test_timedelta_ops_with_missing_values(self):\n # setup\n s1 = pd.to_timedelta(Series([\"00:00:01\"]))\n s2 = pd.to_timedelta(Series([\"00:00:02\"]))\n\n msg = r\"dtype datetime64\\[ns\\] cannot be converted to timedelta64\\[ns\\]\"\n with pytest.raises(TypeError, match=msg):\n # Passing datetime64-dtype data to TimedeltaIndex is no longer\n # supported GH#29794\n pd.to_timedelta(Series([pd.NaT]))\n\n sn = pd.to_timedelta(Series([pd.NaT], dtype=\"m8[ns]\"))\n\n df1 = pd.DataFrame([\"00:00:01\"]).apply(pd.to_timedelta)\n df2 = pd.DataFrame([\"00:00:02\"]).apply(pd.to_timedelta)\n with pytest.raises(TypeError, match=msg):\n # Passing datetime64-dtype data to TimedeltaIndex is no longer\n # supported GH#29794\n pd.DataFrame([pd.NaT]).apply(pd.to_timedelta)\n\n dfn = pd.DataFrame([pd.NaT.value]).apply(pd.to_timedelta)\n\n scalar1 = pd.to_timedelta(\"00:00:01\")\n scalar2 = pd.to_timedelta(\"00:00:02\")\n timedelta_NaT = pd.to_timedelta(\"NaT\")\n\n actual = scalar1 + scalar1\n assert actual == scalar2\n actual = scalar2 - scalar1\n assert actual == scalar1\n\n actual = s1 + s1\n tm.assert_series_equal(actual, s2)\n actual = s2 - s1\n tm.assert_series_equal(actual, s1)\n\n actual = s1 + scalar1\n tm.assert_series_equal(actual, s2)\n actual = scalar1 + s1\n tm.assert_series_equal(actual, s2)\n actual = s2 - scalar1\n tm.assert_series_equal(actual, s1)\n actual = -scalar1 + s2\n tm.assert_series_equal(actual, s1)\n\n actual = s1 + timedelta_NaT\n tm.assert_series_equal(actual, sn)\n actual = timedelta_NaT + s1\n tm.assert_series_equal(actual, sn)\n actual = s1 - timedelta_NaT\n tm.assert_series_equal(actual, sn)\n actual = -timedelta_NaT + s1\n tm.assert_series_equal(actual, sn)\n\n with pytest.raises(TypeError):\n s1 + np.nan\n with pytest.raises(TypeError):\n np.nan + s1\n with pytest.raises(TypeError):\n s1 - np.nan\n with pytest.raises(TypeError):\n -np.nan + s1\n\n actual = s1 + pd.NaT\n tm.assert_series_equal(actual, sn)\n actual = s2 - pd.NaT\n tm.assert_series_equal(actual, sn)\n\n actual = s1 + df1\n tm.assert_frame_equal(actual, df2)\n actual = s2 - df1\n tm.assert_frame_equal(actual, df1)\n actual = df1 + s1\n tm.assert_frame_equal(actual, df2)\n actual = df2 - s1\n tm.assert_frame_equal(actual, df1)\n\n actual = df1 + df1\n tm.assert_frame_equal(actual, df2)\n actual = df2 - df1\n tm.assert_frame_equal(actual, df1)\n\n actual = df1 + scalar1\n tm.assert_frame_equal(actual, df2)\n actual = df2 - scalar1\n tm.assert_frame_equal(actual, df1)\n\n actual = df1 + timedelta_NaT\n tm.assert_frame_equal(actual, dfn)\n actual = df1 - timedelta_NaT\n tm.assert_frame_equal(actual, dfn)\n\n with 
pytest.raises(TypeError):\n df1 + np.nan\n with pytest.raises(TypeError):\n df1 - np.nan\n\n actual = df1 + pd.NaT # NaT is datetime, not timedelta\n tm.assert_frame_equal(actual, dfn)\n actual = df1 - pd.NaT\n tm.assert_frame_equal(actual, dfn)\n\n # TODO: moved from tests.series.test_operators, needs splitting, cleanup,\n # de-duplication, box-parametrization...\n def test_operators_timedelta64(self):\n # series ops\n v1 = pd.date_range(\"2012-1-1\", periods=3, freq=\"D\")\n v2 = pd.date_range(\"2012-1-2\", periods=3, freq=\"D\")\n rs = Series(v2) - Series(v1)\n xp = Series(1e9 * 3600 * 24, rs.index).astype(\"int64\").astype(\"timedelta64[ns]\")\n tm.assert_series_equal(rs, xp)\n assert rs.dtype == \"timedelta64[ns]\"\n\n df = DataFrame(dict(A=v1))\n td = Series([timedelta(days=i) for i in range(3)])\n assert td.dtype == \"timedelta64[ns]\"\n\n # series on the rhs\n result = df[\"A\"] - df[\"A\"].shift()\n assert result.dtype == \"timedelta64[ns]\"\n\n result = df[\"A\"] + td\n assert result.dtype == \"M8[ns]\"\n\n # scalar Timestamp on rhs\n maxa = df[\"A\"].max()\n assert isinstance(maxa, Timestamp)\n\n resultb = df[\"A\"] - df[\"A\"].max()\n assert resultb.dtype == \"timedelta64[ns]\"\n\n # timestamp on lhs\n result = resultb + df[\"A\"]\n values = [Timestamp(\"20111230\"), Timestamp(\"20120101\"), Timestamp(\"20120103\")]\n expected = Series(values, name=\"A\")\n tm.assert_series_equal(result, expected)\n\n # datetimes on rhs\n result = df[\"A\"] - datetime(2001, 1, 1)\n expected = Series([timedelta(days=4017 + i) for i in range(3)], name=\"A\")\n tm.assert_series_equal(result, expected)\n assert result.dtype == \"m8[ns]\"\n\n d = datetime(2001, 1, 1, 3, 4)\n resulta = df[\"A\"] - d\n assert resulta.dtype == \"m8[ns]\"\n\n # roundtrip\n resultb = resulta + d\n tm.assert_series_equal(df[\"A\"], resultb)\n\n # timedeltas on rhs\n td = timedelta(days=1)\n resulta = df[\"A\"] + td\n resultb = resulta - td\n tm.assert_series_equal(resultb, df[\"A\"])\n assert resultb.dtype == \"M8[ns]\"\n\n # roundtrip\n td = timedelta(minutes=5, seconds=3)\n resulta = df[\"A\"] + td\n resultb = resulta - td\n tm.assert_series_equal(df[\"A\"], resultb)\n assert resultb.dtype == \"M8[ns]\"\n\n # inplace\n value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))\n rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))\n assert rs[2] == value\n\n def test_timedelta64_ops_nat(self):\n # GH 11349\n timedelta_series = Series([NaT, Timedelta(\"1s\")])\n nat_series_dtype_timedelta = Series([NaT, NaT], dtype=\"timedelta64[ns]\")\n single_nat_dtype_timedelta = Series([NaT], dtype=\"timedelta64[ns]\")\n\n # subtraction\n tm.assert_series_equal(timedelta_series - NaT, nat_series_dtype_timedelta)\n tm.assert_series_equal(-NaT + timedelta_series, nat_series_dtype_timedelta)\n\n tm.assert_series_equal(\n timedelta_series - single_nat_dtype_timedelta, nat_series_dtype_timedelta\n )\n tm.assert_series_equal(\n -single_nat_dtype_timedelta + timedelta_series, nat_series_dtype_timedelta\n )\n\n # addition\n tm.assert_series_equal(\n nat_series_dtype_timedelta + NaT, nat_series_dtype_timedelta\n )\n tm.assert_series_equal(\n NaT + nat_series_dtype_timedelta, nat_series_dtype_timedelta\n )\n\n tm.assert_series_equal(\n nat_series_dtype_timedelta + single_nat_dtype_timedelta,\n nat_series_dtype_timedelta,\n )\n tm.assert_series_equal(\n single_nat_dtype_timedelta + nat_series_dtype_timedelta,\n nat_series_dtype_timedelta,\n )\n\n tm.assert_series_equal(timedelta_series + NaT, nat_series_dtype_timedelta)\n 
tm.assert_series_equal(NaT + timedelta_series, nat_series_dtype_timedelta)\n\n tm.assert_series_equal(\n timedelta_series + single_nat_dtype_timedelta, nat_series_dtype_timedelta\n )\n tm.assert_series_equal(\n single_nat_dtype_timedelta + timedelta_series, nat_series_dtype_timedelta\n )\n\n tm.assert_series_equal(\n nat_series_dtype_timedelta + NaT, nat_series_dtype_timedelta\n )\n tm.assert_series_equal(\n NaT + nat_series_dtype_timedelta, nat_series_dtype_timedelta\n )\n\n tm.assert_series_equal(\n nat_series_dtype_timedelta + single_nat_dtype_timedelta,\n nat_series_dtype_timedelta,\n )\n tm.assert_series_equal(\n single_nat_dtype_timedelta + nat_series_dtype_timedelta,\n nat_series_dtype_timedelta,\n )\n\n # multiplication\n tm.assert_series_equal(\n nat_series_dtype_timedelta * 1.0, nat_series_dtype_timedelta\n )\n tm.assert_series_equal(\n 1.0 * nat_series_dtype_timedelta, nat_series_dtype_timedelta\n )\n\n tm.assert_series_equal(timedelta_series * 1, timedelta_series)\n tm.assert_series_equal(1 * timedelta_series, timedelta_series)\n\n tm.assert_series_equal(timedelta_series * 1.5, Series([NaT, Timedelta(\"1.5s\")]))\n tm.assert_series_equal(1.5 * timedelta_series, Series([NaT, Timedelta(\"1.5s\")]))\n\n tm.assert_series_equal(timedelta_series * np.nan, nat_series_dtype_timedelta)\n tm.assert_series_equal(np.nan * timedelta_series, nat_series_dtype_timedelta)\n\n # division\n tm.assert_series_equal(timedelta_series / 2, Series([NaT, Timedelta(\"0.5s\")]))\n tm.assert_series_equal(timedelta_series / 2.0, Series([NaT, Timedelta(\"0.5s\")]))\n tm.assert_series_equal(timedelta_series / np.nan, nat_series_dtype_timedelta)\n\n # -------------------------------------------------------------\n # Invalid Operations\n\n @pytest.mark.parametrize(\"other\", [\"a\", 3.14, np.array([2.0, 3.0])])\n def test_td64arr_add_sub_invalid(self, box_with_array, other):\n # GH#13624 for str\n tdi = TimedeltaIndex([\"1 day\", \"2 days\"])\n tdarr = tm.box_expected(tdi, box_with_array)\n\n with pytest.raises(TypeError):\n tdarr + other\n with pytest.raises(TypeError):\n other + tdarr\n with pytest.raises(TypeError):\n tdarr - other\n with pytest.raises(TypeError):\n other - tdarr\n\n @pytest.mark.parametrize(\"freq\", [None, \"H\"])\n def test_td64arr_sub_period(self, box_with_array, freq):\n # GH#13078\n # not supported, check TypeError\n p = pd.Period(\"2011-01-01\", freq=\"D\")\n idx = TimedeltaIndex([\"1 hours\", \"2 hours\"], freq=freq)\n idx = tm.box_expected(idx, box_with_array)\n\n with pytest.raises(TypeError):\n idx - p\n\n with pytest.raises(TypeError):\n p - idx\n\n @pytest.mark.parametrize(\"pi_freq\", [\"D\", \"W\", \"Q\", \"H\"])\n @pytest.mark.parametrize(\"tdi_freq\", [None, \"H\"])\n def test_td64arr_sub_pi(self, box_with_array, tdi_freq, pi_freq):\n # GH#20049 subtracting PeriodIndex should raise TypeError\n tdi = TimedeltaIndex([\"1 hours\", \"2 hours\"], freq=tdi_freq)\n dti = Timestamp(\"2018-03-07 17:16:40\") + tdi\n pi = dti.to_period(pi_freq)\n\n # TODO: parametrize over box for pi?\n tdi = tm.box_expected(tdi, box_with_array)\n with pytest.raises(TypeError):\n tdi - pi\n\n # -------------------------------------------------------------\n # Binary operations td64 arraylike and datetime-like\n\n def test_td64arr_sub_timestamp_raises(self, box_with_array):\n idx = TimedeltaIndex([\"1 day\", \"2 day\"])\n idx = tm.box_expected(idx, box_with_array)\n\n msg = (\n \"cannot subtract a datelike from|\"\n \"Could not operate|\"\n \"cannot perform operation\"\n )\n with 
pytest.raises(TypeError, match=msg):\n idx - Timestamp(\"2011-01-01\")\n\n def test_td64arr_add_timestamp(self, box_with_array, tz_naive_fixture):\n # GH#23215\n\n # TODO: parametrize over scalar datetime types?\n tz = tz_naive_fixture\n other = Timestamp(\"2011-01-01\", tz=tz)\n\n idx = TimedeltaIndex([\"1 day\", \"2 day\"])\n expected = DatetimeIndex([\"2011-01-02\", \"2011-01-03\"], tz=tz)\n\n idx = tm.box_expected(idx, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n result = idx + other\n tm.assert_equal(result, expected)\n\n result = other + idx\n tm.assert_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"ts\",\n [\n Timestamp(\"2012-01-01\"),\n Timestamp(\"2012-01-01\").to_pydatetime(),\n Timestamp(\"2012-01-01\").to_datetime64(),\n ],\n )\n def test_td64arr_add_sub_datetimelike_scalar(self, ts, box_with_array):\n # GH#11925, GH#29558\n tdi = timedelta_range(\"1 day\", periods=3)\n expected = pd.date_range(\"2012-01-02\", periods=3)\n\n tdarr = tm.box_expected(tdi, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n tm.assert_equal(ts + tdarr, expected)\n tm.assert_equal(tdarr + ts, expected)\n\n expected2 = pd.date_range(\"2011-12-31\", periods=3, freq=\"-1D\")\n expected2 = tm.box_expected(expected2, box_with_array)\n\n tm.assert_equal(ts - tdarr, expected2)\n tm.assert_equal(ts + (-tdarr), expected2)\n\n with pytest.raises(TypeError):\n tdarr - ts\n\n def test_tdi_sub_dt64_array(self, box_with_array):\n dti = pd.date_range(\"2016-01-01\", periods=3)\n tdi = dti - dti.shift(1)\n dtarr = dti.values\n expected = pd.DatetimeIndex(dtarr) - tdi\n\n tdi = tm.box_expected(tdi, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n with pytest.raises(TypeError):\n tdi - dtarr\n\n # TimedeltaIndex.__rsub__\n result = dtarr - tdi\n tm.assert_equal(result, expected)\n\n def test_tdi_add_dt64_array(self, box_with_array):\n dti = pd.date_range(\"2016-01-01\", periods=3)\n tdi = dti - dti.shift(1)\n dtarr = dti.values\n expected = pd.DatetimeIndex(dtarr) + tdi\n\n tdi = tm.box_expected(tdi, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n result = tdi + dtarr\n tm.assert_equal(result, expected)\n result = dtarr + tdi\n tm.assert_equal(result, expected)\n\n def test_td64arr_add_datetime64_nat(self, box_with_array):\n # GH#23215\n other = np.datetime64(\"NaT\")\n\n tdi = timedelta_range(\"1 day\", periods=3)\n expected = pd.DatetimeIndex([\"NaT\", \"NaT\", \"NaT\"])\n\n tdser = tm.box_expected(tdi, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n tm.assert_equal(tdser + other, expected)\n tm.assert_equal(other + tdser, expected)\n\n # ------------------------------------------------------------------\n # Operations with int-like others\n\n @pytest.mark.parametrize(\n \"other\",\n [\n # GH#19123\n 1,\n Series([20, 30, 40], dtype=\"uint8\"),\n np.array([20, 30, 40], dtype=\"uint8\"),\n pd.UInt64Index([20, 30, 40]),\n pd.Int64Index([20, 30, 40]),\n Series([2, 3, 4]),\n 1.5,\n np.array(2),\n ],\n )\n def test_td64arr_addsub_numeric_invalid(self, box_with_array, other):\n box = box_with_array\n tdser = pd.Series([\"59 Days\", \"59 Days\", \"NaT\"], dtype=\"m8[ns]\")\n tdser = tm.box_expected(tdser, box)\n\n err = TypeError\n if box in [pd.Index, tm.to_array] and not isinstance(other, float):\n err = NullFrequencyError\n\n with pytest.raises(err):\n tdser + other\n with pytest.raises(err):\n other + tdser\n with pytest.raises(err):\n tdser - other\n with 
pytest.raises(err):\n other - tdser\n\n @pytest.mark.parametrize(\n \"dtype\",\n [\n \"int64\",\n \"int32\",\n \"int16\",\n \"uint64\",\n \"uint32\",\n \"uint16\",\n \"uint8\",\n \"float64\",\n \"float32\",\n \"float16\",\n ],\n )\n @pytest.mark.parametrize(\n \"vec\",\n [\n np.array([1, 2, 3]),\n pd.Index([1, 2, 3]),\n Series([1, 2, 3])\n # TODO: Add DataFrame in here?\n ],\n ids=lambda x: type(x).__name__,\n )\n def test_td64arr_add_sub_numeric_arr_invalid(self, box_with_array, vec, dtype):\n box = box_with_array\n tdser = pd.Series([\"59 Days\", \"59 Days\", \"NaT\"], dtype=\"m8[ns]\")\n tdser = tm.box_expected(tdser, box)\n err = TypeError\n if box in [pd.Index, tm.to_array] and not dtype.startswith(\"float\"):\n err = NullFrequencyError\n\n vector = vec.astype(dtype)\n with pytest.raises(err):\n tdser + vector\n with pytest.raises(err):\n vector + tdser\n with pytest.raises(err):\n tdser - vector\n with pytest.raises(err):\n vector - tdser\n\n # ------------------------------------------------------------------\n # Operations with timedelta-like others\n\n # TODO: this was taken from tests.series.test_ops; de-duplicate\n def test_operators_timedelta64_with_timedelta(self, scalar_td):\n # smoke tests\n td1 = Series([timedelta(minutes=5, seconds=3)] * 3)\n td1.iloc[2] = np.nan\n\n td1 + scalar_td\n scalar_td + td1\n td1 - scalar_td\n scalar_td - td1\n td1 / scalar_td\n scalar_td / td1\n\n # TODO: this was taken from tests.series.test_ops; de-duplicate\n def test_timedelta64_operations_with_timedeltas(self):\n # td operate with td\n td1 = Series([timedelta(minutes=5, seconds=3)] * 3)\n td2 = timedelta(minutes=5, seconds=4)\n result = td1 - td2\n expected = Series([timedelta(seconds=0)] * 3) - Series(\n [timedelta(seconds=1)] * 3\n )\n assert result.dtype == \"m8[ns]\"\n tm.assert_series_equal(result, expected)\n\n result2 = td2 - td1\n expected = Series([timedelta(seconds=1)] * 3) - Series(\n [timedelta(seconds=0)] * 3\n )\n tm.assert_series_equal(result2, expected)\n\n # roundtrip\n tm.assert_series_equal(result + td2, td1)\n\n # Now again, using pd.to_timedelta, which should build\n # a Series or a scalar, depending on input.\n td1 = Series(pd.to_timedelta([\"00:05:03\"] * 3))\n td2 = pd.to_timedelta(\"00:05:04\")\n result = td1 - td2\n expected = Series([timedelta(seconds=0)] * 3) - Series(\n [timedelta(seconds=1)] * 3\n )\n assert result.dtype == \"m8[ns]\"\n tm.assert_series_equal(result, expected)\n\n result2 = td2 - td1\n expected = Series([timedelta(seconds=1)] * 3) - Series(\n [timedelta(seconds=0)] * 3\n )\n tm.assert_series_equal(result2, expected)\n\n # roundtrip\n tm.assert_series_equal(result + td2, td1)\n\n def test_td64arr_add_td64_array(self, box_with_array):\n box = box_with_array\n dti = pd.date_range(\"2016-01-01\", periods=3)\n tdi = dti - dti.shift(1)\n tdarr = tdi.values\n\n expected = 2 * tdi\n tdi = tm.box_expected(tdi, box)\n expected = tm.box_expected(expected, box)\n\n result = tdi + tdarr\n tm.assert_equal(result, expected)\n result = tdarr + tdi\n tm.assert_equal(result, expected)\n\n def test_td64arr_sub_td64_array(self, box_with_array):\n box = box_with_array\n dti = pd.date_range(\"2016-01-01\", periods=3)\n tdi = dti - dti.shift(1)\n tdarr = tdi.values\n\n expected = 0 * tdi\n tdi = tm.box_expected(tdi, box)\n expected = tm.box_expected(expected, box)\n\n result = tdi - tdarr\n tm.assert_equal(result, expected)\n result = tdarr - tdi\n tm.assert_equal(result, expected)\n\n # TODO: parametrize over [add, sub, radd, rsub]?\n @pytest.mark.parametrize(\n 
\"names\",\n [\n (None, None, None),\n (\"Egon\", \"Venkman\", None),\n (\"NCC1701D\", \"NCC1701D\", \"NCC1701D\"),\n ],\n )\n def test_td64arr_add_sub_tdi(self, box, names):\n # GH#17250 make sure result dtype is correct\n # GH#19043 make sure names are propagated correctly\n if box is pd.DataFrame and names[1] == \"Venkman\":\n pytest.skip(\n \"Name propagation for DataFrame does not behave like \"\n \"it does for Index/Series\"\n )\n\n tdi = TimedeltaIndex([\"0 days\", \"1 day\"], name=names[0])\n ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1])\n expected = Series(\n [Timedelta(hours=3), Timedelta(days=1, hours=4)], name=names[2]\n )\n\n ser = tm.box_expected(ser, box)\n expected = tm.box_expected(expected, box)\n\n result = tdi + ser\n tm.assert_equal(result, expected)\n if box is not pd.DataFrame:\n assert result.dtype == \"timedelta64[ns]\"\n else:\n assert result.dtypes[0] == \"timedelta64[ns]\"\n\n result = ser + tdi\n tm.assert_equal(result, expected)\n if box is not pd.DataFrame:\n assert result.dtype == \"timedelta64[ns]\"\n else:\n assert result.dtypes[0] == \"timedelta64[ns]\"\n\n expected = Series(\n [Timedelta(hours=-3), Timedelta(days=1, hours=-4)], name=names[2]\n )\n expected = tm.box_expected(expected, box)\n\n result = tdi - ser\n tm.assert_equal(result, expected)\n if box is not pd.DataFrame:\n assert result.dtype == \"timedelta64[ns]\"\n else:\n assert result.dtypes[0] == \"timedelta64[ns]\"\n\n result = ser - tdi\n tm.assert_equal(result, -expected)\n if box is not pd.DataFrame:\n assert result.dtype == \"timedelta64[ns]\"\n else:\n assert result.dtypes[0] == \"timedelta64[ns]\"\n\n def test_td64arr_add_sub_td64_nat(self, box_with_array):\n # GH#23320 special handling for timedelta64(\"NaT\")\n box = box_with_array\n tdi = pd.TimedeltaIndex([NaT, Timedelta(\"1s\")])\n other = np.timedelta64(\"NaT\")\n expected = pd.TimedeltaIndex([\"NaT\"] * 2)\n\n obj = tm.box_expected(tdi, box)\n expected = tm.box_expected(expected, box)\n\n result = obj + other\n tm.assert_equal(result, expected)\n result = other + obj\n tm.assert_equal(result, expected)\n result = obj - other\n tm.assert_equal(result, expected)\n result = other - obj\n tm.assert_equal(result, expected)\n\n def test_td64arr_sub_NaT(self, box_with_array):\n # GH#18808\n box = box_with_array\n ser = Series([NaT, Timedelta(\"1s\")])\n expected = Series([NaT, NaT], dtype=\"timedelta64[ns]\")\n\n ser = tm.box_expected(ser, box)\n expected = tm.box_expected(expected, box)\n\n res = ser - pd.NaT\n tm.assert_equal(res, expected)\n\n def test_td64arr_add_timedeltalike(self, two_hours, box_with_array):\n # only test adding/sub offsets as + is now numeric\n box = box_with_array\n rng = timedelta_range(\"1 days\", \"10 days\")\n expected = timedelta_range(\"1 days 02:00:00\", \"10 days 02:00:00\", freq=\"D\")\n rng = tm.box_expected(rng, box)\n expected = tm.box_expected(expected, box)\n\n result = rng + two_hours\n tm.assert_equal(result, expected)\n\n def test_td64arr_sub_timedeltalike(self, two_hours, box_with_array):\n # only test adding/sub offsets as - is now numeric\n box = box_with_array\n rng = timedelta_range(\"1 days\", \"10 days\")\n expected = timedelta_range(\"0 days 22:00:00\", \"9 days 22:00:00\")\n\n rng = tm.box_expected(rng, box)\n expected = tm.box_expected(expected, box)\n\n result = rng - two_hours\n tm.assert_equal(result, expected)\n\n # ------------------------------------------------------------------\n # __add__/__sub__ with DateOffsets and arrays of DateOffsets\n\n # TODO: 
this was taken from tests.series.test_operators; de-duplicate\n def test_timedelta64_operations_with_DateOffset(self):\n # GH#10699\n td = Series([timedelta(minutes=5, seconds=3)] * 3)\n result = td + pd.offsets.Minute(1)\n expected = Series([timedelta(minutes=6, seconds=3)] * 3)\n tm.assert_series_equal(result, expected)\n\n result = td - pd.offsets.Minute(1)\n expected = Series([timedelta(minutes=4, seconds=3)] * 3)\n tm.assert_series_equal(result, expected)\n\n with tm.assert_produces_warning(PerformanceWarning):\n result = td + Series(\n [pd.offsets.Minute(1), pd.offsets.Second(3), pd.offsets.Hour(2)]\n )\n expected = Series(\n [\n timedelta(minutes=6, seconds=3),\n timedelta(minutes=5, seconds=6),\n timedelta(hours=2, minutes=5, seconds=3),\n ]\n )\n tm.assert_series_equal(result, expected)\n\n result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)\n expected = Series([timedelta(minutes=6, seconds=15)] * 3)\n tm.assert_series_equal(result, expected)\n\n # valid DateOffsets\n for do in [\"Hour\", \"Minute\", \"Second\", \"Day\", \"Micro\", \"Milli\", \"Nano\"]:\n op = getattr(pd.offsets, do)\n td + op(5)\n op(5) + td\n td - op(5)\n op(5) - td\n\n @pytest.mark.parametrize(\n \"names\", [(None, None, None), (\"foo\", \"bar\", None), (\"foo\", \"foo\", \"foo\")]\n )\n def test_td64arr_add_offset_index(self, names, box):\n # GH#18849, GH#19744\n if box is pd.DataFrame and names[1] == \"bar\":\n pytest.skip(\n \"Name propagation for DataFrame does not behave like \"\n \"it does for Index/Series\"\n )\n\n tdi = TimedeltaIndex([\"1 days 00:00:00\", \"3 days 04:00:00\"], name=names[0])\n other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)], name=names[1])\n\n expected = TimedeltaIndex(\n [tdi[n] + other[n] for n in range(len(tdi))], freq=\"infer\", name=names[2]\n )\n tdi = tm.box_expected(tdi, box)\n expected = tm.box_expected(expected, box)\n\n # The DataFrame operation is transposed and so operates as separate\n # scalar operations, which do not issue a PerformanceWarning\n warn = PerformanceWarning if box is not pd.DataFrame else None\n with tm.assert_produces_warning(warn):\n res = tdi + other\n tm.assert_equal(res, expected)\n\n with tm.assert_produces_warning(warn):\n res2 = other + tdi\n tm.assert_equal(res2, expected)\n\n # TODO: combine with test_td64arr_add_offset_index by parametrizing\n # over second box?\n def test_td64arr_add_offset_array(self, box_with_array):\n # GH#18849\n box = box_with_array\n tdi = TimedeltaIndex([\"1 days 00:00:00\", \"3 days 04:00:00\"])\n other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])\n\n expected = TimedeltaIndex(\n [tdi[n] + other[n] for n in range(len(tdi))], freq=\"infer\"\n )\n\n tdi = tm.box_expected(tdi, box)\n expected = tm.box_expected(expected, box)\n\n # The DataFrame operation is transposed and so operates as separate\n # scalar operations, which do not issue a PerformanceWarning\n warn = PerformanceWarning if box is not pd.DataFrame else None\n with tm.assert_produces_warning(warn):\n res = tdi + other\n tm.assert_equal(res, expected)\n\n with tm.assert_produces_warning(warn):\n res2 = other + tdi\n tm.assert_equal(res2, expected)\n\n @pytest.mark.parametrize(\n \"names\", [(None, None, None), (\"foo\", \"bar\", None), (\"foo\", \"foo\", \"foo\")]\n )\n def test_td64arr_sub_offset_index(self, names, box_with_array):\n # GH#18824, GH#19744\n box = box_with_array\n xbox = box if box is not tm.to_array else pd.Index\n exname = names[2] if box is not tm.to_array else names[1]\n\n if box is pd.DataFrame and 
names[1] == \"bar\":\n pytest.skip(\n \"Name propagation for DataFrame does not behave like \"\n \"it does for Index/Series\"\n )\n\n tdi = TimedeltaIndex([\"1 days 00:00:00\", \"3 days 04:00:00\"], name=names[0])\n other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)], name=names[1])\n\n expected = TimedeltaIndex(\n [tdi[n] - other[n] for n in range(len(tdi))], freq=\"infer\", name=exname\n )\n\n tdi = tm.box_expected(tdi, box)\n expected = tm.box_expected(expected, xbox)\n\n # The DataFrame operation is transposed and so operates as separate\n # scalar operations, which do not issue a PerformanceWarning\n warn = PerformanceWarning if box is not pd.DataFrame else None\n with tm.assert_produces_warning(warn):\n res = tdi - other\n tm.assert_equal(res, expected)\n\n def test_td64arr_sub_offset_array(self, box_with_array):\n # GH#18824\n tdi = TimedeltaIndex([\"1 days 00:00:00\", \"3 days 04:00:00\"])\n other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])\n\n expected = TimedeltaIndex(\n [tdi[n] - other[n] for n in range(len(tdi))], freq=\"infer\"\n )\n\n tdi = tm.box_expected(tdi, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n # The DataFrame operation is transposed and so operates as separate\n # scalar operations, which do not issue a PerformanceWarning\n warn = None if box_with_array is pd.DataFrame else PerformanceWarning\n with tm.assert_produces_warning(warn):\n res = tdi - other\n tm.assert_equal(res, expected)\n\n @pytest.mark.parametrize(\n \"names\", [(None, None, None), (\"foo\", \"bar\", None), (\"foo\", \"foo\", \"foo\")]\n )\n def test_td64arr_with_offset_series(self, names, box_df_fail):\n # GH#18849\n box = box_df_fail\n box2 = Series if box in [pd.Index, tm.to_array] else box\n exname = names[2] if box is not tm.to_array else names[1]\n\n tdi = TimedeltaIndex([\"1 days 00:00:00\", \"3 days 04:00:00\"], name=names[0])\n other = Series([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)], name=names[1])\n\n expected_add = Series([tdi[n] + other[n] for n in range(len(tdi))], name=exname)\n tdi = tm.box_expected(tdi, box)\n expected_add = tm.box_expected(expected_add, box2)\n\n with tm.assert_produces_warning(PerformanceWarning):\n res = tdi + other\n tm.assert_equal(res, expected_add)\n\n with tm.assert_produces_warning(PerformanceWarning):\n res2 = other + tdi\n tm.assert_equal(res2, expected_add)\n\n # TODO: separate/parametrize add/sub test?\n expected_sub = Series([tdi[n] - other[n] for n in range(len(tdi))], name=exname)\n expected_sub = tm.box_expected(expected_sub, box2)\n\n with tm.assert_produces_warning(PerformanceWarning):\n res3 = tdi - other\n tm.assert_equal(res3, expected_sub)\n\n @pytest.mark.parametrize(\"obox\", [np.array, pd.Index, pd.Series])\n def test_td64arr_addsub_anchored_offset_arraylike(self, obox, box_with_array):\n # GH#18824\n tdi = TimedeltaIndex([\"1 days 00:00:00\", \"3 days 04:00:00\"])\n tdi = tm.box_expected(tdi, box_with_array)\n\n anchored = obox([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])\n\n # addition/subtraction ops with anchored offsets should issue\n # a PerformanceWarning and _then_ raise a TypeError.\n with pytest.raises(TypeError):\n with tm.assert_produces_warning(PerformanceWarning):\n tdi + anchored\n with pytest.raises(TypeError):\n with tm.assert_produces_warning(PerformanceWarning):\n anchored + tdi\n with pytest.raises(TypeError):\n with tm.assert_produces_warning(PerformanceWarning):\n tdi - anchored\n with pytest.raises(TypeError):\n with 
tm.assert_produces_warning(PerformanceWarning):\n anchored - tdi\n\n\nclass TestTimedeltaArraylikeMulDivOps:\n # Tests for timedelta64[ns]\n # __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__\n\n # TODO: Moved from tests.series.test_operators; needs cleanup\n @pytest.mark.parametrize(\"m\", [1, 3, 10])\n @pytest.mark.parametrize(\"unit\", [\"D\", \"h\", \"m\", \"s\", \"ms\", \"us\", \"ns\"])\n def test_timedelta64_conversions(self, m, unit):\n startdate = Series(pd.date_range(\"2013-01-01\", \"2013-01-03\"))\n enddate = Series(pd.date_range(\"2013-03-01\", \"2013-03-03\"))\n\n ser = enddate - startdate\n ser[2] = np.nan\n\n # op\n expected = Series([x / np.timedelta64(m, unit) for x in ser])\n result = ser / np.timedelta64(m, unit)\n tm.assert_series_equal(result, expected)\n\n # reverse op\n expected = Series([Timedelta(np.timedelta64(m, unit)) / x for x in ser])\n result = np.timedelta64(m, unit) / ser\n tm.assert_series_equal(result, expected)\n\n # ------------------------------------------------------------------\n # Multiplication\n # organized with scalar others first, then array-like\n\n def test_td64arr_mul_int(self, box_with_array):\n idx = TimedeltaIndex(np.arange(5, dtype=\"int64\"))\n idx = tm.box_expected(idx, box_with_array)\n\n result = idx * 1\n tm.assert_equal(result, idx)\n\n result = 1 * idx\n tm.assert_equal(result, idx)\n\n def test_td64arr_mul_tdlike_scalar_raises(self, two_hours, box_with_array):\n rng = timedelta_range(\"1 days\", \"10 days\", name=\"foo\")\n rng = tm.box_expected(rng, box_with_array)\n with pytest.raises(TypeError):\n rng * two_hours\n\n def test_tdi_mul_int_array_zerodim(self, box_with_array):\n rng5 = np.arange(5, dtype=\"int64\")\n idx = TimedeltaIndex(rng5)\n expected = TimedeltaIndex(rng5 * 5)\n\n idx = tm.box_expected(idx, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n result = idx * np.array(5, dtype=\"int64\")\n tm.assert_equal(result, expected)\n\n def test_tdi_mul_int_array(self, box_with_array):\n rng5 = np.arange(5, dtype=\"int64\")\n idx = TimedeltaIndex(rng5)\n expected = TimedeltaIndex(rng5 ** 2)\n\n idx = tm.box_expected(idx, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n result = idx * rng5\n tm.assert_equal(result, expected)\n\n def test_tdi_mul_int_series(self, box_with_array):\n box = box_with_array\n xbox = pd.Series if box in [pd.Index, tm.to_array] else box\n\n idx = TimedeltaIndex(np.arange(5, dtype=\"int64\"))\n expected = TimedeltaIndex(np.arange(5, dtype=\"int64\") ** 2)\n\n idx = tm.box_expected(idx, box)\n expected = tm.box_expected(expected, xbox)\n\n result = idx * pd.Series(np.arange(5, dtype=\"int64\"))\n tm.assert_equal(result, expected)\n\n def test_tdi_mul_float_series(self, box_with_array):\n box = box_with_array\n xbox = pd.Series if box in [pd.Index, tm.to_array] else box\n\n idx = TimedeltaIndex(np.arange(5, dtype=\"int64\"))\n idx = tm.box_expected(idx, box)\n\n rng5f = np.arange(5, dtype=\"float64\")\n expected = TimedeltaIndex(rng5f * (rng5f + 1.0))\n expected = tm.box_expected(expected, xbox)\n\n result = idx * Series(rng5f + 1.0)\n tm.assert_equal(result, expected)\n\n # TODO: Put Series/DataFrame in others?\n @pytest.mark.parametrize(\n \"other\",\n [\n np.arange(1, 11),\n pd.Int64Index(range(1, 11)),\n pd.UInt64Index(range(1, 11)),\n pd.Float64Index(range(1, 11)),\n pd.RangeIndex(1, 11),\n ],\n ids=lambda x: type(x).__name__,\n )\n def test_tdi_rmul_arraylike(self, other, box_with_array):\n box = box_with_array\n xbox = 
get_upcast_box(box, other)\n\n tdi = TimedeltaIndex([\"1 Day\"] * 10)\n expected = timedelta_range(\"1 days\", \"10 days\")\n expected._data.freq = None\n\n tdi = tm.box_expected(tdi, box)\n expected = tm.box_expected(expected, xbox)\n\n result = other * tdi\n tm.assert_equal(result, expected)\n commute = tdi * other\n tm.assert_equal(commute, expected)\n\n # ------------------------------------------------------------------\n # __div__, __rdiv__\n\n def test_td64arr_div_nat_invalid(self, box_with_array):\n # don't allow division by NaT (maybe could in the future)\n rng = timedelta_range(\"1 days\", \"10 days\", name=\"foo\")\n rng = tm.box_expected(rng, box_with_array)\n\n with pytest.raises(TypeError, match=\"unsupported operand type\"):\n rng / pd.NaT\n with pytest.raises(TypeError, match=\"Cannot divide NaTType by\"):\n pd.NaT / rng\n\n def test_td64arr_div_td64nat(self, box_with_array):\n # GH#23829\n rng = timedelta_range(\"1 days\", \"10 days\")\n rng = tm.box_expected(rng, box_with_array)\n\n other = np.timedelta64(\"NaT\")\n\n expected = np.array([np.nan] * 10)\n expected = tm.box_expected(expected, box_with_array)\n\n result = rng / other\n tm.assert_equal(result, expected)\n\n result = other / rng\n tm.assert_equal(result, expected)\n\n def test_td64arr_div_int(self, box_with_array):\n idx = TimedeltaIndex(np.arange(5, dtype=\"int64\"))\n idx = tm.box_expected(idx, box_with_array)\n\n result = idx / 1\n tm.assert_equal(result, idx)\n\n with pytest.raises(TypeError, match=\"Cannot divide\"):\n # GH#23829\n 1 / idx\n\n def test_td64arr_div_tdlike_scalar(self, two_hours, box_with_array):\n # GH#20088, GH#22163 ensure DataFrame returns correct dtype\n rng = timedelta_range(\"1 days\", \"10 days\", name=\"foo\")\n expected = pd.Float64Index((np.arange(10) + 1) * 12, name=\"foo\")\n\n rng = tm.box_expected(rng, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n result = rng / two_hours\n tm.assert_equal(result, expected)\n\n result = two_hours / rng\n expected = 1 / expected\n tm.assert_equal(result, expected)\n\n def test_td64arr_div_tdlike_scalar_with_nat(self, two_hours, box_with_array):\n rng = TimedeltaIndex([\"1 days\", pd.NaT, \"2 days\"], name=\"foo\")\n expected = pd.Float64Index([12, np.nan, 24], name=\"foo\")\n\n rng = tm.box_expected(rng, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n result = rng / two_hours\n tm.assert_equal(result, expected)\n\n result = two_hours / rng\n expected = 1 / expected\n tm.assert_equal(result, expected)\n\n def test_td64arr_div_td64_ndarray(self, box_with_array):\n # GH#22631\n rng = TimedeltaIndex([\"1 days\", pd.NaT, \"2 days\"])\n expected = pd.Float64Index([12, np.nan, 24])\n\n rng = tm.box_expected(rng, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n other = np.array([2, 4, 2], dtype=\"m8[h]\")\n result = rng / other\n tm.assert_equal(result, expected)\n\n result = rng / tm.box_expected(other, box_with_array)\n tm.assert_equal(result, expected)\n\n result = rng / other.astype(object)\n tm.assert_equal(result, expected)\n\n result = rng / list(other)\n tm.assert_equal(result, expected)\n\n # reversed op\n expected = 1 / expected\n result = other / rng\n tm.assert_equal(result, expected)\n\n result = tm.box_expected(other, box_with_array) / rng\n tm.assert_equal(result, expected)\n\n result = other.astype(object) / rng\n tm.assert_equal(result, expected)\n\n result = list(other) / rng\n tm.assert_equal(result, expected)\n\n def 
test_tdarr_div_length_mismatch(self, box_with_array):\n rng = TimedeltaIndex([\"1 days\", pd.NaT, \"2 days\"])\n mismatched = [1, 2, 3, 4]\n\n rng = tm.box_expected(rng, box_with_array)\n for obj in [mismatched, mismatched[:2]]:\n # one shorter, one longer\n for other in [obj, np.array(obj), pd.Index(obj)]:\n with pytest.raises(ValueError):\n rng / other\n with pytest.raises(ValueError):\n other / rng\n\n # ------------------------------------------------------------------\n # __floordiv__, __rfloordiv__\n\n def test_td64arr_floordiv_tdscalar(self, box_with_array, scalar_td):\n # GH#18831\n td1 = Series([timedelta(minutes=5, seconds=3)] * 3)\n td1.iloc[2] = np.nan\n\n expected = Series([0, 0, np.nan])\n\n td1 = tm.box_expected(td1, box_with_array, transpose=False)\n expected = tm.box_expected(expected, box_with_array, transpose=False)\n\n result = td1 // scalar_td\n tm.assert_equal(result, expected)\n\n def test_td64arr_rfloordiv_tdscalar(self, box_with_array, scalar_td):\n # GH#18831\n td1 = Series([timedelta(minutes=5, seconds=3)] * 3)\n td1.iloc[2] = np.nan\n\n expected = Series([1, 1, np.nan])\n\n td1 = tm.box_expected(td1, box_with_array, transpose=False)\n expected = tm.box_expected(expected, box_with_array, transpose=False)\n\n result = scalar_td // td1\n tm.assert_equal(result, expected)\n\n def test_td64arr_rfloordiv_tdscalar_explicit(self, box_with_array, scalar_td):\n # GH#18831\n td1 = Series([timedelta(minutes=5, seconds=3)] * 3)\n td1.iloc[2] = np.nan\n\n expected = Series([1, 1, np.nan])\n\n td1 = tm.box_expected(td1, box_with_array, transpose=False)\n expected = tm.box_expected(expected, box_with_array, transpose=False)\n\n # We can test __rfloordiv__ using this syntax,\n # see `test_timedelta_rfloordiv`\n result = td1.__rfloordiv__(scalar_td)\n tm.assert_equal(result, expected)\n\n def test_td64arr_floordiv_int(self, box_with_array):\n idx = TimedeltaIndex(np.arange(5, dtype=\"int64\"))\n idx = tm.box_expected(idx, box_with_array)\n result = idx // 1\n tm.assert_equal(result, idx)\n\n pattern = \"floor_divide cannot use operands|Cannot divide int by Timedelta*\"\n with pytest.raises(TypeError, match=pattern):\n 1 // idx\n\n def test_td64arr_floordiv_tdlike_scalar(self, two_hours, box_with_array):\n tdi = timedelta_range(\"1 days\", \"10 days\", name=\"foo\")\n expected = pd.Int64Index((np.arange(10) + 1) * 12, name=\"foo\")\n\n tdi = tm.box_expected(tdi, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n result = tdi // two_hours\n tm.assert_equal(result, expected)\n\n # TODO: Is this redundant with test_td64arr_floordiv_tdlike_scalar?\n @pytest.mark.parametrize(\n \"scalar_td\",\n [\n timedelta(minutes=10, seconds=7),\n Timedelta(\"10m7s\"),\n Timedelta(\"10m7s\").to_timedelta64(),\n ],\n ids=lambda x: type(x).__name__,\n )\n def test_td64arr_rfloordiv_tdlike_scalar(self, scalar_td, box_with_array):\n # GH#19125\n tdi = TimedeltaIndex([\"00:05:03\", \"00:05:03\", pd.NaT], freq=None)\n expected = pd.Index([2.0, 2.0, np.nan])\n\n tdi = tm.box_expected(tdi, box_with_array, transpose=False)\n expected = tm.box_expected(expected, box_with_array, transpose=False)\n\n res = tdi.__rfloordiv__(scalar_td)\n tm.assert_equal(res, expected)\n\n expected = pd.Index([0.0, 0.0, np.nan])\n expected = tm.box_expected(expected, box_with_array, transpose=False)\n\n res = tdi // (scalar_td)\n tm.assert_equal(res, expected)\n\n # ------------------------------------------------------------------\n # mod, divmod\n # TODO: operations with timedelta-like arrays, numeric 
arrays,\n # reversed ops\n\n def test_td64arr_mod_tdscalar(self, box_with_array, three_days):\n tdi = timedelta_range(\"1 Day\", \"9 days\")\n tdarr = tm.box_expected(tdi, box_with_array)\n\n expected = TimedeltaIndex([\"1 Day\", \"2 Days\", \"0 Days\"] * 3)\n expected = tm.box_expected(expected, box_with_array)\n\n result = tdarr % three_days\n tm.assert_equal(result, expected)\n\n if box_with_array is pd.DataFrame:\n pytest.xfail(\"DataFrame does not have __divmod__ or __rdivmod__\")\n\n result = divmod(tdarr, three_days)\n tm.assert_equal(result[1], expected)\n tm.assert_equal(result[0], tdarr // three_days)\n\n def test_td64arr_mod_int(self, box_with_array):\n tdi = timedelta_range(\"1 ns\", \"10 ns\", periods=10)\n tdarr = tm.box_expected(tdi, box_with_array)\n\n expected = TimedeltaIndex([\"1 ns\", \"0 ns\"] * 5)\n expected = tm.box_expected(expected, box_with_array)\n\n result = tdarr % 2\n tm.assert_equal(result, expected)\n\n with pytest.raises(TypeError):\n 2 % tdarr\n\n if box_with_array is pd.DataFrame:\n pytest.xfail(\"DataFrame does not have __divmod__ or __rdivmod__\")\n\n result = divmod(tdarr, 2)\n tm.assert_equal(result[1], expected)\n tm.assert_equal(result[0], tdarr // 2)\n\n def test_td64arr_rmod_tdscalar(self, box_with_array, three_days):\n tdi = timedelta_range(\"1 Day\", \"9 days\")\n tdarr = tm.box_expected(tdi, box_with_array)\n\n expected = [\"0 Days\", \"1 Day\", \"0 Days\"] + [\"3 Days\"] * 6\n expected = TimedeltaIndex(expected)\n expected = tm.box_expected(expected, box_with_array)\n\n result = three_days % tdarr\n tm.assert_equal(result, expected)\n\n if box_with_array is pd.DataFrame:\n pytest.xfail(\"DataFrame does not have __divmod__ or __rdivmod__\")\n\n result = divmod(three_days, tdarr)\n tm.assert_equal(result[1], expected)\n tm.assert_equal(result[0], three_days // tdarr)\n\n # ------------------------------------------------------------------\n # Operations with invalid others\n\n def test_td64arr_mul_tdscalar_invalid(self, box_with_array, scalar_td):\n td1 = Series([timedelta(minutes=5, seconds=3)] * 3)\n td1.iloc[2] = np.nan\n\n td1 = tm.box_expected(td1, box_with_array)\n\n # check that we are getting a TypeError\n # with 'operate' (from core/ops.py) for the ops that are not\n # defined\n pattern = \"operate|unsupported|cannot|not supported\"\n with pytest.raises(TypeError, match=pattern):\n td1 * scalar_td\n with pytest.raises(TypeError, match=pattern):\n scalar_td * td1\n\n def test_td64arr_mul_too_short_raises(self, box_with_array):\n idx = TimedeltaIndex(np.arange(5, dtype=\"int64\"))\n idx = tm.box_expected(idx, box_with_array)\n with pytest.raises(TypeError):\n idx * idx[:3]\n with pytest.raises(ValueError):\n idx * np.array([1, 2])\n\n def test_td64arr_mul_td64arr_raises(self, box_with_array):\n idx = TimedeltaIndex(np.arange(5, dtype=\"int64\"))\n idx = tm.box_expected(idx, box_with_array)\n with pytest.raises(TypeError):\n idx * idx\n\n # ------------------------------------------------------------------\n # Operations with numeric others\n\n @pytest.mark.parametrize(\"one\", [1, np.array(1), 1.0, np.array(1.0)])\n def test_td64arr_mul_numeric_scalar(self, box_with_array, one):\n # GH#4521\n # divide/multiply by integers\n tdser = pd.Series([\"59 Days\", \"59 Days\", \"NaT\"], dtype=\"m8[ns]\")\n expected = Series([\"-59 Days\", \"-59 Days\", \"NaT\"], dtype=\"timedelta64[ns]\")\n\n tdser = tm.box_expected(tdser, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n result = tdser * (-one)\n tm.assert_equal(result, 
expected)\n result = (-one) * tdser\n tm.assert_equal(result, expected)\n\n expected = Series([\"118 Days\", \"118 Days\", \"NaT\"], dtype=\"timedelta64[ns]\")\n expected = tm.box_expected(expected, box_with_array)\n\n result = tdser * (2 * one)\n tm.assert_equal(result, expected)\n result = (2 * one) * tdser\n tm.assert_equal(result, expected)\n\n @pytest.mark.parametrize(\"two\", [2, 2.0, np.array(2), np.array(2.0)])\n def test_td64arr_div_numeric_scalar(self, box_with_array, two):\n # GH#4521\n # divide/multiply by integers\n tdser = pd.Series([\"59 Days\", \"59 Days\", \"NaT\"], dtype=\"m8[ns]\")\n expected = Series([\"29.5D\", \"29.5D\", \"NaT\"], dtype=\"timedelta64[ns]\")\n\n tdser = tm.box_expected(tdser, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n result = tdser / two\n tm.assert_equal(result, expected)\n\n with pytest.raises(TypeError, match=\"Cannot divide\"):\n two / tdser\n\n @pytest.mark.parametrize(\n \"dtype\",\n [\n \"int64\",\n \"int32\",\n \"int16\",\n \"uint64\",\n \"uint32\",\n \"uint16\",\n \"uint8\",\n \"float64\",\n \"float32\",\n \"float16\",\n ],\n )\n @pytest.mark.parametrize(\n \"vector\",\n [np.array([20, 30, 40]), pd.Index([20, 30, 40]), Series([20, 30, 40])],\n ids=lambda x: type(x).__name__,\n )\n def test_td64arr_rmul_numeric_array(self, box_with_array, vector, dtype):\n # GH#4521\n # divide/multiply by integers\n xbox = get_upcast_box(box_with_array, vector)\n\n tdser = pd.Series([\"59 Days\", \"59 Days\", \"NaT\"], dtype=\"m8[ns]\")\n vector = vector.astype(dtype)\n\n expected = Series([\"1180 Days\", \"1770 Days\", \"NaT\"], dtype=\"timedelta64[ns]\")\n\n tdser = tm.box_expected(tdser, box_with_array)\n expected = tm.box_expected(expected, xbox)\n\n result = tdser * vector\n tm.assert_equal(result, expected)\n\n result = vector * tdser\n tm.assert_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"dtype\",\n [\n \"int64\",\n \"int32\",\n \"int16\",\n \"uint64\",\n \"uint32\",\n \"uint16\",\n \"uint8\",\n \"float64\",\n \"float32\",\n \"float16\",\n ],\n )\n @pytest.mark.parametrize(\n \"vector\",\n [np.array([20, 30, 40]), pd.Index([20, 30, 40]), Series([20, 30, 40])],\n ids=lambda x: type(x).__name__,\n )\n def test_td64arr_div_numeric_array(self, box_with_array, vector, dtype):\n # GH#4521\n # divide/multiply by integers\n xbox = get_upcast_box(box_with_array, vector)\n tdser = pd.Series([\"59 Days\", \"59 Days\", \"NaT\"], dtype=\"m8[ns]\")\n vector = vector.astype(dtype)\n expected = Series([\"2.95D\", \"1D 23H 12m\", \"NaT\"], dtype=\"timedelta64[ns]\")\n\n tdser = tm.box_expected(tdser, box_with_array)\n expected = tm.box_expected(expected, xbox)\n\n result = tdser / vector\n tm.assert_equal(result, expected)\n\n pattern = (\n \"true_divide cannot use operands|\"\n \"cannot perform __div__|\"\n \"cannot perform __truediv__|\"\n \"unsupported operand|\"\n \"Cannot divide\"\n )\n with pytest.raises(TypeError, match=pattern):\n vector / tdser\n\n if not isinstance(vector, pd.Index):\n # Index.__rdiv__ won't try to operate elementwise, just raises\n result = tdser / vector.astype(object)\n if box_with_array is pd.DataFrame:\n expected = [tdser.iloc[0, n] / vector[n] for n in range(len(vector))]\n else:\n expected = [tdser[n] / vector[n] for n in range(len(tdser))]\n expected = tm.box_expected(expected, xbox)\n tm.assert_equal(result, expected)\n\n with pytest.raises(TypeError, match=pattern):\n vector.astype(object) / tdser\n\n @pytest.mark.parametrize(\n \"names\",\n [\n (None, None, None),\n (\"Egon\", 
\"Venkman\", None),\n (\"NCC1701D\", \"NCC1701D\", \"NCC1701D\"),\n ],\n )\n def test_td64arr_mul_int_series(self, box_df_fail, names):\n # GH#19042 test for correct name attachment\n box = box_df_fail # broadcasts along wrong axis, but doesn't raise\n exname = names[2] if box is not tm.to_array else names[1]\n\n tdi = TimedeltaIndex(\n [\"0days\", \"1day\", \"2days\", \"3days\", \"4days\"], name=names[0]\n )\n # TODO: Should we be parametrizing over types for `ser` too?\n ser = Series([0, 1, 2, 3, 4], dtype=np.int64, name=names[1])\n\n expected = Series(\n [\"0days\", \"1day\", \"4days\", \"9days\", \"16days\"],\n dtype=\"timedelta64[ns]\",\n name=exname,\n )\n\n tdi = tm.box_expected(tdi, box)\n box = Series if (box is pd.Index or box is tm.to_array) else box\n expected = tm.box_expected(expected, box)\n\n result = ser * tdi\n tm.assert_equal(result, expected)\n\n # The direct operation tdi * ser still needs to be fixed.\n result = ser.__rmul__(tdi)\n tm.assert_equal(result, expected)\n\n # TODO: Should we be parametrizing over types for `ser` too?\n @pytest.mark.parametrize(\n \"names\",\n [\n (None, None, None),\n (\"Egon\", \"Venkman\", None),\n (\"NCC1701D\", \"NCC1701D\", \"NCC1701D\"),\n ],\n )\n def test_float_series_rdiv_td64arr(self, box_with_array, names):\n # GH#19042 test for correct name attachment\n # TODO: the direct operation TimedeltaIndex / Series still\n # needs to be fixed.\n box = box_with_array\n tdi = TimedeltaIndex(\n [\"0days\", \"1day\", \"2days\", \"3days\", \"4days\"], name=names[0]\n )\n ser = Series([1.5, 3, 4.5, 6, 7.5], dtype=np.float64, name=names[1])\n\n xname = names[2] if box is not tm.to_array else names[1]\n expected = Series(\n [tdi[n] / ser[n] for n in range(len(ser))],\n dtype=\"timedelta64[ns]\",\n name=xname,\n )\n\n xbox = box\n if box in [pd.Index, tm.to_array] and type(ser) is Series:\n xbox = Series\n\n tdi = tm.box_expected(tdi, box)\n expected = tm.box_expected(expected, xbox)\n\n result = ser.__rdiv__(tdi)\n if box is pd.DataFrame:\n # TODO: Should we skip this case sooner or test something else?\n assert result is NotImplemented\n else:\n tm.assert_equal(result, expected)\n\n\nclass TestTimedelta64ArrayLikeArithmetic:\n # Arithmetic tests for timedelta64[ns] vectors fully parametrized over\n # DataFrame/Series/TimedeltaIndex/TimedeltaArray. Ideally all arithmetic\n # tests will eventually end up here.\n\n def test_td64arr_pow_invalid(self, scalar_td, box_with_array):\n td1 = Series([timedelta(minutes=5, seconds=3)] * 3)\n td1.iloc[2] = np.nan\n\n td1 = tm.box_expected(td1, box_with_array)\n\n # check that we are getting a TypeError\n # with 'operate' (from core/ops.py) for the ops that are not\n # defined\n pattern = \"operate|unsupported|cannot|not supported\"\n with pytest.raises(TypeError, match=pattern):\n scalar_td ** td1\n\n with pytest.raises(TypeError, match=pattern):\n td1 ** scalar_td\n" ]
[ [ "pandas.DatetimeIndex", "pandas.offsets.Minute", "pandas.offsets.Day", "pandas.tests.arithmetic.test_datetime64.assert_invalid_comparison", "numpy.negative", "pandas.util.testing.box_expected", "numpy.multiply", "pandas.Timestamp", "pandas.UInt64Index", "pandas.util.testing.assert_numpy_array_equal", "numpy.divide", "pandas.Timedelta", "pandas.DataFrame", "pandas.util.testing.assert_index_equal", "pandas.timedelta_range", "pandas.tseries.offsets.BusinessDay", "numpy.arange", "pandas.util.testing.assert_produces_warning", "pandas.TimedeltaIndex", "pandas.tseries.offsets.Day", "pandas.Period", "pandas.util.testing.assert_equal", "numpy.array", "pandas.offsets.Second", "numpy.timedelta64", "numpy.absolute", "numpy.datetime64", "pandas.offsets.MonthEnd", "pandas.Index", "pandas.util.testing.assert_frame_equal", "pandas.Int64Index", "pandas.date_range", "pandas.to_timedelta", "pandas.Float64Index", "pandas.offsets.Hour", "pandas.RangeIndex", "pandas.util.testing.assert_series_equal", "pandas.Series" ] ]
LeandroTeodoroRJ/SistemasDeControlePython
[ "83122f45f55faac0bd069668dc6e1c19f6e6fcd4" ]
[ "Revisao_Ex1_Polos_Simples.py" ]
[ "'''\r\nExercício – Pólos Simples \r\n1. Para o sistema representado pela seguinte equação diferencial: \r\n\r\ndiff(y, t, 2) + 5*diff(y, t, 1) + 6*y = u\r\n\r\nDetermine: \r\n(a) A função de transferência; \r\n(b) A equação característica; \r\n(c) Os pólos e os zeros; \r\n(d) A estabilidade do sistema; \r\n(e) A resposta no tempo para u(t) igual a um impulso unitário.\r\n\r\n'''\r\n#importação de módulos\r\nfrom sympy import *\r\ninit_printing()\r\nfrom sympy.abc import s, u, y, t\r\n\r\nfrom control import *\r\nfrom control.matlab import *\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n#RESOLUÇÃO\r\nprint('Respostas')\r\n\r\nprint('Função no domínio da frequência')\r\nfunc = s**2*y + 5*s*y + 6*y - u\r\npprint(func)\r\n\r\nprint('\\nLetra A')\r\nft = solve(func, y) #A função solve retorna uma lista de resoluções\r\nft = ft[0]/u\r\npprint(ft)\r\n\r\n#---------------------------------\r\nprint('\\nLetra B e C: Encontrando os polos')\r\nft_den = fraction(ft)\r\nft_den = ft_den[1]\r\npprint(ft_den)\r\nprint('Polos:')\r\npprint(solve(ft_den, s))\r\n\r\n#---------------------------------\r\nprint('\\nLetra D: Resposta do sistema para um impulso unitário')\r\n# A função do impulso unitário no domínio da frequência é U(S) = 1\r\nu_impulse = 1\r\nresp = ft*u_impulse\r\nprint('\\nResposta no domínio da frequência:')\r\npprint(ft)\r\nprint('\\nResposta no domínio do tempo:')\r\nresp_t = inverse_laplace_transform(resp, s, t)\r\npprint(resp_t)\r\nprint('\\nExpandindo a função:')\r\nresp_t = expand(resp_t)\r\npprint(resp_t)\r\n\r\n\r\n#------------------------------------------------------\r\nprint('\\nPlus, plotando o grafico de resposta no tempo')\r\n\r\n#Criando o sistema pela função de transferência\r\n#Os itens das listas representam os coeficientes do\r\n#numerador e denominador da função de transferência.\r\nnum = [1]\r\nden = [1, 5, 6]\r\nsys = tf(num, den)\r\nprint('\\nEncontrando os polos novamente')\r\nprint(pole(sys))\r\n\r\n#Criando a base de tempo para resposta\r\nex = np.arange(0.0, 12.0, 0.1)\r\nex = list(ex)\r\n\r\n#Plotando o gráfico de resposta ao impulso unitário\r\n#baseado no objeto sys\r\nT, Y = impulse_response(sys, ex)\r\nplt.plot(T, Y)\r\nplt.show()\r\n\r\n#Plotando o gráfico de resposta ao impulso unitário\r\n#baseado na equação da inversa de La Place\r\ney = [ resp_t.subs([(t, i)]) for i in ex]\r\nplt.plot(ex, ey)\r\nplt.show()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.plot", "numpy.arange" ] ]
sean-engelstad/funtofem
[ "1f905089c45f856ec6f34c5401dc3029c1651460" ]
[ "funtofem/mphys/mphys_meld.py" ]
[ "import numpy as np\nimport openmdao.api as om\nfrom mphys import Builder\n\nfrom funtofem import TransferScheme\n\nclass MeldDispXfer(om.ExplicitComponent):\n \"\"\"\n Component to perform displacement transfer using MELD\n \"\"\"\n def initialize(self):\n self.options.declare('xfer_object', recordable=False)\n self.options.declare('struct_ndof')\n self.options.declare('struct_nnodes')\n self.options.declare('aero_nnodes')\n self.options.declare('check_partials')\n\n self.meld = None\n self.initialized_meld = False\n\n self.struct_ndof = None\n self.struct_nnodes = None\n self.aero_nnodes = None\n self.check_partials = False\n\n def setup(self):\n self.meld = self.options['xfer_object']\n\n self.struct_ndof = self.options['struct_ndof']\n self.struct_nnodes = self.options['struct_nnodes']\n self.aero_nnodes = self.options['aero_nnodes']\n self.check_partials= self.options['check_partials']\n\n #self.set_check_partial_options(wrt='*',method='cs',directional=True)\n\n # inputs\n self.add_input('x_struct0', shape_by_conn=True,\n distributed=True,\n desc='initial structural node coordinates',\n tags=['mphys_coordinates'])\n self.add_input('x_aero0', shape_by_conn=True,\n distributed=True,\n desc='initial aero surface node coordinates',\n tags=['mphys_coordinates'])\n self.add_input('u_struct', shape_by_conn=True,\n distributed=True,\n desc='structural node displacements',\n tags=['mphys_coupling'])\n\n # outputs\n self.add_output('u_aero', shape = self.aero_nnodes*3,\n distributed=True,\n val=np.zeros(self.aero_nnodes*3),\n desc='aerodynamic surface displacements',\n tags=['mphys_coupling'])\n\n # partials\n #self.declare_partials('u_aero',['x_struct0','x_aero0','u_struct'])\n\n def compute(self, inputs, outputs):\n x_s0 = np.array(inputs['x_struct0'],dtype=TransferScheme.dtype)\n x_a0 = np.array(inputs['x_aero0'],dtype=TransferScheme.dtype)\n u_a = np.array(outputs['u_aero'],dtype=TransferScheme.dtype)\n\n u_s = np.zeros(self.struct_nnodes*3,dtype=TransferScheme.dtype)\n for i in range(3):\n u_s[i::3] = inputs['u_struct'][i::self.struct_ndof]\n\n self.meld.setStructNodes(x_s0)\n self.meld.setAeroNodes(x_a0)\n\n if not self.initialized_meld:\n self.meld.initialize()\n self.initialized_meld = True\n\n self.meld.transferDisps(u_s,u_a)\n\n outputs['u_aero'] = u_a\n\n def compute_jacvec_product(self, inputs, d_inputs, d_outputs, mode):\n \"\"\"\n The explicit component is defined as:\n u_a = g(u_s,x_a0,x_s0)\n The MELD residual is defined as:\n D = u_a - g(u_s,x_a0,x_s0)\n So explicit partials below for u_a are negative partials of D\n \"\"\"\n if self.check_partials:\n x_s0 = np.array(inputs['x_struct0'],dtype=TransferScheme.dtype)\n x_a0 = np.array(inputs['x_aero0'],dtype=TransferScheme.dtype)\n self.meld.setStructNodes(x_s0)\n self.meld.setAeroNodes(x_a0)\n u_s = np.zeros(self.struct_nnodes*3,dtype=TransferScheme.dtype)\n for i in range(3):\n u_s[i::3] = inputs['u_struct'][i::self.struct_ndof]\n u_a = np.zeros(self.aero_nnodes*3,dtype=TransferScheme.dtype)\n self.meld.transferDisps(u_s,u_a)\n\n if mode == 'fwd':\n if 'u_aero' in d_outputs:\n if 'u_struct' in d_inputs:\n d_in = np.zeros(self.struct_nnodes*3,dtype=TransferScheme.dtype)\n for i in range(3):\n d_in[i::3] = d_inputs['u_struct'][i::self.struct_ndof]\n prod = np.zeros(self.aero_nnodes*3,dtype=TransferScheme.dtype)\n self.meld.applydDduS(d_in,prod)\n d_outputs['u_aero'] -= np.array(prod,dtype=float)\n\n if 'x_aero0' in d_inputs:\n if self.check_partials:\n pass\n else:\n raise ValueError('MELD forward mode requested but not 
implemented')\n\n if 'x_struct0' in d_inputs:\n if self.check_partials:\n pass\n else:\n raise ValueError('MELD forward mode requested but not implemented')\n\n if mode == 'rev':\n if 'u_aero' in d_outputs:\n du_a = np.array(d_outputs['u_aero'],dtype=TransferScheme.dtype)\n if 'u_struct' in d_inputs:\n # du_a/du_s^T * psi = - dD/du_s^T psi\n prod = np.zeros(self.struct_nnodes*3,dtype=TransferScheme.dtype)\n self.meld.applydDduSTrans(du_a,prod)\n for i in range(3):\n d_inputs['u_struct'][i::self.struct_ndof] -= np.array(prod[i::3],dtype=np.float64)\n\n # du_a/dx_a0^T * psi = - psi^T * dD/dx_a0 in F2F terminology\n if 'x_aero0' in d_inputs:\n prod = np.zeros(d_inputs['x_aero0'].size,dtype=TransferScheme.dtype)\n self.meld.applydDdxA0(du_a,prod)\n d_inputs['x_aero0'] -= np.array(prod,dtype=float)\n\n if 'x_struct0' in d_inputs:\n prod = np.zeros(self.struct_nnodes*3,dtype=TransferScheme.dtype)\n self.meld.applydDdxS0(du_a,prod)\n d_inputs['x_struct0'] -= np.array(prod,dtype=float)\n\nclass MeldLoadXfer(om.ExplicitComponent):\n \"\"\"\n Component to perform load transfers using MELD\n \"\"\"\n def initialize(self):\n self.options.declare('xfer_object', recordable=False)\n self.options.declare('struct_ndof')\n self.options.declare('struct_nnodes')\n self.options.declare('aero_nnodes')\n self.options.declare('check_partials')\n\n self.meld = None\n self.initialized_meld = False\n\n self.struct_ndof = None\n self.struct_nnodes = None\n self.aero_nnodes = None\n self.check_partials = False\n\n def setup(self):\n # get the transfer scheme object\n self.meld = self.options['xfer_object']\n\n self.struct_ndof = self.options['struct_ndof']\n self.struct_nnodes = self.options['struct_nnodes']\n self.aero_nnodes = self.options['aero_nnodes']\n self.check_partials= self.options['check_partials']\n\n #self.set_check_partial_options(wrt='*',method='cs',directional=True)\n\n struct_ndof = self.struct_ndof\n struct_nnodes = self.struct_nnodes\n\n # inputs\n self.add_input('x_struct0', shape_by_conn=True,\n distributed=True,\n desc='initial structural node coordinates',\n tags=['mphys_coordinates'])\n self.add_input('x_aero0', shape_by_conn=True,\n distributed=True,\n desc='initial aero surface node coordinates',\n tags=['mphys_coordinates'])\n self.add_input('u_struct', shape_by_conn=True,\n distributed=True,\n desc='structural node displacements',\n tags=['mphys_coupling'])\n self.add_input('f_aero', shape_by_conn=True,\n distributed=True,\n desc='aerodynamic force vector',\n tags=['mphys_coupling'])\n\n # outputs\n self.add_output('f_struct', shape = struct_nnodes*struct_ndof,\n distributed=True,\n desc='structural force vector',\n tags=['mphys_coupling'])\n\n # partials\n #self.declare_partials('f_struct',['x_struct0','x_aero0','u_struct','f_aero'])\n\n def compute(self, inputs, outputs):\n if self.check_partials:\n x_s0 = np.array(inputs['x_struct0'],dtype=TransferScheme.dtype)\n x_a0 = np.array(inputs['x_aero0'],dtype=TransferScheme.dtype)\n self.meld.setStructNodes(x_s0)\n self.meld.setAeroNodes(x_a0)\n f_a = np.array(inputs['f_aero'],dtype=TransferScheme.dtype)\n f_s = np.zeros(self.struct_nnodes*3,dtype=TransferScheme.dtype)\n\n u_s = np.zeros(self.struct_nnodes*3,dtype=TransferScheme.dtype)\n for i in range(3):\n u_s[i::3] = inputs['u_struct'][i::self.struct_ndof]\n u_a = np.zeros(inputs['f_aero'].size,dtype=TransferScheme.dtype)\n self.meld.transferDisps(u_s,u_a)\n\n self.meld.transferLoads(f_a,f_s)\n\n outputs['f_struct'][:] = 0.0\n for i in range(3):\n outputs['f_struct'][i::self.struct_ndof] = 
f_s[i::3]\n\n def compute_jacvec_product(self, inputs, d_inputs, d_outputs, mode):\n \"\"\"\n The explicit component is defined as:\n f_s = g(f_a,u_s,x_a0,x_s0)\n The MELD internal residual is defined as:\n L = f_s - g(f_a,u_s,x_a0,x_s0)\n So explicit partials below for f_s are negative partials of L\n \"\"\"\n if self.check_partials:\n x_s0 = np.array(inputs['x_struct0'],dtype=TransferScheme.dtype)\n x_a0 = np.array(inputs['x_aero0'],dtype=TransferScheme.dtype)\n self.meld.setStructNodes(x_s0)\n self.meld.setAeroNodes(x_a0)\n f_a = np.array(inputs['f_aero'],dtype=TransferScheme.dtype)\n f_s = np.zeros(self.struct_nnodes*3,dtype=TransferScheme.dtype)\n\n u_s = np.zeros(self.struct_nnodes*3,dtype=TransferScheme.dtype)\n for i in range(3):\n u_s[i::3] = inputs['u_struct'][i::self.struct_ndof]\n u_a = np.zeros(inputs['f_aero'].size,dtype=TransferScheme.dtype)\n self.meld.transferDisps(u_s,u_a)\n self.meld.transferLoads(f_a,f_s)\n\n if mode == 'fwd':\n if 'f_struct' in d_outputs:\n if 'u_struct' in d_inputs:\n d_in = np.zeros(self.struct_nnodes*3,dtype=TransferScheme.dtype)\n for i in range(3):\n d_in[i::3] = d_inputs['u_struct'][i::self.struct_ndof]\n prod = np.zeros(self.struct_nnodes*3,dtype=TransferScheme.dtype)\n self.meld.applydLduS(d_in,prod)\n for i in range(3):\n d_outputs['f_struct'][i::self.struct_ndof] -= np.array(prod[i::3],dtype=float)\n\n if 'f_aero' in d_inputs:\n # df_s/df_a psi = - dL/df_a * psi = -dD/du_s^T * psi\n prod = np.zeros(self.struct_nnodes*3,dtype=TransferScheme.dtype)\n df_a = np.array(d_inputs['f_aero'],dtype=TransferScheme.dtype)\n self.meld.applydDduSTrans(df_a,prod)\n for i in range(3):\n d_outputs['f_struct'][i::self.struct_ndof] -= np.array(prod[i::3],dtype=float)\n\n if 'x_aero0' in d_inputs:\n if self.check_partials:\n pass\n else:\n raise ValueError('forward mode requested but not implemented')\n\n if 'x_struct0' in d_inputs:\n if self.check_partials:\n pass\n else:\n raise ValueError('forward mode requested but not implemented')\n\n if mode == 'rev':\n if 'f_struct' in d_outputs:\n d_out = np.zeros(self.struct_nnodes*3,dtype=TransferScheme.dtype)\n for i in range(3):\n d_out[i::3] = d_outputs['f_struct'][i::self.struct_ndof]\n\n if 'u_struct' in d_inputs:\n d_in = np.zeros(self.struct_nnodes*3,dtype=TransferScheme.dtype)\n # df_s/du_s^T * psi = - dL/du_s^T * psi\n self.meld.applydLduSTrans(d_out,d_in)\n\n for i in range(3):\n d_inputs['u_struct'][i::self.struct_ndof] -= np.array(d_in[i::3],dtype=float)\n\n if 'f_aero' in d_inputs:\n # df_s/df_a^T psi = - dL/df_a^T * psi = -dD/du_s * psi\n prod = np.zeros(self.aero_nnodes*3,dtype=TransferScheme.dtype)\n self.meld.applydDduS(d_out,prod)\n d_inputs['f_aero'] -= np.array(prod,dtype=float)\n\n if 'x_aero0' in d_inputs:\n # df_s/dx_a0^T * psi = - psi^T * dL/dx_a0 in F2F terminology\n prod = np.zeros(self.aero_nnodes*3,dtype=TransferScheme.dtype)\n self.meld.applydLdxA0(d_out,prod)\n d_inputs['x_aero0'] -= np.array(prod,dtype=float)\n\n if 'x_struct0' in d_inputs:\n # df_s/dx_s0^T * psi = - psi^T * dL/dx_s0 in F2F terminology\n prod = np.zeros(self.struct_nnodes*3,dtype=TransferScheme.dtype)\n self.meld.applydLdxS0(d_out,prod)\n d_inputs['x_struct0'] -= np.array(prod,dtype=float)\n\nclass MeldBuilder(Builder):\n def __init__(self, aero_builder, struct_builder,\n isym=-1, n=200, beta = 0.5, check_partials=False):\n self.aero_builder = aero_builder\n self.struct_builder = struct_builder\n self.isym = isym\n self.n = n\n self.beta = beta\n self.check_partials = check_partials\n\n def initialize(self, comm):\n 
self.nnodes_aero = self.aero_builder.get_number_of_nodes()\n self.nnodes_struct = self.struct_builder.get_number_of_nodes()\n self.ndof_struct = self.struct_builder.get_ndof()\n\n self.meld = TransferScheme.pyMELD(comm,\n comm, 0,\n comm, 0,\n self.isym, self.n, self.beta)\n\n def get_coupling_group_subsystem(self, scenario_name=None):\n disp_xfer = MeldDispXfer(\n xfer_object=self.meld,\n struct_ndof=self.ndof_struct,\n struct_nnodes=self.nnodes_struct,\n aero_nnodes=self.nnodes_aero,\n check_partials=self.check_partials\n )\n\n load_xfer = MeldLoadXfer(\n xfer_object=self.meld,\n struct_ndof=self.ndof_struct,\n struct_nnodes=self.nnodes_struct,\n aero_nnodes=self.nnodes_aero,\n check_partials=self.check_partials\n )\n\n return disp_xfer, load_xfer\n" ]
[ [ "numpy.array", "numpy.zeros" ] ]
courspython/matplotlib
[ "d03f05332e33167d93f3c67f5a929ddd22dcb8d9" ]
[ "lib/matplotlib/tri/triplot.py" ]
[ "from __future__ import print_function\nfrom matplotlib.cbook import ls_mapper\nfrom matplotlib.patches import PathPatch\nfrom matplotlib.path import Path\nfrom matplotlib.tri.triangulation import Triangulation\nimport numpy as np\n\n\ndef triplot(ax, *args, **kwargs):\n \"\"\"\n Draw a unstructured triangular grid as lines and/or markers.\n\n The triangulation to plot can be specified in one of two ways;\n either::\n\n triplot(triangulation, ...)\n\n where triangulation is a :class:`matplotlib.tri.Triangulation`\n object, or\n\n ::\n\n triplot(x, y, ...)\n triplot(x, y, triangles, ...)\n triplot(x, y, triangles=triangles, ...)\n triplot(x, y, mask=mask, ...)\n triplot(x, y, triangles, mask=mask, ...)\n\n in which case a Triangulation object will be created. See\n :class:`~matplotlib.tri.Triangulation` for a explanation of these\n possibilities.\n\n The remaining args and kwargs are the same as for\n :meth:`~matplotlib.axes.Axes.plot`.\n\n **Example:**\n\n .. plot:: mpl_examples/pylab_examples/triplot_demo.py\n \"\"\"\n import matplotlib.axes\n\n tri, args, kwargs = Triangulation.get_from_args_and_kwargs(*args, **kwargs)\n\n x = tri.x\n y = tri.y\n edges = tri.edges\n\n # If draw both lines and markers at the same time, e.g.\n # ax.plot(x[edges].T, y[edges].T, *args, **kwargs)\n # then the markers are drawn more than once which is incorrect if alpha<1.\n # Hence draw lines and markers separately.\n\n # Decode plot format string, e.g., 'ro-'\n fmt = ''\n if len(args) > 0:\n fmt = args[0]\n linestyle, marker, color = matplotlib.axes._base._process_plot_format(fmt)\n\n # Draw lines without markers, if lines are required.\n if linestyle is not None and linestyle is not 'None':\n kw = kwargs.copy()\n kw.pop('marker', None) # Ignore marker if set.\n kw['linestyle'] = ls_mapper[linestyle]\n kw['edgecolor'] = color\n kw['facecolor'] = None\n\n vertices = np.column_stack((x[edges].flatten(), y[edges].flatten()))\n codes = ([Path.MOVETO] + [Path.LINETO])*len(edges)\n\n path = Path(vertices, codes)\n pathpatch = PathPatch(path, **kw)\n\n ax.add_patch(pathpatch)\n\n # Draw markers without lines.\n # Should avoid drawing markers for points that are not in any triangle?\n kwargs['linestyle'] = ''\n ax.plot(x, y, *args, **kwargs)\n" ]
[ [ "matplotlib.path.Path", "matplotlib.tri.triangulation.Triangulation.get_from_args_and_kwargs", "matplotlib.patches.PathPatch" ] ]
jc-bao/panda-gym
[ "1860bf7459d50fc1a3677d937c984034fd936a7e", "1860bf7459d50fc1a3677d937c984034fd936a7e" ]
[ "examples/tower_test.py", "panda_gym/envs/robots/panda_2.py" ]
[ "import gym\nimport panda_gym\nimport numpy as np\n\ndef policy(obs, timestep):\n goal = obs['desired_goal']\n goal1_pos = goal[:3]\n # goal2_pos = goal[3:]\n obs = obs['observation']\n robot1_obs = obs[:7]\n robot2_obs = obs[7:14]\n task_obs = obs[14:]\n obj1_pos = task_obs[:3]\n # obj2_pos = task_obs[12:15]\n robot1_pos = robot1_obs[:3]\n robot2_pos = robot2_obs[:3]\n delta1 = obj1_pos + [-0.04,0,0.003] - robot1_pos\n delta2 = obj1_pos + [0.04,0,0.003] - robot2_pos\n delta3 = goal1_pos - obj1_pos\n # delta4 = goal2_pos - obj2_pos\n act1, act2 = [0]*4, [0]*4\n if timestep<40:\n # print('reach')\n act1 = np.append(delta1/np.linalg.norm(delta1)*0.3, 1)\n act2 = np.append(delta2/np.linalg.norm(delta2)*0.3, 1)\n if timestep>=40 and timestep < 45:\n # print('pick')\n act1 = np.array([0]*3+[-1])\n act2 = np.array([0]*3+[-1])\n if timestep>=45 and timestep < 90:\n # print('lift')\n act1 = np.append(delta3/np.linalg.norm(delta3)*0.3, -1)\n act2 = np.append(delta2/np.linalg.norm(delta2)*0.3, 1)\n # act1 = np.array([0,0,0.5,-1])\n # act2 = np.array([0,0,0.5,-1])\n if timestep>=90:\n # print('hold')\n act1 = np.array([0,0,0,-1])\n act2 = np.array([0,0,0,-1])\n return np.concatenate((act1, act2))\n # return np.concatenate((act1, [0]*4))\n\nenv = gym.make(\"PandaTowerBimanualParallelFinalRewSubgoal-v2\", render=True)\ntotal_rew = 0\nfor _ in range(10):\n obs = env.reset()\n for t in range(100):\n action = env.action_space.sample()\n action = policy(obs, t)\n obs, reward, done, info = env.step(action)\n total_rew += reward\n # env.render(mode='human')\n print(reward)\nenv.close()", "import numpy as np\nfrom gym import spaces\n\nfrom panda_gym.envs.core import PyBulletRobot\nfrom panda_gym.pybullet import PyBullet\n\n\nclass Panda(PyBulletRobot):\n \"\"\"Panda robot in PyBullet.\n\n Args:\n sim (PyBullet): Simulation instance.\n block_gripper (bool, optional): Whether the gripper is blocked. Defaults to False.\n base_position (np.ndarray, optionnal): Position of the base base of the robot, as (x, y, z). 
 Defaults to (0, 0, 0).\n control_type (str, optional): \"ee\" to control end-effector displacement or \"joints\" to control joint angles.\n Defaults to \"ee\".\n \"\"\"\n\n def __init__(\n self,\n sim: PyBullet,\n block_gripper: bool = False,\n base_position: np.ndarray = np.array([0.0, 0.0, 0.0]),\n base_orientation: np.ndarray=np.array([0,0,0,1]),\n control_type: str = \"ee\",\n index: int = 0,\n max_move_per_step = 0.05, \n noise_obs = False,\n eef_orientation=np.array([1.0, 0., 0., 0.]),\n ) -> None:\n self.noise_obs = noise_obs\n self.max_move_per_step = max_move_per_step\n self.block_gripper = block_gripper\n self.control_type = control_type\n n_action = 3 if self.control_type == \"ee\" else 7 # control (x, y, z) if \"ee\", else, control the 7 joints\n n_action += 0 if self.block_gripper else 1\n action_space = spaces.Box(-1.0, 1.0, shape=(n_action,), dtype=np.float32)\n self.eef_orientation = eef_orientation\n super().__init__(\n sim,\n body_name=\"panda\"+str(index),\n file_name=\"franka_panda/panda.urdf\",\n base_position=base_position,\n base_orientation = base_orientation,\n action_space=action_space,\n joint_indices=np.array([0, 1, 2, 3, 4, 5, 6, 9, 10]),\n joint_forces=np.array([87.0, 87.0, 87.0, 87.0, 12.0, 120.0, 120.0, 170.0, 170.0]),\n )\n\n self.fingers_indices = np.array([9, 10])\n self.neutral_joint_values = np.array([0.00, 0.41, 0.00, -1.85, 0.00, 2.26, 0.79, 0.00, 0.00])\n self.ee_link = 11\n self.sim.set_lateral_friction(self.body_name, self.fingers_indices[0], lateral_friction=1.0)\n self.sim.set_lateral_friction(self.body_name, self.fingers_indices[1], lateral_friction=1.0)\n self.sim.set_spinning_friction(self.body_name, self.fingers_indices[0], spinning_friction=0.001)\n self.sim.set_spinning_friction(self.body_name, self.fingers_indices[1], spinning_friction=0.001)\n\n def set_action(self, action: np.ndarray) -> None:\n action = action.copy() # ensure action doesn't change\n action = np.clip(action, self.action_space.low, self.action_space.high)\n if self.control_type == \"ee\":\n ee_displacement = action[:3]\n target_arm_angles = self.ee_displacement_to_target_arm_angles(ee_displacement)\n else:\n arm_joint_ctrl = action[:7]\n target_arm_angles = self.arm_joint_ctrl_to_target_arm_angles(arm_joint_ctrl)\n\n if self.block_gripper:\n target_fingers_width = 0\n else:\n fingers_ctrl = action[-1] * 0.2 # limit maximum change in position\n fingers_width = self.get_fingers_width()\n target_fingers_width = fingers_width + fingers_ctrl\n target_angles = np.concatenate((target_arm_angles, [target_fingers_width / 2, target_fingers_width / 2]))\n self.control_joints(target_angles=target_angles)\n\n def ee_displacement_to_target_arm_angles(self, ee_displacement: np.ndarray) -> np.ndarray:\n \"\"\"Compute the target arm angles from the end-effector displacement.\n\n Args:\n ee_displacement (np.ndarray): End-effector displacement, as (dx, dy, dz).\n\n Returns:\n np.ndarray: Target arm angles, as the angles of the 7 arm joints.\n \"\"\"\n ee_displacement = ee_displacement[:3] * self.max_move_per_step # limit maximum change in position\n # get the current position and the target position\n ee_position = self.get_ee_position()\n target_ee_position = ee_position + ee_displacement\n # Clip the height target.
 For some reason, it has a great impact on learning\n target_ee_position[2] = np.max((0, target_ee_position[2]))\n # compute the new joint angles\n target_arm_angles = self.inverse_kinematics(\n link=self.ee_link, position=target_ee_position, orientation=self.eef_orientation\n )\n target_arm_angles = target_arm_angles[:7] # remove fingers angles\n return target_arm_angles\n\n def arm_joint_ctrl_to_target_arm_angles(self, arm_joint_ctrl: np.ndarray) -> np.ndarray:\n \"\"\"Compute the target arm angles from the arm joint control.\n\n Args:\n arm_joint_ctrl (np.ndarray): Control of the 7 joints.\n\n Returns:\n np.ndarray: Target arm angles, as the angles of the 7 arm joints.\n \"\"\"\n arm_joint_ctrl = arm_joint_ctrl * 0.05 # limit maximum change in position\n # get the current position and the target position\n current_arm_joint_angles = np.array([self.get_joint_angle(joint=i) for i in range(7)])\n target_arm_angles = current_arm_joint_angles + arm_joint_ctrl\n return target_arm_angles\n\n def get_obs(self) -> np.ndarray:\n # end-effector position and velocity\n ee_position = np.array(self.get_ee_position())\n ee_velocity = np.array(self.get_ee_velocity())\n # fingers opening\n if not self.block_gripper:\n fingers_width = self.get_fingers_width()\n obs = np.concatenate((ee_position, ee_velocity, [fingers_width]))\n else:\n obs = np.concatenate((ee_position, ee_velocity))\n if self.noise_obs:\n obs[:3] += np.random.randn(3)*0.002 # 2mm\n obs[3:6] += np.random.randn(3)*0.0002 # max 0.06\n obs[6] += np.random.randn(1)*0.0008 # max 0.08\n return obs\n\n def reset(self, init_pos=None) -> None:\n self.set_joint_neutral()\n # set initial position\n if isinstance(init_pos, (np.ndarray, np.generic)):\n assert len(init_pos) == 3\n target_arm_angles = self.inverse_kinematics(\n link=self.ee_link, position=init_pos, orientation=self.eef_orientation\n )\n self.set_joint_angles(target_arm_angles)\n # for _ in range(10):\n # target_arm_angles = self.inverse_kinematics(\n # link=self.ee_link, position=init_pos, orientation=self.eef_orientation\n # )\n # self.control_joints(target_angles=target_arm_angles)\n # self.sim.step()\n\n def set_joint_neutral(self) -> None:\n \"\"\"Set the robot to its neutral pose.\"\"\"\n self.set_joint_angles(self.neutral_joint_values)\n\n def get_fingers_width(self) -> float:\n \"\"\"Get the distance between the fingers.\"\"\"\n finger1 = self.sim.get_joint_angle(self.body_name, self.fingers_indices[0])\n finger2 = self.sim.get_joint_angle(self.body_name, self.fingers_indices[1])\n return finger1 + finger2\n\n def get_ee_position(self) -> np.ndarray:\n \"\"\"Returns the position of the end-effector as (x, y, z)\"\"\"\n return self.get_link_position(self.ee_link)\n\n def get_ee_velocity(self) -> np.ndarray:\n \"\"\"Returns the velocity of the end-effector as (vx, vy, vz)\"\"\"\n return self.get_link_velocity(self.ee_link)\n\n def get_ee_orn(self):\n return self.sim.get_link_orientation(self.body_name, self.ee_link)\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.linalg.norm" ], [ "numpy.concatenate", "numpy.max", "numpy.array", "numpy.random.randn", "numpy.clip" ] ]
nikhilaravi/pytorch3d-1
[ "2480723adf1ce8a5cfca5c190f5fba7a48549f75" ]
[ "pytorch3d/renderer/cameras.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n\nimport math\nimport numpy as np\nfrom typing import Tuple, Optional, Sequence\nimport torch\nimport torch.nn.functional as F\n\nfrom pytorch3d.transforms import Rotate, Transform3d, Translate\n\nfrom .utils import TensorProperties, convert_to_tensors_and_broadcast\n\n# Default values for rotation and translation matrices.\nr = np.expand_dims(np.eye(3), axis=0) # (1, 3, 3)\nt = np.expand_dims(np.zeros(3), axis=0) # (1, 3)\n\n\nclass OpenGLPerspectiveCameras(TensorProperties):\n \"\"\"\n A class which stores a batch of parameters to generate a batch of\n projection matrices using the OpenGL convention for a perspective camera.\n\n The extrinsics of the camera (R and T matrices) can also be set in the\n initializer or passed in to `get_full_projection_transform` to get\n the full transformation from world -> screen.\n\n The `transform_points` method calculates the full world -> screen transform\n and then applies it to the input points.\n\n The transforms can also be returned separately as Transform3d objects.\n \"\"\"\n\n def __init__(\n self,\n znear=1.0,\n zfar=100.0,\n aspect_ratio=1.0,\n fov=60.0,\n degrees: bool = True,\n R=r,\n T=t,\n device=\"cpu\",\n ):\n \"\"\"\n __init__(self, znear, zfar, aspect_ratio, fov, degrees, R, T, device) -> None # noqa\n\n Args:\n znear: near clipping plane of the view frustrum.\n zfar: far clipping plane of the view frustrum.\n aspect_ratio: ratio of screen_width/screen_height.\n fov: field of view angle of the camera.\n degrees: bool, set to True if fov is specified in degrees.\n R: Rotation matrix of shape (N, 3, 3)\n T: Translation matrix of shape (N, 3)\n device: torch.device or string\n \"\"\"\n # The initializer formats all inputs to torch tensors and broadcasts\n # all the inputs to have the same batch dimension where necessary.\n super().__init__(\n device=device,\n znear=znear,\n zfar=zfar,\n aspect_ratio=aspect_ratio,\n fov=fov,\n R=R,\n T=T,\n )\n\n # No need to convert to tensor or broadcast.\n self.degrees = degrees\n\n def get_projection_transform(self, **kwargs) -> Transform3d:\n \"\"\"\n Calculate the OpenGL perpective projection matrix with a symmetric\n viewing frustrum. Use column major order.\n\n Args:\n **kwargs: parameters for the projection can be passed in as keyword\n arguments to override the default values set in `__init__`.\n\n Return:\n P: a Transform3d object which represents a batch of projection\n matrices of shape (N, 3, 3)\n\n .. 
 code-block:: python\n\n f1 = -(far + near)/(far-near)\n f2 = -2*far*near/(far-near)\n h1 = (top + bottom)/(top - bottom)\n w1 = (right + left)/(right - left)\n tanhalffov = tan((fov/2))\n s1 = 1/tanhalffov\n s2 = 1/(tanhalffov * (aspect_ratio))\n\n P = [\n [s1, 0, w1, 0],\n [0, s2, h1, 0],\n [0, 0, f1, f2],\n [0, 0, -1, 0],\n ]\n \"\"\"\n znear = kwargs.get(\"znear\", self.znear) # pyre-ignore[16]\n zfar = kwargs.get(\"zfar\", self.zfar) # pyre-ignore[16]\n fov = kwargs.get(\"fov\", self.fov) # pyre-ignore[16]\n # pyre-ignore[16]\n aspect_ratio = kwargs.get(\"aspect_ratio\", self.aspect_ratio)\n degrees = kwargs.get(\"degrees\", self.degrees)\n\n P = torch.zeros(\n (self._N, 4, 4), device=self.device, dtype=torch.float32\n )\n ones = torch.ones((self._N), dtype=torch.float32, device=self.device)\n if degrees:\n fov = (np.pi / 180) * fov\n\n if not torch.is_tensor(fov):\n fov = torch.tensor(fov, device=self.device)\n tanHalfFov = torch.tan((fov / 2))\n top = tanHalfFov * znear\n bottom = -top\n right = top * aspect_ratio\n left = -right\n\n # NOTE: In OpenGL the projection matrix changes the handedness of the\n # coordinate frame. i.e. the NDC space positive z direction is the\n # camera space negative z direction. This is because the sign of the z\n # in the projection matrix is set to -1.0.\n # In pytorch3d we maintain a right handed coordinate system throughout\n # so the z sign is 1.0.\n z_sign = 1.0\n\n P[:, 0, 0] = 2.0 * znear / (right - left)\n P[:, 1, 1] = 2.0 * znear / (top - bottom)\n P[:, 0, 2] = (right + left) / (right - left)\n P[:, 1, 2] = (top + bottom) / (top - bottom)\n P[:, 3, 2] = z_sign * ones\n\n # NOTE: This part of the matrix is for z renormalization in OpenGL\n # which maps the z to [-1, 1]. This won't work yet as the torch3d\n # rasterizer ignores faces which have z < 0.\n # P[:, 2, 2] = z_sign * (far + near) / (far - near)\n # P[:, 2, 3] = -2.0 * far * near / (far - near)\n # P[:, 3, 2] = z_sign * torch.ones((N))\n\n # NOTE: This maps the z coordinate from [0, 1] where z = 0 if the point\n # is at the near clipping plane and z = 1 when the point is at the far\n # clipping plane. This replaces the OpenGL z normalization to [-1, 1]\n # until rasterization is changed to clip at z = -1.\n P[:, 2, 2] = z_sign * zfar / (zfar - znear)\n P[:, 2, 3] = -(zfar * znear) / (zfar - znear)\n\n # OpenGL uses column vectors so need to transpose the projection matrix\n # as torch3d uses row vectors.\n transform = Transform3d(device=self.device)\n transform._matrix = P.transpose(1, 2).contiguous()\n return transform\n\n def clone(self):\n other = OpenGLPerspectiveCameras(device=self.device)\n return super().clone(other)\n\n def get_camera_center(self, **kwargs):\n \"\"\"\n Return the 3D location of the camera optical center\n in the world coordinates.\n\n Args:\n **kwargs: parameters for the camera extrinsics can be passed in\n as keyword arguments to override the default values\n set in __init__.\n\n Setting T here will update the values set in init as this\n value may be needed later on in the rendering pipeline e.g. 
for\n lighting calculations.\n\n Returns:\n C: a batch of 3D locations of shape (N, 3) denoting\n the locations of the center of each camera in the batch.\n \"\"\"\n w2v_trans = self.get_world_to_view_transform(**kwargs)\n P = w2v_trans.inverse().get_matrix()\n # the camera center is the translation component (the first 3 elements\n # of the last row) of the inverted world-to-view\n # transform (4x4 RT matrix)\n C = P[:, 3, :3]\n return C\n\n def get_world_to_view_transform(self, **kwargs) -> Transform3d:\n \"\"\"\n Return the world-to-view transform.\n\n Args:\n **kwargs: parameters for the camera extrinsics can be passed in\n as keyword arguments to override the default values\n set in __init__.\n\n Setting R and T here will update the values set in init as these\n values may be needed later on in the rendering pipeline e.g. for\n lighting calculations.\n\n Returns:\n T: a Transform3d object which represents a batch of transforms\n of shape (N, 3, 3)\n \"\"\"\n self.R = kwargs.get(\"R\", self.R) # pyre-ignore[16]\n self.T = kwargs.get(\"T\", self.T) # pyre-ignore[16]\n world_to_view_transform = get_world_to_view_transform(\n R=self.R, T=self.T\n )\n return world_to_view_transform\n\n def get_full_projection_transform(self, **kwargs) -> Transform3d:\n \"\"\"\n Return the full world-to-screen transform composing the\n world-to-view and view-to-screen transforms.\n\n Args:\n **kwargs: parameters for the projection transforms can be passed in\n as keyword arguments to override the default values\n set in __init__.\n\n Setting R and T here will update the values set in init as these\n values may be needed later on in the rendering pipeline e.g. for\n lighting calculations.\n\n Returns:\n T: a Transform3d object which represents a batch of transforms\n of shape (N, 3, 3)\n \"\"\"\n self.R = kwargs.get(\"R\", self.R) # pyre-ignore[16]\n self.T = kwargs.get(\"T\", self.T) # pyre-ignore[16]\n world_to_view_transform = self.get_world_to_view_transform(\n R=self.R, T=self.T\n )\n view_to_screen_transform = self.get_projection_transform(**kwargs)\n return world_to_view_transform.compose(view_to_screen_transform)\n\n def transform_points(self, points, **kwargs) -> torch.Tensor:\n \"\"\"\n Transform input points from world to screen space.\n\n Args:\n points: torch tensor of shape (..., 3).\n\n Returns\n new_points: transformed points with the same shape as the input.\n \"\"\"\n world_to_screen_transform = self.get_full_projection_transform(**kwargs)\n return world_to_screen_transform.transform_points(points)\n\n\nclass OpenGLOrthographicCameras(TensorProperties):\n \"\"\"\n A class which stores a batch of parameters to generate a batch of\n transformation matrices using the OpenGL convention for orthographic camera.\n \"\"\"\n\n def __init__(\n self,\n znear=1.0,\n zfar=100.0,\n top=1.0,\n bottom=-1.0,\n left=-1.0,\n right=1.0,\n scale_xyz=((1.0, 1.0, 1.0),), # (1, 3)\n R=r,\n T=t,\n device=\"cpu\",\n ):\n \"\"\"\n __init__(self, znear, zfar, top, bottom, left, right, scale_xyz, R, T, device) -> None # noqa\n\n Args:\n znear: near clipping plane of the view frustrum.\n zfar: far clipping plane of the view frustrum.\n top: position of the top of the screen.\n bottom: position of the bottom of the screen.\n left: position of the left of the screen.\n right: position of the right of the screen.\n scale_xyz: scale factors for each axis of shape (N, 3).\n R: Rotation matrix of shape (N, 3, 3).\n T: Translation of shape (N, 3).\n device: torch.device or string.\n\n Only need to set left, right, top, bottom 
for viewing frustrums\n which are non-symmetric about the origin.\n \"\"\"\n # The initializer formats all inputs to torch tensors and broadcasts\n # all the inputs to have the same batch dimension where necessary.\n super().__init__(\n device=device,\n znear=znear,\n zfar=zfar,\n top=top,\n bottom=bottom,\n left=left,\n right=right,\n scale_xyz=scale_xyz,\n R=R,\n T=T,\n )\n\n def get_projection_transform(self, **kwargs) -> Transform3d:\n \"\"\"\n Calculate the OpenGL orthographic projection matrix.\n Use column major order.\n\n Args:\n **kwargs: parameters for the projection can be passed in to\n override the default values set in __init__.\n Return:\n P: a Transform3d object which represents a batch of projection\n matrices of shape (N, 3, 3)\n\n .. code-block:: python\n\n scale_x = 2/(right - left)\n scale_y = 2/(top - bottom)\n scale_z = 2/(far-near)\n mid_x = (right + left)/(right - left)\n mid_y = (top + bottom)/(top - bottom)\n mid_z = (far + near)/(far-near)\n\n P = [\n [scale_x, 0, 0, -mid_x],\n [0, scale_y, 0, -mid_y],\n [0, 0, -scale_z, -mid_z],\n [0, 0, 0, 1],\n ]\n \"\"\"\n znear = kwargs.get(\"znear\", self.znear) # pyre-ignore[16]\n zfar = kwargs.get(\"zfar\", self.zfar) # pyre-ignore[16]\n left = kwargs.get(\"left\", self.left) # pyre-ignore[16]\n right = kwargs.get(\"right\", self.right) # pyre-ignore[16]\n top = kwargs.get(\"top\", self.top) # pyre-ignore[16]\n bottom = kwargs.get(\"bottom\", self.bottom) # pyre-ignore[16]\n scale_xyz = kwargs.get(\"scale_xyz\", self.scale_xyz) # pyre-ignore[16]\n\n P = torch.zeros(\n (self._N, 4, 4), dtype=torch.float32, device=self.device\n )\n ones = torch.ones((self._N), dtype=torch.float32, device=self.device)\n # NOTE: OpenGL flips handedness of coordinate system between camera\n # space and NDC space so z sign is -ve. In PyTorch3D we maintain a\n # right handed coordinate system throughout.\n z_sign = +1.0\n\n P[:, 0, 0] = (2.0 / (right - left)) * scale_xyz[:, 0]\n P[:, 1, 1] = (2.0 / (top - bottom)) * scale_xyz[:, 1]\n P[:, 0, 3] = -(right + left) / (right - left)\n P[:, 1, 3] = -(top + bottom) / (top - bottom)\n P[:, 3, 3] = ones\n\n # NOTE: This maps the z coordinate to the range [0, 1] and replaces\n # the OpenGL z normalization to [-1, 1]\n P[:, 2, 2] = z_sign * (1.0 / (zfar - znear)) * scale_xyz[:, 2]\n P[:, 2, 3] = -znear / (zfar - znear)\n\n # NOTE: This part of the matrix is for z renormalization in OpenGL.\n # The z is mapped to the range [-1, 1] but this won't work yet in\n # pytorch3d as the rasterizer ignores faces which have z < 0.\n # P[:, 2, 2] = z_sign * (2.0 / (far - near)) * scale[:, 2]\n # P[:, 2, 3] = -(far + near) / (far - near)\n\n transform = Transform3d(device=self.device)\n transform._matrix = P.transpose(1, 2).contiguous()\n return transform\n\n def clone(self):\n other = OpenGLOrthographicCameras(device=self.device)\n return super().clone(other)\n\n def get_camera_center(self, **kwargs):\n \"\"\"\n Return the 3D location of the camera optical center\n in the world coordinates.\n\n Args:\n **kwargs: parameters for the camera extrinsics can be passed in\n as keyword arguments to override the default values\n set in __init__.\n\n Setting T here will update the values set in init as this\n value may be needed later on in the rendering pipeline e.g. 
for\n lighting calculations.\n\n\n Returns:\n C: a batch of 3D locations of shape (N, 3) denoting\n the locations of the center of each camera in the batch.\n \"\"\"\n w2v_trans = self.get_world_to_view_transform(**kwargs)\n P = w2v_trans.inverse().get_matrix()\n # The camera center is the translation component (the first 3 elements\n # of the last row) of the inverted world-to-view\n # transform (4x4 RT matrix).\n C = P[:, 3, :3]\n return C\n\n def get_world_to_view_transform(self, **kwargs) -> Transform3d:\n \"\"\"\n Return the world-to-view transform.\n\n Args:\n **kwargs: parameters for the camera extrinsics can be passed in\n as keyword arguments to override the default values\n set in __init__.\n\n Setting R and T here will update the values set in init as these\n values may be needed later on in the rendering pipeline e.g. for\n lighting calculations.\n\n Returns:\n T: a Transform3d object which represents a batch of transforms\n of shape (N, 3, 3)\n \"\"\"\n self.R = kwargs.get(\"R\", self.R) # pyre-ignore[16]\n self.T = kwargs.get(\"T\", self.T) # pyre-ignore[16]\n world_to_view_transform = get_world_to_view_transform(\n R=self.R, T=self.T\n )\n return world_to_view_transform\n\n def get_full_projection_transform(self, **kwargs) -> Transform3d:\n \"\"\"\n Return the full world-to-screen transform composing the\n world-to-view and view-to-screen transforms.\n\n Args:\n **kwargs: parameters for the projection transforms can be passed in\n as keyword arguments to override the default values\n set in `__init__`.\n\n Setting R and T here will update the values set in init as these\n values may be needed later on in the rendering pipeline e.g. for\n lighting calculations.\n\n Returns:\n T: a Transform3d object which represents a batch of transforms\n of shape (N, 3, 3)\n \"\"\"\n self.R = kwargs.get(\"R\", self.R) # pyre-ignore[16]\n self.T = kwargs.get(\"T\", self.T) # pyre-ignore[16]\n world_to_view_transform = self.get_world_to_view_transform(\n R=self.R, T=self.T\n )\n view_to_screen_transform = self.get_projection_transform(**kwargs)\n return world_to_view_transform.compose(view_to_screen_transform)\n\n def transform_points(self, points, **kwargs) -> torch.Tensor:\n \"\"\"\n Transform input points from world to screen space.\n\n Args:\n points: torch tensor of shape (..., 3).\n\n Returns\n new_points: transformed points with the same shape as the input.\n \"\"\"\n world_to_screen_transform = self.get_full_projection_transform(**kwargs)\n return world_to_screen_transform.transform_points(points)\n\n\nclass SfMPerspectiveCameras(TensorProperties):\n \"\"\"\n A class which stores a batch of parameters to generate a batch of\n transformation matrices using the multi-view geometry convention for\n perspective camera.\n \"\"\"\n\n def __init__(\n self,\n focal_length=1.0,\n principal_point=((0.0, 0.0),),\n R=r,\n T=t,\n device=\"cpu\",\n ):\n \"\"\"\n __init__(self, focal_length, principal_point, R, T, device) -> None\n\n Args:\n focal_length: Focal length of the camera in world units.\n A tensor of shape (N, 1) or (N, 2) for\n square and non-square pixels respectively.\n principal_point: xy coordinates of the center of\n the principal point of the camera in pixels.\n A tensor of shape (N, 2).\n R: Rotation matrix of shape (N, 3, 3)\n T: Translation matrix of shape (N, 3)\n device: torch.device or string\n \"\"\"\n # The initializer formats all inputs to torch tensors and broadcasts\n # all the inputs to have the same batch dimension where necessary.\n super().__init__(\n 
device=device,\n focal_length=focal_length,\n principal_point=principal_point,\n R=R,\n T=T,\n )\n\n def get_projection_transform(self, **kwargs) -> Transform3d:\n \"\"\"\n Calculate the projection matrix using the\n multi-view geometry convention.\n\n Args:\n **kwargs: parameters for the projection can be passed in as keyword\n arguments to override the default values set in __init__.\n\n Returns:\n P: a batch of projection matrices of shape (N, 4, 4)\n\n .. code-block:: python\n\n fx = focal_length[:,0]\n fy = focal_length[:,1]\n px = principal_point[:,0]\n py = principal_point[:,1]\n\n P = [\n [fx, 0, 0, px],\n [0, fy, 0, py],\n [0, 0, 0, 1],\n [0, 0, 1, 0],\n ]\n \"\"\"\n # pyre-ignore[16]\n principal_point = kwargs.get(\"principal_point\", self.principal_point)\n # pyre-ignore[16]\n focal_length = kwargs.get(\"focal_length\", self.focal_length)\n\n P = _get_sfm_calibration_matrix(\n self._N, self.device, focal_length, principal_point, False\n )\n\n transform = Transform3d(device=self.device)\n transform._matrix = P.transpose(1, 2).contiguous()\n return transform\n\n def clone(self):\n other = SfMPerspectiveCameras(device=self.device)\n return super().clone(other)\n\n def get_camera_center(self, **kwargs):\n \"\"\"\n Return the 3D location of the camera optical center\n in the world coordinates.\n\n Args:\n **kwargs: parameters for the camera extrinsics can be passed in\n as keyword arguments to override the default values\n set in __init__.\n\n Setting T here will update the values set in init as this\n value may be needed later on in the rendering pipeline e.g. for\n lighting calculations.\n\n Returns:\n C: a batch of 3D locations of shape (N, 3) denoting\n the locations of the center of each camera in the batch.\n \"\"\"\n w2v_trans = self.get_world_to_view_transform(**kwargs)\n P = w2v_trans.inverse().get_matrix()\n # the camera center is the translation component (the first 3 elements\n # of the last row) of the inverted world-to-view\n # transform (4x4 RT matrix)\n C = P[:, 3, :3]\n return C\n\n def get_world_to_view_transform(self, **kwargs) -> Transform3d:\n \"\"\"\n Return the world-to-view transform.\n\n Args:\n **kwargs: parameters for the camera extrinsics can be passed in\n as keyword arguments to override the default values\n set in __init__.\n\n Setting R and T here will update the values set in init as these\n values may be needed later on in the rendering pipeline e.g. for\n lighting calculations.\n\n Returns:\n T: a Transform3d object which represents a batch of transforms\n of shape (N, 3, 3)\n \"\"\"\n self.R = kwargs.get(\"R\", self.R) # pyre-ignore[16]\n self.T = kwargs.get(\"T\", self.T) # pyre-ignore[16]\n world_to_view_transform = get_world_to_view_transform(\n R=self.R, T=self.T\n )\n return world_to_view_transform\n\n def get_full_projection_transform(self, **kwargs) -> Transform3d:\n \"\"\"\n Return the full world-to-screen transform composing the\n world-to-view and view-to-screen transforms.\n\n Args:\n **kwargs: parameters for the projection transforms can be passed in\n as keyword arguments to override the default values\n set in __init__.\n\n Setting R and T here will update the values set in init as these\n values may be needed later on in the rendering pipeline e.g. 
for\n lighting calculations.\n \"\"\"\n self.R = kwargs.get(\"R\", self.R) # pyre-ignore[16]\n self.T = kwargs.get(\"T\", self.T) # pyre-ignore[16]\n world_to_view_transform = self.get_world_to_view_transform(\n R=self.R, T=self.T\n )\n view_to_screen_transform = self.get_projection_transform(**kwargs)\n return world_to_view_transform.compose(view_to_screen_transform)\n\n def transform_points(self, points, **kwargs) -> torch.Tensor:\n \"\"\"\n Transform input points from world to screen space.\n\n Args:\n points: torch tensor of shape (..., 3).\n\n Returns\n new_points: transformed points with the same shape as the input.\n \"\"\"\n world_to_screen_transform = self.get_full_projection_transform(**kwargs)\n return world_to_screen_transform.transform_points(points)\n\n\nclass SfMOrthographicCameras(TensorProperties):\n \"\"\"\n A class which stores a batch of parameters to generate a batch of\n transformation matrices using the multi-view geometry convention for\n orthographic camera.\n \"\"\"\n\n def __init__(\n self,\n focal_length=1.0,\n principal_point=((0.0, 0.0),),\n R=r,\n T=t,\n device=\"cpu\",\n ):\n \"\"\"\n __init__(self, focal_length, principal_point, R, T, device) -> None\n\n Args:\n focal_length: Focal length of the camera in world units.\n A tensor of shape (N, 1) or (N, 2) for\n square and non-square pixels respectively.\n principal_point: xy coordinates of the center of\n the principal point of the camera in pixels.\n A tensor of shape (N, 2).\n R: Rotation matrix of shape (N, 3, 3)\n T: Translation matrix of shape (N, 3)\n device: torch.device or string\n \"\"\"\n # The initializer formats all inputs to torch tensors and broadcasts\n # all the inputs to have the same batch dimension where necessary.\n super().__init__(\n device=device,\n focal_length=focal_length,\n principal_point=principal_point,\n R=R,\n T=T,\n )\n\n def get_projection_transform(self, **kwargs) -> Transform3d:\n \"\"\"\n Calculate the projection matrix using\n the multi-view geometry convention.\n\n Args:\n **kwargs: parameters for the projection can be passed in as keyword\n arguments to override the default values set in __init__.\n\n Return:\n P: a batch of projection matrices of shape (N, 4, 4)\n\n .. code-block:: python\n\n fx = focal_length[:,0]\n fy = focal_length[:,1]\n px = principal_point[:,0]\n py = principal_point[:,1]\n\n P = [\n [fx, 0, 0, px],\n [0, fy, 0, py],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n ]\n \"\"\"\n # pyre-ignore[16]\n principal_point = kwargs.get(\"principal_point\", self.principal_point)\n # pyre-ignore[16]\n focal_length = kwargs.get(\"focal_length\", self.focal_length)\n\n P = _get_sfm_calibration_matrix(\n self._N, self.device, focal_length, principal_point, True\n )\n\n transform = Transform3d(device=self.device)\n transform._matrix = P.transpose(1, 2).contiguous()\n return transform\n\n def clone(self):\n other = SfMOrthographicCameras(device=self.device)\n return super().clone(other)\n\n def get_camera_center(self, **kwargs):\n \"\"\"\n Return the 3D location of the camera optical center\n in the world coordinates.\n\n Args:\n **kwargs: parameters for the camera extrinsics can be passed in\n as keyword arguments to override the default values\n set in __init__.\n\n Setting T here will update the values set in init as this\n value may be needed later on in the rendering pipeline e.g. 
for\n lighting calculations.\n\n Returns:\n C: a batch of 3D locations of shape (N, 3) denoting\n the locations of the center of each camera in the batch.\n \"\"\"\n w2v_trans = self.get_world_to_view_transform(**kwargs)\n P = w2v_trans.inverse().get_matrix()\n # the camera center is the translation component (the first 3 elements\n # of the last row) of the inverted world-to-view\n # transform (4x4 RT matrix)\n C = P[:, 3, :3]\n return C\n\n def get_world_to_view_transform(self, **kwargs) -> Transform3d:\n \"\"\"\n Return the world-to-view transform.\n\n Args:\n **kwargs: parameters for the camera extrinsics can be passed in\n as keyword arguments to override the default values\n set in __init__.\n\n Setting R and T here will update the values set in init as these\n values may be needed later on in the rendering pipeline e.g. for\n lighting calculations.\n\n Returns:\n T: a Transform3d object which represents a batch of transforms\n of shape (N, 3, 3)\n \"\"\"\n self.R = kwargs.get(\"R\", self.R) # pyre-ignore[16]\n self.T = kwargs.get(\"T\", self.T) # pyre-ignore[16]\n world_to_view_transform = get_world_to_view_transform(\n R=self.R, T=self.T\n )\n return world_to_view_transform\n\n def get_full_projection_transform(self, **kwargs) -> Transform3d:\n \"\"\"\n Return the full world-to-screen transform composing the\n world-to-view and view-to-screen transforms.\n\n Args:\n **kwargs: parameters for the projection transforms can be passed in\n as keyword arguments to override the default values\n set in `__init__`.\n\n Setting R and T here will update the values set in init as these\n values may be needed later on in the rendering pipeline e.g. for\n lighting calculations.\n \"\"\"\n self.R = kwargs.get(\"R\", self.R) # pyre-ignore[16]\n self.T = kwargs.get(\"T\", self.T) # pyre-ignore[16]\n world_to_view_transform = self.get_world_to_view_transform(\n R=self.R, T=self.T\n )\n view_to_screen_transform = self.get_projection_transform(**kwargs)\n return world_to_view_transform.compose(view_to_screen_transform)\n\n def transform_points(self, points, **kwargs) -> torch.Tensor:\n \"\"\"\n Transform input points from world to screen space.\n\n Args:\n points: torch tensor of shape (..., 3).\n\n Returns\n new_points: transformed points with the same shape as the input.\n \"\"\"\n world_to_screen_transform = self.get_full_projection_transform(**kwargs)\n return world_to_screen_transform.transform_points(points)\n\n\n# SfMCameras helper\ndef _get_sfm_calibration_matrix(\n N, device, focal_length, principal_point, orthographic: bool\n) -> torch.Tensor:\n \"\"\"\n Returns a calibration matrix of a perspective/orthographic camera.\n\n Args:\n N: Number of cameras.\n focal_length: Focal length of the camera in world units.\n principal_point: xy coordinates of the center of\n the principal point of the camera in pixels.\n\n The calibration matrix `K` is set up as follows:\n\n .. 
code-block:: python\n\n fx = focal_length[:,0]\n fy = focal_length[:,1]\n px = principal_point[:,0]\n py = principal_point[:,1]\n\n for orthographic==True:\n K = [\n [fx, 0, 0, px],\n [0, fy, 0, py],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n ]\n else:\n K = [\n [fx, 0, 0, px],\n [0, fy, 0, py],\n [0, 0, 0, 1],\n [0, 0, 1, 0],\n ]\n\n Returns:\n A calibration matrix `K` of the SfM-conventioned camera\n of shape (N, 4, 4).\n \"\"\"\n\n if not torch.is_tensor(focal_length):\n focal_length = torch.tensor(focal_length, device=device)\n\n if len(focal_length.shape) in (0, 1) or focal_length.shape[1] == 1:\n fx = fy = focal_length\n else:\n fx, fy = focal_length.unbind(1)\n\n if not torch.is_tensor(principal_point):\n principal_point = torch.tensor(principal_point, device=device)\n\n px, py = principal_point.unbind(1)\n\n K = fx.new_zeros(N, 4, 4)\n K[:, 0, 0] = fx\n K[:, 1, 1] = fy\n K[:, 0, 3] = px\n K[:, 1, 3] = py\n if orthographic:\n K[:, 2, 2] = 1.0\n K[:, 3, 3] = 1.0\n else:\n K[:, 3, 2] = 1.0\n K[:, 2, 3] = 1.0\n\n return K\n\n\n################################################\n# Helper functions for world to view transforms\n################################################\n\n\ndef get_world_to_view_transform(R=r, T=t) -> Transform3d:\n \"\"\"\n This function returns a Transform3d representing the transformation\n matrix to go from world space to view space by applying a rotation and\n a translation.\n\n Pytorch3d uses the same convention as Hartley & Zisserman.\n I.e., for camera extrinsic parameters R (rotation) and T (translation),\n we map a 3D point `X_world` in world coordinates to\n a point `X_cam` in camera coordinates with:\n `X_cam = X_world R + T`\n\n Args:\n R: (N, 3, 3) matrix representing the rotation.\n T: (N, 3) matrix representing the translation.\n\n Returns:\n a Transform3d object which represents the composed RT transformation.\n\n \"\"\"\n # TODO: also support the case where RT is specified as one matrix\n # of shape (N, 4, 4).\n\n if T.shape[0] != R.shape[0]:\n msg = \"Expected R, T to have the same batch dimension; got %r, %r\"\n raise ValueError(msg % (R.shape[0], T.shape[0]))\n if T.dim() != 2 or T.shape[1:] != (3,):\n msg = \"Expected T to have shape (N, 3); got %r\"\n raise ValueError(msg % repr(T.shape))\n if R.dim() != 3 or R.shape[1:] != (3, 3):\n msg = \"Expected R to have shape (N, 3, 3); got %r\"\n raise ValueError(msg % repr(R.shape))\n\n # Create a Transform3d object\n T = Translate(T, device=T.device)\n R = Rotate(R, device=R.device)\n return R.compose(T)\n\n\ndef camera_position_from_spherical_angles(\n distance, elevation, azimuth, degrees: bool = True, device: str = \"cpu\"\n) -> torch.Tensor:\n \"\"\"\n Calculate the location of the camera based on the distance away from\n the target point, the elevation and azimuth angles.\n\n Args:\n distance: distance of the camera from the object.\n elevation, azimuth: angles.\n The inputs distance, elevation and azimuth can be one of the following\n - Python scalar\n - Torch scalar\n - Torch tensor of shape (N) or (1)\n degrees: bool, whether the angles are specified in degrees or radians.\n device: str or torch.device, device for new tensors to be placed on.\n\n The vectors are broadcast against each other so they all have shape (N, 1).\n\n Returns:\n camera_position: (N, 3) xyz location of the camera.\n \"\"\"\n broadcasted_args = convert_to_tensors_and_broadcast(\n distance, elevation, azimuth, device=device\n )\n dist, elev, azim = broadcasted_args\n if degrees:\n elev = math.pi / 180.0 * elev\n azim = math.pi / 
180.0 * azim\n x = dist * torch.cos(elev) * torch.sin(azim)\n y = dist * torch.sin(elev)\n z = dist * torch.cos(elev) * torch.cos(azim)\n camera_position = torch.stack([x, y, z], dim=1)\n if camera_position.dim() == 0:\n camera_position = camera_position.view(1, -1) # add batch dim.\n return camera_position.view(-1, 3)\n\n\ndef look_at_rotation(\n camera_position, at=((0, 0, 0),), up=((0, 1, 0),), device: str = \"cpu\"\n) -> torch.Tensor:\n \"\"\"\n This function takes a vector 'camera_position' which specifies the location\n of the camera in world coordinates and two vectors `at` and `up` which\n indicate the position of the object and the up directions of the world\n coordinate system respectively. The object is assumed to be centered at\n the origin.\n\n The output is a rotation matrix representing the transformation\n from world coordinates -> view coordinates.\n\n Args:\n camera_position: position of the camera in world coordinates\n at: position of the object in world coordinates\n up: vector specifying the up direction in the world coordinate frame.\n\n The inputs camera_position, at and up can each be a\n - 3 element tuple/list\n - torch tensor of shape (1, 3)\n - torch tensor of shape (N, 3)\n\n The vectors are broadcast against each other so they all have shape (N, 3).\n\n Returns:\n R: (N, 3, 3) batched rotation matrices\n \"\"\"\n # Format input and broadcast\n broadcasted_args = convert_to_tensors_and_broadcast(\n camera_position, at, up, device=device\n )\n camera_position, at, up = broadcasted_args\n for t, n in zip([camera_position, at, up], [\"camera_position\", \"at\", \"up\"]):\n if t.shape[-1] != 3:\n msg = \"Expected arg %s to have shape (N, 3); got %r\"\n raise ValueError(msg % (n, t.shape))\n z_axis = F.normalize(at - camera_position, eps=1e-5)\n x_axis = F.normalize(torch.cross(up, z_axis), eps=1e-5)\n y_axis = F.normalize(torch.cross(z_axis, x_axis), eps=1e-5)\n R = torch.cat(\n (x_axis[:, None, :], y_axis[:, None, :], z_axis[:, None, :]), dim=1\n )\n return R.transpose(1, 2)\n\n\ndef look_at_view_transform(\n dist=1.0,\n elev=0.0,\n azim=0.0,\n degrees: bool = True,\n eye: Optional[Sequence] = None,\n at=((0, 0, 0),), # (1, 3)\n up=((0, 1, 0),), # (1, 3)\n device=\"cpu\",\n) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n This function returns a rotation and translation matrix\n to apply the 'Look At' transformation from world -> view coordinates [0].\n\n Args:\n dist: distance of the camera from the object\n elev: angle in degrees or radians. This is the angle between the\n vector from the object to the camera, and the horizontal plane.\n azim: angle in degrees or radians. The vector from the object to\n the camera is projected onto a horizontal plane y = z = 0.\n azim is the angle between the projected vector and a\n reference vector at (1, 0, 0) on the reference plane.\n dist, elev and azim can be of shape (1), (N).\n degrees: boolean flag to indicate if the elevation and azimuth\n angles are specified in degrees or radians.\n eye: the position of the camera(s) in world coordinates.
If eye is not\n None, it will override the camera position derived from dist, elev, azim.\n up: the up direction of the world coordinate system.\n at: the position of the object(s) in world coordinates.\n eye, up and at can be of shape (1, 3) or (N, 3).\n\n Returns:\n 2-element tuple containing\n\n - **R**: the rotation to apply to the points to align with the camera.\n - **T**: the translation to apply to the points to align with the camera.\n\n References:\n [0] https://www.scratchapixel.com\n \"\"\"\n\n if eye is not None:\n broadcasted_args = convert_to_tensors_and_broadcast(\n eye, at, up, device=device)\n eye, at, up = broadcasted_args\n C = eye\n else:\n broadcasted_args = convert_to_tensors_and_broadcast(\n dist, elev, azim, at, up, device=device)\n dist, elev, azim, at, up = broadcasted_args\n C = camera_position_from_spherical_angles(\n dist, elev, azim, degrees=degrees, device=device)\n\n R = look_at_rotation(C, at, up, device=device)\n T = -torch.bmm(R.transpose(1, 2), C[:, :, None])[:, :, 0]\n return R, T\n" ]
[ [ "torch.zeros", "torch.nn.functional.normalize", "torch.cat", "torch.cos", "torch.stack", "numpy.zeros", "torch.tan", "torch.is_tensor", "torch.sin", "numpy.eye", "torch.ones", "torch.tensor", "torch.cross" ] ]
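A minimal usage sketch of the camera helpers defined in the file above. The viewpoint numbers are illustrative, and the sketch assumes these helpers are importable from pytorch3d.renderer.cameras (where this file lives in released PyTorch3D) and that Transform3d exposes transform_points, as it does in those releases:

import torch
from pytorch3d.renderer.cameras import look_at_view_transform, get_world_to_view_transform

# Camera 2 units from the origin, elevated 30 degrees, rotated 45 degrees in azimuth.
R, T = look_at_view_transform(dist=2.0, elev=30.0, azim=45.0)  # R: (1, 3, 3), T: (1, 3)
world_to_view = get_world_to_view_transform(R=R, T=T)
X_world = torch.tensor([[[0.0, 0.0, 0.0]]])  # one point at the world origin
X_cam = world_to_view.transform_points(X_world)  # applies X_cam = X_world R + T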
Childhoo/Chen_Matcher
[ "ca89a4774a083d10177186020c35f60c3e8b7b37" ]
[ "examples/hesaffnet/hesaffnet.py" ]
[ "#!/usr/bin/python2 -utt\n# -*- coding: utf-8 -*-\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport sys\nimport os\nimport time\n\nfrom PIL import Image\nfrom torch.autograd import Variable\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nfrom tqdm import tqdm\nimport math\nimport torch.nn.functional as F\n\nfrom copy import deepcopy\n\nfrom SparseImgRepresenter import ScaleSpaceAffinePatchExtractor\nfrom LAF import denormalizeLAFs, LAFs2ell, abc2A\nfrom Utils import line_prepender\nfrom architectures import AffNetFast\nUSE_CUDA = False\nth = 28.41 # default threshold for HessianAffine \nth = -1\ntry:\n input_img_fname = sys.argv[1]\n output_fname = sys.argv[2]\n nfeats = int(sys.argv[3])\nexcept:\n print(\"Wrong input format. Try python hesaffnet.py imgs/cat.png cat.txt 2000\")\n sys.exit(1)\n\nimg = Image.open(input_img_fname).convert('RGB')\nimg = np.mean(np.array(img), axis = 2)\n\nvar_image = torch.autograd.Variable(torch.from_numpy(img.astype(np.float32)), volatile = True)\nvar_image_reshape = var_image.view(1, 1, var_image.size(0),var_image.size(1))\n\n\nAffNetPix = AffNetFast(PS = 32)\nweightd_fname = '../../pretrained/AffNet.pth'\n\ncheckpoint = torch.load(weightd_fname)\nAffNetPix.load_state_dict(checkpoint['state_dict'])\n\nAffNetPix.eval()\n \nHA = ScaleSpaceAffinePatchExtractor( mrSize = 5.192, num_features = nfeats, border = 5, num_Baum_iters = 1, th = th, AffNet = AffNetPix)\nif USE_CUDA:\n HA = HA.cuda()\n var_image_reshape = var_image_reshape.cuda()\nwith torch.no_grad():\n LAFs, resp = HA(var_image_reshape)\nells = LAFs2ell(LAFs.data.cpu().numpy())\n\nnp.savetxt(output_fname, ells, delimiter=' ', fmt='%10.10f')\nline_prepender(output_fname, str(len(ells)))\nline_prepender(output_fname, '1.0')\n" ]
[ [ "numpy.array", "numpy.savetxt", "torch.no_grad", "torch.load" ] ]
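The hesaffnet.py script above is a command-line tool; its own usage hint documents the expected invocation (image path, output path, and feature count below are illustrative):

python hesaffnet.py imgs/cat.png cat.txt 2000

It writes the detected affine regions as ellipse parameters via np.savetxt, then uses line_prepender to place the detection count and a '1.0' format line at the top of the output file.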
GreenAIproject/ICT4S22
[ "e1af70be3240c3532b8c46889cfdb9ac4dbdaa21" ]
[ "src/energy_experiment.py" ]
[ "from text_classification import generate_model, model_validation\nfrom modify_dataset import modify_dataset_and_raw_data_with_percentage_size_to_keep\nfrom modify_dataset import modify_dataset_select_features\n\nfrom sklearn.svm import SVC\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.naive_bayes import ComplementNB\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, BaggingClassifier\nfrom sklearn.linear_model import LogisticRegression\n\nfrom joblib import load\nfrom text_preprocessing import _load_data\nfrom codecarbon import EmissionsTracker\nimport csv\nimport time\nimport random\n\nfrom click import progressbar\nimport click\n\nRESULTS_FILE = 'results.csv'\nRESULTS_HEADER = [\n 'algorithm',\n 'RQ',\n 'experiment_id',\n 'iteration',\n 'no_datapoints',\n 'no_features',\n 'preprocessing_energy(J)',\n 'preprocessing_time(s)',\n 'train_energy(J)',\n 'train_time(s)',\n 'predict_energy(J)',\n 'predict_time(s)',\n 'datatype',\n 'accuracy',\n 'precision',\n 'recall',\n 'f1',\n]\n\nresults = []\n\nraw_data = _load_data()\npreprocessed_data = load('output/preprocessed_data.joblib')\n\n\nNUMBER_OF_EXPERIMENTAL_RUNS = 30\nSLEEP_TIME = 5\nCLASSIFIERS = {\n 'SVM': SVC(class_weight=\"balanced\"),\n 'Decision Tree': DecisionTreeClassifier(),\n 'Naive Bayes': ComplementNB(),\n 'KNN': KNeighborsClassifier(),\n 'Random Forest': RandomForestClassifier(class_weight=\"balanced\"),\n 'AdaBoost': AdaBoostClassifier(),\n 'Bagging Classifier': BaggingClassifier()\n}\n\ndataset_size_percentages = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]\nfeatureset_size_percentages = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]\n\ndef energy_stats(energy_consumption_kwh, energy_tracker):\n \"\"\"Extract and compute energy metrics from codecarbon Energy Tracker.\n IMPORTANT: this function should be called right after stopping the tracker.\n \"\"\"\n energy_consumption_joules = energy_consumption_kwh * 1000 * 3600 #Joules\n duration = energy_tracker._last_measured_time - energy_tracker._start_time\n return energy_consumption_joules, duration\n\ndef write_header(filename):\n with open(filename, mode='w') as results_file:\n result_writer = csv.writer(results_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n result_writer.writerow(RESULTS_HEADER)\n\ndef write_result(result, filename):\n with open(filename, mode='a') as results_file:\n result_writer = csv.writer(results_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n result_writer.writerow(result)\n\n\ndef run_experiment(RQ, iteration, experiment_id, classifier_name, dataset_percentage, featureset_percentage):\n classifier = CLASSIFIERS[classifier_name]\n \n print(f\"Starting Experiment: {experiment_id},\"\n f\"\\n research question {RQ},\"\n f\"\\n iteration {iteration},\"\n f\"\\n classifier {classifier},\"\n f\"\\n dataset_percentage {dataset_percentage},\"\n f\"\\n featureset_percentage {featureset_percentage}\"\n )\n time.sleep(SLEEP_TIME)\n\n preprocessing_tracker = EmissionsTracker(save_to_file=False)\n #### START TIMED PREPROCESSING SECTION ####\n preprocessing_tracker.start()\n # Danger: dataset_percentage\n modified_preprocessed_data, modified_raw_data = modify_dataset_and_raw_data_with_percentage_size_to_keep(\n preprocessed_data, raw_data, dataset_percentage)\n\n # feature selection\n modified_preprocessed_data, modified_raw_data = modify_dataset_select_features(\n modified_preprocessed_data, modified_raw_data, featureset_percentage\n )\n 
preprocessing_energy_consumption_kwh = preprocessing_tracker.stop()\n #### STOP TIMED PREPROCESSING SECTION ####\n preprocessing_energy_consumption, preprocessing_duration = energy_stats(preprocessing_energy_consumption_kwh,\n preprocessing_tracker)\n\n training_tracker = EmissionsTracker(save_to_file=False)\n #### START TIMED TRAINING SECTION ####\n training_tracker.start()\n classifier, X_train, X_test, y_train, y_test, test_messages = generate_model(classifier, modified_raw_data,\n modified_preprocessed_data)\n training_energy_consumption_kwh = training_tracker.stop()\n #### STOP TIMED TRAINING SECTION ####\n training_energy_consumption, training_duration = energy_stats(training_energy_consumption_kwh, training_tracker)\n\n predict_tracker = EmissionsTracker(save_to_file=False)\n #### START TIMED PREDICTION SECTION ####\n predict_tracker.start()\n _, scores, report = model_validation(classifier, X_test, y_test)\n print (scores)\n print (report)\n predict_energy_consumption_kwh = predict_tracker.stop()\n #### STOP TIMED PREDICTION SECTION ####\n predict_energy_consumption, predict_duration = energy_stats(predict_energy_consumption_kwh, predict_tracker)\n\n number_of_datapoints = len(y_train)\n number_of_features = X_train.shape[1]\n\n print(f\"Experiment ID {experiment_id}\")\n print(f\"Run {iteration}\")\n print(f\" Energy Consumption: {training_energy_consumption} Joules\")\n print(f\" Duration: {training_duration} seconds\")\n\n result_row = [\n classifier_name,\n RQ,\n experiment_id,\n iteration,\n number_of_datapoints,\n number_of_features,\n preprocessing_energy_consumption,\n preprocessing_duration,\n training_energy_consumption,\n training_duration,\n predict_energy_consumption,\n predict_duration,\n \"float64\", #datatype\n scores['accuracy'],\n scores['precision'],\n scores['recall'],\n scores['f1'],\n ]\n results.append(result_row)\n write_result(result_row, RESULTS_FILE)\n\ndef collect_previous_experiments(filename):\n try:\n with open(filename, mode='r') as results_file:\n result_reader = csv.DictReader(results_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n return list(result_reader)\n except FileNotFoundError:\n return None\n\ndef _compute_experiment_hash(exp):\n return f\"{exp['experiment_id']}_{exp['iteration']}\"\n\ndef run_experiment_batch(experiments):\n previous_experiments = collect_previous_experiments(RESULTS_FILE)\n if previous_experiments:\n previous_ids = [_compute_experiment_hash(exp) for exp in previous_experiments]\n print(f\"There were {len(previous_ids)} experiments in {RESULTS_FILE}.\"\n f\" Skipping ids {previous_ids}.\")\n experiments = [exp for exp in experiments if _compute_experiment_hash(exp) not in previous_ids]\n else:\n write_header('results.csv')\n print(f\"Remaining experiments: {len(experiments)}.\")\n random.shuffle(experiments)\n with progressbar(experiments) as bar:\n for experiment in bar:\n print(\"\\n\")\n run_experiment(**experiment)\n\ndef fibonacci(n):\n if n<= 0:\n print(\"Incorrect input\")\n # First Fibonacci number is 0\n elif n == 1:\n return 0\n # Second Fibonacci number is 1\n elif n == 2:\n return 1\n else:\n return fibonacci(n-1)+fibonacci(n-2)\n\n# default values\nfeatureset_percentage = 100\ndataset_percentage = 100\n# initial values\nexperiment_id=0\nexperiments = []\n\n# run classification experiment\nfor classifier_name in CLASSIFIERS.keys():\n RQ=\"2.1\"\n for dataset_percentage in dataset_size_percentages:\n experiment_id += 1\n for iteration in range(NUMBER_OF_EXPERIMENTAL_RUNS):\n 
experiments.append({\n \"RQ\": RQ,\n \"iteration\": iteration,\n \"experiment_id\": experiment_id,\n \"classifier_name\": classifier_name,\n \"dataset_percentage\": dataset_percentage,\n \"featureset_percentage\": featureset_percentage,\n })\n \n dataset_percentage = 100\n\n RQ=\"2.2\"\n for featureset_percentage in featureset_size_percentages:\n experiment_id += 1\n for iteration in range(NUMBER_OF_EXPERIMENTAL_RUNS):\n experiments.append({\n \"RQ\": RQ,\n \"iteration\": iteration,\n \"experiment_id\": experiment_id,\n \"classifier_name\": classifier_name,\n \"dataset_percentage\": dataset_percentage,\n \"featureset_percentage\": featureset_percentage,\n })\n featureset_percentage = 100\n\nfibonacci(35)\nrun_experiment_batch(experiments)\n" ]
[ [ "sklearn.ensemble.BaggingClassifier", "sklearn.ensemble.AdaBoostClassifier", "sklearn.naive_bayes.ComplementNB", "sklearn.ensemble.RandomForestClassifier", "sklearn.neighbors.KNeighborsClassifier", "sklearn.svm.SVC", "sklearn.tree.DecisionTreeClassifier" ] ]
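The kWh-to-Joules conversion inside energy_stats above is plain unit arithmetic (1 kWh = 1000 W x 3600 s = 3.6e6 J); a standalone sketch with a made-up tracker reading:

energy_consumption_kwh = 0.002  # hypothetical value, treated as kWh as in the script above
energy_consumption_joules = energy_consumption_kwh * 1000 * 3600
print(energy_consumption_joules)  # 7200.0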
ssrivastava836/visualizing-the-weather-data-set
[ "0114300f0240533c863d9c04ea0e8c5f895b4df5" ]
[ "code.py" ]
[ "# --------------\n# --------------\r\n# Import the required Libraries\r\nfrom matplotlib import pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\nimport calendar\r\nimport seaborn as sns\r\nimport warnings\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\n\r\n# Generate a line chart that visualizes the readings in the months\r\n\r\ndef line_chart(df,period,col):\r\n \"\"\" A line chart that visualizes the readings in the months\r\n This function accepts the dataframe df ,period(day/month/year) and col(feature), which plots the aggregated value of the feature based on the periods. Ensure the period labels are properly named.\r\n Keyword arguments:\r\n df - Pandas dataframe which has the data.\r\n period - Period of time over which you want to aggregate the data\r\n col - Feature of the dataframe\r\n \"\"\"\r\n if period == \"Month\":\r\n data = df.groupby(df.index.month).mean()\r\n elif period == \"Day\":\r\n data = df.groupby(df.index.day).mean()\r\n elif period == \"Year\":\r\n data = df.groupby(df.index.year).mean()\r\n\r\n calendar_months = calendar.month_name[1:]\r\n x_series = calendar_months\r\n y_series = data[col]\r\n\r\n plt.plot(x_series, y_series)\r\n plt.title('Temperature Trend, 2012')\r\n plt.xlabel(period)\r\n plt.xticks(rotation=90)\r\n plt.ylabel(col)\r\n\r\n plt.show()\r\n\r\n\r\n\r\n# Function to perform univariate analysis of categorical columns\r\ndef plot_categorical_columns(df):\r\n \"\"\" Univariate analysis of categorical columns\r\n This function accepts the dataframe df which analyzes all the variable in the data and performs the univariate analysis using bar plot.\r\n Keyword arguments:\r\n df - Pandas dataframe which has the data.\r\n \"\"\"\r\n categorical_columns = df.select_dtypes(include=['object']).columns\r\n\r\n for i in range(0,len(categorical_columns),2):\r\n if len(categorical_columns) > i+1:\r\n\r\n plt.figure(figsize=(10,4))\r\n plt.subplot(121)\r\n df[categorical_columns[i]].value_counts(normalize=True).plot(kind='bar')\r\n plt.title(categorical_columns[i])\r\n plt.subplot(122) \r\n df[categorical_columns[i+1]].value_counts(normalize=True).plot(kind='bar')\r\n plt.title(categorical_columns[i+1])\r\n plt.tight_layout()\r\n plt.show()\r\n\r\n else:\r\n df[categorical_columns[i]].value_counts(normalize=True).plot(kind='bar')\r\n plt.title(categorical_columns[i])\r\n plt.show()\r\n\r\n\r\n\r\n# Function to plot continous plots\r\ndef plot_cont(df,plt_typ):\r\n \"\"\" Univariate analysis of Numerical columns\r\n This function accepts the dataframe df, plt_type(boxplot/distplot) which analyzes all the variable in the data and performs the univariate analysis using boxplot or distplot plot.\r\n Keyword arguments:\r\n df - Pandas dataframe which has the data.\r\n plt_type - type of plot through which you want to visualize the data\r\n \"\"\"\r\n numeric_columns = df.select_dtypes(include=['number']).columns.tolist()\r\n df = df[numeric_columns]\r\n\r\n for i in range(0,len(numeric_columns),2):\r\n if len(numeric_columns) > i+1:\r\n plt.figure(figsize=(10,4))\r\n plt.subplot(121)\r\n\r\n if plt_typ == \"boxplot\": \r\n sns.boxplot(df[numeric_columns[i]])\r\n plt.subplot(122) \r\n sns.boxplot(df[numeric_columns[i+1]])\r\n elif plt_typ == \"distplot\":\r\n sns.distplot(df[numeric_columns[i]])\r\n plt.subplot(122) \r\n sns.distplot(df[numeric_columns[i+1]]) \r\n else:\r\n print(\"Pass either distplot/boxplot\")\r\n\r\n plt.tight_layout()\r\n plt.show()\r\n\r\n\r\n# Function to plot grouped values based on the feature\r\ndef 
group_values(df,col1,agg1,col2):\r\n \"\"\" Aggregate values by grouping\r\n This function accepts a dataframe, 2 columns (features) and an aggregate function (agg1); it groups the dataframe by the given column and plots a bar plot.\r\n Keyword arguments:\r\n df - Pandas dataframe which has the data.\r\n col1 - Feature of the dataframe on which values will be aggregated.\r\n agg1 - Name of the aggregate function to apply ('mean', 'max' or 'min').\r\n col2 - Feature of the dataframe to be plotted against the grouped data.\r\n Returns:\r\n grouping - Dataframe with all columns on which it is grouped.\r\n \"\"\"\r\n aggregate = {'mean':np.mean,'max':np.max,'min':np.min}\r\n grouping = df.groupby(col1).agg(aggregate[agg1])\r\n plt.figure(figsize=(10,4))\r\n plt.ylabel(col2)\r\n grouping[col2].plot(kind=\"bar\")\r\n plt.show()\r\n\r\n\r\n# Read the Data and pass the parameter as parse_dates=True, index_col='Date/Time'\r\nweather_df = pd.read_csv(path, parse_dates=True, index_col='Date/Time')\r\nprint(weather_df.head(5))\r\nprint(weather_df.shape)\r\n\r\n\r\n# Let's try to generate a line chart that visualizes the temperature readings in the months.\r\n# Call the function line_chart() with the appropriate parameters.\r\nline_chart(weather_df,\"Month\",\"Temp (C)\") \r\n\r\n\r\n# Now let's perform the univariate analysis of categorical features.\r\n# Call the function \"plot_categorical_columns()\" with appropriate parameters.\r\nplot_categorical_columns(weather_df)\r\n\r\n\r\n# Let's plot the Univariate analysis of Numerical columns.\r\n# Call the function \"plot_cont()\" with the appropriate parameters to plot distplot\r\nplot_cont(weather_df,\"distplot\")\r\n\r\n\r\n# Call the function \"plot_cont()\" with the appropriate parameters to plot boxplot\r\nplot_cont(weather_df,\"boxplot\")\r\n\r\n# Group the data by Weather and plot the graph of the mean visibility during different weathers. Call the function group_values to plot the graph.\r\n# Feel free to try different features and aggregate functions like max, min.\r\ngroup_values(weather_df,\"Weather\",\"mean\",\"Visibility (km)\")\r\n\n\n\n" ]
[ [ "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.title", "matplotlib.pyplot.figure", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show", "pandas.read_csv", "matplotlib.pyplot.xticks", "matplotlib.pyplot.subplot" ] ]
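Because group_values above resolves its aggregation through an internal {'mean', 'max', 'min'} dictionary, the final call in the script generalizes directly; two hypothetical variations on the same weather dataframe:

group_values(weather_df, "Weather", "max", "Temp (C)")         # hottest reading per weather type
group_values(weather_df, "Weather", "min", "Visibility (km)")  # worst visibility per weather type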
nkjulia/magenta
[ "063d320d59276a15afa0f8a3a8d386ad74594070", "063d320d59276a15afa0f8a3a8d386ad74594070", "063d320d59276a15afa0f8a3a8d386ad74594070" ]
[ "magenta/models/improv_rnn/improv_rnn_config_flags.py", "magenta/models/onsets_frames_transcription/audio_label_data_utils_test.py", "magenta/models/onsets_frames_transcription/melspec_input.py" ]
[ "# Copyright 2021 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Provides a class, defaults, and utils for improv RNN model configuration.\"\"\"\n\nfrom magenta.models.improv_rnn import improv_rnn_model\nimport tensorflow.compat.v1 as tf\n\nFLAGS = tf.app.flags.FLAGS\ntf.app.flags.DEFINE_string(\n 'config',\n None,\n \"Which config to use. Must be one of 'basic_improv', 'attention_improv', \"\n \"or 'chord_pitches_improv'.\")\ntf.app.flags.DEFINE_string(\n 'generator_id',\n None,\n 'A unique ID for the generator, overriding the default.')\ntf.app.flags.DEFINE_string(\n 'generator_description',\n None,\n 'A description of the generator, overriding the default.')\ntf.app.flags.DEFINE_string(\n 'hparams', '',\n 'Comma-separated list of `name=value` pairs. For each pair, the value of '\n 'the hyperparameter named `name` is set to `value`. This mapping is merged '\n 'with the default hyperparameters.')\n\n\nclass ImprovRnnConfigError(Exception):\n pass\n\n\ndef config_from_flags():\n \"\"\"Parses flags and returns the appropriate ImprovRnnConfig.\n\n Returns:\n The appropriate ImprovRnnConfig based on the supplied flags.\n\n Raises:\n ImprovRnnConfigError: When an invalid config is supplied.\n \"\"\"\n if FLAGS.config not in improv_rnn_model.default_configs:\n raise ImprovRnnConfigError(\n '`--config` must be one of %s. Got %s.' 
% (\n improv_rnn_model.default_configs.keys(), FLAGS.config))\n config = improv_rnn_model.default_configs[FLAGS.config]\n config.hparams.parse(FLAGS.hparams)\n if FLAGS.generator_id is not None:\n config.details.id = FLAGS.generator_id\n if FLAGS.generator_description is not None:\n config.details.description = FLAGS.generator_description\n return config\n", "# Copyright 2021 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Test for audio_label_data_utils.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom magenta.models.onsets_frames_transcription import audio_label_data_utils\n\nfrom note_seq import audio_io\nfrom note_seq import constants\nfrom note_seq import testing_lib\nfrom note_seq.protobuf import music_pb2\n\nimport numpy as np\nimport tensorflow.compat.v1 as tf\n\nSAMPLE_RATE = 16000\n\n\nclass SplitAudioTest(tf.test.TestCase):\n\n def _CreateSyntheticSequence(self):\n seq = music_pb2.NoteSequence(total_time=10)\n testing_lib.add_track_to_sequence(seq, 0, [(50, 20, 0, 5)])\n testing_lib.add_track_to_sequence(seq, 0, [(50, 80, 5, 10)])\n return seq\n\n def _CreateSyntheticExample(self):\n sequence = self._CreateSyntheticSequence()\n wav_samples = np.zeros(9 * SAMPLE_RATE, np.float32)\n wav_data = audio_io.samples_to_wav_data(wav_samples, SAMPLE_RATE)\n return wav_data, sequence\n\n def testSplitAudioLabelData(self):\n wav_data, sequence = self._CreateSyntheticExample()\n records = audio_label_data_utils.process_record(\n wav_data, sequence, 'test', sample_rate=SAMPLE_RATE)\n\n for record in records:\n audio = record.features.feature['audio'].bytes_list.value[0]\n velocity_range = music_pb2.VelocityRange.FromString(\n record.features.feature['velocity_range'].bytes_list.value[0])\n note_sequence = music_pb2.NoteSequence.FromString(\n record.features.feature['sequence'].bytes_list.value[0])\n\n expected_samples = np.zeros(10 * SAMPLE_RATE)\n np.testing.assert_array_equal(\n expected_samples,\n audio_io.wav_data_to_samples(audio, sample_rate=SAMPLE_RATE))\n self.assertEqual(velocity_range.min, 20)\n self.assertEqual(velocity_range.max, 80)\n self.assertEqual(note_sequence.notes[0].velocity, 20)\n self.assertEqual(note_sequence.notes[0].end_time, 5.)\n self.assertEqual(note_sequence.notes[1].velocity, 80)\n self.assertEqual(note_sequence.notes[1].end_time, 10.)\n\n def testSplitMidi(self):\n sequence = music_pb2.NoteSequence()\n sequence.notes.add(pitch=60, start_time=1.0, end_time=2.9)\n sequence.notes.add(pitch=60, start_time=8.0, end_time=11.0)\n sequence.notes.add(pitch=60, start_time=14.0, end_time=17.0)\n sequence.notes.add(pitch=60, start_time=20.0, end_time=23.0)\n sequence.total_time = 25.\n\n sample_rate = 160\n samples = np.zeros(sample_rate * int(sequence.total_time))\n splits = audio_label_data_utils.find_split_points(\n sequence, samples, sample_rate, 0, 3)\n\n self.assertEqual(splits, [0., 3., 6., 9., 12., 15., 18., 21., 24., 25.])\n\n samples[int(8.5 * 
sample_rate)] = 1\n samples[int(8.5 * sample_rate) + 1] = -1\n splits = audio_label_data_utils.find_split_points(\n sequence, samples, sample_rate, 0, 3)\n\n self.assertEqual(splits, [\n 0.0, 3.0, 6.0, 8.50625, 11.50625, 14.50625, 17.50625, 20.50625,\n 23.50625, 25.\n ])\n\n\nclass MixSequencesTest(tf.test.TestCase):\n\n def testMixSequences(self):\n sample_rate = 10\n\n sequence1 = music_pb2.NoteSequence()\n sequence1.notes.add(pitch=60, start_time=0.5, end_time=1.0, velocity=90)\n sequence1.notes.add(pitch=62, start_time=1.0, end_time=2.0, velocity=90)\n sequence1.total_time = 2.0\n\n samples1 = np.linspace(0, 1, int(sample_rate * sequence1.total_time))\n\n sequence2 = music_pb2.NoteSequence()\n sequence2.notes.add(pitch=64, start_time=0.5, end_time=1.0, velocity=90)\n sequence2.total_time = 1.0\n\n samples2 = np.linspace(0, 1, int(sample_rate * sequence2.total_time))\n\n mixed_samples, mixed_sequence = audio_label_data_utils.mix_sequences(\n [samples1, samples2], sample_rate, [sequence1, sequence2])\n\n expected_sequence = music_pb2.NoteSequence()\n expected_sequence.ticks_per_quarter = constants.STANDARD_PPQ\n expected_sequence.notes.add(\n pitch=60, start_time=0.5, end_time=1.0, velocity=127)\n expected_sequence.notes.add(\n pitch=62, start_time=1.0, end_time=2.0, velocity=127)\n expected_sequence.notes.add(\n pitch=64, start_time=0.5, end_time=1.0, velocity=127)\n expected_sequence.notes.add(\n pitch=64, start_time=1.5, end_time=2.0, velocity=127)\n expected_sequence.total_time = 2.0\n\n self.assertProtoEquals(expected_sequence, mixed_sequence)\n\n expected_samples = np.concatenate([samples2, samples2]) * .5 + samples1 * .5\n np.testing.assert_array_equal(expected_samples, mixed_samples)\n\n def testMixSequencesLongerNoteSequence(self):\n sample_rate = 10\n\n sequence1 = music_pb2.NoteSequence()\n sequence1.notes.add(pitch=60, start_time=0.5, end_time=1.0, velocity=90)\n sequence1.notes.add(pitch=62, start_time=1.0, end_time=2.0, velocity=90)\n sequence1.total_time = 2.0\n\n # samples1 will be .1 seconds shorter than sequence1\n samples1 = np.linspace(0, 1, int(sample_rate * (sequence1.total_time - .1)))\n\n sequence2 = music_pb2.NoteSequence()\n sequence2.notes.add(pitch=64, start_time=0.5, end_time=1.0, velocity=90)\n sequence2.total_time = 1.0\n\n samples2 = np.linspace(0, 1, int(sample_rate * sequence2.total_time))\n\n mixed_samples, mixed_sequence = audio_label_data_utils.mix_sequences(\n [samples1, samples2], sample_rate, [sequence1, sequence2])\n\n expected_sequence = music_pb2.NoteSequence()\n expected_sequence.ticks_per_quarter = constants.STANDARD_PPQ\n expected_sequence.notes.add(\n pitch=60, start_time=0.5, end_time=1.0, velocity=127)\n expected_sequence.notes.add(\n pitch=62, start_time=1.0, end_time=2.0, velocity=127)\n expected_sequence.notes.add(\n pitch=64, start_time=0.5, end_time=1.0, velocity=127)\n expected_sequence.notes.add(\n pitch=64, start_time=1.5, end_time=2.0, velocity=127)\n expected_sequence.total_time = 2.0\n\n self.assertProtoEquals(expected_sequence, mixed_sequence)\n\n # We expect samples1 to have 2 samples of padding and samples2 to be\n # repeated 1 time fully and once with a single sample.\n expected_samples = (\n np.concatenate([samples2, samples2, [samples2[0]]]) * .5 +\n np.concatenate([samples1, [0, 0]]) * .5)\n np.testing.assert_array_equal(expected_samples, mixed_samples)\n\n def testMixSequencesWithSustain(self):\n sample_rate = 10\n\n sequence1 = music_pb2.NoteSequence()\n sequence1.notes.add(pitch=60, start_time=0.5, end_time=0.6, 
velocity=90)\n sequence1.notes.add(pitch=62, start_time=1.0, end_time=2.0, velocity=90)\n sequence1.total_time = 2.0\n testing_lib.add_control_changes_to_sequence(\n sequence1, 0, [(0.0, 64, 127), (1.0, 64, 0)])\n\n samples1 = np.linspace(0, 1, int(sample_rate * sequence1.total_time))\n\n sequence2 = music_pb2.NoteSequence()\n sequence2.notes.add(pitch=64, start_time=0.5, end_time=0.6, velocity=90)\n sequence2.total_time = 1.0\n testing_lib.add_control_changes_to_sequence(\n sequence2, 0, [(0.0, 64, 127), (0.9, 64, 0)])\n\n samples2 = np.linspace(0, 1, int(sample_rate * sequence2.total_time))\n\n mixed_samples, mixed_sequence = audio_label_data_utils.mix_sequences(\n [samples1, samples2], sample_rate, [sequence1, sequence2])\n\n expected_sequence = music_pb2.NoteSequence()\n expected_sequence.ticks_per_quarter = constants.STANDARD_PPQ\n expected_sequence.notes.add(\n pitch=60, start_time=0.5, end_time=1.0, velocity=127)\n expected_sequence.notes.add(\n pitch=62, start_time=1.0, end_time=2.0, velocity=127)\n expected_sequence.notes.add(\n pitch=64, start_time=0.5, end_time=0.9, velocity=127)\n expected_sequence.notes.add(\n pitch=64, start_time=1.5, end_time=1.9, velocity=127)\n expected_sequence.total_time = 2.0\n\n self.assertProtoEquals(expected_sequence, mixed_sequence)\n\n expected_samples = np.concatenate([samples2, samples2]) * .5 + samples1 * .5\n np.testing.assert_array_equal(expected_samples, mixed_samples)\n\n def testMixSequencesTotalTime(self):\n sample_rate = 10\n\n sequence1 = music_pb2.NoteSequence()\n sequence1.notes.add(pitch=60, start_time=0.5, end_time=1.0, velocity=90)\n sequence1.notes.add(pitch=62, start_time=1.0, end_time=1.5, velocity=90)\n sequence1.total_time = 1.5\n\n samples1 = np.linspace(0, 1, int(sample_rate * 2))\n\n sequence2 = music_pb2.NoteSequence()\n sequence2.notes.add(pitch=64, start_time=0.5, end_time=0.9, velocity=90)\n sequence2.total_time = 0.9\n\n samples2 = np.linspace(0, 1, int(sample_rate * 1))\n\n mixed_samples, mixed_sequence = audio_label_data_utils.mix_sequences(\n [samples1, samples2], sample_rate, [sequence1, sequence2])\n\n expected_sequence = music_pb2.NoteSequence()\n expected_sequence.ticks_per_quarter = constants.STANDARD_PPQ\n expected_sequence.notes.add(\n pitch=60, start_time=0.5, end_time=1.0, velocity=127)\n expected_sequence.notes.add(\n pitch=62, start_time=1.0, end_time=1.5, velocity=127)\n expected_sequence.notes.add(\n pitch=64, start_time=0.5, end_time=0.9, velocity=127)\n expected_sequence.notes.add(\n pitch=64, start_time=1.5, end_time=1.9, velocity=127)\n\n # Expected time is 1.9 because the sequences are repeated according to the\n # length of their associated audio. So sequence1 is not repeated at all\n # (audio is 2 seconds) and sequence2 is repeated once after shifting all the\n # notes by the audio length of 1 second. 
The final total_time is left as is\n # after the last repeat, so it ends up being 1 + .9 seconds.\n expected_sequence.total_time = 1.9\n\n self.assertProtoEquals(expected_sequence, mixed_sequence)\n\n expected_samples = np.concatenate([samples2, samples2]) * .5 + samples1 * .5\n np.testing.assert_array_equal(expected_samples, mixed_samples)\n\n def testMixSequencesNormalize(self):\n sample_rate = 10\n\n sequence1 = music_pb2.NoteSequence()\n sequence1.notes.add(pitch=60, start_time=0.5, end_time=1.0, velocity=32)\n sequence1.notes.add(pitch=62, start_time=1.0, end_time=2.0, velocity=64)\n sequence1.total_time = 2.0\n\n samples1 = np.linspace(0, .5, int(sample_rate * sequence1.total_time))\n\n sequence2 = music_pb2.NoteSequence()\n sequence2.notes.add(pitch=64, start_time=0.5, end_time=1.0, velocity=90)\n sequence2.total_time = 1.0\n\n samples2 = np.linspace(0, 1, int(sample_rate * sequence2.total_time))\n\n mixed_samples, mixed_sequence = audio_label_data_utils.mix_sequences(\n [samples1, samples2], sample_rate, [sequence1, sequence2])\n\n expected_sequence = music_pb2.NoteSequence()\n expected_sequence.ticks_per_quarter = constants.STANDARD_PPQ\n expected_sequence.notes.add(\n pitch=60, start_time=0.5, end_time=1.0, velocity=63)\n expected_sequence.notes.add(\n pitch=62, start_time=1.0, end_time=2.0, velocity=127)\n expected_sequence.notes.add(\n pitch=64, start_time=0.5, end_time=1.0, velocity=127)\n expected_sequence.notes.add(\n pitch=64, start_time=1.5, end_time=2.0, velocity=127)\n expected_sequence.total_time = 2.0\n\n self.assertProtoEquals(expected_sequence, mixed_sequence)\n\n samples1_normalized = samples1 * 2 # previous max was .5\n expected_samples = (np.concatenate([samples2, samples2]) * .5 +\n samples1_normalized * .5)\n np.testing.assert_array_equal(expected_samples, mixed_samples)\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2021 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Create TF graphs for calculating log-mel-spectral features.\n\nNOTE: This code is very experimental and will likely change, both in interface\nand what it outputs.\n\nThe single published method is build_mel_calculation_graph, which\nwill assemble a TF graph from a provided waveform input vector\nthrough to a (num_frames, frame_width, num_mel_bins) tensor of log-\ntransformed mel spectrogram patches, suitable for feeding the input\nto a typical classifier. All the mel calculation parameters\nare available as options, but default to their standard values\n(e.g. frame_width=96, frame_hop=10). The input waveform can have\nsize (None,), meaning it will be specified at run-time.\n\nwith tflite_compatible=True, the returned graph is constructed only\nfrom tflite-compatible ops (i.e., it uses matmul for the DFT, and\nexplicitly unrolled framing). 
In this case, the input waveform tensor\nmust have an explicit size at graph-building time.\n\"\"\"\n\nimport fractions\n\nimport math\n\nfrom magenta.models.onsets_frames_transcription import mfcc_mel\nimport numpy as np\nimport tensorflow.compat.v1 as tf\n\n\ndef _stft_magnitude_full_tf(waveform_input, window_length_samples,\n hop_length_samples, fft_length):\n \"\"\"Calculate STFT magnitude (spectrogram) using tf.signal ops.\"\"\"\n stft_magnitude = tf.abs(\n tf.signal.stft(\n waveform_input,\n frame_length=window_length_samples,\n frame_step=hop_length_samples,\n fft_length=fft_length),\n name='magnitude_spectrogram')\n return stft_magnitude\n\n\ndef _dft_matrix(dft_length):\n \"\"\"Calculate the full DFT matrix in numpy.\"\"\"\n omega = (0 + 1j) * 2.0 * np.pi / float(dft_length)\n # Don't include 1/sqrt(N) scaling, tf.signal.rfft doesn't apply it.\n return np.exp(omega * np.outer(np.arange(dft_length), np.arange(dft_length)))\n\n\ndef _naive_rdft(signal_tensor, fft_length):\n \"\"\"Implement real-input Fourier Transform by matmul.\"\"\"\n # We are right-multiplying by the DFT matrix, and we are keeping\n # only the first half (\"positive frequencies\").\n # So discard the second half of rows, but transpose the array for\n # right-multiplication.\n # The DFT matrix is symmetric, so we could have done it more\n # directly, but this reflects our intention better.\n complex_dft_matrix_kept_values = _dft_matrix(fft_length)[:(\n fft_length // 2 + 1), :].transpose()\n real_dft_tensor = tf.constant(\n np.real(complex_dft_matrix_kept_values).astype(np.float32),\n name='real_dft_matrix')\n imag_dft_tensor = tf.constant(\n np.imag(complex_dft_matrix_kept_values).astype(np.float32),\n name='imaginary_dft_matrix')\n signal_frame_length = int(signal_tensor.shape[-1])\n half_pad = (fft_length - signal_frame_length) // 2\n pad_values = tf.concat([\n tf.zeros([tf.rank(signal_tensor) - 1, 2], tf.int32),\n [[half_pad, fft_length - signal_frame_length - half_pad]]\n ],\n axis=0)\n padded_signal = tf.pad(signal_tensor, pad_values)\n result_real_part = tf.matmul(padded_signal, real_dft_tensor)\n result_imag_part = tf.matmul(padded_signal, imag_dft_tensor)\n return result_real_part, result_imag_part\n\n\ndef _fixed_frame(signal, frame_length, frame_step, first_axis=False):\n \"\"\"tflite-compatible tf.signal.frame for fixed-size input.\n\n Args:\n signal: Tensor containing signal(s).\n frame_length: Number of samples to put in each frame.\n frame_step: Sample advance between successive frames.\n first_axis: If true, framing is applied to first axis of tensor; otherwise,\n it is applied to last axis.\n\n Returns:\n A new tensor where the last axis (or first, if first_axis) of input\n signal has been replaced by a (num_frames, frame_length) array of individual\n frames where each frame is drawn frame_step samples after the previous one.\n\n Raises:\n ValueError: if signal has an undefined axis length. 
This routine only\n supports framing of signals whose shape is fixed at graph-build time.\n \"\"\"\n signal_shape = signal.shape.as_list()\n if first_axis:\n length_samples = signal_shape[0]\n else:\n length_samples = signal_shape[-1]\n if length_samples <= 0:\n raise ValueError('fixed framing requires predefined constant signal length')\n num_frames = max(0, 1 + (length_samples - frame_length) // frame_step)\n if first_axis:\n inner_dimensions = signal_shape[1:]\n result_shape = [num_frames, frame_length] + inner_dimensions\n gather_axis = 0\n else:\n outer_dimensions = signal_shape[:-1]\n result_shape = outer_dimensions + [num_frames, frame_length]\n # Currently tflite's gather only supports axis==0, but that may still\n # work if we want the last of 1 axes.\n gather_axis = len(outer_dimensions)\n\n subframe_length = fractions.gcd(frame_length, frame_step) # pylint: disable=deprecated-method\n subframes_per_frame = frame_length // subframe_length\n subframes_per_hop = frame_step // subframe_length\n num_subframes = length_samples // subframe_length\n\n if first_axis:\n trimmed_input_size = [num_subframes * subframe_length] + inner_dimensions\n subframe_shape = [num_subframes, subframe_length] + inner_dimensions\n else:\n trimmed_input_size = outer_dimensions + [num_subframes * subframe_length]\n subframe_shape = outer_dimensions + [num_subframes, subframe_length]\n subframes = tf.reshape(\n tf.slice(\n signal,\n begin=np.zeros(len(signal_shape), np.int32),\n size=trimmed_input_size), subframe_shape)\n\n # frame_selector is a [num_frames, subframes_per_frame] tensor\n # that indexes into the appropriate frame in subframes. For example:\n # [[0, 0, 0, 0], [2, 2, 2, 2], [4, 4, 4, 4]]\n frame_selector = np.reshape(\n np.arange(num_frames) * subframes_per_hop, [num_frames, 1])\n\n # subframe_selector is a [num_frames, subframes_per_frame] tensor\n # that indexes into the appropriate subframe within a frame. For example:\n # [[0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3]]\n subframe_selector = np.reshape(\n np.arange(subframes_per_frame), [1, subframes_per_frame])\n\n # Adding the 2 selector tensors together produces a [num_frames,\n # subframes_per_frame] tensor of indices to use with tf.gather to select\n # subframes from subframes. We then reshape the inner-most subframes_per_frame\n # dimension to stitch the subframes together into frames. For example:\n # [[0, 1, 2, 3], [2, 3, 4, 5], [4, 5, 6, 7]].\n selector = frame_selector + subframe_selector\n frames = tf.reshape(\n tf.gather(subframes, selector.astype(np.int32), axis=gather_axis),\n result_shape)\n return frames\n\n\ndef _stft_tflite(signal, frame_length, frame_step, fft_length):\n \"\"\"tflite-compatible implementation of tf.signal.stft.\n\n Compute the short-time Fourier transform of a 1D input while avoiding tf ops\n that are not currently supported in tflite (Rfft, Range, SplitV).\n fft_length must be fixed. 
A Hann window of length frame_length is always\n applied.\n\n Since fixed (precomputed) framing must be used, signal.shape[-1] must be a\n specific value (so \"?\"/None is not supported).\n\n Args:\n signal: 1D tensor containing the time-domain waveform to be transformed.\n frame_length: int, the number of points in each Fourier frame.\n frame_step: int, the number of samples to advance between successive frames.\n fft_length: int, the size of the Fourier transform to apply.\n\n Returns:\n Two (num_frames, fft_length) tensors containing the real and imaginary parts\n of the short-time Fourier transform of the input signal.\n \"\"\"\n # Make the window be shape (1, frame_length) instead of just frame_length\n # in an effort to help the tflite broadcast logic.\n window = tf.reshape(\n tf.constant(\n (0.5 - 0.5 * np.cos(2 * np.pi * np.arange(0, 1.0, 1.0 / frame_length))\n ).astype(np.float32),\n name='window'), [1, frame_length])\n framed_signal = _fixed_frame(\n signal, frame_length, frame_step, first_axis=False)\n framed_signal *= window\n real_spectrogram, imag_spectrogram = _naive_rdft(framed_signal, fft_length)\n return real_spectrogram, imag_spectrogram\n\n\ndef _stft_magnitude_tflite(waveform_input, window_length_samples,\n hop_length_samples, fft_length):\n \"\"\"Calculate spectrogram avoiding tflite incompatible ops.\"\"\"\n real_stft, imag_stft = _stft_tflite(\n waveform_input,\n frame_length=window_length_samples,\n frame_step=hop_length_samples,\n fft_length=fft_length)\n stft_magnitude = tf.sqrt(\n tf.add(real_stft * real_stft, imag_stft * imag_stft),\n name='magnitude_spectrogram')\n return stft_magnitude\n\n\ndef build_mel_calculation_graph(waveform_input,\n sample_rate=16000,\n window_length_seconds=0.025,\n hop_length_seconds=0.010,\n num_mel_bins=64,\n lower_edge_hz=125.0,\n upper_edge_hz=7500.0,\n frame_width=96,\n frame_hop=10,\n tflite_compatible=False):\n \"\"\"Build a TF graph to go from waveform to mel spectrum patches.\n\n Args:\n waveform_input: 1D Tensor which will be filled with 16 kHz waveform as\n tf.float32.\n sample_rate: Scalar giving the sampling rate of the waveform. Only 16 kHz\n is acceptable at present.\n window_length_seconds: Duration of window used for each Fourier transform.\n hop_length_seconds: Time shift between successive analysis time frames.\n num_mel_bins: The number of mel frequency bins to calculate.\n lower_edge_hz: Frequency boundary at bottom edge of mel mapping.\n upper_edge_hz: Frequency boundary at top edge of mel mapping.\n frame_width: The number of successive time frames to include in each patch.\n frame_hop: The frame advance between successive patches.\n tflite_compatible: Avoid ops not currently supported in tflite.\n\n Returns:\n Tensor holding [num_patches, frame_width, num_mel_bins] log-mel-spectrogram\n patches.\n \"\"\"\n # `waveform_input` is a [?]
vector as a tensor.\n # `magnitude_spectrogram` is a [?, fft_length/2 + 1] tensor of spectrograms.\n # Derive the dependent parameters.\n window_length_samples = int(round(window_length_seconds * sample_rate))\n hop_length_samples = int(round(hop_length_seconds * sample_rate))\n fft_length = 2**int(\n math.ceil(math.log(window_length_samples) / math.log(2.0)))\n if tflite_compatible:\n magnitude_spectrogram = _stft_magnitude_tflite(\n waveform_input, window_length_samples, hop_length_samples, fft_length)\n else:\n magnitude_spectrogram = _stft_magnitude_full_tf(\n waveform_input, window_length_samples, hop_length_samples, fft_length)\n\n # Warp the linear-scale, magnitude spectrograms into the mel-scale.\n num_spectrogram_bins = int(magnitude_spectrogram.shape[-1])\n if tflite_compatible:\n linear_to_mel_weight_matrix = tf.constant(\n mfcc_mel.SpectrogramToMelMatrix(num_mel_bins, num_spectrogram_bins,\n sample_rate, lower_edge_hz,\n upper_edge_hz).astype(np.float32),\n name='linear_to_mel_matrix')\n else:\n # In full tf, the mel weight matrix is calculated at run time within the\n # TF graph. This avoids including a matrix of 64 x 256 float values (i.e.,\n # 100 kB or more, depending on the representation) in the exported graph.\n linear_to_mel_weight_matrix = tf.signal.linear_to_mel_weight_matrix(\n num_mel_bins, num_spectrogram_bins, sample_rate, lower_edge_hz,\n upper_edge_hz)\n\n mel_spectrogram = tf.matmul(\n magnitude_spectrogram,\n linear_to_mel_weight_matrix,\n name='mel_spectrogram')\n log_offset = 0.001\n log_mel_spectrogram = tf.log(\n mel_spectrogram + log_offset, name='log_mel_spectrogram')\n # log_mel_spectrogram is a [?, num_mel_bins] gram.\n if tflite_compatible:\n features = _fixed_frame(\n log_mel_spectrogram,\n frame_length=frame_width,\n frame_step=frame_hop,\n first_axis=True)\n else:\n features = tf.signal.frame(\n log_mel_spectrogram,\n frame_length=frame_width,\n frame_step=frame_hop,\n axis=0)\n # features is [num_patches, frame_width, num_mel_bins].\n return features\n" ]
[ [ "tensorflow.compat.v1.app.flags.DEFINE_string" ], [ "numpy.concatenate", "numpy.testing.assert_array_equal", "tensorflow.compat.v1.test.main", "numpy.zeros" ], [ "tensorflow.compat.v1.signal.linear_to_mel_weight_matrix", "tensorflow.compat.v1.pad", "tensorflow.compat.v1.add", "tensorflow.compat.v1.log", "tensorflow.compat.v1.rank", "tensorflow.compat.v1.signal.stft", "numpy.real", "tensorflow.compat.v1.matmul", "tensorflow.compat.v1.signal.frame", "numpy.arange", "numpy.imag" ] ]
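A minimal sketch of driving build_mel_calculation_graph from melspec_input.py above. The fixed 16000-sample (1 s at 16 kHz) input length is an assumption for illustration; a static shape is required because tflite_compatible=True relies on precomputed framing:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # placeholders need TF1 graph mode

waveform = tf.placeholder(tf.float32, shape=[16000])  # 1 s of 16 kHz audio (hypothetical length)
patches = build_mel_calculation_graph(waveform, tflite_compatible=True)
# patches: [num_patches, frame_width=96, num_mel_bins=64]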
AndreasWunsch/CNN_KarstSpringModeling
[ "9239f5ac6920cdc82956289d9b480ed8558908ea" ]
[ "Example Code/heatmaps_calculation.py" ]
[ "# -*- coding: utf-8 -*-.\n\n\"\"\"\nDOI of the corresponding publication [preprint]:\nhttps://doi.org/10.5194/hess-2021-403\n\nContact: andreas.wunsch@kit.edu\nORCID: 0000-0002-0585-9549\n\nhttps://github.com/AndreasWunsch/CNN_KarstSpringModeling/\nMIT License\n\nlarge parts of the code from Sam Anderson (https://github.com/andersonsam/cnn_lstm_era)\nsee also: Anderson & Radic (2021): Evaluation and interpretation of convolutional-recurrent networks for regional hydrological modelling\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import StandardScaler\nimport tensorflow as tf\nimport pickle\nfrom random import seed\nimport os\nfrom bayes_opt import BayesianOptimization\nfrom bayes_opt.util import load_logs\nfrom scipy import interpolate\n\n\n#%% functions\ndef bayesOpt_function():\n # just a placeholder needed to load json logs\n return\n\n#%% \ndef make_heat(model, x_test, y_test, style_dict, timesteps, iters_total, iters_one_pass, verbose, tol, p_channel, batch_size):\n \"\"\"\n Function by Sam Anderson (2021), slightly modified by Andreas Wunsch (2021).\n \n model: \n keras model\n x_test:\n tf tensor; test set of ERA data, input to model (shape = Ntest x 365 x height x width x channels)\n y_test:\n tf tensor; test set of streamflow data, target output of model (shape = Ntest x 1)\n style_dict:\n dictionary: {'style' : 'RISE' or 'gauss',\n 'params' : [h,w,p_1] or sigma}\n where [h,w,p_1] are the height/width/probability of perturbation of low-res mask (for RISE algorithm); sigma is the gaussian RMS width\n timesteps:\n range of timesteps in the test set to perturb (e.g. timesteps = range(0,365) will perturb the first 365 timesteps in the test set)\n iters_total:\n number of total iterations of perturbation to do for each day in timesteps\n iters_one_pass:\n number of iterations to do at one time (typically less than iters_total for memory limits)\n verbose:\n 0: print nothing\n 1: print every 50th day\n tol:\n relative error threshold (when to stop perturbing the model)\n batch_size:\n batchsize of the model training process\n p_channel:\n number that defines the channel that will be perturbed (others will be used as is)\n \"\"\"\n #initialize heatmap as a flattened (lat*lon) x 1 numpy array\n heat_mean = np.zeros((np.size(x_test[0,0,:,:,0]), 1))\n\n H = np.shape(x_test)[2] #height of input video, in pixels (latitude)\n W = np.shape(x_test)[3] #width of input video, in pixels (longitude)\n\n heat_prev = np.zeros((H*W,1)) #initially, the previous heatmap is 0's (for first pass)\n heat_curr = np.zeros((H*W,1)) #also initialize the current heatmap as 0's (will fill once calculated at end of first pass)\n kk = 0\n err = tol+1 #just to enter while loop\n while err > tol:\n \n print(kk)\n #loop through specified timesteps to generate mean sensitivity\n for ts in timesteps: #for each day that we will perturb\n\n #state progress\n if verbose:\n if np.mod(ts,iters_one_pass)==0:\n print(' Timestep ' + str(ts) + '/' + str(len(timesteps))) \n\n #number of iterations of perturbations for one forward pass through model\n iters = iters_one_pass \n\n #define perturbation: rectangular as from RISE, or gaussian \n if style_dict['style'] == 'RISE':\n \n h = style_dict['params'][0]\n w = style_dict['params'][1]\n p_1 = style_dict['params'][2]\n\n x_int = np.linspace(0,W,w) #low-res x indices\n y_int = np.linspace(0,H,h) #low-res y indices\n\n xnew = np.arange(W)\n ynew = np.arange(H) \n\n perturb_small = np.random.choice([0,1],size = (iters,1,h,w), p = [1-p_1,p_1]) #binary perturbation on coarse grid\n 
perturb = np.half([interpolate.interp2d(x_int,y_int,perturb_small[iter][0])(xnew,ynew) for iter in range(iters)]) #perturbation is interpolated to finer grid\n\n elif style_dict['style'] == 'gauss':\n\n sigma = style_dict['params']\n\n x_int = np.arange(W)\n y_int = np.arange(H)\n x_mesh, y_mesh = np.meshgrid(x_int, y_int)\n\n #define gaussian perturbation for each iteration being passed\n perturb = np.half([np.exp( - ( (x_mesh - np.random.randint(0,W))**2 + (y_mesh - np.random.randint(0,H))**2 ) / (2*sigma**2) ) for iter in range(iters)])\n\n #copy/expand dimensions of the perturbation to be added to weather video\n perturb_2D = np.copy(perturb) #the 2D perturbation for each iteration of this pass\n\n perturb = tf.repeat(tf.expand_dims(tf.convert_to_tensor(perturb),3),nchannels, axis = 3) #expand along channels in one image\n\n perturb = tf.repeat(tf.expand_dims(tf.convert_to_tensor(perturb),1),steps_in, axis = 1) #expand along images in one video\n\n # only perturb one channel\n mask = np.zeros((perturb.shape))\n mask[:,:,:,:,p_channel] = 1\n mask = np.half(mask)\n mask = tf.convert_to_tensor(mask)\n perturb = perturb*mask\n \n xday = x_test[ts] #current timestep in test set\n xday_iters = [xday for val in range(iters)] #repeat for each iteration (e.g. make copy for each perturbation)\n\n factor = np.random.choice([-1,1],p = [0.5,0.5]).astype('float16') #whether to add or subtract perturbation from input video, 50-50 chance of each\n perturb = factor*perturb\n\n x1 = perturb\n x2 = tf.convert_to_tensor(xday_iters)\n xday_iters_perturb = tf.math.add(x1,x2)\n\n x_all = tf.squeeze(tf.concat((tf.expand_dims(xday, axis = 0),xday_iters_perturb), axis = 0)) #'all' refers to original (xday) and perturbed (xday_iters_perturb)\n x_all_ds = tf.data.Dataset.from_tensor_slices(x_all).batch(batch_size = batch_size)\n y_all = model.predict(x_all_ds)\n\n yday = y_all[0] #first element is unperturbed model prediction\n yday_perturb = y_all[1:] #all others are perturbed model predictions for each iteration of perturbation\n\n ydiffs = np.abs(np.reshape(yday - yday_perturb[:iters],(-1,1))) #magnitude difference between perturbed and unperturbed streamflow\n delta = np.ones((len(ydiffs),H,W)) * ydiffs[:,None] #get dimensions to match so delta can be multiplied by perturbation\n\n heat_iters = np.asarray(delta[:iters]) * np.asarray(perturb_2D)\n heat = np.mean(heat_iters[:iters], axis=0) \n\n heat_mean[:,0] += heat.flatten() \n\n del heat, heat_iters, delta, ydiffs, x_all, xday_iters #delete for memory\n\n n_iters = iters_one_pass*(kk+1)\n heat_curr = np.copy(heat_mean) / n_iters\n err = np.mean(np.abs(heat_curr - heat_prev)) / np.mean(heat_prev)\n\n heat_prev = np.copy(heat_curr)\n\n kk += 1\n\n heat_mean = heat_mean /(iters_total * len(timesteps))\n\n return heat_mean\n\n\n#%% Set directories and load data\ndir_data = './data_pickle' #where to save trained model outputs\ndir_models = './Results' #where to save trained model outputs\ndir_output = './heatmaps'\n\n# os.chdir(dir_output)\n\n# load data, which is already preprocessed and is a pickled dictionary with format:\n# 'date': Datetimeindex (No_of_timesteps,)\n# 'Variable': list (No_of_timesteps,)\n# each line of 'Variable' contains an array with dimensions (X_cells,Y_cells) (grid for each timestep)\n \n# one pickle file for each variable\npickle_in = open(dir_data + '/' + 'TDict.pickle','rb')\ntempDict = pickle.load(pickle_in)\n\npickle_in = open(dir_data + '/' + 'TsinDict.pickle','rb')\ntsinDict = pickle.load(pickle_in)\n\npickle_in = open(dir_data + 
'/' + 'PDict.pickle','rb')\nprecDict = pickle.load(pickle_in)\n\npickle_in = open(dir_data + '/' + 'SMLTDict.pickle','rb')\nsnowmeltDict = pickle.load(pickle_in)\n\npickle_in = open(dir_data + '/' + 'EDict.pickle','rb')\nEDict = pickle.load(pickle_in)\n\npickle_in = open(dir_data + '/' + 'SFDict.pickle','rb')\nSFDict = pickle.load(pickle_in)\n\npickle_in = open(dir_data + '/' + 'SWVL1Dict.pickle','rb')\nSWVL1Dict = pickle.load(pickle_in)\n\npickle_in = open(dir_data + '/' + 'SWVL2Dict.pickle','rb')\nSWVL2Dict = pickle.load(pickle_in)\n\npickle_in = open(dir_data + '/' + 'SWVL3Dict.pickle','rb')\nSWVL3Dict = pickle.load(pickle_in)\n\npickle_in = open(dir_data + '/' + 'SWVL4Dict.pickle','rb')\nSWVL4Dict = pickle.load(pickle_in)\n\nT = np.asarray(tempDict['T'])\nTsin = np.asarray(tsinDict['Tsin'])\nSMLT = np.asarray(snowmeltDict['SMLT'])\nP = np.asarray(precDict['P'])\nE = np.asarray(EDict['E'])\nSF = np.asarray(SFDict['SF'])\nSWVL1 = np.asarray(SWVL1Dict['SWVL1'])\nSWVL2 = np.asarray(SWVL2Dict['SWVL2'])\nSWVL3 = np.asarray(SWVL3Dict['SWVL3'])\nSWVL4 = np.asarray(SWVL4Dict['SWVL4'])\n\n# pickle file for Q contains only an array ('Q' time series) and a datetimeindex ('date')\npickle_in = open(dir_data + '/' + 'QDict.pickle','rb')\nQDict=pickle.load(pickle_in)\nQ = np.asarray(QDict['Q']) \n\nt = QDict['date']\n\n\n#%% split data\n\n#years/indices of testing/training\n#modify accordingly\ntrainStartYear = 2012\ntrainFinYear = 2017\n\nvalStartYear = 2018\nvalFinYear = 2018\n\noptStartYear = 2019\noptFinYear = 2019\n\ntestStartYear = 2020\ntestFinYear = 2020\n\ntrainInds = np.squeeze(np.argwhere((t.year>=trainStartYear) & (t.year<=trainFinYear)))\nvalInds = np.squeeze(np.argwhere((t.year>=valStartYear) & (t.year<=valFinYear)))\noptInds = np.squeeze(np.argwhere((t.year>=optStartYear) & (t.year<=optFinYear)))\ntestInds = np.squeeze(np.argwhere((t.year>=testStartYear) & (t.year<=testFinYear)))\n\nrefInds = np.squeeze(np.argwhere((t.year<testStartYear)))\n \nNtrain = len(trainInds)\nNval = len(valInds)\nNopt = len(optInds)\nNtest = len(testInds)\n\n#scaling\nscaler = StandardScaler()\n\nTnorm = scaler.fit_transform(T.reshape(-1, T.shape[-1])).reshape(T.shape)\nSMLTnorm = scaler.fit_transform(SMLT.reshape(-1, SMLT.shape[-1])).reshape(SMLT.shape)\nPnorm = scaler.fit_transform(P.reshape(-1, P.shape[-1])).reshape(P.shape)\nTsinnorm = scaler.fit_transform(Tsin.reshape(-1, Tsin.shape[-1])).reshape(Tsin.shape)\nEnorm = scaler.fit_transform(E.reshape(-1, E.shape[-1])).reshape(E.shape)\nSFnorm = scaler.fit_transform(SF.reshape(-1, SF.shape[-1])).reshape(SF.shape)\nSWVL1norm = scaler.fit_transform(SWVL1.reshape(-1, SWVL1.shape[-1])).reshape(SWVL1.shape)\nSWVL2norm = scaler.fit_transform(SWVL2.reshape(-1, SWVL2.shape[-1])).reshape(SWVL2.shape)\nSWVL3norm = scaler.fit_transform(SWVL3.reshape(-1, SWVL3.shape[-1])).reshape(SWVL3.shape)\nSWVL4norm = scaler.fit_transform(SWVL4.reshape(-1, SWVL4.shape[-1])).reshape(SWVL4.shape)\n\n\nQscaler = StandardScaler()\nQscaler.fit(pd.DataFrame(Q))\nQnorm = Qscaler.transform(pd.DataFrame(Q))\n\n\n#%% Define Bayesian Optimization to be able to load from existing logs:\n\npbounds = {'steps_in': (1,10*4),\n 'n': (7,7),\n 'batchsize': (7,7),\n 'inpTsin': (0,1),\n 'inpSMLT': (0,1), \n 'inpE': (0,1),\n 'inpT': (0,1),\n 'inpSF': (0,1),\n 'inpSWVL1': (0,1),\n 'inpSWVL2': (0,1),\n 'inpSWVL3': (0,1),\n 'inpSWVL4': (0,1)}\n\noptimizer = BayesianOptimization(\n f= bayesOpt_function, \n pbounds=pbounds, \n random_state=1, \n verbose = 0 \n )\n\n# #load existing 
optimizer\nlog_already_available = 0\nif os.path.isfile(\"./logs.json\"):\n load_logs(optimizer, logs=[\"./logs.json\"]);\n print(\"\\nExisting optimizer is already aware of {} points.\".format(len(optimizer.space)))\n log_already_available = 1\n \n#get best values from optimizer\nn = 2**int(optimizer.max.get(\"params\").get(\"n\"))\nsteps_in= 6*int(optimizer.max.get(\"params\").get(\"steps_in\"))\nbatch_size = 2**int(optimizer.max.get(\"params\").get(\"batchsize\"))\n\ninpT = int(round(optimizer.max.get(\"params\").get(\"inpT\")))\ninpTsin = int(round(optimizer.max.get(\"params\").get(\"inpTsin\")))\ninpSMLT = int(round(optimizer.max.get(\"params\").get(\"inpSMLT\")))\ninpE = int(round(optimizer.max.get(\"params\").get(\"inpE\")))\ninpSF = int(round(optimizer.max.get(\"params\").get(\"inpSF\")))\ninpSWVL1 = int(round(optimizer.max.get(\"params\").get(\"inpSWVL1\")))\ninpSWVL2 = int(round(optimizer.max.get(\"params\").get(\"inpSWVL2\")))\ninpSWVL3 = int(round(optimizer.max.get(\"params\").get(\"inpSWVL3\")))\ninpSWVL4 = int(round(optimizer.max.get(\"params\").get(\"inpSWVL4\")))\n\n# correct and print best values to console\nmaxDict = optimizer.max\nmaxDict['params']['n'] = n\nmaxDict['params']['steps_in'] = steps_in\nmaxDict['params']['batchsize'] = batch_size\nmaxDict['params']['steps_in(days)'] = steps_in/24\nprint(\"\\nBEST:\\t{}\".format(maxDict))\n\n\n#%% Compile test data\nlearning_rate = 1e-3\ntraining_epochs = 100 \nearlystopping_patience = 12\n\nnchannels = 1 + inpT + inpTsin + inpSMLT + inpE + inpSF + inpSWVL1 + inpSWVL2 + inpSWVL3 + inpSWVL4\n\ny_train = np.squeeze([Qnorm[steps_in:trainInds[-1]+1,]]).T\ny_val = np.squeeze([Qnorm[valInds,] ]).T\ny_test = np.squeeze([Qnorm[testInds,] ]).T\n\ny_train = y_train.astype(dtype = np.float16)\ny_val = y_val.astype(dtype = np.float16)\ny_test = y_test.astype(dtype = np.float16)\n\nx_intermediate = np.empty(np.shape(Pnorm) + (nchannels,),dtype='single')\nx_intermediate[:,:,:,0] = Pnorm\nchannel_names = ['P']\nidx = 1\nif inpT:\n x_intermediate[:,:,:,idx] = Tnorm\n channel_names.append('T')\n idx = idx+1\nif inpSMLT:\n x_intermediate[:,:,:,idx] = SMLTnorm\n channel_names.append('SMLT')\n idx = idx+1\nif inpTsin:\n x_intermediate[:,:,:,idx] = Tsinnorm\n channel_names.append('Tsin')\n idx = idx+1\nif inpE:\n x_intermediate[:,:,:,idx] = Enorm\n channel_names.append('E')\n idx = idx+1\nif inpSF:\n x_intermediate[:,:,:,idx] = SFnorm\n channel_names.append('SF')\n idx = idx+1\nif inpSWVL1:\n x_intermediate[:,:,:,idx] = SWVL1norm\n channel_names.append('SWVL1')\n idx = idx+1\nif inpSWVL2:\n x_intermediate[:,:,:,idx] = SWVL2norm\n channel_names.append('SWVL2')\n idx = idx+1\nif inpSWVL3:\n x_intermediate[:,:,:,idx] = SWVL3norm\n channel_names.append('SWVL3')\n idx = idx+1\nif inpSWVL4:\n x_intermediate[:,:,:,idx] = SWVL4norm\n channel_names.append('SWVL4')\n idx = idx+1\n\nx_train = np.empty((Ntrain-steps_in, steps_in, ) + np.shape(Tnorm)[1:] + (nchannels,),dtype=np.float16)\nx_val = np.empty((Nval, steps_in,) + np.shape(Tnorm)[1:] + (nchannels,), dtype = np.float16)\nx_test = np.empty((Ntest, steps_in,) + np.shape(Tnorm)[1:] + (nchannels,),dtype=np.float16)\n\n#training\nfor ii in range(Ntrain-steps_in):\n x_train[ii] = x_intermediate[ii:ii+steps_in]\n# #validation\nfor ii in range(Nval):\n x_val[ii] = x_intermediate[ii + Ntrain - steps_in : ii + Ntrain]\n# #testing ()\nfor ii in range(Ntest):\n x_test[ii] = x_intermediate[ii + Ntrain + Nval + Nopt - steps_in : ii + Ntrain + Nval + Nopt]\n\n# #convert target arrays to tensors\nx_train = 
tf.convert_to_tensor(x_train)\nx_val = tf.convert_to_tensor(x_val)\nx_test = tf.convert_to_tensor(x_test)\ny_train = tf.convert_to_tensor(y_train)\ny_val = tf.convert_to_tensor(y_val)\ny_test = tf.convert_to_tensor(y_test)\n\n#%% Load existing Models and calculate heatmaps\nwith tf.device(\"/gpu:2\"): # adapt to your available device\n \n for c in range(nchannels): #perturb one channel at a time\n \n inimax = 10 # use 10 different trained models\n heat = np.zeros((T.shape[1]*T.shape[2],inimax)) # preallocate\n print(channel_names[c])\n for ini in range(inimax):\n \n fileName = dir_output + '/heatmap_'+channel_names[c]+'_channel_ini'+str(ini)+'.csv'\n if os.path.isfile(fileName): # check for previous calculation runs to save time\n temp_load = pd.read_csv(fileName,header=None)\n temp = np.asarray(temp_load[0])\n else:\n print(\"Model: ini \"+str(ini)+\" of \"+str(inimax-1))\n seed(ini+37657)\n tf.random.set_seed(ini+37657)\n \n model_name = 'model_ERA5_ini' + str(ini)# + '.h5'\n model = tf.keras.models.load_model(dir_models + '/' + model_name)\n print(\"model loading successful\")\n \n #parameters for heatmaps\n sigma = 1.5 #radius of perturbation\n style_dict = {'style' : 'gauss', #style of perturbation: gaussian (not RISE/rectangular)\n 'params' : sigma}\n timesteps_heat = range(Ntest) #timesteps in test set to perturb\n iters_total = 200 #total iterations of perturbation\n iters_one_pass = 50 #number of iterations to pass through model at one time (too high --> RAM issues)\n tol = 5e-3\n \n # heatmap\n temp = make_heat(model = model,\n x_test = x_test, \n y_test = y_test, \n style_dict = style_dict, \n timesteps = timesteps_heat, \n iters_total = iters_total, \n iters_one_pass = iters_one_pass, \n verbose = 0,\n tol = tol,\n p_channel = c, # channel to perturb; others are left as is\n batch_size = batch_size\n )\n \n heat[:,ini] = temp.reshape(-1,)\n \n fileName = 'heatmap_'+channel_names[c]+'_channel_ini'+str(ini)+'.csv'\n np.savetxt(dir_output + '/' + fileName, temp.reshape(-1,), delimiter = ',')\n\n heat_mean = np.mean(heat,axis=1)\n \n #save mean heatmap\n fileName = 'heatmap_'+channel_names[c]+'_channel.csv'\n np.savetxt(dir_output + '/' + fileName, heat_mean, delimiter = ',')\n \n#save channel_names file for next script: plot heatmaps\nnp.savetxt(dir_output+'/channelnames.txt', channel_names,fmt='%s', delimiter = ',')\n" ]
[ [ "numpy.random.choice", "tensorflow.data.Dataset.from_tensor_slices", "numpy.copy", "numpy.half", "numpy.mean", "numpy.size", "pandas.read_csv", "tensorflow.random.set_seed", "pandas.DataFrame", "numpy.arange", "numpy.random.randint", "numpy.mod", "tensorflow.math.add", "numpy.savetxt", "numpy.reshape", "numpy.zeros", "tensorflow.expand_dims", "scipy.interpolate.interp2d", "numpy.shape", "tensorflow.keras.models.load_model", "numpy.argwhere", "numpy.squeeze", "tensorflow.convert_to_tensor", "numpy.asarray", "sklearn.preprocessing.StandardScaler", "numpy.abs", "tensorflow.device", "numpy.linspace", "numpy.meshgrid" ] ]
shan18/TensorNet
[ "c79a0c64152dbeb3499d204994772858326f668c" ]
[ "tensornet/engine/learner.py" ]
[ "import math\nimport time\nimport torch\nfrom copy import deepcopy\n\nfrom tensornet.engine.ops.regularizer import l1\nfrom tensornet.engine.ops.checkpoint import ModelCheckpoint\nfrom tensornet.engine.ops.tensorboard import TensorBoard\nfrom tensornet.data.processing import InfiniteDataLoader\nfrom tensornet.utils.progress_bar import ProgressBar\n\n\nclass Learner:\n \"\"\"Model Trainer and Validator.\n\n Args:\n train_loader (torch.utils.data.DataLoader): Training data loader.\n optimizer (torch.optim): Optimizer for the model.\n criterion (torch.nn): Loss Function.\n device (:obj:`str` or :obj:`torch.device`, optional): Device where the data\n will be loaded. (default='cpu')\n epochs (:obj:`int`, optional): Numbers of epochs/iterations to train the model for.\n (default: 1)\n l1_factor (:obj:`float`, optional): L1 regularization factor. (default: 0)\n val_loader (:obj:`torch.utils.data.DataLoader`, optional): Validation data loader.\n callbacks (:obj:`list`, optional): List of callbacks to be used during training.\n metrics (:obj:`list`, optional): List of names of the metrics for model\n evaluation.\n\n *Note*: If the model has multiple outputs, then this will be a nested list\n where each individual sub-list will specify the metrics which are to be used for\n evaluating each output respectively. In such cases, the model checkpoint will\n consider only the metric of the first output for saving checkpoints.\n activate_loss_logits (:obj:`bool`, optional): If True, the logits will first pass\n through the `activate_logits` function before going to the criterion.\n (default: False)\n record_train (:obj:`bool`, optional): If False, metrics will be calculated only\n during validation. (default: True)\n \"\"\"\n\n def __init__(\n self, train_loader, optimizer, criterion, device='cpu',\n epochs=1, l1_factor=0.0, val_loader=None, callbacks=None, metrics=None,\n activate_loss_logits=False, record_train=True\n ):\n self.model = None\n self.optimizer = optimizer\n self.criterion = criterion\n self.train_loader = train_loader\n self.device = device\n self.epochs = epochs\n self.val_loader = val_loader\n self.l1_factor = l1_factor\n self.activate_loss_logits = activate_loss_logits\n self.record_train = record_train\n\n self.lr_schedulers = {\n 'step_lr': None,\n 'lr_plateau': None,\n 'one_cycle_policy': None,\n 'cyclic_lr': None,\n }\n self.checkpoint = None\n self.summary_writer = None\n if callbacks is not None:\n self._setup_callbacks(callbacks)\n\n # Training\n self.train_losses = [] # Change in loss\n self.train_metrics = [] # Change in evaluation metric\n\n self.val_losses = [] # Change in loss\n self.val_metrics = [] # Change in evaluation metric\n\n # Set evaluation metrics\n self.metrics = []\n if metrics:\n self._setup_metrics(metrics)\n\n def _setup_callbacks(self, callbacks):\n \"\"\"Extract callbacks passed to the class.\n\n Args:\n callbacks (list): List of callbacks.\n \"\"\"\n for callback in callbacks:\n if isinstance(callback, torch.optim.lr_scheduler.StepLR):\n self.lr_schedulers['step_lr'] = callback\n elif isinstance(callback, torch.optim.lr_scheduler.ReduceLROnPlateau):\n self.lr_schedulers['lr_plateau'] = callback\n elif isinstance(callback, torch.optim.lr_scheduler.OneCycleLR):\n self.lr_schedulers['one_cycle_policy'] = callback\n elif isinstance(callback, ModelCheckpoint):\n if callback.monitor.startswith('train_'):\n if self.record_train:\n self.checkpoint = callback\n else:\n raise ValueError(\n 'Cannot use checkpoint for a training metric if record_train is set to 
False'\n )\n else:\n self.checkpoint = callback\n elif isinstance(callback, TensorBoard):\n self.summary_writer = callback\n elif isinstance(callback, torch.optim.lr_scheduler.CyclicLR):\n self.lr_schedulers['cyclic_lr'] = callback\n\n def set_model(self, model):\n \"\"\"Assign model to learner.\n\n Args:\n model (torch.nn.Module): Model Instance.\n \"\"\"\n self.model = model\n if self.summary_writer is not None:\n self.summary_writer.write_model(self.model)\n\n def _accuracy(self, label, prediction, idx=0):\n \"\"\"Calculate accuracy.\n\n Args:\n label (torch.Tensor): Ground truth.\n prediction (torch.Tensor): Prediction.\n \"\"\"\n self.metrics[idx]['accuracy']['sum'] += prediction.eq(\n label.view_as(prediction)\n ).sum().item()\n self.metrics[idx]['accuracy']['num_steps'] += len(label)\n self.metrics[idx]['accuracy']['value'] = round(\n 100 * self.metrics[idx]['accuracy']['sum'] / self.metrics[idx]['accuracy']['num_steps'], 2\n )\n\n def _iou(self, label, prediction, idx=0):\n \"\"\"Calculate Intersection over Union.\n\n Args:\n label (torch.Tensor): Ground truth.\n prediction (torch.Tensor): Prediction.\n \"\"\"\n # Remove 1 channel dimension\n label = label.squeeze(1)\n prediction = prediction.squeeze(1)\n\n intersection = (prediction * label).sum(2).sum(1)\n union = (prediction + label).sum(2).sum(1) - intersection\n\n # epsilon is added to avoid 0/0\n epsilon = 1e-6\n iou = (intersection + epsilon) / (union + epsilon)\n\n self.metrics[idx]['iou']['sum'] += iou.sum().item()\n self.metrics[idx]['iou']['num_steps'] += label.size(0)\n self.metrics[idx]['iou']['value'] = round(\n self.metrics[idx]['iou']['sum'] / self.metrics[idx]['iou']['num_steps'], 3\n )\n\n def _pred_label_diff(self, label, prediction, rel=False):\n \"\"\"Calculate the difference between label and prediction.\n\n Args:\n label (torch.Tensor): Ground truth.\n prediction (torch.Tensor): Prediction.\n rel (:obj:`bool`, optional): If True, return the relative\n difference. 
(default: False)\n\n Returns:\n Difference between label and prediction\n \"\"\"\n # For numerical stability\n valid_labels = label > 0.0001\n _label = label[valid_labels]\n _prediction = prediction[valid_labels]\n valid_element_count = _label.size(0)\n\n if valid_element_count > 0:\n diff = torch.abs(_label - _prediction)\n if rel:\n diff = torch.div(diff, _label)\n\n return diff, valid_element_count\n\n def _rmse(self, label, prediction, idx=0):\n \"\"\"Calculate Root Mean Square Error.\n\n Args:\n label (torch.Tensor): Ground truth.\n prediction (torch.Tensor): Prediction.\n \"\"\"\n diff = self._pred_label_diff(label, prediction)\n rmse = 0\n if diff is not None:\n rmse = math.sqrt(torch.sum(torch.pow(diff[0], 2)) / diff[1])\n\n self.metrics[idx]['rmse']['num_steps'] += label.size(0)\n self.metrics[idx]['rmse']['sum'] += rmse * label.size(0)\n self.metrics[idx]['rmse']['value'] = round(\n self.metrics[idx]['rmse']['sum'] / self.metrics[idx]['rmse']['num_steps'], 3\n )\n\n def _mae(self, label, prediction, idx=0):\n \"\"\"Calculate Mean Average Error.\n\n Args:\n label (torch.Tensor): Ground truth.\n prediction (torch.Tensor): Prediction.\n \"\"\"\n diff = self._pred_label_diff(label, prediction)\n mae = 0\n if diff is not None:\n mae = torch.sum(diff[0]).item() / diff[1]\n\n self.metrics[idx]['mae']['num_steps'] += label.size(0)\n self.metrics[idx]['mae']['sum'] += mae * label.size(0)\n self.metrics[idx]['mae']['value'] = round(\n self.metrics[idx]['mae']['sum'] / self.metrics[idx]['mae']['num_steps'], 3\n )\n\n def _abs_rel(self, label, prediction, idx=0):\n \"\"\"Calculate Absolute Relative Error.\n\n Args:\n label (torch.Tensor): Ground truth.\n prediction (torch.Tensor): Prediction.\n \"\"\"\n diff = self._pred_label_diff(label, prediction, rel=True)\n abs_rel = 0\n if diff is not None:\n abs_rel = torch.sum(diff[0]).item() / diff[1]\n\n self.metrics[idx]['abs_rel']['num_steps'] += label.size(0)\n self.metrics[idx]['abs_rel']['sum'] += abs_rel * label.size(0)\n self.metrics[idx]['abs_rel']['value'] = round(\n self.metrics[idx]['abs_rel']['sum'] / self.metrics[idx]['abs_rel']['num_steps'], 3\n )\n\n def _setup_metrics(self, metrics):\n \"\"\"Validate the evaluation metrics passed to the class.\n\n Args:\n metrics (:obj:`list` or :obj:`dict`): Metrics.\n \"\"\"\n\n if not isinstance(metrics[0], (list, tuple)):\n metrics = [metrics]\n\n for idx, metric_list in enumerate(metrics):\n metric_dict = {}\n for metric in metric_list:\n metric_info = {'value': 0, 'sum': 0, 'num_steps': 0}\n if metric == 'accuracy':\n metric_info['func'] = self._accuracy\n elif metric == 'rmse':\n metric_info['func'] = self._rmse\n elif metric == 'mae':\n metric_info['func'] = self._mae\n elif metric == 'abs_rel':\n metric_info['func'] = self._abs_rel\n elif metric == 'iou':\n metric_info['func'] = self._iou\n\n if 'func' in metric_info:\n metric_dict[metric] = metric_info\n\n if metric_dict:\n self.metrics.append(metric_dict)\n self.train_metrics.append({\n x: [] for x in metric_dict.keys()\n })\n self.val_metrics.append({\n x: [] for x in metric_dict.keys()\n })\n\n def _calculate_metrics(self, labels, predictions):\n \"\"\"Update evaluation metric values.\n\n Args:\n label (:obj:`torch.Tensor` or :obj:`dict`): Ground truth.\n prediction (:obj:`torch.Tensor` or :obj:`dict`): Prediction.\n \"\"\"\n predictions = self.activate_logits(predictions)\n\n if not isinstance(labels, (list, tuple)):\n labels = [labels]\n predictions = [predictions]\n\n for idx, (label, prediction) in enumerate(zip(labels, 
predictions)):\n # If predictions are one-hot encoded\n if label.size() != prediction.size():\n prediction = prediction.argmax(dim=1, keepdim=True) * 1.0\n\n if idx < len(self.metrics):\n for metric in self.metrics[idx]:\n self.metrics[idx][metric]['func'](\n label, prediction, idx=idx\n )\n\n def _reset_metrics(self):\n \"\"\"Reset metric params.\"\"\"\n for idx in range(len(self.metrics)):\n for metric in self.metrics[idx]:\n self.metrics[idx][metric]['value'] = 0\n self.metrics[idx][metric]['sum'] = 0\n self.metrics[idx][metric]['num_steps'] = 0\n\n def _get_pbar_values(self, loss):\n \"\"\"Create progress bar description.\n\n Args:\n loss (float): Loss value.\n \"\"\"\n pbar_values = [('loss', round(loss, 2))]\n if self.metrics and self.record_train:\n for idx in range(len(self.metrics)):\n for metric, info in self.metrics[idx].items():\n metric_name = metric\n if len(self.metrics) > 1:\n metric_name = f'{idx} - {metric}'\n pbar_values.append((metric_name, info['value']))\n return pbar_values\n\n def update_training_history(self, loss):\n \"\"\"Update the training history.\n\n Args:\n loss (float): Loss value.\n \"\"\"\n self.train_losses.append(loss)\n if self.record_train:\n for idx in range(len(self.metrics)):\n for metric in self.metrics[idx]:\n self.train_metrics[idx][metric].append(\n self.metrics[idx][metric]['value']\n )\n\n def reset_history(self):\n \"\"\"Reset the training history\"\"\"\n self.train_losses = []\n self.val_losses = []\n for idx in range(len(self.metrics)):\n for metric in self.metrics[idx]:\n self.train_metrics[idx][metric] = []\n self.val_metrics[idx][metric] = []\n self._reset_metrics()\n\n def activate_logits(self, logits):\n \"\"\"Apply activation function to the logits if needed.\n After this the logits will be sent for calculation of\n loss or evaluation metrics.\n\n Args:\n logits (torch.Tensor): Model output\n\n Returns:\n (*torch.Tensor*): activated logits\n \"\"\"\n return logits\n\n def calculate_criterion(self, logits, targets, train=True):\n \"\"\"Calculate loss.\n\n Args:\n logits (torch.Tensor): Prediction.\n targets (torch.Tensor): Ground truth.\n train (:obj:`bool`, optional): If True, loss is sent to the\n L1 regularization function. 
(default: True)\n\n Returns:\n (*torch.Tensor*): loss value\n \"\"\"\n if self.activate_loss_logits:\n logits = self.activate_logits(logits)\n if train:\n return l1(self.model, self.criterion(logits, targets), self.l1_factor)\n return self.criterion(logits, targets)\n\n def fetch_data(self, data):\n \"\"\"Fetch data from loader and load it to GPU.\n\n Args:\n data (:obj:`tuple` or :obj:`list`): List containing inputs and targets.\n\n Returns:\n inputs and targets loaded to GPU.\n \"\"\"\n return data[0].to(self.device), data[1].to(self.device)\n\n def train_batch(self, data):\n \"\"\"Train the model on a batch of data.\n\n Args:\n data (:obj:`tuple` or :obj:`list`): Input and target data for the model.\n\n Returns:\n (*float*): Batch loss.\n \"\"\"\n inputs, targets = self.fetch_data(data)\n self.optimizer.zero_grad() # Set gradients to zero before starting backpropagation\n y_pred = self.model(inputs) # Predict output\n loss = self.calculate_criterion(y_pred, targets, train=True) # Calculate loss\n\n # Perform backpropagation\n loss.backward()\n self.optimizer.step()\n\n if self.record_train:\n self._calculate_metrics(targets, y_pred)\n\n # One Cycle Policy for learning rate\n if self.lr_schedulers['one_cycle_policy'] is not None:\n self.lr_schedulers['one_cycle_policy'].step()\n\n # Cyclic LR policy\n if self.lr_schedulers['cyclic_lr'] is not None:\n self.lr_schedulers['cyclic_lr'].step()\n\n return loss.item()\n\n def train_epoch(self, verbose=True):\n \"\"\"Run an epoch of model training.\n\n Args:\n verbose (:obj:`bool`, optional): Print logs. (default: True)\n \"\"\"\n\n self.model.train()\n if verbose:\n pbar = ProgressBar(target=len(self.train_loader), width=8)\n\n for batch_idx, data in enumerate(self.train_loader, 0):\n # Train a batch\n loss = self.train_batch(data)\n\n # Update Progress Bar\n if verbose:\n pbar_values = self._get_pbar_values(loss)\n pbar.update(batch_idx, values=pbar_values)\n\n # Update training history\n self.update_training_history(loss)\n if verbose:\n pbar_values = self._get_pbar_values(loss)\n pbar.add(1, values=pbar_values)\n\n self._reset_metrics()\n\n def train_iterations(self, verbose=True):\n \"\"\"Train model for the 'self.epochs' number of batches.\"\"\"\n\n self.model.train()\n\n if verbose:\n pbar = ProgressBar(target=self.epochs, width=8)\n\n iterator = InfiniteDataLoader(self.train_loader)\n for iteration in range(self.epochs):\n # Train a batch\n loss = self.train_batch(iterator.get_batch())\n\n # Update Progress Bar\n if verbose:\n pbar_values = self._get_pbar_values(loss)\n pbar.update(iteration, values=pbar_values)\n\n # Update training history\n self.update_training_history(loss)\n\n if verbose:\n pbar.add(1, values=pbar_values)\n\n def evaluate(self, loader, verbose=True, log_message='Evaluation'):\n \"\"\"Evaluate the model on a custom data loader.\n\n Args:\n loader (torch.utils.data.DataLoader): Data loader.\n verbose (:obj:`bool`, optional): Print loss and metrics. 
(default: True)\n log_message (str): Prefix for the logs which are printed at the end.\n\n Returns:\n loss and metric values\n \"\"\"\n\n start_time = time.time()\n self.model.eval()\n eval_loss = 0\n with torch.no_grad():\n for data in loader:\n inputs, targets = self.fetch_data(data)\n output = self.model(inputs) # Get trained model output\n eval_loss += self.calculate_criterion(\n output, targets, train=False\n ).item() # Sum up batch loss\n self._calculate_metrics(targets, output) # Calculate evaluation metrics\n\n eval_loss /= len(loader.dataset)\n eval_metrics = deepcopy(self.metrics)\n end_time = time.time()\n\n # Time spent during validation\n duration = int(end_time - start_time)\n minutes = duration // 60\n seconds = duration % 60\n\n if verbose:\n log = f'{log_message} (took {minutes} minutes, {seconds} seconds): Average loss: {eval_loss:.4f}'\n for idx in range(len(self.metrics)):\n for metric in self.metrics[idx]:\n log += f', {metric}: {self.metrics[idx][metric][\"value\"]}'\n log += '\\n'\n print(log)\n\n self._reset_metrics()\n\n return eval_loss, eval_metrics\n\n def validate(self, verbose=True):\n \"\"\"Validate an epoch of model training.\n\n Args:\n verbose (:obj:`bool`, optional): Print validation loss and metrics.\n (default: True)\n \"\"\"\n eval_loss, eval_metrics = self.evaluate(\n self.val_loader, verbose=verbose, log_message='Validation set'\n )\n\n # Update validation logs\n self.val_losses.append(eval_loss)\n for idx in range(len(eval_metrics)):\n for metric in eval_metrics[idx]:\n self.val_metrics[idx][metric].append(\n eval_metrics[idx][metric]['value']\n )\n\n def save_checkpoint(self, epoch=None):\n \"\"\"Save model checkpoint.\n\n Args:\n epoch (:obj:`int`, optional): Current epoch number.\n \"\"\"\n if self.checkpoint is not None:\n metric = None\n if self.checkpoint.monitor == 'train_loss':\n metric = self.train_losses[-1]\n elif self.checkpoint.monitor == 'val_loss':\n metric = self.val_losses[-1]\n elif self.metrics:\n if self.checkpoint.monitor.startswith('train_'):\n if self.record_train:\n metric = self.train_metrics[0][\n self.checkpoint.monitor.split('train_')[-1]\n ][-1]\n else:\n metric = self.val_metrics[0][\n self.checkpoint.monitor.split('val_')[-1]\n ][-1]\n else:\n print('Invalid metric function, can\\'t save checkpoint.')\n return\n\n self.checkpoint(self.model, metric, epoch)\n\n def write_summary(self, epoch, train):\n \"\"\"Write training summary in tensorboard.\n\n Args:\n epoch (int): Current epoch number.\n train (bool): If True, summary will be\n written for model training else it\n will be writtern for model validation.\n \"\"\"\n if self.summary_writer is not None:\n if train:\n mode = 'train'\n\n # Write Images\n self.summary_writer.write_images(\n self.model, self.activate_logits, f'prediction_epoch_{epoch}'\n )\n loss = self.train_losses[-1]\n else:\n mode = 'val'\n loss = self.val_losses[-1]\n\n # Write Loss\n self.summary_writer.write_scalar(\n f'Loss/{mode}', loss, epoch\n )\n\n if not train or self.record_train:\n for idx in range(len(self.metrics)):\n for metric, info in self.metrics[idx].items():\n self.summary_writer.write_scalar(\n f'{idx}/{metric.title()}/{mode}',\n info['value'], epoch\n )\n\n def fit(self, start_epoch=1, epochs=None, reset=True, verbose=True):\n \"\"\"Perform model training.\n\n Args:\n start_epoch (:obj:`int`, optional): Start epoch for training.\n (default: 1)\n epochs (:obj:`int`, optional): Numbers of epochs/iterations to\n train the model for. 
If no value is given, the original\n value given during initialization of learner will be used.\n reset (:obj:`bool`, optional): Flag to indicate that training\n is starting from scratch. (default: True)\n verbose (:obj:`bool`, optional): Print logs. (default: True)\n \"\"\"\n\n if reset:\n self.reset_history()\n\n if epochs is not None:\n self.epochs = epochs\n\n for epoch in range(start_epoch, start_epoch + self.epochs):\n if verbose:\n print(f'Epoch {epoch}:')\n\n # Train an epoch\n self.train_epoch(verbose=verbose)\n self.write_summary(epoch, True)\n\n # Validate the model\n if self.val_loader is not None:\n self.validate(verbose=verbose)\n self.write_summary(epoch, False)\n\n # Save model checkpoint\n self.save_checkpoint(epoch)\n\n # Call Step LR\n if not self.lr_schedulers['step_lr'] is None:\n self.lr_schedulers['step_lr'].step()\n\n # Call Reduce LR on Plateau\n if not self.lr_schedulers['lr_plateau'] is None:\n self.lr_schedulers['lr_plateau'].step(self.val_losses[-1])\n" ]
[ [ "torch.no_grad", "torch.pow", "torch.abs", "torch.div", "torch.sum" ] ]
Kanasani-Monica/tensorpack
[ "18b19d6dec901415ab8877a1405145cb5dda919b" ]
[ "examples/DoReFa-Net/alexnet-dorefa.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# File: alexnet-dorefa.py\n# Author: Yuxin Wu, Yuheng Zou ({wyx,zyh}@megvii.com)\n\nimport cv2\nimport tensorflow as tf\nimport argparse\nimport numpy as np\nimport os\nimport sys\n\n\nfrom tensorpack import *\nfrom tensorpack.tfutils.summary import add_param_summary\nfrom tensorpack.tfutils.varreplace import remap_variables\nfrom tensorpack.dataflow import dataset\nfrom tensorpack.utils.gpu import get_nr_gpu\n\nfrom imagenet_utils import get_imagenet_dataflow, fbresnet_augmentor, ImageNetModel\nfrom dorefa import get_dorefa, ternarize\n\n\"\"\"\nThis is a tensorpack script for the ImageNet results in paper:\nDoReFa-Net: Training Low Bitwidth Convolutional Neural Networks with Low Bitwidth Gradients\nhttp://arxiv.org/abs/1606.06160\n\nThe original experiements are performed on a proprietary framework.\nThis is our attempt to reproduce it on tensorpack & TensorFlow.\n\nTo Train:\n ./alexnet-dorefa.py --dorefa 1,2,6 --data PATH --gpu 0,1\n\n PATH should look like:\n PATH/\n train/\n n02134418/\n n02134418_198.JPEG\n ...\n ...\n val/\n ILSVRC2012_val_00000001.JPEG\n ...\n\n And you'll need the following to be able to fetch data efficiently\n Fast disk random access (Not necessarily SSD. I used a RAID of HDD, but not sure if plain HDD is enough)\n More than 20 CPU cores (for data processing)\n More than 10G of free memory\n\nTo run pretrained model:\n ./alexnet-dorefa.py --load alexnet-126.npz --run a.jpg --dorefa 1,2,6\n\"\"\"\n\nBITW = 1\nBITA = 2\nBITG = 6\nTOTAL_BATCH_SIZE = 256\nBATCH_SIZE = None\n\n\nclass Model(ImageNetModel):\n weight_decay = 5e-6\n weight_decay_pattern = 'fc.*/W'\n\n def get_logits(self, image):\n if BITW == 't':\n fw, fa, fg = get_dorefa(32, 32, 32)\n fw = ternarize\n else:\n fw, fa, fg = get_dorefa(BITW, BITA, BITG)\n\n # monkey-patch tf.get_variable to apply fw\n def new_get_variable(v):\n name = v.op.name\n # don't binarize first and last layer\n if not name.endswith('W') or 'conv0' in name or 'fct' in name:\n return v\n else:\n logger.info(\"Quantizing weight {}\".format(v.op.name))\n return fw(v)\n\n def nonlin(x):\n if BITA == 32:\n return tf.nn.relu(x) # still use relu for 32bit cases\n return tf.clip_by_value(x, 0.0, 1.0)\n\n def activate(x):\n return fa(nonlin(x))\n\n with remap_variables(new_get_variable), \\\n argscope([Conv2D, BatchNorm, MaxPooling], data_format='channels_first'), \\\n argscope(BatchNorm, momentum=0.9, epsilon=1e-4), \\\n argscope(Conv2D, use_bias=False):\n logits = (LinearWrap(image)\n .Conv2D('conv0', 96, 12, strides=4, padding='VALID', use_bias=True)\n .apply(activate)\n .Conv2D('conv1', 256, 5, padding='SAME', split=2)\n .apply(fg)\n .BatchNorm('bn1')\n .MaxPooling('pool1', 3, 2, padding='SAME')\n .apply(activate)\n\n .Conv2D('conv2', 384, 3)\n .apply(fg)\n .BatchNorm('bn2')\n .MaxPooling('pool2', 3, 2, padding='SAME')\n .apply(activate)\n\n .Conv2D('conv3', 384, 3, split=2)\n .apply(fg)\n .BatchNorm('bn3')\n .apply(activate)\n\n .Conv2D('conv4', 256, 3, split=2)\n .apply(fg)\n .BatchNorm('bn4')\n .MaxPooling('pool4', 3, 2, padding='VALID')\n .apply(activate)\n\n .FullyConnected('fc0', 4096)\n .apply(fg)\n .BatchNorm('bnfc0')\n .apply(activate)\n\n .FullyConnected('fc1', 4096, use_bias=False)\n .apply(fg)\n .BatchNorm('bnfc1')\n .apply(nonlin)\n .FullyConnected('fct', 1000, use_bias=True)())\n add_param_summary(('.*/W', ['histogram', 'rms']))\n return logits\n\n def optimizer(self):\n lr = tf.get_variable('learning_rate', initializer=2e-4, trainable=False)\n return 
tf.train.AdamOptimizer(lr, epsilon=1e-5)\n\n\ndef get_data(dataset_name):\n isTrain = dataset_name == 'train'\n augmentors = fbresnet_augmentor(isTrain)\n return get_imagenet_dataflow(\n args.data, dataset_name, BATCH_SIZE, augmentors)\n\n\ndef get_config():\n data_train = get_data('train')\n data_test = get_data('val')\n\n return TrainConfig(\n dataflow=data_train,\n callbacks=[\n ModelSaver(),\n ScheduledHyperParamSetter(\n 'learning_rate', [(60, 4e-5), (75, 8e-6)]),\n InferenceRunner(data_test,\n [ClassificationError('wrong-top1', 'val-error-top1'),\n ClassificationError('wrong-top5', 'val-error-top5')])\n ],\n model=Model(),\n steps_per_epoch=1280000 // TOTAL_BATCH_SIZE,\n max_epoch=90,\n )\n\n\ndef run_image(model, sess_init, inputs):\n pred_config = PredictConfig(\n model=model,\n session_init=sess_init,\n input_names=['input'],\n output_names=['output']\n )\n predictor = OfflinePredictor(pred_config)\n meta = dataset.ILSVRCMeta()\n pp_mean = meta.get_per_pixel_mean()\n pp_mean_224 = pp_mean[16:-16, 16:-16, :]\n words = meta.get_synset_words_1000()\n\n def resize_func(im):\n h, w = im.shape[:2]\n scale = 256.0 / min(h, w)\n desSize = map(int, (max(224, min(w, scale * w)),\n max(224, min(h, scale * h))))\n im = cv2.resize(im, tuple(desSize), interpolation=cv2.INTER_CUBIC)\n return im\n transformers = imgaug.AugmentorList([\n imgaug.MapImage(resize_func),\n imgaug.CenterCrop((224, 224)),\n imgaug.MapImage(lambda x: x - pp_mean_224),\n ])\n for f in inputs:\n assert os.path.isfile(f)\n img = cv2.imread(f).astype('float32')\n assert img is not None\n\n img = transformers.augment(img)[np.newaxis, :, :, :]\n outputs = predictor(img)[0]\n prob = outputs[0]\n ret = prob.argsort()[-10:][::-1]\n\n names = [words[i] for i in ret]\n print(f + \":\")\n print(list(zip(names, prob[ret])))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--gpu', help='the physical ids of GPUs to use')\n parser.add_argument('--load', help='load a checkpoint, or a npz (given as the pretrained model)')\n parser.add_argument('--data', help='ILSVRC dataset dir')\n parser.add_argument('--dorefa', required=True,\n help='number of bits for W,A,G, separated by comma. W=\"t\" means TTQ')\n parser.add_argument('--run', help='run on a list of images with the pretrained model', nargs='*')\n args = parser.parse_args()\n\n dorefa = args.dorefa.split(',')\n if dorefa[0] == 't':\n assert dorefa[1] == '32' and dorefa[2] == '32'\n BITW, BITA, BITG = 't', 32, 32\n else:\n BITW, BITA, BITG = map(int, dorefa)\n\n if args.gpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\n\n if args.run:\n assert args.load.endswith('.npz')\n run_image(Model(), DictRestore(dict(np.load(args.load))), args.run)\n sys.exit()\n\n nr_tower = max(get_nr_gpu(), 1)\n BATCH_SIZE = TOTAL_BATCH_SIZE // nr_tower\n logger.set_logger_dir(os.path.join(\n 'train_log', 'alexnet-dorefa-{}'.format(args.dorefa)))\n logger.info(\"Batch per tower: {}\".format(BATCH_SIZE))\n\n config = get_config()\n if args.load:\n config.session_init = SaverRestore(args.load)\n launch_train_with_config(config, SyncMultiGPUTrainer(nr_tower))\n" ]
[ [ "tensorflow.train.AdamOptimizer", "tensorflow.nn.relu", "numpy.load", "tensorflow.clip_by_value", "tensorflow.get_variable" ] ]
vsl9/NeMo
[ "4137c2b4e3cba0ec5ca1da7b58b3ff97fdb25e50", "4137c2b4e3cba0ec5ca1da7b58b3ff97fdb25e50", "4137c2b4e3cba0ec5ca1da7b58b3ff97fdb25e50" ]
[ "nemo/collections/asr/parts/features.py", "nemo/collections/nlp/utils/callbacks/joint_intent_slot.py", "nemo/collections/asr/greedy_ctc_decoder.py" ]
[ "# Taken straight from Patter https://github.com/ryanleary/patter\n# TODO: review, and copyright and fix/add comments\nimport math\n\nimport librosa\nimport torch\nimport torch.nn as nn\nfrom torch_stft import STFT\n\nfrom nemo import logging\nfrom nemo.collections.asr.parts.perturb import AudioAugmentor\nfrom nemo.collections.asr.parts.segment import AudioSegment\n\nCONSTANT = 1e-5\n\n\ndef normalize_batch(x, seq_len, normalize_type):\n if normalize_type == \"per_feature\":\n x_mean = torch.zeros((seq_len.shape[0], x.shape[1]), dtype=x.dtype, device=x.device)\n x_std = torch.zeros((seq_len.shape[0], x.shape[1]), dtype=x.dtype, device=x.device)\n for i in range(x.shape[0]):\n x_mean[i, :] = x[i, :, : seq_len[i]].mean(dim=1)\n x_std[i, :] = x[i, :, : seq_len[i]].std(dim=1)\n # make sure x_std is not zero\n x_std += CONSTANT\n return (x - x_mean.unsqueeze(2)) / x_std.unsqueeze(2)\n elif normalize_type == \"all_features\":\n x_mean = torch.zeros(seq_len.shape, dtype=x.dtype, device=x.device)\n x_std = torch.zeros(seq_len.shape, dtype=x.dtype, device=x.device)\n for i in range(x.shape[0]):\n x_mean[i] = x[i, :, : seq_len[i].item()].mean()\n x_std[i] = x[i, :, : seq_len[i].item()].std()\n # make sure x_std is not zero\n x_std += CONSTANT\n return (x - x_mean.view(-1, 1, 1)) / x_std.view(-1, 1, 1)\n elif \"fixed_mean\" in normalize_type and \"fixed_std\" in normalize_type:\n x_mean = torch.tensor(normalize_type[\"fixed_mean\"], device=x.device)\n x_std = torch.tensor(normalize_type[\"fixed_std\"], device=x.device)\n return (x - x_mean.view(x.shape[0], x.shape[1]).unsqueeze(2)) / x_std.view(x.shape[0], x.shape[1]).unsqueeze(2)\n else:\n return x\n\n\ndef splice_frames(x, frame_splicing):\n \"\"\" Stacks frames together across feature dim\n\n input is batch_size, feature_dim, num_frames\n output is batch_size, feature_dim*frame_splicing, num_frames\n\n \"\"\"\n seq = [x]\n for n in range(1, frame_splicing):\n seq.append(torch.cat([x[:, :, :n], x[:, :, n:]], dim=2))\n return torch.cat(seq, dim=1)\n\n\nclass WaveformFeaturizer(object):\n def __init__(self, sample_rate=16000, int_values=False, augmentor=None):\n self.augmentor = augmentor if augmentor is not None else AudioAugmentor()\n self.sample_rate = sample_rate\n self.int_values = int_values\n\n def max_augmentation_length(self, length):\n return self.augmentor.max_augmentation_length(length)\n\n def process(self, file_path, offset=0, duration=0, trim=False):\n audio = AudioSegment.from_file(\n file_path,\n target_sr=self.sample_rate,\n int_values=self.int_values,\n offset=offset,\n duration=duration,\n trim=trim,\n )\n return self.process_segment(audio)\n\n def process_segment(self, audio_segment):\n self.augmentor.perturb(audio_segment)\n return torch.tensor(audio_segment.samples, dtype=torch.float)\n\n @classmethod\n def from_config(cls, input_config, perturbation_configs=None):\n if perturbation_configs is not None:\n aa = AudioAugmentor.from_config(perturbation_configs)\n else:\n aa = None\n\n sample_rate = input_config.get(\"sample_rate\", 16000)\n int_values = input_config.get(\"int_values\", False)\n\n return cls(sample_rate=sample_rate, int_values=int_values, augmentor=aa)\n\n\nclass FeaturizerFactory(object):\n def __init__(self):\n pass\n\n @classmethod\n def from_config(cls, input_cfg, perturbation_configs=None):\n return WaveformFeaturizer.from_config(input_cfg, perturbation_configs=perturbation_configs)\n\n\nclass FilterbankFeatures(nn.Module):\n \"\"\"Featurizer that converts wavs to Mel Spectrograms.\n See 
AudioToMelSpectrogramPreprocessor for args.\n \"\"\"\n\n def __init__(\n self,\n *,\n sample_rate=16000,\n n_window_size=320,\n n_window_stride=160,\n window=\"hann\",\n normalize=\"per_feature\",\n n_fft=None,\n preemph=0.97,\n nfilt=64,\n lowfreq=0,\n highfreq=None,\n log=True,\n log_zero_guard_type=\"add\",\n log_zero_guard_value=2 ** -24,\n dither=CONSTANT,\n pad_to=16,\n max_duration=16.7,\n frame_splicing=1,\n stft_conv=False,\n pad_value=0,\n mag_power=2.0,\n ):\n super(FilterbankFeatures, self).__init__()\n if (\n n_window_size is None\n or n_window_stride is None\n or not isinstance(n_window_size, int)\n or not isinstance(n_window_stride, int)\n or n_window_size <= 0\n or n_window_stride <= 0\n ):\n raise ValueError(\n f\"{self} got an invalid value for either n_window_size or \"\n f\"n_window_stride. Both must be positive ints.\"\n )\n logging.info(f\"PADDING: {pad_to}\")\n\n self.win_length = n_window_size\n self.hop_length = n_window_stride\n self.n_fft = n_fft or 2 ** math.ceil(math.log2(self.win_length))\n self.stft_conv = stft_conv\n\n if stft_conv:\n logging.info(\"STFT using conv\")\n\n # Create helper class to patch forward func for use with AMP\n class STFTPatch(STFT):\n def __init__(self, *params, **kw_params):\n super(STFTPatch, self).__init__(*params, **kw_params)\n\n def forward(self, input_data):\n return super(STFTPatch, self).transform(input_data)[0]\n\n self.stft = STFTPatch(self.n_fft, self.hop_length, self.win_length, window)\n\n else:\n logging.info(\"STFT using torch\")\n torch_windows = {\n 'hann': torch.hann_window,\n 'hamming': torch.hamming_window,\n 'blackman': torch.blackman_window,\n 'bartlett': torch.bartlett_window,\n 'none': None,\n }\n window_fn = torch_windows.get(window, None)\n window_tensor = window_fn(self.win_length, periodic=False) if window_fn else None\n self.register_buffer(\"window\", window_tensor)\n self.stft = lambda x: torch.stft(\n x,\n n_fft=self.n_fft,\n hop_length=self.hop_length,\n win_length=self.win_length,\n center=True,\n window=self.window.to(dtype=torch.float),\n )\n\n self.normalize = normalize\n self.log = log\n self.dither = dither\n self.frame_splicing = frame_splicing\n self.nfilt = nfilt\n self.preemph = preemph\n self.pad_to = pad_to\n highfreq = highfreq or sample_rate / 2\n\n filterbanks = torch.tensor(\n librosa.filters.mel(sample_rate, self.n_fft, n_mels=nfilt, fmin=lowfreq, fmax=highfreq,),\n dtype=torch.float,\n ).unsqueeze(0)\n # self.fb = filterbanks\n # self.window = window_tensor\n self.register_buffer(\"fb\", filterbanks)\n\n # Calculate maximum sequence length\n max_length = self.get_seq_len(torch.tensor(max_duration * sample_rate, dtype=torch.float))\n max_pad = pad_to - (max_length % pad_to) if pad_to > 0 else 0\n self.max_length = max_length + max_pad\n self.pad_value = pad_value\n self.mag_power = mag_power\n\n # We want to avoid taking the log of zero\n # There are two options: either adding or clamping to a small value\n if log_zero_guard_type not in [\"add\", \"clamp\"]:\n raise ValueError(\n f\"{self} received {log_zero_guard_type} for the \"\n f\"log_zero_guard_type parameter. 
It must be either 'add' or \"\n f\"'clamp'.\"\n )\n # log_zero_guard_value is the the small we want to use, we support\n # an actual number, or \"tiny\", or \"eps\"\n self.log_zero_guard_value = lambda _: log_zero_guard_value\n if isinstance(log_zero_guard_value, str):\n if log_zero_guard_value == \"tiny\":\n self.log_zero_guard_value = lambda x: torch.finfo(x.dtype).tiny\n elif log_zero_guard_value == \"eps\":\n self.log_zero_guard_value = lambda x: torch.finfo(x.dtype).eps\n else:\n raise ValueError(\n f\"{self} received {log_zero_guard_value} for the \"\n f\"log_zero_guard_type parameter. It must be either a \"\n f\"number, 'tiny', or 'eps'\"\n )\n self.log_zero_guard_type = log_zero_guard_type\n\n def get_seq_len(self, seq_len):\n return torch.ceil(seq_len / self.hop_length).to(dtype=torch.long)\n\n @property\n def filter_banks(self):\n return self.fb\n\n @torch.no_grad()\n def forward(self, x, seq_len):\n seq_len = self.get_seq_len(seq_len.float())\n\n # dither\n if self.dither > 0:\n x += self.dither * torch.randn_like(x)\n\n # do preemphasis\n if self.preemph is not None:\n x = torch.cat((x[:, 0].unsqueeze(1), x[:, 1:] - self.preemph * x[:, :-1]), dim=1,)\n\n x = self.stft(x)\n\n # get power spectrum\n if self.mag_power != 1.0:\n x = x.pow(self.mag_power)\n if not self.stft_conv:\n x = x.sum(-1)\n\n # dot with filterbank energies\n x = torch.matmul(self.fb.to(x.dtype), x)\n\n # log features if required\n if self.log:\n if self.log_zero_guard_type == \"add\":\n x = torch.log(x + self.log_zero_guard_value(x))\n elif self.log_zero_guard_type == \"clamp\":\n x = torch.log(torch.clamp(x, min=self.log_zero_guard_value(x)))\n else:\n raise ValueError(\"log_zero_guard_type was not understood\")\n\n # frame splicing if required\n if self.frame_splicing > 1:\n x = splice_frames(x, self.frame_splicing)\n\n # normalize if required\n if self.normalize:\n x = normalize_batch(x, seq_len, normalize_type=self.normalize)\n\n # mask to zero any values beyond seq_len in batch, pad to multiple of\n # `pad_to` (for efficiency)\n max_len = x.size(-1)\n mask = torch.arange(max_len).to(x.device)\n mask = mask.expand(x.size(0), max_len) >= seq_len.unsqueeze(1)\n x = x.masked_fill(mask.unsqueeze(1).type(torch.bool).to(device=x.device), self.pad_value,)\n del mask\n pad_to = self.pad_to\n if not self.training:\n pad_to = 16\n if pad_to == \"max\":\n x = nn.functional.pad(x, (0, self.max_length - x.size(-1)), value=self.pad_value)\n elif pad_to > 0:\n pad_amt = x.size(-1) % pad_to\n if pad_amt != 0:\n x = nn.functional.pad(x, (0, pad_to - pad_amt), value=self.pad_value)\n return x\n", "# Copyright (c) 2019 NVIDIA Corporation\n\nimport os\nimport random\nimport time\n\nimport matplotlib\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom sklearn.metrics import classification_report, confusion_matrix\n\nimport nemo\n\n__all__ = ['eval_iter_callback', 'eval_epochs_done_callback']\n\n\ndef tensor2list(tensor):\n return tensor.detach().cpu().tolist()\n\n\ndef eval_iter_callback(tensors, global_vars, eval_data_layer):\n if \"all_intent_preds\" not in global_vars.keys():\n global_vars[\"all_intent_preds\"] = []\n if \"all_intent_labels\" not in global_vars.keys():\n global_vars[\"all_intent_labels\"] = []\n if \"all_slot_preds\" not in global_vars.keys():\n global_vars[\"all_slot_preds\"] = []\n if \"all_slot_labels\" not in global_vars.keys():\n global_vars[\"all_slot_labels\"] = []\n if \"all_subtokens_mask\" not in global_vars.keys():\n global_vars[\"all_subtokens_mask\"] = []\n\n all_intent_logits, 
all_intent_labels = [], []\n all_slot_logits, all_slot_labels = [], []\n all_subtokens_mask = []\n for kv, v in tensors.items():\n if kv.startswith('intent_logits'):\n for v_tensor in v:\n for logit_tensor in v_tensor:\n all_intent_logits.append(tensor2list(logit_tensor))\n\n if kv.startswith('intents'):\n for v_tensor in v:\n for label_tensor in v_tensor:\n all_intent_labels.append(tensor2list(label_tensor))\n\n if kv.startswith('slot_logits'):\n for v_tensor in v:\n for logit_tensor in v_tensor:\n all_slot_logits.append(tensor2list(logit_tensor))\n\n if kv.startswith('slots'):\n for v_tensor in v:\n for label_tensor in v_tensor:\n all_slot_labels.extend(tensor2list(label_tensor))\n\n if kv.startswith('subtokens_mask'):\n for v_tensor in v:\n for subtokens_mask_tensor in v_tensor:\n all_subtokens_mask.extend(tensor2list(subtokens_mask_tensor))\n\n all_intent_preds = list(np.argmax(np.asarray(all_intent_logits), 1))\n all_slot_preds = list(np.argmax(np.asarray(all_slot_logits), 2).flatten())\n global_vars[\"all_intent_preds\"].extend(all_intent_preds)\n global_vars[\"all_intent_labels\"].extend(all_intent_labels)\n global_vars[\"all_slot_preds\"].extend(all_slot_preds)\n global_vars[\"all_slot_labels\"].extend(all_slot_labels)\n global_vars[\"all_subtokens_mask\"].extend(all_subtokens_mask)\n\n\ndef list2str(l):\n return ' '.join([str(j) for j in l])\n\n\ndef eval_epochs_done_callback(global_vars, graph_fold):\n intent_labels = np.asarray(global_vars['all_intent_labels'])\n intent_preds = np.asarray(global_vars['all_intent_preds'])\n\n slot_labels = np.asarray(global_vars['all_slot_labels'])\n slot_preds = np.asarray(global_vars['all_slot_preds'])\n subtokens_mask = np.asarray(global_vars['all_subtokens_mask']) > 0.5\n\n slot_labels = slot_labels[subtokens_mask]\n slot_preds = slot_preds[subtokens_mask]\n\n i = 0\n if intent_preds.shape[0] > 21:\n i = random.randint(0, intent_preds.shape[0] - 21)\n nemo.logging.info(\"Sampled i_preds: [%s]\" % list2str(intent_preds[i : i + 20]))\n nemo.logging.info(\"Sampled intents: [%s]\" % list2str(intent_labels[i : i + 20]))\n nemo.logging.info(\"Sampled s_preds: [%s]\" % list2str(slot_preds[i : i + 20]))\n nemo.logging.info(\"Sampled slots: [%s]\" % list2str(slot_labels[i : i + 20]))\n cm = confusion_matrix(intent_labels, intent_preds)\n nemo.logging.info(f'Confusion matrix:\\n{cm}')\n fig = plt.figure()\n ax = fig.add_subplot(111)\n cax = ax.matshow(cm)\n plt.title('Confusion matrix of the classifier')\n fig.colorbar(cax)\n plt.xlabel('Predicted')\n plt.ylabel('True')\n os.makedirs(graph_fold, exist_ok=True)\n plt.savefig(os.path.join(graph_fold, time.strftime('%Y%m%d-%H%M%S')))\n\n nemo.logging.info('Intent prediction results')\n correct_preds = sum(intent_labels == intent_preds)\n intent_accuracy = correct_preds / intent_labels.shape[0]\n nemo.logging.info(f'Intent accuracy: {intent_accuracy}')\n nemo.logging.info(\n f'Classification report:\\n \\\n {classification_report(intent_labels, intent_preds)}'\n )\n\n nemo.logging.info('Slot prediction results')\n slot_accuracy = sum(slot_labels == slot_preds) / slot_labels.shape[0]\n nemo.logging.info(f'Slot accuracy: {slot_accuracy}')\n nemo.logging.info(\n f'Classification report:\\n \\\n {classification_report(slot_labels[:-2], slot_preds[:-2])}'\n )\n\n return dict({'intent_accuracy': intent_accuracy, 'slot_accuracy': slot_accuracy})\n", "# Copyright (c) 2019 NVIDIA Corporation\nimport torch\n\nfrom nemo.backends.pytorch.nm import TrainableNM\nfrom nemo.core.neural_types import AxisType, BatchTag, 
ChannelTag, NeuralType, TimeTag\n\n\nclass GreedyCTCDecoder(TrainableNM):\n \"\"\"\n Greedy decoder that computes the argmax over a softmax distribution\n \"\"\"\n\n @property\n def input_ports(self):\n \"\"\"Returns definitions of module input ports.\n\n log_probs:\n 0: AxisType(BatchTag)\n\n 1: AxisType(TimeTag)\n\n 2: AxisType(ChannelTag)\n \"\"\"\n return {\"log_probs\": NeuralType({0: AxisType(BatchTag), 1: AxisType(TimeTag), 2: AxisType(ChannelTag),})}\n\n @property\n def output_ports(self):\n \"\"\"Returns definitions of module output ports.\n\n predictions:\n 0: AxisType(BatchTag)\n\n 1: AxisType(TimeTag)\n \"\"\"\n return {\"predictions\": NeuralType({0: AxisType(BatchTag), 1: AxisType(TimeTag)})}\n\n def __init__(self, **kwargs):\n TrainableNM.__init__(self, **kwargs)\n\n def forward(self, log_probs):\n with torch.no_grad():\n argmx = log_probs.argmax(dim=-1, keepdim=False)\n return argmx\n" ]
[ [ "torch.zeros", "torch.cat", "torch.arange", "torch.finfo", "torch.no_grad", "torch.ceil", "torch.randn_like", "torch.tensor", "torch.nn.functional.pad" ], [ "sklearn.metrics.confusion_matrix", "numpy.asarray", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.title", "matplotlib.pyplot.figure", "sklearn.metrics.classification_report", "matplotlib.pyplot.ylabel" ], [ "torch.no_grad" ] ]
ICinoI/pandapipes
[ "48dd088cdaaa21349f915547cd0f1d539885325c" ]
[ "pandapipes/component_models/pump_component.py" ]
[ "# Copyright (c) 2020-2021 by Fraunhofer Institute for Energy Economics\n# and Energy System Technology (IEE), Kassel, and University of Kassel. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.\n\nfrom operator import itemgetter\n\nimport numpy as np\nfrom numpy import dtype\nfrom pandapipes.component_models.abstract_models import BranchWZeroLengthComponent\nfrom pandapipes.constants import NORMAL_TEMPERATURE, NORMAL_PRESSURE\nfrom pandapipes.idx_branch import STD_TYPE, VINIT, D, AREA, TL, \\\n LOSS_COEFFICIENT as LC, FROM_NODE, TINIT, PL\nfrom pandapipes.idx_node import PINIT, PAMB\nfrom pandapipes.pipeflow_setup import get_net_option, get_fluid\n\n\nclass Pump(BranchWZeroLengthComponent):\n \"\"\"\n\n \"\"\"\n\n @classmethod\n def from_to_node_cols(cls):\n return \"from_junction\", \"to_junction\"\n\n @classmethod\n def table_name(cls):\n return \"pump\"\n\n @classmethod\n def active_identifier(cls):\n return \"in_service\"\n\n @classmethod\n def create_pit_branch_entries(cls, net, pump_pit, node_name):\n \"\"\"\n Function which creates pit branch entries with a specific table.\n\n :param net: The pandapipes network\n :type net: pandapipesNet\n :param pump_pit:\n :type pump_pit:\n :param internal_pipe_number:\n :type internal_pipe_number:\n :return: No Output.\n \"\"\"\n pump_pit = super().create_pit_branch_entries(net, pump_pit, node_name)\n std_types_lookup = np.array(list(net.std_type[cls.table_name()].keys()))\n std_type, pos = np.where(net[cls.table_name()]['std_type'].values\n == std_types_lookup[:, np.newaxis])\n pump_pit[pos, STD_TYPE] = std_type\n pump_pit[:, D] = 0.1\n pump_pit[:, AREA] = pump_pit[:, D] ** 2 * np.pi / 4\n pump_pit[:, LC] = 0\n\n @classmethod\n def calculate_pressure_lift(cls, net, pump_pit, node_pit):\n \"\"\"\n\n :param net: The pandapipes network\n :type net: pandapipesNet\n :param pump_pit:\n :type pump_pit:\n :param node_pit:\n :type node_pit:\n :return: power stroke\n :rtype: float\n \"\"\"\n area = pump_pit[:, AREA]\n idx = pump_pit[:, STD_TYPE].astype(int)\n std_types = np.array(list(net.std_type['pump'].keys()))[idx]\n from_nodes = pump_pit[:, FROM_NODE].astype(np.int32)\n # to_nodes = pump_pit[:, TO_NODE].astype(np.int32)\n fluid = get_fluid(net)\n p_from = node_pit[from_nodes, PAMB] + node_pit[from_nodes, PINIT]\n # p_to = node_pit[to_nodes, PAMB] + node_pit[to_nodes, PINIT]\n numerator = NORMAL_PRESSURE * pump_pit[:, TINIT]\n v_mps = pump_pit[:, VINIT]\n if fluid.is_gas:\n # consider volume flow at inlet\n normfactor_from = numerator * fluid.get_property(\"compressibility\", p_from) \\\n / (p_from * NORMAL_TEMPERATURE)\n v_mean = v_mps * normfactor_from\n else:\n v_mean = v_mps\n vol = v_mean * area\n fcts = itemgetter(*std_types)(net['std_type']['pump'])\n fcts = [fcts] if not isinstance(fcts, tuple) else fcts\n pl = np.array(list(map(lambda x, y: x.get_pressure(y), fcts, vol)))\n pump_pit[:, PL] = pl\n\n @classmethod\n def calculate_temperature_lift(cls, net, pump_pit, node_pit):\n \"\"\"\n\n :param net:\n :type net:\n :param pump_pit:\n :type pump_pit:\n :param node_pit:\n :type node_pit:\n :return:\n :rtype:\n \"\"\"\n pump_pit[:, TL] = 0\n\n @classmethod\n def extract_results(cls, net, options, node_name):\n \"\"\"\n Function that extracts certain results.\n\n :param net: The pandapipes network\n :type net: pandapipesNet\n :param options:\n :type options:\n :return: No Output.\n \"\"\"\n placement_table, pump_pit, res_table = super().prepare_result_tables(net, options, 
node_name)\n res_table['deltap_bar'].values[placement_table] = pump_pit[:, PL]\n\n @classmethod\n def get_component_input(cls):\n \"\"\"\n\n Get component input.\n\n :return:\n :rtype:\n \"\"\"\n return [(\"name\", dtype(object)),\n (\"from_junction\", \"u4\"),\n (\"to_junction\", \"u4\"),\n (\"std_type\", dtype(object)),\n (\"in_service\", 'bool'),\n (\"type\", dtype(object))]\n\n @classmethod\n def get_result_table(cls, net):\n \"\"\"\n\n Gets the result table.\n\n :param net: The pandapipes network\n :type net: pandapipesNet\n :return: (columns, all_float) - the column names and whether they are all float type. Only\n if False, returns columns as tuples also specifying the dtypes\n :rtype: (list, bool)\n \"\"\"\n return [\"deltap_bar\"], True\n" ]
[ [ "numpy.dtype" ] ]
shuuchen/UNet
[ "15722282e8342895a5003cb21dfae70e436b31cf" ]
[ "models/res_unet_classifier.py" ]
[ "import torch\nfrom torch import nn\nimport torch.nn.functional as F\n\n\nclass ResUNet(nn.Module):\n def __init__(\n self,\n in_channels=1,\n out_channels=2,\n depth=5,\n wf=6,\n padding=True,\n batch_norm=True,\n up_mode='upconv',\n ):\n \"\"\"\n Implementation of\n U-Net: Convolutional Networks for Biomedical Image Segmentation\n (Ronneberger et al., 2015)\n https://arxiv.org/abs/1505.04597\n Using the default arguments will yield the exact version used\n in the original paper\n Args:\n in_channels (int): number of input channels\n out_channels (int): number of output channels\n depth (int): depth of the network\n wf (int): number of filters in the first layer is 2**wf\n padding (bool): if True, apply padding such that the input shape\n is the same as the output.\n This may introduce artifacts\n batch_norm (bool): Use BatchNorm after layers with an\n activation function\n up_mode (str): one of 'upconv' or 'upsample'.\n 'upconv' will use transposed convolutions for\n learned upsampling.\n 'upsample' will use bilinear upsampling.\n \"\"\"\n super(ResUNet, self).__init__()\n assert up_mode in ('upconv', 'upsample')\n self.padding = padding\n self.depth = depth\n self.batch_norm = batch_norm\n prev_channels = in_channels\n\n # residual concat preparation\n self.conv = nn.Conv2d(in_channels, 2 ** wf, kernel_size=3, padding=int(padding))\n if self.batch_norm:\n self.bn = nn.BatchNorm2d(2 ** wf)\n self.relu = nn.ReLU()\n\n self.down_path = nn.ModuleList()\n for i in range(depth):\n self.down_path.append(\n UNetResConvBlock(prev_channels, 2 ** (wf + i), padding, batch_norm)\n )\n prev_channels = 2 ** (wf + i)\n\n self.up_path = nn.ModuleList()\n for i in reversed(range(depth - 1)):\n self.up_path.append(\n UNetUpBlock(prev_channels, 2 ** (wf + i), up_mode, padding, batch_norm)\n )\n prev_channels = 2 ** (wf + i)\n\n self.last = nn.Conv2d(prev_channels, out_channels, kernel_size=1)\n\n def forward(self, x):\n '''\n x = self.conv(x)\n if self.batch_norm:\n x = self.bn(x)\n x = self.relu(x) # leaky relu maybe better\n '''\n blocks = []\n for i, down in enumerate(self.down_path):\n x = down(x)\n if i != len(self.down_path) - 1:\n blocks.append(x)\n x = F.max_pool2d(x, 2)\n\n for i, up in enumerate(self.up_path):\n x = up(x, blocks[-i - 1])\n\n return self.last(x)\n\n\nclass UNetResConvBlock(nn.Module):\n def __init__(self, in_size, out_size, padding, batch_norm):\n super(UNetResConvBlock, self).__init__()\n\n if batch_norm:\n bn = nn.BatchNorm2d\n\n self.conv1 = nn.Conv2d(in_size, out_size, kernel_size=3, padding=int(padding))\n self.bn1 = bn(out_size)\n self.relu1 = nn.ReLU()\n\n self.conv2 = nn.Conv2d(out_size, out_size, kernel_size=3, padding=int(padding))\n self.bn2 = bn(out_size)\n self.relu2 = nn.ReLU()\n \n self.conv3 = nn.Conv2d(out_size, out_size, kernel_size=3, padding=int(padding))\n self.bn3 = bn(out_size)\n self.relu3 = nn.ReLU()\n\n def forward(self, x):\n\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu1(x)\n\n identity = x\n \n out = self.conv2(x)\n out = self.bn2(out)\n out = self.relu2(out)\n \n out = self.conv3(out)\n out = self.bn3(out)\n \n out += identity\n out = self.relu3(out)\n \n return out\n\n\nclass UNetUpBlock(nn.Module):\n def __init__(self, in_size, out_size, up_mode, padding, batch_norm):\n super(UNetUpBlock, self).__init__()\n if up_mode == 'upconv':\n self.up = nn.ConvTranspose2d(in_size, out_size, kernel_size=2, stride=2)\n elif up_mode == 'upsample':\n self.up = nn.Sequential(\n nn.Upsample(mode='bilinear', scale_factor=2),\n nn.Conv2d(in_size, out_size, 
kernel_size=1),\n )\n\n self.conv_block = UNetResConvBlock(in_size, out_size, padding, batch_norm)\n\n def center_crop(self, layer, target_size):\n _, _, layer_height, layer_width = layer.size()\n diff_y = (layer_height - target_size[0]) // 2\n diff_x = (layer_width - target_size[1]) // 2\n return layer[\n :, :, diff_y : (diff_y + target_size[0]), diff_x : (diff_x + target_size[1])\n ]\n\n def forward(self, x, bridge):\n up = self.up(x)\n crop1 = self.center_crop(bridge, up.shape[2:])\n out = torch.cat([up, crop1], 1)\n out = self.conv_block(out)\n\n return out\n" ]
[ [ "torch.cat", "torch.nn.ModuleList", "torch.nn.BatchNorm2d", "torch.nn.ConvTranspose2d", "torch.nn.ReLU", "torch.nn.Upsample", "torch.nn.Conv2d", "torch.nn.functional.max_pool2d" ] ]
gngdb/ROMP
[ "a940af92e266530f4fe65807ab5920f0b4246511" ]
[ "src/lib/models/smpl.py" ]
[ "# -*- coding: utf-8 -*-\n\n# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is\n# holder of all proprietary rights on this computer program.\n# You can only use this computer program if you have closed\n# a license agreement with MPG or you get the right to use the computer\n# program from someone who is authorized to grant you that right.\n# Any use of the computer program without a valid license is prohibited and\n# liable to prosecution.\n#\n# Copyright©2019 Max-Planck-Gesellschaft zur Förderung\n# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute\n# for Intelligent Systems and the Max Planck Institute for Biological\n# Cybernetics. All rights reserved.\n#\n# Contact: ps-license@tuebingen.mpg.de\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport os,sys\nimport os.path as osp\n\ntry:\n import cPickle as pickle\nexcept ImportError:\n import pickle\n\nimport numpy as np\n\nfrom collections import namedtuple\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nsys.path.append(os.path.abspath(__file__).replace('models/smpl.py',''))\nfrom config import args\n\nModelOutput = namedtuple('ModelOutput',\n ['vertices', 'joints','joints_h36m17', 'joints_smpl24','full_pose', 'betas',\n 'global_orient','body_pose', 'expression','jaw_pose',\n 'left_hand_pose', 'right_hand_pose'])\nModelOutput.__new__.__defaults__ = (None,) * len(ModelOutput._fields)\n\n\ndef create(model_path, model_type='smpl',\n **kwargs):\n ''' Method for creating a model from a path and a model type\n\n Parameters\n ----------\n model_path: str\n Either the path to the model you wish to load or a folder,\n where each subfolder contains the differents types, i.e.:\n model_path:\n |\n |-- smpl\n |-- SMPL_FEMALE\n |-- SMPL_NEUTRAL\n |-- SMPL_MALE\n |-- smplh\n |-- SMPLH_FEMALE\n |-- SMPLH_MALE\n |-- smplx\n |-- SMPLX_FEMALE\n |-- SMPLX_NEUTRAL\n |-- SMPLX_MALE\n model_type: str, optional\n When model_path is a folder, then this parameter specifies the\n type of model to be loaded\n **kwargs: dict\n Keyword arguments\n\n Returns\n -------\n body_model: nn.Module\n The PyTorch module that implements the corresponding body model\n Raises\n ------\n ValueError: In case the model type is not one of SMPL, SMPLH or\n SMPLX\n '''\n\n # If it's a folder, assume\n if osp.isdir(model_path):\n model_path = os.path.join(model_path, model_type)\n\n if model_type.lower() == 'smpl':\n return SMPL(model_path, **kwargs)\n else:\n raise ValueError('Unknown model type {}, exiting!'.format(model_type))\n\nclass VertexJointSelector(nn.Module):\n\n def __init__(self, vertex_ids=None,\n use_hands=True,\n use_feet_keypoints=True, **kwargs):\n super(VertexJointSelector, self).__init__()\n\n extra_joints_idxs = []\n\n face_keyp_idxs = np.array([\n vertex_ids['nose'],\n vertex_ids['reye'],\n vertex_ids['leye'],\n vertex_ids['rear'],\n vertex_ids['lear']], dtype=np.int64)\n\n extra_joints_idxs = np.concatenate([extra_joints_idxs,\n face_keyp_idxs])\n\n if use_feet_keypoints:\n feet_keyp_idxs = np.array([vertex_ids['LBigToe'],\n vertex_ids['LSmallToe'],\n vertex_ids['LHeel'],\n vertex_ids['RBigToe'],\n vertex_ids['RSmallToe'],\n vertex_ids['RHeel']], dtype=np.int32)\n\n extra_joints_idxs = np.concatenate(\n [extra_joints_idxs, feet_keyp_idxs])\n\n if use_hands:\n self.tip_names = ['thumb', 'index', 'middle', 'ring', 'pinky']\n\n tips_idxs = []\n for hand_id in ['l', 'r']:\n for tip_name in self.tip_names:\n 
tips_idxs.append(vertex_ids[hand_id + tip_name])\n\n extra_joints_idxs = np.concatenate(\n [extra_joints_idxs, tips_idxs])\n\n self.register_buffer('extra_joints_idxs',\n to_tensor(extra_joints_idxs, dtype=torch.long))\n\n def forward(self, vertices, joints):\n extra_joints = torch.index_select(vertices, 1, self.extra_joints_idxs)\n joints = torch.cat([joints, extra_joints], dim=1)\n\n return joints\n\n\nclass SMPL(nn.Module):\n\n NUM_JOINTS = 23\n NUM_BODY_JOINTS = 23\n NUM_BETAS = 10\n\n def __init__(self, model_path, J_reg_extra9_path=None, J_reg_h36m17_path=None,\\\n data_struct=None, betas=None, global_orient=None,\\\n body_pose=None, transl=None, dtype=torch.float32, batch_size=1,\\\n joint_mapper=None, gender='neutral', vertex_ids=None, **kwargs):\n ''' SMPL model constructor\n\n Parameters\n ----------\n model_path: str\n The path to the folder or to the file where the model\n parameters are stored\n data_struct: Strct\n A struct object. If given, then the parameters of the model are\n read from the object. Otherwise, the model tries to read the\n parameters from the given `model_path`. (default = None)\n global_orient: torch.tensor, optional, Bx3\n The default value for the global orientation variable.\n (default = None)\n body_pose: torch.tensor, optional, Bx(Body Joints * 3)\n The default value for the body pose variable.\n (default = None)\n betas: torch.tensor, optional, Bx10\n The default value for the shape member variable.\n (default = None)\n transl: torch.tensor, optional, Bx3\n The default value for the transl variable.\n (default = None)\n dtype: torch.dtype, optional\n The data type for the created variables\n batch_size: int, optional\n The batch size used for creating the member variables\n joint_mapper: object, optional\n An object that re-maps the joints. Useful if one wants to\n re-order the SMPL joints to some other convention (e.g. 
MSCOCO)\n (default = None)\n gender: str, optional\n Which gender to load\n vertex_ids: dict, optional\n A dictionary containing the indices of the extra vertices that\n will be selected\n '''\n\n self.gender = gender\n\n if data_struct is None:\n if osp.isdir(model_path):\n model_fn = 'SMPL_{}.{ext}'.format(gender.upper(), ext='pkl')\n smpl_path = os.path.join(model_path, model_fn)\n else:\n smpl_path = model_path\n assert osp.exists(smpl_path), 'Path {} does not exist!'.format(\n smpl_path)\n\n with open(smpl_path, 'rb') as smpl_file:\n data_struct = Struct(**pickle.load(smpl_file,\n encoding='latin1'))\n\n super(SMPL, self).__init__()\n self.batch_size = batch_size\n\n if vertex_ids is None:\n # SMPL and SMPL-H share the same topology, so any extra joints can\n # be drawn from the same place\n vertex_ids = VERTEX_IDS['smplh']\n\n self.dtype = dtype\n\n #self.joint_mapper = joint_mapper\n\n self.vertex_joint_selector = VertexJointSelector(\n vertex_ids=vertex_ids, **kwargs)\n\n self.faces = data_struct.f\n self.register_buffer('faces_tensor',\n to_tensor(to_np(self.faces, dtype=np.int64),\n dtype=torch.long))\n\n # The vertices of the template model\n self.register_buffer('v_template',\n to_tensor(to_np(data_struct.v_template),\n dtype=dtype))\n if betas is None:\n default_betas = torch.zeros([batch_size, self.NUM_BETAS],dtype=dtype)\n else:\n if 'torch.Tensor' in str(type(betas)):\n default_betas = betas.clone().detach()\n else:\n default_betas = torch.tensor(betas,dtype=dtype)\n\n self.register_parameter('betas', nn.Parameter(default_betas,\n requires_grad=True))\n\n # The shape components\n shapedirs = data_struct.shapedirs\n # The shape components\n self.register_buffer(\n 'shapedirs',\n to_tensor(to_np(shapedirs), dtype=dtype))\n\n j_regressor = to_tensor(to_np(\n data_struct.J_regressor), dtype=dtype)\n self.register_buffer('J_regressor', j_regressor)\n\n if J_reg_extra9_path is not None:\n J_regressor_extra9 = np.load(J_reg_extra9_path)\n J_regressor_extra9 = to_tensor(to_np(J_regressor_extra9), dtype=dtype)\n self.register_buffer('J_regressor_extra9', J_regressor_extra9)\n else:\n self.register_buffer('J_regressor_extra9', None)\n\n if J_reg_h36m17_path is not None:\n H36M_TO_J17 = [6, 5, 4, 1, 2, 3, 16, 15, 14, 11, 12, 13, 8, 10, 0, 7, 9]\n J_regressor_h36m17 = np.load(J_reg_h36m17_path)[H36M_TO_J17]\n J_regressor_h36m17 = to_tensor(to_np(J_regressor_h36m17), dtype=dtype)\n self.register_buffer('J_regressor_h36m17', J_regressor_h36m17)\n else:\n self.register_buffer('J_regressor_h36m17', None)\n\n # Pose blend shape basis: 6890 x 3 x 207, reshaped to 6890*3 x 207\n num_pose_basis = data_struct.posedirs.shape[-1]\n # 207 x 20670\n posedirs = np.reshape(data_struct.posedirs, [-1, num_pose_basis]).T\n self.register_buffer('posedirs',\n to_tensor(to_np(posedirs), dtype=dtype))\n\n # indices of parents for each joints\n parents = to_tensor(to_np(data_struct.kintree_table[0])).long()\n parents[0] = -1\n self.register_buffer('parents', parents)\n\n self.register_buffer('lbs_weights',\n to_tensor(to_np(data_struct.weights), dtype=dtype))\n\n def create_mean_pose(self, data_struct):\n pass\n\n @torch.no_grad()\n def reset_params(self, **params_dict):\n for param_name, param in self.named_parameters():\n if param_name in params_dict:\n param[:] = torch.tensor(params_dict[param_name])\n else:\n param.fill_(0)\n\n def get_num_verts(self):\n return self.v_template.shape[0]\n\n def get_num_faces(self):\n return self.faces.shape[0]\n\n def extra_repr(self):\n return 'Number of betas: 
{}'.format(self.NUM_BETAS)\n\n def forward(self, betas=None, body_pose=None, global_orient=None,\n transl=None, return_verts=True, return_full_pose=False,\n **kwargs):\n ''' Forward pass for the SMPL model\n\n Parameters\n ----------\n global_orient: torch.tensor, optional, shape Bx3\n If given, ignore the member variable and use it as the global\n rotation of the body. Useful if someone wishes to predicts this\n with an external model. (default=None)\n betas: torch.tensor, optional, shape Bx10\n If given, ignore the member variable `betas` and use it\n instead. For example, it can used if shape parameters\n `betas` are predicted from some external model.\n (default=None)\n body_pose: torch.tensor, optional, shape Bx(J*3)\n If given, ignore the member variable `body_pose` and use it\n instead. For example, it can used if someone predicts the\n pose of the body joints are predicted from some external model.\n It should be a tensor that contains joint rotations in\n axis-angle format. (default=None)\n transl: torch.tensor, optional, shape Bx3\n If given, ignore the member variable `transl` and use it\n instead. For example, it can used if the translation\n `transl` is predicted from some external model.\n (default=None)\n return_verts: bool, optional\n Return the vertices. (default=True)\n return_full_pose: bool, optional\n Returns the full axis-angle pose vector (default=False)\n\n Returns\n -------\n '''\n betas = betas if betas is not None else self.betas\n\n full_pose = torch.cat([global_orient, body_pose], dim=1)\n\n vertices, joints = lbs(betas, full_pose, self.v_template,\n self.shapedirs, self.posedirs,\n self.J_regressor, self.parents,\n self.lbs_weights, dtype=self.dtype)\n\n joints = self.vertex_joint_selector(vertices, joints)\n joints_smpl24 = joints.clone()\n if self.J_regressor_h36m17 is not None:\n # 54 joints = 45 joints + 9 extra joints from different datasets\n joints_h36m17 = vertices2joints(self.J_regressor_h36m17, vertices)\n # use the middle of hip used in the most 2D pose datasets, not the o-th Pelvis of SMPL 24 joint\n joints_h36m17_pelvis = joints_h36m17[:,14].unsqueeze(1)\n joints_h36m17 = joints_h36m17 - joints_h36m17_pelvis\n\n if self.J_regressor_extra9 is not None:\n # 54 joints = 45 joints + 9 extra joints from different datasets\n joints = torch.cat([joints, vertices2joints(self.J_regressor_extra9, vertices)],1)\n # use the Pelvis of most 2D image, not the original Pelvis\n root_trans = joints[:,49].unsqueeze(1)\n if args().model_version!=1 or args().backbone!='hrnet':\n joints = joints - root_trans\n vertices = vertices - root_trans\n\n output = ModelOutput(vertices=vertices,\n global_orient=global_orient,\n body_pose=body_pose,\n joints=joints,\n joints_h36m17=joints_h36m17,\n joints_smpl24=joints_smpl24,\n betas=betas,\n full_pose=full_pose)\n\n return output\n\nVERTEX_IDS = {\n 'smplh': {\n 'nose': 332,\n 'reye': 6260,\n 'leye': 2800,\n 'rear': 4071,\n 'lear': 583,\n 'rthumb': 6191,\n 'rindex': 5782,\n 'rmiddle': 5905,\n 'rring': 6016,\n 'rpinky': 6133,\n 'lthumb': 2746,\n 'lindex': 2319,\n 'lmiddle': 2445,\n 'lring': 2556,\n 'lpinky': 2673,\n 'LBigToe': 3216,\n 'LSmallToe': 3226,\n 'LHeel': 3387,\n 'RBigToe': 6617,\n 'RSmallToe': 6624,\n 'RHeel': 6787\n },\n 'smplx': {\n 'nose': 9120,\n 'reye': 9929,\n 'leye': 9448,\n 'rear': 616,\n 'lear': 6,\n 'rthumb': 8079,\n 'rindex': 7669,\n 'rmiddle': 7794,\n 'rring': 7905,\n 'rpinky': 8022,\n 'lthumb': 5361,\n 'lindex': 4933,\n 'lmiddle': 5058,\n 'lring': 5169,\n 'lpinky': 5286,\n 'LBigToe': 5770,\n 'LSmallToe': 
5780,\n 'LHeel': 8846,\n 'RBigToe': 8463,\n 'RSmallToe': 8474,\n 'RHeel': 8635\n }\n}\n\ndef vertices2landmarks(vertices, faces, lmk_faces_idx, lmk_bary_coords):\n ''' Calculates landmarks by barycentric interpolation\n\n Parameters\n ----------\n vertices: torch.tensor BxVx3, dtype = torch.float32\n The tensor of input vertices\n faces: torch.tensor Fx3, dtype = torch.long\n The faces of the mesh\n lmk_faces_idx: torch.tensor L, dtype = torch.long\n The tensor with the indices of the faces used to calculate the\n landmarks.\n lmk_bary_coords: torch.tensor Lx3, dtype = torch.float32\n The tensor of barycentric coordinates that are used to interpolate\n the landmarks\n\n Returns\n -------\n landmarks: torch.tensor BxLx3, dtype = torch.float32\n The coordinates of the landmarks for each mesh in the batch\n '''\n # Extract the indices of the vertices for each face\n # BxLx3\n batch_size, num_verts = vertices.shape[:2]\n device = vertices.device\n\n lmk_faces = torch.index_select(faces, 0, lmk_faces_idx.view(-1)).contiguous().view(\n batch_size, -1, 3)\n\n lmk_faces = lmk_faces + torch.arange(\n batch_size, dtype=torch.long, device=device).view(-1, 1, 1) * num_verts\n\n lmk_vertices = vertices.view(-1, 3).contiguous()[lmk_faces].contiguous().view(\n batch_size, -1, 3, 3)\n\n landmarks = torch.einsum('blfi,blf->bli', [lmk_vertices, lmk_bary_coords])\n return landmarks\n\n\ndef lbs(betas, pose, v_template, shapedirs, posedirs, J_regressor, parents,\n lbs_weights, pose2rot=True, dtype=torch.float32):\n ''' Performs Linear Blend Skinning with the given shape and pose parameters\n\n Parameters\n ----------\n betas : torch.tensor BxNB\n The tensor of shape parameters\n pose : torch.tensor Bx(J + 1) * 3\n The pose parameters in axis-angle format\n v_template torch.tensor BxVx3\n The template mesh that will be deformed\n shapedirs : torch.tensor 1xNB\n The tensor of PCA shape displacements\n posedirs : torch.tensor Px(V * 3)\n The pose PCA coefficients\n J_regressor : torch.tensor JxV\n The regressor array that is used to calculate the joints from\n the position of the vertices\n parents: torch.tensor J\n The array that describes the kinematic tree for the model\n lbs_weights: torch.tensor N x V x (J + 1)\n The linear blend skinning weights that represent how much the\n rotation matrix of each part affects each vertex\n pose2rot: bool, optional\n Flag on whether to convert the input pose tensor to rotation\n matrices. The default value is True. If False, then the pose tensor\n should already contain rotation matrices and have a size of\n Bx(J + 1)x9\n dtype: torch.dtype, optional\n\n Returns\n -------\n verts: torch.tensor BxVx3\n The vertices of the mesh after applying the shape and pose\n displacements.\n joints: torch.tensor BxJx3\n The joints of the model\n '''\n\n batch_size = max(betas.shape[0], pose.shape[0])\n device = betas.device\n\n # Add shape contribution\n v_shaped = v_template + blend_shapes(betas, shapedirs)\n\n # Get the joints\n # NxJx3 array\n J = vertices2joints(J_regressor, v_shaped)\n\n # 3. 
Add pose blend shapes\n # N x J x 3 x 3\n ident = torch.eye(3, dtype=dtype, device=device)\n if pose2rot:\n rot_mats = batch_rodrigues(\n pose.view(-1, 3), dtype=dtype).view([batch_size, -1, 3, 3])\n\n pose_feature = (rot_mats[:, 1:, :, :] - ident).view([batch_size, -1])\n # (N x P) x (P, V * 3) -> N x V x 3\n pose_offsets = torch.matmul(pose_feature, posedirs) \\\n .view(batch_size, -1, 3)\n else:\n pose_feature = pose[:, 1:].view(batch_size, -1, 3, 3) - ident\n rot_mats = pose.view(batch_size, -1, 3, 3)\n\n pose_offsets = torch.matmul(pose_feature.view(batch_size, -1),\n posedirs).view(batch_size, -1, 3)\n\n v_posed = pose_offsets + v_shaped\n # 4. Get the global joint location\n J_transformed, A = batch_rigid_transform(rot_mats, J, parents, dtype=dtype)\n\n # 5. Do skinning:\n # W is N x V x (J + 1)\n W = lbs_weights.unsqueeze(dim=0).expand([batch_size, -1, -1])\n # (N x V x (J + 1)) x (N x (J + 1) x 16)\n num_joints = J_regressor.shape[0]\n T = torch.matmul(W, A.view(batch_size, num_joints, 16)) \\\n .view(batch_size, -1, 4, 4)\n\n homogen_coord = torch.ones([batch_size, v_posed.shape[1], 1],\n dtype=dtype, device=device)\n v_posed_homo = torch.cat([v_posed, homogen_coord], dim=2)\n v_homo = torch.matmul(T, torch.unsqueeze(v_posed_homo, dim=-1))\n\n verts = v_homo[:, :, :3, 0]\n\n return verts, J_transformed\n\ndef to_tensor(array, dtype=torch.float32):\n if 'torch.tensor' not in str(type(array)):\n return torch.tensor(array, dtype=dtype)\n\n\nclass Struct(object):\n def __init__(self, **kwargs):\n for key, val in kwargs.items():\n setattr(self, key, val)\n\n\ndef to_np(array, dtype=np.float32):\n if 'scipy.sparse' in str(type(array)):\n array = array.todense()\n return np.array(array, dtype=dtype)\n\n\ndef rot_mat_to_euler(rot_mats):\n # Calculates rotation matrix to euler angles\n # Careful for extreme cases of eular angles like [0.0, pi, 0.0]\n\n sy = torch.sqrt(rot_mats[:, 0, 0] * rot_mats[:, 0, 0] +\n rot_mats[:, 1, 0] * rot_mats[:, 1, 0])\n return torch.atan2(-rot_mats[:, 2, 0], sy)\n\ndef vertices2joints(J_regressor, vertices):\n ''' Calculates the 3D joint locations from the vertices\n\n Parameters\n ----------\n J_regressor : torch.tensor JxV\n The regressor array that is used to calculate the joints from the\n position of the vertices\n vertices : torch.tensor BxVx3\n The tensor of mesh vertices\n\n Returns\n -------\n torch.tensor BxJx3\n The location of the joints\n '''\n\n return torch.einsum('bik,ji->bjk', [vertices, J_regressor])\n\n\ndef blend_shapes(betas, shape_disps):\n ''' Calculates the per vertex displacement due to the blend shapes\n\n\n Parameters\n ----------\n betas : torch.tensor Bx(num_betas)\n Blend shape coefficients\n shape_disps: torch.tensor Vx3x(num_betas)\n Blend shapes\n\n Returns\n -------\n torch.tensor BxVx3\n The per-vertex displacement due to shape deformation\n '''\n\n # Displacement[b, m, k] = sum_{l} betas[b, l] * shape_disps[m, k, l]\n # i.e. 
Multiply each shape displacement by its corresponding beta and\n # then sum them.\n blend_shape = torch.einsum('bl,mkl->bmk', [betas, shape_disps])\n return blend_shape\n\n\ndef batch_rodrigues(rot_vecs, epsilon=1e-8, dtype=torch.float32):\n ''' Calculates the rotation matrices for a batch of rotation vectors\n Parameters\n ----------\n rot_vecs: torch.tensor Nx3\n array of N axis-angle vectors\n Returns\n -------\n R: torch.tensor Nx3x3\n The rotation matrices for the given axis-angle parameters\n '''\n\n batch_size = rot_vecs.shape[0]\n device = rot_vecs.device\n\n angle = torch.norm(rot_vecs + 1e-8, dim=1, keepdim=True)\n rot_dir = rot_vecs / angle\n\n cos = torch.unsqueeze(torch.cos(angle), dim=1)\n sin = torch.unsqueeze(torch.sin(angle), dim=1)\n\n # Bx1 arrays\n rx, ry, rz = torch.split(rot_dir, 1, dim=1)\n K = torch.zeros((batch_size, 3, 3), dtype=dtype, device=device)\n\n zeros = torch.zeros((batch_size, 1), dtype=dtype, device=device)\n K = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1) \\\n .view((batch_size, 3, 3))\n\n ident = torch.eye(3, dtype=dtype, device=device).unsqueeze(dim=0)\n rot_mat = ident + sin * K + (1 - cos) * torch.bmm(K, K)\n return rot_mat\n\n\ndef transform_mat(R, t):\n ''' Creates a batch of transformation matrices\n Args:\n - R: Bx3x3 array of a batch of rotation matrices\n - t: Bx3x1 array of a batch of translation vectors\n Returns:\n - T: Bx4x4 Transformation matrix\n '''\n # No padding left or right, only add an extra row\n return torch.cat([F.pad(R, [0, 0, 0, 1]),\n F.pad(t, [0, 0, 0, 1], value=1)], dim=2)\n\n\ndef batch_rigid_transform(rot_mats, joints, parents, dtype=torch.float32):\n \"\"\"\n Applies a batch of rigid transformations to the joints\n\n Parameters\n ----------\n rot_mats : torch.tensor BxNx3x3\n Tensor of rotation matrices\n joints : torch.tensor BxNx3\n Locations of joints\n parents : torch.tensor BxN\n The kinematic tree of each object\n dtype : torch.dtype, optional:\n The data type of the created tensors, the default is torch.float32\n\n Returns\n -------\n posed_joints : torch.tensor BxNx3\n The locations of the joints after applying the pose rotations\n rel_transforms : torch.tensor BxNx4x4\n The relative (with respect to the root joint) rigid transformations\n for all the joints\n \"\"\"\n\n joints = torch.unsqueeze(joints, dim=-1)\n\n rel_joints = joints.clone()\n rel_joints[:, 1:] -= joints[:, parents[1:]]\n\n #print(rot_mats.shape, rel_joints.shape,)\n transforms_mat = transform_mat(\n rot_mats.contiguous().view(-1, 3, 3),\n rel_joints.contiguous().view(-1, 3, 1)).contiguous().view(-1, joints.shape[1], 4, 4)\n\n transform_chain = [transforms_mat[:, 0]]\n for i in range(1, parents.shape[0]):\n # Subtract the joint location at the rest pose\n # No need for rotation, since it's identity when at rest\n curr_res = torch.matmul(transform_chain[parents[i]],\n transforms_mat[:, i])\n transform_chain.append(curr_res)\n\n transforms = torch.stack(transform_chain, dim=1)\n\n # The last column of the transformations contains the posed joints\n posed_joints = transforms[:, :, :3, 3]\n\n # The last column of the transformations contains the posed joints\n posed_joints = transforms[:, :, :3, 3]\n\n joints_homogen = F.pad(joints, [0, 0, 0, 1])\n\n rel_transforms = transforms - F.pad(\n torch.matmul(transforms, joints_homogen), [3, 0, 0, 0, 0, 0, 0, 0])\n\n return posed_joints, rel_transforms\n\ndef main():\n pass\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.cat", "torch.stack", "torch.einsum", "numpy.load", "torch.bmm", "torch.ones", "torch.nn.Parameter", "torch.eye", "torch.nn.functional.pad", "numpy.concatenate", "torch.sqrt", "torch.norm", "torch.unsqueeze", "torch.tensor", "torch.index_select", "torch.zeros", "torch.cos", "numpy.array", "numpy.reshape", "torch.matmul", "torch.sin", "torch.arange", "torch.no_grad", "torch.split", "torch.atan2" ] ]
qyuga/alpaca-trade-api-python
[ "495d494610f6f852a1c151548934c8209e7b1f86" ]
[ "alpaca_trade_api/entity.py" ]
[ "import pandas as pd\nimport pprint\nimport re\n\nISO8601YMD = re.compile(r'\\d{4}-\\d{2}-\\d{2}T')\nNY = 'America/New_York'\n\n\nclass Entity(object):\n '''This helper class provides property access (the \"dot notation\")\n to the json object, backed by the original object stored in the _raw\n field.\n '''\n\n def __init__(self, raw):\n self._raw = raw\n\n def __getattr__(self, key):\n if key in self._raw:\n val = self._raw[key]\n if (isinstance(val, str) and\n (key.endswith('_at') or\n key.endswith('_timestamp') or\n key.endswith('_time')) and\n ISO8601YMD.match(val)):\n return pd.Timestamp(val)\n else:\n return val\n return super().__getattribute__(key)\n\n def __repr__(self):\n return '{name}({raw})'.format(\n name=self.__class__.__name__,\n raw=pprint.pformat(self._raw, indent=4),\n )\n\n\nclass Account(Entity):\n pass\n\n\nclass Asset(Entity):\n pass\n\n\nclass Order(Entity):\n pass\n\n\nclass Position(Entity):\n pass\n\n\nclass Bar(Entity):\n def __getattr__(self, key):\n if key == 't':\n val = self._raw[key[0]]\n return pd.Timestamp(val, unit='s', tz=NY)\n return super().__getattr__(key)\n\n\nclass Bars(list):\n def __init__(self, raw):\n super().__init__([Bar(o) for o in raw])\n self._raw = raw\n\n @property\n def df(self):\n if not hasattr(self, '_df'):\n df = pd.DataFrame(\n self._raw, columns=('t', 'o', 'h', 'l', 'c', 'v'),\n )\n alias = {\n 't': 'time',\n 'o': 'open',\n 'h': 'high',\n 'l': 'low',\n 'c': 'close',\n 'v': 'volume',\n }\n df.columns = [alias[c] for c in df.columns]\n df.set_index('time', inplace=True)\n df.index = pd.to_datetime(\n df.index * 1e9, utc=True,\n ).tz_convert(NY)\n self._df = df\n return self._df\n\n\nclass BarSet(dict):\n def __init__(self, raw):\n for symbol in raw:\n self[symbol] = Bars(raw[symbol])\n self._raw = raw\n\n @property\n def df(self):\n '''## Experimental '''\n if not hasattr(self, '_df'):\n dfs = []\n for symbol, bars in self.items():\n df = bars.df.copy()\n df.columns = pd.MultiIndex.from_product(\n [[symbol, ], df.columns])\n dfs.append(df)\n if len(dfs) == 0:\n self._df = pd.DataFrame()\n else:\n self._df = pd.concat(dfs, axis=1)\n return self._df\n\n\nclass Clock(Entity):\n def __getattr__(self, key):\n if key in self._raw:\n val = self._raw[key]\n if key in ('timestamp', 'next_open', 'next_close'):\n return pd.Timestamp(val)\n else:\n return val\n return super().__getattr__(key)\n\n\nclass Calendar(Entity):\n def __getattr__(self, key):\n if key in self._raw:\n val = self._raw[key]\n if key in ('date',):\n return pd.Timestamp(val)\n elif key in ('open', 'close'):\n return pd.Timestamp(val).time()\n else:\n return val\n return super().__getattr__(key)\n" ]
[ [ "pandas.to_datetime", "pandas.DataFrame", "pandas.Timestamp", "pandas.MultiIndex.from_product", "pandas.concat" ] ]
armando-fandango/tensorflow-addons
[ "a7065e7297022f9086cdabcfe4798ee1691daedf" ]
[ "tensorflow_addons/seq2seq/attention_wrapper.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"A powerful dynamic attention wrapper object.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport functools\nimport math\n\nimport numpy as np\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.keras import initializers\nfrom tensorflow.python.keras import layers\nfrom tensorflow.python.keras.engine import base_layer_utils\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.ops import clip_ops\nfrom tensorflow.python.ops import functional_ops\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import rnn_cell_impl\nfrom tensorflow.python.ops import tensor_array_ops\nfrom tensorflow.python.util import nest\n\n_zero_state_tensors = rnn_cell_impl._zero_state_tensors # pylint: disable=protected-access\n\n\nclass AttentionMechanism(object):\n @property\n def alignments_size(self):\n raise NotImplementedError\n\n @property\n def state_size(self):\n raise NotImplementedError\n\n\nclass _BaseAttentionMechanism(AttentionMechanism, layers.Layer):\n \"\"\"A base AttentionMechanism class providing common functionality.\n\n Common functionality includes:\n 1. Storing the query and memory layers.\n 2. Preprocessing and storing the memory.\n\n Note that this layer takes memory as its init parameter, which is an\n anti-pattern of Keras API, we have to keep the memory as init parameter for\n performance and dependency reason. Under the hood, during `__init__()`, it\n will invoke `base_layer.__call__(memory, setup_memory=True)`. This will let\n keras to keep track of the memory tensor as the input of this layer. Once\n the `__init__()` is done, then user can query the attention by\n `score = att_obj([query, state])`, and use it as a normal keras layer.\n\n Special attention is needed when adding using this class as the base layer\n for new attention:\n 1. Build() could be invoked at least twice. So please make sure weights\n are not duplicated.\n 2. Layer.get_weights() might return different set of weights if the\n instance has `query_layer`. The query_layer weights is not initialized\n until the memory is configured.\n\n Also note that this layer does not work with Keras model when\n `model.compile(run_eagerly=True)` due to the fact that this layer is\n stateful. 
The support for that will be added in a future version.\n \"\"\"\n\n def __init__(self,\n memory,\n probability_fn,\n query_layer=None,\n memory_layer=None,\n memory_sequence_length=None,\n **kwargs):\n \"\"\"Construct base AttentionMechanism class.\n\n Args:\n memory: The memory to query; usually the output of an RNN encoder.\n This tensor should be shaped `[batch_size, max_time, ...]`.\n probability_fn: A `callable`. Converts the score and previous\n alignments to probabilities. Its signature should be:\n `probabilities = probability_fn(score, state)`.\n query_layer: (optional): Instance of `tf.keras.Layer`. The layer's\n depth must match the depth of `memory_layer`. If `query_layer` is\n not provided, the shape of `query` must match that of\n `memory_layer`.\n memory_layer: (optional): Instance of `tf.keras.Layer`. The layer's\n depth must match the depth of `query_layer`.\n If `memory_layer` is not provided, the shape of `memory` must match\n that of `query_layer`.\n memory_sequence_length (optional): Sequence lengths for the batch\n entries in memory. If provided, the memory tensor rows are masked\n with zeros for values past the respective sequence lengths.\n **kwargs: Dictionary that contains other common arguments for layer\n creation.\n \"\"\"\n if (query_layer is not None\n and not isinstance(query_layer, layers.Layer)):\n raise TypeError(\n \"query_layer is not a Layer: %s\" % type(query_layer).__name__)\n if (memory_layer is not None\n and not isinstance(memory_layer, layers.Layer)):\n raise TypeError(\"memory_layer is not a Layer: %s\" %\n type(memory_layer).__name__)\n self.query_layer = query_layer\n self.memory_layer = memory_layer\n if self.memory_layer is not None and \"dtype\" not in kwargs:\n kwargs[\"dtype\"] = self.memory_layer.dtype\n super(_BaseAttentionMechanism, self).__init__(**kwargs)\n if not callable(probability_fn):\n raise TypeError(\"probability_fn must be callable, saw type: %s\" %\n type(probability_fn).__name__)\n self.probability_fn = probability_fn\n\n self.keys = None\n self.values = None\n self.batch_size = None\n self._memory_initialized = False\n self._check_inner_dims_defined = True\n self.supports_masking = True\n self.score_mask_value = dtypes.as_dtype(\n self.dtype).as_numpy_dtype(-np.inf)\n\n if memory is not None:\n # Setup the memory by self.__call__() with memory and\n # memory_seq_length. This will make the attention follow the keras\n # convention which takes all the tensor inputs via __call__().\n if memory_sequence_length is None:\n inputs = memory\n else:\n inputs = [memory, memory_sequence_length]\n\n self.values = super(_BaseAttentionMechanism, self).__call__(\n inputs, setup_memory=True)\n\n def build(self, input_shape):\n if not self._memory_initialized:\n # This is for setting up the memory, which contains memory and\n # optional memory_sequence_length. Build the memory_layer with\n # memory shape.\n if self.memory_layer is not None and not self.memory_layer.built:\n if isinstance(input_shape, list):\n self.memory_layer.build(input_shape[0])\n else:\n self.memory_layer.build(input_shape)\n else:\n # The input_shape should be query.shape and state.shape. Use the\n # query to init the query layer.\n if self.query_layer is not None and not self.query_layer.built:\n self.query_layer.build(input_shape[0])\n\n def __call__(self, inputs, **kwargs):\n \"\"\"Preprocess the inputs before calling `base_layer.__call__()`.\n\n Note that there are situation here, one for setup memory, and one with\n actual query and state.\n 1. 
When the memory has not been configured, we just pass all the param\n to base_layer.__call__(), which will then invoke self.call() with\n proper inputs, which allows this class to setup memory.\n 2. When the memory has already been setup, the input should contain\n query and state, and optionally processed memory. If the processed\n memory is not included in the input, we will have to append it to\n the inputs and give it to the base_layer.__call__(). The processed\n memory is the output of first invocation of self.__call__(). If we\n don't add it here, then from keras perspective, the graph is\n disconnected since the output from previous call is never used.\n\n Args:\n inputs: the inputs tensors.\n **kwargs: dict, other keyeword arguments for the `__call__()`\n \"\"\"\n if self._memory_initialized:\n if len(inputs) not in (2, 3):\n raise ValueError(\n \"Expect the inputs to have 2 or 3 tensors, got %d\" %\n len(inputs))\n if len(inputs) == 2:\n # We append the calculated memory here so that the graph will be\n # connected.\n inputs.append(self.values)\n return super(_BaseAttentionMechanism, self).__call__(inputs, **kwargs)\n\n def call(self, inputs, mask=None, setup_memory=False, **kwargs):\n \"\"\"Setup the memory or query the attention.\n\n There are two case here, one for setup memory, and the second is query\n the attention score. `setup_memory` is the flag to indicate which mode\n it is. The input list will be treated differently based on that flag.\n\n Args:\n inputs: a list of tensor that could either be `query` and `state`, or\n `memory` and `memory_sequence_length`.\n `query` is the tensor of dtype matching `memory` and shape\n `[batch_size, query_depth]`.\n `state` is the tensor of dtype matching `memory` and shape\n `[batch_size, alignments_size]`. (`alignments_size` is memory's\n `max_time`).\n `memory` is the memory to query; usually the output of an RNN\n encoder. The tensor should be shaped `[batch_size, max_time, ...]`.\n `memory_sequence_length` (optional) is the sequence lengths for the\n batch entries in memory. If provided, the memory tensor rows are\n masked with zeros for values past the respective sequence lengths.\n mask: optional bool tensor with shape `[batch, max_time]` for the\n mask of memory. If it is not None, the corresponding item of the\n memory should be filtered out during calculation.\n setup_memory: boolean, whether the input is for setting up memory, or\n query attention.\n **kwargs: Dict, other keyword arguments for the call method.\n Returns:\n Either processed memory or attention score, based on `setup_memory`.\n \"\"\"\n if setup_memory:\n if isinstance(inputs, list):\n if len(inputs) not in (1, 2):\n raise ValueError(\n \"Expect inputs to have 1 or 2 tensors, got %d\" %\n len(inputs))\n memory = inputs[0]\n memory_sequence_length = inputs[1] if len(\n inputs) == 2 else None\n memory_mask = mask\n else:\n memory, memory_sequence_length = inputs, None\n memory_mask = mask\n self._setup_memory(memory, memory_sequence_length, memory_mask)\n # We force the self.built to false here since only memory is,\n # initialized but the real query/state has not been call() yet. 
The\n # layer should be build and call again.\n self.built = False\n # Return the processed memory in order to create the Keras\n # connectivity data for it.\n return self.values\n else:\n if not self._memory_initialized:\n raise ValueError(\n \"Cannot query the attention before the setup of \"\n \"memory\")\n if len(inputs) not in (2, 3):\n raise ValueError(\n \"Expect the inputs to have query, state, and optional \"\n \"processed memory, got %d items\" % len(inputs))\n # Ignore the rest of the inputs and only care about the query and\n # state\n query, state = inputs[0], inputs[1]\n return self._calculate_attention(query, state)\n\n def _setup_memory(self,\n memory,\n memory_sequence_length=None,\n memory_mask=None):\n \"\"\"Pre-process the memory before actually query the memory.\n\n This should only be called once at the first invocation of call().\n\n Args:\n memory: The memory to query; usually the output of an RNN encoder.\n This tensor should be shaped `[batch_size, max_time, ...]`.\n memory_sequence_length (optional): Sequence lengths for the batch\n entries in memory. If provided, the memory tensor rows are masked\n with zeros for values past the respective sequence lengths.\n memory_mask: (Optional) The boolean tensor with shape `[batch_size,\n max_time]`. For any value equal to False, the corresponding value\n in memory should be ignored.\n \"\"\"\n if self._memory_initialized:\n raise ValueError(\n \"The memory for the attention has already been setup.\")\n if memory_sequence_length is not None and memory_mask is not None:\n raise ValueError(\n \"memory_sequence_length and memory_mask cannot be \"\n \"used at same time for attention.\")\n with ops.name_scope(self.name, \"BaseAttentionMechanismInit\",\n nest.flatten(memory)):\n self.values = _prepare_memory(\n memory,\n memory_sequence_length=memory_sequence_length,\n memory_mask=memory_mask,\n check_inner_dims_defined=self._check_inner_dims_defined)\n # Mark the value as check since the memory and memory mask might not\n # passed from __call__(), which does not have proper keras metadata.\n # TODO(omalleyt12): Remove this hack once the mask the has proper\n # keras history.\n base_layer_utils.mark_checked(self.values)\n if self.memory_layer is not None:\n self.keys = self.memory_layer(self.values)\n else:\n self.keys = self.values\n self.batch_size = (tensor_shape.dimension_value(self.keys.shape[0])\n or array_ops.shape(self.keys)[0])\n self._alignments_size = (tensor_shape.dimension_value(\n self.keys.shape[1]) or array_ops.shape(self.keys)[1])\n if memory_mask is not None or memory_sequence_length is not None:\n unwrapped_probability_fn = self.probability_fn\n\n def _mask_probability_fn(score, prev):\n return unwrapped_probability_fn(\n _maybe_mask_score(\n score,\n memory_mask=memory_mask,\n memory_sequence_length=memory_sequence_length,\n score_mask_value=self.score_mask_value), prev)\n\n self.probability_fn = _mask_probability_fn\n self._memory_initialized = True\n\n def _calculate_attention(self, query, state):\n raise NotImplementedError(\n \"_calculate_attention need to be implemented by subclasses.\")\n\n def compute_mask(self, inputs, mask=None):\n # There real input of the attention is query and state, and the memory\n # layer mask shouldn't be pass down. 
Returning None for all output mask\n # here.\n return None, None\n\n def get_config(self):\n config = {}\n # Since the probability_fn is likely to be a wrapped function, the child\n # class should preserve the original function and how its wrapped.\n\n if self.query_layer is not None:\n config[\"query_layer\"] = {\n \"class_name\": self.query_layer.__class__.__name__,\n \"config\": self.query_layer.get_config(),\n }\n if self.memory_layer is not None:\n config[\"memory_layer\"] = {\n \"class_name\": self.memory_layer.__class__.__name__,\n \"config\": self.memory_layer.get_config(),\n }\n # memory is a required init parameter and its a tensor. It cannot be\n # serialized to config, so we put a placeholder for it.\n config[\"memory\"] = None\n base_config = super(_BaseAttentionMechanism, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n def _process_probability_fn(self, func_name):\n \"\"\"Helper method to retrieve the probably function by string input.\"\"\"\n valid_probability_fns = {\n \"softmax\": nn_ops.softmax,\n \"hardmax\": hardmax,\n }\n if func_name not in valid_probability_fns.keys():\n raise ValueError(\"Invalid probability function: %s, options are %s\"\n % (func_name, valid_probability_fns.keys()))\n return valid_probability_fns[func_name]\n\n @classmethod\n def deserialize_inner_layer_from_config(cls, config, custom_objects):\n \"\"\"Helper method that reconstruct the query and memory from the config.\n\n In the get_config() method, the query and memory layer configs are\n serialized into dict for persistence, this method perform the reverse\n action to reconstruct the layer from the config.\n\n Args:\n config: dict, the configs that will be used to reconstruct the\n object.\n custom_objects: dict mapping class names (or function names) of\n custom (non-Keras) objects to class/functions.\n Returns:\n config: dict, the config with layer instance created, which is ready\n to be used as init parameters.\n \"\"\"\n # Reconstruct the query and memory layer for parent class.\n from tensorflow.python.keras.layers import deserialize as deserialize_layer\n # Instead of updating the input, create a copy and use that.\n config = config.copy()\n query_layer_config = config.pop(\"query_layer\", None)\n if query_layer_config:\n query_layer = deserialize_layer(\n query_layer_config, custom_objects=custom_objects)\n config[\"query_layer\"] = query_layer\n memory_layer_config = config.pop(\"memory_layer\", None)\n if memory_layer_config:\n memory_layer = deserialize_layer(\n memory_layer_config, custom_objects=custom_objects)\n config[\"memory_layer\"] = memory_layer\n return config\n\n @property\n def alignments_size(self):\n return self._alignments_size\n\n @property\n def state_size(self):\n return self._alignments_size\n\n def initial_alignments(self, batch_size, dtype):\n \"\"\"Creates the initial alignment values for the `AttentionWrapper`\n class.\n\n This is important for AttentionMechanisms that use the previous\n alignment to calculate the alignment at the next time step\n (e.g. 
monotonic attention).\n\n The default behavior is to return a tensor of all zeros.\n\n Args:\n batch_size: `int32` scalar, the batch_size.\n dtype: The `dtype`.\n\n Returns:\n A `dtype` tensor shaped `[batch_size, alignments_size]`\n (`alignments_size` is the values' `max_time`).\n \"\"\"\n max_time = self._alignments_size\n return _zero_state_tensors(max_time, batch_size, dtype)\n\n def initial_state(self, batch_size, dtype):\n \"\"\"Creates the initial state values for the `AttentionWrapper` class.\n\n This is important for AttentionMechanisms that use the previous\n alignment to calculate the alignment at the next time step\n (e.g. monotonic attention).\n\n The default behavior is to return the same output as\n initial_alignments.\n\n Args:\n batch_size: `int32` scalar, the batch_size.\n dtype: The `dtype`.\n\n Returns:\n A structure of all-zero tensors with shapes as described by\n `state_size`.\n \"\"\"\n return self.initial_alignments(batch_size, dtype)\n\n\ndef _luong_score(query, keys, scale):\n \"\"\"Implements Luong-style (multiplicative) scoring function.\n\n This attention has two forms. The first is standard Luong attention,\n as described in:\n\n Minh-Thang Luong, Hieu Pham, Christopher D. Manning.\n \"Effective Approaches to Attention-based Neural Machine Translation.\"\n EMNLP 2015. https://arxiv.org/abs/1508.04025\n\n The second is the scaled form inspired partly by the normalized form of\n Bahdanau attention.\n\n To enable the second form, call this function with `scale=True`.\n\n Args:\n query: Tensor, shape `[batch_size, num_units]` to compare to keys.\n keys: Processed memory, shape `[batch_size, max_time, num_units]`.\n scale: the optional tensor to scale the attention score.\n\n Returns:\n A `[batch_size, max_time]` tensor of unnormalized score values.\n\n Raises:\n ValueError: If `key` and `query` depths do not match.\n \"\"\"\n depth = query.get_shape()[-1]\n key_units = keys.get_shape()[-1]\n if depth != key_units:\n raise ValueError(\n \"Incompatible or unknown inner dimensions between query and keys. \"\n \"Query (%s) has units: %s. Keys (%s) have units: %s. \"\n \"Perhaps you need to set num_units to the keys' dimension (%s)?\" %\n (query, depth, keys, key_units, key_units))\n\n # Reshape from [batch_size, depth] to [batch_size, 1, depth]\n # for matmul.\n query = array_ops.expand_dims(query, 1)\n\n # Inner product along the query units dimension.\n # matmul shapes: query is [batch_size, 1, depth] and\n # keys is [batch_size, max_time, depth].\n # the inner product is asked to **transpose keys' inner shape** to get a\n # batched matmul on:\n # [batch_size, 1, depth] . [batch_size, depth, max_time]\n # resulting in an output shape of:\n # [batch_size, 1, max_time].\n # we then squeeze out the center singleton dimension.\n score = math_ops.matmul(query, keys, transpose_b=True)\n score = array_ops.squeeze(score, [1])\n\n if scale is not None:\n score = scale * score\n return score\n\n\nclass LuongAttention(_BaseAttentionMechanism):\n \"\"\"Implements Luong-style (multiplicative) attention scoring.\n\n This attention has two forms. The first is standard Luong attention,\n as described in:\n\n Minh-Thang Luong, Hieu Pham, Christopher D. 
Manning.\n [Effective Approaches to Attention-based Neural Machine Translation.\n EMNLP 2015.](https://arxiv.org/abs/1508.04025)\n\n The second is the scaled form inspired partly by the normalized form of\n Bahdanau attention.\n\n To enable the second form, construct the object with parameter\n `scale=True`.\n \"\"\"\n\n def __init__(self,\n units,\n memory,\n memory_sequence_length=None,\n scale=False,\n probability_fn=\"softmax\",\n dtype=None,\n name=\"LuongAttention\",\n **kwargs):\n \"\"\"Construct the AttentionMechanism mechanism.\n\n Args:\n units: The depth of the attention mechanism.\n memory: The memory to query; usually the output of an RNN encoder.\n This tensor should be shaped `[batch_size, max_time, ...]`.\n memory_sequence_length: (optional): Sequence lengths for the batch\n entries in memory. If provided, the memory tensor rows are masked\n with zeros for values past the respective sequence lengths.\n scale: Python boolean. Whether to scale the energy term.\n probability_fn: (optional) string, the name of function to convert\n the attention score to probabilities. The default is `softmax`\n which is `tf.nn.softmax`. Other options is `hardmax`, which is\n hardmax() within this module. Any other value will result\n intovalidation error. Default to use `softmax`.\n dtype: The data type for the memory layer of the attention mechanism.\n name: Name to use when creating ops.\n **kwargs: Dictionary that contains other common arguments for layer\n creation.\n \"\"\"\n # For LuongAttention, we only transform the memory layer; thus\n # num_units **must** match expected the query depth.\n self.probability_fn_name = probability_fn\n probability_fn = self._process_probability_fn(self.probability_fn_name)\n wrapped_probability_fn = lambda score, _: probability_fn(score)\n if dtype is None:\n dtype = dtypes.float32\n memory_layer = kwargs.pop(\"memory_layer\", None)\n if not memory_layer:\n memory_layer = layers.Dense(\n units, name=\"memory_layer\", use_bias=False, dtype=dtype)\n self.units = units\n self.scale = scale\n self.scale_weight = None\n super(LuongAttention, self).__init__(\n memory=memory,\n memory_sequence_length=memory_sequence_length,\n query_layer=None,\n memory_layer=memory_layer,\n probability_fn=wrapped_probability_fn,\n name=name,\n dtype=dtype,\n **kwargs)\n\n def build(self, input_shape):\n super(LuongAttention, self).build(input_shape)\n if self.scale and self.scale_weight is None:\n self.scale_weight = self.add_weight(\n \"attention_g\", initializer=init_ops.ones_initializer, shape=())\n self.built = True\n\n def _calculate_attention(self, query, state):\n \"\"\"Score the query based on the keys and values.\n\n Args:\n query: Tensor of dtype matching `self.values` and shape\n `[batch_size, query_depth]`.\n state: Tensor of dtype matching `self.values` and shape\n `[batch_size, alignments_size]`\n (`alignments_size` is memory's `max_time`).\n\n Returns:\n alignments: Tensor of dtype matching `self.values` and shape\n `[batch_size, alignments_size]` (`alignments_size` is memory's\n `max_time`).\n next_state: Same as the alignments.\n \"\"\"\n score = _luong_score(query, self.keys, self.scale_weight)\n alignments = self.probability_fn(score, state)\n next_state = alignments\n return alignments, next_state\n\n def get_config(self):\n config = {\n \"units\": self.units,\n \"scale\": self.scale,\n \"probability_fn\": self.probability_fn_name,\n }\n base_config = super(LuongAttention, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n 
@classmethod\n def from_config(cls, config, custom_objects=None):\n config = _BaseAttentionMechanism.deserialize_inner_layer_from_config(\n config, custom_objects=custom_objects)\n return cls(**config)\n\n\ndef _bahdanau_score(processed_query,\n keys,\n attention_v,\n attention_g=None,\n attention_b=None):\n \"\"\"Implements Bahdanau-style (additive) scoring function.\n\n This attention has two forms. The first is Bhandanau attention,\n as described in:\n\n Dzmitry Bahdanau, Kyunghyun Cho, Yoshua Bengio.\n \"Neural Machine Translation by Jointly Learning to Align and Translate.\"\n ICLR 2015. https://arxiv.org/abs/1409.0473\n\n The second is the normalized form. This form is inspired by the\n weight normalization article:\n\n Tim Salimans, Diederik P. Kingma.\n \"Weight Normalization: A Simple Reparameterization to Accelerate\n Training of Deep Neural Networks.\"\n https://arxiv.org/abs/1602.07868\n\n To enable the second form, set please pass in attention_g and attention_b.\n\n Args:\n processed_query: Tensor, shape `[batch_size, num_units]` to compare to\n keys.\n keys: Processed memory, shape `[batch_size, max_time, num_units]`.\n attention_v: Tensor, shape `[num_units]`.\n attention_g: Optional scalar tensor for normalization.\n attention_b: Optional tensor with shape `[num_units]` for normalization.\n\n Returns:\n A `[batch_size, max_time]` tensor of unnormalized score values.\n \"\"\"\n # Reshape from [batch_size, ...] to [batch_size, 1, ...] for broadcasting.\n processed_query = array_ops.expand_dims(processed_query, 1)\n if attention_g is not None and attention_b is not None:\n normed_v = attention_g * attention_v * math_ops.rsqrt(\n math_ops.reduce_sum(math_ops.square(attention_v)))\n return math_ops.reduce_sum(\n normed_v * math_ops.tanh(keys + processed_query + attention_b),\n [2])\n else:\n return math_ops.reduce_sum(\n attention_v * math_ops.tanh(keys + processed_query), [2])\n\n\nclass BahdanauAttention(_BaseAttentionMechanism):\n \"\"\"Implements Bahdanau-style (additive) attention.\n\n This attention has two forms. The first is Bahdanau attention,\n as described in:\n\n Dzmitry Bahdanau, Kyunghyun Cho, Yoshua Bengio.\n \"Neural Machine Translation by Jointly Learning to Align and Translate.\"\n ICLR 2015. https://arxiv.org/abs/1409.0473\n\n The second is the normalized form. This form is inspired by the\n weight normalization article:\n\n Tim Salimans, Diederik P. Kingma.\n \"Weight Normalization: A Simple Reparameterization to Accelerate\n Training of Deep Neural Networks.\"\n https://arxiv.org/abs/1602.07868\n\n To enable the second form, construct the object with parameter\n `normalize=True`.\n \"\"\"\n\n def __init__(self,\n units,\n memory,\n memory_sequence_length=None,\n normalize=False,\n probability_fn=\"softmax\",\n kernel_initializer=\"glorot_uniform\",\n dtype=None,\n name=\"BahdanauAttention\",\n **kwargs):\n \"\"\"Construct the Attention mechanism.\n\n Args:\n units: The depth of the query mechanism.\n memory: The memory to query; usually the output of an RNN encoder.\n This tensor should be shaped `[batch_size, max_time, ...]`.\n memory_sequence_length: (optional): Sequence lengths for the batch\n entries in memory. If provided, the memory tensor rows are masked\n with zeros for values past the respective sequence lengths.\n normalize: Python boolean. Whether to normalize the energy term.\n probability_fn: (optional) string, the name of function to convert\n the attention score to probabilities. The default is `softmax`\n which is `tf.nn.softmax`. 
Other options is `hardmax`, which is\n hardmax() within this module. Any other value will result into\n validation error. Default to use `softmax`.\n kernel_initializer: (optional), the name of the initializer for the\n attention kernel.\n dtype: The data type for the query and memory layers of the attention\n mechanism.\n name: Name to use when creating ops.\n **kwargs: Dictionary that contains other common arguments for layer\n creation.\n \"\"\"\n self.probability_fn_name = probability_fn\n probability_fn = self._process_probability_fn(self.probability_fn_name)\n wrapped_probability_fn = lambda score, _: probability_fn(score)\n if dtype is None:\n dtype = dtypes.float32\n query_layer = kwargs.pop(\"query_layer\", None)\n if not query_layer:\n query_layer = layers.Dense(\n units, name=\"query_layer\", use_bias=False, dtype=dtype)\n memory_layer = kwargs.pop(\"memory_layer\", None)\n if not memory_layer:\n memory_layer = layers.Dense(\n units, name=\"memory_layer\", use_bias=False, dtype=dtype)\n self.units = units\n self.normalize = normalize\n self.kernel_initializer = initializers.get(kernel_initializer)\n self.attention_v = None\n self.attention_g = None\n self.attention_b = None\n super(BahdanauAttention, self).__init__(\n memory=memory,\n memory_sequence_length=memory_sequence_length,\n query_layer=query_layer,\n memory_layer=memory_layer,\n probability_fn=wrapped_probability_fn,\n name=name,\n dtype=dtype,\n **kwargs)\n\n def build(self, input_shape):\n super(BahdanauAttention, self).build(input_shape)\n if self.attention_v is None:\n self.attention_v = self.add_weight(\n \"attention_v\", [self.units],\n dtype=self.dtype,\n initializer=self.kernel_initializer)\n if (self.normalize and self.attention_g is None\n and self.attention_b is None):\n self.attention_g = self.add_weight(\n \"attention_g\",\n initializer=init_ops.constant_initializer(\n math.sqrt((1. 
/ self.units))),\n shape=())\n self.attention_b = self.add_weight(\n \"attention_b\",\n shape=[self.units],\n initializer=init_ops.zeros_initializer())\n self.built = True\n\n def _calculate_attention(self, query, state):\n \"\"\"Score the query based on the keys and values.\n\n Args:\n query: Tensor of dtype matching `self.values` and shape\n `[batch_size, query_depth]`.\n state: Tensor of dtype matching `self.values` and shape\n `[batch_size, alignments_size]`\n (`alignments_size` is memory's `max_time`).\n\n Returns:\n alignments: Tensor of dtype matching `self.values` and shape\n `[batch_size, alignments_size]` (`alignments_size` is memory's\n `max_time`).\n next_state: same as alignments.\n \"\"\"\n processed_query = self.query_layer(\n query) if self.query_layer else query\n score = _bahdanau_score(\n processed_query,\n self.keys,\n self.attention_v,\n attention_g=self.attention_g,\n attention_b=self.attention_b)\n alignments = self.probability_fn(score, state)\n next_state = alignments\n return alignments, next_state\n\n def get_config(self):\n config = {\n \"units\": self.units,\n \"normalize\": self.normalize,\n \"probability_fn\": self.probability_fn_name,\n \"kernel_initializer\":\n initializers.serialize(self.kernel_initializer)\n }\n base_config = super(BahdanauAttention, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n config = _BaseAttentionMechanism.deserialize_inner_layer_from_config(\n config, custom_objects=custom_objects)\n return cls(**config)\n\n\ndef safe_cumprod(x, *args, **kwargs):\n \"\"\"Computes cumprod of x in logspace using cumsum to avoid underflow.\n\n The cumprod function and its gradient can result in numerical instabilities\n when its argument has very small and/or zero values. As long as the\n argument is all positive, we can instead compute the cumulative product as\n exp(cumsum(log(x))). This function can be called identically to\n tf.cumprod.\n\n Args:\n x: Tensor to take the cumulative product of.\n *args: Passed on to cumsum; these are identical to those in cumprod.\n **kwargs: Passed on to cumsum; these are identical to those in cumprod.\n Returns:\n Cumulative product of x.\n \"\"\"\n with ops.name_scope(None, \"SafeCumprod\", [x]):\n x = ops.convert_to_tensor(x, name=\"x\")\n tiny = np.finfo(x.dtype.as_numpy_dtype).tiny\n return math_ops.exp(\n math_ops.cumsum(\n math_ops.log(clip_ops.clip_by_value(x, tiny, 1)), *args,\n **kwargs))\n\n\ndef monotonic_attention(p_choose_i, previous_attention, mode):\n \"\"\"Compute monotonic attention distribution from choosing probabilities.\n\n Monotonic attention implies that the input sequence is processed in an\n explicitly left-to-right manner when generating the output sequence. In\n addition, once an input sequence element is attended to at a given output\n timestep, elements occurring before it cannot be attended to at subsequent\n output timesteps. This function generates attention distributions\n according to these assumptions. For more information, see `Online and\n Linear-Time Attention by Enforcing Monotonic Alignments`.\n\n Args:\n p_choose_i: Probability of choosing input sequence/memory element i.\n Should be of shape (batch_size, input_sequence_length), and should all\n be in the range [0, 1].\n previous_attention: The attention distribution from the previous output\n timestep. Should be of shape (batch_size, input_sequence_length). 
For\n the first output timestep, preevious_attention[n] should be\n [1, 0, 0, ..., 0] for all n in [0, ... batch_size - 1].\n mode: How to compute the attention distribution. Must be one of\n 'recursive', 'parallel', or 'hard'.\n * 'recursive' uses tf.scan to recursively compute the distribution.\n This is slowest but is exact, general, and does not suffer from\n numerical instabilities.\n * 'parallel' uses parallelized cumulative-sum and cumulative-product\n operations to compute a closed-form solution to the recurrence\n relation defining the attention distribution. This makes it more\n efficient than 'recursive', but it requires numerical checks which\n make the distribution non-exact. This can be a problem in\n particular when input_sequence_length is long and/or p_choose_i has\n entries very close to 0 or 1.\n * 'hard' requires that the probabilities in p_choose_i are all either\n 0 or 1, and subsequently uses a more efficient and exact solution.\n\n Returns:\n A tensor of shape (batch_size, input_sequence_length) representing the\n attention distributions for each sequence in the batch.\n\n Raises:\n ValueError: mode is not one of 'recursive', 'parallel', 'hard'.\n \"\"\"\n # Force things to be tensors\n p_choose_i = ops.convert_to_tensor(p_choose_i, name=\"p_choose_i\")\n previous_attention = ops.convert_to_tensor(\n previous_attention, name=\"previous_attention\")\n if mode == \"recursive\":\n # Use .shape[0] when it's not None, or fall back on symbolic shape\n batch_size = tensor_shape.dimension_value(\n p_choose_i.shape[0]) or array_ops.shape(p_choose_i)[0]\n # Compute [1, 1 - p_choose_i[0], 1 - p_choose_i[1], ..., 1 - p_choose_\n # i[-2]]\n shifted_1mp_choose_i = array_ops.concat(\n [array_ops.ones((batch_size, 1)), 1 - p_choose_i[:, :-1]], 1)\n # Compute attention distribution recursively as\n # q[i] = (1 - p_choose_i[i - 1])*q[i - 1] + previous_attention[i]\n # attention[i] = p_choose_i[i]*q[i]\n attention = p_choose_i * array_ops.transpose(\n functional_ops.scan(\n # Need to use reshape to remind TF of the shape between loop\n # iterations\n lambda x, yz: array_ops.reshape(yz[0] * x + yz[1],\n (batch_size,)),\n # Loop variables yz[0] and yz[1]\n [\n array_ops.transpose(shifted_1mp_choose_i),\n array_ops.transpose(previous_attention)\n ],\n # Initial value of x is just zeros\n array_ops.zeros((batch_size,))))\n elif mode == \"parallel\":\n # safe_cumprod computes cumprod in logspace with numeric checks\n cumprod_1mp_choose_i = safe_cumprod(\n 1 - p_choose_i, axis=1, exclusive=True)\n # Compute recurrence relation solution\n attention = p_choose_i * cumprod_1mp_choose_i * math_ops.cumsum(\n previous_attention /\n # Clip cumprod_1mp to avoid divide-by-zero\n clip_ops.clip_by_value(cumprod_1mp_choose_i, 1e-10, 1.),\n axis=1)\n elif mode == \"hard\":\n # Remove any probabilities before the index chosen last time step\n p_choose_i *= math_ops.cumsum(previous_attention, axis=1)\n # Now, use exclusive cumprod to remove probabilities after the first\n # chosen index, like so:\n # p_choose_i = [0, 0, 0, 1, 1, 0, 1, 1]\n # cumprod(1 - p_choose_i, exclusive=True) = [1, 1, 1, 1, 0, 0, 0, 0]\n # Product of above: [0, 0, 0, 1, 0, 0, 0, 0]\n attention = p_choose_i * math_ops.cumprod(\n 1 - p_choose_i, axis=1, exclusive=True)\n else:\n raise ValueError(\"mode must be 'recursive', 'parallel', or 'hard'.\")\n return attention\n\n\ndef _monotonic_probability_fn(score,\n previous_alignments,\n sigmoid_noise,\n mode,\n seed=None):\n \"\"\"Attention probability function for monotonic attention.\n\n 
Takes in unnormalized attention scores, adds pre-sigmoid noise to encourage\n the model to make discrete attention decisions, passes them through a\n sigmoid to obtain \"choosing\" probabilities, and then calls\n monotonic_attention to obtain the attention distribution. For more\n information, see\n\n Colin Raffel, Minh-Thang Luong, Peter J. Liu, Ron J. Weiss, Douglas Eck,\n \"Online and Linear-Time Attention by Enforcing Monotonic Alignments.\"\n ICML 2017. https://arxiv.org/abs/1704.00784\n\n Args:\n score: Unnormalized attention scores, shape\n `[batch_size, alignments_size]`\n previous_alignments: Previous attention distribution, shape\n `[batch_size, alignments_size]`\n sigmoid_noise: Standard deviation of pre-sigmoid noise. Setting this\n larger than 0 will encourage the model to produce large attention\n scores, effectively making the choosing probabilities discrete and the\n resulting attention distribution one-hot. It should be set to 0 at\n test-time, and when hard attention is not desired.\n mode: How to compute the attention distribution. Must be one of\n 'recursive', 'parallel', or 'hard'. See the docstring for\n `tf.contrib.seq2seq.monotonic_attention` for more information.\n seed: (optional) Random seed for pre-sigmoid noise.\n\n Returns:\n A `[batch_size, alignments_size]`-shape tensor corresponding to the\n resulting attention distribution.\n \"\"\"\n # Optionally add pre-sigmoid noise to the scores\n if sigmoid_noise > 0:\n noise = random_ops.random_normal(\n array_ops.shape(score), dtype=score.dtype, seed=seed)\n score += sigmoid_noise * noise\n # Compute \"choosing\" probabilities from the attention scores\n if mode == \"hard\":\n # When mode is hard, use a hard sigmoid\n p_choose_i = math_ops.cast(score > 0, score.dtype)\n else:\n p_choose_i = math_ops.sigmoid(score)\n # Convert from choosing probabilities to attention distribution\n return monotonic_attention(p_choose_i, previous_alignments, mode)\n\n\nclass _BaseMonotonicAttentionMechanism(_BaseAttentionMechanism):\n \"\"\"Base attention mechanism for monotonic attention.\n\n Simply overrides the initial_alignments function to provide a dirac\n distribution, which is needed in order for the monotonic attention\n distributions to have the correct behavior.\n \"\"\"\n\n def initial_alignments(self, batch_size, dtype):\n \"\"\"Creates the initial alignment values for the monotonic attentions.\n\n Initializes to dirac distributions, i.e.\n [1, 0, 0, ...memory length..., 0] for all entries in the batch.\n\n Args:\n batch_size: `int32` scalar, the batch_size.\n dtype: The `dtype`.\n\n Returns:\n A `dtype` tensor shaped `[batch_size, alignments_size]`\n (`alignments_size` is the values' `max_time`).\n \"\"\"\n max_time = self._alignments_size\n return array_ops.one_hot(\n array_ops.zeros((batch_size,), dtype=dtypes.int32),\n max_time,\n dtype=dtype)\n\n\nclass BahdanauMonotonicAttention(_BaseMonotonicAttentionMechanism):\n \"\"\"Monotonic attention mechanism with Bahadanau-style energy function.\n\n This type of attention enforces a monotonic constraint on the attention\n distributions; that is once the model attends to a given point in the\n memory it can't attend to any prior points at subsequence output timesteps.\n It achieves this by using the _monotonic_probability_fn instead of softmax\n to construct its attention distributions. Since the attention scores are\n passed through a sigmoid, a learnable scalar bias parameter is applied\n after the score function and before the sigmoid. 
Otherwise, it is\n equivalent to BahdanauAttention. This approach is proposed in\n\n Colin Raffel, Minh-Thang Luong, Peter J. Liu, Ron J. Weiss, Douglas Eck,\n \"Online and Linear-Time Attention by Enforcing Monotonic Alignments.\"\n ICML 2017. https://arxiv.org/abs/1704.00784\n \"\"\"\n\n def __init__(self,\n units,\n memory,\n memory_sequence_length=None,\n normalize=False,\n sigmoid_noise=0.,\n sigmoid_noise_seed=None,\n score_bias_init=0.,\n mode=\"parallel\",\n kernel_initializer=\"glorot_uniform\",\n dtype=None,\n name=\"BahdanauMonotonicAttention\",\n **kwargs):\n \"\"\"Construct the Attention mechanism.\n\n Args:\n units: The depth of the query mechanism.\n memory: The memory to query; usually the output of an RNN encoder.\n This tensor should be shaped `[batch_size, max_time, ...]`.\n memory_sequence_length: (optional): Sequence lengths for the batch\n entries in memory. If provided, the memory tensor rows are masked\n with zeros for values past the respective sequence lengths.\n normalize: Python boolean. Whether to normalize the energy term.\n sigmoid_noise: Standard deviation of pre-sigmoid noise. See the\n docstring for `_monotonic_probability_fn` for more information.\n sigmoid_noise_seed: (optional) Random seed for pre-sigmoid noise.\n score_bias_init: Initial value for score bias scalar. It's\n recommended to initialize this to a negative value when the length\n of the memory is large.\n mode: How to compute the attention distribution. Must be one of\n 'recursive', 'parallel', or 'hard'. See the docstring for\n `tf.contrib.seq2seq.monotonic_attention` for more information.\n kernel_initializer: (optional), the name of the initializer for the\n attention kernel.\n dtype: The data type for the query and memory layers of the attention\n mechanism.\n name: Name to use when creating ops.\n **kwargs: Dictionary that contains other common arguments for layer\n creation.\n \"\"\"\n # Set up the monotonic probability fn with supplied parameters\n if dtype is None:\n dtype = dtypes.float32\n wrapped_probability_fn = functools.partial(\n _monotonic_probability_fn,\n sigmoid_noise=sigmoid_noise,\n mode=mode,\n seed=sigmoid_noise_seed)\n query_layer = kwargs.pop(\"query_layer\", None)\n if not query_layer:\n query_layer = layers.Dense(\n units, name=\"query_layer\", use_bias=False, dtype=dtype)\n memory_layer = kwargs.pop(\"memory_layer\", None)\n if not memory_layer:\n memory_layer = layers.Dense(\n units, name=\"memory_layer\", use_bias=False, dtype=dtype)\n self.units = units\n self.normalize = normalize\n self.sigmoid_noise = sigmoid_noise\n self.sigmoid_noise_seed = sigmoid_noise_seed\n self.score_bias_init = score_bias_init\n self.mode = mode\n self.kernel_initializer = initializers.get(kernel_initializer)\n self.attention_v = None\n self.attention_score_bias = None\n self.attention_g = None\n self.attention_b = None\n super(BahdanauMonotonicAttention, self).__init__(\n memory=memory,\n memory_sequence_length=memory_sequence_length,\n query_layer=query_layer,\n memory_layer=memory_layer,\n probability_fn=wrapped_probability_fn,\n name=name,\n dtype=dtype,\n **kwargs)\n\n def build(self, input_shape):\n super(BahdanauMonotonicAttention, self).build(input_shape)\n if self.attention_v is None:\n self.attention_v = self.add_weight(\n \"attention_v\", [self.units],\n dtype=self.dtype,\n initializer=self.kernel_initializer)\n if self.attention_score_bias is None:\n self.attention_score_bias = self.add_weight(\n \"attention_score_bias\",\n shape=(),\n dtype=self.dtype,\n 
initializer=init_ops.constant_initializer(\n self.score_bias_init, dtype=self.dtype))\n if (self.normalize and self.attention_g is None\n and self.attention_b is None):\n self.attention_g = self.add_weight(\n \"attention_g\",\n dtype=self.dtype,\n initializer=init_ops.constant_initializer(\n math.sqrt((1. / self.units))),\n shape=())\n self.attention_b = self.add_weight(\n \"attention_b\", [self.units],\n dtype=self.dtype,\n initializer=init_ops.zeros_initializer())\n self.built = True\n\n def _calculate_attention(self, query, state):\n \"\"\"Score the query based on the keys and values.\n\n Args:\n query: Tensor of dtype matching `self.values` and shape\n `[batch_size, query_depth]`.\n state: Tensor of dtype matching `self.values` and shape\n `[batch_size, alignments_size]`\n (`alignments_size` is memory's `max_time`).\n\n Returns:\n alignments: Tensor of dtype matching `self.values` and shape\n `[batch_size, alignments_size]` (`alignments_size` is memory's\n `max_time`).\n \"\"\"\n processed_query = self.query_layer(\n query) if self.query_layer else query\n score = _bahdanau_score(\n processed_query,\n self.keys,\n self.attention_v,\n attention_g=self.attention_g,\n attention_b=self.attention_b)\n score += self.attention_score_bias\n alignments = self.probability_fn(score, state)\n next_state = alignments\n return alignments, next_state\n\n def get_config(self):\n config = {\n \"units\": self.units,\n \"normalize\": self.normalize,\n \"sigmoid_noise\": self.sigmoid_noise,\n \"sigmoid_noise_seed\": self.sigmoid_noise_seed,\n \"score_bias_init\": self.score_bias_init,\n \"mode\": self.mode,\n \"kernel_initializer\":\n initializers.serialize(self.kernel_initializer),\n }\n base_config = super(BahdanauMonotonicAttention, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n config = _BaseAttentionMechanism.deserialize_inner_layer_from_config(\n config, custom_objects=custom_objects)\n return cls(**config)\n\n\nclass LuongMonotonicAttention(_BaseMonotonicAttentionMechanism):\n \"\"\"Monotonic attention mechanism with Luong-style energy function.\n\n This type of attention enforces a monotonic constraint on the attention\n distributions; that is, once the model attends to a given point in the\n memory it can't attend to any prior points at subsequent output timesteps.\n It achieves this by using the _monotonic_probability_fn instead of softmax\n to construct its attention distributions. Otherwise, it is equivalent to\n LuongAttention. This approach is proposed in\n\n [Colin Raffel, Minh-Thang Luong, Peter J. Liu, Ron J. Weiss, Douglas Eck,\n \"Online and Linear-Time Attention by Enforcing Monotonic Alignments.\"\n ICML 2017.](https://arxiv.org/abs/1704.00784)\n \"\"\"\n\n def __init__(self,\n units,\n memory,\n memory_sequence_length=None,\n scale=False,\n sigmoid_noise=0.,\n sigmoid_noise_seed=None,\n score_bias_init=0.,\n mode=\"parallel\",\n dtype=None,\n name=\"LuongMonotonicAttention\",\n **kwargs):\n \"\"\"Construct the Attention mechanism.\n\n Args:\n units: The depth of the query mechanism.\n memory: The memory to query; usually the output of an RNN encoder.\n This tensor should be shaped `[batch_size, max_time, ...]`.\n memory_sequence_length: (optional): Sequence lengths for the batch\n entries in memory. If provided, the memory tensor rows are masked\n with zeros for values past the respective sequence lengths.\n scale: Python boolean. 
Whether to scale the energy term.\n sigmoid_noise: Standard deviation of pre-sigmoid noise. See the\n docstring for `_monotonic_probability_fn` for more information.\n sigmoid_noise_seed: (optional) Random seed for pre-sigmoid noise.\n score_bias_init: Initial value for score bias scalar. It's\n recommended to initialize this to a negative value when the length\n of the memory is large.\n mode: How to compute the attention distribution. Must be one of\n 'recursive', 'parallel', or 'hard'. See the docstring for\n `tf.contrib.seq2seq.monotonic_attention` for more information.\n dtype: The data type for the query and memory layers of the attention\n mechanism.\n name: Name to use when creating ops.\n **kwargs: Dictionary that contains other common arguments for layer\n creation.\n \"\"\"\n # Set up the monotonic probability fn with supplied parameters\n if dtype is None:\n dtype = dtypes.float32\n wrapped_probability_fn = functools.partial(\n _monotonic_probability_fn,\n sigmoid_noise=sigmoid_noise,\n mode=mode,\n seed=sigmoid_noise_seed)\n memory_layer = kwargs.pop(\"memory_layer\", None)\n if not memory_layer:\n memory_layer = layers.Dense(\n units, name=\"memory_layer\", use_bias=False, dtype=dtype)\n self.units = units\n self.scale = scale\n self.sigmoid_noise = sigmoid_noise\n self.sigmoid_noise_seed = sigmoid_noise_seed\n self.score_bias_init = score_bias_init\n self.mode = mode\n self.attention_g = None\n self.attention_score_bias = None\n super(LuongMonotonicAttention, self).__init__(\n memory=memory,\n memory_sequence_length=memory_sequence_length,\n query_layer=None,\n memory_layer=memory_layer,\n probability_fn=wrapped_probability_fn,\n name=name,\n dtype=dtype,\n **kwargs)\n\n def build(self, input_shape):\n super(LuongMonotonicAttention, self).build(input_shape)\n if self.scale and self.attention_g is None:\n self.attention_g = self.add_weight(\n \"attention_g\", initializer=init_ops.ones_initializer, shape=())\n if self.attention_score_bias is None:\n self.attention_score_bias = self.add_weight(\n \"attention_score_bias\",\n shape=(),\n initializer=init_ops.constant_initializer(\n self.score_bias_init, dtype=self.dtype))\n self.built = True\n\n def _calculate_attention(self, query, state):\n \"\"\"Score the query based on the keys and values.\n\n Args:\n query: Tensor of dtype matching `self.values` and shape\n `[batch_size, query_depth]`.\n state: Tensor of dtype matching `self.values` and shape\n `[batch_size, alignments_size]`\n (`alignments_size` is memory's `max_time`).\n\n Returns:\n alignments: Tensor of dtype matching `self.values` and shape\n `[batch_size, alignments_size]` (`alignments_size` is memory's\n `max_time`).\n next_state: Same as alignments\n \"\"\"\n score = _luong_score(query, self.keys, self.attention_g)\n score += self.attention_score_bias\n alignments = self.probability_fn(score, state)\n next_state = alignments\n return alignments, next_state\n\n def get_config(self):\n config = {\n \"units\": self.units,\n \"scale\": self.scale,\n \"sigmoid_noise\": self.sigmoid_noise,\n \"sigmoid_noise_seed\": self.sigmoid_noise_seed,\n \"score_bias_init\": self.score_bias_init,\n \"mode\": self.mode,\n }\n base_config = super(LuongMonotonicAttention, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n config = _BaseAttentionMechanism.deserialize_inner_layer_from_config(\n config, custom_objects=custom_objects)\n return cls(**config)\n\n\nclass 
AttentionWrapperState(\n collections.namedtuple(\n \"AttentionWrapperState\",\n (\"cell_state\", \"attention\", \"time\", \"alignments\",\n \"alignment_history\", \"attention_state\"))):\n \"\"\"`namedtuple` storing the state of a `AttentionWrapper`.\n\n Contains:\n\n - `cell_state`: The state of the wrapped `RNNCell` at the previous time\n step.\n - `attention`: The attention emitted at the previous time step.\n - `time`: int32 scalar containing the current time step.\n - `alignments`: A single or tuple of `Tensor`(s) containing the\n alignments emitted at the previous time step for each attention\n mechanism.\n - `alignment_history`: (if enabled) a single or tuple of `TensorArray`(s)\n containing alignment matrices from all time steps for each attention\n mechanism. Call `stack()` on each to convert to a `Tensor`.\n - `attention_state`: A single or tuple of nested objects\n containing attention mechanism state for each attention mechanism.\n The objects may contain Tensors or TensorArrays.\n \"\"\"\n\n def clone(self, **kwargs):\n \"\"\"Clone this object, overriding components provided by kwargs.\n\n The new state fields' shape must match original state fields' shape.\n This will be validated, and original fields' shape will be propagated\n to new fields.\n\n Example:\n\n ```python\n initial_state = attention_wrapper.get_initial_state(\n batch_size=..., dtype=...)\n initial_state = initial_state.clone(cell_state=encoder_state)\n ```\n\n Args:\n **kwargs: Any properties of the state object to replace in the\n returned `AttentionWrapperState`.\n\n Returns:\n A new `AttentionWrapperState` whose properties are the same as\n this one, except any overridden properties as provided in `kwargs`.\n \"\"\"\n\n def with_same_shape(old, new):\n \"\"\"Check and set new tensor's shape.\"\"\"\n if isinstance(old, ops.Tensor) and isinstance(new, ops.Tensor):\n if not context.executing_eagerly():\n new_shape = array_ops.shape(new)\n old_shape = array_ops.shape(old)\n with ops.control_dependencies([\n check_ops.assert_equal( # pylint: disable=bad-continuation\n new_shape,\n old_shape,\n data=[new_shape, old_shape])\n ]):\n # Add an identity op so that control deps can kick in.\n return array_ops.identity(new)\n else:\n if old.shape.as_list() != new.shape.as_list():\n raise ValueError(\n \"The shape of the AttentionWrapperState is \"\n \"expected to be same as the one to clone. \"\n \"self.shape: %s, input.shape: %s\" % (old.shape,\n new.shape))\n return new\n return new\n\n return nest.map_structure(\n with_same_shape, self,\n super(AttentionWrapperState, self)._replace(**kwargs))\n\n\ndef _prepare_memory(memory,\n memory_sequence_length=None,\n memory_mask=None,\n check_inner_dims_defined=True):\n \"\"\"Convert to tensor and possibly mask `memory`.\n\n Args:\n memory: `Tensor`, shaped `[batch_size, max_time, ...]`.\n memory_sequence_length: `int32` `Tensor`, shaped `[batch_size]`.\n memory_mask: `boolean` tensor with shape [batch_size, max_time]. The\n memory should be skipped when the corresponding mask is False.\n check_inner_dims_defined: Python boolean. 
If `True`, the `memory`\n argument's shape is checked to ensure all but the two outermost\n dimensions are fully defined.\n\n Returns:\n A (possibly masked), checked, new `memory`.\n\n Raises:\n ValueError: If `check_inner_dims_defined` is `True` and not\n `memory.shape[2:].is_fully_defined()`.\n \"\"\"\n memory = nest.map_structure(\n lambda m: ops.convert_to_tensor(m, name=\"memory\"), memory)\n if memory_sequence_length is not None and memory_mask is not None:\n raise ValueError(\n \"memory_sequence_length and memory_mask can't be provided \"\n \"at same time.\")\n if memory_sequence_length is not None:\n memory_sequence_length = ops.convert_to_tensor(\n memory_sequence_length, name=\"memory_sequence_length\")\n if check_inner_dims_defined:\n\n def _check_dims(m):\n if not m.get_shape()[2:].is_fully_defined():\n raise ValueError(\n \"Expected memory %s to have fully defined inner dims, \"\n \"but saw shape: %s\" % (m.name, m.get_shape()))\n\n nest.map_structure(_check_dims, memory)\n if memory_sequence_length is None and memory_mask is None:\n return memory\n elif memory_sequence_length is not None:\n seq_len_mask = array_ops.sequence_mask(\n memory_sequence_length,\n maxlen=array_ops.shape(nest.flatten(memory)[0])[1],\n dtype=nest.flatten(memory)[0].dtype)\n else:\n # For memory_mask is not None\n seq_len_mask = math_ops.cast(\n memory_mask, dtype=nest.flatten(memory)[0].dtype)\n\n def _maybe_mask(m, seq_len_mask):\n \"\"\"Mask the memory based on the memory mask.\"\"\"\n rank = m.get_shape().ndims\n rank = rank if rank is not None else array_ops.rank(m)\n extra_ones = array_ops.ones(rank - 2, dtype=dtypes.int32)\n seq_len_mask = array_ops.reshape(\n seq_len_mask,\n array_ops.concat((array_ops.shape(seq_len_mask), extra_ones), 0))\n return m * seq_len_mask\n\n return nest.map_structure(lambda m: _maybe_mask(m, seq_len_mask), memory)\n\n\ndef _maybe_mask_score(score,\n memory_sequence_length=None,\n memory_mask=None,\n score_mask_value=None):\n \"\"\"Mask the attention score based on the masks.\"\"\"\n if memory_sequence_length is None and memory_mask is None:\n return score\n if memory_sequence_length is not None and memory_mask is not None:\n raise ValueError(\n \"memory_sequence_length and memory_mask can't be provided \"\n \"at same time.\")\n if memory_sequence_length is not None:\n message = (\"All values in memory_sequence_length must be greater than \"\n \"zero.\")\n with ops.control_dependencies([\n check_ops.assert_positive( # pylint: disable=bad-continuation\n memory_sequence_length,\n message=message)\n ]):\n memory_mask = array_ops.sequence_mask(\n memory_sequence_length, maxlen=array_ops.shape(score)[1])\n score_mask_values = score_mask_value * array_ops.ones_like(score)\n return array_ops.where(memory_mask, score, score_mask_values)\n\n\ndef hardmax(logits, name=None):\n \"\"\"Returns batched one-hot vectors.\n\n The depth index containing the `1` is that of the maximum logit value.\n\n Args:\n logits: A batch tensor of logit values.\n name: Name to use when creating ops.\n Returns:\n A batched one-hot tensor.\n \"\"\"\n with ops.name_scope(name, \"Hardmax\", [logits]):\n logits = ops.convert_to_tensor(logits, name=\"logits\")\n if tensor_shape.dimension_value(logits.get_shape()[-1]) is not None:\n depth = tensor_shape.dimension_value(logits.get_shape()[-1])\n else:\n depth = array_ops.shape(logits)[-1]\n return array_ops.one_hot(\n math_ops.argmax(logits, -1), depth, dtype=logits.dtype)\n\n\ndef _compute_attention(attention_mechanism, cell_output, attention_state,\n 
attention_layer):\n \"\"\"Computes the attention and alignments for a given\n attention_mechanism.\"\"\"\n if isinstance(attention_mechanism, _BaseAttentionMechanism):\n alignments, next_attention_state = attention_mechanism(\n [cell_output, attention_state])\n else:\n # For other class, assume they are following _BaseAttentionMechanism,\n # which takes query and state as separate parameter.\n alignments, next_attention_state = attention_mechanism(\n cell_output, state=attention_state)\n\n # Reshape from [batch_size, memory_time] to [batch_size, 1, memory_time]\n expanded_alignments = array_ops.expand_dims(alignments, 1)\n # Context is the inner product of alignments and values along the\n # memory time dimension.\n # alignments shape is\n # [batch_size, 1, memory_time]\n # attention_mechanism.values shape is\n # [batch_size, memory_time, memory_size]\n # the batched matmul is over memory_time, so the output shape is\n # [batch_size, 1, memory_size].\n # we then squeeze out the singleton dim.\n context_ = math_ops.matmul(expanded_alignments, attention_mechanism.values)\n context_ = array_ops.squeeze(context_, [1])\n\n if attention_layer is not None:\n attention = attention_layer(\n array_ops.concat([cell_output, context_], 1))\n else:\n attention = context_\n\n return attention, alignments, next_attention_state\n\n\nclass AttentionWrapper(layers.AbstractRNNCell):\n \"\"\"Wraps another `RNNCell` with attention.\"\"\"\n\n def __init__(self,\n cell,\n attention_mechanism,\n attention_layer_size=None,\n alignment_history=False,\n cell_input_fn=None,\n output_attention=True,\n initial_cell_state=None,\n name=None,\n attention_layer=None,\n attention_fn=None):\n \"\"\"Construct the `AttentionWrapper`.\n\n **NOTE** If you are using the `BeamSearchDecoder` with a cell wrapped\n in `AttentionWrapper`, then you must ensure that:\n\n - The encoder output has been tiled to `beam_width` via\n `tf.contrib.seq2seq.tile_batch` (NOT `tf.tile`).\n - The `batch_size` argument passed to the `get_initial_state` method of\n this wrapper is equal to `true_batch_size * beam_width`.\n - The initial state created with `get_initial_state` above contains a\n `cell_state` value containing properly tiled final state from the\n encoder.\n\n An example:\n\n ```\n tiled_encoder_outputs = tf.contrib.seq2seq.tile_batch(\n encoder_outputs, multiplier=beam_width)\n tiled_encoder_final_state = tf.contrib.seq2seq.tile_batch(\n encoder_final_state, multiplier=beam_width)\n tiled_sequence_length = tf.contrib.seq2seq.tile_batch(\n sequence_length, multiplier=beam_width)\n attention_mechanism = MyFavoriteAttentionMechanism(\n num_units=attention_depth,\n memory=tiled_inputs,\n memory_sequence_length=tiled_sequence_length)\n attention_cell = AttentionWrapper(cell, attention_mechanism, ...)\n decoder_initial_state = attention_cell.get_initial_state(\n batch_size=true_batch_size * beam_width, dtype=dtype)\n decoder_initial_state = decoder_initial_state.clone(\n cell_state=tiled_encoder_final_state)\n ```\n\n Args:\n cell: An instance of `RNNCell`.\n attention_mechanism: A list of `AttentionMechanism` instances or a\n single instance.\n attention_layer_size: A list of Python integers or a single Python\n integer, the depth of the attention (output) layer(s). If None\n (default), use the context as attention at each time step.\n Otherwise, feed the context and cell output into the attention\n layer to generate attention at each time step. If\n attention_mechanism is a list, attention_layer_size must be a list\n of the same length. 
If attention_layer is set, this must be None.\n If attention_fn is set, it must be guaranteed that the outputs of\n attention_fn also meet the above requirements.\n alignment_history: Python boolean, whether to store alignment history\n from all time steps in the final output state (currently stored as\n a time major `TensorArray` on which you must call `stack()`).\n cell_input_fn: (optional) A `callable`. The default is:\n `lambda inputs, attention:\n array_ops.concat([inputs, attention], -1)`.\n output_attention: Python bool. If `True` (default), the output at\n each time step is the attention value. This is the behavior of\n Luong-style attention mechanisms. If `False`, the output at each\n time step is the output of `cell`. This is the behavior of\n Bahdanau-style attention mechanisms. In both cases, the\n `attention` tensor is propagated to the next time step via the\n state and is used there. This flag only controls whether the\n attention mechanism is propagated up to the next cell in an RNN\n stack or to the top RNN output.\n initial_cell_state: The initial state value to use for the cell when\n the user calls `get_initial_state()`. Note that if this value is\n provided now, and the user uses a `batch_size` argument of\n `get_initial_state` which does not match the batch size of\n `initial_cell_state`, proper behavior is not guaranteed.\n name: Name to use when creating ops.\n attention_layer: A list of `tf.layers.Layer` instances or a\n single `tf.layers.Layer` instance taking the context and cell\n output as inputs to generate attention at each time step. If None\n (default), use the context as attention at each time step. If\n attention_mechanism is a list, attention_layer must be a list of\n the same length. If attention_layers_size is set, this must be\n None.\n attention_fn: An optional callable function that allows users to\n provide their own customized attention function, which takes input\n (attention_mechanism, cell_output, attention_state,\n attention_layer) and outputs (attention, alignments,\n next_attention_state). 
If provided, the attention_layer_size should\n be the size of the outputs of attention_fn.\n\n Raises:\n TypeError: `attention_layer_size` is not None and\n (`attention_mechanism` is a list but `attention_layer_size` is not;\n or vice versa).\n ValueError: if `attention_layer_size` is not None,\n `attention_mechanism` is a list, and its length does not match that\n of `attention_layer_size`; if `attention_layer_size` and\n `attention_layer` are set simultaneously.\n \"\"\"\n super(AttentionWrapper, self).__init__(name=name)\n rnn_cell_impl.assert_like_rnncell(\"cell\", cell)\n if isinstance(attention_mechanism, (list, tuple)):\n self._is_multi = True\n attention_mechanisms = list(attention_mechanism)\n for attention_mechanism in attention_mechanisms:\n if not isinstance(attention_mechanism, AttentionMechanism):\n raise TypeError(\n \"attention_mechanism must contain only instances of \"\n \"AttentionMechanism, saw type: %s\" %\n type(attention_mechanism).__name__)\n else:\n self._is_multi = False\n if not isinstance(attention_mechanism, AttentionMechanism):\n raise TypeError(\n \"attention_mechanism must be an AttentionMechanism or \"\n \"list of multiple AttentionMechanism instances, saw type: \"\n \"%s\" % type(attention_mechanism).__name__)\n attention_mechanisms = [attention_mechanism]\n\n if cell_input_fn is None:\n cell_input_fn = (lambda inputs, attention: array_ops.concat(\n [inputs, attention], -1))\n else:\n if not callable(cell_input_fn):\n raise TypeError(\"cell_input_fn must be callable, saw type: %s\"\n % type(cell_input_fn).__name__)\n\n if attention_layer_size is not None and attention_layer is not None:\n raise ValueError(\n \"Only one of attention_layer_size and attention_layer \"\n \"should be set\")\n\n if attention_layer_size is not None:\n attention_layer_sizes = tuple(\n attention_layer_size if isinstance(attention_layer_size, (\n list, tuple)) else (attention_layer_size,))\n if len(attention_layer_sizes) != len(attention_mechanisms):\n raise ValueError(\n \"If provided, attention_layer_size must contain exactly \"\n \"one integer per attention_mechanism, saw: %d vs %d\" %\n (len(attention_layer_sizes), len(attention_mechanisms)))\n self._attention_layers = list(\n layers.Dense(\n attention_layer_size,\n name=\"attention_layer\",\n use_bias=False,\n dtype=attention_mechanisms[i].dtype) for i,\n attention_layer_size in enumerate(attention_layer_sizes))\n self._attention_layer_size = sum(attention_layer_sizes)\n elif attention_layer is not None:\n self._attention_layers = list(\n attention_layer if isinstance(attention_layer, (\n list, tuple)) else (attention_layer,))\n if len(self._attention_layers) != len(attention_mechanisms):\n raise ValueError(\n \"If provided, attention_layer must contain exactly one \"\n \"layer per attention_mechanism, saw: %d vs %d\" % (len(\n self._attention_layers), len(attention_mechanisms)))\n self._attention_layer_size = sum(\n tensor_shape.dimension_value(\n layer.compute_output_shape([\n None, cell.output_size + tensor_shape.dimension_value(\n mechanism.values.shape[-1])\n ])[-1]) for layer, mechanism in zip(\n self._attention_layers, attention_mechanisms))\n else:\n self._attention_layers = None\n self._attention_layer_size = sum(\n tensor_shape.dimension_value(\n attention_mechanism.values.shape[-1])\n for attention_mechanism in attention_mechanisms)\n\n if attention_fn is None:\n attention_fn = _compute_attention\n self._attention_fn = attention_fn\n\n self._cell = cell\n self._attention_mechanisms = attention_mechanisms\n 
self._cell_input_fn = cell_input_fn\n self._output_attention = output_attention\n self._alignment_history = alignment_history\n with ops.name_scope(name, \"AttentionWrapperInit\"):\n if initial_cell_state is None:\n self._initial_cell_state = None\n else:\n final_state_tensor = nest.flatten(initial_cell_state)[-1]\n state_batch_size = (tensor_shape.dimension_value(\n final_state_tensor.shape[0])\n or array_ops.shape(final_state_tensor)[0])\n error_message = (\n \"When constructing AttentionWrapper %s: \" % self.name +\n \"Non-matching batch sizes between the memory \"\n \"(encoder output) and initial_cell_state. Are you using \"\n \"the BeamSearchDecoder? You may need to tile your \"\n \"initial state via the tf.contrib.seq2seq.tile_batch \"\n \"function with argument multiple=beam_width.\")\n with ops.control_dependencies(\n self._batch_size_checks( # pylint: disable=bad-continuation\n state_batch_size, error_message)):\n self._initial_cell_state = nest.map_structure(\n lambda s: array_ops.identity(\n s, name=\"check_initial_cell_state\"),\n initial_cell_state)\n\n def _batch_size_checks(self, batch_size, error_message):\n return [\n check_ops.assert_equal(\n batch_size,\n attention_mechanism.batch_size,\n message=error_message)\n for attention_mechanism in self._attention_mechanisms\n ]\n\n def _item_or_tuple(self, seq):\n \"\"\"Returns `seq` as tuple or the singular element.\n\n Which is returned is determined by how the AttentionMechanism(s) were\n passed to the constructor.\n\n Args:\n seq: A non-empty sequence of items or generator.\n\n Returns:\n Either the values in the sequence as a tuple if\n AttentionMechanism(s) were passed to the constructor as a sequence\n or the singular element.\n \"\"\"\n t = tuple(seq)\n if self._is_multi:\n return t\n else:\n return t[0]\n\n @property\n def output_size(self):\n if self._output_attention:\n return self._attention_layer_size\n else:\n return self._cell.output_size\n\n @property\n def state_size(self):\n \"\"\"The `state_size` property of `AttentionWrapper`.\n\n Returns:\n An `AttentionWrapperState` tuple containing shapes used\n by this object.\n \"\"\"\n return AttentionWrapperState(\n cell_state=self._cell.state_size,\n time=tensor_shape.TensorShape([]),\n attention=self._attention_layer_size,\n alignments=self._item_or_tuple(\n a.alignments_size for a in self._attention_mechanisms),\n attention_state=self._item_or_tuple(\n a.state_size for a in self._attention_mechanisms),\n alignment_history=self._item_or_tuple(\n a.alignments_size if self._alignment_history else () for a in\n self._attention_mechanisms)) # sometimes a TensorArray\n\n def get_initial_state(self, inputs=None, batch_size=None, dtype=None):\n \"\"\"Return an initial (zero) state tuple for this `AttentionWrapper`.\n\n **NOTE** Please see the initializer documentation for details of how\n to call `get_initial_state` if using an `AttentionWrapper` with a\n `BeamSearchDecoder`.\n\n Args:\n inputs: The inputs that will be fed to this cell.\n batch_size: `0D` integer tensor: the batch size.\n dtype: The internal state data type.\n\n Returns:\n An `AttentionWrapperState` tuple containing zeroed out tensors and,\n possibly, empty `TensorArray` objects.\n\n Raises:\n ValueError: (or, possibly at runtime, InvalidArgument), if\n `batch_size` does not match the output size of the encoder passed\n to the wrapper object at initialization time.\n \"\"\"\n if inputs is not None:\n batch_size = array_ops.shape(inputs)[0]\n dtype = inputs.dtype\n with ops.name_scope(\n type(self).__name__ 
+ \"ZeroState\", values=[batch_size]): # pylint: disable=bad-continuation\n if self._initial_cell_state is not None:\n cell_state = self._initial_cell_state\n else:\n cell_state = self._cell.get_initial_state(\n batch_size=batch_size, dtype=dtype)\n error_message = (\n \"When calling get_initial_state of AttentionWrapper %s: \" %\n self.name + \"Non-matching batch sizes between the memory \"\n \"(encoder output) and the requested batch size. Are you using \"\n \"the BeamSearchDecoder? If so, make sure your encoder output \"\n \"has been tiled to beam_width via \"\n \"tf.contrib.seq2seq.tile_batch, and the batch_size= argument \"\n \"passed to get_initial_state is batch_size * beam_width.\")\n with ops.control_dependencies(\n self._batch_size_checks(batch_size, error_message)): # pylint: disable=bad-continuation\n cell_state = nest.map_structure(\n lambda s: array_ops.identity(s, name=\"checked_cell_state\"),\n cell_state)\n initial_alignments = [\n attention_mechanism.initial_alignments(batch_size, dtype)\n for attention_mechanism in self._attention_mechanisms\n ]\n return AttentionWrapperState(\n cell_state=cell_state,\n time=array_ops.zeros([], dtype=dtypes.int32),\n attention=_zero_state_tensors(self._attention_layer_size,\n batch_size, dtype),\n alignments=self._item_or_tuple(initial_alignments),\n attention_state=self._item_or_tuple(\n attention_mechanism.initial_state(batch_size, dtype)\n for attention_mechanism in self._attention_mechanisms),\n alignment_history=self._item_or_tuple(\n tensor_array_ops.TensorArray(\n dtype,\n size=0,\n dynamic_size=True,\n element_shape=alignment.shape) if self.\n _alignment_history else ()\n for alignment in initial_alignments))\n\n def call(self, inputs, state):\n \"\"\"Perform a step of attention-wrapped RNN.\n\n - Step 1: Mix the `inputs` and previous step's `attention` output via\n `cell_input_fn`.\n - Step 2: Call the wrapped `cell` with this input and its previous\n state.\n - Step 3: Score the cell's output with `attention_mechanism`.\n - Step 4: Calculate the alignments by passing the score through the\n `normalizer`.\n - Step 5: Calculate the context vector as the inner product between the\n alignments and the attention_mechanism's values (memory).\n - Step 6: Calculate the attention output by concatenating the cell\n output and context through the attention layer (a linear layer with\n `attention_layer_size` outputs).\n\n Args:\n inputs: (Possibly nested tuple of) Tensor, the input at this time\n step.\n state: An instance of `AttentionWrapperState` containing\n tensors from the previous time step.\n\n Returns:\n A tuple `(attention_or_cell_output, next_state)`, where:\n\n - `attention_or_cell_output` depending on `output_attention`.\n - `next_state` is an instance of `AttentionWrapperState`\n containing the state calculated at this time step.\n\n Raises:\n TypeError: If `state` is not an instance of `AttentionWrapperState`.\n \"\"\"\n if not isinstance(state, AttentionWrapperState):\n raise TypeError(\n \"Expected state to be instance of AttentionWrapperState. 
\"\n \"Received type %s instead.\" % type(state))\n\n # Step 1: Calculate the true inputs to the cell based on the\n # previous attention value.\n cell_inputs = self._cell_input_fn(inputs, state.attention)\n cell_state = state.cell_state\n cell_output, next_cell_state = self._cell(cell_inputs, cell_state)\n\n cell_batch_size = (tensor_shape.dimension_value(cell_output.shape[0])\n or array_ops.shape(cell_output)[0])\n error_message = (\n \"When applying AttentionWrapper %s: \" % self.name +\n \"Non-matching batch sizes between the memory \"\n \"(encoder output) and the query (decoder output). Are you using \"\n \"the BeamSearchDecoder? You may need to tile your memory input \"\n \"via the tf.contrib.seq2seq.tile_batch function with argument \"\n \"multiple=beam_width.\")\n with ops.control_dependencies(\n self._batch_size_checks(cell_batch_size, error_message)): # pylint: disable=bad-continuation\n cell_output = array_ops.identity(\n cell_output, name=\"checked_cell_output\")\n\n if self._is_multi:\n previous_attention_state = state.attention_state\n previous_alignment_history = state.alignment_history\n else:\n previous_attention_state = [state.attention_state]\n previous_alignment_history = [state.alignment_history]\n\n all_alignments = []\n all_attentions = []\n all_attention_states = []\n maybe_all_histories = []\n for i, attention_mechanism in enumerate(self._attention_mechanisms):\n attention, alignments, next_attention_state = self._attention_fn(\n attention_mechanism, cell_output, previous_attention_state[i],\n self._attention_layers[i] if self._attention_layers else None)\n alignment_history = previous_alignment_history[i].write(\n state.time, alignments) if self._alignment_history else ()\n\n all_attention_states.append(next_attention_state)\n all_alignments.append(alignments)\n all_attentions.append(attention)\n maybe_all_histories.append(alignment_history)\n\n attention = array_ops.concat(all_attentions, 1)\n next_state = AttentionWrapperState(\n time=state.time + 1,\n cell_state=next_cell_state,\n attention=attention,\n attention_state=self._item_or_tuple(all_attention_states),\n alignments=self._item_or_tuple(all_alignments),\n alignment_history=self._item_or_tuple(maybe_all_histories))\n\n if self._output_attention:\n return attention, next_state\n else:\n return cell_output, next_state\n" ]
[ [ "tensorflow.python.ops.array_ops.identity", "tensorflow.python.ops.math_ops.matmul", "tensorflow.python.ops.array_ops.where", "tensorflow.python.ops.array_ops.ones", "tensorflow.python.ops.math_ops.tanh", "tensorflow.python.framework.tensor_shape.dimension_value", "tensorflow.python.ops.math_ops.sigmoid", "tensorflow.python.ops.array_ops.ones_like", "numpy.finfo", "tensorflow.python.ops.array_ops.expand_dims", "tensorflow.python.util.nest.flatten", "tensorflow.python.keras.layers.Dense", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.keras.layers.deserialize", "tensorflow.python.ops.math_ops.cast", "tensorflow.python.ops.array_ops.concat", "tensorflow.python.ops.init_ops.constant_initializer", "tensorflow.python.ops.math_ops.cumsum", "tensorflow.python.ops.rnn_cell_impl.assert_like_rnncell", "tensorflow.python.keras.initializers.get", "tensorflow.python.ops.array_ops.rank", "tensorflow.python.ops.check_ops.assert_equal", "tensorflow.python.ops.init_ops.zeros_initializer", "tensorflow.python.keras.engine.base_layer_utils.mark_checked", "tensorflow.python.framework.dtypes.as_dtype", "tensorflow.python.ops.array_ops.squeeze", "tensorflow.python.ops.math_ops.cumprod", "tensorflow.python.ops.array_ops.zeros", "tensorflow.python.ops.math_ops.square", "tensorflow.python.keras.initializers.serialize", "tensorflow.python.ops.math_ops.argmax", "tensorflow.python.framework.tensor_shape.TensorShape", "tensorflow.python.util.nest.map_structure", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.ops.check_ops.assert_positive", "tensorflow.python.ops.clip_ops.clip_by_value", "tensorflow.python.ops.array_ops.transpose", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.ops.array_ops.reshape", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.ops.tensor_array_ops.TensorArray" ] ]
Elizaaaaa/deep-learning-containers
[ "6274ecb264645070d11b27e5c7e60d2e4110537d" ]
[ "test/dlc_tests/container_tests/bin/examples/KerasMXNet/cifar10_resnet.py" ]
[ "\"\"\"Trains a ResNet on the CIFAR10 dataset.\n \nResNet v1\n[a] Deep Residual Learning for Image Recognition\nhttps://arxiv.org/pdf/1512.03385.pdf\n \nResNet v2\n[b] Identity Mappings in Deep Residual Networks\nhttps://arxiv.org/pdf/1603.05027.pdf\n\"\"\"\n \nfrom __future__ import print_function\nimport keras\nfrom keras.layers import Dense, Conv2D, BatchNormalization, Activation\nfrom keras.layers import AveragePooling2D, Input, Flatten\nfrom keras.optimizers import Adam\nfrom keras.callbacks import ModelCheckpoint, LearningRateScheduler\nfrom keras.callbacks import ReduceLROnPlateau\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.regularizers import l2\nfrom keras import backend as K\nfrom keras.models import Model\nfrom keras.datasets import cifar10\nimport numpy as np\nimport os\n \nK.set_image_data_format('channels_first')\n \n# Training parameters\nbatch_size = 32 # orig paper trained all networks with batch_size=128\nepochs = 10\ndata_augmentation = True\nnum_classes = 10\n \n# Subtracting pixel mean improves accuracy\nsubtract_pixel_mean = True\n \n# Model parameter\n# ----------------------------------------------------------------------------\n# | | 200-epoch | Orig Paper| 200-epoch | Orig Paper| sec/epoch\n# Model | n | ResNet v1 | ResNet v1 | ResNet v2 | ResNet v2 | GTX1080Ti\n# |v1(v2)| %Accuracy | %Accuracy | %Accuracy | %Accuracy | v1 (v2)\n# ----------------------------------------------------------------------------\n# ResNet20 | 3 (2)| 92.16 | 91.25 | ----- | ----- | 35 (---)\n# ResNet32 | 5(NA)| 92.46 | 92.49 | NA | NA | 50 ( NA)\n# ResNet44 | 7(NA)| 92.50 | 92.83 | NA | NA | 70 ( NA)\n# ResNet56 | 9 (6)| 92.71 | 93.03 | 93.01 | NA | 90 (100)\n# ResNet110 |18(12)| 92.65 | 93.39+-.16| 93.15 | 93.63 | 165(180)\n# ResNet164 |27(18)| ----- | 94.07 | ----- | 94.54 | ---(---)\n# ResNet1001| (111)| ----- | 92.39 | ----- | 95.08+-.14| ---(---)\n# ---------------------------------------------------------------------------\nn = 3\n \n# Model version\n# Orig paper: version = 1 (ResNet v1), Improved ResNet: version = 2 (ResNet v2)\nversion = 1\n \n# Computed depth from supplied model parameter n\nif version == 1:\n depth = n * 6 + 2\nelif version == 2:\n depth = n * 9 + 2\n \n# Model name, depth and version\nmodel_type = 'ResNet%dv%d' % (depth, version)\n \n# Load the CIFAR10 data.\n(x_train, y_train), (x_test, y_test) = cifar10.load_data()\n \n# Input image dimensions.\ninput_shape = x_train.shape[1:]\n \n# Normalize data.\nx_train = x_train.astype('float32') / 255\nx_test = x_test.astype('float32') / 255\n \n# If subtract pixel mean is enabled\nif subtract_pixel_mean:\n x_train_mean = np.mean(x_train, axis=0)\n x_train -= x_train_mean\n x_test -= x_train_mean\n \nprint('x_train shape:', x_train.shape)\nprint(x_train.shape[0], 'train samples')\nprint(x_test.shape[0], 'test samples')\nprint('y_train shape:', y_train.shape)\n \n# Convert class vectors to binary class matrices.\ny_train = keras.utils.to_categorical(y_train, num_classes)\ny_test = keras.utils.to_categorical(y_test, num_classes)\n \n \ndef lr_schedule(epoch):\n \"\"\"Learning Rate Schedule\n \n Learning rate is scheduled to be reduced after 80, 120, 160, 180 epochs.\n Called automatically every epoch as part of callbacks during training.\n \n # Arguments\n epoch (int): The number of epochs\n \n # Returns\n lr (float32): learning rate\n \"\"\"\n lr = 1e-3\n if epoch > 180:\n lr *= 0.5e-3\n elif epoch > 160:\n lr *= 1e-3\n elif epoch > 120:\n lr *= 1e-2\n elif epoch > 80:\n lr *= 1e-1\n 
print('Learning rate: ', lr)\n return lr\n \n \ndef resnet_layer(inputs,\n num_filters=16,\n kernel_size=3,\n strides=1,\n activation='relu',\n batch_normalization=True,\n conv_first=True):\n \"\"\"2D Convolution-Batch Normalization-Activation stack builder\n \n # Arguments\n inputs (tensor): input tensor from input image or previous layer\n num_filters (int): Conv2D number of filters\n kernel_size (int): Conv2D square kernel dimensions\n strides (int): Conv2D square stride dimensions\n activation (string): activation name\n batch_normalization (bool): whether to include batch normalization\n conv_first (bool): conv-bn-activation (True) or\n activation-bn-conv (False)\n \n # Returns\n x (tensor): tensor as input to the next layer\n \"\"\"\n conv = Conv2D(num_filters,\n kernel_size=kernel_size,\n strides=strides,\n padding='same',\n kernel_initializer='he_normal',\n kernel_regularizer=l2(1e-4))\n \n x = inputs\n if conv_first:\n x = conv(x)\n if batch_normalization:\n x = BatchNormalization()(x)\n if activation is not None:\n x = Activation(activation)(x)\n else:\n if batch_normalization:\n x = BatchNormalization()(x)\n if activation is not None:\n x = Activation(activation)(x)\n x = conv(x)\n return x\n \n \ndef resnet_v1(input_shape, depth, num_classes=10):\n \"\"\"ResNet Version 1 Model builder [a]\n \n Stacks of 2 x (3 x 3) Conv2D-BN-ReLU\n Last ReLU is after the shortcut connection.\n At the beginning of each stage, the feature map size is halved (downsampled)\n by a convolutional layer with strides=2, while the number of filters is\n doubled. Within each stage, the layers have the same number of filters and the\n same feature map sizes.\n Feature map sizes:\n stage 0: 32x32, 16\n stage 1: 16x16, 32\n stage 2: 8x8, 64\n The number of parameters is approx the same as Table 6 of [a]:\n ResNet20 0.27M\n ResNet32 0.46M\n ResNet44 0.66M\n ResNet56 0.85M\n ResNet110 1.7M\n \n # Arguments\n input_shape (tensor): shape of input image tensor\n depth (int): number of core convolutional layers\n num_classes (int): number of classes (CIFAR10 has 10)\n \n # Returns\n model (Model): Keras model instance\n \"\"\"\n if (depth - 2) % 6 != 0:\n raise ValueError('depth should be 6n+2 (eg 20, 32, 44 in [a])')\n # Start model definition.\n num_filters = 16\n num_res_blocks = int((depth - 2) / 6)\n \n inputs = Input(shape=input_shape)\n x = resnet_layer(inputs=inputs)\n # Instantiate the stack of residual units\n for stack in range(3):\n for res_block in range(num_res_blocks):\n strides = 1\n if stack > 0 and res_block == 0: # first layer but not first stack\n strides = 2 # downsample\n y = resnet_layer(inputs=x,\n num_filters=num_filters,\n strides=strides)\n y = resnet_layer(inputs=y,\n num_filters=num_filters,\n activation=None)\n if stack > 0 and res_block == 0: # first layer but not first stack\n # linear projection residual shortcut connection to match\n # changed dims\n x = resnet_layer(inputs=x,\n num_filters=num_filters,\n kernel_size=1,\n strides=strides,\n activation=None,\n batch_normalization=False)\n x = keras.layers.add([x, y])\n x = Activation('relu')(x)\n num_filters *= 2\n \n # Add classifier on top.\n # v1 does not use BN after last shortcut connection-ReLU\n x = AveragePooling2D(pool_size=8)(x)\n y = Flatten()(x)\n outputs = Dense(num_classes,\n activation='softmax',\n kernel_initializer='he_normal')(y)\n \n # Instantiate model.\n model = Model(inputs=inputs, outputs=outputs)\n return model\n \n \ndef resnet_v2(input_shape, depth, num_classes=10):\n \"\"\"ResNet Version 2 Model builder 
[b]\n \n Stacks of (1 x 1)-(3 x 3)-(1 x 1) BN-ReLU-Conv2D, also known as a\n bottleneck layer\n First shortcut connection per layer is 1 x 1 Conv2D.\n Second and onwards shortcut connection is identity.\n At the beginning of each stage, the feature map size is halved (downsampled)\n by a convolutional layer with strides=2, while the number of filter maps is\n doubled. Within each stage, the layers have the same number of filters and the\n same filter map sizes.\n Feature map sizes:\n conv1 : 32x32, 16\n stage 0: 32x32, 64\n stage 1: 16x16, 128\n stage 2: 8x8, 256\n \n # Arguments\n input_shape (tensor): shape of input image tensor\n depth (int): number of core convolutional layers\n num_classes (int): number of classes (CIFAR10 has 10)\n \n # Returns\n model (Model): Keras model instance\n \"\"\"\n if (depth - 2) % 9 != 0:\n raise ValueError('depth should be 9n+2 (eg 56 or 110 in [b])')\n # Start model definition.\n num_filters_in = 16\n num_res_blocks = int((depth - 2) / 9)\n \n inputs = Input(shape=input_shape)\n # v2 performs Conv2D with BN-ReLU on input before splitting into 2 paths\n x = resnet_layer(inputs=inputs,\n num_filters=num_filters_in,\n conv_first=True)\n \n # Instantiate the stack of residual units\n for stage in range(3):\n for res_block in range(num_res_blocks):\n activation = 'relu'\n batch_normalization = True\n strides = 1\n if stage == 0:\n num_filters_out = num_filters_in * 4\n if res_block == 0: # first layer and first stage\n activation = None\n batch_normalization = False\n else:\n num_filters_out = num_filters_in * 2\n if res_block == 0: # first layer but not first stage\n strides = 2 # downsample\n \n # bottleneck residual unit\n y = resnet_layer(inputs=x,\n num_filters=num_filters_in,\n kernel_size=1,\n strides=strides,\n activation=activation,\n batch_normalization=batch_normalization,\n conv_first=False)\n y = resnet_layer(inputs=y,\n num_filters=num_filters_in,\n conv_first=False)\n y = resnet_layer(inputs=y,\n num_filters=num_filters_out,\n kernel_size=1,\n conv_first=False)\n if res_block == 0:\n # linear projection residual shortcut connection to match\n # changed dims\n x = resnet_layer(inputs=x,\n num_filters=num_filters_out,\n kernel_size=1,\n strides=strides,\n activation=None,\n batch_normalization=False)\n x = keras.layers.add([x, y])\n \n num_filters_in = num_filters_out\n \n # Add classifier on top.\n # v2 has BN-ReLU before Pooling\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n x = AveragePooling2D(pool_size=8)(x)\n y = Flatten()(x)\n outputs = Dense(num_classes,\n activation='softmax',\n kernel_initializer='he_normal')(y)\n \n # Instantiate model.\n model = Model(inputs=inputs, outputs=outputs)\n return model\n \n \nif version == 2:\n model = resnet_v2(input_shape=input_shape, depth=depth)\nelse:\n model = resnet_v1(input_shape=input_shape, depth=depth)\n \nmodel.compile(loss='categorical_crossentropy',\n optimizer=Adam(lr=lr_schedule(0)),\n metrics=['accuracy'])\nmodel.summary()\nprint(model_type)\n \n# Prepare model saving directory.\nsave_dir = os.path.join(os.getcwd(), 'saved_models')\nmodel_name = 'cifar10_%s_model.{epoch:03d}.h5' % model_type\nif not os.path.isdir(save_dir):\n os.makedirs(save_dir)\nfilepath = os.path.join(save_dir, model_name)\n \n# Prepare callbacks for model saving and for learning rate adjustment.\ncheckpoint = ModelCheckpoint(filepath=filepath,\n monitor='val_acc',\n verbose=1,\n save_best_only=True)\n \nlr_scheduler = LearningRateScheduler(lr_schedule)\n \nlr_reducer = 
ReduceLROnPlateau(factor=np.sqrt(0.1),\n cooldown=0,\n patience=5,\n min_lr=0.5e-6)\n \ncallbacks = [checkpoint, lr_reducer, lr_scheduler]\n \n# Run training, with or without data augmentation.\nif not data_augmentation:\n print('Not using data augmentation.')\n model.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=epochs,\n validation_data=(x_test, y_test),\n shuffle=True,\n callbacks=callbacks)\nelse:\n print('Using real-time data augmentation.')\n # This will do preprocessing and realtime data augmentation:\n datagen = ImageDataGenerator(\n # set input mean to 0 over the dataset\n featurewise_center=False,\n # set each sample mean to 0\n samplewise_center=False,\n # divide inputs by std of dataset\n featurewise_std_normalization=False,\n # divide each input by its std\n samplewise_std_normalization=False,\n # apply ZCA whitening\n zca_whitening=False,\n # randomly rotate images in the range (deg 0 to 180)\n rotation_range=0,\n # randomly shift images horizontally\n width_shift_range=0.1,\n # randomly shift images vertically\n height_shift_range=0.1,\n # randomly flip images\n horizontal_flip=True,\n # randomly flip images\n vertical_flip=False)\n \n # Compute quantities required for featurewise normalization\n # (std, mean, and principal components if ZCA whitening is applied).\n datagen.fit(x_train)\n \n # Fit the model on the batches generated by datagen.flow().\n model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),\n validation_data=(x_test, y_test),\n epochs=epochs, verbose=1, workers=4,\n callbacks=callbacks)\n \n# Score trained model.\nscores = model.evaluate(x_test, y_test, verbose=1)\nprint('Test loss:', scores[0])\nprint('Test accuracy:', scores[1])" ]
[ [ "numpy.sqrt", "numpy.mean" ] ]
MMnash/ParlAI
[ "7429016bce901b00f9bf4b06c82687d49cd548fa" ]
[ "projects/light_whoami/agents/pacer.py" ]
[ "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\"\"\"\nPACER: Partial And Complete Efficient Re-ranking.\n\nSee `PacerTreeSearchMixin.modify_logprobs` for a complete description.\n\"\"\"\nimport random\nimport torch\nimport torch.nn.functional as F\nfrom typing import Optional, Any, Dict, List\n\nfrom parlai.agents.transformer.transformer import TransformerGeneratorAgent\nfrom parlai.core.opt import Opt\nfrom parlai.core.params import ParlaiParser\nfrom parlai.core.torch_generator_agent import (\n TorchGeneratorAgent,\n TreeSearch,\n GreedySearch,\n BeamSearch,\n DelayedBeamSearch,\n TopKSampling,\n NucleusSampling,\n TSType,\n)\nimport parlai.utils.logging as logging\nfrom parlai.utils.torch import neginf\nfrom projects.light_whoami.agents.rpa_rerank import (\n RPAReranker,\n RPARerankAgent,\n LongRPARerankAgent,\n)\nfrom projects.light_whoami.task.utils import extract_characters\nfrom projects.msc.agents.long_tga import TransformerVariantAgent\n\n\nclass PacerAgentMixin:\n \"\"\"\n Override TGA to use a different tree search decoder.\n \"\"\"\n\n @classmethod\n def add_cmdline_args(\n cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None\n ) -> ParlaiParser:\n RPAReranker.add_cmdline_args(parser, partial_opt=partial_opt)\n group = parser.add_argument_group('PACER Group')\n group.add_argument(\n '--pacer-n-tokens',\n type=int,\n default=10,\n help='How many tokens to re-rank and consider',\n )\n group.add_argument(\n '--pacer-frequency-ratio',\n type=float,\n default=0.05,\n help='The frequency with which to apply PACER re-ranking.',\n )\n return parser\n\n def __init__(self, opt: Opt, shared=None):\n super().__init__(opt, shared)\n if not shared:\n self.classifier = RPAReranker(opt)\n else:\n self.classifier = shared['classifier']\n assert opt[\n 'beam_block_full_context'\n ], 'must set --beam-block-full-context True to use PACER'\n\n def share(self) -> Dict[str, Any]:\n shared = super().share()\n shared['classifier'] = self.classifier\n return shared\n\n def _get_batch_context(self, batch):\n \"\"\"\n Override to always provide full context.\n \"\"\"\n if 'full_text_vec' not in batch:\n logging.warn('Batch does not have full text vec, resorting to text vec')\n return batch.text_vec\n return batch.full_text_vec\n\n def _treesearch_factory(self, device: int) -> TreeSearch:\n method = self.opt.get('inference', 'greedy')\n beam_size = self.opt.get('beam_size', 1)\n pacer_kwargs = {\n 'classifier': self.classifier,\n 'pacer_n_tokens': self.opt['pacer_n_tokens'],\n 'pacer_frequency_ratio': self.opt['pacer_frequency_ratio'],\n 'agent': self,\n }\n if method == 'greedy':\n return PacerGreedySearch(\n beam_size,\n min_length=0,\n block_ngram=self.beam_block_ngram,\n context_block_ngram=self.beam_context_block_ngram,\n length_penalty=self.opt.get('beam_length_penalty', 0.65),\n padding_token=self.NULL_IDX,\n bos_token=self.START_IDX,\n eos_token=self.END_IDX,\n device=device,\n **pacer_kwargs,\n )\n elif method == 'beam':\n return PacerBeamSearch(\n beam_size,\n min_length=self.beam_min_length,\n block_ngram=self.beam_block_ngram,\n context_block_ngram=self.beam_context_block_ngram,\n length_penalty=self.opt.get('beam_length_penalty', 0.65),\n padding_token=self.NULL_IDX,\n bos_token=self.START_IDX,\n eos_token=self.END_IDX,\n device=device,\n **pacer_kwargs,\n )\n elif method == 'delayedbeam':\n return PacerDelayedBeamSearch(\n 
self.opt['topk'],\n self.opt['beam_delay'],\n beam_size,\n min_length=self.beam_min_length,\n block_ngram=self.beam_block_ngram,\n context_block_ngram=self.beam_context_block_ngram,\n length_penalty=self.opt.get('beam_length_penalty', 0.65),\n padding_token=self.NULL_IDX,\n bos_token=self.START_IDX,\n eos_token=self.END_IDX,\n device=device,\n **pacer_kwargs,\n )\n elif method == 'topk':\n return PacerTopKSampling(\n self.opt['topk'],\n beam_size,\n min_length=self.beam_min_length,\n block_ngram=self.beam_block_ngram,\n context_block_ngram=self.beam_context_block_ngram,\n length_penalty=self.opt.get('beam_length_penalty', 0.65),\n padding_token=self.NULL_IDX,\n bos_token=self.START_IDX,\n eos_token=self.END_IDX,\n device=device,\n **pacer_kwargs,\n )\n elif method == 'nucleus':\n return PacerNucleusSampling(\n self.opt['topp'],\n beam_size,\n min_length=self.beam_min_length,\n block_ngram=self.beam_block_ngram,\n context_block_ngram=self.beam_context_block_ngram,\n length_penalty=self.opt.get('beam_length_penalty', 0.65),\n padding_token=self.NULL_IDX,\n bos_token=self.START_IDX,\n eos_token=self.END_IDX,\n device=device,\n **pacer_kwargs,\n )\n else:\n raise NotImplementedError(\n f'Other gen methods not available for PACER: {method}'\n )\n\n\nclass PacerTreeSearchMixin(TreeSearch):\n def __init__(self, *args, **kwargs):\n self.classifier = kwargs.pop('classifier')\n self.agent = kwargs.pop('agent')\n self.n_toks = kwargs.pop('pacer_n_tokens')\n self.frequency = kwargs.pop('pacer_frequency_ratio')\n super().__init__(*args, **kwargs)\n\n def set_batch_context(\n self: TSType, batch_context_list: List[List[int]], batch_idx: int\n ) -> TSType:\n \"\"\"\n Override to save de-tokenized version of context.\n \"\"\"\n self.context = batch_context_list[batch_idx]\n self.context_str = self.agent._v2t(self.context)\n self.character = extract_characters(self.context_str)['_self_name']\n return self\n\n def select_paths(\n self, logprobs: torch.Tensor, prior_scores: torch.Tensor, current_length: int\n ):\n \"\"\"\n Override select_paths to modify the logprobs according to classifier outputs.\n\n :param logprobs:\n a (beamsize x vocab) tensor of log probabilities. If this is the first\n turn in the dialogue, it will be a (1 x vocab) tensor.\n :param prior_scores:\n a (beamsize) tensor of weights with the cumulative running\n log-probability of each beam. If the first turn, it will be a (1) tensor.\n :param current_length:\n the current length in tokens\n :return:\n a (hypothesis_ids, token_id, scores) tuple, where:\n\n - hypothesis_ids is a LongTensor of hypotheses we're extending. May have\n repeats, but should always be (beamsize) long.\n - token_ids is a (beamsize) LongTensor of next-token choices for\n each of the hypotheses.\n - scores is a (beamsize) Tensor with the updated cumulative log-probs\n of each beam.\n \"\"\"\n logprobs = self.modify_logprobs(logprobs)\n return super().select_paths(logprobs, prior_scores, current_length)\n\n def modify_logprobs(self, logprobs: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Modify logprobs in PACER.\n\n The way it works:\n\n 1. With frequency r, select a token x_i+1 to re-rank.\n 2. Generate word probabilities for token x_i+1.\n 3. Examine top k words {x_j | score(x_j) \\in top_k(P(x_i+1 | x_0,...,x_i))}; use classifier to predict P(a|x1, ..., x_i, x_j)\n 4. 
Rescore top k words via multiplication, re-normalize, and advance the generation.\n\n :param logprobs:\n initial token probabilities\n\n :return modified:\n return the modified log probabilities according to PACER\n \"\"\"\n if random.random() > self.frequency:\n return logprobs\n vals, inds = logprobs.topk(self.n_toks, dim=-1, sorted=False)\n new_probs = logprobs.clone().fill_(neginf(logprobs.dtype))\n # Construct partial hypotheses for each beam for each top K tokens\n batch_hyps = [\n h\n for i in range(len(self.partial_hyps))\n for h in [\n self.agent._v2t(self.partial_hyps[i][1:] + [ind]) for ind in inds[i]\n ]\n ]\n # Classify all beam outputs\n predictor_outputs = self.classifier.batch_classify(\n [self.context_str] * self.n_toks * logprobs.size(0), batch_hyps\n )\n # Extract RPA scores\n log_predictor_scores = (\n torch.stack(\n [\n F.log_softmax(pred['sorted_scores'].float(), dim=0)[\n int(pred['text'] == self.character) - 1\n ]\n for pred in predictor_outputs\n ]\n )\n .to(vals.device)\n .view(vals.size())\n )\n # \"Multiply\" Probabilities (in log space...)\n scores = vals + log_predictor_scores\n for i in range(new_probs.size(0)):\n new_probs[i, inds[i]] = scores[i]\n return F.log_softmax(new_probs, dim=-1, dtype=torch.float32) # type: ignore\n\n\nclass PacerGreedySearch(PacerTreeSearchMixin, GreedySearch):\n \"\"\"\n Override Greedy to work with PACER.\n \"\"\"\n\n pass\n\n\nclass PacerBeamSearch(PacerTreeSearchMixin, BeamSearch):\n \"\"\"\n Override Beam to work with PACER.\n \"\"\"\n\n pass\n\n\nclass PacerDelayedBeamSearch(PacerTreeSearchMixin, DelayedBeamSearch):\n \"\"\"\n Override Delayed Beam Search to work with PACER.\n \"\"\"\n\n pass\n\n\nclass PacerTopKSampling(PacerTreeSearchMixin, TopKSampling):\n \"\"\"\n Override TopK Sampling to work with PACER.\n \"\"\"\n\n pass\n\n\nclass PacerNucleusSampling(PacerTreeSearchMixin, NucleusSampling):\n \"\"\"\n Override Nucleus Sampling to work with PACER.\n \"\"\"\n\n pass\n\n\nclass PacerPartialOnlyAgent(PacerAgentMixin, TransformerGeneratorAgent):\n @classmethod\n def add_cmdline_args(\n cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None\n ) -> ParlaiParser:\n TransformerGeneratorAgent.add_cmdline_args(parser, partial_opt=partial_opt)\n PacerAgentMixin.add_cmdline_args(parser, partial_opt=partial_opt)\n return parser\n\n\nclass LongPacerPartialOnlyAgent(PacerAgentMixin, TransformerVariantAgent):\n @classmethod\n def add_cmdline_args(\n cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None\n ) -> ParlaiParser:\n TransformerVariantAgent.add_cmdline_args(parser, partial_opt=partial_opt)\n PacerAgentMixin.add_cmdline_args(parser, partial_opt)\n return parser\n\n\nclass PacerAgent(PacerPartialOnlyAgent, RPARerankAgent):\n \"\"\"\n PACER Agent: Combines Beam and Partial Re-ranking\n \"\"\"\n\n @classmethod\n def add_cmdline_args(\n cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None\n ) -> ParlaiParser:\n RPARerankAgent.add_cmdline_args(parser, partial_opt=partial_opt)\n PacerPartialOnlyAgent.add_cmdline_args(parser, partial_opt=partial_opt)\n return parser\n\n\nclass LongPacerAgent(LongPacerPartialOnlyAgent, LongRPARerankAgent):\n @classmethod\n def add_cmdline_args(\n cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None\n ) -> ParlaiParser:\n LongRPARerankAgent.add_cmdline_args(parser, partial_opt=partial_opt)\n LongPacerPartialOnlyAgent.add_cmdline_args(parser, partial_opt=partial_opt)\n return parser\n" ]
[ [ "torch.nn.functional.log_softmax" ] ]
thomascherickal/strawberryfields
[ "f8e030b6e75554e8d59d232d3aa285b1168d2e76" ]
[ "tests/frontend/test_sf_plot.py" ]
[ "# Copyright 2019-2020 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nr\"\"\"\nUnit tests for strawberryfields.plot\n\"\"\"\nimport pytest\n\nimport numpy as np\nimport plotly.io as pio\n\nimport strawberryfields as sf\nfrom strawberryfields.ops import Sgate, BSgate, MeasureFock\nfrom strawberryfields.plot import plot_wigner\n\npytestmark = pytest.mark.frontend\n\n@pytest.fixture(scope=\"module\")\ndef prog():\n \"\"\"Program used for testing\"\"\"\n program = sf.Program(2)\n\n with program.context as q:\n Sgate(0.54, 0) | q[0]\n Sgate(0.54, 0) | q[1]\n BSgate(6.283, 0.6283) | (q[0], q[1])\n MeasureFock() | q\n\n return program\n\nclass TestWignerPlotting:\n \"\"\"Test the Wigner plotting function\"\"\"\n\n @pytest.mark.parametrize(\"renderer\", [\"png\", \"json\", \"browser\"])\n @pytest.mark.parametrize(\"mode\", [0, 1])\n @pytest.mark.parametrize(\"contours\", [True, False])\n def test_no_errors(self, mode, renderer, contours, prog, monkeypatch):\n \"\"\"Test that no errors are thrown when calling the `plot_wigner` function\"\"\"\n eng = sf.Engine(\"gaussian\")\n results = eng.run(prog)\n\n xvec = np.arange(-4, 4, 0.1)\n pvec = np.arange(-4, 4, 0.1)\n with monkeypatch.context() as m:\n m.setattr(pio, \"show\", lambda x: None)\n plot_wigner(results.state, mode, xvec, pvec, renderer=renderer, contours=contours)\n" ]
[ [ "numpy.arange" ] ]
klarh/flowws-structure-pretraining
[ "d54103d30df98ed846768d6e0ed851edcd53bfcc", "d54103d30df98ed846768d6e0ed851edcd53bfcc" ]
[ "flowws_structure_pretraining/tasks/internal.py", "flowws_structure_pretraining/analysis/EmbeddingPlotter.py" ]
[ "import bisect\nimport collections\n\nimport freud\nimport numpy as np\n\nFrame = collections.namedtuple(\n \"Frame\",\n [\n \"box\",\n \"positions\",\n \"types\",\n \"context\",\n \"index_i\",\n \"index_j\",\n \"weights\",\n \"rijs\",\n \"tijs\",\n \"nlist\",\n ],\n)\n\n\ndef process_frame(frame, nlist_generator, max_types):\n box = frame.box\n positions = frame.positions\n types = frame.types\n context = frame.context\n nl = nlist_generator(box, positions)\n\n index_i = nl.query_point_indices\n index_j = nl.point_indices\n weights = nl.weights.copy()\n weights /= np.repeat(np.add.reduceat(weights, nl.segments), nl.neighbor_counts)\n\n rijs = positions[index_j] - positions[index_i]\n rijs = freud.box.Box.from_box(box).wrap(rijs)\n tijs = encode_types(types[index_i], types[index_j], None, max_types)\n return Frame(\n box, positions, types, context, index_i, index_j, weights, rijs, tijs, nl\n )\n\n\ndef encode_types(source_types, dest_types, N, max_types):\n onehot_src = np.eye(max_types)[source_types]\n onehot_dest = np.eye(max_types)[dest_types]\n\n minus = onehot_dest - onehot_src\n plus = onehot_dest + onehot_src\n if N:\n minus = minus.reshape((-1, N, max_types))\n plus = plus.reshape((-1, N, max_types))\n\n return np.concatenate([minus, plus], axis=-1)\n\n\ndef pad(xs, max_size, dim=None):\n result = []\n for x in xs:\n if len(x) < max_size:\n padding = np.zeros((max_size - len(x), dim or 1), dtype=x.dtype)\n if dim is None:\n padding = padding[..., 0]\n x = np.concatenate([x, padding], axis=0)\n result.append(x)\n return np.asarray(result)\n\n\ndef index_frame(frame, indices, max_size, type_dim):\n all_bonds = []\n for i in indices:\n bond_start = bisect.bisect_left(frame.index_i, i)\n bond_end = bisect.bisect_left(frame.index_i, i + 1)\n bonds = slice(bond_start, bond_end)\n all_bonds.append(bonds)\n\n result = [\n pad([frame.rijs[b] for b in all_bonds], max_size, 3),\n pad([frame.tijs[b] for b in all_bonds], max_size, type_dim),\n pad([frame.weights[b] for b in all_bonds], max_size, None),\n ]\n return tuple(result)\n\n\nclass EnvironmentGenerator:\n def __init__(self, frames):\n self.frames = list(frames)\n self.frame_sizes = [len(frame.positions) for frame in self.frames]\n self.frame_probas = np.array(self.frame_sizes, dtype=np.float32) / sum(\n self.frame_sizes\n )\n\n def sample(self, seed=13, loop=True, subsample=None):\n rng = np.random.default_rng(seed)\n particle_indices = []\n\n if subsample is not None:\n if np.array(subsample).size == 1:\n if subsample < 0:\n subsample = (1.0 + subsample, 1)\n else:\n subsample = (0, subsample)\n left, right = subsample\n for frame in self.frames:\n filt = rng.uniform(size=len(frame.positions))\n filt = np.logical_and(filt >= left, filt < right)\n particle_indices.append(np.where(filt)[0])\n else:\n for frame in self.frames:\n particle_indices.append(np.arange(len(frame.positions)))\n\n if loop:\n frame_indices = np.arange(len(self.frames))\n\n while True:\n frame_i = rng.choice(frame_indices, p=self.frame_probas)\n particle = rng.choice(particle_indices[frame_i])\n yield self.produce(frame_i, particle)\n else:\n for frame_i in range(len(self.frames)):\n for particle in particle_indices[frame_i]:\n yield self.produce(frame_i, particle)\n\n def produce(self, frame_i, particle):\n frame = self.frames[frame_i]\n bond_start = bisect.bisect_left(frame.index_i, particle)\n bond_end = bisect.bisect_left(frame.index_i, particle + 1)\n bonds = slice(bond_start, bond_end)\n\n rijs = frame.rijs[bonds]\n tijs = frame.tijs[bonds]\n weights = 
frame.weights[bonds]\n\n return (rijs, tijs, weights), frame.context\n", "import collections\n\nimport flowws\nfrom flowws import Argument as Arg\nimport matplotlib\nimport numpy as np\nimport plato\n\nfrom ..internal import Remap\n\n\n@flowws.add_stage_arguments\nclass EmbeddingPlotter(flowws.Stage):\n \"\"\"Use PCA to project the embedding\"\"\"\n\n ARGS = [\n Arg('key', '-k', str, help='Key to use for embedding'),\n Arg('shuffle', '-s', bool, True, help='Shuffle points before plotting'),\n Arg('seed', None, int, 13, help='RNG seed for random shuffling'),\n Arg(\n 'progressive_threshold',\n '-p',\n int,\n 16,\n help='If more keys than this are given, use a progressive colormap',\n ),\n Arg('component_x', '-x', int, 0, help='Embedding component to plot on x axis'),\n Arg('component_y', '-y', int, 1, help='Embedding component to plot on y axis'),\n ]\n\n def run(self, scope, storage):\n if 'key' not in self.arguments:\n valid_keys = [\n k\n for (k, v) in sorted(scope.items())\n if k.startswith('embed')\n and isinstance(v, np.ndarray)\n and v.dtype != object\n ]\n key = valid_keys[-1]\n else:\n key = self.arguments['key']\n\n x = scope[key]\n\n self.arg_specifications['component_x'].valid_values = flowws.Range(\n 0, x.shape[-1], (True, False)\n )\n self.arg_specifications['component_y'].valid_values = flowws.Range(\n 0, x.shape[-1], (True, False)\n )\n\n found_key_values = collections.defaultdict(set)\n embedding_contexts = [dict(d) for d in scope['embedding_contexts']]\n for d in embedding_contexts:\n for (k, v) in d.items():\n found_key_values[k].add(v)\n to_remove = [k for (k, vs) in found_key_values.items() if len(vs) == 1]\n for d in embedding_contexts:\n for k in to_remove:\n d.pop(k, None)\n\n remap = Remap()\n unique_contexts = set(map(lambda x: frozenset(x.items()), embedding_contexts))\n context_sort = lambda v: tuple(sorted(v))\n for v in sorted(unique_contexts, key=context_sort):\n remap(v)\n contexts = np.array([remap(frozenset(d.items())) for d in embedding_contexts])\n\n if self.arguments['shuffle']:\n rng = np.random.default_rng(self.arguments['seed'])\n shuf = np.arange(len(x))\n rng.shuffle(shuf)\n x = x[shuf]\n contexts = contexts[shuf]\n\n self.remap = remap\n self.x = x\n self.contexts = contexts\n\n scope.setdefault('visuals', []).append(self)\n\n def get_colormap(self, remap):\n remap_inverse_dicts = [dict(v) for v in remap.inverse]\n file_frames = collections.defaultdict(set)\n get_key = lambda d: d.get('fname', d.get('structure', 'none'))\n get_index = lambda d: d.get('frame', d.get('noise', -1))\n for d in remap_inverse_dicts:\n file_frames[get_key(d)].add(get_index(d))\n file_frames = {k: list(sorted(v)) for (k, v) in file_frames.items()}\n\n if any(len(v) > 1 for v in file_frames.values()):\n # use special file-frame colormap\n colors = []\n ticks = []\n labels = []\n file_starts = dict(\n zip(\n sorted(file_frames),\n np.linspace(0, 3, len(file_frames), endpoint=False),\n )\n )\n file_thetas = {\n k: [0.5]\n if len(v) == 1\n else np.linspace(0.2, 0.8, len(v), endpoint=True)\n for (k, v) in file_frames.items()\n }\n file_colors = {\n k: plato.cmap.cubehelix(file_thetas[k], s=file_starts[k], r=0, h=1.2)\n for k in file_frames\n }\n for d in remap_inverse_dicts:\n key = get_key(d)\n index = file_frames[key].index(get_index(d))\n if index == len(file_frames[key]) // 2:\n ticks.append(len(colors))\n labels.append(dict(sorted(d.items())))\n colors.append(file_colors[key][index])\n cmap = matplotlib.colors.ListedColormap(colors)\n\n if len(ticks) > 
self.arguments['progressive_threshold']:\n ticks = labels = []\n\n elif len(remap) > self.arguments['progressive_threshold']:\n cmap = matplotlib.colors.LinearSegmentedColormap.from_list(\n 'custom_cubehelix',\n plato.cmap.cubehelix(np.linspace(0.2, 0.8, len(remap), endpoint=True)),\n )\n ticks = labels = []\n else:\n cmap = matplotlib.colors.ListedColormap(\n plato.cmap.cubeellipse_intensity(\n np.linspace(0, 2 * np.pi, len(remap), endpoint=False)\n )\n )\n ticks = np.linspace(0, len(remap), len(remap), endpoint=False)\n labels = [dict(sorted(v)) for v in remap.inverse]\n return cmap, ticks, labels\n\n def draw_matplotlib(self, fig):\n ax = fig.add_subplot()\n remap = self.remap\n\n cmap, ticks, ticklabels = self.get_colormap(remap)\n points = ax.scatter(\n self.x[:, self.arguments['component_x']],\n self.x[:, self.arguments['component_y']],\n c=self.contexts,\n cmap=cmap,\n alpha=0.5,\n vmin=-0.5,\n vmax=len(self.remap) - 0.5,\n )\n cbar = fig.colorbar(points, ticks=ticks)\n cbar.ax.set_yticklabels(ticklabels)\n cbar.solids.set(alpha=1)\n" ]
[ [ "numpy.concatenate", "numpy.add.reduceat", "numpy.array", "numpy.asarray", "numpy.random.default_rng", "numpy.logical_and", "numpy.eye", "numpy.where" ], [ "matplotlib.colors.ListedColormap", "numpy.random.default_rng" ] ]
stes/scikit-kinematics
[ "1a4d7212c8fff93428cb1d56ac6d77faa32e6bc5" ]
[ "skinematics/sensors/yei.py" ]
[ "'''\nImport data saved with yei-sensors, through subclassing \"IMU_Base\"\n'''\n\n'''\nAuthor: Thomas Haslwanter\nDate: Sept-2017\n'''\n\nimport numpy as np\nimport pandas as pd\nimport re\nimport abc\n\n# To ensure that the relative path works\nimport os\nimport sys\n\nparent_dir = os.path.abspath(os.path.join( os.path.dirname(__file__), '..' ))\nif parent_dir not in sys.path:\n sys.path.insert(0, parent_dir)\n\nfrom imus import IMU_Base\n\nclass YEI(IMU_Base):\n \"\"\"Concrete class based on abstract base class IMU_Base \"\"\" \n \n def get_data(self, in_file, in_data=None):\n '''Get the sampling rate, as well as the recorded data,\n and assign them to the corresponding attributes of \"self\".\n \n Parameters\n ----------\n in_file : string\n Filename of the data-file\n in_data : not used here\n \n Assigns\n -------\n - rate : rate\n - acc : acceleration\n - omega : angular_velocity\n - mag : mag_field_direction\n '''\n \n data = pd.read_csv(in_file)\n \n # Generate a simple list of column names\n newColumns = []\n pattern = re.compile('.*%(\\w+)\\((\\w+)\\)')\n for name in data.columns:\n newColumns.append(pattern.match(name).groups()[1])\n data.columns = newColumns\n \n \n # Calculate rate (ChipTime is in microsec)\n start = data.ChipTimeUS[0] * 1e-6 # microseconds to seconds\n stop = data.ChipTimeUS.values[-1] * 1e-6 # pandas can't count backwards\n rate = len(data) / (stop-start)\n \n # Extract the columns that you want, and pass them on\n in_data = {'rate':rate,\n 'acc': data.filter(regex='Accel').values,\n 'omega': data.filter(regex='Gyro').values,\n 'mag': data.filter(regex='Compass').values}\n self._set_data(in_data)\n\nif __name__ == '__main__':\n my_sensor = YEI(in_file=r'..\\tests\\data\\data_yei.txt') \n \n import matplotlib.pyplot as plt \n \n plt.plot(my_sensor.acc) \n plt.show()\n print('Done')\n " ]
[ [ "matplotlib.pyplot.show", "pandas.read_csv", "matplotlib.pyplot.plot" ] ]
Zhengymm/DCP
[ "6faf311f0519c50b213c248b23108119453075b4" ]
[ "runDCP.py" ]
[ "from __future__ import print_function, division\n\nimport argparse\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom sklearn.cluster import KMeans\nfrom torch.nn.parameter import Parameter\nfrom torch.optim import Adam\nfrom evaluation import eva\nfrom utils import load_data, load_graph\nfrom models import AE, GAE\n\ntorch.cuda.set_device(0)\n\n\nclass DCP_DEC(nn.Module):\n def __init__(self, n_enc_1, n_enc_2, n_dec_1, n_dec_2, n_input, n_z,\n n_clusters, v=1):\n super(DCP_DEC, self).__init__()\n\n # autoencoder for intra information\n self.ae = AE(n_enc_1=n_enc_1,\n n_enc_2=n_enc_2,\n n_dec_1=n_dec_1,\n n_dec_2=n_dec_2,\n n_input=n_input,\n n_z=n_z)\n self.ae.load_state_dict(torch.load(args.dnn_pretrain_path, map_location='cpu'))\n # cluster layer\n self.dnn_cluster_layer = Parameter(torch.Tensor(n_clusters, n_z))\n torch.nn.init.xavier_normal_(self.dnn_cluster_layer.data)\n\n # GCN for inter information\n self.gae = GAE(input_feat_dim=n_input, hidden_dim1=256, n_z=n_z)\n self.gae.load_state_dict(torch.load(args.gcn_pretrain_path, map_location='cpu'))\n # cluster layer\n self.gcn_cluster_layer = Parameter(torch.Tensor(n_clusters, n_z))\n torch.nn.init.xavier_normal_(self.gcn_cluster_layer.data)\n\n # degree\n self.v = v\n\n def forward(self, x, adj):\n # DNN Module\n x_bar, dz = self.ae(x)\n\n # GCN Module\n a_bar, gz = self.gae(x, adj)\n\n # Dual Self-supervised Module for DNN\n q_dnn = 1.0 / (1.0 + (torch.sum(torch.pow(dz.unsqueeze(1) - self.dnn_cluster_layer, 2), 2) / self.v))\n q_dnn = q_dnn.pow((self.v + 1.0) / 2.0)\n q_dnn = (q_dnn.t() / torch.sum(q_dnn, 1)).t()\n\n # Dual Self-supervised Module for GCN\n q_gcn = 1.0 / (1.0 + (torch.sum(torch.pow(gz.unsqueeze(1) - self.gcn_cluster_layer, 2), 2) / self.v))\n q_gcn = q_gcn.pow((self.v + 1.0) / 2.0)\n q_gcn = (q_gcn.t() / torch.sum(q_gcn, 1)).t()\n\n return x_bar, q_dnn, a_bar, q_gcn, dz, gz\n\n\ndef target_distribution(q):\n weight = q ** 2 / q.sum(0)\n return (weight.t() / weight.sum(1)).t()\n\n\ndef train_dcp(dataset):\n model = DCP_DEC(512, 256, 256, 512, n_input=args.n_input, n_z=args.n_z,\n n_clusters=args.n_clusters, v=1.0).to(device)\n print(model)\n optimizer = Adam(model.parameters(), lr=args.lr)\n\n # KNN Graph/A\n adj = load_graph(args.name, args.k)\n adj = adj.cuda()\n\n # cluster parameter initiate\n data = torch.Tensor(dataset.x).to(device)\n y = dataset.y\n\n # k-menas initial\n with torch.no_grad():\n _, dz = model.ae(data)\n dnn_kmeans = KMeans(n_clusters=args.n_clusters, n_init=20)\n # print(dnn_kmeans)\n y_dnnpred = dnn_kmeans.fit_predict(dz.data.cpu().numpy())\n model.dnn_cluster_layer.data = torch.tensor(dnn_kmeans.cluster_centers_).to(device)\n eva(y, y_dnnpred, 'dnn-pre')\n\n with torch.no_grad():\n _, gz = model.gae(data, adj)\n gcn_kmeans = KMeans(n_clusters=args.n_clusters, n_init=20)\n # print(gcn_kmeans)\n y_gcnpred = gcn_kmeans.fit_predict(gz.data.cpu().numpy())\n model.gcn_cluster_layer.data = torch.tensor(gcn_kmeans.cluster_centers_).to(device)\n eva(y, y_gcnpred, 'gae-pre')\n\n iteration = 1000\n for epoch in range(iteration):\n # adjust_learning_rate(optimizer, epoch)\n if epoch % 1 == 0:\n # update_interval\n _, tmp_qdnn, _, tmp_qgcn, tmp_dz, tmp_gz = model(data, adj)\n\n p_dnn = target_distribution(tmp_qdnn.data)\n p_gcn = target_distribution(tmp_qgcn.data)\n\n res1 = tmp_qdnn.data.cpu().numpy().argmax(1) # Q_dnn\n res2 = p_dnn.data.cpu().numpy().argmax(1) # P_dnn\n res3 = tmp_qgcn.data.cpu().numpy().argmax(1) # Q_gcn\n res4 = p_gcn.data.cpu().numpy().argmax(1) # 
P_gcn\n\n qdnn = eva(y, res1, str(epoch) + ' Q_DNN')\n eva(y, res2, str(epoch) + ' P_DNN')\n qgcn = eva(y, res3, str(epoch) + ' Q_GCN')\n eva(y, res4, str(epoch) + ' P_GCN')\n\n x_bar, q_dnn, a_bar, q_gcn, dz, gz = model(data, adj)\n\n dnn_cluloss = F.kl_div(q_dnn.log(), p_dnn, reduction='batchmean') # dnn_cluster\n dnn_reloss = F.mse_loss(x_bar, data) # dnn_reconstruction\n gcn_cluloss = F.kl_div(q_gcn.log(), p_gcn, reduction='batchmean') # gcn_cluster\n gcn_reloss = F.mse_loss(a_bar, adj.to_dense()) # gcn_reconstruction\n\n # clustering distribution consistency\n con_loss = F.kl_div(q_dnn.log(), q_gcn, reduction='batchmean') # GCN guide\n\n loss = args.alpha * dnn_cluloss + 1.0 * dnn_reloss + \\\n args.beta * gcn_cluloss + 1.0 * gcn_reloss + \\\n args.gamma * con_loss\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n return qgcn, qdnn\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='train',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--name', type=str, default='reut')\n parser.add_argument('--k', type=int, default=None)\n parser.add_argument('--lr', type=float, default=1e-3)\n parser.add_argument('--n_clusters', default=3, type=int)\n parser.add_argument('--n_z', default=64, type=int)\n parser.add_argument('--pretrain_path', type=str, default='pkl')\n parser.add_argument('--alpha', type=float, default=0)\n parser.add_argument('--beta', type=float, default=0)\n parser.add_argument('--gamma', type=float, default=0)\n args = parser.parse_args()\n args.cuda = torch.cuda.is_available()\n print(\"use cuda: {}\".format(args.cuda))\n device = torch.device(\"cuda\" if args.cuda else \"cpu\")\n\n args.dnn_pretrain_path = './pretrain/{}_att2_512-256-64.pkl'.format(args.name)\n args.gcn_pretrain_path = './pretrain/{}_att2_gcn2.pkl'.format(args.name)\n\n dataset = load_data(args.name)\n\n args.n_input = dataset.x.shape[1]\n args.n_clusters = dataset.y.max() + 1\n print(args)\n\n print(\"Start training...............\")\n result_qgcn, result_qdnn = train_dcp(dataset)\n print(\".........................\")\n print(\"The result of Q-GAE:\")\n print(result_qgcn)\n print(\"The result of Q-AE:\")\n print(result_qdnn)\n print(\".........................\")\n" ]
[ [ "torch.device", "sklearn.cluster.KMeans", "torch.no_grad", "torch.nn.functional.mse_loss", "torch.cuda.set_device", "torch.cuda.is_available", "torch.tensor", "torch.load", "torch.nn.init.xavier_normal_", "torch.Tensor", "torch.sum" ] ]
darianyang/molecool
[ "0654a88bfb5a6ef2137ea9f74dcae86b7827f999" ]
[ "molecool/io/pdb.py" ]
[ "\"\"\"\nFunctions for manipulating PDB files.\n\"\"\"\n\nimport os\nimport numpy as np\n\ndef open_pdb(f_loc):\n \"\"\"\n This function reads in a pdb file and returns the atom names and coordinates.\n\n Parameters\n ----------\n f_loc : string for file location\n\n Returns\n -------\n sym : list\n coords : ndarray\n\n Examples\n --------\n >>> open_pdb(\"/path/to/file\")\n\n \"\"\"\n\n with open(f_loc) as f:\n data = f.readlines()\n c = []\n sym = []\n for l in data:\n if 'ATOM' in l[0:6] or 'HETATM' in l[0:6]:\n sym.append(l[76:79].strip())\n try:\n # list comprehension: for x in list_object: --> float(x)\n c2 = [float(x) for x in l[30:55].split()]\n except ValueError as e:\n raise ValueError(F\"error in pdb file {f_loc} formatting.\\n {e}\")\n else:\n c.append(c2)\n coords = np.array(c)\n return sym, coords\n" ]
[ [ "numpy.array" ] ]
remi-pr/tridesclous
[ "074f425fd40f1fb76f619f74cc024dd9817b7ee7", "074f425fd40f1fb76f619f74cc024dd9817b7ee7" ]
[ "tridesclous/gui/isiviewer.py", "tridesclous/tests/test_matplotlibplot.py" ]
[ "from .myqt import QT\nimport pyqtgraph as pg\n\nimport numpy as np\nimport pandas as pd\n\nfrom .base import WidgetBase\n\n\n\nclass MyViewBox(pg.ViewBox):\n doubleclicked = QT.pyqtSignal()\n def mouseDoubleClickEvent(self, ev):\n self.doubleclicked.emit()\n ev.accept()\n def raiseContextMenu(self, ev):\n #for some reasons enableMenu=False is not taken (bug ????)\n pass\n\n\n\nclass ISIViewer(WidgetBase):\n _params = [\n {'name': 'bin_min', 'type': 'float', 'value' : 0. },\n {'name': 'bin_max', 'type': 'float', 'value' : 100. },\n {'name': 'bin_size', 'type': 'float', 'value' : 1.0 },\n ]\n def __init__(self, controller=None, parent=None):\n WidgetBase.__init__(self, parent=parent, controller=controller)\n \n self.layout = QT.QVBoxLayout()\n self.setLayout(self.layout)\n \n #~ h = QT.QHBoxLayout()\n #~ self.layout.addLayout(h)\n #~ h.addWidget(QT.QLabel('<b>Similarity</b>') )\n\n #~ but = QT.QPushButton('settings')\n #~ but.clicked.connect(self.open_settings)\n #~ h.addWidget(but)\n \n \n self.graphicsview = pg.GraphicsView()\n self.layout.addWidget(self.graphicsview)\n \n self.initialize_plot()\n \n #~ self.on_params_changed()#this do refresh \n\n\n def initialize_plot(self):\n self.viewBox = MyViewBox()\n self.viewBox.doubleclicked.connect(self.open_settings)\n #~ self.viewBox.disableAutoRange()\n \n self.plot = pg.PlotItem(viewBox=self.viewBox)\n self.graphicsview.setCentralItem(self.plot)\n self.plot.hideButtons()\n \n #ISI are computed on demand\n self.all_isi = {}\n \n def _compute_isi(self, k):\n spikes = self.controller.spikes\n \n isi = []\n for seg_num in range(self.controller.dataio.nb_segment):\n sel = (spikes['segment'] == seg_num) & (spikes['cluster_label'] == k)\n isi.append(np.diff(spikes[sel]['index'])/self.controller.dataio.sample_rate)\n isi = np.concatenate(isi)\n self.all_isi[k] = isi * 1000. 
#ms\n\n def refresh(self):\n self.plot.clear()\n \n n = 0\n for k in self.controller.positive_cluster_labels:\n if not self.controller.cluster_visible[k]:\n continue\n \n if k not in self.all_isi:\n self._compute_isi(k)\n \n isi = self.all_isi[k]\n if len(isi) ==0:\n return\n \n bins = np.arange(self.params['bin_min'], self.params['bin_max'], self.params['bin_size'])\n count, bins = np.histogram(isi, bins=bins)\n \n qcolor = self.controller.qcolors[k]\n curve = pg.PlotCurveItem(bins[:-1], count, pen=pg.mkPen(qcolor, width=3))\n self.plot.addItem(curve)\n\n\n", "import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom tridesclous import download_dataset\nfrom tridesclous.dataio import DataIO\nfrom tridesclous.catalogueconstructor import CatalogueConstructor\nfrom tridesclous.matplotlibplot import *\n\n\nimport matplotlib.pyplot as plt\n\nfrom tridesclous.tests.testingtools import setup_catalogue\nimport shutil\n\nfrom tridesclous.tests.testingtools import ON_CI_CLOUD\nimport pytest\n\n\ndef setup_module():\n setup_catalogue('test_matplotlibplot', dataset_name='olfactory_bulb')\n\ndef teardown_module():\n shutil.rmtree('test_matplotlibplot')\n\n \n@pytest.mark.skipif(ON_CI_CLOUD, reason='ON_CI_CLOUD')\ndef test_plot_probe_geometry():\n dataio = DataIO('test_matplotlibplot')\n plot_probe_geometry(dataio, chan_grp=0) \n\n \n@pytest.mark.skipif(ON_CI_CLOUD, reason='ON_CI_CLOUD')\ndef test_plot_signals():\n dataio = DataIO('test_matplotlibplot')\n catalogueconstructor = CatalogueConstructor(dataio=dataio, chan_grp=0)\n \n plot_signals(dataio, signal_type='initial')\n plot_signals(dataio, signal_type='processed')\n plot_signals(catalogueconstructor, signal_type='processed', with_peaks=True, time_slice=(2., 3))\n plot_signals(catalogueconstructor, signal_type='processed', with_span=True, time_slice=(2., 3))\n \n\n@pytest.mark.skipif(ON_CI_CLOUD, reason='ON_CI_CLOUD')\ndef test_plot_waveforms_with_geometry():\n nb_channel = 32\n waveforms = np.random.randn(200, 45, nb_channel)\n channels = np.arange(nb_channel)\n #~ geometry = {c: [np.random.randint(100), np.random.randint(100)] for c in channels}\n geometry = np.random.randint(low=0, high=100, size=(200, 2))\n \n #~ , channels, geometry\n #~ print(geometry)\n \n plot_waveforms_with_geometry(waveforms, channels, geometry) \n\n@pytest.mark.skipif(ON_CI_CLOUD, reason='ON_CI_CLOUD')\ndef test_plot_waveforms():\n dataio = DataIO('test_matplotlibplot')\n catalogueconstructor = CatalogueConstructor(dataio=dataio, chan_grp=0)\n \n plot_waveforms(catalogueconstructor)\n\n\n@pytest.mark.skipif(ON_CI_CLOUD, reason='ON_CI_CLOUD')\ndef test_plot_features_scatter_2d():\n dataio = DataIO('test_matplotlibplot')\n catalogueconstructor = CatalogueConstructor(dataio=dataio, chan_grp=0)\n \n #~ plot_features_scatter_2d(catalogueconstructor)\n plot_features_scatter_2d(catalogueconstructor, labels=[0])\n\n \nif __name__ == '__main__':\n setup_module()\n \n test_plot_probe_geometry()\n test_plot_signals()\n test_plot_waveforms_with_geometry()\n test_plot_waveforms()\n test_plot_features_scatter_2d()\n \n\n\n\n plt.show()" ]
[ [ "numpy.concatenate", "numpy.histogram", "numpy.arange", "numpy.diff" ], [ "matplotlib.pyplot.show", "numpy.arange", "numpy.random.randint", "numpy.random.randn" ] ]
mkturkcan/NoiseIgnoringNetworks
[ "5803d666d6c97075d18673825dc07c20bb3e5375" ]
[ "autokeras/tasks/structured_data_mixin.py" ]
[ "import pandas as pd\n\n\nclass StructuredDataMixin(object):\n\n def check(self, column_names, column_types):\n if column_types:\n for column_type in column_types.values():\n if column_type not in ['categorical', 'numerical']:\n raise ValueError(\n 'Column_types should be either \"categorical\" '\n 'or \"numerical\", but got {name}'.format(name=column_type))\n if column_names and column_types:\n for column_name in column_types:\n if column_name not in column_names:\n raise ValueError('Column_names and column_types are '\n 'mismatched. Cannot find column name '\n '{name} in the data.'.format(name=column_name))\n\n def read_for_predict(self, x):\n if isinstance(x, str):\n x = pd.read_csv(x)\n if self._target_col_name in x:\n x.pop(self._target_col_name)\n return x\n" ]
[ [ "pandas.read_csv" ] ]
murphp30/radiospectra
[ "0730784abf740a527343386264f5711b08c12f7f" ]
[ "radiospectra/spectrogram2/tests/test_psp_rfs.py" ]
[ "from pathlib import Path\nfrom datetime import datetime\nfrom unittest import mock\n\nimport numpy as np\n\nimport astropy.units as u\nfrom astropy.time import Time\nfrom sunpy.net import attrs as a\n\nfrom radiospectra.spectrogram2 import Spectrogram\nfrom radiospectra.spectrogram2.sources import RFSSpectrogram\n\n\n@mock.patch('radiospectra.spectrogram2.spectrogram.SpectrogramFactory._read_cdf')\ndef test_psp_rfs_lfr(read_cdf):\n start_time = Time('2019-04-09 00:01:16.197889')\n end_time = Time('2019-04-10 00:01:04.997573')\n meta = {\n 'cdf_meta': {\n 'TITLE': 'PSP FIELDS RFS LFR data',\n 'Project': 'PSP',\n 'Source_name': 'PSP_FLD>Parker Solar Probe FIELDS',\n 'Descriptor': 'RFS_LFR>Radio Frequency Spectrometer LFR',\n 'Data_type': 'L2>Level 2 Data',\n 'Data_version': '01',\n 'MODS': 'Revision 1',\n 'Logical_file_id': 'psp_fld_l2_rfs_lfr_20190409_v01',\n 'Mission_group': 'PSP'},\n 'detector': 'lfr',\n 'instrument': 'FIELDS/RFS',\n 'observatory': 'PSP',\n 'start_time': start_time,\n 'end_time': end_time,\n 'wavelength': a.Wavelength(10.546879882812501 * u.kHz, 1687.5 * u.kHz),\n 'times': start_time + np.linspace(0, (end_time - start_time).to_value('s'), 12359) * u.s,\n 'freqs': [10546.88, 18750., 28125., 37500., 46875., 56250., 65625., 75000., 84375., 89062.5,\n 94921.88, 100781.25, 106640.62, 112500., 118359.38, 125390.62, 132421.88, 140625.,\n 146484.38, 157031.25, 166406.25, 175781.25, 186328.12, 196875., 208593.75,\n 220312.5, 233203.12, 247265.62, 261328.12, 276562.5, 292968.75, 309375., 328125.,\n 346875., 366796.88, 387890.62, 411328.12, 434765.62, 459375., 486328.12,\n 514453.12, 544921.9, 576562.5, 609375., 645703.1, 682031.25, 721875., 764062.5,\n 808593.75, 855468.75, 904687.5, 957421.9, 1013671.9, 1072265.6, 1134375.,\n 1196484.4, 1265625., 1312500., 1368750., 1425000., 1481250., 1565625., 1621875.,\n 1687500.] 
* u.Hz\n }\n array = np.zeros((64, 12359))\n read_cdf.return_value = (meta, array)\n file = Path('fake.cdf')\n spec = Spectrogram(file)\n assert isinstance(spec, RFSSpectrogram)\n assert spec.observatory == 'PSP'\n assert spec.instrument == 'FIELDS/RFS'\n assert spec.detector == 'LFR'\n # TODO: check why this is not exact; probably based on spacecraft ET, so it won't match UTC exactly\n assert spec.start_time.datetime == datetime(2019, 4, 9, 0, 1, 16, 197889)\n assert spec.end_time.datetime == datetime(2019, 4, 10, 0, 1, 4, 997573)\n assert spec.wavelength.min.round(1) == 10.5 * u.kHz\n assert spec.wavelength.max == 1687.5 * u.kHz\n assert spec.level == 'L2'\n assert spec.version == 1\n\n\n@mock.patch('radiospectra.spectrogram2.spectrogram.SpectrogramFactory._read_cdf')\ndef test_psp_rfs_hfr(read_cdf):\n start_time = Time('2019-04-09 00:01:13.904188')\n end_time = Time('2019-04-10 00:01:02.758315')\n meta = {\n 'cdf_meta': {\n 'TITLE': 'PSP FIELDS RFS HFR data',\n 'Project': 'PSP',\n 'Source_name': 'PSP_FLD>Parker Solar Probe FIELDS',\n 'Descriptor': 'RFS_HFR>Radio Frequency Spectrometer HFR',\n 'Data_type': 'L2>Level 2 Data',\n 'Data_version': '01',\n 'MODS': 'Revision 1',\n 'Logical_file_id': 'psp_fld_l2_rfs_lfr_20190409_v01',\n 'Mission_group': 'PSP'},\n 'detector': 'hfr',\n 'instrument': 'FIELDS/RFS',\n 'observatory': 'PSP',\n 'start_time': start_time,\n 'end_time': end_time,\n 'wavelength': a.Wavelength(1275.0*u.kHz, 19171.876*u.kHz),\n 'times': start_time + np.linspace(0, (end_time - start_time).to_value('s'), 12359) * u.s,\n 'freqs': [1275000., 1321875., 1378125., 1425000., 1471875., 1575000., 1621875., 1678125.,\n 1771875., 1828125., 1921875., 2025000., 2128125., 2221875., 2278125., 2371875.,\n 2521875., 2625000., 2728125., 2878125., 2971875., 3121875., 3271875., 3375000.,\n 3525000., 3721875., 3871875., 4021875., 4228125., 4425000., 4575000., 4771875.,\n 5025000., 5221875., 5475000., 5728125., 5971875., 6225000., 6478125., 6778125.,\n 7078125., 7425000., 7725000., 8071875., 8428125., 8821875., 9178125., 9571875.,\n 10021875., 10471875., 10921875., 11428125., 11925000., 12421875., 13021875.,\n 13575000., 14175000., 14821875., 15478125., 16125000., 16875000., 17625000.,\n 18375000., 19171876.] * u.Hz\n }\n array = np.zeros((64, 12359))\n read_cdf.return_value = (meta, array)\n file = Path('fake.cdf')\n spec = Spectrogram(file)\n assert isinstance(spec, RFSSpectrogram)\n assert spec.observatory == 'PSP'\n assert spec.instrument == 'FIELDS/RFS'\n assert spec.detector == 'HFR'\n # TODO: check why this is not exact; probably based on spacecraft ET, so it won't match UTC exactly\n assert spec.start_time.datetime == datetime(2019, 4, 9, 0, 1, 13, 904188)\n assert spec.end_time.datetime == datetime(2019, 4, 10, 0, 1, 2, 758315)\n assert spec.wavelength.min == 1275.0 * u.kHz\n assert spec.wavelength.max == 19171.876 * u.kHz\n assert spec.level == 'L2'\n assert spec.version == 1\n" ]
[ [ "numpy.zeros" ] ]
ACea15/pyNastran
[ "5ffc37d784b52c882ea207f832bceb6b5eb0e6d4", "5ffc37d784b52c882ea207f832bceb6b5eb0e6d4", "5ffc37d784b52c882ea207f832bceb6b5eb0e6d4" ]
[ "pyNastran/op2/dev/aero_eqs.py", "pyNastran/op2/tables/oes_stressStrain/complex/oes_shear.py", "pyNastran/bdf/mesh_utils/shift.py" ]
[ "#pylint: disable=C0301,W0612,C0111,R0201,C0103,W0613,R0914\nimport numpy as np\n\ndef merge(amatrix, bmatrix):\n return amatrix + bmatrix\n\ndef run(Gka, Wkk, Skj, AJJI, DJK,\n K, KALX, KARX, KALL, KALLI, KAXL, Ka_rl, Ka_rr, Ka_lli,\n K2JE, KARL, KARZX, KSALL, KSALLI, KAA, Ka_lr, K2RZX,\n K2JK, K1JE,\n ik, A, B,\n MLL, MLR, mr, MRR, MRL,\n QPKI, Qke, Qkx,\n PHIA,\n WGJ,\n FAJE,\n D, PL, PR, ZXX, TR, TRX, INTZ,\n bref, cref, qbar, machs,\n ARLR, M4RR, MSRR, AMLR, M5RR, K4LX, TMP2, IPZ,\n SRKT, SKJ, SKL, SKJF,\n DJX, D1JE, D2JE, DJ2K, DJKB, D2JK,\n GTKL,\n WTFACT,\n Ajj, Ajx, ALX, AJJT,\n UX, UL, UINTL,\n\n NDIM, PHAI, wj, f_j_e,\n subcases, only_steady, only_unsteady):\n WKK = Wkk\n GKA = Gka\n AJJ = Ajj\n Ka_ll = KALL\n Djx = DJX\n Ka_rx = KARX\n Ka_lx = KALX\n\n q = 1.\n S = 1.\n phi_ai = PHAI\n\n # KAA = structural stiffness matrix\n # MAA = structural mass matrix\n # PA = vector of applied loads\n\n # eq 1-64\n Qaa = Gka.T @ Wkk @ Skj @ AJJI @ DJK @ GKA\n\n # eq 1-65\n QAX = GKA.T @ WKK @ SKJ @ AJJI @ DJX\n\n\n # eq 1-75\n ZXX = (\n mr @ TR.T @ TRX\n - (D.T @ Ka_ll + Ka_rl) @ Ka_lli @ (MLL @ D + MLR) @ TR.T @ TRX\n - (D.T @ KALX + KARX) @ (D.T @ KALL + KARL) @ KALLI @ KALX\n )\n PZ = D.T @ PL + PR - (D.T @ KALL + KARL) @ KALLI @ PL\n\n # eq 1-78\n KRZX = ZXX - mr @ TR.T @ TRX\n\n # eq 1-80\n NDIM = np.ones((6, 6), dtype='float32')\n NDIM[3, 3] /= bref\n NDIM[4, 4] /= cref\n NDIM[5, 5] /= bref\n\n # eq 1-88\n K2RR = -(D.T @ Ka_ll + Ka_rl) @ ARLR + (D.T @ Ka_lr + Ka_rr)\n MSRR = mr\n KAZL = D.T @ Ka_ll + Ka_rl\n KARZX = KAZL - KAXL * ALX\n IPZ = INTZ - (K.T @ Ka_ll + Ka_rl) @ UINTL\n\n # eq 1-89\n M5RR = -K2RR @ M4RR + MSRR\n MIRR = -KAZL @ AMLR + M5RR\n KR1ZX = -K2RR @ K4LX + KARZX\n IPZF = K2RR @ TMP2 + IPZ\n\n # eq 1-90\n IPZF1 = MIRR**(-1) @ IPZF # solve?\n IPZF2 = MSRR @ IPZF1\n KR2ZX = -MIRR**(-1) @ KR1ZX # solve?\n Z1ZX = MSRR @ K2RZX\n\n # eq 1-91\n aero_coeffs = 1./(q*S) * NDIM @ TR @ Z1ZX\n aero_coeffs0 = 1./(q*S) * NDIM @ TR @ IPZF2\n\n\n RSTAB = qbar @ SRKT.T @ Qkx\n AjjI = Ajj**(-1)\n Qkx = Wkk @ Skj @ AjjI @ Djx # solve?\n\n # eq 1-92\n RINT = qbar @ SRKT.T @ (Wkk @ Skj @ AjjI @ wj + Skj @ (f_j_e / qbar))\n\n # eq 1-93\n KSAZX = D.T @ (Ka_lx + Ka_rx)\n\n # Qii = the generalized aerodynamic matrix\n # phi_ai = matrix of i-set normal mode vectors in the physical a-set\n # Gka = spline matrix from Eq 1-22 reduced to the a-set\n # ue = vector of extra point displacements\n # D1JE =\n # D2JE =\n # wj = downwash\n # WTFACT = weighting matrix Wkk from Eq 1-21\n\n # eq 1-94\n INTZ = Gka.T @ RINT\n\n\n # eq 1-103\n #wj = D1JE + i\n\n # eq 1-104\n Qie = phi_ai.T @ Gka.T @ WTFACT @ Qke\n\n # eq 1-105\n Qke = WTFACT @ Skj @ AjjI @ (D1JE + ik * K2JE)\n\n\n # eq 1-108\n # A is symmetric nhdpts + 2 matrix\n C = A**(-1) @ B\n\n #--------------------------------------------------------------\n # PFAERO (section 5.3)\n\n # 1. Read in DMI\n # 2. process aero model geometry (APD)\n # 4. print USET data if requested (TABPRT)\n # 5. form spline matrix (GI)\n\n # 7.\n if not only_unsteady:\n # 8. form static aero matrices that are only a function of geometry (ADG)\n ADG = 'ADG'\n\n # 9. loop on the static aeroelastic subcases\n for subcase in subcases:\n # 10. loop on number of machs per subcase. For trim, this is one..\n # for divergence, this is nmachs on the DIVERG card\n # 11. determine mach on the current loop (AELOOP)\n for mach in machs:\n # 12. if aero data exists, skip to 18\n\n # 13. generate aero matrices (AMG)\n AMG = 'AMG'\n\n # 14. 
apply weighting factors (if any)\n WSKJ = WKK @ SKL\n\n # 16.\n AJJI = AJJ ** (-1)\n QKKS = WSKJ @ AJJI @ DJK\n\n # 17.\n QKX = WSKJ @ AJJI @ DJK\n # 18. next mach\n # 19. next subcase\n\n\n # 20. skip if only_steady\n if not only_steady:\n # 21. do we need to generate PARAML?\n # 22. loop on machs/reduced frequency pairs\n # 23. determine mach and reduced frequency\n # 24. if the required aero data exists, skip to 35\n # 25. create aero matrices (AMG)\n AMG = 'AMG'\n\n # 26. apply weight factors (if any)\n SKJ1 = WTFACT @ SKJF\n\n # 27.\n DKJB = K1JK = ik * D2JK\n\n # 30.\n QKJ = AJJI @ SKJ.T\n\n # 31.\n QKK = QKJ @ DJKB\n\n # 32. if there are user input downwash matrices due to extra points\n QKE = QKJ @ (D1JE + ik * D2JE)\n\n # 33.\n goto = 35\n\n if goto == 34:\n # 34.\n QKK = SKJ1 @ AJJT @ DKJB\n\n # 36.\n #return\n\n #--------------------------------------------------------------\n # AESTATRS\n\n # 1.\n MSLR = MLL @ D + MLR\n MSRR = MRL @ D + D.T @ MSLR + MRR\n M1RR = D.T @ MRL + MRR\n M1RR = D.T @ MLL + MRL\n\n # 2.\n RFSOP = TR.T @ TRX\n\n # 7.\n QAA = GKA.T @ QKKS @ GKA\n QAX = GKA.T @ QKX\n KAAA = -qbar * QAA\n KAAX = -qbar * QAX\n KSAA1 = KAA + KAAA\n\n # 9.\n KsALLI = KSALL ** (-1)\n ALX = KSALLI @ KALX\n\n # KRZX = restrained elastic dimensional derivatives\n # Z1ZX = unrestrained elastic dimensional derivatives\n # RSTAB = rigid, splined dimensional derivatives\n # KSAZX = rigid, splined dimensional derivatives\n # ZZX = solution matrix for trim calculations\n # HP = perturbation in support point deformations relative to mean axis due to aerodynamic extra points\n\n # 19.\n UK = GTKL.T @ UL\n\n # 20. total downwash is computed\n WJ = K1JK @ UK + DJX @ UX + WGJ\n\n # 21. compute pressures on aero elements\n FFAJ = qbar @ AJJI @ WJ + qbar * FAJE\n\n # 22. compute forces on aero elements\n PAK = qbar * WSKJ @ AJJ @ WJ + qbar @ SKJ @ FAJE\n\n #--------------------------------------------------------------\n # DIVERGRS\n\n # 1.\n GKL = GTKL.T\n\n # 8.\n QLL = GKL.T @ QKK @ GKL\n\n #--------------------------------------------------------------\n # FLUTTER\n\n\n # 19.\n GPKI = GKA @ PHAI\n\n # 24.\n QKI = QKK @ GPKI\n QII = GPKI @ QKI\n\n # 26.\n QIE = GPKI @ QKE\n\n # 27. form QHH\n\n # 28. form QKH\n\n #--------------------------------------------------------------\n # MRFREQRS\n\n # 3. form:\n GPKI = GKA @ PHIA\n\n # 7. form:\n QKI = QKK @ GPKI\n QKK = GPKI.T @ QKI\n\n # 9. form:\n QIE = QPKI.T @ QKE\n\n # 10. form:\n QKH = merge(QII, QIE)\n\n # 11. form:\n QKH = merge(QKI, QKE)\n\n # 12. append QHH & QKH onto QHHA and QKHA using SDR1\n\n\ndef thin_plate_spline(C, wS, mesh, aero_points, node_list, D=1.0):\n \"\"\"spline function\"\"\"\n piD16 = np.pi * D * 16.\n\n nnodes = len(node_list)\n npoints = len(aero_points.keys())\n Cws = np.linalg.inv(C) * wS # Cws matrix, P matrix\n\n wa = {}\n i = 0\n for iaero, aero_node in sorted(aero_points.items()):\n xK = np.zeros(nnodes+3, 'd')\n #nodeI = mesh.Node(iNode)\n\n xa, ya, za = aero_node\n\n xK[0] = 1.\n xK[1] = xa\n xK[2] = ya\n\n j = 3\n for jNode in node_list:\n sNode = mesh.Node(jNode)\n (xs, ys, zs) = sNode.get_position()\n\n Rij2 = (xa-xs)**2. 
+ (ya-ys)**2 # Rij^2\n if Rij2 == 0.:\n xK[j] = 0.\n else:\n Kij = Rij2 * np.log(Rij2) / piD16 # natural log\n xK[j] = Kij\n j += 1\n\n wai = xK * Cws\n wa[iaero] = wai[0, 0]\n #print(\"w[%s]=%s\" % (iAero, wi[0, 0]))\n i += 1\n\n\n #P = solve(C, wS)\n #C*P = wS\n #P = C^-1*wS\n return Cws, wa\n\n#run()\n", "from typing import List\nimport numpy as np\n\nfrom pyNastran.utils.numpy_utils import integer_types\nfrom pyNastran.op2.result_objects.op2_objects import get_complex_times_dtype\nfrom pyNastran.op2.tables.oes_stressStrain.real.oes_objects import (\n StressObject, StrainObject, OES_Object)\nfrom pyNastran.f06.f06_formatting import write_imag_floats_13e\n\n\nclass ComplexShearArray(OES_Object):\n \"\"\"\n Common class for:\n - ComplexShearStressArray\n - ComplexShearStrainArray\n \"\"\"\n def __init__(self, data_code, is_sort1, isubcase, dt):\n OES_Object.__init__(self, data_code, isubcase, apply_data_code=False) ## why???\n #self.element_node = None\n #self.code = [self.format_code, self.sort_code, self.s_code]\n\n #self.ntimes = 0 # or frequency/mode\n #self.ntotal = 0\n #self.itime = 0\n self.nelements = 0 # result specific\n\n if is_sort1:\n pass\n else:\n raise NotImplementedError('SORT2')\n\n @property\n def is_real(self) -> bool:\n return False\n\n @property\n def is_complex(self) -> bool:\n return True\n\n @property\n def nnodes_per_element(self) -> int:\n return 1\n\n def _reset_indices(self) -> None:\n self.itotal = 0\n self.ielement = 0\n\n #def get_nnodes(self):\n #return get_nnodes(self)\n\n def build(self):\n \"\"\"sizes the vectorized attributes of the ComplexShearArray\"\"\"\n if not hasattr(self, 'subtitle'):\n self.subtitle = self.data_code['subtitle']\n #print('ntimes=%s nelements=%s ntotal=%s subtitle=%s' % (\n #self.ntimes, self.nelements, self.ntotal, self.subtitle))\n nnodes = 1\n\n #self.names = []\n #self.nelements //= nnodes\n self.nelements //= self.ntimes\n self.ntotal = self.nelements * nnodes# * 2\n #self.ntotal\n self.itime = 0\n self.ielement = 0\n self.itotal = 0\n #print('ntotal=%s ntimes=%s nelements=%s' % (self.ntotal, self.ntimes, self.nelements))\n\n #print(\"ntimes=%s nelements=%s ntotal=%s\" % (self.ntimes, self.nelements, self.ntotal))\n dtype, idtype, cfdtype = get_complex_times_dtype(self.nonlinear_factor, self.size)\n self._times = np.zeros(self.ntimes, dtype=dtype)\n #self.ntotal = self.nelements * nnodes\n\n self.element = np.zeros(self.nelements, dtype=idtype)\n\n # the number is messed up because of the offset for the element's properties\n if self.nelements != self.ntotal:\n msg = 'ntimes=%s nelements=%s nnodes=%s ne*nn=%s ntotal=%s' % (\n self.ntimes, self.nelements, nnodes, self.nelements * nnodes,\n self.ntotal)\n raise RuntimeError(msg)\n\n # [max_shear, avg_shear]\n self.data = np.zeros((self.ntimes, self.ntotal, 2), dtype=cfdtype)\n\n def build_dataframe(self):\n \"\"\"creates a pandas dataframe\"\"\"\n #Mode 1 2\n #EigenvalueReal -0.0 -0.0\n #EigenvalueImag -0.0 -0.0\n #Damping 0.0 0.0\n #ElementID Item\n #22 max_shear 5.855954e-09+0.000000e+00j 0.000000+0.000000j\n # avg_shear 5.855954e-09+0.000000e+00j 0.000000+0.000000j\n #import pandas as pd\n column_names, column_values = self._build_dataframe_transient_header()\n self.data_frame = self._build_pandas_transient_elements(\n column_values, column_names,\n self.headers, self.element, self.data)\n\n def __eq__(self, table): # pragma: no cover\n assert self.is_sort1 == table.is_sort1\n self._eq_header(table)\n if not np.array_equal(self.data, table.data):\n msg = 'table_name=%r 
class_name=%s\\n' % (self.table_name, self.__class__.__name__)\n msg += '%s\\n' % str(self.code_information())\n ntimes = self.data.shape[0]\n\n i = 0\n if self.is_sort1:\n for itime in range(ntimes):\n for ieid, eid in enumerate(self.element):\n t1 = self.data[itime, ieid, :]\n t2 = table.data[itime, ieid, :]\n (tx1, ty1, unused_tz1, unused_rx1, unused_ry1, unused_rz1) = t1\n (tx2, ty2, unused_tz2, unused_rx2, unused_ry2, unused_rz2) = t2\n d = t1 - t2\n if not np.allclose([tx1.real, tx1.imag, ty1.real, ty1.imag],\n [tx2.real, tx2.imag, ty2.real, ty2.imag], atol=0.0001):\n #if not np.array_equal(t1, t2):\n msg += '%-4s (%s, %sj, %s, %sj)\\n (%s, %sj, %s, %sj)\\n dt12=(%s, %sj, %s, %sj)\\n' % (\n eid,\n tx1.real, tx1.imag, ty1.real, ty1.imag,\n tx2.real, tx2.imag, ty2.real, ty2.imag,\n d[0].real, d[0].imag, d[1].real, d[1].imag,)\n i += 1\n if i > 10:\n print(msg)\n raise ValueError(msg)\n else:\n raise NotImplementedError(self.is_sort2)\n if i > 0:\n print(msg)\n raise ValueError(msg)\n return True\n\n def add_sort1(self, dt, eid, max_shear, avg_shear):\n \"\"\"unvectorized method for adding SORT1 transient data\"\"\"\n assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)\n self._times[self.itime] = dt\n self.data[self.itime, self.itotal] = [max_shear, avg_shear]\n self.element[self.itotal] = eid\n #self.ielement += 1\n self.itotal += 1\n\n def get_stats(self, short: bool=False) -> List[str]:\n if not self.is_built:\n return [\n '<%s>\\n' % self.__class__.__name__,\n f' ntimes: {self.ntimes:d}\\n',\n f' ntotal: {self.ntotal:d}\\n',\n ]\n\n nelements = self.nelements\n ntimes = self.ntimes\n nnodes = self.element.shape[0]\n #ntotal = self.ntotal\n msg = []\n if self.nonlinear_factor not in (None, np.nan): # transient\n msg.append(' type=%s ntimes=%i nelements=%i nnodes=%i; table_name=%r\\n' % (\n self.__class__.__name__, ntimes, nelements, nnodes, self.table_name))\n else:\n msg.append(' type=%s nelements=%i nnodes=%i; table_name=%r\\n' % (\n self.__class__.__name__, nelements, nnodes, self.table_name))\n msg.append(' data: [ntimes, nnodes, 2] where 2=[%s]\\n' % str(', '.join(self._get_headers())))\n msg.append(f' element.shape = {self.element.shape}\\n')\n msg.append(f' data.shape = {self.data.shape}\\n')\n msg.append(' %s\\n' % self.element_name)\n msg += self.get_data_code()\n return msg\n\n def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',\n page_num: int=1, is_mag_phase: bool=False, is_sort1: bool=True):\n \"\"\"\n C O M P L E X F O R C E S A C T I N G O N S H E A R P A N E L E L E M E N T S (CSHEAR)\n (REAL/IMAGINARY)\n\n ====== POINT 1 ====== ====== POINT 2 ====== ====== POINT 3 ====== ====== POINT 4 ======\n ELEMENT F-FROM-4 F-FROM-2 F-FROM-1 F-FROM-3 F-FROM-2 F-FROM-4 F-FROM-3 F-FROM-1\n ID KICK-1 SHEAR-12 KICK-2 SHEAR-23 KICK-3 SHEAR-34 KICK-4 SHEAR-41\n 28 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0\n 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0\n 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0\n 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0\n \"\"\"\n #if header is None:\n #header = []\n #f.write(self.code_information())\n #return page_num\n msg_temp = _get_cshear_msg(is_mag_phase, is_sort1)\n\n ntimes = self.data.shape[0]\n eids = self.element\n if self.is_sort1:\n if is_sort1:\n for itime in range(ntimes):\n dt = self._times[itime]\n\n dt_line = ' %14s = %12.5E\\n' % (self.data_code['name'], dt)\n header[1] = dt_line\n msg = header + msg_temp\n f06_file.write('\\n'.join(msg))\n\n max_shear = self.data[itime, :, 0]\n avg_shear = self.data[itime, :, 1]\n assert len(eids) == len(max_shear)\n 
assert len(max_shear) > 0, max_shear\n for eid, max_sheari, avg_sheari in zip(eids, max_shear, avg_shear):\n assert isinstance(eid, integer_types), 'eid=%s type=%s' % (eid, type(eid))\n [rmax_shear, imax_shear, ravg_shear, iavg_shear\n ,] = write_imag_floats_13e([max_sheari, avg_sheari], is_mag_phase)\n\n #f.write(' 28 0.0 / 0.0 0.0 / 0.0\\n')\n f06_file.write(\n '%24s %-13s / %-13s %-13s / %-13s\\n' % (\n eid, rmax_shear, imax_shear, ravg_shear, iavg_shear))\n f06_file.write(page_stamp % page_num)\n page_num += 1\n else:\n # TODO: write in SORT2\n times = self._times\n for ieid, eid in enumerate(eids):\n max_shear = self.data[:, ieid, 0].ravel()\n avg_shear = self.data[:, ieid, 1].ravel()\n for itime, max_sheari, avg_sheari in zip(times, max_shear, avg_shear):\n [rmax_shear, imax_shear, ravg_shear, iavg_shear\n ] = write_imag_floats_13e([max_sheari, avg_sheari], is_mag_phase)\n\n #f06_file.write(\n #' %6s %-13s / %-13s %-13s / %-13s\\n' % (\n #eid, rmax_shear, imax_shear, ravg_shear, iavg_shear))\n f06_file.write(\n '%24s %-13s / %-13s %-13s / %-13s\\n' % (\n eid, rmax_shear, imax_shear, ravg_shear, iavg_shear))\n f06_file.write(page_stamp % page_num)\n page_num += 1\n else:\n raise NotImplementedError('ComplexShearArray-sort2')\n return page_num - 1\n\n @property\n def headers(self) -> List[str]:\n return self._get_headers()\n\n def get_headers(self) -> List[str]:\n return self.headers\n\nclass ComplexShearStressArray(ComplexShearArray, StressObject):\n def __init__(self, data_code, is_sort1, isubcase, dt):\n ComplexShearArray.__init__(self, data_code, is_sort1, isubcase, dt)\n StressObject.__init__(self, data_code, isubcase)\n\n def _get_headers(self) -> List[str]:\n return ['max_shear', 'avg_shear']\n\nclass ComplexShearStrainArray(ComplexShearArray, StrainObject):\n def __init__(self, data_code, is_sort1, isubcase, dt):\n ComplexShearArray.__init__(self, data_code, is_sort1, isubcase, dt)\n StrainObject.__init__(self, data_code, isubcase)\n assert self.is_strain, self.stress_bits\n\n def _get_headers(self):\n return ['max_shear', 'avg_shear']\n\ndef _get_cshear_msg(is_mag_phase, is_sort1):\n if is_mag_phase:\n raise NotImplementedError()\n else:\n out = [\n ' C O M P L E X S T R E S S E S I N S H E A R P A N E L S ( C S H E A R )\\n'\n ' (REAL/IMAGINARY)\\n'\n ' \\n'\n ' ELEMENT MAXIMUM AVERAGE\\n'\n ' ID. 
SHEAR SHEAR\\n'\n #' 28 9.089907E+01 / 1.709346E+02 -9.089907E+01 / -1.709346E+02\\n'\n ]\n return out\n #out = [\n #' C O M P L E X F O R C E S A C T I N G O N S H E A R P A N E L E L E M E N T S (CSHEAR)\\n',\n #' (REAL/IMAGINARY)\\n',\n #' \\n',\n #' ====== POINT 1 ====== ====== POINT 2 ====== ====== POINT 3 ====== ====== POINT 4 ======\\n',\n #' ELEMENT F-FROM-4 F-FROM-2 F-FROM-1 F-FROM-3 F-FROM-2 F-FROM-4 F-FROM-3 F-FROM-1\\n',\n #' ID KICK-1 SHEAR-12 KICK-2 SHEAR-23 KICK-3 SHEAR-34 KICK-4 SHEAR-41\\n',\n #]\n #else:\n #out = [\n #' C O M P L E X F O R C E S A C T I N G O N S H E A R P A N E L E L E M E N T S (CSHEAR)\\n',\n #' (REAL/IMAGINARY)\\n',\n #' \\n',\n #' ====== POINT 1 ====== ====== POINT 2 ====== ====== POINT 3 ====== ====== POINT 4 ======\\n',\n #' ELEMENT F-FROM-4 F-FROM-2 F-FROM-1 F-FROM-3 F-FROM-2 F-FROM-4 F-FROM-3 F-FROM-1\\n',\n #' ID KICK-1 SHEAR-12 KICK-2 SHEAR-23 KICK-3 SHEAR-34 KICK-4 SHEAR-41\\n',\n #]\n #return out\n", "\"\"\"\ndefines:\n - model = shift(bdf_filename, dxyz, bdf_filename_out=None)\n\n\"\"\"\nimport numpy as np\n\nfrom pyNastran.bdf.mesh_utils.internal_utils import get_bdf_model\n\n\ndef shift(bdf_filename, dxyz, bdf_filename_out=None):\n \"\"\"shifts the model by some amount\"\"\"\n if isinstance(dxyz, list):\n dxyz = np.array(dxyz)\n assert isinstance(dxyz, np.ndarray), dxyz\n print(\"dxyz = %s\" % dxyz)\n\n model = get_bdf_model(bdf_filename, xref=True, log=None, debug=True)\n for unused_nid, node in model.nodes.items():\n xyz = node.get_position() + dxyz\n node.set_position(model, xyz, cid=0, xref=True)\n\n for unused_caero_id, caero in model.caeros.items():\n caero.shift(dxyz)\n\n if bdf_filename_out:\n model.write_bdf(bdf_filename_out)\n return model\n\ndef update_nodes(model, nid_cp_cd, xyz_cid0):\n \"\"\"how does this work for SPOINTs/EPOINTs???\"\"\"\n coord = model.coords[0]\n all_node_ids = np.array(list(model.nodes.keys()), dtype=nid_cp_cd.dtype)\n nids = nid_cp_cd[:, 0]\n inids = np.searchsorted(nids, all_node_ids)\n for inid, nid in zip(inids, all_node_ids):\n node = model.nodes[nid]\n xyz = xyz_cid0[inid, :]\n node.xyz = xyz\n node.cp = 0\n node.cp_ref = coord\n" ]
[ [ "numpy.log", "numpy.ones", "numpy.linalg.inv", "numpy.zeros" ], [ "numpy.allclose", "numpy.array_equal", "numpy.zeros" ], [ "numpy.searchsorted", "numpy.array" ] ]
ishine/VITS-1
[ "6b76bab881c801322ee3a8d8815ec06dd1c80980" ]
[ "module/dataset_util.py" ]
[ "#encoding:utf-8\n\nimport random\nimport numpy as np\nimport matplotlib as mpl\nmpl.use('Agg')# AGG(Anti-Grain Geometry engine)\nimport matplotlib.pyplot as plt\nimport os\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.utils.data as data\nimport torchvision\nfrom torchvision import models,transforms\nimport torchvision.utils as vutils\nimport torch.nn.init as init\nfrom torch.autograd import Function\nimport torch.nn.functional as F\n\nimport torchaudio\n\n#wavファイル、話者id、テキスト(音素列)の3つを読み込むためのDatasetクラス\nclass AudioSpeakerTextLoader(torch.utils.data.Dataset):\n\t\"\"\"\n\t\t1) 前処理によって作成されたtxtファイルに書かれたwavファイル、話者id、テキスト(音素列)の3つを読み込む\n\t\t2) テキストを正規化し整数へと変換\n\t\t3) wavファイルからスペクトログラムを計算\n\t\"\"\"\n\tdef __init__(self, dataset_txtfile_path, phoneme_list):\n\t\t#dataset_txtfile_path : 前処理によって作成されたtxtファイルへのパス\n\t\t#phoneme_list : 学習に用いる音素のlist\n\t\tself.sampling_rate = 22050\n\t\tself.filter_length = 1024\n\t\tself.hop_length = 256\n\t\tself.win_length = 1024\n\t\tself.phoneme_list = phoneme_list\n\t\t#音素とindexを対応付け 対応を前計算しておくことでバッチ作成時の処理を高速化する\n\t\tself.phoneme2index = {p : i for i, p in enumerate(self.phoneme_list, 0)}\n\n\t\t###前処理によって作成されたtxtファイルの読み込み###\n\t\t#一行につき\n\t\t#wavファイルへのパス|話者id|音素列\n\t\t#というフォーマットで記述されている\n\t\twith open(dataset_txtfile_path, \"r\") as f:\n\t\t\tself.wavfilepath_speakerid_text = [line.split(\"|\") for line in f.readlines()]\n\t\t#各行をランダムにシャッフル\n\t\trandom.seed(1234)\n\t\trandom.shuffle(self.wavfilepath_speakerid_text)\n\n\tdef get_audio_text_speaker_pair(self, audiopath_sid_text):\n\t\t# separate filename, speaker_id and text\n\t\taudiopath, sid, text = audiopath_sid_text[0], audiopath_sid_text[1], audiopath_sid_text[2]\n\t\twav, spec = self.get_audio(audiopath)\n\t\ttext = self.get_text(text)\n\t\tsid = self.get_sid(sid)\n\t\treturn (wav, spec, text, sid)\n\n\tdef get_audio(self, wavfile_path):\n\t\t#wavファイルの読み込み\n\t\twav, _ = torchaudio.load(wavfile_path)\n\t\t#wavからspectrogramを計算\n\t\t#計算結果はファイルに保存しておき、2回目以降はそれを読み込むだけにする\n\t\tspec_filename = wavfile_path.replace(\".wav\", \".spec.pt\")\n\t\tif os.path.exists(spec_filename):\n\t\t\tspec = torch.load(spec_filename)\n\t\telse:\n\t\t\tpad_size = int((self.filter_length-self.hop_length)/2)\n\t\t\twav_padded = torch.nn.functional.pad(wav, (pad_size, pad_size), mode='reflect')\n\t\t\tspec = torchaudio.functional.spectrogram(\n\t\t\t\t\t\t\t\t\twaveform=wav_padded,\n\t\t\t\t\t\t\t\t\tpad=0,#torchaudio.functional.spectrogram内で使われているtorch.nn.functional.padはmode='constant'となっているが、今回はmode='reflect'としたいため手動でpaddingする\n\t\t\t\t\t\t\t\t\twindow=torch.hann_window(self.win_length),\n\t\t\t\t\t\t\t\t\tn_fft=self.filter_length,\n\t\t\t\t\t\t\t\t\thop_length=self.hop_length,\n\t\t\t\t\t\t\t\t\twin_length=self.win_length,\n\t\t\t\t\t\t\t\t\tpower=2,\n\t\t\t\t\t\t\t\t\tnormalized=False,\n\t\t\t\t\t\t\t\t\tcenter=False\n\t\t\t\t\t\t\t\t)\n\t\t\tspec = torch.squeeze(spec, 0)\n\t\t\ttorch.save(spec, spec_filename)\n\t\treturn wav, spec\n\n\tdef get_sid(self, sid):\n\t\tsid = torch.LongTensor([int(sid)])\n\t\treturn sid\n\t\n\tdef get_text(self, text):\n\t\t#Converts a string of text to a sequence of IDs corresponding to the symbols in the text.\n\t\ttext_splitted = text.replace(\"\\n\", \"\").split(\",\")\n\t\ttext_converted_into_index = [self.phoneme2index[p] for p in text_splitted]#音素を数値に変換\n\t\t#各音素の間に0を挿入する\n\t\ttext_norm = [0] * (len(text_converted_into_index) * 2 + 1)\n\t\ttext_norm[1::2] = text_converted_into_index\n\t\t#tensorへと変換\n\t\ttext_norm = torch.LongTensor(text_norm)\n\t\treturn 
text_norm\n\n\tdef __getitem__(self, index):\n\t\tline = self.wavfilepath_speakerid_text[index]\n\t\twavfilepath, speakerid, text = line[0], line[1], line[2]\n\t\twav, spec = self.get_audio(wavfilepath)\n\t\tspeaker_id = self.get_sid(speakerid)\n\t\ttext = self.get_text(text)\n\t\treturn (wav, spec, speaker_id, text)\n\n\tdef __len__(self):\n\t\treturn len(self.wavfilepath_speakerid_text)\n\n#Function that collates the data obtained from AudioSpeakerTextLoader.__getitem__ into a batch\ndef collate_fn(batch):\n\t# batch = [\n\t# \t(wav, spec, speaker_id, text),\n\t# \t(wav, spec, speaker_id, text),\n\t# \t....\n\t# ]\n\tmax_wav_len = max([x[0].size(1) for x in batch])#compute the maximum wav length\n\tmax_spec_len = max([x[1].size(1) for x in batch])#compute the maximum spectrogram length\n\tmax_text_len = max([x[3].size(0) for x in batch])#compute the maximum text length\n\n\tbatch_size = len(batch)\n\n\twav_lengths = torch.LongTensor(batch_size)#torch.size([batch_size])\n\tspec_lengths = torch.LongTensor(batch_size)\n\tspeaker_id = torch.LongTensor(batch_size)\n\ttext_lengths = torch.LongTensor(batch_size)\n\n\twav_padded = torch.zeros(batch_size, 1, max_wav_len, dtype=torch.float32)\n\tspec_padded = torch.zeros(batch_size, batch[0][1].size(0), max_spec_len, dtype=torch.float32)\n\ttext_padded = torch.zeros(batch_size, max_text_len, dtype=torch.long)\n\n\t#text_padded, spec_padded, and wav_padded are initialized with all elements set to 0,\n\t#so overwriting them left-aligned with the original text, spec, and wav yields zero-padded tensors\n\tfor i, (wav_row, spec_row, speaker_id_row, text_row) in enumerate(batch, 0):\n\t\twav_padded[i, :, :wav_row.size(1)] = wav_row\n\t\twav_lengths[i] = wav_row.size(1)\n\n\t\tspec_padded[i, :, :spec_row.size(1)] = spec_row\n\t\tspec_lengths[i] = spec_row.size(1)\n\n\t\tspeaker_id[i] = speaker_id_row\n\n\t\ttext_padded[i, :text_row.size(0)] = text_row\n\t\ttext_lengths[i] = text_row.size(0)\n\n\treturn wav_padded, wav_lengths, \\\n\t\t\tspec_padded, spec_lengths, \\\n\t\t\tspeaker_id, \\\n\t\t\ttext_padded, text_lengths\n\n#For each tensor in the batch, extract the span of length segment_size starting at the index given by start_indices[i]\n#Used during training to crop spectrograms and waveforms to a given length along the time axis\ndef slice_segments(input_tensor, start_indices, segment_size):\n\toutput_tensor = torch.zeros_like(input_tensor[:, ..., :segment_size])\n\tbatch_size = input_tensor.size(0)\n\tfor batch_index in range(batch_size):\n\t\tindex_start = start_indices[batch_index]\n\t\tindex_end = index_start + segment_size\n\t\toutput_tensor[batch_index] = input_tensor[batch_index, ..., index_start:index_end]\n\treturn output_tensor\n" ]
[ [ "matplotlib.use", "torch.zeros", "torch.hann_window", "torch.save", "torch.squeeze", "torch.LongTensor", "torch.load", "torch.zeros_like", "torch.nn.functional.pad" ] ]
hpparvi/triceratops
[ "7521ee5c5a493871b95e60ae0092fbb5d74db2da" ]
[ "triceratops/priors.py" ]
[ "import numpy as np\nfrom astropy import constants\nfrom .funcs import (file_to_contrast_curve,\n separation_at_contrast,\n trilegal_results)\n\nMsun = constants.M_sun.cgs.value\nRsun = constants.R_sun.cgs.value\nRearth = constants.R_earth.cgs.value\nG = constants.G.cgs.value\nau = constants.au.cgs.value\npi = np.pi\n\n\ndef sample_rp(x, M_s):\n \"\"\"\n Samples planet radii that are dependent on host mass.\n Args:\n x (numpy array): Random numbers between 0 and 1.\n M_s (numpy array): Host star masses [Solar radii].\n Returns:\n x (numpy array): Sampled planet radii [Earth radii].\n\n \"\"\"\n R_break1 = 3.0\n R_break2 = 6.0\n R_min = 0.5\n R_max = 20.0\n # power coefficients for M > 0.45\n p1 = 0.0\n p2 = -4.0\n p3 = -0.5\n # power coefficients for M <= 0.45\n p4 = 0.0\n p5 = -7.0\n p6 = -0.5\n # normalizing constants for M > 0.45\n A1 = R_break1**p1 / R_break1**p2\n A2 = R_break2**p2 / R_break2**p3\n I1 = (R_break1**(p1+1) - R_min**(p1+1))/(p1+1)\n I2 = A1*(R_break2**(p2+1) - R_break1**(p2+1))/(p2+1)\n I3 = A2*A1*(R_max**(p3+1) - R_break2**(p3+1))/(p3+1)\n Norm1 = 1/(I1+I2+I3)\n # normalizing constants for M <= 0.45\n A3 = R_break1**p4 / R_break1**p5\n A4 = R_break2**p5 / R_break2**p6\n I4 = (R_break1**(p4+1) - R_min**(p4+1))/(p4+1)\n I5 = A3*(R_break2**(p5+1) - R_break1**(p5+1))/(p5+1)\n I6 = A4*A3*(R_max**(p6+1) - R_break2**(p6+1))/(p6+1)\n Norm2 = 1/(I4+I5+I6)\n\n mask1 = (\n (x <= Norm1*I1)\n & (M_s > 0.45)\n )\n mask2 = (\n (x > Norm1*I1)\n & (x <= Norm1*(I1+I2))\n & (M_s > 0.45)\n )\n mask3 = (\n (x > Norm1*(I1+I2))\n & (x <= Norm1*(I1+I2+I3))\n & (M_s > 0.45)\n )\n mask4 = (\n (x <= Norm2*I4)\n & (M_s <= 0.45)\n )\n mask5 = (\n (x > Norm2*I4)\n & (x <= Norm2*(I4+I5))\n & (M_s <= 0.45)\n )\n mask6 = (\n (x > Norm2*(I4+I5))\n & (x <= Norm2*(I4+I5+I6))\n & (M_s <= 0.45)\n )\n x[mask1] = (\n (x[mask1]/Norm1*(p1+1) + R_min**(p1+1))**(1/(p1+1))\n )\n x[mask2] = (\n (\n (x[mask2]/Norm1 - I1)*(p2+1)/A1\n + R_break1**(p2+1)\n )**(1/(p2+1))\n )\n x[mask3] = (\n (\n (x[mask3]/Norm1 - I1 - I2)*(p3+1)/(A1*A2)\n + R_break2**(p3+1)\n )**(1/(p3+1))\n )\n x[mask4] = (\n (x[mask4]/Norm2*(p4+1) + R_min**(p4+1))**(1/(p4+1))\n )\n x[mask5] = (\n (\n (x[mask5]/Norm2 - I4)*(p5+1)/A3\n + R_break1**(p5+1)\n )**(1/(p5+1))\n )\n x[mask6] = (\n (\n (x[mask6]/Norm2 - I4 - I5)*(p6+1)/(A3*A4)\n + R_break2**(p6+1)\n )**(1/(p6+1))\n )\n return x\n\n\ndef sample_inc(x, lower=0, upper=90):\n \"\"\"\n Samples inclinations.\n Args:\n x (numpy array): Random numbers between 0 and 1.\n lower (float): Lower bound of inclinations [deg].\n Upper (float): Upper bound of inclinations [deg].\n Returns:\n x (numpy array): Sampled inclinations [deg].\n\n \"\"\"\n # normalizing constant\n Norm = 1/(np.cos(lower*np.pi/180) - np.cos(upper*np.pi/180))\n x = np.arccos(np.cos(lower*np.pi/180) - x/Norm) * 180/np.pi\n return x\n\n\ndef sample_q(x):\n \"\"\"\n Samples mass ratios of short-period binaries.\n Args:\n x (numpy array): Random numbers between 0 and 1.\n Returns:\n x (numpy array): Sampled mass ratios [deg].\n \"\"\"\n # power coefficients\n p1 = 0.3\n p2 = -0.5\n # normalizing constants\n # continuity between first two segments\n A1 = (0.3**p1)/(0.3**p2)\n # satisfy F_twin condition\n F_twin = 0.30\n A2 = (\n 1 + (F_twin)/(1-F_twin)\n * ((1.0**(p2+1) - 0.3**(p2+1))/(p2+1))\n / ((1.0**(p2+1) - 0.95**(p2+1))/(p2+1))\n )\n I1 = (0.3**(p1+1) - 0.1**(p1+1))/(p1+1)\n I2 = A1*(0.95**(p2+1) - 0.3**(p2+1))/(p2+1)\n I3 = A2*A1*(1.0**(p2+1) - 0.95**(p2+1))/(p2+1)\n Norm = 1/(I1+I2+I3)\n\n mask1 = x <= Norm*I1\n mask2 = (x > 
Norm*I1) & (x <= Norm*(I1+I2))\n mask3 = (x > Norm*(I1+I2)) & (x <= Norm*(I1+I2+I3))\n x[mask1] = (\n (x[mask1]/Norm*(p1+1) + 0.1**(p1+1))**(1/(p1+1))\n )\n x[mask2] = (\n ((x[mask2]/Norm - I1)*(p2+1)/A1 + 0.3**(p2+1))**(1/(p2+1))\n )\n x[mask3] = (\n (\n (x[mask3]/Norm - I1 - I2)*(p2+1)/(A1*A2)\n + 0.95**(p2+1))**(1/(p2+1))\n )\n return x\n\n\ndef sample_q_companion(x):\n \"\"\"\n Samples mass ratios of long-period companions.\n Args:\n x (numpy array): Random numbers between 0 and 1.\n Returns:\n x (numpy array): Sampled mass ratios [deg].\n \"\"\"\n # power coefficients\n p1 = 0.3\n p2 = -0.95\n # normalizing constants\n # continuity between first two segments\n A1 = (0.3**p1) / (0.3**p2)\n # satisfy F_twin condition\n F_twin = 0.05\n A2 = (\n 1 + (F_twin)/(1-F_twin)\n * ((1.0**(p2+1) - 0.3**(p2+1))/(p2+1))\n / ((1.0**(p2+1) - 0.95**(p2+1))/(p2+1))\n )\n I1 = (0.3**(p1+1) - 0.1**(p1+1))/(p1+1)\n I2 = A1*(0.95**(p2+1) - 0.3**(p2+1))/(p2+1)\n I3 = A2*A1*(1.0**(p2+1) - 0.95**(p2+1))/(p2+1)\n Norm = 1/(I1+I2+I3)\n\n mask1 = x <= Norm*I1\n mask2 = (x > Norm*I1) & (x <= Norm*(I1+I2))\n mask3 = (x > Norm*(I1+I2)) & (x <= Norm*(I1+I2+I3))\n x[mask1] = (\n (x[mask1]/Norm*(p1+1) + 0.1**(p1+1))**(1/(p1+1))\n )\n x[mask2] = (\n ((x[mask2]/Norm - I1)*(p2+1)/A1 + 0.3**(p2+1))**(1/(p2+1))\n )\n x[mask3] = (\n (\n (x[mask3]/Norm - I1 - I2)*(p2+1)/(A1*A2)\n + 0.95**(p2+1))**(1/(p2+1))\n )\n return x\n\n\ndef lnprior_Mstar_planet(M_s: np.array):\n \"\"\"\n Estimates planet occurrence rate for all planet radii and orbital\n periods under 50 days and turns it into a prior probability.\n Args:\n M_s (float): Star mass [Solar masses].\n Returns:\n lnprior_planet (float): The log probability of there being a\n short period planet\n around a star of mass M_s.\n \"\"\"\n f_p = np.zeros(len(M_s))\n mask = ((2.5 - 1.5*M_s) > 0.1)\n f_p[mask] = 2.5 - 1.5*M_s[mask]\n mask = ((2.5 - 1.5*M_s) <= 0.1)\n f_p[mask] = 0.1\n f_p[f_p > 1.0] = 1.0\n lnprior_Mstar = np.log(f_p)\n # return lnprior_Mstar as 0 (omitted due to bias)\n return 0.0\n\n\ndef lnprior_Mstar_binary(M_s: np.array):\n \"\"\"\n Calculates the companion rate of the host star for periods\n under 50 days and turns it into a prior probability.\n Args:\n M_s (numpy array): Host star masses [solar masses].\n Returns:\n lnprior_EB (float): The log probability of there being\n a short period binary\n around a star of mass M_s.\n \"\"\"\n f_comp = np.zeros(len(M_s))\n f1 = np.zeros(len(M_s))\n f2 = np.zeros(len(M_s))\n f3 = np.zeros(len(M_s))\n t1 = np.zeros(len(M_s))\n t2_partial = np.zeros(len(M_s))\n alpha = 0.018\n dlogP = 0.7\n max_Porb = 50\n\n mask = (M_s >= 1.0)\n f1[mask] = (\n 0.020 + 0.04*np.log10(M_s[mask])\n + 0.07*(np.log10(M_s[mask]))**2\n )\n f2[mask] = (\n 0.039 + 0.07*np.log10(M_s[mask])\n + 0.01*(np.log10(M_s[mask]))**2\n )\n f3[mask] = (\n 0.078 - 0.05*np.log10(M_s[mask])\n + 0.04*(np.log10(M_s[mask]))**2\n )\n t1[mask] = f1[mask]\n t2_partial[mask] = (\n 0.5*(np.log10(max_Porb) - 1.0)\n * (\n 2.0*f1[mask]+(f2[mask]-f1[mask]-alpha*dlogP)\n * (np.log10(max_Porb) - 1.0)\n )\n )\n f_comp[mask] = t1[mask] + t2_partial[mask]\n\n mask = (M_s < 1.0)\n f1[mask] = (\n 0.020 + 0.04*np.log10(1.0)\n + 0.07*(np.log10(1.0))**2\n )\n f2[mask] = (\n 0.039 + 0.07*np.log10(1.0)\n + 0.01*(np.log10(1.0))**2\n )\n f3[mask] = (\n 0.078 - 0.05*np.log10(1.0)\n + 0.04*(np.log10(1.0))**2\n )\n t1[mask] = f1[mask]\n t2_partial[mask] = (\n 0.5*(np.log10(max_Porb) - 1.0)\n * (\n 2.0*f1[mask]+(f2[mask]-f1[mask]-alpha*dlogP)\n * (np.log10(max_Porb) - 1.0)\n )\n )\n 
f_comp[mask] = t1[mask] + t2_partial[mask]\n f_comp[mask] = 0.65*f_comp[mask]+0.35*f_comp[mask]*M_s[mask]\n\n f_comp[f_comp > 1.0] = 1.0\n lnprior_Mstar = np.log(f_comp)\n # return lnprior_Mstar (omitted due to bias)\n return 0.0\n\n\ndef lnprior_Porb_planet(P_orb: float):\n \"\"\"\n Calculates probability of a planet with a given\n orbital period < 50 days.\n Args:\n P_orb (float): Orbital period [days].\n Returns:\n lnprior_Porb (float): Log probability of planet having an\n orbital period P_orb +/- 0.1 days.\n \"\"\"\n P_break = 10\n P_min = 0.1\n P_max = 50\n p1 = 1.5\n p2 = 0.0\n A = P_break**p1 / P_break**p2\n I1 = (P_break**(p1+1) - P_min**(p1+1))/(p1+1)\n I2 = A*(P_max**(p2+1) - P_break**(p2+1))/(p2+1)\n Norm = 1/(I1+I2)\n\n if P_orb < P_min+0.1:\n P_orb = P_min+0.1\n elif P_orb > P_max-0.1:\n P_orb = P_max-0.1\n\n if P_orb <= P_break-0.1:\n I1 = ((P_orb+0.1)**(p1+1) - (P_orb-0.1)**(p1+1))/(p1+1)\n prob = Norm * I1\n elif P_orb >= P_break+0.1:\n I1 = A*((P_orb+0.1)**(p2+1) - (P_orb-0.1)**(p2+1))/(p2+1)\n prob = Norm * I1\n else:\n I1 = (P_break**(p1+1) - (P_orb-0.1)**(p1+1))/(p1+1)\n I2 = A*((P_orb+0.1)**(p2+1) - P_break**(p2+1))/(p2+1)\n prob = Norm * (I1+I2)\n\n lnprior_Porb = np.log(prob)\n return lnprior_Porb\n\n\ndef lnprior_Porb_binary(P_orb: float):\n \"\"\"\n Calculates probability of a binary with a given\n orbital period < 50 days.\n Args:\n P_orb (float): Orbital period [days].\n Returns:\n lnprior_Porb (float): Log probability of binary having an\n orbital period P_orb +/- 0.1 days.\n \"\"\"\n P_break = 0.3\n P_min = 0.1\n P_max = 50\n p1 = 5.0\n p2 = 0.5\n\n A = P_break**p1 / P_break**p2\n I1 = (P_break**(p1+1) - P_min**(p1+1))/(p1+1)\n I2 = A*(P_max**(p2+1) - P_break**(p2+1))/(p2+1)\n Norm = 1/(I1+I2)\n\n if P_orb < P_min+0.1:\n P_orb = P_min+0.1\n elif P_orb > P_max-0.1:\n P_orb = P_max-0.1\n\n if P_orb <= P_break-0.1:\n I1 = ((P_orb+0.1)**(p1+1) - (P_orb-0.1)**(p1+1))/(p1+1)\n prob = Norm * I1\n elif P_orb >= P_break+0.1:\n I1 = A*((P_orb+0.1)**(p2+1) - (P_orb-0.1)**(p2+1))/(p2+1)\n prob = Norm * I1\n else:\n I1 = (P_break**(p1+1) - (P_orb-0.1)**(p1+1))/(p1+1)\n I2 = A*((P_orb+0.1)**(p2+1) - P_break**(p2+1))/(p2+1)\n prob = Norm * (I1+I2)\n\n lnprior_Porb = np.log(prob)\n return lnprior_Porb\n\n\ndef lnprior_bound(M_s: float, plx: float, delta_mags: np.array,\n separations: np.array, contrasts: np.array):\n \"\"\"\n Calculates the bound companion rate of the target star.\n Args:\n M_s (float): Target star mass [solar masses].\n plx (float): Parallax of the target star [mas].\n delta_mags (numpy array): Contrasts of simulated\n companions (delta_mag).\n separations (numpy array): Separation at contrast (arcsec).\n contrasts (numpy array): Contrast at separation (delta_mag).\n Returns:\n lnprior_bound (float): The log probability of there being\n a bound companion.\n \"\"\"\n # determine maximum physical separations based on angular\n # separation constraints and parallax\n if np.isnan(plx):\n plx = 0.1\n d = 1000/plx\n seps = d*separation_at_contrast(delta_mags, separations, contrasts)\n\n # calculate prior probability for M_s >= 1.0 solar masses\n if M_s >= 1.0:\n f1 = 0.020 + 0.04*np.log10(M_s) + 0.07*(np.log10(M_s))**2\n f2 = 0.039 + 0.07*np.log10(M_s) + 0.01*(np.log10(M_s))**2\n f3 = 0.078 - 0.05*np.log10(M_s) + 0.04*(np.log10(M_s))**2\n alpha = 0.018\n dlogP = 0.7\n max_Porbs = ((4*pi**2)/(G*M_s*Msun)*(seps*au)**3)**(1/2)/86400\n\n t1 = f1\n t2_partial = (\n 0.5*(np.log10(max_Porbs) - 1.0)\n * (\n 2.0*f1 + (f2 - f1 - alpha*dlogP)\n * (np.log10(max_Porbs) - 
1.0)\n )\n )\n t2 = (\n 0.5*(2.0 - 1.0)\n * (\n 2.0*f1 + (f2 - f1 - alpha*dlogP)\n * (2.0 - 1.0)\n )\n )\n t3_partial = (\n 0.5*alpha\n * (\n np.log10(max_Porbs)**2-5.4\n * np.log10(max_Porbs)+6.8\n )\n + f2*(np.log10(max_Porbs) - 2.0)\n )\n t3 = 0.5*alpha*(3.4**2 - 5.4*3.4 + 6.8) + f2*(3.4 - 2.0)\n t4_partial = (\n alpha*dlogP*(np.log10(max_Porbs) - 3.4)\n + f2*(np.log10(max_Porbs) - 3.4)\n + (f3 - f2 - alpha*dlogP)\n * (\n 0.238095*np.log10(max_Porbs)**2\n - 0.952381*np.log10(max_Porbs)\n + 0.485714\n )\n )\n t4 = (\n alpha*dlogP*(5.5 - 3.4) + f2*(5.5 - 3.4)\n + (f3 - f2 - alpha*dlogP)\n * (0.238095*5.5**2 - 0.952381*5.5 + 0.485714)\n )\n t5_partial = (\n f3*(3.33333 - 17.3566*np.exp(-0.3*np.log10(max_Porbs)))\n )\n t5 = f3*(3.33333 - 17.3566*np.exp(-0.3*8.0))\n\n f_comp = np.zeros(len(seps))\n mask = (np.log10(max_Porbs) < 1.0)\n f_comp[mask] = t1\n mask = (\n (np.log10(max_Porbs) >= 1.0)\n & (np.log10(max_Porbs) < 2.0)\n )\n f_comp[mask] = t1 + t2_partial[mask]\n mask = (\n (np.log10(max_Porbs) >= 2.0)\n & (np.log10(max_Porbs) < 3.4)\n )\n f_comp[mask] = t1 + t2 + t3_partial[mask]\n mask = (\n (np.log10(max_Porbs) >= 3.4)\n & (np.log10(max_Porbs) < 5.5)\n )\n f_comp[mask] = t1 + t2 + t3 + t4_partial[mask]\n mask = (\n (np.log10(max_Porbs) >= 5.5)\n & (np.log10(max_Porbs) < 8.0)\n )\n f_comp[mask] = t1 + t2 + t3 + t4 + t5_partial[mask]\n mask = (np.log10(max_Porbs) >= 8.0)\n f_comp[mask] = t1 + t2 + t3 + t4 + t5\n lnprior_bound = np.log(f_comp)\n\n # calculate prior probability for M_s < 1.0 solar masses\n else:\n M_act = M_s\n M_s = 1.0\n f1 = 0.020 + 0.04*np.log10(M_s) + 0.07*(np.log10(M_s))**2\n f2 = 0.039 + 0.07*np.log10(M_s) + 0.01*(np.log10(M_s))**2\n f3 = 0.078 - 0.05*np.log10(M_s) + 0.04*(np.log10(M_s))**2\n alpha = 0.018\n dlogP = 0.7\n max_Porbs = ((4*pi**2)/(G*M_s*Msun)*(seps*au)**3)**(1/2)/86400\n\n t1 = f1\n t2_partial = (\n 0.5*(np.log10(max_Porbs) - 1.0)\n * (\n 2.0*f1 + (f2 - f1 - alpha*dlogP)\n * (np.log10(max_Porbs) - 1.0)\n )\n )\n t2 = (\n 0.5*(2.0 - 1.0)\n * (\n 2.0*f1 + (f2 - f1 - alpha*dlogP)\n * (2.0 - 1.0)\n )\n )\n t3_partial = (\n 0.5*alpha\n * (\n np.log10(max_Porbs)**2\n - 5.4*np.log10(max_Porbs) + 6.8\n )\n + f2*(np.log10(max_Porbs) - 2.0)\n )\n t3 = 0.5*alpha*(3.4**2 - 5.4*3.4 + 6.8) + f2*(3.4 - 2.0)\n t4_partial = (\n alpha*dlogP*(np.log10(max_Porbs) - 3.4)\n + f2*(np.log10(max_Porbs) - 3.4)\n + (f3 - f2 - alpha*dlogP)\n * (\n 0.238095*np.log10(max_Porbs)**2\n - 0.952381*np.log10(max_Porbs) + 0.485714\n )\n )\n t4 = (\n alpha*dlogP*(5.5 - 3.4)\n + f2*(5.5 - 3.4)\n + (f3 - f2 - alpha*dlogP)\n * (0.238095*5.5**2 - 0.952381*5.5 + 0.485714)\n )\n t5_partial = (\n f3*(3.33333 - 17.3566*np.exp(-0.3*np.log10(max_Porbs)))\n )\n t5 = f3*(3.33333 - 17.3566*np.exp(-0.3*8.0))\n\n f_comp = np.zeros(len(seps))\n mask = (np.log10(max_Porbs) < 1.0)\n f_comp[mask] = t1\n mask = (\n (np.log10(max_Porbs) >= 1.0)\n & (np.log10(max_Porbs) < 2.0)\n )\n f_comp[mask] = t1 + t2_partial[mask]\n mask = (\n (np.log10(max_Porbs) >= 2.0)\n & (np.log10(max_Porbs) < 3.4)\n )\n f_comp[mask] = t1 + t2 + t3_partial[mask]\n mask = (\n (np.log10(max_Porbs) >= 3.4)\n & (np.log10(max_Porbs) < 5.5)\n )\n f_comp[mask] = t1 + t2 + t3 + t4_partial[mask]\n mask = (\n (np.log10(max_Porbs) >= 5.5)\n & (np.log10(max_Porbs) < 8.0)\n )\n f_comp[mask] = t1 + t2 + t3 + t4 + t5_partial[mask]\n mask = (np.log10(max_Porbs) >= 8.0)\n f_comp[mask] = t1 + t2 + t3 + t4 + t5\n f_act = 0.65*f_comp+0.35*f_comp*M_act\n lnprior_bound = np.log(f_act)\n\n return lnprior_bound\n\n\ndef lnprior_background(N_comp: int, 
delta_mags: np.array,\n separations: np.array,\n contrasts: np.array):\n \"\"\"\n Calculates the limiting separation (in arcsecs)\n at a given delta_mag.\n Args:\n N_comp (int): Number of stars obtained from\n trilegal simulation.\n delta_mags (numpy array): Contrasts of simulated\n companions (delta_mag).\n separations (numpy array): Separation at contrast (arcsec).\n contrasts (numpy array): Contrast at separation (delta_mag).\n Returns:\n lnprior_bg (float): The log probability of there being\n a background star.\n \"\"\"\n seps = separation_at_contrast(delta_mags, separations, contrasts)\n lnprior_bg = np.log10((N_comp/0.1) * (1/3600)**2 * seps**2)\n return lnprior_bg\n" ]
[ [ "numpy.isnan", "numpy.log", "numpy.exp", "numpy.cos", "numpy.log10" ] ]
hidden-beauty/hiddenbeauty-tools
[ "ba93138f94079b89d7f9d89328509e201837bd07" ]
[ "hiddenbeauty/utils.py" ]
[ "import pymesh\nimport math\nimport os\nimport numpy as np\n\ndef get_fast_bbox(mesh):\n\n bbox = [[100000,100000,100000], [0,0,0]]\n for vertex in mesh.vertices:\n\n if vertex[0] > bbox[1][0]:\n bbox[1][0] = vertex[0]\n if vertex[0] < bbox[0][0]:\n bbox[0][0] = vertex[0]\n\n if vertex[1] > bbox[1][1]:\n bbox[1][1] = vertex[1]\n if vertex[1] < bbox[0][1]:\n bbox[0][1] = vertex[1]\n\n if vertex[2] > bbox[1][2]:\n bbox[1][2] = vertex[2]\n if vertex[2] < bbox[0][2]:\n bbox[0][2] = vertex[2]\n\n return bbox\n\ndef get_fast_bbox_2d(points):\n\n bbox = [[100000,100000], [0,0]]\n for vertex in points:\n\n if vertex[0] > bbox[1][0]:\n bbox[1][0] = vertex[0]\n if vertex[0] < bbox[0][0]:\n bbox[0][0] = vertex[0]\n\n if vertex[1] > bbox[1][1]:\n bbox[1][1] = vertex[1]\n if vertex[1] < bbox[0][1]:\n bbox[0][1] = vertex[1]\n\n return bbox\n\n\ndef center_around_origin(mesh):\n\n bbox = get_fast_bbox(mesh)\n width_x = bbox[1][0] - bbox[0][0]\n width_y = bbox[1][1] - bbox[0][1]\n width_z = bbox[1][2] - bbox[0][2]\n trans_x = -((width_x / 2.0) + bbox[0][0])\n trans_y = -((width_y / 2.0) + bbox[0][1])\n trans_z = -((width_z / 2.0) + bbox[0][2])\n\n return translate(mesh, (trans_y, trans_x, trans_z))\n\n\ndef rotate(mesh, offset, rotation_axis, rotation_angle):\n \"\"\"\n mesh is the mesh to be rotated\n offset is a three axis vector\n rotation_matrix is a 3d rotation matrix\n angle is the rotation angle in degress\n\n returns rotated mesh\n \"\"\"\n offset = np.array(offset);\n axis = np.array((rotation_axis[1], rotation_axis[0], rotation_axis[2]));\n angle = math.radians(rotation_angle);\n rot = pymesh.Quaternion.fromAxisAngle(axis, angle);\n rot = rot.to_matrix();\n\n vertices = mesh.vertices;\n bbox = mesh.bbox;\n centroid = 0.5 * (bbox[0] + bbox[1]);\n vertices = np.dot(rot, (vertices - centroid).T).T + centroid + offset;\n\n return pymesh.form_mesh(vertices, mesh.faces, mesh.voxels)\n\n\ndef clear_color(file):\n mesh = pymesh.meshio.load_mesh(file);\n new_mesh = pymesh.form_mesh(mesh.vertices, mesh.faces)\n pymesh.meshio.save_mesh(file, new_mesh);\n\n\ndef scale(mesh, scale_factor):\n \"\"\"\n mesh is the mesh to be rotated\n scale_factor is how much to scale the model by.\n\n returns rotated mesh\n \"\"\"\n\n vertices = []\n for vertex in mesh.vertices:\n vertices.append((vertex[0] * scale_factor[0], vertex[1] * scale_factor[1], vertex[2] * scale_factor[2]))\n\n return pymesh.form_mesh(vertices, mesh.faces, mesh.voxels)\n\n\ndef translate(mesh, translation_vector):\n \"\"\"\n mesh is the mesh to be rotated\n translation_vector the vectory by which the mesh should be translated by\n\n returns rotated mesh\n \"\"\"\n\n vertices = []\n for vertex in mesh.vertices:\n vertices.append((vertex[0] + translation_vector[1], vertex[1] + translation_vector[0], vertex[2] + translation_vector[2]))\n\n return pymesh.form_mesh(vertices, mesh.faces, mesh.voxels)\n\n\ndef flip_mesh(mesh):\n new_faces = []\n for face in mesh.faces:\n new_face = list(face)\n new_face.reverse()\n new_faces.append(new_face)\n\n return pymesh.form_mesh(mesh.vertices, np.array(new_faces))\n\n\ndef make_3d(mesh, offset):\n vertices = [ (vertex[0], vertex[1], offset) for vertex in mesh.vertices]\n return pymesh.form_mesh(vertices, mesh.faces, mesh.voxels)\n\n\nfile_index = 0\ndef save_mesh(filename, mesh):\n global file_index\n\n filename = os.path.join(\"debug\", \"%02d-%s.stl\" % (file_index, filename))\n file_index += 1\n pymesh.meshio.save_mesh(filename, mesh)\n print(\"wrote %s\" % filename)\n\n\ndef mesh_from_xy_points(faces_xy, 
extrude_mm = 0.0):\n index = {}\n inverse = {}\n count = 0\n for face in faces_xy:\n for point in face:\n if tuple(point) not in index:\n index[tuple(point)] = count\n inverse[count] = point\n count += 1\n\n vertices = []\n for i in index.values():\n vertices.append(inverse[i])\n\n faces = []\n for face in faces_xy:\n new_face = []\n for point in face:\n new_face.append(index[tuple(point)])\n faces.append(new_face)\n\n if len(faces_xy[0][0]) == 2:\n return make_3d(pymesh.form_mesh(np.array(vertices), np.array(faces)), extrude_mm)\n else:\n return pymesh.form_mesh(np.array(vertices), np.array(faces))\n" ]
[ [ "numpy.array", "numpy.dot" ] ]
PeterWolf93/PupilLabs_VR_Calibration
[ "5904804d5eab83805cc1ded04b9de31239b5771a" ]
[ "Calib_Tools/pix2deg.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Aug 6 10:58:17 2018\r\n\r\n@author: P. Wolf\r\n\r\ntitle: pix2deg\r\n\"\"\"\r\n#%% Imports\r\nimport numpy as np\r\n\r\n#%% main function\r\ndef pix2deg(x_px,y_px,cfg,mode):\r\n if mode == '2D':\r\n x_rad = np.arctan((x_px * cfg.screen_width)/(cfg.screen_xres * cfg.screen_dist))\r\n y_rad = np.arctan((y_px * cfg.screen_height)/(cfg.screen_yres * cfg.screen_dist))\r\n \r\n x_deg = x_rad / np.pi * 180\r\n y_deg = y_rad / np.pi * 180\r\n elif mode == '3D':\r\n x_rad = np.arctan(x_px / cfg.screen_dist)\r\n y_rad = np.arctan(y_px / cfg.screen_dist)\r\n \r\n x_deg = x_rad / np.pi * 180\r\n y_deg = y_rad / np.pi * 180\r\n return x_deg, y_deg\r\n" ]
[ [ "numpy.arctan" ] ]
wiheto/dfcbenchmarker
[ "d2381d7173a40234d217301e91a0d883bf555d4c" ]
[ "dfcbenchmarker/plot.py" ]
[ "import matplotlib.pyplot as plt\nimport dfcbenchmarker\nimport numpy as np\nimport scipy.stats as sps\nfrom matplotlib.ticker import LinearLocator\nimport seaborn as sns\nimport os\nplt.style.use('seaborn-whitegrid')\n\n\n\ndef plot_timeseries(x,plot_autocorr='no',fig_dir=None,fig_prefix=None,cm='Set2',limitaxis=100,mi='alpha'):\n\n\n if isinstance(mi,str):\n mi = [mi]\n\n if fig_prefix:\n fig_prefix += '_'\n else:\n fig_prefix = ''\n\n if not fig_dir:\n fig_dir = './'\n\n if not os.path.exists(fig_dir):\n os.makedirs(fig_dir,exist_ok=True)\n\n params = {}\n for m in mi:\n params[m] = np.unique(x.index.get_level_values(m))\n mi,mi_num,mi_parameters,mi_param_list = dfcbenchmarker.multiindex_preproc(params,mi)\n\n colormap=dfcbenchmarker.get_discrete_colormap(cm)\n\n for sim_it, mi_params in enumerate(mi_parameters):\n\n param_sname = [p[0] + '-' + str(p[1]) for p in list(zip(mi,mi_params))]\n param_sname = '_'.join(param_sname)\n if param_sname:\n param_sname = '_' + param_sname.replace(' ','')\n\n param_title = [p[0] + '=' + str(p[1]) for p in list(zip(mi,mi_params))]\n param_title = ','.join(param_title)\n param_title = param_title.replace(' ','').replace(',',', ')\n\n if mi_params == ():\n mi_params = np.arange(0,len(x))\n\n if plot_autocorr == 'no':\n\n fig,ax=plt.subplots(1)\n ax.plot(np.arange(1,limitaxis+1),x['timeseries_1'][mi_params][:limitaxis],color=colormap(0),alpha=0.9,linewidth=2)\n ax.plot(np.arange(1,limitaxis+1),x['timeseries_2'][mi_params][:limitaxis],color=colormap(1),alpha=0.9,linewidth=2)\n ax.set_xlim(1,limitaxis)\n ax.set_ylabel('Signal Amplitude')\n ax.set_xlabel('Time')\n\n else:\n\n autocorrelation = np.array([dfcbenchmarker.autocorr(x[ts][mi_params]) for ts in ['timeseries_1','timeseries_2']])\n\n fig=plt.figure()\n ax=[]\n ax.append(plt.subplot2grid((2,3),(0,0),colspan=3))\n for n in range(0,3):\n ax.append(plt.subplot2grid((2,3),(1,n)))\n\n # Plot 1: raw time series\n ax[0].plot(np.arange(1,limitaxis+1),x['timeseries_1'][mi_params][:limitaxis],color=colormap(0),alpha=0.9,linewidth=2)\n ax[0].plot(np.arange(1,limitaxis+1),x['timeseries_2'][mi_params][:limitaxis],color=colormap(1),alpha=.9,linewidth=2)\n ax[0].set_xlim(1,limitaxis)\n ax[0].set_ylabel('Signal Amplitude')\n ax[0].set_xlabel('Time')\n\n # Plot 2 and 3: autocorrelation of timeseries 1 and 2\n for p in range(1,3):\n ax[p].plot(np.arange(0,autocorrelation.shape[1]),autocorrelation[p-1,:],color=colormap(p-1),alpha=0.9,linewidth=2)\n ax[p].set_ylabel('Correlation (r)')\n ax[p].set_xlabel('Lag')\n ax[p].axis([0,autocorrelation.shape[1]-1,0,1])\n ax[p].set_yticks(np.arange(0,1.05,0.25))\n ax[p].set_xticks(np.arange(0,autocorrelation.shape[1],2))\n\n # Plot 4: correlation of timeseries 1 and 2\n cmap = sns.cubehelix_palette(start=1/3, light=1, as_cmap=True)\n ax[3] = sns.kdeplot(x['timeseries_1'][mi_params], x['timeseries_2'][mi_params], shade=True,cmap=cmap)\n ax[3].set_xlabel('Signal 1 amplitude')\n ax[3].set_ylabel('Signal 2 amplitude')\n\n [dfcbenchmarker.square_axis(ax[n]) for n in [1,2,3]]\n\n plt.suptitle(param_title,fontsize=11)\n plt.tight_layout(rect=[0, 0, 1, 0.95])\n plt.savefig(fig_dir + '/' + fig_prefix + 'raw-timeseries' + param_sname + '.pdf',r=600)\n\n plt.close('all')\n\ndef plot_method_correlation(dfc, cmap='RdBu_r', fig_dir=None, fig_prefix=None, mi=[]):\n\n\n if isinstance(mi,str):\n mi = [mi]\n\n if fig_prefix:\n fig_prefix += '_'\n else:\n fig_prefix = ''\n\n if not fig_dir:\n fig_dir = './'\n\n if not os.path.exists(fig_dir):\n os.makedirs(fig_dir,exist_ok=True)\n\n params = {}\n 
for m in mi:\n params[m] = np.unique(dfc.index.get_level_values(m))\n mi,mi_num,mi_parameters,mi_param_list = dfcbenchmarker.multiindex_preproc(params,mi)\n\n for sim_it, mi_params in enumerate(mi_parameters):\n\n param_sname = [p[0] + '-' + str(p[1]) for p in list(zip(mi,mi_params))]\n param_sname = '_'.join(param_sname)\n if param_sname:\n param_sname = '_' + param_sname.replace(' ','')\n\n param_title = [p[0] + '=' + str(p[1]) for p in list(zip(mi,mi_params))]\n param_title = ','.join(param_title)\n param_title = param_title.replace(' ','').replace(',',', ')\n\n if mi_params == ():\n mi_params = np.arange(0,len(dfc))\n\n R=np.zeros([len(dfc.columns),len(dfc.columns)])\n for i,m1 in enumerate(sorted(dfc.columns)):\n for j,m2 in enumerate(sorted(dfc.columns)):\n notnan = np.intersect1d(np.where(np.isnan(dfc[m1][mi_params])==0),np.where(np.isnan(dfc[m2][mi_params])==0))\n R[i,j]= sps.spearmanr(dfc[m1][mi_params][notnan],dfc[m2][mi_params][notnan])[0]\n\n\n fig,ax=plt.subplots(1)\n\n pax=ax.pcolormesh(R,vmin=-1,vmax=1,cmap=cmap)\n dfcbenchmarker.square_axis(ax)\n\n ax.set_xticks(np.arange(0.5,len(dfc.columns)-0.49,1))\n ax.set_xticklabels(sorted(dfc.columns))\n ax.set_yticks(np.arange(0.5,len(dfc.columns)-0.49,1))\n ax.set_yticklabels(sorted(dfc.columns))\n ax.axis([0,len(dfc.columns),len(dfc.columns),0])\n\n plt.suptitle(param_title,fontsize=11)\n plt.tight_layout(rect=[0, 0, 1, 0.95])\n fig.colorbar(pax)\n plt.savefig(fig_dir + '/' + fig_prefix + 'dfc-method-correlation' + param_sname + '.pdf',r=600)\n\n plt.close('all')\n\n\n\ndef plot_dfc_timeseries(dfc, limitaxis=500, cm='Set2', fig_dir = None, fig_prefix=None,mi=[]):\n\n if isinstance(mi,str):\n mi = [mi]\n\n if fig_prefix:\n fig_prefix += '_'\n else:\n fig_prefix = ''\n\n if not fig_dir:\n fig_dir = './'\n\n if not os.path.exists(fig_dir):\n os.makedirs(fig_dir,exist_ok=True)\n\n params = {}\n for m in mi:\n params[m] = np.unique(dfc.index.get_level_values(m))\n mi,mi_num,mi_parameters,mi_param_list = dfcbenchmarker.multiindex_preproc(params,mi)\n\n colormap=dfcbenchmarker.get_discrete_colormap(cm)\n\n for sim_it, mi_params in enumerate(mi_parameters):\n\n param_sname = [p[0] + '-' + str(p[1]) for p in list(zip(mi,mi_params))]\n param_sname = '_'.join(param_sname)\n if param_sname:\n param_sname = '_' + param_sname.replace(' ','')\n\n param_title = [p[0] + '=' + str(p[1]) for p in list(zip(mi,mi_params))]\n param_title = ','.join(param_title)\n param_title = param_title.replace(' ','').replace(',',', ')\n\n if mi_params == ():\n mi_params = np.arange(0,len(dfc))\n\n fig,ax=plt.subplots(len(dfc.columns), 1, sharex=True)\n\n for i,dfc_method in enumerate(sorted(dfc.columns)):\n\n ax[i].plot(dfc[dfc_method][mi_params][:limitaxis],color=colormap(i),alpha=0.5,linewidth=2)\n ax[i].set_ylabel('DFC ('+ dfc_method + ')')\n ax[i].get_yaxis().set_major_locator(LinearLocator(numticks=5))\n ax[i].set_xlim(1,limitaxis)\n\n ax[-1].set_xlabel('time')\n\n plt.suptitle(param_title,fontsize=11)\n plt.tight_layout(rect=[0, 0, 1, 0.95])\n\n plt.savefig(fig_dir + '/' + fig_prefix + 'dfc-timeseries' + param_sname + '.pdf',r=600)\n\n plt.close('all')\n\n\n\ndef plot_betadfc_distribution(dfc, dat_dir, fig_dir = None, model_prefix=None, burn=1000, mi='alpha', cm='Set2'):\n\n if isinstance(mi,str):\n mi = [mi]\n\n if model_prefix:\n model_prefix += '_'\n\n if not fig_dir:\n fig_dir = './'\n\n if not os.path.exists(fig_dir):\n os.makedirs(fig_dir,exist_ok=True)\n\n params = {}\n for m in mi:\n params[m] = np.unique(dfc.index.get_level_values(m))\n 
mi,mi_num,mi_parameters,mi_param_list = dfcbenchmarker.multiindex_preproc(params,mi)\n\n colormap=dfcbenchmarker.get_discrete_colormap(cm)\n\n for sim_it, mi_params in enumerate(mi_parameters):\n\n param_sname = [p[0] + '-' + str(p[1]) for p in list(zip(mi,mi_params))]\n param_sname = '_'.join(param_sname)\n if param_sname:\n param_sname = '_' + param_sname.replace(' ','')\n\n param_title = [p[0] + '=' + str(p[1]) for p in list(zip(mi,mi_params))]\n param_title = ','.join(param_title)\n param_title = param_title.replace(' ','').replace(',',', ')\n\n if mi_params == ():\n mi_params = np.arange(0,len(dfc))\n\n fig,ax=plt.subplots(len(dfc.columns),sharex=True,sharey=True,figsize=(5,len(dfc.columns)))\n\n beta_col = []\n lines = []\n for i,method in enumerate(sorted(dfc.columns)):\n beta_dfc=dfcbenchmarker.load_bayes_model(dat_dir,model_prefix + 'method-' + method + param_sname)[0][burn:].get_values('beta')\n #Plot\n ltmp = ax[i].hist(beta_dfc,np.arange(-1,1,0.001),histtype='stepfilled',color=colormap(i),normed=True,alpha=0.4, linewidth=2,label=method)\n lines.append(ltmp)\n ax[i].set_yticklabels([])\n ax[i].set_ylabel(method)\n beta_col.append(beta_dfc)\n #ax[i].set_ylabel('Posterior Frequency (' + method + ')')\n\n beta_col = np.vstack(beta_col)\n\n xmin = beta_col.min()\n xmax = beta_col.max()\n ax[0].get_yaxis().set_major_locator(LinearLocator(numticks=4))\n ax[0].set_xlim([np.around(xmin-0.005,2),np.around(xmax+0.005,2)])\n\n ax[-1].set_xlabel('Posterior (' + r'$β$' + ')')\n\n fig.suptitle(param_title,fontsize=11)\n fig.tight_layout(rect=[0, 0, 1, 0.95])\n\n plt.savefig(fig_dir + '/' + model_prefix + 'beta-posterior' + param_sname + '.pdf',r=600)\n\n plt.close('all')\n\n\n\ndef plot_fluctuating_covariance(x, fig_dir = None, lags=10,limitaxis=500,cm = 'Set2',mi='alpha', fig_prefix=None):\n\n# if labels == None:\n# labels=np.unique(x.index.get_level_values(mi))\n\n if isinstance(mi,str):\n mi = [mi]\n\n if not fig_dir:\n fig_dir = './'\n\n if fig_prefix:\n fig_prefix += '_'\n else:\n fig_prefix = ''\n\n if not os.path.exists(fig_dir):\n os.makedirs(fig_dir,exist_ok=True)\n\n params = {}\n for m in mi:\n params[m] = np.unique(x.index.get_level_values(m))\n mi,mi_num,mi_parameters,mi_param_list = dfcbenchmarker.multiindex_preproc(params,mi)\n\n colormap=dfcbenchmarker.get_discrete_colormap(cm)\n\n for sim_it, mi_params in enumerate(mi_parameters):\n\n param_sname = [p[0] + '-' + str(p[1]) for p in list(zip(mi,mi_params))]\n param_sname = '_'.join(param_sname)\n if param_sname:\n param_sname = '_' + param_sname.replace(' ','')\n\n param_title = [p[0] + '=' + str(p[1]) for p in list(zip(mi,mi_params))]\n param_title = ','.join(param_title)\n param_title = param_title.replace(' ','').replace(',',', ')\n\n if mi_params == ():\n mi_params = np.arange(0,len(x))\n\n covariance_autocorrelation = dfcbenchmarker.autocorr(x['covariance_parameter'][mi_params],lags=lags)\n\n # Create grid\n fig = plt.figure()\n ax = []\n ax.append(plt.subplot2grid((2,2),(0,0),colspan=2))\n ax.append(plt.subplot2grid((2,2),(1,0)))\n ax.append(plt.subplot2grid((2,2),(1,1)))\n\n ax[0].plot(np.arange(1,limitaxis+1),x['covariance_parameter'][mi_params][:limitaxis],color=colormap(0),alpha=0.5,linewidth=2)\n ax[0].set_xlabel('Time')\n ax[0].set_ylabel(r'Covariance ($r_t$)')\n\n ymin = x['covariance_parameter'][mi_params][:limitaxis].min()\n ymax = x['covariance_parameter'][mi_params][:limitaxis].max()\n ax[0].axis([1,limitaxis+1,np.around(ymin-0.05,1),np.around(ymax+0.05,1)])\n\n\n 
ax[1].hist(x['covariance_parameter'][mi_params],np.arange(-.1,1,0.02),color=colormap(1),alpha=0.9,linewidth=0,histtype='stepfilled',normed='true')\n ax[1].set_xlabel('Covariance')\n ax[1].set_ylabel('Frequency')\n xmin = x['covariance_parameter'][mi_params].min()\n xmax = x['covariance_parameter'][mi_params].max()\n ax[1].axis([np.around(xmin-0.05,1),np.around(xmax+0.05,1),0,np.ceil(ax[1].get_ylim()[-1])])\n\n dfcbenchmarker.square_axis(ax[1])\n\n ax[2].plot(np.arange(0,11),covariance_autocorrelation,color=colormap(2),alpha=0.9,linewidth=2)\n ax[2].set_ylabel('Correlation (r)')\n ax[2].set_xlabel('Lag')\n ymin = covariance_autocorrelation.min()\n ymax = 1\n ax[2].axis([0,10,np.around(ymin-0.05,1),np.around(ymax+0.05,1)])\n\n dfcbenchmarker.square_axis(ax[2])\n plt.suptitle(param_title,fontsize=11)\n plt.tight_layout(rect=[0, 0, 1, 0.95])\n\n plt.savefig(fig_dir + '/' + fig_prefix + 'fluctuating-covariance' + param_sname + '.pdf',r=600)\n\n plt.close('all')\n" ]
[ [ "numpy.isnan", "matplotlib.pyplot.savefig", "matplotlib.pyplot.suptitle", "scipy.stats.spearmanr", "matplotlib.pyplot.close", "matplotlib.pyplot.subplots", "matplotlib.pyplot.figure", "matplotlib.ticker.LinearLocator", "numpy.arange", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.style.use", "matplotlib.pyplot.subplot2grid", "numpy.around", "numpy.vstack" ] ]
jinjiren/chainer-DRCN
[ "5494f4c9f37bb6ac634d659988af1c48bfd4e8dc" ]
[ "utils.py" ]
[ "\"\"\"\nContains all helpers for DRCN\n\"\"\"\nimport datetime\nimport json\nimport os\n\nimport numpy as np\nimport scipy.ndimage as ndi\n\n\ndef prepare_dir(args):\n # customize the output path\n date = datetime.datetime.now()\n date_str = date.strftime(\"%m%d%H%M%S\")\n if args.source_only:\n out_path = os.path.join(args.out, 'source_only', date_str)\n else:\n out_path = os.path.join(args.out, args.noise, date_str)\n try:\n os.makedirs(out_path)\n except OSError:\n pass\n # save all options for the experiment\n args_path = os.path.join(out_path, 'args.json')\n with open(args_path, 'w') as f:\n json.dump(vars(args), f)\n return out_path\n\n\ndef augmentation(data):\n img, label = data\n img = random_rotation(img, 20)\n img = random_shift(img, 0.2, 0.2)\n return img, label\n\n\ndef apply_transform(x,\n transform_matrix,\n channel_axis=0,\n fill_mode='nearest',\n cval=0.):\n \"\"\"Apply the image transformation specified by a matrix.\n # Arguments\n x: 2D numpy array, single image.\n transform_matrix: Numpy array specifying the geometric transformation.\n channel_axis: Index of axis for channels in the input tensor.\n fill_mode: Points outside the boundaries of the input\n are filled according to the given mode\n (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).\n cval: Value used for points outside the boundaries\n of the input if `mode='constant'`.\n # Returns\n The transformed version of the input.\n \"\"\"\n x = np.rollaxis(x, channel_axis, 0)\n final_affine_matrix = transform_matrix[:2, :2]\n final_offset = transform_matrix[:2, 2]\n channel_images = [ndi.interpolation.affine_transform(\n x_channel,\n final_affine_matrix,\n final_offset,\n order=1,\n mode=fill_mode,\n cval=cval) for x_channel in x]\n x = np.stack(channel_images, axis=0)\n x = np.rollaxis(x, 0, channel_axis + 1)\n return x\n\n\ndef random_shift(x, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0,\n fill_mode='nearest', cval=0.):\n \"\"\"Performs a random spatial shift of a Numpy image tensor.\n # Arguments\n x: Input tensor. Must be 3D.\n wrg: Width shift range, as a float fraction of the width.\n hrg: Height shift range, as a float fraction of the height.\n row_axis: Index of axis for rows in the input tensor.\n col_axis: Index of axis for columns in the input tensor.\n channel_axis: Index of axis for channels in the input tensor.\n fill_mode: Points outside the boundaries of the input\n are filled according to the given mode\n (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).\n cval: Value used for points outside the boundaries\n of the input if `mode='constant'`.\n # Returns\n Shifted Numpy image tensor.\n \"\"\"\n h, w = x.shape[row_axis], x.shape[col_axis]\n tx = np.random.uniform(-hrg, hrg) * h\n ty = np.random.uniform(-wrg, wrg) * w\n translation_matrix = np.array([[1, 0, tx],\n [0, 1, ty],\n [0, 0, 1]])\n\n transform_matrix = translation_matrix # no need to do offset\n x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)\n return x\n\n\ndef random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0,\n fill_mode='nearest', cval=0.):\n \"\"\"Performs a random rotation of a Numpy image tensor.\n # Arguments\n x: Input tensor. 
Must be 3D.\n rg: Rotation range, in degrees.\n row_axis: Index of axis for rows in the input tensor.\n col_axis: Index of axis for columns in the input tensor.\n channel_axis: Index of axis for channels in the input tensor.\n fill_mode: Points outside the boundaries of the input\n are filled according to the given mode\n (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).\n cval: Value used for points outside the boundaries\n of the input if `mode='constant'`.\n # Returns\n Rotated Numpy image tensor.\n \"\"\"\n theta = np.deg2rad(np.random.uniform(-rg, rg))\n rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],\n [np.sin(theta), np.cos(theta), 0],\n [0, 0, 1]])\n\n h, w = x.shape[row_axis], x.shape[col_axis]\n transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)\n x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)\n return x\n\n\ndef transform_matrix_offset_center(matrix, x, y):\n o_x = float(x) / 2 + 0.5\n o_y = float(y) / 2 + 0.5\n offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])\n reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])\n transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)\n return transform_matrix\n" ]
[ [ "numpy.array", "numpy.dot", "scipy.ndimage.interpolation.affine_transform", "numpy.sin", "numpy.rollaxis", "numpy.stack", "numpy.random.uniform", "numpy.cos" ] ]
kxxt/taichi
[ "15f39b79c258080f1e34fcbdc29646d9ced0a4fe" ]
[ "python/taichi/examples/features/io/export_mesh.py" ]
[ "import os\n\nimport numpy as np\n\nimport taichi as ti\n\n# A 2D grid with quad faces\n# y\n# |\n# z---/\n# x\n# 19---15---11---07---03\n# | | | | |\n# 18---14---10---06---02\n# | | | | |\n# 17---13---19---05---01\n# | | | | |\n# 16---12---08---04---00\n\nwriter = ti.PLYWriter(num_vertices=20, num_faces=12, face_type=\"quad\")\n\n# For the vertices, the only required channel is the position,\n# which can be added by passing 3 np.array x, y, z into the following function.\n\nx = np.zeros(20)\ny = np.array(list(np.arange(0, 4)) * 5)\nz = np.repeat(np.arange(5), 4)\nwriter.add_vertex_pos(x, y, z)\n\n# For faces (if any), the only required channel is the list of vertex indices that each face contains.\nindices = np.array([0, 1, 5, 4] * 12) + np.repeat(\n np.array(list(np.arange(0, 3)) * 4) + 4 * np.repeat(np.arange(4), 3), 4)\nwriter.add_faces(indices)\n\n# Add custome vertex channel, the input should include a key, a supported datatype and, the data np.array\nvdata = np.random.rand(20)\nwriter.add_vertex_channel(\"vdata1\", \"double\", vdata)\n\n# Add custome face channel\nfoo_data = np.zeros(12)\nwriter.add_face_channel(\"foo_key\", \"foo_data_type\", foo_data)\n# error! because \"foo_data_type\" is not a supported datatype. Supported ones are\n# ['char', 'uchar', 'short', 'ushort', 'int', 'uint', 'float', 'double']\n\n# PLYwriter already defines several useful helper functions for common channels\n# Add vertex color, alpha, and rgba\n# using float/double r g b alpha to reprent color, the range should be 0 to 1\nr = np.random.rand(20)\ng = np.random.rand(20)\nb = np.random.rand(20)\nalpha = np.random.rand(20)\nwriter.add_vertex_color(r, g, b)\nwriter.add_vertex_alpha(alpha)\n# equivilantly\n# add_vertex_rgba(r, g, b, alpha)\n\n# vertex normal\nwriter.add_vertex_normal(np.ones(20), np.zeros(20), np.zeros(20))\n\n# vertex index, and piece (group id)\nwriter.add_vertex_id()\nwriter.add_vertex_piece(np.ones(20))\n\n# Add face index, and piece (group id)\n# Indexing the existing faces in the writer and add this channel to face channels\nwriter.add_face_id()\n# Set all the faces is in group 1\nwriter.add_face_piece(np.ones(12))\n\nseries_prefix = \"example.ply\"\nseries_prefix_ascii = \"example_ascii.ply\"\n# Export a single file\n# use ascii so you can read the content\nwriter.export_ascii(series_prefix_ascii)\n\n# alternatively, use binary for a bit better performance\nwriter.export(series_prefix)\n\n# Export a sequence of files, ie in 10 frames\nfor frame in range(10):\n # write each frame as i.e. \"example_000000.ply\" in your current running folder\n writer.export_frame_ascii(frame, series_prefix_ascii)\n # alternatively, use binary\n writer.export_frame(frame, series_prefix)\n\n # update location/color\n x = x + 0.1 * np.random.rand(20)\n y = y + 0.1 * np.random.rand(20)\n z = z + 0.1 * np.random.rand(20)\n r = np.random.rand(20)\n g = np.random.rand(20)\n b = np.random.rand(20)\n alpha = np.random.rand(20)\n # re-fill\n writer = ti.PLYWriter(num_vertices=20, num_faces=12, face_type=\"quad\")\n writer.add_vertex_pos(x, y, z)\n writer.add_faces(indices)\n writer.add_vertex_channel(\"vdata1\", \"double\", vdata)\n writer.add_vertex_color(r, g, b)\n writer.add_vertex_alpha(alpha)\n writer.add_vertex_normal(np.ones(20), np.zeros(20), np.zeros(20))\n writer.add_vertex_id()\n writer.add_vertex_piece(np.ones(20))\n writer.add_face_id()\n writer.add_face_piece(np.ones(12))\n" ]
[ [ "numpy.array", "numpy.random.rand", "numpy.zeros", "numpy.ones", "numpy.arange" ] ]
ptillet/Fixup
[ "bdf6cb4b77717ab2ac2fa02adc4160aba06c1b56" ]
[ "fairseq/fairseq/modules/multihead_attention_zero.py" ]
[ "# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the LICENSE file in\n# the root directory of this source tree. An additional grant of patent rights\n# can be found in the PATENTS file in the same directory.\n\nimport torch\nfrom torch import nn\nfrom torch.nn import Parameter\nimport torch.nn.functional as F\n\nfrom fairseq import utils\n\nimport math\n\nclass MultiheadAttentionZero(nn.Module):\n \"\"\"Multi-headed attention.\n\n See \"Attention Is All You Need\" for more details.\n \"\"\"\n def __init__(self, embed_dim, num_heads, dropout=0., bias=True, num_blocks=12):\n super().__init__()\n self.embed_dim = embed_dim\n self.num_heads = num_heads\n self.dropout = dropout\n self.head_dim = embed_dim // num_heads\n assert self.head_dim * num_heads == self.embed_dim, \"embed_dim must be divisible by num_heads\"\n self.scaling = self.head_dim**-0.5\n self._mask = None\n\n self.in_proj_weight = Parameter(torch.Tensor(3*embed_dim, embed_dim))\n if bias:\n self.in_proj_bias = Parameter(torch.Tensor(3*embed_dim))\n else:\n self.register_parameter('in_proj_bias', None)\n self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n self.num_blocks = num_blocks\n\n self.reset_parameters()\n\n def reset_parameters(self):\n nn.init.xavier_uniform_(self.in_proj_weight)\n nn.init.xavier_uniform_(self.out_proj.weight)\n self.in_proj_weight.data *= math.pow(self.num_blocks, -1./6.)\n self.out_proj.weight.data *= 0.\n if self.in_proj_bias is not None:\n nn.init.constant_(self.in_proj_bias, 0.)\n nn.init.constant_(self.out_proj.bias, 0.)\n\n def forward(self, query, key, value, mask_future_timesteps=False,\n key_padding_mask=None, incremental_state=None,\n need_weights=True, static_kv=False):\n \"\"\"Input shape: Time x Batch x Channel\n\n Self-attention can be implemented by passing in the same arguments for\n query, key and value. Future timesteps can be masked with the\n `mask_future_timesteps` argument. 
Padding elements can be excluded from\n the key by passing a binary ByteTensor (`key_padding_mask`) with shape:\n batch x src_len, where padding elements are indicated by 1s.\n \"\"\"\n\n qkv_same = query.data_ptr() == key.data_ptr() == value.data_ptr()\n kv_same = key.data_ptr() == value.data_ptr()\n\n tgt_len, bsz, embed_dim = query.size()\n assert embed_dim == self.embed_dim\n assert list(query.size()) == [tgt_len, bsz, embed_dim]\n assert key.size() == value.size()\n\n if incremental_state is not None:\n saved_state = self._get_input_buffer(incremental_state)\n if 'prev_key' in saved_state:\n # previous time steps are cached - no need to recompute\n # key and value if they are static\n if static_kv:\n assert kv_same and not qkv_same\n key = value = None\n else:\n saved_state = None\n\n if qkv_same:\n # self-attention\n q, k, v = self.in_proj_qkv(query)\n elif kv_same:\n # encoder-decoder attention\n q = self.in_proj_q(query)\n if key is None:\n assert value is None\n # this will allow us to concat it with previous value and get\n # just get the previous value\n k = v = q.new(0)\n else:\n k, v = self.in_proj_kv(key)\n else:\n q = self.in_proj_q(query)\n k = self.in_proj_k(key)\n v = self.in_proj_v(value)\n q *= self.scaling\n\n if saved_state is not None:\n if 'prev_key' in saved_state:\n k = torch.cat((saved_state['prev_key'], k), dim=0)\n if 'prev_value' in saved_state:\n v = torch.cat((saved_state['prev_value'], v), dim=0)\n saved_state['prev_key'] = k\n saved_state['prev_value'] = v\n self._set_input_buffer(incremental_state, saved_state)\n\n src_len = k.size(0)\n\n if key_padding_mask is not None:\n assert key_padding_mask.size(0) == bsz\n assert key_padding_mask.size(1) == src_len\n\n q = q.contiguous().view(tgt_len, bsz*self.num_heads, self.head_dim).transpose(0, 1)\n k = k.contiguous().view(src_len, bsz*self.num_heads, self.head_dim).transpose(0, 1)\n v = v.contiguous().view(src_len, bsz*self.num_heads, self.head_dim).transpose(0, 1)\n\n attn_weights = torch.bmm(q, k.transpose(1, 2))\n assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]\n\n # only apply masking at training time (when incremental state is None)\n if mask_future_timesteps and incremental_state is None:\n assert query.size() == key.size(), \\\n 'mask_future_timesteps only applies to self-attention'\n attn_weights += self.buffered_mask(attn_weights).unsqueeze(0)\n if key_padding_mask is not None:\n # don't attend to padding symbols\n attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)\n attn_weights = attn_weights.float().masked_fill(\n key_padding_mask.unsqueeze(1).unsqueeze(2),\n float('-inf'),\n ).type_as(attn_weights) # FP16 support: cast to float and back\n attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)\n attn_weights = F.softmax(attn_weights.float(), dim=-1).type_as(attn_weights)\n attn_weights = F.dropout(attn_weights, p=self.dropout, training=self.training)\n\n attn = torch.bmm(attn_weights, v)\n assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]\n attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)\n attn = self.out_proj(attn)\n\n if need_weights:\n # average attention weights over heads\n attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)\n attn_weights = attn_weights.sum(dim=1) / self.num_heads\n else:\n attn_weights = None\n\n return attn, attn_weights\n\n def in_proj_qkv(self, query):\n return self._in_proj(query).chunk(3, dim=-1)\n\n def in_proj_kv(self, key):\n return 
self._in_proj(key, start=self.embed_dim).chunk(2, dim=-1)\n\n def in_proj_q(self, query):\n return self._in_proj(query, end=self.embed_dim)\n\n def in_proj_k(self, key):\n return self._in_proj(key, start=self.embed_dim, end=2*self.embed_dim)\n\n def in_proj_v(self, value):\n return self._in_proj(value, start=2*self.embed_dim)\n\n def _in_proj(self, input, start=0, end=None):\n weight = self.in_proj_weight\n bias = self.in_proj_bias\n weight = weight[start:end, :]\n if bias is not None:\n bias = bias[start:end]\n return F.linear(input, weight, bias)\n\n def buffered_mask(self, tensor):\n dim = tensor.size(-1)\n if self._mask is None:\n self._mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1)\n if self._mask.size(0) < dim:\n self._mask = torch.triu(utils.fill_with_neg_inf(self._mask.resize_(dim, dim)), 1)\n return self._mask[:dim, :dim]\n\n def reorder_incremental_state(self, incremental_state, new_order):\n \"\"\"Reorder buffered internal state (for incremental generation).\"\"\"\n input_buffer = self._get_input_buffer(incremental_state)\n if input_buffer is not None:\n for k in input_buffer.keys():\n input_buffer[k] = input_buffer[k].index_select(1, new_order)\n self._set_input_buffer(incremental_state, input_buffer)\n\n def _get_input_buffer(self, incremental_state):\n return utils.get_incremental_state(\n self,\n incremental_state,\n 'attn_state',\n ) or {}\n\n def _set_input_buffer(self, incremental_state, buffer):\n utils.set_incremental_state(\n self,\n incremental_state,\n 'attn_state',\n buffer,\n )\n" ]
[ [ "torch.nn.Linear", "torch.cat", "torch.nn.init.constant_", "torch.nn.functional.dropout", "torch.nn.init.xavier_uniform_", "torch.bmm", "torch.nn.functional.linear", "torch.Tensor" ] ]
AlbertSuarez/donework
[ "3cd11e153a20d7b4d2ba69e9536d332224c79579" ]
[ "src/gpt_2/src/interactive_conditional_samples.py" ]
[ "#!/usr/bin/env python3\nimport fire\nimport json\nimport os\nimport numpy as np\nimport tensorflow as tf\n\nfrom src import *\nimport src.gpt_2.src.model as model\nimport src.gpt_2.src.sample as sample\nimport src.gpt_2.src.encoder as encoder\n\n\ngeneratedText = \"\"\ninputText = \"\"\nrandomness = 85\n\n\ndef interact_model(model_name='117M', seed=None, nsamples=1, batch_size=1, length=None, temperature=0.85, top_k=100):\n global generatedText\n global inputText\n\n if batch_size is None:\n batch_size = 1\n assert nsamples % batch_size == 0\n\n enc = encoder.get_encoder(model_name)\n hparams = model.default_hparams()\n with open(os.path.join(MODEL_PATH, model_name, 'hparams.json')) as f:\n hparams.override_from_dict(json.load(f))\n\n if length is None:\n length = hparams.n_ctx // 2\n elif length > hparams.n_ctx:\n raise ValueError(\"Can't get samples longer than window size: %s\" % hparams.n_ctx)\n\n with tf.Session(graph=tf.Graph()) as sess:\n context = tf.placeholder(tf.int32, [batch_size, None])\n np.random.seed(seed)\n tf.set_random_seed(seed)\n global randomness\n output = sample.sample_sequence(\n hparams=hparams, \n length=length,\n context=context,\n batch_size=batch_size,\n temperature=randomness/100, top_k=100-randomness+1\n )\n\n saver = tf.train.Saver()\n ckpt = tf.train.latest_checkpoint(os.path.join(MODEL_PATH, model_name))\n saver.restore(sess, ckpt)\n\n raw_text = inputText\n context_tokens = enc.encode(raw_text)\n generated = 0\n for _ in range(nsamples // batch_size):\n out = sess.run(output, feed_dict={\n context: [context_tokens for _ in range(batch_size)]\n })[:, len(context_tokens):]\n for i in range(batch_size):\n generated += 1\n text = enc.decode(out[i])\n text = text.split(\"<|endoftext|>\", 1)[0]\n generatedText += text\n\n\nif __name__ == '__main__':\n fire.Fire(interact_model)\n\n\ndef generate_sample(input_text, rand):\n global generatedText\n global inputText\n global randomness\n randomness = rand\n inputText = input_text\n generatedText = \"\"\n fire.Fire(interact_model)\n return generatedText\n" ]
[ [ "tensorflow.set_random_seed", "numpy.random.seed", "tensorflow.Graph", "tensorflow.train.Saver", "tensorflow.placeholder" ] ]
microsoft/AdversarialGMM
[ "7a5cd51353c8a81e16c01220b71f77e4e1102add", "7a5cd51353c8a81e16c01220b71f77e4e1102add" ]
[ "mliv/dgps.py", "montecarlo/mcpy/plotting.py" ]
[ "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n\nimport numpy as np\n\n# continuously differentiable\nfn_dict_cdiff = {'2dpoly': 1, 'sigmoid': 2,\n 'sin': 3, 'frequent_sin': 4,\n '3dpoly': 7, 'linear': 8}\n# continuous but not differentiable\nfn_dict_cont = {'abs': 0, 'abs_sqrt': 5, 'rand_pw': 9,\n 'abspos': 10, 'sqrpos': 11, 'pwlinear': 15}\n\n# discontinuous\nfn_dict_disc = {'step': 6, 'band': 12, 'invband': 13,\n 'steplinear': 14}\n\n# monotone\nfn_dict_monotone = {'sigmoid': 2,\n 'step': 6, 'linear': 8,\n 'abspos': 10, 'sqrpos': 11, 'pwlinear': 15}\n\n# convex\nfn_dict_convex = {'abs': 0, '2dpoly': 1, 'linear': 8,\n 'abspos': 10, 'sqrpos': 11}\n\n# all functions\nfn_dict = {'abs': 0, '2dpoly': 1, 'sigmoid': 2,\n 'sin': 3, 'frequent_sin': 4, 'abs_sqrt': 5,\n 'step': 6, '3dpoly': 7, 'linear': 8, 'rand_pw': 9,\n 'abspos': 10, 'sqrpos': 11, 'band': 12, 'invband': 13,\n 'steplinear': 14, 'pwlinear': 15}\n\n\ndef generate_random_pw_linear(lb=-2, ub=2, n_pieces=5):\n splits = np.random.choice(np.arange(lb, ub, 0.1),\n n_pieces - 1, replace=False)\n splits.sort()\n slopes = np.random.uniform(-4, 4, size=n_pieces)\n start = []\n start.append(np.random.uniform(-1, 1))\n for t in range(n_pieces - 1):\n start.append(start[t] + slopes[t] * (splits[t] -\n (lb if t == 0 else splits[t - 1])))\n return lambda x: [start[ind] + slopes[ind] * (x - (lb if ind == 0 else splits[ind - 1])) for ind in [np.searchsorted(splits, x)]][0]\n\n\ndef get_tau_fn(func):\n def first(x):\n return x[:, [0]] if len(x.shape) == 2 else x\n # func describes the relation between response and treatment\n if func == fn_dict['abs']:\n def tau_fn(x): return np.abs(first(x))\n elif func == fn_dict['2dpoly']:\n def tau_fn(x): return -1.5 * first(x) + .9 * (first(x)**2)\n elif func == fn_dict['sigmoid']:\n def tau_fn(x): return 2 / (1 + np.exp(-2 * first(x)))\n elif func == fn_dict['sin']:\n def tau_fn(x): return np.sin(first(x))\n elif func == fn_dict['frequent_sin']:\n def tau_fn(x): return np.sin(3 * first(x))\n elif func == fn_dict['abs_sqrt']:\n def tau_fn(x): return np.sqrt(np.abs(first(x)))\n elif func == fn_dict['step']:\n def tau_fn(x): return 1. * (first(x) < 0) + 2.5 * (first(x) >= 0)\n elif func == fn_dict['3dpoly']:\n def tau_fn(x): return -1.5 * first(x) + .9 * \\\n (first(x)**2) + first(x)**3\n elif func == fn_dict['linear']:\n def tau_fn(x): return first(x)\n elif func == fn_dict['rand_pw']:\n pw_linear = generate_random_pw_linear()\n\n def tau_fn(x):\n return np.array([pw_linear(x_i) for x_i in first(x).flatten()]).reshape(-1, 1)\n elif func == fn_dict['abspos']:\n def tau_fn(x): return np.abs(first(x)) * (first(x) >= 0)\n elif func == fn_dict['sqrpos']:\n def tau_fn(x): return (first(x)**2) * (first(x) >= 0)\n elif func == fn_dict['band']:\n def tau_fn(x): return 1.0 * (first(x) >= -.75) * (first(x) <= .75)\n elif func == fn_dict['invband']:\n def tau_fn(x): return 1. - 1. * (first(x) >= -.75) * (first(x) <= .75)\n elif func == fn_dict['steplinear']:\n def tau_fn(x): return 2. 
* (first(x) >= 0) - first(x)\n elif func == fn_dict['pwlinear']:\n def tau_fn(x):\n q = first(x)\n return (q + 1) * (q <= -1) + (q - 1) * (q >= 1)\n else:\n raise NotImplementedError()\n\n return tau_fn\n\n\ndef standardize(z, p, y, fn):\n ym = y.mean()\n ystd = y.std()\n y = (y - ym) / ystd\n\n def newfn(x): return (fn(x) - ym) / ystd\n return z, p, y, newfn\n\n\ndef get_data(n_samples, n_instruments, iv_strength, tau_fn, dgp_num):\n # Construct dataset\n # z:- instruments (features included here, can be high-dimensional)\n # p :- treatments (features included here as well, can be high-dimensional)\n # y :- response (is a scalar always)\n confounder = np.random.normal(0, 1, size=(n_samples, 1))\n z = np.random.normal(0, 1, size=(n_samples, n_instruments))\n fn = tau_fn\n\n if dgp_num == 1:\n # DGP 1 in the paper\n p = 2 * z[:, [0]] * (z[:, [0]] > 0) * iv_strength \\\n + 2 * z[:, [1]] * (z[:, [1]] < 0) * iv_strength \\\n + 2 * confounder * (1 - iv_strength) + \\\n np.random.normal(0, .1, size=(n_samples, 1))\n y = fn(p) + 2 * confounder + \\\n np.random.normal(0, .1, size=(n_samples, 1))\n elif dgp_num == 2:\n # DGP 2 in the paper\n p = 2 * z[:, [0]] * iv_strength \\\n + 2 * confounder * (1 - iv_strength) + \\\n np.random.normal(0, .1, size=(n_samples, 1))\n y = fn(p) + 2 * confounder + \\\n np.random.normal(0, .1, size=(n_samples, 1))\n elif dgp_num == 3:\n # DeepIV's DGP - has feature variables as well\n # z is 3-dimensional: composed of (1) 1D z, (2) t - time unif~(0,10), and (3) s - customer type {1,...,7}\n # y is related to p and z in a complex non-linear, non separable manner\n # p is related to z again in a non-separable manner, rho is endogeneity parameter\n rho = 0.8\n psd = 3.7\n pmu = 17.779\n ysd = 158.\n ymu = -292.1\n z_1 = np.random.normal(0, 1, size=(n_samples, 1))\n v = np.random.normal(0, 1, size=(n_samples, 1))\n t = np.random.uniform(0, 10, size=(n_samples, 1))\n s = np.random.randint(1, 8, size=(n_samples, 1))\n e = rho * v + \\\n np.random.normal(0, np.sqrt(1 - rho**2), size=(n_samples, 1))\n\n def psi(t): return 2 * (np.power(t - 5, 4) / 600 +\n np.exp(-4 * np.power(t - 5, 2)) + t / 10 - 2)\n p = 25 + (z_1 + 3) * psi(t) + v\n p = (p - pmu) / psd\n g = (10 + p) * s * psi(t) - 2 * p + e\n y = (g - ymu) / ysd\n z = np.hstack((z_1, s, t))\n p = np.hstack((p, s, t))\n\n def fn(p): return ((10 + p[:, 0]) * p[:, 1]\n * psi(p[:, 2]) - 2 * p[:, 0] - ymu) / ysd\n elif dgp_num == 4:\n # Many weak Instruments DGP - n_instruments can be very large\n z = np.random.normal(0.5, 1, size=(n_samples, n_instruments))\n p = np.amin(z, axis=1).reshape(-1, 1) * iv_strength + confounder * \\\n (1 - iv_strength) + np.random.normal(0, 0.1, size=(n_samples, 1))\n y = fn(p) + 2 * confounder + \\\n np.random.normal(0, 0.1, size=(n_samples, 1))\n else:\n # Here we have equal number of treatments and instruments and each\n # instrument affects a separate treatment. 
Only the first treatment\n # matters for the outcome.\n z = np.random.normal(0, 2, size=(n_samples, n_instruments))\n U = np.random.normal(0, 2, size=(n_samples, 1))\n delta = np.random.normal(0, .1, size=(n_samples, 1))\n zeta = np.random.normal(0, .1, size=(n_samples, 1))\n p = iv_strength * z + (1 - iv_strength) * U + delta\n y = fn(p) + U + zeta\n\n return standardize(z, p, y, fn)\n", "import os\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom scipy.interpolate import griddata\nplt.style.use('ggplot')\nfrom mcpy.utils import filesafe\nimport mcpy.metrics\nimport itertools\n\n\ndef plot_subset_param_histograms(param_estimates, metric_results, config, subset):\n for dgp_name, pdgp in param_estimates.items():\n n_methods = len(list(pdgp.keys()))\n n_params = config['dgp_opts']['kappa_gamma'] + 1\n plt.figure(figsize=(4 * n_params, 2 * n_methods))\n for it, m_name in enumerate(pdgp.keys()):\n for inner_it, i in enumerate(subset):\n plt.subplot(n_methods, n_params, it * n_params + inner_it + 1)\n plt.hist(pdgp[m_name][:, i])\n plt.title(\"{}[{}]. $\\\\mu$: {:.2f}, $\\\\sigma$: {:.2f}\".format(\n m_name, i, np.mean(pdgp[m_name][:, i]), np.std(pdgp[m_name][:, i])))\n plt.tight_layout()\n plt.savefig(os.path.join(config['target_dir'], 'dist_dgp_{}_{}.png'.format(\n dgp_name, config['param_str'])), dpi=300)\n plt.close()\n return\n\n\ndef plot_param_histograms(param_estimates, metric_results, config):\n for dgp_name, pdgp in param_estimates.items():\n n_methods = len(list(pdgp.keys()))\n n_params = next(iter(pdgp.values())).shape[1]\n plt.figure(figsize=(4 * n_params, 2 * n_methods))\n for it, m_name in enumerate(pdgp.keys()):\n for i in range(pdgp[m_name].shape[1]):\n plt.subplot(n_methods, n_params, it * n_params + i + 1)\n plt.hist(pdgp[m_name][:, i])\n plt.title(\"{}[{}]. 
$\\\\mu$: {:.2f}, $\\\\sigma$: {:.2f}\".format(\n m_name, i, np.mean(pdgp[m_name][:, i]), np.std(pdgp[m_name][:, i])))\n plt.tight_layout()\n plt.savefig(os.path.join(config['target_dir'], 'dist_dgp_{}_{}.png'.format(\n dgp_name, config['param_str'])), dpi=300)\n plt.close()\n return\n\n\ndef plot_metrics(param_estimates, metric_results, config):\n for dgp_name, mdgp in metric_results.items():\n n_methods = len(list(mdgp.keys()))\n for metric_name in next(iter(mdgp.values())).keys():\n plt.figure(figsize=(1.5 * n_methods, 2.5))\n plt.violinplot([mdgp[method_name][metric_name]\n for method_name in mdgp.keys()], showmedians=True)\n plt.xticks(np.arange(1, n_methods + 1), list(mdgp.keys()))\n plt.ylabel(metric_name)\n plt.tight_layout()\n plt.savefig(os.path.join(config['target_dir'], '{}_dgp_{}_{}.png'.format(\n filesafe(metric_name), dgp_name, config['param_str'])), dpi=300)\n plt.close()\n return\n\n\ndef plot_metric_comparisons(param_estimates, metric_results, config):\n for dgp_name, mdgp in metric_results.items():\n n_methods = len(list(mdgp.keys()))\n for metric_name in next(iter(mdgp.values())).keys():\n plt.figure(figsize=(1.5 * n_methods, 2.5))\n plt.violinplot([mdgp[method_name][metric_name] - mdgp[config['proposed_method']][metric_name]\n for method_name in mdgp.keys() if method_name != config['proposed_method']], showmedians=True)\n plt.xticks(np.arange(1, n_methods), [method_name for method_name in mdgp.keys(\n ) if method_name != config['proposed_method']])\n plt.ylabel('decrease in {}'.format(metric_name))\n plt.tight_layout()\n plt.savefig(os.path.join(config['target_dir'], '{}_decrease_dgp_{}_{}.png'.format(\n filesafe(metric_name), dgp_name, config['param_str'])), dpi=300)\n plt.close()\n return\n\n\ndef instance_plot(plot_name, param_estimates, metric_results, config, plot_config):\n methods = plot_config['methods'] if 'methods' in plot_config else list(\n config['methods'].keys())\n metrics = plot_config['metrics'] if 'metrics' in plot_config else list(\n config['metrics'].keys())\n dgps = plot_config['dgps'] if 'dgps' in plot_config else list(\n config['dgps'].keys())\n metric_transforms = plot_config['metric_transforms'] if 'metric_transforms' in plot_config else {\n '': mcpy.metrics.transform_identity}\n\n for tr_name, tr_fn in metric_transforms.items():\n for dgp_name in dgps:\n for metric_name in metrics:\n plt.figure(figsize=(1.5 * len(methods), 2.5))\n plt.violinplot([tr_fn(metric_results, dgp_name, method_name, metric_name, config)\n for method_name in methods], showmedians=True)\n plt.xticks(np.arange(1, len(methods) + 1), methods)\n plt.ylabel('{}({})'.format(tr_name, metric_name))\n plt.tight_layout()\n plt.savefig(os.path.join(config['target_dir'], '{}_{}_{}_dgp_{}_{}.png'.format(\n plot_name, filesafe(metric_name), tr_name, dgp_name, config['param_str'])), dpi=300)\n plt.close()\n return\n\n\ndef _select_config_keys(sweep_keys, select_vals, filter_vals):\n\n if select_vals is not None:\n mask_select = [all(any((p, v) in key for v in vlist)\n for p, vlist in select_vals.items()) for key in sweep_keys]\n else:\n mask_select = [True] * len(sweep_keys)\n if filter_vals is not None:\n mask_filter = [all(all((p, v) not in key for v in vlist)\n for p, vlist in filter_vals.items()) for key in sweep_keys]\n else:\n mask_filter = [True] * len(sweep_keys)\n mask = [ms and mf for ms, mf in zip(mask_select, mask_filter)]\n return mask\n\n\ndef sweep_plot_marginal_transformed_metric(transform_fn, transform_name, dgps, methods, metrics, plot_name, sweep_keys, sweep_params, 
sweep_metrics, config, param_subset={}, select_vals={}, filter_vals={}):\n\n    sweeps = {}\n    for dgp_key, dgp_val in config['dgp_opts'].items():\n        if hasattr(dgp_val, \"__len__\"):\n            sweeps[dgp_key] = dgp_val\n\n    mask = _select_config_keys(sweep_keys, select_vals, filter_vals)\n    if np.sum(mask) == 0:\n        print(\"Filtering resulted in no valid configurations!\")\n        return\n\n    for dgp in dgps:\n        for metric in metrics:\n            for param, param_vals in sweeps.items():\n                if param_subset is not None and param not in param_subset:\n                    continue\n                plt.figure(figsize=(5, 3))\n                for method in methods:\n                    plotted_vals = []\n                    medians = []\n                    mins = []\n                    maxs = []\n                    for val in param_vals:\n                        subset = [transform_fn(metrics, dgp, method, metric, config) for key, metrics, ms\n                                  in zip(sweep_keys, sweep_metrics, mask)\n                                  if (param, val) in key and ms]\n                        if len(subset) > 0:\n                            grouped_results = np.concatenate(subset)\n                            # track only parameter values that survived filtering, so the x and y\n                            # vectors handed to plt.plot below always have matching lengths\n                            plotted_vals.append(val)\n                            medians.append(np.median(grouped_results))\n                            mins.append(np.percentile(grouped_results, 5))\n                            maxs.append(np.percentile(grouped_results, 95))\n                    line = plt.plot(plotted_vals, medians, label=method)\n                    plt.fill_between(plotted_vals, maxs, mins,\n                                     alpha=0.3, color=line[0].get_color())\n                plt.legend()\n                plt.xlabel(param)\n                plt.ylabel('{}({})'.format(transform_name, metric))\n                plt.tight_layout()\n                plt.savefig(os.path.join(config['target_dir'], '{}_{}_{}_dgp_{}_growing_{}_{}.png'.format(\n                    plot_name, filesafe(metric), transform_name, dgp, filesafe(param), config['param_str'])), dpi=300)\n                plt.close()\n\n            for param1, param2 in itertools.combinations(sweeps.keys(), 2):\n                if param_subset is not None and (param1, param2) not in param_subset and (param2, param1) not in param_subset:\n                    continue\n                x, y, z = [], [], []\n                for method_it, method in enumerate(methods):\n                    x.append([]), y.append([]), z.append([])\n                    for val1, val2 in itertools.product(*[sweeps[param1], sweeps[param2]]):\n                        subset = [transform_fn(metrics, dgp, method, metric, config) for key, metrics, ms\n                                  in zip(sweep_keys, sweep_metrics, mask)\n                                  if (param1, val1) in key and (param2, val2) in key and ms]\n                        if len(subset) > 0:\n                            grouped_results = np.concatenate(subset)\n                            x[method_it].append(val1)\n                            y[method_it].append(val2)\n                            z[method_it].append(np.median(grouped_results))\n                vmin = np.min(z)\n                vmax = np.max(z)\n                fig, axes = plt.subplots(nrows=1, ncols=len(\n                    methods), figsize=(4 * len(methods), 3))\n                if not hasattr(axes, '__len__'):\n                    axes = [axes]\n                for method_it, (method, ax) in enumerate(zip(methods, axes)):\n                    xi = np.linspace(np.min(x[method_it]), np.max(\n                        x[method_it]), 5 * len(x[method_it]))\n                    yi = np.linspace(np.min(y[method_it]), np.max(\n                        y[method_it]), 5 * len(y[method_it]))\n                    xi, yi = np.meshgrid(xi, yi)\n                    zi = griddata((np.array(x[method_it]), np.array(\n                        y[method_it])), np.array(z[method_it]), (xi, yi), method='linear')\n                    ax.contour(xi, yi, zi, 15, linewidths=0.2, colors='k')\n                    im = ax.pcolormesh(\n                        xi, yi, zi, cmap=plt.cm.Reds, vmin=vmin, vmax=vmax)\n                    ax.contourf(xi, yi, zi, 15, cmap=plt.cm.Reds,\n                                vmin=vmin, vmax=vmax)\n                    ax.scatter(np.array(x[method_it]), np.array(\n                        y[method_it]), alpha=0.5, s=3, c='b')\n                    ax.set_xlabel(param1)\n                    ax.set_ylabel(param2)\n                    ax.set_title(method)\n\n                plt.tight_layout()\n                fig.subplots_adjust(right=0.8)\n                cbar_ax = fig.add_axes([0.81, 0.15, 0.02, 0.72])\n                cbar = fig.colorbar(im, cax=cbar_ax)\n                cbar.ax.tick_params(labelsize=8)\n                cbar.ax.set_ylabel(\n                    'median {}({})'.format(transform_name, metric))\n                plt.savefig(os.path.join(config['target_dir'], '{}_{}_{}_dgp_{}_growing_{}_and_{}_{}.png'.format(\n                    plot_name, filesafe(metric), transform_name, dgp, 
filesafe(param1), filesafe(param2), config['param_str'])), dpi=300)\n plt.close()\n\n\ndef sweep_plot(plot_name, sweep_keys, sweep_params, sweep_metrics, config, plot_config):\n param_subset = plot_config['varying_params'] if 'varying_params' in plot_config else None\n select_vals = plot_config['select_vals'] if 'select_vals' in plot_config else {\n }\n filter_vals = plot_config['filter_vals'] if 'filter_vals' in plot_config else {\n }\n methods = plot_config['methods'] if 'methods' in plot_config else list(\n config['methods'].keys())\n metrics = plot_config['metrics'] if 'metrics' in plot_config else list(\n config['metrics'].keys())\n dgps = plot_config['dgps'] if 'dgps' in plot_config else list(\n config['dgps'].keys())\n metric_transforms = plot_config['metric_transforms'] if 'metric_transforms' in plot_config else {\n '': mcpy.metrics.transform_identity}\n\n for tr_name, tr_fn in metric_transforms.items():\n sweep_plot_marginal_transformed_metric(tr_fn, tr_name, dgps, methods, metrics, plot_name, sweep_keys, sweep_params, sweep_metrics, config,\n param_subset=param_subset, select_vals=select_vals, filter_vals=filter_vals)\n" ]
[ [ "numpy.random.normal", "numpy.searchsorted", "numpy.amin", "numpy.random.uniform", "numpy.random.randint", "numpy.arange", "numpy.sqrt", "numpy.power", "numpy.hstack" ], [ "numpy.median", "numpy.min", "numpy.mean", "numpy.max", "numpy.concatenate", "numpy.arange", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.subplot", "matplotlib.use", "numpy.array", "numpy.percentile", "matplotlib.pyplot.close", "matplotlib.pyplot.figure", "numpy.std", "matplotlib.pyplot.hist", "matplotlib.pyplot.style.use", "numpy.sum", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.pyplot.ylabel", "numpy.meshgrid" ] ]
shalei120/HopfieldLM
[ "3fba4ee05bfc7f5041593f95457ffdf0bdc094a3" ]
[ "modules/functional.py" ]
[ "import torch\nimport torch.nn as nn\n\nfrom torch.tensor import Tensor\nfrom typing import Optional, Tuple, Union\n\n\ndef hopfield_core_forward(query, # type: Tensor\n key, # type: Tensor\n value, # type: Tensor\n embed_dim_to_check, # type: int\n num_heads, # type: int\n in_proj_weight, # type: Optional[Tensor]\n in_proj_bias, # type: Optional[Tensor]\n bias_k, # type: Optional[Tensor]\n bias_v, # type: Optional[Tensor]\n add_zero_attn, # type: bool\n dropout_p, # type: float\n out_proj_weight, # type: Tensor\n out_proj_bias, # type: Tensor\n training=True, # type: bool\n key_padding_mask=None, # type: Optional[Tensor]\n need_weights=True, # type: bool\n attn_mask=None, # type: Optional[Tensor]\n use_separate_proj_weight=False, # type: bool\n q_proj_weight=None, # type: Optional[Tensor]\n k_proj_weight=None, # type: Optional[Tensor]\n v_proj_weight=None, # type: Optional[Tensor]\n static_k=None, # type: Optional[Tensor]\n static_v=None, # type: Optional[Tensor]\n\n key_as_static=False, # type: bool\n query_as_static=False, # type: bool\n value_as_static=False, # type: bool\n value_as_connected=False, # type: bool\n normalize_pattern=False, # type: bool\n p_norm_weight=None, # type: Optional[Tensor]\n p_norm_bias=None, # type: Optional[Tensor]\n head_dim=None, # type: Optional[int]\n pattern_dim=None, # type: Optional[int]\n scaling=None, # type: Optional[Union[float, Tensor]]\n update_steps_max=0, # type: Optional[Union[int, Tensor]]\n update_steps_eps=1e-4, # type: Union[float, Tensor]\n return_raw_associations=False, # type: bool\n return_projected_patterns=False # type: bool\n ):\n # type: (...) -> Tuple[Tensor, Optional[Tensor]]\n r\"\"\"\n Args:\n query, key, value: map a query and a set of key-value pairs to an output.\n See \"Attention Is All You Need\" for more details.\n See \"Hopfield Networks is All You Need\" for more details in the setting of Hopfield networks.\n embed_dim_to_check: total dimension of the model (in case of default head dimension).\n num_heads: parallel attention heads.\n in_proj_weight, in_proj_bias: input projection weight and bias.\n bias_k, bias_v: bias of the key and value sequences to be added at dim=0.\n add_zero_attn: add a new batch of zeros to the key and\n value sequences at dim=1.\n dropout_p: probability of an element to be zeroed.\n out_proj_weight, out_proj_bias: the output projection weight and bias.\n training: apply dropout if is ``True``.\n key_padding_mask: if provided, specified padding elements in the key will\n be ignored by the attention. This is an binary mask. When the value is True,\n the corresponding value on the attention layer will be filled with -inf.\n need_weights: output attn_output_weights.\n attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all\n the batches while a 3D mask allows to specify a different mask for the entries of each batch.\n use_separate_proj_weight: the function accept the proj. weights for query, key,\n and value in different forms. 
If false, in_proj_weight will be used, which is\n a combination of q_proj_weight, k_proj_weight, v_proj_weight.\n q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.\n static_k, static_v: static key and value used for attention operators.\n\n key_as_static: interpret specified key as being static.\n query_as_static: interpret specified key as being static.\n value_as_static: interpret specified key as being static.\n value_as_connected: connect value projection with key projection.\n normalize_pattern: enable normalization of patterns.\n p_norm_weight, p_norm_bias: pattern normalization weight and bias.\n head_dim: dimensionality of each head.\n pattern_dim: dimensionality of each projected value input.\n scaling: scaling of association heads, often represented as beta (one entry per head).\n update_steps_max: maximum count of association update steps (None equals to infinity).\n update_steps_eps: minimum difference threshold between two consecutive association update steps.\n return_raw_associations: return raw association (softmax) values, unmodified.\n return_projected_patterns: return pattern projection values, unmodified.\n\n Shape:\n Inputs:\n - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is\n the embedding dimension.\n - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is\n the embedding dimension.\n - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is\n the embedding dimension.\n - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.\n If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions\n will be unchanged. If a BoolTensor is provided, the positions with the\n value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.\n - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.\n 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,\n S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked\n positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend\n while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``\n are not allowed to attend while ``False`` values will be unchanged. 
If a FloatTensor\n is provided, it will be added to the attention weight.\n - static_k: :math:`(N*num_heads, S, head_dim)`, where S is the source sequence length, N is the batch size.\n - static_v: :math:`(N*num_heads, S, head_dim)`, where S is the source sequence length, N is the batch size.\n\n - scaling: :math:`(num_heads,)`, where num_heads is the amount of heads.\n\n Outputs:\n - attn_output: :math:`(L, N, E)`, where L is the target sequence length, N is the batch size,\n E is the embedding dimension.\n - attn_output_weights: :math:`(N, L, S)`, where N is the batch size,\n L is the target sequence length, S is the source sequence length.\n - attn_raw: :math:``(N, num_heads, L, S)`, where N is the batch size,\n L is the target sequence length, S is the source sequence length.\n \"\"\"\n if not torch.jit.is_scripting():\n tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v,\n out_proj_weight, out_proj_bias)\n if any([type(t) is not Tensor for t in tens_ops]) and nn.functional.has_torch_function(tens_ops):\n return nn.functional.handle_torch_function(\n hopfield_core_forward, tens_ops, query, key, value,\n embed_dim_to_check, num_heads, in_proj_weight, in_proj_bias,\n bias_k, bias_v, add_zero_attn, dropout_p, out_proj_weight,\n out_proj_bias, training=training, key_padding_mask=key_padding_mask,\n need_weights=need_weights, attn_mask=attn_mask,\n use_separate_proj_weight=use_separate_proj_weight,\n q_proj_weight=q_proj_weight, k_proj_weight=k_proj_weight,\n v_proj_weight=v_proj_weight, static_k=static_k, static_v=static_v,\n key_as_static=key_as_static, query_as_static=query_as_static,\n value_as_static=value_as_static, value_as_connected=value_as_connected,\n normalize_pattern=normalize_pattern, p_norm_weight=p_norm_weight, p_norm_bias=p_norm_bias,\n head_dim=head_dim, pattern_dim=pattern_dim, scaling=scaling, update_steps_max=update_steps_max,\n update_steps_eps=update_steps_eps, return_raw_associations=return_raw_associations)\n tgt_len, bsz, embed_dim = query.shape[0], value.shape[1], query.shape[2]\n assert embed_dim == embed_dim_to_check\n # allow MHA to have different sizes for the feature dimension\n assert key.size(0) == value.size(0) and key.size(1) == value.size(1)\n\n assert (scaling is None) or (type(scaling) in (float, torch.Tensor))\n if type(scaling) == torch.Tensor:\n assert scaling.ndimension() == 1 and scaling.shape[0] == num_heads, \"only one entry per head.\"\n\n assert (update_steps_max is None) or (type(update_steps_max) in (int, torch.Tensor))\n if type(update_steps_max) == torch.Tensor:\n assert update_steps_max.ndimension() == 1 and update_steps_max.shape[0] == num_heads, \"only one entry per head.\"\n elif type(update_steps_max) == int:\n update_steps_max = torch.tensor([update_steps_max] * num_heads, dtype=torch.int32, device=query.device)\n elif update_steps_max is None:\n update_steps_max = -torch.ones(size=(num_heads,), dtype=torch.int32, device=query.device)\n\n assert type(update_steps_eps) in (float, torch.Tensor)\n if type(update_steps_eps) == torch.Tensor:\n assert update_steps_eps.ndimension() == 1 and update_steps_eps.shape[0] == num_heads, \"only one entry per head.\"\n assert (update_steps_eps <= 0.0).sum() == 0, \"only positive thresholds allowed.\"\n update_steps_eps = update_steps_eps.to(device=query.device)\n elif type(update_steps_eps) == float:\n assert update_steps_eps > 0, \"only positive thresholds allowed.\"\n update_steps_eps = torch.tensor([update_steps_eps] * num_heads, dtype=query.dtype, 
device=query.device)\n\n # Adapt dimensionality of each each.\n if head_dim is None:\n head_dim = embed_dim // num_heads\n assert head_dim * num_heads == embed_dim, r'embed_dim must be divisible by num_heads.'\n hopfield_dim = num_heads * head_dim\n\n # Adapt dimensionality of each value projection.\n if pattern_dim is None:\n pattern_dim = head_dim\n assert (not value_as_connected) or (pattern_dim == head_dim)\n\n q, k, v, xi, src_len = None, None, None, None, 0\n update_step, xi_old, xi_difference_norm = 0, None, float(r'+inf')\n update_active_heads = torch.tensor([[[True]]] * num_heads * bsz, device=query.device)\n assert update_active_heads.any(), \"at least one head needs to be active.\"\n\n ####################################################################################################################\n # BEGIN HOPFIELD UPDATE ITERATION #\n ####################################################################################################################\n\n while update_active_heads.any():\n\n # The query is already projected into the \"Hopfield\" space at \"update_step\" equals 0.\n # No more projection necessary if \"update_step\" greater than 0.\n if update_step == 0:\n if not use_separate_proj_weight:\n\n if torch.equal(query, key) and torch.equal(key, value) and not (\n key_as_static or query_as_static or value_as_static):\n # self-attention\n q, k, v = nn.functional.linear(query, in_proj_weight, in_proj_bias).chunk(3, dim=-1)\n\n elif torch.equal(key, value) and not (key_as_static or value_as_static):\n # encoder-decoder attention\n _start, _end = 0, hopfield_dim\n if query_as_static:\n q = query.repeat(1, num_heads, 1)\n else:\n # This is inline in_proj function with in_proj_weight and in_proj_bias\n _b = in_proj_bias\n _w = in_proj_weight[_start:_end, :]\n if _b is not None:\n _b = _b[_start:_end]\n q = nn.functional.linear(query, _w, _b)\n _start = hopfield_dim\n _end = None\n\n if key is None:\n assert value is None\n k = None\n v = None\n else:\n\n # This is inline in_proj function with in_proj_weight and in_proj_bias\n _b = in_proj_bias\n _w = in_proj_weight[_start:_end, :]\n if _b is not None:\n _b = _b[_start:_end]\n k, v = nn.functional.linear(key, _w, _b).chunk(2, dim=-1)\n\n else:\n _start, _end = 0, hopfield_dim\n if query_as_static:\n q = query.repeat(1, num_heads, 1)\n else:\n # This is inline in_proj function with in_proj_weight and in_proj_bias\n _b = in_proj_bias\n _w = in_proj_weight[_start:_end, :]\n if _b is not None:\n _b = _b[_start:_end]\n q = nn.functional.linear(query, _w, _b)\n _start += hopfield_dim\n _end += hopfield_dim\n\n if key_as_static:\n k = key.repeat(1, num_heads, 1)\n else:\n # This is inline in_proj function with in_proj_weight and in_proj_bias\n _b = in_proj_bias\n _w = in_proj_weight[_start:_end, :]\n if _b is not None:\n _b = _b[_start:_end]\n k = nn.functional.linear(key, _w, _b)\n _start += hopfield_dim\n _end += hopfield_dim\n\n if value_as_static:\n v = value.repeat(1, num_heads, 1)\n else:\n # This is inline in_proj function with in_proj_weight and in_proj_bias\n _b = in_proj_bias\n _w = in_proj_weight[_start:_end, :]\n if _b is not None:\n _b = _b[_start:_end]\n v = nn.functional.linear(value, _w, _b)\n else:\n _start, _end = 0, hopfield_dim\n if query_as_static:\n q = query.repeat(1, num_heads, 1)\n else:\n q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight)\n len1, len2 = q_proj_weight_non_opt.size()\n assert len1 == hopfield_dim and len2 == query.size(-1)\n if in_proj_bias is not None:\n q = 
nn.functional.linear(query, q_proj_weight_non_opt, in_proj_bias[_start:_end])\n _start += hopfield_dim\n _end += hopfield_dim\n else:\n q = nn.functional.linear(query, q_proj_weight_non_opt, in_proj_bias)\n\n v = value\n if key_as_static:\n k = key.repeat(1, num_heads, 1)\n else:\n k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight)\n len1, len2 = k_proj_weight_non_opt.size()\n assert len1 == hopfield_dim and len2 == key.size(-1)\n\n _bias = None if in_proj_bias is None else in_proj_bias[_start:_end]\n k = nn.functional.linear(key, k_proj_weight_non_opt, _bias)\n if value_as_connected:\n v = nn.functional.linear(v, k_proj_weight_non_opt, _bias)\n _start += hopfield_dim\n _end += num_heads * pattern_dim\n\n if value_as_static:\n if not (value_as_connected or key_as_static):\n v = v.repeat(1, num_heads, 1)\n else:\n v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight)\n len1, len2 = v_proj_weight_non_opt.size()\n assert len1 == (num_heads * pattern_dim) and len2 == v.size(-1)\n if in_proj_bias is not None:\n v = nn.functional.linear(v, v_proj_weight_non_opt, in_proj_bias[_start:])\n else:\n v = nn.functional.linear(v, v_proj_weight_non_opt, in_proj_bias)\n\n if attn_mask is not None:\n assert attn_mask.dtype == torch.float32 or attn_mask.dtype == torch.float64 or \\\n attn_mask.dtype == torch.float16 or attn_mask.dtype == torch.uint8 or \\\n attn_mask.dtype == torch.bool, \\\n 'Only float, byte, and bool types are supported for attn_mask, not {}'.format(attn_mask.dtype)\n if attn_mask.dtype == torch.uint8:\n warnings.warn(\n \"Byte tensor for attn_mask in nn.HopfieldCore is deprecated. Use bool tensor instead.\")\n attn_mask = attn_mask.to(torch.bool)\n\n if attn_mask.dim() == 2:\n attn_mask = attn_mask.unsqueeze(0)\n # print(attn_mask.size(), [1, query.size(0), key.size(0)])\n if list(attn_mask.size()) != [1, query.size(0), key.size(0)]:\n raise RuntimeError('The size of the 2D attn_mask is not correct.')\n elif attn_mask.dim() == 3:\n if list(attn_mask.size()) != [bsz * num_heads, query.size(0), key.size(0)]:\n raise RuntimeError('The size of the 3D attn_mask is not correct.')\n else:\n raise RuntimeError(\"attn_mask's dimension {} is not supported\".format(attn_mask.dim()))\n # attn_mask's dim is 3 now.\n\n # Optionally normalize patterns.\n if normalize_pattern:\n q = torch.nn.functional.layer_norm(\n input=q.reshape(shape=(-1, head_dim)), normalized_shape=(head_dim,),\n weight=p_norm_weight, bias=p_norm_bias).reshape(shape=q.shape)\n k = torch.nn.functional.layer_norm(\n input=k.reshape(shape=(-1, head_dim)), normalized_shape=(head_dim,),\n weight=p_norm_weight, bias=p_norm_bias).reshape(shape=k.shape)\n\n else:\n active_xi = xi.masked_select(mask=update_active_heads).view(size=(-1, *xi.shape[1:]))\n active_k = k.masked_select(mask=update_active_heads).view(size=(-1, *k.shape[1:]))\n q = torch.masked_scatter(input=q, mask=update_active_heads, source=torch.bmm(active_xi, active_k))\n\n # Optionally scale association heads (each head separately).\n if type(scaling) == float:\n q = q * scaling\n elif type(scaling) == torch.Tensor:\n q = q * scaling.view(1, 1, -1).repeat(repeats=(1, 1, q.shape[2] // scaling.shape[0]))\n\n if update_step == 0:\n # convert ByteTensor key_padding_mask to bool\n if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8:\n warnings.warn(\n \"Byte tensor for key_padding_mask in nn.HopfieldCore is deprecated. 
Use bool tensor instead.\")\n                key_padding_mask = key_padding_mask.to(torch.bool)\n\n            if bias_k is not None and bias_v is not None:\n                # the static flags are bools and are never None; testing truthiness keeps this branch reachable\n                if static_k is None and static_v is None and not key_as_static and not value_as_static:\n                    k = torch.cat([k, bias_k.repeat(1, bsz, 1)])\n                    v = torch.cat([v, bias_v.repeat(1, bsz, 1)])\n                    if attn_mask is not None:\n                        attn_mask = nn.functional.pad(attn_mask, [0, 1])\n                    if key_padding_mask is not None:\n                        key_padding_mask = nn.functional.pad(key_padding_mask, [0, 1])\n                else:\n                    assert static_k is None, \"bias cannot be added to static key.\"\n                    assert static_v is None, \"bias cannot be added to static value.\"\n                    assert not key_as_static, \"bias cannot be added to static key.\"\n                    assert not value_as_static, \"bias cannot be added to static value.\"\n            else:\n                assert bias_k is None\n                assert bias_v is None\n\n            q = q.contiguous().view(tgt_len, -1, head_dim).transpose(0, 1)\n            if k is not None:\n                k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)\n            if v is not None:\n                v = v.contiguous().view(v.shape[0], bsz * num_heads, -1).transpose(0, 1)\n\n            if static_k is not None:\n                assert static_k.size(0) == bsz * num_heads\n                assert static_k.size(2) == head_dim\n                k = static_k\n\n            if static_v is not None:\n                assert static_v.size(0) == bsz * num_heads\n                assert static_v.size(2) == pattern_dim\n                v = static_v\n\n            src_len = k.size(1)\n\n            if key_padding_mask is not None:\n                assert key_padding_mask.size(0) == bsz\n                assert key_padding_mask.size(1) == src_len\n\n            if add_zero_attn:\n                src_len += 1\n                k = torch.cat([k, torch.zeros((k.size(0), 1) + k.size()[2:], dtype=k.dtype, device=k.device)], dim=1)\n                v = torch.cat([v, torch.zeros((v.size(0), 1) + v.size()[2:], dtype=v.dtype, device=v.device)], dim=1)\n                if attn_mask is not None:\n                    attn_mask = nn.functional.pad(attn_mask, [0, 1])\n                if key_padding_mask is not None:\n                    key_padding_mask = nn.functional.pad(key_padding_mask, [0, 1])\n\n        attn_output_weights = torch.bmm(q, k.transpose(1, 2))\n        assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len]\n\n        if attn_mask is not None:\n            if attn_mask.dtype == torch.bool:\n                attn_output_weights.masked_fill_(attn_mask, float('-inf'))\n            else:\n                attn_output_weights += attn_mask\n\n        if key_padding_mask is not None:\n            attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)\n            attn_output_weights = attn_output_weights.masked_fill(\n                key_padding_mask.unsqueeze(1).unsqueeze(2),\n                float('-inf'),\n            )\n            attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len)\n\n        # Compute new xi for Hopfield retrieve iterations.\n        if xi is None:\n            xi = nn.functional.softmax(attn_output_weights, dim=-1)\n        else:\n            xi = torch.masked_scatter(input=xi, mask=update_active_heads, source=nn.functional.softmax(\n                attn_output_weights.masked_select(mask=update_active_heads).view(size=(-1, *xi.shape[1:])), dim=-1))\n\n        # Compute threshold-based stopping criterion for Hopfield retrieve iterations.\n        with torch.no_grad():\n            xi_active = xi.view(size=(bsz, num_heads, tgt_len, src_len))\n            update_active_heads = (update_step < update_steps_max) | (update_steps_max < 0)\n            if xi_old is not None:\n                update_active_heads &= ((xi_old - xi_active).norm(p=2, dim=(2, 3)).max(axis=0)[0]) > update_steps_eps\n            update_active_heads = update_active_heads.unsqueeze(dim=1).unsqueeze(dim=2).repeat(repeats=(bsz, 1, 1))\n            xi_old = xi_active\n        update_step += 1\n\n    ####################################################################################################################\n    # END 
HOPFIELD UPDATE ITERATION #\n ####################################################################################################################\n\n attn_output_weights = nn.functional.dropout(xi, p=dropout_p, training=training)\n attn_output = torch.bmm(attn_output_weights, v)\n assert list(attn_output.shape[:2]) == [bsz * num_heads, tgt_len]\n attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, -1)\n if out_proj_weight is not None:\n assert attn_output.shape[2] == num_heads * pattern_dim\n attn_output = nn.functional.linear(attn_output, out_proj_weight, out_proj_bias)\n\n xi = xi.view(bsz, num_heads, tgt_len, src_len) if return_raw_associations else None\n v = v.view(bsz, num_heads, src_len, -1) if return_projected_patterns else None\n if need_weights:\n # average attention weights over heads\n attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)\n return attn_output, attn_output_weights.sum(dim=1) / num_heads, xi, v\n else:\n return attn_output, None, xi, v\n" ]
[ [ "torch.jit._unwrap_optional", "torch.nn.functional.dropout", "torch.no_grad", "torch.bmm", "torch.ones", "torch.nn.functional.linear", "torch.jit.is_scripting", "torch.tensor", "torch.nn.functional.handle_torch_function", "torch.nn.functional.softmax", "torch.nn.functional.pad", "torch.equal", "torch.nn.functional.has_torch_function" ] ]
tetsuoh0103/optuna
[ "b057b467948690c840d9608659ce22b675a8d047" ]
[ "optuna/visualization/matplotlib/_parallel_coordinate.py" ]
[ "from collections import defaultdict\nimport math\nfrom typing import Callable\nfrom typing import cast\nfrom typing import DefaultDict\nfrom typing import List\nfrom typing import Optional\n\nimport numpy as np\n\nfrom optuna._experimental import experimental\nfrom optuna._study_direction import StudyDirection\nfrom optuna.logging import get_logger\nfrom optuna.study import Study\nfrom optuna.trial import FrozenTrial\nfrom optuna.trial import TrialState\nfrom optuna.visualization._utils import _check_plot_args\nfrom optuna.visualization.matplotlib._matplotlib_imports import _imports\nfrom optuna.visualization.matplotlib._utils import _is_categorical\nfrom optuna.visualization.matplotlib._utils import _is_log_scale\n\n\nif _imports.is_successful():\n from optuna.visualization.matplotlib._matplotlib_imports import Axes\n from optuna.visualization.matplotlib._matplotlib_imports import LineCollection\n from optuna.visualization.matplotlib._matplotlib_imports import plt\n\n_logger = get_logger(__name__)\n\n\n@experimental(\"2.2.0\")\ndef plot_parallel_coordinate(\n study: Study,\n params: Optional[List[str]] = None,\n *,\n target: Optional[Callable[[FrozenTrial], float]] = None,\n target_name: str = \"Objective Value\",\n) -> \"Axes\":\n \"\"\"Plot the high-dimensional parameter relationships in a study with Matplotlib.\n\n .. seealso::\n Please refer to :func:`optuna.visualization.plot_parallel_coordinate` for an example.\n\n Example:\n\n The following code snippet shows how to plot the high-dimensional parameter relationships.\n\n .. plot::\n\n import optuna\n\n def objective(trial):\n x = trial.suggest_float(\"x\", -100, 100)\n y = trial.suggest_categorical(\"y\", [-1, 0, 1])\n return x ** 2 + y\n\n\n sampler = optuna.samplers.TPESampler(seed=10)\n study = optuna.create_study(sampler=sampler)\n study.optimize(objective, n_trials=10)\n\n optuna.visualization.matplotlib.plot_parallel_coordinate(study, params=[\"x\", \"y\"])\n\n Args:\n study:\n A :class:`~optuna.study.Study` object whose trials are plotted for their target values.\n params:\n Parameter list to visualize. The default is all parameters.\n target:\n A function to specify the value to display. If it is :obj:`None` and ``study`` is being\n used for single-objective optimization, the objective values are plotted.\n\n .. 
note::\n Specify this argument if ``study`` is being used for multi-objective optimization.\n target_name:\n Target's name to display on the axis label and the legend.\n\n Returns:\n A :class:`matplotlib.axes.Axes` object.\n\n Raises:\n :exc:`ValueError`:\n If ``target`` is :obj:`None` and ``study`` is being used for multi-objective\n optimization.\n \"\"\"\n\n _imports.check()\n _check_plot_args(study, target, target_name)\n return _get_parallel_coordinate_plot(study, params, target, target_name)\n\n\ndef _get_parallel_coordinate_plot(\n study: Study,\n params: Optional[List[str]] = None,\n target: Optional[Callable[[FrozenTrial], float]] = None,\n target_name: str = \"Objective Value\",\n) -> \"Axes\":\n\n if target is None:\n\n def _target(t: FrozenTrial) -> float:\n return cast(float, t.value)\n\n target = _target\n reversescale = study.direction == StudyDirection.MINIMIZE\n else:\n reversescale = True\n\n # Set up the graph style.\n fig, ax = plt.subplots()\n cmap = plt.get_cmap(\"Blues_r\" if reversescale else \"Blues\")\n ax.set_title(\"Parallel Coordinate Plot\")\n ax.spines[\"top\"].set_visible(False)\n ax.spines[\"bottom\"].set_visible(False)\n\n # Prepare data for plotting.\n trials = [trial for trial in study.trials if trial.state == TrialState.COMPLETE]\n\n if len(trials) == 0:\n _logger.warning(\"Your study does not have any completed trials.\")\n return ax\n\n all_params = {p_name for t in trials for p_name in t.params.keys()}\n if params is not None:\n for input_p_name in params:\n if input_p_name not in all_params:\n raise ValueError(\"Parameter {} does not exist in your study.\".format(input_p_name))\n all_params = set(params)\n sorted_params = sorted(all_params)\n\n obj_org = [target(t) for t in trials]\n obj_min = min(obj_org)\n obj_max = max(obj_org)\n obj_w = obj_max - obj_min\n dims_obj_base = [[o] for o in obj_org]\n\n cat_param_names = []\n cat_param_values = []\n cat_param_ticks = []\n log_param_names = []\n param_values = []\n var_names = [target_name]\n for p_name in sorted_params:\n values = [t.params[p_name] if p_name in t.params else np.nan for t in trials]\n\n if _is_log_scale(trials, p_name):\n values = [math.log10(v) for v in values]\n log_param_names.append(p_name)\n elif _is_categorical(trials, p_name):\n vocab = defaultdict(lambda: len(vocab)) # type: DefaultDict[str, int]\n values = [vocab[v] for v in values]\n cat_param_names.append(p_name)\n vocab_item_sorted = sorted(vocab.items(), key=lambda x: x[1])\n cat_param_values.append([v[0] for v in vocab_item_sorted])\n cat_param_ticks.append([v[1] for v in vocab_item_sorted])\n\n p_min = min(values)\n p_max = max(values)\n p_w = p_max - p_min\n\n if p_w == 0.0:\n center = obj_w / 2 + obj_min\n for i in range(len(values)):\n dims_obj_base[i].append(center)\n else:\n for i, v in enumerate(values):\n dims_obj_base[i].append((v - p_min) / p_w * obj_w + obj_min)\n\n var_names.append(p_name if len(p_name) < 20 else \"{}...\".format(p_name[:17]))\n param_values.append(values)\n\n # Draw multiple line plots and axes.\n # Ref: https://stackoverflow.com/a/50029441\n ax.set_xlim(0, len(sorted_params))\n ax.set_ylim(obj_min, obj_max)\n xs = [range(len(sorted_params) + 1) for _ in range(len(dims_obj_base))]\n segments = [np.column_stack([x, y]) for x, y in zip(xs, dims_obj_base)]\n lc = LineCollection(segments, cmap=cmap)\n lc.set_array(np.asarray([target(t) for t in trials] + [0]))\n axcb = fig.colorbar(lc, pad=0.1)\n axcb.set_label(target_name)\n plt.xticks(range(len(sorted_params) + 1), var_names, rotation=330)\n\n 
for i, p_name in enumerate(sorted_params):\n ax2 = ax.twinx()\n ax2.set_ylim(min(param_values[i]), max(param_values[i]))\n if _is_log_scale(trials, p_name):\n ax2.set_yscale(\"log\")\n ax2.spines[\"top\"].set_visible(False)\n ax2.spines[\"bottom\"].set_visible(False)\n ax2.get_xaxis().set_visible(False)\n ax2.plot([1] * len(param_values[i]), param_values[i], visible=False)\n ax2.spines[\"right\"].set_position((\"axes\", (i + 1) / len(sorted_params)))\n if p_name in cat_param_names:\n idx = cat_param_names.index(p_name)\n tick_pos = cat_param_ticks[idx]\n tick_labels = cat_param_values[idx]\n ax2.set_yticks(tick_pos)\n ax2.set_yticklabels(tick_labels)\n\n ax.add_collection(lc)\n\n return ax\n" ]
[ [ "numpy.column_stack" ] ]
JoshVarty/BananaCollector_DoubleQLearning
[ "7594212d187ba60f6c701b471cc9d3cae871131c" ]
[ "model.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass QNetwork(nn.Module):\n \"\"\"Actor (Policy) Model.\"\"\"\n\n def __init__(self, state_size, action_size, seed, fc1_units=64, fc2_units=64):\n \"\"\"Initialize parameters and build model.\n Params\n ======\n state_size (int): Dimension of each state\n action_size (int): Dimension of each action\n seed (int): Random seed\n fc1_units (int): Number of nodes in first hidden layer\n fc2_units (int): Number of nodes in second hidden layer\n \"\"\"\n super(QNetwork, self).__init__()\n self.seed = torch.manual_seed(seed)\n self.fc1 = nn.Linear(state_size, 128)\n self.fc2 = nn.Linear(128, 256)\n self.fc3 = nn.Linear(256, 256)\n self.fc3 = nn.Linear(256, 256)\n self.fc4 = nn.Linear(256, action_size)\n\n def forward(self, state):\n \"\"\"Build a network that maps state -> action values.\"\"\"\n x = F.relu(self.fc1(state))\n x = F.relu(self.fc2(x))\n x = F.relu(self.fc3(x))\n return self.fc4(x)\n" ]
[ [ "torch.manual_seed", "torch.nn.Linear" ] ]
ixaxaar/pytorch-sublstm
[ "879d3df6407dfa653fbd1e501a6af02191759421" ]
[ "test/test_cell.py" ]
[ "#!/usr/bin/env python3\n\nimport pytest\nimport numpy as np\n\nimport torch.nn as nn\nimport torch as T\nfrom torch.autograd import Variable as var\nimport torch.nn.functional as F\nfrom torch.nn.utils import clip_grad_norm\nimport torch.optim as optim\nimport numpy as np\n\nimport sys\nimport os\nimport math\nimport time\nsys.path.insert(0, '.')\n\nfrom subLSTM.functional import SubLSTMCell as SubLSTMCellF\nfrom subLSTM.nn import SubLSTMCell\n\n\ndef test_cell():\n hidden_size = 20\n input_size = 10\n\n for bias in (True, False):\n input = var(T.randn(3, input_size))\n hx = var(T.randn(3, hidden_size))\n cx = var(T.randn(3, hidden_size))\n\n cell = SubLSTMCell(input_size, hidden_size, bias=bias)\n\n for i in range(6):\n (hx, cx) = cell(input, (hx, cx))\n\n hx.sum().backward()\n assert hx.size() == T.Size([3, hidden_size])\n assert cx.size() == T.Size([3, hidden_size])\n" ]
[ [ "torch.Size", "torch.randn" ] ]
zwhitfield/TE_EVEs_assignEVEtaxonomy
[ "3ae30d209fa19a4b8ebc6125ab0e0a2935df6bf4" ]
[ "ClassifyEVEtaxonomy.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 18 11:04:04 2016\n\n@author: zwhitfield\n\"\"\"\nimport sys\nimport pandas as pd\n\noutputdir = str(sys.argv[1])\nfileName = str(sys.argv[2])\n\n#outputdir=\"/home/zwhitfield/Desktop/ForMarkGenomePaper/FrozenData/Aag2_assembly/\"\ntaxonomyCategories = ['superkingdom','order','family','genus','species']\n\ndef LoadData (directory, fileNameWithEVEs, fileNameWithEVEhierarchies, fileNameWithEVEranks):\n if 'closestTEtoEVEs' in fileNameWithEVEs:\n allEVEs = pd.read_csv(directory + fileNameWithEVEs,\n names = [\"ContigEVE\",\"EVEstart\",\"EVEend\",\"EVEdescription\",\"EVEscore\",\"EVEstrand\",\"EVEsomething\",\"ContigTE\",\"TEstart\",\"TEend\",\"TEdescription\",\"TEscore\",\"TEstrand\",\"TEfamily\",\"Distance\"],\n sep = \"\\t\")\n elif 'Aag_Contigs' in fileNameWithEVEs:\n allEVEs = pd.read_csv(directory + fileNameWithEVEs,\n names = [\"ContigEVE\",\"EVEstart\",\"EVEend\",\"EVEdescription\",\"EVEscore\",\"EVEstrand\",\"EVEpi\"],\n sep = \"\\t\")\n # elif 'TEsClosestToEVEs' in fileNameWithEVEs:\n # allEVEs = pd.read_csv(directory + fileNameWithEVEs,\n # sep = \"\\t\")\n\n EVEvirusHierarchies = pd.read_csv(directory + fileNameWithEVEhierarchies,\n names = [\"Rank1\",\"Rank2\",\"Rank3\",\"Rank4\",\"Rank5\",\"Rank6\",\"Rank7\", \"Rank8\", \"Rank9\"],\n sep = \"\\t\")\n \n EVERankHierarchies = pd.read_csv(directory + fileNameWithEVEranks,\n names = [\"A\",\"B\",\"C\",\"D\",\"E\",\"F\",\"G\", \"H\", \"I\"],\n sep = \"\\t\")\n \n #This organization is VERY specific to the NCBI virus taxonomy categories\n EVEhierarchiesOrganized = pd.DataFrame(index=range(0,len(EVEvirusHierarchies)), columns=taxonomyCategories)\n \n currentRow = 0\n for index, Rankrow in EVERankHierarchies.iterrows():\n currentCol = 0\n for Rankcol in Rankrow:\n #print EVErow[currentCol]\n if Rankcol in taxonomyCategories:\n EVEhierarchiesOrganized.ix[currentRow][Rankcol] = EVEvirusHierarchies.iloc[currentRow][currentCol]\n currentCol = currentCol + 1\n currentRow = currentRow + 1 \n \n Merged = pd.merge(allEVEs, EVEhierarchiesOrganized, right_index=True, left_index=True)\n \n #Hand annotate some viruses with missing families :(\n #Most of these are based on Li et al. 
eLife 2015,\"Unprecedented genomic diversity of RNA viruses in arthropods reveals the ancestry of negative-sense RNA viruses\"\n #Some are from Uniprot taxonomy categories\n #Not comprehensive changes; only those viruses I find in the EVE files\n Merged.loc[Merged.species == 'Wuhan Mosquito Virus 8', 'family'] = \"Chuviridae\"\n Merged.loc[Merged.species == 'Wuchang Cockraoch Virus 3', 'family'] = \"Chuviridae\" \n Merged.loc[Merged.species == 'Lishi Spider Virus 1', 'family'] = \"Chuviridae\" \n Merged.loc[Merged.species == 'Shayang Fly Virus 1', 'family'] = \"Chuviridae\" \n Merged.loc[Merged.species == 'Wenzhou Crab Virus 2', 'family'] = \"Chuviridae\" \n \n Merged.loc[Merged.species == 'Bole Tick Virus 2', 'family'] = \"Rhabdoviridae\" \n Merged.loc[Merged.species == 'Shayang Fly Virus 2', 'family'] = \"Rhabdoviridae\" \n Merged.loc[Merged.species == 'Wuhan Ant Virus', 'family'] = \"Rhabdoviridae\" \n Merged.loc[Merged.species == 'Wuhan Fly Virus 2', 'family'] = \"Rhabdoviridae\" \n Merged.loc[Merged.species == 'Wuhan House Fly Virus 1', 'family'] = \"Rhabdoviridae\" \n Merged.loc[Merged.species == 'Wuhan Mosquito Virus 9', 'family'] = \"Rhabdoviridae\"\n Merged.loc[Merged.species == 'Yongjia Tick Virus 2', 'family'] = \"Rhabdoviridae\"\n \n Merged.loc[Merged.species == 'Cilv-C', 'family'] = \"Virgaviridae\"\n Merged.loc[Merged.species == 'Citrus leprosis virus C', 'family'] = \"Virgaviridae\"\n Merged.loc[Merged.species == 'Blueberry necrotic ring blotch virus', 'family'] = \"Virgaviridae\"\n \n Merged.loc[Merged.species == 'Wutai Mosquito Virus', 'family'] = \"Bunyaviridae\"\n\n Merged.to_csv(outputdir + fileNameWithEVEs + \"_withTaxonomy.txt\", sep='\\t', header = True, index = False, quoting = False)\n \n return Merged\n \nLoadData (outputdir,\n fileName,\n \"TaxonomyHierarchyByEVE_\" + fileName + \".txt\",\n \"RankingHierarchyByEVE_\" + fileName + \".txt\"\n )\n" ]
[ [ "pandas.read_csv", "pandas.merge" ] ]
joaramirezra/open-source-contribution
[ "12556ac54e54e1f71687d7f774f2e11873f0effa" ]
[ "PYTHON/python_for_music/play_music.py" ]
[ "# get iris data from sklearn\nimport sklearn\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.metrics import accuracy_score\n\n# load iris data\niris = load_iris()\n\n\n# split data\nX_train, X_test, y_train, y_test = train_test_split(\n iris.data, iris.target, random_state=0)\n\n# train model\nknn = KNeighborsClassifier(n_neighbors=1)\nknn.fit(X_train, y_train)\n\n" ]
[ [ "sklearn.model_selection.train_test_split", "sklearn.neighbors.KNeighborsClassifier", "sklearn.datasets.load_iris" ] ]
merfishtools/merfishtools-evaluation
[ "5499abe9d9f1e0e5bf649bdc38001592cbe1a159" ]
[ "scripts/experiment-diffexp.py" ]
[ "import numpy as np\nimport pandas as pd\nimport matplotlib as mpl\nmpl.use(\"agg\")\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nbf_categories = [-np.inf, 0, 2, 6, 10, np.inf]\nbf_labels = [0, 1, 2, 3, 4]\n#bf_labels = [\"no evidence\", \"weak\", \"positive\", \"strong\", \"very strong\"]\n\nests = pd.concat([pd.read_table(f, index_col=0) for f in snakemake.input],\n keys=[\"{} vs {}\".format(*c) for c in snakemake.params.comparisons])\n\nbf = ests[\"diff_2lnbf\"]\nbf = bf.unstack(0)\nbf = bf[(bf >= 2).any(axis=\"columns\")]\nbf = bf.fillna(-np.inf)\nbf = bf.stack()\nbf = pd.cut(bf, bf_categories, labels=bf_labels, right=False, include_lowest=True)\n\nmatrix = bf.unstack(1)\nbf = bf[~(bf.index.str.startswith(\"notarget\") | bf.index.str.startswith(\"blank\"))]\n\nsns.set(style=\"ticks\", palette=\"colorblind\", context=snakemake.wildcards.context)\nplt.figure(figsize=np.asarray(snakemake.config[\"plots\"][\"figsize\"]) * 3)\ncg = sns.heatmap(bf)\n#plt.setp(cg.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)\nplt.savefig(snakemake.output[0], bbox_inches=\"tight\")\n" ]
[ [ "matplotlib.use", "pandas.read_table", "pandas.cut", "numpy.asarray", "matplotlib.pyplot.savefig" ] ]
pdec/PhiSpy_deprekate
[ "f3d686f995d4422cc214d3173f7f84522c746357" ]
[ "scripts/plot_stats.py" ]
[ "#!/usr/bin/python3\n__author__ = 'Przemek Decewicz'\n\nfrom argparse import ArgumentParser, RawDescriptionHelpFormatter\nfrom glob import glob\nfrom os import makedirs, path\nfrom sys import argv\nimport matplotlib.pyplot as plt\nimport matplotlib\nmatplotlib.use('Agg')\nimport numpy as np\nfrom io import TextIOWrapper\nimport math\n\n\ndef plot_stats(infile, outfile):\n\n \"\"\"Reads test/training set and plots all identified stats.\n\n Stats are slightly transformed to retained a visible scale.\n Two types of plots are provided:\n - transformed stats\n\n Parameters\n ----------\n infile: str\n Path to input file.\n outfile: str\n Path to resulting PNG file with plots.\n \"\"\"\n\n # read input file\n with open(infile) as inf:\n colnames = inf.readline().strip().split('\\t')\n data = np.genfromtxt(infile, delimiter=\"\\t\", filling_values=1, dtype=np.float64, skip_header=1)\n\n for i, name in enumerate(colnames):\n if name == 'orf_length_med':\n data[:, i] = data[:, i] / 50\n elif name == 'shannon_slope':\n data[:, i] = data[:, i] * 200\n elif name == 'at_skew':\n data[:, i] = data[:, i] * 2\n elif name == 'gc_skew':\n data[:, i] = data[:, i] * 2\n elif name == 'max_direction':\n data[:, i] = data[:, i] / 3\n elif name == 'phmms':\n data[:, i] = data[:, i] * 2\n elif name == 'status':\n data[:, i] = data[:, i] * 20\n\n # make a plot\n fig, ax = plt.subplots(figsize=(18, 4.5), dpi = 150)\n\n plt.plot(data, '-', linewidth=.8, alpha = 0.9)\n plt.legend(colnames, loc='lower center', bbox_to_anchor=(0.5,-0.17), ncol = len(colnames))\n plt.margins(x=0.01)\n plt.subplots_adjust(left=0.03, right=0.99, top=0.9, bottom=0.15)\n plt.title(path.basename(infile))\n\n plt.savefig(outfile)\n plt.close()\n\n\n\ndef main():\n args = ArgumentParser(prog = 'plot_trainSets_stats.py',\n description = 'Plots PhiSpy\\'s training/test sets statistics.',\n epilog = 'Example usage:\\npython3 scripts/plot_trainSets_stats.py -d PhiSpyModules/data -o PhiSpyModules/data/trainSets_stats ',\n formatter_class = RawDescriptionHelpFormatter)\n\n args.add_argument('-i', '--infile',\n type = str,\n help = 'Path to input GenBank file.')\n\n args.add_argument('-d', '--indir',\n type = str,\n help = 'Path to input directory with multiple GenBank files.')\n\n args.add_argument('-s', '--suffix',\n type = str,\n help = 'Suffix that will be added to input file name.')\n\n args.add_argument('-o', '--outdir',\n type = str,\n help = 'Path to output directory.',\n required = True)\n\n if len(argv[1:]) == 0:\n args.print_help()\n args.exit()\n\n try:\n args = args.parse_args()\n except:\n args.exit()\n\n if not args.infile and not args.indir:\n print('You have to provide input data by either --infile or --indir.')\n exit(1)\n elif args.indir:\n infiles = glob(path.join(args.indir, '*.txt'))\n else:\n infiles = [args.infile]\n\n # Create output directory\n if not path.isdir(args.outdir): makedirs(args.outdir)\n\n # Process all input files\n for infile in infiles:\n plot_file_name = f'{path.basename(infile).rsplit(\".\", 1)[0]}.{args.suffix}.png'\n plot_file = path.join(args.outdir, plot_file_name)\n plot_stats(infile, plot_file)\n print(f'Done with plot: {plot_file}')\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "matplotlib.use", "matplotlib.pyplot.savefig", "numpy.genfromtxt", "matplotlib.pyplot.plot", "matplotlib.pyplot.close", "matplotlib.pyplot.subplots", "matplotlib.pyplot.margins", "matplotlib.pyplot.subplots_adjust" ] ]