python_code | repo_name | file_path
---|---|---
from typing import Optional, Any, cast
import gym
import gym_minigrid.minigrid
import numpy as np
import torch
from babyai.utils.format import InstructionsPreprocessor
from gym_minigrid.minigrid import MiniGridEnv
from allenact.base_abstractions.sensor import Sensor, prepare_locals_for_super
from allenact.base_abstractions.task import Task, SubTaskType
# fmt: off
ALL_VOCAB_TOKENS = [
"a", "after", "and", "ball", "behind", "blue", "box",
"door", "front", "go", "green", "grey", "in", "key",
"left", "next", "of", "on", "open", "pick", "purple",
"put", "red", "right", "the", "then", "to", "up", "yellow",
"you", "your",
]
# fmt: on
class EgocentricMiniGridSensor(Sensor[MiniGridEnv, Task[MiniGridEnv]]):
def __init__(
self,
agent_view_size: int,
view_channels: int = 1,
uuid: str = "minigrid_ego_image",
**kwargs: Any
):
self.agent_view_size = agent_view_size
self.view_channels = view_channels
self.num_objects = (
cast(
int, max(map(abs, gym_minigrid.minigrid.OBJECT_TO_IDX.values())) # type: ignore
)
+ 1
)
self.num_colors = (
cast(int, max(map(abs, gym_minigrid.minigrid.COLOR_TO_IDX.values()))) # type: ignore
+ 1
)
self.num_states = (
cast(int, max(map(abs, gym_minigrid.minigrid.STATE_TO_IDX.values()))) # type: ignore
+ 1
)
observation_space = self._get_observation_space()
super().__init__(**prepare_locals_for_super(locals()))
def _get_observation_space(self) -> gym.Space:
return gym.spaces.Box(
low=0,
high=max(self.num_objects, self.num_colors, self.num_states) - 1,
shape=(self.agent_view_size, self.agent_view_size, self.view_channels),
dtype=int,
)
def get_observation(
self,
env: MiniGridEnv,
task: Optional[SubTaskType],
*args,
minigrid_output_obs: Optional[np.ndarray] = None,
**kwargs: Any
) -> Any:
if minigrid_output_obs is not None and minigrid_output_obs["image"].shape[:2] == (
self.agent_view_size,
self.agent_view_size,
):
img = minigrid_output_obs["image"][:, :, : self.view_channels]
else:
env.agent_view_size = self.agent_view_size
img = env.gen_obs()["image"][:, :, : self.view_channels]
assert img.dtype == np.uint8
return img
class MiniGridMissionSensor(Sensor[MiniGridEnv, Task[MiniGridEnv]]):
def __init__(self, instr_len: int, uuid: str = "minigrid_mission", **kwargs: Any):
self.instr_preprocessor = InstructionsPreprocessor(
model_name="TMP_SENSOR", load_vocab_from=None
)
# We initialize the vocabulary with a fixed collection of tokens
# and then ensure that the size cannot exceed this number. This
# guarantees that sensors on all processes will produce the same
# values.
for token in ALL_VOCAB_TOKENS:
_ = self.instr_preprocessor.vocab[token]
self.instr_preprocessor.vocab.max_size = len(ALL_VOCAB_TOKENS)
self.instr_len = instr_len
observation_space = self._get_observation_space()
super().__init__(**prepare_locals_for_super(locals()))
def _get_observation_space(self) -> gym.Space:
return gym.spaces.Box(
low=0,
high=self.instr_preprocessor.vocab.max_size,
shape=(self.instr_len,),
dtype=int,
)
def get_observation(
self,
env: MiniGridEnv,
task: Optional[SubTaskType],
*args,
minigrid_output_obs: Optional[np.ndarray] = None,
**kwargs: Any
) -> Any:
if minigrid_output_obs is None:
minigrid_output_obs = env.gen_obs()
out = self.instr_preprocessor([minigrid_output_obs]).view(-1)
n: int = out.shape[0]
if n > self.instr_len:
out = out[: self.instr_len]
elif n < self.instr_len:
out = torch.nn.functional.pad(
input=out, pad=[0, self.instr_len - n], value=0,
)
return out.long().numpy()
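# --- Editor's usage sketch (hypothetical, not part of ask4help-main) ---
# A minimal way to exercise the two sensors above. It assumes gym_minigrid is
# installed; the environment id "MiniGrid-GoToDoor-5x5-v0" is only an example,
# chosen because its mission tokens fall inside ALL_VOCAB_TOKENS.
import gym
import gym_minigrid.envs  # noqa: F401  (importing registers the MiniGrid-* environments)

example_env = gym.make("MiniGrid-GoToDoor-5x5-v0").unwrapped
example_env.reset()
ego_sensor = EgocentricMiniGridSensor(agent_view_size=7, view_channels=3)
mission_sensor = MiniGridMissionSensor(instr_len=10)
img = ego_sensor.get_observation(env=example_env, task=None)        # (7, 7, 3) uint8 grid encoding
tokens = mission_sensor.get_observation(env=example_env, task=None)  # (10,) padded token ids
print(img.shape, tokens.shape)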
| ask4help-main | allenact_plugins/minigrid_plugin/minigrid_sensors.py |
import abc
from typing import Callable, Dict, Optional, Tuple, cast
import gym
import numpy as np
import torch
from gym.spaces.dict import Dict as SpaceDict
import torch.nn as nn
from allenact.algorithms.onpolicy_sync.policy import (
ActorCriticModel,
Memory,
DistributionType,
ActorCriticOutput,
ObservationType,
)
from allenact.base_abstractions.distributions import Distr, CategoricalDistr
from allenact.embodiedai.models.basic_models import LinearActorCritic, RNNActorCritic
from allenact.utils.misc_utils import prepare_locals_for_super
class MiniGridSimpleConvBase(ActorCriticModel[Distr], abc.ABC):
actor_critic: ActorCriticModel
def __init__(
self,
action_space: gym.spaces.Discrete,
observation_space: SpaceDict,
num_objects: int,
num_colors: int,
num_states: int,
object_embedding_dim: int = 8,
**kwargs,
):
super().__init__(action_space=action_space, observation_space=observation_space)
self.num_objects = num_objects
self.object_embedding_dim = object_embedding_dim
vis_input_shape = observation_space["minigrid_ego_image"].shape
agent_view_x, agent_view_y, view_channels = vis_input_shape
assert agent_view_x == agent_view_y
self.agent_view = agent_view_x
self.view_channels = view_channels
assert (np.array(vis_input_shape[:2]) >= 3).all(), (
"MiniGridSimpleConvRNN requires that the input size be at least 3x3."
)
self.num_channels = 0
if self.num_objects > 0:
# Object embedding
self.object_embedding = nn.Embedding(
num_embeddings=num_objects, embedding_dim=self.object_embedding_dim
)
self.object_channel = self.num_channels
self.num_channels += 1
self.num_colors = num_colors
if self.num_colors > 0:
# Same dimensionality used for colors and states
self.color_embedding = nn.Embedding(
num_embeddings=num_colors, embedding_dim=self.object_embedding_dim
)
self.color_channel = self.num_channels
self.num_channels += 1
self.num_states = num_states
if self.num_states > 0:
self.state_embedding = nn.Embedding(
num_embeddings=num_states, embedding_dim=self.object_embedding_dim
)
self.state_channel = self.num_channels
self.num_channels += 1
assert self.num_channels == self.view_channels > 0
self.ac_key = "enc"
self.observations_for_ac: Dict[str, Optional[torch.Tensor]] = {
self.ac_key: None
}
self.num_agents = 1
def forward( # type:ignore
self,
observations: ObservationType,
memory: Memory,
prev_actions: torch.Tensor,
masks: torch.FloatTensor,
) -> Tuple[ActorCriticOutput[DistributionType], Optional[Memory]]:
minigrid_ego_image = cast(torch.Tensor, observations["minigrid_ego_image"])
use_agent = len(minigrid_ego_image.shape) == 6
nrow, ncol, nchannels = minigrid_ego_image.shape[-3:]
nsteps, nsamplers, nagents = masks.shape[:3]
assert nrow == ncol == self.agent_view
assert nchannels == self.view_channels == self.num_channels
embed_list = []
if self.num_objects > 0:
ego_object_embeds = self.object_embedding(
minigrid_ego_image[..., self.object_channel].long()
)
embed_list.append(ego_object_embeds)
if self.num_colors > 0:
ego_color_embeds = self.color_embedding(
minigrid_ego_image[..., self.color_channel].long()
)
embed_list.append(ego_color_embeds)
if self.num_states > 0:
ego_state_embeds = self.state_embedding(
minigrid_ego_image[..., self.state_channel].long()
)
embed_list.append(ego_state_embeds)
ego_embeds = torch.cat(embed_list, dim=-1)
if use_agent:
self.observations_for_ac[self.ac_key] = ego_embeds.view(
nsteps, nsamplers, nagents, -1
)
else:
self.observations_for_ac[self.ac_key] = ego_embeds.view(
nsteps, nsamplers * nagents, -1
)
# noinspection PyCallingNonCallable
out, mem_return = self.actor_critic(
observations=self.observations_for_ac,
memory=memory,
prev_actions=prev_actions,
masks=masks,
)
self.observations_for_ac[self.ac_key] = None
return out, mem_return
class MiniGridSimpleConvRNN(MiniGridSimpleConvBase):
def __init__(
self,
action_space: gym.spaces.Discrete,
observation_space: SpaceDict,
num_objects: int,
num_colors: int,
num_states: int,
object_embedding_dim: int = 8,
hidden_size=512,
num_layers=1,
rnn_type="GRU",
head_type: Callable[
..., ActorCriticModel[CategoricalDistr]
] = LinearActorCritic,
**kwargs,
):
super().__init__(**prepare_locals_for_super(locals()))
self._hidden_size = hidden_size
agent_view_x, agent_view_y, view_channels = observation_space[
"minigrid_ego_image"
].shape
self.actor_critic = RNNActorCritic(
input_uuid=self.ac_key,
action_space=action_space,
observation_space=SpaceDict(
{
self.ac_key: gym.spaces.Box(
low=np.float32(-1.0),
high=np.float32(1.0),
shape=(
self.object_embedding_dim
* agent_view_x
* agent_view_y
* view_channels,
),
)
}
),
hidden_size=hidden_size,
num_layers=num_layers,
rnn_type=rnn_type,
head_type=head_type,
)
self.memory_key = "rnn"
self.train()
@property
def num_recurrent_layers(self):
return self.actor_critic.num_recurrent_layers
@property
def recurrent_hidden_state_size(self):
return self._hidden_size
def _recurrent_memory_specification(self):
return {
self.memory_key: (
(
("layer", self.num_recurrent_layers),
("sampler", None),
("hidden", self.recurrent_hidden_state_size),
),
torch.float32,
)
}
class MiniGridSimpleConv(MiniGridSimpleConvBase):
def __init__(
self,
action_space: gym.spaces.Discrete,
observation_space: SpaceDict,
num_objects: int,
num_colors: int,
num_states: int,
object_embedding_dim: int = 8,
**kwargs,
):
super().__init__(**prepare_locals_for_super(locals()))
agent_view_x, agent_view_y, view_channels = observation_space[
"minigrid_ego_image"
].shape
self.actor_critic = LinearActorCritic(
self.ac_key,
action_space=action_space,
observation_space=SpaceDict(
{
self.ac_key: gym.spaces.Box(
low=np.float32(-1.0),
high=np.float32(1.0),
shape=(
self.object_embedding_dim
* agent_view_x
* agent_view_y
* view_channels,
),
)
}
),
)
self.memory_key = None
self.train()
@property
def num_recurrent_layers(self):
return 0
@property
def recurrent_hidden_state_size(self):
return 0
# noinspection PyMethodMayBeStatic
def _recurrent_memory_specification(self):
return None
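# --- Editor's construction sketch (hypothetical, not part of ask4help-main) ---
# Wiring a sensor-shaped observation space into MiniGridSimpleConvRNN above.
# The (7, 7, 3) shape mirrors EgocentricMiniGridSensor's output; the
# object/color/state counts are gym_minigrid's table sizes and would normally
# be read off the sensor instance rather than hard-coded.
example_obs_space = SpaceDict(
    {
        "minigrid_ego_image": gym.spaces.Box(low=0, high=10, shape=(7, 7, 3), dtype=int)
    }
)
example_model = MiniGridSimpleConvRNN(
    action_space=gym.spaces.Discrete(7),  # assumed action count
    observation_space=example_obs_space,
    num_objects=11,
    num_colors=6,
    num_states=3,
)
print(example_model.num_recurrent_layers, example_model.recurrent_hidden_state_size)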
| ask4help-main | allenact_plugins/minigrid_plugin/minigrid_models.py |
| ask4help-main | allenact_plugins/minigrid_plugin/configs/__init__.py | (empty file)
"""Experiment Config for MiniGrid tutorial."""
import gym
import torch.nn as nn
from allenact.base_abstractions.sensor import SensorSuite
from allenact_plugins.minigrid_plugin.minigrid_models import MiniGridSimpleConv
from allenact_plugins.minigrid_plugin.minigrid_tasks import MiniGridTask
from projects.tutorials.minigrid_tutorial import MiniGridTutorialExperimentConfig
class MiniGridNoMemoryExperimentConfig(MiniGridTutorialExperimentConfig):
@classmethod
def tag(cls) -> str:
return "MiniGridNoMemory"
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
return MiniGridSimpleConv(
action_space=gym.spaces.Discrete(len(MiniGridTask.class_action_names())),
observation_space=SensorSuite(cls.SENSORS).observation_spaces,
num_objects=cls.SENSORS[0].num_objects,
num_colors=cls.SENSORS[0].num_colors,
num_states=cls.SENSORS[0].num_states,
)
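# --- Editor's smoke-test sketch (hypothetical, not part of ask4help-main) ---
# Assumes the tutorial's MiniGridTutorialExperimentConfig defines SENSORS with
# an EgocentricMiniGridSensor, as create_model above requires.
model = MiniGridNoMemoryExperimentConfig.create_model()
print(type(model).__name__, sum(p.numel() for p in model.parameters()))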
| ask4help-main | allenact_plugins/minigrid_plugin/configs/minigrid_nomemory.py |
| ask4help-main | allenact_plugins/minigrid_plugin/scripts/__init__.py | (empty file)
| ask4help-main | allenact_plugins/minigrid_plugin/data/__init__.py | (empty file)
"""Utility functions and classes for visualization and logging."""
import os
from datetime import datetime
import cv2
import imageio
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib
from allenact_plugins.manipulathor_plugin.manipulathor_utils import initialize_arm
from allenact_plugins.manipulathor_plugin.manipulathor_utils import (
reset_environment_and_additional_commands,
transport_wrapper,
)
class LoggerVisualizer:
def __init__(self, exp_name="", log_dir=""):
if log_dir == "":
log_dir = self.__class__.__name__
if exp_name == "":
exp_name = "NoNameExp"
self.exp_name = exp_name
log_dir = os.path.join(exp_name, log_dir,)
self.log_dir = log_dir
os.makedirs(self.log_dir, exist_ok=True)
self.log_queue = []
self.action_queue = []
self.logger_index = 0
def log(self, environment, action_str):
raise Exception("Not Implemented")
def is_empty(self):
return len(self.log_queue) == 0
def finish_episode_metrics(self, episode_info, task_info, metric_results):
pass
def finish_episode(self, environment, episode_info, task_info):
pass
class TestMetricLogger(LoggerVisualizer):
def __init__(self, exp_name="", log_dir="", **kwargs):
super().__init__(exp_name=exp_name, log_dir=log_dir)
self.total_metric_dict = {}
log_file_name = os.path.join(self.log_dir, "test_metric.txt")
self.metric_log_file = open(log_file_name, "w")
self.disturbance_distance_queue = []
def average_dict(self):
result = {}
for (k, v) in self.total_metric_dict.items():
result[k] = sum(v) / len(v)
return result
def finish_episode_metrics(self, episode_info, task_info, metric_results=None):
if metric_results is None:
print("had to reset")
self.action_queue = []
self.disturbance_distance_queue = []
return
for k in metric_results.keys():
if "metric" in k or k in ["ep_length", "reward", "success"]:
self.total_metric_dict.setdefault(k, [])
self.total_metric_dict[k].append(metric_results[k])
print(
"total",
len(self.total_metric_dict["success"]),
"average test metric",
self.average_dict(),
)
# save the task info and all the action queue and results
log_dict = {
"logger_number": self.logger_index,
"action_sequence": self.action_queue,
"disturbance_sequence": self.disturbance_distance_queue,
"task_info_metrics": metric_results,
}
self.logger_index += 1
self.metric_log_file.write(str(log_dict))
self.metric_log_file.write("\n")
self.metric_log_file.flush()
print("Logging to", self.metric_log_file.name)
self.action_queue = []
self.disturbance_distance_queue = []
def log(self, environment, action_str="", disturbance_str=""):
# We can add agent arm and state location if needed
self.action_queue.append(action_str)
self.disturbance_distance_queue.append(disturbance_str)
class BringObjImageVisualizer(LoggerVisualizer):
def finish_episode(self, environment, episode_info, task_info):
now = datetime.now()
time_to_write = now.strftime("%Y_%m_%d_%H_%M_%S_%f")
time_to_write += "log_ind_{}".format(self.logger_index)
self.logger_index += 1
print("Loggigng", time_to_write, "len", len(self.log_queue))
source_object_id = task_info["source_object_id"]
goal_object_id = task_info["goal_object_id"]
pickup_success = episode_info.object_picked_up
episode_success = episode_info._success
# Put back if you want the images
# for i, img in enumerate(self.log_queue):
# image_dir = os.path.join(self.log_dir, time_to_write + '_seq{}.png'.format(str(i)))
# cv2.imwrite(image_dir, img[:,:,[2,1,0]])
episode_success_offset = "succ" if episode_success else "fail"
pickup_success_offset = "succ" if pickup_success else "fail"
gif_name = (
time_to_write
+ "_from_"
+ source_object_id.split("|")[0]
+ "_to_"
+ goal_object_id.split("|")[0]
+ "_pickup_"
+ pickup_success_offset
+ "_episode_"
+ episode_success_offset
+ ".gif"
)
concat_all_images = np.expand_dims(np.stack(self.log_queue, axis=0), axis=1)
save_image_list_to_gif(concat_all_images, gif_name, self.log_dir)
this_controller = environment.controller
scene = this_controller.last_event.metadata["sceneName"]
reset_environment_and_additional_commands(this_controller, scene)
self.log_start_goal(
environment,
task_info["visualization_source"],
tag="start",
img_adr=os.path.join(self.log_dir, time_to_write),
)
self.log_start_goal(
environment,
task_info["visualization_target"],
tag="goal",
img_adr=os.path.join(self.log_dir, time_to_write),
)
self.log_queue = []
self.action_queue = []
def log(self, environment, action_str):
image_tensor = environment.current_frame
self.action_queue.append(action_str)
self.log_queue.append(image_tensor)
def log_start_goal(self, env, task_info, tag, img_adr):
object_location = task_info["object_location"]
object_id = task_info["object_id"]
agent_state = task_info["agent_pose"]
this_controller = env.controller
# We should not reset here
# To start the arm from a raised position (a shortcut), this block is very important; never remove it.
event1, event2, event3 = initialize_arm(this_controller)
if not (
event1.metadata["lastActionSuccess"]
and event2.metadata["lastActionSuccess"]
and event3.metadata["lastActionSuccess"]
):
print("ERROR: ARM MOVEMENT FAILED in logging! SHOULD NEVER HAPPEN")
event = transport_wrapper(this_controller, object_id, object_location)
if event.metadata["lastActionSuccess"] == False:
print("ERROR: oh no could not transport in logging")
event = this_controller.step(
dict(
action="TeleportFull",
standing=True,
x=agent_state["position"]["x"],
y=agent_state["position"]["y"],
z=agent_state["position"]["z"],
rotation=dict(
x=agent_state["rotation"]["x"],
y=agent_state["rotation"]["y"],
z=agent_state["rotation"]["z"],
),
horizon=agent_state["cameraHorizon"],
)
)
if event.metadata["lastActionSuccess"] == False:
print("ERROR: oh no could not teleport in logging")
image_tensor = this_controller.last_event.frame
image_dir = (
img_adr + "_obj_" + object_id.split("|")[0] + "_pickup_" + tag + ".png"
)
cv2.imwrite(image_dir, image_tensor[:, :, [2, 1, 0]])
# Saving the mask
target_object_id = task_info["object_id"]
all_visible_masks = this_controller.last_event.instance_masks
if target_object_id in all_visible_masks:
mask_frame = all_visible_masks[target_object_id]
else:
mask_frame = np.zeros(env.controller.last_event.frame[:, :, 0].shape)
mask_dir = (
img_adr + "_obj_" + object_id.split("|")[0] + "_pickup_" + tag + "_mask.png"
)
cv2.imwrite(mask_dir, mask_frame.astype(float) * 255.0)
class ImageVisualizer(LoggerVisualizer):
def __init__(
self,
exp_name="",
log_dir="",
add_top_down_view: bool = False,
add_depth_map: bool = False,
):
super().__init__(exp_name=exp_name, log_dir=log_dir)
self.add_top_down_view = add_top_down_view
self.add_depth_map = add_depth_map
if self.add_top_down_view:
self.top_down_queue = []
self.disturbance_distance_queue = []
def finish_episode(self, environment, episode_info, task_info):
time_to_write = "log_ind_{:03d}".format(self.logger_index)
self.logger_index += 1
print("Logging", time_to_write, "len", len(self.log_queue))
object_id = task_info["objectId"]
scene_name = task_info["source_location"]["scene_name"]
source_countertop = task_info["source_location"]["countertop_id"]
target_countertop = task_info["target_location"]["countertop_id"]
pickup_success = episode_info.object_picked_up
episode_success = episode_info._success
# Put back if you want the images
# for i, img in enumerate(self.log_queue):
# image_dir = os.path.join(self.log_dir, time_to_write + '_seq{}.png'.format(str(i)))
# cv2.imwrite(image_dir, img[:,:,[2,1,0]])
episode_success_offset = "succ" if episode_success else "fail"
pickup_success_offset = "succ" if pickup_success else "fail"
gif_name = (
time_to_write
+ "_pickup_"
+ pickup_success_offset
+ "_episode_"
+ episode_success_offset
+ "_"
+ scene_name.split("_")[0]
+ "_obj_"
+ object_id.split("|")[0]
+ "_from_"
+ source_countertop.split("|")[0]
+ "_to_"
+ target_countertop.split("|")[0]
+ ".gif"
)
self.log_queue = put_annotation_on_image(
self.log_queue, self.disturbance_distance_queue
)
concat_all_images = np.expand_dims(np.stack(self.log_queue, axis=0), axis=1)
if self.add_top_down_view:
topdown_all_images = np.expand_dims(
np.stack(self.top_down_queue, axis=0), axis=1
) # (T, 1, H, W, 3)
concat_all_images = np.concatenate(
[concat_all_images, topdown_all_images], axis=1
) # (T, 2, H, W, 3)
save_image_list_to_gif(concat_all_images, gif_name, self.log_dir)
self.log_start_goal(
environment,
task_info["visualization_source"],
tag="start",
img_adr=os.path.join(self.log_dir, time_to_write),
)
self.log_start_goal(
environment,
task_info["visualization_target"],
tag="goal",
img_adr=os.path.join(self.log_dir, time_to_write),
)
self.log_queue = []
self.action_queue = []
self.disturbance_distance_queue = []
if self.add_top_down_view:
self.top_down_queue = []
def log(self, environment, action_str="", disturbance_str=""):
self.action_queue.append(action_str)
self.disturbance_distance_queue.append(disturbance_str)
image_tensor = environment.current_frame
self.log_queue.append(image_tensor)
if self.add_top_down_view:
# Reference: https://github.com/allenai/ai2thor/pull/814
event = environment.controller.step(action="GetMapViewCameraProperties")
event = environment.controller.step(
action="AddThirdPartyCamera", **event.metadata["actionReturn"]
)
self.top_down_queue.append(event.third_party_camera_frames[0])
def log_start_goal(self, env, task_info, tag, img_adr):
object_location = task_info["object_location"]
object_id = task_info["object_id"]
agent_state = task_info["agent_pose"]
this_controller = env.controller
scene = this_controller.last_event.metadata[
"sceneName"
]  # maybe we need to reset the environment here
reset_environment_and_additional_commands(this_controller, scene)
# To start the arm from a raised position (a shortcut), this block is very important; never remove it.
event1, event2, event3 = initialize_arm(this_controller)
if not (
event1.metadata["lastActionSuccess"]
and event2.metadata["lastActionSuccess"]
and event3.metadata["lastActionSuccess"]
):
print("ERROR: ARM MOVEMENT FAILED in logging! SHOULD NEVER HAPPEN")
event = transport_wrapper(this_controller, object_id, object_location)
if event.metadata["lastActionSuccess"] == False:
print("ERROR: oh no could not transport in logging")
event = this_controller.step(
dict(
action="TeleportFull",
standing=True,
x=agent_state["position"]["x"],
y=agent_state["position"]["y"],
z=agent_state["position"]["z"],
rotation=dict(
x=agent_state["rotation"]["x"],
y=agent_state["rotation"]["y"],
z=agent_state["rotation"]["z"],
),
horizon=agent_state["cameraHorizon"],
)
)
if event.metadata["lastActionSuccess"] == False:
print("ERROR: oh no could not teleport in logging")
image_tensor = this_controller.last_event.frame
image_dir = img_adr + "_" + tag + ".png"
cv2.imwrite(image_dir, image_tensor[:, :, [2, 1, 0]])
if self.add_depth_map:
depth = this_controller.last_event.depth_frame.copy() # (H, W)
depth[depth > 5.0] = 5.0
norm = matplotlib.colors.Normalize(vmin=depth.min(), vmax=depth.max())
rgb = cm.get_cmap(plt.get_cmap("viridis"))(norm(depth))[:, :, :3] # [0,1]
rgb = (rgb * 255).astype(np.uint8)
depth_dir = img_adr + "_" + tag + "_depth.png"
cv2.imwrite(depth_dir, rgb[:, :, [2, 1, 0]])
def save_image_list_to_gif(image_list, gif_name, gif_dir):
gif_adr = os.path.join(gif_dir, gif_name)
seq_len, cols, w, h, c = image_list.shape
pallet = np.zeros(
(seq_len, w, h * cols, c)
) # to support multiple animations in one gif
for col_ind in range(cols):
pallet[:, :, col_ind * h : (col_ind + 1) * h, :] = image_list[:, col_ind]
if not os.path.exists(gif_dir):
os.makedirs(gif_dir)
imageio.mimsave(gif_adr, pallet.astype(np.uint8), format="GIF", duration=1 / 5)
print("Saved result in ", gif_adr)
def put_annotation_on_image(images, annotations):
all_images = []
for img, annot in zip(images, annotations):
position = (10, 10)
from PIL import Image, ImageFont, ImageDraw
pil_img = Image.fromarray(img)
draw = ImageDraw.Draw(pil_img)
draw.text(position, annot, (0, 0, 0))
all_images.append(np.array(pil_img))
return all_images
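# --- Editor's usage sketch (hypothetical, not part of ask4help-main) ---
# Driving TestMetricLogger above without a live AI2-THOR environment; the
# metric keys match what finish_episode_metrics filters for, and the numeric
# values are placeholders.
example_logger = TestMetricLogger(exp_name="debug_exp", log_dir="metrics")
example_logger.log(environment=None, action_str="MoveAheadContinuous", disturbance_str="0.0")
example_logger.finish_episode_metrics(
    episode_info=None,
    task_info={},
    metric_results={"ep_length": 25, "reward": 1.3, "success": 1.0},
)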
| ask4help-main | allenact_plugins/manipulathor_plugin/manipulathor_viz.py |
"""Task Definions for the task of ArmPointNav."""
from typing import Dict, Tuple, List, Any, Optional
import copy
import gym
import numpy as np
from allenact.base_abstractions.misc import RLStepResult
from allenact.base_abstractions.sensor import Sensor
from allenact.base_abstractions.task import Task
from allenact_plugins.manipulathor_plugin.armpointnav_constants import (
MOVE_ARM_CONSTANT,
DISTANCE_EPS,
)
from allenact_plugins.manipulathor_plugin.manipulathor_constants import (
MOVE_ARM_HEIGHT_P,
MOVE_ARM_HEIGHT_M,
MOVE_ARM_X_P,
MOVE_ARM_X_M,
MOVE_ARM_Y_P,
MOVE_ARM_Y_M,
MOVE_ARM_Z_P,
MOVE_ARM_Z_M,
ROTATE_WRIST_PITCH_P,
ROTATE_WRIST_PITCH_M,
ROTATE_WRIST_YAW_P,
ROTATE_WRIST_YAW_M,
ROTATE_ELBOW_P,
ROTATE_ELBOW_M,
LOOK_UP,
LOOK_DOWN,
MOVE_AHEAD,
ROTATE_RIGHT,
ROTATE_LEFT,
PICKUP,
DONE,
)
from allenact_plugins.manipulathor_plugin.manipulathor_environment import (
ManipulaTHOREnvironment,
position_distance,
)
from allenact_plugins.manipulathor_plugin.manipulathor_viz import LoggerVisualizer
class AbstractPickUpDropOffTask(Task[ManipulaTHOREnvironment]):
_actions = (
MOVE_ARM_HEIGHT_P,
MOVE_ARM_HEIGHT_M,
MOVE_ARM_X_P,
MOVE_ARM_X_M,
MOVE_ARM_Y_P,
MOVE_ARM_Y_M,
MOVE_ARM_Z_P,
MOVE_ARM_Z_M,
MOVE_AHEAD,
ROTATE_RIGHT,
ROTATE_LEFT,
)
# Newer AI2-THOR builds have an issue where objects vibrate slightly
# without any external force. To eliminate the vibration effect, we
# introduce _vibration_dist_dict (loaded from an external csv file) when
# checking for disturbance. By default it is None, i.e. we assume there is no vibration.
_vibration_dist_dict: Optional[Dict] = None
def __init__(
self,
env: ManipulaTHOREnvironment,
sensors: List[Sensor],
task_info: Dict[str, Any],
max_steps: int,
visualizers: List[LoggerVisualizer] = [],
**kwargs
) -> None:
"""Initializer.
See class documentation for parameter definitions.
"""
super().__init__(
env=env, sensors=sensors, task_info=task_info, max_steps=max_steps, **kwargs
)
self._took_end_action: bool = False
self._success: Optional[bool] = False
self._subsampled_locations_from_which_obj_visible: Optional[
List[Tuple[float, float, int, int]]
] = None
self.visualizers = visualizers
self.start_visualize()
self.action_sequence_and_success = []
self._took_end_action: bool = False
self._success: Optional[bool] = False
self._subsampled_locations_from_which_obj_visible: Optional[
List[Tuple[float, float, int, int]]
] = None
# in allenact initialization is with 0.2
self.last_obj_to_goal_distance = None
self.last_arm_to_obj_distance = None
self.object_picked_up = False
self.got_reward_for_pickup = False
self.reward_configs = kwargs["reward_configs"]
self.initial_object_locations = self.env.get_current_object_locations()
@property
def action_space(self):
return gym.spaces.Discrete(len(self._actions))
def reached_terminal_state(self) -> bool:
return self._took_end_action
@classmethod
def class_action_names(cls, **kwargs) -> Tuple[str, ...]:
return cls._actions
def close(self) -> None:
self.env.stop()
def obj_state_aproximity(self, s1, s2):
# KIANA ignore rotation for now
position1 = s1["position"]
position2 = s2["position"]
eps = MOVE_ARM_CONSTANT * 2
return (
abs(position1["x"] - position2["x"]) < eps
and abs(position1["y"] - position2["y"]) < eps
and abs(position1["z"] - position2["z"]) < eps
)
def start_visualize(self):
for visualizer in self.visualizers:
if not visualizer.is_empty():
print("OH NO VISUALIZER WAS NOT EMPTY")
visualizer.finish_episode(self.env, self, self.task_info)
visualizer.finish_episode_metrics(self, self.task_info, None)
visualizer.log(self.env)
def visualize(self, action_str):
for vizualizer in self.visualizers:
vizualizer.log(self.env, action_str)
def finish_visualizer(self):
for visualizer in self.visualizers:
visualizer.finish_episode(self.env, self, self.task_info)
def finish_visualizer_metrics(self, metric_results):
for visualizer in self.visualizers:
visualizer.finish_episode_metrics(self, self.task_info, metric_results)
def render(self, mode: str = "rgb", *args, **kwargs) -> np.ndarray:
assert mode == "rgb", "only rgb rendering is implemented"
return self.env.current_frame
def calc_action_stat_metrics(self) -> Dict[str, Any]:
action_stat = {"action_stat/" + action_str: 0.0 for action_str in self._actions}
action_success_stat = {
"action_success/" + action_str: 0.0 for action_str in self._actions
}
action_success_stat["action_success/total"] = 0.0
seq_len = len(self.action_sequence_and_success)
for (action_name, action_success) in self.action_sequence_and_success:
action_stat["action_stat/" + action_name] += 1.0
action_success_stat[
"action_success/{}".format(action_name)
] += action_success
action_success_stat["action_success/total"] += action_success
action_success_stat["action_success/total"] /= seq_len
for action_name in self._actions:
action_success_stat["action_success/{}".format(action_name)] /= max(
action_stat["action_stat/" + action_name], 1.0
)
action_stat["action_stat/" + action_name] /= seq_len
result = {**action_stat, **action_success_stat}
return result
def metrics(self) -> Dict[str, Any]:
result = super(AbstractPickUpDropOffTask, self).metrics()
if self.is_done():
result = {**result, **self.calc_action_stat_metrics()}
# 1. goal object metrics
final_obj_distance_from_goal = self.obj_distance_from_goal()
result[
"average/final_obj_distance_from_goal"
] = final_obj_distance_from_goal
final_arm_distance_from_obj = self.arm_distance_from_obj()
result["average/final_arm_distance_from_obj"] = final_arm_distance_from_obj
final_obj_pickup = 1 if self.object_picked_up else 0
result["average/final_obj_pickup"] = final_obj_pickup
original_distance = self.get_original_object_distance() + DISTANCE_EPS
result["average/original_distance"] = original_distance
# this ratio can be more than 1
if self.object_picked_up:
ratio_distance_left = final_obj_distance_from_goal / original_distance
result["average/ratio_distance_left"] = ratio_distance_left
result["average/eplen_pickup"] = self.eplen_pickup
# 2. disturbance with other objects
current_object_locations = self.env.get_current_object_locations()
objects_moved = self.env.get_objects_moved(
self.initial_object_locations,
current_object_locations,
self.task_info["objectId"],
self._vibration_dist_dict,
)
result["disturbance/objects_moved_num"] = len(objects_moved)
# 3. conditioned on success
if self._success:
result["average/eplen_success"] = result["ep_length"]
result["average/success_wo_disturb"] = len(objects_moved) == 0
else:
result["average/success_wo_disturb"] = 0.0
result["success"] = self._success
self.finish_visualizer_metrics(result)
self.finish_visualizer()
self.action_sequence_and_success = []
return result
def _step(self, action: int) -> RLStepResult:
raise Exception("Not implemented")
def arm_distance_from_obj(self):
goal_obj_id = self.task_info["objectId"]
object_info = self.env.get_object_by_id(goal_obj_id)
hand_state = self.env.get_absolute_hand_state()
return position_distance(object_info, hand_state)
def obj_distance_from_goal(self):
goal_obj_id = self.task_info["objectId"]
object_info = self.env.get_object_by_id(goal_obj_id)
goal_state = self.task_info["target_location"]
return position_distance(object_info, goal_state)
def get_original_object_distance(self):
goal_obj_id = self.task_info["objectId"]
s_init = dict(position=self.task_info["source_location"]["object_location"])
current_location = self.env.get_object_by_id(goal_obj_id)
original_object_distance = position_distance(s_init, current_location)
return original_object_distance
def judge(self) -> float:
"""Compute the reward after having taken a step."""
raise Exception("Not implemented")
class ArmPointNavTask(AbstractPickUpDropOffTask):
_actions = (
MOVE_ARM_HEIGHT_P,
MOVE_ARM_HEIGHT_M,
MOVE_ARM_X_P,
MOVE_ARM_X_M,
MOVE_ARM_Y_P,
MOVE_ARM_Y_M,
MOVE_ARM_Z_P,
MOVE_ARM_Z_M,
MOVE_AHEAD,
ROTATE_RIGHT,
ROTATE_LEFT,
PICKUP,
DONE,
)
def __init__(
self,
env: ManipulaTHOREnvironment,
sensors: List[Sensor],
task_info: Dict[str, Any],
max_steps: int,
visualizers: List[LoggerVisualizer] = [],
**kwargs
) -> None:
super().__init__(
env=env,
sensors=sensors,
task_info=task_info,
max_steps=max_steps,
visualizers=visualizers,
**kwargs
)
self.cumulated_disturb_distance_all = 0.0
self.cumulated_disturb_distance_visible = 0.0
# NOTE: the visible-object distance can be negative; it has no deterministic
# relation to the all-objects distance
self.previous_object_locations = copy.deepcopy(self.initial_object_locations)
self.current_penalized_distance = 0.0 # used in Sensor for auxiliary task
def metrics(self) -> Dict[str, Any]:
result = super(ArmPointNavTask, self).metrics()
if self.is_done():
# add disturbance distance metrics
result[
"disturbance/objects_moved_distance"
] = self.cumulated_disturb_distance_all
result[
"disturbance/objects_moved_distance_vis"
] = self.cumulated_disturb_distance_visible
return result
def visualize(self, **kwargs):
for vizualizer in self.visualizers:
vizualizer.log(self.env, **kwargs)
def _step(self, action: int) -> RLStepResult:
action_str = self.class_action_names()[action]
self._last_action_str = action_str
action_dict = {"action": action_str}
object_id = self.task_info["objectId"]
if action_str == PICKUP:
action_dict = {**action_dict, "object_id": object_id}
self.env.step(action_dict)
self.last_action_success = self.env.last_action_success
last_action_name = self._last_action_str
last_action_success = float(self.last_action_success)
self.action_sequence_and_success.append((last_action_name, last_action_success))
# If the object has not been marked as picked up yet but was just picked up during this step, update parameters so the pickup is reflected in the reward
if not self.object_picked_up:
if self.env.is_object_at_low_level_hand(object_id):
self.object_picked_up = True
self.eplen_pickup = (
self._num_steps_taken + 1
) # plus one because this step has not been counted yet
if action_str == DONE:
self._took_end_action = True
object_state = self.env.get_object_by_id(object_id)
goal_state = self.task_info["target_location"]
goal_achieved = self.object_picked_up and self.obj_state_aproximity(
object_state, goal_state
)
self.last_action_success = goal_achieved
self._success = goal_achieved
step_result = RLStepResult(
observation=self.get_observations(),
reward=self.judge(),
done=self.is_done(),
info={"last_action_success": self.last_action_success},
)
return step_result
def judge(self) -> float:
"""Compute the reward after having taken a step."""
reward = self.reward_configs["step_penalty"]
if not self.last_action_success or (
self._last_action_str == PICKUP and not self.object_picked_up
):
reward += self.reward_configs["failed_action_penalty"]
if self._took_end_action:
reward += (
self.reward_configs["goal_success_reward"]
if self._success
else self.reward_configs["failed_stop_reward"]
)
# increase reward if object pickup and only do it once
if not self.got_reward_for_pickup and self.object_picked_up:
reward += self.reward_configs["pickup_success_reward"]
self.got_reward_for_pickup = True
current_obj_to_arm_distance = self.arm_distance_from_obj()
if self.last_arm_to_obj_distance is None:
delta_arm_to_obj_distance_reward = 0
else:
delta_arm_to_obj_distance_reward = (
self.last_arm_to_obj_distance - current_obj_to_arm_distance
)
self.last_arm_to_obj_distance = current_obj_to_arm_distance
reward += delta_arm_to_obj_distance_reward
current_obj_to_goal_distance = self.obj_distance_from_goal()
if self.last_obj_to_goal_distance is None:
delta_obj_to_goal_distance_reward = 0
else:
delta_obj_to_goal_distance_reward = (
self.last_obj_to_goal_distance - current_obj_to_goal_distance
)
self.last_obj_to_goal_distance = current_obj_to_goal_distance
reward += delta_obj_to_goal_distance_reward
# add disturbance cost
## here we measure disturbance as the sum of the moving distances of all objects
## note that a collided object may keep moving for a while without external force due to inertia
## and we may also consider mass
current_object_locations = self.env.get_current_object_locations()
disturb_distance_visible = self.env.get_objects_move_distance(
initial_object_locations=self.initial_object_locations,
previous_object_locations=self.previous_object_locations,
current_object_locations=current_object_locations,
target_object_id=self.task_info["objectId"],
only_visible=True,
thres_dict=self._vibration_dist_dict,
)
disturb_distance_all = self.env.get_objects_move_distance(
initial_object_locations=self.initial_object_locations,
previous_object_locations=self.previous_object_locations,
current_object_locations=current_object_locations,
target_object_id=self.task_info["objectId"],
only_visible=False,
thres_dict=self._vibration_dist_dict,
)
self.cumulated_disturb_distance_all += disturb_distance_all
self.cumulated_disturb_distance_visible += disturb_distance_visible
penalized_distance = (
disturb_distance_visible
if self.reward_configs["disturb_visible"]
else disturb_distance_all
)
reward += self.reward_configs["disturb_penalty"] * penalized_distance
self.current_penalized_distance = penalized_distance
self.previous_object_locations = current_object_locations
self.visualize(
action_str=self._last_action_str,
disturbance_str=str(round(penalized_distance, 4)),
)
return float(reward)
class RotateArmPointNavTask(ArmPointNavTask):
_actions = (
MOVE_ARM_HEIGHT_P,
MOVE_ARM_HEIGHT_M,
MOVE_ARM_X_P,
MOVE_ARM_X_M,
MOVE_ARM_Y_P,
MOVE_ARM_Y_M,
MOVE_ARM_Z_P,
MOVE_ARM_Z_M,
ROTATE_WRIST_PITCH_P,
ROTATE_WRIST_PITCH_M,
ROTATE_WRIST_YAW_P,
ROTATE_WRIST_YAW_M,
ROTATE_ELBOW_P,
ROTATE_ELBOW_M,
MOVE_AHEAD,
ROTATE_RIGHT,
ROTATE_LEFT,
PICKUP,
DONE,
)
class CamRotateArmPointNavTask(ArmPointNavTask):
_actions = (
MOVE_ARM_HEIGHT_P,
MOVE_ARM_HEIGHT_M,
MOVE_ARM_X_P,
MOVE_ARM_X_M,
MOVE_ARM_Y_P,
MOVE_ARM_Y_M,
MOVE_ARM_Z_P,
MOVE_ARM_Z_M,
ROTATE_WRIST_PITCH_P,
ROTATE_WRIST_PITCH_M,
ROTATE_WRIST_YAW_P,
ROTATE_WRIST_YAW_M,
ROTATE_ELBOW_P,
ROTATE_ELBOW_M,
LOOK_UP,
LOOK_DOWN,
MOVE_AHEAD,
ROTATE_RIGHT,
ROTATE_LEFT,
PICKUP,
DONE,
)
class EasyArmPointNavTask(ArmPointNavTask):
_actions = (
MOVE_ARM_HEIGHT_P,
MOVE_ARM_HEIGHT_M,
MOVE_ARM_X_P,
MOVE_ARM_X_M,
MOVE_ARM_Y_P,
MOVE_ARM_Y_M,
MOVE_ARM_Z_P,
MOVE_ARM_Z_M,
MOVE_AHEAD,
ROTATE_RIGHT,
ROTATE_LEFT,
# PICKUP,
# DONE,
)
def _step(self, action: int) -> RLStepResult:
action_str = self.class_action_names()[action]
self._last_action_str = action_str
action_dict = {"action": action_str}
object_id = self.task_info["objectId"]
if action_str == PICKUP:
action_dict = {**action_dict, "object_id": object_id}
self.env.step(action_dict)
self.last_action_success = self.env.last_action_success
last_action_name = self._last_action_str
last_action_success = float(self.last_action_success)
self.action_sequence_and_success.append((last_action_name, last_action_success))
self.visualize(last_action_name)
# If the object has not been marked as picked up yet but was just picked up during this step, update parameters so the pickup is reflected in the reward
if not self.object_picked_up:
if (
object_id
in self.env.controller.last_event.metadata["arm"]["pickupableObjects"]
):
self.env.step(dict(action="PickupObject"))
# we do an additional pickup pass here; the label is not exactly right and, if it fails, we will end up doing it twice
object_inventory = self.env.controller.last_event.metadata["arm"][
"heldObjects"
]
if len(object_inventory) > 0 and object_id not in object_inventory:
self.env.step(dict(action="ReleaseObject"))
if self.env.is_object_at_low_level_hand(object_id):
self.object_picked_up = True
self.eplen_pickup = (
self._num_steps_taken + 1
) # plus one because this step has not been counted yet
if self.object_picked_up:
object_state = self.env.get_object_by_id(object_id)
goal_state = self.task_info["target_location"]
goal_achieved = self.object_picked_up and self.obj_state_aproximity(
object_state, goal_state
)
if goal_achieved:
self._took_end_action = True
self.last_action_success = goal_achieved
self._success = goal_achieved
step_result = RLStepResult(
observation=self.get_observations(),
reward=self.judge(),
done=self.is_done(),
info={"last_action_success": self.last_action_success},
)
return step_result
# def judge(self) -> float: Seems like we are fine on this
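# --- Editor's reference sketch (hypothetical, not part of ask4help-main) ---
# Every key that ArmPointNavTask.judge() above reads from reward_configs; the
# numeric values are placeholders, not the repository's actual settings.
example_reward_configs = dict(
    step_penalty=-0.01,
    failed_action_penalty=-0.03,
    goal_success_reward=10.0,
    failed_stop_reward=0.0,
    pickup_success_reward=5.0,
    disturb_penalty=-1.0,   # multiplied by the penalized disturbance distance
    disturb_visible=True,   # penalize only the disturbance of visible objects
)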
| ask4help-main | allenact_plugins/manipulathor_plugin/manipulathor_tasks.py |
"""Task Samplers for the task of ArmPointNav."""
import json
import random
from typing import List, Dict, Optional, Any, Union
import gym
from allenact.base_abstractions.sensor import Sensor
from allenact.base_abstractions.task import Task
from allenact.base_abstractions.task import TaskSampler
from allenact.utils.experiment_utils import set_deterministic_cudnn, set_seed
from allenact_plugins.manipulathor_plugin.manipulathor_utils import (
transport_wrapper,
initialize_arm,
)
from allenact_plugins.manipulathor_plugin.manipulathor_environment import (
ManipulaTHOREnvironment,
)
from allenact_plugins.manipulathor_plugin.manipulathor_tasks import (
AbstractPickUpDropOffTask,
ArmPointNavTask,
RotateArmPointNavTask,
CamRotateArmPointNavTask,
EasyArmPointNavTask,
)
from allenact_plugins.manipulathor_plugin.manipulathor_viz import (
ImageVisualizer,
LoggerVisualizer,
)
class AbstractMidLevelArmTaskSampler(TaskSampler):
_TASK_TYPE = Task
def __init__(
self,
scenes: List[str],
sensors: List[Sensor],
max_steps: int,
env_args: Dict[str, Any],
action_space: gym.Space,
rewards_config: Dict,
objects: List[str],
scene_period: Optional[Union[int, str]] = None,
max_tasks: Optional[int] = None,
num_task_per_scene: Optional[int] = None,
seed: Optional[int] = None,
deterministic_cudnn: bool = False,
fixed_tasks: Optional[List[Dict[str, Any]]] = None,
visualizers: List[LoggerVisualizer] = [],
*args,
**kwargs
) -> None:
self.rewards_config = rewards_config
self.env_args = env_args
self.scenes = scenes
self.grid_size = 0.25
self.env: Optional[ManipulaTHOREnvironment] = None
self.sensors = sensors
self.max_steps = max_steps
self._action_space = action_space
self.objects = objects
self.num_task_per_scene = num_task_per_scene
self.scene_counter: Optional[int] = None
self.scene_order: Optional[List[str]] = None
self.scene_id: Optional[int] = None
self.scene_period: Optional[
Union[str, int]
] = scene_period # default makes a random choice
self.max_tasks: Optional[int] = None
self.reset_tasks = max_tasks
self._last_sampled_task: Optional[Task] = None
self.seed: Optional[int] = None
self.set_seed(seed)
if deterministic_cudnn:
set_deterministic_cudnn()
self.reset()
self.visualizers = visualizers
self.sampler_mode = kwargs["sampler_mode"]
self.cap_training = kwargs["cap_training"]
def _create_environment(self, **kwargs) -> ManipulaTHOREnvironment:
env = ManipulaTHOREnvironment(
make_agents_visible=False, object_open_speed=0.05, env_args=self.env_args,
)
return env
@property
def last_sampled_task(self) -> Optional[Task]:
return self._last_sampled_task
def close(self) -> None:
if self.env is not None:
self.env.stop()
@property
def all_observation_spaces_equal(self) -> bool:
"""Check if observation spaces equal.
# Returns
True if all Tasks that can be sampled by this sampler have the
same observation space. Otherwise False.
"""
return True
def reset(self):
self.scene_counter = 0
self.scene_order = list(range(len(self.scenes)))
random.shuffle(self.scene_order)
self.scene_id = 0
self.sampler_index = 0
self.max_tasks = self.reset_tasks
def set_seed(self, seed: int):
self.seed = seed
if seed is not None:
set_seed(seed)
class SimpleArmPointNavGeneralSampler(AbstractMidLevelArmTaskSampler):
_TASK_TYPE = AbstractPickUpDropOffTask
def __init__(self, **kwargs) -> None:
super(SimpleArmPointNavGeneralSampler, self).__init__(**kwargs)
self.all_possible_points = []
for scene in self.scenes:
for object in self.objects:
valid_position_adr = "datasets/apnd-dataset/valid_object_positions/valid_{}_positions_in_{}.json".format(
object, scene
)
try:
with open(valid_position_adr) as f:
data_points = json.load(f)
except Exception:
print("Failed to load", valid_position_adr)
continue
visible_data = [
data for data in data_points[scene] if data["visibility"]
]
self.all_possible_points += visible_data
self.countertop_object_to_data_id = self.calc_possible_trajectories(
self.all_possible_points
)
scene_names = set(
[
self.all_possible_points[counter[0]]["scene_name"]
for counter in self.countertop_object_to_data_id.values()
if len(counter) > 1
]
)
if len(set(scene_names)) < len(self.scenes):
print("Not all scenes appear")
print(
"Len dataset",
len(self.all_possible_points),
"total_remained",
sum([len(v) for v in self.countertop_object_to_data_id.values()]),
)
if (
self.sampler_mode != "train"
): # Be aware that this totally overrides some stuff
self.deterministic_data_list = []
for scene in self.scenes:
for object in self.objects:
valid_position_adr = "datasets/apnd-dataset/deterministic_tasks/tasks_{}_positions_in_{}.json".format(
object, scene
)
try:
with open(valid_position_adr) as f:
data_points = json.load(f)
except Exception:
print("Failed to load", valid_position_adr)
continue
visible_data = [
dict(scene=scene, index=i, datapoint=data)
for (i, data) in enumerate(data_points[scene])
]
if self.num_task_per_scene is None:
self.deterministic_data_list += visible_data
else: # select a small number of data points for fast evaluation
self.deterministic_data_list += visible_data[
: min(self.num_task_per_scene, len(visible_data))
]
if self.sampler_mode == "test":
random.shuffle(self.deterministic_data_list)
self.max_tasks = self.reset_tasks = len(self.deterministic_data_list)
def next_task(
self, force_advance_scene: bool = False
) -> Optional[AbstractPickUpDropOffTask]:
if self.max_tasks is not None and self.max_tasks <= 0:
return None
if self.sampler_mode != "train" and self.length <= 0:
return None
source_data_point, target_data_point = self.get_source_target_indices()
scene = source_data_point["scene_name"]
assert source_data_point["object_id"] == target_data_point["object_id"]
assert source_data_point["scene_name"] == target_data_point["scene_name"]
if self.env is None:
self.env = self._create_environment()
self.env.reset(
scene_name=scene, agentMode="arm", agentControllerType="mid-level"
)
initialize_arm(self.env.controller)
source_location = source_data_point
target_location = dict(
position=target_data_point["object_location"],
rotation={"x": 0, "y": 0, "z": 0},
)
task_info = {
"objectId": source_location["object_id"],
"countertop_id": source_location["countertop_id"],
"source_location": source_location,
"target_location": target_location,
}
this_controller = self.env
transport_wrapper(
this_controller,
source_location["object_id"],
source_location["object_location"],
)
agent_state = source_location["agent_pose"]
this_controller.step(
dict(
action="TeleportFull",
standing=True,
x=agent_state["position"]["x"],
y=agent_state["position"]["y"],
z=agent_state["position"]["z"],
rotation=dict(
x=agent_state["rotation"]["x"],
y=agent_state["rotation"]["y"],
z=agent_state["rotation"]["z"],
),
horizon=agent_state["cameraHorizon"],
)
)
should_visualize_goal_start = [
x for x in self.visualizers if issubclass(type(x), ImageVisualizer)
]
if len(should_visualize_goal_start) > 0:
task_info["visualization_source"] = source_data_point
task_info["visualization_target"] = target_data_point
self._last_sampled_task = self._TASK_TYPE(
env=self.env,
sensors=self.sensors,
task_info=task_info,
max_steps=self.max_steps,
action_space=self._action_space,
visualizers=self.visualizers,
reward_configs=self.rewards_config,
)
return self._last_sampled_task
@property
def total_unique(self) -> Optional[Union[int, float]]:
if self.sampler_mode == "train":
return None
else:
return min(self.max_tasks, len(self.deterministic_data_list))
@property
def length(self) -> Union[int, float]:
"""Length.
# Returns
Number of total tasks remaining that can be sampled. Can be float('inf').
"""
return (
self.total_unique - self.sampler_index
if self.sampler_mode != "train"
else (float("inf") if self.max_tasks is None else self.max_tasks)
)
def get_source_target_indices(self):
if self.sampler_mode == "train":
valid_countertops = [
k for (k, v) in self.countertop_object_to_data_id.items() if len(v) > 1
]
countertop_id = random.choice(valid_countertops)
indices = random.sample(self.countertop_object_to_data_id[countertop_id], 2)
result = (
self.all_possible_points[indices[0]],
self.all_possible_points[indices[1]],
)
else:
result = self.deterministic_data_list[self.sampler_index]["datapoint"]
self.sampler_index += 1
return result
def calc_possible_trajectories(self, all_possible_points):
object_to_data_id = {}
for i in range(len(all_possible_points)):
object_id = all_possible_points[i]["object_id"]
object_to_data_id.setdefault(object_id, [])
object_to_data_id[object_id].append(i)
return object_to_data_id
class ArmPointNavTaskSampler(SimpleArmPointNavGeneralSampler):
_TASK_TYPE = ArmPointNavTask
def __init__(self, **kwargs) -> None:
super(ArmPointNavTaskSampler, self).__init__(**kwargs)
possible_initial_locations = (
"datasets/apnd-dataset/valid_agent_initial_locations.json"
)
if self.sampler_mode == "test":
possible_initial_locations = (
"datasets/apnd-dataset/deterministic_valid_agent_initial_locations.json"
)
with open(possible_initial_locations) as f:
self.possible_agent_reachable_poses = json.load(f)
def next_task(
self, force_advance_scene: bool = False
) -> Optional[AbstractPickUpDropOffTask]:
if self.max_tasks is not None and self.max_tasks <= 0:
return None
if self.sampler_mode != "train" and self.length <= 0:
return None
source_data_point, target_data_point = self.get_source_target_indices()
scene = source_data_point["scene_name"]
assert source_data_point["object_id"] == target_data_point["object_id"]
assert source_data_point["scene_name"] == target_data_point["scene_name"]
if self.env is None:
self.env = self._create_environment()
self.env.reset(
scene_name=scene, agentMode="arm", agentControllerType="mid-level"
)
initialize_arm(self.env.controller)
source_location = source_data_point
target_location = dict(
position=target_data_point["object_location"],
rotation={"x": 0, "y": 0, "z": 0},
countertop_id=target_data_point["countertop_id"],
)
this_controller = self.env
transport_wrapper(
this_controller,
source_location["object_id"],
source_location["object_location"],
)
agent_state = source_location[
"initial_agent_pose"
]  # the only line that differs from the parent class
this_controller.step(
dict(
action="TeleportFull",
standing=True,
x=agent_state["position"]["x"],
y=agent_state["position"]["y"],
z=agent_state["position"]["z"],
rotation=dict(
x=agent_state["rotation"]["x"],
y=agent_state["rotation"]["y"],
z=agent_state["rotation"]["z"],
),
horizon=agent_state["cameraHorizon"],
)
)
should_visualize_goal_start = [
x for x in self.visualizers if issubclass(type(x), ImageVisualizer)
]
initial_object_info = self.env.get_object_by_id(source_location["object_id"])
initial_agent_location = self.env.controller.last_event.metadata["agent"]
initial_hand_state = self.env.get_absolute_hand_state()
task_info = {
"objectId": source_location["object_id"],
"source_location": source_location, # used in analysis
"target_location": target_location, # used in analysis
"agent_initial_state": initial_agent_location, # not used
"initial_object_location": initial_object_info, # not used
"initial_hand_state": initial_hand_state,
}
if len(should_visualize_goal_start) > 0:
task_info["visualization_source"] = source_data_point
task_info["visualization_target"] = target_data_point
self._last_sampled_task = self._TASK_TYPE(
env=self.env,
sensors=self.sensors,
task_info=task_info,
max_steps=self.max_steps,
action_space=self._action_space,
visualizers=self.visualizers,
reward_configs=self.rewards_config,
)
return self._last_sampled_task
def get_source_target_indices(self):
if self.sampler_mode == "train":
valid_countertops = [
k for (k, v) in self.countertop_object_to_data_id.items() if len(v) > 1
]
countertop_id = random.choice(valid_countertops)
indices = random.sample(self.countertop_object_to_data_id[countertop_id], 2)
result = (
self.all_possible_points[indices[0]],
self.all_possible_points[indices[1]],
)
scene_name = result[0]["scene_name"]
selected_agent_init_loc = random.choice(
self.possible_agent_reachable_poses[scene_name]
)
initial_agent_pose = {
"name": "agent",
"position": {
"x": selected_agent_init_loc["x"],
"y": selected_agent_init_loc["y"],
"z": selected_agent_init_loc["z"],
},
"rotation": {
"x": -0.0,
"y": selected_agent_init_loc["rotation"],
"z": 0.0,
},
"cameraHorizon": selected_agent_init_loc["horizon"],
"isStanding": True,
}
result[0]["initial_agent_pose"] = initial_agent_pose
else: # agent init location needs to be fixed, therefore we load a fixed valid agent init that is previously randomized
result = self.deterministic_data_list[self.sampler_index]["datapoint"]
scene_name = self.deterministic_data_list[self.sampler_index]["scene"]
datapoint_original_index = self.deterministic_data_list[self.sampler_index][
"index"
]
selected_agent_init_loc = self.possible_agent_reachable_poses[scene_name][
datapoint_original_index
]
initial_agent_pose = {
"name": "agent",
"position": {
"x": selected_agent_init_loc["x"],
"y": selected_agent_init_loc["y"],
"z": selected_agent_init_loc["z"],
},
"rotation": {
"x": -0.0,
"y": selected_agent_init_loc["rotation"],
"z": 0.0,
},
"cameraHorizon": selected_agent_init_loc["horizon"],
"isStanding": True,
}
result[0]["initial_agent_pose"] = initial_agent_pose
self.sampler_index += 1
return result
class RotateArmPointNavTaskSampler(ArmPointNavTaskSampler):
_TASK_TYPE = RotateArmPointNavTask
class CamRotateArmPointNavTaskSampler(ArmPointNavTaskSampler):
_TASK_TYPE = CamRotateArmPointNavTask
class EasyArmPointNavTaskSampler(ArmPointNavTaskSampler):
_TASK_TYPE = EasyArmPointNavTask
def get_all_tuples_from_list(lst):
result = []
for first_ind in range(len(lst) - 1):
for second_ind in range(first_ind + 1, len(lst)):
result.append([lst[first_ind], lst[second_ind]])
return result
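# --- Editor's illustration (hypothetical, not part of ask4help-main) ---
# Self-contained version of the train-time pair sampling in
# get_source_target_indices above: pick an object id with at least two valid
# points, then sample two distinct indices. Keys and indices are made up.
example_object_to_data_id = {
    "Apple|+00.50|+00.90|-01.20": [0, 3, 7],
    "Mug|+01.10|+00.90|+00.40": [5],
}
valid_keys = [k for k, v in example_object_to_data_id.items() if len(v) > 1]
source_idx, target_idx = random.sample(
    example_object_to_data_id[random.choice(valid_keys)], 2
)
print(source_idx, target_idx)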
| ask4help-main | allenact_plugins/manipulathor_plugin/manipulathor_task_samplers.py |
| ask4help-main | allenact_plugins/manipulathor_plugin/__init__.py | (empty file)
"""Constant values and hyperparameters that are used by the environment."""
import ai2thor.fifo_server
ARM_MIN_HEIGHT = 0.450998873
ARM_MAX_HEIGHT = 1.8009994
ADDITIONAL_ARM_ARGS = {
"disableRendering": True,
"returnToStart": True,
"speed": 1,
}
MOVE_AHEAD = "MoveAheadContinuous"
ROTATE_LEFT = "RotateLeftContinuous"
ROTATE_RIGHT = "RotateRightContinuous"
MOVE_ARM_HEIGHT_P = "MoveArmHeightP"
MOVE_ARM_HEIGHT_M = "MoveArmHeightM"
MOVE_ARM_X_P = "MoveArmXP"
MOVE_ARM_X_M = "MoveArmXM"
MOVE_ARM_Y_P = "MoveArmYP"
MOVE_ARM_Y_M = "MoveArmYM"
MOVE_ARM_Z_P = "MoveArmZP"
MOVE_ARM_Z_M = "MoveArmZM"
ROTATE_WRIST_PITCH_P = "RotateArmWristPitchP"
ROTATE_WRIST_PITCH_M = "RotateArmWristPitchM"
ROTATE_WRIST_YAW_P = "RotateArmWristYawP"
ROTATE_WRIST_YAW_M = "RotateArmWristYawM"
ROTATE_ELBOW_P = "RotateArmElbowP"
ROTATE_ELBOW_M = "RotateArmElbowM"
LOOK_UP = "LookUp"
LOOK_DOWN = "LookDown"
PICKUP = "PickUpMidLevel"
DONE = "DoneMidLevel"
ENV_ARGS = dict(
gridSize=0.25,
width=224,
height=224,
visibilityDistance=1.0,
agentMode="arm",
fieldOfView=100,
agentControllerType="mid-level",
server_class=ai2thor.fifo_server.FifoServer,
useMassThreshold=True,
massThreshold=10,
autoSimulation=False,
autoSyncTransforms=True,
)
VALID_OBJECT_LIST = [
"Knife",
"Bread",
"Fork",
"Potato",
"SoapBottle",
"Pan",
"Plate",
"Tomato",
"Egg",
"Pot",
"Spatula",
"Cup",
"Bowl",
"SaltShaker",
"PepperShaker",
"Lettuce",
"ButterKnife",
"Apple",
"DishSponge",
"Spoon",
"Mug",
]
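# --- Editor's usage sketch (hypothetical, not part of ask4help-main) ---
# How ENV_ARGS is typically consumed; this mirrors _create_environment in the
# task samplers above. It launches an AI2-THOR build, so it only runs where
# THOR can start, and the scene name is a placeholder.
from allenact_plugins.manipulathor_plugin.manipulathor_environment import (
    ManipulaTHOREnvironment,
)

example_env = ManipulaTHOREnvironment(
    make_agents_visible=False, object_open_speed=0.05, env_args=ENV_ARGS,
)
example_env.reset(
    scene_name="FloorPlan1_physics", agentMode="arm", agentControllerType="mid-level"
)
example_env.stop()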
| ask4help-main | allenact_plugins/manipulathor_plugin/manipulathor_constants.py |
import json
import os
from constants import ABS_PATH_OF_TOP_LEVEL_DIR
TRAIN_OBJECTS = ["Apple", "Bread", "Tomato", "Lettuce", "Pot", "Mug"]
TEST_OBJECTS = ["Potato", "SoapBottle", "Pan", "Egg", "Spatula", "Cup"]
MOVE_ARM_CONSTANT = 0.05
MOVE_ARM_HEIGHT_CONSTANT = MOVE_ARM_CONSTANT
UNWANTED_MOVE_THR = 0.01
DISTANCE_EPS = 1e-9
DISTANCE_MAX = 10.0
dataset_json_file = os.path.join(
ABS_PATH_OF_TOP_LEVEL_DIR, "datasets", "apnd-dataset", "starting_pose.json"
)
try:
with open(dataset_json_file) as f:
ARM_START_POSITIONS = json.load(f)
except Exception:
raise Exception("Dataset not found in {}".format(dataset_json_file))
| ask4help-main | allenact_plugins/manipulathor_plugin/armpointnav_constants.py |
"""Utility classes and functions for sensory inputs used by the models."""
from typing import Any, Union, Optional
import gym
import numpy as np
from allenact.base_abstractions.sensor import Sensor
from allenact.embodiedai.sensors.vision_sensors import DepthSensor, RGBSensor
from allenact.base_abstractions.task import Task
from allenact.utils.misc_utils import prepare_locals_for_super
from allenact_plugins.manipulathor_plugin.arm_calculation_utils import (
world_coords_to_agent_coords,
state_dict_to_tensor,
diff_position,
coord_system_transform,
)
from allenact_plugins.manipulathor_plugin.manipulathor_environment import (
ManipulaTHOREnvironment,
)
class DepthSensorThor(
DepthSensor[Union[ManipulaTHOREnvironment], Union[Task[ManipulaTHOREnvironment]],]
):
"""Sensor for Depth images in THOR.
Returns from a running ManipulaTHOREnvironment instance, the current
RGB frame corresponding to the agent's egocentric view.
"""
def frame_from_env(
self, env: ManipulaTHOREnvironment, task: Optional[Task]
) -> np.ndarray:
return env.controller.last_event.depth_frame.copy()
class NoVisionSensorThor(
RGBSensor[Union[ManipulaTHOREnvironment], Union[Task[ManipulaTHOREnvironment]],]
):
"""Sensor for RGB images in THOR.
Returns from a running ManipulaTHOREnvironment instance, the current
RGB frame corresponding to the agent's egocentric view.
"""
def frame_from_env(
self, env: ManipulaTHOREnvironment, task: Optional[Task]
) -> np.ndarray:
return np.zeros_like(env.current_frame)
class AgentRelativeCurrentObjectStateThorSensor(Sensor):
def __init__(self, uuid: str = "relative_current_obj_state", **kwargs: Any):
observation_space = gym.spaces.Box(
low=-100, high=100, shape=(6,), dtype=np.float32
) # (low=-1.0, high=2.0, shape=(3, 4), dtype=np.float32)
super().__init__(**prepare_locals_for_super(locals()))
def get_observation(
self, env: ManipulaTHOREnvironment, task: Task, *args: Any, **kwargs: Any
) -> Any:
object_id = task.task_info["objectId"]
current_object_state = env.get_object_by_id(object_id)
relative_current_obj = world_coords_to_agent_coords(
current_object_state, env.controller.last_event.metadata["agent"]
)
result = state_dict_to_tensor(
dict(
position=relative_current_obj["position"],
rotation=relative_current_obj["rotation"],
)
)
return result
class RelativeObjectToGoalSensor(Sensor):
def __init__(
self,
uuid: str = "relative_obj_to_goal",
coord_system: str = "xyz_unsigned",
**kwargs: Any
):
assert coord_system in [
"xyz_unsigned",
"xyz_signed",
"polar_radian",
"polar_trigo",
]
self.coord_system = coord_system
if coord_system == "polar_trigo":
obs_dim = 5
else:
obs_dim = 3
observation_space = gym.spaces.Box(
low=-100, high=100, shape=(obs_dim,), dtype=np.float32
)
super().__init__(**prepare_locals_for_super(locals()))
def get_observation(
self, env: ManipulaTHOREnvironment, task: Task, *args: Any, **kwargs: Any
) -> Any:
goal_obj_id = task.task_info["objectId"]
object_info = env.get_object_by_id(goal_obj_id)
target_state = task.task_info["target_location"]
agent_state = env.controller.last_event.metadata["agent"]
relative_current_obj = world_coords_to_agent_coords(object_info, agent_state)
relative_goal_state = world_coords_to_agent_coords(target_state, agent_state)
relative_distance = diff_position(
relative_current_obj, relative_goal_state, absolute=False,
)
result = coord_system_transform(relative_distance, self.coord_system)
return result
class InitialObjectToGoalSensor(Sensor):
def __init__(self, uuid: str = "initial_obj_to_goal", **kwargs: Any):
# observation_space = gym.spaces.Discrete(len(self.detector_types))
observation_space = gym.spaces.Box(
low=-100, high=100, shape=(3,), dtype=np.float32
) # (low=-1.0, high=2.0, shape=(3, 4), dtype=np.float32)
super().__init__(**prepare_locals_for_super(locals()))
def get_observation(
self, env: ManipulaTHOREnvironment, task: Task, *args: Any, **kwargs: Any
) -> Any:
object_source_location = task.task_info["initial_object_location"]
target_state = task.task_info["target_location"]
agent_state = task.task_info["agent_initial_state"]
relative_current_obj = world_coords_to_agent_coords(
object_source_location, agent_state
)
relative_goal_state = world_coords_to_agent_coords(target_state, agent_state)
relative_distance = diff_position(relative_current_obj, relative_goal_state)
result = state_dict_to_tensor(dict(position=relative_distance))
return result
class DistanceObjectToGoalSensor(Sensor):
def __init__(self, uuid: str = "distance_obj_to_goal", **kwargs: Any):
# observation_space = gym.spaces.Discrete(len(self.detector_types))
observation_space = gym.spaces.Box(
low=-100, high=100, shape=(3,), dtype=np.float32
) # (low=-1.0, high=2.0, shape=(3, 4), dtype=np.float32)
super().__init__(**prepare_locals_for_super(locals()))
def get_observation(
self, env: ManipulaTHOREnvironment, task: Task, *args: Any, **kwargs: Any
) -> Any:
goal_obj_id = task.task_info["objectId"]
object_info = env.get_object_by_id(goal_obj_id)
target_state = task.task_info["target_location"]
agent_state = env.controller.last_event.metadata["agent"]
relative_current_obj = world_coords_to_agent_coords(object_info, agent_state)
relative_goal_state = world_coords_to_agent_coords(target_state, agent_state)
relative_distance = diff_position(relative_current_obj, relative_goal_state)
result = state_dict_to_tensor(dict(position=relative_distance))
result = ((result ** 2).sum() ** 0.5).view(1)
return result
class RelativeAgentArmToObjectSensor(Sensor):
def __init__(
self,
uuid: str = "relative_agent_arm_to_obj",
coord_system: str = "xyz_unsigned",
**kwargs: Any
):
assert coord_system in [
"xyz_unsigned",
"xyz_signed",
"polar_radian",
"polar_trigo",
]
self.coord_system = coord_system
if coord_system == "polar_trigo":
obs_dim = 5
else:
obs_dim = 3
observation_space = gym.spaces.Box(
low=-100, high=100, shape=(obs_dim,), dtype=np.float32
)
super().__init__(**prepare_locals_for_super(locals()))
def get_observation(
self, env: ManipulaTHOREnvironment, task: Task, *args: Any, **kwargs: Any
) -> Any:
goal_obj_id = task.task_info["objectId"]
object_info = env.get_object_by_id(goal_obj_id)
hand_state = env.get_absolute_hand_state()
relative_goal_obj = world_coords_to_agent_coords(
object_info, env.controller.last_event.metadata["agent"]
)
relative_hand_state = world_coords_to_agent_coords(
hand_state, env.controller.last_event.metadata["agent"]
)
relative_distance = diff_position(
relative_goal_obj, relative_hand_state, absolute=False,
)
result = coord_system_transform(relative_distance, self.coord_system)
return result
class InitialAgentArmToObjectSensor(Sensor):
def __init__(self, uuid: str = "initial_agent_arm_to_obj", **kwargs: Any):
observation_space = gym.spaces.Box(
low=-100, high=100, shape=(3,), dtype=np.float32
) # (low=-1.0, high=2.0, shape=(3, 4), dtype=np.float32)
super().__init__(**prepare_locals_for_super(locals()))
def get_observation(
self, env: ManipulaTHOREnvironment, task: Task, *args: Any, **kwargs: Any
) -> Any:
object_source_location = task.task_info["initial_object_location"]
initial_hand_state = task.task_info["initial_hand_state"]
relative_goal_obj = world_coords_to_agent_coords(
object_source_location, env.controller.last_event.metadata["agent"]
)
relative_hand_state = world_coords_to_agent_coords(
initial_hand_state, env.controller.last_event.metadata["agent"]
)
relative_distance = diff_position(relative_goal_obj, relative_hand_state)
result = state_dict_to_tensor(dict(position=relative_distance))
return result
class DistanceAgentArmToObjectSensor(Sensor):
def __init__(self, uuid: str = "distance_agent_arm_to_obj", **kwargs: Any):
observation_space = gym.spaces.Box(
low=-100, high=100, shape=(3,), dtype=np.float32
) # (low=-1.0, high=2.0, shape=(3, 4), dtype=np.float32)
super().__init__(**prepare_locals_for_super(locals()))
def get_observation(
self, env: ManipulaTHOREnvironment, task: Task, *args: Any, **kwargs: Any
) -> Any:
goal_obj_id = task.task_info["objectId"]
object_info = env.get_object_by_id(goal_obj_id)
hand_state = env.get_absolute_hand_state()
relative_goal_obj = world_coords_to_agent_coords(
object_info, env.controller.last_event.metadata["agent"]
)
relative_hand_state = world_coords_to_agent_coords(
hand_state, env.controller.last_event.metadata["agent"]
)
relative_distance = diff_position(relative_goal_obj, relative_hand_state)
result = state_dict_to_tensor(dict(position=relative_distance))
result = ((result ** 2).sum() ** 0.5).view(1)
return result
class PickedUpObjSensor(Sensor):
def __init__(self, uuid: str = "pickedup_object", **kwargs: Any):
observation_space = gym.spaces.Box(
low=0, high=1, shape=(1,), dtype=np.float32
) # (low=-1.0, high=2.0, shape=(3, 4), dtype=np.float32)
super().__init__(**prepare_locals_for_super(locals()))
def get_observation(
self, env: ManipulaTHOREnvironment, task: Task, *args: Any, **kwargs: Any
) -> Any:
return task.object_picked_up
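# Hedged composition sketch (not from any experiment config in this repo; the
# coordinate system below is an arbitrary illustrative choice). The sensors in
# this module are typically combined into a single list/suite for an
# ArmPointNav-style task.
def _example_arm_sensors():
    return [
        RelativeAgentArmToObjectSensor(coord_system="polar_trigo"),
        RelativeObjectToGoalSensor(coord_system="polar_trigo"),
        PickedUpObjSensor(),
    ]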
|
ask4help-main
|
allenact_plugins/manipulathor_plugin/manipulathor_sensors.py
|
"""Utility classes and functions for calculating the arm relative and absolute
position."""
from typing import Dict
import numpy as np
import torch
from allenact.utils.system import get_logger
from scipy.spatial.transform import Rotation as R
def state_dict_to_tensor(state: Dict):
result = []
if "position" in state:
result += [
state["position"]["x"],
state["position"]["y"],
state["position"]["z"],
]
if "rotation" in state:
result += [
state["rotation"]["x"],
state["rotation"]["y"],
state["rotation"]["z"],
]
return torch.Tensor(result)
def diff_position(state_goal, state_curr, absolute: bool = True):
p1 = state_goal["position"]
p2 = state_curr["position"]
if absolute:
result = {k: abs(p1[k] - p2[k]) for k in p1.keys()}
else:
result = {k: (p1[k] - p2[k]) for k in p1.keys()}
return result
def coord_system_transform(position: Dict, coord_system: str):
assert coord_system in [
"xyz_unsigned",
"xyz_signed",
"polar_radian",
"polar_trigo",
]
if "xyz" in coord_system:
result = [
position["x"],
position["y"],
position["z"],
]
result = torch.Tensor(result)
if coord_system == "xyz_unsigned":
return torch.abs(result)
else: # xyz_signed
return result
else:
hxy = np.hypot(position["x"], position["y"])
r = np.hypot(hxy, position["z"])
el = np.arctan2(position["z"], hxy) # elevation angle: [-pi/2, pi/2]
az = np.arctan2(position["y"], position["x"]) # azimuthal angle: [-pi, pi]
if coord_system == "polar_radian":
result = [
r,
el / (0.5 * np.pi),
az / np.pi,
] # normalize to [-1, 1]
return torch.Tensor(result)
else: # polar_trigo
result = [
r,
np.cos(el),
np.sin(el),
np.cos(az),
np.sin(az),
]
return torch.Tensor(result)
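# Worked example (a minimal sketch, not part of the original module). For a
# displacement x=1, y=0, z=1 we get hxy = 1, r = sqrt(2) ~ 1.414, el = pi/4 and
# az = 0, so "polar_radian" normalizes to roughly [1.414, 0.5, 0.0] while
# "polar_trigo" expands to roughly [1.414, 0.707, 0.707, 1.0, 0.0].
def _example_coord_system_transform():
    pos = {"x": 1.0, "y": 0.0, "z": 1.0}
    radian = coord_system_transform(pos, "polar_radian")
    trigo = coord_system_transform(pos, "polar_trigo")
    return radian, trigo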
def position_rotation_to_matrix(position, rotation):
result = np.zeros((4, 4))
r = R.from_euler("xyz", [rotation["x"], rotation["y"], rotation["z"]], degrees=True)
result[:3, :3] = r.as_matrix()
result[3, 3] = 1
result[:3, 3] = [position["x"], position["y"], position["z"]]
return result
def inverse_rot_trans_matrix(mat):
mat = np.linalg.inv(mat)
return mat
def matrix_to_position_rotation(matrix):
result = {"position": None, "rotation": None}
rotation = R.from_matrix(matrix[:3, :3]).as_euler("xyz", degrees=True)
rotation_dict = {"x": rotation[0], "y": rotation[1], "z": rotation[2]}
result["rotation"] = rotation_dict
position = matrix[:3, 3]
result["position"] = {"x": position[0], "y": position[1], "z": position[2]}
return result
def find_closest_inverse(deg):
for k in _saved_inverse_rotation_mats.keys():
if abs(k - deg) < 5:
return _saved_inverse_rotation_mats[k]
# if it reaches here it means it had not calculated the degree before
rotation = R.from_euler("xyz", [0, deg, 0], degrees=True)
result = rotation.as_matrix()
inverse = inverse_rot_trans_matrix(result)
get_logger().warning(f"Had to calculate the matrix for {deg}")
return inverse
def calc_inverse(deg):
rotation = R.from_euler("xyz", [0, deg, 0], degrees=True)
result = rotation.as_matrix()
inverse = inverse_rot_trans_matrix(result)
return inverse
_saved_inverse_rotation_mats = {i: calc_inverse(i) for i in range(0, 360, 45)}
_saved_inverse_rotation_mats[360] = _saved_inverse_rotation_mats[0]
def world_coords_to_agent_coords(world_obj, agent_state):
position = agent_state["position"]
rotation = agent_state["rotation"]
agent_translation = [position["x"], position["y"], position["z"]]
assert abs(rotation["x"]) < 0.01 and abs(rotation["z"]) < 0.01
inverse_agent_rotation = find_closest_inverse(rotation["y"])
obj_matrix = position_rotation_to_matrix(
world_obj["position"], world_obj["rotation"]
)
obj_translation = np.matmul(
inverse_agent_rotation, (obj_matrix[:3, 3] - agent_translation)
)
# add rotation later
obj_matrix[:3, 3] = obj_translation
result = matrix_to_position_rotation(obj_matrix)
return result
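# Hedged usage sketch (the poses below are made-up illustrative values). With
# the agent at the origin and a yaw of 0 degrees, a world-space pose is
# unchanged when expressed in agent-relative coordinates.
def _example_world_to_agent():
    agent_state = {
        "position": {"x": 0.0, "y": 0.0, "z": 0.0},
        "rotation": {"x": 0.0, "y": 0.0, "z": 0.0},
    }
    world_obj = {
        "position": {"x": 0.0, "y": 1.0, "z": 2.0},
        "rotation": {"x": 0.0, "y": 0.0, "z": 0.0},
    }
    # Expected result: position ~ {"x": 0.0, "y": 1.0, "z": 2.0}, rotation ~ zeros.
    return world_coords_to_agent_coords(world_obj, agent_state)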
|
ask4help-main
|
allenact_plugins/manipulathor_plugin/arm_calculation_utils.py
|
import ai2thor
from allenact_plugins.ithor_plugin.ithor_environment import IThorEnvironment
from allenact_plugins.manipulathor_plugin.armpointnav_constants import (
ARM_START_POSITIONS,
)
from allenact_plugins.manipulathor_plugin.manipulathor_constants import (
ADDITIONAL_ARM_ARGS,
)
def make_all_objects_unbreakable(controller):
all_breakable_objects = [
o["objectType"]
for o in controller.last_event.metadata["objects"]
if o["breakable"] is True
]
all_breakable_objects = set(all_breakable_objects)
for obj_type in all_breakable_objects:
controller.step(action="MakeObjectsOfTypeUnbreakable", objectType=obj_type)
def reset_environment_and_additional_commands(controller, scene_name):
controller.reset(scene_name)
controller.step(action="MakeAllObjectsMoveable")
controller.step(action="MakeObjectsStaticKinematicMassThreshold")
make_all_objects_unbreakable(controller)
return
def transport_wrapper(controller, target_object, target_location):
transport_detail = dict(
action="PlaceObjectAtPoint",
objectId=target_object,
position=target_location,
forceKinematic=True,
)
advance_detail = dict(action="AdvancePhysicsStep", simSeconds=1.0)
if issubclass(type(controller), IThorEnvironment):
event = controller.step(transport_detail)
controller.step(advance_detail)
elif type(controller) == ai2thor.controller.Controller:
event = controller.step(**transport_detail)
controller.step(**advance_detail)
return event
def initialize_arm(controller):
# for start arm from high up,
scene = controller.last_event.metadata["sceneName"]
initial_pose = ARM_START_POSITIONS[scene]
event1 = controller.step(
dict(
action="TeleportFull",
standing=True,
x=initial_pose["x"],
y=initial_pose["y"],
z=initial_pose["z"],
rotation=dict(x=0, y=initial_pose["rotation"], z=0),
horizon=initial_pose["horizon"],
)
)
event2 = controller.step(
dict(action="MoveArm", position=dict(x=0.0, y=0, z=0.35), **ADDITIONAL_ARM_ARGS)
)
event3 = controller.step(dict(action="MoveArmBase", y=0.8, **ADDITIONAL_ARM_ARGS))
return event1, event2, event3
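# Hedged usage sketch: given an already-created ai2thor arm-agent Controller,
# a typical episode start is a scene reset followed by arm initialization. The
# scene name is illustrative; it must have an entry in ARM_START_POSITIONS.
def _example_start_episode(controller, scene_name="FloorPlan1_physics"):
    reset_environment_and_additional_commands(controller, scene_name)
    return initialize_arm(controller)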
|
ask4help-main
|
allenact_plugins/manipulathor_plugin/manipulathor_utils.py
|
"""A wrapper for engaging with the ManipulaTHOR environment."""
import copy
import math
import warnings
from typing import Dict, Union, Any, Optional, cast
import ai2thor.server
import numpy as np
from ai2thor.controller import Controller
from allenact.utils.misc_utils import prepare_locals_for_super
from allenact_plugins.ithor_plugin.ithor_constants import VISIBILITY_DISTANCE, FOV
from allenact_plugins.ithor_plugin.ithor_environment import IThorEnvironment
from allenact_plugins.manipulathor_plugin.armpointnav_constants import (
MOVE_ARM_HEIGHT_CONSTANT,
MOVE_ARM_CONSTANT,
UNWANTED_MOVE_THR,
DISTANCE_MAX,
)
from allenact_plugins.manipulathor_plugin.manipulathor_constants import (
ADDITIONAL_ARM_ARGS,
ARM_MIN_HEIGHT,
ARM_MAX_HEIGHT,
)
from allenact_plugins.manipulathor_plugin.manipulathor_constants import (
MOVE_ARM_HEIGHT_P,
MOVE_ARM_HEIGHT_M,
MOVE_ARM_X_P,
MOVE_ARM_X_M,
MOVE_ARM_Y_P,
MOVE_ARM_Y_M,
MOVE_ARM_Z_P,
MOVE_ARM_Z_M,
ROTATE_WRIST_PITCH_P,
ROTATE_WRIST_PITCH_M,
ROTATE_WRIST_YAW_P,
ROTATE_WRIST_YAW_M,
ROTATE_ELBOW_P,
ROTATE_ELBOW_M,
LOOK_UP,
LOOK_DOWN,
MOVE_AHEAD,
ROTATE_RIGHT,
ROTATE_LEFT,
PICKUP,
DONE,
)
from allenact_plugins.manipulathor_plugin.manipulathor_utils import (
reset_environment_and_additional_commands,
)
def position_distance(s1, s2, filter_nan: bool = False):
position1 = s1["position"]
position2 = s2["position"]
dist = (
(position1["x"] - position2["x"]) ** 2
+ (position1["y"] - position2["y"]) ** 2
+ (position1["z"] - position2["z"]) ** 2
) ** 0.5
if filter_nan:
dist = DISTANCE_MAX if math.isnan(dist) or dist > DISTANCE_MAX else dist
return dist
def rotation_distance(s1: Dict[str, float], s2: Dict[str, float]):
"""Distance between rotations."""
rotation1 = s1["rotation"]
rotation2 = s2["rotation"]
def deg_dist(d0: float, d1: float):
dist = (d0 - d1) % 360
return min(dist, 360 - dist)
return sum(deg_dist(rotation1[k], rotation2[k]) for k in ["x", "y", "z"])
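# Worked example (hedged sketch, not part of the original module): `deg_dist`
# wraps around 360 degrees, so rotations of 350 and 10 degrees about x are 20
# degrees apart rather than 340.
def _example_rotation_distance():
    s1 = {"rotation": {"x": 350.0, "y": 0.0, "z": 0.0}}
    s2 = {"rotation": {"x": 10.0, "y": 0.0, "z": 0.0}}
    assert rotation_distance(s1, s2) == 20.0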
class ManipulaTHOREnvironment(IThorEnvironment):
"""Wrapper for the manipulathor controller providing arm functionality and
bookkeeping.
See [here](https://ai2thor.allenai.org/documentation/installation) for comprehensive
documentation on AI2-THOR.
# Attributes
controller : The ai2thor controller.
"""
def __init__(
self,
x_display: Optional[str] = None,
docker_enabled: bool = False,
local_thor_build: Optional[str] = None,
visibility_distance: float = VISIBILITY_DISTANCE,
fov: float = FOV,
player_screen_width: int = 224,
player_screen_height: int = 224,
quality: str = "Very Low",
restrict_to_initially_reachable_points: bool = False,
make_agents_visible: bool = True,
object_open_speed: float = 1.0,
simplify_physics: bool = False,
verbose: bool = False,
env_args=None,
) -> None:
"""Initializer.
# Parameters
        x_display : The x display into which to launch ai2thor (possibly necessary if you are running on a server
without an attached display).
docker_enabled : Whether or not to run thor in a docker container (useful on a server without an attached
display so that you don't have to start an x display).
local_thor_build : The path to a local build of ai2thor. This is probably not necessary for your use case
and can be safely ignored.
visibility_distance : The distance (in meters) at which objects, in the viewport of the agent,
are considered visible by ai2thor and will have their "visible" flag be set to `True` in the metadata.
fov : The agent's camera's field of view.
        player_screen_width : The width resolution (in pixels) of the images returned by ai2thor.
        player_screen_height : The height resolution (in pixels) of the images returned by ai2thor.
quality : The quality at which to render. Possible quality settings can be found in
`ai2thor._quality_settings.QUALITY_SETTINGS`.
restrict_to_initially_reachable_points : Whether or not to restrict the agent to locations in ai2thor
that were found to be (initially) reachable by the agent (i.e. reachable by the agent after resetting
the scene). This can be useful if you want to ensure there are only a fixed set of locations where the
agent can go.
        make_agents_visible : Whether or not the agent should be visible. Most noticeable when there are multiple agents
or when quality settings are high so that the agent casts a shadow.
object_open_speed : How quickly objects should be opened. High speeds mean faster simulation but also mean
            that opened objects have a lot of kinetic energy and can, possibly, knock other objects away.
        simplify_physics : Whether or not to simplify physics when applicable. Currently this only simplifies object
interactions when opening drawers (when simplified, objects within a drawer do not slide around on
their own when the drawer is opened or closed, instead they are effectively glued down).
"""
self._verbose = verbose
self.env_args = env_args
del verbose
del env_args
super(ManipulaTHOREnvironment, self).__init__(
**prepare_locals_for_super(locals())
)
def create_controller(self):
controller = Controller(**self.env_args)
return controller
def start(
self, scene_name: Optional[str], move_mag: float = 0.25, **kwargs,
) -> None:
"""Starts the ai2thor controller if it was previously stopped.
After starting, `reset` will be called with the scene name and move magnitude.
# Parameters
scene_name : The scene to load.
move_mag : The amount of distance the agent moves in a single `MoveAhead` step.
kwargs : additional kwargs, passed to reset.
"""
if self._started:
raise RuntimeError(
"Trying to start the environment but it is already started."
)
self.controller = self.create_controller()
self._started = True
self.reset(scene_name=scene_name, move_mag=move_mag, **kwargs)
def reset(
self, scene_name: Optional[str], move_mag: float = 0.25, **kwargs,
):
self._move_mag = move_mag
self._grid_size = self._move_mag
if scene_name is None:
scene_name = self.controller.last_event.metadata["sceneName"]
        # Note: calling self.reset_init_params(**kwargs) here was removed because it caused one of the crashes.
        # TODO: why does the environment still occasionally crash on reset?
try:
reset_environment_and_additional_commands(self.controller, scene_name)
except Exception as e:
print("RESETTING THE SCENE,", scene_name, "because of", str(e))
self.controller = ai2thor.controller.Controller(**self.env_args)
reset_environment_and_additional_commands(self.controller, scene_name)
if self.object_open_speed != 1.0:
self.controller.step(
{"action": "ChangeOpenSpeed", "x": self.object_open_speed}
)
self._initially_reachable_points = None
self._initially_reachable_points_set = None
self.controller.step({"action": "GetReachablePositions"})
if not self.controller.last_event.metadata["lastActionSuccess"]:
warnings.warn(
"Error when getting reachable points: {}".format(
self.controller.last_event.metadata["errorMessage"]
)
)
self._initially_reachable_points = self.last_action_return
self.list_of_actions_so_far = []
def randomize_agent_location(
self, seed: int = None, partial_position: Optional[Dict[str, float]] = None
) -> Dict:
raise NotImplementedError
def is_object_at_low_level_hand(self, object_id):
current_objects_in_hand = self.controller.last_event.metadata["arm"][
"heldObjects"
]
return object_id in current_objects_in_hand
def object_in_hand(self):
"""Object metadata for the object in the agent's hand."""
inv_objs = self.last_event.metadata["inventoryObjects"]
if len(inv_objs) == 0:
return None
elif len(inv_objs) == 1:
return self.get_object_by_id(
self.last_event.metadata["inventoryObjects"][0]["objectId"]
)
else:
raise AttributeError("Must be <= 1 inventory objects.")
def correct_nan_inf(self, flawed_dict, extra_tag=""):
corrected_dict = copy.deepcopy(flawed_dict)
for (k, v) in corrected_dict.items():
if math.isnan(v) or math.isinf(v):
corrected_dict[k] = 0
return corrected_dict
def get_object_by_id(self, object_id: str) -> Optional[Dict[str, Any]]:
for o in self.last_event.metadata["objects"]:
if o["objectId"] == object_id:
o["position"] = self.correct_nan_inf(o["position"], "obj id")
return o
return None
def get_current_arm_state(self):
h_min = ARM_MIN_HEIGHT
h_max = ARM_MAX_HEIGHT
agent_base_location = 0.9009995460510254
event = self.controller.last_event
offset = event.metadata["agent"]["position"]["y"] - agent_base_location
h_max += offset
h_min += offset
joints = event.metadata["arm"]["joints"]
arm = joints[-1]
assert arm["name"] == "robot_arm_4_jnt"
xyz_dict = copy.deepcopy(arm["rootRelativePosition"])
height_arm = joints[0]["position"]["y"]
xyz_dict["h"] = (height_arm - h_min) / (h_max - h_min)
xyz_dict = self.correct_nan_inf(xyz_dict, "realtive hand")
return xyz_dict
def get_absolute_hand_state(self):
event = self.controller.last_event
joints = event.metadata["arm"]["joints"]
arm = copy.deepcopy(joints[-1])
assert arm["name"] == "robot_arm_4_jnt"
xyz_dict = arm["position"]
xyz_dict = self.correct_nan_inf(xyz_dict, "absolute hand")
return dict(position=xyz_dict, rotation={"x": 0, "y": 0, "z": 0})
def get_pickupable_objects(self):
event = self.controller.last_event
object_list = event.metadata["arm"]["pickupableObjects"]
return object_list
def get_current_object_locations(self):
obj_loc_dict = {}
metadata = self.controller.last_event.metadata["objects"]
for o in metadata:
obj_loc_dict[o["objectId"]] = dict(
position=o["position"], rotation=o["rotation"], visible=o["visible"],
)
return copy.deepcopy(obj_loc_dict)
def close_enough(self, current_obj_pose, init_obj_pose, threshold):
position_close = [
abs(current_obj_pose["position"][k] - init_obj_pose["position"][k])
<= threshold
for k in ["x", "y", "z"]
]
position_is_close = sum(position_close) == 3
return position_is_close
def get_objects_moved(
self,
previous_object_locations,
current_object_locations,
target_object_id,
thres_dict: Optional[Dict] = None,
):
moved_objects = []
scene_id = self.scene_name.split("_")[0]
for object_id in current_object_locations.keys():
if object_id == target_object_id:
continue
if object_id not in previous_object_locations:
continue
threshold = UNWANTED_MOVE_THR
if thres_dict is not None:
threshold = max(threshold, thres_dict[scene_id + "-" + object_id])
if not self.close_enough(
current_object_locations[object_id],
previous_object_locations[object_id],
threshold=threshold,
):
moved_objects.append(object_id)
return moved_objects
def get_objects_move_distance(
self,
initial_object_locations,
previous_object_locations,
current_object_locations,
target_object_id,
only_visible: bool = False,
thres_dict: Optional[Dict] = None,
):
moved_objects_position_distance = {}
scene_id = self.scene_name.split("_")[0]
for object_id in current_object_locations.keys():
if object_id == target_object_id:
continue
if object_id not in previous_object_locations:
continue
if only_visible:
# current is visible
if not current_object_locations[object_id]["visible"]:
continue
p_initial2current = position_distance(
current_object_locations[object_id],
initial_object_locations[object_id],
filter_nan=True,
)
p_initial2previous = position_distance(
previous_object_locations[object_id],
initial_object_locations[object_id],
filter_nan=True,
)
threshold = 0.0
if thres_dict is not None:
threshold = max(threshold, thres_dict[scene_id + "-" + object_id])
p_initial2current = max(0.0, p_initial2current - threshold)
p_initial2previous = max(0.0, p_initial2previous - threshold)
moved_objects_position_distance[object_id] = (
p_initial2current - p_initial2previous
)
return sum(moved_objects_position_distance.values())
def step(
self, action_dict: Dict[str, Union[str, int, float]]
) -> ai2thor.server.Event:
"""Take a step in the ai2thor environment."""
action = cast(str, action_dict["action"])
skip_render = "renderImage" in action_dict and not action_dict["renderImage"]
last_frame: Optional[np.ndarray] = None
if skip_render:
last_frame = self.current_frame
if self.simplify_physics:
action_dict["simplifyOPhysics"] = True
if action in [PICKUP, DONE]:
if action == PICKUP:
object_id = action_dict["object_id"]
if not self.is_object_at_low_level_hand(object_id):
pickupable_objects = self.get_pickupable_objects()
                    if object_id in pickupable_objects:
                        # This version of the task is harder: failed pickups are penalized,
                        # so we only attempt the pickup when the target is reported as pickupable.
                        self.step(dict(action="PickupObject"))
                        # PickupObject is not guaranteed to grab the intended object; if something
                        # else ended up in the hand, release it so the hand stays empty.
object_inventory = self.controller.last_event.metadata["arm"][
"heldObjects"
]
if (
len(object_inventory) > 0
and object_id not in object_inventory
):
self.step(dict(action="ReleaseObject"))
action_dict = {"action": "Pass"}
elif action in [MOVE_AHEAD, ROTATE_LEFT, ROTATE_RIGHT]:
copy_additions = copy.deepcopy(ADDITIONAL_ARM_ARGS)
action_dict = {**action_dict, **copy_additions}
if action in [MOVE_AHEAD]:
action_dict["action"] = "MoveAgent"
action_dict["ahead"] = 0.2
elif action in [ROTATE_RIGHT]:
action_dict["action"] = "RotateAgent"
action_dict["degrees"] = 45
elif action in [ROTATE_LEFT]:
action_dict["action"] = "RotateAgent"
action_dict["degrees"] = -45
elif "MoveArm" in action:
copy_additions = copy.deepcopy(ADDITIONAL_ARM_ARGS)
action_dict = {**action_dict, **copy_additions}
base_position = self.get_current_arm_state()
if "MoveArmHeight" in action:
action_dict["action"] = "MoveArmBase"
if action == "MoveArmHeightP":
base_position["h"] += MOVE_ARM_HEIGHT_CONSTANT
if action == "MoveArmHeightM":
base_position[
"h"
] -= MOVE_ARM_HEIGHT_CONSTANT # height is pretty big!
action_dict["y"] = base_position["h"]
else:
action_dict["action"] = "MoveArm"
if action == "MoveArmXP":
base_position["x"] += MOVE_ARM_CONSTANT
elif action == "MoveArmXM":
base_position["x"] -= MOVE_ARM_CONSTANT
elif action == "MoveArmYP":
base_position["y"] += MOVE_ARM_CONSTANT
elif action == "MoveArmYM":
base_position["y"] -= MOVE_ARM_CONSTANT
elif action == "MoveArmZP":
base_position["z"] += MOVE_ARM_CONSTANT
elif action == "MoveArmZM":
base_position["z"] -= MOVE_ARM_CONSTANT
action_dict["position"] = {
k: v for (k, v) in base_position.items() if k in ["x", "y", "z"]
}
elif "RotateArm" in action:
copy_additions = copy.deepcopy(ADDITIONAL_ARM_ARGS)
action_dict = {**action_dict, **copy_additions}
if action == ROTATE_WRIST_PITCH_P:
action_dict["action"] = "RotateWristRelative"
action_dict["pitch"] = 15
elif action == ROTATE_WRIST_PITCH_M:
action_dict["action"] = "RotateWristRelative"
action_dict["pitch"] = -15
elif action == ROTATE_WRIST_YAW_P:
action_dict["action"] = "RotateWristRelative"
action_dict["yaw"] = 15
elif action == ROTATE_WRIST_YAW_M:
action_dict["action"] = "RotateWristRelative"
action_dict["yaw"] = -15
elif action == ROTATE_ELBOW_P:
action_dict["action"] = "RotateElbowRelative"
action_dict["degrees"] = 15
elif action == ROTATE_ELBOW_M:
action_dict["action"] = "RotateElbowRelative"
action_dict["degrees"] = -15
else:
raise ValueError("invalid action " + str(action))
elif action in [LOOK_UP, LOOK_DOWN]:
copy_additions = copy.deepcopy(ADDITIONAL_ARM_ARGS)
action_dict = {**action_dict, **copy_additions}
if action == LOOK_UP:
action_dict["action"] = LOOK_UP
elif action == LOOK_DOWN:
action_dict["action"] = LOOK_DOWN
# there exists other actions e.g. "PlaceObjectAtPoint"
sr = self.controller.step(action_dict)
self.list_of_actions_so_far.append(action_dict)
if self._verbose:
print(self.controller.last_event)
if self.restrict_to_initially_reachable_points:
self._snap_agent_to_initially_reachable()
if skip_render:
assert last_frame is not None
self.last_event.frame = last_frame
return sr
|
ask4help-main
|
allenact_plugins/manipulathor_plugin/manipulathor_environment.py
|
from typing import Optional
import gym
import numpy as np
class GymEnvironment(gym.Wrapper):
"""gym.Wrapper with minimal bookkeeping (initial observation)."""
def __init__(self, gym_env_name: str):
super().__init__(gym.make(gym_env_name))
self._initial_observation: Optional[np.ndarray] = None
self.reset() # generate initial observation
def reset(self) -> np.ndarray:
self._initial_observation = self.env.reset()
return self._initial_observation
@property
def initial_observation(self) -> np.ndarray:
assert (
self._initial_observation is not None
), "Attempted to read initial_observation without calling reset()"
res = self._initial_observation
self._initial_observation = None
return res
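# Hedged usage sketch (assumes the registered gym id "CartPole-v1" is
# available and the pre-0.26 gym step API used throughout this plugin).
# `initial_observation` is read-once: it returns the observation cached by
# reset() and clears it, so a second read without another reset() would trip
# the assertion above.
def _example_gym_environment():
    env = GymEnvironment("CartPole-v1")
    first_obs = env.initial_observation
    obs, reward, done, info = env.step(env.action_space.sample())
    env.close()
    return first_obs, obs, reward, done, info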
|
ask4help-main
|
allenact_plugins/gym_plugin/gym_environment.py
|
ask4help-main
|
allenact_plugins/gym_plugin/__init__.py
|
|
from typing import Optional, Any
import gym
import numpy as np
from allenact.base_abstractions.sensor import Sensor, prepare_locals_for_super
from allenact.base_abstractions.task import Task, SubTaskType
from allenact_plugins.gym_plugin.gym_environment import GymEnvironment
class GymBox2DSensor(Sensor[gym.Env, Task[gym.Env]]):
"""Wrapper for gym Box2D tasks' observations."""
def __init__(
self,
gym_env_name: str = "LunarLanderContinuous-v2",
uuid: str = "gym_box2d_sensor",
**kwargs: Any
):
self.gym_env_name = gym_env_name
observation_space = self._get_observation_space()
super().__init__(**prepare_locals_for_super(locals()))
def _get_observation_space(self) -> gym.Space:
if self.gym_env_name in ["LunarLanderContinuous-v2", "LunarLander-v2"]:
return gym.spaces.Box(-np.inf, np.inf, shape=(8,), dtype=np.float32)
elif self.gym_env_name in ["BipedalWalker-v2", "BipedalWalkerHardcore-v2"]:
high = np.array([np.inf] * 24)
return gym.spaces.Box(-high, high, dtype=np.float32)
elif self.gym_env_name == "CarRacing-v0":
state_w, state_h = 96, 96
return gym.spaces.Box(
low=0, high=255, shape=(state_h, state_w, 3), dtype=np.uint8
)
raise NotImplementedError()
def get_observation(
self,
env: GymEnvironment,
task: Optional[SubTaskType],
*args,
gym_obs: Optional[np.ndarray] = None,
**kwargs: Any
) -> np.ndarray:
if gym_obs is not None:
return gym_obs
else:
return env.initial_observation
class GymMuJoCoSensor(Sensor[gym.Env, Task[gym.Env]]):
"""Wrapper for gym MuJoCo and Robotics tasks observations."""
def __init__(self, gym_env_name: str, uuid: str, **kwargs: Any):
self.gym_env_name = gym_env_name
observation_space = self._get_observation_space()
super().__init__(**prepare_locals_for_super(locals()))
def _get_observation_space(self) -> gym.Space:
# observation space for gym MoJoCo
if self.gym_env_name == "InvertedPendulum-v2":
return gym.spaces.Box(-np.inf, np.inf, shape=(4,), dtype="float32")
elif self.gym_env_name == "Ant-v2":
return gym.spaces.Box(-np.inf, np.inf, shape=(111,), dtype="float32")
elif self.gym_env_name in ["Reacher-v2", "Hopper-v2"]:
return gym.spaces.Box(-np.inf, np.inf, shape=(11,), dtype="float32")
elif self.gym_env_name == "InvertedDoublePendulum-v2":
return gym.spaces.Box(-np.inf, np.inf, (11,), "float32")
elif self.gym_env_name in ["HumanoidStandup-v2", "Humanoid-v2"]:
return gym.spaces.Box(-np.inf, np.inf, (376,), "float32")
elif self.gym_env_name in ["HalfCheetah-v2", "Walker2d-v2"]:
return gym.spaces.Box(-np.inf, np.inf, (17,), "float32")
elif self.gym_env_name == "Swimmer-v2":
return gym.spaces.Box(-np.inf, np.inf, (8,), "float32")
# TODO observation space for gym Robotics
elif self.gym_env_name == "HandManipulateBlock-v0":
return gym.spaces.Dict(
dict(
desired_goal=gym.spaces.Box(
-np.inf, np.inf, shape=(7,), dtype="float32"
),
achieved_goal=gym.spaces.Box(
-np.inf, np.inf, shape=(7,), dtype="float32"
),
observation=gym.spaces.Box(
-np.inf, np.inf, shape=(61,), dtype="float32"
),
)
)
else:
raise NotImplementedError
def get_observation(
self,
env: GymEnvironment,
task: Optional[SubTaskType],
*args,
gym_obs: Optional[np.ndarray] = None,
**kwargs: Any
) -> np.ndarray:
if gym_obs is not None:
return np.array(gym_obs, dtype=np.float32) # coerce to be float32
else:
return np.array(env.initial_observation, dtype=np.float32)
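# Hedged usage sketch ("LunarLanderContinuous-v2" requires Box2D to be
# installed; the id matches those handled in `_get_observation_space` above).
# When no fresh `gym_obs` is passed, the sensor falls back to the wrapper's
# cached initial observation.
def _example_box2d_sensor():
    sensor = GymBox2DSensor("LunarLanderContinuous-v2")
    env = GymEnvironment("LunarLanderContinuous-v2")
    obs = sensor.get_observation(env=env, task=None)
    env.close()
    return obs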
|
ask4help-main
|
allenact_plugins/gym_plugin/gym_sensors.py
|
import torch
from allenact.base_abstractions.distributions import Distr
class GaussianDistr(torch.distributions.Normal, Distr):
"""PyTorch's Normal distribution with a `mode` method."""
def mode(self) -> torch.FloatTensor:
return super().mean
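# Hedged usage sketch: the mode of a Normal distribution equals its mean, so
# `mode()` returns the deterministic action while `sample()` draws a noisy one.
def _example_gaussian_distr():
    loc = torch.zeros(1, 1, 3)
    scale = 0.5 * torch.ones(1, 1, 3)
    d = GaussianDistr(loc=loc, scale=scale)
    return d.mode(), d.sample()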
|
ask4help-main
|
allenact_plugins/gym_plugin/gym_distributions.py
|
from typing import Dict, Union, Optional, Tuple, Any, Sequence, cast
import gym
import torch
import torch.nn as nn
from allenact.algorithms.onpolicy_sync.policy import (
ActorCriticModel,
DistributionType,
)
from allenact.base_abstractions.misc import ActorCriticOutput, Memory
from allenact_plugins.gym_plugin.gym_distributions import GaussianDistr
class MemorylessActorCritic(ActorCriticModel[GaussianDistr]):
"""ActorCriticModel for gym tasks with continuous control in the range [-1,
1]."""
def __init__(
self,
input_uuid: str,
action_space: gym.spaces.Box,
observation_space: gym.spaces.Dict,
action_std: float = 0.5,
mlp_hidden_dims: Sequence[int] = (64, 32),
):
super().__init__(action_space, observation_space)
self.input_uuid = input_uuid
assert len(observation_space[self.input_uuid].shape) == 1
state_dim = observation_space[self.input_uuid].shape[0]
assert len(action_space.shape) == 1
action_dim = action_space.shape[0]
mlp_hidden_dims = (state_dim,) + tuple(mlp_hidden_dims)
# action mean range -1 to 1
self.actor = nn.Sequential(
*self.make_mlp_hidden(nn.Tanh, *mlp_hidden_dims),
nn.Linear(32, action_dim),
nn.Tanh(),
)
# critic
self.critic = nn.Sequential(
*self.make_mlp_hidden(nn.Tanh, *mlp_hidden_dims), nn.Linear(32, 1),
)
# maximum standard deviation
self.register_buffer(
"action_std",
torch.tensor([action_std] * action_dim).view(1, 1, -1),
persistent=False,
)
@staticmethod
def make_mlp_hidden(nl, *dims):
res = []
for it, dim in enumerate(dims[:-1]):
res.append(nn.Linear(dim, dims[it + 1]),)
res.append(nl())
return res
def _recurrent_memory_specification(self):
return None
def forward( # type:ignore
self,
observations: Dict[str, Union[torch.FloatTensor, Dict[str, Any]]],
memory: Memory,
prev_actions: Any,
masks: torch.FloatTensor,
) -> Tuple[ActorCriticOutput[DistributionType], Optional[Memory]]:
means = self.actor(observations[self.input_uuid])
values = self.critic(observations[self.input_uuid])
return (
ActorCriticOutput(
cast(DistributionType, GaussianDistr(loc=means, scale=self.action_std)),
values,
{},
),
None, # no Memory
)
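# Hedged construction sketch (the uuid and dimensions are illustrative; they
# only need to match the sensor producing the observation). Note that the
# actor/critic heads hard-code a final hidden width of 32, so any custom
# `mlp_hidden_dims` should end in 32.
def _example_memoryless_actor_critic():
    input_uuid = "gym_mujoco_data"
    observation_space = gym.spaces.Dict(
        {input_uuid: gym.spaces.Box(low=-10.0, high=10.0, shape=(11,))}
    )
    action_space = gym.spaces.Box(low=-1.0, high=1.0, shape=(2,))
    return MemorylessActorCritic(
        input_uuid=input_uuid,
        action_space=action_space,
        observation_space=observation_space,
    )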
|
ask4help-main
|
allenact_plugins/gym_plugin/gym_models.py
|
import random
from typing import Any, List, Dict, Optional, Union, Callable, Sequence, Tuple
import gym
import numpy as np
from gym.utils import seeding
from allenact.base_abstractions.misc import RLStepResult
from allenact.base_abstractions.sensor import Sensor, SensorSuite
from allenact.base_abstractions.task import Task, TaskSampler
from allenact.utils.experiment_utils import set_seed
from allenact.utils.system import get_logger
from allenact_plugins.gym_plugin.gym_environment import GymEnvironment
from allenact_plugins.gym_plugin.gym_sensors import GymBox2DSensor, GymMuJoCoSensor
class GymTask(Task[gym.Env]):
"""Abstract gym task.
Subclasses need to implement `class_action_names` and `_step`.
"""
def __init__(
self,
env: GymEnvironment,
sensors: Union[SensorSuite, List[Sensor]],
task_info: Dict[str, Any],
**kwargs,
):
max_steps = env.spec.max_episode_steps
super().__init__(
env=env, sensors=sensors, task_info=task_info, max_steps=max_steps, **kwargs
)
self._gym_done = False
self.task_name: str = self.env.spec.id
@property
def action_space(self) -> gym.spaces.Space:
return self.env.action_space
def render(self, mode: str = "rgb", *args, **kwargs) -> np.ndarray:
if mode == "rgb":
mode = "rgb_array"
return self.env.render(mode=mode)
def get_observations(
self, *args, gym_obs: Optional[Dict[str, Any]] = None, **kwargs
) -> Any:
return self.sensor_suite.get_observations(
env=self.env, task=self, gym_obs=gym_obs
)
def reached_terminal_state(self) -> bool:
return self._gym_done
def close(self) -> None:
pass
def metrics(self) -> Dict[str, Any]:
# noinspection PyUnresolvedReferences,PyCallingNonCallable
env_metrics = self.env.metrics() if hasattr(self.env, "metrics") else {}
return {
**super().metrics(),
**{k: float(v) for k, v in env_metrics.items()},
"success": int(
self.env.was_successful
if hasattr(self.env, "was_successful")
else self.cumulative_reward > 0
),
}
class GymContinuousTask(GymTask):
"""Task for a continuous-control gym Box2D & MuJoCo Env; it allows
interfacing allenact with gym tasks."""
@classmethod
def class_action_names(cls, **kwargs) -> Tuple[str, ...]:
return tuple()
def _step(self, action: Sequence[float]) -> RLStepResult:
action = np.array(action)
gym_obs, reward, self._gym_done, info = self.env.step(action=action)
return RLStepResult(
observation=self.get_observations(gym_obs=gym_obs),
reward=reward,
done=self.is_done(),
info=info,
)
def task_selector(env_name: str) -> type:
"""Helper function for `GymTaskSampler`."""
if env_name in [
# Box2d Env
"CarRacing-v0",
"LunarLanderContinuous-v2",
"BipedalWalker-v2",
"BipedalWalkerHardcore-v2",
# MuJoCo Env
"InvertedPendulum-v2",
"Ant-v2",
"InvertedDoublePendulum-v2",
"Humanoid-v2",
"Reacher-v2",
"Hopper-v2",
"HalfCheetah-v2",
"Swimmer-v2",
"Walker2d-v2",
]:
return GymContinuousTask
raise NotImplementedError()
def sensor_selector(env_name: str) -> Sensor:
"""Helper function for `GymTaskSampler`."""
if env_name in [
"CarRacing-v0",
"LunarLanderContinuous-v2",
"BipedalWalker-v2",
"BipedalWalkerHardcore-v2",
"LunarLander-v2",
]:
return GymBox2DSensor(env_name)
elif env_name in [
"InvertedPendulum-v2",
"Ant-v2",
"InvertedDoublePendulum-v2",
"Humanoid-v2",
"Reacher-v2",
"Hopper-v2",
"HalfCheetah-v2",
"Swimmer-v2",
"Walker2d-v2",
]:
return GymMuJoCoSensor(env_name=env_name, uuid="gym_mujoco_data")
raise NotImplementedError()
class GymTaskSampler(TaskSampler):
"""TaskSampler for gym environments."""
def __init__(
self,
gym_env_type: str = "LunarLanderContinuous-v2",
sensors: Optional[Union[SensorSuite, List[Sensor]]] = None,
max_tasks: Optional[int] = None,
num_unique_seeds: Optional[int] = None,
task_seeds_list: Optional[List[int]] = None,
deterministic_sampling: bool = False,
task_selector: Callable[[str], type] = task_selector,
repeat_failed_task_for_min_steps: int = 0,
extra_task_kwargs: Optional[Dict] = None,
seed: Optional[int] = None,
**kwargs,
):
super().__init__()
self.gym_env_type = gym_env_type
self.sensors: SensorSuite
if sensors is None:
self.sensors = SensorSuite([sensor_selector(self.gym_env_type)])
else:
self.sensors = (
SensorSuite(sensors)
if not isinstance(sensors, SensorSuite)
else sensors
)
self.max_tasks = max_tasks
self.num_unique_seeds = num_unique_seeds
self.deterministic_sampling = deterministic_sampling
self.repeat_failed_task_for_min_steps = repeat_failed_task_for_min_steps
self.extra_task_kwargs = (
extra_task_kwargs if extra_task_kwargs is not None else {}
)
self._last_env_seed: Optional[int] = None
self._last_task: Optional[GymTask] = None
self._number_of_steps_taken_with_task_seed = 0
assert (not deterministic_sampling) or repeat_failed_task_for_min_steps <= 0, (
"If `deterministic_sampling` is True then we require"
" `repeat_failed_task_for_min_steps <= 0`"
)
assert (self.num_unique_seeds is None) or (
0 < self.num_unique_seeds
), "`num_unique_seeds` must be a positive integer."
self.num_unique_seeds = num_unique_seeds
self.task_seeds_list = task_seeds_list
if self.task_seeds_list is not None:
if self.num_unique_seeds is not None:
assert self.num_unique_seeds == len(
self.task_seeds_list
), "`num_unique_seeds` must equal the length of `task_seeds_list` if both specified."
self.num_unique_seeds = len(self.task_seeds_list)
elif self.num_unique_seeds is not None:
self.task_seeds_list = list(range(self.num_unique_seeds))
if num_unique_seeds is not None and repeat_failed_task_for_min_steps > 0:
raise NotImplementedError(
"`repeat_failed_task_for_min_steps` must be <=0 if number"
" of unique seeds is not None."
)
assert (not deterministic_sampling) or (
self.num_unique_seeds is not None
), "Cannot use deterministic sampling when `num_unique_seeds` is `None`."
if (not deterministic_sampling) and self.max_tasks:
get_logger().warning(
"`deterministic_sampling` is `False` but you have specified `max_tasks < inf`,"
" this might be a mistake when running testing."
)
if seed is not None:
self.set_seed(seed)
else:
self.np_seeded_random_gen, _ = seeding.np_random(
random.randint(0, 2 ** 31 - 1)
)
self.num_tasks_generated = 0
self.task_type = task_selector(self.gym_env_type)
self.env: GymEnvironment = GymEnvironment(self.gym_env_type)
@property
def length(self) -> Union[int, float]:
return (
float("inf")
if self.max_tasks is None
else self.max_tasks - self.num_tasks_generated
)
@property
def total_unique(self) -> Optional[Union[int, float]]:
return None if self.num_unique_seeds is None else self.num_unique_seeds
@property
def last_sampled_task(self) -> Optional[Task]:
raise NotImplementedError
def next_task(self, force_advance_scene: bool = False) -> Optional[GymTask]:
if self.length <= 0:
return None
repeating = False
if self.num_unique_seeds is not None:
if self.deterministic_sampling:
self._last_env_seed = self.task_seeds_list[
self.num_tasks_generated % len(self.task_seeds_list)
]
else:
self._last_env_seed = self.np_seeded_random_gen.choice(
self.task_seeds_list
)
else:
if self._last_task is not None:
self._number_of_steps_taken_with_task_seed += (
self._last_task.num_steps_taken()
)
if (
self._last_env_seed is not None
and self._number_of_steps_taken_with_task_seed
< self.repeat_failed_task_for_min_steps
and self._last_task.cumulative_reward == 0
):
repeating = True
else:
self._number_of_steps_taken_with_task_seed = 0
self._last_env_seed = self.np_seeded_random_gen.randint(0, 2 ** 31 - 1)
task_has_same_seed_reset = hasattr(self.env, "same_seed_reset")
if repeating and task_has_same_seed_reset:
# noinspection PyUnresolvedReferences
self.env.same_seed_reset()
else:
self.env.seed(self._last_env_seed)
self.env.saved_seed = self._last_env_seed
self.env.reset()
self.num_tasks_generated += 1
task_info = {"id": "random%d" % random.randint(0, 2 ** 63 - 1)}
self._last_task = self.task_type(
**dict(env=self.env, sensors=self.sensors, task_info=task_info),
**self.extra_task_kwargs,
)
return self._last_task
def close(self) -> None:
self.env.close()
@property
def all_observation_spaces_equal(self) -> bool:
return True
def reset(self) -> None:
self.num_tasks_generated = 0
self.env.reset()
def set_seed(self, seed: int) -> None:
self.np_seeded_random_gen, _ = seeding.np_random(seed)
if seed is not None:
set_seed(seed)
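# Hedged usage sketch (the seed and task budget are arbitrary illustrative
# values; "LunarLanderContinuous-v2" requires Box2D to be installed).
def _example_task_sampler():
    sampler = GymTaskSampler(
        gym_env_type="LunarLanderContinuous-v2", max_tasks=2, seed=12345
    )
    task = sampler.next_task()
    observations = task.get_observations()
    sampler.close()
    return observations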
|
ask4help-main
|
allenact_plugins/gym_plugin/gym_tasks.py
|
from typing import List, Callable, Optional, Any, cast, Dict
import gym
import numpy as np
import torch
import torch.nn as nn
from torchvision import models
import clip
from allenact.base_abstractions.preprocessor import Preprocessor
from allenact.utils.misc_utils import prepare_locals_for_super
'''
class ClipResNetEmbedder(nn.Module):
def __init__(self, resnet, pool=True):
super().__init__()
self.model = resnet
self.pool = pool
self.eval()
def forward(self, x):
m = self.model.visual
with torch.no_grad():
newmodel = torch.nn.Sequential(*(list(m.children())[:-1]))
# m.attnpool = None
out = newmodel(x)
# print(out.shape,'clip embedded')
# print (**m[:-1])
# exit()
return out
'''
class ClipResNetEmbedder(nn.Module):
def __init__(self, resnet, pool=True):
super().__init__()
self.model = resnet
self.pool = pool
self.eval()
def forward(self, x):
m = self.model.visual
with torch.no_grad():
def stem(x):
for conv, bn in [(m.conv1, m.bn1), (m.conv2, m.bn2), (m.conv3, m.bn3)]:
x = m.relu(bn(conv(x)))
x = m.avgpool(x)
return x
x = x.type(m.conv1.weight.dtype)
x = stem(x)
x = m.layer1(x)
x = m.layer2(x)
x = m.layer3(x)
x = m.layer4(x)
if self.pool:
x = m.avgpool(x)
x = torch.flatten(x, 1)
return x
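# Hedged usage sketch (downloads the CLIP "RN50" weights on first use; the
# input is a dummy batch and the quoted output shape of roughly (1, 2048, 7, 7)
# assumes RN50 with 224x224 inputs and pool=False).
def _example_clip_embedder():
    model, _ = clip.load("RN50", device="cpu")
    embedder = ClipResNetEmbedder(model, pool=False)
    dummy = torch.zeros(1, 3, 224, 224)
    return embedder(dummy)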
class ClipResNetPreprocessor(Preprocessor):
"""Preprocess RGB or depth image using a ResNet model with CLIP model
weights."""
CLIP_RGB_MEANS = (0.48145466, 0.4578275, 0.40821073)
CLIP_RGB_STDS = (0.26862954, 0.26130258, 0.27577711)
def __init__(
self,
rgb_input_uuid: str,
clip_model_type: str,
pool: bool,
device: Optional[torch.device] = None,
device_ids: Optional[List[torch.device]] = None,
**kwargs: Any,
):
assert clip_model_type in clip.available_models()
if clip_model_type == "RN50":
output_shape = (2048, 7, 7)
elif clip_model_type == "RN50x16":
output_shape = (3072, 7, 7)
else:
raise NotImplementedError(
f"Currently `clip_model_type` must be one of 'RN50' or 'RN50x16'"
)
# print (clip_model_type,'clip')
# exit()
if pool:
output_shape = output_shape[:1]
self.clip_model_type = clip_model_type
self.pool = pool
self.device = torch.device("cpu") if device is None else device
self.device_ids = device_ids or cast(
List[torch.device], list(range(torch.cuda.device_count()))
)
self._resnet: Optional[ClipResNetEmbedder] = None
low = -np.inf
high = np.inf
shape = output_shape
input_uuids = [rgb_input_uuid]
assert (
len(input_uuids) == 1
), "resnet preprocessor can only consume one observation type"
observation_space = gym.spaces.Box(low=low, high=high, shape=shape)
super().__init__(**prepare_locals_for_super(locals()))
@property
def resnet(self) -> ClipResNetEmbedder:
import clip
if self._resnet is None:
self._resnet = ClipResNetEmbedder(
clip.load(self.clip_model_type, device=self.device)[0], pool=self.pool
).to(self.device)
return self._resnet
def to(self, device: torch.device) -> "ClipResNetPreprocessor":
self._resnet = self.resnet.to(device)
self.device = device
return self
def process(self, obs: Dict[str, Any], *args: Any, **kwargs: Any) -> Any:
x = obs[self.input_uuids[0]].to(self.device).permute(0, 3, 1, 2) # bhwc -> bchw
# If the input is depth, repeat it across all 3 channels
# print (x.shape,'input shape')
if x.shape[1] == 1:
x = x.repeat(1, 3, 1, 1)
x = self.resnet(x).float()
# print (x.shape,'shape in clip preprocessor')
# print (self.pool)
return x
class ClipTextPreprocessor(Preprocessor):
def __init__(
self,
goal_sensor_uuid: str,
object_types: List[str],
device: Optional[torch.device] = None,
device_ids: Optional[List[torch.device]] = None,
**kwargs: Any,
):
try:
import clip
self.clip = clip
except ImportError as _:
raise ImportError(
"Cannot `import clip` when instatiating `CLIPResNetPreprocessor`."
" Please install clip from the openai/CLIP git repository:"
"\n`pip install git+https://github.com/openai/CLIP.git@3b473b0e682c091a9e53623eebc1ca1657385717`"
)
output_shape = (1024,)
self.object_types = object_types
self.device = torch.device("cpu") if device is None else device
self.device_ids = device_ids or cast(
List[torch.device], list(range(torch.cuda.device_count()))
)
        self._clip_model = None
        low = -np.inf
high = np.inf
shape = output_shape
observation_space = gym.spaces.Box(low=low, high=high, shape=shape)
input_uuids = [goal_sensor_uuid]
super().__init__(**prepare_locals_for_super(locals()))
@property
def text_encoder(self):
if self._clip_model is None:
self._clip_model = self.clip.load('RN50', device=self.device)[0]
self._clip_model.eval()
return self._clip_model.encode_text
def to(self, device: torch.device):
self.device = device
self._clip_model = None
return self
def process(self, obs: Dict[str, Any], *args: Any, **kwargs: Any) -> Any:
object_inds = obs[self.input_uuids[0]]
object_types = [self.object_types[ind] for ind in object_inds]
x = self.clip.tokenize([f"navigate to the {obj}" for obj in object_types]).to(self.device)
with torch.no_grad():
return self.text_encoder(x).float()
|
ask4help-main
|
allenact_plugins/clip_plugin/clip_preprocessors.py
|
"""Baseline models for use in the object navigation task.
Object navigation is currently available as a Task in AI2-THOR and
Facebook's Habitat.
"""
from typing import Tuple, Dict, Optional, cast
import gym
import torch
import torch.nn as nn
from gym.spaces.dict import Dict as SpaceDict
from allenact.algorithms.onpolicy_sync.policy import (
ActorCriticModel,
LinearCriticHead,
LinearActorHead,
DistributionType,
Memory,
ObservationType,
)
from allenact.base_abstractions.distributions import CategoricalDistr
from allenact.base_abstractions.misc import ActorCriticOutput
from allenact.embodiedai.models.basic_models import RNNStateEncoder
class CLIPObjectNavActorCritic(ActorCriticModel[CategoricalDistr]):
def __init__(
self,
action_space: gym.spaces.Discrete,
observation_space: SpaceDict,
goal_sensor_uuid: str,
rgb_resnet_preprocessor_uuid: str,
hidden_size: int = 512,
include_auxiliary_head: bool = False,
):
super().__init__(action_space=action_space, observation_space=observation_space,)
self._hidden_size = hidden_size
self.include_auxiliary_head = include_auxiliary_head
self.encoder = CLIPActorCriticEncoder(
self.observation_space,
goal_sensor_uuid,
rgb_resnet_preprocessor_uuid,
self._hidden_size
)
self.actor = LinearActorHead(self.encoder.output_dims, action_space.n)
self.critic = LinearCriticHead(self.encoder.output_dims)
if self.include_auxiliary_head:
self.auxiliary_actor = LinearActorHead(self.encoder.output_dims, action_space.n)
self.train()
@property
def recurrent_hidden_state_size(self) -> int:
"""The recurrent hidden state size of the model."""
return self._hidden_size
@property
def is_blind(self) -> bool:
"""True if the model is blind (e.g. neither 'depth' or 'rgb' is an
input observation type)."""
return False
@property
def num_recurrent_layers(self) -> int:
"""Number of recurrent hidden layers."""
return self.encoder.state_encoder.num_recurrent_layers
def _recurrent_memory_specification(self):
return dict(
rnn=(
(
("layer", self.num_recurrent_layers),
("sampler", None),
("hidden", self.recurrent_hidden_state_size),
),
torch.float32,
)
)
def get_object_type_encoding(
self, observations: Dict[str, torch.FloatTensor]
) -> torch.FloatTensor:
"""Get the object type encoding from input batched observations."""
return self.encoder.get_object_type_encoding(observations)
def forward( # type:ignore
self,
observations: ObservationType,
memory: Memory,
prev_actions: torch.Tensor,
masks: torch.FloatTensor,
) -> Tuple[ActorCriticOutput[DistributionType], Optional[Memory]]:
x, rnn_hidden_states = self.encoder(observations, memory, masks)
return (
ActorCriticOutput(
distributions=self.actor(x),
values=self.critic(x),
extras={"auxiliary_distributions": self.auxiliary_actor(x)}
if self.include_auxiliary_head
else {},
),
memory.set_tensor("rnn", rnn_hidden_states),
)
class CLIPActorCriticEncoder(nn.Module):
def __init__(
self,
observation_spaces: SpaceDict,
goal_sensor_uuid: str,
resnet_preprocessor_uuid: str,
rnn_hidden_size: int = 512
) -> None:
super().__init__()
self.goal_uuid = goal_sensor_uuid
self.resnet_uuid = resnet_preprocessor_uuid
self.state_encoder = RNNStateEncoder(self.output_dims, rnn_hidden_size,)
@property
def is_blind(self):
return False
@property
def output_dims(self):
        return 1024
def get_object_type_encoding(
self, observations: Dict[str, torch.FloatTensor]
) -> torch.FloatTensor:
"""Get the object type encoding from input batched observations."""
return observations[self.goal_uuid]
def forward(self, observations, memory: Memory, masks: torch.FloatTensor):
# observations are (nstep, nsampler, nagent, *features) or (nstep, nsampler, *features)
# rnn encoder input should be (*n..., vector)
# function output should be (*n..., vector)
nstep, nsampler = observations[self.resnet_uuid].shape[:2]
x, rnn_hidden_states = self.state_encoder(
observations[self.resnet_uuid],
memory.tensor("rnn"),
masks
)
x = x.view(nstep * nsampler, -1)
# adapt input
resnet_obs = observations[self.resnet_uuid].view(nstep * nsampler, -1)
goal_obs = observations[self.goal_uuid].view(nstep * nsampler, -1)
# nn layers
output = (resnet_obs + x) * goal_obs
# adapt output
output = output.view(nstep, nsampler, -1)
return output, rnn_hidden_states
|
ask4help-main
|
allenact_plugins/clip_plugin/objectnav_models.py
|
ask4help-main
|
allenact_plugins/clip_plugin/__init__.py
|
|
ask4help-main
|
allenact_plugins/clip_plugin/configs/__init__.py
|
|
ask4help-main
|
allenact_plugins/clip_plugin/scripts/__init__.py
|
|
#!/usr/bin/python
import setuptools
setuptools.setup(
name='citeomatic',
version='0.01',
url='http://github.com/allenai/s2-research',
packages=setuptools.find_packages(),
install_requires=[
],
tests_require=[
],
zip_safe=False,
test_suite='py.test',
entry_points='',
pyrobuf_modules=['citeomatic.proto'],
)
|
citeomatic-master
|
setup.py
|
import random
import unittest
import os
import h5py
from sklearn.preprocessing import normalize
from citeomatic.models.options import ModelOptions
from citeomatic.models.text_embeddings import TextEmbeddingSum
import numpy as np
FIXTURES = os.path.join('tests', 'fixtures')
EMBEDDINGS_FILE = os.path.join(FIXTURES, 'weights.h5')
def almost_equal(x, y, threshold=0.0001):
return abs(x-y) < threshold
class TestPreTrainedEmbedding(unittest.TestCase):
def test_pre_trained_layer(self):
with h5py.File(EMBEDDINGS_FILE, 'r') as f:
pretrained_embeddings = f['embedding'][...]
options = ModelOptions()
options.use_pretrained = True
options.dense_dim = 300
options.n_features = 200
t_embedding_sum = TextEmbeddingSum(options=options,
pretrained_embeddings=pretrained_embeddings,
magnitudes_initializer='ones'
)
embedding_model, outputs = t_embedding_sum.create_text_embedding_model(
prefix='test', final_l2_norm=False)
idx = random.randint(0, 200)
pred = embedding_model.predict(np.asarray([idx + 1]))[0]
input_embedding = normalize(pretrained_embeddings[idx].reshape(1, -1))[0]
assert all(map(almost_equal, pred, input_embedding))
|
citeomatic-master
|
tests/test_pre_trained_embedding.py
|
import re
from citeomatic.common import Document, global_tokenizer
from citeomatic import display
TEST_ABSTRACT = """
'— This paper investigates into the colorization problem which converts a grayscale image to a colorful version. This is a very difficult problem and normally requires manual adjustment to achieve artifact-free quality. For instance , it normally requires human-labelled color scribbles on the grayscale target image or a careful selection of colorful reference images (e.g., capturing the same scene in the grayscale target image). Unlike the previous methods, this paper aims at a high-quality fully-automatic colorization method. With the assumption of a perfect patch matching technique, the use of an extremely large-scale reference database (that contains sufficient color images) is the most reliable solution to the colorization problem. However, patch matching noise will increase with respect to the size of the reference database in practice. Inspired by the recent success in deep learning techniques which provide amazing modeling of large-scale data, this paper re-formulates the colorization problem so that deep learning techniques can be directly employed. To ensure artifact-free quality, a joint bilateral filtering based post-processing step is proposed. We further develop an adaptive image clustering technique to incorporate the global image information. Numerous experiments demonstrate that our method outperforms the state-of-art algorithms both in terms of quality and speed.'"""
TEST_DOCS = [
Document(
title=' '.join(global_tokenizer('Deep Colorization')),
title_raw='Deep Colorization',
abstract=' '.join(global_tokenizer(TEST_ABSTRACT)),
abstract_raw=TEST_ABSTRACT,
authors=['Zezhou Cheng', 'Qingxiong Yang', 'Bin Sheng'],
out_citations=[],
year=2015,
id='6baaca1b6de31ac2a5b1f89e9b3baa61e41d52f9',
venue='ICCV',
in_citation_count=8,
out_citation_count=37,
key_phrases=[
'Colorization', 'Reference Database', 'Deep Learning Technique'
]
), Document(
title=' '.join(global_tokenizer('Deep Computing')),
title_raw='Deep Computing',
abstract='',
abstract_raw='',
authors=['Louis V. Gerstner'],
out_citations=[],
year=2000,
id='100544cf556dd8d98e6871bf28ea9e87a6f0ecc9',
venue='LOG IN',
in_citation_count=0,
out_citation_count=0,
key_phrases=[]
), Document(
title=' '.join(global_tokenizer('Deep Blue')),
title_raw='Deep Blue',
abstract='',
abstract_raw='',
authors=['Jim Ottaviani'],
out_citations=[],
year=2006,
id='60a5511f544d0ed5155f7c5f0a70b8c87337d2f7',
venue='IASSIST Conference',
in_citation_count=0,
out_citation_count=0,
key_phrases=[]
)
]
EXPECTED_BIBTEX = [
"""@article{cheng2015deep,
title={Deep Colorization},
author={Zezhou Cheng, Qingxiong Yang, Bin Sheng},
year={2015}
}""", """@article{gerstner2000deep,
title={Deep Computing},
author={Louis V. Gerstner},
year={2000}
}""", """@article{ottaviani2006deep,
title={Deep Blue},
author={Jim Ottaviani},
year={2006}
}"""
]
def test_bibtex_export():
for doc, expected in zip(TEST_DOCS, EXPECTED_BIBTEX):
assert re.sub('\\s+', ' ', display.document_to_bibtex(doc)).lower() == \
re.sub('\\s+', ' ', expected).lower()
|
citeomatic-master
|
tests/test_display.py
|
#!/usr/bin/env python
import json
import logging
import os
import random
import time
import numpy as np
from citeomatic import features
from citeomatic.common import FieldNames
from citeomatic.corpus import Corpus
def _time(op):
st = time.time()
r = op()
ed = time.time()
print(op, ed - st)
return r
WORDS = '''
accretion
agreeably
anguishing
armor
avenues
bassoon
bier
bobs
brightest
bystander
carpetbags
charbroiling
civilian
collaboration
condition
convincingly
crankcases
curtsying
deeper
designate
disbursements
divorce
duckbill
elliptical
enviously
exiling
fateful
fixture
forces
fulcra
geologic
graffiti
gyration
hearten
homeyness
hyphenated
inbreed
injections
inundate
jubilantly
lamebrain
liberalism
loss
manna
memorials
miscasting
mortifies
naturalistic
noses
opened
overpopulation
parqueted
perform
pillow
politest
preferable
pronoun
pyjamas
rattles
referees
representation
rhino
rumples
scarcity
seldom
shipments
sizes
sneeringly
speakers
stake
stratums
summoning
synthetic
tenderness
tingle
transiting
turncoat
uneasily
urchin
violets
wayfaring
wintertime
zaniest
'''.split('\n')
WORDS = WORDS * 100
print(len(WORDS))
def build_test_corpus(source_file, target_file):
try:
os.unlink(target_file)
except:
pass
with open(source_file, 'w') as tf:
for i in range(100):
json.dump({
FieldNames.TITLE: ' '.join(random.sample(WORDS, 10)),
FieldNames.ABSTRACT: ' '.join(random.sample(WORDS, 1000)),
FieldNames.AUTHORS: [],
FieldNames.OUT_CITATIONS: [
str(x) for x in random.sample(range(100), 2)
],
FieldNames.IN_CITATION_COUNT: len([
str(x) for x in random.sample(range(100), 2)
]),
FieldNames.KEY_PHRASES: random.sample(WORDS, 3),
FieldNames.YEAR: 2011,
FieldNames.PAPER_ID: str(i),
FieldNames.VENUE: 'v-{}'.format(random.randint(1, 5))
}, tf
)
tf.write('\n')
Corpus.build(target_file, source_file)
def test_corpus_conversion():
build_test_corpus('/tmp/foo.json', '/tmp/foo.sqlite')
def test_corpus_iterator():
corpus = Corpus.load('/tmp/foo.sqlite')
iter_ids = []
for doc in corpus:
iter_ids.append(doc.id)
overlap_n = len(set(iter_ids).intersection(set(corpus.all_ids)))
assert overlap_n == corpus.n_docs
def test_featurizer_and_data_gen():
build_test_corpus('/tmp/foo.json', '/tmp/foo.sqlite')
corpus = Corpus.load('/tmp/foo.sqlite')
featurizer = features.Featurizer()
featurizer.fit(corpus, max_df_frac=1.0)
dg = features.DataGenerator(corpus, featurizer)
gen = dg.triplet_generator(
paper_ids=corpus.all_ids,
candidate_ids=corpus.all_ids,
batch_size=128,
neg_to_pos_ratio=5
)
# make sure we can get features
for i in range(10):
print(i)
X, y = next(gen)
# correct batch size
assert len(y) >= 128
# positives, hard negatives, easy negatives
assert len(np.unique(y)) == 3
# correct padding
assert X['query-abstract-txt'].shape[1] == featurizer.max_abstract_len
assert X['query-title-txt'].shape[1] == featurizer.max_title_len
# no new words
assert set(featurizer.word_indexer.word_to_index.keys()).difference(WORDS) == set()
q, ex, labels = next(dg._listwise_examples(
corpus.all_ids,
corpus.all_ids
))
# query id should not be in candidates
assert q.id not in [i.id for i in ex]
# pos ids should be out_citations
pos_docs = [i.id for i, j in zip(ex, labels) if j == np.max(labels)]
assert set(pos_docs) == set(q.out_citations)
# neg ids should be NOT out_citations
neg_docs = [i.id for i, j in zip(ex, labels) if j < np.max(labels)]
assert np.all([i not in neg_docs for i in q.out_citations])
# test variable margin off
dg = features.DataGenerator(corpus, featurizer, use_variable_margin=False)
gen = dg.triplet_generator(
paper_ids=corpus.all_ids,
candidate_ids=corpus.all_ids,
batch_size=128,
neg_to_pos_ratio=5
)
X, y = next(gen)
print(dg.margins_offset_dict)
assert len(np.unique(y)) == 2
def test_data_isolation():
build_test_corpus('/tmp/foo.json', '/tmp/foo.sqlite')
corpus = Corpus.load('/tmp/foo.sqlite')
assert len(set(corpus.train_ids).intersection(set(corpus.valid_ids))) == 0
assert len(set(corpus.train_ids).intersection(set(corpus.test_ids))) == 0
assert len(set(corpus.valid_ids).intersection(set(corpus.test_ids))) == 0
featurizer = features.Featurizer()
featurizer.fit(corpus, max_df_frac=1.0)
dg = features.DataGenerator(corpus, featurizer)
query, examples, labels = next(dg._listwise_examples(corpus.train_ids))
examples_ids = [doc.id for doc in examples]
assert len(set(examples_ids).intersection(set(corpus.valid_ids))) == 0
assert len(set(examples_ids).intersection(set(corpus.test_ids))) == 0
dg = features.DataGenerator(corpus, featurizer)
query, examples, labels = next(dg._listwise_examples(paper_ids=corpus.valid_ids,
candidate_ids=corpus.valid_ids + corpus.train_ids))
examples_ids = [doc.id for doc in examples]
assert len(set(examples_ids).intersection(set(corpus.train_ids))) > 0
assert len(set(examples_ids).intersection(set(corpus.test_ids))) == 0
dg = features.DataGenerator(corpus, featurizer)
query, examples, labels = next(dg._listwise_examples(paper_ids=corpus.test_ids,
candidate_ids=corpus.valid_ids + corpus.train_ids))
examples_ids = [doc.id for doc in examples]
assert len(set(examples_ids).intersection(set(corpus.test_ids))) == 0
dg = features.DataGenerator(corpus, featurizer)
query, examples, labels = next(
dg._listwise_examples(paper_ids=corpus.test_ids,
candidate_ids=corpus.valid_ids + corpus.train_ids + corpus.test_ids))
examples_ids = [doc.id for doc in examples]
#assert len(set(examples_ids).intersection(set(corpus.test_ids))) != 0
if __name__ == '__main__':
import pytest
pytest.main([__file__, '-s'])
|
citeomatic-master
|
tests/test_corpus.py
|
import glob
from citeomatic.grobid_parser import GrobidParser, parse_full_text
from citeomatic import file_util
def test_grobid_reed():
parser = parse_full_text(
file_util.slurp(file_util.test_file(__file__, 'reed.xml'))
)
assert parser.title == 'Optimizing Cauchy Reed-Solomon Codes for Fault-Tolerant Network Storage Applications Optimizing Cauchy Reed-Solomon Codes for Fault-Tolerant Network Storage Applications'
assert parser.authors == [
'James Plank', 'Lihao Xu', 'James Plank', 'Lihao Xu'
]
assert parser.abstract == ' '.join(
[
'In the past few years, all manner of storage applications , ranging from disk array systems to',
'distributed and wide-area systems, have started to grapple with the reality of tolerating multiple',
'simultaneous failures of storage nodes. Unlike the single failure case, which is optimally handled',
'with RAID Level-5 parity, the multiple failure case is more difficult because optimal general purpose',
'strategies are not yet known. Erasure Coding is the field of research that deals with these',
'strategies, and this field has blossomed in recent years. Despite this research, the decades-old',
'Reed-Solomon erasure code remains the only space-optimal (MDS) code for all but the smallest storage',
'systems. The best performing implementations of Reed-Solomon coding employ a variant called Cauchy',
"Reed-Solomon coding, developed in the mid 1990's [4]. In this paper, we present an improvement to",
'Cauchy Reed-Solomon coding that is based on optimizing the Cauchy distribution matrix. We detail',
'an algorithm for generating good matrices and then evaluate the performance of encoding using all',
'implementations Reed-Solomon codes, plus the best MDS codes from the literature. The improvements',
'over the original Cauchy Reed-Solomon codes are as much as 83% in realistic scenarios, and average',
'roughly 10% over all cases that we tested.'
]
)
def test_grobid_salience():
parser = parse_full_text(
file_util.slurp(file_util.test_file(__file__, 'salience.xml'))
)
assert parser.title == 'A Model of Saliency-based Visual Attention for Rapid Scene Analysis'
assert parser.authors == ['Laurent Itti', 'Christof Koch', 'Ernst Niebur']
assert parser.abstract == ' '.join(
[
'{ A visual attention system, inspired by the behavior and the neuronal',
'architecture of the early primate visual system, is presented.',
'Multiscale image features are combined into a single topographical',
'saliency map. A dynamical neu-ral network then selects attended',
'locations in order of decreasing saliency. The system breaks down the',
'complex problem of scene understanding by rapidly selecting, in a',
'computationally eecient manner, conspicuous locations to be analyzed',
'in detail.'
]
)
def _test_all():
for pdf in glob.glob('./data/pdfs/*.pdf'):
pdf_blob = ('input', ('pdf', open(pdf, 'rb').read(), 'application/pdf'))
try:
parsed = GrobidParser('http://localhost:8080').parse(pdf_blob)
print(pdf, parsed.title, parsed.authors)
except:
print('Failed to parse: %s', pdf)
if __name__ == '__main__':
import pytest
pytest.main(['-s', __file__])
|
citeomatic-master
|
tests/test_extract.py
|
import unittest
import numpy as np
from citeomatic.corpus import Corpus
from citeomatic.features import Featurizer, DataGenerator
from citeomatic.models.layers import triplet_loss
from citeomatic.models.options import ModelOptions
from citeomatic.utils import import_from
from tests.test_corpus import build_test_corpus
import keras.backend as K
create_model = import_from("citeomatic.models.citation_ranker", "create_model")
embedder_create_model = import_from("citeomatic.models.paper_embedder", "create_model")
class TestModelBuild(unittest.TestCase):
@classmethod
def setUpClass(cls):
build_test_corpus('/tmp/foo.json', '/tmp/foo.sqlite')
corpus = Corpus.load('/tmp/foo.sqlite')
options = ModelOptions(**{})
featurizer = Featurizer(max_title_len=options.max_title_len, max_abstract_len=options.max_abstract_len)
featurizer.fit(corpus, max_df_frac=1.0)
options.n_features = featurizer.n_features
options.n_authors = featurizer.n_authors
options.n_venues = featurizer.n_venues
options.n_keyphrases = featurizer.n_keyphrases
cls.corpus = corpus
cls.featurizer = featurizer
cls.options = options
def test_build_paper_embedder_sum(self):
try:
models = embedder_create_model(self.options)
assert 'embedding' in models
assert 'citeomatic' in models
self._test_train(models)
assert True
except Exception:
assert False
def test_build_magdir(self):
        try:
            # Set the option before building the model so it actually takes effect.
            self.options.use_magdir = False
            models = embedder_create_model(self.options)
assert 'embedding' in models
assert 'citeomatic' in models
self._test_train(models)
assert True
except Exception:
assert False
def test_build_paper_embedder_cnn(self):
try:
self.options.embedding_type = 'cnn'
models = embedder_create_model(self.options)
assert 'embedding' in models
assert 'citeomatic' in models
self._test_train(models)
assert True
except Exception:
assert False
def test_build_paper_embedder_cnn2(self):
try:
self.options.embedding_type = 'cnn2'
models = embedder_create_model(self.options)
assert 'embedding' in models
assert 'citeomatic' in models
self._test_train(models)
assert True
except Exception:
assert False
def test_cnn(self):
self.options.embedding_type = 'cnn'
try:
models = create_model(self.options)
self._test_train(models)
except Exception:
assert False
def test_lstm(self):
self.options.embedding_type = 'lstm'
try:
models = create_model(self.options)
self._test_train(models)
except Exception:
assert False
def test_build_paper_embedder_lstm(self):
try:
self.options.embedding_type = 'lstm'
models = embedder_create_model(self.options)
assert 'embedding' in models
assert 'citeomatic' in models
self._test_train(models)
assert True
except Exception:
assert False
def test_build_train_ranker(self):
try:
models = create_model(self.options)
assert models['embedding'] is None
assert 'citeomatic' in models
self._test_train(models)
except Exception:
assert False
def test_use_author(self):
self.options.use_authors = True
try:
models = create_model(self.options)
self._test_train(models)
except Exception:
assert False
def test_use_venue(self):
self.options.use_venue = True
try:
models = create_model(self.options)
self._test_train(models)
except Exception:
assert False
def test_use_keyphrases(self):
self.options.use_keyphrases = True
try:
models = create_model(self.options)
self._test_train(models)
except Exception:
assert False
def test_use_citations(self):
self.options.use_citations = True
try:
models = create_model(self.options)
self._test_train(models)
except Exception:
assert False
self.options.use_citations = False
try:
models = create_model(self.options)
self._test_train(models)
except Exception:
assert False
def test_use_sparse(self):
self.options.use_sparse = True
try:
models = create_model(self.options)
self._test_train(models)
except Exception:
assert False
def test_siamese(self):
self.options.use_src_tgt_embeddings = True
try:
models = create_model(self.options)
self._test_train(models)
except Exception:
assert False
def _test_train(self, models: dict):
model = models['citeomatic']
model.compile(optimizer='nadam', loss=triplet_loss)
dg = DataGenerator(self.corpus, self.featurizer, candidate_selector=TestCandidateSelector())
training_generator = dg.triplet_generator(paper_ids=self.corpus.train_ids, batch_size=2)
model.fit_generator(training_generator, steps_per_epoch=1, epochs=10)
K.clear_session()
class TestCandidateSelector(object):
def confidence(self, doc_id, candidate_ids):
return np.ones(len(candidate_ids))
|
citeomatic-master
|
tests/test_model_build.py
|
import unittest
from citeomatic.eval_metrics import precision_recall_f1_at_ks, average_results
class TestEvalMetrics(unittest.TestCase):
def test_precision_recall_f1_at_ks(self):
gold_y = ['1', '2', '3']
pred_y = ['1', '4', '3']
scores_y = [1.0, 0.1, 0.5]
k = [1, 2, 3]
results = precision_recall_f1_at_ks(gold_y, pred_y, scores=None, k_list=k)
assert results['precision'] == [1.0, 0.5, 2/3]
assert results['recall'] == [1/3, 1/3, 2/3]
assert results['f1'] == [1/2, 2/5, 2/3]
assert results['mrr'] == 1.0
results_2 = precision_recall_f1_at_ks(gold_y, pred_y, scores_y, k)
assert results_2['precision'] == [1.0, 1.0, 2/3]
assert results_2['recall'] == [1/3, 2/3, 2/3]
assert results_2['f1'] == [1/2, 4/5, 2/3]
assert results_2['mrr'] == 1.0
def test_average_results(self):
r1 = {
'precision': [1.0, 0.5, 2/3],
'recall': [1.0, 0.5, 2/3],
'f1': [1.0, 0.5, 2/3],
'mrr': 1.0,
}
r2 = {
'precision': [3.0, 1.0, 4/3],
'recall': [3.0, 1.0, 4/3],
'f1': [3.0, 1.0, 4/3],
'mrr': 0.5,
}
averaged_results = average_results([r1, r2])
assert averaged_results['precision'] == [2.0, 0.75, 1.0]
assert averaged_results['mrr'] == 0.75
if __name__ == '__main__':
unittest.main()
|
citeomatic-master
|
tests/test_eval_metrics.py
|
#!/usr/bin/env python3
import collections
import logging
from typing import List
import flask
import numpy as np
from flask import Flask, request
from citeomatic import display
from citeomatic.common import Document, FieldNames
from citeomatic.corpus import Corpus
from citeomatic.features import Featurizer
from citeomatic.neighbors import ANN, EmbeddingModel
NUM_ANN_CANDIDATES = 1000
DEFAULT_NUM_CITATIONS = 50
TOTAL_CANDIDATES = 1000
app = Flask(__name__, template_folder='.', static_folder='client/build/')
Prediction = collections.namedtuple(
'Prediction',
['score', 'document', 'position', 'explanation', 'cited', 'pdf']
)
class APIModel(object):
def __init__(
self,
models,
featurizer: Featurizer,
corpus: Corpus=None,
ann: ANN=None,
ann_embedding_model=None,
max_neighbors=1000,
candidate_min_in_citations=4,
):
self.model = models['citeomatic']
self.embedding_model = EmbeddingModel(featurizer, models['embedding']) if \
ann_embedding_model is None else ann_embedding_model
self.featurizer = featurizer
self.explanation = None # Explanation(self.model, featurizer)
self._ann = ann
self.corpus = corpus
self.max_neighbors = max_neighbors
self.candidate_min_in_citations = candidate_min_in_citations
def get_ann_similar_documents(self, doc, top_n=NUM_ANN_CANDIDATES):
doc_embedded = self.embedding_model.embed(doc)
return self._ann.get_nns_by_vector(doc_embedded, top_n)
@staticmethod
def _sha_to_url(sha):
return "https://pdfs.semanticscholar.org/" + sha[0:4] + "/" + sha[4:] + ".pdf"
def predict(self, doc, top_n=DEFAULT_NUM_CITATIONS) -> List[Prediction]:
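        # Candidate generation: fetch ANN neighbours of the query embedding,
        # drop rarely-cited papers, expand the pool with the candidates' own
        # citations, then re-rank everything with the citeomatic ranking model.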
candidate_ids = self.get_ann_similar_documents(doc, top_n=self.max_neighbors)
candidate_ids = [
bulk_id for bulk_id in candidate_ids
if self.corpus[bulk_id].in_citation_count >=
self.candidate_min_in_citations
]
# Extend the candidate set with their citations
citations_of_candidates = []
        for candidate_id in candidate_ids:
            citations_of_candidates.extend(self.corpus[candidate_id].citations)
candidate_ids = list(set(citations_of_candidates + candidate_ids))
logging.info('Fetching %d documents ' % len(candidate_ids))
candidates = [self.corpus[paper_id] for paper_id in candidate_ids]
logging.info('Featurizing... %d documents ' % len(candidates))
features = self.featurizer.transform_query_and_results(doc, candidates)
logging.info('Predicting...')
scores = self.model.predict(features, batch_size=64).flatten()
best_matches = np.argsort(scores)[::-1]
predictions = []
for i, match_idx in enumerate(best_matches[:top_n]):
if candidates[match_idx].title.lower() == doc.title.lower():
continue
predictions.append(
Prediction(
score=float(scores[match_idx]),
document=candidates[match_idx],
pdf=APIModel._sha_to_url(str(candidates[match_idx].id)),
position=i,
explanation={},
cited=candidates[match_idx].title.lower() in doc.citations
)
)
logging.info("Done! Found %s predictions." % len(predictions))
return predictions
def document_from_dict(doc):
defaults = {
FieldNames.TITLE: '',
FieldNames.ABSTRACT: '',
FieldNames.AUTHORS: [],
FieldNames.OUT_CITATIONS: [],
FieldNames.YEAR: 2016,
FieldNames.PAPER_ID: 0,
FieldNames.VENUE: '',
FieldNames.IN_CITATION_COUNT: 0,
FieldNames.OUT_CITATION_COUNT: 0,
FieldNames.KEY_PHRASES: []
}
defaults.update(doc)
return Document(**defaults)
def dict_from_document(doc):
doc_dict = {}
for field in doc._fields:
doc_dict[field] = getattr(doc, field)
return doc_dict
def find_citations(source_file, doc):
predictions = app.config['API_MODEL'].predict(doc)
response = {
'predictions':
[
{
'document': p.document._asdict(),
'score': p.score,
'explanation': p.explanation,
'cited': str(p.cited) if p.cited else '',
'pdf': p.pdf,
'bibtex': display.document_to_bibtex(p.document)
} for p in predictions
]
}
response.update(doc._asdict())
response['source_file'] = source_file
# logging.debug("Citeomatic response %s", predictions)
return response
@app.route('/api/predictions/json', methods=['GET', 'POST'])
def predict_json():
doc = document_from_dict(request.get_json())
predictions = app.config['API_MODEL'].predict(doc)
return flask.jsonify(
{
'predictions':
[
{
'document': p.document._asdict(),
'score': p.score,
'explanation': p.explanation
} for p in predictions
]
}
)
@app.route('/api/pdfs', methods=['GET'])
def fetch_pdfs():
ids = request.args['ids'].split(',')
return flask.Response('Not yet implemented. Sorry!', mimetype='text/plain')
@app.route("/upload/json", methods=['POST'])
def upload_form():
logging.debug(request.get_json())
req_body = request.get_json()
title = req_body['title']
abstract = req_body['abstract']
authors = req_body['authors'].split(',')
json_body = {'title': title, 'abstract': abstract, 'authors': authors}
return flask.jsonify(find_citations('', document_from_dict(json_body)))
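# ---------------------------------------------------------------------------
# Illustrative client sketch (added for clarity; not part of the original
# service). It assumes the app above is served on localhost:5000, that the
# `requests` package is installed, and that the FieldNames constants map to
# the lowercase keys shown.
def _example_predict_request():
    import requests

    doc = {
        'title': 'Deep Colorization',
        'abstract': 'This paper investigates the colorization problem ...',
        'authors': ['Zezhou Cheng', 'Qingxiong Yang', 'Bin Sheng'],
    }
    # document_from_dict fills the remaining Document fields with defaults.
    resp = requests.post('http://localhost:5000/api/predictions/json', json=doc)
    for prediction in resp.json()['predictions']:
        print(prediction['score'], prediction['document']['title'])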
|
citeomatic-master
|
citeomatic/service.py
|
import numpy as np
def precision_recall_f1_at_ks(gold_y, predictions, scores=None, k_list=None):
def _mrr(ranked_list):
try:
idx = ranked_list.index(True)
return 1. / (idx + 1)
except ValueError:
return 0.0
if k_list is None:
k_list = [1, 5, 10]
if scores is not None:
sorted_predictions = [p for p, _ in
sorted(zip(predictions, scores), key=lambda x : x[1], reverse=True)]
else:
sorted_predictions = predictions
gold_set = set(gold_y)
sorted_correct = [y_pred in gold_set for y_pred in sorted_predictions]
results = {
'precision': [],
'recall': [],
'f1': [],
'mrr': _mrr(sorted_correct),
'k': k_list
}
num_gold = len(gold_y)
for k in k_list:
num_correct = np.sum(sorted_correct[:k])
p = num_correct / k
r = num_correct / num_gold
if num_correct == 0:
f = 0.0
else:
f = 2 * p * r / (p + r)
results['precision'].append(p)
results['recall'].append(r)
results['f1'].append(f)
return results
def average_results(results: list):
p_matrix = []
r_matrix = []
f_matrix = []
mrr_list = []
for r in results:
p_matrix.append(r['precision'])
r_matrix.append(r['recall'])
f_matrix.append(r['f1'])
mrr_list.append(r['mrr'])
return {
'precision': list(np.mean(p_matrix, axis=0)),
'recall': list(np.mean(r_matrix, axis=0)),
'f1': list(np.mean(f_matrix, axis=0)),
'mrr': np.mean(mrr_list),
}
def f1(p, r):
if p + r == 0.0:
return 0.0
else:
return 2 * p * r / (p + r)
|
citeomatic-master
|
citeomatic/eval_metrics.py
|
#!/usr/bin/env python
"""
Luigi pipeline for Citeomatic.
This includes tasks for fetching the dataset, building a vocabulary and
training features and training/evaluating the model.
"""
import logging
import os
import zipfile
from os import path
import luigi
from citeomatic import file_util, features, training, corpus
from citeomatic.features import Featurizer
from citeomatic.models.options import ModelOptions
from citeomatic.serialization import import_from
from luigi.util import inherits
logger = logging.getLogger('citeomatic.tasks')
import faulthandler
faulthandler.enable()
class SharedParameters(luigi.Task):
base_dir = luigi.Parameter(default=path.expanduser('~/citeomatic-data/'))
@property
def data_dir(self):
return self.base_dir + '/data'
@property
def model_dir(self):
return self.base_dir + '/model'
def log(self, msg, *args):
logger.info(msg, *args)
class DownloadCorpus(SharedParameters):
corpus_url = luigi.Parameter(
default=
'https://s3-us-west-2.amazonaws.com/ai2-s2-research-public/2017-02-21/papers-2017-02-21.zip'
)
def output(self):
json_name = self.corpus_url.split('/')[-1]
json_name = json_name.replace('.zip', '.json.gz')
return luigi.LocalTarget(path.join(self.data_dir, json_name))
def run(self):
self.output().makedirs()
output_dir = path.dirname(self.output().path)
output_filename = self.output().path
assert os.system(
'curl "%s" > "%s/papers.zip.tmp"' % (self.corpus_url, output_dir)
) == 0
with zipfile.ZipFile('%s/papers.zip.tmp' % output_dir) as zf:
for name in zf.namelist():
if name.endswith('.json.gz'):
zf.extract(name, output_dir)
break
#assert os.unlink('%s/papers.zip.tmp' % output_dir) == 0
class BuildCorpus(SharedParameters):
def requires(self):
return {'corpus': DownloadCorpus()}
def output(self):
corpus_suffix = self.requires()['corpus'].corpus_url.split('/')[-1]
corpus_name = corpus_suffix.replace('.zip', '.sqlite')
return luigi.LocalTarget(path.join(self.data_dir, corpus_name))
def run(self):
try:
corpus.build_corpus(self.output().path + '.tmp', self.input()['corpus'].path)
os.rename(self.output().path + '.tmp', self.output().path)
except:
os.system("rm -rf '%s'" % self.output().path + '.tmp')
raise
class CreateFeaturizer(SharedParameters):
training_fraction = luigi.FloatParameter(default=0.8)
max_features = luigi.IntParameter(default=100000000)
name = luigi.Parameter('default')
def requires(self):
return {'corpus': BuildCorpus()}
def output(self):
return luigi.LocalTarget(
path.join(self.model_dir, 'featurizer-%s.pickle' % self.name)
)
def run(self):
logger.info(
"Loading corpus from file %s " % self.input()['corpus'].path
)
c = corpus.Corpus.load(self.input()['corpus'].path, self.training_fraction)
logger.info("Fitting featurizer and making cache...")
featurizer = Featurizer(max_features=self.max_features)
featurizer.fit(c)
self.output().makedirs()
file_util.write_pickle(self.output().path, featurizer)
class TrainModel(SharedParameters):
model_config = luigi.Parameter()
experiment_name = luigi.Parameter(default='v0')
def requires(self):
return {'featurizer': CreateFeaturizer(), 'corpus': BuildCorpus()}
def output(self):
return luigi.LocalTarget(
path.join(self.model_dir, self.experiment_name, 'weights.h5')
)
    def run(self):
        featurizer = file_util.read_pickle(self.input()['featurizer'].path)
        # Use a distinct local name so the imported `corpus` module is not shadowed.
        training_corpus = corpus.Corpus.load(self.input()['corpus'].path)
        model_options = ModelOptions.load(self.model_config)
        model_options.n_authors = featurizer.n_authors
        model_options.n_features = featurizer.n_features
        citeomatic_model, embedding_model = training.train_text_model(
            training_corpus,
            featurizer,
            model_options,
            embedding_model_for_ann=None,
            debug=False,
            tensorboard_dir=None
        )
        self.output().makedirs()
        # self.output() already points at <experiment>/weights.h5, so write the
        # companion files next to it instead of nesting another weights.h5.
        output_dir = path.dirname(self.output().path)
        citeomatic_model.save_weights(self.output().path, overwrite=True)
        embedding_model.save_weights(
            path.join(output_dir, 'embedding.h5'), overwrite=True
        )
        file_util.write_json(
            path.join(output_dir, 'options.json'),
            model_options.to_json()
        )
class TestModel(SharedParameters):
def requires(self):
return {
'featurizer': CreateFeaturizer(),
'corpus': DownloadCorpus(),
'model': TrainModel(),
}
def run(self):
from citeomatic.scripts.evaluate_citeomatic_model import \
TestCiteomatic
test_app = TestCiteomatic(
model_dir=self.output_dir(),
test_samples=self.test_samples,
min_citation_count=10,
corpus_path=self._corpus_path('corpus.msgpack'),
filter_method='es',
)
test_app.main([])
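# Example invocation (added for clarity; the config path is hypothetical).
# Tasks are launched through Luigi's command-line runner below, e.g.:
#
#   python citeomatic/tasks.py TrainModel \
#       --model-config /path/to/model_options.json \
#       --experiment-name v0 \
#       --local-scheduler
#
# Luigi resolves the dependency chain (DownloadCorpus -> BuildCorpus ->
# CreateFeaturizer -> TrainModel) and only re-runs tasks whose outputs are
# missing.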
if __name__ == '__main__':
from luigi.cmdline import luigi_run
luigi_run()
|
citeomatic-master
|
citeomatic/tasks.py
|
import argparse
import logging
import os
import pickle
import sys
import time
import typing
from ast import literal_eval
import numpy
import pandas
import traitlets
from traitlets.config import Configurable
from citeomatic import traits, file_util
from .file_util import read_json, read_pickle, write_file, write_json, write_pickle
# These properties will be ignored for argument parsing.
IGNORED_TRAITS = {'parent', 'config'}
def generic_parser(trait, v):
if v.startswith('@json:'):
try:
return read_json(v[6:])
except Exception as e:
raise argparse.ArgumentTypeError('Failed to parse JSON', e)
if v.startswith('@eval:'):
try:
return eval(v[6:])
except Exception as e:
raise argparse.ArgumentTypeError('Failed to evaluate argument', e)
if v.startswith('@pickle:'):
try:
return read_pickle(v[8:])
except Exception as e:
raise argparse.ArgumentTypeError(
'Failed to read pickle file %s' % v[8:], e
)
if v.startswith('@csv:'):
try:
return pandas.read_csv(v[5:])
except Exception as e:
raise argparse.ArgumentTypeError(
'Failed to read CSV file %s' % v[5:], e
)
if v.startswith('@call:'):
try:
import importlib
fqn = v[6:]
module_parts = fqn.split('.')
module_name = '.'.join(module_parts[:-1])
fn = module_parts[-1]
mod = importlib.import_module(module_name)
return getattr(mod, fn)()
except Exception as e:
raise argparse.ArgumentTypeError(
'Failed to invoke method: %s: %s' % (v, e)
)
if isinstance(trait, (traitlets.Unicode, traitlets.Enum)):
return v
if isinstance(trait, traitlets.Int):
return int(v)
if isinstance(trait, traitlets.Bool):
try:
iv = int(v)
return bool(iv)
except ValueError as _:
pass
if v.lower() == 'true':
return True
if v.lower() == 'false':
return False
        raise argparse.ArgumentTypeError(
            '"%s" could not be parsed as a boolean' % v
        )
return literal_eval(v)
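# Illustrative examples of the value-prefix protocol handled above (added for
# clarity; the flags and file names are hypothetical):
#
#   --corpus @json:/path/to/corpus.json        -> read_json('/path/to/corpus.json')
#   --layers "@eval:[64, 128, 256]"            -> [64, 128, 256]
#   --featurizer @pickle:featurizer.pickle     -> read_pickle('featurizer.pickle')
#   --frame @csv:results.csv                   -> pandas.read_csv('results.csv')
#   --opts @call:citeomatic.models.options.ModelOptions
#                                              -> imports the module, then calls ModelOptions()
#
# Any other value falls through to the trait-specific parsing below
# (str, int, bool, or literal_eval).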
def parser_for_trait(trait: traitlets.TraitType) -> object:
def _trait_parser(v):
return generic_parser(trait, v)
_trait_parser.__name__ = trait.__class__.__name__
return _trait_parser
def setup_default_logging(level=logging.INFO):
pandas.options.display.width = 200
for h in list(logging.root.handlers):
logging.root.removeHandler(h)
logging.basicConfig(
format=
'%(levelname).1s%(asctime)-15s %(filename)s:%(lineno)d %(message)s',
level=level,
stream=sys.stderr
)
logging.getLogger('elasticsearch').setLevel(logging.WARN)
class Config(Configurable):
"""
Basic configuration management for research experiments.
A configuration has a base directory where models can be stored and a version tag. Helper
methods are supplied to access the version specific directory and read files. The
configuration itself, (along with any code versioning information available) is automatically
serialized to the output directory on first use. This makes it easy to resume an experiment
given only the output directory.
In practice, a new version should be created for each significant change to allow for
tracking of progress.
"""
version = traits.Unicode(default_value='', allow_none=True)
base_dir = traits.Unicode(default_value='.')
existing_config_pickle = traits.Unicode(allow_none=True)
log_level = traits.Unicode(required=False, default_value='info')
description = None
rest_args = []
_configured = False
def __init__(self, **kw):
Configurable.__init__(self, **kw)
self._run_id = time.strftime('%Y-%m-%d-%H-%M-%S')
self.reset(kw)
assert self.base_dir, 'Must specify a base directory to write to.'
self._configured = True
self._finished_init()
def _finished_init(self):
"""
Called after initial configuration is completed (attributes assigned, etc.)
:return:
"""
pass
def reset(self, kw):
for k, v in kw.items():
setattr(self, k, v)
def tmp_dir(self):
return os.path.join(self.output_dir(), 'tmp-%s' % self._run_id)
def dump_configuration(self, output_dir=None):
"""
Write the configuration object to the given output directory.
Always emits a pickle file, but if YAML is available, or the configuration
values are JSON serializable, those formats will be emitted as well.
:param output_dir: Directory to write configuration to.
:return:
"""
if output_dir is None:
output_dir = self.output_dir()
logging.info('Writing configuration to %s', output_dir)
write_pickle(
os.path.join(output_dir, 'config.pickle'), self._trait_values
)
try:
import yaml
write_file(
os.path.join(output_dir, 'config.yaml'),
yaml.dump(self._trait_values)
)
except ImportError as _:
logging.warning('Failed to import YAML')
try:
write_json(
os.path.join(output_dir, 'config.json'), self._trait_values
)
except:
pass
def output_dir_path(self):
if not self.version:
path = self.base_dir
else:
path = os.path.join(self.base_dir, self.version)
return path
def output_dir(self):
path = self.output_dir_path()
if not os.path.exists(path):
os.system('mkdir -p "%s"' % path)
return path
def output_file(self, name) -> str:
return os.path.join(self.output_dir(), name)
def output_fd(self, name, mode) -> 'file':
return open(self.output_file(name), mode)
def write_file(self, name, data):
write_file(self.output_file(name), data)
def write_json(self, name, data, indent=2, sort_keys=True):
if not name.endswith('.json'):
name += '.json'
write_json(
self.output_file(name), data, indent=indent, sort_keys=sort_keys
)
def write_pickle(self, name, data):
if not name.endswith('.pickle'):
name += '.pickle'
write_pickle(self.output_file(name), data)
def read_pickle(self, name):
if os.path.exists(self.output_file(name)):
return read_pickle(self.output_file(name))
elif os.path.exists(self.output_file(name + '.pickle')):
return read_pickle(self.output_file(name) + '.pickle')
else:
assert False, 'Failed to find pickle file: "%s"' % self.output_file(
name
)
def setup_logging(self):
"""
Initialize logging for this configuration.
Output will be written to stderr, and appended to the appropriate
log files in the output directory for this config.
:return:
"""
log_level = getattr(logging, self.log_level.upper())
setup_default_logging(log_level)
logger = logging.getLogger()
handler = logging.StreamHandler(
file_util.open(self.output_file('LOG'), 'a')
)
handler.setFormatter(
logging.Formatter(
'%(levelname).1s%(asctime)-15s %(filename)s:%(lineno)d %(message)s',
)
)
logger.addHandler(handler)
logging.info('Initialized configuration (%s)', self.__class__.__name__)
logging.info('Writing to: %s', self.output_dir())
def __repr__(self):
return self.__class__.__name__
# return json.dumps(
# {name: str(getattr(self, name)) for name in self.traits().keys()}
# )
@classmethod
def parse_command_line(
cls, argv, add_help, defaults=None, allow_unknown_args=True
):
if defaults is None:
defaults = {}
if argv is None:
argv = sys.argv
parser = argparse.ArgumentParser(
add_help=add_help,
description=cls.description,
formatter_class=argparse.MetavarTypeHelpFormatter
)
def _help_for(name, trait):
if name in defaults:
return str(defaults[name])[:50]
if trait.default_value == traitlets.Undefined:
return None
return str(trait.default_value)[:50]
for name, trait in sorted(cls.class_traits().items()):
if name in IGNORED_TRAITS:
continue
if isinstance(trait, traitlets.List):
nargs = '*'
type_parser = parser_for_trait(trait._trait)
else:
nargs = None
type_parser = parser_for_trait(trait)
parser.add_argument(
'--%s' % name,
type=type_parser,
nargs=nargs,
help=trait.help or '%s' % _help_for(name, trait),
required=trait.metadata.get('required', False)
)
# print('Argv: %s', argv)
if allow_unknown_args:
parsed, rest = parser.parse_known_args(args=argv)
else:
parsed = parser.parse_args(args=argv)
rest = []
parsed = {k: v for (k, v) in parsed.__dict__.items() if v is not None}
return parsed, rest
@classmethod
def parse_environment(cls):
env_values = {}
for name, trait in cls.class_traits().items():
if name.upper() in os.environ:
env_values[name] = generic_parser(
trait, os.environ[name.upper()]
)
return env_values
@classmethod
def initialize(cls, argv=None, add_help=True, **kw):
setup_default_logging()
parsed, rest = cls.parse_command_line(
argv, defaults=kw, add_help=add_help
)
env_parsed = cls.parse_environment()
if 'existing_config_pickle' in env_parsed:
logging.info(
'Restoring from existing configuration pickle: %s',
env_parsed['existing_config_pickle']
)
return cls.load_from_pickle(env_parsed['existing_config_pickle'])
kw = dict(kw)
kw.update(parsed)
kw.update(env_parsed)
config = cls(**kw)
config.setup_logging()
cls.rest_args = rest[1:]
Config.v = config
return config
@classmethod
def load_from_pickle(cls, filename):
"""Restore an existing configuration object from the given data directory."""
with open(filename, 'rb') as f:
config = pickle.loads(f.read())
# reset the base directory to be wherever our pickle file came from
config.base_dir = os.path.dirname(
os.path.dirname(os.path.abspath(filename))
)
logging.info('Config: %s', config.base_dir)
return config
class App(Config):
defaults = {}
def main(self, args):
pass
@classmethod
def run(cls, module_name):
if module_name == '__main__':
instance = cls.initialize(**cls.defaults)
logging.info('Running: %s', ' '.join(sys.argv))
instance.main(instance.rest_args)
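# Illustrative sketch of a command-line app built on the Config/App machinery
# above (added for clarity; _ExampleApp and its `dataset` flag are hypothetical
# and not part of citeomatic).
class _ExampleApp(App):
    dataset = traits.Unicode(default_value='dblp')

    def main(self, args):
        logging.info('Running on dataset %s with extra args %s', self.dataset, args)

# Invoking `_ExampleApp.run(__name__)` from a script parses --dataset,
# --base-dir, --version, etc. from sys.argv and then calls main().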
JsonData = typing.Union[list, dict, str, int, float]
class JsonSerializable(traitlets.HasTraits):
def to_dict(self) -> dict:
"""Recursively convert objects to dicts to allow json serialization."""
return {
JsonSerializable.serialize(k): JsonSerializable.serialize(v)
for (k, v) in self._trait_values.items()
}
@staticmethod
def serialize(obj: typing.Union['JsonSerializable', JsonData]):
if isinstance(obj, JsonSerializable):
return obj.to_dict()
elif isinstance(obj, list):
return [JsonSerializable.serialize(v) for v in obj]
elif isinstance(obj, dict):
res_dict = dict()
for (key, value) in obj.items():
assert type(key) == str
res_dict[key] = JsonSerializable.serialize(value)
return res_dict
else:
return obj
@classmethod
def from_dict(cls, json_data: dict):
assert (type(json_data) == dict)
args = {}
for (k, v) in cls.class_traits().items():
args[k] = JsonSerializable.deserialize(v, json_data[k])
return cls(**args)
@staticmethod
def deserialize(target_trait: traitlets.TraitType, json_data: JsonData):
"""
N.B. Using this function on complex objects is not advised; prefer to use an explicit serialization scheme.
"""
# Note: calling importlib.reload on this file breaks issubclass (http://stackoverflow.com/a/11461574/6174778)
if isinstance(target_trait, traitlets.Instance
) and issubclass(target_trait.klass, JsonSerializable):
return target_trait.klass.from_dict(json_data)
elif isinstance(target_trait, traitlets.List):
assert isinstance(json_data, list)
return [
JsonSerializable.deserialize(target_trait._trait, element)
for element in json_data
]
elif isinstance(target_trait, traitlets.Dict):
# Assume all dictionary keys are strings
assert isinstance(json_data, dict)
res_dict = dict()
for (key, value) in json_data.items():
assert type(key) == str
res_dict[key] = JsonSerializable.deserialize(
target_trait._trait, value
)
return res_dict
else:
return json_data
def __repr__(self):
traits_list = [
'%s=%s' % (k, repr(v)) for (k, v) in self._trait_values.items()
]
return type(self).__name__ + '(' + ', '.join(traits_list) + ')'
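# Illustrative round-trip through JsonSerializable (added for clarity; the
# _ExamplePoint class is hypothetical and not part of citeomatic).
def _example_json_serializable_roundtrip():
    class _ExamplePoint(JsonSerializable):
        x = traitlets.Int()
        y = traitlets.Int()

    p = _ExamplePoint(x=1, y=2)
    as_dict = p.to_dict()                    # {'x': 1, 'y': 2}
    restored = _ExamplePoint.from_dict(as_dict)
    assert (restored.x, restored.y) == (1, 2)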
|
citeomatic-master
|
citeomatic/config.py
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: citeomatic/schema.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='citeomatic/schema.proto',
package='',
syntax='proto3',
serialized_pb=_b('\n\x17\x63iteomatic/schema.proto\"\xd5\x01\n\x08\x44ocument\x12\r\n\x05title\x18\x01 \x01(\t\x12\x10\n\x08\x61\x62stract\x18\x02 \x01(\t\x12\x0f\n\x07\x61uthors\x18\x03 \x03(\t\x12\x15\n\rout_citations\x18\x04 \x03(\t\x12\x19\n\x11in_citation_count\x18\x05 \x01(\x05\x12\x0c\n\x04year\x18\x06 \x01(\x05\x12\n\n\x02id\x18\x07 \x01(\t\x12\r\n\x05venue\x18\x08 \x01(\t\x12\x13\n\x0bkey_phrases\x18\t \x03(\t\x12\x11\n\ttitle_raw\x18\n \x01(\t\x12\x14\n\x0c\x61\x62stract_raw\x18\x0b \x01(\t\" \n\x03Hit\x12\r\n\x05\x64ocid\x18\x01 \x01(\t\x12\n\n\x02tf\x18\x02 \x01(\x05\"!\n\x0bPostingList\x12\x12\n\x04hits\x18\x01 \x03(\x0b\x32\x04.Hitb\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_DOCUMENT = _descriptor.Descriptor(
name='Document',
full_name='Document',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='title', full_name='Document.title', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='abstract', full_name='Document.abstract', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='authors', full_name='Document.authors', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='out_citations', full_name='Document.out_citations', index=3,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='in_citation_count', full_name='Document.in_citation_count', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='year', full_name='Document.year', index=5,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='id', full_name='Document.id', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='venue', full_name='Document.venue', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='key_phrases', full_name='Document.key_phrases', index=8,
number=9, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='title_raw', full_name='Document.title_raw', index=9,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='abstract_raw', full_name='Document.abstract_raw', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=28,
serialized_end=241,
)
_HIT = _descriptor.Descriptor(
name='Hit',
full_name='Hit',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='docid', full_name='Hit.docid', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tf', full_name='Hit.tf', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=243,
serialized_end=275,
)
_POSTINGLIST = _descriptor.Descriptor(
name='PostingList',
full_name='PostingList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='hits', full_name='PostingList.hits', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=277,
serialized_end=310,
)
_POSTINGLIST.fields_by_name['hits'].message_type = _HIT
DESCRIPTOR.message_types_by_name['Document'] = _DOCUMENT
DESCRIPTOR.message_types_by_name['Hit'] = _HIT
DESCRIPTOR.message_types_by_name['PostingList'] = _POSTINGLIST
Document = _reflection.GeneratedProtocolMessageType('Document', (_message.Message,), dict(
DESCRIPTOR = _DOCUMENT,
__module__ = 'citeomatic.schema_pb2'
# @@protoc_insertion_point(class_scope:Document)
))
_sym_db.RegisterMessage(Document)
Hit = _reflection.GeneratedProtocolMessageType('Hit', (_message.Message,), dict(
DESCRIPTOR = _HIT,
__module__ = 'citeomatic.schema_pb2'
# @@protoc_insertion_point(class_scope:Hit)
))
_sym_db.RegisterMessage(Hit)
PostingList = _reflection.GeneratedProtocolMessageType('PostingList', (_message.Message,), dict(
DESCRIPTOR = _POSTINGLIST,
__module__ = 'citeomatic.schema_pb2'
# @@protoc_insertion_point(class_scope:PostingList)
))
_sym_db.RegisterMessage(PostingList)
# @@protoc_insertion_point(module_scope)
|
citeomatic-master
|
citeomatic/schema_pb2.py
|
from typing import Iterator
import arrow
import numpy as np
import tqdm
from annoy import AnnoyIndex
from citeomatic import file_util
from citeomatic.utils import batch_apply, flatten
from citeomatic.schema_pb2 import Document
from citeomatic.common import load_pickle
import keras.backend as K
class ANN(object):
"""
Wraps an Annoy index and a docid mapping.
AnnoyIndex do not pickle correctly; they need to be save/loaded as well.
"""
def __init__(self, embeddings, annoy_index, docid_to_idx):
self.docid_to_idx = docid_to_idx
self.idx_to_docid = {v: k for (k, v) in docid_to_idx.items()}
self.embeddings = embeddings
if annoy_index is not None:
self.annoy_dims = annoy_index.f
self.annoy = annoy_index
else:
self.annoy = None
@classmethod
def build(cls, embedding_model, corpus, ann_trees=100):
docid_to_idx = {}
if corpus.corpus_type == 'pubmed' or corpus.corpus_type == 'dblp':
docs = [corpus[doc_id] for doc_id in corpus.train_ids + corpus.valid_ids]
else:
docs = corpus
doc_embeddings = np.zeros((len(docs), embedding_model.output_shape))
embedding_gen = embedding_model.embed_documents(docs, batch_size=1024)
for i, (doc, embedding) in enumerate(
zip(tqdm.tqdm(docs), embedding_gen)):
docid_to_idx[doc.id] = i
doc_embeddings[i] = embedding
annoy_index = AnnoyIndex(embedding_model.output_shape)
for i, embedding in enumerate(tqdm.tqdm(doc_embeddings)):
annoy_index.add_item(i, embedding)
annoy_index.build(ann_trees)
ann = cls(doc_embeddings, annoy_index, docid_to_idx)
return ann
def save(self, target):
if self.annoy is not None:
self.annoy.save('%s.annoy' % target)
file_util.write_pickle('%s.pickle' % target, self)
def __getstate__(self):
return self.docid_to_idx, self.idx_to_docid, self.embeddings, self.annoy_dims, None
def __setstate__(self, state):
self.docid_to_idx, self.idx_to_docid, self.embeddings, self.annoy_dims, self.annoy = state
@staticmethod
def load(source):
import annoy
ann = file_util.read_pickle('%s.pickle' % source)
ann.annoy = annoy.AnnoyIndex(ann.annoy_dims)
ann.annoy.load('%s.annoy' % source)
return ann
def get_nns_by_vector(self, vector, top_n, **kw):
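        # Exact brute-force search over the stored embeddings: negating the dot
        # product makes argpartition's smallest entries the most similar docs,
        # which are then sorted before being mapped back to document ids.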
similarity = np.dot(self.embeddings, -vector)
idx = np.argpartition(similarity, top_n)[:top_n]
idx = idx[np.argsort(similarity[idx])]
return [self.idx_to_docid[i] for i in idx]
def get_nns_by_id(self, doc_id, top_n, **kw):
idx = self.annoy.get_nns_by_item(
self.docid_to_idx[doc_id], top_n, search_k=-1
)
return [self.idx_to_docid[i] for i in idx]
def get_similarities(self, vector, doc_ids):
indexes = [self.docid_to_idx[doc_id] for doc_id in doc_ids]
return np.dot(self.embeddings[indexes], vector)
class EmbeddingModel(object):
"""
Wrap a Siamese citeomatic model and expose an interface
to extract the embeddings for individual documents.
"""
def __init__(self, featurizer, model):
import keras.backend as K
self._model = model
self._featurizer = featurizer
self.output_shape = K.int_shape(self._model.outputs[0])[-1]
def embed_documents(self,
generator: Iterator[Document], batch_size=256) -> Iterator[np.ndarray]:
"""
Compute embeddings of the provided documents.
"""
def _run_embedding(batch) -> np.array:
features = self._featurizer.transform_list(batch)
doc_embedding = self._model.predict(
{
'query-title-txt': features['title'],
'query-abstract-txt': features['abstract'],
'doc-txt': features['abstract'],
}
)
return doc_embedding
return batch_apply(generator, _run_embedding, batch_size)
def embed(self, doc):
return np.asarray(list(self.embed_documents([doc])))[0]
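# ---------------------------------------------------------------------------
# Illustrative sketch of how the pieces above compose (added for clarity; the
# featurizer/models/corpus arguments are assumed to come from a trained
# citeomatic model and a built Corpus, and the target path is hypothetical).
def _example_ann_usage(featurizer, models, corpus):
    embedding_model = EmbeddingModel(featurizer, models['embedding'])
    ann = ANN.build(embedding_model, corpus, ann_trees=100)
    ann.save('/tmp/citeomatic-ann')      # writes /tmp/citeomatic-ann.{annoy,pickle}
    ann = ANN.load('/tmp/citeomatic-ann')
    query_doc = corpus[corpus.train_ids[0]]
    return ann.get_nns_by_vector(embedding_model.embed(query_doc), top_n=10)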
|
citeomatic-master
|
citeomatic/neighbors.py
|
import json
STRING_ENCODING = 'utf-8'
class Cache(object):
def lookup(self, namespace: str, key: str):
raise NotImplementedError("Please use subclass")
def put(self, namespace: str, key: str, json_str: str):
raise NotImplementedError("Please use subclass")
class LocalCache(Cache):
def __init__(self):
self._dict = {}
def create_hash_key(self, namespace: str, key: str):
return "%s/%s" % (namespace, key)
def lookup(self, namespace: str, key: str):
hash_key = self.create_hash_key(namespace, key)
return self._dict.get(hash_key, None)
def put(self, namespace: str, key: str, json_str: str):
hash_key = self.create_hash_key(namespace, key)
self._dict[hash_key] = json.loads(json_str)
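# Illustrative usage (added for clarity): values are stored as parsed JSON and
# returned as Python objects on lookup; missing keys return None.
def _example_local_cache():
    cache = LocalCache()
    cache.put('predictions', 'doc-123', '{"score": 0.9}')
    assert cache.lookup('predictions', 'doc-123') == {'score': 0.9}
    assert cache.lookup('predictions', 'missing') is None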
|
citeomatic-master
|
citeomatic/cache.py
|
ROOT = '/net/nfs.corp/s2-research/citeomatic/data/'
DEFAULT_BASE_DIR = 'models/'
|
citeomatic-master
|
citeomatic/__init__.py
|
import errno
import io
import json
import logging
import os
import pickle
import tarfile
import typing
import tempfile
import hashlib
import subprocess
from os.path import abspath, dirname, join
from gzip import GzipFile
import arrow
ROOT = abspath(dirname(dirname(dirname(__file__))))
class S3FileNotFoundError(FileNotFoundError):
pass
def _expand(filename):
return os.path.expanduser(filename)
def _is_okay_cache_dir(name):
if os.path.exists(name) or os.system('mkdir -p %s' % name) == 0:
return name
def _cache_dir():
# Try using a shared data drive if it's available
dirs = [
'/data/cache/s2-research',
'/tmp/s2-research-cache/',
'/tmp/',
]
for name in dirs:
if _is_okay_cache_dir(name):
logging.info('Using %s for caching', name)
return name
assert False, 'Failed to find suitable cache directory'
def last_modified(filename):
if filename.startswith('s3://'):
return S3File.last_modified(filename)
else:
if os.path.exists(filename):
return arrow.get(os.path.getmtime(filename))
else:
return None
class StreamingS3File(object):
def __init__(self, name, mode, encoding):
assert 'w' not in mode and 'a' not in mode, 'Streaming writes not supported.'
key = _s3_key(name)
if key is None:
raise FileNotFoundError(name)
streaming_file = key.get()['Body']
def _readinto(buf):
bytes_read = streaming_file.read(len(buf))
buf[:len(bytes_read)] = bytes_read
return len(bytes_read)
streaming_file.readinto = _readinto
streaming_file.readable = lambda: True
streaming_file.writable = lambda: False
streaming_file.seekable = lambda: False
streaming_file.closeable = lambda: False
streaming_file.closed = False
streaming_file.flush = lambda: 0
self._file = io.BufferedReader(streaming_file, buffer_size=512000)
if encoding is not None or 't' in mode:
# The S3 file interface from boto doesn't conform to the standard python file interface.
# Add dummy methods to make the text wrapper happy.
self._file = io.TextIOWrapper(self._file, encoding=encoding)
def readable(self):
return True
def writeable(self):
return False
def seekable(self):
return False
def closeable(self):
return False
@property
def closed(self):
return False
def flush(self):
return 0
def read(self, *args):
return self._file.read(*args)
def readline(self):
return self._file.readline()
def close(self):
return self._file.close()
def seekable(self):
return False
def __enter__(self):
return self._file
def __exit__(self, exc_type, exc_val, exc_tb):
self._file.close()
def cache_file(name):
if not name.startswith('s3://'):
return name
s3_last_modified = last_modified(name)
cleaned_name = name[5:].replace('/', '_')
target_filename = os.path.join(_cache_dir(), cleaned_name)
if os.path.exists(target_filename):
if s3_last_modified is None or last_modified(
target_filename
) >= s3_last_modified:
return target_filename
logging.info('Cache file for %s does not exist, copying.', name)
parse = _parse_s3_location(name)
retcode = subprocess.call(
'aws s3api get-object --bucket "%s" --key "%s" "%s.tmp.%d" --request-payer=requester'
% (parse['bucket'], parse['key'], target_filename, os.getpid()),
stdout=subprocess.DEVNULL,
shell=True
)
if retcode != 0:
raise FileNotFoundError('Failed to copy %s' % name)
assert os.system(
'mv "%s.tmp.%d" "%s"' %
(target_filename, os.getpid(), target_filename)
) == 0
assert os.system('chmod 777 "%s"' % (target_filename)) == 0
return target_filename
def s3_location_to_object(path):
    import boto3  # imported lazily, as elsewhere in this module
    s3 = boto3.resource('s3')
parse = _parse_s3_location(path)
bucket_name = parse['bucket']
key = parse['key']
return s3.Object(bucket_name, key)
def _parse_s3_location(path):
logging.debug('Parsing path %s' % path)
if not path.startswith('s3://'):
raise ValueError('s3 location must start with s3://')
path = path[5:]
parts = path.split('/', 1)
if len(parts) == 1:
bucket = parts[0]
key = None
else:
bucket, key = parts
return {'bucket': bucket, 'key': key}
# Yield S3 objects with a given prefix.
def iterate_s3_objects(path, max_files=None):
import boto3
# Check if path exists on S3
if path.startswith('s3://'):
parsed_location = _parse_s3_location(path)
bucket = parsed_location['bucket']
folder_key = parsed_location['key']
s3 = boto3.resource('s3')
client = boto3.client('s3')
s3_bucket = s3.Bucket(bucket)
if max_files:
s3_obj_iterator = \
s3_bucket.objects.filter(Prefix=folder_key, RequestPayer='requester').limit(max_files)
else:
s3_obj_iterator = s3_bucket.objects.filter(
Prefix=folder_key, RequestPayer='requester'
).all()
yield from s3_obj_iterator
# Deprecated. For backward compatibility.
def iterate_s3(path):
yield from iterate_s3_objects(path)
def iterate_s3_files(path_prefix, max_files=None):
"""Yield s3 filenames with a given prefix."""
# return the full name of each file.
for s3_object in iterate_s3_objects(path_prefix, max_files):
yield 's3://{}/{}'.format(s3_object.bucket_name, s3_object.key)
def iterate_files(path_prefix: str) -> typing.Iterable[str]:
"""Yield filenames with a given prefix."""
if path_prefix.startswith('s3://'):
yield from iterate_s3_files(path_prefix)
else:
for (root, directories, filenames) in os.walk(path_prefix):
for filename in filenames:
yield os.path.join(root, filename)
class S3File(object):
def __init__(self, name, mode, encoding):
self.name = name
self.mode = mode
self.encoding = encoding
if 'r' in mode:
self._local_name = self._cache()
self._local_file = io.open(self._local_name, mode)
else:
prefix = self.name.split('//')[1].replace('/', '_')
self._local_name = join(_cache_dir(), '.tmp_' + prefix)
self._local_file = io.open(
self._local_name, mode=mode, encoding=encoding
)
@staticmethod
def last_modified(filename):
key = _s3_key(filename)
if key is None:
return None
return arrow.get(key.last_modified)
def flush(self):
logging.info('Syncing "%s" to S3' % self.name)
self._local_file.flush()
assert os.system(
'aws s3 cp "%s" "%s"' % (self._local_name, self.name)
) == 0
def write(self, *args):
return self._local_file.write(*args)
def read(self, *args):
return self._local_file.read(*args)
def read_lines(self, *args):
return self._local_file.read(*args)
def _cache(self):
return cache_file(self.name)
def seekable(self):
return True
def close(self):
if 'w' in self.mode or 'a' in self.mode:
self.flush()
os.unlink(self._local_name)
else:
self._local_file.close()
def __enter__(self):
return self._local_file
def __exit__(self, type, value, traceback):
self.close()
def _gzip_file(fileobj, mode, encoding):
def _fix_fileobj(gzip_file):
"""
Terrible hack to ensure that GzipFile actually calls close on the fileobj passed into it.
"""
gzip_file.myfileobj = gzip_file.fileobj
return gzip_file
if 't' in mode or encoding is not None:
mode = mode.replace('t', '')
f = _fix_fileobj(GzipFile(fileobj=fileobj, mode=mode))
return io.TextIOWrapper(f, encoding=encoding)
else:
f = _fix_fileobj(GzipFile(fileobj=fileobj, mode=mode))
if 'r' in mode:
return io.BufferedReader(f)
else:
return io.BufferedWriter(f)
def _bzip_file(fileobj, mode, encoding):
import bz2
if 't' in mode:
bz2_file = bz2.BZ2File(fileobj, mode=mode.replace('t', 'b'))
bz2_file._closefp = True
return io.TextIOWrapper(bz2_file, encoding)
else:
bz2_file = bz2.BZ2File(fileobj, mode=mode)
bz2_file._closefp = True
return bz2_file
def data_file(name):
"""Read a data file from the source repository."""
return os.path.join(ROOT, 'data', name)
def test_file(caller_filename: str, name: str) -> str:
curdir = abspath(caller_filename)
while curdir != '/':
fname = os.path.join(curdir, 'testdata', name)
if os.path.exists(fname):
return fname
curdir = os.path.dirname(curdir)
raise FileNotFoundError('Failed to find testdata file: %s' % name)
def slurp(filename, mode='r', encoding=None):
"""Read all content from `filename`"""
with open(
_expand(filename), mode=mode, encoding=encoding, streaming=True
) as f:
return f.read()
def read_json(filename):
"""Read JSON from `filename`."""
with open(_expand(filename), 'rt') as f:
return json.load(f)
def write_json(filename, obj, indent=None, sort_keys=None):
"""Write JSON to `filename`"""
with open(_expand(filename), 'w') as f:
json.dump(obj, f, indent=indent, sort_keys=sort_keys)
def write_json_atomic(filename, obj, indent=None, sort_keys=None):
"""Write JSON to `filename` such that `filename` never exists in a partially written state."""
filename = _expand(filename)
    if filename.startswith('s3://'):
        # S3 writes are already atomic; return early so we don't also attempt
        # the local tempfile + rename path below.
        write_json(filename, obj, indent, sort_keys)
        return
with tempfile.NamedTemporaryFile(
'w', dir=os.path.dirname(filename), delete=False
) as f:
json.dump(obj, f, indent=indent, sort_keys=sort_keys)
tempname = f.name
os.rename(tempname, filename)
def read_pickle(filename, streaming=False):
"""Read pickled data from `name`."""
with open(_expand(filename), 'rb', streaming=streaming) as f:
return pickle.load(f)
def write_pickle(filename, obj):
with open(_expand(filename), 'wb') as f:
pickle.dump(obj, f, -1)
def write_file(filename, value: typing.Union[bytes, str], mode='w'):
with open(_expand(filename), mode) as f:
f.write(value)
def write_file_if_not_exists(
filename, value: typing.Union[bytes, str], mode='w'
):
if os.path.exists(_expand(filename)):
return
write_file(filename, value, mode)
def write_file_atomic(
filename: str, value: typing.Union[bytes, str], mode='w'
) -> None:
if filename.startswith('s3://'):
write_file(filename, value, mode)
else:
with tempfile.NamedTemporaryFile(
'w', dir=os.path.dirname(filename), delete=False
) as f:
f.write(value)
tempname = f.name
os.rename(tempname, filename)
def read_lines(filename, comment=None, streaming=False):
"""
Read all non-blank lines from `filename`.
Skip any lines that begin the comment character.
:param filename: Filename to read from.
:param comment: If defined, ignore lines starting with this text.
:return:
"""
with open(_expand(filename), 'rt', streaming=streaming) as f:
for l in f:
if comment and not l.startswith(comment):
continue
yield l.strip()
def read_json_lines(filename, streaming=False):
for line in read_lines(filename, streaming=streaming):
yield json.loads(line)
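# Example (illustrative): for a file containing the lines
#   "# header", "foo", "", "bar"
# read_lines(path, comment='#') yields 'foo' and 'bar'; read_json_lines simply
# json-decodes each line that read_lines yields.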
def exists(filename):
return last_modified(filename) is not None
def open(filename, mode='rb', encoding=None, **kw):
"""
Open `filename` for reading. If filename is compressed with a known format,
it will be transparently decompressed.
Optional keyword args:
`streaming`: if true, remote files will be streamed directly; no local cache
will be generated.
    `no_decompress`: do not try to automatically decompress the input file
"""
if filename.endswith('.gz') and 'no_decompress' not in kw:
if 'r' in mode:
target_mode = 'rb'
else:
target_mode = 'wb'
target = open(
filename,
no_decompress=True,
mode=target_mode,
encoding=None,
**kw
)
return _gzip_file(target, mode, encoding)
if filename.endswith('.bz2') and 'no_decompress' not in kw:
if 'r' in mode:
target_mode = 'rb'
else:
target_mode = 'wb'
target = open(
filename,
no_decompress=True,
mode=target_mode,
encoding=None,
**kw
)
return _bzip_file(target, mode, encoding)
if filename.startswith('s3://'):
if kw.get('streaming', False):
return StreamingS3File(filename, mode, encoding)
else:
return S3File(filename, mode, encoding)
import io
return io.open(filename, mode, encoding=encoding)
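# Usage sketch (illustrative; the paths below are hypothetical). `open` dispatches on the
# filename: '.gz' and '.bz2' files are transparently (de)compressed, 's3://' paths go
# through S3File (or StreamingS3File when streaming=True), and everything else falls back
# to io.open. A local round-trip through the gzip path looks like:
#
#   with open('/tmp/example.txt.gz', 'wt') as f:
#       f.write('hello world\n')
#   with open('/tmp/example.txt.gz', 'rt') as f:
#       assert f.read() == 'hello world\n'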
def safe_makedirs(dir_path: str) -> None:
"""Create a directory if it doesn't already exist, avoiding race conditions if called from multiple processes."""
dir_path = _expand(dir_path)
try:
os.makedirs(dir_path)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(dir_path):
pass
else:
raise
def copy(src: str, dst: str) -> None:
"""Copy src to dst."""
src = _expand(src)
dst = _expand(dst)
with open(src, 'rb') as src_f, open(dst, 'wb') as dst_f:
while True:
chunk = src_f.read(4096)
if chunk is None or len(chunk) == 0:
break
dst_f.write(chunk)
def extract_tarfile_from_bytes(b: bytes, dst: str, mode='r') -> None:
seekable_f = io.BytesIO(b)
safe_makedirs(os.path.dirname(dst))
with tarfile.open(fileobj=seekable_f, mode=mode) as t:
t.extractall(path=dst)
def extract_tarfile(src: str, dst: str, streaming=True) -> None:
"""Extract a tarfile at 'src' to 'dst'."""
src = _expand(src)
dst = _expand(dst)
with open(src, mode='rb', streaming=streaming) as f:
b = f.read()
extract_tarfile_from_bytes(b, dst)
def compute_sha1(filename: str, buf_size=int(1e6)) -> str:
sha1 = hashlib.sha1()
with open(filename, 'rb') as f:
while True:
data = f.read(buf_size)
if not data:
break
sha1.update(data)
return sha1.hexdigest()
class SetJsonEncoder(json.JSONEncoder):
"""Simple JSONEncoder that encodes sets as lists."""
def default(self, obj):
if isinstance(obj, set):
return list(obj)
return json.JSONEncoder.default(self, obj)
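# Example (illustrative): pass the encoder to json.dumps to serialize sets, e.g.
#   json.dumps({'ids': {1, 2, 3}}, cls=SetJsonEncoder)  ->  '{"ids": [1, 2, 3]}'
# (the exact list ordering follows the set's iteration order).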
class JsonFile(object):
'''
A flat text file where each line is one json object
    # to read through a file line by line
with JsonFile('file.json', 'r') as fin:
for line in fin:
# line is the deserialized json object
pass
# to write a file object by object
with JsonFile('file.json', 'w') as fout:
fout.write({'key1': 5, 'key2': 'token'})
fout.write({'key1': 0, 'key2': 'the'})
'''
def __init__(self, *args, **kwargs):
self._args = args
self._kwargs = kwargs
def __iter__(self):
for line in self._file:
yield json.loads(line)
def write(self, item):
item_as_json = json.dumps(item, ensure_ascii=False)
encoded = '{0}\n'.format(item_as_json)
self._file.write(encoded)
def __enter__(self):
self._file = open(*self._args, **self._kwargs)
self._file.__enter__()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._file.__exit__(exc_type, exc_val, exc_tb)
class GzipJsonFile(JsonFile):
'''
A gzip compressed JsonFile. Usage is the same as JsonFile
'''
def __enter__(self):
self._file = GzipFile(*self._args, **self._kwargs)
self._file.__enter__()
return self
def __iter__(self):
for line in self._file:
yield json.loads(line.decode('utf-8', 'ignore'))
def write(self, item):
item_as_json = json.dumps(item, ensure_ascii=False)
encoded = '{0}\n'.format(item_as_json).encode('utf-8', 'ignore')
self._file.write(encoded)
|
citeomatic-master
|
citeomatic/file_util.py
|
import collections
import logging
import mmh3
import re
import resource
import numpy as np
import pandas as pd
import six
import tqdm
from keras.preprocessing.sequence import pad_sequences
from sklearn.feature_extraction.text import CountVectorizer
from citeomatic.candidate_selectors import CandidateSelector
from citeomatic.utils import flatten
from citeomatic.common import DatasetPaths
from citeomatic.models.options import ModelOptions
dp = DatasetPaths()
CLEAN_TEXT_RE = re.compile('[^ a-z]')
# filters for authors and docs
MAX_AUTHORS_PER_DOCUMENT = 8
MAX_KEYPHRASES_PER_DOCUMENT = 20
MIN_TRUE_CITATIONS = {
'pubmed': 2,
'dblp': 1,
'oc': 2
}
MAX_TRUE_CITATIONS = 100
# Adjustments to how we boost heavily cited documents.
CITATION_SLOPE = 0.01
MAX_CITATION_BOOST = 0.02
# Parameters for soft-margin data generation.
TRUE_CITATION_OFFSET = 0.3
HARD_NEGATIVE_OFFSET = 0.2
NN_NEGATIVE_OFFSET = 0.1
EASY_NEGATIVE_OFFSET = 0.0
# ANN jaccard percentile cutoff
ANN_JACCARD_PERCENTILE = 0.05
def label_for_doc(d, offset):
sigmoid = 1 / (1 + np.exp(-d.in_citation_count * CITATION_SLOPE))
return offset + (sigmoid * MAX_CITATION_BOOST)
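# Worked example (illustrative): with CITATION_SLOPE = 0.01 and MAX_CITATION_BOOST = 0.02,
# a document with 100 incoming citations gets sigmoid = 1 / (1 + exp(-100 * 0.01)) ~= 0.73,
# so label_for_doc returns offset + 0.73 * 0.02 ~= offset + 0.015; heavily cited documents
# are boosted by at most MAX_CITATION_BOOST above their margin offset.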
def jaccard(featurizer, x, y):
x_title, x_abstract = featurizer._cleaned_document_words(x)
y_title, y_abstract = featurizer._cleaned_document_words(y)
a = set(x_title + x_abstract)
b = set(y_title + y_abstract)
c = a.intersection(b)
return float(len(c)) / (len(a) + len(b) - len(c))
def _clean(text):
return CLEAN_TEXT_RE.sub(' ', text.lower())
class Featurizer(object):
'''
This class uses the corpus to turn text into features expected by the neural network.
Parameters
----------
max_title_len : int, default=32
Maximum number of tokens allowed in the paper title.
max_abstract_len : int, default=256
        Maximum number of tokens allowed in the abstract.
'''
STOPWORDS = {
'abstract', 'about', 'an', 'and', 'are', 'as', 'at', 'be', 'by', 'for',
'from', 'how', 'in', 'is', 'it', 'of', 'on', 'or', 'that', 'the',
'this', 'to', 'was', 'what', 'when', 'where', 'who', 'will', 'with',
'the', 'we', 'our', 'which'
}
def __init__(
self,
max_features=200000,
max_title_len=32,
max_abstract_len=256,
use_pretrained=False,
min_author_papers=1,
min_venue_papers=1,
min_keyphrase_papers=5
):
self.max_features = max_features
self.max_title_len = max_title_len
self.max_abstract_len = max_abstract_len
self.use_pretrained = use_pretrained
self.min_author_papers = min_author_papers
self.min_venue_papers = min_venue_papers
self.min_keyphrase_papers = min_keyphrase_papers
self.author_to_index = {}
self.venue_to_index = {}
self.keyphrase_to_index = {}
self.word_indexer = None
@property
def n_authors(self):
return len(self.author_to_index) + 1
@property
def n_venues(self):
return len(self.venue_to_index) + 1
@property
def n_keyphrases(self):
if not hasattr(self, 'keyphrase_to_index'):
self.keyphrase_to_index = {}
return len(self.keyphrase_to_index) + 1
def fit(self, corpus, max_df_frac=0.90, min_df_frac=0.000025, is_featurizer_for_test=False):
logging.info(
'Usage at beginning of featurizer fit: %s',
resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1e6
)
if is_featurizer_for_test:
paper_ids_for_training = corpus.train_ids + corpus.valid_ids
else:
paper_ids_for_training = corpus.train_ids
# Fitting authors and venues
logging.info('Fitting authors and venues')
author_counts = collections.Counter()
venue_counts = collections.Counter()
keyphrase_counts = collections.Counter()
for doc_id in tqdm.tqdm(paper_ids_for_training):
doc = corpus[doc_id]
author_counts.update(doc.authors)
venue_counts.update([doc.venue])
keyphrase_counts.update(doc.key_phrases)
c = 1
for author, count in author_counts.items():
if count >= self.min_author_papers:
self.author_to_index[author] = c
c += 1
c = 1
for venue, count in venue_counts.items():
if count >= self.min_venue_papers:
self.venue_to_index[venue] = c
c += 1
c = 1
for keyphrase, count in keyphrase_counts.items():
if count >= self.min_keyphrase_papers:
self.keyphrase_to_index[keyphrase] = c
c += 1
# Step 1: filter out some words and make a vocab
if self.use_pretrained:
vocab_file = dp.vocab_for_corpus('shared')
with open(vocab_file, 'r') as f:
vocab = f.read().split()
else:
logging.info('Cleaning text.')
all_docs_text = [
' '.join((_clean(corpus[doc_id].title), _clean(corpus[doc_id].abstract)))
for doc_id in tqdm.tqdm(paper_ids_for_training)
]
logging.info('Fitting vectorizer...')
if self.max_features is not None:
count_vectorizer = CountVectorizer(
max_df=max_df_frac,
max_features=self.max_features,
stop_words=self.STOPWORDS
)
else:
count_vectorizer = CountVectorizer(
max_df=max_df_frac,
min_df=min_df_frac,
stop_words=self.STOPWORDS
)
count_vectorizer.fit(tqdm.tqdm(all_docs_text))
vocab = count_vectorizer.vocabulary_
logging.info(
'Usage after word count: %s',
resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1e6
)
        # Step 2: Initialize the mapper from word to index
self.word_indexer = FeatureIndexer(
vocab=vocab,
use_pretrained=self.use_pretrained
)
self.n_features = 1 + len(self.word_indexer.word_to_index)
logging.info(
'Usage after word_indexer: %s',
resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1e6
)
logging.info(
'Usage at end of fit: %s',
resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1e6
)
logging.info('Total words %d ' % len(self.word_indexer.word_to_index))
logging.info('Total authors %d ' % self.n_authors)
logging.info('Total venues %d ' % self.n_venues)
logging.info('Total keyphrases %d ' % self.n_keyphrases)
def __setstate__(self, state):
for k, v in state.items():
try:
setattr(self, k, v)
except AttributeError as e:
logging.warning('Ignoring renamed attribute: %s', k)
continue
def _citation_features(self, documents):
return np.log([max(doc.in_citation_count - 1, 0) + 1 for doc in documents])
def _intersection_features(self, query_features, candidate_features):
feats_intersection_lst = [
np.intersect1d(query, candidate)
for (query, candidate) in zip(query_features, candidate_features)
]
feats_intersection = np.zeros_like(query_features)
for i, intersection in enumerate(feats_intersection_lst):
feats_intersection[i, :len(intersection)] = intersection
return feats_intersection
def _text_features(self, text, max_len):
return np.asarray(
pad_sequences(self.word_indexer.transform([text]), max_len)[0],
dtype=np.int32
)
def _cleaned_document_words(self, document):
title = _clean(document.title).split(' ')
abstract = _clean(document.abstract).split(' ')
return title, abstract
def transform_query_candidate(self, query_docs, candidate_docs, confidence_scores=None):
"""
Parameters
----------
query_docs - a list of query documents
candidate_docs - a list of candidate documents corresponding to each query doc in query_docs
Returns
-------
[feats1, feats2] - feats1 and feats2 are transformed versions of the text in the dicts
of documents.
"""
query_features = self.transform_list(query_docs)
candidate_features = self.transform_list(candidate_docs)
candidate_citation_features = self._citation_features(candidate_docs)
query_candidate_title_intersection = self._intersection_features(
query_features=query_features['title'],
candidate_features=candidate_features['title']
)
query_candidate_abstract_intersection = self._intersection_features(
query_features=query_features['abstract'],
candidate_features=candidate_features['abstract']
)
features = {
'query-authors-txt':
query_features['authors'],
'query-venue-txt':
query_features['venue'],
'query-title-txt':
query_features['title'],
'query-abstract-txt':
query_features['abstract'],
'query-keyphrases-txt':
query_features['keyphrases'],
'candidate-authors-txt':
candidate_features['authors'],
'candidate-venue-txt':
candidate_features['venue'],
'candidate-title-txt':
candidate_features['title'],
'candidate-abstract-txt':
candidate_features['abstract'],
'candidate-keyphrases-txt':
candidate_features['keyphrases'],
'query-candidate-title-intersection':
query_candidate_title_intersection,
'query-candidate-abstract-intersection':
query_candidate_abstract_intersection,
'candidate-citation-count':
candidate_citation_features
}
if confidence_scores is not None:
features['candidate-confidence'] = np.asarray(confidence_scores)
return features
def transform_query_and_results(self, query, list_of_documents, similarities):
"""
Parameters
----------
query - a single query document
list_of_documents - a list of possible candidate documents
Returns
-------
        [feats1, feats2] - feats1 and feats2 are transformed versions of the text in the
        tuples of documents.
"""
query_docs = []
for i in range(len(list_of_documents)):
query_docs.append(query)
return self.transform_query_candidate(query_docs, list_of_documents, similarities)
def transform_doc(self, document):
"""
        Converts a document into its word-index features.
        :param document: Input document of type Document
        :return: a dict with the transformed 'title', 'abstract', 'authors', 'venue' and
            'keyphrases' features. The word sequences are np arrays.
"""
title, abstract = self._cleaned_document_words(document)
features = {
'title':
self._text_features(title, self.max_title_len),
'abstract':
self._text_features(abstract, self.max_abstract_len),
'authors':
[
self.author_to_index[author] for author in document.authors
if author in self.author_to_index
],
'venue':
[self.venue_to_index.get(document.venue, 0)],
'keyphrases':
[
self.keyphrase_to_index[keyphrase]
for keyphrase in document.key_phrases
if keyphrase in self.keyphrase_to_index
]
}
return features
def transform_list(self, list_of_documents):
docs = []
for document in list_of_documents:
docs.append(self.transform_doc(document))
# pull out individual columns and convert to arrays
features = {
'title':
np.asarray([doc['title'] for doc in docs]),
'abstract':
np.asarray([doc['abstract'] for doc in docs]),
'authors':
np.asarray(pad_sequences(
[doc['authors'] for doc in docs], MAX_AUTHORS_PER_DOCUMENT
)),
'venue':
np.asarray([doc['venue'] for doc in docs]),
'keyphrases':
np.asarray(pad_sequences(
[doc['keyphrases'] for doc in docs], MAX_KEYPHRASES_PER_DOCUMENT
)),
}
return features
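# Usage sketch (illustrative; assumes `corpus` is a citeomatic Corpus and `doc` one of its
# Documents, neither of which is constructed here):
#   featurizer = Featurizer(max_title_len=32, max_abstract_len=256)
#   featurizer.fit(corpus)                    # builds author/venue/keyphrase/word vocabularies
#   feats = featurizer.transform_doc(doc)     # dict of 'title', 'abstract', 'authors', ...
#   batch = featurizer.transform_list([doc])  # padded arrays ready for the Keras model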
class CachingFeaturizer(Featurizer):
def __init__(self, featurizer):
for k, v in featurizer.__dict__.items():
setattr(self, k, v)
self._cache = {}
def transform_doc(self, document, confidence=None):
if document.id not in self._cache:
features = Featurizer.transform_doc(self, document)
self._cache[document.id] = features
return self._cache[document.id]
class FeatureIndexer(object):
"""
A class to transform raw tokens into formatted indices.
Parameters
----------
vocab : dict/set/list
The set of words to index.
offset : int, default=1
Index offset. Default is 1 because Keras reserves index 0 for the mask.
"""
def __init__(self, vocab, offset=1, use_pretrained=False):
self.word_to_index = {}
self.offset = offset
self.use_pretrained = use_pretrained
for i, word in enumerate(vocab):
self.word_to_index[word] = i + offset
# if not use_pretrained: # OOV hashing stuff only when not using pretrained. Pretrained
# # vocab file already has
# num_words = len(vocab)
# for i in range(1, ModelOptions.num_oov_buckets + 1):
# word = ModelOptions.oov_term_prefix + str(i)
# self.word_to_index[word] = num_words + i
def transform(self, raw_X):
"""
        Transforms raw token lists into lists of vocabulary indices.
        Input should be e.g. raw_X = [['the', 'first', 'string'], ['the', 'second']].
"""
indexed_X = []
for raw_x in raw_X:
indexed_x = [self.word_to_id(word) for word in raw_x]
indexed_x = [i for i in indexed_x if i is not None]
indexed_X.append(indexed_x)
return indexed_X
def word_to_id(self, word):
"""
Takes a word and returns the index
"""
if word in self.word_to_index:
return self.word_to_index[word]
elif self.use_pretrained:
hash_id = (mmh3.hash(word) % ModelOptions.num_oov_buckets) + 1
word = ModelOptions.oov_term_prefix + str(hash_id)
return self.word_to_index[word]
else:
return None
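# Example (illustrative): indices start at `offset` and unknown words are dropped unless
# pretrained OOV buckets are enabled.
#   indexer = FeatureIndexer(vocab=['the', 'first', 'string'])
#   indexer.transform([['the', 'first'], ['the', 'unknown']])  ->  [[1, 2], [1]]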
class DataGenerator(object):
"""
Class to yield batches of data to train Keras models.
Parameters
----------
corpus : Corpus
The corpus with all of the documents.
featurizer : Featurizer
Featurizer to turn documents into indices.
"""
KEYS = ['hard_negatives', 'nn', 'easy']
def __init__(self,
corpus,
featurizer,
ann=None,
candidate_selector: CandidateSelector = None,
margin_multiplier=1,
use_variable_margin=True):
self.corpus = corpus
self.featurizer = CachingFeaturizer(featurizer)
self.ann = ann
self.candidate_selector = candidate_selector
margins_offset_dict = {
'true': TRUE_CITATION_OFFSET * margin_multiplier,
'hard': HARD_NEGATIVE_OFFSET * margin_multiplier,
'nn': NN_NEGATIVE_OFFSET * margin_multiplier,
'easy': EASY_NEGATIVE_OFFSET * margin_multiplier
}
if not use_variable_margin:
margins_offset_dict['hard'] = margins_offset_dict['nn']
margins_offset_dict['easy'] = margins_offset_dict['nn']
self.margins_offset_dict = margins_offset_dict
def _listwise_examples(
self,
paper_ids,
candidate_ids=None,
neg_to_pos_ratio=6
):
# the id pool should only have IDs that are in the corpus
paper_ids_list = np.array(list(self.corpus.filter(paper_ids)))
        # candidate_ids decides where the candidates come from
if candidate_ids is None:
candidate_ids = self.corpus.train_ids
else:
candidate_ids = self.corpus.filter(candidate_ids)
# these are reused
candidate_ids_set = set(candidate_ids)
candidate_ids_list = np.array(list(candidate_ids_set))
while True:
candidate_ids_list = np.random.permutation(candidate_ids_list)
paper_ids_list = np.random.permutation(paper_ids_list)
for doc_id in paper_ids_list:
examples = []
labels = []
query = self.corpus[doc_id]
true_citations = candidate_ids_set.intersection(query.out_citations)
if len(true_citations) < MIN_TRUE_CITATIONS[self.corpus.corpus_type]:
continue
if len(true_citations) > MAX_TRUE_CITATIONS:
true_citations = np.random.choice(
list(true_citations), MAX_TRUE_CITATIONS, replace=False
)
n_positive = len(true_citations)
n_per_type = {
'hard_negatives': int(np.ceil(n_positive * neg_to_pos_ratio / 3.0)),
'easy': int(np.ceil(n_positive * neg_to_pos_ratio / 3.0)),
'nn': int(np.ceil(n_positive * neg_to_pos_ratio / 3.0))
}
if self.ann is not None:
pos_jaccard_sims = [
jaccard(self.featurizer, query, self.corpus[i])
for i in true_citations
]
ann_jaccard_cutoff = np.percentile(pos_jaccard_sims, ANN_JACCARD_PERCENTILE)
else:
ann_jaccard_cutoff = None
hard_negatives, nn_negatives, easy_negatives = self.get_negatives(
candidate_ids_set, candidate_ids_list, n_per_type, query, ann_jaccard_cutoff
)
for c in true_citations:
doc = self.corpus[c]
labels.append(label_for_doc(doc, self.margins_offset_dict['true']))
examples.append(doc)
for doc in hard_negatives:
labels.append(label_for_doc(doc, self.margins_offset_dict['hard']))
examples.append(doc)
for doc in nn_negatives:
labels.append(label_for_doc(doc, self.margins_offset_dict['nn']))
examples.append(doc)
for doc in easy_negatives:
labels.append(label_for_doc(doc, self.margins_offset_dict['easy']))
examples.append(doc)
labels = np.asarray(labels)
sorted_idx = np.argsort(labels)[::-1]
labels = labels[sorted_idx]
examples = [examples[i] for i in sorted_idx]
yield query, examples, labels
def triplet_generator(
self,
paper_ids,
candidate_ids=None,
batch_size=1024,
neg_to_pos_ratio=6
):
queries = []
batch_ex = []
batch_labels = []
# Sample examples from our sorted list. The margin between each example is the difference in their label:
# easy negatives (e.g. very bad results) should be further away from a true positive than hard negatives
# (less embarrassing).
for q, ex, labels in self._listwise_examples(paper_ids, candidate_ids, neg_to_pos_ratio):
num_true = len([l for l in labels if l >= self.margins_offset_dict['true']])
# ignore cases where we didn't find enough negatives...
if len(labels) < num_true * 2:
continue
# Sample pairs of (positive, negative)
pos = np.random.randint(0, num_true, len(labels) * 2)
neg = np.random.randint(num_true, len(labels), len(labels) * 2)
for ai, bi in zip(pos, neg):
queries.extend([q, q])
batch_ex.extend([ex[ai], ex[bi]])
batch_labels.extend([labels[ai], labels[bi]])
if len(queries) > batch_size:
if self.candidate_selector is None:
yield self.featurizer.transform_query_candidate(
queries, batch_ex
), np.asarray(batch_labels)
else:
confidence_scores = self.candidate_selector.confidence(q.id, [doc.id for
doc in batch_ex])
yield self.featurizer.transform_query_candidate(
queries, batch_ex, confidence_scores
), np.asarray(batch_labels)
del queries[:]
del batch_ex[:]
del batch_labels[:]
def get_negatives(
self, candidate_ids_set, candidate_ids_list, n_per_type, document, ann_jaccard_cutoff=1
):
'''
:param n_per_type: dictionary with keys: 'easy', 'hard_negatives', 'nn'
:param document: query document
:return: documents
'''
def sample(document_ids, n):
document_ids = document_ids.intersection(candidate_ids_set)
if len(document_ids) > n:
document_ids = np.random.choice(
list(document_ids), size=int(n), replace=False
)
return document_ids
# initialize some variables
doc_citations = set(document.out_citations)
doc_citations.add(document.id)
result_ids_dict = {}
for key in self.KEYS:
result_ids_dict[key] = set()
# step 0: make sure we heed the limitations about NN negatives
if self.ann is None:
n_per_type['easy'] += int(np.ceil(n_per_type['nn'] / 2.0))
n_per_type['hard_negatives'] += int(np.ceil(n_per_type['nn'] / 2.0))
n_per_type['nn'] = 0
# step 1: find hard citation negatives, and remove true positives from it
if n_per_type['hard_negatives'] > 0:
result_ids_dict['hard_negatives'] = set(
flatten(
[
list(self.corpus[id].out_citations) for id in document.out_citations
if id in self.corpus
]
)
)
result_ids_dict['hard_negatives'].difference_update(doc_citations)
# adding hard_negatives to doc_citations so we can remove them later
doc_citations.update(result_ids_dict['hard_negatives'])
        # step 2: get nearest neighbors from embeddings, and remove the true positives and the hard negatives found above
if n_per_type['nn'] > 0:
# getting more than we need because of the jaccard cutoff
candidate_nn_ids = self.ann.get_nns_by_id(
document.id, 10 * n_per_type['nn']
)
if ann_jaccard_cutoff < 1:
candidate_nn_ids = [
i for i in candidate_nn_ids
if jaccard(self.featurizer, document, self.corpus[i]) < ann_jaccard_cutoff
]
result_ids_dict['nn'] = set(candidate_nn_ids)
result_ids_dict['nn'].difference_update(doc_citations)
# adding ann_negatives to doc_citations so we can remove them later
doc_citations.update(result_ids_dict['nn'])
# step 3: get easy negatives
if n_per_type['easy'] > 0:
random_index = np.random.randint(len(candidate_ids_list))
random_index_range = np.arange(random_index, random_index + n_per_type['easy'])
result_ids_dict['easy'] = set(
np.take(candidate_ids_list, random_index_range, mode='wrap'))
result_ids_dict['easy'].difference_update(doc_citations)
# step 4: trim down the requested number of ids per type and get the actual documents
result_docs = []
for key in self.KEYS:
docs = [
self.corpus[doc_id]
for doc_id in sample(result_ids_dict[key], n_per_type[key])
]
result_docs.append(docs)
return result_docs
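# Usage sketch (illustrative; assumes `corpus` and `featurizer` already exist, and omits the
# optional ANN index and candidate selector):
#   dg = DataGenerator(corpus, featurizer)
#   gen = dg.triplet_generator(paper_ids=corpus.train_ids, batch_size=1024)
#   features, labels = next(gen)   # dict of padded arrays plus soft-margin labels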
|
citeomatic-master
|
citeomatic/features.py
|
from citeomatic.common import Document
def document_to_bibtex(doc: Document):
"""
Return a BibTeX string for the given document.
:param doc:
:return: str:
"""
authors = doc.authors
if authors:
author_prefix = authors[0].split(' ')[-1].lower()
else:
author_prefix = ''
title_prefix = doc.title.split()[0].lower()
params = {
'title': doc.title,
'author': ', '.join(doc.authors),
'venue': doc.venue,
'year': doc.year,
'bibname': '%s%s%s' % (author_prefix, doc.year, title_prefix)
}
return '''@article{%(bibname)s,
title={%(title)s},
author={%(author)s},
year={%(year)s}
}''' % params
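# Example (illustrative): for a Document with title "Deep Learning", first author
# "Yann LeCun" and year 2015, the generated key is "lecun2015deep" and the output is an
# @article entry with the title, author and year fields filled in.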
|
citeomatic-master
|
citeomatic/display.py
|
import logging
import os
import keras.backend as K
import tensorflow as tf
from citeomatic import service
from citeomatic.serialization import model_from_directory
from citeomatic.features import Corpus
from citeomatic.grobid_parser import GrobidParser
from citeomatic.neighbors import ANN, EmbeddingModel
from citeomatic.config import setup_default_logging
def get_session():
num_threads = os.environ.get('NUM_THREADS', None)
gpu_fraction = float(os.environ.get('GPU_FRACTION', '1.0'))
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction)
if num_threads:
return tf.Session(
config=tf.ConfigProto(
gpu_options=gpu_options,
intra_op_parallelism_threads=int(num_threads)
)
)
else:
return tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
K.set_session(get_session())
setup_default_logging(logging.INFO)
featurizer, models = model_from_directory(os.environ['MODEL_PATH'])
if 'ANN_MODEL_PATH' in os.environ:
featurizer, ann_models = model_from_directory(os.environ['ANN_MODEL_PATH'])
ann_model = EmbeddingModel(featurizer, ann_models['embedding'])
ann = ANN.load(os.environ['ANN_MODEL_PATH'] + '/citeomatic_ann')
corpus = Corpus.load(os.environ['CORPUS_PATH'])
else:
ann = None
ann_model = None
corpus = None
app = service.app
app.config['DEBUG'] = True
app.config['API_MODEL'] = service.APIModel(
corpus=corpus,
featurizer=featurizer,
models=models,
ann_embedding_model=ann_model,
ann=ann,
)
# assert os.environ.get('AWS_ACCESS_KEY_ID')
# assert os.environ.get('AWS_SECRET_ACCESS_KEY')
app.config['NODE_PROXY'] = os.environ.get('NODE_PROXY', 'http://localhost:5100')
app.config['S3_CACHE_ON'] = int(os.environ.get('S3_CACHE_ON', 0))
app.config['SECRET_KEY'] = 'top secret!'
app.config['GROBID'] = GrobidParser(
os.environ.get('GROBID_HOST', 'http://localhost:8080')
)
logging.info("S3_CACHE_ON=%s", app.config['S3_CACHE_ON'])
|
citeomatic-master
|
citeomatic/gunicorn.py
|
import importlib
import os
import pickle
from citeomatic import file_util
from citeomatic.schema_pb2 import Document as ProtoDoc
import spacy
from whoosh.fields import *
PAPER_EMBEDDING_MODEL = 'paper_embedder'
CITATION_RANKER_MODEL = 'citation_ranker'
nlp = spacy.load("en")
RESTRICTED_POS_TAGS = {'PUNCT', 'SYM', 'DET', 'NUM', 'SPACE', 'PART'}
schema = Schema(title=TEXT,
abstract=TEXT,
id=ID(stored=True))
def global_tokenizer(text, restrict_by_pos=False, lowercase=True, filter_empty_token=True):
if restrict_by_pos:
token_list = [
w.text for w in nlp(text) if w.pos_ not in RESTRICTED_POS_TAGS
]
else:
token_list = [w.text for w in nlp(text)]
if lowercase:
token_list = [w.lower() for w in token_list]
if filter_empty_token:
token_list = [w for w in token_list if len(w) > 0]
return token_list
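# Example (illustrative; assumes the spaCy "en" model is installed; token boundaries may
# vary across spaCy versions):
#   global_tokenizer("Neural Citation Networks!")  ->  ['neural', 'citation', 'networks', '!']
#   global_tokenizer("Neural Citation Networks!", restrict_by_pos=True) additionally drops
#   PUNCT/SYM/DET/NUM/SPACE/PART tokens, so the trailing '!' is removed.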
class FieldNames(object):
PAPER_ID = "id"
TITLE = "title"
ABSTRACT = "abstract"
AUTHORS = "authors"
VENUE = "venue"
YEAR = "year"
IN_CITATIONS = "in_citations"
OUT_CITATIONS = "out_citations"
KEY_PHRASES = "key_phrases"
URLS = "pdf_urls"
S2_URL = "s2_url"
OUT_CITATION_COUNT = 'out_citation_count'
IN_CITATION_COUNT = 'in_citation_count'
DATE = 'date'
TITLE_RAW = "title_raw"
ABSTRACT_RAW = "abstract_raw"
class DatasetPaths(object):
BASE_DIR = os.path.abspath("./data")
DBLP_GOLD_DIR = os.path.join(BASE_DIR, 'comparison/dblp/gold')
DBLP_CORPUS_JSON = os.path.join(BASE_DIR, 'comparison/dblp/corpus.json')
DBLP_DB_FILE = os.path.join(BASE_DIR, 'db/dblp.sqlite.db')
DBLP_BM25_INDEX = os.path.join(BASE_DIR, 'bm25_index/dblp/')
PUBMED_GOLD_DIR = os.path.join(BASE_DIR, 'comparison/pubmed/gold')
PUBMED_CORPUS_JSON = os.path.join(BASE_DIR, 'comparison/pubmed/corpus.json')
PUBMED_DB_FILE = os.path.join(BASE_DIR, 'db/pubmed.sqlite.db')
PUBMED_BM25_INDEX = os.path.join(BASE_DIR, 'bm25_index/pubmed/')
OC_FILE = os.path.join(BASE_DIR, 'open_corpus/papers-2017-02-21.json.gz')
OC_CORPUS_JSON = os.path.join(BASE_DIR, 'open_corpus/corpus.json')
OC_DB_FILE = os.path.join(BASE_DIR, 'db/oc.sqlite.db')
OC_BM25_INDEX = os.path.join(BASE_DIR, 'bm25_index/oc/')
OC_PKL_FILE = os.path.join(BASE_DIR, 'open_corpus/corpus.pkl')
OC_ANN_FILE = os.path.join(BASE_DIR, 'open_corpus/ann.pkl')
PRETRAINED_DIR = os.path.join(BASE_DIR, 'pretrained')
EMBEDDING_WEIGHTS_FILENAME = 'embedding.h5'
PRETRAINED_VOCAB_FILENAME = 'vocab.txt'
FEATURIZER_FILENAME = 'featurizer.pickle'
OPTIONS_FILENAME = 'options.json'
CITEOMATIC_WEIGHTS_FILENAME = 'weights.h5'
def embeddings_weights_for_corpus(self, corpus_name):
return os.path.join(
self.PRETRAINED_DIR,
corpus_name + '_' + self.EMBEDDING_WEIGHTS_FILENAME
)
def vocab_for_corpus(self, corpus_name):
return os.path.join(
self.PRETRAINED_DIR,
corpus_name + '_' + self.PRETRAINED_VOCAB_FILENAME
)
def get_json_path(self, corpus_name):
if corpus_name.lower() == 'dblp':
return self.DBLP_CORPUS_JSON
elif corpus_name.lower() == 'pubmed':
return self.PUBMED_CORPUS_JSON
elif (corpus_name.lower() == 'oc'
or corpus_name.lower() == 'open_corpus'
or corpus_name.lower() == 'opencorpus'):
return self.OC_CORPUS_JSON
else:
return None
def get_bm25_index_path(self, corpus_name):
if corpus_name.lower() == 'dblp':
return self.DBLP_BM25_INDEX
elif corpus_name.lower() == 'pubmed':
return self.PUBMED_BM25_INDEX
elif (corpus_name.lower() == 'oc'
or corpus_name.lower() == 'open_corpus'
or corpus_name.lower() == 'opencorpus'):
return self.OC_BM25_INDEX
else:
return None
def get_db_path(self, corpus_name):
if corpus_name.lower() == 'dblp':
return self.DBLP_DB_FILE
elif corpus_name.lower() == 'pubmed':
return self.PUBMED_DB_FILE
elif (corpus_name.lower() == 'oc'
or corpus_name.lower() == 'open_corpus'
or corpus_name.lower() == 'opencorpus'):
return self.OC_DB_FILE
else:
return None
def get_pkl_path(self, corpus_name):
if (corpus_name.lower() == 'oc'
or corpus_name.lower() == 'open_corpus'
or corpus_name.lower() == 'opencorpus'):
return self.OC_PKL_FILE
else:
assert False
class Document(object):
_fields = [
FieldNames.TITLE,
FieldNames.ABSTRACT,
FieldNames.AUTHORS,
FieldNames.OUT_CITATIONS,
FieldNames.YEAR,
FieldNames.PAPER_ID,
FieldNames.VENUE,
FieldNames.IN_CITATION_COUNT,
FieldNames.OUT_CITATION_COUNT,
FieldNames.KEY_PHRASES,
FieldNames.DATE,
FieldNames.TITLE_RAW,
FieldNames.ABSTRACT_RAW,
]
def __init__(
self,
title,
abstract,
authors,
out_citations,
year,
id: str,
venue,
in_citation_count,
out_citation_count,
key_phrases,
title_raw,
abstract_raw,
date=None,
candidate_selector_confidence=None
):
self.title = title
self.abstract = abstract
self.authors = authors
self.out_citations = out_citations
self.year = year
self.id = id
self.venue = venue
self.in_citation_count = in_citation_count
self.out_citation_count = out_citation_count
self.key_phrases = key_phrases
self.date = date
self.title_raw = title_raw
self.abstract_raw = abstract_raw
self.candidate_selector_confidence = candidate_selector_confidence
def __iter__(self):
for k in self._fields:
yield getattr(self, k)
def _asdict(self):
return dict(**self.__dict__)
@staticmethod
def from_proto_doc(doc: ProtoDoc):
out_citations = [c for c in doc.out_citations]
return Document(
title=doc.title,
abstract=doc.abstract,
authors=[a for a in doc.authors],
out_citations=out_citations,
in_citation_count=doc.in_citation_count,
year=doc.year,
id=doc.id,
venue=doc.venue,
out_citation_count=len(out_citations),
key_phrases=[p for p in doc.key_phrases],
title_raw=doc.title_raw,
abstract_raw=doc.abstract_raw,
)
class ModelLoader(pickle.Unpickler):
def find_class(self, mod_name, klass_name):
if mod_name[:4] == 'ai2.':
mod_name = mod_name[4:]
mod = importlib.import_module(mod_name)
return getattr(mod, klass_name)
def load_pickle(filename):
with file_util.open(filename) as f:
return ModelLoader(f).load()
|
citeomatic-master
|
citeomatic/common.py
|
import functools
import sys
import importlib
from typing import TypeVar, Iterator, Callable, List
PY3 = sys.version_info[0] == 3
if PY3:
reload = importlib.reload
T = TypeVar('T')
U = TypeVar('U')
def import_from(module, name, reload_flag=False):
'''
usage example:
grid = import_from('sklearn.model_selection', 'GridSearchCV')
is equivalent to:
    from sklearn.model_selection import GridSearchCV as grid
'''
module = importlib.import_module(module)
if reload_flag:
module = reload(module)
return getattr(module, name)
def flatten(lst):
"""Flatten `lst` (return the depth first traversal of `lst`)"""
out = []
for v in lst:
if v is None:
continue
if isinstance(v, list):
out.extend(flatten(v))
else:
out.append(v)
return out
def once(fn):
cache = {}
@functools.wraps(fn)
def _fn():
if 'result' not in cache:
cache['result'] = fn()
return cache['result']
return _fn
def batch_apply(
generator: Iterator[T],
evaluator: Callable[[List[T]], List[U]],
batch_size=256
):
"""
Invoke `evaluator` using batches consumed from `generator`.
    Some functions (e.g. Keras models) are much more efficient when evaluated with large batches of
inputs at a time. This function simplifies streaming data through these models.
"""
for batch in batchify(generator, batch_size):
yield from evaluator(batch)
def batchify(it: Iterator[T], batch_size=128) -> Iterator[List[T]]:
batch = []
for item in it:
batch.append(item)
if len(batch) >= batch_size:
yield batch
batch = []
if batch:
yield batch
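# Examples (illustrative):
#   list(batchify(iter(range(5)), batch_size=2))  ->  [[0, 1], [2, 3], [4]]
#   list(batch_apply(iter(range(5)), lambda xs: [x * x for x in xs], batch_size=2))
#       ->  [0, 1, 4, 9, 16]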
|
citeomatic-master
|
citeomatic/utils.py
|
import logging
from abc import ABC
from whoosh import scoring, qparser
from whoosh.filedb.filestore import FileStorage, copy_to_ram
from whoosh.index import FileIndex
from whoosh.qparser import MultifieldParser
from citeomatic.common import schema, FieldNames
from citeomatic.corpus import Corpus
from citeomatic.neighbors import ANN
from citeomatic.neighbors import EmbeddingModel
import numpy as np
class CandidateSelector(ABC):
def __init__(self, top_k=100):
self.top_k = top_k
def fetch_candidates(self, doc_id, candidates_id_pool) -> tuple:
"""
        For each query paper, return a list of candidates and associated scores.
        The number of candidates fetched is bounded by `self.top_k`.
        :param doc_id: Document ID to get candidates for
        :param candidates_id_pool: Set of candidate IDs to limit candidates to
        :return: tuple of (candidate ids, candidate scores)
"""
pass
def confidence(self, doc_id, candidate_ids):
"""
:param doc_id:
:param candidate_ids:
:return:
"""
pass
class ANNCandidateSelector(CandidateSelector):
def __init__(
self,
corpus: Corpus,
ann: ANN,
paper_embedding_model: EmbeddingModel,
top_k: int,
extend_candidate_citations: bool
):
super().__init__(top_k)
self.corpus = corpus
self.ann = ann
self.paper_embedding_model = paper_embedding_model
self.extend_candidate_citations = extend_candidate_citations
def fetch_candidates(self, doc_id, candidate_ids_pool: set):
doc = self.corpus[doc_id]
doc_embedding = self.paper_embedding_model.embed(doc)
# 1. Fetch candidates from ANN index
nn_candidates = self.ann.get_nns_by_vector(doc_embedding, self.top_k + 1)
# 2. Remove the current document from candidate list
if doc_id in nn_candidates:
nn_candidates.remove(doc_id)
candidate_ids = nn_candidates[:self.top_k]
# 3. Check if we need to include citations of candidates found so far.
if self.extend_candidate_citations:
extended_candidate_ids = []
for candidate_id in candidate_ids:
extended_candidate_ids.extend(self.corpus[candidate_id].out_citations)
candidate_ids = candidate_ids + extended_candidate_ids
logging.debug("Number of candidates found: {}".format(len(candidate_ids)))
candidate_ids = set(candidate_ids).intersection(candidate_ids_pool)
if doc_id in candidate_ids:
candidate_ids.remove(doc_id)
candidate_ids_list = list(candidate_ids)
candidate_ids_list = [candidate_doc_id for candidate_doc_id in candidate_ids_list if
self.corpus[candidate_doc_id].year <= self.corpus[doc_id].year]
confidence_scores = self.confidence(doc_id, candidate_ids_list)
sorted_pairs = sorted(zip(candidate_ids_list, confidence_scores), key=lambda x: x[1],
reverse=True)
sorted_candidate_ids = []
sorted_scores = []
for pair in sorted_pairs:
sorted_candidate_ids.append(pair[0])
sorted_scores.append(pair[1])
return sorted_candidate_ids, sorted_scores
def confidence(self, doc_id, candidate_ids):
doc = self.corpus[doc_id]
doc_embedding = self.paper_embedding_model.embed(doc)
return self.ann.get_similarities(doc_embedding, candidate_ids)
class BM25CandidateSelector(CandidateSelector):
def __init__(
self,
corpus: Corpus,
index_path: str,
top_k,
extend_candidate_citations: bool
):
super().__init__(top_k)
self.index_path = index_path
storage = FileStorage(self.index_path, readonly=True)
self._bm25_index = FileIndex(copy_to_ram(storage), schema=schema)
self.searcher = self._bm25_index.searcher(weighting=scoring.BM25F)
self.query_parser = MultifieldParser([FieldNames.TITLE, FieldNames.ABSTRACT],
self._bm25_index.schema, group=qparser.OrGroup)
self.corpus = corpus
self.extend_candidate_citations = extend_candidate_citations
def fetch_candidates(self, doc_id, candidate_ids_pool):
title_key_terms = ' '.join([
t for t,_ in self.searcher.key_terms_from_text('title', self.corpus[doc_id].title,
numterms=3)]
)
abstract_key_terms = ' '.join([
t for t,_ in self.searcher.key_terms_from_text('abstract', self.corpus[doc_id].abstract)]
)
        # Build the BM25 query from the extracted key terms and search the index
query = self.query_parser.parse(title_key_terms + " " + abstract_key_terms)
results = self.searcher.search(query, limit=self.top_k + 1, optimize=True, scored=True)
candidate_ids_pool = set(candidate_ids_pool)
candidate_ids = []
candidate_scores = []
for result in results:
if result['id'] in candidate_ids_pool and result['id'] != doc_id:
candidate_ids.append(result['id'])
candidate_scores.append(result.score)
return candidate_ids, candidate_scores
class OracleCandidateSelector(CandidateSelector):
def __init__(self, corpus: Corpus):
super().__init__()
self.corpus = corpus
def fetch_candidates(self, doc_id, candidate_ids_pool):
candidates = set(self.corpus.get_citations(doc_id))
candidates.intersection_update(candidate_ids_pool)
return list(candidates), np.ones(len(candidates))
|
citeomatic-master
|
citeomatic/candidate_selectors.py
|
import numpy as np
from citeomatic.corpus import Corpus
from citeomatic.features import Featurizer
class Ranker:
def __init__(self, corpus: Corpus, featurizer: Featurizer, citation_ranker,
num_candidates_to_rank):
self.corpus = corpus
self.featurizer = featurizer
self.citation_ranker = citation_ranker
self.num_candidates_to_rank = num_candidates_to_rank
def rank(self, query_id, candidate_ids, similarities):
query = self.corpus[query_id]
candidates = [self.corpus[id] for id in candidate_ids]
features = self.featurizer.transform_query_and_results(query, candidates, similarities)
scores = self.citation_ranker.predict(features, batch_size=1024).flatten()
best_matches = np.argsort(scores)[::-1]
predictions = []
pred_scores = []
for i, match_idx in enumerate(best_matches[:self.num_candidates_to_rank]):
predictions.append(candidates[match_idx].id)
pred_scores.append(float(scores[match_idx]))
return predictions, pred_scores
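# Illustration: the heart of Ranker.rank is a descending argsort over the model's scores.
# With hypothetical scores
#   scores = np.asarray([0.1, 0.9, 0.4])
#   np.argsort(scores)[::-1]  ->  array([1, 2, 0])
# so the candidate scored 0.9 is returned first, followed by 0.4 and 0.1.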
class NoneRanker(object):
def rank(self, query_id, candidate_ids, similarities):
return candidate_ids, similarities
|
citeomatic-master
|
citeomatic/ranker.py
|
import collections
import logging
import re
import arrow
import requests
import untangle
from citeomatic.utils import flatten
date_parser = re.compile(r'[^\d](?:19|20)\d\d[^\d]')
CURRENT_YEAR = arrow.now().year
EARLIEST_YEAR = 1970
def _all_text(doc):
child_text = [_all_text(c) for c in doc.children]
cdata_text = doc.cdata.strip() if doc.cdata is not None else ''
return child_text + [cdata_text]
def _reference_dates(doc):
if 'date' in [c._name for c in doc.children]:
try:
date = int(doc.date['when'])
return [date]
except:
return []
else:
return [_reference_dates(c) for c in doc.children]
def _find_latest_year(doc):
text = ' '.join(flatten(_all_text(doc)))
ref_dates = flatten(_reference_dates(doc))
results = [int(r[1:-1]) for r in date_parser.findall(text)] + ref_dates
results = sorted(results)
best_result = None
for r in results:
if CURRENT_YEAR >= r >= EARLIEST_YEAR:
best_result = r
return best_result
def _extract_authors(file_desc):
try:
author_groups = file_desc.sourceDesc.biblStruct.analytic.author
except IndexError:
logging.warning('Failed to find author group.')
return []
authors = []
for anode in author_groups:
try:
forename = anode.persName.forename
surname = anode.persName.surname
forename = forename.cdata if hasattr(forename,
'cdata') else forename[0].cdata
surname = surname.cdata
authors.append('%s %s' % (forename, surname))
except IndexError:
logging.warning('Failed to parse author %s', anode)
return authors
def _extract_year(doc, file_desc):
try:
return int(file_desc.publicationStmt.date['when'].split("-")[0])
except Exception as e:
year_guess = _find_latest_year(doc)
if year_guess is not None:
return year_guess
else:
return 2017
def _extract_refs(doc):
try:
references_list = []
references = [
ele for ele in doc.TEI.text.back.div if ele['type'] == 'references'
][0]
for item in references.listBibl.biblStruct:
references_list.append(item.children[0].title.cdata.lower())
return references_list
except IndexError as e:
logging.warning('Failed to parse references.')
return []
def _extract_abstract(profile_desc):
try:
return profile_desc.abstract.p.cdata
except IndexError as e:
logging.warning('Failed to parse abstract', exc_info=1)
logging.warning('%s', profile_desc)
raise
def _extract_title(file_desc):
try:
return file_desc.titleStmt.title.cdata
except IndexError as e:
logging.warning('Failed to parse title', exc_info=1)
logging.warning('%s', file_desc.titleStmt)
raise
GrobidResponse = collections.namedtuple(
'GrobidResponse', ['title', 'authors', 'abstract', 'references', 'year']
)
def parse_full_text(raw) -> GrobidResponse:
    doc = untangle.parse(raw)
file_desc = doc.TEI.teiHeader.fileDesc
profile_desc = doc.TEI.teiHeader.profileDesc
return GrobidResponse(
title=_extract_title(file_desc),
authors=_extract_authors(file_desc),
abstract=_extract_abstract(profile_desc),
references=_extract_refs(doc),
year=_extract_year(doc, file_desc)
)
def parse_header_text(raw) -> GrobidResponse:
file_desc = untangle.parse(raw).TEI.teiHeader.fileDesc
profile_desc = untangle.parse(raw).TEI.teiHeader.profileDesc
return GrobidResponse(
title=_extract_title(file_desc),
authors=_extract_authors(file_desc),
abstract=_extract_abstract(profile_desc),
references=[],
year=2017,
)
class GrobidParser(object):
def __init__(self, grobid_url):
self._grobid_url = grobid_url
def parse(self, pdf) -> GrobidResponse:
url = '%s/processFulltextDocument' % self._grobid_url
xml = requests.post(url, files=[pdf]).text
try:
return parse_full_text(xml)
except:
logging.warning('Failed to parse full PDF, falling back on header.')
print('Failed to parse full PDF, falling back on header.')
url = '%s/processHeaderDocument' % self._grobid_url
xml = requests.post(url, files=[pdf]).text
return parse_header_text(xml)
|
citeomatic-master
|
citeomatic/grobid_parser.py
|
import typing
import traitlets
T1 = typing.TypeVar('T1')
T2 = typing.TypeVar('T2')
T3 = typing.TypeVar('T3')
T4 = typing.TypeVar('T4')
T = typing.TypeVar('T')
K = typing.TypeVar('K')
V = typing.TypeVar('V')
# Define wrappers for traitlets classes. These simply provide Python type hints
# that correspond to the actual instance type that will result after a class is
# instantiated (e.g. Unicode() becomes a string).
#
# This allows PyCharm style type hinting to resolve types properly.
def Float(*args, **kw) -> float:
return traitlets.Float(*args, **kw)
def CFloat(*args, **kw) -> float:
return traitlets.CFloat(*args, **kw)
def Int(*args, **kw) -> int:
return traitlets.Int(*args, **kw)
def Bool(*args, **kw) -> bool:
return traitlets.Bool(*args, **kw)
def Enum(options: typing.List[T], **kw) -> T:
return traitlets.Enum(options, **kw)
def List(klass: T, **kw) -> typing.List[T]:
return traitlets.List(klass, **kw)
def Set(klass: T, **kw) -> typing.Set[T]:
return traitlets.Set(klass, **kw)
# N.B. traitlets.Dict does not check key types.
def Dict(val_class: V, **kw) -> typing.Dict[typing.Any, V]:
return traitlets.Dict(val_class, **kw)
def Tuple1(a: T1) -> typing.Tuple[T1]:
return traitlets.Tuple(a)
def Tuple2(a: T1, b: T2) -> typing.Tuple[T1, T2]:
return traitlets.Tuple(a, b)
def Unicode(*args, **kw) -> str:
return traitlets.Unicode(*args, **kw)
def Instance(klass: T, **kw) -> T:
return traitlets.Instance(klass, **kw)
def Array(**kw):
import numpy
return Instance(numpy.ndarray, **kw)
def DataFrameType(**kw):
import pandas
return Instance(pandas.DataFrame, **kw)
def Any(**kw) -> typing.Any:
return traitlets.Any(**kw)
# Just a direct copy for now to provide a consistent interface.
HasTraits = traitlets.HasTraits
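# Usage sketch (illustrative; the Options class below is hypothetical):
#   class Options(HasTraits):
#       lr = Float(0.001)
#       model_name = Unicode('paper_embedder')
#       layer_sizes = List(Int(), default_value=[128, 64])
#   opts = Options(lr=0.01)   # traitlets validates values, IDEs see float/str/list hints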
|
citeomatic-master
|
citeomatic/traits.py
|
#!/usr/bin/env python
"""
Helpers for loading serialized citeomatic models and featurizers from a model directory.
"""
import json
import os
from typing import Tuple, Any
import tensorflow as tf
from citeomatic import file_util
from citeomatic.common import DatasetPaths
from citeomatic.features import Featurizer
from citeomatic.models.options import ModelOptions
from citeomatic.utils import import_from
def model_from_directory(dirname: str, on_cpu=False) -> Tuple[Featurizer, Any]:
dp = DatasetPaths()
options_json = file_util.read_json(
os.path.join(dirname, dp.OPTIONS_FILENAME),
)
options = ModelOptions(**json.loads(options_json))
featurizer_file_prefix = 'pretrained_' if options.use_pretrained else 'corpus_fit_'
featurizer = file_util.read_pickle(
os.path.join(dirname, featurizer_file_prefix + dp.FEATURIZER_FILENAME)
) # type: Featurizer
options.n_authors = featurizer.n_authors
options.n_features = featurizer.n_features
options.n_venues = featurizer.n_venues
options.n_keyphrases = featurizer.n_keyphrases
create_model = import_from(
'citeomatic.models.%s' % options.model_name, 'create_model'
)
if on_cpu:
with tf.device('/cpu:0'):
models = create_model(options)
else:
models = create_model(options)
print("Loading model from %s " % dirname)
print(models['citeomatic'].summary())
if dirname.startswith('s3://'):
models['citeomatic'].load_weights(
file_util.cache_file(os.path.join(dirname, dp.CITEOMATIC_WEIGHTS_FILENAME))
)
models['embedding'].load_weights(
file_util.cache_file(os.path.join(dirname, dp.EMBEDDING_WEIGHTS_FILENAME))
)
else:
models['citeomatic'].load_weights(os.path.join(dirname, dp.CITEOMATIC_WEIGHTS_FILENAME))
if models['embedding'] is not None:
models['embedding'].load_weights(os.path.join(dirname, dp.EMBEDDING_WEIGHTS_FILENAME))
return featurizer, models
|
citeomatic-master
|
citeomatic/serialization.py
|
import collections
import logging
import os
import resource
import h5py
import keras
import numpy as np
import tensorflow as tf
import tqdm
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.optimizers import TFOptimizer
from citeomatic import file_util
from citeomatic.candidate_selectors import CandidateSelector, ANNCandidateSelector
from citeomatic.common import DatasetPaths
from citeomatic.corpus import Corpus
from citeomatic.features import DataGenerator
from citeomatic.features import Featurizer
from citeomatic.models import layers
from citeomatic.models.options import ModelOptions
from citeomatic.neighbors import EmbeddingModel, ANN
from citeomatic.ranker import Ranker
from citeomatic.serialization import model_from_directory
from citeomatic.utils import import_from
from citeomatic.eval_metrics import precision_recall_f1_at_ks, average_results, f1
EVAL_KEYS = [1, 5, 10, 20, 50, 100, 1000]
EVAL_DATASET_KEYS = {'dblp': 10,
'pubmed': 20,
'oc': 20}
EVAL_DOC_MIN_CITATION = {
'dblp': 10,
'pubmed': 10,
'oc': 1
}
class ValidationCallback(keras.callbacks.Callback):
def __init__(self, corpus, candidate_selector, ranker, n_valid):
super().__init__()
self.candidate_selector = candidate_selector
self.corpus = corpus
self.ranker = ranker
self.losses = []
self.n_valid = n_valid
def on_epoch_end(self, epoch, logs={}):
self.losses.append(logs.get('loss'))
p_r_f1_mrr = eval_text_model(
self.corpus,
self.candidate_selector,
self.ranker,
papers_source='valid',
n_eval=self.n_valid
)
for k, v in p_r_f1_mrr.items():
logs[k] = v
class MemoryUsageCallback(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs={}):
logging.info(
'\nCurrent memory usage: %s',
resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1e6
)
class UpdateANN(keras.callbacks.Callback):
def __init__(self,
corpus,
featurizer,
embedding_model,
training_data_generator: DataGenerator,
validation_data_generator: DataGenerator,
embed_at_epoch_end,
embed_at_train_begin):
super().__init__()
self.corpus = corpus
self.featurizer = featurizer
self.embedding_model = embedding_model
self.training_data_generator = training_data_generator
self.validation_data_generator = validation_data_generator
        self.embed_at_epoch_end = embed_at_epoch_end
self.embed_at_train_begin = embed_at_train_begin
def _re_embed(self, load_pkl=False):
embedder = EmbeddingModel(self.featurizer, self.embedding_model)
if load_pkl and self.corpus.corpus_type == 'oc' and os.path.exists(
DatasetPaths.OC_ANN_FILE + ".pickle"):
ann = ANN.load(DatasetPaths.OC_ANN_FILE)
else:
ann = ANN.build(embedder, self.corpus, ann_trees=10)
candidate_selector = ANNCandidateSelector(
corpus=self.corpus,
ann=ann,
paper_embedding_model=embedder,
top_k=100,
extend_candidate_citations=False
)
self.training_data_generator.ann = ann
self.training_data_generator.candidate_selector = candidate_selector
self.validation_data_generator.ann = self.training_data_generator.ann
self.validation_data_generator.candidate_selector = self.training_data_generator.candidate_selector
def on_train_begin(self, logs=None):
if self.embed_at_train_begin:
logging.info(
'Beginning training. Embedding corpus.',
)
self._re_embed(load_pkl=True)
def on_epoch_end(self, epoch, logs=None):
        if self.embed_at_epoch_end:
logging.info(
                'Epoch %d ended. Retraining approximate nearest neighbors model.',
epoch + 1
)
self._re_embed()
def train_text_model(
corpus: Corpus,
featurizer: Featurizer,
model_options: ModelOptions,
models_ann_dir=None,
debug=False,
tensorboard_dir=None,
):
"""
Utility function for training citeomatic models.
"""
# load pretrained embeddings
if model_options.use_pretrained:
dp = DatasetPaths()
pretrained_embeddings_file = dp.embeddings_weights_for_corpus('shared')
with h5py.File(pretrained_embeddings_file, 'r') as f:
pretrained_embeddings = f['embedding'][...]
else:
pretrained_embeddings = None
create_model = import_from(
'citeomatic.models.%s' % model_options.model_name,
'create_model'
)
models = create_model(model_options, pretrained_embeddings)
model, embedding_model = models['citeomatic'], models['embedding']
logging.info(model.summary())
if model_options.train_for_test_set:
paper_ids_for_training = corpus.train_ids + corpus.valid_ids
candidates_for_training = corpus.train_ids + corpus.valid_ids + corpus.test_ids
else:
paper_ids_for_training = corpus.train_ids
candidates_for_training = corpus.train_ids + corpus.valid_ids
training_dg = DataGenerator(corpus=corpus,
featurizer=featurizer,
margin_multiplier=model_options.margin_multiplier,
use_variable_margin=model_options.use_variable_margin)
training_generator = training_dg.triplet_generator(
paper_ids=paper_ids_for_training,
candidate_ids=candidates_for_training,
batch_size=model_options.batch_size,
neg_to_pos_ratio=model_options.neg_to_pos_ratio
)
validation_dg = DataGenerator(corpus=corpus,
featurizer=featurizer,
margin_multiplier=model_options.margin_multiplier,
use_variable_margin=model_options.use_variable_margin)
validation_generator = validation_dg.triplet_generator(
paper_ids=corpus.valid_ids,
candidate_ids=corpus.train_ids + corpus.valid_ids,
batch_size=1024,
neg_to_pos_ratio=model_options.neg_to_pos_ratio
)
if model_options.optimizer == 'tfopt':
optimizer = TFOptimizer(
tf.contrib.opt.LazyAdamOptimizer(learning_rate=model_options.lr)
)
else:
optimizer = import_from(
'keras.optimizers', model_options.optimizer
)(lr=model_options.lr)
model.compile(optimizer=optimizer, loss=layers.triplet_loss)
# training calculation
model_options.samples_per_epoch = int(np.minimum(
model_options.samples_per_epoch, model_options.total_samples
))
epochs = int(np.ceil(
model_options.total_samples / model_options.samples_per_epoch
))
steps_per_epoch = int(
model_options.samples_per_epoch / model_options.batch_size
)
# callbacks
callbacks_list = []
if debug:
callbacks_list.append(MemoryUsageCallback())
if model_options.tb_dir is not None:
callbacks_list.append(
TensorBoard(
log_dir=model_options.tb_dir, histogram_freq=1, write_graph=True
)
)
if model_options.reduce_lr_flag:
if model_options.optimizer != 'tfopt':
callbacks_list.append(
ReduceLROnPlateau(
verbose=1, patience=2, epsilon=0.01, min_lr=1e-6, factor=0.5
)
)
if models_ann_dir is None:
ann_featurizer = featurizer
paper_embedding_model = embedding_model
embed_at_epoch_end = True
embed_at_train_begin = False
else:
ann_featurizer, ann_models = model_from_directory(models_ann_dir, on_cpu=True)
paper_embedding_model = ann_models['embedding']
paper_embedding_model._make_predict_function()
embed_at_epoch_end = False
embed_at_train_begin = True
callbacks_list.append(
UpdateANN(corpus, ann_featurizer, paper_embedding_model, training_dg, validation_dg,
embed_at_epoch_end, embed_at_train_begin)
)
if model_options.tb_dir is None:
validation_data = validation_generator
else:
validation_data = next(validation_generator)
    # train the model
model.fit_generator(
generator=training_generator,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
callbacks=callbacks_list,
        validation_data=validation_data,
validation_steps=10
)
return model, embedding_model
def end_to_end_training(model_options: ModelOptions, dataset_type, models_dir, models_ann_dir=None):
# step 1: make the directory
if not os.path.exists(models_dir):
os.makedirs(models_dir)
# step 2: load the corpus DB
print("Loading corpus db...")
dp = DatasetPaths()
db_file = dp.get_db_path(dataset_type)
json_file = dp.get_json_path(dataset_type)
if not os.path.isfile(db_file):
print("Have to build the database! This may take a while, but should only happen once.")
Corpus.build(db_file, json_file)
if dataset_type == 'oc':
corpus = Corpus.load_pkl(dp.get_pkl_path(dataset_type))
else:
corpus = Corpus.load(db_file, model_options.train_frac)
# step 3: load/make the featurizer (once per hyperopt run)
print("Making feautrizer")
featurizer_file_prefix = 'pretrained_' if model_options.use_pretrained else 'corpus_fit_'
featurizer_file = os.path.join(models_dir, featurizer_file_prefix + dp.FEATURIZER_FILENAME)
if os.path.isfile(featurizer_file):
featurizer = file_util.read_pickle(featurizer_file)
else:
featurizer = Featurizer(
max_features=model_options.max_features,
max_title_len=model_options.max_title_len,
max_abstract_len=model_options.max_abstract_len,
use_pretrained=model_options.use_pretrained,
min_author_papers=model_options.min_author_papers,
min_venue_papers=model_options.min_venue_papers,
min_keyphrase_papers=model_options.min_keyphrase_papers
)
featurizer.fit(corpus, is_featurizer_for_test=model_options.train_for_test_set)
file_util.write_pickle(featurizer_file, featurizer)
# update model options after featurization
model_options.n_authors = featurizer.n_authors
model_options.n_venues = featurizer.n_venues
model_options.n_keyphrases = featurizer.n_keyphrases
model_options.n_features = featurizer.n_features
if model_options.use_pretrained:
model_options.dense_dim = model_options.dense_dim_pretrained
# step 4: train the model
citeomatic_model, embedding_model = train_text_model(
corpus,
featurizer,
model_options,
models_ann_dir=models_ann_dir,
debug=True,
tensorboard_dir=None
)
# step 5: save the model
citeomatic_model.save_weights(
os.path.join(models_dir, dp.CITEOMATIC_WEIGHTS_FILENAME), overwrite=True
)
if embedding_model is not None:
embedding_model.save_weights(
os.path.join(models_dir, dp.EMBEDDING_WEIGHTS_FILENAME), overwrite=True
)
file_util.write_json(
os.path.join(models_dir, dp.OPTIONS_FILENAME),
model_options.to_json(),
)
return corpus, featurizer, model_options, citeomatic_model, embedding_model
def _gold_citations(doc_id: str, corpus: Corpus, min_citations: int, candidate_ids_pool: set):
gold_citations_1 = set(corpus.get_citations(doc_id))
if doc_id in gold_citations_1:
gold_citations_1.remove(doc_id)
citations_of_citations = []
for c in gold_citations_1:
citations_of_citations.extend(corpus.get_citations(c))
gold_citations_2 = set(citations_of_citations).union(gold_citations_1)
if doc_id in gold_citations_2:
gold_citations_2.remove(doc_id)
gold_citations_1.intersection_update(candidate_ids_pool)
gold_citations_2.intersection_update(candidate_ids_pool)
if len(gold_citations_1) < min_citations:
return [], []
return gold_citations_1, gold_citations_2
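# _gold_citations builds two gold sets restricted to candidate_ids_pool: level 1 is the direct
# out-citations of doc_id, level 2 additionally includes the out-citations of each level-1
# paper. E.g. if A cites {B, C} and B cites {D} (all in the pool), then for A:
#     gold_citations_1 == {B, C} and gold_citations_2 == {B, C, D}.
# Papers with fewer than min_citations level-1 citations yield ([], []).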
def eval_text_model(
corpus: Corpus,
candidate_selector: CandidateSelector,
ranker: Ranker,
papers_source='valid',
min_citations=1,
n_eval=None
):
if papers_source == 'valid':
paper_ids_for_eval = corpus.valid_ids
candidate_ids_pool = corpus.train_ids + corpus.valid_ids
elif papers_source == 'train':
paper_ids_for_eval = corpus.train_ids
candidate_ids_pool = corpus.train_ids
else:
logging.info("Using Test IDs")
paper_ids_for_eval = corpus.test_ids
candidate_ids_pool = corpus.train_ids + corpus.valid_ids + corpus.test_ids
if corpus.corpus_type == 'dblp' or corpus.corpus_type == 'pubmed':
# Hack to compare with previous work. BEWARE: do not do real experiments this way !!!
candidate_ids_pool = corpus.train_ids
candidate_ids_pool = set(candidate_ids_pool)
logging.info(
"Restricting Candidates pool and gold citations to {} docs".format(len(candidate_ids_pool)))
if n_eval is not None:
if n_eval < len(paper_ids_for_eval):
logging.info("Selecting a random sample of {} papers for evaluation.".format(n_eval))
np.random.seed(110886)
paper_ids_for_eval = np.random.choice(paper_ids_for_eval, n_eval, replace=False)
else:
logging.info("Using all {} papers for evaluation.".format(len(paper_ids_for_eval)))
# eval_doc_predictions = []
results_1 = []
results_2 = []
for doc_id in tqdm.tqdm(paper_ids_for_eval):
gold_citations_1, gold_citations_2 = _gold_citations(doc_id, corpus, min_citations,
candidate_ids_pool)
if len(gold_citations_1) < EVAL_DOC_MIN_CITATION[corpus.corpus_type]:
logging.debug("Skipping doc id : {}".format(doc_id))
continue
candidate_ids, confidence_scores = candidate_selector.fetch_candidates(doc_id,
candidate_ids_pool)
if len(candidate_ids) == 0:
continue
predictions, scores = ranker.rank(doc_id, candidate_ids, confidence_scores)
logging.debug("Done! Found %s predictions." % len(predictions))
# eval_doc_predictions.append(predictions)
r_1 = precision_recall_f1_at_ks(
gold_y=gold_citations_1,
predictions=predictions,
scores=None,
k_list=EVAL_KEYS
)
r_2 = precision_recall_f1_at_ks(
gold_y=gold_citations_2,
predictions=predictions,
scores=None,
k_list=EVAL_KEYS
)
results_1.append(r_1)
results_2.append(r_2)
logging.info("Found {} papers in the test set after filtering docs with fewer than {} "
"citations".format(len(results_1), EVAL_DOC_MIN_CITATION[corpus.corpus_type]))
averaged_results_1 = average_results(results_1)
averaged_results_2 = average_results(results_2)
return {
'precision_1': {k: v for k, v in zip(EVAL_KEYS, averaged_results_1['precision'])},
'recall_1': {k: v for k, v in zip(EVAL_KEYS, averaged_results_1['recall'])},
'f1_1_per_paper': {k: v for k, v in zip(EVAL_KEYS, averaged_results_1['f1'])},
'mrr_1': averaged_results_1['mrr'],
'f1_1': {k: f1(p, r) for k, p, r in zip(EVAL_KEYS, averaged_results_1['precision'],
averaged_results_1['recall'])}
}
|
citeomatic-master
|
citeomatic/training.py
|
import json
import logging
import sqlite3
import pickle
import tqdm
from citeomatic import file_util
from citeomatic.common import FieldNames, Document, DatasetPaths
from citeomatic.utils import batchify
from citeomatic.schema_pb2 import Document as ProtoDoc
def stream_papers(data_path):
for line_json in tqdm.tqdm(file_util.read_json_lines(data_path)):
citations = set(line_json[FieldNames.OUT_CITATIONS])
citations.discard(line_json[FieldNames.PAPER_ID]) # remove self-citations
citations = list(citations)
in_citation_count = int(line_json[FieldNames.IN_CITATION_COUNT])
key_phrases = list(set(line_json[FieldNames.KEY_PHRASES]))
yield ProtoDoc(
id=line_json[FieldNames.PAPER_ID],
title=line_json[FieldNames.TITLE],
abstract=line_json[FieldNames.ABSTRACT],
authors=line_json[FieldNames.AUTHORS],
out_citations=citations,
in_citation_count=in_citation_count,
year=line_json.get(FieldNames.YEAR, 2017),
key_phrases=key_phrases,
venue=line_json.get(FieldNames.VENUE, ''),
)
def build_corpus(db_filename, corpus_json):
""""""
with sqlite3.connect(db_filename) as conn:
conn.execute('PRAGMA synchronous=OFF')
conn.execute('PRAGMA journal_mode=MEMORY')
conn.row_factory = sqlite3.Row
conn.execute(
'''CREATE TABLE IF NOT EXISTS ids (id STRING, year INT)'''
)
conn.execute(
'''CREATE TABLE IF NOT EXISTS documents
(id STRING, year INT, payload BLOB)'''
)
conn.execute('''CREATE INDEX IF NOT EXISTS year_idx on ids (year)''')
conn.execute('''CREATE INDEX IF NOT EXISTS id_idx on ids (id)''')
conn.execute('''CREATE INDEX IF NOT EXISTS id_doc_idx on documents (id)''')
for batch in batchify(stream_papers(corpus_json), 1024):
conn.executemany(
'INSERT INTO ids (id, year) VALUES (?, ?)',
[
(doc.id, doc.year)
for doc in batch
]
)
            # also store the year so that the ORDER BY year in _doc_generator stays meaningful
            conn.executemany(
                'INSERT INTO documents (id, year, payload) VALUES (?, ?, ?)',
                [
                    (doc.id, doc.year, doc.SerializeToString())
                    for doc in batch
                ]
            )
conn.commit()
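# Example one-time build followed by loading (hypothetical file locations):
#
#   build_corpus('data/db/dblp.sqlite', 'data/json/dblp.json')
#   corpus = Corpus.load('data/db/dblp.sqlite', train_frac=0.80)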
def load(data_path, train_frac=0.80):
return Corpus(data_path, train_frac)
class Corpus(object):
training_ranges = {
'dblp': (1966, 2007), # both years inclusive
'pubmed': (1966, 2008), # both years inclusive
'oc': None
}
validation_ranges = {'dblp': (2008, 2008), 'pubmed': (2009, 2009), 'oc': None}
testing_ranges = {'dblp': (2009, 2011), 'pubmed': (2010, 2013), 'oc': None}
def _fetch_paper_ids(self, date_range=None):
"""
Fetch paper ids from sqlite db published between the provided years. If none provided,
get all IDs
:param date_range: a tuple of start and end year (both inclusive)
:return:
"""
if date_range is None:
query = '''
SELECT id from ids
ORDER BY year
'''
else:
query = '''
SELECT id from ids
WHERE year >= {} AND year <= {}
ORDER BY year
'''.format(date_range[0], date_range[1])
id_rows = self._conn.execute(
query
).fetchall()
all_ids = [str(r[0]) for r in id_rows]
return all_ids
def __init__(self, data_path, train_frac):
self._conn = sqlite3.connect(
'file://%s?mode=ro' % data_path, check_same_thread=False, uri=True
)
self.train_frac = train_frac
if 'dblp' in data_path:
self.corpus_type = 'dblp'
elif 'pubmed' in data_path:
self.corpus_type = 'pubmed'
else:
self.corpus_type = 'oc'
        if self.corpus_type == 'oc':
all_ids = self._fetch_paper_ids()
n = len(all_ids)
n_train = int(self.train_frac * n)
n_valid = (n - n_train) // 2
n_test = n - n_train - n_valid
self.train_ids = all_ids[0:n_train]
self.valid_ids = all_ids[n_train:n_train + n_valid]
self.test_ids = all_ids[n_train + n_valid:]
else:
self.train_ids = self._fetch_paper_ids(Corpus.training_ranges[self.corpus_type])
self.valid_ids = self._fetch_paper_ids(Corpus.validation_ranges[self.corpus_type])
self.test_ids = self._fetch_paper_ids(Corpus.testing_ranges[self.corpus_type])
n_train = len(self.train_ids)
n_valid = len(self.valid_ids)
n_test = len(self.test_ids)
self.n_docs = len(self.train_ids) + len(self.valid_ids) + len(self.test_ids)
self.all_ids = self.train_ids + self.valid_ids + self.test_ids
self._id_set = set(self.all_ids)
logging.info('%d training docs' % n_train)
logging.info('%d validation docs' % n_valid)
logging.info('%d testing docs' % n_test)
logging.info("Loading documents into memory")
self.documents = [doc for doc in self._doc_generator()]
self.doc_id_to_index_dict = {doc.id: idx for idx, doc in enumerate(self.documents)}
@staticmethod
def load(data_path, train_frac=0.80):
return load(data_path, train_frac)
@staticmethod
def load_pkl(corpus_pkl_location):
return pickle.load(open(corpus_pkl_location, "rb"))
@staticmethod
def build(db_filename, source_json):
return build_corpus(db_filename, source_json)
def _doc_generator(self):
with self._conn as tx:
for row in tx.execute(
'SELECT payload from documents ORDER BY year'
):
doc = ProtoDoc()
doc.ParseFromString(row[0])
yield Document.from_proto_doc(doc)
def __len__(self):
return self.n_docs
def __iter__(self):
for doc in self.documents:
yield doc
def __contains__(self, id):
return id in self._id_set
def __getitem__(self, id):
index = self.doc_id_to_index_dict[id]
return self.documents[index]
def select(self, id_set):
for doc in self.documents:
            if doc.id in id_set:
yield doc.id, doc
def filter(self, id_set):
return self._id_set.intersection(id_set)
def get_citations(self, doc_id):
out_citations = self[doc_id].out_citations
# Remove cited documents that appear after the year of publication of source document as
# they indicate incorrect data
return [cit_doc_id for cit_doc_id in out_citations if cit_doc_id in self._id_set and
self[cit_doc_id].year <= self[doc_id].year]
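# Example usage (hypothetical ids):
#
#   corpus = Corpus.load('data/db/dblp.sqlite')
#   if '12345' in corpus:
#       doc = corpus['12345']                 # Document lookup by id
#       refs = corpus.get_citations('12345')  # in-corpus out-citations, filtered by year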
|
citeomatic-master
|
citeomatic/corpus.py
|
import json
from citeomatic import file_util
from citeomatic.common import PAPER_EMBEDDING_MODEL, CITATION_RANKER_MODEL
from traitlets import Bool, HasTraits, Int, Unicode, Enum, Float
class ModelOptions(HasTraits):
# The type of candidate selector to use. Okapi BM25 (https://en.wikipedia.org/wiki/Okapi_BM25)
# ranking model or an Approximate Nearest Neighbor index built on embeddings of documents
# obtained from the paper_embedding model
candidate_selector_type = Enum(('ann', 'bm25'), default_value='ann')
# Whether to use the citation_ranker model to re-rank selected candidates or not
citation_ranker_type = Enum(('neural', 'none'), default_value='neural')
# Model name to train: citation_ranker or paper_embedder
model_name = Enum(values=[PAPER_EMBEDDING_MODEL, CITATION_RANKER_MODEL], default_value=PAPER_EMBEDDING_MODEL)
# No. of features (words) to retain from the corpus for training
n_features = Int()
# No. of authors to retain from the corpus for training
n_authors = Int()
# No. of venues to retain from the corpus for training
n_venues = Int()
# No. of key phrases to retain from the corpus for training
n_keyphrases = Int()
# Dimension of word embedding
dense_dim = Int(default_value=75)
# Dimension of embeddings for author, venue or keyphrase
metadata_dim = Int(default_value=10)
# Embedding type to use for text fields
embedding_type = Enum(values=['sum', 'cnn', 'cnn2', 'lstm'], default_value='sum')
# Architecture changing options
use_dense = Bool(default_value=True)
use_citations = Bool(default_value=True)
use_sparse = Bool(default_value=True)
use_src_tgt_embeddings = Bool(default_value=False)
use_metadata = Bool(default_value=True)
use_authors = Bool(default_value=False)
use_venue = Bool(default_value=False)
use_keyphrases = Bool(default_value=False)
# training and feature params
optimizer = Unicode(default_value='tfopt')
lr = Float(default_value=0.0001)
use_nn_negatives = Bool(default_value=True)
margin_multiplier = Float(default_value=1)
use_variable_margin = Bool(default_value=True)
train_frac = Float(default_value=0.8) # the rest will be divided 50/50 val/test
max_features = Int(default_value=200000)
max_title_len = Int(default_value=50)
max_abstract_len = Int(default_value=500)
neg_to_pos_ratio = Int(default_value=6) # ideally divisible by 2 and 3
batch_size = Int(default_value=512)
samples_per_epoch = Int(default_value=1000000)
total_samples = Int(default_value=5000000)
reduce_lr_flag = Bool(default_value=False)
# regularization params for embedding layer: l1 for mag/sparse, l2 for dir
l2_lambda = Float(default_value=0.00001)
l1_lambda = Float(default_value=0.0000001)
dropout_p = Float(default_value=0)
use_magdir = Bool(default_value=True)
# params for TextEmbeddingConv
kernel_width = Int(default_value=5)
stride = Int(default_value=2)
# params for TextEmbeddingConv2
filters = Int(default_value=100) # default in the paper
max_kernel_size = Int(default_value=5) # we use 2, 3, 4, 5. paper uses 3, 4, 5
# dense layers
dense_config = Unicode(default_value='20,20')
    num_ann_nbrs_to_fetch = Int(default_value=100)  # No. of nearest neighbours to fetch from the ANN at eval time
    num_candidates_to_rank = Int(default_value=100)  # No. of fetched candidates to re-rank with the citation ranker
extend_candidate_citations = Bool(default_value=True) # Whether to include citations of ANN
# similar docs as possible candidates or not
use_pretrained = Bool(default_value=False)
num_oov_buckets = 100 # for hashing out of vocab terms
dense_dim_pretrained = 300 # just a fact - don't change
oov_term_prefix = '#OOV_'
subset_vocab_to_training = False
# minimum number of papers for authors/venues/keyphrases to get an embedding.
min_author_papers = Int(default_value=1)
min_venue_papers = Int(default_value=1)
min_keyphrase_papers = Int(default_value=5)
use_selector_confidence = Bool(default_value=True)
    # TensorBoard logging directory
tb_dir = Unicode(default_value=None, allow_none=True)
# Option to fine-tune pre-trained embeddings
enable_fine_tune = Bool(default_value=True)
# Use both training and validation data for final training of model
train_for_test_set = Bool(default_value=False)
def __repr__(self):
return json.dumps(self._trait_values, indent=2, sort_keys=True)
def to_json(self):
model_kw = {name: getattr(self, name) for name in ModelOptions.class_traits().keys()}
return json.dumps(model_kw)
@staticmethod
def load(filename):
kw = file_util.read_json(filename)
return ModelOptions(**kw)
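# Example (hypothetical values):
#
#   opts = ModelOptions(dense_dim=150, embedding_type='cnn2')
#   print(opts)              # __repr__ pretty-prints the trait values as sorted JSON
#   opts_json = opts.to_json()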
|
citeomatic-master
|
citeomatic/models/options.py
|
from abc import ABC
import numpy as np
from citeomatic.models.layers import L2Normalize, ScalarMul, Sum, EmbeddingZero
from citeomatic.models.options import ModelOptions
from keras.layers import Bidirectional, Input, LSTM, Concatenate, SpatialDropout1D
from keras.layers import Conv1D, Lambda, Dense, GlobalMaxPooling1D, Embedding
from keras.models import Model
from keras.regularizers import l1, l2
import keras.backend as K
def _prefix(tup):
    return '-'.join(tup)
def set_embedding_layer_weights(embedding_layer, pretrained_embeddings):
dense_dim = pretrained_embeddings.shape[1]
weights = np.vstack((np.zeros(dense_dim), pretrained_embeddings))
embedding_layer.set_weights([weights])
def valid_conv_kernel_size(input_kernel_size, h, r):
return int(np.floor((input_kernel_size - h)/r + 1))
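# With the default ModelOptions (kernel_width=5, stride=2) and max_title_len=50, the stacked
# convolutions in TextEmbeddingConv see sequence lengths 50 -> 23 -> 10, i.e.
# valid_conv_kernel_size(50, 5, 2) == 23 and valid_conv_kernel_size(23, 5, 2) == 10, so conv3
# uses kernel_size=10 to reduce the title to a single position.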
def make_embedder(options, pretrained_embeddings):
if options.embedding_type == 'sum':
embedder_title = TextEmbeddingSum(options=options, pretrained_embeddings=pretrained_embeddings)
embedder_abstract = embedder_title
elif options.embedding_type == 'cnn':
embedder_title = TextEmbeddingConv(options=options, pretrained_embeddings=pretrained_embeddings, max_sequence_len=options.max_title_len)
embedder_abstract = TextEmbeddingConv(options=options, pretrained_embeddings=pretrained_embeddings, max_sequence_len=options.max_abstract_len)
# no reason not to share the embedding itself
embedder_abstract.embed_direction = embedder_title.embed_direction
embedder_abstract.embed_magnitude = embedder_title.embed_magnitude
elif options.embedding_type == 'cnn2':
embedder_title = TextEmbeddingConv2(options=options, pretrained_embeddings=pretrained_embeddings)
embedder_abstract = TextEmbeddingConv2(options=options, pretrained_embeddings=pretrained_embeddings)
# no reason not to share the embedding itself
embedder_abstract.embed_direction = embedder_title.embed_direction
embedder_abstract.embed_magnitude = embedder_title.embed_magnitude
elif options.embedding_type == 'lstm':
embedder_title = TextEmbeddingLSTM(options=options, pretrained_embeddings=pretrained_embeddings)
embedder_abstract = embedder_title
else:
assert False, 'Unknown embedding type %s' % options.embedding_type
return embedder_title, embedder_abstract
class TextEmbedding(object):
def __init__(self,
options: ModelOptions,
pretrained_embeddings=None,
field_type='text',
magnitudes_initializer='uniform'):
"""
:param options:
:param pretrained_embeddings:
:param embedding_type: Takes one of three values: text / authors / venues depending on
which field is being embedded
"""
self.field_type = field_type
if self.field_type == 'text':
self.n_features = options.n_features
self.dense_dim = options.dense_dim
elif self.field_type == 'authors':
self.n_features = options.n_authors
self.dense_dim = options.metadata_dim
elif self.field_type == 'venue':
self.n_features = options.n_venues
self.dense_dim = options.metadata_dim
elif self.field_type == 'keyphrases':
self.n_features = options.n_keyphrases
self.dense_dim = options.metadata_dim
else:
assert False
self.l1_lambda = options.l1_lambda
self.l2_lambda = options.l2_lambda * (pretrained_embeddings is None)
self.dropout_p = options.dropout_p
self.use_magdir = options.use_magdir
self.magnitudes_initializer = magnitudes_initializer
self.enable_fine_tune = options.enable_fine_tune
self.pretrained_embeddings = pretrained_embeddings
self.mask = None
def define_embedding_layers(self):
# shared layers
self.embed_direction = EmbeddingZero(
output_dim=self.dense_dim,
input_dim=self.n_features,
activity_regularizer=l2(self.l2_lambda),
mask_zero=self.mask,
trainable=self.pretrained_embeddings is None or self.enable_fine_tune
)
if self.pretrained_embeddings is not None:
self.embed_direction.build((None,))
set_embedding_layer_weights(self.embed_direction,
self.pretrained_embeddings)
self.embed_magnitude = EmbeddingZero(
output_dim=1,
input_dim=self.n_features,
activity_regularizer=l1(self.l1_lambda),
# will induce sparsity if large enough
mask_zero=self.mask,
embeddings_initializer=self.magnitudes_initializer
)
self.dropout = SpatialDropout1D(self.dropout_p)
def embedding_constructor(self, prefix):
_input = Input(shape=(None,), dtype='int32', name='%s-txt' % prefix)
if self.use_magdir:
dir_embedding = self.embed_direction(_input)
direction = L2Normalize.invoke(dir_embedding,
name='%s-dir-norm' % prefix)
magnitude = self.embed_magnitude(_input)
_embedding = ScalarMul.invoke([direction, magnitude],
name='%s-embed' % prefix)
else:
_embedding = self.embed_direction(_input)
_embedding = self.dropout(_embedding)
return _input, _embedding
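# Note: with use_magdir enabled (the default), embedding_constructor factors each token
# embedding into an L2-normalized direction (l2-regularized) scaled by a separately learned
# scalar magnitude (l1-regularized, which can drive uninformative tokens towards zero weight).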
class TextEmbeddingSum(TextEmbedding):
"""
Text embedding models class.
"""
def __init__(self, **kwargs):
super(TextEmbeddingSum, self).__init__(**kwargs)
self.mask = True
self.define_embedding_layers()
def create_text_embedding_model(self, prefix="", final_l2_norm=True):
"""
:param prefix: Preferred prefix to add to each layer in the model
:return: A model that takes a sequence of words as inputs and outputs the normed sum of
word embeddings that occur in the document.
"""
_input, _embedding = self.embedding_constructor(prefix)
summed = Sum.invoke(_embedding, name='%s-sum-title' % prefix)
if final_l2_norm:
normed_sum = L2Normalize.invoke(
summed, name='%s-l2_normed_sum' % prefix
)
outputs_list = [normed_sum]
else:
outputs_list = [summed]
return Model(
inputs=_input, outputs=outputs_list, name="%s-embedding-model" % prefix
), outputs_list
class TextEmbeddingConv(TextEmbedding):
"""
Text embedding models class.
"""
def __init__(self, max_sequence_len=None, **kwargs):
super(TextEmbeddingConv, self).__init__(**kwargs)
self.max_sequence_len = max_sequence_len
self.kernel_width = kwargs['options'].kernel_width
self.stride = kwargs['options'].stride
self.mask = False
self.define_embedding_layers()
# shared convolution layers
conv1_output_length = valid_conv_kernel_size(max_sequence_len, self.kernel_width, self.stride)
conv2_output_length = valid_conv_kernel_size(conv1_output_length, self.kernel_width, self.stride)
self.conv1 = Conv1D(filters=self.dense_dim,
kernel_size=self.kernel_width,
strides=self.stride,
padding='valid',
activation='elu')
self.conv2 = Conv1D(filters=self.dense_dim,
kernel_size=self.kernel_width,
strides=self.stride,
padding='valid',
activation='elu')
self.conv3 = Conv1D(filters=self.dense_dim,
kernel_size=conv2_output_length,
strides=self.stride,
padding='valid',
activation='elu')
def create_text_embedding_model(self, prefix="", final_l2_norm=True):
"""
:param prefix: Preferred prefix to add to each layer in the model
:param final_l2_norm: Whether to l2 norm final output
:return: A model that takes a sequence of words as inputs and outputs the normed sum of
word embeddings that occur in the document.
"""
_input, _embedding = self.embedding_constructor(prefix)
conved1 = self.conv1(_embedding)
conved2 = self.conv2(conved1)
conved3 = self.conv3(conved2)
conved3 = Lambda(lambda x: K.squeeze(x, axis=1))(conved3)
if final_l2_norm:
normed_conved3 = L2Normalize.invoke(
conved3, name='%s-l2_normed_conv_encoding' % prefix
)
outputs_list = [normed_conved3]
else:
outputs_list = [conved3]
return Model(
            inputs=_input, outputs=outputs_list, name='%s-embedding-model' % prefix
), outputs_list
class TextEmbeddingConv2(TextEmbedding):
"""
Text embedding models class.
More or less:
https://arxiv.org/pdf/1408.5882v2.pdf
"""
def __init__(self, **kwargs):
super(TextEmbeddingConv2, self).__init__(**kwargs)
self.filters = kwargs['options'].filters
self.max_kernel_size = kwargs['options'].max_kernel_size
self.mask = False
self.define_embedding_layers()
# shared conv layers
self.conv_layers = []
for kernel_size in range(2, self.max_kernel_size + 1):
self.conv_layers.append(Conv1D(filters=self.filters,
kernel_size=kernel_size,
padding='same'))
self.dense = Dense(self.dense_dim, activation='elu')
def create_text_embedding_model(self, prefix="", final_l2_norm=True):
"""
:param prefix: Preferred prefix to add to each layer in the model
:return: A model that takes a sequence of words as inputs and outputs the normed sum of
word embeddings that occur in the document.
"""
_input, _embedding = self.embedding_constructor(prefix)
list_of_convs = [GlobalMaxPooling1D()(conv(_embedding))
for conv in self.conv_layers]
z = Concatenate()(list_of_convs) if len(list_of_convs) > 1 else list_of_convs[0]
encoded = self.dense(z)
if final_l2_norm:
normed_encoded = L2Normalize.invoke(
encoded, name='%s-l2_normed_conv_encoding' % prefix
)
outputs_list = [normed_encoded]
else:
outputs_list = [encoded]
return Model(
            inputs=_input, outputs=outputs_list, name='%s-embedding-model' % prefix
), outputs_list
class TextEmbeddingLSTM(TextEmbedding):
"""
Text embedding models class.
"""
def __init__(self, **kwargs):
super(TextEmbeddingLSTM, self).__init__(**kwargs)
self.mask = True
self.define_embedding_layers()
self.bilstm = Bidirectional(LSTM(self.dense_dim))
self.dense = Dense(self.dense_dim, activation='elu')
def create_text_embedding_model(self, prefix="", final_l2_norm=True):
"""
:param prefix: Preferred prefix to add to each layer in the model
:return: A model that takes a sequence of words as inputs and outputs the normed sum of
word embeddings that occur in the document.
"""
_input, _embedding = self.embedding_constructor(prefix)
lstm_embedding = self.dense(self.bilstm(_embedding))
if final_l2_norm:
normed_lstm_embedding = L2Normalize.invoke(
lstm_embedding, name='%s-l2_normed_bilstm_embedding' % prefix
)
outputs_list = [normed_lstm_embedding]
else:
outputs_list = [lstm_embedding]
return Model(
            inputs=_input, outputs=outputs_list, name="%s-embedding-model" % prefix
), outputs_list
|
citeomatic-master
|
citeomatic/models/text_embeddings.py
|
import logging
import tensorflow as tf
from citeomatic.models.layers import Sum, custom_dot, EmbeddingZero
from citeomatic.models.options import ModelOptions
from citeomatic.models.text_embeddings import TextEmbeddingSum, _prefix, make_embedder
from keras.engine import Model
from keras.regularizers import l1, l2
from keras.layers import Dense, Embedding, Input, Reshape, Concatenate, multiply, Lambda, Flatten, \
Dot
import keras.backend as K
FIELDS = ['title', 'abstract']
SOURCE_NAMES = ['query', 'candidate']
def create_model(options: ModelOptions, pretrained_embeddings=None):
logging.info('Building model: %s' % options)
embedders = {}
if options.use_src_tgt_embeddings:
        # separate embedders for query and for candidate
embedder_title, embedder_abstract = make_embedder(options, pretrained_embeddings)
embedders[_prefix(('query', 'title'))] = embedder_title
embedders[_prefix(('query', 'abstract'))] = embedder_abstract
embedder_title, embedder_abstract = make_embedder(options, pretrained_embeddings)
embedders[_prefix(('candidate', 'title'))] = embedder_title
embedders[_prefix(('candidate', 'abstract'))] = embedder_abstract
else:
# same embedders for query and for candidate
embedder_title, embedder_abstract = make_embedder(options, pretrained_embeddings)
embedders[_prefix(('query', 'title'))] = embedder_title
embedders[_prefix(('query', 'abstract'))] = embedder_abstract
embedders[_prefix(('candidate', 'title'))] = embedder_title
embedders[_prefix(('candidate', 'abstract'))] = embedder_abstract
normed_sums = {}
intermediate_outputs = []
citeomatic_inputs = []
if options.use_dense:
for source in SOURCE_NAMES:
for field in FIELDS:
prefix = _prefix((source, field))
embedding_model, outputs = embedders[
prefix
].create_text_embedding_model(prefix=prefix)
normed_sums[(source, field)] = outputs[0]
citeomatic_inputs.append(embedding_model.input)
for field in FIELDS:
query = normed_sums[('query', field)]
candidate = normed_sums[('candidate', field)]
cos_dist = custom_dot(
query,
candidate,
options.dense_dim,
normalize=True
)
intermediate_outputs.append(cos_dist)
# lookup weights for the intersection of individual terms
# (computed by the feature generator.)
if options.use_sparse:
for field in FIELDS:
sparse_input = Input(
name='query-candidate-%s-intersection' % field, shape=(None,)
)
elementwise_sparse = EmbeddingZero(
input_dim=options.n_features,
output_dim=1,
mask_zero=True,
name="%s-sparse-embedding" % field,
activity_regularizer=l1(options.l1_lambda)
)(sparse_input)
intermediate_outputs.append(Sum()(elementwise_sparse))
citeomatic_inputs.append(sparse_input)
if options.use_authors:
assert options.n_authors > 0
embedder = TextEmbeddingSum(options=options, field_type='authors')
# candidate author
candidate_author_embedder, candidate_author_embeddings = embedder.create_text_embedding_model(
prefix='candidate-authors',
final_l2_norm=True
)
citeomatic_inputs.append(candidate_author_embedder.input)
# query author
query_author_embedder, query_author_embeddings = embedder.create_text_embedding_model(
prefix='query-authors',
final_l2_norm=True
)
citeomatic_inputs.append(query_author_embedder.input)
# cos-sim
author_similarity = custom_dot(
candidate_author_embeddings[0],
query_author_embeddings[0],
options.metadata_dim,
normalize=True
)
intermediate_outputs.append(author_similarity)
if options.use_venue:
assert options.n_venues > 0
embedder = TextEmbeddingSum(options=options, field_type='venue')
# candidate venue
candidate_venue_embedder, candidate_venue_embeddings = embedder.create_text_embedding_model(
prefix='candidate-venue',
final_l2_norm=True
)
citeomatic_inputs.append(candidate_venue_embedder.input)
# query venue
query_venue_embedder, query_venue_embeddings = embedder.create_text_embedding_model(
prefix='query-venue',
final_l2_norm=True
)
citeomatic_inputs.append(query_venue_embedder.input)
# cos-sim
venue_similarity = custom_dot(
candidate_venue_embeddings[0],
query_venue_embeddings[0],
options.metadata_dim,
normalize=True
)
intermediate_outputs.append(venue_similarity)
if options.use_keyphrases:
assert options.n_keyphrases > 0
if options.n_keyphrases > 1:
# only happens if there WERE any keyphrases
# this prevents opencorpus from having this extra layer
embedding = TextEmbeddingSum(options=options, field_type='keyphrases')
# candidate keyphrases
candidate_keyphrases_embedder, candidate_keyphrases_embeddings = embedding.create_text_embedding_model(
prefix='candidate-keyphrases',
final_l2_norm=True
)
citeomatic_inputs.append(candidate_keyphrases_embedder.input)
# query keyphrases
query_keyphrases_embedder, query_keyphrases_embeddings = embedding.create_text_embedding_model(
prefix='query-keyphrases',
final_l2_norm=True
)
citeomatic_inputs.append(query_keyphrases_embedder.input)
# cos-sim
keyphrases_similarity = custom_dot(
candidate_keyphrases_embeddings[0],
query_keyphrases_embeddings[0],
options.metadata_dim,
normalize=True
)
intermediate_outputs.append(keyphrases_similarity)
if options.use_citations:
citation_count_input = Input(
shape=(1,), dtype='float32', name='candidate-citation-count'
)
citeomatic_inputs.append(citation_count_input)
intermediate_outputs.append(citation_count_input)
if options.use_selector_confidence:
candidate_confidence_input = Input(
shape=(1,), dtype='float32', name='candidate-confidence'
)
citeomatic_inputs.append(candidate_confidence_input)
intermediate_outputs.append(candidate_confidence_input)
if len(intermediate_outputs) > 1:
last = Concatenate()(intermediate_outputs)
else:
        last = intermediate_outputs[0]
for i, layer_size in enumerate(options.dense_config.split(',')):
layer_size = int(layer_size)
last = Dense(
layer_size, name='dense-%d' % i, activation='elu'
)(last)
text_output = Dense(
1, kernel_initializer='one', name='final-output', activation='sigmoid'
)(last)
citeomatic_model = Model(inputs=citeomatic_inputs, outputs=text_output)
# Setting embedding model to None to avoid its inadvertent usage for ANN embeddings
models = {
'embedding': None,
'citeomatic': citeomatic_model,
}
return models
|
citeomatic-master
|
citeomatic/models/citation_ranker.py
|
import logging
from keras.engine import Model
from keras.layers import Add
from citeomatic.models.layers import L2Normalize, ScalarMultiply, custom_dot
from citeomatic.models.options import ModelOptions
from citeomatic.models.text_embeddings import _prefix, make_embedder
FIELDS = ['title', 'abstract']
SOURCE_NAMES = ['query', 'candidate']
def create_model(options: ModelOptions, pretrained_embeddings=None):
logging.info('Building model: %s' % options)
scalar_sum_models = {}
for field in FIELDS:
scalar_sum_models[field] = ScalarMultiply(name='scalar-mult-' + field)
# same embedders for query and for candidate
embedder_title, embedder_abstract = make_embedder(options, pretrained_embeddings)
embedders = {'title': embedder_title, 'abstract': embedder_abstract}
# apply text embedding model and add up title, abstract, etc
embedding_models = {}
normed_weighted_sum_of_normed_sums = {}
for source in SOURCE_NAMES:
weighted_normed_sums = []
for field in FIELDS:
prefix = _prefix((source, field))
embedding_model, _ = embedders[field].create_text_embedding_model(
prefix=prefix, final_l2_norm=True
)
embedding_models[prefix] = embedding_model
normed_sum = embedding_models[prefix].outputs[0]
weighted_normed_sums.append(scalar_sum_models[field](normed_sum))
weighted_sum = Add()(weighted_normed_sums)
normed_weighted_sum_of_normed_sums[source] = L2Normalize.invoke(
weighted_sum, name='%s-l2_normed_sum' % source
)
# cos distance
text_output = custom_dot(
normed_weighted_sum_of_normed_sums['query'],
normed_weighted_sum_of_normed_sums['candidate'],
options.dense_dim,
normalize=False
)
citeomatic_inputs = []
for source in SOURCE_NAMES:
for field in FIELDS:
key = _prefix((source, field))
citeomatic_inputs.append(embedding_models[key].input)
citeomatic_model = Model(inputs=citeomatic_inputs, outputs=text_output)
embedding_model = Model(
inputs=citeomatic_inputs[0:len(SOURCE_NAMES)],
outputs=normed_weighted_sum_of_normed_sums['query']
)
models = {'embedding': embedding_model, 'citeomatic': citeomatic_model}
return models
|
citeomatic-master
|
citeomatic/models/paper_embedder.py
|
citeomatic-master
|
citeomatic/models/__init__.py
|
|
import tensorflow as tf
from keras import backend as K
from keras.engine.topology import Layer
from keras.layers import Lambda, Embedding
from keras.layers import Concatenate, Dot, Reshape, Flatten
class EmbeddingZero(Embedding):
def call(self, inputs):
if K.dtype(inputs) != 'int32':
inputs = K.cast(inputs, 'int32')
out = K.gather(self.embeddings, inputs)
mask = K.expand_dims(K.clip(K.cast(inputs, 'float32'), 0, 1), axis=-1)
return out * mask
class NamedLambda(Lambda):
def __init__(self, name=None):
Lambda.__init__(self, self.fn, name=name)
@classmethod
def invoke(cls, args, **kw):
return cls(**kw)(args)
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self.name)
class L2Normalize(NamedLambda):
def fn(self, x):
return K.l2_normalize(x, axis=-1)
class ScalarMul(NamedLambda):
def fn(self, x):
return x[0] * x[1]
class Sum(NamedLambda):
def fn(self, x):
return K.sum(x, axis=1)
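# NamedLambda subclasses are applied via the class method, e.g. (illustrative):
#   normed = L2Normalize.invoke(tensor, name='title-dir-norm')
#   summed = Sum.invoke(embedded_sequence, name='title-sum')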
class ScalarMultiply(Layer):
def __init__(self, **kwargs):
super(ScalarMultiply, self).__init__(**kwargs)
def build(self, input_shape):
# Create a trainable weight variable for this layer.
self.w = self.add_weight(
shape=(1, 1), initializer='one', trainable=True, name='w'
)
super(ScalarMultiply, self).build(input_shape)
def call(self, x, mask=None):
return self.w * x
def compute_output_shape(self, input_shape):
return input_shape[0], input_shape[1]
def custom_dot(a, b, d, normalize=True):
    # reshape both vectors to (batch, 1, d) and take a dot product along the feature axis,
    # optionally L2-normalizing first (i.e. cosine similarity)
reshaped_a = Reshape((1, d))(a)
reshaped_b = Reshape((1, d))(b)
reshaped_in = [reshaped_a, reshaped_b]
dotted = Dot(axes=(2, 2), normalize=normalize)(reshaped_in)
return Flatten()(dotted)
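# custom_dot(a, b, d, normalize=True) therefore returns, per example, the cosine similarity of
# two d-dimensional vectors as a (batch, 1) tensor; with normalize=False it is a plain dot product.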
def triplet_loss(y_true, y_pred):
y_pred = K.flatten(y_pred)
y_true = K.flatten(y_true)
pos = y_pred[::2]
neg = y_pred[1::2]
# margin is given by the difference in labels
margin = y_true[::2] - y_true[1::2]
delta = K.maximum(margin + neg - pos, 0)
return K.mean(delta, axis=-1)
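# triplet_loss assumes training pairs are interleaved as [pos, neg, pos, neg, ...] along the
# batch axis, with the labels carrying the per-pair margin. Illustrative numbers: if a positive
# scores 0.7 with label 1.0 and its negative scores 0.5 with label 0.3, the margin is 0.7 and
# the pair contributes max(0.7 + 0.5 - 0.7, 0) = 0.5 to the mean hinge loss.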
|
citeomatic-master
|
citeomatic/models/layers.py
|
from collections import Counter
from citeomatic.common import DatasetPaths
from citeomatic.config import App
from citeomatic.corpus import Corpus
from citeomatic.traits import Enum
import numpy as np
class CorpusStat(App):
dataset_type = Enum(('dblp', 'pubmed', 'oc'), default_value='pubmed')
def main(self, args):
dp = DatasetPaths()
if self.dataset_type == 'oc':
corpus = Corpus.load_pkl(dp.get_pkl_path(self.dataset_type))
else:
corpus = Corpus.load(dp.get_db_path(self.dataset_type))
authors = Counter()
key_phrases = Counter()
years = Counter()
venues = Counter()
num_docs_with_kp = 0
in_citations_counts = []
out_citations_counts = []
for doc in corpus:
authors.update(doc.authors)
key_phrases.update(doc.key_phrases)
if len(doc.key_phrases) > 0:
num_docs_with_kp += 1
in_citations_counts.append(doc.in_citation_count)
out_citations_counts.append(doc.out_citation_count)
years.update([doc.year])
venues.update([doc.venue])
training_years = [corpus[doc_id].year for doc_id in corpus.train_ids]
validation_years = [corpus[doc_id].year for doc_id in corpus.valid_ids]
testing_years = [corpus[doc_id].year for doc_id in corpus.test_ids]
print("No. of documents = {}".format(len(corpus)))
print("Unique number of authors = {}".format(len(authors)))
print("Unique number of key phrases = {}".format(len(key_phrases)))
print("Unique number of venues = {}".format(len(venues)))
print("No. of docs with key phrases = {}".format(num_docs_with_kp))
print("Average in citations = {} (+/- {})".format(np.mean(in_citations_counts),
np.std(in_citations_counts)))
print("Average out citations = {} (+/- {})".format(np.mean(out_citations_counts),
np.std(out_citations_counts)))
print("No. of training examples = {} ({} to {})".format(len(corpus.train_ids),
np.min(training_years),
np.max(training_years)))
print("No. of validation examples = {} ({} to {})".format(len(corpus.valid_ids),
np.min(validation_years),
np.max(validation_years)))
print("No. of testing examples = {} ({} to {})".format(len(corpus.test_ids),
np.min(testing_years),
np.max(testing_years)))
print(authors.most_common(10))
CorpusStat.run(__name__)
|
citeomatic-master
|
citeomatic/scripts/corpus_stats.py
|
import logging
from citeomatic.common import DatasetPaths
from citeomatic.config import App
from citeomatic.corpus import Corpus
class VerifyCorpus(App):
def main(self, args):
def _verify(db_filename, corpus_json):
try:
Corpus.build(db_filename=db_filename, source_json=corpus_json)
except Exception as e:
logging.critical("Failed to build corpus {} for file {}".format(db_filename, corpus_json))
print(e)
_verify(DatasetPaths.DBLP_DB_FILE, DatasetPaths.DBLP_CORPUS_JSON)
_verify(DatasetPaths.PUBMED_DB_FILE, DatasetPaths.PUBMED_CORPUS_JSON)
_verify(DatasetPaths.OC_DB_FILE, DatasetPaths.OC_CORPUS_JSON)
VerifyCorpus.run(__name__)
|
citeomatic-master
|
citeomatic/scripts/verify_corpus.py
|
import json
import os
from citeomatic.config import App
from citeomatic.models.options import ModelOptions
from citeomatic.traits import Unicode, Enum
import copy
class GenerateOcConfigs(App):
dataset_type = Enum(('dblp', 'pubmed', 'oc'), default_value='pubmed')
input_config_file = Unicode(default_value=None, allow_none=True)
out_dir = Unicode(default_value="config/")
def write_change_to_file(self, filename, base_options, change):
filename = os.path.join(self.out_dir, filename)
base_options_2 = copy.deepcopy(base_options)
base_options_2.update(change)
json.dump(base_options_2, open(filename, "w"), sort_keys=True, indent=2)
def main(self, args):
if self.input_config_file is None:
base_config = ModelOptions().to_json()
else:
base_config = json.load(open(self.input_config_file))
changes_file_list = [
({'use_citations': False, 'use_selector_confidence': False},
"{}.citation_ranker.canonical-extra_features.options.json".format(self.dataset_type)),
({'use_magdir': False}, "{}.citation_ranker.canonical-magdir.options.json".format(
self.dataset_type)),
({'use_variable_margin': False},
"{}.citation_ranker.canonical-var_margin.options.json".format(self.dataset_type)),
({
'use_metadata': False,
'use_authors': False,
'use_keyphrases': False,
'use_venue': False,
}, "{}.citation_ranker.canonical-metadata.options.json".format(self.dataset_type)),
({'use_src_tgt_embeddings': True},
"{}.citation_ranker.canonical-siamese.options.json".format(self.dataset_type)),
({'use_src_tgt_embeddings': False},
"{}.citation_ranker.canonical-non_siamese.options.json".format(self.dataset_type)),
({'use_pretrained': True, 'enable_fine_tune': False},
"{}.citation_ranker.canonical-pretrained_no_finetune.options.json".format(self.dataset_type)),
({'use_pretrained': True, 'enable_fine_tune': True},
"{}.citation_ranker.canonical-pretrained_with_finetune.options.json".format(
self.dataset_type
)),
({'use_sparse': False},
"{}.citation_ranker.canonical-sparse.options.json".format(self.dataset_type)),
({'batch_size': 512},
"{}.citation_ranker.canonical-large_batch.options.json".format(self.dataset_type)),
({'use_nn_negatives': False},
"{}.citation_ranker.canonical-nn_negatives.options.json".format(self.dataset_type)),
({'embedding_type': 'cnn2'},
"{}.citation_ranker.canonical+cnn.options.json".format(self.dataset_type))
]
for change, filename in changes_file_list:
self.write_change_to_file(filename=filename,
base_options=base_config,
change=change
)
GenerateOcConfigs.run(__name__)
|
citeomatic-master
|
citeomatic/scripts/generate_oc_configs.py
|
#!/usr/bin/env python3
import atexit
import collections
import logging
import os
import random
import base.config
import numpy as np
import tqdm
from citeomatic import DEFAULT_BASE_DIR, ROOT, model_from_directory
from citeomatic.elastic import fetch_citations, fetch_level2_citations
from citeomatic.features import Corpus
from citeomatic.neighbors import EmbeddingModel, ANN
from citeomatic.service import APIModel
from base import file_util
from traitlets import Int, Unicode, Bool, Enum
DETAILED_PAPERS = [
'Piccolo: Building Fast, Distributed Programs with Partitioned Tables',
'Holographic Embeddings of Knowledge Graphs',
'Identifying Relations for Open Information Extraction',
'Question Answering over Freebase with Multi-Column Convolutional Neural Networks',
'Optimizing Cauchy Reed-Solomon Codes for Fault-Tolerant Network Storage Applications',
'Wikification and Beyond: The Challenges of Entity and Concept Grounding',
'Named Entity Recognition in Tweets: An Experimental Study',
'Training Input-Output Recurrent Neural Networks through Spectral Methods',
'End-To-End Memory Networks',
]
EVAL_KEYS = [1, 5, 10, 20, 50, 100, 1000]
CITE_CACHE = {}
DONE_IDS = []
def _load_cite_cache():
if os.path.exists('/tmp/citation.cache.json'):
return file_util.read_json('/tmp/citation.cache.json')
return {}
CITE_CACHE = _load_cite_cache()
def _save_cache():
file_util.write_json('/tmp/citation.cache.json', CITE_CACHE)
atexit.register(_save_cache)
class TestCiteomatic(base.config.App):
"""
Test the citation prediction model and calculate Precision and Recall@K.
Parameters
----------
model_dir : string
Required argument
Location of the saved model weights and config files.
test_samples : Int, default=10
Default number of samples to evaluate on
min_citation_count : Int, default=10
The minimum number of citations a test document should have
filter_method : str, default='es'
What method to use to pre-fetch the document.
'es' is elastic search.
'ann' is approximate nearest neighbors.
    ann_path : str, default=None
Location of the ANN index.
    corpus_path: str, default='<base_dir>/corpus.msgpack'
Location of corpus file to use.
"""
defaults = {
'base_dir': os.path.join(ROOT, DEFAULT_BASE_DIR),
}
model_dir = Unicode(allow_none=False)
test_samples = Int(default_value=10)
min_citation_count = Int(default_value=10)
max_neighbors = Int(default_value=1000)
corpus_path = Unicode(default_value=os.path.join(defaults['base_dir'], 'corpus.msgpack'))
filter_method = Unicode(default_value='es')
ann_path = Unicode(default_value=None, allow_none=True)
ann_model_dir = Unicode(default_value=None, allow_none=True)
candidate_min_in_citations = Int(default_value=4, allow_none=True)
limit_candidate_to_train_ids = Bool(default_value=False)
extend_candidate_citations = Bool(default_value=False)
def _fetch_citations(self, paper_id, level):
key = '%s/%d' % (paper_id, level)
if key not in CITE_CACHE:
if level == 1:
citations = self.corpus[paper_id].citations
citations = [c for c in citations if c in self.corpus.train_ids]
CITE_CACHE[key] = citations
else:
if self.citation_source == 'es':
second_level_citations = list(
fetch_level2_citations(self._fetch_citations(paper_id, 1))
)
else:
second_level_citations = []
second_level_citations.extend(self.corpus[paper_id].citations)
for c in self.corpus[paper_id].citations:
second_level_citations.extend(self.corpus[c].citations)
second_level_citations = [
c for c in second_level_citations if c in self.corpus.train_ids
]
CITE_CACHE[key] = second_level_citations
return CITE_CACHE[key]
def _predict(self, paper_id):
# Obtain out-citations of a paper. We cannot use the ones
# in the `corpus` object because they were filtered down to contain
# IDs that are in the corpus itself.
gold_citations = set(self._fetch_citations(paper_id, 1))
citations_of_citations = set(self._fetch_citations(paper_id, 2))
gold_citations_2 = gold_citations.union(citations_of_citations)
if len(gold_citations) < self.min_citation_count:
return None
logging.info("No. of gold citations of %s = %d" % (paper_id, len(gold_citations)))
document = self.corpus[paper_id]
def _found(lst):
return len([id for id in lst if id in self.corpus])
best_recall_1 = _found(gold_citations) / len(gold_citations)
best_recall_2 = _found(gold_citations_2) / len(gold_citations_2)
logging.info(
'Corpus recall for paper %s = %f %s' % (paper_id, best_recall_1, best_recall_2)
)
predictions = self.model.predict(document, top_n=np.max(list(EVAL_KEYS)))
paper_results = []
for prediction in predictions:
if prediction.document.id == paper_id: continue
paper_results.append(
{
'title': prediction.document.title,
'id': prediction.document.id,
'correct_1': prediction.document.id in gold_citations,
'correct_2': prediction.document.id in gold_citations_2,
'score': prediction.score,
}
)
def _mrr(p):
try:
idx = p.index(True)
return 1. / (idx + 1)
except ValueError:
return 0.0
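        # e.g. _mrr([False, True, False]) == 0.5; _mrr([False, False]) == 0.0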
p1 = [p['correct_1'] for p in paper_results]
mrr1 = _mrr(p1)
p2 = [p['correct_2'] for p in paper_results]
mrr2 = _mrr(p2)
logging.info('Level 1 P@10 = %f ' % np.mean(p1[:10]))
logging.info('Level 2 P@10 = %f ' % np.mean(p2[:10]))
logging.info('Level 1 MRR = %f' % mrr1)
logging.info('Level 2 MRR = %f' % mrr2)
candidate_set_recall = np.sum(p1) / len(gold_citations)
logging.info('Candidate set recall = %f ' % candidate_set_recall)
DONE_IDS.append(paper_id)
logging.info('======================== %d' % len(DONE_IDS))
return {
'title': document.title,
'id': document.id,
'predictions': paper_results,
'recall_1': best_recall_1,
'recall_2': best_recall_2,
'num_gold_1': len(gold_citations),
'num_gold_2': len(gold_citations_2),
'mrr_1': mrr1,
'mrr_2': mrr2
}
def _init_model(self):
featurizer, models = model_from_directory(self.model_dir)
corpus = Corpus.load(self.corpus_path, featurizer.training_fraction)
if self.filter_method == "ann":
ann = ANN.load(self.ann_path)
if self.ann_model_dir:
featurizer_ann, models_ann = model_from_directory(self.ann_model_dir)
else:
featurizer_ann, models_ann = featurizer, models
ann_doc_embedding_model = EmbeddingModel(featurizer_ann, models_ann['embedding'])
api_model = APIModel(
models,
featurizer,
ann=ann,
ann_embedding_model=ann_doc_embedding_model,
corpus=corpus,
max_neighbors=self.max_neighbors,
candidate_min_in_citations=self.candidate_min_in_citations,
limit_candidate_to_train_ids=self.limit_candidate_to_train_ids,
extend_candidate_citations=self.extend_candidate_citations,
citation_source=self.citation_source
)
else:
api_model = APIModel(
models,
featurizer,
max_neighbors=self.max_neighbors,
candidate_min_in_citations=self.candidate_min_in_citations,
limit_candidate_to_train_ids=self.limit_candidate_to_train_ids,
extend_candidate_citations=self.extend_candidate_citations,
citation_source=self.citation_source
)
self.corpus = corpus
self.model = api_model
return corpus, api_model
def main(self, rest):
corpus, api_model = self._init_model()
logging.info(
'Found %d ids in training and %d ids for testing' %
(len(corpus.train_ids), len(corpus.test_ids))
)
query_doc_ids = []
for doc in tqdm.tqdm(corpus):
if doc.title in DETAILED_PAPERS:
query_doc_ids.append(doc.id)
for doc_id in query_doc_ids:
logging.info('Query Doc Title ---> %s ' % corpus[doc_id].title)
citations = self._fetch_citations(doc_id, 1)
predictions = api_model.predict(corpus[doc_id], top_n=50)
for prediction in predictions:
logging.info(
'\t%f\t%s\t%s' % (
prediction.score, prediction.document.id in citations,
prediction.document.title
)
)
random.seed(110886)
shuffled_test_ids = sorted(np.sort(list(corpus.test_ids)), key=lambda k: random.random())
filtered_test_ids = []
for test_id in tqdm.tqdm(shuffled_test_ids):
if len(self._fetch_citations(test_id, 1)) >= self.min_citation_count:
filtered_test_ids.append(test_id)
if len(filtered_test_ids) == self.test_samples:
break
shuffled_test_ids = filtered_test_ids
results = [self._predict(paper_id) for paper_id in shuffled_test_ids]
results = [r for r in results if r is not None]
precision_at_1 = collections.defaultdict(list)
recall_at_1 = collections.defaultdict(list)
precision_at_2 = collections.defaultdict(list)
recall_at_2 = collections.defaultdict(list)
for r in results:
p1 = [p['correct_1'] for p in r['predictions']]
p2 = [p['correct_2'] for p in r['predictions']]
for k in EVAL_KEYS:
patk = np.mean(p1[:k])
ratk = np.sum(p1[:k]) / r['num_gold_1']
precision_at_1[k].append(patk)
recall_at_1[k].append(ratk)
patk = np.mean(p2[:k])
ratk = np.sum(p2[:k]) / r['num_gold_2']
precision_at_2[k].append(patk)
recall_at_2[k].append(ratk)
self.write_json(
'test_results.json', {
'precision_1': {k: np.mean(v)
for (k, v) in precision_at_1.items()},
'recall_1': {k: np.mean(v)
for (k, v) in recall_at_1.items()},
'precision_2': {k: np.mean(v)
for (k, v) in precision_at_2.items()},
'recall_2': {k: np.mean(v)
for (k, v) in recall_at_2.items()},
'mrr_1': np.mean([r['mrr_1'] for r in results]),
'mrr_2': np.mean([r['mrr_2'] for r in results]),
'results': results,
}
)
logging.info("\n====\nResults on %d randomly sampled papers" % len(precision_at_1[1]))
logging.info("Precision @K")
logging.info("K\tLevel 1\tLevel 2")
for k in np.sort(list(precision_at_1.keys())):
logging.info(
"K=%d:\t%f\t%f" % (k, np.mean(precision_at_1[k]), np.mean(precision_at_2[k]))
)
logging.info("Recall @k")
for k in np.sort(list(recall_at_1.keys())):
logging.info("K=%d:\t%f\t%f" % (k, np.mean(recall_at_1[k]), np.mean(recall_at_2[k])))
logging.info("Best possible recall = %f ", np.mean([r['recall_1'] for r in results]))
logging.info('Level 1 MRR = %f' % np.mean([r['mrr_1'] for r in results]))
logging.info('Level 2 MRR = %f' % np.mean([r['mrr_2'] for r in results]))
TestCiteomatic.run(__name__)
|
citeomatic-master
|
citeomatic/scripts/evaluate_citeomatic_model.py
|
import datetime
import logging
import os
from pprint import pprint
import numpy as np
from hyperopt import hp, fmin, tpe, Trials, STATUS_OK, STATUS_FAIL
from hyperopt.pyll.base import scope
from traitlets import Int, Unicode, Enum
from citeomatic import file_util
from citeomatic.common import PAPER_EMBEDDING_MODEL, CITATION_RANKER_MODEL, DatasetPaths
from citeomatic.config import App
from citeomatic.candidate_selectors import ANNCandidateSelector, BM25CandidateSelector
from citeomatic.models.options import ModelOptions
from citeomatic.serialization import model_from_directory
from citeomatic.neighbors import EmbeddingModel, ANN
from citeomatic.ranker import Ranker, NoneRanker
from citeomatic.training import end_to_end_training
from citeomatic.training import eval_text_model, EVAL_DATASET_KEYS
import pickle
import tensorflow as tf
import keras.backend as K
class TrainCiteomatic(App, ModelOptions):
dataset_type = Enum(('dblp', 'pubmed', 'oc'), default_value='dblp')
# Whether to train based on given options or to run a hyperopt
mode = Enum(('train', 'hyperopt'))
# Training parameters
hyperopts_results_pkl = Unicode(default_value=None, allow_none=True)
options_json = Unicode(default_value=None, allow_none=True)
# hyperopt parameters
max_evals_initial = Int(default_value=25)
max_evals_secondary = Int(default_value=5)
total_samples_initial = Int(default_value=5000000)
total_samples_secondary = Int(default_value=50000000)
n_eval = Int(default_value=500, allow_none=True)
models_ann_dir = Unicode(default_value=None, allow_none=True)
models_dir_base = Unicode(
default_value='data/models/'
)
run_identifier = Unicode(default_value=None, allow_none=True)
version = Unicode(default_value='v0')
# to be filled in later
models_dir = Unicode(default_value=None, allow_none=True)
ann = None
def main(self, args):
if self.mode == 'hyperopt':
self.run_hyperopt()
elif self.mode == 'train':
self.run_train()
else:
assert False
def run_train(self):
eval_params = {}
if self.hyperopts_results_pkl is not None:
params = pickle.load(open(self.hyperopts_results_pkl, "rb"))
for k, v in params[1][0]['result']['params'].items():
eval_params[k] = v
if self.options_json is not None:
obj = file_util.read_json(self.options_json)
eval_params.update(obj)
if self.model_name == PAPER_EMBEDDING_MODEL:
self.models_ann_dir = None
self.models_dir = os.path.join(self.models_dir_base, PAPER_EMBEDDING_MODEL)
self.train_and_evaluate(eval_params)
@staticmethod
def _hyperopt_space(model_name, total_samples):
use_pretrained = hp.choice('use_pretrained', [True, False])
common_param_space = {
'total_samples':
total_samples,
'lr':
hp.choice('lr', [0.1, 0.01, 0.001, 0.0001, 0.00001]),
'l1_lambda':
hp.choice('l1_lambda', np.append(np.logspace(-7, -2, 6), 0)),
'dropout_p':
hp.quniform('dropout_p', 0.0, 0.75, 0.05),
'margin_multiplier':
hp.choice('margin_multiplier', [0.5, 0.75, 1.0, 1.25, 1.5])
}
pre_trained_params = {
True: {
'use_pretrained': True,
'l2_lambda': 0,
'dense_dim': 300,
'enable_fine_tune': hp.choice('enable_fine_tune', [True, False])
},
False: {
'use_pretrained': False,
'l2_lambda': hp.choice('l2_lambda', np.append(np.logspace(-7, -2, 6), 0)),
'dense_dim': scope.int(hp.quniform('dense_dim', 25, 325, 25)),
'enable_fine_tune': True # doesn't matter what goes here
}
}
# the search space
# note that the scope.int code is a hack to get integers out of the sampler
if model_name == CITATION_RANKER_MODEL:
ranker_model_params = {
'embedding_type':
hp.choice('embedding_type', ['sum']),
'metadata_dim':
scope.int(hp.quniform('metadata_dim', 5, 55, 5)),
}
space = scope.switch(
scope.int(use_pretrained),
{**pre_trained_params[False],
**common_param_space,
**ranker_model_params,
},
{**pre_trained_params[True],
**common_param_space,
**ranker_model_params
}
)
elif model_name == PAPER_EMBEDDING_MODEL:
space = scope.switch(
scope.int(use_pretrained),
{**pre_trained_params[False],
**common_param_space
},
{**pre_trained_params[True],
**common_param_space
}
)
else:
# Should not come here. Adding this to make pycharm happy.
assert False
return space
def run_hyperopt(self):
# run identifier
if self.run_identifier is None:
self.run_identifier = '_'.join(
[
'citeomatic_hyperopt',
self.model_name,
self.dataset_type,
datetime.datetime.now().strftime("%Y-%m-%d"),
self.version
]
)
self.models_dir = os.path.join(self.models_dir_base, self.run_identifier)
if self.model_name == PAPER_EMBEDDING_MODEL:
self.models_ann_dir = None
space = self._hyperopt_space(self.model_name, self.total_samples_initial)
# stage 1: run hyperopt for max_evals_initial
# using a max of total_samples_initial samples
trials = Trials()
_ = fmin(
fn=self.train_and_evaluate,
space=space,
algo=tpe.suggest,
max_evals=self.max_evals_initial,
trials=trials
)
sorted_results_stage_1 = sorted(
trials.trials, key=lambda x: x['result']['loss']
)
# stage 2: run the top max_evals_seconadry from stage 1
# using a max of total_samples_secondary samples
results_stage_2 = []
for result in sorted_results_stage_1[:self.max_evals_secondary]:
params = result['result']['params']
params['total_samples'] = self.total_samples_secondary
out = self.train_and_evaluate(params)
results_stage_2.append({'params': params, 'result': out})
sorted_results_stage_2 = sorted(
results_stage_2, key=lambda x: x['result']['loss']
)
# save and display results
results_save_file = 'hyperopt_results.pickle'
file_util.write_pickle(
os.path.join(self.models_dir, results_save_file),
(sorted_results_stage_1, sorted_results_stage_2)
)
pprint(sorted_results_stage_2[0])
def train_and_evaluate(self, eval_params):
# Needed especially for hyperopt runs
K.clear_session()
model_kw = {name: getattr(self, name) for name in ModelOptions.class_traits().keys()}
model_kw.update(eval_params)
model_options = ModelOptions(**model_kw)
if model_options.use_metadata:
model_options.use_keyphrases = True
model_options.use_authors = True
model_options.use_venue = True
print("====== OPTIONS =====")
print(model_options)
print("======")
if model_options.train_for_test_set:
logging.info("\n\n============== TRAINING FOR TEST SET =============\n\n")
training_outputs = end_to_end_training(
model_options,
self.dataset_type,
self.models_dir,
self.models_ann_dir
)
corpus, featurizer, model_options, citeomatic_model, embedding_model = training_outputs
if self.candidate_selector_type == 'ann':
# if no ann_dir is provided, then we use the model that was just trained
# and have to rebuild the ANN
if self.models_ann_dir is None:
print(
'Using embedding model that was just trained for eval. Building...')
paper_embedding_model = EmbeddingModel(
featurizer,
embedding_model
)
self.ann = ANN.build(paper_embedding_model, corpus)
# if a dir is provided, then go ahead and load it
else:
featurizer_for_ann, ann_models = model_from_directory(
self.models_ann_dir, on_cpu=True
)
paper_embedding_model = EmbeddingModel(
featurizer_for_ann,
ann_models['embedding']
)
# the ANN itself needs to be only built once
if self.ann is None:
if corpus.corpus_type == 'oc' and os.path.exists(DatasetPaths.OC_ANN_FILE + ".pickle"):
self.ann = ANN.load(DatasetPaths.OC_ANN_FILE)
else:
self.ann = ANN.build(paper_embedding_model, corpus)
candidate_selector = ANNCandidateSelector(
corpus=corpus,
ann=self.ann,
paper_embedding_model=paper_embedding_model,
top_k=model_options.num_ann_nbrs_to_fetch,
extend_candidate_citations=model_options.extend_candidate_citations
)
elif self.candidate_selector_type == 'bm25':
dp = DatasetPaths()
candidate_selector = BM25CandidateSelector(
corpus=corpus,
index_path=dp.get_bm25_index_path(self.dataset_type),
top_k=model_options.num_ann_nbrs_to_fetch,
extend_candidate_citations=model_options.extend_candidate_citations
)
else:
# Should not come here. Adding this to make pycharm happy.
assert False
if self.citation_ranker_type == 'neural':
ranker = Ranker(
corpus=corpus,
featurizer=featurizer,
citation_ranker=citeomatic_model,
num_candidates_to_rank=model_options.num_candidates_to_rank
)
elif self.citation_ranker_type == 'none':
ranker = NoneRanker()
else:
# Should not come here. Adding this to make pycharm happy.
assert False
if self.mode != 'hyperopt' or model_options.total_samples == self.total_samples_secondary:
results_training = eval_text_model(
corpus,
candidate_selector,
ranker,
papers_source='train',
n_eval=self.n_eval
)
else:
results_training = {}
results_validation = eval_text_model(
corpus,
candidate_selector,
ranker,
papers_source='valid',
n_eval=self.n_eval
)
logging.info("===== Validation Results ===== ")
logging.info("Validation Precision\n\n{}".format(results_validation['precision_1']))
logging.info("Validation Recall\n\n{}".format(results_validation['recall_1']))
p = results_validation['precision_1'][EVAL_DATASET_KEYS[self.dataset_type]]
r = results_validation['recall_1'][EVAL_DATASET_KEYS[self.dataset_type]]
f1 = results_validation['f1_1'][EVAL_DATASET_KEYS[self.dataset_type]]
if self.model_name == PAPER_EMBEDDING_MODEL:
# optimizing for recall
l = -r
else:
# optimizing for F1
l = -f1
out = {
'loss': l, # have to negate since we're minimizing
'losses_training': results_training,
'losses_validation': results_validation,
'status': STATUS_FAIL if np.isnan(f1) else STATUS_OK,
'params': eval_params
}
return out
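    # Illustrative sketch (not part of the original script): train_and_evaluate
    # returns a result dict in the format hyperopt expects from an objective
    # function, so it could be minimized with something like
    #   from hyperopt import Trials, fmin, tpe
    #   trials = Trials()
    #   best = fmin(fn=self.train_and_evaluate, space=search_space,
    #               algo=tpe.suggest, max_evals=50, trials=trials)
    # where `search_space` is an assumed hyperopt search space over eval_params.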
TrainCiteomatic.run(__name__)
|
citeomatic-master
|
citeomatic/scripts/train.py
|
import json
import pickle
from citeomatic.candidate_selectors import BM25CandidateSelector, ANNCandidateSelector, \
OracleCandidateSelector
from citeomatic.common import DatasetPaths
from citeomatic.config import App
from traitlets import Int, Unicode, Enum
from citeomatic.corpus import Corpus
from citeomatic.neighbors import EmbeddingModel, ANN
from citeomatic.ranker import NoneRanker, Ranker
from citeomatic.serialization import model_from_directory
from citeomatic.training import eval_text_model, EVAL_DATASET_KEYS
import os
class Evaluate(App):
dataset_type = Enum(('dblp', 'pubmed', 'oc'), default_value='pubmed')
candidate_selector_type = Enum(('bm25', 'ann', 'oracle'), default_value='bm25')
metric = Enum(('precision', 'recall', 'f1'), default_value='recall')
split = Enum(('train', 'test', 'valid'), default_value='valid')
# ann options
paper_embedder_dir = Unicode(default_value=None, allow_none=True)
# Candidate selector options
num_candidates = Int(default_value=None, allow_none=True)
ranker_type = Enum(('none', 'neural'), default_value='none')
n_eval = Int(default_value=None, allow_none=True)
# ranker options
citation_ranker_dir = Unicode(default_value=None, allow_none=True)
_embedder = None
_ann = None
def embedder(self, featurizer, embedding_model) -> EmbeddingModel:
if self._embedder is None:
self._embedder = EmbeddingModel(featurizer, embedding_model)
return self._embedder
def ann(self, embedder, corpus) -> ANN:
if corpus.corpus_type == 'oc' and os.path.exists(DatasetPaths.OC_ANN_FILE + ".pickle"):
self._ann = ANN.load(DatasetPaths.OC_ANN_FILE)
return self._ann
if self._ann is None:
self._ann = ANN.build(embedder, corpus, ann_trees=100)
if self.dataset_type == 'oc':
self._ann.save(DatasetPaths.OC_ANN_FILE)
return self._ann
def _make_ann_candidate_selector(self, corpus, featurizer, embedding_model, num_candidates):
e = self.embedder(featurizer, embedding_model)
return ANNCandidateSelector(
corpus=corpus,
ann=self.ann(e, corpus),
paper_embedding_model=e,
top_k=num_candidates,
extend_candidate_citations=True
)
def main(self, args):
dp = DatasetPaths()
if self.dataset_type == 'oc':
corpus = Corpus.load_pkl(dp.get_pkl_path(self.dataset_type))
else:
corpus = Corpus.load(dp.get_db_path(self.dataset_type))
if self.ranker_type == 'none':
citation_ranker = NoneRanker()
elif self.ranker_type == 'neural':
assert self.citation_ranker_dir is not None
ranker_featurizer, ranker_models = model_from_directory(self.citation_ranker_dir,
on_cpu=True)
citation_ranker = Ranker(
corpus=corpus,
featurizer=ranker_featurizer,
citation_ranker=ranker_models['citeomatic'],
num_candidates_to_rank=100
)
else:
assert False
candidate_results_map = {}
if self.num_candidates is None:
if self.dataset_type == 'oc':
num_candidates_list = [100]
else:
num_candidates_list = [1, 5, 10, 15, 25, 50, 75, 100]
else:
num_candidates_list = [self.num_candidates]
for num_candidates in num_candidates_list:
if self.candidate_selector_type == 'bm25':
index_path = dp.get_bm25_index_path(self.dataset_type)
candidate_selector = BM25CandidateSelector(
corpus,
index_path,
num_candidates,
False
)
elif self.candidate_selector_type == 'ann':
assert self.paper_embedder_dir is not None
featurizer, models = model_from_directory(self.paper_embedder_dir, on_cpu=True)
candidate_selector = self._make_ann_candidate_selector(corpus=corpus,
featurizer=featurizer,
embedding_model=models['embedding'],
num_candidates=num_candidates)
elif self.candidate_selector_type == 'oracle':
candidate_selector = OracleCandidateSelector(corpus)
else:
assert False
results = eval_text_model(corpus, candidate_selector, citation_ranker,
papers_source=self.split, n_eval=self.n_eval)
candidate_results_map[num_candidates] = results
best_k = -1
best_metric = 0.0
metric_key = self.metric + "_1"
for k, v in candidate_results_map.items():
if best_metric < v[metric_key][EVAL_DATASET_KEYS[self.dataset_type]]:
best_k = k
best_metric = v[metric_key][EVAL_DATASET_KEYS[self.dataset_type]]
print(json.dumps(candidate_results_map, indent=4, sort_keys=True))
print(best_k)
print(best_metric)
Evaluate.run(__name__)
|
citeomatic-master
|
citeomatic/scripts/evaluate.py
|
import logging
import os
import tqdm
from citeomatic import file_util
from citeomatic.common import DatasetPaths, FieldNames, global_tokenizer
from citeomatic.config import App
from citeomatic.corpus import Corpus
from citeomatic.service import document_from_dict, dict_from_document
from citeomatic.traits import Enum
import json
class ConvertKddToCiteomatic(App):
dataset_name = Enum(options=['dblp', 'pubmed'])
def main(self, args):
if self.dataset_name == 'dblp':
input_path = DatasetPaths.DBLP_GOLD_DIR
output_path = DatasetPaths.DBLP_CORPUS_JSON
elif self.dataset_name == 'pubmed':
input_path = DatasetPaths.PUBMED_GOLD_DIR
output_path = DatasetPaths.PUBMED_CORPUS_JSON
else:
assert False
logging.info("Reading Gold data from {}".format(input_path))
logging.info("Writing corpus to {}".format(output_path))
assert os.path.exists(input_path)
assert not os.path.exists(output_path)
papers_file = os.path.join(input_path, "papers.txt")
abstracts_file = os.path.join(input_path, "abstracts.txt")
keyphrases_file = os.path.join(input_path, "paper_keyphrases.txt")
citations_file = os.path.join(input_path, "paper_paper.txt")
authors_file = os.path.join(input_path, "paper_author.txt")
venues_file = os.path.join(input_path, "paper_venue.txt")
paper_titles = {}
paper_years = {}
paper_abstracts = {}
paper_keyphrases = {}
paper_citations = {}
paper_in_citations = {}
paper_authors = {}
paper_venues = {}
bad_ids = set()
for line in file_util.read_lines(abstracts_file):
parts = line.split("\t")
paper_id = int(parts[0])
if len(parts) == 2:
paper_abstracts[paper_id] = parts[1]
else:
paper_abstracts[paper_id] = ""
if paper_abstracts[paper_id] == "":
bad_ids.add(paper_id)
for line in file_util.read_lines(papers_file):
parts = line.split('\t')
paper_id = int(parts[0])
paper_years[paper_id] = int(parts[2])
paper_titles[paper_id] = parts[3]
for line in file_util.read_lines(keyphrases_file):
parts = line.split("\t")
paper_id = int(parts[0])
if paper_id not in paper_keyphrases:
paper_keyphrases[paper_id] = []
for kp in parts[1:]:
kp = kp.strip()
if len(kp) > 0:
paper_keyphrases[paper_id].append(kp[:-4])
for line in file_util.read_lines(citations_file):
parts = line.split("\t")
paper_id = int(parts[0])
if paper_id not in paper_citations:
paper_citations[paper_id] = []
c = int(parts[1])
if c in bad_ids:
continue
paper_citations[paper_id].append(str(c))
if c not in paper_in_citations:
paper_in_citations[c] = []
if paper_id not in paper_in_citations:
paper_in_citations[paper_id] = []
paper_in_citations[c].append(paper_id)
for line in file_util.read_lines(authors_file):
parts = line.split("\t")
paper_id = int(parts[0])
if paper_id not in paper_authors:
paper_authors[paper_id] = []
paper_authors[paper_id].append(parts[1])
for line in file_util.read_lines(venues_file):
parts = line.split("\t")
paper_id = int(parts[0])
paper_venues[paper_id] = parts[1]
test_paper_id = 13
print("==== Test Paper Details ====")
print(paper_titles[test_paper_id])
print(paper_years[test_paper_id])
print(paper_abstracts[test_paper_id])
print(paper_keyphrases[test_paper_id])
print(paper_citations[test_paper_id])
print(paper_in_citations[test_paper_id])
print(paper_authors[test_paper_id])
print(paper_venues[test_paper_id])
print("==== Test Paper Details ====")
def _print_len(x, name=''):
print("No. of {} = {}".format(name, len(x)))
_print_len(paper_titles, 'Titles')
_print_len(paper_years, 'Years')
_print_len(paper_abstracts, 'Abstracts')
_print_len(paper_keyphrases, 'KeyPhrases')
_print_len(paper_citations, 'Paper Citations')
_print_len(paper_in_citations, 'Paper In citations')
        _print_len(paper_authors, 'Authors')
        _print_len(paper_venues, 'Venues')
logging.info("Skipped {} papers due to insufficient data".format(len(bad_ids)))
corpus = {}
for id, title in tqdm.tqdm(paper_titles.items()):
if id in bad_ids:
continue
doc = document_from_dict(
{
FieldNames.PAPER_ID: str(id),
FieldNames.TITLE: ' '.join(global_tokenizer(title)),
FieldNames.ABSTRACT: ' '.join(global_tokenizer(paper_abstracts[id])),
FieldNames.OUT_CITATIONS: paper_citations.get(id, []),
FieldNames.YEAR: paper_years[id],
FieldNames.AUTHORS: paper_authors.get(id, []),
FieldNames.KEY_PHRASES: paper_keyphrases[id],
FieldNames.OUT_CITATION_COUNT: len(paper_citations.get(id, [])),
FieldNames.IN_CITATION_COUNT: len(paper_in_citations.get(id, [])),
FieldNames.VENUE: paper_venues.get(id, ''),
FieldNames.TITLE_RAW: title,
FieldNames.ABSTRACT_RAW: paper_abstracts[id]
}
)
corpus[id] = doc
with open(output_path, 'w') as f:
for _, doc in corpus.items():
doc_json = dict_from_document(doc)
f.write(json.dumps(doc_json))
f.write("\n")
dp = DatasetPaths()
Corpus.build(dp.get_db_path(self.dataset_name), dp.get_json_path(self.dataset_name))
ConvertKddToCiteomatic.run(__name__)
|
citeomatic-master
|
citeomatic/scripts/convert_kdd_to_citeomatic.py
|
import logging
import tqdm
from citeomatic.common import DatasetPaths, FieldNames, global_tokenizer
from citeomatic.config import App
from citeomatic.corpus import Corpus
from citeomatic.traits import Unicode
import os
import json
from citeomatic import file_util
import pickle
class ConvertOpenCorpusToCiteomatic(App):
input_path = Unicode(default_value=DatasetPaths.OC_FILE)
output_path = Unicode(default_value=DatasetPaths.OC_CORPUS_JSON)
def main(self, args):
logging.info("Reading Open Corpus file from: {}".format(self.input_path))
logging.info("Writing json file to: {}".format(self.output_path))
dp = DatasetPaths()
assert os.path.exists(self.input_path)
assert not os.path.exists(self.output_path)
assert not os.path.exists(dp.get_pkl_path('oc'))
with open(self.output_path, 'w') as f:
for obj in tqdm.tqdm(file_util.read_json_lines(self.input_path)):
if 'year' not in obj:
continue
translated_obj = {
FieldNames.PAPER_ID: obj['id'],
FieldNames.TITLE_RAW: obj['title'],
FieldNames.ABSTRACT_RAW: obj['paperAbstract'],
FieldNames.AUTHORS: [a['name'] for a in obj['authors']],
FieldNames.IN_CITATION_COUNT: len(obj['inCitations']),
FieldNames.KEY_PHRASES: obj['keyPhrases'],
FieldNames.OUT_CITATIONS: obj['outCitations'],
FieldNames.URLS: obj['pdfUrls'],
FieldNames.S2_URL: obj['s2Url'],
FieldNames.VENUE: obj['venue'],
FieldNames.YEAR: obj['year'],
FieldNames.TITLE: ' '.join(global_tokenizer(obj['title'])),
FieldNames.ABSTRACT: ' '.join(global_tokenizer(obj['paperAbstract']))
}
f.write(json.dumps(translated_obj))
f.write("\n")
oc_corpus = Corpus.build(dp.get_db_path('oc'), dp.get_json_path('oc'))
        with open(dp.get_pkl_path('oc'), 'wb') as pkl_file:
            pickle.dump(oc_corpus, pkl_file)
ConvertOpenCorpusToCiteomatic.run(__name__)
|
citeomatic-master
|
citeomatic/scripts/convert_open_corpus_to_citeomatic.py
|
import os
import tqdm
from whoosh.index import create_in
from citeomatic import file_util
from citeomatic.common import DatasetPaths
from citeomatic.common import schema
from citeomatic.config import App
from citeomatic.traits import Enum
class CreateBM25Index(App):
#
    # Caveat: it is unclear how to cleanly exclude the validation and test sets from the
    # index. We currently index all documents (including test docs), which somewhat pollutes
    # the term-frequency / document-frequency statistics.
#
#
dataset_name = Enum(options=['dblp', 'pubmed', 'oc'])
def main(self, args):
dp = DatasetPaths()
corpus_json = dp.get_json_path(self.dataset_name)
index_location = dp.get_bm25_index_path(self.dataset_name)
if os.path.exists(index_location):
assert False
else:
os.mkdir(index_location)
bm25_index = create_in(index_location, schema)
writer = bm25_index.writer()
for doc in tqdm.tqdm(file_util.read_json_lines(corpus_json)):
writer.add_document(
id=doc['id'],
title=doc['title'],
abstract=doc['abstract']
)
writer.commit()
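        # Illustrative sketch (not part of the original script): the index written
        # above can later be opened for BM25-style search with whoosh, e.g.
        #   from whoosh.index import open_dir
        #   from whoosh.qparser import MultifieldParser
        #   ix = open_dir(index_location)
        #   with ix.searcher() as searcher:
        #       parser = MultifieldParser(['title', 'abstract'], ix.schema)
        #       hits = searcher.search(parser.parse(u'neural citation'), limit=10)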
CreateBM25Index.run(__name__)
|
citeomatic-master
|
citeomatic/scripts/create_bm25_index.py
|
"""Augment the CSV-Columns file to generate length measurement."""
# Copyright (c) 2021 The Allen Institute for Artificial Intelligence.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import csv
import os
from collections import OrderedDict
import time
def main():
"""Augment the ./CSV-Columns.csv file"""
records = OrderedDict()
with open("./CSV-Columns.csv") as infile:
dict_reader = csv.DictReader(infile)
headers = dict_reader.fieldnames
for row in dict_reader:
records[row['column_name']] = row
# Add start and end columns for length and auto length columns
for column in [x for x in records.keys() if
records[x]['measurement_type'] in ('length', 'auto length')]:
for label_augment, desc_augment in [("{}_x_start", "Starting X for {}"),
("{}_y_start", "Starting y for {}"),
("{}_x_end", "Ending X for {}"),
("{}_y_end", "Ending y for {}")]:
target_col = label_augment.format(column)
if target_col not in records:
desc = desc_augment.format(records[column]['description'])
records[target_col] = {'column_name': target_col, 'description': desc,
'units': 'pixels', 'measurement_type': 'auto point',
'export': 'False', 'editable': 'False', 'is_metadata': 'False'}
for column in [x for x in records.keys() if
records[x]['measurement_type'] in ('point', 'length')]:
if records[column]['measurement_type'] == 'point' and column.endswith("_y"):
continue
measurement_type = records[column]['measurement_type']
if column.endswith("_x"):
column = column[:-2]
desc = "Review of {} {}".format(column, measurement_type)
target_col = "{}_reviewed".format(column)
records[target_col] = {'column_name': target_col, 'description': desc,
'units': 'boolean', 'measurement_type': 'boolean',
'export': 'False', 'editable': 'False', 'is_metadata': 'False'}
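    # Illustrative example (column name is hypothetical): a 'length' column named
    # "body_length" would gain the auto point columns body_length_x_start,
    # body_length_y_start, body_length_x_end and body_length_y_end from the first
    # loop, plus a boolean body_length_reviewed column from the second.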
os.rename("./CSV-Columns.csv", "./CSV-Columns.csv.{}.bak".format(int(time.time())))
with open("./CSV-Columns.csv", 'w') as outfile:
# with sys.stdout as outfile:
dict_writer = csv.DictWriter(outfile, headers)
dict_writer.writeheader()
dict_writer.writerows(records.values())
if __name__ == '__main__':
main()
|
AMPT-main
|
measurement-tool-config/augmentcsv.py
|
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import argparse, json, os
"""
During rendering, each CLEVR scene file is dumped to disk as a separate JSON
file; this is convenient for distributing rendering across multiple machines.
This script collects all CLEVR scene files stored in a directory and combines
them into a single JSON file. This script also adds the version number, date,
and license to the output file.
"""
parser = argparse.ArgumentParser()
parser.add_argument('--input_dir', default='output/scenes')
parser.add_argument('--output_file', default='output/CLEVR_misc_scenes.json')
parser.add_argument('--version', default='1.0')
parser.add_argument('--date', default='7/8/2017')
parser.add_argument('--license',
    default='Creative Commons Attribution (CC-BY 4.0)')
def main(args):
input_files = os.listdir(args.input_dir)
scenes = []
split = None
for filename in os.listdir(args.input_dir):
if not filename.endswith('.json'):
continue
path = os.path.join(args.input_dir, filename)
with open(path, 'r') as f:
scene = json.load(f)
scenes.append(scene)
if split is not None:
msg = 'Input directory contains scenes from multiple splits'
assert scene['split'] == split, msg
else:
split = scene['split']
scenes.sort(key=lambda s: s['image_index'])
for s in scenes:
print(s['image_filename'])
output = {
'info': {
'date': args.date,
'version': args.version,
'split': split,
'license': args.license,
},
'scenes': scenes
}
with open(args.output_file, 'w') as f:
json.dump(output, f)
if __name__ == '__main__':
args = parser.parse_args()
main(args)
|
clevr-dataset-gen-master
|
image_generation/collect_scenes.py
|
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
# The 2D bounding box generation code is taken from:
# https://blender.stackexchange.com/questions/7198/save-the-2d-bounding-box-of-an-object-in-rendered-image-to-a-text-file
from __future__ import print_function
import math, sys, random, argparse, json, os, tempfile
from datetime import datetime as dt
from collections import Counter
"""
Renders random scenes using Blender, each with a random number of objects;
each object has a random size, position, color, and shape. Objects will be
nonintersecting but may partially occlude each other. Output images will be
written to disk as PNGs, and we will also write a JSON file for each image with
ground-truth scene information.
This file expects to be run from Blender like this:
blender --background --python render_images.py -- [arguments to this script]
"""
INSIDE_BLENDER = True
try:
import bpy, bpy_extras
from mathutils import Vector
except ImportError as e:
INSIDE_BLENDER = False
if INSIDE_BLENDER:
try:
import utils
except ImportError as e:
print("\nERROR")
print("Running render_images.py from Blender and cannot import utils.py.")
print("You may need to add a .pth file to the site-packages of Blender's")
print("bundled python with a command like this:\n")
print("echo $PWD >> $BLENDER/$VERSION/python/lib/python3.5/site-packages/clevr.pth")
print("\nWhere $BLENDER is the directory where Blender is installed, and")
print("$VERSION is your Blender version (such as 2.78).")
sys.exit(1)
parser = argparse.ArgumentParser()
# Input options
parser.add_argument('--base_scene_blendfile', default='data/base_scene.blend',
help="Base blender file on which all scenes are based; includes " +
"ground plane, lights, and camera.")
parser.add_argument('--properties_json', default='data/properties.json',
help="JSON file defining objects, materials, sizes, and colors. " +
"The \"colors\" field maps from CLEVR color names to RGB values; " +
"The \"sizes\" field maps from CLEVR size names to scalars used to " +
"rescale object models; the \"materials\" and \"shapes\" fields map " +
"from CLEVR material and shape names to .blend files in the " +
"--object_material_dir and --shape_dir directories respectively.")
parser.add_argument('--shape_dir', default='data/shapes',
help="Directory where .blend files for object models are stored")
parser.add_argument('--material_dir', default='data/materials',
help="Directory where .blend files for materials are stored")
parser.add_argument('--shape_color_combos_json', default=None,
help="Optional path to a JSON file mapping shape names to a list of " +
"allowed color names for that shape. This allows rendering images " +
"for CLEVR-CoGenT.")
# Settings for objects
parser.add_argument('--min_objects', default=3, type=int,
help="The minimum number of objects to place in each scene")
parser.add_argument('--max_objects', default=10, type=int,
help="The maximum number of objects to place in each scene")
parser.add_argument('--min_dist', default=0.25, type=float,
help="The minimum allowed distance between object centers")
parser.add_argument('--margin', default=0.4, type=float,
help="Along all cardinal directions (left, right, front, back), all " +
"objects will be at least this distance apart. This makes resolving " +
"spatial relationships slightly less ambiguous.")
parser.add_argument('--min_pixels_per_object', default=200, type=int,
help="All objects will have at least this many visible pixels in the " +
"final rendered images; this ensures that no objects are fully " +
"occluded by other objects.")
parser.add_argument('--max_retries', default=50, type=int,
help="The number of times to try placing an object before giving up and " +
"re-placing all objects in the scene.")
# Output settings
parser.add_argument('--start_idx', default=0, type=int,
help="The index at which to start for numbering rendered images. Setting " +
"this to non-zero values allows you to distribute rendering across " +
"multiple machines and recombine the results later.")
parser.add_argument('--num_images', default=5, type=int,
help="The number of images to render")
parser.add_argument('--filename_prefix', default='CLEVR',
help="This prefix will be prepended to the rendered images and JSON scenes")
parser.add_argument('--split', default='new',
help="Name of the split for which we are rendering. This will be added to " +
"the names of rendered images, and will also be stored in the JSON " +
"scene structure for each image.")
parser.add_argument('--output_image_dir', default='../output/images/',
help="The directory where output images will be stored. It will be " +
"created if it does not exist.")
parser.add_argument('--output_scene_dir', default='../output/scenes/',
help="The directory where output JSON scene structures will be stored. " +
"It will be created if it does not exist.")
parser.add_argument('--output_scene_file', default='../output/CLEVR_scenes.json',
help="Path to write a single JSON file containing all scene information")
parser.add_argument('--output_blend_dir', default='output/blendfiles',
help="The directory where blender scene files will be stored, if the " +
"user requested that these files be saved using the " +
"--save_blendfiles flag; in this case it will be created if it does " +
"not already exist.")
parser.add_argument('--save_blendfiles', type=int, default=0,
help="Setting --save_blendfiles 1 will cause the blender scene file for " +
"each generated image to be stored in the directory specified by " +
"the --output_blend_dir flag. These files are not saved by default " +
"because they take up ~5-10MB each.")
parser.add_argument('--version', default='1.0',
help="String to store in the \"version\" field of the generated JSON file")
parser.add_argument('--license',
default="Creative Commons Attribution (CC-BY 4.0)",
help="String to store in the \"license\" field of the generated JSON file")
parser.add_argument('--date', default=dt.today().strftime("%m/%d/%Y"),
help="String to store in the \"date\" field of the generated JSON file; " +
"defaults to today's date")
# Rendering options
parser.add_argument('--use_gpu', default=0, type=int,
help="Setting --use_gpu 1 enables GPU-accelerated rendering using CUDA. " +
"You must have an NVIDIA GPU with the CUDA toolkit installed for " +
"to work.")
parser.add_argument('--width', default=320, type=int,
help="The width (in pixels) for the rendered images")
parser.add_argument('--height', default=240, type=int,
help="The height (in pixels) for the rendered images")
parser.add_argument('--key_light_jitter', default=1.0, type=float,
help="The magnitude of random jitter to add to the key light position.")
parser.add_argument('--fill_light_jitter', default=1.0, type=float,
help="The magnitude of random jitter to add to the fill light position.")
parser.add_argument('--back_light_jitter', default=1.0, type=float,
help="The magnitude of random jitter to add to the back light position.")
parser.add_argument('--camera_jitter', default=0.5, type=float,
help="The magnitude of random jitter to add to the camera position")
parser.add_argument('--render_num_samples', default=512, type=int,
help="The number of samples to use when rendering. Larger values will " +
"result in nicer images but will cause rendering to take longer.")
parser.add_argument('--render_min_bounces', default=8, type=int,
help="The minimum number of bounces to use for rendering.")
parser.add_argument('--render_max_bounces', default=8, type=int,
help="The maximum number of bounces to use for rendering.")
parser.add_argument('--render_tile_size', default=256, type=int,
help="The tile size to use for rendering. This should not affect the " +
"quality of the rendered image but may affect the speed; CPU-based " +
"rendering may achieve better performance using smaller tile sizes " +
"while larger tile sizes may be optimal for GPU-based rendering.")
def main(args):
num_digits = 6
prefix = '%s_%s_' % (args.filename_prefix, args.split)
img_template = '%s%%0%dd.png' % (prefix, num_digits)
scene_template = '%s%%0%dd.json' % (prefix, num_digits)
blend_template = '%s%%0%dd.blend' % (prefix, num_digits)
img_template = os.path.join(args.output_image_dir, img_template)
scene_template = os.path.join(args.output_scene_dir, scene_template)
blend_template = os.path.join(args.output_blend_dir, blend_template)
if not os.path.isdir(args.output_image_dir):
os.makedirs(args.output_image_dir)
if not os.path.isdir(args.output_scene_dir):
os.makedirs(args.output_scene_dir)
if args.save_blendfiles == 1 and not os.path.isdir(args.output_blend_dir):
os.makedirs(args.output_blend_dir)
all_scene_paths = []
for i in range(args.num_images):
img_path = img_template % (i + args.start_idx)
scene_path = scene_template % (i + args.start_idx)
all_scene_paths.append(scene_path)
blend_path = None
if args.save_blendfiles == 1:
blend_path = blend_template % (i + args.start_idx)
num_objects = random.randint(args.min_objects, args.max_objects)
render_scene(args,
num_objects=num_objects,
output_index=(i + args.start_idx),
output_split=args.split,
output_image=img_path,
output_scene=scene_path,
output_blendfile=blend_path,
)
# After rendering all images, combine the JSON files for each scene into a
# single JSON file.
all_scenes = []
for scene_path in all_scene_paths:
with open(scene_path, 'r') as f:
all_scenes.append(json.load(f))
output = {
'info': {
'date': args.date,
'version': args.version,
'split': args.split,
'license': args.license,
},
'scenes': all_scenes
}
with open(args.output_scene_file, 'w') as f:
json.dump(output, f)
def render_scene(args,
num_objects=5,
output_index=0,
output_split='none',
output_image='render.png',
output_scene='render_json',
output_blendfile=None,
):
# Load the main blendfile
bpy.ops.wm.open_mainfile(filepath=args.base_scene_blendfile)
# Load materials
utils.load_materials(args.material_dir)
# Set render arguments so we can get pixel coordinates later.
# We use functionality specific to the CYCLES renderer so BLENDER_RENDER
# cannot be used.
render_args = bpy.context.scene.render
render_args.engine = "CYCLES"
render_args.filepath = output_image
render_args.resolution_x = args.width
render_args.resolution_y = args.height
render_args.resolution_percentage = 100
render_args.tile_x = args.render_tile_size
render_args.tile_y = args.render_tile_size
if args.use_gpu == 1:
# Blender changed the API for enabling CUDA at some point
if bpy.app.version < (2, 78, 0):
bpy.context.user_preferences.system.compute_device_type = 'CUDA'
bpy.context.user_preferences.system.compute_device = 'CUDA_0'
else:
cycles_prefs = bpy.context.user_preferences.addons['cycles'].preferences
cycles_prefs.compute_device_type = 'CUDA'
# Some CYCLES-specific stuff
bpy.data.worlds['World'].cycles.sample_as_light = True
bpy.context.scene.cycles.blur_glossy = 2.0
bpy.context.scene.cycles.samples = args.render_num_samples
bpy.context.scene.cycles.transparent_min_bounces = args.render_min_bounces
bpy.context.scene.cycles.transparent_max_bounces = args.render_max_bounces
if args.use_gpu == 1:
bpy.context.scene.cycles.device = 'GPU'
# This will give ground-truth information about the scene and its objects
scene_struct = {
'split': output_split,
'image_index': output_index,
'image_filename': os.path.basename(output_image),
'objects': [],
'directions': {},
}
# Put a plane on the ground so we can compute cardinal directions
bpy.ops.mesh.primitive_plane_add(radius=5)
plane = bpy.context.object
def rand(L):
return 2.0 * L * (random.random() - 0.5)
# Add random jitter to camera position
if args.camera_jitter > 0:
for i in range(3):
bpy.data.objects['Camera'].location[i] += rand(args.camera_jitter)
# Figure out the left, up, and behind directions along the plane and record
# them in the scene structure
camera = bpy.data.objects['Camera']
plane_normal = plane.data.vertices[0].normal
cam_behind = camera.matrix_world.to_quaternion() * Vector((0, 0, -1))
cam_left = camera.matrix_world.to_quaternion() * Vector((-1, 0, 0))
cam_up = camera.matrix_world.to_quaternion() * Vector((0, 1, 0))
plane_behind = (cam_behind - cam_behind.project(plane_normal)).normalized()
plane_left = (cam_left - cam_left.project(plane_normal)).normalized()
plane_up = cam_up.project(plane_normal).normalized()
# Delete the plane; we only used it for normals anyway. The base scene file
# contains the actual ground plane.
utils.delete_object(plane)
# Save all six axis-aligned directions in the scene struct
scene_struct['directions']['behind'] = tuple(plane_behind)
scene_struct['directions']['front'] = tuple(-plane_behind)
scene_struct['directions']['left'] = tuple(plane_left)
scene_struct['directions']['right'] = tuple(-plane_left)
scene_struct['directions']['above'] = tuple(plane_up)
scene_struct['directions']['below'] = tuple(-plane_up)
# Add random jitter to lamp positions
if args.key_light_jitter > 0:
for i in range(3):
bpy.data.objects['Lamp_Key'].location[i] += rand(args.key_light_jitter)
if args.back_light_jitter > 0:
for i in range(3):
bpy.data.objects['Lamp_Back'].location[i] += rand(args.back_light_jitter)
if args.fill_light_jitter > 0:
for i in range(3):
bpy.data.objects['Lamp_Fill'].location[i] += rand(args.fill_light_jitter)
# Now make some random objects
objects, blender_objects = add_random_objects(scene_struct, num_objects, args, camera)
# Render segmentation mask
seg_path = output_image[:-4]+'_mask.png'
seg_colors = render_shadeless(blender_objects, path=seg_path)
# Render the scene and dump the scene data structure
scene_struct['objects'] = objects
scene_struct['relationships'] = compute_all_relationships(scene_struct)
while True:
try:
bpy.ops.render.render(write_still=True)
break
except Exception as e:
print(e)
with open(output_scene, 'w') as f:
json.dump(scene_struct, f, indent=2)
if output_blendfile is not None:
bpy.ops.wm.save_as_mainfile(filepath=output_blendfile)
def add_random_objects(scene_struct, num_objects, args, camera):
"""
Add random objects to the current blender scene
"""
# Load the property file
with open(args.properties_json, 'r') as f:
properties = json.load(f)
color_name_to_rgba = {}
for name, rgb in properties['colors'].items():
rgba = [float(c) / 255.0 for c in rgb] + [1.0]
color_name_to_rgba[name] = rgba
material_mapping = [(v, k) for k, v in properties['materials'].items()]
object_mapping = [(v, k) for k, v in properties['shapes'].items()]
size_mapping = list(properties['sizes'].items())
shape_color_combos = None
if args.shape_color_combos_json is not None:
with open(args.shape_color_combos_json, 'r') as f:
shape_color_combos = list(json.load(f).items())
positions = []
objects = []
blender_objects = []
for i in range(num_objects):
# Choose a random size
size_name, r = random.choice(size_mapping)
# Try to place the object, ensuring that we don't intersect any existing
# objects and that we are more than the desired margin away from all existing
# objects along all cardinal directions.
num_tries = 0
while True:
# If we try and fail to place an object too many times, then delete all
# the objects in the scene and start over.
num_tries += 1
if num_tries > args.max_retries:
for obj in blender_objects:
utils.delete_object(obj)
return add_random_objects(scene_struct, num_objects, args, camera)
x = random.uniform(-3, 3)
y = random.uniform(-3, 3)
# Check to make sure the new object is further than min_dist from all
# other objects, and further than margin along the four cardinal directions
dists_good = True
margins_good = True
for (xx, yy, rr) in positions:
dx, dy = x - xx, y - yy
dist = math.sqrt(dx * dx + dy * dy)
if dist - r - rr < args.min_dist:
dists_good = False
break
for direction_name in ['left', 'right', 'front', 'behind']:
direction_vec = scene_struct['directions'][direction_name]
assert direction_vec[2] == 0
margin = dx * direction_vec[0] + dy * direction_vec[1]
if 0 < margin < args.margin:
print(margin, args.margin, direction_name)
print('BROKEN MARGIN!')
margins_good = False
break
if not margins_good:
break
if dists_good and margins_good:
break
# Choose random color and shape
if shape_color_combos is None:
obj_name, obj_name_out = random.choice(object_mapping)
color_name, rgba = random.choice(list(color_name_to_rgba.items()))
else:
obj_name_out, color_choices = random.choice(shape_color_combos)
color_name = random.choice(color_choices)
obj_name = [k for k, v in object_mapping if v == obj_name_out][0]
rgba = color_name_to_rgba[color_name]
# For cube, adjust the size a bit
if obj_name == 'Cube':
r /= math.sqrt(2)
# Choose random orientation for the object.
theta = 360.0 * random.random()
# Actually add the object to the scene
utils.add_object(args.shape_dir, obj_name, r, (x, y), theta=theta)
obj = bpy.context.object
blender_objects.append(obj)
positions.append((x, y, r))
# Attach a random material
mat_name, mat_name_out = random.choice(material_mapping)
utils.add_material(mat_name, Color=rgba)
# Record data about the object in the scene data structure
pixel_coords = utils.get_camera_coords(camera, obj.location)
# Get 2D pixel coordinates for all 8 points in the bounding box
scene = bpy.context.scene
cam_ob = scene.camera
me_ob = bpy.context.object
bound_box = camera_view_bounds_2d(bpy.context.scene, cam_ob, me_ob)
objects.append({
'shape': obj_name_out,
'size': size_name,
'material': mat_name_out,
'3d_coords': tuple(obj.location),
'rotation': theta,
'pixel_coords': pixel_coords,
'color': color_name,
'x': bound_box.x,
'y': bound_box.y,
'width': bound_box.width,
'height': bound_box.height
})
# Check that all objects are at least partially visible in the rendered image
all_visible = check_visibility(blender_objects, args.min_pixels_per_object)
if not all_visible:
# If any of the objects are fully occluded then start over; delete all
# objects from the scene and place them all again.
print('Some objects are occluded; replacing objects')
for obj in blender_objects:
utils.delete_object(obj)
return add_random_objects(scene_struct, num_objects, args, camera)
return objects, blender_objects
class Box:
dim_x = 1
dim_y = 1
def __init__(self, min_x, min_y, max_x, max_y, dim_x=dim_x, dim_y=dim_y):
self.min_x = min_x
self.min_y = min_y
self.max_x = max_x
self.max_y = max_y
self.dim_x = dim_x
self.dim_y = dim_y
@property
def x(self):
return round(self.min_x * self.dim_x)
@property
def y(self):
return round(self.dim_y - self.max_y * self.dim_y)
@property
def width(self):
return round((self.max_x - self.min_x) * self.dim_x)
@property
def height(self):
return round((self.max_y - self.min_y) * self.dim_y)
def __str__(self):
return "<Box, x=%i, y=%i, width=%i, height=%i>" % \
(self.x, self.y, self.width, self.height)
def to_tuple(self):
if self.width == 0 or self.height == 0:
return (0, 0, 0, 0)
return (self.x, self.y, self.width, self.height)
def camera_view_bounds_2d(scene, cam_ob, me_ob):
"""
Returns camera space bounding box of mesh object.
Negative 'z' value means the point is behind the camera.
Takes shift-x/y, lens angle and sensor size into account
as well as perspective/ortho projections.
:arg scene: Scene to use for frame size.
:type scene: :class:`bpy.types.Scene`
:arg obj: Camera object.
:type obj: :class:`bpy.types.Object`
:arg me: Untransformed Mesh.
  :type me: :class:`bpy.types.Mesh`
:return: a Box object (call its to_tuple() method to get x, y, width and height)
:rtype: :class:`Box`
"""
mat = cam_ob.matrix_world.normalized().inverted()
me = me_ob.to_mesh(scene, True, 'PREVIEW')
me.transform(me_ob.matrix_world)
me.transform(mat)
camera = cam_ob.data
frame = [-v for v in camera.view_frame(scene=scene)[:3]]
camera_persp = camera.type != 'ORTHO'
lx = []
ly = []
for v in me.vertices:
co_local = v.co
z = -co_local.z
if camera_persp:
if z == 0.0:
lx.append(0.5)
ly.append(0.5)
# Does it make any sense to drop these?
#if z <= 0.0:
# continue
else:
frame = [(v / (v.z / z)) for v in frame]
min_x, max_x = frame[1].x, frame[2].x
min_y, max_y = frame[0].y, frame[1].y
x = (co_local.x - min_x) / (max_x - min_x)
y = (co_local.y - min_y) / (max_y - min_y)
lx.append(x)
ly.append(y)
min_x = clamp(min(lx), 0.0, 1.0)
max_x = clamp(max(lx), 0.0, 1.0)
min_y = clamp(min(ly), 0.0, 1.0)
max_y = clamp(max(ly), 0.0, 1.0)
bpy.data.meshes.remove(me)
r = scene.render
fac = r.resolution_percentage * 0.01
dim_x = r.resolution_x * fac
dim_y = r.resolution_y * fac
return Box(min_x, min_y, max_x, max_y, dim_x, dim_y)
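# Example usage (illustrative, mirroring the call in add_random_objects):
#   box = camera_view_bounds_2d(bpy.context.scene, bpy.context.scene.camera, obj)
#   x, y, w, h = box.to_tuple()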
def clamp(x, minimum, maximum):
return max(minimum, min(x, maximum))
def compute_all_relationships(scene_struct, eps=0.2):
"""
Computes relationships between all pairs of objects in the scene.
Returns a dictionary mapping string relationship names to lists of lists of
integers, where output[rel][i] gives a list of object indices that have the
relationship rel with object i. For example if j is in output['left'][i] then
object j is left of object i.
"""
all_relationships = {}
for name, direction_vec in scene_struct['directions'].items():
if name == 'above' or name == 'below': continue
all_relationships[name] = []
for i, obj1 in enumerate(scene_struct['objects']):
coords1 = obj1['3d_coords']
related = set()
for j, obj2 in enumerate(scene_struct['objects']):
if obj1 == obj2: continue
coords2 = obj2['3d_coords']
diff = [coords2[k] - coords1[k] for k in [0, 1, 2]]
dot = sum(diff[k] * direction_vec[k] for k in [0, 1, 2])
if dot > eps:
related.add(j)
all_relationships[name].append(sorted(list(related)))
return all_relationships
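# Illustrative example (not in the original file): for a three-object scene the
# returned structure might look like
#   {'left':   [[1, 2], [2], []],
#    'right':  [[],     [0], [0, 1]],
#    'front':  [[2],    [],  []],
#    'behind': [[],     [],  [0]]}
# meaning, e.g., that objects 1 and 2 lie to the left of object 0.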
def check_visibility(blender_objects, min_pixels_per_object):
"""
Check whether all objects in the scene have some minimum number of visible
pixels; to accomplish this we assign random (but distinct) colors to all
objects, and render using no lighting or shading or antialiasing; this
ensures that each object is just a solid uniform color. We can then count
the number of pixels of each color in the output image to check the visibility
of each object.
Returns True if all objects are visible and False otherwise.
"""
f, path = tempfile.mkstemp(suffix='.png')
object_colors = render_shadeless(blender_objects, path=path)
img = bpy.data.images.load(path)
p = list(img.pixels)
color_count = Counter((p[i], p[i+1], p[i+2], p[i+3])
for i in range(0, len(p), 4))
os.remove(path)
if len(color_count) != len(blender_objects) + 1:
return False
for _, count in color_count.most_common():
if count < min_pixels_per_object:
return False
return True
def render_shadeless(blender_objects, path='flat.png'):
"""
Render a version of the scene with shading disabled and unique materials
assigned to all objects, and return a set of all colors that should be in the
rendered image. The image itself is written to path. This is used to ensure
that all objects will be visible in the final rendered scene.
"""
render_args = bpy.context.scene.render
# Cache the render args we are about to clobber
old_filepath = render_args.filepath
old_engine = render_args.engine
old_use_antialiasing = render_args.use_antialiasing
# Override some render settings to have flat shading
render_args.filepath = path
render_args.engine = 'BLENDER_RENDER'
render_args.use_antialiasing = False
# Move the lights and ground to layer 2 so they don't render
utils.set_layer(bpy.data.objects['Lamp_Key'], 2)
utils.set_layer(bpy.data.objects['Lamp_Fill'], 2)
utils.set_layer(bpy.data.objects['Lamp_Back'], 2)
utils.set_layer(bpy.data.objects['Ground'], 2)
# Add random shadeless materials to all objects
object_colors = set()
old_materials = []
for i, obj in enumerate(blender_objects):
old_materials.append(obj.data.materials[0])
bpy.ops.material.new()
mat = bpy.data.materials['Material']
mat.name = 'Material_%d' % i
while True:
r, g, b = [random.random() for _ in range(3)]
if (r, g, b) not in object_colors: break
object_colors.add((r, g, b))
mat.diffuse_color = [r, g, b]
mat.use_shadeless = True
obj.data.materials[0] = mat
# Render the scene
bpy.ops.render.render(write_still=True)
# Undo the above; first restore the materials to objects
for mat, obj in zip(old_materials, blender_objects):
obj.data.materials[0] = mat
# Move the lights and ground back to layer 0
utils.set_layer(bpy.data.objects['Lamp_Key'], 0)
utils.set_layer(bpy.data.objects['Lamp_Fill'], 0)
utils.set_layer(bpy.data.objects['Lamp_Back'], 0)
utils.set_layer(bpy.data.objects['Ground'], 0)
# Set the render settings back to what they were
render_args.filepath = old_filepath
render_args.engine = old_engine
render_args.use_antialiasing = old_use_antialiasing
return object_colors
if __name__ == '__main__':
if INSIDE_BLENDER:
# Run normally
argv = utils.extract_args()
args = parser.parse_args(argv)
main(args)
elif '--help' in sys.argv or '-h' in sys.argv:
parser.print_help()
else:
print('This script is intended to be called from blender like this:')
print()
print('blender --background --python render_images.py -- [args]')
print()
print('You can also run as a standalone python script to view all')
print('arguments like this:')
print()
print('python render_images.py --help')
|
clevr-dataset-gen-master
|
image_generation/render_images.py
|
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import sys, random, os
import bpy, bpy_extras
"""
Some utility functions for interacting with Blender
"""
def extract_args(input_argv=None):
"""
Pull out command-line arguments after "--". Blender ignores command-line flags
after --, so this lets us forward command line arguments from the blender
invocation to our own script.
"""
if input_argv is None:
input_argv = sys.argv
output_argv = []
if '--' in input_argv:
idx = input_argv.index('--')
output_argv = input_argv[(idx + 1):]
return output_argv
def parse_args(parser, argv=None):
return parser.parse_args(extract_args(argv))
# I wonder if there's a better way to do this?
def delete_object(obj):
""" Delete a specified blender object """
for o in bpy.data.objects:
o.select = False
obj.select = True
bpy.ops.object.delete()
def get_camera_coords(cam, pos):
"""
For a specified point, get both the 3D coordinates and 2D pixel-space
coordinates of the point from the perspective of the camera.
Inputs:
- cam: Camera object
- pos: Vector giving 3D world-space position
Returns a tuple of:
- (px, py, pz): px and py give 2D image-space coordinates; pz gives depth
in the range [-1, 1]
"""
scene = bpy.context.scene
x, y, z = bpy_extras.object_utils.world_to_camera_view(scene, cam, pos)
scale = scene.render.resolution_percentage / 100.0
w = int(scale * scene.render.resolution_x)
h = int(scale * scene.render.resolution_y)
px = int(round(x * w))
py = int(round(h - y * h))
return (px, py, z)
def set_layer(obj, layer_idx):
""" Move an object to a particular layer """
# Set the target layer to True first because an object must always be on
# at least one layer.
obj.layers[layer_idx] = True
for i in range(len(obj.layers)):
obj.layers[i] = (i == layer_idx)
def add_object(object_dir, name, scale, loc, theta=0):
"""
Load an object from a file. We assume that in the directory object_dir, there
is a file named "$name.blend" which contains a single object named "$name"
that has unit size and is centered at the origin.
- scale: scalar giving the size that the object should be in the scene
- loc: tuple (x, y) giving the coordinates on the ground plane where the
object should be placed.
"""
# First figure out how many of this object are already in the scene so we can
# give the new object a unique name
count = 0
for obj in bpy.data.objects:
if obj.name.startswith(name):
count += 1
filename = os.path.join(object_dir, '%s.blend' % name, 'Object', name)
bpy.ops.wm.append(filename=filename)
# Give it a new name to avoid conflicts
new_name = '%s_%d' % (name, count)
bpy.data.objects[name].name = new_name
# Set the new object as active, then rotate, scale, and translate it
x, y = loc
bpy.context.scene.objects.active = bpy.data.objects[new_name]
bpy.context.object.rotation_euler[2] = theta
bpy.ops.transform.resize(value=(scale, scale, scale))
bpy.ops.transform.translate(value=(x, y, scale))
def load_materials(material_dir):
"""
Load materials from a directory. We assume that the directory contains .blend
files with one material each. The file X.blend has a single NodeTree item named
X; this NodeTree item must have a "Color" input that accepts an RGBA value.
"""
for fn in os.listdir(material_dir):
if not fn.endswith('.blend'): continue
name = os.path.splitext(fn)[0]
filepath = os.path.join(material_dir, fn, 'NodeTree', name)
bpy.ops.wm.append(filename=filepath)
def add_material(name, **properties):
"""
Create a new material and assign it to the active object. "name" should be the
name of a material that has been previously loaded using load_materials.
"""
# Figure out how many materials are already in the scene
mat_count = len(bpy.data.materials)
# Create a new material; it is not attached to anything and
# it will be called "Material"
bpy.ops.material.new()
# Get a reference to the material we just created and rename it;
# then the next time we make a new material it will still be called
# "Material" and we will still be able to look it up by name
mat = bpy.data.materials['Material']
mat.name = 'Material_%d' % mat_count
# Attach the new material to the active object
# Make sure it doesn't already have materials
obj = bpy.context.active_object
assert len(obj.data.materials) == 0
obj.data.materials.append(mat)
# Find the output node of the new material
output_node = None
for n in mat.node_tree.nodes:
if n.name == 'Material Output':
output_node = n
break
# Add a new GroupNode to the node tree of the active material,
# and copy the node tree from the preloaded node group to the
# new group node. This copying seems to happen by-value, so
# we can create multiple materials of the same type without them
# clobbering each other
group_node = mat.node_tree.nodes.new('ShaderNodeGroup')
group_node.node_tree = bpy.data.node_groups[name]
# Find and set the "Color" input of the new group node
for inp in group_node.inputs:
if inp.name in properties:
inp.default_value = properties[inp.name]
# Wire the output of the new group node to the input of
# the MaterialOutput node
mat.node_tree.links.new(
group_node.outputs['Shader'],
output_node.inputs['Surface'],
)
|
clevr-dataset-gen-master
|
image_generation/utils.py
|
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import json, os, math
from collections import defaultdict
"""
Utilities for working with function program representations of questions.
Some of the metadata about what question node types are available etc are stored
in a JSON metadata file.
"""
# Handlers for answering questions. Each handler receives the scene structure
# that was output from Blender, a list of values output by each of the node's
# input nodes, and the node's side inputs (value arguments); the handler should
# return the computed output value for this node.
def scene_handler(scene_struct, inputs, side_inputs):
# Just return all objects in the scene
return list(range(len(scene_struct['objects'])))
def make_filter_handler(attribute):
def filter_handler(scene_struct, inputs, side_inputs):
assert len(inputs) == 1
assert len(side_inputs) == 1
value = side_inputs[0]
output = []
for idx in inputs[0]:
atr = scene_struct['objects'][idx][attribute]
if value == atr or value in atr:
output.append(idx)
return output
return filter_handler
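# Illustrative example (not in the original file): make_filter_handler('color')
# returns a closure that keeps only the object indices whose 'color' attribute
# matches the single side input, e.g.
#   filter_color = make_filter_handler('color')
#   filter_color(scene_struct, [[0, 1, 2]], ['red'])  # -> indices of red objects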
def unique_handler(scene_struct, inputs, side_inputs):
assert len(inputs) == 1
if len(inputs[0]) != 1:
return '__INVALID__'
return inputs[0][0]
def vg_relate_handler(scene_struct, inputs, side_inputs):
assert len(inputs) == 1
assert len(side_inputs) == 1
output = set()
for rel in scene_struct['relationships']:
if rel['predicate'] == side_inputs[0] and rel['subject_idx'] == inputs[0]:
output.add(rel['object_idx'])
return sorted(list(output))
def relate_handler(scene_struct, inputs, side_inputs):
assert len(inputs) == 1
assert len(side_inputs) == 1
relation = side_inputs[0]
return scene_struct['relationships'][relation][inputs[0]]
def union_handler(scene_struct, inputs, side_inputs):
assert len(inputs) == 2
assert len(side_inputs) == 0
return sorted(list(set(inputs[0]) | set(inputs[1])))
def intersect_handler(scene_struct, inputs, side_inputs):
assert len(inputs) == 2
assert len(side_inputs) == 0
return sorted(list(set(inputs[0]) & set(inputs[1])))
def count_handler(scene_struct, inputs, side_inputs):
assert len(inputs) == 1
return len(inputs[0])
def make_same_attr_handler(attribute):
def same_attr_handler(scene_struct, inputs, side_inputs):
cache_key = '_same_%s' % attribute
if cache_key not in scene_struct:
cache = {}
for i, obj1 in enumerate(scene_struct['objects']):
same = []
for j, obj2 in enumerate(scene_struct['objects']):
if i != j and obj1[attribute] == obj2[attribute]:
same.append(j)
cache[i] = same
scene_struct[cache_key] = cache
cache = scene_struct[cache_key]
assert len(inputs) == 1
assert len(side_inputs) == 0
return cache[inputs[0]]
return same_attr_handler
def make_query_handler(attribute):
def query_handler(scene_struct, inputs, side_inputs):
assert len(inputs) == 1
assert len(side_inputs) == 0
idx = inputs[0]
obj = scene_struct['objects'][idx]
assert attribute in obj
val = obj[attribute]
if type(val) == list and len(val) != 1:
return '__INVALID__'
elif type(val) == list and len(val) == 1:
return val[0]
else:
return val
return query_handler
def exist_handler(scene_struct, inputs, side_inputs):
assert len(inputs) == 1
assert len(side_inputs) == 0
return len(inputs[0]) > 0
def equal_handler(scene_struct, inputs, side_inputs):
assert len(inputs) == 2
assert len(side_inputs) == 0
return inputs[0] == inputs[1]
def less_than_handler(scene_struct, inputs, side_inputs):
assert len(inputs) == 2
assert len(side_inputs) == 0
return inputs[0] < inputs[1]
def greater_than_handler(scene_struct, inputs, side_inputs):
assert len(inputs) == 2
assert len(side_inputs) == 0
return inputs[0] > inputs[1]
# Register all of the answering handlers here.
# TODO maybe this would be cleaner with a function decorator that takes
# care of registration? Not sure. Also what if we want to reuse the same engine
# for different sets of node types?
execute_handlers = {
'scene': scene_handler,
'filter_color': make_filter_handler('color'),
'filter_shape': make_filter_handler('shape'),
'filter_material': make_filter_handler('material'),
'filter_size': make_filter_handler('size'),
'filter_objectcategory': make_filter_handler('objectcategory'),
'unique': unique_handler,
'relate': relate_handler,
'union': union_handler,
'intersect': intersect_handler,
'count': count_handler,
'query_color': make_query_handler('color'),
'query_shape': make_query_handler('shape'),
'query_material': make_query_handler('material'),
'query_size': make_query_handler('size'),
'exist': exist_handler,
'equal_color': equal_handler,
'equal_shape': equal_handler,
'equal_integer': equal_handler,
'equal_material': equal_handler,
'equal_size': equal_handler,
'equal_object': equal_handler,
'less_than': less_than_handler,
'greater_than': greater_than_handler,
'same_color': make_same_attr_handler('color'),
'same_shape': make_same_attr_handler('shape'),
'same_size': make_same_attr_handler('size'),
'same_material': make_same_attr_handler('material'),
}
def answer_question(question, metadata, scene_struct, all_outputs=False,
cache_outputs=True):
"""
Use structured scene information to answer a structured question. Most of the
heavy lifting is done by the execute handlers defined above.
We cache node outputs in the node itself; this gives a nontrivial speedup
when we want to answer many questions that share nodes on the same scene
(such as during question-generation DFS). This will NOT work if the same
nodes are executed on different scenes.
"""
all_input_types, all_output_types = [], []
node_outputs = []
for node in question['nodes']:
if cache_outputs and '_output' in node:
node_output = node['_output']
else:
node_type = node['type']
msg = 'Could not find handler for "%s"' % node_type
assert node_type in execute_handlers, msg
handler = execute_handlers[node_type]
node_inputs = [node_outputs[idx] for idx in node['inputs']]
side_inputs = node.get('side_inputs', [])
node_output = handler(scene_struct, node_inputs, side_inputs)
if cache_outputs:
node['_output'] = node_output
node_outputs.append(node_output)
if node_output == '__INVALID__':
break
if all_outputs:
return node_outputs
else:
return node_outputs[-1]
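# Illustrative sketch (not part of the original file): a minimal two-node program
# that counts every object in a scene. `metadata` is unused by these handlers but
# answer_question expects it positionally.
def _example_count_all_objects(metadata, scene_struct):
    question = {'nodes': [
        {'type': 'scene', 'inputs': []},
        {'type': 'count', 'inputs': [0]},
    ]}
    return answer_question(question, metadata, scene_struct)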
def insert_scene_node(nodes, idx):
# First make a shallow-ish copy of the input
new_nodes = []
for node in nodes:
new_node = {
'type': node['type'],
'inputs': node['inputs'],
}
if 'side_inputs' in node:
new_node['side_inputs'] = node['side_inputs']
new_nodes.append(new_node)
# Replace the specified index with a scene node
new_nodes[idx] = {'type': 'scene', 'inputs': []}
# Search backwards from the last node to see which nodes are actually used
output_used = [False] * len(new_nodes)
idxs_to_check = [len(new_nodes) - 1]
while idxs_to_check:
cur_idx = idxs_to_check.pop()
output_used[cur_idx] = True
idxs_to_check.extend(new_nodes[cur_idx]['inputs'])
# Iterate through nodes, keeping only those whose output is used;
# at the same time build up a mapping from old idxs to new idxs
old_idx_to_new_idx = {}
new_nodes_trimmed = []
for old_idx, node in enumerate(new_nodes):
if output_used[old_idx]:
new_idx = len(new_nodes_trimmed)
new_nodes_trimmed.append(node)
old_idx_to_new_idx[old_idx] = new_idx
# Finally go through the list of trimmed nodes and change the inputs
for node in new_nodes_trimmed:
new_inputs = []
for old_idx in node['inputs']:
new_inputs.append(old_idx_to_new_idx[old_idx])
node['inputs'] = new_inputs
return new_nodes_trimmed
def is_degenerate(question, metadata, scene_struct, answer=None, verbose=False):
"""
A question is degenerate if replacing any of its relate nodes with a scene
node results in a question with the same answer.
"""
if answer is None:
answer = answer_question(question, metadata, scene_struct)
for idx, node in enumerate(question['nodes']):
if node['type'] == 'relate':
new_question = {
'nodes': insert_scene_node(question['nodes'], idx)
}
new_answer = answer_question(new_question, metadata, scene_struct)
if verbose:
print('here is truncated question:')
for i, n in enumerate(new_question['nodes']):
name = n['type']
if 'side_inputs' in n:
name = '%s[%s]' % (name, n['side_inputs'][0])
print(i, name, n['_output'])
print('new answer is: ', new_answer)
if new_answer == answer:
return True
return False
|
clevr-dataset-gen-master
|
question_generation/question_engine.py
|
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import print_function
import argparse, json, os, itertools, random, shutil
import time
import re
import question_engine as qeng
"""
Generate synthetic questions and answers for CLEVR images. Input is a single
JSON file containing ground-truth scene information for all images, and output
is a single JSON file containing all generated questions, answers, and programs.
Questions are generated by expanding templates. Each template contains a single
program template and one or more text templates, both with the same set of typed
slots; by convention <Z> = Size, <C> = Color, <M> = Material, <S> = Shape.
Program templates may contain special nodes that expand into multiple functions
during instantiation; for example a "filter" node in a program template will
expand into a combination of "filter_size", "filter_color", "filter_material",
and "filter_shape" nodes after instantiation, and a "filter_unique" node in a
template will expand into some combination of filtering nodes followed by a
"unique" node.
Templates are instantiated using depth-first search; we are looking for template
instantiations where (1) each "unique" node actually refers to a single object,
(2) constraints in the template are satisfied, and (3) the answer to the question
passes our rejection sampling heuristics.
To efficiently handle (1) and (2), we keep track of partial evaluations of the
program during each step of template expansion. This together with the use of
composite nodes in program templates (filter_unique, relate_filter_unique) allow
us to efficiently prune the search space and terminate early when we know that
(1) or (2) will be violated.
"""
parser = argparse.ArgumentParser()
# Inputs
parser.add_argument('--input_scene_file', default='../output/CLEVR_scenes.json',
help="JSON file containing ground-truth scene information for all images " +
"from render_images.py")
parser.add_argument('--metadata_file', default='metadata.json',
help="JSON file containing metadata about functions")
parser.add_argument('--synonyms_json', default='synonyms.json',
help="JSON file defining synonyms for parameter values")
parser.add_argument('--template_dir', default='CLEVR_1.0_templates',
help="Directory containing JSON templates for questions")
# Output
parser.add_argument('--output_questions_file',
default='../output/CLEVR_questions.json',
help="The output file to write containing generated questions")
# Control which and how many images to process
parser.add_argument('--scene_start_idx', default=0, type=int,
help="The image at which to start generating questions; this allows " +
"question generation to be split across many workers")
parser.add_argument('--num_scenes', default=0, type=int,
help="The number of images for which to generate questions. Setting to 0 " +
"generates questions for all scenes in the input file starting from " +
"--scene_start_idx")
# Control the number of questions per image; we will attempt to generate
# templates_per_image * instances_per_template questions per image.
parser.add_argument('--templates_per_image', default=10, type=int,
help="The number of different templates that should be instantiated " +
"on each image")
parser.add_argument('--instances_per_template', default=1, type=int,
help="The number of times each template should be instantiated on an image")
# Misc
parser.add_argument('--reset_counts_every', default=250, type=int,
help="How often to reset template and answer counts. Higher values will " +
"result in flatter distributions over templates and answers, but " +
"will result in longer runtimes.")
parser.add_argument('--verbose', action='store_true',
help="Print more verbose output")
parser.add_argument('--time_dfs', action='store_true',
help="Time each depth-first search; must be given with --verbose")
parser.add_argument('--profile', action='store_true',
help="If given then run inside cProfile")
# args = parser.parse_args()
def precompute_filter_options(scene_struct, metadata):
  # Keys are tuples (size, color, material, shape) (where some may be None)
  # and values are sets of object idxs that match the filter criterion
attribute_map = {}
if metadata['dataset'] == 'CLEVR-v1.0':
attr_keys = ['size', 'color', 'material', 'shape']
else:
assert False, 'Unrecognized dataset'
# Precompute masks
masks = []
for i in range(2 ** len(attr_keys)):
mask = []
for j in range(len(attr_keys)):
mask.append((i // (2 ** j)) % 2)
masks.append(mask)
for object_idx, obj in enumerate(scene_struct['objects']):
if metadata['dataset'] == 'CLEVR-v1.0':
keys = [tuple(obj[k] for k in attr_keys)]
for mask in masks:
for key in keys:
masked_key = []
for a, b in zip(key, mask):
if b == 1:
masked_key.append(a)
else:
masked_key.append(None)
masked_key = tuple(masked_key)
if masked_key not in attribute_map:
attribute_map[masked_key] = set()
attribute_map[masked_key].add(object_idx)
scene_struct['_filter_options'] = attribute_map
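# For example, with attr_keys = ['size', 'color', 'material', 'shape'], an
# object ('large', 'red', 'metal', 'cube') contributes its index to all
# 2 ** 4 = 16 masked keys, from (None, None, None, None) up to the full
# ('large', 'red', 'metal', 'cube') -- i.e. every subset of its attributes.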
def find_filter_options(object_idxs, scene_struct, metadata):
  # Keys are tuples (size, color, material, shape) (where some may be None)
  # and values are sorted lists of object idxs that match the filter criterion
if '_filter_options' not in scene_struct:
precompute_filter_options(scene_struct, metadata)
attribute_map = {}
object_idxs = set(object_idxs)
for k, vs in scene_struct['_filter_options'].items():
attribute_map[k] = sorted(list(object_idxs & vs))
return attribute_map
def add_empty_filter_options(attribute_map, metadata, num_to_add):
  # Add some filtering criteria that do NOT correspond to any objects
if metadata['dataset'] == 'CLEVR-v1.0':
attr_keys = ['Size', 'Color', 'Material', 'Shape']
else:
assert False, 'Unrecognized dataset'
attr_vals = [metadata['types'][t] + [None] for t in attr_keys]
if '_filter_options' in metadata:
attr_vals = metadata['_filter_options']
target_size = len(attribute_map) + num_to_add
while len(attribute_map) < target_size:
    k = tuple(random.choice(v) for v in attr_vals)
if k not in attribute_map:
attribute_map[k] = []
def find_relate_filter_options(object_idx, scene_struct, metadata,
unique=False, include_zero=False, trivial_frac=0.1):
options = {}
if '_filter_options' not in scene_struct:
precompute_filter_options(scene_struct, metadata)
# TODO: Right now this is only looking for nontrivial combinations; in some
# cases I may want to add trivial combinations, either where the intersection
# is empty or where the intersection is equal to the filtering output.
trivial_options = {}
for relationship in scene_struct['relationships']:
related = set(scene_struct['relationships'][relationship][object_idx])
for filters, filtered in scene_struct['_filter_options'].items():
intersection = related & filtered
trivial = (intersection == filtered)
if unique and len(intersection) != 1: continue
if not include_zero and len(intersection) == 0: continue
if trivial:
trivial_options[(relationship, filters)] = sorted(list(intersection))
else:
options[(relationship, filters)] = sorted(list(intersection))
N, f = len(options), trivial_frac
num_trivial = int(round(N * f / (1 - f)))
trivial_options = list(trivial_options.items())
random.shuffle(trivial_options)
for k, v in trivial_options[:num_trivial]:
options[k] = v
return options
def node_shallow_copy(node):
new_node = {
'type': node['type'],
'inputs': node['inputs'],
}
if 'side_inputs' in node:
new_node['side_inputs'] = node['side_inputs']
return new_node
def other_heuristic(text, param_vals):
"""
Post-processing heuristic to handle the word "other"
"""
if ' other ' not in text and ' another ' not in text:
return text
target_keys = {
'<Z>', '<C>', '<M>', '<S>',
'<Z2>', '<C2>', '<M2>', '<S2>',
}
if param_vals.keys() != target_keys:
return text
key_pairs = [
('<Z>', '<Z2>'),
('<C>', '<C2>'),
('<M>', '<M2>'),
('<S>', '<S2>'),
]
remove_other = False
for k1, k2 in key_pairs:
v1 = param_vals.get(k1, None)
v2 = param_vals.get(k2, None)
if v1 != '' and v2 != '' and v1 != v2:
print('other has got to go! %s = %s but %s = %s'
% (k1, v1, k2, v2))
remove_other = True
break
if remove_other:
if ' other ' in text:
text = text.replace(' other ', ' ')
if ' another ' in text:
text = text.replace(' another ', ' a ')
return text
def instantiate_templates_dfs(scene_struct, template, metadata, answer_counts,
synonyms, max_instances=None, verbose=False):
param_name_to_type = {p['name']: p['type'] for p in template['params']}
initial_state = {
'nodes': [node_shallow_copy(template['nodes'][0])],
'vals': {},
'input_map': {0: 0},
'next_template_node': 1,
}
states = [initial_state]
final_states = []
while states:
state = states.pop()
# Check to make sure the current state is valid
q = {'nodes': state['nodes']}
outputs = qeng.answer_question(q, metadata, scene_struct, all_outputs=True)
answer = outputs[-1]
if answer == '__INVALID__': continue
# Check to make sure constraints are satisfied for the current state
skip_state = False
for constraint in template['constraints']:
if constraint['type'] == 'NEQ':
p1, p2 = constraint['params']
v1, v2 = state['vals'].get(p1), state['vals'].get(p2)
if v1 is not None and v2 is not None and v1 != v2:
if verbose:
print('skipping due to NEQ constraint')
print(constraint)
print(state['vals'])
skip_state = True
break
elif constraint['type'] == 'NULL':
p = constraint['params'][0]
p_type = param_name_to_type[p]
v = state['vals'].get(p)
if v is not None:
skip = False
if p_type == 'Shape' and v != 'thing': skip = True
if p_type != 'Shape' and v != '': skip = True
if skip:
if verbose:
print('skipping due to NULL constraint')
print(constraint)
print(state['vals'])
skip_state = True
break
elif constraint['type'] == 'OUT_NEQ':
i, j = constraint['params']
i = state['input_map'].get(i, None)
j = state['input_map'].get(j, None)
if i is not None and j is not None and outputs[i] == outputs[j]:
if verbose:
print('skipping due to OUT_NEQ constraint')
print(outputs[i])
print(outputs[j])
skip_state = True
break
else:
assert False, 'Unrecognized constraint type "%s"' % constraint['type']
if skip_state:
continue
# We have already checked to make sure the answer is valid, so if we have
# processed all the nodes in the template then the current state is a valid
# question, so add it if it passes our rejection sampling tests.
if state['next_template_node'] == len(template['nodes']):
# Use our rejection sampling heuristics to decide whether we should
# keep this template instantiation
cur_answer_count = answer_counts[answer]
answer_counts_sorted = sorted(answer_counts.values())
median_count = answer_counts_sorted[len(answer_counts_sorted) // 2]
median_count = max(median_count, 5)
if cur_answer_count > 1.1 * answer_counts_sorted[-2]:
if verbose: print('skipping due to second count')
continue
if cur_answer_count > 5.0 * median_count:
if verbose: print('skipping due to median')
continue
# If the template contains a raw relate node then we need to check for
# degeneracy at the end
has_relate = any(n['type'] == 'relate' for n in template['nodes'])
if has_relate:
degen = qeng.is_degenerate(q, metadata, scene_struct, answer=answer,
verbose=verbose)
if degen:
continue
answer_counts[answer] += 1
state['answer'] = answer
final_states.append(state)
if max_instances is not None and len(final_states) == max_instances:
break
continue
# Otherwise fetch the next node from the template
# Make a shallow copy so cached _outputs don't leak ... this is very nasty
next_node = template['nodes'][state['next_template_node']]
next_node = node_shallow_copy(next_node)
special_nodes = {
'filter_unique', 'filter_count', 'filter_exist', 'filter',
'relate_filter', 'relate_filter_unique', 'relate_filter_count',
'relate_filter_exist',
}
if next_node['type'] in special_nodes:
if next_node['type'].startswith('relate_filter'):
unique = (next_node['type'] == 'relate_filter_unique')
include_zero = (next_node['type'] == 'relate_filter_count'
or next_node['type'] == 'relate_filter_exist')
filter_options = find_relate_filter_options(answer, scene_struct, metadata,
unique=unique, include_zero=include_zero)
else:
filter_options = find_filter_options(answer, scene_struct, metadata)
if next_node['type'] == 'filter':
# Remove null filter
filter_options.pop((None, None, None, None), None)
if next_node['type'] == 'filter_unique':
# Get rid of all filter options that don't result in a single object
filter_options = {k: v for k, v in filter_options.items()
if len(v) == 1}
else:
# Add some filter options that do NOT correspond to the scene
if next_node['type'] == 'filter_exist':
# For filter_exist we want an equal number that do and don't
num_to_add = len(filter_options)
elif next_node['type'] == 'filter_count' or next_node['type'] == 'filter':
# For filter_count add nulls equal to the number of singletons
num_to_add = sum(1 for k, v in filter_options.items() if len(v) == 1)
add_empty_filter_options(filter_options, metadata, num_to_add)
filter_option_keys = list(filter_options.keys())
random.shuffle(filter_option_keys)
for k in filter_option_keys:
new_nodes = []
cur_next_vals = {k: v for k, v in state['vals'].items()}
next_input = state['input_map'][next_node['inputs'][0]]
filter_side_inputs = next_node['side_inputs']
if next_node['type'].startswith('relate'):
param_name = next_node['side_inputs'][0] # First one should be relate
filter_side_inputs = next_node['side_inputs'][1:]
param_type = param_name_to_type[param_name]
assert param_type == 'Relation'
param_val = k[0]
k = k[1]
new_nodes.append({
'type': 'relate',
'inputs': [next_input],
'side_inputs': [param_val],
})
cur_next_vals[param_name] = param_val
next_input = len(state['nodes']) + len(new_nodes) - 1
for param_name, param_val in zip(filter_side_inputs, k):
param_type = param_name_to_type[param_name]
filter_type = 'filter_%s' % param_type.lower()
if param_val is not None:
new_nodes.append({
'type': filter_type,
'inputs': [next_input],
'side_inputs': [param_val],
})
cur_next_vals[param_name] = param_val
next_input = len(state['nodes']) + len(new_nodes) - 1
elif param_val is None:
if metadata['dataset'] == 'CLEVR-v1.0' and param_type == 'Shape':
param_val = 'thing'
else:
param_val = ''
cur_next_vals[param_name] = param_val
input_map = {k: v for k, v in state['input_map'].items()}
extra_type = None
if next_node['type'].endswith('unique'):
extra_type = 'unique'
if next_node['type'].endswith('count'):
extra_type = 'count'
if next_node['type'].endswith('exist'):
extra_type = 'exist'
if extra_type is not None:
new_nodes.append({
'type': extra_type,
'inputs': [input_map[next_node['inputs'][0]] + len(new_nodes)],
})
input_map[state['next_template_node']] = len(state['nodes']) + len(new_nodes) - 1
states.append({
'nodes': state['nodes'] + new_nodes,
'vals': cur_next_vals,
'input_map': input_map,
'next_template_node': state['next_template_node'] + 1,
})
elif 'side_inputs' in next_node:
# If the next node has template parameters, expand them out
# TODO: Generalize this to work for nodes with more than one side input
assert len(next_node['side_inputs']) == 1, 'NOT IMPLEMENTED'
# Use metadata to figure out domain of valid values for this parameter.
# Iterate over the values in a random order; then it is safe to bail
# from the DFS as soon as we find the desired number of valid template
# instantiations.
param_name = next_node['side_inputs'][0]
param_type = param_name_to_type[param_name]
param_vals = metadata['types'][param_type][:]
random.shuffle(param_vals)
for val in param_vals:
input_map = {k: v for k, v in state['input_map'].items()}
input_map[state['next_template_node']] = len(state['nodes'])
cur_next_node = {
'type': next_node['type'],
'inputs': [input_map[idx] for idx in next_node['inputs']],
'side_inputs': [val],
}
cur_next_vals = {k: v for k, v in state['vals'].items()}
cur_next_vals[param_name] = val
states.append({
'nodes': state['nodes'] + [cur_next_node],
'vals': cur_next_vals,
'input_map': input_map,
'next_template_node': state['next_template_node'] + 1,
})
else:
input_map = {k: v for k, v in state['input_map'].items()}
input_map[state['next_template_node']] = len(state['nodes'])
next_node = {
'type': next_node['type'],
'inputs': [input_map[idx] for idx in next_node['inputs']],
}
states.append({
'nodes': state['nodes'] + [next_node],
'vals': state['vals'],
'input_map': input_map,
'next_template_node': state['next_template_node'] + 1,
})
# Actually instantiate the template with the solutions we've found
text_questions, structured_questions, answers = [], [], []
for state in final_states:
structured_questions.append(state['nodes'])
answers.append(state['answer'])
text = random.choice(template['text'])
for name, val in state['vals'].items():
if val in synonyms:
val = random.choice(synonyms[val])
text = text.replace(name, val)
text = ' '.join(text.split())
text = replace_optionals(text)
text = ' '.join(text.split())
text = other_heuristic(text, state['vals'])
text_questions.append(text)
return text_questions, structured_questions, answers
def replace_optionals(s):
"""
Each substring of s that is surrounded in square brackets is treated as
optional and is removed with probability 0.5. For example the string
"A [aa] B [bb]"
could become any of
"A aa B bb"
"A B bb"
"A aa B "
"A B "
with probability 1/4.
"""
pat = re.compile(r'\[([^\[]*)\]')
while True:
match = re.search(pat, s)
if not match:
break
i0 = match.start()
i1 = match.end()
if random.random() > 0.5:
s = s[:i0] + match.groups()[0] + s[i1:]
else:
s = s[:i0] + s[i1:]
return s
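# For example, replace_optionals('There is a [big] red [metal] cube') keeps or
# drops each bracketed span independently with probability 0.5, giving four
# possible outputs; leftover double spaces are squashed by the caller via
# ' '.join(text.split()).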
def main(args):
with open(args.metadata_file, 'r') as f:
metadata = json.load(f)
dataset = metadata['dataset']
if dataset != 'CLEVR-v1.0':
raise ValueError('Unrecognized dataset "%s"' % dataset)
functions_by_name = {}
for f in metadata['functions']:
functions_by_name[f['name']] = f
metadata['_functions_by_name'] = functions_by_name
# Load templates from disk
# Key is (filename, file_idx)
num_loaded_templates = 0
templates = {}
for fn in os.listdir(args.template_dir):
if not fn.endswith('.json'): continue
with open(os.path.join(args.template_dir, fn), 'r') as f:
base = os.path.splitext(fn)[0]
for i, template in enumerate(json.load(f)):
num_loaded_templates += 1
key = (fn, i)
templates[key] = template
print('Read %d templates from disk' % num_loaded_templates)
def reset_counts():
# Maps a template (filename, index) to the number of questions we have
# so far using that template
template_counts = {}
# Maps a template (filename, index) to a dict mapping the answer to the
# number of questions so far of that template type with that answer
template_answer_counts = {}
node_type_to_dtype = {n['name']: n['output'] for n in metadata['functions']}
for key, template in templates.items():
template_counts[key[:2]] = 0
final_node_type = template['nodes'][-1]['type']
final_dtype = node_type_to_dtype[final_node_type]
answers = metadata['types'][final_dtype]
if final_dtype == 'Bool':
answers = [True, False]
if final_dtype == 'Integer':
if metadata['dataset'] == 'CLEVR-v1.0':
answers = list(range(0, 11))
template_answer_counts[key[:2]] = {}
for a in answers:
template_answer_counts[key[:2]][a] = 0
return template_counts, template_answer_counts
template_counts, template_answer_counts = reset_counts()
# Read file containing input scenes
all_scenes = []
with open(args.input_scene_file, 'r') as f:
scene_data = json.load(f)
all_scenes = scene_data['scenes']
scene_info = scene_data['info']
begin = args.scene_start_idx
if args.num_scenes > 0:
end = args.scene_start_idx + args.num_scenes
all_scenes = all_scenes[begin:end]
else:
all_scenes = all_scenes[begin:]
# Read synonyms file
with open(args.synonyms_json, 'r') as f:
synonyms = json.load(f)
questions = []
scene_count = 0
for i, scene in enumerate(all_scenes):
scene_fn = scene['image_filename']
scene_struct = scene
print('starting image %s (%d / %d)'
% (scene_fn, i + 1, len(all_scenes)))
if scene_count % args.reset_counts_every == 0:
print('resetting counts')
template_counts, template_answer_counts = reset_counts()
scene_count += 1
# Order templates by the number of questions we have so far for those
# templates. This is a simple heuristic to give a flat distribution over
# templates.
templates_items = list(templates.items())
templates_items = sorted(templates_items,
key=lambda x: template_counts[x[0][:2]])
num_instantiated = 0
for (fn, idx), template in templates_items:
if args.verbose:
print('trying template ', fn, idx)
if args.time_dfs and args.verbose:
tic = time.time()
ts, qs, ans = instantiate_templates_dfs(
scene_struct,
template,
metadata,
template_answer_counts[(fn, idx)],
synonyms,
max_instances=args.instances_per_template,
verbose=False)
if args.time_dfs and args.verbose:
toc = time.time()
print('that took ', toc - tic)
image_index = int(os.path.splitext(scene_fn)[0].split('_')[-1])
for t, q, a in zip(ts, qs, ans):
questions.append({
'split': scene_info['split'],
'image_filename': scene_fn,
'image_index': image_index,
'image': os.path.splitext(scene_fn)[0],
'question': t,
'program': q,
'answer': a,
'template_filename': fn,
'question_family_index': idx,
'question_index': len(questions),
})
if len(ts) > 0:
if args.verbose:
print('got one!')
num_instantiated += 1
template_counts[(fn, idx)] += 1
elif args.verbose:
print('did not get any =(')
if num_instantiated >= args.templates_per_image:
break
# Change "side_inputs" to "value_inputs" in all functions of all functional
# programs. My original name for these was "side_inputs" but I decided to
# change the name to "value_inputs" for the public CLEVR release. I should
# probably go through all question generation code and templates and rename,
# but that could be tricky and take a while, so instead I'll just do it here.
# To further complicate things, originally functions without value inputs did
# not have a "side_inputs" field at all, and I'm pretty sure this fact is used
# in some of the code above; however in the public CLEVR release all functions
# have a "value_inputs" field, and it's an empty list for functions that take
# no value inputs. Again this should probably be refactored, but the quick and
# dirty solution is to keep the code above as-is, but here make "value_inputs"
# an empty list for those functions that do not have "side_inputs". Gross.
for q in questions:
for f in q['program']:
if 'side_inputs' in f:
f['value_inputs'] = f['side_inputs']
del f['side_inputs']
else:
f['value_inputs'] = []
with open(args.output_questions_file, 'w') as f:
print('Writing output to %s' % args.output_questions_file)
json.dump({
'info': scene_info,
'questions': questions,
}, f)
if __name__ == '__main__':
args = parser.parse_args()
if args.profile:
import cProfile
cProfile.run('main(args)')
else:
main(args)
|
clevr-dataset-gen-master
|
question_generation/generate_questions.py
|
# A script which allows command line arguments to be layered dynamically on top of a yaml config file.
# Mostly, this is just a wrapper which runs train.main() after handling the --gpus and --data-dir arguments.
import os
import yaml
import sys
def parse_as_type(n):
if type(n) is list:
return [parse_as_type(x) for x in n]
try:
return int(n)
except ValueError:
pass
try:
return float(n)
except ValueError:
pass
return n
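# e.g. parse_as_type("3") -> 3, parse_as_type("0.5") -> 0.5,
#      parse_as_type(["1", "a"]) -> [1, "a"], parse_as_type("abc") -> "abc"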
def main():
    if len(sys.argv) > 1 and sys.argv[1][:4] == "app:":
yaml_file = sys.argv[1].split(":")[-1]
has_yaml_file = True
else:
yaml_file = "apps/default_cifar.yml"
has_yaml_file = False
with open(yaml_file, "r") as f:
yaml_map = yaml.safe_load(f)
if "/" in yaml_file:
yaml_file = yaml_file.split("/")[-1]
argmap = {"base": yaml_file.replace(".yml", "")}
i = 2 if has_yaml_file else 1
while i < len(sys.argv):
k = sys.argv[i][2:]
j = i + 1
while (
j < len(sys.argv) - 1
and not sys.argv[j + 1][0] == "-"
and not sys.argv[j][0] == "-"
):
j = j + 1
if j >= len(sys.argv) or sys.argv[j][0] == "-":
if len(k) > 2 and k[:2] == "no":
argmap[k[2:]] = False
else:
argmap[k] = True
elif j == i + 1:
argmap[k] = parse_as_type(sys.argv[j])
else:
argmap[k] = parse_as_type(sys.argv[i + 1 : j + 1])
i = j if j < len(sys.argv) and sys.argv[j][0] == "-" else j + 1
# Set environment variables.
if "gpus" in argmap:
if type(argmap["gpus"]) is not list:
argmap["gpus"] = [argmap["gpus"]]
os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(str(x) for x in argmap["gpus"])
del argmap["gpus"]
if "data-dir" in argmap:
os.environ["DATA_DIR"] = argmap["data-dir"]
del argmap["data-dir"]
if "save_dir" in argmap:
del argmap["save_dir"]
if "log_dir" in argmap:
del argmap["log_dir"]
title = "_".join([k + "=" + str(v).replace(" ", "") for k, v in argmap.items()])
argmap["title"] = title
# write yaml.
yaml_map.update(argmap)
if not os.path.exists("apps/gen/"):
os.mkdir("apps/gen/")
new_yaml_file = os.path.join("apps/gen/", "{}.yml".format(title.replace("/", ".")))
with open(new_yaml_file, "w") as f:
yaml.dump(yaml_map, f)
# run
from genutil.config import reset_app
reset_app(new_yaml_file)
import train as train
train.main()
if __name__ == "__main__":
main()
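# Illustrative invocation (all paths and flags here are hypothetical):
#   python runner.py app:apps/my_experiment.yml --gpus 0 1 --data-dir /data --lr 0.1
# sets CUDA_VISIBLE_DEVICES=0,1 and DATA_DIR=/data, writes
# apps/gen/base=my_experiment_lr=0.1.yml, and then calls train.main() on it.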
|
dnw-master
|
runner.py
|
""" General structure of train.py borrowed from https://github.com/JiahuiYu/slimmable_networks """
import importlib
import os
import time
import random
import sys
import torch
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
import torchvision.utils as vutils
import numpy as np
import torch.multiprocessing
torch.multiprocessing.set_sharing_strategy("file_system")
from genutil.config import FLAGS
from genutil.model_profiling import model_profiling
from tensorboardX import SummaryWriter
best_acc1 = 0
writer = None
def getter(name):
name = getattr(FLAGS, name)
if ":" in name:
name = name.split(":")
return getattr(importlib.import_module(name[0]), name[1])
return importlib.import_module(name)
def get_lr_scheduler(optimizer):
    """Get the learning rate scheduler specified by FLAGS.lr_scheduler."""
if FLAGS.lr_scheduler == "multistep":
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer,
milestones=FLAGS.multistep_lr_milestones,
gamma=FLAGS.multistep_lr_gamma,
)
elif FLAGS.lr_scheduler == "exp_decaying":
lr_dict = {}
for i in range(FLAGS.num_epochs):
if i == 0:
lr_dict[i] = 1
else:
lr_dict[i] = lr_dict[i - 1] * FLAGS.exp_decaying_lr_gamma
lr_lambda = lambda epoch: lr_dict[epoch]
lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)
elif FLAGS.lr_scheduler == "linear_decaying":
lr_dict = {}
for i in range(FLAGS.num_epochs):
lr_dict[i] = 1.0 - i / FLAGS.num_epochs
lr_lambda = lambda epoch: lr_dict[epoch]
lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)
elif FLAGS.lr_scheduler == "cosine":
if hasattr(FLAGS, "epoch_len"):
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, FLAGS.epoch_len * FLAGS.num_epochs
)
else:
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, FLAGS.len_loader * FLAGS.num_epochs
)
else:
try:
lr_scheduler_lib = importlib.import_module(FLAGS.lr_scheduler)
return lr_scheduler_lib.get_lr_scheduler(optimizer)
except ImportError:
raise NotImplementedError(
"Learning rate scheduler {} is not yet implemented.".format(
FLAGS.lr_scheduler
)
)
return lr_scheduler
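# For example, with lr_scheduler == "exp_decaying" and exp_decaying_lr_gamma
# equal to 0.9, the multiplier applied at epoch i is 0.9 ** i (1.0, 0.9, 0.81, ...).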
def get_optimizer(model):
"""get optimizer"""
if FLAGS.optimizer == "sgd":
optimizer = torch.optim.SGD(
model.parameters(),
FLAGS.lr,
momentum=FLAGS.momentum,
weight_decay=FLAGS.weight_decay,
nesterov=FLAGS.nestorov,
)
else:
try:
optimizer_lib = importlib.import_module(FLAGS.optimizer)
return optimizer_lib.get_optimizer(model)
except ImportError:
raise NotImplementedError(
"Optimizer {} is not yet implemented.".format(FLAGS.optimizer)
)
return optimizer
class Meter(object):
    """Meter keeps track of statistics across steps.
    Meters cache values for purposes such as printing average values.
    Meters can be flushed to log files (e.g. TensorBoard) regularly.
Args:
name (str): the name of meter
"""
def __init__(self, name):
self.name = name
self.steps = 0
self.reset()
def reset(self):
self.values = []
def cache(self, value, pstep=1):
self.steps += pstep
self.values.append(value)
def cache_list(self, value_list, pstep=1):
self.steps += pstep
self.values += value_list
def flush(self, value, reset=True):
pass
class ScalarMeter(Meter):
"""ScalarMeter records scalar over steps.
"""
def __init__(self, name):
super(ScalarMeter, self).__init__(name)
def flush(self, value, step=-1, reset=True):
if reset:
self.reset()
def flush_scalar_meters(meters, method="avg"):
    """Flush all ScalarMeters in `meters` with the given reduction and return a dict of results."""
results = {}
assert isinstance(meters, dict), "meters should be a dict."
for name, meter in meters.items():
if not isinstance(meter, ScalarMeter):
continue
if method == "avg":
if len(meter.values) == 0:
value = 0
else:
value = sum(meter.values) / len(meter.values)
elif method == "sum":
value = sum(meter.values)
elif method == "max":
value = max(meter.values)
elif method == "min":
value = min(meter.values)
else:
raise NotImplementedError(
"flush method: {} is not yet implemented.".format(method)
)
results[name] = value
meter.flush(value)
return results
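# Minimal usage sketch (illustrative):
#   m = ScalarMeter("train_CELoss")
#   m.cache(1.0); m.cache(2.0)
#   flush_scalar_meters({"CELoss": m})  # -> {"CELoss": 1.5}, and m is reset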
def set_random_seed():
"""set random seed"""
if hasattr(FLAGS, "random_seed"):
seed = FLAGS.random_seed
else:
seed = 0
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
def get_meters(phase, model):
"""util function for meters"""
meters = {}
meters["CELoss"] = ScalarMeter("{}_CELoss".format(phase))
for k in FLAGS.topk:
meters["top{}_accuracy".format(k)] = ScalarMeter(
"{}_top{}_accuracy".format(phase, k)
)
if hasattr(model, 'module') and hasattr(model.module, "__losses__"):
loss_info = model.module.__losses__
for i in range(1, len(loss_info)):
meters[loss_info[i][0]] = ScalarMeter(
"{}_{}".format(loss_info[i][0], phase)
)
meters["total_loss"] = ScalarMeter("{}_total_loss".format(phase))
return meters
def forward_loss(model, criterion, input, target, meter):
    """Forward pass; cache the loss terms and top-k accuracies in `meter`."""
output = model(input)
if type(output) is tuple:
assert hasattr(model.module, "__losses__")
losses_info = model.module.__losses__
loss = torch.mean(criterion(output[0], target))
meter["CELoss"].cache(loss.cpu().detach().numpy())
loss = loss * losses_info[0][1]
for i in range(1, len(output)):
ext_loss = torch.mean(output[i])
meter[losses_info[i][0]].cache(ext_loss.cpu().detach().numpy())
loss = loss + ext_loss * losses_info[i][1]
meter["total_loss"].cache(loss.cpu().detach().numpy())
output = output[0]
else:
loss = torch.mean(criterion(output, target))
meter["CELoss"].cache(loss.cpu().detach().numpy())
# topk
_, pred = output.topk(max(FLAGS.topk))
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
for k in FLAGS.topk:
correct_k = correct[:k].float().sum(0)
accuracy_list = list(correct_k.cpu().detach().numpy())
meter["top{}_accuracy".format(k)].cache_list(accuracy_list)
return loss
def run_one_epoch(
epoch,
loader,
model,
criterion,
optimizer,
meters,
phase="train",
iter=0.0,
scheduler=None,
):
"""run one epoch for train/val/test"""
print("epoch:", epoch, "phase:", phase)
model.apply(lambda m: setattr(m, "epoch", epoch))
t_start = time.time()
    assert phase in ["train", "val", "test"], "phase must be one of train/val/test."
train = phase == "train"
if train:
model.train()
else:
model.eval()
if train and FLAGS.lr_scheduler == "linear_decaying":
if hasattr(FLAGS, "epoch_len"):
linear_decaying_per_step = (
FLAGS.lr / FLAGS.num_epochs / FLAGS.epoch_len * FLAGS.batch_size
)
else:
linear_decaying_per_step = (
FLAGS.lr / FLAGS.num_epochs / len(loader.dataset) * FLAGS.batch_size
)
end = time.time()
for batch_idx, (input, target) in enumerate(loader):
data_time = time.time() - end
input, target = (
input.to(FLAGS.device, non_blocking=True),
target.to(FLAGS.device, non_blocking=True),
)
if train:
############################## Train ################################
if FLAGS.lr_scheduler == "linear_decaying":
for param_group in optimizer.param_groups:
param_group["lr"] -= linear_decaying_per_step
elif FLAGS.lr_scheduler == "cosine":
scheduler.step()
iter += 1
optimizer.zero_grad()
loss = forward_loss(model, criterion, input, target, meters)
loss.backward()
optimizer.step()
else:
############################### VAL #################################
loss = forward_loss(model, criterion, input, target, meters)
batch_time = time.time() - end
end = time.time()
if (batch_idx % 10) == 0:
print(
"Epoch: [{}][{}/{}]\tTime {:.3f}\tData {:.3f}\tLoss {:.3f}\t".format(
epoch, batch_idx, len(loader), batch_time, data_time, loss.item()
)
)
# Log.
writer.add_scalar(phase + "/epoch_time", time.time() - t_start, epoch)
results = flush_scalar_meters(meters)
print(
"{:.1f}s\t{}\t{}/{}: ".format(
time.time() - t_start, phase, epoch, FLAGS.num_epochs
)
+ ", ".join("{}: {:.3f}".format(k, v) for k, v in results.items())
)
for k, v in results.items():
if k != "best_val":
writer.add_scalar(phase + "/" + k, v, epoch)
# Visualize the adjacency matrix.
if hasattr(model.module, "get_weight"):
weights = model.module.get_weight()
if type(weights) is list:
for i, w in enumerate(weights):
w = w.squeeze().t()
nz = (w != 0).float()
nz_grid = vutils.make_grid(nz)
writer.add_image(phase + "/non_zero_{}".format(i), nz_grid, epoch)
else:
w = weights.squeeze().t()
nz = (w != 0).float()
nz_grid = vutils.make_grid(nz)
writer.add_image(phase + "/non_zero", nz_grid, epoch)
if train:
return results, iter
return results
def train_val_test():
global writer
if not os.path.exists(FLAGS.save_dir):
os.mkdir(FLAGS.save_dir)
# Set data_dir.
FLAGS.data_dir = os.environ["DATA_DIR"]
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
FLAGS.device = device
if hasattr(FLAGS, "random_seed"):
seed = FLAGS.random_seed
else:
seed = 0
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
print("=> loading dataset '{}'".format(FLAGS.data))
data = getter("data")()
train_loader = data.train_loader
FLAGS.len_loader = len(train_loader)
val_loader = data.val_loader
criterion = torch.nn.CrossEntropyLoss(reduction="none").to(device)
print("=> creating model '{}'".format(FLAGS.model))
model = getter("model")()
optimizer = get_optimizer(model)
if not FLAGS.evaluate:
model = nn.DataParallel(model)
model = model.to(device)
start_epoch = 0
lr_scheduler = get_lr_scheduler(optimizer)
best_val = 0.0
iter = 0.0
# optionally use the graph of another network
if getattr(FLAGS, "use_graph", False):
assert FLAGS.graph == "fine_tune"
print("=> loading '{}'".format(FLAGS.use_graph))
checkpoint = torch.load(
FLAGS.use_graph, map_location=lambda storage, loc: storage
)
state_dict = checkpoint["model"]
model.load_state_dict(state_dict)
# make a call to get_weight -- this will initialize the masks.
model.module.get_weight()
# optionally use the initialization of another network
if getattr(FLAGS, "use_init", False):
assert hasattr(FLAGS, "use_graph")
assert FLAGS.graph == "fine_tune"
print("=> loading '{}'".format(FLAGS.use_init))
checkpoint = torch.load(
FLAGS.use_init, map_location=lambda storage, loc: storage
)
state_dict = checkpoint["model"]
for k, v in model.state_dict().items():
if k not in state_dict:
state_dict[k] = v
print("inserting {}".format(k))
model.load_state_dict(state_dict)
# optionally resume from a checkpoint
if FLAGS.resume:
if os.path.isfile(FLAGS.resume):
print("=> loading checkpoint '{}'".format(FLAGS.resume))
checkpoint = torch.load(
FLAGS.resume, map_location=lambda storage, loc: storage
)
start_epoch = checkpoint["last_epoch"] + 1
best_val = checkpoint["best_val"]
iter = checkpoint["iter"]
state_dict = checkpoint["model"]
if FLAGS.evaluate:
state_dict = {k[7:]: v for k, v in state_dict.items()}
model.load_state_dict(state_dict)
optimizer.load_state_dict(checkpoint["optimizer"])
print(
"=> loaded checkpoint '{}' (epoch {})".format(
FLAGS.resume, checkpoint["last_epoch"]
)
)
else:
print("=> no checkpoint found at '{}'".format(FLAGS.resume))
torch.backends.cudnn.benchmark = True
# Logging.
start_time = time.time()
local_start_time_str = time.strftime(
"%Y-%m-%d_%H:%M:%S", time.localtime(start_time)
)
if hasattr(FLAGS, "title"):
title = FLAGS.title
else:
title = "-".join(sys.argv[-1].split(":")[-1].split("/"))
if getattr(FLAGS, "log_dir", False):
log_prefix = FLAGS.log_dir
else:
log_prefix = "./runs/"
if getattr(FLAGS, "save_dir", False):
checkpoint_prefix = FLAGS.save_dir
else:
checkpoint_prefix = "./checkpoints"
log_dir = os.path.join(log_prefix, title + "-" + local_start_time_str)
checkpoint_dir = os.path.join(checkpoint_prefix, title.replace("/", "_"))
if not os.path.exists(checkpoint_dir):
os.mkdir(checkpoint_dir)
writer = SummaryWriter(log_dir=log_dir)
train_meters = get_meters("train", model)
val_meters = get_meters("val", model)
val_meters["best_val"] = ScalarMeter("best_val")
if FLAGS.evaluate:
model_profiling(model)
print("Start evaluation.")
if getattr(FLAGS, "fast_eval", False):
model.prepare_for_fast_eval()
model = nn.DataParallel(model)
model = model.to(device)
with torch.no_grad():
results = run_one_epoch(
0,
val_loader,
model,
criterion,
optimizer,
val_meters,
phase="val",
iter=iter,
scheduler=lr_scheduler,
)
return
# save init.
torch.save(
{
"model": model.state_dict(),
"optimizer": optimizer.state_dict(),
"last_epoch": 0,
"best_val": best_val,
"meters": (train_meters, val_meters),
"iter": iter,
},
os.path.join(checkpoint_dir, "init.pt"),
)
print("Start training.")
for epoch in range(start_epoch, FLAGS.num_epochs):
if FLAGS.lr_scheduler != "cosine":
lr_scheduler.step(epoch)
# train
results, iter = run_one_epoch(
epoch,
train_loader,
model,
criterion,
optimizer,
train_meters,
phase="train",
iter=iter,
scheduler=lr_scheduler,
)
# val
val_meters["best_val"].cache(best_val)
with torch.no_grad():
results = run_one_epoch(
epoch,
val_loader,
model,
criterion,
optimizer,
val_meters,
phase="val",
iter=iter,
scheduler=lr_scheduler,
)
if results["top1_accuracy"] > best_val:
best_val = results["top1_accuracy"]
torch.save({"model": model.state_dict()}, os.path.join(log_dir, "best.pt"))
print("New best validation top1 accuracy: {:.3f}".format(best_val))
writer.add_scalar("val/best_val", best_val, epoch)
# save latest checkpoint.
if epoch == 0 or (epoch + 1) % 10 == 0:
torch.save(
{
"model": model.state_dict(),
"optimizer": optimizer.state_dict(),
"last_epoch": epoch,
"best_val": best_val,
"meters": (train_meters, val_meters),
"iter": iter,
},
os.path.join(checkpoint_dir, "epoch_{}.pt".format(epoch)),
)
flops, _ = model_profiling(model.module)
writer.add_scalar("flops/flops", flops, epoch)
return
def main():
"""train and eval model"""
train_val_test()
if __name__ == "__main__":
main()
|
dnw-master
|
train.py
|
import argparse
import os
import shutil
import time
import random
import numpy as np
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
try:
from apex.parallel import DistributedDataParallel as DDP
from apex.fp16_utils import *
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to run this example.")
import image_classification.resnet as models
import image_classification.logger as log
from image_classification.smoothing import LabelSmoothing
from image_classification.mixup import NLLMultiLabelSmooth, MixUpWrapper
from image_classification.dataloaders import *
from image_classification.training import *
from image_classification.utils import *
def add_parser_arguments(parser):
model_names = models.resnet_versions.keys()
model_configs = models.resnet_configs.keys()
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('--data-backend', metavar='BACKEND', default='dali-cpu',
choices=DATA_BACKEND_CHOICES)
parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet50',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet50)')
parser.add_argument('--model-config', '-c', metavar='CONF', default='classic',
choices=model_configs,
help='model configs: ' +
                              ' | '.join(model_configs) + ' (default: classic)')
parser.add_argument('-j', '--workers', default=5, type=int, metavar='N',
help='number of data loading workers (default: 5)')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N', help='mini-batch size (default: 256) per gpu')
parser.add_argument('--optimizer-batch-size', default=-1, type=int,
metavar='N', help='size of a total batch size, for simulating bigger batches')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--lr-schedule', default='step', type=str, metavar='SCHEDULE', choices=['step','linear','cosine'])
parser.add_argument('--warmup', default=0, type=int,
metavar='E', help='number of warmup epochs')
parser.add_argument('--label-smoothing', default=0.0, type=float,
metavar='S', help='label smoothing')
parser.add_argument('--mixup', default=0.0, type=float,
metavar='ALPHA', help='mixup alpha')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--bn-weight-decay', action='store_true',
                        help='use weight_decay on batch normalization learnable parameters (default: false)')
parser.add_argument('--nesterov', action='store_true',
                        help='use nesterov momentum (default: false)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--pretrained-weights', default='', type=str, metavar='PATH',
help='load weights from here')
parser.add_argument('--fp16', action='store_true',
help='Run model fp16 mode.')
parser.add_argument('--static-loss-scale', type=float, default=1,
help='Static loss scale, positive power of 2 values can improve fp16 convergence.')
parser.add_argument('--dynamic-loss-scale', action='store_true',
help='Use dynamic loss scaling. If supplied, this argument supersedes ' +
'--static-loss-scale.')
parser.add_argument('--prof', type=int, default=-1,
help='Run only N iterations')
parser.add_argument('--amp', action='store_true',
help='Run model AMP (automatic mixed precision) mode.')
parser.add_argument("--local_rank", default=0, type=int)
parser.add_argument('--seed', default=None, type=int,
help='random seed used for np and pytorch')
parser.add_argument('--gather-checkpoints', action='store_true',
help='Gather checkpoints throughout the training')
parser.add_argument('--raport-file', default='experiment_raport.json', type=str,
                        help='file in which to store JSON experiment report')
parser.add_argument('--final-weights', default='model.pth.tar', type=str,
help='file in which to store final model weights')
parser.add_argument('--evaluate', action='store_true', help='evaluate checkpoint/model')
parser.add_argument('--training-only', action='store_true', help='do not evaluate')
parser.add_argument('--no-checkpoints', action='store_false', dest='save_checkpoints')
parser.add_argument('--workspace', type=str, default='./')
def main(args):
exp_start_time = time.time()
global best_prec1
best_prec1 = 0
args.distributed = False
if 'WORLD_SIZE' in os.environ:
args.distributed = int(os.environ['WORLD_SIZE']) > 1
args.gpu = 0
args.world_size = 1
if args.distributed:
args.gpu = args.local_rank % torch.cuda.device_count()
torch.cuda.set_device(args.gpu)
dist.init_process_group(backend='nccl', init_method='env://')
args.world_size = torch.distributed.get_world_size()
if args.amp and args.fp16:
print("Please use only one of the --fp16/--amp flags")
exit(1)
if args.seed is not None:
print("Using seed = {}".format(args.seed))
torch.manual_seed(args.seed + args.local_rank)
torch.cuda.manual_seed(args.seed + args.local_rank)
np.random.seed(seed=args.seed + args.local_rank)
random.seed(args.seed + args.local_rank)
def _worker_init_fn(id):
np.random.seed(seed=args.seed + args.local_rank + id)
random.seed(args.seed + args.local_rank + id)
else:
def _worker_init_fn(id):
pass
if args.fp16:
assert torch.backends.cudnn.enabled, "fp16 mode requires cudnn backend to be enabled."
if args.static_loss_scale != 1.0:
if not args.fp16:
print("Warning: if --fp16 is not used, static_loss_scale will be ignored.")
if args.optimizer_batch_size < 0:
batch_size_multiplier = 1
else:
tbs = args.world_size * args.batch_size
if args.optimizer_batch_size % tbs != 0:
print("Warning: simulated batch size {} is not divisible by actual batch size {}".format(args.optimizer_batch_size, tbs))
batch_size_multiplier = int(args.optimizer_batch_size/ tbs)
print("BSM: {}".format(batch_size_multiplier))
pretrained_weights = None
if args.pretrained_weights:
if os.path.isfile(args.pretrained_weights):
print("=> loading pretrained weights from '{}'".format(args.pretrained_weights))
pretrained_weights = torch.load(args.pretrained_weights)
else:
            print("=> no pretrained weights found at '{}'".format(args.pretrained_weights))
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume, map_location = lambda storage, loc: storage.cuda(args.gpu))
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model_state = checkpoint['state_dict']
model_state = {k[7:] : v for k, v in model_state.items()}
optimizer_state = checkpoint['optimizer']
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
model_state = None
optimizer_state = None
else:
model_state = None
optimizer_state = None
loss = nn.CrossEntropyLoss
if args.mixup > 0.0:
loss = lambda: NLLMultiLabelSmooth(args.label_smoothing)
elif args.label_smoothing > 0.0:
loss = lambda: LabelSmoothing(args.label_smoothing)
model_and_loss = ModelAndLoss(
(args.arch, args.model_config),
loss,
pretrained_weights=pretrained_weights,
cuda = True, fp16 = args.fp16)
# Create data loaders and optimizers as needed
if args.data_backend == 'pytorch':
get_train_loader = get_pytorch_train_loader
get_val_loader = get_pytorch_val_loader
elif args.data_backend == 'dali-gpu':
get_train_loader = get_dali_train_loader(dali_cpu=False)
get_val_loader = get_dali_val_loader()
elif args.data_backend == 'dali-cpu':
get_train_loader = get_dali_train_loader(dali_cpu=True)
get_val_loader = get_dali_val_loader()
train_loader, train_loader_len = get_train_loader(args.data, args.batch_size, 1000, args.mixup > 0.0, workers=args.workers, fp16=args.fp16)
if args.mixup != 0.0:
train_loader = MixUpWrapper(args.mixup, 1000, train_loader)
val_loader, val_loader_len = get_val_loader(args.data, args.batch_size, 1000, False, workers=args.workers, fp16=args.fp16)
if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0:
logger = log.Logger(
args.print_freq,
[
log.JsonBackend(os.path.join(args.workspace, args.raport_file), log_level=1),
log.StdOut1LBackend(train_loader_len, val_loader_len, args.epochs, log_level=0),
])
for k, v in args.__dict__.items():
logger.log_run_tag(k, v)
else:
logger = None
optimizer = get_optimizer(list(model_and_loss.model.named_parameters()),
args.fp16,
args.lr, args.momentum, args.weight_decay,
nesterov = args.nesterov,
bn_weight_decay = args.bn_weight_decay,
state=optimizer_state,
static_loss_scale = args.static_loss_scale,
dynamic_loss_scale = args.dynamic_loss_scale)
if args.lr_schedule == 'step':
lr_policy = lr_step_policy(args.lr, [30,60,80], 0.1, args.warmup, logger=logger)
elif args.lr_schedule == 'cosine':
lr_policy = lr_cosine_policy(args.lr, args.warmup, args.epochs, logger=logger)
elif args.lr_schedule == 'linear':
lr_policy = lr_linear_policy(args.lr, args.warmup, args.epochs, logger=logger)
if args.amp:
model_and_loss, optimizer = amp.initialize(
model_and_loss, optimizer,
opt_level="O2",
loss_scale="dynamic" if args.dynamic_loss_scale else args.static_loss_scale)
if args.distributed:
model_and_loss.distributed()
model_and_loss.load_model_state(model_state)
    print('beginning to train')
train_loop(
model_and_loss, optimizer,
lr_policy,
train_loader, val_loader, args.epochs,
args.fp16, logger, should_backup_checkpoint(args), use_amp=args.amp,
batch_size_multiplier = batch_size_multiplier,
start_epoch = args.start_epoch, best_prec1 = best_prec1, prof=args.prof,
skip_training = args.evaluate, skip_validation = args.training_only,
save_checkpoints=args.save_checkpoints and not args.evaluate, checkpoint_dir=args.workspace)
exp_duration = time.time() - exp_start_time
if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0:
logger.end()
print("Experiment ended")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
add_parser_arguments(parser)
args = parser.parse_args()
cudnn.benchmark = True
main(args)
|
dnw-master
|
imagenet_sparsity_experiments/main.py
|
import sys
import subprocess
import os
import socket
import time
from argparse import ArgumentParser, REMAINDER
import torch
def parse_args():
"""
Helper function parsing the command line options
@retval ArgumentParser
"""
    parser = ArgumentParser(description="PyTorch distributed training launch "
                                        "helper utility that will spawn up "
"multiple distributed processes")
# Optional arguments for the launch helper
parser.add_argument("--nnodes", type=int, default=1,
help="The number of nodes to use for distributed "
"training")
parser.add_argument("--node_rank", type=int, default=0,
help="The rank of the node for multi-node distributed "
"training")
parser.add_argument("--nproc_per_node", type=int, default=1,
help="The number of processes to launch on each node, "
"for GPU training, this is recommended to be set "
"to the number of GPUs in your system so that "
"each process can be bound to a single GPU.")
parser.add_argument("--master_addr", default="127.0.0.1", type=str,
help="Master node (rank 0)'s address, should be either "
"the IP address or the hostname of node 0, for "
"single node multi-proc training, the "
"--master_addr can simply be 127.0.0.1")
parser.add_argument("--master_port", default=29500, type=int,
                        help="Master node (rank 0)'s free port that needs to "
                             "be used for communication during distributed "
"training")
# positional
parser.add_argument("training_script", type=str,
help="The full path to the single GPU training "
"program/script to be launched in parallel, "
"followed by all the arguments for the "
"training script")
# rest from the training program
parser.add_argument('training_script_args', nargs=REMAINDER)
return parser.parse_args()
def main():
args = parse_args()
# world size in terms of number of processes
dist_world_size = args.nproc_per_node * args.nnodes
# set PyTorch distributed related environmental variables
current_env = os.environ.copy()
current_env["MASTER_ADDR"] = args.master_addr
current_env["MASTER_PORT"] = str(args.master_port)
current_env["WORLD_SIZE"] = str(dist_world_size)
processes = []
for local_rank in range(0, args.nproc_per_node):
# each process's rank
dist_rank = args.nproc_per_node * args.node_rank + local_rank
current_env["RANK"] = str(dist_rank)
# spawn the processes
cmd = [sys.executable,
"-u",
args.training_script,
"--local_rank={}".format(local_rank)] + args.training_script_args
print(cmd)
stdout = None if local_rank == 0 else open("GPU_"+str(local_rank)+".log", "w")
process = subprocess.Popen(cmd, env=current_env, stdout=stdout)
processes.append(process)
try:
up = True
error = False
while up and not error:
up = False
for p in processes:
ret = p.poll()
if ret is None:
up = True
elif ret != 0:
error = True
time.sleep(1)
if error:
for p in processes:
if p.poll() is None:
p.terminate()
exit(1)
except KeyboardInterrupt:
for p in processes:
p.terminate()
raise
except SystemExit:
for p in processes:
p.terminate()
raise
except:
for p in processes:
p.terminate()
raise
if __name__ == "__main__":
main()
|
dnw-master
|
imagenet_sparsity_experiments/multiproc.py
|
import torch
import torch.nn as nn
import numpy as np
def mixup(alpha, num_classes, data, target):
with torch.no_grad():
bs = data.size(0)
c = np.random.beta(alpha, alpha)
perm = torch.randperm(bs).cuda()
md = c * data + (1-c) * data[perm, :]
mt = c * target + (1-c) * target[perm, :]
return md, mt
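# mixup draws c ~ Beta(alpha, alpha) and returns the convex combinations
#   x_mix = c * x_i + (1 - c) * x_j,  y_mix = c * y_i + (1 - c) * y_j
# over a random permutation of the batch, so `target` is expected to already
# be a one-hot / soft-label tensor (see MixUpWrapper below).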
class MixUpWrapper(object):
def __init__(self, alpha, num_classes, dataloader):
self.alpha = alpha
self.dataloader = dataloader
self.num_classes = num_classes
def mixup_loader(self, loader):
for input, target in loader:
i, t = mixup(self.alpha, self.num_classes, input, target)
yield i, t
def __iter__(self):
return self.mixup_loader(self.dataloader)
class NLLMultiLabelSmooth(nn.Module):
def __init__(self, smoothing = 0.0):
super(NLLMultiLabelSmooth, self).__init__()
self.confidence = 1.0 - smoothing
self.smoothing = smoothing
def forward(self, x, target):
if self.training:
x = x.float()
target = target.float()
logprobs = torch.nn.functional.log_softmax(x, dim = -1)
nll_loss = -logprobs * target
nll_loss = nll_loss.sum(-1)
smooth_loss = -logprobs.mean(dim=-1)
loss = self.confidence * nll_loss + self.smoothing * smooth_loss
return loss.mean()
else:
return torch.nn.functional.cross_entropy(x, target)
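# During training the loss above is
#   (1 - smoothing) * sum_c(-target_c * log p_c) + smoothing * mean_c(-log p_c),
# which reduces to standard label smoothing when `target` is one-hot; at eval
# time it falls back to plain cross entropy on integer labels.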
|
dnw-master
|
imagenet_sparsity_experiments/image_classification/mixup.py
|
import os
import torch
import numpy as np
import torchvision.datasets as datasets
import torchvision.transforms as transforms
DATA_BACKEND_CHOICES = ['pytorch']
try:
from nvidia.dali.plugin.pytorch import DALIClassificationIterator
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.types as types
DATA_BACKEND_CHOICES.append('dali-gpu')
DATA_BACKEND_CHOICES.append('dali-cpu')
except ImportError:
print("Please install DALI from https://www.github.com/NVIDIA/DALI to run this example.")
class HybridTrainPipe(Pipeline):
def __init__(self, batch_size, num_threads, device_id, data_dir, crop, dali_cpu=False):
super(HybridTrainPipe, self).__init__(batch_size, num_threads, device_id, seed = 12 + device_id)
if torch.distributed.is_initialized():
local_rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
else:
local_rank = 0
world_size = 1
self.input = ops.FileReader(
file_root = data_dir,
shard_id = local_rank,
num_shards = world_size,
random_shuffle = True)
if dali_cpu:
dali_device = "cpu"
self.decode = ops.HostDecoderRandomCrop(device=dali_device, output_type=types.RGB,
random_aspect_ratio=[0.75, 4./3.],
random_area=[0.08, 1.0],
num_attempts=100)
else:
dali_device = "gpu"
# This padding sets the size of the internal nvJPEG buffers to be able to handle all images from full-sized ImageNet
# without additional reallocations
self.decode = ops.nvJPEGDecoderRandomCrop(device="mixed", output_type=types.RGB, device_memory_padding=211025920, host_memory_padding=140544512,
random_aspect_ratio=[0.75, 4./3.],
random_area=[0.08, 1.0],
num_attempts=100)
self.res = ops.Resize(device=dali_device, resize_x=crop, resize_y=crop, interp_type=types.INTERP_TRIANGULAR)
self.cmnp = ops.CropMirrorNormalize(device = "gpu",
output_dtype = types.FLOAT,
output_layout = types.NCHW,
crop = (crop, crop),
image_type = types.RGB,
mean = [0.485 * 255,0.456 * 255,0.406 * 255],
std = [0.229 * 255,0.224 * 255,0.225 * 255])
self.coin = ops.CoinFlip(probability = 0.5)
def define_graph(self):
rng = self.coin()
self.jpegs, self.labels = self.input(name = "Reader")
images = self.decode(self.jpegs)
images = self.res(images)
output = self.cmnp(images.gpu(), mirror = rng)
return [output, self.labels]
class HybridValPipe(Pipeline):
def __init__(self, batch_size, num_threads, device_id, data_dir, crop, size):
super(HybridValPipe, self).__init__(batch_size, num_threads, device_id, seed = 12 + device_id)
if torch.distributed.is_initialized():
local_rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
else:
local_rank = 0
world_size = 1
self.input = ops.FileReader(
file_root = data_dir,
shard_id = local_rank,
num_shards = world_size,
random_shuffle = False)
self.decode = ops.nvJPEGDecoder(device = "mixed", output_type = types.RGB)
self.res = ops.Resize(device = "gpu", resize_shorter = size)
self.cmnp = ops.CropMirrorNormalize(device = "gpu",
output_dtype = types.FLOAT,
output_layout = types.NCHW,
crop = (crop, crop),
image_type = types.RGB,
mean = [0.485 * 255,0.456 * 255,0.406 * 255],
std = [0.229 * 255,0.224 * 255,0.225 * 255])
def define_graph(self):
self.jpegs, self.labels = self.input(name = "Reader")
images = self.decode(self.jpegs)
images = self.res(images)
output = self.cmnp(images)
return [output, self.labels]
class DALIWrapper(object):
def gen_wrapper(dalipipeline, num_classes, one_hot):
for data in dalipipeline:
input = data[0]["data"]
target = data[0]["label"].squeeze().cuda().long()
if one_hot:
target = expand(num_classes, torch.float, target)
yield input, target
dalipipeline.reset()
def __init__(self, dalipipeline, num_classes, one_hot):
self.dalipipeline = dalipipeline
self.num_classes = num_classes
self.one_hot = one_hot
def __iter__(self):
return DALIWrapper.gen_wrapper(self.dalipipeline, self.num_classes, self.one_hot)
def get_dali_train_loader(dali_cpu=False):
def gdtl(data_path, batch_size, num_classes, one_hot, workers=5, _worker_init_fn=None, fp16=False):
if torch.distributed.is_initialized():
local_rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
else:
local_rank = 0
world_size = 1
traindir = os.path.join(data_path, 'train')
pipe = HybridTrainPipe(batch_size=batch_size, num_threads=workers,
device_id = local_rank,
data_dir = traindir, crop = 224, dali_cpu=dali_cpu)
pipe.build()
train_loader = DALIClassificationIterator(pipe, size = int(pipe.epoch_size("Reader") / world_size))
return DALIWrapper(train_loader, num_classes, one_hot), int(pipe.epoch_size("Reader") / (world_size * batch_size))
return gdtl
def get_dali_val_loader():
def gdvl(data_path, batch_size, num_classes, one_hot, workers=5, _worker_init_fn=None, fp16=False):
if torch.distributed.is_initialized():
local_rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
else:
local_rank = 0
world_size = 1
valdir = os.path.join(data_path, 'val')
pipe = HybridValPipe(batch_size=batch_size, num_threads=workers,
device_id = local_rank,
data_dir = valdir,
crop = 224, size = 256)
pipe.build()
val_loader = DALIClassificationIterator(pipe, size = int(pipe.epoch_size("Reader") / world_size))
return DALIWrapper(val_loader, num_classes, one_hot), int(pipe.epoch_size("Reader") / (world_size * batch_size))
return gdvl
def fast_collate(batch):
imgs = [img[0] for img in batch]
targets = torch.tensor([target[1] for target in batch], dtype=torch.int64)
w = imgs[0].size[0]
h = imgs[0].size[1]
tensor = torch.zeros( (len(imgs), 3, h, w), dtype=torch.uint8 )
for i, img in enumerate(imgs):
nump_array = np.asarray(img, dtype=np.uint8)
tens = torch.from_numpy(nump_array)
if(nump_array.ndim < 3):
nump_array = np.expand_dims(nump_array, axis=-1)
nump_array = np.rollaxis(nump_array, 2)
tensor[i] += torch.from_numpy(nump_array)
return tensor, targets
def expand(num_classes, dtype, tensor):
e = torch.zeros(tensor.size(0), num_classes, dtype=dtype, device=torch.device('cuda'))
e = e.scatter(1, tensor.unsqueeze(1), 1.0)
return e
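# --- Hedged usage sketch (not part of the original file) ---------------------
# expand() builds one-hot targets directly on the GPU (the helper hard-codes
# device='cuda', so this only runs when CUDA is available):
# >>> expand(4, torch.float, torch.tensor([2, 0], device='cuda'))
# tensor([[0., 0., 1., 0.],
#         [1., 0., 0., 0.]], device='cuda:0')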
class PrefetchedWrapper(object):
def prefetched_loader(loader, num_classes, fp16, one_hot):
mean = torch.tensor([0.485 * 255, 0.456 * 255, 0.406 * 255]).cuda().view(1,3,1,1)
std = torch.tensor([0.229 * 255, 0.224 * 255, 0.225 * 255]).cuda().view(1,3,1,1)
if fp16:
mean = mean.half()
std = std.half()
stream = torch.cuda.Stream()
first = True
for next_input, next_target in loader:
with torch.cuda.stream(stream):
next_input = next_input.cuda(non_blocking=True)
next_target = next_target.cuda(non_blocking=True)
if fp16:
next_input = next_input.half()
if one_hot:
next_target = expand(num_classes, torch.half, next_target)
else:
next_input = next_input.float()
if one_hot:
next_target = expand(num_classes, torch.float, next_target)
next_input = next_input.sub_(mean).div_(std)
if not first:
yield input, target
else:
first = False
torch.cuda.current_stream().wait_stream(stream)
input = next_input
target = next_target
yield input, target
def __init__(self, dataloader, num_classes, fp16, one_hot):
self.dataloader = dataloader
self.fp16 = fp16
self.epoch = 0
self.one_hot = one_hot
self.num_classes = num_classes
def __iter__(self):
if (self.dataloader.sampler is not None and
isinstance(self.dataloader.sampler,
torch.utils.data.distributed.DistributedSampler)):
self.dataloader.sampler.set_epoch(self.epoch)
self.epoch += 1
return PrefetchedWrapper.prefetched_loader(self.dataloader, self.num_classes, self.fp16, self.one_hot)
def get_pytorch_train_loader(data_path, batch_size, num_classes, one_hot, workers=5, _worker_init_fn=None, fp16=False):
traindir = os.path.join(data_path, 'train')
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
]))
if torch.distributed.is_initialized():
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
else:
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=batch_size, shuffle=(train_sampler is None),
num_workers=workers, worker_init_fn=_worker_init_fn, pin_memory=True, sampler=train_sampler, collate_fn=fast_collate, drop_last=True)
return PrefetchedWrapper(train_loader, num_classes, fp16, one_hot), len(train_loader)
def get_pytorch_val_loader(data_path, batch_size, num_classes, one_hot, workers=5, _worker_init_fn=None, fp16=False):
valdir = os.path.join(data_path, 'val')
val_dataset = datasets.ImageFolder(
valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
]))
if torch.distributed.is_initialized():
val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset)
else:
val_sampler = None
val_loader = torch.utils.data.DataLoader(
val_dataset,
sampler=val_sampler,
batch_size=batch_size, shuffle=False,
num_workers=workers, worker_init_fn=_worker_init_fn, pin_memory=True,
collate_fn=fast_collate)
return PrefetchedWrapper(val_loader, num_classes, fp16, one_hot), len(val_loader)
|
dnw-master
|
imagenet_sparsity_experiments/image_classification/dataloaders.py
|
from . import logger
from . import dataloaders
from . import training
from . import utils
from . import mixup
from . import resnet
from . import smoothing
|
dnw-master
|
imagenet_sparsity_experiments/image_classification/__init__.py
|
import random
import json
from collections import OrderedDict
class IterationMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.last = 0
def record(self, val, n = 1):
self.last = val
def get_val(self):
return None
def get_last(self):
return self.last
class EpochMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.val = 0
def record(self, val, n = 1):
self.val = val
def get_val(self):
return self.val
def get_last(self):
return None
class AverageMeter(object):
def __init__(self, ret_last=True, ret_val=True):
self.reset()
self.ret_last = ret_last
self.ret_val = ret_val
def reset(self):
self.n = 0
self.val = 0
self.last = 0
def record(self, val, n = 1):
self.last = val
self.n += n
self.val += val * n
def get_val(self):
if self.ret_val:
if self.n == 0:
return 0.0
return self.val / self.n
else:
return None
def get_last(self):
if self.ret_last:
return self.last
else:
return None
class RunningMeter(object):
def __init__(self, decay):
self.decay = decay
def reset(self):
self.val = 0
self.last = 0
def record(self, val, n = 1):
self.last = val
decay = 1 - ((1 - self.decay) ** n)
self.val = (1 - decay) * self.val + decay * val
def get_val(self):
return self.val
def get_last(self):
return self.last
class Logger(object):
def __init__(self, print_interval, backends, verbose=False):
self.epoch = -1
self.iteration = -1
self.val_iteration = -1
self.metrics = OrderedDict()
self.backends = backends
self.print_interval = print_interval
self.verbose = verbose
def log_run_tag(self, name, val):
for b in self.backends:
b.log_run_tag(name, val)
def register_metric(self, metric_name, meter, log_level=0):
if self.verbose:
print("Registering metric: {}".format(metric_name))
self.metrics[metric_name] = {'meter' : meter, 'level' : log_level}
def log_metric(self, metric_name, val, n=1):
self.metrics[metric_name]['meter'].record(val, n=n)
def start_iteration(self, val=False):
if val:
self.val_iteration += 1
else:
self.iteration += 1
def end_iteration(self, val=False):
it = self.val_iteration if val else self.iteration
if (it % self.print_interval == 0):
for b in self.backends:
if val:
b.log_iteration_metric('val.it', it)
else:
b.log_iteration_metric('it', it)
for n, m in [(n, m) for n, m in self.metrics.items() if m['level'] <= b.level and n.startswith('val') == val]:
mv = m['meter'].get_last()
if mv is not None:
b.log_iteration_metric(n, mv)
b.log_end_iteration()
def start_epoch(self):
self.epoch += 1
self.iteration = 0
self.val_iteration = 0
for b in self.backends:
b.log_epoch_metric('ep', self.epoch)
for n, m in [(n, m) for n, m in self.metrics.items() if m['level'] <= b.level]:
m['meter'].reset()
def end_epoch(self):
for b in self.backends:
for n, m in [(n, m) for n, m in self.metrics.items() if m['level'] <= b.level]:
mv = m['meter'].get_val()
if mv is not None:
b.log_epoch_metric(n, mv)
b.log_end_epoch()
def end(self):
for b in self.backends:
b.end()
def iteration_generator_wrapper(self, gen, val = False):
for g in gen:
self.start_iteration(val = val)
yield g
self.end_iteration(val = val)
def epoch_generator_wrapper(self, gen):
for g in gen:
self.start_epoch()
yield g
self.end_epoch()
class JsonBackend(object):
def __init__(self, filename, log_level=0):
self.level = log_level
self.filename = filename
self.json_log = OrderedDict([
('run' , OrderedDict()),
('epoch', OrderedDict()),
('iter' , OrderedDict()),
('event', OrderedDict()),
])
def log_run_tag(self, name, val):
self.json_log['run'][name] = val
def log_end_epoch(self):
pass
def log_end_iteration(self):
pass
def log_epoch_metric(self, name, val):
if not name in self.json_log['epoch'].keys():
self.json_log['epoch'][name] = []
self.json_log['epoch'][name].append(val)
if name != 'ep':
if name in self.json_log['iter'].keys():
self.json_log['iter'][name].append([])
else:
if not 'it' in self.json_log['iter'].keys():
self.json_log['iter']['it'] = []
self.json_log['iter']['it'].append([])
def log_iteration_metric(self, name, val):
if not (name in self.json_log['iter'].keys()):
self.json_log['iter'][name] = [[]]
self.json_log['iter'][name][-1].append(val)
def end(self):
json.dump(self.json_log, open(self.filename, 'w'))
class StdOut1LBackend(object):
def __init__(self, iters, val_iters, epochs, log_level=0):
self.level = log_level
self.iteration = 0
self.total_iterations = iters
self.total_val_iterations = val_iters
self.epoch = 0
self.total_epochs = epochs
self.iteration_metrics = {}
self.epoch_metrics = {}
self.mode = 'train'
def log_run_tag(self, name, val):
print("{} : {}".format(name, val))
def log_end_epoch(self):
print("Summary Epoch: {}/{};\t{}".format(
self.epoch, self.total_epochs,
"\t".join(["{} : {:.3f}".format(m,v) for m, v in self.epoch_metrics.items()])))
self.epoch_metrics = {}
def log_end_iteration(self):
md = "Validation" if self.mode == 'val' else ""
ti = self.total_val_iterations if self.mode == 'val' else self.total_iterations
print("Epoch: {}/{} {} Iteration: {}/{};\t{}".format(
self.epoch, self.total_epochs, md, self.iteration, ti,
"\t".join(["{} : {:.3f}".format(m,v) for m, v in self.iteration_metrics.items()])))
self.iteration_metrics = {}
def log_epoch_metric(self, name, value):
if name == 'ep':
self.epoch = value
self.iteration = 0
else:
self.epoch_metrics[name] = value
def log_iteration_metric(self, name, value):
if name == 'it' or name == 'val.it':
self.mode = 'train' if name == 'it' else 'val'
self.iteration = value
else:
self.iteration_metrics[name] = value
def end(self):
pass
class StdOutBackend(object):
def __init__(self, iters, epochs, log_level=0):
self.level = log_level
self.iteration = 0
self.epoch = 0
def log_run_tag(self, name, val):
print("{} : {}".format(name, val))
def log_end_epoch(self):
pass
def log_end_iteration(self):
pass
def log_epoch_metric(self, name, value):
if name == 'ep':
self.epoch = value
self.iteration = 0
else:
print("Summary Epoch: {}; {} = {:.3f}".format(self.epoch, name, value))
def log_iteration_metric(self, name, value):
if name == 'it' or name == 'val.it':
self.iteration = value
else:
print("Epoch: {} Iteration: {}; {} = {:.3f}".format(self.epoch, self.iteration, name, value))
def end(self):
pass
|
dnw-master
|
imagenet_sparsity_experiments/image_classification/logger.py
|
import math
import torch
import torch.nn as nn
import numpy as np
from .sparse_util import SparseConv, TDConv
__all__ = ['ResNet', 'build_resnet', 'resnet_versions', 'resnet_configs']
# ResNetBuilder {{{
class ResNetBuilder(object):
def __init__(self, version, config):
self.config = config
self.L = sum(version['layers'])
self.M = version['block'].M
# make everything sparse_conv
def conv(self, kernel_size, in_planes, out_planes, stride=1, first_conv=False):
if kernel_size == 3:
conv = self.config['conv'](
in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
elif kernel_size == 1:
conv = self.config['conv'](in_planes, out_planes, kernel_size=1, stride=stride,
bias=False)
elif kernel_size == 5:
conv = self.config['conv'](in_planes, out_planes, kernel_size=5, stride=stride,
padding=2, bias=False)
elif kernel_size == 7:
conv = self.config['conv'](in_planes, out_planes, kernel_size=7, stride=stride,
padding=3, bias=False)
else:
return None
if self.config['nonlinearity'] == 'relu':
nn.init.kaiming_normal_(conv.weight,
mode=self.config['conv_init'],
nonlinearity=self.config['nonlinearity'])
if 'prune_rate' in self.config:
prune_rate = self.config['prune_rate']
if 'ignore_first_conv' in self.config and first_conv:
prune_rate = 0.0
conv.set_prune_rate(prune_rate)
return conv
def conv3x3(self, in_planes, out_planes, stride=1, first_conv=False):
"""3x3 convolution with padding"""
c = self.conv(3, in_planes, out_planes, stride=stride, first_conv=first_conv)
return c
def conv1x1(self, in_planes, out_planes, stride=1, first_conv=False):
"""1x1 convolution with padding"""
c = self.conv(1, in_planes, out_planes, stride=stride, first_conv=first_conv)
return c
def conv7x7(self, in_planes, out_planes, stride=1, first_conv=False):
"""7x7 convolution with padding"""
c = self.conv(7, in_planes, out_planes, stride=stride, first_conv=first_conv)
return c
def conv5x5(self, in_planes, out_planes, stride=1, first_conv=False):
"""5x5 convolution with padding"""
c = self.conv(5, in_planes, out_planes, stride=stride, first_conv=first_conv)
return c
def batchnorm(self, planes, last_bn=False):
bn = nn.BatchNorm2d(planes)
gamma_init_val = 0 if last_bn and self.config['last_bn_0_init'] else 1
nn.init.constant_(bn.weight, gamma_init_val)
nn.init.constant_(bn.bias, 0)
return bn
def activation(self):
return self.config['activation']()
# ResNetBuilder }}}
# BasicBlock {{{
class BasicBlock(nn.Module):
M = 2
expansion = 1
def __init__(self, builder, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = builder.conv3x3(inplanes, planes, stride)
self.bn1 = builder.batchnorm(planes)
self.relu = builder.activation()
self.conv2 = builder.conv3x3(planes, planes)
self.bn2 = builder.batchnorm(planes, last_bn=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
if self.bn1 is not None:
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
if self.bn2 is not None:
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
# BasicBlock }}}
# Bottleneck {{{
class Bottleneck(nn.Module):
M = 3
expansion = 4
def __init__(self, builder, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = builder.conv1x1(inplanes, planes)
self.bn1 = builder.batchnorm(planes)
self.conv2 = builder.conv3x3(planes, planes, stride=stride)
self.bn2 = builder.batchnorm(planes)
self.conv3 = builder.conv1x1(planes, planes * self.expansion)
self.bn3 = builder.batchnorm(planes * self.expansion, last_bn=True)
self.relu = builder.activation()
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
# Bottleneck }}}
# ResNet {{{
class ResNet(nn.Module):
def __init__(self, builder, block, layers, num_classes=1000):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = builder.conv7x7(3, 64, stride=2, first_conv=True)
self.bn1 = builder.batchnorm(64)
self.relu = builder.activation()
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(builder, block, 64, layers[0])
self.layer2 = self._make_layer(builder, block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(builder, block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(builder, block, 512, layers[3], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.fc = builder.conv1x1(512 * block.expansion, num_classes)
#self.fc = nn.Linear(512 * block.expansion, num_classes)
def _make_layer(self, builder, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
dconv = builder.conv1x1(self.inplanes, planes * block.expansion,
stride=stride)
dbn = builder.batchnorm(planes * block.expansion)
if dbn is not None:
downsample = nn.Sequential(dconv, dbn)
else:
downsample = dconv
layers = []
layers.append(block(builder, self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(builder, self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
if self.bn1 is not None:
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
#x = x.view(x.size(0), -1)
x = self.fc(x)
x = x.view(x.size(0), -1)
return x
# ResNet }}}
resnet_configs = {
'classic' : {
'conv' : nn.Conv2d,
'conv_init' : 'fan_out',
'nonlinearity' : 'relu',
'last_bn_0_init' : False,
'activation' : lambda: nn.ReLU(inplace=True),
},
'fanin' : {
'conv' : nn.Conv2d,
'conv_init' : 'fan_in',
'nonlinearity' : 'relu',
'last_bn_0_init' : False,
'activation' : lambda: nn.ReLU(inplace=True),
},
'dense': {
'conv': nn.Conv2d,
'conv_init': 'fan_in',
'nonlinearity': 'relu',
'last_bn_0_init': False,
'activation': lambda: nn.ReLU(inplace=True),
},
'allsparse10': {
'conv': SparseConv,
'conv_init': 'fan_in',
'nonlinearity': 'relu',
'last_bn_0_init': False,
'activation': lambda: nn.ReLU(inplace=True),
'prune_rate': 0.9,
},
'allsparse20': {
'conv': SparseConv,
'conv_init': 'fan_in',
'nonlinearity': 'relu',
'last_bn_0_init': False,
'activation': lambda: nn.ReLU(inplace=True),
'prune_rate': 0.8,
},
'allsparse30': {
'conv': SparseConv,
'conv_init': 'fan_in',
'nonlinearity': 'relu',
'last_bn_0_init': False,
'activation': lambda: nn.ReLU(inplace=True),
'prune_rate': 0.7,
},
'allsparse40': {
'conv': SparseConv,
'conv_init': 'fan_in',
'nonlinearity': 'relu',
'last_bn_0_init': False,
'activation': lambda: nn.ReLU(inplace=True),
'prune_rate': 0.6,
},
'allsparse50': {
'conv': SparseConv,
'conv_init': 'fan_in',
'nonlinearity': 'relu',
'last_bn_0_init': False,
'activation': lambda: nn.ReLU(inplace=True),
'prune_rate': 0.5,
},
'allsparse60': {
'conv': SparseConv,
'conv_init': 'fan_in',
'nonlinearity': 'relu',
'last_bn_0_init': False,
'activation': lambda: nn.ReLU(inplace=True),
'prune_rate': 0.4,
},
'allsparse70': {
'conv': SparseConv,
'conv_init': 'fan_in',
'nonlinearity': 'relu',
'last_bn_0_init': False,
'activation': lambda: nn.ReLU(inplace=True),
'prune_rate': 0.3,
},
'allsparse80': {
'conv': SparseConv,
'conv_init': 'fan_in',
'nonlinearity': 'relu',
'last_bn_0_init': False,
'activation': lambda: nn.ReLU(inplace=True),
'prune_rate': 0.2,
},
'allsparse90': {
'conv': SparseConv,
'conv_init': 'fan_in',
'nonlinearity': 'relu',
'last_bn_0_init': False,
'activation': lambda: nn.ReLU(inplace=True),
'prune_rate': 0.1,
},
'ignorefirst10': {
'conv': SparseConv,
'conv_init': 'fan_in',
'nonlinearity': 'relu',
'last_bn_0_init': False,
'activation': lambda: nn.ReLU(inplace=True),
'prune_rate': 0.9,
'ignore_first_conv': True,
},
'ignorefirst20': {
'conv': SparseConv,
'conv_init': 'fan_in',
'nonlinearity': 'relu',
'last_bn_0_init': False,
'activation': lambda: nn.ReLU(inplace=True),
'prune_rate': 0.8,
'ignore_first_conv': True,
},
'ignorefirst30': {
'conv': SparseConv,
'conv_init': 'fan_in',
'nonlinearity': 'relu',
'last_bn_0_init': False,
'activation': lambda: nn.ReLU(inplace=True),
'prune_rate': 0.7,
'ignore_first_conv': True,
},
'ignorefirst40': {
'conv': SparseConv,
'conv_init': 'fan_in',
'nonlinearity': 'relu',
'last_bn_0_init': False,
'activation': lambda: nn.ReLU(inplace=True),
'prune_rate': 0.6,
'ignore_first_conv': True,
},
'ignorefirst50': {
'conv': SparseConv,
'conv_init': 'fan_in',
'nonlinearity': 'relu',
'last_bn_0_init': False,
'activation': lambda: nn.ReLU(inplace=True),
'prune_rate': 0.5,
'ignore_first_conv': True,
},
'ignorefirst60': {
'conv': SparseConv,
'conv_init': 'fan_in',
'nonlinearity': 'relu',
'last_bn_0_init': False,
'activation': lambda: nn.ReLU(inplace=True),
'prune_rate': 0.4,
'ignore_first_conv': True,
},
'ignorefirst70': {
'conv': SparseConv,
'conv_init': 'fan_in',
'nonlinearity': 'relu',
'last_bn_0_init': False,
'activation': lambda: nn.ReLU(inplace=True),
'prune_rate': 0.3,
'ignore_first_conv': True,
},
'ignorefirst80': {
'conv': SparseConv,
'conv_init': 'fan_in',
'nonlinearity': 'relu',
'last_bn_0_init': False,
'activation': lambda: nn.ReLU(inplace=True),
'prune_rate': 0.2,
'ignore_first_conv': True,
},
'ignorefirst90': {
'conv': SparseConv,
'conv_init': 'fan_in',
'nonlinearity': 'relu',
'last_bn_0_init': False,
'activation': lambda: nn.ReLU(inplace=True),
'prune_rate': 0.1,
'ignore_first_conv': True,
},
'tdsparse10': {
'conv': TDConv,
'conv_init': 'fan_in',
'nonlinearity': 'relu',
'last_bn_0_init': False,
'activation': lambda: nn.ReLU(inplace=True),
'prune_rate': 0.9,
},
}
resnet_versions = {
'resnet18' : {
'net' : ResNet,
'block' : BasicBlock,
'layers' : [2, 2, 2, 2],
'num_classes' : 1000,
},
'resnet34' : {
'net' : ResNet,
'block' : BasicBlock,
'layers' : [3, 4, 6, 3],
'num_classes' : 1000,
},
'resnet50' : {
'net' : ResNet,
'block' : Bottleneck,
'layers' : [3, 4, 6, 3],
'num_classes' : 1000,
},
'resnet101' : {
'net' : ResNet,
'block' : Bottleneck,
'layers' : [3, 4, 23, 3],
'num_classes' : 1000,
},
'resnet152' : {
'net' : ResNet,
'block' : Bottleneck,
'layers' : [3, 8, 36, 3],
'num_classes' : 1000,
},
}
def build_resnet(version, config, model_state=None):
version = resnet_versions[version]
config = resnet_configs[config]
builder = ResNetBuilder(version, config)
print("Version: {}".format(version))
print("Config: {}".format(config))
model = version['net'](builder,
version['block'],
version['layers'],
version['num_classes'])
return model
|
dnw-master
|
imagenet_sparsity_experiments/image_classification/resnet.py
|
import os
import time
import numpy as np
import torch
import shutil
import torch.distributed as dist
def should_backup_checkpoint(args):
def _sbc(epoch):
return args.gather_checkpoints and (epoch < 10 or epoch % 10 == 0)
return _sbc
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar', checkpoint_dir='./', backup_filename=None):
if (not torch.distributed.is_initialized()) or torch.distributed.get_rank() == 0:
filename = os.path.join(checkpoint_dir, filename)
print("SAVING {}".format(filename))
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, os.path.join(checkpoint_dir, 'model_best.pth.tar'))
if backup_filename is not None:
shutil.copyfile(filename, os.path.join(checkpoint_dir, backup_filename))
def timed_generator(gen):
start = time.time()
for g in gen:
end = time.time()
t = end - start
yield g, t
start = time.time()
def timed_function(f):
def _timed_function(*args, **kwargs):
start = time.time()
ret = f(*args, **kwargs)
return ret, time.time() - start
return _timed_function
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
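# --- Hedged worked example (not part of the original file) -------------------
# With a batch of 2 samples and 3 classes:
# >>> output = torch.tensor([[0.1, 0.7, 0.2], [0.6, 0.3, 0.1]])
# >>> target = torch.tensor([2, 1])
# >>> accuracy(output, target, topk=(1, 2))
# The top-1 predictions are [1, 0] (both wrong), giving 0.0%; both targets
# appear among the top-2 predictions, giving 100.0%.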
def reduce_tensor(tensor):
rt = tensor.clone()
dist.all_reduce(rt, op=dist.ReduceOp.SUM)
rt /= torch.distributed.get_world_size() if torch.distributed.is_initialized() else 1
return rt
|
dnw-master
|
imagenet_sparsity_experiments/image_classification/utils.py
|
import torch
import torch.nn as nn
class LabelSmoothing(nn.Module):
"""
NLL loss with label smoothing.
"""
def __init__(self, smoothing=0.0):
"""
Constructor for the LabelSmoothing module.
:param smoothing: label smoothing factor
"""
super(LabelSmoothing, self).__init__()
self.confidence = 1.0 - smoothing
self.smoothing = smoothing
def forward(self, x, target):
logprobs = torch.nn.functional.log_softmax(x, dim=-1)
nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1))
nll_loss = nll_loss.squeeze(1)
smooth_loss = -logprobs.mean(dim=-1)
loss = self.confidence * nll_loss + self.smoothing * smooth_loss
return loss.mean()
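# --- Hedged note (not part of the original file) -----------------------------
# With smoothing=0.1 and K classes, the per-sample loss above,
#   loss = 0.9 * (-log p_true) + 0.1 * mean_j(-log p_j),
# equals the cross-entropy against the smoothed target distribution
#   q_j = 0.1 / K + 0.9 * 1[j == true],
# i.e. the true class keeps weight 0.9 + 0.1/K and every other class gets 0.1/K.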
|
dnw-master
|
imagenet_sparsity_experiments/image_classification/smoothing.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import autograd
class ChooseTopEdges(autograd.Function):
""" Chooses the top edges for the forwards pass but allows gradient flow to all edges in the backwards pass"""
@staticmethod
def forward(ctx, weight, prune_rate):
output = weight.clone()
_, idx = weight.flatten().abs().sort()
p = int(prune_rate * weight.numel())
flat_oup = output.flatten()
flat_oup[idx[:p]] = 0
return output
@staticmethod
def backward(ctx, grad_output):
return grad_output, None, None
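# --- Hedged usage sketch (not part of the original repo) ---------------------
# Illustrates the straight-through behaviour of ChooseTopEdges: the forward
# pass zeroes the smallest-magnitude fraction of the weights, while the
# backward pass returns the incoming gradient to *all* entries unchanged.
# >>> w = torch.tensor([[0.10, -2.00], [0.50, 0.05]], requires_grad=True)
# >>> out = ChooseTopEdges.apply(w, 0.5)   # prune_rate=0.5 -> zero 2 of 4 entries
# >>> out
# tensor([[ 0.0000, -2.0000],
#         [ 0.5000,  0.0000]], grad_fn=<...>)
# >>> out.sum().backward()
# >>> w.grad                               # gradient also reaches pruned entries
# tensor([[1., 1.],
#         [1., 1.]])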
class SparseConv(nn.Conv2d):
def set_prune_rate(self, prune_rate):
self.prune_rate = prune_rate
print('prune_rate_{}'.format(self.prune_rate))
def get_weight(self):
return ChooseTopEdges.apply(self.weight, self.prune_rate)
def forward(self, x):
w = self.get_weight()
x = F.conv2d(
x, w, self.bias, self.stride, self.padding, self.dilation, self.groups
)
return x
class TDConv(nn.Conv2d):
def set_prune_rate(self, prune_rate):
self.prune_rate = prune_rate
self.rho = prune_rate
print('td prune_rate_{}'.format(self.prune_rate))
def get_weight(self):
w = self.weight
shape = w.size()
w_flat = w.flatten().abs()
length = w_flat.size(0)
dropout_mask = torch.zeros_like(w_flat)
_, idx = w_flat.sort()
dropout_mask[idx[: int(length * self.prune_rate)]] = 1
if self.training:
dropout_mask = (F.dropout(dropout_mask, p=1 - self.rho) > 0).float()
w_flat = (1 - dropout_mask.detach()) * w_flat
return w_flat.view(*shape)
def forward(self, x):
w = self.get_weight()
x = F.conv2d(
x, w, self.bias, self.stride, self.padding, self.dilation, self.groups
)
return x
|
dnw-master
|
imagenet_sparsity_experiments/image_classification/sparse_util.py
|
import os
import time
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from . import logger as log
from . import resnet as models
from . import utils
try:
from apex.parallel import DistributedDataParallel as DDP
from apex.fp16_utils import *
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to run this example.")
class ModelAndLoss(nn.Module):
def __init__(self, arch, loss, pretrained_weights=None, cuda=True, fp16=False):
super(ModelAndLoss, self).__init__()
self.arch = arch
print("=> creating model '{}'".format(arch))
model = models.build_resnet(arch[0], arch[1])
if pretrained_weights is not None:
print("=> using pre-trained model from a file '{}'".format(arch))
model.load_state_dict(pretrained_weights)
if cuda:
model = model.cuda()
if fp16:
model = network_to_half(model)
# define loss function (criterion) and optimizer
criterion = loss()
if cuda:
criterion = criterion.cuda()
self.model = model
self.loss = criterion
def forward(self, data, target):
output = self.model(data)
loss = self.loss(output, target)
return loss, output
def distributed(self):
self.model = DDP(self.model)
def load_model_state(self, state):
if not state is None:
self.model.load_state_dict(state)
def get_optimizer(parameters, fp16, lr, momentum, weight_decay,
nesterov=False,
state=None,
static_loss_scale=1., dynamic_loss_scale=False,
bn_weight_decay = False):
if bn_weight_decay:
print(" ! Weight decay applied to BN parameters ")
optimizer = torch.optim.SGD([v for n, v in parameters], lr,
momentum=momentum,
weight_decay=weight_decay,
nesterov = nesterov)
else:
print(" ! Weight decay NOT applied to BN parameters ")
bn_params = [v for n, v in parameters if 'bn' in n]
rest_params = [v for n, v in parameters if not 'bn' in n]
print(len(bn_params))
print(len(rest_params))
optimizer = torch.optim.SGD([{'params': bn_params, 'weight_decay' : 0},
{'params': rest_params, 'weight_decay' : weight_decay}],
lr,
momentum=momentum,
weight_decay=weight_decay,
nesterov = nesterov)
if fp16:
optimizer = FP16_Optimizer(optimizer,
static_loss_scale=static_loss_scale,
dynamic_loss_scale=dynamic_loss_scale,
verbose=False)
if not state is None:
optimizer.load_state_dict(state)
return optimizer
def lr_policy(lr_fn, logger=None):
if logger is not None:
logger.register_metric('lr', log.IterationMeter(), log_level=1)
def _alr(optimizer, iteration, epoch):
lr = lr_fn(iteration, epoch)
if logger is not None:
logger.log_metric('lr', lr)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return _alr
def lr_step_policy(base_lr, steps, decay_factor, warmup_length, logger=None):
def _lr_fn(iteration, epoch):
if epoch < warmup_length:
lr = base_lr * (epoch + 1) / warmup_length
else:
lr = base_lr
for s in steps:
if epoch >= s:
lr *= decay_factor
return lr
return lr_policy(_lr_fn, logger=logger)
def lr_linear_policy(base_lr, warmup_length, epochs, logger=None):
def _lr_fn(iteration, epoch):
if epoch < warmup_length:
lr = base_lr * (epoch + 1) / warmup_length
else:
e = epoch - warmup_length
es = epochs - warmup_length
lr = base_lr * (1-(e/es))
return lr
return lr_policy(_lr_fn, logger=logger)
def lr_cosine_policy(base_lr, warmup_length, epochs, logger=None):
def _lr_fn(iteration, epoch):
if epoch < warmup_length:
lr = base_lr * (epoch + 1) / warmup_length
else:
e = epoch - warmup_length
es = epochs - warmup_length
lr = 0.5 * (1 + np.cos(np.pi * e / es)) * base_lr
return lr
return lr_policy(_lr_fn, logger=logger)
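# --- Hedged worked example (not part of the original file) -------------------
# With illustrative values base_lr=0.1, warmup_length=5, epochs=100:
#   epochs 0-4 warm up linearly (0.02, 0.04, 0.06, 0.08, 0.10), then
#   lr = 0.5 * (1 + cos(pi * e / 95)) * 0.1 starts at 0.10, passes ~0.05 halfway
#   through the remaining epochs, and decays smoothly toward ~0 by epoch 99.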
def lr_exponential_policy(base_lr, warmup_length, epochs, final_multiplier=0.001, logger=None):
es = epochs - warmup_length
epoch_decay = np.power(2, np.log2(final_multiplier)/es)
def _lr_fn(iteration, epoch):
if epoch < warmup_length:
lr = base_lr * (epoch + 1) / warmup_length
else:
e = epoch - warmup_length
lr = base_lr * (epoch_decay ** e)
return lr
return lr_policy(_lr_fn, logger=logger)
def get_train_step(model_and_loss, optimizer, fp16, use_amp = False, batch_size_multiplier = 1):
def _step(input, target, optimizer_step = True):
input_var = Variable(input)
target_var = Variable(target)
loss, output = model_and_loss(input_var, target_var)
prec1, prec5 = torch.zeros(1), torch.zeros(1) #utils.accuracy(output.data, target, topk=(1, 5))
if torch.distributed.is_initialized():
reduced_loss = utils.reduce_tensor(loss.data)
#prec1 = reduce_tensor(prec1)
#prec5 = reduce_tensor(prec5)
else:
reduced_loss = loss.data
if fp16:
optimizer.backward(loss)
elif use_amp:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
if optimizer_step:
opt = optimizer.optimizer if isinstance(optimizer, FP16_Optimizer) else optimizer
for param_group in opt.param_groups:
for param in param_group['params']:
param.grad /= batch_size_multiplier
optimizer.step()
optimizer.zero_grad()
torch.cuda.synchronize()
return reduced_loss, prec1, prec5
return _step
def train(train_loader, model_and_loss, optimizer, lr_scheduler, fp16, logger, epoch, use_amp=False, prof=-1, batch_size_multiplier=1, register_metrics=True):
if register_metrics and logger is not None:
logger.register_metric('train.top1', log.AverageMeter(), log_level = 0)
logger.register_metric('train.top5', log.AverageMeter(), log_level = 0)
logger.register_metric('train.loss', log.AverageMeter(), log_level = 0)
logger.register_metric('train.compute_ips', log.AverageMeter(), log_level=1)
logger.register_metric('train.total_ips', log.AverageMeter(), log_level=1)
logger.register_metric('train.data_time', log.AverageMeter(), log_level=1)
logger.register_metric('train.compute_time', log.AverageMeter(), log_level=1)
step = get_train_step(model_and_loss, optimizer, fp16, use_amp = use_amp, batch_size_multiplier = batch_size_multiplier)
model_and_loss.train()
end = time.time()
optimizer.zero_grad()
data_iter = enumerate(train_loader)
if logger is not None:
data_iter = logger.iteration_generator_wrapper(data_iter)
for i, (input, target) in data_iter:
bs = input.size(0)
lr_scheduler(optimizer, i, epoch)
data_time = time.time() - end
if prof > 0:
if i >= prof:
break
optimizer_step = ((i + 1) % batch_size_multiplier) == 0
loss, prec1, prec5 = step(input, target, optimizer_step = optimizer_step)
it_time = time.time() - end
if logger is not None:
logger.log_metric('train.top1', to_python_float(prec1))
logger.log_metric('train.top5', to_python_float(prec5))
logger.log_metric('train.loss', to_python_float(loss))
logger.log_metric('train.compute_ips', calc_ips(bs, it_time - data_time))
logger.log_metric('train.total_ips', calc_ips(bs, it_time))
logger.log_metric('train.data_time', data_time)
logger.log_metric('train.compute_time', it_time - data_time)
end = time.time()
def get_val_step(model_and_loss):
def _step(input, target):
input_var = Variable(input)
target_var = Variable(target)
with torch.no_grad():
loss, output = model_and_loss(input_var, target_var)
prec1, prec5 = utils.accuracy(output.data, target, topk=(1, 5))
if torch.distributed.is_initialized():
reduced_loss = utils.reduce_tensor(loss.data)
prec1 = utils.reduce_tensor(prec1)
prec5 = utils.reduce_tensor(prec5)
else:
reduced_loss = loss.data
torch.cuda.synchronize()
return reduced_loss, prec1, prec5
return _step
def validate(val_loader, model_and_loss, fp16, logger, epoch, prof=-1, register_metrics=True):
if register_metrics and logger is not None:
logger.register_metric('val.top1', log.AverageMeter(), log_level = 0)
logger.register_metric('val.top5', log.AverageMeter(), log_level = 0)
logger.register_metric('val.loss', log.AverageMeter(), log_level = 0)
logger.register_metric('val.compute_ips', log.AverageMeter(), log_level = 1)
logger.register_metric('val.total_ips', log.AverageMeter(), log_level = 1)
logger.register_metric('val.data_time', log.AverageMeter(), log_level = 1)
logger.register_metric('val.compute_time', log.AverageMeter(), log_level = 1)
step = get_val_step(model_and_loss)
top1 = log.AverageMeter()
# switch to evaluate mode
model_and_loss.eval()
end = time.time()
data_iter = enumerate(val_loader)
if not logger is None:
data_iter = logger.iteration_generator_wrapper(data_iter, val=True)
for i, (input, target) in data_iter:
bs = input.size(0)
data_time = time.time() - end
if prof > 0:
if i > prof:
break
loss, prec1, prec5 = step(input, target)
it_time = time.time() - end
top1.record(to_python_float(prec1), bs)
if logger is not None:
logger.log_metric('val.top1', to_python_float(prec1), n=bs)
logger.log_metric('val.top5', to_python_float(prec5), n=bs)
logger.log_metric('val.loss', to_python_float(loss), n=bs)
logger.log_metric('val.compute_ips', calc_ips(bs, it_time - data_time))
logger.log_metric('val.total_ips', calc_ips(bs, it_time))
logger.log_metric('val.data_time', data_time)
logger.log_metric('val.compute_time', it_time - data_time)
end = time.time()
return top1.get_val()
# Train loop {{{
def calc_ips(batch_size, time):
world_size = torch.distributed.get_world_size() if torch.distributed.is_initialized() else 1
tbs = world_size * batch_size
return tbs/time
def train_loop(model_and_loss, optimizer, lr_scheduler, train_loader, val_loader, epochs, fp16, logger,
should_backup_checkpoint, use_amp=False,
batch_size_multiplier = 1,
best_prec1 = 0, start_epoch = 0, prof = -1, skip_training = False, skip_validation = False, save_checkpoints = True, checkpoint_dir='./'):
prec1 = -1
# TODO: is this needed?
if skip_training:
epochs = start_epoch + 1
epoch_iter = range(start_epoch, epochs)
if logger is not None:
epoch_iter = logger.epoch_generator_wrapper(epoch_iter)
for epoch in epoch_iter:
if not skip_training:
train(train_loader, model_and_loss, optimizer, lr_scheduler, fp16, logger, epoch, use_amp = use_amp, prof = prof, register_metrics=epoch==start_epoch, batch_size_multiplier=batch_size_multiplier)
if not skip_validation:
prec1 = validate(val_loader, model_and_loss, fp16, logger, epoch, prof = prof, register_metrics=epoch==start_epoch)
if save_checkpoints and (not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0):
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
if should_backup_checkpoint(epoch):
backup_filename = 'checkpoint-{}.pth.tar'.format(epoch + 1)
else:
backup_filename = None
utils.save_checkpoint({
'epoch': epoch + 1,
'arch': model_and_loss.arch,
'state_dict': model_and_loss.model.state_dict(),
'best_prec1': best_prec1,
'optimizer' : optimizer.state_dict(),
}, is_best, checkpoint_dir=checkpoint_dir, backup_filename=backup_filename)
# }}}
|
dnw-master
|
imagenet_sparsity_experiments/image_classification/training.py
|
dnw-master
|
models/__init__.py
|
|
"""
MobileNet in PyTorch.
Borrowed from https://github.com/kuangliu/pytorch-cifar/blob/master/models/mobilenet.py
"""
import torch.nn as nn
import torch.nn.functional as F
from genutil.config import FLAGS
class Block(nn.Module):
"""Depthwise conv + Pointwise conv"""
def __init__(self, in_planes, out_planes, stride=1):
super(Block, self).__init__()
self.conv1 = nn.Conv2d(
in_planes,
in_planes,
kernel_size=3,
stride=stride,
padding=1,
groups=in_planes,
bias=False,
)
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv2 = nn.Conv2d(
in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False
)
self.bn2 = nn.BatchNorm2d(out_planes)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
return out
class MobileNetV1CIFAR(nn.Module):
cfg = [
64,
(128, 2),
128,
(256, 2),
256,
(512, 2),
512,
512,
512,
512,
512,
(1024, 2),
1024,
]
def __init__(self, num_classes=FLAGS.output_size):
super(MobileNetV1CIFAR, self).__init__()
self.width_mult = 1 if not hasattr(FLAGS, 'width_mult') else FLAGS.width_mult
self.conv1 = nn.Conv2d(3, int(32 * self.width_mult), kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(int(32 * self.width_mult))
self.layers = self._make_layers(in_planes=int(32 * self.width_mult))
self.linear = nn.Linear(int(1024 * self.width_mult), num_classes)
def _make_layers(self, in_planes):
layers = []
for x in self.cfg:
out_planes = x if isinstance(x, int) else x[0]
out_planes = int(self.width_mult * out_planes)
stride = 1 if isinstance(x, int) else x[1]
layers.append(Block(in_planes, out_planes, stride))
in_planes = out_planes
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layers(out)
out = out.mean(dim=[2,3])
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
|
dnw-master
|
models/basic/mobilenetv1cifar.py
|
dnw-master
|
models/basic/__init__.py
|
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import autograd
from genutil.config import FLAGS
def get_conv(inp, oup):
return nn.Conv2d(
inp, oup, kernel_size=3, stride=1, padding=1, bias=False, groups=inp
)
########################################################################################################################
# Graph Superclass #
########################################################################################################################
class Graph(nn.Conv2d):
def __init__(self, prune_rate, dim_in, dim_out):
super(Graph, self).__init__(dim_in, dim_out, kernel_size=1, bias=False)
self.prune_rate = prune_rate
def get_weight(self):
return self.weight
def forward(self, x):
w = self.get_weight()
x = F.conv2d(
x, w, self.bias, self.stride, self.padding, self.dilation, self.groups
)
return x
########################################################################################################################
# Random Graph #
########################################################################################################################
class RandomGraph(Graph):
""" Creates a random neural graph. in_channels and out_channels must only be specified in the static case. """
def __init__(self, prune_rate, dim_in, dim_out, in_channels, out_channels):
super().__init__(prune_rate, dim_in, dim_out)
mask = torch.rand(self.weight.size())
if FLAGS.setting == "static" and in_channels is not None:
r = in_channels
i = 1
while r < self.weight.size(1):
mask[: i * out_channels, r : r + out_channels] = 0.0
r = r + out_channels
i = i + 1
flat_mask = mask.flatten()
_, idx = flat_mask.abs().sort()
flat_mask[idx[: int(prune_rate * flat_mask.size(0))]] = 0.0
flat_mask[flat_mask > 0] = 1.0
mask = flat_mask.view(*self.weight.size())
self.register_buffer("mask", mask)
def get_weight(self):
w = self.mask * self.weight
return w
########################################################################################################################
# Graph learned by DNW #
########################################################################################################################
class ChooseTopEdges(autograd.Function):
""" Chooses the top edges for the forwards pass but allows gradient flow to all edges in the backwards pass"""
@staticmethod
def forward(ctx, weight, prune_rate):
output = weight.clone()
_, idx = weight.flatten().abs().sort()
p = int(prune_rate * weight.numel())
flat_oup = output.flatten()
flat_oup[idx[:p]] = 0
return output
@staticmethod
def backward(ctx, grad_output):
return grad_output, None, None
class DNW(Graph):
def __init__(self, prune_rate, dim_in, dim_out):
super().__init__(prune_rate, dim_in, dim_out)
def get_weight(self):
return ChooseTopEdges.apply(self.weight, self.prune_rate)
########################################################################################################################
# DNW without an update rule on the backwards pass #
########################################################################################################################
class DNWNoUpdate(Graph):
def __init__(self, prune_rate, dim_in, dim_out, in_channels, out_channels):
super().__init__(prune_rate, dim_in, dim_out)
mask = torch.rand((dim_out, dim_in, 1, 1))
if FLAGS.setting == "static" and in_channels is not None:
r = in_channels
i = 1
while r < dim_in:
mask[: i * out_channels, r : r + out_channels] = 0.0
r = r + out_channels
i = i + 1
flat_mask = mask.flatten()
flat_mask[flat_mask != 0] = 1.0
mask = flat_mask.view(dim_out, dim_in, 1, 1)
self.register_buffer("mask", mask)
def get_weight(self):
weight = self.weight * self.mask
output = weight.clone()
_, idx = weight.flatten().abs().sort()
p = int(self.prune_rate * weight.numel())
flat_oup = output.flatten()
flat_oup[idx[:p]] = 0
return output
########################################################################################################################
# Fine Tune with a fixed structure after training #
########################################################################################################################
class FineTune(Graph):
def __init__(self, prune_rate, dim_in, dim_out):
super().__init__(prune_rate, dim_in, dim_out)
self.register_buffer("mask", None)
def get_weight(self):
if self.mask is None:
with torch.no_grad():
flat_mask = self.weight.clone().flatten()
_, idx = flat_mask.abs().sort()
flat_mask[idx[: int(self.prune_rate * flat_mask.size(0))]] = 0.0
flat_mask[flat_mask != 0] = 1.0
print("Initializing Mask")
self.mask = flat_mask.view(*self.weight.size())
return self.mask * self.weight
########################################################################################################################
# Regular Targeted Dropout #
########################################################################################################################
class RegularTargetedDropout(Graph):
def __init__(self, prune_rate, dim_in, dim_out):
super().__init__(prune_rate, dim_in, dim_out)
if FLAGS.rho == "gamma":
self.rho = prune_rate
else:
self.rho = FLAGS.rho
def get_weight(self):
w = self.weight
shape = w.size()
w_flat = w.squeeze().abs()
dropout_mask = torch.zeros_like(w_flat).byte()
_, idx = w_flat.sort(dim=1)
dropout_mask = dropout_mask.scatter(
dim=1, index=idx[:, : int(idx.size(1) * self.prune_rate)], value=1
)
if self.training:
one_with_prob = (torch.rand(*dropout_mask.size()) < self.rho).to(
FLAGS.device
)
dropout_mask = dropout_mask * one_with_prob
w_flat = (1 - dropout_mask).float() * w_flat
return w_flat.view(*shape)
########################################################################################################################
# Unconstrained Targeted Dropout #
########################################################################################################################
class TargetedDropout(Graph):
def __init__(self, prune_rate, dim_in, dim_out):
super().__init__(prune_rate, dim_in, dim_out)
if FLAGS.rho == "gamma":
self.rho = prune_rate
else:
self.rho = FLAGS.rho
# Old, slow version used for CIFAR-10 experiments. Should be equivalent.
# def get_weight(self):
# w = self.weight
# shape = w.size()
#
# w_flat = w.flatten().abs()
# length = w_flat.size(0)
# dropout_mask = torch.zeros_like(w_flat).byte()
#
# _, idx = w_flat.sort()
# dropout_mask[idx[: int(length * self.prune_rate)]] = 1
#
# if self.training:
# one_with_prob_alpha = (torch.rand(length) < self.rho).to(FLAGS.device)
# dropout_mask = dropout_mask * one_with_prob_alpha
#
# w_flat = (1 - dropout_mask).float() * w_flat
# return w_flat.view(*shape)
def get_weight(self):
w = self.weight
shape = w.size()
w_flat = w.flatten().abs()
length = w_flat.size(0)
dropout_mask = torch.zeros_like(w_flat)
_, idx = w_flat.sort()
dropout_mask[idx[: int(length * self.prune_rate)]] = 1
if self.training:
dropout_mask = (F.dropout(dropout_mask, p=1-self.rho) > 0).float()
w_flat = (1 - dropout_mask.detach()) * w_flat
return w_flat.view(*shape)
########################################################################################################################
# Complete Graph #
########################################################################################################################
class Complete(Graph):
def __init__(self, prune_rate, dim_in, dim_out):
super().__init__(prune_rate, dim_in, dim_out)
def get_weight(self):
return self.weight
########################################################################################################################
# Get Graph #
########################################################################################################################
def get_graph(prune_rate, dim_in, dim_out, in_channels=None, out_channels=None):
if FLAGS.graph == "random":
return RandomGraph(prune_rate, dim_in, dim_out, in_channels, out_channels)
elif FLAGS.graph == "dnw":
return DNW(prune_rate, dim_in, dim_out)
elif FLAGS.graph == "dnw_no_update":
return DNWNoUpdate(prune_rate, dim_in, dim_out, in_channels, out_channels)
elif FLAGS.graph == "reg_td":
return RegularTargetedDropout(prune_rate, dim_in, dim_out)
elif FLAGS.graph == "td":
return TargetedDropout(prune_rate, dim_in, dim_out)
elif FLAGS.graph == "complete":
return Complete(prune_rate, dim_in, dim_out)
elif FLAGS.graph == "fine_tune":
return FineTune(prune_rate, dim_in, dim_out)
else:
raise Exception("We do not support the graph type {}".format(FLAGS.graph))
|
dnw-master
|
models/graphs/util.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from .util import get_graph, get_conv
from genutil.config import FLAGS
if getattr(FLAGS, 'use_dgl', False):
import dgl
import dgl.function as fn
from scipy.sparse import coo_matrix
class Block(nn.Module):
def __init__(self, inp, oup, stride, blocks):
super(Block, self).__init__()
self.inp = inp
self.oup = oup
self.dim = oup * blocks
self.stride = stride
self.blocks = blocks
self.fast_eval = False
self.downsample = nn.Sequential(
nn.Conv2d(
inp,
inp,
kernel_size=3,
stride=stride,
padding=1,
groups=inp,
bias=False,
)
)
if self.blocks > 1:
self.conv = get_conv(self.dim - oup, self.dim - oup)
self.norm = nn.ModuleList()
self.norm.append(nn.BatchNorm2d(self.inp))
for _ in range(self.blocks - 1):
self.norm.append(nn.BatchNorm2d(self.oup))
self.relu = nn.ReLU(inplace=True)
# this is the total size of the graph.
graph_size = (self.inp + self.oup * (self.blocks - 1)) * self.oup * self.blocks
# this is the number of edges in mobilenet x d where d = 1 - FLAGS.prune_rate
num_edges = (
self.inp * self.oup * (1 - FLAGS.prune_rate) * (1 - FLAGS.prune_rate)
)
for _ in range(self.blocks - 1):
num_edges += (
self.oup * self.oup * (1 - FLAGS.prune_rate) * (1 - FLAGS.prune_rate)
)
prune_rate = 1 - (num_edges / float(graph_size))
self.graph = get_graph(
prune_rate,
self.inp + self.oup * (self.blocks - 1),
self.oup * self.blocks,
self.inp,
self.oup,
)
self.prune_rate = prune_rate
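# --- Hedged worked example (not part of the original repo) -------------------
# The derived prune_rate above matches the edge budget of a width-scaled
# MobileNet block. For the cfg entry (inp=64, oup=128, blocks=2) and an
# illustrative FLAGS.prune_rate of 0.9:
#   graph_size = (64 + 128) * (128 * 2)        = 49152 possible edges
#   num_edges  = (64*128 + 128*128) * 0.1*0.1  = 245.76 kept edges
#   prune_rate = 1 - 245.76 / 49152            ~ 0.995
# so roughly 99.5% of the dense graph is pruned.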
def profiling(self, spatial):
# using n_macs for conv2d as
# in_channels * out_channels * kernel_size[0] * kernel_size[1] * out_spatial[0] * out_spatial[1] // groups
# graph ops.
w = self.get_weight().squeeze().t()
num_edges = w.size(0) * w.size(1) * (1 - self.graph.prune_rate)
graph_n_macs = num_edges * spatial * spatial
graph_n_params = num_edges
has_output = w.abs().sum(1) != 0
has_input = w.abs().sum(0) != 0
input_with_output = has_output[: self.inp].sum()
output_with_input = has_input[-self.oup :].sum()
# node ops. no ops at output nodes.
num_nodes_with_ops = has_output.sum()
node_n_macs = num_nodes_with_ops * 3 * 3 * spatial * spatial
node_n_params = num_nodes_with_ops * 3 * 3
n_macs = int(node_n_macs + graph_n_macs)
n_params = int(node_n_params + graph_n_params)
return n_macs, n_params, input_with_output, output_with_input
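# --- Hedged worked example (not part of the original repo) -------------------
# For a 14 x 14 feature map, each node with at least one outgoing edge costs
#   3 * 3 * 14 * 14 = 1764 MACs (its 3x3 single-channel convolution),
# and each kept edge costs 14 * 14 = 196 MACs (a 1x1 connection), which is
# exactly what node_n_macs and graph_n_macs accumulate above.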
def prepare_for_fast_eval(self, input_with_output, has_output, output_with_input):
self.fast_eval = True
self.input_with_output = input_with_output
self.has_output = has_output
self.output_with_input = output_with_input
# 1. first kill the dead neurons in the dwconvs
# 1.a downsample
new_downsample = nn.Sequential(
nn.Conv2d(
input_with_output.sum(),
input_with_output.sum(),
kernel_size=3,
stride=self.stride,
padding=1,
groups=input_with_output.sum(),
bias=False,
)
)
new_downsample[0].weight.data = self.downsample[0].weight.data[
input_with_output
]
self.new_downsample = new_downsample
self.new_norm = nn.ModuleList()
new_norm_0 = nn.BatchNorm2d(input_with_output.sum().item())
new_norm_0.bias.data = self.norm[0].bias.data[input_with_output]
new_norm_0.weight.data = self.norm[0].weight.data[input_with_output]
new_norm_0.running_mean.data = self.norm[0].running_mean.data[input_with_output]
new_norm_0.running_var.data = self.norm[0].running_var.data[input_with_output]
self.new_norm.append(new_norm_0)
# 1.b intermediate
if self.blocks > 1:
new_dim = has_output[self.inp :].sum()
new_conv = nn.Conv2d(
new_dim,
new_dim,
kernel_size=3,
stride=1,
padding=1,
bias=False,
groups=new_dim,
)
new_conv.weight.data = self.conv.weight.data[has_output[self.inp :]]
self.new_conv = new_conv
# 2 get a new block_rng and block_sz
self.block_sz = [input_with_output.sum()]
for i in range(1, self.blocks):
range_to_consider = has_output[
self.inp + (i - 1) * self.oup : self.inp + i * self.oup
]
self.block_sz.append(range_to_consider.sum())
self.block_sz.append(output_with_input.sum())
self.block_rng = [0]
for i, sz in enumerate(self.block_sz):
self.block_rng.append(self.block_rng[i] + sz)
# update batch norm
for i in range(1, self.blocks):
range_to_consider = has_output[
self.inp + (i - 1) * self.oup : self.inp + i * self.oup
]
new_norm = nn.BatchNorm2d(range_to_consider.sum().item())
new_norm.bias.data = self.norm[i].bias.data[range_to_consider]
new_norm.weight.data = self.norm[i].weight.data[range_to_consider]
new_norm.running_mean.data = self.norm[i].running_mean.data[
range_to_consider
]
new_norm.running_var.data = self.norm[i].running_var.data[range_to_consider]
self.new_norm.append(new_norm)
def get_weight(self):
return self.graph.get_weight()
def forward(self, x):
if self.fast_eval:
return (
self.fast_forward_with_dgl(x) if getattr(FLAGS, 'use_dgl', False) else self.fast_forward(x)
)
w = self.get_weight()
# first layer
x = self.norm[0](x)
x = self.relu(x)
x = self.downsample(x)
x = F.conv2d(x, w[:, : self.inp]) # x is now oup*blocks
for i in range(self.blocks - 1):
x_active = x[:, : self.oup]
x_active = self.norm[i + 1](x_active)
x_active = self.relu(x_active)
x_active = F.conv2d(
x_active,
self.conv.weight[i * self.oup : (i + 1) * self.oup],
padding=1,
groups=self.oup,
)
x_active = F.conv2d(
x_active,
w[
(i + 1) * self.oup :,
self.inp + i * self.oup : self.inp + (i + 1) * self.oup,
],
)
x = x[:, self.oup :] + x_active
return x
def fast_forward_with_dgl(self, x):
if not hasattr(self, 'G'):
w = self.get_weight()
w_ = w[:, self.has_output]
w__ = w_[
torch.cat((self.has_output[self.inp :], self.output_with_input), dim=0)
]
# Create graph with weight w__
num_nodes = self.has_output.sum() + self.output_with_input.sum()
compressed_adj = w__.squeeze().t().cpu()
adj = torch.zeros(num_nodes, num_nodes)
adj[
: self.has_output.sum(), self.input_with_output.sum() :
] = compressed_adj
S = coo_matrix(adj.detach().numpy())
self.G = dgl.DGLGraph()
self.G.add_nodes(num_nodes)
self.G.add_edges(S.row, S.col)
self.G.edata["w"] = torch.from_numpy(S.data).to(FLAGS.device)
x = self.new_norm[0](x)
x = self.relu(x)
x = self.new_downsample(x)
# Initialize.
self.G.ndata["h"] = torch.zeros(
self.G.number_of_nodes(), x.size(0), x.size(2), x.size(3)
).to(FLAGS.device)
self.G.ndata["h_sum"] = torch.zeros(
self.G.number_of_nodes(), x.size(0), x.size(2), x.size(3)
).to(FLAGS.device)
self.G.ndata["h"][: self.block_rng[1]] = x.transpose(0, 1)
for i in range(1, self.blocks):
self.G.pull(
torch.arange(self.block_rng[i], self.block_rng[i + 1]),
fn.u_mul_e("h", "w", "m"),
fn.sum("m", "h_sum"),
)
x_active = self.G.ndata["h_sum"][
self.block_rng[i] : self.block_rng[i + 1]
].transpose(0, 1)
x_active = self.new_norm[i](x_active)
x_active = self.relu(x_active)
x_active = F.conv2d(
x_active,
self.new_conv.weight[
self.block_rng[i]
- self.block_sz[0] : self.block_rng[i + 1]
- self.block_sz[0]
],
padding=1,
groups=self.block_sz[i],
)
self.G.ndata["h"][
self.block_rng[i] : self.block_rng[i + 1]
] = x_active.transpose(0, 1)
self.G.pull(
torch.arange(self.block_rng[-2], self.block_rng[-1]),
fn.u_mul_e("h", "w", "m"),
fn.sum("m", "h_sum"),
)
return self.G.ndata["h_sum"][self.block_rng[-2] : self.block_rng[-1]].transpose(
0, 1
)
def fast_forward(self, x):
w = self.get_weight()
w_ = w[:, self.has_output]
w__ = w_[
torch.cat((self.has_output[self.inp:], self.output_with_input), dim=0)
]
x = self.new_norm[0](x)
x = self.relu(x)
x = self.new_downsample(x)
x = F.conv2d(x, w__[:, : self.block_sz[0]])
for i in range(1, self.blocks):
x_active = x[:, : self.block_sz[i]]
x_active = self.new_norm[i](x_active)
x_active = self.relu(x_active)
x_active = F.conv2d(
x_active,
self.new_conv.weight[
self.block_rng[i]
- self.block_sz[0] : self.block_rng[i + 1]
- self.block_sz[0]
],
padding=1,
groups=self.block_sz[i],
)
x_active = F.conv2d(
x_active,
w__[
self.block_rng[i + 1] - self.block_sz[0] :,
self.block_rng[i] : self.block_rng[i + 1],
],
)
x = x[:, self.block_sz[i] :] + x_active
return x
class Linear(nn.Module):
def __init__(self, inp):
super(Linear, self).__init__()
self.inp = inp
self.oup = FLAGS.output_size
self.graph = get_graph(FLAGS.prune_rate, inp, FLAGS.output_size)
def get_weight(self):
return self.graph.get_weight()
def profiling(self):
w = self.get_weight().squeeze().t()
num_edges = int(w.size(0) * w.size(1) * (1 - self.graph.prune_rate))
n_macs = num_edges
n_params = num_edges
return n_macs, n_params, None
def forward(self, x):
w = self.get_weight()
x = F.conv2d(x.view(*x.size(), 1, 1), w[:, : self.inp])
return x.squeeze()
class MobileNetV1Like(nn.Module):
# in_channels, out_channels, stride, blocks
cfg = [
(32, 64, 1, 1),
(64, 128, 2, 2),
(128, 256, 2, 2),
(256, 512, 2, 6),
(512, 1024, 2, 2),
]
def __init__(self,):
super(MobileNetV1Like, self).__init__()
self.conv1 = nn.Conv2d(
3,
32,
kernel_size=3,
stride=1 if FLAGS.image_size <= 64 else 2,
padding=1,
bias=False,
)
self.layers = self._make_layers()
self.pool = nn.AdaptiveAvgPool2d(1)
self.feature_dim = 1024
if getattr(FLAGS, "small", False):
self.linear = Linear(1024)
else:
self.linear = nn.Linear(1024, FLAGS.output_size)
self.relu = nn.ReLU(inplace=True)
self.bn = nn.BatchNorm2d(1024)
def _make_layers(self):
blocks = []
for x in self.cfg:
inp, oup, stride, layers = x
blocks.append(Block(inp, oup, stride, layers))
return nn.Sequential(*blocks)
def get_weight(self):
out = []
for layer in self.layers:
out.append(layer.get_weight())
if hasattr(self.linear, "get_weight"):
out.append(self.linear.get_weight())
return out
def forward(self, x):
out = self.conv1(x)
out = self.layers(out)
out = self.relu(self.bn(out))
out = self.pool(out)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def prepare_for_fast_eval(self):
has_output_list = []
input_with_output_list = []
outputs_with_input_list = []
for layer in self.layers:
w = layer.get_weight().squeeze().t()
has_output = w.abs().sum(1) != 0
has_input = w.abs().sum(0) != 0
input_with_output = has_output[: layer.inp]
output_with_input = has_input[-layer.oup :]
has_output_list.append(has_output)
input_with_output_list.append(input_with_output)
outputs_with_input_list.append(output_with_input)
self.has_output_list = has_output_list
self.input_with_output_list = input_with_output_list
self.outputs_with_input_list = outputs_with_input_list
        # First, deal with conv1, which must keep only the output channels selected by input_with_output_list[0].
# make a new conv1
new_conv1 = nn.Conv2d(
3,
input_with_output_list[0].sum(),
kernel_size=3,
stride=1 if FLAGS.image_size <= 64 else 2,
padding=1,
bias=False,
)
new_conv1.weight.data = self.conv1.weight.data[input_with_output_list[0]]
self.conv1 = new_conv1
if not getattr(FLAGS, "small", False):
# do the same with the linear
new_linear = nn.Linear(outputs_with_input_list[-1].sum(), FLAGS.output_size)
new_linear.weight.data = self.linear.weight.data[
:, outputs_with_input_list[-1]
]
new_linear.bias.data = self.linear.bias.data
self.linear = new_linear
# fix self.bn
new_norm = nn.BatchNorm2d(outputs_with_input_list[-1].sum().item())
new_norm.bias.data = self.bn.bias.data[outputs_with_input_list[-1]]
new_norm.weight.data = self.bn.weight.data[outputs_with_input_list[-1]]
new_norm.running_mean.data = self.bn.running_mean.data[
outputs_with_input_list[-1]
]
new_norm.running_var.data = self.bn.running_var.data[
outputs_with_input_list[-1]
]
self.bn = new_norm
for i, layer in enumerate(self.layers):
layer.prepare_for_fast_eval(
input_with_output_list[i],
has_output_list[i],
outputs_with_input_list[i]
if i == len(self.layers) - 1
else input_with_output_list[i + 1],
)
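# Illustrative sketch (not part of the original file): building the model, running a
# forward pass, and pruning it for fast evaluation. It assumes FLAGS has already been
# populated (e.g. via genutil.config.app()) with image_size, output_size, prune_rate,
# etc., and that trained weights have been loaded; with random weights the surviving
# channel sets are arbitrary.
if __name__ == "__main__":
    model = MobileNetV1Like().eval()
    x = torch.randn(1, 3, FLAGS.image_size, FLAGS.image_size)
    with torch.no_grad():
        out = model(x)                 # standard forward through the sparse graph
        model.prepare_for_fast_eval()  # rebuild conv1 / linear / bn around surviving channels
    print(out.shape)                   # (1, FLAGS.output_size)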
|
dnw-master
|
models/graphs/mobilenetv1like.py
|
dnw-master
|
models/graphs/__init__.py
|
|
""" Tiny classifiers tested in Table 1.
The models have less than 42k parameters. At each node we perform Instance Normalization, ReLU,
and a 3 x 3 single channel convolution (order of operations may vary based on the implementation).
Each model follows downsample -> graph -> pool & fc. For pool we pool only the middle section of the tensor.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchdiffeq import odeint
from .util import get_graph, get_conv
from genutil.config import FLAGS
def downsample():
return nn.Sequential(
nn.Conv2d(
FLAGS.in_channels,
FLAGS.downsample_dim // 2,
kernel_size=3,
stride=2,
padding=1,
),
nn.BatchNorm2d(FLAGS.downsample_dim // 2),
nn.ReLU(inplace=True),
nn.Conv2d(
FLAGS.downsample_dim // 2,
FLAGS.downsample_dim // 2,
kernel_size=3,
stride=2,
padding=1,
groups=FLAGS.downsample_dim // 2,
),
nn.Conv2d(
FLAGS.downsample_dim // 2, FLAGS.downsample_dim, kernel_size=1, stride=1
),
nn.BatchNorm2d(FLAGS.downsample_dim),
)
########################################################################################################################
# Discrete Time Neural Graph #
########################################################################################################################
class DiscreteTimeNeuralGraph(nn.Module):
def __init__(self):
super(DiscreteTimeNeuralGraph, self).__init__()
self.dim = FLAGS.dim
self.layers = FLAGS.layers
self.downsample_dim = FLAGS.downsample_dim
self.feature_dim = FLAGS.feature_dim
self.downsample = downsample()
self.conv = get_conv(self.dim, self.dim)
self.graph = get_graph(
FLAGS.prune_rate, self.dim, self.dim, self.downsample_dim, self.feature_dim
)
self.norm = nn.InstanceNorm2d(self.dim, affine=True)
self.relu = nn.ReLU(inplace=True)
self.pool = nn.AvgPool2d(2)
self.fc = nn.Linear(self.feature_dim, FLAGS.output_size)
self.register_buffer(
"zeros",
torch.zeros(
1,
self.dim - self.downsample_dim,
FLAGS.image_size // 4,
FLAGS.image_size // 4,
),
)
self.half = (FLAGS.image_size // 4) // 2 - 1
def get_weight(self):
return self.graph.get_weight()
def profiling(self):
w = self.get_weight().squeeze().t()
num_edges = w.size(0) * w.size(1) * (1 - self.graph.prune_rate)
graph_n_params = num_edges
has_output = (w.abs().sum(1) != 0)
has_input = (w.abs().sum(0) != 0)
# node ops. no ops at output nodes.
num_nodes_with_ops = has_output.sum()
node_n_params = num_nodes_with_ops * 3 * 3
n_params = int(node_n_params + graph_n_params)
return n_params
def forward(self, x):
out = torch.cat(
(self.downsample(x), self.zeros.expand(x.size(0), *self.zeros.size()[1:])),
dim=1,
)
for i in range(self.layers):
out = self.relu(out)
out = self.conv(out)
out = self.graph(out)
out = self.norm(out)
out = self.pool(
out[
:,
-self.feature_dim :,
self.half - 1 : self.half + 2,
self.half - 1 : self.half + 2,
]
)
out = out.view(-1, self.feature_dim)
out = self.fc(out)
return out
########################################################################################################################
# Continuous Time Neural Graph #
########################################################################################################################
# See https://github.com/rtqichen/torchdiffeq for torchdiffeq implementation.
class ODEFunc(nn.Module):
def __init__(self):
super(ODEFunc, self).__init__()
self.dim = FLAGS.dim
self.layers = FLAGS.layers
self.downsample_dim = FLAGS.downsample_dim
self.feature_dim = FLAGS.feature_dim
self.conv = get_conv(self.dim, self.dim)
self.graph = get_graph(
FLAGS.prune_rate, self.dim, self.dim, self.downsample_dim, self.feature_dim
)
self.norm = nn.InstanceNorm2d(self.dim, affine=True)
self.relu = nn.ReLU(inplace=True)
def get_weight(self):
return self.graph.get_weight()
def forward(self, t, x):
out = self.relu(x)
out = self.conv(out)
out = self.graph(out)
out = self.norm(out)
return out
class ODEBlock(nn.Module):
def __init__(self, odefunc):
super(ODEBlock, self).__init__()
self.odefunc = odefunc
self.tol = 0.001
self.integration_time = torch.tensor([0, 1]).float()
self.solver = odeint
def forward(self, x):
self.integration_time = self.integration_time.type_as(x)
out = self.solver(
self.odefunc, x, self.integration_time, rtol=self.tol, atol=self.tol
)
return out[1]
class ContinuousTimeNeuralGraph(nn.Module):
def __init__(self):
super(ContinuousTimeNeuralGraph, self).__init__()
self.dim = FLAGS.dim
self.downsample_dim = FLAGS.downsample_dim
self.feature_dim = FLAGS.feature_dim
self.downsample = downsample()
self.pool = nn.AvgPool2d(2)
self.fc = nn.Linear(self.feature_dim, FLAGS.output_size)
self.register_buffer(
"zeros",
torch.zeros(
1,
self.dim - self.downsample_dim,
FLAGS.image_size // 4,
FLAGS.image_size // 4,
),
)
self.half = (FLAGS.image_size // 4) // 2 - 1
self.odesolve = ODEBlock(ODEFunc())
def get_weight(self):
return self.odesolve.odefunc.get_weight()
def forward(self, x):
out = torch.cat(
(self.downsample(x), self.zeros.expand(x.size(0), *self.zeros.size()[1:])),
dim=1,
)
out = self.odesolve(out)
out = self.pool(
out[
:,
-self.feature_dim :,
self.half - 1 : self.half + 2,
self.half - 1 : self.half + 2,
]
)
out = out.view(-1, self.feature_dim)
out = self.fc(out)
return out
########################################################################################################################
# Static Neural Graph #
########################################################################################################################
class StaticNeuralGraph(nn.Module):
def __init__(self):
super(StaticNeuralGraph, self).__init__()
self.dim = FLAGS.dim
self.layers = FLAGS.layers
self.downsample_dim = FLAGS.downsample_dim
self.feature_dim = FLAGS.feature_dim
self.downsample = downsample()
self.conv = get_conv(self.dim, self.dim)
self.graph = get_graph(
FLAGS.prune_rate, self.dim, self.dim, self.downsample_dim, self.feature_dim
)
self.relu = nn.ReLU(inplace=True)
self.pool = nn.AvgPool2d(2)
self.fc = nn.Linear(self.feature_dim, FLAGS.output_size)
self.register_buffer(
"zeros",
torch.zeros(
1,
self.dim - self.downsample_dim,
FLAGS.image_size // 4,
FLAGS.image_size // 4,
),
)
self.half = (FLAGS.image_size // 4) // 2 - 1
self.block_sz = [32]
self.block_rng = [0]
for i in range(4):
self.block_sz.append(192)
self.block_rng.append(i * 192 + 32)
self.block_rng.append(4 * 192 + 32)
self.norm = nn.ModuleList()
for s in self.block_sz:
self.norm.append(nn.InstanceNorm2d(s, affine=True))
def get_weight(self):
return self.graph.get_weight()
def forward(self, x):
x = torch.cat(
(self.downsample(x), self.zeros.expand(x.size(0), *self.zeros.size()[1:])),
dim=1,
)
w = self.graph.get_weight()
for i in range(self.layers):
x_active = x[:, : self.block_sz[i]]
x_active = self.norm[i](x_active)
x_active = self.relu(x_active)
x_active = F.conv2d(
x_active,
self.conv.weight[self.block_rng[i] : self.block_rng[i + 1]],
padding=1,
groups=self.block_sz[i],
)
x_active = F.conv2d(
x_active,
w[
min(self.block_rng[i + 1], self.dim - self.feature_dim) :,
self.block_rng[i] : self.block_rng[i + 1],
],
)
if i < self.layers - 1:
x = x[:, self.block_sz[i] :] + x_active
else:
x = x[:, -self.feature_dim :] + x_active
out = self.pool(
x[:, :, self.half - 1 : self.half + 2, self.half - 1 : self.half + 2]
)
out = out.view(-1, self.feature_dim)
out = self.fc(out)
return out
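# Illustrative sketch (not part of the original file): instantiating the discrete-time
# tiny classifier and counting its parameters via profiling(). It assumes FLAGS provides
# in_channels, image_size, dim, layers, downsample_dim, feature_dim, prune_rate and
# output_size, as used throughout this module.
if __name__ == "__main__":
    net = DiscreteTimeNeuralGraph()
    x = torch.randn(4, FLAGS.in_channels, FLAGS.image_size, FLAGS.image_size)
    logits = net(x)                            # shape: (4, FLAGS.output_size)
    print("graph + node parameters:", net.profiling())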
|
dnw-master
|
models/graphs/neuralgraph.py
|
""" Borrowed from https://github.com/JiahuiYu/slimmable_networks
config utilities for yml file."""
import os
import sys
import yaml
# singleton
FLAGS = None
class LoaderMeta(type):
"""Constructor for supporting `!include`.
"""
def __new__(mcs, __name__, __bases__, __dict__):
"""Add include constructer to class."""
# register the include constructor on the class
cls = super().__new__(mcs, __name__, __bases__, __dict__)
cls.add_constructor('!include', cls.construct_include)
return cls
class Loader(yaml.Loader, metaclass=LoaderMeta):
"""YAML Loader with `!include` constructor.
"""
def __init__(self, stream):
try:
self._root = os.path.split(stream.name)[0]
except AttributeError:
self._root = os.path.curdir
super().__init__(stream)
def construct_include(self, node):
"""Include file referenced at node."""
filename = os.path.abspath(
os.path.join(self._root, self.construct_scalar(node)))
extension = os.path.splitext(filename)[1].lstrip('.')
with open(filename, 'r') as f:
if extension in ('yaml', 'yml'):
return yaml.load(f, Loader)
else:
return ''.join(f.readlines())
class AttrDict(dict):
"""Dict as attribute trick.
"""
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
for key in self.__dict__:
value = self.__dict__[key]
if isinstance(value, dict):
self.__dict__[key] = AttrDict(value)
elif isinstance(value, list):
if isinstance(value[0], dict):
self.__dict__[key] = [AttrDict(item) for item in value]
else:
self.__dict__[key] = value
def yaml(self):
"""Convert object to yaml dict and return.
"""
yaml_dict = {}
for key in self.__dict__:
value = self.__dict__[key]
if isinstance(value, AttrDict):
yaml_dict[key] = value.yaml()
elif isinstance(value, list):
if isinstance(value[0], AttrDict):
new_l = []
for item in value:
new_l.append(item.yaml())
yaml_dict[key] = new_l
else:
yaml_dict[key] = value
else:
yaml_dict[key] = value
return yaml_dict
def __repr__(self):
"""Print all variables.
"""
ret_str = []
for key in self.__dict__:
value = self.__dict__[key]
if isinstance(value, AttrDict):
ret_str.append('{}:'.format(key))
child_ret_str = value.__repr__().split('\n')
for item in child_ret_str:
ret_str.append(' '+item)
elif isinstance(value, list):
if isinstance(value[0], AttrDict):
ret_str.append('{}:'.format(key))
for item in value:
# treat as AttrDict above
child_ret_str = item.__repr__().split('\n')
for item in child_ret_str:
ret_str.append(' '+item)
else:
ret_str.append('{}: {}'.format(key, value))
else:
ret_str.append('{}: {}'.format(key, value))
return '\n'.join(ret_str)
class Config(AttrDict):
"""Config with yaml file.
This class is used to config model hyper-parameters, global constants, and
other settings with yaml file. All settings in yaml file will be
automatically logged into file.
Args:
filename(str): File name.
Examples:
yaml file ``model.yml``::
NAME: 'neuralgym'
ALPHA: 1.0
DATASET: '/mnt/data/imagenet'
Usage in .py:
>>> from neuralgym import Config
>>> config = Config('model.yml')
>>> print(config.NAME)
neuralgym
>>> print(config.ALPHA)
1.0
>>> print(config.DATASET)
/mnt/data/imagenet
"""
def __init__(self, filename=None, verbose=False):
        assert os.path.exists(filename), 'File {} does not exist.'.format(filename)
try:
with open(filename, 'r') as f:
cfg_dict = yaml.load(f, Loader)
except EnvironmentError:
            print('Please check the file with name of "{}".'.format(filename))
super(Config, self).__init__(cfg_dict)
if verbose:
print(' pi.cfg '.center(80, '-'))
print(self.__repr__())
print(''.center(80, '-'))
def app():
"""Load app via stdin from subprocess"""
global FLAGS
if FLAGS is None:
job_yaml_file = None
for arg in sys.argv:
if arg.startswith('app:'):
job_yaml_file = arg[4:]
FLAGS = Config(job_yaml_file) # added.
#if job_yaml_file is None:
# job_yaml_file = sys.stdin.readline()
#FLAGS = Config(job_yaml_file)
return FLAGS
else:
return FLAGS
def reset_app(yaml_file):
global FLAGS
FLAGS = Config(yaml_file)
app()
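# Illustrative sketch (not part of the original file): how FLAGS is typically populated.
# Entry-point scripts in this repo receive the config path through an `app:` argument,
# e.g. `python <script>.py app:<path to yml>` (the exact script and yml path depend on
# the experiment). app() then parses sys.argv and builds the Config singleton.
if __name__ == "__main__":
    flags = app()   # reads `app:<yaml path>` from sys.argv
    print(flags)    # AttrDict-style dump of every setting in the yml file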
|
dnw-master
|
genutil/config.py
|
dnw-master
|
genutil/__init__.py
|
|
from genutil.config import FLAGS
def model_profiling(model):
n_macs = 0
n_params = 0
if FLAGS.skip_profiling:
return n_macs, n_params
# using n_macs for conv2d as
# (ins[1] * outs[1] *
# self.kernel_size[0] * self.kernel_size[1] *
# outs[2] * outs[3] // self.groups) * outs[0]
# or, when batch_size = 1
# in_channels * out_channels * kernel_size[0] * kernel_size[1] * out_spatial[0] * out_spatial[1] // groups
# conv1 has stride 2. layer 1 has stride 1.
spatial = 224 // 2
# to compute the flops for conv1 we need to know how many input nodes in layer 1 have an output.
# this is the effective number of output channels for conv1
layer1_n_macs, layer1_n_params, input_with_output, _ = model.layers[0].profiling(
spatial
)
conv1_n_macs = (
model.conv1.in_channels * input_with_output * 3 * 3 * spatial * spatial
)
conv1_n_params = model.conv1.in_channels * input_with_output * 3 * 3
n_macs = layer1_n_macs + conv1_n_macs
n_params = layer1_n_params + conv1_n_params
for i, layer in enumerate(model.layers):
if i != 0:
spatial = spatial // 2 # stride 2 for all blocks >= 1
layer_n_macs, layer_n_params, _, output_with_input = layer.profiling(
spatial
)
n_macs += layer_n_macs
n_params += layer_n_params
# output_with_input is the effective number of output channels from the body of the net.
# pool
pool_n_macs = spatial * spatial * output_with_input
n_macs += pool_n_macs
if getattr(FLAGS, "small", False):
linear_n_macs, linear_n_params, _ = model.linear.profiling()
else:
linear_n_macs = output_with_input * model.linear.out_features
linear_n_params = output_with_input * model.linear.out_features
n_macs += linear_n_macs
n_params += linear_n_params
print(
"Pararms: {:,}".format(n_params).rjust(45, " ")
+ "Macs: {:,}".format(n_macs).rjust(45, " ")
)
return n_macs, n_params
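# Illustrative sketch (not part of the original file): the per-layer MAC count used above,
# written out for a single dense conv2d at batch size 1. The numbers are made up purely to
# illustrate the formula quoted in the comment inside model_profiling.
def _conv2d_macs(in_channels, out_channels, kernel_size, out_spatial, groups=1):
    # in_channels * out_channels * k_h * k_w * out_h * out_w // groups
    return (
        in_channels * out_channels * kernel_size * kernel_size * out_spatial * out_spatial // groups
    )


if __name__ == "__main__":
    # e.g. a stride-2, 3x3 conv from 3 to 32 channels on a 224x224 input (112x112 output):
    print(_conv2d_macs(3, 32, 3, 112))  # 10,838,016 MACs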
|
dnw-master
|
genutil/model_profiling.py
|
import os
import torch
from torchvision import datasets, transforms
import torch.multiprocessing
torch.multiprocessing.set_sharing_strategy("file_system")
from genutil.config import FLAGS
class ImageNet:
def __init__(self):
super(ImageNet, self).__init__()
data_root = os.path.join(FLAGS.data_dir, "imagenet")
use_cuda = torch.cuda.is_available()
# Data loading code
kwargs = {"num_workers": FLAGS.workers, "pin_memory": True} if use_cuda else {}
# Data loading code
traindir = os.path.join(data_root, "train")
valdir = os.path.join(data_root, "val")
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
)
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose(
[
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]
),
)
self.train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=FLAGS.batch_size, shuffle=True, **kwargs
)
self.val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(
valdir,
transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
]
),
),
batch_size=FLAGS.batch_size,
shuffle=False,
**kwargs
)
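# Illustrative sketch (not part of the original file): how the loaders above are consumed.
# It assumes FLAGS.data_dir contains an `imagenet/train` and `imagenet/val` ImageFolder
# layout, as required by the constructor.
if __name__ == "__main__":
    data = ImageNet()
    images, targets = next(iter(data.val_loader))
    print(images.shape, targets.shape)  # (FLAGS.batch_size, 3, 224, 224), (FLAGS.batch_size,)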
|
dnw-master
|
data/imagenet.py
|