python_code | repo_name | file_path |
---|---|---|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import numpy as np
import numpy.testing as npt
from aepsych.config import Config
from aepsych.generators import AxRandomGenerator, RandomGenerator
from ax.modelbridge import Models
class TestRandomGenerator(unittest.TestCase):
def test_randomgen_single(self):
# test that RandomGenerator doesn't mess with shapes
n = 100
rand = np.zeros((n, 3))
mod = RandomGenerator(lb=[1, 2, 3], ub=[2, 3, 4], dim=3)
for i in range(n):
rand[i, :] = mod.gen()
# check that bounds are right
self.assertTrue(np.all(rand[:, 0] > 1))
self.assertTrue(np.all(rand[:, 1] > 2))
self.assertTrue(np.all(rand[:, 2] > 3))
self.assertTrue(np.all(rand[:, 0] < 2))
self.assertTrue(np.all(rand[:, 1] < 3))
self.assertTrue(np.all(rand[:, 2] < 4))
def test_randomgen_batch(self):
# test that RandomGenerator doesn't mess with shapes
n = 100
mod = RandomGenerator(lb=[1, 2, 3], ub=[2, 3, 4], dim=3)
rand = mod.gen(n)
# check that bounds are right
self.assertTrue((rand[:, 0] > 1).all())
self.assertTrue((rand[:, 1] > 2).all())
self.assertTrue((rand[:, 2] > 3).all())
self.assertTrue((rand[:, 0] < 2).all())
self.assertTrue((rand[:, 1] < 3).all())
self.assertTrue((rand[:, 2] < 4).all())
def test_randomgen_config(self):
lb = [-1, 0]
ub = [1, 2]
config_str = f"""
[common]
lb = {lb}
ub = {ub}
"""
config = Config(config_str=config_str)
gen = RandomGenerator.from_config(config)
npt.assert_equal(gen.lb.numpy(), np.array(lb))
npt.assert_equal(gen.ub.numpy(), np.array(ub))
self.assertEqual(gen.dim, len(lb))
def test_axrandom_config(self):
config_str = """
[common]
parnames = [par1, par2]
lb = [-1, 0]
ub = [1, 2]
outcome_types = [continuous]
strategy_names = [init]
[init]
generator = RandomGenerator
[RandomGenerator]
seed=231
deduplicate=True
"""
config = Config(config_str=config_str)
gen = AxRandomGenerator.from_config(config, name="init")
self.assertEqual(gen.model, Models.UNIFORM)
self.assertEqual(gen.model_kwargs["seed"], 231)
self.assertTrue(gen.model_kwargs["deduplicate"])
if __name__ == "__main__":
unittest.main()
| aepsych-main | tests/generators/test_random_generator.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from aepsych.config import Config
from aepsych.generators.completion_criterion import (
MinAsks,
MinTotalOutcomeOccurrences,
MinTotalTells,
RunIndefinitely,
)
from aepsych.strategy import AEPsychStrategy
class CompletionCriteriaTestCase(unittest.TestCase):
def setUp(self):
config_str = """
[common]
use_ax = True
stimuli_per_trial = 1
outcome_types = [binary]
parnames = [x]
lb = [0]
ub = [1]
strategy_names = [test_strat]
[test_strat]
generator = SobolGenerator
"""
config = Config(config_str=config_str)
self.strat = AEPsychStrategy.from_config(config)
def test_min_asks(self):
config_str = """
[test_strat]
min_asks = 2
"""
config = Config(config_str=config_str)
criterion = MinAsks.from_config(config, "test_strat")
self.assertEqual(criterion.threshold, 2)
self.assertFalse(criterion.is_met(self.strat.experiment))
self.strat.complete_new_trial({"x": 0.0}, 0.0)
self.assertFalse(criterion.is_met(self.strat.experiment))
self.strat.complete_new_trial({"x": 1.0}, 0.0)
self.assertFalse(criterion.is_met(self.strat.experiment))
self.strat.gen()
self.assertFalse(criterion.is_met(self.strat.experiment))
self.strat.gen()
self.assertTrue(criterion.is_met(self.strat.experiment))
def test_min_total_tells(self):
config_str = """
[test_strat]
min_total_tells = 2
"""
config = Config(config_str=config_str)
criterion = MinTotalTells.from_config(config, "test_strat")
self.assertEqual(criterion.threshold, 2)
self.assertFalse(criterion.is_met(self.strat.experiment))
self.strat.gen()
self.assertFalse(criterion.is_met(self.strat.experiment))
self.strat.gen()
self.assertFalse(criterion.is_met(self.strat.experiment))
self.strat.complete_new_trial({"x": 0.0}, 0.0)
self.assertFalse(criterion.is_met(self.strat.experiment))
self.strat.complete_new_trial({"x": 1.0}, 0.0)
self.assertTrue(criterion.is_met(self.strat.experiment))
def test_min_total_outcome_occurrences(self):
config_str = """
[common]
outcome_types = [binary]
min_total_outcome_occurrences = 2
"""
config = Config(config_str=config_str)
criterion = MinTotalOutcomeOccurrences.from_config(config, "test_strat")
self.assertEqual(criterion.threshold, 2)
self.strat.complete_new_trial({"x": 0.0}, 0.0)
self.assertFalse(criterion.is_met(self.strat.experiment))
self.strat.complete_new_trial({"x": 1.0}, 0.0)
self.assertFalse(criterion.is_met(self.strat.experiment))
self.strat.complete_new_trial({"x": 0.0}, 1.0)
self.assertFalse(criterion.is_met(self.strat.experiment))
self.strat.complete_new_trial({"x": 1.0}, 1.0)
self.assertTrue(criterion.is_met(self.strat.experiment))
def run_indefinitely(self):
config_str = """
[common]
outcome_types = [binary]
run_indefinitely = False
"""
config = Config(config_str=config_str)
criterion = RunIndefinitely(**RunIndefinitely.from_config(config, "test_strat"))
self.assertTrue(criterion.is_met(self.strat.experiment))
config_str = """
[common]
outcome_types = [binary]
run_indefinitely = True
"""
config = Config(config_str=config_str)
criterion = RunIndefinitely(**RunIndefinitely.from_config(config, "test_strat"))
self.assertFalse(criterion.is_met(self.strat.experiment))
if __name__ == "__main__":
unittest.main()
| aepsych-main | tests/generators/test_completion_criteria.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from unittest.mock import MagicMock
import numpy as np
import torch
from aepsych.acquisition import MonotonicMCLSE
from aepsych.config import Config
from aepsych.generators import EpsilonGreedyGenerator, MonotonicRejectionGenerator
class TestEpsilonGreedyGenerator(unittest.TestCase):
def test_epsilon_greedy(self):
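# With probability epsilon the generator should explore uniformly at random instead of
# calling its subgenerator, so over many trials the subgenerator should handle roughly a
# (1 - epsilon) fraction of gen() calls; the assertion below checks this to within 0.01.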
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
total_trials = 2000
extra_acqf_args = {"target": 0.75, "beta": 1.96}
for epsilon in (0.1, 0.5):
gen = EpsilonGreedyGenerator(
subgenerator=MonotonicRejectionGenerator(
acqf=MonotonicMCLSE, acqf_kwargs=extra_acqf_args
),
epsilon=epsilon,
)
model = MagicMock()
gen.subgenerator.gen = MagicMock()
for _ in range(total_trials):
gen.gen(1, model)
self.assertTrue(
np.abs(gen.subgenerator.gen.call_count / total_trials - (1 - epsilon))
< 0.01
)
def test_greedyepsilon_config(self):
config_str = """
[common]
acqf = MonotonicMCLSE
[EpsilonGreedyGenerator]
subgenerator = MonotonicRejectionGenerator
epsilon = .5
"""
config = Config()
config.update(config_str=config_str)
gen = EpsilonGreedyGenerator.from_config(config)
self.assertIsInstance(gen.subgenerator, MonotonicRejectionGenerator)
self.assertEqual(gen.subgenerator.acqf, MonotonicMCLSE)
self.assertEqual(gen.epsilon, 0.5)
| aepsych-main | tests/generators/test_epsilon_greedy_generator.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
| aepsych-main | tests/generators/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import numpy as np
import numpy.testing as npt
import torch
from aepsych.config import Config
from aepsych.generators import AxSobolGenerator, SobolGenerator
from aepsych.utils import make_scaled_sobol
from ax.modelbridge import Models
class TestSobolGenerator(unittest.TestCase):
def test_batchsobol(self):
mod = SobolGenerator(lb=[1, 2, 3], ub=[2, 3, 4], dim=3, seed=12345)
acq1 = mod.gen(num_points=2)
self.assertEqual(acq1.shape, (2, 3))
acq2 = mod.gen(num_points=3)
self.assertEqual(acq2.shape, (3, 3))
acq3 = mod.gen()
self.assertEqual(acq3.shape, (1, 3))
def test_sobolgen_single(self):
# test that SobolGenerator doesn't mess with shapes
sobol1 = make_scaled_sobol(lb=[1, 2, 3], ub=[2, 3, 4], size=10, seed=12345)
sobol2 = torch.zeros((10, 3))
mod = SobolGenerator(lb=[1, 2, 3], ub=[2, 3, 4], dim=3, seed=12345)
for i in range(10):
sobol2[i, :] = mod.gen()
npt.assert_almost_equal(sobol1.numpy(), sobol2.numpy())
# check that bounds are also right
self.assertTrue(torch.all(sobol1[:, 0] > 1))
self.assertTrue(torch.all(sobol1[:, 1] > 2))
self.assertTrue(torch.all(sobol1[:, 2] > 3))
self.assertTrue(torch.all(sobol1[:, 0] < 2))
self.assertTrue(torch.all(sobol1[:, 1] < 3))
self.assertTrue(torch.all(sobol1[:, 2] < 4))
def test_sobol_config(self):
config_str = """
[common]
lb = [0]
ub = [1]
parnames = [par1]
stimuli_per_trial = 1
[SobolGenerator]
seed=12345
"""
config = Config()
config.update(config_str=config_str)
gen = SobolGenerator.from_config(config)
npt.assert_equal(gen.lb.numpy(), np.array([0]))
npt.assert_equal(gen.ub.numpy(), np.array([1]))
self.assertEqual(gen.seed, 12345)
self.assertEqual(gen.stimuli_per_trial, 1)
def test_pairwise_sobol_sizes(self):
for dim in np.arange(1, 4):
for nsamp in (3, 5, 7):
generator = SobolGenerator(
lb=np.arange(dim).tolist(),
ub=(1 + np.arange(dim)).tolist(),
stimuli_per_trial=2,
)
shape_out = (nsamp, dim, 2)
self.assertEqual(generator.gen(nsamp).shape, shape_out)
def test_axsobol_config(self):
config_str = """
[common]
parnames = [par1]
lb = [0]
ub = [1]
stimuli_per_trial = 1
outcome_types = [continuous]
strategy_names = [init]
[init]
generator = SobolGenerator
[SobolGenerator]
seed=12345
scramble=False
"""
config = Config(config_str=config_str)
gen = AxSobolGenerator.from_config(config, name="init")
self.assertEqual(gen.model, Models.SOBOL)
self.assertEqual(gen.model_kwargs["seed"], 12345)
self.assertFalse(gen.model_kwargs["scramble"])
if __name__ == "__main__":
unittest.main()
| aepsych-main | tests/generators/test_sobol_generator.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import numpy as np
import numpy.testing as npt
from aepsych.config import Config
from aepsych.generators import ManualGenerator
class TestManualGenerator(unittest.TestCase):
def test_batchmanual(self):
points = np.random.rand(10, 3)
mod = ManualGenerator(
lb=[0, 0, 0], ub=[1, 1, 1], dim=3, points=points, shuffle=False
)
npt.assert_allclose(points, mod.points) # make sure they weren't shuffled
acq1 = mod.gen(num_points=2)
self.assertEqual(acq1.shape, (2, 3))
acq2 = mod.gen(num_points=3)
self.assertEqual(acq2.shape, (3, 3))
acq3 = mod.gen()
self.assertEqual(acq3.shape, (1, 3))
with self.assertWarns(RuntimeWarning):
acq4 = mod.gen(num_points=10)
self.assertEqual(acq4.shape, (4, 3))
def test_manual_generator(self):
points = [[0, 0], [0, 1], [1, 0], [1, 1]]
config_str = f"""
[common]
lb = [0, 0]
ub = [1, 1]
parnames = [par1, par2]
[ManualGenerator]
points = {points}
"""
config = Config()
config.update(config_str=config_str)
gen = ManualGenerator.from_config(config)
npt.assert_equal(gen.lb.numpy(), np.array([0, 0]))
npt.assert_equal(gen.ub.numpy(), np.array([1, 1]))
self.assertFalse(gen.finished)
p1 = list(gen.gen()[0])
p2 = list(gen.gen()[0])
p3 = list(gen.gen()[0])
p4 = list(gen.gen()[0])
self.assertEqual(sorted([p1, p2, p3, p4]), points)
self.assertTrue(gen.finished)
if __name__ == "__main__":
unittest.main()
| aepsych-main | tests/generators/test_manual_generator.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# This source code is licensed under the MIT license found in the
# LICENSE file in the scripts directory.
# -- Path setup --------------------------------------------------------------
import os
import sys
# from pkg_resources import get_distribution
# sys.path.insert(0, os.path.abspath("../../"))
current_dir = os.path.dirname(__file__)
target_dir = os.path.abspath(os.path.join(current_dir, "../../"))
sys.path.insert(0, target_dir)
# base_path = os.path.abspath(os.path.join(__file__, "..", "..", "..", "aepsych"))
# print(sys.path, base_path, "======")
# sys.path.append(base_path)
# -- Project information -----------------------------------------------------
project = "AEPsych"
# copyright = "Meta, Inc."
author = "Meta, Inc."
# get version string
# version = get_distribution("aepsych").version
version = ""
release = ""
# -- General configuration ---------------------------------------------------
# Sphinx extension modules
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"sphinx.ext.doctest",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.ifconfig",
"sphinx.ext.viewcode",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames
# source_suffix = [".rst", ".md"]
source_suffix = ".rst"
# The main toctree document.
index_doc = "index"
# The language for content autogenerated by Sphinx.
language = "en"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# Default options for autodoc directives. Applied to all autodoc directives
autodoc_default_options = {
"undoc-members": True,
"show-inheritance": True,
"member-order": "bysource",
}
# show type hints in the method description
autodoc_typehints = "description"
# Include init docstrings in the body of autoclass directives
autoclass_content = "both"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
html_static_path = [] # for now we have no static files to track
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "aepsychdoc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(index_doc, "aepsych.tex", "AEPsych Documentation", "Meta, Inc.", "manual")
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(index_doc, "aepsych", "aepsych Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
index_doc,
"aepsych",
"AEPsych Documentation",
author,
"AEPsych",
"AEPsych",
"Miscellaneous",
)
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
autodoc_mock_imports = ["botorch"]
| aepsych-main | sphinx/source/conf.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from datetime import datetime
import numpy as np
constants = {
"savefolder": "./databases/",
"timestamp": datetime.now().strftime("%Y-%m-%d_%H-%M-%S"),
"config_path": "./aepsych_config.ini",
"seed": 1,
}
# base parameters in case we don't want AEPsych to manage all 8.
base_params = {
"spatial_frequency": 2,
"orientation": 0,
"pedestal": 0.5,
"contrast": 0.75,
"temporal_frequency": 0,
"size": 10,
"angle_dist": 0,
"eccentricity": 0,
}
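# psychopy display and timing settings: screen size in pixels, monitor width and viewing
# distance in cm (the units psychopy's Monitor expects), and durations in seconds (the *_s
# suffix); response_wait and iti are presumably also in seconds.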
psychopy_vars = {
"setSizePix": [1680, 1050],
"setWidth": 47.475,
"setDistance": 57,
"pre_duration_s": 0.0,
"stim_duration_s": 5.0,
"post_duration_s": 1,
"response_wait": 2,
"iti": 0,
}
| aepsych-main | examples/contrast_discrimination_psychopy/experiment_config.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import experiment_config
import numpy as np
import torch
from aepsych_client import AEPsychClient
from helpers import HalfGrating
from psychopy import core, data, event, gui, monitors, visual
from psychopy.tools.filetools import toFile
def run_experiment():
seed = experiment_config.constants["seed"]
config_path = experiment_config.constants["config_path"]
torch.manual_seed(seed)
np.random.seed(seed)
expInfo = {"observer": "default_observer"}
expInfo["dateStr"] = data.getDateStr() # add the current time
# present a dialogue to change params
dlg = gui.DlgFromDict(expInfo, title="multi-D JND Exp", fixed=["dateStr"])
if dlg.OK:
toFile("lastParams.pickle", expInfo) # save params to file for next time
else:
core.quit() # the user hit cancel so exit
screen = monitors.Monitor("testMonitor", gamma=1)
screen.setSizePix(experiment_config.psychopy_vars["setSizePix"])
screen.setWidth(experiment_config.psychopy_vars["setWidth"])
screen.setDistance(experiment_config.psychopy_vars["setDistance"])
win = visual.Window(
allowGUI=True,
units="deg",
monitor=screen,
bpc=(8, 8, 8),
size=experiment_config.psychopy_vars["setSizePix"],
fullscr=False,
)
screen_text_g = visual.TextStim(win, text=None, alignHoriz="center", color="green")
screen_text_r = visual.TextStim(win, text=None, alignHoriz="center", color="red")
screen_text = visual.TextStim(win, text=None, alignHoriz="center", color="gray")
# display instructions and wait
message2 = visual.TextStim(
win,
pos=[0, +3],
text="Hit the space bar key when ready and "
"to advance to the next trial after you see a red cross.",
)
message1 = visual.TextStim(
win,
pos=[0, -3],
text="You'll see a stimulus. One side will have a grating and the other will be noise."
" "
"Press left or right corresponding to the side with noise. If you don't know, please guess.",
)
message1.draw()
message2.draw()
win.flip() # to show our newly drawn 'stimuli'
# pause until there's a keypress
event.waitKeys()
# start the trial: draw grating
clock = core.Clock()
screen_text_r.setText("+")
screen_text_r.draw(win=win)
win.flip()
aepsych_client = AEPsychClient()
aepsych_client.configure(config_path=config_path)
# create stimulus
stim = HalfGrating(**experiment_config.base_params, win=win)
i = 0
is_finished = False
while not is_finished:
ask_response = aepsych_client.ask()
trial_params = ask_response["config"]
is_finished = ask_response["is_finished"]
stim.update(trial_params)
print(trial_params)
bg_color = np.array([stim.pedestal_psychopy_scale] * 3)
win.setColor(bg_color)
win.color = bg_color
win.flip()
screen_text_r.setText("+")
screen_text_r.draw(win=win)
win.flip()
core.wait(experiment_config.psychopy_vars["iti"])
fixation_keys = []
while not fixation_keys:
fixation_keys = event.getKeys(keyList=["space"])
fixation_keys = ["space"] ## for debugging
if "space" in fixation_keys:
screen_text.setText("+")
screen_text.draw(win=win)
win.flip()
noisy_half = "left" if np.random.randint(2) == 0 else "right"
clock.reset()
keys = stim.draw(
noisy_half=noisy_half,
win=win,
pre_duration_s=experiment_config.psychopy_vars["pre_duration_s"],
stim_duration_s=experiment_config.psychopy_vars["stim_duration_s"],
)
# keys = event.waitKeys(keyList=["left", "right"]) # phil took out max wait
rt = clock.getTime()
response = noisy_half in keys
print(f"keys:{keys}, ca:{noisy_half}, acc:{response}, rt:{rt}")
win.flip()
if response:
screen_text_g.setText("Correct")
screen_text_g.draw()
win.flip()
else:
screen_text_r.setText("Incorrect")
screen_text_r.draw()
win.flip()
# inform bayesopt of the response, needed to calculate next contrast
aepsych_client.tell(config=trial_params, outcome=response, rt=rt)
# core.wait(experiment_config.psychopy_vars["post_duration_s"])
event.clearEvents()
print(f"trial {i}")
i = i + 1
win.close()
aepsych_client.finalize()
core.quit()
if __name__ == "__main__":
run_experiment()
| aepsych-main | examples/contrast_discrimination_psychopy/experiment.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import pyglet
from psychopy import core, event
from psychopy.visual import Window
from psychopy.visual.image import ImageStim
pyglet.options["debug_gl"] = False
GL = pyglet.gl
def polar_to_cartesian(r, theta):
z = r * np.exp(1j * np.radians(theta))
return z.real, z.imag
def cartesian_to_polar(x, y):
z = x + 1j * y
return (np.abs(z), np.angle(z, deg=True))
class AnimatedGrating:
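# contrast and pedestal arrive on a log10 scale and are mapped back to linear units in
# update() via these transforms (presumably because AEPsych searches those parameters in
# log space); all other parameters are passed through unchanged.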
param_transforms = {"contrast": lambda x: 10 ** x, "pedestal": lambda x: 10 ** x}
def __init__(
self,
spatial_frequency: float,
orientation: float,
pedestal: float,
contrast: float,
temporal_frequency: float,
eccentricity: float,
size: float,
angle_dist: float,
win: Window,
cpd=60, # display cycles per degree
Lmin=0, # min luminance in nits
Lmax=255, # max luminance in nits
res=256, # texture resolution
noisy=False,
*args,
**kw,
):
"""Generate animated Gabor grating
Args:
spatial_frequency (float): Spatial frequency.
orientation (float): Orientation (degrees)
pedestal (float): Background luminance.
contrast (float): Stimulus contrast.
temporal_frequency (float): Temporal frequency (Hz).
eccentricity (float): Stimulus eccentricity relative to center (degrees).
size (float): Stimulus size.
angle_dist (float): Stimulus angle relative to center.
win (Window): Window to render to.
cpd (int, optional): Display cycles per degree. Defaults to 60.
"""
self.spatial_frequency = spatial_frequency
self.temporal_frequency = temporal_frequency
self.orientation = orientation
self.pedestal = pedestal
self.contrast = contrast
self.settable_params = (
"spatial_frequency",
"temporal_frequency",
"orientation",
"pedestal",
"contrast",
"size",
"eccentricity",
"angle_dist",
)
self.cpd = cpd
self.Lmin = Lmin
self.Lmax = Lmax
self.res = res
self.noisy = noisy
self.initial_phase = np.random.uniform(low=0, high=0.2, size=(1))
img = np.zeros((self.res, self.res))
self.win = win
self._stim = ImageStim(image=img, mask="gauss", win=win, *args, **kw)
# these get set on _stim
self.size = size
self.eccentricity = eccentricity
self.angle_dist = angle_dist
def update(self, trial_config):
for k, v in trial_config.items():
if k in self.settable_params:
if k in self.param_transforms:
setattr(self, k, self.param_transforms[k](v[0]))
else:
setattr(self, k, v[0])
@property
def size(self):
return self._stim.size
@size.setter
def size(self, x):
self._stim.size = x
@property
def eccentricity(self):
return cartesian_to_polar(*self._stim.pos)[0]
@eccentricity.setter
def eccentricity(self, x):
current_coords = cartesian_to_polar(*self._stim.pos)
self._stim.pos = polar_to_cartesian(x, current_coords[1])
@property
def angle_dist(self):
return cartesian_to_polar(*self._stim.pos)[1]
@angle_dist.setter
def angle_dist(self, deg):
current_coords = cartesian_to_polar(*self._stim.pos)
self._stim.pos = polar_to_cartesian(current_coords[0], deg + 90)
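# pedestal is stored as a luminance fraction in [0, 1]; psychopy window colors live in
# [-1, 1], hence the 2 * x - 1 rescale below.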
@property
def pedestal_psychopy_scale(self):
return self.pedestal * 2 - 1
def draw(
self,
noisy=False,
win=None,
pre_duration_s=0.1,
stim_duration_s=5.0,
*args,
**kwargs,
):
win = win or self.win
clock = core.Clock()
clock.reset()
self._stim.image = self.get_texture(self.initial_phase, noisy=noisy)
while clock.getTime() < pre_duration_s:
win.flip()
start_time = clock.getTime()
while clock.getTime() < pre_duration_s + stim_duration_s:
if self.temporal_frequency > 0:
newphase = (clock.getTime() - start_time) * self.temporal_frequency
self._stim.image = self.get_texture(
newphase + self.initial_phase, noisy=noisy
)
self._stim.draw()
def get_texture(self, phase=0, noisy=False):
pedestal_lum = self.pedestal * (self.Lmax - self.Lmin) + self.Lmin
grating_max = (self.contrast * (2 * pedestal_lum + self.Lmin) + self.Lmin) / 2
x = np.arange(0, self.res) / self.cpd + phase
y = np.arange(0, self.res) / self.cpd + phase
x_grid, y_grid = np.meshgrid(x, y)
wave = x_grid * np.cos(np.radians(self.orientation)) + y_grid * np.sin(
np.radians(self.orientation)
)
scaled_imag_wave = 1j * 2 * np.pi * self.spatial_frequency * wave
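# Re(exp(i * 2*pi * f * wave)) == cos(2*pi * f * wave): a sinusoid along the orientation
# axis, scaled to grating_max and offset by the pedestal luminance on the next line.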
img = grating_max * np.real(np.exp(scaled_imag_wave)) + pedestal_lum
# convert from luminance to values in [-1, 1] as psychopy wants
img = img / ((self.Lmax - self.Lmin) / 2) - 1
if noisy:
flatimg = img.flatten()
np.random.shuffle(flatimg)
img = flatimg.reshape(self.res, self.res)
return img
class HalfGrating(AnimatedGrating):
"""Gabor animated grating, half of which is scrambled into white noise."""
def noisify_half_texture(self, img, noisy_half):
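# Flattening the transposed image row-major puts one lateral half of the image (the half
# the code labels "left") in the first res**2 // 2 entries, so shuffling just that slice
# scrambles that half into noise while leaving the grating intact on the other side.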
img = img.T # transpose so our indexing tricks work
flatimg = img.flatten()
if noisy_half == "left":
noisy = flatimg[: (self.res ** 2) // 2]
np.random.shuffle(noisy)
img = np.r_[noisy, flatimg[(self.res ** 2) // 2 :]].reshape(
self.res, self.res
)
else:
noisy = flatimg[(self.res ** 2) // 2 :]
np.random.shuffle(noisy)
img = np.r_[flatimg[: (self.res ** 2) // 2], noisy].reshape(
self.res, self.res
)
return img.T # untranspose
def get_texture(self, phase, noisy_half):
img = super().get_texture(phase, noisy=False)
img = self.noisify_half_texture(img, noisy_half)
return img
def draw(
self,
noisy_half="left",
win=None,
pre_duration_s=0.1,
stim_duration_s=5.0,
*args,
**kwargs,
):
win = win or self.win
clock = core.Clock()
clock.reset()
event.clearEvents()
self._stim.image = self.get_texture(self.initial_phase, noisy_half=noisy_half)
while clock.getTime() < pre_duration_s:
win.flip()
start_time = clock.getTime()
while True:
if self.temporal_frequency > 0:
newphase = (clock.getTime() - start_time) * self.temporal_frequency
self._stim.image = self.get_texture(
newphase + self.initial_phase, noisy_half=noisy_half
)
self._stim.draw()
keys = event.getKeys(keyList=["left", "right"])
win.flip()
if len(keys) > 0:
return keys
return keys
class ExperimentAborted(Exception):
pass
class QuitHelper:
"""Helper to quit the experiment by pressing a key twice within 500ms.
It quits by simply raising 'ExperimentAborted'. This is necessary because
from the separate thread that psychopy checks its global key events in, you
cannot raise an Exception in the main thread.
"""
def __init__(self):
self.quit_requested = False
self.debounce_timestamp = None
def request_quit(self):
"""Must be called twice in 500ms to set a flag that causes ExperimentAborted
to be raised when quit_if_requested is called. This indirection is needed if request_quit
is called from a separate thread (as with psychopy global event keys)
"""
tprev = self.debounce_timestamp
tnow = core.getTime()
if tprev is not None and tnow - tprev < 0.5:
self.quit_requested = True
self.debounce_timestamp = tnow
def quit_if_requested(self):
"""Raises ExperimentAborted if request_quit has been called twice in 500ms"""
if self.quit_requested:
raise ExperimentAborted
return True
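# A hedged usage sketch (not part of the original file) of how QuitHelper could be wired
# to a psychopy global event key; the key choice ("q") and surrounding loop are assumptions
# for illustration only, while event.globalKeys.add is the standard psychopy API:
#
#   quit_helper = QuitHelper()
#   event.globalKeys.add(key="q", func=quit_helper.request_quit)
#   while running_trials:
#       quit_helper.quit_if_requested()  # raises ExperimentAborted after two quick presses
#       ...  # run the trial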
| aepsych-main | examples/contrast_discrimination_psychopy/helpers.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# we have pretty verbose messaging by default, suppress that here
import logging
import warnings
warnings.filterwarnings("ignore")
logging.disable(logging.WARNING) # disable anything below warning
import os
import time
from copy import copy
from itertools import product
from aepsych.benchmark import (
Problem,
LSEProblem,
BenchmarkLogger,
PathosBenchmark,
combine_benchmarks,
)
from aepsych.benchmark.test_functions import (
make_songetal_testfun,
novel_detection_testfun,
novel_discrimination_testfun,
)
os.environ["OMP_NUM_THREADS"] = "1"
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["MKL_THREADING_LAYER"] = "GNU"
nproc = 94
n_reps = 100
sobol_trials = 5
total_trials = 150
global_seed = 3
log_every = 5
# test functions and boundaries
novel_names = ["novel_detection", "novel_discrimination"]
novel_testfuns = [novel_detection_testfun, novel_discrimination_testfun]
novel_bounds = [{"lb": [-1, -1], "ub": [1, 1]}, {"lb": [-1, -1], "ub": [1, 1]}]
song_phenotypes = ["Metabolic", "Sensory", "Metabolic+Sensory", "Older-normal"]
song_betavals = [0.2, 0.5, 1, 2, 5, 10]
song_testfuns = [
make_songetal_testfun(p, b) for p, b in product(song_phenotypes, song_betavals)
]
song_bounds = [{"lb": [-3, -20], "ub": [4, 120]}] * len(song_testfuns)
song_names = [f"song_p{p}_b{b}" for p, b in product(song_phenotypes, song_betavals)]
all_testfuns = song_testfuns + novel_testfuns
all_bounds = song_bounds + novel_bounds
all_names = song_names + novel_names
combo_logger = BenchmarkLogger(log_every=log_every)
# benchmark configs: we have to subdivide into 5 configs, since the Sobol,
# MCLSETS, and Song-vs-ours runs are all set up differently
# Song benches
bench_config_nonsobol_song = {
"common": {"outcome_type": "single_probit", "target": 0.75},
"experiment": {
"acqf": [
"MCLevelSetEstimation",
"BernoulliMCMutualInformation",
"MCPosteriorVariance",
],
"modelbridge_cls": "SingleProbitModelbridgeWithSongHeuristic",
"init_strat_cls": "SobolStrategy",
"opt_strat_cls": "ModelWrapperStrategy",
"model": "GPClassificationModel",
"parnames": "[context,intensity]",
},
"MCLevelSetEstimation": {
"target": 0.75,
"beta": 3.84,
"objective": "ProbitObjective",
},
"GPClassificationModel": {
"inducing_size": 100,
"dim": 2,
"mean_covar_factory": [
"song_mean_covar_factory",
],
},
"SingleProbitModelbridgeWithSongHeuristic": {"restarts": 10, "samps": 1000},
"SobolStrategy": {
"n_trials": [sobol_trials],
},
"ModelWrapperStrategy": {
"n_trials": [total_trials - sobol_trials],
"refit_every": 1,
},
}
bench_config_sobol_song = {
"common": {"outcome_type": "single_probit", "target": 0.75},
"experiment": {
"acqf": "MCLevelSetEstimation",
"modelbridge_cls": "SingleProbitModelbridgeWithSongHeuristic",
"init_strat_cls": "SobolStrategy",
"opt_strat_cls": "ModelWrapperStrategy",
"model": "GPClassificationModel",
"parnames": "[context,intensity]",
},
"MCLevelSetEstimation": {
"target": 0.75,
"beta": 3.84,
"objective": "ProbitObjective",
},
"GPClassificationModel": {
"inducing_size": 100,
"dim": 2,
"mean_covar_factory": [
"song_mean_covar_factory",
],
},
"SingleProbitModelbridgeWithSongHeuristic": {"restarts": 10, "samps": 1000},
"SobolStrategy": {
"n_trials": list(range(sobol_trials, total_trials - 1, log_every)),
},
"ModelWrapperStrategy": {
"n_trials": [1],
"refit_every": 1,
},
}
# non-Song benches
bench_config_sobol_rbf = {
"common": {"outcome_type": "single_probit", "target": 0.75},
"experiment": {
"acqf": "MonotonicMCLSE",
"modelbridge_cls": "MonotonicSingleProbitModelbridge",
"init_strat_cls": "SobolStrategy",
"opt_strat_cls": "ModelWrapperStrategy",
"model": "MonotonicGPLSETS",
"parnames": "[context,intensity]",
},
"MonotonicMCLSE": {
"target": 0.75,
"beta": 3.84,
},
"MonotonicGPLSETS": {
"inducing_size": 100,
"mean_covar_factory": [
"monotonic_mean_covar_factory",
],
"monotonic_idxs": ["[1]", "[]"],
"uniform_idxs": "[]",
},
"MonotonicSingleProbitModelbridge": {"restarts": 10, "samps": 1000},
"SobolStrategy": {
"n_trials": list(range(sobol_trials, total_trials - 1, log_every)),
},
"ModelWrapperStrategy": {
"n_trials": [1],
"refit_every": 1,
},
}
bench_config_all_but_gplsets_rbf = {
"common": {"outcome_type": "single_probit", "target": 0.75},
"experiment": {
"acqf": [
"MonotonicMCLSE",
"MonotonicBernoulliMCMutualInformation",
"MonotonicMCPosteriorVariance",
],
"modelbridge_cls": "MonotonicSingleProbitModelbridge",
"init_strat_cls": "SobolStrategy",
"opt_strat_cls": "ModelWrapperStrategy",
"model": "MonotonicRejectionGP",
"parnames": "[context,intensity]",
},
"MonotonicMCLSE": {
"target": 0.75,
"beta": 3.84,
},
"MonotonicBernoulliMCMutualInformation": {},
"MonotonicMCPosteriorVariance": {},
"MonotonicRejectionGP": {
"inducing_size": 100,
"mean_covar_factory": [
"monotonic_mean_covar_factory",
],
"monotonic_idxs": ["[1]", "[]"],
"uniform_idxs": "[]",
},
"MonotonicSingleProbitModelbridge": {"restarts": 10, "samps": 1000},
"SobolStrategy": {
"n_trials": [sobol_trials],
},
"ModelWrapperStrategy": {
"n_trials": [total_trials - sobol_trials],
"refit_every": 1,
},
}
bench_config_gplsets_rbf = {
"common": {"outcome_type": "single_probit", "target": 0.75},
"experiment": {
"acqf": "MonotonicMCLSE",
"modelbridge_cls": "MonotonicSingleProbitModelbridge",
"init_strat_cls": "SobolStrategy",
"opt_strat_cls": "ModelWrapperStrategy",
"model": "MonotonicGPLSETS",
"parnames": "[context,intensity]",
},
"MonotonicMCLSE": {
"target": 0.75,
"beta": 3.84,
},
"MonotonicGPLSETS": {
"inducing_size": 100,
"mean_covar_factory": [
"monotonic_mean_covar_factory",
],
"monotonic_idxs": ["[1]", "[]"],
"uniform_idxs": "[]",
},
"MonotonicSingleProbitModelbridge": {"restarts": 10, "samps": 1000},
"SobolStrategy": {
"n_trials": [sobol_trials],
},
"ModelWrapperStrategy": {
"n_trials": [total_trials - sobol_trials],
"refit_every": 1,
},
}
all_bench_configs = [
bench_config_sobol_song,
bench_config_nonsobol_song,
bench_config_sobol_rbf,
bench_config_all_but_gplsets_rbf,
bench_config_gplsets_rbf,
]
def make_problemobj(testfun, lb, ub):
# This constructs a Problem from a
# test function and bounds
class Inner(LSEProblem, Problem):
def f(self, x):
return testfun(x)
obj = Inner(lb=lb, ub=ub)
return obj
def make_bench(testfun, logger, name, configs, lb, ub):
# make a bench object from test function config
# and bench config
benches = []
problem = make_problemobj(testfun, lb, ub)
for config in configs:
full_config = copy(config)
full_config["common"]["lb"] = str(lb)
full_config["common"]["ub"] = str(ub)
full_config["common"]["name"] = name
benches.append(
PathosBenchmark(
nproc=nproc,
problem=problem,
logger=logger,
configs=full_config,
global_seed=global_seed,
n_reps=n_reps,
)
)
return combine_benchmarks(*benches)
def aggregate_bench_results(all_benchmarks):
combo_logger = BenchmarkLogger(log_every=log_every)
for bench in all_benchmarks:
combo_logger._log.extend(bench.logger._log)
out_pd = combo_logger.pandas()
return out_pd
if __name__ == "__main__":
# one benchmark per test function
print("Creating benchmark objects...")
all_benchmarks = [
make_bench(testfun, combo_logger, name, all_bench_configs, **bounds)
for (testfun, bounds, name) in zip(all_testfuns, all_bounds, all_names)
]
# start all the benchmarks
print("Starting benchmarks...")
for bench in all_benchmarks:
bench_name = bench.combinations[0]["common"]["name"]
print(f"starting {bench_name}...")
bench.start_benchmarks()
done = False
# checkpoint every minute in case something breaks
while not done:
time.sleep(60)
print("Checkpointing benches...")
done = True
for bench in all_benchmarks:
bench_name = bench.combinations[0]["common"]["name"]
bench.collate_benchmarks(wait=False)
if bench.is_done:
print(f"bench {bench_name} is done!")
else:
done = False
temp_results = aggregate_bench_results(all_benchmarks)
temp_results.to_csv(f"bench_checkpoint_seed{global_seed}.csv")
print("Done with all benchmarks, saving!")
final_results = aggregate_bench_results(all_benchmarks)
final_results.to_csv(f"bench_final_seed{global_seed}.csv")
| aepsych-main | pubs/owenetal/code/benchmark_threshold.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from copy import copy
import matplotlib.pyplot as plt
import numpy as np
import torch
from aepsych.benchmark import (
Problem,
LSEProblem,
BenchmarkLogger,
Benchmark,
combine_benchmarks,
)
from aepsych.benchmark.test_functions import (
make_songetal_testfun,
novel_detection_testfun,
novel_discrimination_testfun,
)
from aepsych.config import Config
from aepsych.plotting import plot_strat
from aepsych.strategy import SequentialStrategy
from scipy.stats import norm
global_seed = 3
refit_every = 1
figdir = "./figs/"
def plot_audiometric_lse_grids(
sobol_trials, opt_trials, phenotype="Metabolic+Sensory", beta=2
):
"""
Generates Fig. 8
"""
logger = BenchmarkLogger(log_every=5)
bench_rbf = {
"common": {"pairwise": False, "target": 0.75},
"experiment": {
"acqf": "MonotonicMCLSE",
"modelbridge_cls": "MonotonicSingleProbitModelbridge",
"init_strat_cls": "SobolStrategy",
"opt_strat_cls": "ModelWrapperStrategy",
"model": "MonotonicRejectionGP",
"parnames": "[context,intensity]",
},
"MonotonicMCLSE": {
"target": 0.75,
"beta": 3.84,
},
"MonotonicRejectionGP": {
"inducing_size": 100,
"mean_covar_factory": [
"monotonic_mean_covar_factory",
],
"monotonic_idxs": ["[1]", "[]"],
"uniform_idxs": "[]",
},
"MonotonicSingleProbitModelbridge": {"restarts": 10, "samps": 1000},
"SobolStrategy": {
"n_trials": [sobol_trials],
},
"ModelWrapperStrategy": {
"n_trials": [opt_trials],
"refit_every": [refit_every],
},
}
bench_song = {
"common": {"pairwise": False, "target": 0.75},
"experiment": {
"acqf": "BernoulliMCMutualInformation",
"modelbridge_cls": "SingleProbitModelbridgeWithSongHeuristic",
"init_strat_cls": "SobolStrategy",
"opt_strat_cls": "ModelWrapperStrategy",
"model": "GPClassificationModel",
"parnames": "[context,intensity]",
},
"GPClassificationModel": {
"inducing_size": 100,
"dim": 2,
"mean_covar_factory": [
"song_mean_covar_factory",
],
},
"SingleProbitModelbridgeWithSongHeuristic": {"restarts": 10, "samps": 1000},
"SobolStrategy": {
"n_trials": [sobol_trials],
},
"ModelWrapperStrategy": {
"n_trials": [opt_trials],
"refit_every": [refit_every],
},
}
all_bench_configs = [bench_rbf, bench_song]
testfun = make_songetal_testfun(phenotype=phenotype, beta=beta)
class AudiometricProblem(LSEProblem, Problem):
def f(self, x):
return testfun(x)
lb = [-3, -20]
ub = [4, 120]
benches = []
problem = AudiometricProblem(lb, ub)
for config in all_bench_configs:
full_config = copy(config)
full_config["common"]["lb"] = str(lb)
full_config["common"]["ub"] = str(ub)
benches.append(
Benchmark(
problem=problem,
logger=logger,
configs=full_config,
global_seed=global_seed,
n_reps=1,
)
)
combo_bench = combine_benchmarks(*benches)
strats = []
for config in combo_bench.combinations:
strat = combo_bench.run_experiment(config, logger, seed=global_seed, rep=0)
strats.append(strat)
titles = [
"Monotonic RBF Model, LSE (ours)",
"Nonmonotonic RBF Model, LSE (ours)",
"Linear-Additive Model, BALD",
]
fig, axes = plt.subplots(2, 2, figsize=(7.5, 6.5))
plotting_axes = [axes[1, 0], axes[0, 1], axes[0, 0]]
fig.delaxes(axes[1, 1])
_ = [
plot_strat(
strat=strat_,
title=title_,
ax=ax_,
true_testfun=testfun,
xlabel="Frequency (kHz)",
ylabel="Intensity (dB HL)",
flipx=True,
logx=True,
show=False,
include_legend=False,
include_colorbar=False
)
for ax_, strat_, title_ in zip(plotting_axes, strats, titles)
]
fig.tight_layout()
handles, labels = axes[1, 0].get_legend_handles_labels()
fig.legend(handles, labels, loc="lower right", bbox_to_anchor=(0.8, 0.2))
cbr = fig.colorbar(axes[1, 0].images[0], ax=plotting_axes)
cbr.set_label("Probability of Detection")
return fig
def plot_novel_lse_grids(sobol_trials, opt_trials, funtype="detection"):
"""
Generates Fig. TBA
"""
logger = BenchmarkLogger(log_every=opt_trials) # we only care about final perf
bench_rbf = {
"common": {"pairwise": False, "target": 0.75},
"experiment": {
"acqf": "MonotonicMCLSE",
"modelbridge_cls": "MonotonicSingleProbitModelbridge",
"init_strat_cls": "SobolStrategy",
"opt_strat_cls": "ModelWrapperStrategy",
"model": "MonotonicRejectionGP",
"parnames": "[context,intensity]",
},
"MonotonicMCLSE": {
"target": 0.75,
"beta": 3.84,
},
"MonotonicRejectionGP": {
"inducing_size": 100,
"mean_covar_factory": [
"monotonic_mean_covar_factory",
],
"monotonic_idxs": ["[1]", "[]"],
"uniform_idxs": "[]",
},
"MonotonicSingleProbitModelbridge": {"restarts": 10, "samps": 1000},
"SobolStrategy": {
"n_trials": [sobol_trials],
},
"ModelWrapperStrategy": {
"n_trials": [opt_trials],
"refit_every": [refit_every],
},
}
bench_song = {
"common": {"pairwise": False, "target": 0.75},
"experiment": {
"acqf": "BernoulliMCMutualInformation",
"modelbridge_cls": "SingleProbitModelbridgeWithSongHeuristic",
"init_strat_cls": "SobolStrategy",
"opt_strat_cls": "ModelWrapperStrategy",
"model": "GPClassificationModel",
"parnames": "[context,intensity]",
},
"GPClassificationModel": {
"inducing_size": 100,
"dim": 2,
"mean_covar_factory": [
"song_mean_covar_factory",
],
},
"SingleProbitModelbridgeWithSongHeuristic": {"restarts": 10, "samps": 1000},
"SobolStrategy": {
"n_trials": [sobol_trials],
},
"ModelWrapperStrategy": {
"n_trials": [opt_trials],
"refit_every": [refit_every],
},
}
all_bench_configs = [bench_rbf, bench_song]
if funtype == "detection":
testfun = novel_detection_testfun
yes_label = "Detected trial"
no_label = "Nondetected trial"
elif funtype == "discrimination":
testfun = novel_discrimination_testfun
yes_label = "Correct trial"
no_label = "Incorrect trial"
else:
raise RuntimeError("unknown testfun")
class NovelProblem(LSEProblem, Problem):
def f(self, x):
return testfun(x)
lb = [-1, -1]
ub = [1, 1]
benches = []
problem = NovelProblem(lb, ub, gridsize=50)
for config in all_bench_configs:
full_config = copy(config)
full_config["common"]["lb"] = str(lb)
full_config["common"]["ub"] = str(ub)
benches.append(
Benchmark(
problem=problem,
logger=logger,
configs=full_config,
global_seed=global_seed,
n_reps=1,
)
)
combo_bench = combine_benchmarks(*benches)
strats = []
for config in combo_bench.combinations:
strat = combo_bench.run_experiment(config, logger, seed=global_seed, rep=0)
strats.append(strat)
titles = [
"Monotonic RBF Model, LSE (ours)",
"Nonmonotonic RBF Model, LSE (ours)",
"Linear-Additive Model, BALD",
]
fig, axes = plt.subplots(2, 2, figsize=(7.5, 6.5))
plotting_axes = [axes[1, 0], axes[0, 1], axes[0, 0]]
fig.delaxes(axes[1, 1])
_ = [
plot_strat(
strat=strat_,
title=title_,
ax=ax_,
true_testfun=testfun,
yes_label=yes_label,
no_label=no_label,
show=False,
include_legend=False,
include_colorbar=False
)
for ax_, strat_, title_ in zip(plotting_axes, strats, titles)
]
fig.tight_layout()
handles, labels = axes[1, 0].get_legend_handles_labels()
fig.legend(handles, labels, loc="lower right", bbox_to_anchor=(0.8, 0.2))
cbr = fig.colorbar(axes[1, 0].images[0], ax=plotting_axes)
cbr.set_label("Probability of Detection")
return fig
def plot_acquisition_examples(sobol_trials, opt_trials, target_level=0.75):
### Same model, different acqf figure ####
configs = {
"common": {
"pairwise": False,
"target": target_level,
"lb": "[-3]",
"ub": "[3]",
},
"experiment": {
"acqf": [
"MonotonicMCPosteriorVariance",
"MonotonicBernoulliMCMutualInformation",
"MonotonicMCLSE",
],
"modelbridge_cls": "MonotonicSingleProbitModelbridge",
"init_strat_cls": "SobolStrategy",
"opt_strat_cls": "ModelWrapperStrategy",
"model": "MonotonicRejectionGP",
"parnames": "[intensity]",
},
"MonotonicMCLSE": {
"target": target_level,
"beta": 3.84,
},
"MonotonicRejectionGP": {
"inducing_size": 100,
"mean_covar_factory": "monotonic_mean_covar_factory",
"monotonic_idxs": "[0]",
"uniform_idxs": "[]",
},
"MonotonicSingleProbitModelbridge": {"restarts": 10, "samps": 1000},
"SobolStrategy": {"n_trials": sobol_trials},
"ModelWrapperStrategy": {
"n_trials": opt_trials,
"refit_every": refit_every,
},
}
def true_testfun(x):
return norm.cdf(3 * x)
class SimpleLinearProblem(Problem):
def f(self, x):
return norm.ppf(true_testfun(x))
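# f returns the latent (probit-scale) value; since norm.ppf(norm.cdf(3 * x)) == 3 * x,
# the detection probability under the probit link used elsewhere in these benchmarks is
# exactly true_testfun, i.e. norm.cdf(3 * x).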
lb = [-3]
ub = [3]
logger = BenchmarkLogger()
problem = SimpleLinearProblem(lb, ub)
bench = Benchmark(
problem=problem,
logger=logger,
configs=configs,
global_seed=global_seed,
n_reps=1,
)
# sobol_trials
# now run each for just init trials, taking care to reseed each time
strats = []
for c in bench.combinations:
np.random.seed(global_seed)
torch.manual_seed(global_seed)
s = SequentialStrategy.from_config(Config(config_dict=c))
for _ in range(sobol_trials):
next_x = s.gen()
s.add_data(next_x, [problem.sample_y(next_x)])
strats.append(s)
# get first gen from all 3
first_gens = [s.gen() for s in strats]
fig, ax = plt.subplots(2, 2)
plot_strat(
strat=strats[0],
title=f"First active trial\n (after {sobol_trials} Sobol trials)",
ax=ax[0, 0],
true_testfun=true_testfun,
target_level=target_level,
show=False,
include_legend=False
)
samps = [
norm.cdf(s.sample(torch.Tensor(g), num_samples=10000))
for s, g in zip(strats, first_gens)
]
predictions = [np.mean(s) for s in samps]
names = ["First BALV sample", "First BALD sample", "First LSE sample"]
markers = ["s", "*", "^"]
for i in range(3):
ax[0, 0].scatter(
first_gens[i][0][0],
predictions[i],
label=names[i],
marker=markers[i],
color="black",
)
# now run them all for the full duration
for s in strats:
for _tr in range(opt_trials):
next_x = s.gen()
s.add_data(next_x, [problem.sample_y(next_x)])
plotting_axes = [ax[0, 1], ax[1, 0], ax[1, 1]]
titles = [
f"Monotonic RBF Model,\n BALV, after {sobol_trials+opt_trials} total trials",
f"Monotonic RBF Model,\n BALD, after {sobol_trials+opt_trials} total trials",
f"Monotonic RBF Model,\n LSE (ours) after {sobol_trials+opt_trials} total trials",
]
_ = [
plot_strat(
strat=s, title=t, ax=a, true_testfun=true_testfun, target_level=target_level, show=False, include_legend=False
)
for a, s, t in zip(plotting_axes, strats, titles)
]
fig.tight_layout()
handles, labels = ax[0, 0].get_legend_handles_labels()
lgd = fig.legend(handles, labels, loc="lower right", bbox_to_anchor=(1.5, 0.25))
# return legend so savefig works correctly
return fig, lgd
if __name__ == "__main__":
audio_lse_grids_fig = plot_audiometric_lse_grids(sobol_trials=5, opt_trials=45)
audio_lse_grids_fig.savefig(fname=figdir + "audio_lse_grids_fig.pdf", dpi=200)
novel_detection_lse_grids_fig = plot_novel_lse_grids(
sobol_trials=5, opt_trials=45, funtype="detection"
)
novel_detection_lse_grids_fig.savefig(
fname=figdir + "detection_lse_grids_fig.pdf", dpi=200
)
# this is extra hard, run more trials
novel_discrimination_lse_grids_fig = plot_novel_lse_grids(
sobol_trials=5, opt_trials=95, funtype="discrimination"
)
novel_discrimination_lse_grids_fig.savefig(
fname=figdir + "discrimination_lse_grids_fig.pdf", dpi=200
)
same_model_different_acq_fig, lgd = plot_acquisition_examples(
sobol_trials=5, opt_trials=15
)
same_model_different_acq_fig.savefig(
fname=figdir + "same_model_different_acq.pdf",
bbox_extra_artists=(lgd,),
bbox_inches="tight",
dpi=200,
)
| aepsych-main | pubs/owenetal/code/stratplots.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import gpytorch
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import torch
from botorch.utils.sampling import draw_sobol_samples
from scipy.stats import norm
sns.set_theme()
from aepsych.config import Config
from aepsych.factory import (
default_mean_covar_factory,
song_mean_covar_factory,
monotonic_mean_covar_factory,
)
from aepsych.models import GPClassificationModel, MonotonicRejectionGP
from aepsych.models.monotonic_rejection_gp import MixedDerivativeVariationalGP
from aepsych.utils import _dim_grid
global_seed = 3
def plot_prior_samps_1d():
config = Config(
config_dict={
"common": {
"outcome_type": "single_probit",
"target": 0.75,
"lb": "[-3]",
"ub": "[3]",
},
"default_mean_covar_factory": {},
"song_mean_covar_factory": {},
"monotonic_mean_covar_factory": {"monotonic_idxs": "[0]"},
}
)
lb = torch.Tensor([-3])
ub = torch.Tensor([3])
nsamps = 10
gridsize = 50
grid = _dim_grid(lower=lb, upper=ub, dim=1, gridsize=gridsize)
np.random.seed(global_seed)
torch.random.manual_seed(global_seed)
with gpytorch.settings.prior_mode(True):
rbf_mean, rbf_covar = default_mean_covar_factory(config)
rbf_model = GPClassificationModel(
inducing_min=lb,
inducing_max=ub,
inducing_size=100,
mean_module=rbf_mean,
covar_module=rbf_covar,
)
# add just two samples at high and low
rbf_model.set_train_data(
torch.Tensor([-3, 3])[:, None], torch.LongTensor([0, 1])
)
rbf_samps = rbf_model(grid).sample(torch.Size([nsamps]))
song_mean, song_covar = song_mean_covar_factory(config)
song_model = GPClassificationModel(
inducing_min=lb,
inducing_max=ub,
inducing_size=100,
mean_module=song_mean,
covar_module=song_covar,
)
song_model.set_train_data(
torch.Tensor([-3, 3])[:, None], torch.LongTensor([0, 1])
)
song_samps = song_model(grid).sample(torch.Size([nsamps]))
mono_mean, mono_covar = monotonic_mean_covar_factory(config)
mono_model = MonotonicRejectionGP(
likelihood="probit-bernoulli",
monotonic_idxs=[0],
mean_module=mono_mean,
covar_module=mono_covar,
)
bounds_ = torch.tensor([-3.0, 3.0])[:, None]
# Select inducing points
mono_model.inducing_points = draw_sobol_samples(
bounds=bounds_, n=mono_model.num_induc, q=1
).squeeze(1)
inducing_points_aug = mono_model._augment_with_deriv_index(
mono_model.inducing_points, 0
)
scales = ub - lb
dummy_train_x = mono_model._augment_with_deriv_index(
torch.Tensor([-3, 3])[:, None], 0
)
mono_model.model = MixedDerivativeVariationalGP(
train_x=dummy_train_x,
train_y=torch.LongTensor([0, 1]),
inducing_points=inducing_points_aug,
scales=scales,
fixed_prior_mean=torch.Tensor([0.75]),
covar_module=mono_covar,
mean_module=mono_mean,
)
mono_samps = mono_model.sample(grid, nsamps)
fig, ax = plt.subplots(1, 3, figsize=(7.5, 3))
fig.tight_layout(rect=[0.01, 0.03, 1, 0.9])
fig.suptitle("GP prior samples (probit-transformed)")
ax[0].plot(grid.squeeze(), norm.cdf(song_samps.T), "b")
ax[0].set_ylabel("Response Probability")
ax[0].set_title("Linear kernel")
ax[1].plot(grid.squeeze(), norm.cdf(rbf_samps.T), "b")
ax[1].set_xlabel("Intensity")
ax[1].set_title("RBF kernel (nonmonotonic)")
ax[2].plot(grid.squeeze(), norm.cdf(mono_samps.T), "b")
ax[2].set_title("RBF kernel (monotonic)")
return fig
def plot_prior_samps_2d():
config = Config(
config_dict={
"common": {
"outcome_type": "single_probit",
"target": 0.75,
"lb": "[-3, -3]",
"ub": "[3, 3]",
},
"default_mean_covar_factory": {},
"song_mean_covar_factory": {},
"monotonic_mean_covar_factory": {"monotonic_idxs": "[1]"},
}
)
lb = torch.Tensor([-3, -3])
ub = torch.Tensor([3, 3])
nsamps = 5
gridsize = 30
grid = _dim_grid(lower=lb, upper=ub, dim=2, gridsize=gridsize)
np.random.seed(global_seed)
torch.random.manual_seed(global_seed)
with gpytorch.settings.prior_mode(True):
rbf_mean, rbf_covar = default_mean_covar_factory(config)
rbf_model = GPClassificationModel(
inducing_min=lb,
inducing_max=ub,
inducing_size=100,
mean_module=rbf_mean,
covar_module=rbf_covar,
)
# add just two samples at high and low
rbf_model.set_train_data(torch.Tensor([-3, -3])[:, None], torch.LongTensor([0]))
rbf_samps = rbf_model(grid).sample(torch.Size([nsamps]))
song_mean, song_covar = song_mean_covar_factory(config)
song_model = GPClassificationModel(
inducing_min=lb,
inducing_max=ub,
inducing_size=100,
mean_module=song_mean,
covar_module=song_covar,
)
song_model.set_train_data(
torch.Tensor([-3, -3])[:, None], torch.LongTensor([0])
)
song_samps = song_model(grid).sample(torch.Size([nsamps]))
mono_mean, mono_covar = monotonic_mean_covar_factory(config)
mono_model = MonotonicRejectionGP(
likelihood="probit-bernoulli",
monotonic_idxs=[1],
mean_module=mono_mean,
covar_module=mono_covar,
num_induc=1000,
)
bounds_ = torch.tensor([-3.0, -3.0, 3.0, 3.0]).reshape(2, -1)
# Select inducing points
mono_model.inducing_points = draw_sobol_samples(
bounds=bounds_, n=mono_model.num_induc, q=1
).squeeze(1)
inducing_points_aug = mono_model._augment_with_deriv_index(
mono_model.inducing_points, 0
)
scales = ub - lb
dummy_train_x = mono_model._augment_with_deriv_index(
torch.Tensor([-3, 3])[None, :], 0
)
mono_model.model = MixedDerivativeVariationalGP(
train_x=dummy_train_x,
train_y=torch.LongTensor([0]),
inducing_points=inducing_points_aug,
scales=scales,
fixed_prior_mean=torch.Tensor([0.75]),
covar_module=mono_covar,
mean_module=mono_mean,
)
mono_samps = mono_model.sample(grid, nsamps)
intensity_grid = np.linspace(-3, 3, gridsize)
fig, ax = plt.subplots(1, 3, figsize=(7.5, 3))
fig.tight_layout(rect=[0, 0.03, 1, 0.9])
fig.suptitle("Prior samples")
square_samps = np.array([s.reshape((gridsize,) * 2).numpy() for s in song_samps])
plotsamps = norm.cdf(square_samps[:, ::5, :]).T.reshape(gridsize, -1)
ax[0].plot(intensity_grid, plotsamps, "b")
ax[0].set_title("Linear kernel model")
square_samps = np.array([s.reshape((gridsize,) * 2).numpy() for s in rbf_samps])
plotsamps = norm.cdf(square_samps[:, ::5, :]).T.reshape(gridsize, -1)
ax[1].plot(intensity_grid, plotsamps, "b")
ax[1].set_title("Nonmonotonic RBF kernel model")
square_samps = np.array([s.reshape((gridsize,) * 2).numpy() for s in mono_samps])
plotsamps = norm.cdf(square_samps[:, ::5, :]).T.reshape(gridsize, -1)
ax[2].plot(intensity_grid, plotsamps, "b")
ax[2].set_title("Monotonic RBF kernel model")
return fig
if __name__ == "__main__":
prior_samps_1d = plot_prior_samps_1d()
prior_samps_1d.savefig("./figs/prior_samps.pdf", dpi=200)
| aepsych-main | pubs/owenetal/code/prior_plots.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the scripts directory.
from __future__ import annotations
import argparse
import json
import os
import nbformat
from bs4 import BeautifulSoup
from nbconvert import HTMLExporter, PythonExporter
TEMPLATE = """const CWD = process.cwd();
const React = require('react');
const Tutorial = require(`${{CWD}}/core/Tutorial.js`);
class TutorialPage extends React.Component {{
render() {{
const {{config: siteConfig}} = this.props;
const {{baseUrl}} = siteConfig;
return <Tutorial baseUrl={{baseUrl}} tutorialID="{}"/>;
}}
}}
module.exports = TutorialPage;
"""
JS_SCRIPTS = """
<script src="https://cdnjs.cloudflare.com/ajax/libs/require.js/2.1.10/require.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/2.0.3/jquery.min.js"></script>
""" # noqa: E501
def validate_tutorial_links(repo_dir: str) -> None:
"""Checks that all .ipynb files that present are linked on the website, and vice
versa, that any linked tutorial has an associated .ipynb file present.
"""
with open(os.path.join(repo_dir, "website", "tutorials.json"), "r") as infile:
tutorial_config = json.load(infile)
tutorial_ids = {x["id"] for v in tutorial_config.values() for x in v}
tutorials_nbs = {
fn.replace(".ipynb", "")
for fn in os.listdir(os.path.join(repo_dir, "tutorials"))
if fn[-6:] == ".ipynb"
}
missing_files = tutorial_ids - tutorials_nbs
missing_ids = tutorials_nbs - tutorial_ids
if missing_files:
raise RuntimeError(
"The following tutorials are linked on the website, but missing an "
f"associated .ipynb file: {missing_files}."
)
if missing_ids:
        print(
            "\033[93m" + "Warning: " + "\x1b[0m"
            + "The following tutorial files are present, but are not linked on the "
            "website: {}.".format(", ".join([nbid + ".ipynb" for nbid in missing_ids]))
        )
# raise RuntimeError(
# "The following tutorial files are present, but are not linked on the "
# "website: {}.".format(", ".join([nbid + ".ipynb" for nbid in missing_ids]))
# )
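# Illustrative sketch (not part of the build; the tutorial id is hypothetical): for each tutorial id,
# gen_tutorials below renders TEMPLATE.format(tid) into a Docusaurus page. With tid = "gp_intro"
# the generated JS would contain the line
#     return <Tutorial baseUrl={baseUrl} tutorialID="gp_intro"/>;
# The doubled braces in TEMPLATE survive str.format() as literal single braces in the JS output.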
def gen_tutorials(repo_dir: str) -> None:
"""Generate HTML tutorials for AEPsych Docusaurus site from Jupyter notebooks.
Also create ipynb and py versions of tutorial in Docusaurus site for
download.
"""
with open(os.path.join(repo_dir, "website", "tutorials.json"), "r") as infile:
tutorial_config = json.load(infile)
# create output directories if necessary
html_out_dir = os.path.join(repo_dir, "website", "_tutorials")
files_out_dir = os.path.join(repo_dir, "website", "static", "files")
for d in (html_out_dir, files_out_dir):
if not os.path.exists(d):
os.makedirs(d)
tutorial_ids = {x["id"] for v in tutorial_config.values() for x in v}
for tid in tutorial_ids:
print(f"Generating {tid} tutorial")
# convert notebook to HTML
ipynb_in_path = os.path.join(repo_dir, "tutorials", f"{tid}.ipynb")
with open(ipynb_in_path, "r") as infile:
nb_str = infile.read()
nb = nbformat.reads(nb_str, nbformat.NO_CONVERT)
# displayname is absent from notebook metadata
nb["metadata"]["kernelspec"]["display_name"] = "python3"
exporter = HTMLExporter(template_name="classic")
html, meta = exporter.from_notebook_node(nb)
# pull out html div for notebook
soup = BeautifulSoup(html, "html.parser")
nb_meat = soup.find("div", {"id": "notebook-container"})
del nb_meat.attrs["id"]
nb_meat.attrs["class"] = ["notebook"]
html_out = JS_SCRIPTS + str(nb_meat)
# generate html file
html_out_path = os.path.join(
html_out_dir,
f"{tid}.html",
)
with open(html_out_path, "w") as html_outfile:
html_outfile.write(html_out)
# generate JS file
script = TEMPLATE.format(tid)
js_out_path = os.path.join(
repo_dir, "website", "pages", "tutorials", f"{tid}.js"
)
with open(js_out_path, "w") as js_outfile:
js_outfile.write(script)
# output tutorial in both ipynb & py form
ipynb_out_path = os.path.join(files_out_dir, f"{tid}.ipynb")
with open(ipynb_out_path, "w") as ipynb_outfile:
ipynb_outfile.write(nb_str)
exporter = PythonExporter()
script, meta = exporter.from_notebook_node(nb)
# make sure to use python3 shebang
script = script.replace("#!/usr/bin/env python", "#!/usr/bin/env python3")
py_out_path = os.path.join(repo_dir, "website", "static", "files", f"{tid}.py")
with open(py_out_path, "w") as py_outfile:
py_outfile.write(script)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate JS, HTML, ipynb, and py files for tutorials."
)
parser.add_argument(
"-w",
"--repo_dir",
metavar="path",
required=True,
help="aepsych repo directory.",
)
args = parser.parse_args()
validate_tutorial_links(args.repo_dir)
gen_tutorials(args.repo_dir)
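# Example invocation, assuming it is run from a checkout of the repo (path hypothetical):
#     python scripts/parse_tutorials.py -w /path/to/aepsych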
|
aepsych-main
|
scripts/parse_tutorials.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the scripts directory.
from __future__ import annotations
import argparse
import os
from bs4 import BeautifulSoup
# The base_url must match the baseUrl set in /website/siteConfig.js.
# Note: if it is not updated, the API doc search bar will not be displayed.
# 1) update base_url below
base_url = "/"
js_scripts = """
<script type="text/javascript" id="documentation_options" data-url_root="./" src="{0}js/documentation_options.js"></script>
<script type="text/javascript" src="{0}js/jquery.js"></script>
<script type="text/javascript" src="{0}js/underscore.js"></script>
<script type="text/javascript" src="{0}js/doctools.js"></script>
<script type="text/javascript" src="{0}js/language_data.js"></script>
<script type="text/javascript" src="{0}js/searchtools.js"></script>
""".format(base_url) # noqa: E501
# 2) update
# Search.loadIndex("/<<update to match baseUrl>>/js/searchindex.js"
search_js_scripts = """
<script type="text/javascript">
jQuery(function() { Search.loadIndex("/js/searchindex.js"); });
</script>
<script type="text/javascript" id="searchindexloader"></script>
"""
def parse_sphinx(input_dir, output_dir):
for cur, _, files in os.walk(input_dir):
for fname in files:
if fname.endswith(".html"):
with open(os.path.join(cur, fname), "r") as f:
soup = BeautifulSoup(f.read(), "html.parser")
doc = soup.find("div", {"class": "document"})
wrapped_doc = doc.wrap(soup.new_tag("div", **{"class": "sphinx"}))
# add js
if fname == "search.html":
out = js_scripts + search_js_scripts + str(wrapped_doc)
else:
out = js_scripts + str(wrapped_doc)
output_path = os.path.join(output_dir, os.path.relpath(cur, input_dir))
os.makedirs(output_path, exist_ok=True)
with open(os.path.join(output_path, fname), "w") as fout:
fout.write(out)
# update reference in JS file
with open(os.path.join(input_dir, "_static/searchtools.js"), "r") as js_file:
js = js_file.read()
js = js.replace(
"DOCUMENTATION_OPTIONS.URL_ROOT + '_sources/'", "'_sphinx-sources/'"
)
with open(os.path.join(input_dir, "_static/searchtools.js"), "w") as js_file:
js_file.write(js)
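# Minimal sketch of the wrapping step above (never called; names and HTML are illustrative).
# It shows how a Sphinx "document" div ends up nested inside a "sphinx" div.
def _example_wrap_sketch():
    html = '<div class="document"><p>Hello</p></div>'
    soup = BeautifulSoup(html, "html.parser")
    doc = soup.find("div", {"class": "document"})
    wrapped = doc.wrap(soup.new_tag("div", **{"class": "sphinx"}))
    # str(wrapped) == '<div class="sphinx"><div class="document"><p>Hello</p></div></div>'
    return str(wrapped)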
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Strip HTML body from Sphinx docs.")
parser.add_argument(
"-i",
"--input_dir",
metavar="path",
required=True,
help="Input directory for Sphinx HTML.",
)
parser.add_argument(
"-o",
"--output_dir",
metavar="path",
required=True,
help="Output directory in Docusaurus.",
)
args = parser.parse_args()
parse_sphinx(args.input_dir, args.output_dir)
|
aepsych-main
|
scripts/parse_sphinx.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.
from __future__ import annotations
import argparse
import json
import os
import shutil
import nbformat
from bs4 import BeautifulSoup
from nbconvert import HTMLExporter
TEMPLATE = """const CWD = process.cwd();
const React = require('react');
const Demo = require(`${{CWD}}/core/Demo.js`);
class DemoPage extends React.Component {{
render() {{
const {{config: siteConfig}} = this.props;
const {{baseUrl}} = siteConfig;
return <Demo baseUrl={{baseUrl}} demoID="{}" hasWinDemo="{}"
hasMacDemo="{}"/>;
}}
}}
module.exports = DemoPage;
"""
JS_SCRIPTS = """
<script src="https://cdnjs.cloudflare.com/ajax/libs/require.js/2.1.10/require.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/2.0.3/jquery.min.js"></script>
"""
def validate_demo_links(repo_dir: str) -> None:
    """Checks that all .zip files that are present are linked on the website, and,
    vice versa, that every linked demo has an associated .zip file present.
    """
with open(os.path.join(repo_dir, "website", "demos.json")) as f:
demo_config = json.load(f)
demo_ids = {x["id"] for v in demo_config.values() for x in v}
demo_names = {
fn.replace(".zip", "")
for fn in os.listdir(os.path.join(repo_dir, "demos"))
if fn[-4:] == ".zip"
}
# Check if the ID is present in the set and if both "_Mac" and "_Win" endings exist
for id in demo_ids:
if f"{id}_Mac" in demo_names and f"{id}_Win" in demo_names:
            print(f"Both '{id}_Mac' and '{id}_Win' demo .zip files are present.")
        elif f"{id}_Mac" in demo_names:
            print(f"Only '{id}_Mac'.zip demo is present.")
        elif f"{id}_Win" in demo_names:
            print(f"Only '{id}_Win'.zip demo is present.")
        else:
            print(f"Neither '{id}_Mac'.zip nor '{id}_Win'.zip demo file is present.")
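# Illustrative sketch of the demos.json layout implied by the lookups above (ids hypothetical):
#     {"Demos": [{"id": "ParticleEffectDemo"}]}
# Only the "id" field is read by this script; the corresponding files would then be
# demos/markdown/ParticleEffectDemo.md plus demos/ParticleEffectDemo_Mac.zip and/or
# demos/ParticleEffectDemo_Win.zip.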
def gen_demos(repo_dir: str) -> None:
"""Generate HTML demos for AEPsych Docusaurus site for download."""
with open(os.path.join(repo_dir, "website", "demos.json"), "r") as f:
demo_config = json.load(f)
# create output directories if necessary
html_out_dir = os.path.join(repo_dir, "website", "_demos")
files_out_dir = os.path.join(repo_dir, "website", "static", "files", "demos")
for d in (html_out_dir, files_out_dir):
if not os.path.exists(d):
os.makedirs(d)
demo_ids = {x["id"] for v in demo_config.values() for x in v}
for d_id in demo_ids:
print(f"Generating {d_id} demo")
# convert markdown to HTML
md_in_path = os.path.join(repo_dir, "demos", "markdown", f"{d_id}.md")
with open(md_in_path, "r") as infile:
markdown_content = infile.read()
notebook_node = nbformat.v4.new_notebook()
markdown_cell = nbformat.v4.new_markdown_cell(markdown_content)
notebook_node["cells"] = [markdown_cell]
exporter = HTMLExporter(template_name="classic")
html, meta = exporter.from_notebook_node(notebook_node)
# pull out html div for notebook
soup = BeautifulSoup(html, "html.parser")
nb_meat = soup.find("div", {"id": "notebook-container"})
del nb_meat.attrs["id"]
nb_meat.attrs["class"] = ["notebook"]
html_out = JS_SCRIPTS + str(nb_meat)
# generate html file
html_out_path = os.path.join(
html_out_dir,
f"{d_id}.html",
)
with open(html_out_path, "w") as html_outfile:
html_outfile.write(html_out)
# generate JS file
has_mac_demo = os.path.exists(os.path.join(repo_dir, "demos", f"{d_id}_Mac.zip"))
has_win_demo = os.path.exists(os.path.join(repo_dir, "demos", f"{d_id}_Win.zip"))
        script = TEMPLATE.format(d_id, has_win_demo, has_mac_demo)
js_out_path = os.path.join(repo_dir, "website", "pages", "demos", f"{d_id}.js")
with open(js_out_path, "w") as js_outfile:
js_outfile.write(script)
# output demo in zip format
if has_mac_demo:
mac_source_path = os.path.join(repo_dir, "demos", f"{d_id}_Mac.zip")
mac_zip_out_path = os.path.join(files_out_dir, f"{d_id}_Mac.zip")
shutil.copy(mac_source_path, mac_zip_out_path)
if has_win_demo:
win_source_path = os.path.join(repo_dir, "demos", f"{d_id}_Win.zip")
win_zip_out_path = os.path.join(files_out_dir, f"{d_id}_Win.zip")
shutil.copy(win_source_path, win_zip_out_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate JS, HTML, and zip files for demos."
)
parser.add_argument(
"-w",
"--repo_dir",
metavar="path",
required=True,
help="aepsych repo directory.",
)
args = parser.parse_args()
validate_demo_links(args.repo_dir)
gen_demos(args.repo_dir)
|
aepsych-main
|
scripts/parse_demos.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from setuptools import setup
with open("README.md") as f:
readme = f.read()
setup(
name="BLINK",
version="0.1.0",
description="BLINK: Better entity LINKing",
url="", # TODO
classifiers=[
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
long_description=readme,
long_description_content_type="text/markdown",
setup_requires=["setuptools>=18.0",],
install_requires=[
"torch>=1.2.0",
"pysolr>=3.8.1",
"emoji>=0.5.3",
"regex>=2019.8.19",
"matplotlib>=3.1.0",
"tqdm>=4.32.1",
"nltk>=3.4.4",
"numpy>=1.17.2",
"segtok>=1.5.7",
"flair>=0.4.3",
"pytorch-transformers>=1.2.0",
"colorama>=0.4.3",
"termcolor>=1.1.0",
"faiss-cpu>=1.6.1",
],
)
|
BLINK-main
|
setup.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import re
import os
import pysolr
import sys
import blink.candidate_retrieval.utils as utils
def get_model(params):
return BM45_Candidate_Generator(params)
class Candidate_Generator:
def __init__(self, parameters=None):
pass
def get_candidates(self, mention_data):
"""Given the mentions from the named entity recognition model, generates candidates for each mention and adds them as an additional field to the mention dictionary"""
pass
class BM45_Candidate_Generator(Candidate_Generator):
ESCAPE_CHARS_RE = re.compile(r'(?<!\\)(?P<char>[&|+\-!(){}[\]\/^"~*?:])')
def __init__(self, params):
self.solr_address = params["solr_address"]
self.raw_solr_fields = params["raw_solr_fields"]
self.solr = pysolr.Solr(self.solr_address, always_commit=True, timeout=100)
self.rows = params["rows"]
self.query = params["query"]
self.keys = [k.strip() for k in params["keys"].split(",")]
self.c = 0
self.query_arguments = {
"fl": "* score",
"rows": self.rows,
"defType": "edismax",
}
if params["boosting"] is not None:
self.query_arguments["bf"] = params["boosting"]
def _filter_result(self, cand, detailed=True):
wikidata_id = cand.get("wikidata_id", None)
res = {
"wikidata_id": wikidata_id,
"wikipedia_id": cand["id"],
"wikipedia_title": cand["title"],
}
if detailed:
res["aliases"] = cand.get("aliases", None)
sents = []
for k in range(0, 10):
key = "sent_desc_{}".format(k + 1)
sents.append(cand.get(key, ""))
res["sentences"] = sents
return res
def get_candidates(self, mention_data):
solr = self.solr
# Build query
keys = self.keys
query = self.query
if not self.raw_solr_fields:
query = query.format(
*[
BM45_Candidate_Generator.solr_escape(mention_data[key])
if key in mention_data
else utils.get_sent_context(mention_data, key)
for key in keys
]
)
else:
query = query.format(
*[
mention_data[key]
if key in mention_data
else utils.get_sent_context(mention_data, key)
for key in keys
]
)
try:
results = solr.search(query, **self.query_arguments)
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
print("\nException:", exc_type, "- line", exc_tb.tb_lineno)
print(repr(e))
c = self.c
if c < 10:
print(
"Exception with: \naddress: {} \nquery: {} \nmention_data: {} \n".format(
self.solr_address, query, str(mention_data)
)
)
self.c = c + 1
return []
# Filter the data in the retrieved objects, while ignoring the ones without a wikidata_id (only a very small fraction in the dataset; they are noise)
filtered_results = [
self._filter_result(cand) for cand in results.docs if "wikidata_id" in cand
]
return filtered_results
@staticmethod
def process_mentions_for_candidate_generator(sentences, mentions):
for m in mentions:
m["context"] = sentences[m["sent_idx"]]
return mentions
@staticmethod
def solr_escape(string):
if (string == "OR") or (string == "AND"):
return string.lower()
interior = r"\s+(OR|AND)\s+"
start = r"^(OR|AND) "
end = r" (OR|AND)$"
string = re.sub(interior, lambda x: x.group(0).lower(), string)
string = re.sub(start, lambda x: x.group(0).lower(), string)
string = re.sub(end, lambda x: x.group(0).lower(), string)
return BM45_Candidate_Generator.ESCAPE_CHARS_RE.sub(r"\\\g<char>", string)
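# Illustrative sketch of what solr_escape does (inputs hypothetical):
#     solr_escape('AT&T (company)') -> 'AT\&T \(company\)'   (backslash before &, ( and ))
#     solr_escape('cats AND dogs')  -> 'cats and dogs'
#     solr_escape('OR')             -> 'or'
# Special Lucene characters are backslash-escaped and bare AND/OR operators are lowercased,
# so mention text cannot change the structure of the edismax query built in get_candidates.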
|
BLINK-main
|
blink/candidate_generation.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import prettytable
import blink.main_dense as main_dense
import blink.candidate_ranking.utils as utils
DATASETS = [
{
"name": "AIDA-YAGO2 testa",
"filename": "data/BLINK_benchmark/AIDA-YAGO2_testa.jsonl",
},
{
"name": "AIDA-YAGO2 testb",
"filename": "data/BLINK_benchmark/AIDA-YAGO2_testb.jsonl",
},
{"name": "ACE 2004", "filename": "data/BLINK_benchmark/ace2004_questions.jsonl"},
{"name": "aquaint", "filename": "data/BLINK_benchmark/aquaint_questions.jsonl"},
{
"name": "clueweb - WNED-CWEB (CWEB)",
"filename": "data/BLINK_benchmark/clueweb_questions.jsonl",
},
{"name": "msnbc", "filename": "data/BLINK_benchmark/msnbc_questions.jsonl"},
{
"name": "wikipedia - WNED-WIKI (WIKI)",
"filename": "data/BLINK_benchmark/wnedwiki_questions.jsonl",
},
]
PARAMETERS = {
"faiss_index": None,
"index_path": None,
"test_entities": None,
"test_mentions": None,
"interactive": False,
"biencoder_model": "models/biencoder_wiki_large.bin",
"biencoder_config": "models/biencoder_wiki_large.json",
"entity_catalogue": "models/entity.jsonl",
"entity_encoding": "models/all_entities_large.t7",
"crossencoder_model": "models/crossencoder_wiki_large.bin",
"crossencoder_config": "models/crossencoder_wiki_large.json",
"output_path": "output",
"fast": False,
"top_k": 100,
}
args = argparse.Namespace(**PARAMETERS)
logger = utils.get_logger(args.output_path)
models = main_dense.load_models(args, logger)
table = prettytable.PrettyTable(
    [
        "DATASET",
        "biencoder accuracy",
        "recall at 100",
        "crossencoder normalized accuracy",
        "overall unnormalized accuracy",
        "support",
    ]
)
for dataset in DATASETS:
logger.info(dataset["name"])
PARAMETERS["test_mentions"] = dataset["filename"]
args = argparse.Namespace(**PARAMETERS)
(
biencoder_accuracy,
recall_at,
crossencoder_normalized_accuracy,
overall_unormalized_accuracy,
num_datapoints,
predictions,
scores,
) = main_dense.run(args, logger, *models)
table.add_row(
[
dataset["name"],
round(biencoder_accuracy, 4),
round(recall_at, 4),
round(crossencoder_normalized_accuracy, 4),
round(overall_unormalized_accuracy, 4),
num_datapoints,
]
)
logger.info("\n{}".format(table))
|
BLINK-main
|
blink/run_benchmark.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from blink.candidate_ranking.bert_reranking import BertReranker
def get_model(params):
return BertReranker(params)
|
BLINK-main
|
blink/reranker.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import pickle
import emoji
def get_model(parameters):
return Wikimedia_Data_Fetcher(parameters["path_to_candidate_data_dict"])
class Wikimedia_Data_Fetcher:
def __init__(self, path_to_data):
self.data = pickle.load(open(path_to_data, "rb"))
def get_data_for_entity(self, entity_data):
"""Given an entity data dictionary that contains some linking data (ex. title or ID), additional information (ex. description, aliases etc.) is added to the given entity dictionary"""
data = self.data
title = entity_data["wikipedia_title"]
if "wikidata_info" in data[title]:
if ("aliases" in data[title]["wikidata_info"]) and (
data[title]["wikidata_info"]["aliases"]
) is not None:
aliases = [
alias
for alias in data[title]["wikidata_info"]["aliases"]
if alias not in emoji.UNICODE_EMOJI
]
else:
aliases = None
else:
aliases = None
entity_data["aliases"] = aliases
sents = []
for k in range(0, 10):
key = "sent_desc_{}".format(k + 1)
sents.append(data[title].get(key, ""))
entity_data["sentences"] = sents
return entity_data
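# Illustrative sketch of how get_data_for_entity is used (values are hypothetical).
# Given an entity dict that already carries a "wikipedia_title" key, e.g.
#     {"wikipedia_id": 3434750, "wikipedia_title": "United States", "wikidata_id": "Q30"}
# the fetcher adds an "aliases" list (or None) and a 10-element "sentences" list taken from
# the sent_desc_1 .. sent_desc_10 fields of the pickled knowledge-base dictionary.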
|
BLINK-main
|
blink/candidate_data_fetcher.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import json
import sys
from tqdm import tqdm
import logging
import torch
import numpy as np
from colorama import init
from termcolor import colored
import blink.ner as NER
from torch.utils.data import DataLoader, SequentialSampler, TensorDataset
from blink.biencoder.biencoder import BiEncoderRanker, load_biencoder
from blink.crossencoder.crossencoder import CrossEncoderRanker, load_crossencoder
from blink.biencoder.data_process import (
process_mention_data,
get_candidate_representation,
)
import blink.candidate_ranking.utils as utils
from blink.crossencoder.train_cross import modify, evaluate
from blink.crossencoder.data_process import prepare_crossencoder_data
from blink.indexer.faiss_indexer import DenseFlatIndexer, DenseHNSWFlatIndexer
HIGHLIGHTS = [
"on_red",
"on_green",
"on_yellow",
"on_blue",
"on_magenta",
"on_cyan",
]
def _print_colorful_text(input_sentence, samples):
init() # colorful output
msg = ""
if samples and (len(samples) > 0):
msg += input_sentence[0 : int(samples[0]["start_pos"])]
for idx, sample in enumerate(samples):
msg += colored(
input_sentence[int(sample["start_pos"]) : int(sample["end_pos"])],
"grey",
HIGHLIGHTS[idx % len(HIGHLIGHTS)],
)
if idx < len(samples) - 1:
msg += input_sentence[
int(sample["end_pos"]) : int(samples[idx + 1]["start_pos"])
]
else:
msg += input_sentence[int(sample["end_pos"]) :]
else:
msg = input_sentence
print("Failed to identify entity from text:")
print("\n" + str(msg) + "\n")
def _print_colorful_prediction(
idx, sample, e_id, e_title, e_text, e_url, show_url=False
):
print(colored(sample["mention"], "grey", HIGHLIGHTS[idx % len(HIGHLIGHTS)]))
to_print = "id:{}\ntitle:{}\ntext:{}\n".format(e_id, e_title, e_text[:256])
if show_url:
to_print += "url:{}\n".format(e_url)
print(to_print)
def _annotate(ner_model, input_sentences):
ner_output_data = ner_model.predict(input_sentences)
sentences = ner_output_data["sentences"]
mentions = ner_output_data["mentions"]
samples = []
for mention in mentions:
record = {}
record["label"] = "unknown"
record["label_id"] = -1
# LOWERCASE EVERYTHING !
record["context_left"] = sentences[mention["sent_idx"]][
: mention["start_pos"]
].lower()
record["context_right"] = sentences[mention["sent_idx"]][
mention["end_pos"] :
].lower()
record["mention"] = mention["text"].lower()
record["start_pos"] = int(mention["start_pos"])
record["end_pos"] = int(mention["end_pos"])
record["sent_idx"] = mention["sent_idx"]
samples.append(record)
return samples
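# Illustrative sketch (values hypothetical) of one record produced by _annotate for the
# sentence "Shakespeare wrote Hamlet." with the detected mention "Shakespeare":
#     {"label": "unknown", "label_id": -1,
#      "context_left": "", "context_right": " wrote hamlet.",
#      "mention": "shakespeare", "start_pos": 0, "end_pos": 11, "sent_idx": 0}
# Everything is lowercased, matching the "LOWERCASE EVERYTHING" convention used in this file.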
def _load_candidates(
entity_catalogue, entity_encoding, faiss_index=None, index_path=None, logger=None
):
# only load candidate encoding if not using faiss index
if faiss_index is None:
candidate_encoding = torch.load(entity_encoding)
indexer = None
else:
if logger:
logger.info("Using faiss index to retrieve entities.")
candidate_encoding = None
assert index_path is not None, "Error! Empty indexer path."
if faiss_index == "flat":
indexer = DenseFlatIndexer(1)
elif faiss_index == "hnsw":
indexer = DenseHNSWFlatIndexer(1)
else:
raise ValueError("Error! Unsupported indexer type! Choose from flat,hnsw.")
indexer.deserialize_from(index_path)
# load all the 5903527 entities
title2id = {}
id2title = {}
id2text = {}
wikipedia_id2local_id = {}
local_idx = 0
with open(entity_catalogue, "r") as fin:
lines = fin.readlines()
for line in lines:
entity = json.loads(line)
if "idx" in entity:
split = entity["idx"].split("curid=")
if len(split) > 1:
wikipedia_id = int(split[-1].strip())
else:
wikipedia_id = entity["idx"].strip()
assert wikipedia_id not in wikipedia_id2local_id
wikipedia_id2local_id[wikipedia_id] = local_idx
title2id[entity["title"]] = local_idx
id2title[local_idx] = entity["title"]
id2text[local_idx] = entity["text"]
local_idx += 1
return (
candidate_encoding,
title2id,
id2title,
id2text,
wikipedia_id2local_id,
indexer,
)
def __map_test_entities(test_entities_path, title2id, logger):
# load the 732859 tac_kbp_ref_know_base entities
kb2id = {}
missing_pages = 0
n = 0
with open(test_entities_path, "r") as fin:
lines = fin.readlines()
for line in lines:
entity = json.loads(line)
if entity["title"] not in title2id:
missing_pages += 1
else:
kb2id[entity["entity_id"]] = title2id[entity["title"]]
n += 1
if logger:
logger.info("missing {}/{} pages".format(missing_pages, n))
return kb2id
def __load_test(test_filename, kb2id, wikipedia_id2local_id, logger):
test_samples = []
with open(test_filename, "r") as fin:
lines = fin.readlines()
for line in lines:
record = json.loads(line)
record["label"] = str(record["label_id"])
# for tac kbp we should use a separate knowledge source to get the entity id (label_id)
if kb2id and len(kb2id) > 0:
if record["label"] in kb2id:
record["label_id"] = kb2id[record["label"]]
else:
continue
# check that each entity id (label_id) is in the entity collection
elif wikipedia_id2local_id and len(wikipedia_id2local_id) > 0:
try:
key = int(record["label"].strip())
if key in wikipedia_id2local_id:
record["label_id"] = wikipedia_id2local_id[key]
else:
continue
except:
continue
# LOWERCASE EVERYTHING !
record["context_left"] = record["context_left"].lower()
record["context_right"] = record["context_right"].lower()
record["mention"] = record["mention"].lower()
test_samples.append(record)
if logger:
logger.info("{}/{} samples considered".format(len(test_samples), len(lines)))
return test_samples
def _get_test_samples(
test_filename, test_entities_path, title2id, wikipedia_id2local_id, logger
):
kb2id = None
if test_entities_path:
kb2id = __map_test_entities(test_entities_path, title2id, logger)
test_samples = __load_test(test_filename, kb2id, wikipedia_id2local_id, logger)
return test_samples
def _process_biencoder_dataloader(samples, tokenizer, biencoder_params):
_, tensor_data = process_mention_data(
samples,
tokenizer,
biencoder_params["max_context_length"],
biencoder_params["max_cand_length"],
silent=True,
logger=None,
debug=biencoder_params["debug"],
)
sampler = SequentialSampler(tensor_data)
dataloader = DataLoader(
tensor_data, sampler=sampler, batch_size=biencoder_params["eval_batch_size"]
)
return dataloader
def _run_biencoder(biencoder, dataloader, candidate_encoding, top_k=100, indexer=None):
biencoder.model.eval()
labels = []
nns = []
all_scores = []
for batch in tqdm(dataloader):
context_input, _, label_ids = batch
with torch.no_grad():
if indexer is not None:
context_encoding = biencoder.encode_context(context_input).numpy()
context_encoding = np.ascontiguousarray(context_encoding)
                scores, indices = indexer.search_knn(context_encoding, top_k)
else:
scores = biencoder.score_candidate(
context_input, None, cand_encs=candidate_encoding # .to(device)
)
                scores, indices = scores.topk(top_k)
                scores = scores.data.numpy()
                indices = indices.data.numpy()
        labels.extend(label_ids.data.numpy())
        nns.extend(indices)
        all_scores.extend(scores)
return labels, nns, all_scores
def _process_crossencoder_dataloader(context_input, label_input, crossencoder_params):
tensor_data = TensorDataset(context_input, label_input)
sampler = SequentialSampler(tensor_data)
dataloader = DataLoader(
tensor_data, sampler=sampler, batch_size=crossencoder_params["eval_batch_size"]
)
return dataloader
def _run_crossencoder(crossencoder, dataloader, logger, context_len, device="cuda"):
crossencoder.model.eval()
accuracy = 0.0
crossencoder.to(device)
res = evaluate(crossencoder, dataloader, device, logger, context_len, zeshel=False, silent=False)
accuracy = res["normalized_accuracy"]
logits = res["logits"]
if accuracy > -1:
predictions = np.argsort(logits, axis=1)
else:
predictions = []
return accuracy, predictions, logits
def load_models(args, logger=None):
# load biencoder model
if logger:
logger.info("loading biencoder model")
with open(args.biencoder_config) as json_file:
biencoder_params = json.load(json_file)
biencoder_params["path_to_model"] = args.biencoder_model
biencoder = load_biencoder(biencoder_params)
crossencoder = None
crossencoder_params = None
if not args.fast:
# load crossencoder model
if logger:
logger.info("loading crossencoder model")
with open(args.crossencoder_config) as json_file:
crossencoder_params = json.load(json_file)
crossencoder_params["path_to_model"] = args.crossencoder_model
crossencoder = load_crossencoder(crossencoder_params)
# load candidate entities
if logger:
logger.info("loading candidate entities")
(
candidate_encoding,
title2id,
id2title,
id2text,
wikipedia_id2local_id,
faiss_indexer,
) = _load_candidates(
args.entity_catalogue,
args.entity_encoding,
faiss_index=getattr(args, 'faiss_index', None),
index_path=getattr(args, 'index_path' , None),
logger=logger,
)
return (
biencoder,
biencoder_params,
crossencoder,
crossencoder_params,
candidate_encoding,
title2id,
id2title,
id2text,
wikipedia_id2local_id,
faiss_indexer,
)
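# Minimal usage sketch (never executed here; model and data paths are the repo defaults, the
# mention file is hypothetical). It mirrors blink/run_benchmark.py: build an argparse.Namespace,
# load the models once, then call run() for one or more mention files.
def _example_load_and_run(logger=None):
    config = {
        "interactive": False,
        "test_entities": None,
        "test_mentions": "data/BLINK_benchmark/AIDA-YAGO2_testa.jsonl",
        "biencoder_model": "models/biencoder_wiki_large.bin",
        "biencoder_config": "models/biencoder_wiki_large.json",
        "entity_catalogue": "models/entity.jsonl",
        "entity_encoding": "models/all_entities_large.t7",
        "crossencoder_model": "models/crossencoder_wiki_large.bin",
        "crossencoder_config": "models/crossencoder_wiki_large.json",
        "output_path": "output",
        "fast": False,
        "top_k": 100,
        "show_url": False,
        "faiss_index": None,
        "index_path": None,
    }
    args = argparse.Namespace(**config)
    models = load_models(args, logger)
    return run(args, logger, *models)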
def run(
args,
logger,
biencoder,
biencoder_params,
crossencoder,
crossencoder_params,
candidate_encoding,
title2id,
id2title,
id2text,
wikipedia_id2local_id,
faiss_indexer=None,
test_data=None,
):
if not test_data and not args.test_mentions and not args.interactive:
        msg = (
            "ERROR: either you start BLINK with the "
            "interactive option (-i) or you pass in input test mentions (--test_mentions) "
            "and test entities (--test_entities)"
        )
raise ValueError(msg)
id2url = {
v: "https://en.wikipedia.org/wiki?curid=%s" % k
for k, v in wikipedia_id2local_id.items()
}
stopping_condition = False
while not stopping_condition:
samples = None
if args.interactive:
logger.info("interactive mode")
# biencoder_params["eval_batch_size"] = 1
# Load NER model
ner_model = NER.get_model()
# Interactive
text = input("insert text:")
# Identify mentions
samples = _annotate(ner_model, [text])
_print_colorful_text(text, samples)
else:
if logger:
logger.info("test dataset mode")
if test_data:
samples = test_data
else:
# Load test mentions
samples = _get_test_samples(
args.test_mentions,
args.test_entities,
title2id,
wikipedia_id2local_id,
logger,
)
stopping_condition = True
# don't look at labels
keep_all = (
args.interactive
or samples[0]["label"] == "unknown"
or samples[0]["label_id"] < 0
)
# prepare the data for biencoder
if logger:
logger.info("preparing data for biencoder")
dataloader = _process_biencoder_dataloader(
samples, biencoder.tokenizer, biencoder_params
)
# run biencoder
if logger:
logger.info("run biencoder")
top_k = args.top_k
labels, nns, scores = _run_biencoder(
biencoder, dataloader, candidate_encoding, top_k, faiss_indexer
)
if args.interactive:
print("\nfast (biencoder) predictions:")
_print_colorful_text(text, samples)
# print biencoder prediction
idx = 0
for entity_list, sample in zip(nns, samples):
e_id = entity_list[0]
e_title = id2title[e_id]
e_text = id2text[e_id]
e_url = id2url[e_id]
_print_colorful_prediction(
idx, sample, e_id, e_title, e_text, e_url, args.show_url
)
idx += 1
print()
if args.fast:
# use only biencoder
continue
else:
biencoder_accuracy = -1
recall_at = -1
if not keep_all:
# get recall values
top_k = args.top_k
x = []
y = []
for i in range(1, top_k):
temp_y = 0.0
for label, top in zip(labels, nns):
if label in top[:i]:
temp_y += 1
if len(labels) > 0:
temp_y /= len(labels)
x.append(i)
y.append(temp_y)
# plt.plot(x, y)
biencoder_accuracy = y[0]
recall_at = y[-1]
print("biencoder accuracy: %.4f" % biencoder_accuracy)
print("biencoder recall@%d: %.4f" % (top_k, y[-1]))
if args.fast:
predictions = []
for entity_list in nns:
sample_prediction = []
for e_id in entity_list:
e_title = id2title[e_id]
sample_prediction.append(e_title)
predictions.append(sample_prediction)
# use only biencoder
return (
biencoder_accuracy,
recall_at,
-1,
-1,
len(samples),
predictions,
scores,
)
# prepare crossencoder data
context_input, candidate_input, label_input = prepare_crossencoder_data(
crossencoder.tokenizer, samples, labels, nns, id2title, id2text, keep_all,
)
context_input = modify(
context_input, candidate_input, crossencoder_params["max_seq_length"]
)
dataloader = _process_crossencoder_dataloader(
context_input, label_input, crossencoder_params
)
# run crossencoder and get accuracy
accuracy, index_array, unsorted_scores = _run_crossencoder(
crossencoder,
dataloader,
logger,
context_len=biencoder_params["max_context_length"],
)
if args.interactive:
print("\naccurate (crossencoder) predictions:")
_print_colorful_text(text, samples)
# print crossencoder prediction
idx = 0
for entity_list, index_list, sample in zip(nns, index_array, samples):
e_id = entity_list[index_list[-1]]
e_title = id2title[e_id]
e_text = id2text[e_id]
e_url = id2url[e_id]
_print_colorful_prediction(
idx, sample, e_id, e_title, e_text, e_url, args.show_url
)
idx += 1
print()
else:
scores = []
predictions = []
for entity_list, index_list, scores_list in zip(
nns, index_array, unsorted_scores
):
index_list = index_list.tolist()
# descending order
index_list.reverse()
sample_prediction = []
sample_scores = []
for index in index_list:
e_id = entity_list[index]
e_title = id2title[e_id]
sample_prediction.append(e_title)
sample_scores.append(scores_list[index])
predictions.append(sample_prediction)
scores.append(sample_scores)
crossencoder_normalized_accuracy = -1
overall_unormalized_accuracy = -1
if not keep_all:
crossencoder_normalized_accuracy = accuracy
print(
"crossencoder normalized accuracy: %.4f"
% crossencoder_normalized_accuracy
)
if len(samples) > 0:
overall_unormalized_accuracy = (
crossencoder_normalized_accuracy * len(label_input) / len(samples)
)
print(
"overall unnormalized accuracy: %.4f" % overall_unormalized_accuracy
)
return (
biencoder_accuracy,
recall_at,
crossencoder_normalized_accuracy,
overall_unormalized_accuracy,
len(samples),
predictions,
scores,
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--interactive", "-i", action="store_true", help="Interactive mode."
)
# test_data
parser.add_argument(
"--test_mentions", dest="test_mentions", type=str, help="Test Dataset."
)
parser.add_argument(
"--test_entities", dest="test_entities", type=str, help="Test Entities."
)
# biencoder
parser.add_argument(
"--biencoder_model",
dest="biencoder_model",
type=str,
default="models/biencoder_wiki_large.bin",
help="Path to the biencoder model.",
)
parser.add_argument(
"--biencoder_config",
dest="biencoder_config",
type=str,
default="models/biencoder_wiki_large.json",
help="Path to the biencoder configuration.",
)
parser.add_argument(
"--entity_catalogue",
dest="entity_catalogue",
type=str,
# default="models/tac_entity.jsonl", # TAC-KBP
default="models/entity.jsonl", # ALL WIKIPEDIA!
help="Path to the entity catalogue.",
)
parser.add_argument(
"--entity_encoding",
dest="entity_encoding",
type=str,
# default="models/tac_candidate_encode_large.t7", # TAC-KBP
default="models/all_entities_large.t7", # ALL WIKIPEDIA!
        help="Path to the entity encoding.",
)
# crossencoder
parser.add_argument(
"--crossencoder_model",
dest="crossencoder_model",
type=str,
default="models/crossencoder_wiki_large.bin",
help="Path to the crossencoder model.",
)
parser.add_argument(
"--crossencoder_config",
dest="crossencoder_config",
type=str,
default="models/crossencoder_wiki_large.json",
help="Path to the crossencoder configuration.",
)
parser.add_argument(
"--top_k",
dest="top_k",
type=int,
default=10,
help="Number of candidates retrieved by biencoder.",
)
# output folder
parser.add_argument(
"--output_path",
dest="output_path",
type=str,
default="output",
help="Path to the output.",
)
parser.add_argument(
"--fast", dest="fast", action="store_true", help="only biencoder mode"
)
parser.add_argument(
"--show_url",
dest="show_url",
action="store_true",
help="whether to show entity url in interactive mode",
)
parser.add_argument(
"--faiss_index", type=str, default=None, help="whether to use faiss index",
)
parser.add_argument(
"--index_path", type=str, default=None, help="path to load indexer",
)
args = parser.parse_args()
logger = utils.get_logger(args.output_path)
models = load_models(args, logger)
run(args, logger, *models)
|
BLINK-main
|
blink/main_dense.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
|
BLINK-main
|
blink/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import logging
import numpy
import os
import time
import torch
from blink.indexer.faiss_indexer import DenseFlatIndexer, DenseHNSWFlatIndexer
import blink.candidate_ranking.utils as utils
logger = utils.get_logger()
def main(params):
output_path = params["output_path"]
output_dir, _ = os.path.split(output_path)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
logger = utils.get_logger(output_dir)
logger.info("Loading candidate encoding from path: %s" % params["candidate_encoding"])
candidate_encoding = torch.load(params["candidate_encoding"])
vector_size = candidate_encoding.size(1)
index_buffer = params["index_buffer"]
if params["hnsw"]:
logger.info("Using HNSW index in FAISS")
index = DenseHNSWFlatIndexer(vector_size, index_buffer)
else:
logger.info("Using Flat index in FAISS")
index = DenseFlatIndexer(vector_size, index_buffer)
logger.info("Building index.")
index.index_data(candidate_encoding.numpy())
logger.info("Done indexing data.")
if params.get("save_index", None):
index.serialize(output_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
"--output_path",
required=True,
type=str,
help="output file path",
)
parser.add_argument(
"--candidate_encoding",
default="models/all_entities_large.t7",
type=str,
        help="file path for candidate encoding.",
)
parser.add_argument(
"--hnsw", action='store_true',
help='If enabled, use inference time efficient HNSW index',
)
parser.add_argument(
"--save_index", action='store_true',
help='If enabled, save index',
)
parser.add_argument(
'--index_buffer', type=int, default=50000,
        help="Temporary memory data buffer size (in samples) for indexer",
)
params = parser.parse_args()
params = params.__dict__
main(params)
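# Example invocation (output path is hypothetical):
#     python blink/build_faiss_index.py --output_path output/faiss_hnsw_index.pkl \
#         --candidate_encoding models/all_entities_large.t7 --hnsw --save_index
# The saved index can then be used by blink.main_dense via --faiss_index hnsw --index_path <path>.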
|
BLINK-main
|
blink/build_faiss_index.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from flair.models import SequenceTagger
from flair.data import Sentence
def get_model(parameters=None):
return Flair(parameters)
class NER_model:
def __init__(self, parameters=None):
pass
    def predict(self, sents):
        """Sents: List of plain text consecutive sentences.
        Returns a dictionary consisting of a list of sentences and a list of mentions, where for each mention AT LEAST the following information is given (additional fields may also be present):
sent_idx - the index of the sentence that contains the mention
text - the textual span that we hypothesise that represents an entity
start_pos - the character idx at which the textual mention starts
end_pos - the character idx at which the mention ends"""
pass
class Flair(NER_model):
def __init__(self, parameters=None):
self.model = SequenceTagger.load("ner")
def predict(self, sentences):
mentions = []
for sent_idx, sent in enumerate(sentences):
sent = Sentence(sent, use_tokenizer=True)
self.model.predict(sent)
sent_mentions = sent.to_dict(tag_type="ner")["entities"]
for mention in sent_mentions:
mention["sent_idx"] = sent_idx
mentions.extend(sent_mentions)
return {"sentences": sentences, "mentions": mentions}
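# Illustrative usage sketch (requires flair to download its pretrained "ner" tagger on first use):
#     ner = get_model()
#     out = ner.predict(["Barack Obama was born in Hawaii."])
#     # out["sentences"] echoes the input; out["mentions"] holds dicts with at least
#     # "text", "start_pos" and "end_pos" from flair, plus the "sent_idx" added above.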
|
BLINK-main
|
blink/ner.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import io
import json
import os
import pickle
from segtok.segmenter import split_multi
##### Reading helpers #####
def read_sentences_from_file(path_to_file, one_sentence_per_line=True):
lines = []
with io.open(path_to_file, mode="r", encoding="utf-8") as file:
for line in file:
line = line.strip()
if line != "":
lines.append(line.strip())
if one_sentence_per_line:
sentences = lines
else:
text = " ".join(lines)
sentences = list(split_multi(text))
sentences = [sentence for sentence in sentences if sentence != ""]
return sentences
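# Illustrative note on the two modes above: with one_sentence_per_line=True every non-empty
# line becomes one sentence, while with one_sentence_per_line=False the lines are joined and
# re-split with segtok's split_multi, so a wrapped paragraph still yields one entry per sentence.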
##### Printing / writing helpers #####
def get_candidate_summary(candidate):
wikipedia_id = candidate["wikipedia_id"]
wikidata_id = candidate["wikidata_id"]
wikipedia_title = candidate["wikipedia_title"]
return "{}, {}, {}".format(wikipedia_id, wikidata_id, wikipedia_title)
def present_sentence_mentions(sentence, mentions, output_file):
if output_file != None:
f = io.open(output_file, mode="a", encoding="utf-8")
output = lambda s: f.write("{}\n".format(s))
else:
output = lambda s: print(s)
output("Sentence: {}".format(sentence))
mention_entity_pairs = []
for mention in mentions:
candidates = mention["candidates"]
# prediction = mention.get('predicted_candidate_idx', 0)
prediction = mention["predicted_candidate_idx"]
if prediction < len(candidates):
# print(type(mention['prob_assigned_to_candidate']))
# print(mention['prob_assigned_to_candidate'])
mention_rep = "{} ({}, {}) - {} (conf. {:.5f})".format(
mention["text"],
mention["start_pos"],
mention["end_pos"],
get_candidate_summary(candidates[prediction]),
mention["prob_assigned_to_candidate"],
)
else:
mention_rep = "{} ({}, {}) - {}".format(
mention["text"],
mention["start_pos"],
mention["end_pos"],
"No candidate selected",
)
mention_entity_pairs.append(mention_rep)
if len(mention_entity_pairs) != 0:
output("Mention-Entity pairs: \n{}".format("\n".join(mention_entity_pairs)))
else:
output("No detected mentions")
output("")
def sentence_mentions_pairs(sentences, mentions):
mentions_per_sent = {}
for m in mentions:
sent_idx = int(m["sent_idx"])
curr_ments = mentions_per_sent.get(sent_idx, [])
curr_ments.append(m)
mentions_per_sent[sent_idx] = curr_ments
pairs = []
for idx, sent in enumerate(sentences):
pairs.append((sent, mentions_per_sent.get(idx, [])))
return pairs
def present_annotated_sentences(sentences, mentions, output_file=None):
pairs = sentence_mentions_pairs(sentences, mentions)
for sent, ments in pairs:
present_sentence_mentions(sent, ments, output_file)
def write_dicts_as_json_per_line(list_of_dicts, txt_file_path):
with io.open(txt_file_path, mode="w", encoding="utf-8") as file:
for idx, mention in enumerate(list_of_dicts):
json_string = json.dumps(mention)
file.write(json_string)
if idx != (len(list_of_dicts) - 1):
file.write("\n")
def get_mentions_txt_file_path(output_folder_path):
os.makedirs(output_folder_path, exist_ok=True)
file_name = "mentions.jsonl"
path_to_file = os.path.join(output_folder_path, file_name)
return path_to_file
def get_sentences_txt_file_path(output_folder_path):
os.makedirs(output_folder_path, exist_ok=True)
file_name = "sentences.jsonl"
path_to_file = os.path.join(output_folder_path, file_name)
return path_to_file
def get_end2end_pickle_output_file_path(output_folder_path):
os.makedirs(output_folder_path, exist_ok=True)
file_name = "mentions_and_sentences.pickle"
path_to_file = os.path.join(output_folder_path, file_name)
return path_to_file
def write_end2end_pickle_output(sentences, mentions, output_file_id):
obj = {"sentences": sentences, "mentions": mentions}
with open(get_end2end_pickle_output_file_path(output_file_id), "wb") as file:
pickle.dump(obj, file)
def get_end2end_pretty_output_file_path(output_folder_path):
os.makedirs(output_folder_path, exist_ok=True)
file_name = "pretty.txt"
path_to_file = os.path.join(output_folder_path, file_name)
return path_to_file
|
BLINK-main
|
blink/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import blink.utils as utils
import blink.ner as NER
import blink.candidate_generation as CG
import blink.candidate_data_fetcher as CDF
import blink.reranker as R
import argparse
import shutil
def main(parameters):
print("Parameters:", parameters)
# Read data
sentences = utils.read_sentences_from_file(
parameters["path_to_input_file"],
one_sentence_per_line=parameters["one_sentence_per_line"],
)
# Identify mentions
ner_model = NER.get_model(parameters)
ner_output_data = ner_model.predict(sentences)
sentences = ner_output_data["sentences"]
mentions = ner_output_data["mentions"]
output_folder_path = parameters["output_folder_path"]
if (
(output_folder_path is not None)
and os.path.exists(output_folder_path)
and os.listdir(output_folder_path)
):
print(
"The given output directory ({}) already exists and is not empty.".format(
output_folder_path
)
)
answer = input("Would you like to empty the existing directory? [Y/N]\n")
if answer.strip() == "Y":
print("Deleting {}...".format(output_folder_path))
shutil.rmtree(output_folder_path)
else:
raise ValueError(
"Output directory ({}) already exists and is not empty.".format(
output_folder_path
)
)
if output_folder_path is not None:
utils.write_dicts_as_json_per_line(
sentences, utils.get_sentences_txt_file_path(output_folder_path)
)
utils.write_dicts_as_json_per_line(
mentions, utils.get_mentions_txt_file_path(output_folder_path)
)
# Generate candidates and get the data that describes the candidates
candidate_generator = CG.get_model(parameters)
candidate_generator.process_mentions_for_candidate_generator(
sentences=sentences, mentions=mentions
)
for mention in mentions:
mention["candidates"] = candidate_generator.get_candidates(mention)
if parameters["consider_additional_datafetcher"]:
data_fetcher = CDF.get_model(parameters)
for candidate in mention["candidates"]:
data_fetcher.get_data_for_entity(candidate)
if output_folder_path is not None:
utils.write_dicts_as_json_per_line(
mentions, utils.get_mentions_txt_file_path(output_folder_path)
)
# Reranking
reranking_model = R.get_model(parameters)
reranking_model.rerank(mentions, sentences)
if output_folder_path is not None:
utils.write_dicts_as_json_per_line(
mentions, utils.get_mentions_txt_file_path(output_folder_path)
)
utils.write_end2end_pickle_output(sentences, mentions, output_folder_path)
utils.present_annotated_sentences(
sentences,
mentions,
utils.get_end2end_pretty_output_file_path(output_folder_path),
)
# Showcase results
utils.present_annotated_sentences(sentences, mentions)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Input data
parser.add_argument(
"--path_to_input_file",
"--i",
dest="path_to_input_file",
type=str,
required=True,
)
parser.add_argument(
"--one_sentence_per_line",
action="store_true",
help="Set if the input file has one sentence per line",
)
# Candidate generation
parser.add_argument(
"--solr_address",
default="http://localhost:8983/solr/wikipedia",
type=str,
help="The address to the solr index.",
)
parser.add_argument(
"--query",
type=str,
default='title:( {} ) OR aliases:" {} " OR sent_desc_1:( {} )^0.5',
help="The query following the argument template of str.format",
)
parser.add_argument(
"--keys",
type=str,
default="text,text,context",
        help="The comma-separated list of keys to be fed to str.format, with the query as the formatting string.",
)
parser.add_argument(
"--boosting",
default="log(sum(num_incoming_links,1))",
type=str,
        help="The boosting function applied to the solr query (edismax bf parameter).",
)
parser.add_argument(
"--raw_solr_fields",
action="store_true",
        help="If set, do not escape special characters in the solr query fields (use them raw).",
)
# Candidate desciptions and additional data
parser.add_argument(
"--consider_additional_datafetcher",
action="store_true",
help="Whether to include some additional data to the candidates using a datafetcher.",
)
parser.add_argument(
"--path_to_candidate_data_dict",
default="data/KB_data/title2enriched_parsed_obj_plus.p",
type=str,
help="The path to the data used by the data fetcher (the default path points to the wikipedia data).",
)
# Reranking
parser.add_argument(
"--path_to_model",
"--m",
dest="path_to_model",
type=str,
required=True,
help="The full path to the model.",
)
parser.add_argument(
"--max_seq_length",
default=512,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.",
)
parser.add_argument(
"--evaluation_batch_size",
default=1,
type=int,
help="Total batch size for evaluation.",
)
parser.add_argument(
"--top_k",
type=int,
default=80,
        help="The number of candidates retrieved by the candidate generator and considered by the reranker.",
)
parser.add_argument(
"--no_cuda", action="store_true", help="Whether to use CUDA when available"
)
parser.add_argument(
"--lowercase_flag",
action="store_true",
help="Whether to lower case the input text. True for uncased models, False for cased models.",
)
parser.add_argument(
"--context_key",
default="tagged_context",
type=str,
help="The field that contains the mention context.",
)
parser.add_argument(
"--dataparallel_bert",
action="store_true",
        help="Whether to distribute the candidate generation process.",
)
parser.add_argument(
"--silent", action="store_true", help="Whether to print progress bars."
)
# Output
parser.add_argument(
"--output_folder_path",
"--o",
dest="output_folder_path",
default=None,
type=str,
        help="A path to the folder where the mentions and sentences are to be dumped. If it is not given, the results will not be saved.",
)
args = parser.parse_args()
args.rows = args.top_k
parameters = args.__dict__
main(parameters)
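# Example invocation (all paths are hypothetical); it assumes a running solr instance that
# serves the wikipedia collection at --solr_address:
#     python blink/main_solr.py --i data/input.txt --m models/reranker_model.bin --o output/run1 \
#         --one_sentence_per_line --consider_additional_datafetcher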
|
BLINK-main
|
blink/main_solr.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from multiprocessing.pool import ThreadPool
from candidate_generators import (
Simple_Candidate_Generator,
Pregenerated_Candidates_Data_Fetcher,
)
import multiprocessing
import utils
import time
import argparse
import pickle
import os
from evaluator import Evaluator
from tqdm import tqdm
import pysolr
from tqdm import tqdm
def run_thread(arguments):
mentions = arguments["data"]
candidate_generator = arguments["candidate_generator"]
args = arguments["args"]
if args.keep_pregenerated_candidates:
data_fetcher = arguments["pregenereted_cands_data_fetcher"]
if arguments["id"] == 0:
print("Query args: ", candidate_generator.query_arguments)
print_query_flag = True
for mention in tqdm(mentions):
mention["generated_candidates"] = candidate_generator.get_candidates(
mention, print_query_flag=print_query_flag
)
print_query_flag = False
if args.keep_pregenerated_candidates:
wikidata_ids = mention["candidates_wikidata_ids"]
mention["candidates_data"] = data_fetcher.get_candidates_data(
wikidata_ids
)
else:
for mention in mentions:
mention["generated_candidates"] = candidate_generator.get_candidates(
mention
)
if args.keep_pregenerated_candidates:
wikidata_ids = mention["candidates_wikidata_ids"]
mention["candidates_data"] = data_fetcher.get_candidates_data(
wikidata_ids
)
return arguments["id"], mentions
def split(a, n):
k, m = divmod(len(a), n)
return (a[i * k + min(i, m) : (i + 1) * k + min(i + 1, m)] for i in range(n))
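# Illustrative sketch: split() hands any remainder to the first chunks, e.g.
#     list(split(list(range(10)), 3)) -> [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]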
def main(args):
wall_start = time.time()
parameters = get_parameters(args)
print("Candidate generator parameters:", parameters)
datasets = utils.get_datasets(
args.include_aida_train, args.keep_pregenerated_candidates
)
if args.single_dataset:
datasets = [datasets[0]]
mentions = utils.get_list_of_mentions(datasets)
# NUM_TREADS = multiprocessing.cpu_count()
NUM_THREADS = args.num_threads
pool = ThreadPool(NUM_THREADS)
# Split the data into approximately equal parts and give one block to each thread
data_per_thread = split(mentions, NUM_THREADS)
if args.keep_pregenerated_candidates:
arguments = [
{
"id": idx,
"data": data_bloc,
"args": args,
"candidate_generator": Simple_Candidate_Generator(parameters),
"pregenereted_cands_data_fetcher": Pregenerated_Candidates_Data_Fetcher(
parameters
),
}
for idx, data_bloc in enumerate(data_per_thread)
]
else:
arguments = [
{
"id": idx,
"data": data_bloc,
"args": args,
"candidate_generator": Simple_Candidate_Generator(parameters),
}
for idx, data_bloc in enumerate(data_per_thread)
]
results = pool.map(run_thread, arguments)
# Merge the results
processed_mentions = []
for _id, mentions in results:
processed_mentions = processed_mentions + mentions
has_gold = 0
pool.terminate()
pool.join()
execution_time = (time.time() - wall_start) / 60
print("The execution took:", execution_time, " minutes")
# Evaluate the generation
evaluator = Evaluator(processed_mentions)
evaluator.candidate_generation(
save_gold_pos=True, save_pregenerated_gold_pos=args.keep_pregenerated_candidates
)
# Dump the data if the dump_mentions flag was set
if args.dump_mentions:
print("Dumping processed mentions")
# Create the directory for the mention dumps if it does not exist
dump_folder = args.dump_mentions_folder
os.makedirs(dump_folder, exist_ok=True)
dump_object = {}
dump_object["mentions"] = processed_mentions
dump_object["total_per_dataset"] = evaluator.total_per_dataset
dump_object["has_gold_per_dataset"] = evaluator.has_gold_per_dataset
dump_object["parameters"] = parameters
dump_object["args"] = args
dump_object["execution_time"] = execution_time
pickle.dump(
dump_object,
open(os.path.join(dump_folder, args.dump_file_id), "wb"),
protocol=4,
)
# evaluator.candidate_generation(max_rank=100)
return evaluator.recall
def get_parameters(args):
parameters = {
"collection_name": args.collection_name,
"rows": args.rows,
"solr_address": args.solr_address,
}
parameters["query_data"] = {}
parameters["query_data"]["string"] = args.query
parameters["query_data"]["keys"] = [k.strip() for k in args.keys.split(",")]
parameters["boosting"] = args.boosting
return parameters
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Debugging setting
parser.add_argument("--single_dataset", dest="single_dataset", action="store_true")
parser.set_defaults(single_dataset=False)
# Query parameters
parser.add_argument(
"--query",
type=str,
default='title:( {} ) OR aliases:" {} " OR sent_desc_1:( {} )^0.5',
        help="The query following the argument template of str.format",
)
parser.add_argument(
"--keys",
type=str,
default="mention,mention,sent_context_curr",
        help="The comma-separated list of keys to be fed to str.format, with the query as the formatting string. Example fields: `mention`, `query_context`, `query_truncated_10_context` or `query_truncated_25_context`",
)
parser.add_argument("--rows", type=int, default=80)
parser.add_argument("--collection_name", type=str, default="wikipedia")
parser.add_argument("--solr_address", type=str, default="http://localhost:8983")
parser.add_argument(
"--boosting", type=str, default="log(sum(num_incoming_links,1))"
)
# Multithreading
parser.add_argument("--num_threads", type=int, required=True)
# Candidates dumping
parser.add_argument("--dump_mentions", dest="dump_mentions", action="store_true")
parser.set_defaults(dump_mentions=False)
parser.add_argument(
"--dump_mentions_folder", type=str, default="data/mention_dumps"
)
parser.add_argument("--dump_file_id", type=str)
# Include training dataset
parser.add_argument(
"--include_aida_train", dest="include_aida_train", action="store_true"
)
parser.set_defaults(include_aida_train=False)
# Keep pregenerated candidates
parser.add_argument(
"--keep_pregenerated_candidates",
action="store_true",
help="Whether to keep the candidates given with the dataset.",
)
args = parser.parse_args()
print(args)
main(args)
|
BLINK-main
|
blink/candidate_retrieval/perform_and_evaluate_candidate_retrieval_multithreaded.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import pysolr
import pickle
import emoji
import time
import os
parser = argparse.ArgumentParser()
parser.add_argument(
"--processed_data_file_path",
type=str,
help="The full path to the data file",
required=True,
)
parser.add_argument(
"--collection_name",
type=str,
help="The solr collection name, in which the ingestion should be performed",
required=True,
)
parser.add_argument(
"--add_sentence_data", dest="add_sentence_data", action="store_true"
)
parser.set_defaults(add_sentence_data=False)
parser.add_argument(
"--remove_disambiguation_pages",
dest="remove_disambiguation_pages",
action="store_true",
)
parser.set_defaults(remove_disambiguation_pages=False)
parser.add_argument("--min_tokens", type=int, default=0)
args = parser.parse_args()
processed_data_path = args.processed_data_file_path
collection_name = args.collection_name
# processed_data_path = "/scratch/martinjosifoski/data/en-wiki-filtered-wikidata"
def remove_all_docs():
solr.delete(q="*:*")
def load_data():
return pickle.load(open(processed_data_path, "rb"))
def get_data_for_key(data, title):
obj = {}
obj["id"] = data[title]["wikipedia_id"]
obj["title"] = title
if ("wikidata_info" in data[title]) and (
data[title]["wikidata_info"]["wikidata_id"] is not None
):
obj["wikidata_id"] = data[title]["wikidata_info"]["wikidata_id"]
else:
obj["wikidata_id"] = data[title]["wikidata_id_from_index"]
description = data[title]["intro_concatenated"]
obj["desc"] = description
if "wikidata_info" in data[title]:
if "description" in data[title]["wikidata_info"]:
wikidata_description = data[title]["wikidata_info"]["description"]
else:
wikidata_description = ""
if ("aliases" in data[title]["wikidata_info"]) and (
data[title]["wikidata_info"]["aliases"]
) is not None:
aliases = " ".join(
[
'"{}"'.format(alias)
for alias in data[title]["wikidata_info"]["aliases"]
if alias not in emoji.UNICODE_EMOJI
]
)
else:
aliases = ""
else:
aliases = ""
wikidata_description = ""
obj["aliases"] = aliases
obj["wikidata_desc"] = wikidata_description
obj["num_tokens"] = data[title]["num_tokens"]
obj["num_incoming_links"] = data[title].get("num_incoming_links", 0)
if args.add_sentence_data:
for k in range(0, 10):
key = "sent_desc_{}".format(k + 1)
obj[key] = data[title].get(key, "")
return obj
print("Loading data")
title2data = load_data()
for key in title2data:
title2data[key]["intro_concatenated"] = " ".join(
[line for line in title2data[key]["intro_lines"] if line != ""]
)
# Filter documents with fewer than `args.min_tokens` tokens
if args.min_tokens != 0:
    print("Removing documents with fewer than {} tokens".format(args.min_tokens))
print("Number of docs BEFORE removal:", len(title2data))
title2data = {
key: value
for key, value in title2data.items()
if value["num_tokens"] >= args.min_tokens
}
print("Number of docs AFTER removal:", len(title2data))
print("")
# Remove disambiguation pages
if args.remove_disambiguation_pages:
print("Remove disambiguation pages")
print("Number of docs BEFORE removal:", len(title2data))
titles_to_delete = []
for title in title2data:
parsed_obj = title2data[title]
if ("disambiguation" in title) or ("Disambiguation" in title):
titles_to_delete.append(title)
else:
if (parsed_obj.get("wikidata_info", None) is not None) and (
parsed_obj["wikidata_info"].get("description", None) is not None
):
wikidata_info = parsed_obj["wikidata_info"]
if ("disambiguation page" in wikidata_info["description"]) or (
"Disambiguation page" in wikidata_info["description"]
):
titles_to_delete.append(title)
for title in titles_to_delete:
del title2data[title]
print("Number of docs AFTER removal:", len(title2data))
print("Number of removed docs:", len(titles_to_delete))
print("")
ingestion_data = [get_data_for_key(title2data, key) for key in title2data]
print("Starting ingestion")
wall_start = time.time()
l = 0
r = step = 10000
solr = pysolr.Solr(
"http://localhost:8983/solr/{}".format(collection_name),
always_commit=True,
timeout=100,
)
c = 0
for r in range(r, len(ingestion_data), step):
c += 1
if (c % 10) == 0:
print("Processed", c, "batches")
temp_data = ingestion_data[l:r]
solr.add(temp_data, commit=True)
l = r
solr.add(ingestion_data[l : len(ingestion_data)], commit=True)
solr.commit()
print("The processing took:", (time.time() - wall_start) / 60, " minutes")
|
BLINK-main
|
blink/candidate_retrieval/data_ingestion.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import pickle
import os
import argparse
import sys
parser = argparse.ArgumentParser()
parser.add_argument(
"--output_folder",
type=str,
help="The full path to the output folder",
required=True,
)
args = parser.parse_args()
output_folder = args.output_folder
output_file_path = os.path.join(output_folder, "en-wiki-filtered-wikidata")
if os.path.isfile(output_file_path):
print("Output file `{}` already exists!".format(output_file_path))
sys.exit()
# Add wikidata_id from the download index to wikipedia articles whenever we have it
wikipediaid2wikidataid_file_path = os.path.join(
output_folder, "wikipediaid2wikidataid.p"
)
wikipedia_data_filtered_file_path = os.path.join(output_folder, "en-wiki-filtered")
wikipediaid2wikidataid = pickle.load(open(wikipediaid2wikidataid_file_path, "rb"))
wikipedia_data_filtered = pickle.load(open(wikipedia_data_filtered_file_path, "rb"))
for key in wikipedia_data_filtered.keys():
wikipedia_id, wikipedia_title = key
wikipedia_id = int(wikipedia_id)
if wikipedia_id in wikipediaid2wikidataid:
wikidata_id = wikipediaid2wikidataid[wikipedia_id]
wikipedia_data_filtered[key]["wikidata_id_from_index"] = wikidata_id
else:
wikipedia_data_filtered[key]["wikidata_id_from_index"] = None
# Read the processed wikidata object and generate amenable mappings
wikidataid_title2parsed_obj_file_path = os.path.join(
output_folder, "wikidataid_title2parsed_obj.p"
)
wikidataid_title2parsed_obj = pickle.load(
open(wikidataid_title2parsed_obj_file_path, "rb")
)
title2parsed_obj = {}
wikidataid2parsed_obj = {}
for key in wikidataid_title2parsed_obj.keys():
wikidata_id, wikipedia_title = key
wikidataid_title2parsed_obj[key]["wikidata_id"] = wikidata_id
wikidataid_title2parsed_obj[key]["wikipedia_title"] = wikipedia_title
title2parsed_obj[wikipedia_title] = wikidataid_title2parsed_obj[key]
wikidataid2parsed_obj[wikidata_id] = wikidataid_title2parsed_obj[key]
matched_by_title = 0
not_matched_by_title_list = []
matched_by_id = 0
not_matched_by_anything = []
# link wikipedia with wikidata
for key in wikipedia_data_filtered.keys():
wikipedia_id, wikipedia_title = key
wikipedia_id = int(wikipedia_id)
wikidata_id_from_index = wikipedia_data_filtered[key]["wikidata_id_from_index"]
## 1) TITLE 2) ID
## works better, linking is more accurate
if wikipedia_title in title2parsed_obj:
matched_by_title += 1
wikipedia_data_filtered[key]["wikidata_info"] = title2parsed_obj[
wikipedia_title
]
else:
not_matched_by_title_list.append(
(wikipedia_id, wikipedia_title, wikidata_id_from_index)
)
if (wikidata_id_from_index is not None) and (
wikidata_id_from_index in wikidataid2parsed_obj
):
matched_by_id += 1
wikipedia_data_filtered[key]["wikidata_info"] = wikidataid2parsed_obj[
wikidata_id_from_index
]
else:
not_matched_by_anything.append(
(wikipedia_id, wikipedia_title, wikidata_id_from_index)
)
## 1) ID 2) TITLE
# if (wikidata_id_from_index is not None) and (wikidata_id_from_index in wikidataid2parsed_obj):
# matched_by_id += 1
# wikipedia_data_filtered[key]['wikidata_info'] = wikidataid2parsed_obj[wikidata_id_from_index]
# else:
# not_matched_by_title_list.append((wikipedia_id, wikipedia_title, wikidata_id_from_index))
# if wikipedia_title in title2parsed_obj:
# matched_by_title += 1
# wikipedia_data_filtered[key]['wikidata_info'] = title2parsed_obj[wikipedia_title]
# else:
# not_matched_by_anything.append((wikipedia_id, wikipedia_title, wikidata_id_from_index))
print("Matched by title:", matched_by_title)
print("Matched by id:", matched_by_id)
print("Not found:", len(not_matched_by_anything))
print("Dumping", output_file_path)
pickle.dump(wikipedia_data_filtered, open(output_file_path, "wb"), protocol=4)
|
BLINK-main
|
blink/candidate_retrieval/link_wikipedia_and_wikidata.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import xml.etree.ElementTree as ET
import io
import re
import argparse
import os
import pickle
import sys
import urllib.parse
import regex
parser = argparse.ArgumentParser()
parser.add_argument(
"--input", type=str, help="The full path to the file to process", required=True
)
parser.add_argument(
"--output", type=str, help="The full path to the output file", required=True
)
args = parser.parse_args()
input_file_path = args.input
output_file_path = args.output
if not os.path.isfile(input_file_path):
print("Input file `{}` doesn't exist!".format(output_file_path))
sys.exit()
if os.path.isfile(output_file_path):
print("Output file `{}` already exists!".format(output_file_path))
sys.exit()
xml_end_tag = "</doc>"
entities_with_duplicate_titles = set()
title2id = {}
id_title2parsed_obj = {}
num_lines = 0
with io.open(input_file_path, mode="rt", encoding="utf-8", errors="ignore") as f:
for line in f:
num_lines += 1
c = 0
pattern = re.compile(r"(<a href=([^>]+)>((?:.(?!</a>))*.)</a>)")
docs_failed_xml = 0
with io.open(input_file_path, mode="rt", encoding="utf-8", errors="ignore") as f:
for line in f:
c += 1
if c % 1000000 == 0:
print("Processed: {:.2f}%".format(c * 100 / num_lines))
if line.startswith("<doc id="):
doc_xml = ET.fromstring("{}{}".format(line, xml_end_tag))
doc_attr = doc_xml.attrib
lines = []
lines.append(line.strip())
if line.startswith("</doc>"):
temp_obj = {}
temp_obj["url"] = doc_attr["url"]
temp_obj["lines"] = lines
text = " ".join([l for l in lines if l != ""])
try:
doc_xml = ET.fromstring(text)
                links_xml = list(doc_xml)  # getchildren() was removed in Python 3.9
links = []
for link_xml in links_xml:
link = {}
link["xml_attributes"] = link_xml.attrib
link["text"] = link_xml.text.strip()
link["href_unquoted"] = urllib.parse.unquote(
link_xml.attrib["href"]
)
link_xml.tail = ""
link["raw"] = ET.tostring(
link_xml, encoding="unicode", method="xml"
)
links.append(link)
temp_obj["links_xml"] = links
except Exception as e:
temp_obj["links_xml"] = None
docs_failed_xml += 1
text = " ".join([l for l in lines[1:-1] if l != ""])
links = []
for match in pattern.findall(text):
raw, href, text = match
link = {}
link["raw"] = raw
link["href_unquoted"] = urllib.parse.unquote(href.strip('"'))
link["text"] = text
links.append(link)
temp_obj["links_regex"] = links
id_, title = doc_attr["id"], doc_attr["title"]
key = (id_, title)
id_title2parsed_obj[key] = temp_obj
# # check for duplicate titles
# if title in title2id:
# entities_with_duplicate_titles.add(id_)
# entities_with_duplicate_titles.add(title2id[title])
# print("DUPLICATE TITLE:", id_, title2id[title])
# else:
# title2id[title] = id_
print("Processed: {:.2f}%".format(c * 100 / num_lines))
print("Dumping", output_file_path)
pickle.dump(id_title2parsed_obj, open(output_file_path, "wb"), protocol=4)
# print('Portion of documents with improper xml: {:.2f}%'.format(docs_failed_xml*100/len(id_title2parsed_obj)))
|
BLINK-main
|
blink/candidate_retrieval/process_wiki_extractor_output_links.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import xml.etree.ElementTree as ET
import io
import re
import argparse
import os
import pickle
import sys
parser = argparse.ArgumentParser()
parser.add_argument(
"--input", type=str, help="The full path to the file to process", required=True
)
parser.add_argument(
"--output", type=str, help="The full path to the output file", required=True
)
args = parser.parse_args()
input_file_path = args.input
output_file_path = args.output
if not os.path.isfile(input_file_path):
print("Input file `{}` doesn't exist!".format(output_file_path))
sys.exit()
if os.path.isfile(output_file_path):
print("Output file `{}` already exists!".format(output_file_path))
sys.exit()
xml_end_tag = "</doc>"
entities_with_duplicate_titles = set()
title2id = {}
id_title2parsed_obj = {}
num_lines = 0
with io.open(input_file_path, mode="rt", encoding="utf-8", errors="ignore") as f:
for line in f:
num_lines += 1
c = 0
with io.open(input_file_path, mode="rt", encoding="utf-8", errors="ignore") as f:
for line in f:
c += 1
if c % 1000000 == 0:
print("Processed: {:.2f}%".format(c * 100 / num_lines))
if line.startswith("<doc id="):
doc_xml = ET.fromstring("{}{}".format(line, xml_end_tag))
doc_attr = doc_xml.attrib
first_paragraph_flag = True
lines = []
continue
if not first_paragraph_flag:
continue
if line.startswith("Section::::") or line.startswith("</doc>"):
temp_obj = {}
temp_obj["url"] = doc_attr["url"]
temp_obj["intro_lines"] = lines
id_, title = doc_attr["id"], doc_attr["title"]
key = (id_, title)
id_title2parsed_obj[key] = temp_obj
# # check for duplicate titles
# if title in title2id:
# entities_with_duplicate_titles.add(id_)
# entities_with_duplicate_titles.add(title2id[title])
# print("DUPLICATE TITLE:", id_, title2id[title])
# else:
# title2id[title] = id_
first_paragraph_flag = False
continue
lines.append(line.strip())
print("Dumping", output_file_path)
pickle.dump(id_title2parsed_obj, open(output_file_path, "wb"), protocol=4)
|
BLINK-main
|
blink/candidate_retrieval/process_wiki_extractor_output.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import re
import pickle
import os
import time
import numpy as np
"""
This script is adapted from https://github.com/lephong/mulrel-nel
"""
def read_csv_file(path, added_params):
data = {}
info = True
with open(path, "r", encoding="utf8") as f:
for line in f:
comps = line.strip().split("\t")
doc_name = comps[0] + " " + comps[1]
mention = comps[2]
lctx = comps[3]
rctx = comps[4]
if comps[6] != "EMPTYCAND":
cands = [c.split(",") for c in comps[6:-2]]
cands = [
(",".join(c[2:]).replace('"', "%22").replace(" ", "_"), float(c[1]))
for c in cands
]
else:
cands = []
gold = comps[-1].split(",")
if gold[0] == "-1":
gold = (
",".join(gold[2:]).replace('"', "%22").replace(" ", "_"),
1e-5,
-1,
)
else:
gold = (
",".join(gold[3:]).replace('"', "%22").replace(" ", "_"),
1e-5,
-1,
)
if added_params["generate_cands"]:
if info:
print("Generating candidates")
info = False
cands = added_params["cand_generator"].process(mention)
if doc_name not in data:
data[doc_name] = []
data[doc_name].append(
{
"mention": mention,
"context": (lctx, rctx),
"candidates": cands,
"gold": gold,
}
)
return data
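# --- Illustrative sketch (hypothetical, not part of the original module) ---
# `read_csv_file` above assumes tab-separated rows whose first fields are roughly
# doc id, doc part, mention, left context and right context, followed by candidate
# fields and a final gold field; the exact layout is only inferred from the parsing
# code, so treat this as an illustration. Each candidate field is parsed as
# "id,probability,entity name", as the made-up helper below shows.
def _parse_candidate_field_example():
    field = "534366,0.943,Barack_Obama"  # made-up candidate field
    parts = field.split(",")
    # everything after the probability is the entity name (quotes and spaces normalised)
    name = ",".join(parts[2:]).replace('"', "%22").replace(" ", "_")
    prob = float(parts[1])
    return name, prob  # -> ("Barack_Obama", 0.943)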
### Adds original textual data to pregenerated data
def read_conll_file(data, path):
conll = {}
with open(path, "r", encoding="utf8") as f:
cur_sent = None
cur_doc = None
for line in f:
line = line.strip()
if line.startswith("-DOCSTART-"):
docname = line.split()[1][1:]
conll[docname] = {"sentences": [], "mentions": []}
cur_doc = conll[docname]
cur_sent = []
else:
if line == "":
cur_doc["sentences"].append(cur_sent)
cur_sent = []
else:
comps = line.split("\t")
tok = comps[0]
cur_sent.append(tok)
if len(comps) >= 6:
bi = comps[1]
wikilink = comps[4]
if bi == "I":
cur_doc["mentions"][-1]["end"] += 1
else:
new_ment = {
"sent_id": len(cur_doc["sentences"]),
"start": len(cur_sent) - 1,
"end": len(cur_sent),
"wikilink": wikilink,
}
cur_doc["mentions"].append(new_ment)
# merge with data
    rmpunc = re.compile(r"[\W]+")
for doc_name, content in data.items():
conll_doc = conll[doc_name.split()[0]]
content[0]["conll_doc"] = conll_doc
cur_conll_m_id = 0
for m in content:
mention = m["mention"]
gold = m["gold"]
while True:
cur_conll_m = conll_doc["mentions"][cur_conll_m_id]
cur_conll_mention = " ".join(
conll_doc["sentences"][cur_conll_m["sent_id"]][
cur_conll_m["start"] : cur_conll_m["end"]
]
)
if rmpunc.sub("", cur_conll_mention.lower()) == rmpunc.sub(
"", mention.lower()
):
m["conll_m"] = cur_conll_m
cur_conll_m_id += 1
break
else:
cur_conll_m_id += 1
return data
##### Check whether an entity is a person and whether the document contains other references with a more descriptive name for that person
##### (e.g. John vs. John Snow vs. John Snow Stark). Then process the candidate lists for all of the mentions that fit this description.
def load_person_names(path):
data = []
with open(path, "r", encoding="utf8") as f:
for line in f:
data.append(line.strip().replace(" ", "_"))
return set(data)
def find_coref(ment, mentlist, person_names):
cur_m = ment["mention"].lower()
coref = []
for m in mentlist:
if len(m["candidates"]) == 0 or m["candidates"][0][0] not in person_names:
continue
mention = m["mention"].lower()
start_pos = mention.find(cur_m)
if start_pos == -1 or mention == cur_m:
continue
end_pos = start_pos + len(cur_m) - 1
if (start_pos == 0 or mention[start_pos - 1] == " ") and (
end_pos == len(mention) - 1 or mention[end_pos + 1] == " "
):
coref.append(m)
return coref
def with_coref(dataset, person_names):
for data_name, content in dataset.items():
for cur_m in content:
coref = find_coref(cur_m, content, person_names)
if coref is not None and len(coref) > 0:
cur_cands = {}
for m in coref:
for c, p in m["candidates"]:
cur_cands[c] = cur_cands.get(c, 0) + p
for c in cur_cands.keys():
cur_cands[c] /= len(coref)
cur_m["candidates"] = sorted(
list(cur_cands.items()), key=lambda x: x[1]
)[::-1]
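# --- Illustrative sketch (hypothetical, not part of the original module) ---
# Toy example of the averaging performed by `with_coref`, glossing over the person-name
# filtering done in `find_coref`: if a short mention corefers with two longer person
# mentions, its candidate list becomes the per-candidate average of their probabilities,
# sorted in descending order. The data below is made up purely for illustration.
def _coref_merge_example():
    coref = [
        {"candidates": [("John_Snow", 0.6), ("John_Stark", 0.4)]},
        {"candidates": [("John_Snow", 0.8)]},
    ]
    cur_cands = {}
    for m in coref:
        for c, p in m["candidates"]:
            cur_cands[c] = cur_cands.get(c, 0) + p
    for c in cur_cands.keys():
        cur_cands[c] /= len(coref)
    # -> [("John_Snow", 0.7), ("John_Stark", 0.2)]
    return sorted(cur_cands.items(), key=lambda x: x[1])[::-1]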
######
def eval(testset, system_pred, nel=False):
gold = []
pred = []
for doc_name, content in testset.items():
gold += [c["gold"][0] for c in content] # the gold named entity
pred += [
c["pred"][0] for c in system_pred[doc_name]
] # the predicted named entity
true_pos = 0
for g, p in zip(gold, pred):
if g == p and p != "NIL":
true_pos += 1
if nel:
NIL_preds = len([p for p in pred if p == "NIL"])
total_discovered_mentions = 0
for doc_name, content in testset.items():
            total_discovered_mentions += sum(
                len(ment) for ment in content[0]["ments_per_sent_flair"]
            )
precision = true_pos / (total_discovered_mentions - NIL_preds)
else:
precision = true_pos / len([p for p in pred if p != "NIL"])
recall = true_pos / len(gold)
f1 = 2 * precision * recall / (precision + recall)
return precision, recall, f1
def get_candidate_generator(added_params):
if added_params["candidate_generator_type"] == "p_e_m":
if "p_e_m_data_path" in added_params:
return FetchCandidateEntities(added_params["p_e_m_data_path"])
else:
return FetchCandidateEntities()
else:
pass
class CoNLLDataset:
"""
    Reads the CoNLL-based entity-linking datasets extracted by https://github.com/dalab/deep-ed/
"""
def __init__(self, path, person_path, conll_path, added_params):
if added_params["generate_ments_and_cands"]:
added_params["generate_cands"] = False
if added_params["generate_cands"] or added_params["generate_ments_and_cands"]:
added_params["cand_generator"] = get_candidate_generator(added_params)
print(added_params)
print("load csv")
self.train = read_csv_file(path + "/aida_train.csv", added_params)
self.testA = read_csv_file(path + "/aida_testA.csv", added_params)
self.testB = read_csv_file(path + "/aida_testB.csv", added_params)
self.ace2004 = read_csv_file(path + "/wned-ace2004.csv", added_params)
self.aquaint = read_csv_file(path + "/wned-aquaint.csv", added_params)
self.clueweb = read_csv_file(path + "/wned-clueweb.csv", added_params)
self.msnbc = read_csv_file(path + "/wned-msnbc.csv", added_params)
self.wikipedia = read_csv_file(path + "/wned-wikipedia.csv", added_params)
        self.wikipedia.pop("Jiří_Třanovský Jiří_Třanovský", None)
print("process coref")
person_names = load_person_names(person_path)
with_coref(self.train, person_names)
with_coref(self.testA, person_names)
with_coref(self.testB, person_names)
with_coref(self.ace2004, person_names)
with_coref(self.aquaint, person_names)
with_coref(self.clueweb, person_names)
with_coref(self.msnbc, person_names)
with_coref(self.wikipedia, person_names)
print("load conll")
read_conll_file(self.train, conll_path + "/AIDA/aida_train.txt")
read_conll_file(self.testA, conll_path + "/AIDA/testa_testb_aggregate_original")
read_conll_file(self.testB, conll_path + "/AIDA/testa_testb_aggregate_original")
read_conll_file(
self.ace2004, conll_path + "/wned-datasets/ace2004/ace2004.conll"
)
read_conll_file(
self.aquaint, conll_path + "/wned-datasets/aquaint/aquaint.conll"
)
read_conll_file(self.msnbc, conll_path + "/wned-datasets/msnbc/msnbc.conll")
read_conll_file(
self.clueweb, conll_path + "/wned-datasets/clueweb/clueweb.conll"
)
read_conll_file(
self.wikipedia, conll_path + "/wned-datasets/wikipedia/wikipedia.conll"
)
if added_params["generate_cands"]:
print(
"Number of candidates not present in p_e_m originally, but present when lowercased",
len(added_params["cand_generator"].lower_org),
)
print(
"Number of candidates not present in p_e_m originally, but present in p_e_m_lower when lowercased ",
len(added_params["cand_generator"].lower_lower),
)
class FetchCandidateEntities(object):
"""takes as input a string or a list of words and checks if it is inside p_e_m
if yes it returns the candidate entities otherwise it returns None.
it also checks if string.lower() inside p_e_m and if string.lower() inside p_e_m_low"""
def __init__(self, p_e_m_data_path="data/basic_data/p_e_m_data/"):
print("Reading p_e_m dictionaries")
# return
wall_start = time.time()
self.lower_org = []
self.lower_lower = []
self.p_e_m = pickle.load(
open(os.path.join(p_e_m_data_path, "p_e_m_dict.pickle"), "rb")
)
self.p_e_m_lower = pickle.load(
open(os.path.join(p_e_m_data_path, "p_e_m_lower_dict.pickle"), "rb")
)
self.mention_total_freq = pickle.load(
open(os.path.join(p_e_m_data_path, "mention_total_freq.pickle"), "rb")
)
print("The reading took:", (time.time() - wall_start) / 60, " minutes")
def process(self, span):
"""span can be either a string or a list of words"""
title = span.title()
# 'obama 44th president of united states'.title() # 'Obama 44Th President Of United States'
title_freq = (
self.mention_total_freq[title] if title in self.mention_total_freq else 0
)
span_freq = (
self.mention_total_freq[span] if span in self.mention_total_freq else 0
)
if title_freq == 0 and span_freq == 0:
if span.lower() in self.p_e_m:
self.lower_org.append(span)
return self.p_e_m[span.lower()]
elif span.lower() in self.p_e_m_lower:
self.lower_lower.append(span)
return self.p_e_m_lower[span.lower()]
else:
return []
else:
if span_freq > title_freq:
return self.p_e_m[span]
else:
return self.p_e_m[title]
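# --- Illustrative usage sketch (hypothetical, not part of the original module) ---
# A minimal example of how `FetchCandidateEntities` might be used, assuming the p_e_m
# pickles exist under the default path; the surface form below is made up.
def _fetch_candidates_example():
    fetcher = FetchCandidateEntities()
    # Returns a (possibly empty) list of candidates for the surface form, falling back
    # to lowercased lookups in p_e_m and p_e_m_lower when the exact form is unknown.
    return fetcher.process("Barack Obama")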
|
BLINK-main
|
blink/candidate_retrieval/dataset.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import sqlite3
import pickle
import os
import io
import argparse
import sys
parser = argparse.ArgumentParser()
parser.add_argument(
"--output", type=str, help="The full path to the data folder", required=True
)
args = parser.parse_args()
data_folder = args.output
output_file_name = "title2enriched_parsed_obj.p"
output_file_path = os.path.join(data_folder, output_file_name)
if os.path.isfile(output_file_path):
print("Output file `{}` already exists!".format(output_file_path))
sys.exit()
linktitle2wikidataid_file_name = "linktitle2wikidataid.p"
linktitle2wikidataid_path = os.path.join(data_folder, linktitle2wikidataid_file_name)
linktitle2wikidataid = pickle.load(open(linktitle2wikidataid_path, "rb"))
# Read links data
links_file_name = "en-wikilinks-processed"
links_file_path = os.path.join(data_folder, links_file_name)
links_data = pickle.load(open(links_file_path, "rb"))
print("Links data is loaded")
# Read full text data
full_num_tokens_file_name = "en-wiki-full-text"
full_num_tokens_file_path = os.path.join(data_folder, full_num_tokens_file_name)
full_num_tokens_data = pickle.load(open(full_num_tokens_file_path, "rb"))
print("Full text and number of tokens data is loaded")
# Read linked (wikipedia with wikidata) data
filtered_and_wikidata_file_name = "en-wiki-filtered-wikidata"
filtered_and_wikidata_file_path = os.path.join(
data_folder, filtered_and_wikidata_file_name
)
filtered_and_wikidata_data = pickle.load(open(filtered_and_wikidata_file_path, "rb"))
print("Introduction text, linked with wikidata data is loaded")
# Transform the linked data into a title2parsed_obj dictionary
# Add the number of tokens information
title2parsed_obj = {}
for key in filtered_and_wikidata_data.keys():
wikipedia_id, wikipedia_title = key
filtered_and_wikidata_data[key]["wikipedia_id"] = wikipedia_id
filtered_and_wikidata_data[key]["wikipedia_title"] = wikipedia_title
filtered_and_wikidata_data[key]["num_tokens"] = full_num_tokens_data[key][
"num_tokens"
]
title2parsed_obj[wikipedia_title] = filtered_and_wikidata_data[key]
total = {"xml": 0, "regex": 0}
found = {"xml": 0, "regex": 0}
not_found = {"xml": [], "regex": []}
# Counting using the title
for key in links_data.keys():
wikipedia_id, wikipedia_title = key
    if links_data[key]["links_xml"] is not None:
links = links_data[key]["links_xml"]
total["xml"] = total["xml"] + len(links)
for link in links:
title = link["href_unquoted"]
if title in title2parsed_obj:
title2parsed_obj[title]["num_incoming_links"] = (
title2parsed_obj[title].get("num_incoming_links", 0) + 1
)
found["xml"] = found["xml"] + 1
else:
not_found["xml"].append(link)
else:
links = links_data[key]["links_regex"]
total["regex"] = total["regex"] + len(links)
for link in links:
title = link["href_unquoted"]
if title in title2parsed_obj:
title2parsed_obj[title]["num_incoming_links"] = (
title2parsed_obj[title].get("num_incoming_links", 0) + 1
)
found["regex"] = found["regex"] + 1
else:
not_found["regex"].append(link)
print(
"Matched {:2f}% using only the title".format(
(found["xml"] + found["regex"]) * 100 / (total["xml"] + total["regex"])
)
)
# Counting using the index
wikidataid2count = {}
for link in not_found["xml"] + not_found["regex"]:
title = link["href_unquoted"]
title = title.replace(" ", "_")
if title in linktitle2wikidataid:
wikidata_id = linktitle2wikidataid[title]
wikidataid2count[wikidata_id] = wikidataid2count.get(wikidata_id, 0) + 1
found["xml"] = found["xml"] + 1
elif title.capitalize() in linktitle2wikidataid:
wikidata_id = linktitle2wikidataid[title.capitalize()]
wikidataid2count[wikidata_id] = wikidataid2count.get(wikidata_id, 0) + 1
found["xml"] = found["xml"] + 1
print(
"Matched {:2f}% by additionally using the title to wikidataid index".format(
(found["xml"] + found["regex"]) * 100 / (total["xml"] + total["regex"])
)
)
# Adding the counts from the index to the original dictionary
updated = 0
wikidata_info = 0
wikidata_id_from_index = 0
for key in title2parsed_obj:
parsed_obj = title2parsed_obj[key]
wikidata_id = None
if parsed_obj.get("wikidata_info", None) is not None:
        wikidata_info += 1
if parsed_obj["wikidata_info"].get("wikidata_id", None) is not None:
wikidata_id = parsed_obj["wikidata_info"]["wikidata_id"]
else:
if parsed_obj.get("wikidata_id_from_index", None) is not None:
wikidata_id_from_index += 1
wikidata_id = parsed_obj["wikidata_id_from_index"]
if (wikidata_id is not None) and (wikidata_id in wikidataid2count):
parsed_obj["num_incoming_links"] = (
parsed_obj.get("num_incoming_links", 0) + wikidataid2count[wikidata_id]
)
updated += 1
print("Dumping", output_file_path)
pickle.dump(title2parsed_obj, open(output_file_path, "wb"), protocol=4)
# Include unprocessed data and dump it together with the processed data
# (convenient if we want to extend the data that we use)
for wikipedia_title in title2parsed_obj.keys():
wikipedia_id = title2parsed_obj[wikipedia_title]["wikipedia_id"]
key = wikipedia_id, wikipedia_title
title2parsed_obj[wikipedia_title]["links_data"] = {
"links_xml": links_data[key]["links_xml"],
"links_regex": links_data[key]["links_regex"],
}
title2parsed_obj[wikipedia_title]["lines_full_text"] = full_num_tokens_data[key][
"lines"
]
output_file_name = "title2parsed_obj_full_data.p"
output_file_path = os.path.join(data_folder, output_file_name)
print("Dumping", output_file_path)
pickle.dump(title2parsed_obj, open(output_file_path, "wb"), protocol=4)
|
BLINK-main
|
blink/candidate_retrieval/enrich_data.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import sys
import pickle
import subprocess
import blink.candidate_retrieval.dataset as D
import re
import os
ESCAPE_CHARS_RE = re.compile(r'(?<!\\)(?P<char>[&|+\-!(){}[\]\/^"~*?:])')
def solr_escape(string):
if (string == "OR") or (string == "AND"):
return string.lower()
interior = r"\s+(OR|AND)\s+"
start = r"^(OR|AND) "
end = r" (OR|AND)$"
string = re.sub(interior, lambda x: x.group(0).lower(), string)
string = re.sub(start, lambda x: x.group(0).lower(), string)
string = re.sub(end, lambda x: x.group(0).lower(), string)
return ESCAPE_CHARS_RE.sub(r"\\\g<char>", string)
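# --- Illustrative sketch (hypothetical, not part of the original module) ---
# What `solr_escape` does: Lucene special characters are backslash-escaped and
# stand-alone boolean operators are lowercased so they are treated as plain terms.
# The inputs below are made up for illustration.
def _solr_escape_example():
    assert solr_escape("AT&T (company)") == r"AT\&T \(company\)"
    assert solr_escape("cats AND dogs") == "cats and dogs"
    return True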
linktitle2id = None
def get_wikidata_id_from_link_name(link):
global linktitle2id
if linktitle2id is None:
path_to_file = "data/KB_data/linktitle2wikidataid.p"
if os.path.isfile(path_to_file):
linktitle2id = pickle.load(open(path_to_file, "rb"))
else:
subprocess.call(
"./blink/candidate_retrieval/scripts/generate_wiki2wikidata_mapping.sh"
)
linktitle2id = pickle.load(open(path_to_file, "rb"))
return linktitle2id.get(link, None)
def get_datasets(get_test_dataset=False, get_pregenereted_candidates_wikidata_id=False):
train_and_benchmarking_data_dir = "data/train_and_benchmark_data"
datadir = os.path.join(
train_and_benchmarking_data_dir, "generated/test_train_data/"
)
conll_path = os.path.join(
train_and_benchmarking_data_dir, "basic_data/test_datasets/"
)
person_path = os.path.join(
train_and_benchmarking_data_dir, "basic_data/p_e_m_data/persons.txt"
)
p_e_m_path = os.path.join(train_and_benchmarking_data_dir, "basic_data/p_e_m_data/")
added_params = {
"generate_cands": False,
"generate_ments_and_cands": False,
"candidate_generator_type": "p_e_m",
"p_e_m_data_path": p_e_m_path,
}
conll = D.CoNLLDataset(datadir, person_path, conll_path, added_params)
dev_datasets = [
("aida-A", conll.testA),
("aida-B", conll.testB),
("msnbc", conll.msnbc),
("aquaint", conll.aquaint),
("ace2004", conll.ace2004),
("clueweb", conll.clueweb),
("wikipedia", conll.wikipedia),
]
if get_test_dataset:
dev_datasets.append(("aida-train", conll.train))
not_found = []
total = 0
for ds_name, dataset in dev_datasets:
print("Processing dataset:", ds_name)
for doc_name, content in dataset.items():
for m in content:
total += 1
link = m["gold"][0]
wikidata_id = get_wikidata_id_from_link_name(link)
if wikidata_id is None:
not_found.append(m)
m["gold_wikidata_id"] = wikidata_id
if get_pregenereted_candidates_wikidata_id:
cands = []
for candidate in m["candidates"]:
link, prob = candidate
wikidata_id = get_wikidata_id_from_link_name(link)
cands.append((wikidata_id, link, prob))
m["candidates_wikidata_ids"] = cands
print("Number of entities:", total)
print(
"Wikidata ID not found for:",
len(not_found),
"({:.3f} %)".format(len(not_found) * 1.0 / total),
)
return dev_datasets
def get_sent_context(mention, key, solr_escaped=True):
if not solr_escaped:
mention_data_key = "sent_context_orig"
else:
mention_data_key = "sent_context"
if key.endswith("next"):
if key.endswith("prev_next"):
res = "{} {} {}".format(
""
if mention[mention_data_key][0] is None
else mention[mention_data_key][0],
mention[mention_data_key][1],
""
if mention[mention_data_key][2] is None
else mention[mention_data_key][2],
)
else:
res = "{} {}".format(
mention[mention_data_key][1],
""
if mention[mention_data_key][2] is None
else mention[mention_data_key][2],
)
elif key.endswith("prev"):
res = "{} {}".format(
""
if mention[mention_data_key][0] is None
else mention[mention_data_key][0],
mention[mention_data_key][1],
)
else:
res = mention[mention_data_key][1]
return res.strip()
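# --- Illustrative sketch (hypothetical, not part of the original module) ---
# How `get_sent_context` interprets the key suffix, assuming a mention whose
# (previous, current, next) sentences are ("A.", "B.", "C."); the dict is made up.
def _sent_context_example():
    m = {"sent_context": ("A.", "B.", "C.")}
    assert get_sent_context(m, "sent_context_curr") == "B."
    assert get_sent_context(m, "sent_context_prev") == "A. B."
    assert get_sent_context(m, "sent_context_next") == "B. C."
    assert get_sent_context(m, "sent_context_prev_next") == "A. B. C."
    return True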
def get_list_of_mentions(dev_datasets):
mentions = []
total_invalid = 0
total_valid = 0
for ds_name, dataset in dev_datasets:
invalid = 0
valid = 0
print("Processing dataset:", ds_name)
for doc_name, content in dataset.items():
sentences = content[0]["conll_doc"]["sentences"]
for m in content:
gold_wikidata_id = m["gold_wikidata_id"]
left_context, right_context = m["context"]
m["mention_orig"] = m["mention"]
m["mention"] = solr_escape(m["mention"])
if left_context != "EMPTYCTXT":
left_context_orig = left_context
left_context = solr_escape(left_context)
                else:
                    left_context = ""
                    left_context_orig = ""
if right_context != "EMPTYCTXT":
right_context_orig = right_context
right_context = solr_escape(right_context)
                else:
                    right_context = ""
                    right_context_orig = ""
m["left_context_orig"] = left_context_orig
m["right_context_orig"] = right_context_orig
m["query_context"] = "{} {} {}".format(
left_context, m["mention"], right_context
).strip()
m["query_context_orig"] = "{} {} {}".format(
left_context_orig, m["mention_orig"], right_context_orig
).strip()
truncated_left_context = " ".join(left_context.split(" ")[-25:])
truncated_right_context = " ".join(right_context.split(" ")[:25])
m["query_truncated_25_context"] = "{} {} {}".format(
truncated_left_context, m["mention"], truncated_right_context
).strip()
truncated_left_context = " ".join(left_context.split(" ")[-10:])
truncated_right_context = " ".join(right_context.split(" ")[:10])
m["query_truncated_10_context"] = "{} {} {}".format(
truncated_left_context, m["mention"], truncated_right_context
).strip()
m["dataset_name"] = ds_name
m["doc_name"] = doc_name
sent_id, start, end = (
m["conll_m"]["sent_id"],
m["conll_m"]["start"],
m["conll_m"]["end"],
)
prev_sent_id = sent_id - 1
next_sent_id = sent_id + 1
sent_orig = " ".join(sentences[sent_id]).strip()
m["left_query_sent_context_orig"] = " ".join(sentences[sent_id][:start])
m["right_query_sent_context_orig"] = " ".join(sentences[sent_id][end:])
sent = solr_escape(sent_orig)
# try:
# context_parts_lower = '{} {} {}'.format(m['left_query_sent_context_orig'], m['mention_orig'], m['right_query_sent_context_orig']).strip().lower()
# context_orig_lower = sent_orig.lower()
# assert(context_parts_lower == context_orig_lower)
# except:
# print(context_parts_lower)
# print(context_orig_lower)
# input("")
if prev_sent_id > 0:
prev_sent_orig = " ".join(sentences[prev_sent_id])
prev_sent = solr_escape(prev_sent_orig)
else:
prev_sent_orig = None
prev_sent = None
if next_sent_id < len(sentences):
next_sent_orig = " ".join(sentences[next_sent_id])
next_sent = solr_escape(next_sent_orig)
else:
next_sent_orig = None
next_sent = None
m["sent_context"] = (prev_sent, sent, next_sent)
m["sent_context_orig"] = (prev_sent_orig, sent_orig, next_sent_orig)
# m['sent_context_prev'] = get_sent_context(m, 'sent_context_prev')
# m['sent_context_next'] = get_sent_context(m, 'sent_context_next')
# m['sent_context_prev_next'] = get_sent_context(m, 'sent_context_prev_next')
# m['sent_context_curr'] = get_sent_context(m, 'sent_context_curr')
if gold_wikidata_id is None:
invalid += 1
continue
mentions.append(m)
valid += 1
print("Invalid: ", invalid)
print("Valid: ", valid)
total_invalid += invalid
total_valid += valid
return mentions
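# --- Illustrative sketch (hypothetical, not part of the original module) ---
# The truncated query contexts built above keep only the tokens closest to the mention
# (25 or 10 on each side); a made-up helper with the same effect:
def _truncate_context_example(left_context, mention, right_context, window=25):
    left = " ".join(left_context.split(" ")[-window:])
    right = " ".join(right_context.split(" ")[:window])
    return "{} {} {}".format(left, mention, right).strip()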
def write_candidate_generation_results_for_a_run_to_file(run, results_dump_file_path):
txt_file_path = "{}.txt".format(results_dump_file_path)
with open(txt_file_path, "a+") as file:
id_ = "Q: `{}` === K: `{}` === ID: `{}`".format(
run[0]["query"], run[0]["keys"], run[0]["dump_file_id"]
)
res = " --- ".join(
["{} - {:.2f}".format(key, run[1][key]) for key in sorted(run[1].keys())]
)
file.write("{} === {}\n".format(res, id_))
def write_candidate_generation_execution_time_to_file(
results_dump_file_path, execution_time
):
txt_file_path = "{}.txt".format(results_dump_file_path)
with open(txt_file_path, "a+") as file:
file.write("The execution took: {} minutes".format(execution_time))
def write_candidate_generation_results_to_file(
runs, results_dump_file_path, execution_time=None
):
runs.sort(key=lambda run: -run[1]["overall"])
for run in runs:
write_candidate_generation_results_for_a_run_to_file(
run, results_dump_file_path
)
if execution_time is not None:
write_candidate_generation_execution_time_to_file(
results_dump_file_path, execution_time
)
|
BLINK-main
|
blink/candidate_retrieval/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import pysolr
import sys
import utils
def mention_data_summary(mention):
return (mention["mention"], mention["query_truncated_25_context"])
class Simple_Candidate_Generator:
def __init__(self, params):
self.collection_name = params["collection_name"]
self.solr_address = params["solr_address"]
self.solr = pysolr.Solr(
"{}/solr/{}".format(self.solr_address, self.collection_name),
always_commit=True,
timeout=100,
)
self.rows = params["rows"]
self.query_data = params["query_data"]
self.c = 0
self.query_arguments = {
"fl": "* score",
"rows": self.rows,
"defType": "edismax",
}
if params["boosting"] is not None:
self.query_arguments["bf"] = params["boosting"]
def _filter_result(self, cand):
wikidata_id = cand.get("wikidata_id", None)
res = {
"wikidata_id": wikidata_id,
"wikipedia_id": cand["id"],
"wikipedia_title": cand["title"],
}
res["aliases"] = cand.get("aliases", None)
sents = []
for k in range(0, 10):
key = "sent_desc_{}".format(k + 1)
sents.append(cand.get(key, ""))
res["sentences"] = sents
res["num_incoming_links"] = cand.get("num_incoming_links", 0)
res["score"] = cand["score"]
return res
def get_candidates(
self,
mention_data,
verbose=False,
print_number_of_docs_retrieved=False,
print_query_flag=False,
):
solr = self.solr
query_data = self.query_data
# Build query
keys = query_data["keys"]
query = query_data["string"]
query = query.format(
*[
mention_data[key]
if key in mention_data
else utils.get_sent_context(mention_data, key)
for key in keys
]
)
if print_query_flag:
print("Query: {}".format(query))
try:
results = solr.search(query, **self.query_arguments)
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
print("\nException:", exc_type, "- line", exc_tb.tb_lineno)
print(repr(e))
            self.c += 1
            if self.c < 10:
print(
"Exception with: \ncollection_name: {} \nquery: {} \nmention_data: {} \ndataset_name: {}\nquery_args: {}\n".format(
self.collection_name,
query,
mention_data_summary(mention_data),
mention_data["dataset_name"],
str(self.query_arguments),
)
)
return []
if print_number_of_docs_retrieved:
print("Retrieved {0} result(s).".format(len(results)))
        # Return the full retrieved objects (for debugging purposes)
if verbose:
return results
# Filter the data in the retrieved objects, while ignoring the ones without a wikidata_id (only a very small fraction in the dataset; they are noise)
filtered_results = [
self._filter_result(cand) for cand in results.docs if "wikidata_id" in cand
]
return filtered_results
class Pregenerated_Candidates_Data_Fetcher:
def __init__(self, parameters):
solr_address = "http://localhost:8983/solr/{}".format(
parameters["collection_name"]
)
query_arguments = {"fl": "* score", "rows": 1, "defType": "edismax"}
query_arguments["bf"] = "log(sum(num_incoming_links,1))"
self.solr = pysolr.Solr(solr_address, always_commit=True, timeout=100)
self.query_arguments = query_arguments
def get_candidates_data(self, candidates_wikidata_ids):
candidates_rich = []
for candidate in candidates_wikidata_ids:
candidate_data = self.get_candidate_data_for_wikidata_id(candidate[0])
            if candidate_data is not None:
candidate_data["p_e_m_score"] = candidate[2]
candidates_rich.append(candidate_data)
return candidates_rich
@staticmethod
def filter_result(cand, detailed=True):
wikidata_id = cand.get("wikidata_id", None)
res = {
"wikidata_id": wikidata_id,
"wikipedia_id": cand["id"],
"wikipedia_title": cand["title"],
}
if detailed:
res["aliases"] = cand.get("aliases", None)
sents = []
for k in range(0, 10):
key = "sent_desc_{}".format(k + 1)
sents.append(cand.get(key, ""))
res["sentences"] = sents
return res
def get_candidate_data_for_wikidata_id(self, wikidata_id):
results = self.solr.search(
"wikidata_id:{}".format(wikidata_id), **self.query_arguments
)
if len(results) == 0:
return None
filtered_results = [
Pregenerated_Candidates_Data_Fetcher.filter_result(cand)
for cand in results.docs
if "wikidata_id" in cand
]
return filtered_results[0]
|
BLINK-main
|
blink/candidate_retrieval/candidate_generators.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import xml.etree.ElementTree as ET
import io
import re
import argparse
import os
import pickle
import sys
parser = argparse.ArgumentParser()
parser.add_argument(
"--input", type=str, help="The full path to the file to process", required=True
)
parser.add_argument(
"--output", type=str, help="The full path to the output file", required=True
)
args = parser.parse_args()
input_file_path = args.input
output_file_path = args.output
if not os.path.isfile(input_file_path):
print("Input file `{}` doesn't exist!".format(output_file_path))
sys.exit()
if os.path.isfile(output_file_path):
print("Output file `{}` already exists!".format(output_file_path))
sys.exit()
xml_end_tag = "</doc>"
entities_with_duplicate_titles = set()
title2id = {}
id_title2parsed_obj = {}
num_lines = 0
with io.open(input_file_path, mode="rt", encoding="utf-8", errors="ignore") as f:
for line in f:
num_lines += 1
c = 0
with io.open(input_file_path, mode="rt", encoding="utf-8", errors="ignore") as f:
for line in f:
c += 1
if c % 1000000 == 0:
print("Processed: {:.2f}%".format(c * 100 / num_lines))
if line.startswith("<doc id="):
doc_xml = ET.fromstring("{}{}".format(line, xml_end_tag))
doc_attr = doc_xml.attrib
lines = []
continue
if line.startswith("</doc>"):
temp_obj = {}
temp_obj["url"] = doc_attr["url"]
temp_obj["lines"] = lines
text = " ".join([l for l in lines if l != ""])
temp_obj["num_tokens"] = len(text.split(" "))
id_, title = doc_attr["id"], doc_attr["title"]
key = (id_, title)
id_title2parsed_obj[key] = temp_obj
# check for duplicate titles
# if title in title2id:
# entities_with_duplicate_titles.add(id_)
# entities_with_duplicate_titles.add(title2id[title])
# print("DUPLICATE TITLE:", id_, title2id[title])
# else:
# title2id[title] = id_
continue
# if it is not a document start or end tag, add it to lines
lines.append(line.strip())
print("Processed: {:.2f}%".format(c * 100 / num_lines))
print("Dumping", output_file_path)
pickle.dump(id_title2parsed_obj, open(output_file_path, "wb"), protocol=4)
|
BLINK-main
|
blink/candidate_retrieval/process_wiki_extractor_output_full.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import bz2
import sys
import pickle
import os
import json
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--input",
type=str,
help="The full path to the wikidata dump for processing",
required=True,
)
parser.add_argument(
"--output", type=str, help="The full path to the output folder", required=True
)
args = parser.parse_args()
input_file_path = args.input
output_file_path = args.output
if not os.path.isfile(input_file_path):
print("Input file `{}` doesn't exist!".format(output_file_path))
sys.exit()
if os.path.isfile(output_file_path):
print("Output file `{}` already exists!".format(output_file_path))
sys.exit()
id_title2parsed_obj = {}
num_lines = 0
with bz2.open(input_file_path, "rt") as f:
for line in f:
num_lines += 1
c = 0
with bz2.open(input_file_path, "rt") as f:
for line in f:
c += 1
if c % 1000000 == 0:
print("Processed: {:.2f}%".format(c * 100 / num_lines))
try:
json_obj = json.loads(line.strip().strip(","))
if ("sitelinks" not in json_obj) or ("enwiki" not in json_obj["sitelinks"]):
continue
id_, title = json_obj["id"], json_obj["sitelinks"]["enwiki"]["title"]
key = id_, title
parsed_obj = {}
if "en" in json_obj["aliases"]:
parsed_obj["aliases"] = [
alias["value"] for alias in json_obj["aliases"]["en"]
]
else:
parsed_obj["aliases"] = None
if "en" in json_obj["labels"]:
parsed_obj["wikidata_label"] = json_obj["labels"]["en"]["value"]
else:
parsed_obj["wikidata_label"] = None
if "en" in json_obj["descriptions"]:
parsed_obj["description"] = json_obj["descriptions"]["en"]["value"]
else:
parsed_obj["description"] = None
if "enwikiquote" in json_obj["sitelinks"]:
parsed_obj["enwikiquote_title"] = json_obj["sitelinks"]["enwikiquote"][
"title"
]
id_title2parsed_obj[key] = parsed_obj
except Exception as e:
line = line.strip().strip(",")
if line == "[" or line == "]":
continue
exc_type, exc_obj, exc_tb = sys.exc_info()
print("Exception:", exc_type, "- line", exc_tb.tb_lineno)
if len(line) < 30:
print("Failed line:", line)
print("Processed: {:.2f}%".format(c * 100 / num_lines))
print("Dumping", output_file_path)
pickle.dump(id_title2parsed_obj, open(output_file_path, "wb"), protocol=4)
|
BLINK-main
|
blink/candidate_retrieval/process_wikidata.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import pickle
import nltk.data
import argparse
import sys
from tqdm import tqdm
parser = argparse.ArgumentParser()
parser.add_argument(
"--output", type=str, help="The full path to the data folder", required=True
)
args = parser.parse_args()
data_folder = args.output
output_file_name = "title2enriched_parsed_obj_plus.p"
output_file_path = os.path.join(data_folder, output_file_name)
if os.path.isfile(output_file_path):
print("Output file `{}` already exists!".format(output_file_path))
sys.exit()
print("Reading title2parsed_obj data")
title2enriched_parsed_obj_file_name = "title2enriched_parsed_obj.p"
title2enriched_parsed_obj_path = os.path.join(
data_folder, title2enriched_parsed_obj_file_name
)
title2parsed_obj = pickle.load(open(title2enriched_parsed_obj_path, "rb"))
print("Reading title2parsed_obj_full_data")
title2parsed_obj_full_data_file_name = "title2parsed_obj_full_data.p"
title2parsed_obj_full_data_full_path = os.path.join(
data_folder, title2parsed_obj_full_data_file_name
)
title2parsed_obj_full = pickle.load(open(title2parsed_obj_full_data_full_path, "rb"))
sent_detector = nltk.data.load("tokenizers/punkt/english.pickle")
for title in tqdm(title2parsed_obj_full.keys()):
lines = title2parsed_obj_full[title]["lines_full_text"][1:] # remove title
lines = [
line for line in lines if not line.startswith("Section::")
] # remove section titles
lines = [
line.strip() for line in lines if line != ""
] # remove blank lines and trailing spaces
text = " ".join(lines)
sentences = sent_detector.tokenize(text)
sentences = [sent.strip() for sent in sentences]
for k in range(0, min(10, len(sentences))):
key = "sent_desc_{}".format(k + 1)
value = sentences[k]
title2parsed_obj[title][key] = value
print("Dumping", output_file_path)
pickle.dump(title2parsed_obj, open(output_file_path, "wb"), protocol=4)
|
BLINK-main
|
blink/candidate_retrieval/process_intro_sents.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import sqlite3
import pickle
import os
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_file",
type=str,
help="The full path to the precomputed index",
required=True,
)
parser.add_argument(
"--output_folder",
type=str,
help="The full path to the output folder",
required=True,
)
args = parser.parse_args()
precomp_index_path = args.input_file
output_folder_path = args.output_folder
output_file = os.path.join(output_folder_path, "linktitle2wikidataid.p")
if not os.path.isfile(output_file):
conn = sqlite3.connect(precomp_index_path)
cursorObj = conn.cursor()
cursorObj.execute("SELECT wikipedia_title, wikidata_id FROM mapping")
data = cursorObj.fetchall()
linktitle2wikidataid = {item[0]: item[1] for item in data}
pickle.dump(linktitle2wikidataid, open(output_file, "wb"))
else:
print("Output file `{}` already exists!".format(output_file))
output_file = os.path.join(output_folder_path, "wikipediaid2wikidataid.p")
if not os.path.isfile(output_file):
conn = sqlite3.connect(precomp_index_path)
cursorObj = conn.cursor()
cursorObj.execute("SELECT wikipedia_id, wikidata_id FROM mapping")
data = cursorObj.fetchall()
wikipediaid2wikidataid = {item[0]: item[1] for item in data}
pickle.dump(wikipediaid2wikidataid, open(output_file, "wb"))
else:
print("Output file `{}` already exists!".format(output_file))
|
BLINK-main
|
blink/candidate_retrieval/generate_wiki2wikidata_mappings.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import matplotlib.pyplot as plt
import numpy as np
from collections import Counter
class Evaluator:
def __init__(self, data):
self.data = data
def candidate_generation(
self, max_rank=None, save_gold_pos=False, save_pregenerated_gold_pos=False
):
has_gold_per_dataset = {}
total_per_dataset = {}
recall = {}
processed_mentions = self.data
if max_rank is None:
print("Max rank: None")
else:
print("Max rank", max_rank)
for mention in processed_mentions:
dataset_name = mention["dataset_name"]
gold_wikidata_id = mention["gold_wikidata_id"]
gold_pos = -1
for idx, cand in enumerate(mention["generated_candidates"]):
cand_wikidata_id = cand["wikidata_id"]
if gold_wikidata_id == cand_wikidata_id:
gold_pos = idx + 1 # Because idx starts at 0
break
if save_gold_pos:
mention["gold_pos"] = gold_pos
if gold_pos > 0 and ((max_rank is None) or gold_pos <= max_rank):
has_gold = has_gold_per_dataset.get(dataset_name, 0) + 1
has_gold_per_dataset[dataset_name] = has_gold
if save_pregenerated_gold_pos:
pre_gen_gold_pos = -1
for idx, cand in enumerate(mention["candidates_data"]):
cand_wikidata_id = cand["wikidata_id"]
if gold_wikidata_id == cand_wikidata_id:
pre_gen_gold_pos = idx + 1 # Because idx starts at 0
break
mention["pre_gen_candidates_gold_pos"] = pre_gen_gold_pos
total = total_per_dataset.get(dataset_name, 0) + 1
total_per_dataset[dataset_name] = total
total = 0
has_gold = 0
for dataset_name in total_per_dataset:
has_gold_ds = has_gold_per_dataset.get(dataset_name, 0)
total_ds = total_per_dataset[dataset_name]
has_gold += has_gold_ds
total += total_ds
recall[dataset_name] = has_gold_ds / total_ds
print("Dataset:", dataset_name)
print(
"Recall (w.r.t candidate generation): {:.3f}".format(
recall[dataset_name]
)
)
recall["overall"] = has_gold / total
print(
"Overal recall (w.r.t candidate generation): {:.3f}".format(
recall["overall"]
)
)
self.has_gold_per_dataset = has_gold_per_dataset
self.total_per_dataset = total_per_dataset
self.total = total
self.has_gold = has_gold
self.recall = recall
def candidate_generation_recall_at(self, ax=None, max_rank=None):
processed_mentions = self.data
total_num_of_docs = len(processed_mentions)
gold_positions = np.array(
[
mention["gold_pos"]
for mention in processed_mentions
if mention["gold_pos"] >= 0
]
)
        if ax is None:
fig = plt.figure(figsize=(7, 7))
ax = plt.subplot(111)
ax.set_ylabel(str("Recall"))
ax.set_xlabel(str("True entity rank"))
rank_count_pairs = sorted(Counter(gold_positions).items(), key=lambda x: x[0])
# rank_count_pairs = rank_count_pairs[:k]
counts = [i[1] for i in rank_count_pairs]
recall = np.cumsum(counts) / total_num_of_docs * 100
rankings = [i[0] for i in rank_count_pairs]
if max_rank is not None:
for idx, rank in enumerate(rankings):
if rank > max_rank:
rankings = rankings[:idx]
recall = recall[:idx]
break
ax.plot(rankings, recall)
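# --- Illustrative sketch (hypothetical, not part of the original module) ---
# Recall@k as plotted above: the fraction of mentions whose gold entity appears within
# the top k generated candidates, taken over all mentions (a position of -1 means the
# gold entity was never retrieved). The plot above reports the same quantity in percent.
def _recall_at_k_example(gold_positions, k):
    hits = sum(1 for pos in gold_positions if 0 < pos <= k)
    return hits / len(gold_positions)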
|
BLINK-main
|
blink/candidate_retrieval/evaluator.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import pickle
import json
import emoji
import sys
import os
import io
import blink.candidate_retrieval.utils as utils
from tqdm import tqdm
parser = argparse.ArgumentParser()
parser.add_argument(
"--processed_mention_data_file_path",
type=str,
help="The full path to the mention data file",
default="data/mention_dumps/train_and_eval_data",
)
parser.add_argument(
"--dump_folder_path",
type=str,
help="The path to the dump folder",
default="data/train_and_benchmark_processed_json",
)
# Keep pregenerated candidates
parser.add_argument(
"--keep_pregenerated_candidates",
action="store_true",
help="Whether to keep the candidates given with the dataset.",
)
args = parser.parse_args()
print(args)
dump_folder = args.dump_folder_path
path_to_processed_mention_data = args.processed_mention_data_file_path
os.makedirs(dump_folder, exist_ok=True)
print("Reading data")
run_dump = pickle.load(open(path_to_processed_mention_data, "rb"))
mentions = run_dump["mentions"]
dataset2processed_mentions = {}
for m in tqdm(mentions):
mention_obj = {}
mention_obj["candidates"] = m["generated_candidates"]
# Gold data
mention_obj["gold_pos"] = m["gold_pos"]
mention_obj["gold"] = m["gold"]
# Mention data
mention_obj["text"] = m["mention_orig"]
# mention_obj['query_context_50'] = m['query_context_orig']
# mention_obj['query_context_sent_prev_curr_next'] = utils.get_sent_context(m, 'prev_next', solr_escaped=False)
# mention_obj['tagged_context_50'] = (m['left_context_orig'], m['right_context_orig'])
prev_sent = m["sent_context_orig"][0] if m["sent_context_orig"][0] != None else ""
next_sent = m["sent_context_orig"][2] if m["sent_context_orig"][2] != None else ""
mention_obj["tagged_query_context_sent_prev_curr_next"] = (
"{} {}".format(prev_sent, m["left_query_sent_context_orig"]).strip(),
"{} {}".format(m["right_query_sent_context_orig"], next_sent).strip(),
)
mention_obj["tagged_query_context_sent_curr"] = (
m["left_query_sent_context_orig"].strip(),
m["right_query_sent_context_orig"].strip(),
)
# Keep the candidates given with the dataset (used for the purposes of comparison with baseline)
if args.keep_pregenerated_candidates:
mention_obj["pregenerated_candidates"] = m["candidates_data"]
mention_obj["pregenerated_gold_pos"] = m["pre_gen_candidates_gold_pos"]
# Add data to output dics
dataset_name = m["dataset_name"]
processed_mentions = dataset2processed_mentions.get(dataset_name, [])
processed_mentions.append(mention_obj)
dataset2processed_mentions[dataset_name] = processed_mentions
for dataset_name in dataset2processed_mentions:
print("Dumping dataset:", dataset_name)
processed_mentions = dataset2processed_mentions[dataset_name]
file_name = "{}.jsonl".format(dataset_name)
txt_file_path = os.path.join(dump_folder, file_name)
# with open(txt_file_path, "w+") as file:
with io.open(txt_file_path, mode="w", encoding="utf-8") as file:
for idx, mention in enumerate(processed_mentions):
json_string = json.dumps(mention)
file.write(json_string)
if idx != (len(processed_mentions) - 1):
file.write("\n")
|
BLINK-main
|
blink/candidate_retrieval/json_data_generation.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
from tqdm import tqdm
from pytorch_transformers.modeling_utils import CONFIG_NAME, WEIGHTS_NAME
from pytorch_transformers.modeling_bert import (
BertPreTrainedModel,
BertConfig,
BertModel,
)
from pytorch_transformers.modeling_roberta import (
RobertaConfig,
RobertaModel,
)
from pytorch_transformers.tokenization_bert import BertTokenizer
from pytorch_transformers.tokenization_roberta import RobertaTokenizer
from blink.common.ranker_base import BertEncoder, get_model_obj
from blink.common.optimizer import get_bert_optimizer
from blink.common.params import ENT_START_TAG, ENT_END_TAG, ENT_TITLE_TAG
def load_crossencoder(params):
# Init model
crossencoder = CrossEncoderRanker(params)
return crossencoder
class CrossEncoderModule(torch.nn.Module):
def __init__(self, params, tokenizer):
super(CrossEncoderModule, self).__init__()
model_path = params["bert_model"]
if params.get("roberta"):
encoder_model = RobertaModel.from_pretrained(model_path)
else:
encoder_model = BertModel.from_pretrained(model_path)
encoder_model.resize_token_embeddings(len(tokenizer))
self.encoder = BertEncoder(
encoder_model,
params["out_dim"],
layer_pulled=params["pull_from_layer"],
add_linear=params["add_linear"],
)
self.config = self.encoder.bert_model.config
def forward(
self, token_idx_ctxt, segment_idx_ctxt, mask_ctxt,
):
embedding_ctxt = self.encoder(token_idx_ctxt, segment_idx_ctxt, mask_ctxt)
return embedding_ctxt.squeeze(-1)
class CrossEncoderRanker(torch.nn.Module):
def __init__(self, params, shared=None):
super(CrossEncoderRanker, self).__init__()
self.params = params
self.device = torch.device(
"cuda" if torch.cuda.is_available() and not params["no_cuda"] else "cpu"
)
self.n_gpu = torch.cuda.device_count()
if params.get("roberta"):
self.tokenizer = RobertaTokenizer.from_pretrained(params["bert_model"],)
else:
self.tokenizer = BertTokenizer.from_pretrained(
params["bert_model"], do_lower_case=params["lowercase"]
)
special_tokens_dict = {
"additional_special_tokens": [
ENT_START_TAG,
ENT_END_TAG,
ENT_TITLE_TAG,
],
}
self.tokenizer.add_special_tokens(special_tokens_dict)
self.NULL_IDX = self.tokenizer.pad_token_id
self.START_TOKEN = self.tokenizer.cls_token
self.END_TOKEN = self.tokenizer.sep_token
# init model
self.build_model()
if params["path_to_model"] is not None:
self.load_model(params["path_to_model"])
self.model = self.model.to(self.device)
self.data_parallel = params.get("data_parallel")
if self.data_parallel:
self.model = torch.nn.DataParallel(self.model)
def load_model(self, fname, cpu=False):
if cpu:
            state_dict = torch.load(fname, map_location="cpu")
else:
state_dict = torch.load(fname)
self.model.load_state_dict(state_dict)
def save(self, output_dir):
self.save_model(output_dir)
self.tokenizer.save_vocabulary(output_dir)
def build_model(self):
self.model = CrossEncoderModule(self.params, self.tokenizer)
def save_model(self, output_dir):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = get_model_obj(self.model)
output_model_file = os.path.join(output_dir, WEIGHTS_NAME)
output_config_file = os.path.join(output_dir, CONFIG_NAME)
torch.save(model_to_save.state_dict(), output_model_file)
model_to_save.config.to_json_file(output_config_file)
def get_optimizer(self, optim_states=None, saved_optim_type=None):
return get_bert_optimizer(
[self.model],
self.params["type_optimization"],
self.params["learning_rate"],
fp16=self.params.get("fp16"),
)
def score_candidate(self, text_vecs, context_len):
# Encode contexts first
num_cand = text_vecs.size(1)
text_vecs = text_vecs.view(-1, text_vecs.size(-1))
token_idx_ctxt, segment_idx_ctxt, mask_ctxt = to_bert_input(
text_vecs, self.NULL_IDX, context_len,
)
embedding_ctxt = self.model(token_idx_ctxt, segment_idx_ctxt, mask_ctxt,)
return embedding_ctxt.view(-1, num_cand)
def forward(self, input_idx, label_input, context_len):
scores = self.score_candidate(input_idx, context_len)
loss = F.cross_entropy(scores, label_input, reduction="mean")
return loss, scores
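# Build BERT inputs from padded token ids: positions after `segment_pos` (the candidate
# part of the pair) get segment id 1, and padding positions equal to `null_idx` are
# excluded from the attention mask.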
def to_bert_input(token_idx, null_idx, segment_pos):
""" token_idx is a 2D tensor int.
return token_idx, segment_idx and mask
"""
segment_idx = token_idx * 0
if segment_pos > 0:
segment_idx[:, segment_pos:] = token_idx[:, segment_pos:] > 0
mask = token_idx != null_idx
# nullify elements in case self.NULL_IDX was not 0
# token_idx = token_idx * mask.long()
return token_idx, segment_idx, mask
|
BLINK-main
|
blink/crossencoder/crossencoder.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import sys
import numpy as np
from tqdm import tqdm
import blink.biencoder.data_process as data
from blink.common.params import ENT_START_TAG, ENT_END_TAG
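# Helpers that turn biencoder retrieval output (mention samples plus top-k candidate
# entity ids) into the padded token-id tensors consumed by the cross-encoder.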
def prepare_crossencoder_mentions(
tokenizer,
samples,
max_context_length=32,
mention_key="mention",
context_key="context",
ent_start_token=ENT_START_TAG,
ent_end_token=ENT_END_TAG,
):
    context_input_list = [] # samples x max_context_length
for sample in tqdm(samples):
context_tokens = data.get_context_representation(
sample,
tokenizer,
max_context_length,
mention_key,
context_key,
ent_start_token,
ent_end_token,
)
tokens_ids = context_tokens["ids"]
context_input_list.append(tokens_ids)
context_input_list = np.asarray(context_input_list)
return context_input_list
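# For each mention, tokenize its top-k retrieved candidates and record the position of
# the gold entity among them (the label stays -1 if the gold entity was not retrieved).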
def prepare_crossencoder_candidates(
tokenizer, labels, nns, id2title, id2text, max_cand_length=128, topk=100
):
START_TOKEN = tokenizer.cls_token
END_TOKEN = tokenizer.sep_token
    candidate_input_list = [] # samples x topk x max_cand_length
label_input_list = [] # samples
idx = 0
for label, nn in zip(labels, nns):
candidates = []
label_id = -1
for jdx, candidate_id in enumerate(nn[:topk]):
if label == candidate_id:
label_id = jdx
rep = data.get_candidate_representation(
id2text[candidate_id],
tokenizer,
max_cand_length,
id2title[candidate_id],
)
tokens_ids = rep["ids"]
assert len(tokens_ids) == max_cand_length
candidates.append(tokens_ids)
label_input_list.append(label_id)
candidate_input_list.append(candidates)
idx += 1
sys.stdout.write("{}/{} \r".format(idx, len(labels)))
sys.stdout.flush()
label_input_list = np.asarray(label_input_list)
candidate_input_list = np.asarray(candidate_input_list)
return label_input_list, candidate_input_list
def filter_crossencoder_tensor_input(
context_input_list, label_input_list, candidate_input_list
):
    # remove the -1 entries: examples for which the gold entity is not among the candidates
context_input_list_filtered = [
x
for x, y, z in zip(context_input_list, candidate_input_list, label_input_list)
if z != -1
]
label_input_list_filtered = [
z
for x, y, z in zip(context_input_list, candidate_input_list, label_input_list)
if z != -1
]
candidate_input_list_filtered = [
y
for x, y, z in zip(context_input_list, candidate_input_list, label_input_list)
if z != -1
]
return (
context_input_list_filtered,
label_input_list_filtered,
candidate_input_list_filtered,
)
def prepare_crossencoder_data(
tokenizer, samples, labels, nns, id2title, id2text, keep_all=False
):
# encode mentions
context_input_list = prepare_crossencoder_mentions(tokenizer, samples)
# encode candidates (output of biencoder)
label_input_list, candidate_input_list = prepare_crossencoder_candidates(
tokenizer, labels, nns, id2title, id2text
)
if not keep_all:
# remove examples where the gold entity is not among the candidates
(
context_input_list,
label_input_list,
candidate_input_list,
) = filter_crossencoder_tensor_input(
context_input_list, label_input_list, candidate_input_list
)
else:
label_input_list = [0] * len(label_input_list)
context_input = torch.LongTensor(context_input_list)
label_input = torch.LongTensor(label_input_list)
candidate_input = torch.LongTensor(candidate_input_list)
return (
context_input,
candidate_input,
label_input,
)
|
BLINK-main
|
blink/crossencoder/data_process.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import argparse
import pickle
import torch
import json
import sys
import io
import random
import time
import numpy as np
from multiprocessing.pool import ThreadPool
from tqdm import tqdm, trange
from collections import OrderedDict
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from pytorch_transformers.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from pytorch_transformers.optimization import WarmupLinearSchedule
from pytorch_transformers.tokenization_bert import BertTokenizer
import blink.candidate_retrieval.utils
from blink.crossencoder.crossencoder import CrossEncoderRanker, load_crossencoder
import logging
import blink.candidate_ranking.utils as utils
import blink.biencoder.data_process as data
from blink.biencoder.zeshel_utils import DOC_PATH, WORLDS, world_to_id
from blink.common.optimizer import get_bert_optimizer
from blink.common.params import BlinkParser
logger = None
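# Concatenate each context vector with each of its candidate vectors (dropping the
# candidate's leading [CLS] token) and truncate to max_seq_length, producing the joint
# sequences the cross-encoder scores.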
def modify(context_input, candidate_input, max_seq_length):
new_input = []
context_input = context_input.tolist()
candidate_input = candidate_input.tolist()
for i in range(len(context_input)):
cur_input = context_input[i]
cur_candidate = candidate_input[i]
mod_input = []
for j in range(len(cur_candidate)):
# remove [CLS] token from candidate
sample = cur_input + cur_candidate[j][1:]
sample = sample[:max_seq_length]
mod_input.append(sample)
new_input.append(mod_input)
return torch.LongTensor(new_input)
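# Accuracy is the fraction of mentions whose highest-scoring candidate is the gold one;
# for Zeshel, per-world (macro) and overall (micro) accuracies are also logged.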
def evaluate(reranker, eval_dataloader, device, logger, context_length, zeshel=False, silent=True):
reranker.model.eval()
if silent:
iter_ = eval_dataloader
else:
iter_ = tqdm(eval_dataloader, desc="Evaluation")
results = {}
eval_accuracy = 0.0
nb_eval_examples = 0
nb_eval_steps = 0
acc = {}
tot = {}
world_size = len(WORLDS)
for i in range(world_size):
acc[i] = 0.0
tot[i] = 0.0
all_logits = []
cnt = 0
for step, batch in enumerate(iter_):
if zeshel:
src = batch[2]
cnt += 1
batch = tuple(t.to(device) for t in batch)
context_input = batch[0]
label_input = batch[1]
with torch.no_grad():
eval_loss, logits = reranker(context_input, label_input, context_length)
logits = logits.detach().cpu().numpy()
label_ids = label_input.cpu().numpy()
tmp_eval_accuracy, eval_result = utils.accuracy(logits, label_ids)
eval_accuracy += tmp_eval_accuracy
all_logits.extend(logits)
nb_eval_examples += context_input.size(0)
if zeshel:
for i in range(context_input.size(0)):
src_w = src[i].item()
acc[src_w] += eval_result[i]
tot[src_w] += 1
nb_eval_steps += 1
normalized_eval_accuracy = -1
if nb_eval_examples > 0:
normalized_eval_accuracy = eval_accuracy / nb_eval_examples
if zeshel:
macro = 0.0
num = 0.0
for i in range(len(WORLDS)):
if acc[i] > 0:
acc[i] /= tot[i]
macro += acc[i]
num += 1
if num > 0:
logger.info("Macro accuracy: %.5f" % (macro / num))
logger.info("Micro accuracy: %.5f" % normalized_eval_accuracy)
else:
if logger:
logger.info("Eval accuracy: %.5f" % normalized_eval_accuracy)
results["normalized_accuracy"] = normalized_eval_accuracy
results["logits"] = all_logits
return results
def get_optimizer(model, params):
return get_bert_optimizer(
[model],
params["type_optimization"],
params["learning_rate"],
fp16=params.get("fp16"),
)
def get_scheduler(params, optimizer, len_train_data, logger):
batch_size = params["train_batch_size"]
grad_acc = params["gradient_accumulation_steps"]
epochs = params["num_train_epochs"]
num_train_steps = int(len_train_data / batch_size / grad_acc) * epochs
num_warmup_steps = int(num_train_steps * params["warmup_proportion"])
scheduler = WarmupLinearSchedule(
optimizer, warmup_steps=num_warmup_steps, t_total=num_train_steps,
)
logger.info(" Num optimization steps = %d" % num_train_steps)
logger.info(" Num warmup steps = %d", num_warmup_steps)
return scheduler
def main(params):
model_output_path = params["output_path"]
if not os.path.exists(model_output_path):
os.makedirs(model_output_path)
logger = utils.get_logger(params["output_path"])
# Init model
reranker = CrossEncoderRanker(params)
tokenizer = reranker.tokenizer
model = reranker.model
# utils.save_model(model, tokenizer, model_output_path)
device = reranker.device
n_gpu = reranker.n_gpu
if params["gradient_accumulation_steps"] < 1:
raise ValueError(
"Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
params["gradient_accumulation_steps"]
)
)
    # To reach an effective batch size of `x` while accumulating gradients across `y`
    # batches, use a per-step batch size of `z = x / y`.
    # args.gradient_accumulation_steps = args.gradient_accumulation_steps // n_gpu
params["train_batch_size"] = (
params["train_batch_size"] // params["gradient_accumulation_steps"]
)
train_batch_size = params["train_batch_size"]
eval_batch_size = params["eval_batch_size"]
grad_acc_steps = params["gradient_accumulation_steps"]
# Fix the random seeds
seed = params["seed"]
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if reranker.n_gpu > 0:
torch.cuda.manual_seed_all(seed)
max_seq_length = params["max_seq_length"]
context_length = params["max_context_length"]
fname = os.path.join(params["data_path"], "train.t7")
train_data = torch.load(fname)
context_input = train_data["context_vecs"]
candidate_input = train_data["candidate_vecs"]
label_input = train_data["labels"]
if params["debug"]:
max_n = 200
context_input = context_input[:max_n]
candidate_input = candidate_input[:max_n]
label_input = label_input[:max_n]
context_input = modify(context_input, candidate_input, max_seq_length)
if params["zeshel"]:
src_input = train_data['worlds'][:len(context_input)]
train_tensor_data = TensorDataset(context_input, label_input, src_input)
else:
train_tensor_data = TensorDataset(context_input, label_input)
train_sampler = RandomSampler(train_tensor_data)
train_dataloader = DataLoader(
train_tensor_data,
sampler=train_sampler,
batch_size=params["train_batch_size"]
)
fname = os.path.join(params["data_path"], "valid.t7")
valid_data = torch.load(fname)
context_input = valid_data["context_vecs"]
candidate_input = valid_data["candidate_vecs"]
label_input = valid_data["labels"]
if params["debug"]:
max_n = 200
context_input = context_input[:max_n]
candidate_input = candidate_input[:max_n]
label_input = label_input[:max_n]
context_input = modify(context_input, candidate_input, max_seq_length)
if params["zeshel"]:
src_input = valid_data["worlds"][:len(context_input)]
valid_tensor_data = TensorDataset(context_input, label_input, src_input)
else:
valid_tensor_data = TensorDataset(context_input, label_input)
valid_sampler = SequentialSampler(valid_tensor_data)
valid_dataloader = DataLoader(
valid_tensor_data,
sampler=valid_sampler,
batch_size=params["eval_batch_size"]
)
# evaluate before training
results = evaluate(
reranker,
valid_dataloader,
device=device,
logger=logger,
context_length=context_length,
zeshel=params["zeshel"],
silent=params["silent"],
)
number_of_samples_per_dataset = {}
time_start = time.time()
utils.write_to_file(
os.path.join(model_output_path, "training_params.txt"), str(params)
)
logger.info("Starting training")
logger.info(
"device: {} n_gpu: {}, distributed training: {}".format(device, n_gpu, False)
)
optimizer = get_optimizer(model, params)
scheduler = get_scheduler(params, optimizer, len(train_tensor_data), logger)
model.train()
best_epoch_idx = -1
best_score = -1
num_train_epochs = params["num_train_epochs"]
for epoch_idx in trange(int(num_train_epochs), desc="Epoch"):
tr_loss = 0
results = None
if params["silent"]:
iter_ = train_dataloader
else:
iter_ = tqdm(train_dataloader, desc="Batch")
part = 0
for step, batch in enumerate(iter_):
batch = tuple(t.to(device) for t in batch)
context_input = batch[0]
label_input = batch[1]
loss, _ = reranker(context_input, label_input, context_length)
# if n_gpu > 1:
# loss = loss.mean() # mean() to average on multi-gpu.
if grad_acc_steps > 1:
loss = loss / grad_acc_steps
tr_loss += loss.item()
if (step + 1) % (params["print_interval"] * grad_acc_steps) == 0:
logger.info(
"Step {} - epoch {} average loss: {}\n".format(
step,
epoch_idx,
tr_loss / (params["print_interval"] * grad_acc_steps),
)
)
tr_loss = 0
loss.backward()
if (step + 1) % grad_acc_steps == 0:
torch.nn.utils.clip_grad_norm_(
model.parameters(), params["max_grad_norm"]
)
optimizer.step()
scheduler.step()
optimizer.zero_grad()
if (step + 1) % (params["eval_interval"] * grad_acc_steps) == 0:
logger.info("Evaluation on the development dataset")
evaluate(
reranker,
valid_dataloader,
device=device,
logger=logger,
context_length=context_length,
zeshel=params["zeshel"],
silent=params["silent"],
)
logger.info("***** Saving fine - tuned model *****")
epoch_output_folder_path = os.path.join(
model_output_path, "epoch_{}_{}".format(epoch_idx, part)
)
part += 1
utils.save_model(model, tokenizer, epoch_output_folder_path)
model.train()
logger.info("\n")
logger.info("***** Saving fine - tuned model *****")
epoch_output_folder_path = os.path.join(
model_output_path, "epoch_{}".format(epoch_idx)
)
utils.save_model(model, tokenizer, epoch_output_folder_path)
# reranker.save(epoch_output_folder_path)
output_eval_file = os.path.join(epoch_output_folder_path, "eval_results.txt")
results = evaluate(
reranker,
valid_dataloader,
device=device,
logger=logger,
context_length=context_length,
zeshel=params["zeshel"],
silent=params["silent"],
)
ls = [best_score, results["normalized_accuracy"]]
li = [best_epoch_idx, epoch_idx]
best_score = ls[np.argmax(ls)]
best_epoch_idx = li[np.argmax(ls)]
logger.info("\n")
execution_time = (time.time() - time_start) / 60
utils.write_to_file(
os.path.join(model_output_path, "training_time.txt"),
"The training took {} minutes\n".format(execution_time),
)
logger.info("The training took {} minutes\n".format(execution_time))
# save the best model in the parent_dir
logger.info("Best performance in epoch: {}".format(best_epoch_idx))
params["path_to_model"] = os.path.join(
model_output_path, "epoch_{}".format(best_epoch_idx)
)
if __name__ == "__main__":
parser = BlinkParser(add_model_args=True)
parser.add_training_args()
parser.add_eval_args()
# args = argparse.Namespace(**params)
args = parser.parse_args()
print(args)
params = args.__dict__
main(params)
|
BLINK-main
|
blink/crossencoder/train_cross.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import json
import logging
import os
import torch
from tqdm import tqdm
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from pytorch_transformers.tokenization_bert import BertTokenizer
from blink.biencoder.biencoder import BiEncoderRanker
import blink.biencoder.data_process as data
import blink.biencoder.nn_prediction as nnquery
import blink.candidate_ranking.utils as utils
from blink.biencoder.zeshel_utils import WORLDS, load_entity_dict_zeshel, Stats
from blink.common.params import BlinkParser
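# Evaluation / candidate-generation script for the biencoder: encode the entity
# catalogue, retrieve the top-k candidates for every mention, and optionally save them
# as training data for the cross-encoder.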
def load_entity_dict(logger, params, is_zeshel):
if is_zeshel:
return load_entity_dict_zeshel(logger, params)
path = params.get("entity_dict_path", None)
assert path is not None, "Error! entity_dict_path is empty."
entity_list = []
logger.info("Loading entity description from path: " + path)
with open(path, 'rt') as f:
for line in f:
sample = json.loads(line.rstrip())
title = sample['title']
text = sample.get("text", "").strip()
entity_list.append((title, text))
if params["debug"] and len(entity_list) > 200:
break
return entity_list
# zeshel version of get candidate_pool_tensor
def get_candidate_pool_tensor_zeshel(
entity_dict,
tokenizer,
max_seq_length,
logger,
):
candidate_pool = {}
for src in range(len(WORLDS)):
if entity_dict.get(src, None) is None:
continue
logger.info("Get candidate desc to id for pool %s" % WORLDS[src])
candidate_pool[src] = get_candidate_pool_tensor(
entity_dict[src],
tokenizer,
max_seq_length,
logger,
)
return candidate_pool
def get_candidate_pool_tensor_helper(
entity_desc_list,
tokenizer,
max_seq_length,
logger,
is_zeshel,
):
if is_zeshel:
return get_candidate_pool_tensor_zeshel(
entity_desc_list,
tokenizer,
max_seq_length,
logger,
)
else:
return get_candidate_pool_tensor(
entity_desc_list,
tokenizer,
max_seq_length,
logger,
)
def get_candidate_pool_tensor(
entity_desc_list,
tokenizer,
max_seq_length,
logger,
):
# TODO: add multiple thread process
logger.info("Convert candidate text to id")
cand_pool = []
for entity_desc in tqdm(entity_desc_list):
if type(entity_desc) is tuple:
title, entity_text = entity_desc
else:
title = None
entity_text = entity_desc
rep = data.get_candidate_representation(
entity_text,
tokenizer,
max_seq_length,
title,
)
cand_pool.append(rep["ids"])
cand_pool = torch.LongTensor(cand_pool)
return cand_pool
def encode_candidate(
reranker,
candidate_pool,
encode_batch_size,
silent,
logger,
is_zeshel,
):
if is_zeshel:
src = 0
cand_encode_dict = {}
for src, cand_pool in candidate_pool.items():
logger.info("Encoding candidate pool %s" % WORLDS[src])
cand_pool_encode = encode_candidate(
reranker,
cand_pool,
encode_batch_size,
silent,
logger,
is_zeshel=False,
)
cand_encode_dict[src] = cand_pool_encode
return cand_encode_dict
reranker.model.eval()
device = reranker.device
sampler = SequentialSampler(candidate_pool)
data_loader = DataLoader(
candidate_pool, sampler=sampler, batch_size=encode_batch_size
)
if silent:
iter_ = data_loader
else:
iter_ = tqdm(data_loader)
cand_encode_list = None
for step, batch in enumerate(iter_):
cands = batch
cands = cands.to(device)
cand_encode = reranker.encode_candidate(cands)
if cand_encode_list is None:
cand_encode_list = cand_encode
else:
cand_encode_list = torch.cat((cand_encode_list, cand_encode))
return cand_encode_list
def load_or_generate_candidate_pool(
tokenizer,
params,
logger,
cand_pool_path,
):
candidate_pool = None
is_zeshel = params.get("zeshel", None)
if cand_pool_path is not None:
# try to load candidate pool from file
try:
logger.info("Loading pre-generated candidate pool from: ")
logger.info(cand_pool_path)
candidate_pool = torch.load(cand_pool_path)
        except Exception:
logger.info("Loading failed. Generating candidate pool")
if candidate_pool is None:
# compute candidate pool from entity list
entity_desc_list = load_entity_dict(logger, params, is_zeshel)
candidate_pool = get_candidate_pool_tensor_helper(
entity_desc_list,
tokenizer,
params["max_cand_length"],
logger,
is_zeshel,
)
if cand_pool_path is not None:
logger.info("Saving candidate pool.")
torch.save(candidate_pool, cand_pool_path)
return candidate_pool
def main(params):
output_path = params["output_path"]
if not os.path.exists(output_path):
os.makedirs(output_path)
logger = utils.get_logger(params["output_path"])
# Init model
reranker = BiEncoderRanker(params)
tokenizer = reranker.tokenizer
model = reranker.model
device = reranker.device
cand_encode_path = params.get("cand_encode_path", None)
# candidate encoding is not pre-computed.
# load/generate candidate pool to compute candidate encoding.
cand_pool_path = params.get("cand_pool_path", None)
candidate_pool = load_or_generate_candidate_pool(
tokenizer,
params,
logger,
cand_pool_path,
)
candidate_encoding = None
if cand_encode_path is not None:
# try to load candidate encoding from path
# if success, avoid computing candidate encoding
try:
logger.info("Loading pre-generated candidate encode path.")
candidate_encoding = torch.load(cand_encode_path)
        except Exception:
logger.info("Loading failed. Generating candidate encoding.")
if candidate_encoding is None:
candidate_encoding = encode_candidate(
reranker,
candidate_pool,
params["encode_batch_size"],
silent=params["silent"],
logger=logger,
is_zeshel=params.get("zeshel", None)
)
if cand_encode_path is not None:
# Save candidate encoding to avoid re-compute
logger.info("Saving candidate encoding to file " + cand_encode_path)
torch.save(candidate_encoding, cand_encode_path)
test_samples = utils.read_dataset(params["mode"], params["data_path"])
logger.info("Read %d test samples." % len(test_samples))
test_data, test_tensor_data = data.process_mention_data(
test_samples,
tokenizer,
params["max_context_length"],
params["max_cand_length"],
context_key=params['context_key'],
silent=params["silent"],
logger=logger,
debug=params["debug"],
)
test_sampler = SequentialSampler(test_tensor_data)
test_dataloader = DataLoader(
test_tensor_data,
sampler=test_sampler,
batch_size=params["eval_batch_size"]
)
save_results = params.get("save_topk_result")
new_data = nnquery.get_topk_predictions(
reranker,
test_dataloader,
candidate_pool,
candidate_encoding,
params["silent"],
logger,
params["top_k"],
params.get("zeshel", None),
save_results,
)
if save_results:
save_data_dir = os.path.join(
params['output_path'],
"top%d_candidates" % params['top_k'],
)
if not os.path.exists(save_data_dir):
os.makedirs(save_data_dir)
save_data_path = os.path.join(save_data_dir, "%s.t7" % params['mode'])
torch.save(new_data, save_data_path)
if __name__ == "__main__":
parser = BlinkParser(add_model_args=True)
parser.add_eval_args()
args = parser.parse_args()
print(args)
params = args.__dict__
mode_list = params["mode"].split(',')
for mode in mode_list:
new_params = params
new_params["mode"] = mode
main(new_params)
|
BLINK-main
|
blink/biencoder/eval_biencoder.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Utility code for zeshel dataset
import json
import torch
DOC_PATH = "/private/home/ledell/zeshel/data/documents/"
WORLDS = [
'american_football',
'doctor_who',
'fallout',
'final_fantasy',
'military',
'pro_wrestling',
'starwars',
'world_of_warcraft',
'coronation_street',
'muppets',
'ice_hockey',
'elder_scrolls',
'forgotten_realms',
'lego',
'star_trek',
'yugioh'
]
world_to_id = {src : k for k, src in enumerate(WORLDS)}
def load_entity_dict_zeshel(logger, params):
entity_dict = {}
# different worlds in train/valid/test
if params["mode"] == "train":
start_idx = 0
end_idx = 8
elif params["mode"] == "valid":
start_idx = 8
end_idx = 12
else:
start_idx = 12
end_idx = 16
# load data
for i, src in enumerate(WORLDS[start_idx:end_idx]):
fname = DOC_PATH + src + ".json"
cur_dict = {}
doc_list = []
src_id = world_to_id[src]
with open(fname, 'rt') as f:
for line in f:
line = line.rstrip()
item = json.loads(line)
text = item["text"]
doc_list.append(text[:256])
if params["debug"]:
if len(doc_list) > 200:
break
logger.info("Load for world %s." % src)
entity_dict[src_id] = doc_list
return entity_dict
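# Recall@k tracker: `add(idx)` records the rank of the gold candidate (-1 when it was
# not retrieved) and `output()` reports hit rates at the fixed cut-offs in `self.rank`.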
class Stats():
def __init__(self, top_k=1000):
self.cnt = 0
self.hits = []
self.top_k = top_k
self.rank = [1, 4, 8, 16, 32, 64, 100, 128, 256, 512]
self.LEN = len(self.rank)
for i in range(self.LEN):
self.hits.append(0)
def add(self, idx):
self.cnt += 1
if idx == -1:
return
for i in range(self.LEN):
if idx < self.rank[i]:
self.hits[i] += 1
def extend(self, stats):
self.cnt += stats.cnt
for i in range(self.LEN):
self.hits[i] += stats.hits[i]
def output(self):
output_json = "Total: %d examples." % self.cnt
for i in range(self.LEN):
if self.top_k < self.rank[i]:
break
output_json += " r@%d: %.4f" % (self.rank[i], self.hits[i] / float(self.cnt))
return output_json
|
BLINK-main
|
blink/biencoder/zeshel_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import json
import logging
import torch
from tqdm import tqdm
import blink.candidate_ranking.utils as utils
from blink.biencoder.zeshel_utils import WORLDS, Stats
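# Retrieve the top-k entities per mention by scoring the pre-computed candidate
# encodings against each context encoding; optionally keep the retrieved candidates and
# the gold label position so they can be used to train the cross-encoder.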
def get_topk_predictions(
reranker,
train_dataloader,
candidate_pool,
cand_encode_list,
silent,
logger,
top_k=10,
is_zeshel=False,
save_predictions=False,
):
reranker.model.eval()
device = reranker.device
logger.info("Getting top %d predictions." % top_k)
if silent:
iter_ = train_dataloader
else:
iter_ = tqdm(train_dataloader)
nn_context = []
nn_candidates = []
nn_labels = []
nn_worlds = []
stats = {}
if is_zeshel:
world_size = len(WORLDS)
else:
# only one domain
world_size = 1
candidate_pool = [candidate_pool]
cand_encode_list = [cand_encode_list]
logger.info("World size : %d" % world_size)
for i in range(world_size):
stats[i] = Stats(top_k)
oid = 0
for step, batch in enumerate(iter_):
batch = tuple(t.to(device) for t in batch)
context_input, _, srcs, label_ids = batch
src = srcs[0].item()
scores = reranker.score_candidate(
context_input,
None,
cand_encs=cand_encode_list[src].to(device)
)
        values, indices = scores.topk(top_k)
        old_src = src
        for i in range(context_input.size(0)):
            oid += 1
            inds = indices[i]
if srcs[i] != old_src:
src = srcs[i].item()
# not the same domain, need to re-do
new_scores = reranker.score_candidate(
context_input[[i]],
None,
cand_encs=cand_encode_list[src].to(device)
)
_, inds = new_scores.topk(top_k)
inds = inds[0]
pointer = -1
for j in range(top_k):
if inds[j].item() == label_ids[i].item():
pointer = j
break
stats[src].add(pointer)
if pointer == -1:
continue
if not save_predictions:
continue
# add examples in new_data
cur_candidates = candidate_pool[src][inds]
nn_context.append(context_input[i].cpu().tolist())
nn_candidates.append(cur_candidates.cpu().tolist())
nn_labels.append(pointer)
nn_worlds.append(src)
res = Stats(top_k)
for src in range(world_size):
if stats[src].cnt == 0:
continue
if is_zeshel:
logger.info("In world " + WORLDS[src])
output = stats[src].output()
logger.info(output)
res.extend(stats[src])
logger.info(res.output())
nn_context = torch.LongTensor(nn_context)
nn_candidates = torch.LongTensor(nn_candidates)
nn_labels = torch.LongTensor(nn_labels)
nn_data = {
'context_vecs': nn_context,
'candidate_vecs': nn_candidates,
'labels': nn_labels,
}
if is_zeshel:
nn_data["worlds"] = torch.LongTensor(nn_worlds)
return nn_data
|
BLINK-main
|
blink/biencoder/nn_prediction.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
|
BLINK-main
|
blink/biencoder/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import logging
import torch
from tqdm import tqdm, trange
from torch.utils.data import DataLoader, TensorDataset
from pytorch_transformers.tokenization_bert import BertTokenizer
from blink.biencoder.zeshel_utils import world_to_id
from blink.common.params import ENT_START_TAG, ENT_END_TAG, ENT_TITLE_TAG
def select_field(data, key1, key2=None):
if key2 is None:
return [example[key1] for example in data]
else:
return [example[key1][key2] for example in data]
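# Build the mention-in-context input: the mention is wrapped in ENT_START/ENT_END tags
# and the surrounding words are truncated so the left and right contexts share whatever
# token budget remains within max_seq_length.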
def get_context_representation(
sample,
tokenizer,
max_seq_length,
mention_key="mention",
context_key="context",
ent_start_token=ENT_START_TAG,
ent_end_token=ENT_END_TAG,
):
mention_tokens = []
if sample[mention_key] and len(sample[mention_key]) > 0:
mention_tokens = tokenizer.tokenize(sample[mention_key])
mention_tokens = [ent_start_token] + mention_tokens + [ent_end_token]
context_left = sample[context_key + "_left"]
context_right = sample[context_key + "_right"]
context_left = tokenizer.tokenize(context_left)
context_right = tokenizer.tokenize(context_right)
left_quota = (max_seq_length - len(mention_tokens)) // 2 - 1
right_quota = max_seq_length - len(mention_tokens) - left_quota - 2
left_add = len(context_left)
right_add = len(context_right)
if left_add <= left_quota:
if right_add > right_quota:
right_quota += left_quota - left_add
else:
if right_add <= right_quota:
left_quota += right_quota - right_add
context_tokens = (
context_left[-left_quota:] + mention_tokens + context_right[:right_quota]
)
context_tokens = ["[CLS]"] + context_tokens + ["[SEP]"]
input_ids = tokenizer.convert_tokens_to_ids(context_tokens)
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
assert len(input_ids) == max_seq_length
return {
"tokens": context_tokens,
"ids": input_ids,
}
def get_candidate_representation(
candidate_desc,
tokenizer,
max_seq_length,
candidate_title=None,
title_tag=ENT_TITLE_TAG,
):
cls_token = tokenizer.cls_token
sep_token = tokenizer.sep_token
cand_tokens = tokenizer.tokenize(candidate_desc)
if candidate_title is not None:
title_tokens = tokenizer.tokenize(candidate_title)
cand_tokens = title_tokens + [title_tag] + cand_tokens
cand_tokens = cand_tokens[: max_seq_length - 2]
cand_tokens = [cls_token] + cand_tokens + [sep_token]
input_ids = tokenizer.convert_tokens_to_ids(cand_tokens)
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
assert len(input_ids) == max_seq_length
return {
"tokens": cand_tokens,
"ids": input_ids,
}
def process_mention_data(
samples,
tokenizer,
max_context_length,
max_cand_length,
silent,
mention_key="mention",
context_key="context",
label_key="label",
title_key='label_title',
ent_start_token=ENT_START_TAG,
ent_end_token=ENT_END_TAG,
title_token=ENT_TITLE_TAG,
debug=False,
logger=None,
):
processed_samples = []
if debug:
samples = samples[:200]
if silent:
iter_ = samples
else:
iter_ = tqdm(samples)
use_world = True
for idx, sample in enumerate(iter_):
context_tokens = get_context_representation(
sample,
tokenizer,
max_context_length,
mention_key,
context_key,
ent_start_token,
ent_end_token,
)
label = sample[label_key]
title = sample.get(title_key, None)
label_tokens = get_candidate_representation(
label, tokenizer, max_cand_length, title,
)
label_idx = int(sample["label_id"])
record = {
"context": context_tokens,
"label": label_tokens,
"label_idx": [label_idx],
}
if "world" in sample:
src = sample["world"]
src = world_to_id[src]
record["src"] = [src]
use_world = True
else:
use_world = False
processed_samples.append(record)
if debug and logger:
logger.info("====Processed samples: ====")
for sample in processed_samples[:5]:
logger.info("Context tokens : " + " ".join(sample["context"]["tokens"]))
logger.info(
"Context ids : " + " ".join([str(v) for v in sample["context"]["ids"]])
)
logger.info("Label tokens : " + " ".join(sample["label"]["tokens"]))
logger.info(
"Label ids : " + " ".join([str(v) for v in sample["label"]["ids"]])
)
logger.info("Src : %d" % sample["src"][0])
logger.info("Label_id : %d" % sample["label_idx"][0])
context_vecs = torch.tensor(
select_field(processed_samples, "context", "ids"), dtype=torch.long,
)
cand_vecs = torch.tensor(
select_field(processed_samples, "label", "ids"), dtype=torch.long,
)
if use_world:
src_vecs = torch.tensor(
select_field(processed_samples, "src"), dtype=torch.long,
)
label_idx = torch.tensor(
select_field(processed_samples, "label_idx"), dtype=torch.long,
)
data = {
"context_vecs": context_vecs,
"cand_vecs": cand_vecs,
"label_idx": label_idx,
}
if use_world:
data["src"] = src_vecs
tensor_data = TensorDataset(context_vecs, cand_vecs, src_vecs, label_idx)
else:
tensor_data = TensorDataset(context_vecs, cand_vecs, label_idx)
return data, tensor_data
|
BLINK-main
|
blink/biencoder/data_process.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import argparse
import pickle
import torch
import json
import sys
import io
import random
import time
import numpy as np
from multiprocessing.pool import ThreadPool
from tqdm import tqdm, trange
from collections import OrderedDict
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from pytorch_transformers.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from pytorch_transformers.optimization import WarmupLinearSchedule
from pytorch_transformers.tokenization_bert import BertTokenizer
from pytorch_transformers.modeling_utils import WEIGHTS_NAME
from blink.biencoder.biencoder import BiEncoderRanker, load_biencoder
import logging
import blink.candidate_ranking.utils as utils
import blink.biencoder.data_process as data
from blink.biencoder.zeshel_utils import DOC_PATH, WORLDS, world_to_id
from blink.common.optimizer import get_bert_optimizer
from blink.common.params import BlinkParser
logger = None
# The evaluate function during training uses in-batch negatives:
# for a batch of size B, the labels from the batch are used as label candidates
# B is controlled by the parameter eval_batch_size
def evaluate(
reranker, eval_dataloader, params, device, logger,
):
reranker.model.eval()
if params["silent"]:
iter_ = eval_dataloader
else:
iter_ = tqdm(eval_dataloader, desc="Evaluation")
results = {}
eval_accuracy = 0.0
nb_eval_examples = 0
nb_eval_steps = 0
for step, batch in enumerate(iter_):
batch = tuple(t.to(device) for t in batch)
context_input, candidate_input, _, _ = batch
with torch.no_grad():
eval_loss, logits = reranker(context_input, candidate_input)
logits = logits.detach().cpu().numpy()
# Using in-batch negatives, the label ids are diagonal
label_ids = torch.LongTensor(
torch.arange(params["eval_batch_size"])
).numpy()
tmp_eval_accuracy, _ = utils.accuracy(logits, label_ids)
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += context_input.size(0)
nb_eval_steps += 1
normalized_eval_accuracy = eval_accuracy / nb_eval_examples
logger.info("Eval accuracy: %.5f" % normalized_eval_accuracy)
results["normalized_accuracy"] = normalized_eval_accuracy
return results
def get_optimizer(model, params):
return get_bert_optimizer(
[model],
params["type_optimization"],
params["learning_rate"],
fp16=params.get("fp16"),
)
def get_scheduler(params, optimizer, len_train_data, logger):
batch_size = params["train_batch_size"]
grad_acc = params["gradient_accumulation_steps"]
epochs = params["num_train_epochs"]
num_train_steps = int(len_train_data / batch_size / grad_acc) * epochs
num_warmup_steps = int(num_train_steps * params["warmup_proportion"])
scheduler = WarmupLinearSchedule(
optimizer, warmup_steps=num_warmup_steps, t_total=num_train_steps,
)
logger.info(" Num optimization steps = %d" % num_train_steps)
logger.info(" Num warmup steps = %d", num_warmup_steps)
return scheduler
def main(params):
model_output_path = params["output_path"]
if not os.path.exists(model_output_path):
os.makedirs(model_output_path)
logger = utils.get_logger(params["output_path"])
# Init model
reranker = BiEncoderRanker(params)
tokenizer = reranker.tokenizer
model = reranker.model
device = reranker.device
n_gpu = reranker.n_gpu
if params["gradient_accumulation_steps"] < 1:
raise ValueError(
"Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
params["gradient_accumulation_steps"]
)
)
    # To reach an effective batch size of `x` while accumulating gradients across `y`
    # batches, use a per-step batch size of `z = x / y`.
    # args.gradient_accumulation_steps = args.gradient_accumulation_steps // n_gpu
params["train_batch_size"] = (
params["train_batch_size"] // params["gradient_accumulation_steps"]
)
train_batch_size = params["train_batch_size"]
eval_batch_size = params["eval_batch_size"]
grad_acc_steps = params["gradient_accumulation_steps"]
# Fix the random seeds
seed = params["seed"]
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if reranker.n_gpu > 0:
torch.cuda.manual_seed_all(seed)
# Load train data
train_samples = utils.read_dataset("train", params["data_path"])
logger.info("Read %d train samples." % len(train_samples))
train_data, train_tensor_data = data.process_mention_data(
train_samples,
tokenizer,
params["max_context_length"],
params["max_cand_length"],
context_key=params["context_key"],
silent=params["silent"],
logger=logger,
debug=params["debug"],
)
if params["shuffle"]:
train_sampler = RandomSampler(train_tensor_data)
else:
train_sampler = SequentialSampler(train_tensor_data)
train_dataloader = DataLoader(
train_tensor_data, sampler=train_sampler, batch_size=train_batch_size
)
# Load eval data
# TODO: reduce duplicated code here
valid_samples = utils.read_dataset("valid", params["data_path"])
logger.info("Read %d valid samples." % len(valid_samples))
valid_data, valid_tensor_data = data.process_mention_data(
valid_samples,
tokenizer,
params["max_context_length"],
params["max_cand_length"],
context_key=params["context_key"],
silent=params["silent"],
logger=logger,
debug=params["debug"],
)
valid_sampler = SequentialSampler(valid_tensor_data)
valid_dataloader = DataLoader(
valid_tensor_data, sampler=valid_sampler, batch_size=eval_batch_size
)
# evaluate before training
results = evaluate(
reranker, valid_dataloader, params, device=device, logger=logger,
)
number_of_samples_per_dataset = {}
time_start = time.time()
utils.write_to_file(
os.path.join(model_output_path, "training_params.txt"), str(params)
)
logger.info("Starting training")
logger.info(
"device: {} n_gpu: {}, distributed training: {}".format(device, n_gpu, False)
)
optimizer = get_optimizer(model, params)
scheduler = get_scheduler(params, optimizer, len(train_tensor_data), logger)
model.train()
best_epoch_idx = -1
best_score = -1
num_train_epochs = params["num_train_epochs"]
for epoch_idx in trange(int(num_train_epochs), desc="Epoch"):
tr_loss = 0
results = None
if params["silent"]:
iter_ = train_dataloader
else:
iter_ = tqdm(train_dataloader, desc="Batch")
for step, batch in enumerate(iter_):
batch = tuple(t.to(device) for t in batch)
context_input, candidate_input, _, _ = batch
loss, _ = reranker(context_input, candidate_input)
# if n_gpu > 1:
# loss = loss.mean() # mean() to average on multi-gpu.
if grad_acc_steps > 1:
loss = loss / grad_acc_steps
tr_loss += loss.item()
if (step + 1) % (params["print_interval"] * grad_acc_steps) == 0:
logger.info(
"Step {} - epoch {} average loss: {}\n".format(
step,
epoch_idx,
tr_loss / (params["print_interval"] * grad_acc_steps),
)
)
tr_loss = 0
loss.backward()
if (step + 1) % grad_acc_steps == 0:
torch.nn.utils.clip_grad_norm_(
model.parameters(), params["max_grad_norm"]
)
optimizer.step()
scheduler.step()
optimizer.zero_grad()
if (step + 1) % (params["eval_interval"] * grad_acc_steps) == 0:
logger.info("Evaluation on the development dataset")
evaluate(
reranker, valid_dataloader, params, device=device, logger=logger,
)
model.train()
logger.info("\n")
logger.info("***** Saving fine - tuned model *****")
epoch_output_folder_path = os.path.join(
model_output_path, "epoch_{}".format(epoch_idx)
)
utils.save_model(model, tokenizer, epoch_output_folder_path)
output_eval_file = os.path.join(epoch_output_folder_path, "eval_results.txt")
results = evaluate(
reranker, valid_dataloader, params, device=device, logger=logger,
)
ls = [best_score, results["normalized_accuracy"]]
li = [best_epoch_idx, epoch_idx]
best_score = ls[np.argmax(ls)]
best_epoch_idx = li[np.argmax(ls)]
logger.info("\n")
execution_time = (time.time() - time_start) / 60
utils.write_to_file(
os.path.join(model_output_path, "training_time.txt"),
"The training took {} minutes\n".format(execution_time),
)
logger.info("The training took {} minutes\n".format(execution_time))
# save the best model in the parent_dir
logger.info("Best performance in epoch: {}".format(best_epoch_idx))
params["path_to_model"] = os.path.join(
model_output_path,
"epoch_{}".format(best_epoch_idx),
WEIGHTS_NAME,
)
reranker = load_biencoder(params)
utils.save_model(reranker.model, tokenizer, model_output_path)
if params["evaluate"]:
params["path_to_model"] = model_output_path
        evaluate(reranker, valid_dataloader, params, device=device, logger=logger)
if __name__ == "__main__":
parser = BlinkParser(add_model_args=True)
parser.add_training_args()
parser.add_eval_args()
# args = argparse.Namespace(**params)
args = parser.parse_args()
print(args)
params = args.__dict__
main(params)
|
BLINK-main
|
blink/biencoder/train_biencoder.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from tqdm import tqdm
from pytorch_transformers.modeling_bert import (
BertPreTrainedModel,
BertConfig,
BertModel,
)
from pytorch_transformers.tokenization_bert import BertTokenizer
from pytorch_transformers.modeling_utils import CONFIG_NAME, WEIGHTS_NAME
from blink.common.ranker_base import BertEncoder, get_model_obj
from blink.common.optimizer import get_bert_optimizer
def load_biencoder(params):
# Init model
biencoder = BiEncoderRanker(params)
return biencoder
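# Bi-encoder: mention contexts and candidate entities are encoded by two separate BERT
# encoders and scored with a dot product, so candidate embeddings can be pre-computed
# once and reused at retrieval time.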
class BiEncoderModule(torch.nn.Module):
def __init__(self, params):
super(BiEncoderModule, self).__init__()
ctxt_bert = BertModel.from_pretrained(params["bert_model"])
cand_bert = BertModel.from_pretrained(params['bert_model'])
self.context_encoder = BertEncoder(
ctxt_bert,
params["out_dim"],
layer_pulled=params["pull_from_layer"],
add_linear=params["add_linear"],
)
self.cand_encoder = BertEncoder(
cand_bert,
params["out_dim"],
layer_pulled=params["pull_from_layer"],
add_linear=params["add_linear"],
)
self.config = ctxt_bert.config
def forward(
self,
token_idx_ctxt,
segment_idx_ctxt,
mask_ctxt,
token_idx_cands,
segment_idx_cands,
mask_cands,
):
embedding_ctxt = None
if token_idx_ctxt is not None:
embedding_ctxt = self.context_encoder(
token_idx_ctxt, segment_idx_ctxt, mask_ctxt
)
embedding_cands = None
if token_idx_cands is not None:
embedding_cands = self.cand_encoder(
token_idx_cands, segment_idx_cands, mask_cands
)
return embedding_ctxt, embedding_cands
class BiEncoderRanker(torch.nn.Module):
def __init__(self, params, shared=None):
super(BiEncoderRanker, self).__init__()
self.params = params
self.device = torch.device(
"cuda" if torch.cuda.is_available() and not params["no_cuda"] else "cpu"
)
self.n_gpu = torch.cuda.device_count()
# init tokenizer
self.NULL_IDX = 0
self.START_TOKEN = "[CLS]"
self.END_TOKEN = "[SEP]"
self.tokenizer = BertTokenizer.from_pretrained(
params["bert_model"], do_lower_case=params["lowercase"]
)
# init model
self.build_model()
model_path = params.get("path_to_model", None)
if model_path is not None:
self.load_model(model_path)
self.model = self.model.to(self.device)
self.data_parallel = params.get("data_parallel")
if self.data_parallel:
self.model = torch.nn.DataParallel(self.model)
def load_model(self, fname, cpu=False):
if cpu:
            state_dict = torch.load(fname, map_location="cpu")
else:
state_dict = torch.load(fname)
self.model.load_state_dict(state_dict)
def build_model(self):
self.model = BiEncoderModule(self.params)
def save_model(self, output_dir):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = get_model_obj(self.model)
output_model_file = os.path.join(output_dir, WEIGHTS_NAME)
output_config_file = os.path.join(output_dir, CONFIG_NAME)
torch.save(model_to_save.state_dict(), output_model_file)
model_to_save.config.to_json_file(output_config_file)
def get_optimizer(self, optim_states=None, saved_optim_type=None):
return get_bert_optimizer(
[self.model],
self.params["type_optimization"],
self.params["learning_rate"],
fp16=self.params.get("fp16"),
)
def encode_context(self, cands):
token_idx_cands, segment_idx_cands, mask_cands = to_bert_input(
cands, self.NULL_IDX
)
embedding_context, _ = self.model(
token_idx_cands, segment_idx_cands, mask_cands, None, None, None
)
return embedding_context.cpu().detach()
def encode_candidate(self, cands):
token_idx_cands, segment_idx_cands, mask_cands = to_bert_input(
cands, self.NULL_IDX
)
_, embedding_cands = self.model(
None, None, None, token_idx_cands, segment_idx_cands, mask_cands
)
return embedding_cands.cpu().detach()
# TODO: why do we need cpu here?
# return embedding_cands
# Score candidates given context input and label input
    # If cand_encs is provided (pre-computed), cand_vecs is ignored
def score_candidate(
self,
text_vecs,
cand_vecs,
random_negs=True,
cand_encs=None, # pre-computed candidate encoding.
):
# Encode contexts first
token_idx_ctxt, segment_idx_ctxt, mask_ctxt = to_bert_input(
text_vecs, self.NULL_IDX
)
embedding_ctxt, _ = self.model(
token_idx_ctxt, segment_idx_ctxt, mask_ctxt, None, None, None
)
# Candidate encoding is given, do not need to re-compute
# Directly return the score of context encoding and candidate encoding
if cand_encs is not None:
return embedding_ctxt.mm(cand_encs.t())
# Train time. We compare with all elements of the batch
token_idx_cands, segment_idx_cands, mask_cands = to_bert_input(
cand_vecs, self.NULL_IDX
)
_, embedding_cands = self.model(
None, None, None, token_idx_cands, segment_idx_cands, mask_cands
)
if random_negs:
# train on random negatives
return embedding_ctxt.mm(embedding_cands.t())
else:
# train on hard negatives
embedding_ctxt = embedding_ctxt.unsqueeze(1) # batchsize x 1 x embed_size
            embedding_cands = embedding_cands.unsqueeze(2) # batchsize x embed_size x 1
scores = torch.bmm(embedding_ctxt, embedding_cands) # batchsize x 1 x 1
scores = torch.squeeze(scores)
return scores
# label_input -- negatives provided
# If label_input is None, train on in-batch negatives
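    # With in-batch negatives the score matrix is (batch x batch) and the gold
    # candidate of example i sits on the diagonal, hence the arange() target below.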
def forward(self, context_input, cand_input, label_input=None):
flag = label_input is None
scores = self.score_candidate(context_input, cand_input, flag)
bs = scores.size(0)
if label_input is None:
target = torch.LongTensor(torch.arange(bs))
target = target.to(self.device)
loss = F.cross_entropy(scores, target, reduction="mean")
else:
loss_fct = nn.BCEWithLogitsLoss(reduction="mean")
# TODO: add parameters?
loss = loss_fct(scores, label_input)
return loss, scores
def to_bert_input(token_idx, null_idx):
""" token_idx is a 2D tensor int.
return token_idx, segment_idx and mask
"""
segment_idx = token_idx * 0
mask = token_idx != null_idx
# nullify elements in case self.NULL_IDX was not 0
token_idx = token_idx * mask.long()
return token_idx, segment_idx, mask
|
BLINK-main
|
blink/biencoder/biencoder.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import os
import numpy as np
from pytorch_transformers.modeling_bert import (
BertPreTrainedModel,
BertConfig,
BertModel,
)
from pytorch_transformers.tokenization_bert import BertTokenizer
from torch.utils.data import DataLoader, SequentialSampler, TensorDataset
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from tqdm import tqdm
from pytorch_transformers.optimization import AdamW, WarmupLinearSchedule
from pytorch_transformers.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
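# BERT reranker for candidate ranking: every mention is scored against its top-k
# candidates as a multiple-choice problem, with `entity_mask` pushing padded candidate
# slots to a large negative logit.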
class BertForReranking(BertPreTrainedModel):
r"""
Inputs:
**input_ids**: ``torch.LongTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
Indices of input sequence tokens in the vocabulary.
The second dimension of the input (`num_choices`) indicates the number of choices to score.
To match pre-training, BERT input sequence should be formatted with [CLS] and [SEP] tokens as follows:
(a) For sequence pairs:
``tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]``
``token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1``
(b) For single sequences:
``tokens: [CLS] the dog is hairy . [SEP]``
``token_type_ids: 0 0 0 0 0 0 0``
Indices can be obtained using :class:`pytorch_transformers.BertTokenizer`.
See :func:`pytorch_transformers.PreTrainedTokenizer.encode` and
:func:`pytorch_transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
**token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
Segment token indices to indicate first and second portions of the inputs.
The second dimension of the input (`num_choices`) indicates the number of choices to score.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token
(see `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ for more details).
**attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
Mask to avoid performing attention on padding token indices.
The second dimension of the input (`num_choices`) indicates the number of choices to score.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
**head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
**labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for computing the multiple choice classification loss.
Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension
of the input tensors. (see `input_ids` above)
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Classification loss.
**classification_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices)`` where `num_choices` is the size of the second dimension
of the input tensors. (see `input_ids` above).
Classification scores (before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForMultipleChoice.from_pretrained('bert-base-uncased')
choices = ["Hello, my dog is cute", "Hello, my cat is amazing"]
input_ids = torch.tensor([tokenizer.encode(s) for s in choices]).unsqueeze(0) # Batch size 1, 2 choices
labels = torch.tensor(1).unsqueeze(0) # Batch size 1
outputs = model(input_ids, labels=labels)
loss, classification_scores = outputs[:2]
"""
def __init__(self, config):
super(BertForReranking, self).__init__(config)
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.init_weights()
def forward(
self,
input_ids,
token_type_ids=None,
attention_mask=None,
labels=None,
position_ids=None,
head_mask=None,
entity_mask=None,
):
num_choices = input_ids.shape[1]
# from batch_size x cands x tokens -> (batch_size x cands) x tokens
flat_input_ids = input_ids.view(-1, input_ids.size(-1))
flat_token_type_ids = (
token_type_ids.view(-1, token_type_ids.size(-1))
if token_type_ids is not None
else None
)
flat_attention_mask = (
attention_mask.view(-1, attention_mask.size(-1))
if attention_mask is not None
else None
)
flat_position_ids = (
position_ids.view(-1, position_ids.size(-1))
if position_ids is not None
else None
)
outputs = self.bert(
flat_input_ids,
position_ids=flat_position_ids,
token_type_ids=flat_token_type_ids,
attention_mask=flat_attention_mask,
head_mask=head_mask,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
entity_mask = (1.0 - entity_mask) * -1000.0
reshaped_logits = reshaped_logits + entity_mask
outputs = (reshaped_logits,)
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
outputs = (loss,) + outputs
return outputs
class BertReranker:
def __init__(self, parameters):
if "path_to_model" not in parameters:
parameters["path_to_model"] = parameters["bert_model"]
self.parameters = parameters
self.device = torch.device(
"cuda" if torch.cuda.is_available() and not parameters["no_cuda"] else "cpu"
)
self.n_gpu = torch.cuda.device_count()
# Load the fine-tuned model and the tokenizer used by it
self.model = BertReranker.get_model(parameters)
self.model.to(self.device)
self.tokenizer = BertReranker.get_tokenizer(parameters)
print("The reranking model is loaded")
def rerank(self, mentions, sentences):
model = self.model
tokenizer = self.tokenizer
p = self.parameters
device = self.device
data, tensor_data = BertReranker._process_mentions_for_model(
p["context_key"],
mentions,
tokenizer,
p["max_seq_length"],
p["top_k"],
p["silent"],
sentences=sentences,
)
sampler = SequentialSampler(tensor_data)
dataloader = DataLoader(
tensor_data, sampler=sampler, batch_size=p["evaluation_batch_size"]
)
softmax = torch.nn.Softmax(dim=1)
for input_ids, input_mask, segment_ids, mention_ids, entity_mask in tqdm(
dataloader, desc="Inferring"
):
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
mention_ids = mention_ids.numpy()
entity_mask = entity_mask.to(device)
with torch.no_grad():
logits = self.model(
input_ids, segment_ids, input_mask, entity_mask=entity_mask
)[0]
probs = softmax(logits)
logits = logits.detach().cpu().numpy()
probs = probs.detach().cpu().numpy()
predictions = np.argmax(logits, axis=1)
for idx, mention_idx in enumerate(mention_ids):
pred = predictions[idx].item()
mentions[mention_idx]["predicted_candidate_idx"] = pred
mentions[mention_idx]["prob_assigned_to_candidate"] = probs[idx][
pred
].item()
return mentions
def get_scheduler_and_optimizer(self, parameters, train_tensor_data, logger):
model = self.model
num_train_optimization_steps = (
int(
len(train_tensor_data)
/ parameters["train_batch_size"]
/ parameters["gradient_accumulation_steps"]
)
* parameters["num_train_epochs"]
)
num_warmup_steps = int(
num_train_optimization_steps * parameters["warmup_proportion"]
)
param_optimizer = list(model.named_parameters())
param_optimizer = [n for n in param_optimizer]
no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [
p for n, p in param_optimizer if not any(nd in n for nd in no_decay)
],
"weight_decay": 0.01,
},
{
"params": [
p for n, p in param_optimizer if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
optimizer = AdamW(
optimizer_grouped_parameters,
lr=parameters["learning_rate"],
correct_bias=False,
)
scheduler = WarmupLinearSchedule(
optimizer,
warmup_steps=num_warmup_steps,
t_total=num_train_optimization_steps,
)
logger.info(" Num optimization steps = %d", num_train_optimization_steps)
logger.info(" Num warmup steps = %d", num_warmup_steps)
return optimizer, scheduler
@staticmethod
def get_model(parameters):
model = BertForReranking.from_pretrained(
parameters["path_to_model"],
num_labels=parameters["top_k"],
cache_dir=os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), "local"),
)
if parameters["dataparallel_bert"]:
model.bert = torch.nn.DataParallel(model.bert)
print("Data parallel Bert")
return model
@staticmethod
def get_tokenizer(parameters):
tokenizer = BertTokenizer.from_pretrained(
parameters["path_to_model"], do_lower_case=parameters["lowercase_flag"]
)
return tokenizer
@staticmethod
def _get_candidate_representation(
context_tokens, candidate_desc, tokenizer, max_seq_length, max_sub_seq_length
):
"""Tokenizes and truncates description; combines it with the tokenized context and generates one input sample for bert"""
candidate_desc_tokens = tokenizer.tokenize(candidate_desc)
candidate_desc_tokens = candidate_desc_tokens[:max_sub_seq_length]
tokens = (
["[CLS]"] + context_tokens + ["[SEP]"] + candidate_desc_tokens + ["[SEP]"]
)
segment_ids = [0] * (len(context_tokens) + 2) + [1] * (
len(candidate_desc_tokens) + 1
)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
return {
"tokens": tokens,
"input_ids": input_ids,
"input_mask": input_mask,
"segment_ids": segment_ids,
}
@staticmethod
def _get_mention_context_end2end(mention, sentences):
"""Given a mention and a list of sentences that follow the blink conventions, it returns a left and right context for the mention"""
sent_idx = mention["sent_idx"]
        # Note: the previous/next sentences are intentionally left empty here,
        # so only the sentence containing the mention contributes context.
        prev_sent = ""
        next_sent = ""
sent = sentences[sent_idx]
curr_sent_prev = sent[: mention["start_pos"]].strip()
curr_sent_next = sent[mention["end_pos"] :].strip()
left_context = "{} {}".format(prev_sent, curr_sent_prev).strip()
right_context = "{} {}".format(curr_sent_next, next_sent).strip()
return (left_context, right_context)
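    # Illustrative (added) example: with sentences = ["Berlin is big.", "It is the capital."]
    # and a mention of "Berlin" (sent_idx=0, start_pos=0, end_pos=6), this returns
    # ("", "is big.") -- the neighbouring sentences are left out, as noted above.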
@staticmethod
def _select_field(samples, field):
"""Helper function that returns a list of lists, each of which contains the information for all candidates for each sample"""
return [
[cand[field] for cand in sample["candidate_features"]] for sample in samples
]
@staticmethod
def _get_context_token_representation(
context_key,
sample,
tokenizer,
max_sub_seq_length,
start_token,
end_token,
mention_text_key="text",
tagged=True,
):
"""Tags the mention, trims the context and concatenates everything to form the context representation"""
mention_tokens = (
[start_token] + tokenizer.tokenize(sample[mention_text_key]) + [end_token]
)
max_sub_seq_length = (max_sub_seq_length - len(mention_tokens)) // 2
context_left, context_right = sample[context_key]
context_left = tokenizer.tokenize(context_left)
context_right = tokenizer.tokenize(context_right)
if len(context_left) > max_sub_seq_length:
context_left = context_left[-max_sub_seq_length:]
if len(context_right) > max_sub_seq_length:
context_right = context_right[:max_sub_seq_length]
context_tokens = context_left + mention_tokens + context_right
return context_tokens
@staticmethod
def _process_mentions_for_model(
context_key,
mentions,
tokenizer,
max_seq_length,
top_k,
silent,
start_token="[unused0]",
end_token="[unused1]",
debug=False,
tagged=True,
sentences=None,
candidates_key="candidates",
gold_key="gold_pos",
logger=None,
):
processed_mentions = []
if debug:
mentions = mentions[:200]
max_sub_seq_length = (max_seq_length - 3) // 2
if silent:
iter_ = mentions
else:
iter_ = tqdm(mentions)
for idx, mention in enumerate(iter_):
# if sentences is not none that means that we are processing end2end data for inference
if sentences is not None:
mention[context_key] = BertReranker._get_mention_context_end2end(
mention, sentences
)
context_tokens = BertReranker._get_context_token_representation(
context_key,
mention,
tokenizer,
max_sub_seq_length,
start_token,
end_token,
)
candidates = mention[candidates_key]
candidate_features = []
for candidate in candidates[:top_k]:
candidate_desc = " ".join(candidate["sentences"])
candidate_obj = BertReranker._get_candidate_representation(
context_tokens,
candidate_desc,
tokenizer,
max_seq_length,
max_sub_seq_length,
)
candidate_features.append(candidate_obj)
entity_mask = [1] * len(candidate_features) + [0] * (
top_k - len(candidate_features)
)
if len(candidates) < top_k:
candidate_desc = ""
padding_candidate_obj = BertReranker._get_candidate_representation(
context_tokens,
candidate_desc,
tokenizer,
max_seq_length,
max_sub_seq_length,
)
for _ in range(top_k - len(candidates)):
candidate_features.append(padding_candidate_obj)
assert len(candidate_features) == top_k
assert len(entity_mask) == top_k
if sentences is not None:
processed_mentions.append(
{
"candidate_features": candidate_features,
"mention_idx": idx,
"entity_mask": entity_mask,
}
)
else:
label = mention[gold_key] - 1
processed_mentions.append(
{
"candidate_features": candidate_features,
"label": label,
"entity_mask": entity_mask,
}
)
all_input_ids = torch.tensor(
BertReranker._select_field(processed_mentions, "input_ids"),
dtype=torch.long,
)
all_input_mask = torch.tensor(
BertReranker._select_field(processed_mentions, "input_mask"),
dtype=torch.long,
)
all_segment_ids = torch.tensor(
BertReranker._select_field(processed_mentions, "segment_ids"),
dtype=torch.long,
)
all_entity_masks = torch.tensor(
[s["entity_mask"] for s in processed_mentions], dtype=torch.float
)
data = {
"all_input_ids": all_input_ids,
"all_input_mask": all_input_mask,
"all_segment_ids": all_segment_ids,
"all_entity_masks": all_entity_masks,
}
if sentences is not None:
all_mention_indices = torch.tensor(
[s["mention_idx"] for s in processed_mentions], dtype=torch.long
)
data["all_mention_indices"] = all_mention_indices
tensor_data = TensorDataset(
all_input_ids,
all_input_mask,
all_segment_ids,
all_mention_indices,
all_entity_masks,
)
else:
all_label = torch.tensor(
[s["label"] for s in processed_mentions], dtype=torch.long
)
data["all_label"] = all_label
tensor_data = TensorDataset(
all_input_ids,
all_input_mask,
all_segment_ids,
all_label,
all_entity_masks,
)
        if logger is not None:
logger.info("all_input_ids shape: {}".format(all_input_ids.shape))
logger.info("all_input_mask shape: {}".format(all_input_mask.shape))
logger.info("all_segment_ids shape: {}".format(all_segment_ids.shape))
logger.info("all_entity_masks shape: {}".format(all_entity_masks.shape))
if sentences is not None:
logger.info(
"all_mention_indices shape: {}".format(all_mention_indices.shape)
)
else:
logger.info("all_label shape: {}".format(all_label.shape))
return data, tensor_data
|
BLINK-main
|
blink/candidate_ranking/bert_reranking.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import io
import sys
import json
import torch
import logging
import numpy as np
from collections import OrderedDict
from pytorch_transformers.modeling_utils import CONFIG_NAME, WEIGHTS_NAME
from tqdm import tqdm
from blink.candidate_ranking.bert_reranking import BertReranker
from blink.biencoder.biencoder import BiEncoderRanker
def read_dataset(dataset_name, preprocessed_json_data_parent_folder, debug=False):
file_name = "{}.jsonl".format(dataset_name)
txt_file_path = os.path.join(preprocessed_json_data_parent_folder, file_name)
samples = []
with io.open(txt_file_path, mode="r", encoding="utf-8") as file:
for line in file:
samples.append(json.loads(line.strip()))
if debug and len(samples) > 200:
break
return samples
def filter_samples(samples, top_k, gold_key="gold_pos"):
    if top_k is None:
return samples
filtered_samples = [
sample
for sample in samples
if sample[gold_key] > 0 and sample[gold_key] <= top_k
]
return filtered_samples
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def eval_precision_bm45_dataloader(dataloader, ks=[1, 5, 10], number_of_samples=None):
label_ids = torch.cat([label_ids for _, _, _, label_ids, _ in dataloader])
label_ids = label_ids + 1
p = {}
for k in ks:
p[k] = 0
for label in label_ids:
if label > 0:
for k in ks:
if label <= k:
p[k] += 1
for k in ks:
if number_of_samples is None:
p[k] /= len(label_ids)
else:
p[k] /= number_of_samples
return p
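# Illustrative (added) example of the precision@k computation above; the ranks used
# here are assumptions, not data from the original files.
def _example_eval_precision_bm45():
    # Three samples whose gold candidate sits at 1-based ranks 1, 5 and 12
    # (what `label_ids + 1` yields above); rank 12 falls outside every k.
    ranks = [1, 5, 12]
    return {k: sum(1 for r in ranks if 0 < r <= k) / len(ranks) for k in (1, 5, 10)}
    # -> {1: 0.33..., 5: 0.66..., 10: 0.66...}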
def accuracy(out, labels):
outputs = np.argmax(out, axis=1)
return np.sum(outputs == labels), outputs == labels
def remove_module_from_state_dict(state_dict):
new_state_dict = OrderedDict()
for key, value in state_dict.items():
name = "".join(key.split(".module"))
new_state_dict[name] = value
return new_state_dict
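# Illustrative (added) sketch: when model.bert is wrapped in torch.nn.DataParallel
# (see BertReranker.get_model), saved keys look like "bert.module.embeddings...";
# the helper above drops the ".module" fragment so an unwrapped model can load them.
def _example_remove_module_key():
    state_dict = OrderedDict([("bert.module.embeddings.word_embeddings.weight", None)])
    return list(remove_module_from_state_dict(state_dict).keys())
    # -> ["bert.embeddings.word_embeddings.weight"]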
def save_model(model, tokenizer, output_dir):
"""Saves the model and the tokenizer used in the output directory."""
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model, "module") else model
output_model_file = os.path.join(output_dir, WEIGHTS_NAME)
output_config_file = os.path.join(output_dir, CONFIG_NAME)
torch.save(model_to_save.state_dict(), output_model_file)
model_to_save.config.to_json_file(output_config_file)
tokenizer.save_vocabulary(output_dir)
def get_logger(output_dir=None):
    if output_dir is not None:
os.makedirs(output_dir, exist_ok=True)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
handlers=[
logging.FileHandler(
"{}/log.txt".format(output_dir), mode="a", delay=False
),
logging.StreamHandler(sys.stdout),
],
)
else:
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
handlers=[logging.StreamHandler(sys.stdout)],
)
logger = logging.getLogger('Blink')
    logger.setLevel(logging.DEBUG)
return logger
def write_to_file(path, string, mode="w"):
with open(path, mode) as writer:
writer.write(string)
def get_reranker(parameters):
return BertReranker(parameters)
def get_biencoder(parameters):
return BiEncoderRanker(parameters)
|
BLINK-main
|
blink/candidate_ranking/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import argparse
import pickle
import torch
import json
import sys
import io
import random
import time
import numpy as np
import pprint
import shutil
from multiprocessing.pool import ThreadPool
from tqdm import tqdm, trange
from collections import OrderedDict
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from pytorch_transformers.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from pytorch_transformers.tokenization_bert import BertTokenizer
import blink.candidate_retrieval.utils
from blink.candidate_ranking.bert_reranking import BertForReranking
import logging
import utils
from evaluate import evaluate_model_on_dataset, evaluate
logger = None
def main(parameters):
# Read model
reranker = utils.get_reranker(parameters)
tokenizer = reranker.tokenizer
model = reranker.model
device = reranker.device
n_gpu = reranker.n_gpu
if parameters["gradient_accumulation_steps"] < 1:
raise ValueError(
"Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
parameters["gradient_accumulation_steps"]
)
)
    # An effective batch size of `x`, when accumulating gradients across `y` batches,
    # is achieved with a per-step batch size of `z = x / y` (e.g. 64 / 8 = 8).
# args.gradient_accumulation_steps = args.gradient_accumulation_steps // n_gpu
parameters["train_batch_size"] = (
parameters["train_batch_size"] // parameters["gradient_accumulation_steps"]
)
train_batch_size = parameters["train_batch_size"]
evaluation_batch_size = parameters["evaluation_batch_size"]
gradient_accumulation_steps = parameters["gradient_accumulation_steps"]
# Fix the random seeds
seed = parameters["seed"]
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
logger = None
number_of_samples_per_dataset = {}
if reranker.n_gpu > 0:
torch.cuda.manual_seed_all(seed)
time_start = time.time()
model_output_path = parameters["model_output_path"]
    # Make sure everything is in order with the output directory
if os.path.exists(model_output_path) and os.listdir(model_output_path):
print(
"Output directory ({}) already exists and is not empty.".format(
model_output_path
)
)
answer = input("Would you like to empty the existing directory? [Y/N]\n")
if answer.strip() == "Y":
print("Deleteing {}...".format(model_output_path))
shutil.rmtree(model_output_path)
else:
raise ValueError(
"Output directory ({}) already exists and is not empty.".format(
model_output_path
)
)
if not os.path.exists(model_output_path):
os.makedirs(model_output_path)
utils.write_to_file(
os.path.join(model_output_path, "training_parameters.txt"), str(parameters)
)
logger = utils.get_logger(model_output_path)
logger.info("Starting training")
logger.info(
"device: {} n_gpu: {}, distributed training: {}".format(device, n_gpu, False)
)
### Load training data
train_dataset_name = "aida-train"
train_samples = utils.read_dataset(
train_dataset_name, parameters["path_to_preprocessed_json_data"]
)
train_samples_filtered = utils.filter_samples(train_samples, parameters["top_k"])
logger.info(
"Retained {} out of {} samples".format(
len(train_samples_filtered), len(train_samples)
)
)
number_of_samples_per_dataset[train_dataset_name] = len(train_samples)
train_data, train_tensor_data = reranker._process_mentions_for_model(
parameters["context_key"],
train_samples_filtered,
tokenizer,
parameters["max_seq_length"],
silent=parameters["silent"],
logger=logger,
top_k=parameters["top_k"],
debug=parameters["debug"],
)
train_sampler = RandomSampler(train_tensor_data)
train_dataloader = DataLoader(
train_tensor_data, sampler=train_sampler, batch_size=train_batch_size
)
###
### Loading dev data
dev_dataset_name = "aida-A"
dev_samples = utils.read_dataset(
dev_dataset_name, parameters["path_to_preprocessed_json_data"]
)
dev_samples_filtered = utils.filter_samples(dev_samples, parameters["top_k"])
logger.info(
"Retained {} out of {} samples".format(
len(dev_samples_filtered), len(dev_samples)
)
)
number_of_samples_per_dataset[dev_dataset_name] = len(dev_samples)
dev_data, dev_tensor_data = reranker._process_mentions_for_model(
parameters["context_key"],
        dev_samples_filtered,
tokenizer,
parameters["max_seq_length"],
silent=parameters["silent"],
logger=logger,
top_k=parameters["top_k"],
debug=parameters["debug"],
)
dev_sampler = SequentialSampler(dev_tensor_data)
dev_dataloader = DataLoader(
dev_tensor_data, sampler=dev_sampler, batch_size=evaluation_batch_size
)
###
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_samples_filtered))
logger.info(" Batch size = %d", train_batch_size)
logger.info(" Gradient accumulation steps = %d", gradient_accumulation_steps)
optimizer, scheduler = reranker.get_scheduler_and_optimizer(
parameters, train_tensor_data, logger
)
best_epoch_idx = -1
best_score = -1
num_train_epochs = parameters["num_train_epochs"]
model.train()
for epoch_idx in trange(int(num_train_epochs), desc="Epoch"):
tr_loss = 0
results = None
for step, batch in enumerate(tqdm(train_dataloader, desc="Batch")):
batch = tuple(t.to(device) for t in batch)
input_ids, input_mask, segment_ids, label_ids, entity_mask = batch
loss, _ = model(
input_ids, segment_ids, input_mask, label_ids, entity_mask=entity_mask
)
# if n_gpu > 1:
# loss = loss.mean() # mean() to average on multi-gpu.
if gradient_accumulation_steps > 1:
loss = loss / gradient_accumulation_steps
tr_loss += loss.item()
if (step + 1) % (
parameters["print_tr_loss_opt_steps_interval"]
* parameters["gradient_accumulation_steps"]
) == 0:
logger.info(
"Step {} - epoch {} average loss: {}\n".format(
step,
epoch_idx,
tr_loss
/ (
parameters["print_tr_loss_opt_steps_interval"]
* gradient_accumulation_steps
),
)
)
tr_loss = 0
loss.backward()
if (step + 1) % gradient_accumulation_steps == 0:
torch.nn.utils.clip_grad_norm_(
model.parameters(), parameters["max_grad_norm"]
)
optimizer.step()
scheduler.step()
optimizer.zero_grad()
if (step + 1) % (
parameters["dev_evaluation_interval"]
* gradient_accumulation_steps
* train_batch_size
) == 0:
logger.info("Evaluation on the development dataset")
evaluate_model_on_dataset(
model,
dev_dataloader,
dev_dataset_name,
device=device,
logger=logger,
number_of_samples=number_of_samples_per_dataset[dev_dataset_name],
)
model.train()
logger.info("\n")
logger.info("***** Saving fine - tuned model *****")
epoch_output_folder_path = os.path.join(
model_output_path, "epoch_{}".format(epoch_idx)
)
utils.save_model(model, tokenizer, epoch_output_folder_path)
output_eval_file = os.path.join(epoch_output_folder_path, "eval_results.txt")
results = evaluate_model_on_dataset(
model,
dev_dataloader,
dev_dataset_name,
device=device,
logger=logger,
path_to_file_to_write_results=output_eval_file,
number_of_samples=number_of_samples_per_dataset[dev_dataset_name],
)
ls = [best_score, results["normalized_accuracy"]]
li = [best_epoch_idx, epoch_idx]
best_score = ls[np.argmax(ls)]
best_epoch_idx = li[np.argmax(ls)]
logger.info("\n")
execution_time = (time.time() - time_start) / 60
utils.write_to_file(
os.path.join(model_output_path, "training_time.txt"),
"The training took {} minutes\n".format(execution_time),
)
logger.info("The training took {} minutes\n".format(execution_time))
# save the best model in the parent_dir
logger.info("Best performance in epoch: {}".format(best_epoch_idx))
parameters["path_to_model"] = os.path.join(
model_output_path, "epoch_{}".format(best_epoch_idx)
)
reranker = utils.get_reranker(parameters)
utils.save_model(reranker.model, tokenizer, model_output_path)
if parameters["evaluate"]:
parameters["path_to_model"] = model_output_path
evaluate(parameters, logger=logger)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--path_to_preprocessed_json_data",
default="data/train_and_benchmark_processed_json",
type=str,
help="The path to the train and benchmarking data.",
)
parser.add_argument(
"--bert_model",
default="bert-large-cased",
type=str,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.",
)
parser.add_argument(
"--model_output_path",
default=None,
type=str,
required=True,
help="The output directory where the trained model is to be dumped.",
)
parser.add_argument(
"--context_key", default="tagged_query_context_sent_prev_curr_next", type=str
)
parser.add_argument(
"--lowercase_flag",
action="store_true",
help="Whether to lower case the input text. True for uncased models, False for cased models.",
)
parser.add_argument("--top_k", default=80, type=int)
parser.add_argument(
"--max_seq_length",
default=512,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.",
)
parser.add_argument(
"--evaluate", action="store_true", help="Whether to run evaluation."
)
parser.add_argument(
"--full_evaluation",
action="store_true",
help="Whether to run the evaluation on all datasets.",
)
parser.add_argument(
"--evaluate_with_pregenerated_candidates",
action="store_true",
help="Whether to run in debug mode with only 200 samples.",
)
parser.add_argument(
"--output_eval_file",
default=None,
type=str,
help="The txt file where the the evaluation results will be written.",
)
parser.add_argument(
"--debug",
action="store_true",
help="Whether to run in debug mode with only 200 samples.",
)
parser.add_argument(
"--silent", action="store_true", help="Whether to print progress bars."
)
parser.add_argument(
"--train_batch_size", default=8, type=int, help="Total batch size for training."
)
parser.add_argument(
"--evaluation_batch_size",
default=4,
type=int,
help="Total batch size for evaluation.",
)
parser.add_argument(
"--dataparallel_bert",
action="store_true",
help="Whether to distributed the candidate generation process.",
)
parser.add_argument("--max_grad_norm", default=1.0, type=float)
parser.add_argument(
"--learning_rate",
default=3e-5,
type=float,
help="The initial learning rate for Adam.",
)
parser.add_argument(
"--num_train_epochs",
default=3,
type=int,
help="Total number of training epochs to perform.",
)
parser.add_argument(
"--print_tr_loss_opt_steps_interval",
type=int,
default=20,
help="Interval of loss printing",
)
parser.add_argument(
"--dev_evaluation_interval",
type=int,
default=160,
help="Interval for evaluation during training",
)
parser.add_argument(
"--save_interval", type=int, default=1, help="Interval for model saving"
)
parser.add_argument(
"--warmup_proportion",
default=0.1,
type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.",
)
parser.add_argument(
"--no_cuda", action="store_true", help="Whether not to use CUDA when available"
)
parser.add_argument(
"--seed", type=int, default=12345, help="random seed for initialization"
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=8,
help="Number of updates steps to accumualte before performing a backward/update pass.",
)
# args = argparse.Namespace(**params)
args = parser.parse_args()
print(args)
parameters = args.__dict__
main(parameters)
|
BLINK-main
|
blink/candidate_ranking/train.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import time
import utils
import torch
import argparse
import os
from bert_reranking import BertReranker
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm
def evaluate_model_on_dataset(
model,
dataloader,
dataset_name,
device,
logger,
number_of_samples,
eval_bm45_acc=False,
path_to_file_to_write_results=None,
):
model.eval()
eval_accuracy = 0
nb_eval_steps, nb_eval_examples = 0, 0
for input_ids, input_mask, segment_ids, label_ids, entity_mask in tqdm(
dataloader, desc="Evaluating"
):
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
label_ids = label_ids.to(device)
entity_mask = entity_mask.to(device)
with torch.no_grad():
tmp_eval_loss, logits = model(
input_ids, segment_ids, input_mask, label_ids, entity_mask=entity_mask
)
logits = logits.detach().cpu().numpy()
label_ids = label_ids.to("cpu").numpy()
        tmp_eval_accuracy, _ = utils.accuracy(logits, label_ids)
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0)
nb_eval_steps += 1
logger.info("\n")
normalized_eval_accuracy = eval_accuracy / nb_eval_examples
result = {"normalized_accuracy": normalized_eval_accuracy}
result["unnormalized_accuracy"] = eval_accuracy / number_of_samples
result["candidate_generation_recall"] = nb_eval_examples / number_of_samples
if eval_bm45_acc:
result["normalized_bm45_recall_@"] = utils.eval_precision_bm45_dataloader(
dataloader, [1, 5, 10, 20, 40, 60, 80, 100]
)
result["unnormalized_bm45_recall_@"] = utils.eval_precision_bm45_dataloader(
dataloader, [1, 5, 10, 20, 40, 60, 80, 100], number_of_samples
)
if path_to_file_to_write_results is None:
logger.info(
"***** Eval results - {} ({} / {} samples) *****\n".format(
dataset_name, nb_eval_examples, number_of_samples
)
)
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
else:
with open(path_to_file_to_write_results, "a+") as writer:
logger.info(
"***** Eval results - {} ({} / {} samples) *****\n".format(
dataset_name, nb_eval_examples, number_of_samples
)
)
writer.write(
"***** Eval results - {} ({} / {} samples) *****\n".format(
dataset_name, nb_eval_examples, number_of_samples
)
)
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
writer.write("\n")
logger.info("\n")
return result
def evaluate(parameters, logger=None):
reranker = utils.get_reranker(parameters)
if parameters["full_evaluation"]:
eval_datasets = [
"aida-A",
"aida-B",
"msnbc",
"aquaint",
"ace2004",
"clueweb",
"wikipedia",
]
else:
eval_datasets = ["aida-B"]
candidates_key = (
"pregenerated_candidates"
if parameters["evaluate_with_pregenerated_candidates"]
else "candidates"
)
gold_key = (
"pregenerated_gold_pos"
if parameters["evaluate_with_pregenerated_candidates"]
else "gold_pos"
)
number_of_samples_per_dataset = {}
total_time = 0
for eval_dataset_name in eval_datasets:
time_start = time.time()
logger.info("\nEvaluating on the {} dataset".format(eval_dataset_name))
eval_samples = utils.read_dataset(
eval_dataset_name, parameters["path_to_preprocessed_json_data"]
)
eval_samples_filtered = utils.filter_samples(
eval_samples, parameters["top_k"], gold_key
)
logger.info(
"Retained {} out of {} samples".format(
len(eval_samples_filtered), len(eval_samples)
)
)
number_of_samples_per_dataset[eval_dataset_name] = len(eval_samples)
# if args.num_preprocessing_threads == -1:
# eval_data, eval_tensor_data = process_samples_for_model(args.context_key, eval_samples_filtered, tokenizer, args.max_seq_length, logger = logger, top_k = args.top_k, example = False, debug = args.debug, tagged = args.tag_mention, candidates_key = candidates_key, gold_key = gold_key)
# else:
# eval_data, eval_tensor_data = preprocessing_multithreaded(eval_samples_filtered, logger, args, output_dir=True)
eval_data, eval_tensor_data = reranker._process_mentions_for_model(
parameters["context_key"],
eval_samples_filtered,
reranker.tokenizer,
parameters["max_seq_length"],
parameters["top_k"],
parameters["silent"],
candidates_key=candidates_key,
gold_key=gold_key,
debug=parameters["debug"],
)
eval_sampler = SequentialSampler(eval_tensor_data)
eval_dataloader = DataLoader(
eval_tensor_data,
sampler=eval_sampler,
batch_size=parameters["evaluation_batch_size"],
)
if parameters["output_eval_file"] is None:
output_eval_file = os.path.join(
parameters["path_to_model"], "eval_results.txt"
)
else:
output_eval_file = parameters["output_eval_file"]
result = evaluate_model_on_dataset(
reranker.model,
eval_dataloader,
eval_dataset_name,
eval_bm45_acc=True,
device=reranker.device,
logger=logger,
path_to_file_to_write_results=output_eval_file,
number_of_samples=number_of_samples_per_dataset[eval_dataset_name],
)
execution_time = (time.time() - time_start) / 60
total_time += execution_time
        if logger is not None:
logger.info(
"The execution for dataset {} took {} minutes".format(
eval_dataset_name, execution_time
)
)
else:
print(
"The execution for dataset {} took {} minutes".format(
eval_dataset_name, execution_time
)
)
    if logger is not None:
        logger.info("The evaluation took {} minutes".format(total_time))
    else:
        print("The evaluation took {} minutes".format(total_time))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--path_to_preprocessed_json_data",
default="data/train_and_benchmark_processed_json",
type=str,
help="The path to the train and benchmarking data.",
)
parser.add_argument(
"--path_to_model",
default=None,
type=str,
required=True,
help="The full path to the model to be evaluated.",
)
parser.add_argument("--top_k", default=80, type=int)
parser.add_argument(
"--max_seq_length",
default=512,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.",
)
parser.add_argument(
"--context_key", default="tagged_query_context_sent_prev_curr_next", type=str
)
parser.add_argument(
"--lowercase_flag",
action="store_true",
help="Whether to lower case the input text. True for uncased models, False for cased models.",
)
parser.add_argument(
"--full_evaluation",
action="store_true",
help="Whether to run the evaluation on all datasets.",
)
parser.add_argument(
"--evaluate_with_pregenerated_candidates",
action="store_true",
help="Whether to run in debug mode with only 200 samples.",
)
parser.add_argument(
"--evaluation_batch_size",
default=8,
type=int,
help="Total batch size for evaluation.",
)
parser.add_argument(
"--dataparallel_bert",
action="store_true",
help="Whether to distributed the candidate generation process.",
)
parser.add_argument(
"--no_cuda", action="store_true", help="Whether not to use CUDA when available"
)
parser.add_argument(
"--debug",
action="store_true",
help="Whether to run in debug mode with only 200 samples.",
)
parser.add_argument(
"--silent", action="store_true", help="Whether to print progress bars."
)
parser.add_argument(
"--output_eval_file",
default=None,
type=str,
help="The txt file where the the evaluation results will be written.",
)
args = parser.parse_args()
print(args)
parameters = args.__dict__
evaluate(parameters, logger=utils.get_logger())
|
BLINK-main
|
blink/candidate_ranking/evaluate.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Provide an argument parser and default command line options for using BLINK.
import argparse
import importlib
import os
import sys
import datetime
ENT_START_TAG = "[unused0]"
ENT_END_TAG = "[unused1]"
ENT_TITLE_TAG = "[unused2]"
class BlinkParser(argparse.ArgumentParser):
"""
    Provide an opt-producer and CLI argument parser.
    More options can be added by passing this object and calling
    ``add_arg()`` or ``add_argument()`` on it.
:param add_blink_args:
(default True) initializes the default arguments for BLINK package.
:param add_model_args:
(default False) initializes the default arguments for loading models,
including initializing arguments from the model.
"""
def __init__(
self, add_blink_args=True, add_model_args=False,
description='BLINK parser',
):
super().__init__(
description=description,
allow_abbrev=False,
conflict_handler='resolve',
formatter_class=argparse.HelpFormatter,
add_help=add_blink_args,
)
self.blink_home = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
)
os.environ['BLINK_HOME'] = self.blink_home
self.add_arg = self.add_argument
self.overridable = {}
if add_blink_args:
self.add_blink_args()
if add_model_args:
self.add_model_args()
def add_blink_args(self, args=None):
"""
Add common BLINK args across all scripts.
"""
parser = self.add_argument_group("Common Arguments")
parser.add_argument(
"--silent", action="store_true", help="Whether to print progress bars."
)
parser.add_argument(
"--debug",
action="store_true",
help="Whether to run in debug mode with only 200 samples.",
)
parser.add_argument(
"--data_parallel",
action="store_true",
help="Whether to distributed the candidate generation process.",
)
parser.add_argument(
"--no_cuda", action="store_true",
help="Whether not to use CUDA when available",
)
parser.add_argument("--top_k", default=10, type=int)
parser.add_argument(
"--seed", type=int, default=52313, help="random seed for initialization"
)
parser.add_argument(
"--zeshel",
default=True,
type=bool,
help="Whether the dataset is from zeroshot.",
)
def add_model_args(self, args=None):
"""
Add model args.
"""
parser = self.add_argument_group("Model Arguments")
parser.add_argument(
"--max_seq_length",
default=256,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.",
)
parser.add_argument(
"--max_context_length",
default=128,
type=int,
help="The maximum total context input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.",
)
parser.add_argument(
"--max_cand_length",
default=128,
type=int,
help="The maximum total label input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.",
)
parser.add_argument(
"--path_to_model",
default=None,
type=str,
required=False,
help="The full path to the model to load.",
)
parser.add_argument(
"--bert_model",
default="bert-base-uncased",
type=str,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.",
)
parser.add_argument(
"--pull_from_layer", type=int, default=-1, help="Layers to pull from BERT",
)
parser.add_argument(
"--lowercase",
action="store_false",
help="Whether to lower case the input text. True for uncased models, False for cased models.",
)
parser.add_argument("--context_key", default="context", type=str)
parser.add_argument(
"--out_dim", type=int, default=1, help="Output dimention of bi-encoders.",
)
parser.add_argument(
"--add_linear",
action="store_true",
help="Whether to add an additonal linear projection on top of BERT.",
)
parser.add_argument(
"--data_path",
default="data/zeshel",
type=str,
help="The path to the train data.",
)
parser.add_argument(
"--output_path",
default=None,
type=str,
required=True,
help="The output directory where generated output file (model, etc.) is to be dumped.",
)
def add_training_args(self, args=None):
"""
Add model training args.
"""
parser = self.add_argument_group("Model Training Arguments")
parser.add_argument(
"--evaluate", action="store_true", help="Whether to run evaluation."
)
parser.add_argument(
"--output_eval_file",
default=None,
type=str,
help="The txt file where the the evaluation results will be written.",
)
parser.add_argument(
"--train_batch_size", default=8, type=int,
help="Total batch size for training."
)
parser.add_argument("--max_grad_norm", default=1.0, type=float)
parser.add_argument(
"--learning_rate",
default=3e-5,
type=float,
help="The initial learning rate for Adam.",
)
parser.add_argument(
"--num_train_epochs",
default=1,
type=int,
help="Number of training epochs.",
)
parser.add_argument(
"--print_interval", type=int, default=10,
help="Interval of loss printing",
)
parser.add_argument(
"--eval_interval",
type=int,
default=100,
help="Interval for evaluation during training",
)
parser.add_argument(
"--save_interval", type=int, default=1,
help="Interval for model saving"
)
parser.add_argument(
"--warmup_proportion",
default=0.1,
type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumualte before performing a backward/update pass.",
)
parser.add_argument(
"--type_optimization",
type=str,
default="all_encoder_layers",
help="Which type of layers to optimize in BERT",
)
parser.add_argument(
"--shuffle", type=bool, default=False,
help="Whether to shuffle train data",
)
def add_eval_args(self, args=None):
"""
Add model evaluation args.
"""
parser = self.add_argument_group("Model Evaluation Arguments")
parser.add_argument(
"--eval_batch_size", default=8, type=int,
help="Total batch size for evaluation.",
)
parser.add_argument(
"--mode",
default="valid",
type=str,
help="Train / validation / test",
)
parser.add_argument(
"--save_topk_result",
action="store_true",
help="Whether to save prediction results.",
)
parser.add_argument(
"--encode_batch_size",
default=8,
type=int,
help="Batch size for encoding."
)
parser.add_argument(
"--cand_pool_path",
default=None,
type=str,
help="Path for cached candidate pool (id tokenization of candidates)",
)
parser.add_argument(
"--cand_encode_path",
default=None,
type=str,
help="Path for cached candidate encoding",
)
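# Illustrative (added) usage sketch; the flag values below are assumptions chosen only
# to satisfy the required arguments, not recommended settings.
def _example_build_blink_args():
    parser = BlinkParser(add_model_args=True)
    parser.add_training_args()
    parser.add_eval_args()
    return parser.parse_args(
        ["--output_path", "models/zeshel_biencoder", "--bert_model", "bert-base-uncased"]
    )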
|
BLINK-main
|
blink/common/params.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from torch import nn
def get_model_obj(model):
model = model.module if hasattr(model, "module") else model
return model
class BertEncoder(nn.Module):
def __init__(
self, bert_model, output_dim, layer_pulled=-1, add_linear=None):
super(BertEncoder, self).__init__()
self.layer_pulled = layer_pulled
bert_output_dim = bert_model.embeddings.word_embeddings.weight.size(1)
self.bert_model = bert_model
if add_linear:
self.additional_linear = nn.Linear(bert_output_dim, output_dim)
self.dropout = nn.Dropout(0.1)
else:
self.additional_linear = None
def forward(self, token_ids, segment_ids, attention_mask):
output_bert, output_pooler = self.bert_model(
token_ids, segment_ids, attention_mask
)
# get embedding of [CLS] token
if self.additional_linear is not None:
embeddings = output_pooler
else:
embeddings = output_bert[:, 0, :]
# in case of dimensionality reduction
if self.additional_linear is not None:
result = self.additional_linear(self.dropout(embeddings))
else:
result = embeddings
return result
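# Illustrative (added) sketch of wiring BertEncoder to a pretrained BERT; the model
# name and output dimension are assumptions, not part of the original file.
def _example_build_encoder():
    from pytorch_transformers.modeling_bert import BertModel  # sketch-only import
    bert = BertModel.from_pretrained("bert-base-uncased")
    # project the pooled/[CLS] embedding down to 100 dimensions via the extra linear layer
    return BertEncoder(bert, output_dim=100, layer_pulled=-1, add_linear=True)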
|
BLINK-main
|
blink/common/ranker_base.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import os
import numpy as np
from pytorch_transformers.modeling_bert import (
BertPreTrainedModel,
BertConfig,
BertModel,
)
from pytorch_transformers.tokenization_bert import BertTokenizer
from torch import nn
from pytorch_transformers.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from pytorch_transformers.optimization import AdamW
patterns_optimizer = {
'additional_layers': ['additional'],
'top_layer': ['additional', 'bert_model.encoder.layer.11.'],
'top4_layers': [
'additional',
'bert_model.encoder.layer.11.',
'encoder.layer.10.',
'encoder.layer.9.',
'encoder.layer.8',
],
'all_encoder_layers': ['additional', 'bert_model.encoder.layer'],
'all': ['additional', 'bert_model.encoder.layer', 'bert_model.embeddings'],
}
def get_bert_optimizer(models, type_optimization, learning_rate, fp16=False):
""" Optimizes the network with AdamWithDecay
"""
    if type_optimization not in patterns_optimizer:
        raise ValueError(
            'Type optimizer must be one of %s' % (str(list(patterns_optimizer.keys())))
        )
parameters_with_decay = []
parameters_with_decay_names = []
parameters_without_decay = []
parameters_without_decay_names = []
no_decay = ['bias', 'gamma', 'beta']
patterns = patterns_optimizer[type_optimization]
for model in models:
for n, p in model.named_parameters():
if any(t in n for t in patterns):
if any(t in n for t in no_decay):
parameters_without_decay.append(p)
parameters_without_decay_names.append(n)
else:
parameters_with_decay.append(p)
parameters_with_decay_names.append(n)
print('The following parameters will be optimized WITH decay:')
print(ellipse(parameters_with_decay_names, 5, ' , '))
print('The following parameters will be optimized WITHOUT decay:')
print(ellipse(parameters_without_decay_names, 5, ' , '))
optimizer_grouped_parameters = [
{'params': parameters_with_decay, 'weight_decay': 0.01},
{'params': parameters_without_decay, 'weight_decay': 0.0},
]
optimizer = AdamW(
optimizer_grouped_parameters,
lr=learning_rate,
correct_bias=False
)
if fp16:
optimizer = fp16_optimizer_wrapper(optimizer)
return optimizer
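# Illustrative (added) usage sketch; `encoder` is assumed to expose its BERT weights
# under a `bert_model.` prefix (as blink.common.ranker_base.BertEncoder does), otherwise
# none of the patterns above match and the parameter groups come out empty.
def _example_build_optimizer(encoder):
    return get_bert_optimizer([encoder], "all_encoder_layers", learning_rate=3e-5)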
def ellipse(lst, max_display=5, sep='|'):
"""
Like join, but possibly inserts an ellipsis.
:param lst: The list to join on
:param int max_display: the number of items to display for ellipsing.
If -1, shows all items
:param string sep: the delimiter to join on
"""
# copy the list (or force it to a list if it's a set)
choices = list(lst)
# insert the ellipsis if necessary
if max_display > 0 and len(choices) > max_display:
ellipsis = '...and {} more'.format(len(choices) - max_display)
choices = choices[:max_display] + [ellipsis]
return sep.join(str(c) for c in choices)
|
BLINK-main
|
blink/common/optimizer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
FAISS-based index components. Original from
https://github.com/facebookresearch/DPR/blob/master/dpr/indexer/faiss_indexers.py
"""
import os
import logging
import pickle
import faiss
import numpy as np
logger = logging.getLogger()
class DenseIndexer(object):
def __init__(self, buffer_size: int = 50000):
self.buffer_size = buffer_size
self.index_id_to_db_id = []
self.index = None
def index_data(self, data: np.array):
raise NotImplementedError
def search_knn(self, query_vectors: np.array, top_docs: int):
raise NotImplementedError
def serialize(self, index_file: str):
logger.info("Serializing index to %s", index_file)
faiss.write_index(self.index, index_file)
def deserialize_from(self, index_file: str):
logger.info("Loading index from %s", index_file)
self.index = faiss.read_index(index_file)
logger.info(
"Loaded index of type %s and size %d", type(self.index), self.index.ntotal
)
# DenseFlatIndexer does exact search
class DenseFlatIndexer(DenseIndexer):
def __init__(self, vector_sz: int = 1, buffer_size: int = 50000):
super(DenseFlatIndexer, self).__init__(buffer_size=buffer_size)
self.index = faiss.IndexFlatIP(vector_sz)
def index_data(self, data: np.array):
n = len(data)
# indexing in batches is beneficial for many faiss index types
logger.info("Indexing data, this may take a while.")
cnt = 0
for i in range(0, n, self.buffer_size):
vectors = [np.reshape(t, (1, -1)) for t in data[i : i + self.buffer_size]]
vectors = np.concatenate(vectors, axis=0)
self.index.add(vectors)
cnt += self.buffer_size
logger.info("Total data indexed %d", n)
def search_knn(self, query_vectors, top_k):
scores, indexes = self.index.search(query_vectors, top_k)
return scores, indexes
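# Illustrative (added) usage sketch for the exact-search indexer; vector size, corpus
# size and top_k below are assumptions, not values used by the original code.
def _example_flat_index_roundtrip():
    dim = 8
    indexer = DenseFlatIndexer(vector_sz=dim)
    vectors = np.random.rand(100, dim).astype("float32")
    indexer.index_data(vectors)
    scores, indexes = indexer.search_knn(vectors[:2], top_k=5)
    return scores.shape, indexes.shape  # (2, 5) and (2, 5)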
# DenseHNSWFlatIndexer does approximate search
class DenseHNSWFlatIndexer(DenseIndexer):
"""
    Efficient index for retrieval. Note: default settings are for high accuracy but also high RAM usage
"""
def __init__(
self,
vector_sz: int,
buffer_size: int = 50000,
store_n: int = 128,
ef_search: int = 256,
ef_construction: int = 200,
):
super(DenseHNSWFlatIndexer, self).__init__(buffer_size=buffer_size)
# IndexHNSWFlat supports L2 similarity only
        # so we have to apply DOT -> L2 similarity space conversion with the help of an extra dimension
index = faiss.IndexHNSWFlat(vector_sz + 1, store_n)
index.hnsw.efSearch = ef_search
index.hnsw.efConstruction = ef_construction
self.index = index
self.phi = 0
def index_data(self, data: np.array):
n = len(data)
# max norm is required before putting all vectors in the index to convert inner product similarity to L2
if self.phi > 0:
raise RuntimeError(
"DPR HNSWF index needs to index all data at once,"
"results will be unpredictable otherwise."
)
phi = 0
for i, item in enumerate(data):
doc_vector = item
norms = (doc_vector ** 2).sum()
phi = max(phi, norms)
logger.info("HNSWF DotProduct -> L2 space phi={}".format(phi))
        # remember phi so that a second call to index_data is rejected by the check above
        self.phi = phi
# indexing in batches is beneficial for many faiss index types
logger.info("Indexing data, this may take a while.")
cnt = 0
for i in range(0, n, self.buffer_size):
vectors = [np.reshape(t, (1, -1)) for t in data[i : i + self.buffer_size]]
norms = [(doc_vector ** 2).sum() for doc_vector in vectors]
aux_dims = [np.sqrt(phi - norm) for norm in norms]
hnsw_vectors = [
np.hstack((doc_vector, aux_dims[i].reshape(-1, 1)))
for i, doc_vector in enumerate(vectors)
]
hnsw_vectors = np.concatenate(hnsw_vectors, axis=0)
self.index.add(hnsw_vectors)
cnt += self.buffer_size
logger.info("Indexed data %d" % cnt)
logger.info("Total data indexed %d" % n)
def search_knn(self, query_vectors, top_k):
aux_dim = np.zeros(len(query_vectors), dtype="float32")
        query_hnsw_vectors = np.hstack((query_vectors, aux_dim.reshape(-1, 1)))
        logger.info("query_hnsw_vectors %s", query_hnsw_vectors.shape)
        scores, indexes = self.index.search(query_hnsw_vectors, top_k)
return scores, indexes
def deserialize_from(self, file: str):
super(DenseHNSWFlatIndexer, self).deserialize_from(file)
# to trigger warning on subsequent indexing
self.phi = 1
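# Illustrative (added) note on the phi trick used above: each document vector d is
# augmented to d' = [d, sqrt(phi - ||d||^2)] and each query q to q' = [q, 0], so
# ||q' - d'||^2 = ||q||^2 + phi - 2 * <q, d>. Since ||q||^2 and phi are constant for a
# given query, ranking by L2 distance over the augmented vectors reproduces the ranking
# by inner product over the original ones.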
|
BLINK-main
|
blink/indexer/faiss_indexer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import json
import sys
from elq.index.faiss_indexer import DenseFlatIndexer, DenseHNSWFlatIndexer, DenseIVFFlatIndexer
import logging
import torch
import numpy as np
from colorama import init
from termcolor import colored
import torch.nn.functional as F
import blink.ner as NER
from torch.utils.data import DataLoader, SequentialSampler, TensorDataset
from elq.biencoder.biencoder import BiEncoderRanker, load_biencoder, to_bert_input
from elq.biencoder.data_process import (
process_mention_data,
get_context_representation_single_mention,
get_candidate_representation,
)
import elq.candidate_ranking.utils as utils
import math
from elq.vcg_utils.measures import entity_linking_tp_with_overlap
from elq.biencoder.utils import batch_reshape_mask_left
import os
import sys
from tqdm import tqdm
import pdb
import time
HIGHLIGHTS = [
"on_red",
"on_green",
"on_yellow",
"on_blue",
"on_magenta",
"on_cyan",
]
from transformers import BertTokenizer
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
def _print_colorful_text(input_tokens, tokenizer, pred_triples):
"""
pred_triples:
Assumes no overlapping triples
"""
sort_idxs = sorted(range(len(pred_triples)), key=lambda idx: pred_triples[idx][1])
init() # colorful output
msg = ""
if pred_triples and (len(pred_triples) > 0):
msg += tokenizer.decode(input_tokens[0 : int(pred_triples[sort_idxs[0]][1])])
for i, idx in enumerate(sort_idxs):
triple = pred_triples[idx]
msg += " " + colored(
tokenizer.decode(input_tokens[int(triple[1]) : int(triple[2])]),
"grey",
HIGHLIGHTS[idx % len(HIGHLIGHTS)],
)
if i < len(sort_idxs) - 1:
msg += " " + tokenizer.decode(input_tokens[
int(triple[2]) : int(pred_triples[sort_idxs[i + 1]][1])
])
else:
msg += " " + tokenizer.decode(input_tokens[int(triple[2]) : ])
else:
msg = tokenizer.decode(input_tokens)
print("\n" + str(msg) + "\n")
def _print_colorful_prediction(all_entity_preds, pred_triples, id2text, id2wikidata):
sort_idxs = sorted(range(len(pred_triples)), key=lambda idx: pred_triples[idx][1])
for idx in sort_idxs:
print(colored(all_entity_preds[0]['pred_tuples_string'][idx][1], "grey", HIGHLIGHTS[idx % len(HIGHLIGHTS)]))
if pred_triples[idx][0] in id2wikidata:
print(" Wikidata ID: {}".format(id2wikidata[pred_triples[idx][0]]))
print(" Title: {}".format(all_entity_preds[0]['pred_tuples_string'][idx][0]))
print(" Score: {}".format(str(all_entity_preds[0]['scores'][idx])))
print(" Triple: {}".format(str(pred_triples[idx])))
print(" Text: {}".format(id2text[pred_triples[idx][0]]))
def _load_candidates(
entity_catalogue, entity_encoding,
faiss_index="none", index_path=None,
logger=None,
):
if faiss_index == "none":
candidate_encoding = torch.load(entity_encoding)
indexer = None
else:
candidate_encoding = None
assert index_path is not None, "Error! Empty indexer path."
if faiss_index == "flat":
indexer = DenseFlatIndexer(1)
elif faiss_index == "hnsw":
indexer = DenseHNSWFlatIndexer(1)
elif faiss_index == "ivfflat":
indexer = DenseIVFFlatIndexer(1)
else:
raise ValueError("Error! Unsupported indexer type! Choose from flat,hnsw,ivfflat.")
indexer.deserialize_from(index_path)
candidate_encoding = torch.load(entity_encoding)
if not os.path.exists("models/id2title.json"):
id2title = {}
id2text = {}
id2wikidata = {}
local_idx = 0
with open(entity_catalogue, "r") as fin:
lines = fin.readlines()
for line in lines:
entity = json.loads(line)
id2title[str(local_idx)] = entity["title"]
id2text[str(local_idx)] = entity["text"]
if "kb_idx" in entity:
id2wikidata[str(local_idx)] = entity["kb_idx"]
local_idx += 1
json.dump(id2title, open("models/id2title.json", "w"))
json.dump(id2text, open("models/id2text.json", "w"))
json.dump(id2wikidata, open("models/id2wikidata.json", "w"))
else:
if logger: logger.info("Loading id2title")
id2title = json.load(open("models/id2title.json"))
if logger: logger.info("Loading id2text")
id2text = json.load(open("models/id2text.json"))
if logger: logger.info("Loading id2wikidata")
id2wikidata = json.load(open("models/id2wikidata.json"))
return (
candidate_encoding, indexer,
id2title, id2text, id2wikidata,
)
def _get_test_samples(
test_filename, test_entities_path, logger,
):
"""
Parses jsonl format with one example per line
Each line of the following form
IF HAVE LABELS
{
"id": "WebQTest-12",
"text": "who is governor of ohio 2011?",
"mentions": [[19, 23], [7, 15]],
"tokenized_text_ids": [2040, 2003, 3099, 1997, 4058, 2249, 1029],
"tokenized_mention_idxs": [[4, 5], [2, 3]],
"label_id": [10902, 28422],
"wikidata_id": ["Q1397", "Q132050"],
"entity": ["Ohio", "Governor"],
"label": [list of wikipedia descriptions]
}
IF NO LABELS (JUST PREDICTION)
{
"id": "WebQTest-12",
"text": "who is governor of ohio 2011?",
}
"""
if logger: logger.info("Loading test samples")
test_samples = []
unknown_entity_samples = []
num_unknown_entity_samples = 0
num_no_gold_entity = 0
ner_errors = 0
with open(test_filename, "r") as fin:
lines = fin.readlines()
sample_idx = 0
do_setup_samples = True
for i, line in enumerate(lines):
record = json.loads(line)
test_samples.append(record)
return test_samples, num_unknown_entity_samples
def _process_biencoder_dataloader(samples, tokenizer, biencoder_params, logger):
"""
Samples: list of examples, each of the form--
IF HAVE LABELS
{
"id": "WebQTest-12",
"text": "who is governor of ohio 2011?",
"mentions": [[19, 23], [7, 15]],
"tokenized_text_ids": [2040, 2003, 3099, 1997, 4058, 2249, 1029],
"tokenized_mention_idxs": [[4, 5], [2, 3]],
"label_id": [10902, 28422],
"wikidata_id": ["Q1397", "Q132050"],
"entity": ["Ohio", "Governor"],
"label": [list of wikipedia descriptions]
}
IF NO LABELS (JUST PREDICTION)
{
"id": "WebQTest-12",
"text": "who is governor of ohio 2011?",
}
"""
if 'label_id' in samples[0]:
# have labels
tokens_data, tensor_data_tuple, _ = process_mention_data(
samples=samples,
tokenizer=tokenizer,
max_context_length=biencoder_params["max_context_length"],
max_cand_length=biencoder_params["max_cand_length"],
silent=False,
logger=logger,
debug=biencoder_params["debug"],
add_mention_bounds=(not biencoder_params.get("no_mention_bounds", False)),
params=biencoder_params,
)
else:
samples_text_tuple = []
max_seq_len = 0
for sample in samples:
# truncate the end if the sequence is too long...
encoded_sample = [101] + tokenizer.encode(sample['text'])[:biencoder_params["max_context_length"]-2] + [102]
max_seq_len = max(len(encoded_sample), max_seq_len)
samples_text_tuple.append(encoded_sample + [0 for _ in range(biencoder_params["max_context_length"] - len(encoded_sample))])
# print(samples_text_tuple)
tensor_data_tuple = [torch.tensor(samples_text_tuple)]
tensor_data = TensorDataset(*tensor_data_tuple)
sampler = SequentialSampler(tensor_data)
dataloader = DataLoader(
tensor_data, sampler=sampler, batch_size=biencoder_params["eval_batch_size"]
)
return dataloader
def _run_biencoder(
args, biencoder, dataloader, candidate_encoding, samples,
num_cand_mentions=50, num_cand_entities=10,
device="cpu", sample_to_all_context_inputs=None,
threshold=0.0, indexer=None,
):
"""
Returns: tuple
labels (List[int]) [(max_num_mentions_gold) x exs]: gold labels -- returns None if no labels
nns (List[Array[int]]) [(# of pred mentions, cands_per_mention) x exs]: predicted entity IDs in each example
dists (List[Array[float]]) [(# of pred mentions, cands_per_mention) x exs]: scores of each entity in nns
pred_mention_bounds (List[Array[int]]) [(# of pred mentions, 2) x exs]: predicted mention boundaries in each examples
mention_scores (List[Array[float]]) [(# of pred mentions,) x exs]: mention score logit
cand_scores (List[Array[float]]) [(# of pred mentions, cands_per_mention) x exs]: candidate score logit
"""
biencoder.model.eval()
biencoder_model = biencoder.model
if hasattr(biencoder.model, "module"):
biencoder_model = biencoder.model.module
context_inputs = []
nns = []
dists = []
mention_dists = []
pred_mention_bounds = []
mention_scores = []
cand_scores = []
sample_idx = 0
ctxt_idx = 0
label_ids = None
for step, batch in enumerate(tqdm(dataloader)):
context_input = batch[0].to(device)
mask_ctxt = context_input != biencoder.NULL_IDX
with torch.no_grad():
context_outs = biencoder.encode_context(
context_input, num_cand_mentions=num_cand_mentions, topK_threshold=threshold,
)
embedding_ctxt = context_outs['mention_reps']
left_align_mask = context_outs['mention_masks']
chosen_mention_logits = context_outs['mention_logits']
chosen_mention_bounds = context_outs['mention_bounds']
'''
GET TOP CANDIDATES PER MENTION
'''
# (all_pred_mentions_batch, embed_dim)
embedding_ctxt = embedding_ctxt[left_align_mask]
if indexer is None:
try:
cand_logits, _, _ = biencoder.score_candidate(
context_input, None,
text_encs=embedding_ctxt,
cand_encs=candidate_encoding.to(device),
)
# DIM (all_pred_mentions_batch, num_cand_entities); (all_pred_mentions_batch, num_cand_entities)
top_cand_logits_shape, top_cand_indices_shape = cand_logits.topk(num_cand_entities, dim=-1, sorted=True)
except:
# for memory savings, go through one chunk of candidates at a time
SPLIT_SIZE=1000000
done=False
while not done:
top_cand_logits_list = []
top_cand_indices_list = []
max_chunk = int(len(candidate_encoding) / SPLIT_SIZE)
for chunk_idx in range(max_chunk):
try:
# DIM (num_total_mentions, num_cand_entities); (num_total_mention, num_cand_entities)
top_cand_logits, top_cand_indices = embedding_ctxt.mm(candidate_encoding[chunk_idx*SPLIT_SIZE:(chunk_idx+1)*SPLIT_SIZE].to(device).t().contiguous()).topk(10, dim=-1, sorted=True)
top_cand_logits_list.append(top_cand_logits)
top_cand_indices_list.append(top_cand_indices + chunk_idx*SPLIT_SIZE)
if len((top_cand_indices_list[chunk_idx] < 0).nonzero()) > 0:
import pdb
pdb.set_trace()
except:
SPLIT_SIZE = int(SPLIT_SIZE/2)
break
if len(top_cand_indices_list) == max_chunk:
# DIM (num_total_mentions, num_cand_entities); (num_total_mentions, num_cand_entities) -->
# top_top_cand_indices_shape indexes into top_cand_indices
top_cand_logits_shape, top_top_cand_indices_shape = torch.cat(
top_cand_logits_list, dim=-1).topk(num_cand_entities, dim=-1, sorted=True)
# make indices index into candidate_encoding
# DIM (num_total_mentions, max_chunk*num_cand_entities)
all_top_cand_indices = torch.cat(top_cand_indices_list, dim=-1)
# DIM (num_total_mentions, num_cand_entities)
top_cand_indices_shape = all_top_cand_indices.gather(-1, top_top_cand_indices_shape)
done = True
else:
# DIM (all_pred_mentions_batch, num_cand_entities); (all_pred_mentions_batch, num_cand_entities)
top_cand_logits_shape, top_cand_indices_shape = indexer.search_knn(embedding_ctxt.cpu().numpy(), num_cand_entities)
top_cand_logits_shape = torch.tensor(top_cand_logits_shape).to(embedding_ctxt.device)
top_cand_indices_shape = torch.tensor(top_cand_indices_shape).to(embedding_ctxt.device)
# DIM (bs, max_num_pred_mentions, num_cand_entities)
top_cand_logits = torch.zeros(chosen_mention_logits.size(0), chosen_mention_logits.size(1), top_cand_logits_shape.size(-1)).to(
top_cand_logits_shape.device, top_cand_logits_shape.dtype)
top_cand_logits[left_align_mask] = top_cand_logits_shape
top_cand_indices = torch.zeros(chosen_mention_logits.size(0), chosen_mention_logits.size(1), top_cand_indices_shape.size(-1)).to(
top_cand_indices_shape.device, top_cand_indices_shape.dtype)
top_cand_indices[left_align_mask] = top_cand_indices_shape
'''
COMPUTE FINAL SCORES FOR EACH CAND-MENTION PAIR + PRUNE USING IT
'''
# Has NAN for impossible mentions...
# log p(entity && mb) = log [p(entity|mention bounds) * p(mention bounds)] = log p(e|mb) + log p(mb)
# DIM (bs, max_num_pred_mentions, num_cand_entities)
scores = torch.log_softmax(top_cand_logits, -1) + torch.sigmoid(chosen_mention_logits.unsqueeze(-1)).log()
'''
DON'T NEED TO RESORT BY NEW SCORE -- DISTANCE PRESERVING (largest entity score still be largest entity score)
'''
for idx in range(len(batch[0])):
# [(seqlen) x exs] <= (bsz, seqlen)
context_inputs.append(context_input[idx][mask_ctxt[idx]].data.cpu().numpy())
# [(max_num_mentions, cands_per_mention) x exs] <= (bsz, max_num_mentions=num_cand_mentions, cands_per_mention)
nns.append(top_cand_indices[idx][left_align_mask[idx]].data.cpu().numpy())
# [(max_num_mentions, cands_per_mention) x exs] <= (bsz, max_num_mentions=num_cand_mentions, cands_per_mention)
dists.append(scores[idx][left_align_mask[idx]].data.cpu().numpy())
# [(max_num_mentions, 2) x exs] <= (bsz, max_num_mentions=num_cand_mentions, 2)
pred_mention_bounds.append(chosen_mention_bounds[idx][left_align_mask[idx]].data.cpu().numpy())
# [(max_num_mentions,) x exs] <= (bsz, max_num_mentions=num_cand_mentions)
mention_scores.append(chosen_mention_logits[idx][left_align_mask[idx]].data.cpu().numpy())
# [(max_num_mentions, cands_per_mention) x exs] <= (bsz, max_num_mentions=num_cand_mentions, cands_per_mention)
cand_scores.append(top_cand_logits[idx][left_align_mask[idx]].data.cpu().numpy())
return nns, dists, pred_mention_bounds, mention_scores, cand_scores
def get_predictions(
args, dataloader, biencoder_params, samples, nns, dists, mention_scores, cand_scores,
pred_mention_bounds, id2title, threshold=-2.9, mention_threshold=-0.6931,
):
"""
Arguments:
args, dataloader, biencoder_params, samples, nns, dists, pred_mention_bounds
Returns:
all_entity_preds,
num_correct_weak, num_correct_strong, num_predicted, num_gold,
num_correct_weak_from_input_window, num_correct_strong_from_input_window, num_gold_from_input_window
"""
# save biencoder predictions and print precision/recalls
num_correct_weak = 0
num_correct_strong = 0
num_predicted = 0
num_gold = 0
num_correct_weak_from_input_window = 0
num_correct_strong_from_input_window = 0
num_gold_from_input_window = 0
all_entity_preds = []
f = errors_f = None
if getattr(args, 'save_preds_dir', None) is not None:
save_biencoder_file = os.path.join(args.save_preds_dir, 'biencoder_outs.jsonl')
f = open(save_biencoder_file, 'w')
errors_f = open(os.path.join(args.save_preds_dir, 'biencoder_errors.jsonl'), 'w')
# nns (List[Array[int]]) [(num_pred_mentions, cands_per_mention) x exs])
# dists (List[Array[float]]) [(num_pred_mentions, cands_per_mention) x exs])
# pred_mention_bounds (List[Array[int]]) [(num_pred_mentions, 2) x exs]
# cand_scores (List[Array[float]]) [(num_pred_mentions, cands_per_mention) x exs])
# mention_scores (List[Array[float]]) [(num_pred_mentions,) x exs])
for batch_num, batch_data in enumerate(dataloader):
batch_context = batch_data[0]
if len(batch_data) > 1:
_, batch_cands, batch_label_ids, batch_mention_idxs, batch_mention_idx_masks = batch_data
for b in range(len(batch_context)):
i = batch_num * biencoder_params['eval_batch_size'] + b
sample = samples[i]
input_context = batch_context[b][batch_context[b] != 0].tolist() # filter out padding
# (num_pred_mentions, cands_per_mention)
scores = dists[i] if args.threshold_type == "joint" else cand_scores[i]
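            # NaN != NaN, so this mask drops mentions whose top candidate score is NaN (i.e. impossible mentions)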
cands_mask = (scores[:,0] == scores[:,0])
pred_entity_list = nns[i][cands_mask]
if len(pred_entity_list) > 0:
e_id = pred_entity_list[0]
distances = scores[cands_mask]
# (num_pred_mentions, 2)
entity_mention_bounds_idx = pred_mention_bounds[i][cands_mask]
utterance = sample['text']
if args.threshold_type == "joint":
# THRESHOLDING
assert utterance is not None
top_mentions_mask = (distances[:,0] > threshold)
elif args.threshold_type == "top_entity_by_mention":
top_mentions_mask = (mention_scores[i] > mention_threshold)
elif args.threshold_type == "thresholded_entity_by_mention":
top_mentions_mask = (distances[:,0] > threshold) & (mention_scores[i] > mention_threshold)
_, sort_idxs = torch.tensor(distances[:,0][top_mentions_mask]).sort(descending=True)
# cands already sorted by score
all_pred_entities = pred_entity_list[:,0][top_mentions_mask]
e_mention_bounds = entity_mention_bounds_idx[top_mentions_mask]
chosen_distances = distances[:,0][top_mentions_mask]
if len(all_pred_entities) >= 2:
all_pred_entities = all_pred_entities[sort_idxs]
e_mention_bounds = e_mention_bounds[sort_idxs]
chosen_distances = chosen_distances[sort_idxs]
# prune mention overlaps
e_mention_bounds_pruned = []
all_pred_entities_pruned = []
chosen_distances_pruned = []
mention_masked_utterance = np.zeros(len(input_context))
# ensure well-formed-ness, prune overlaps
# greedily pick highest scoring, then prune all overlapping
for idx, mb in enumerate(e_mention_bounds):
mb[1] += 1 # prediction was inclusive, now make exclusive
# check if in existing mentions
if args.threshold_type != "top_entity_by_mention" and mention_masked_utterance[mb[0]:mb[1]].sum() >= 1:
continue
e_mention_bounds_pruned.append(mb)
all_pred_entities_pruned.append(all_pred_entities[idx])
chosen_distances_pruned.append(float(chosen_distances[idx]))
mention_masked_utterance[mb[0]:mb[1]] = 1
input_context = input_context[1:-1] # remove BOS and sep
pred_triples = [(
str(all_pred_entities_pruned[j]),
int(e_mention_bounds_pruned[j][0]) - 1, # -1 for BOS
int(e_mention_bounds_pruned[j][1]) - 1,
) for j in range(len(all_pred_entities_pruned))]
entity_results = {
"id": sample["id"],
"text": sample["text"],
"scores": chosen_distances_pruned,
}
if 'label_id' in sample:
# Get LABELS
input_mention_idxs = batch_mention_idxs[b][batch_mention_idx_masks[b]].tolist()
input_label_ids = batch_label_ids[b][batch_label_ids[b] != -1].tolist()
assert len(input_label_ids) == len(input_mention_idxs)
gold_mention_bounds = [
sample['text'][ment[0]-10:ment[0]] + "[" + sample['text'][ment[0]:ment[1]] + "]" + sample['text'][ment[1]:ment[1]+10]
for ment in sample['mentions']
]
                # ALIGN MENTION_IDXS between our input window and the gold tokenization (the model input may be a shifted window of the gold tokens) -- also account for the BOS token
gold_input = sample['tokenized_text_ids']
                # find the first occurrence of our input window within gold_input
for my_input_start in range(len(gold_input)):
if (
gold_input[my_input_start] == input_context[0] and
gold_input[my_input_start:my_input_start+len(input_context)] == input_context
):
break
# add alignment factor (my_input_start) to predicted mention triples
pred_triples = [(
triple[0],
triple[1] + my_input_start, triple[2] + my_input_start,
) for triple in pred_triples]
gold_triples = [(
str(sample['label_id'][j]),
sample['tokenized_mention_idxs'][j][0], sample['tokenized_mention_idxs'][j][1],
) for j in range(len(sample['label_id']))]
num_overlap_weak, num_overlap_strong = entity_linking_tp_with_overlap(gold_triples, pred_triples)
num_correct_weak += num_overlap_weak
num_correct_strong += num_overlap_strong
num_predicted += len(all_pred_entities_pruned)
num_gold += len(sample["label_id"])
# compute number correct given the input window
pred_input_window_triples = [(
str(all_pred_entities_pruned[j]),
int(e_mention_bounds_pruned[j][0]), int(e_mention_bounds_pruned[j][1]),
) for j in range(len(all_pred_entities_pruned))]
gold_input_window_triples = [(
str(input_label_ids[j]),
input_mention_idxs[j][0], input_mention_idxs[j][1] + 1,
) for j in range(len(input_label_ids))]
num_overlap_weak_window, num_overlap_strong_window = entity_linking_tp_with_overlap(gold_input_window_triples, pred_input_window_triples)
num_correct_weak_from_input_window += num_overlap_weak_window
num_correct_strong_from_input_window += num_overlap_strong_window
num_gold_from_input_window += len(input_mention_idxs)
entity_results.update({
"pred_tuples_string": [
[id2title[triple[0]], tokenizer.decode(sample['tokenized_text_ids'][triple[1]:triple[2]])]
for triple in pred_triples
],
"gold_tuples_string": [
[id2title[triple[0]], tokenizer.decode(sample['tokenized_text_ids'][triple[1]:triple[2]])]
for triple in gold_triples
],
"pred_triples": pred_triples,
"gold_triples": gold_triples,
"tokens": input_context,
})
if errors_f is not None and (num_overlap_weak != len(gold_triples) or num_overlap_weak != len(pred_triples)):
errors_f.write(json.dumps(entity_results) + "\n")
else:
entity_results.update({
"pred_tuples_string": [
[id2title[triple[0]], tokenizer.decode(input_context[triple[1]:triple[2]])]
for triple in pred_triples
],
"pred_triples": pred_triples,
"tokens": input_context,
})
all_entity_preds.append(entity_results)
if f is not None:
f.write(
json.dumps(entity_results) + "\n"
)
if f is not None:
f.close()
errors_f.close()
return (
all_entity_preds, num_correct_weak, num_correct_strong, num_predicted, num_gold,
num_correct_weak_from_input_window, num_correct_strong_from_input_window, num_gold_from_input_window
)
def _save_biencoder_outs(save_preds_dir, nns, dists, pred_mention_bounds, cand_scores, mention_scores, runtime):
np.save(os.path.join(save_preds_dir, "biencoder_nns.npy"), nns)
np.save(os.path.join(save_preds_dir, "biencoder_dists.npy"), dists)
np.save(os.path.join(save_preds_dir, "biencoder_mention_bounds.npy"), pred_mention_bounds)
np.save(os.path.join(save_preds_dir, "biencoder_cand_scores.npy"), cand_scores)
np.save(os.path.join(save_preds_dir, "biencoder_mention_scores.npy"), mention_scores)
with open(os.path.join(save_preds_dir, "runtime.txt"), "w") as wf:
wf.write(str(runtime))
def _load_biencoder_outs(save_preds_dir):
nns = np.load(os.path.join(save_preds_dir, "biencoder_nns.npy"), allow_pickle=True)
dists = np.load(os.path.join(save_preds_dir, "biencoder_dists.npy"), allow_pickle=True)
pred_mention_bounds = np.load(os.path.join(save_preds_dir, "biencoder_mention_bounds.npy"), allow_pickle=True)
cand_scores = np.load(os.path.join(save_preds_dir, "biencoder_cand_scores.npy"), allow_pickle=True)
mention_scores = np.load(os.path.join(save_preds_dir, "biencoder_mention_scores.npy"), allow_pickle=True)
    runtime = float(open(os.path.join(save_preds_dir, "runtime.txt")).read())
return nns, dists, pred_mention_bounds, cand_scores, mention_scores, runtime
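# A round-trip sketch (hypothetical directory name): the two helpers above mirror each
# other, so cached biencoder outputs can be reloaded instead of re-running the biencoder:
#   _save_biencoder_outs("preds/", nns, dists, bounds, cand_scores, mention_scores, 12.3)
#   nns, dists, bounds, cand_scores, mention_scores, runtime = _load_biencoder_outs("preds/")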
def display_metrics(
num_correct, num_predicted, num_gold, prefix="",
):
p = 0 if num_predicted == 0 else float(num_correct) / float(num_predicted)
r = 0 if num_gold == 0 else float(num_correct) / float(num_gold)
if p + r > 0:
f1 = 2 * p * r / (p + r)
else:
f1 = 0
print("{0}precision = {1} / {2} = {3}".format(prefix, num_correct, num_predicted, p))
print("{0}recall = {1} / {2} = {3}".format(prefix, num_correct, num_gold, r))
print("{0}f1 = {1}".format(prefix, f1))
def load_models(args, logger):
# load biencoder model
if logger: logger.info("Loading biencoder model")
try:
with open(args.biencoder_config) as json_file:
biencoder_params = json.load(json_file)
except json.decoder.JSONDecodeError:
with open(args.biencoder_config) as json_file:
for line in json_file:
line = line.replace("'", "\"")
line = line.replace("True", "true")
line = line.replace("False", "false")
line = line.replace("None", "null")
biencoder_params = json.loads(line)
break
biencoder_params["path_to_model"] = args.biencoder_model
biencoder_params["cand_token_ids_path"] = args.cand_token_ids_path
biencoder_params["eval_batch_size"] = getattr(args, 'eval_batch_size', 8)
biencoder_params["no_cuda"] = (not getattr(args, 'use_cuda', False) or not torch.cuda.is_available())
if biencoder_params["no_cuda"]:
biencoder_params["data_parallel"] = False
biencoder_params["load_cand_enc_only"] = False
if getattr(args, 'max_context_length', None) is not None:
biencoder_params["max_context_length"] = args.max_context_length
biencoder = load_biencoder(biencoder_params)
if biencoder_params["no_cuda"] and type(biencoder.model).__name__ == 'DataParallel':
biencoder.model = biencoder.model.module
elif not biencoder_params["no_cuda"] and type(biencoder.model).__name__ != 'DataParallel':
biencoder.model = torch.nn.DataParallel(biencoder.model)
# load candidate entities
if logger: logger.info("Loading candidate entities")
(
candidate_encoding,
indexer,
id2title,
id2text,
id2wikidata,
) = _load_candidates(
args.entity_catalogue, args.entity_encoding,
args.faiss_index, args.index_path, logger=logger,
)
return (
biencoder,
biencoder_params,
candidate_encoding,
indexer,
id2title,
id2text,
id2wikidata,
)
def run(
args,
logger,
biencoder,
biencoder_params,
candidate_encoding,
indexer,
id2title,
id2text,
id2wikidata,
test_data=None,
):
if not test_data and not getattr(args, 'test_mentions', None) and not getattr(args, 'interactive', None):
msg = (
"ERROR: either you start BLINK with the "
"interactive option (-i) or you pass in input test mentions (--test_mentions)"
"and test entities (--test_entities) or manually pass in test data"
)
raise ValueError(msg)
if getattr(args, 'save_preds_dir', None) is not None and not os.path.exists(args.save_preds_dir):
os.makedirs(args.save_preds_dir)
print("Saving preds in {}".format(args.save_preds_dir))
stopping_condition = False
threshold = float(args.threshold)
if args.threshold_type == "top_entity_by_mention":
assert args.mention_threshold is not None
mention_threshold = float(args.mention_threshold)
else:
mention_threshold = threshold
if args.interactive:
while not stopping_condition:
if logger: logger.info("interactive mode")
# Interactive
text = input("insert text: ")
# Prepare data
samples = [{"id": "-1", "text": text}]
dataloader = _process_biencoder_dataloader(
samples, biencoder.tokenizer, biencoder_params, logger,
)
# Run inference
nns, dists, pred_mention_bounds, mention_scores, cand_scores = _run_biencoder(
args, biencoder, dataloader, candidate_encoding, samples=samples,
num_cand_mentions=args.num_cand_mentions, num_cand_entities=args.num_cand_entities,
device="cpu" if biencoder_params["no_cuda"] else "cuda",
threshold=mention_threshold, indexer=indexer,
)
action = "c"
while action == "c":
all_entity_preds = get_predictions(
args, dataloader, biencoder_params,
samples, nns, dists, mention_scores, cand_scores,
pred_mention_bounds, id2title, threshold=threshold,
mention_threshold=mention_threshold,
)[0]
pred_triples = all_entity_preds[0]['pred_triples']
_print_colorful_text(all_entity_preds[0]['tokens'], tokenizer, pred_triples)
_print_colorful_prediction(all_entity_preds, pred_triples, id2text, id2wikidata)
action = input("Next question [n] / change threshold [c]: ")
while action != "n" and action != "c":
action = input("Next question [n] / change threshold [c]: ")
if action == "c":
print("Current threshold {}".format(threshold))
while True:
threshold = input("New threshold (increase for less cands, decrease for more cands): ")
try:
threshold = float(threshold)
break
except:
print("Error! Expected float, got {}. Try again.".format(threshold))
else:
if not test_data:
samples, num_unk = _get_test_samples(
args.test_mentions, args.test_entities, logger,
)
else:
samples = test_data
if logger: logger.info("Preparing data for biencoder")
dataloader = _process_biencoder_dataloader(
samples, biencoder.tokenizer, biencoder_params, None,
)
stopping_condition = True
# prepare the data for biencoder
# run biencoder if predictions not saved
if not getattr(args, 'save_preds_dir', None) or not os.path.exists(
os.path.join(args.save_preds_dir, 'biencoder_mention_bounds.npy')):
# run biencoder
if logger: logger.info("Running biencoder...")
start_time = time.time()
nns, dists, pred_mention_bounds, mention_scores, cand_scores = _run_biencoder(
args, biencoder, dataloader, candidate_encoding, samples=samples,
num_cand_mentions=args.num_cand_mentions, num_cand_entities=args.num_cand_entities,
device="cpu" if biencoder_params["no_cuda"] else "cuda",
threshold=mention_threshold, indexer=indexer,
)
end_time = time.time()
if logger: logger.info("Finished running biencoder")
runtime = end_time - start_time
if getattr(args, 'save_preds_dir', None):
_save_biencoder_outs(
args.save_preds_dir, nns, dists, pred_mention_bounds, cand_scores, mention_scores, runtime,
)
else:
nns, dists, pred_mention_bounds, cand_scores, mention_scores, runtime = _load_biencoder_outs(args.save_preds_dir)
assert len(samples) == len(nns) == len(dists) == len(pred_mention_bounds) == len(cand_scores) == len(mention_scores)
(
all_entity_preds, num_correct_weak, num_correct_strong, num_predicted, num_gold,
num_correct_weak_from_input_window, num_correct_strong_from_input_window, num_gold_from_input_window,
) = get_predictions(
args, dataloader, biencoder_params,
samples, nns, dists, mention_scores, cand_scores,
pred_mention_bounds, id2title, threshold=threshold,
mention_threshold=mention_threshold,
)
print("*--------*")
if num_gold > 0:
print("WEAK MATCHING")
display_metrics(num_correct_weak, num_predicted, num_gold)
print("Just entities within input window...")
display_metrics(num_correct_weak_from_input_window, num_predicted, num_gold_from_input_window)
print("*--------*")
print("STRONG MATCHING")
display_metrics(num_correct_strong, num_predicted, num_gold)
print("Just entities within input window...")
display_metrics(num_correct_strong_from_input_window, num_predicted, num_gold_from_input_window)
print("*--------*")
print("biencoder runtime = {}".format(runtime))
print("*--------*")
return all_entity_preds
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--debug_biencoder", "-db", action="store_true", default=False, help="Debug biencoder"
)
# evaluation mode
parser.add_argument(
"--get_predictions", "-p", action="store_true", default=False, help="Getting predictions mode. Does not filter at crossencoder step."
)
parser.add_argument(
"--interactive", "-i", action="store_true", help="Interactive mode."
)
# test_data
parser.add_argument(
"--test_mentions", dest="test_mentions", type=str, help="Test Dataset."
)
parser.add_argument(
"--test_entities", dest="test_entities", type=str, help="Test Entities.",
default="models/entity.jsonl", # ALL WIKIPEDIA!
)
parser.add_argument(
"--save_preds_dir", type=str, help="Directory to save model predictions to."
)
parser.add_argument(
"--mention_threshold", type=str, default=None,
dest="mention_threshold",
help="Used if threshold type is `top_entity_by_mention`. "
"Threshold for mention score, for which mentions will be pruned if they fall under that threshold. "
"Set to '-inf' to get all mentions."
)
parser.add_argument(
"--threshold", type=str, default="-4.5",
dest="threshold",
help="Threshold for final joint score, for which examples will be pruned if they fall under that threshold. "
"Set to `-inf` to get all entities."
)
parser.add_argument(
"--num_cand_mentions", type=int, default=50, help="Number of mention candidates to consider per example (at most)"
)
parser.add_argument(
"--num_cand_entities", type=int, default=10, help="Number of entity candidates to consider per mention (at most)"
)
parser.add_argument(
"--threshold_type", type=str, default="joint",
choices=["joint", "top_entity_by_mention"],
help="How to threshold the final candidates. "
"`top_entity_by_mention`: get top candidate (with entity score) for each predicted mention bound. "
"`joint`: by thresholding joint score."
)
# biencoder
parser.add_argument(
"--biencoder_model",
dest="biencoder_model",
type=str,
default="models/elq_wiki_large.bin",
help="Path to the biencoder model.",
)
parser.add_argument(
"--biencoder_config",
dest="biencoder_config",
type=str,
default="models/elq_large_params.txt",
help="Path to the biencoder configuration.",
)
parser.add_argument(
"--cand_token_ids_path",
dest="cand_token_ids_path",
type=str,
default="models/entity_token_ids_128.t7", # ALL WIKIPEDIA!
help="Path to tokenized entity catalogue",
)
parser.add_argument(
"--entity_catalogue",
dest="entity_catalogue",
type=str,
default="models/entity.jsonl", # ALL WIKIPEDIA!
help="Path to the entity catalogue.",
)
parser.add_argument(
"--entity_encoding",
dest="entity_encoding",
type=str,
default="models/all_entities_large.t7", # ALL WIKIPEDIA!
help="Path to the entity catalogue.",
)
parser.add_argument(
"--eval_batch_size",
dest="eval_batch_size",
type=int,
default=8,
help="Crossencoder's batch size for evaluation",
)
parser.add_argument(
"--faiss_index",
dest="faiss_index",
type=str,
default="hnsw",
choices=["hnsw", "flat", "ivfflat", "none"],
help="whether to use faiss index",
)
parser.add_argument(
"--index_path",
dest="index_path",
type=str,
default="models/faiss_hnsw_index.pkl",
help="path to load indexer",
)
parser.add_argument(
"--max_context_length",
dest="max_context_length",
type=int,
help="Maximum length of context. (Don't set to inherit from training config)",
)
# output folder
parser.add_argument(
"--output_path",
dest="output_path",
type=str,
default="output",
help="Path to the output.",
)
parser.add_argument(
"--use_cuda", dest="use_cuda", action="store_true", default=False, help="run on gpu"
)
parser.add_argument(
"--no_logger", dest="no_logger", action="store_true", default=False, help="don't log progress"
)
args = parser.parse_args()
logger = None
if not args.no_logger:
logger = utils.get_logger(args.output_path)
logger.setLevel(10)
models = load_models(args, logger)
run(args, logger, *models)
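# Example invocation (interactive mode; all flags are defined above and model/index
# paths default to the values in the argparse setup -- adjust for your checkout):
#   python elq/main_dense.py -i --num_cand_mentions 50 --num_cand_entities 10 \
#       --threshold_type joint --threshold=-4.5 --use_cuda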
|
BLINK-main
|
elq/main_dense.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import logging
import numpy
import os
import time
import torch
from elq.index.faiss_indexer import DenseFlatIndexer, DenseIVFFlatIndexer, DenseHNSWFlatIndexer
import elq.candidate_ranking.utils as utils
logger = utils.get_logger()
def main(params):
output_path = params["output_path"]
logger.info("Loading candidate encoding from path: %s" % params["candidate_encoding"])
candidate_encoding = torch.load(params["candidate_encoding"])
vector_size = candidate_encoding.size(1)
index_buffer = params["index_buffer"]
if params["faiss_index"] == "hnsw":
logger.info("Using HNSW index in FAISS")
index = DenseHNSWFlatIndexer(vector_size, index_buffer)
elif params["faiss_index"] == "ivfflat":
logger.info("Using IVF Flat index in FAISS")
index = DenseIVFFlatIndexer(vector_size, 75, 100)
else:
logger.info("Using Flat index in FAISS")
index = DenseFlatIndexer(vector_size, index_buffer)
logger.info("Building index.")
index.index_data(candidate_encoding.numpy())
logger.info("Done indexing data.")
if params.get("save_index", None):
index.serialize(output_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
"--output_path",
required=True,
type=str,
help="output file path",
)
parser.add_argument(
"--candidate_encoding",
default="models/all_entities_large.t7",
type=str,
help="file path for candidte encoding.",
)
parser.add_argument(
"--faiss_index", type=str, choices=["hnsw", "flat", "ivfflat"],
help='Which faiss index to use',
)
parser.add_argument(
"--save_index", action='store_true',
help='If enabled, save index',
)
parser.add_argument(
'--index_buffer', type=int, default=50000,
help="Temporal memory data buffer size (in samples) for indexer",
)
params = parser.parse_args()
params = params.__dict__
main(params)
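# Example invocation (the candidate encoding path is the default above; adjust as needed):
#   python elq/build_faiss_index.py --output_path models/faiss_hnsw_index.pkl \
#       --faiss_index hnsw --save_index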
|
BLINK-main
|
elq/build_faiss_index.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Code partially adopted from https://github.com/allenai/allennlp
#
from typing import Any, Dict, List, Optional, Sequence, Tuple, TypeVar, Union
import torch
def get_device_of(tensor: torch.Tensor) -> int:
"""
Returns the device of the tensor.
"""
if not tensor.is_cuda:
return -1
else:
return tensor.get_device()
def get_range_vector(size: int, device: int) -> torch.Tensor:
"""
Returns a range vector with the desired size, starting at 0. The CUDA implementation
is meant to avoid copy data from CPU to GPU.
"""
if device > -1:
return torch.cuda.LongTensor(size, device=device).fill_(1).cumsum(0) - 1
else:
return torch.arange(0, size, dtype=torch.long)
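# Minimal sketch: on CPU (device == -1) this is just torch.arange, e.g.
#   get_range_vector(4, device=-1) -> tensor([0, 1, 2, 3])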
def flatten_and_batch_shift_indices(indices: torch.Tensor, sequence_length: int) -> torch.Tensor:
"""
This is a subroutine for [`batched_index_select`](./util.md#batched_index_select).
The given `indices` of size `(batch_size, d_1, ..., d_n)` indexes into dimension 2 of a
target tensor, which has size `(batch_size, sequence_length, embedding_size)`. This
function returns a vector that correctly indexes into the flattened target. The sequence
length of the target must be provided to compute the appropriate offsets.
```python
indices = torch.ones([2,3], dtype=torch.long)
# Sequence length of the target tensor.
sequence_length = 10
shifted_indices = flatten_and_batch_shift_indices(indices, sequence_length)
# Indices into the second element in the batch are correctly shifted
# to take into account that the target tensor will be flattened before
# the indices are applied.
assert shifted_indices == [1, 1, 1, 11, 11, 11]
```
# Parameters
indices : `torch.LongTensor`, required.
sequence_length : `int`, required.
The length of the sequence the indices index into.
This must be the second dimension of the tensor.
# Returns
offset_indices : `torch.LongTensor`
"""
# Shape: (batch_size)
if torch.max(indices) >= sequence_length or torch.min(indices) < 0:
raise IndexError(
f"All elements in indices should be in range (0, {sequence_length - 1})"
)
offsets = get_range_vector(indices.size(0), get_device_of(indices)) * sequence_length
for _ in range(len(indices.size()) - 1):
offsets = offsets.unsqueeze(1)
# Shape: (batch_size, d_1, ..., d_n)
offset_indices = indices + offsets
# Shape: (batch_size * d_1 * ... * d_n)
offset_indices = offset_indices.view(-1)
return offset_indices
def batched_index_select(
target: torch.Tensor,
indices: torch.LongTensor,
flattened_indices: Optional[torch.LongTensor] = None,
) -> torch.Tensor:
"""
The given `indices` of size `(batch_size, d_1, ..., d_n)` indexes into the sequence
dimension (dimension 2) of the target, which has size `(batch_size, sequence_length,
embedding_size)`.
This function returns selected values in the target with respect to the provided indices, which
have size `(batch_size, d_1, ..., d_n, embedding_size)`. This can use the optionally
precomputed `flattened_indices` with size `(batch_size * d_1 * ... * d_n)` if given.
An example use case of this function is looking up the start and end indices of spans in a
sequence tensor. This is used in the
[CoreferenceResolver](../models/coreference_resolution/coref.md). Model to select
contextual word representations corresponding to the start and end indices of mentions. The key
reason this can't be done with basic torch functions is that we want to be able to use look-up
tensors with an arbitrary number of dimensions (for example, in the coref model, we don't know
a-priori how many spans we are looking up).
# Parameters
target : `torch.Tensor`, required.
A 3 dimensional tensor of shape (batch_size, sequence_length, embedding_size).
This is the tensor to be indexed.
indices : `torch.LongTensor`
A tensor of shape (batch_size, ...), where each element is an index into the
`sequence_length` dimension of the `target` tensor.
flattened_indices : Optional[torch.Tensor], optional (default = None)
An optional tensor representing the result of calling `flatten_and_batch_shift_indices`
on `indices`. This is helpful in the case that the indices can be flattened once and
cached for many batch lookups.
# Returns
selected_targets : `torch.Tensor`
A tensor with shape [indices.size(), target.size(-1)] representing the embedded indices
extracted from the batch flattened target tensor.
"""
if flattened_indices is None:
# Shape: (batch_size * d_1 * ... * d_n)
        try:
            flattened_indices = flatten_and_batch_shift_indices(indices, target.size(1))
        except Exception:
            # debugging aid: print the offending tensors, then retry (which re-raises the same error)
            print("indices: {}".format(indices))
            print("target: {}".format(target))
            flattened_indices = flatten_and_batch_shift_indices(indices, target.size(1))
# Shape: (batch_size * sequence_length, embedding_size)
flattened_target = target.view(-1, target.size(-1))
# Shape: (batch_size * d_1 * ... * d_n, embedding_size)
flattened_selected = flattened_target.index_select(0, flattened_indices)
selected_shape = list(indices.size()) + [target.size(-1)]
# Shape: (batch_size, d_1, ..., d_n, embedding_size)
selected_targets = flattened_selected.view(*selected_shape)
return selected_targets
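# Minimal usage sketch (illustrative tensors, assumed on CPU; not part of the
# original AllenNLP utility):
#   >>> target = torch.arange(30, dtype=torch.float).view(2, 5, 3)
#   >>> indices = torch.tensor([[0, 4], [1, 2]])
#   >>> batched_index_select(target, indices).shape
#   torch.Size([2, 2, 3])
# Each row of `indices` picks positions along the sequence dimension of the
# corresponding batch element, so the batch layout is preserved.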
def batched_span_select(target: torch.Tensor, spans: torch.LongTensor) -> torch.Tensor:
"""
The given `spans` of size `(batch_size, num_spans, 2)` indexes into the sequence
dimension (dimension 2) of the target, which has size `(batch_size, sequence_length,
embedding_size)`.
This function returns segmented spans in the target with respect to the provided span indices.
It does not guarantee element order within each span.
# Parameters
target : `torch.Tensor`, required.
A 3 dimensional tensor of shape (batch_size, sequence_length, embedding_size).
This is the tensor to be indexed.
indices : `torch.LongTensor`
A 3 dimensional tensor of shape (batch_size, num_spans, 2) representing start and end
indices (both inclusive) into the `sequence_length` dimension of the `target` tensor.
# Returns
span_embeddings : `torch.Tensor`
A tensor with shape (batch_size, num_spans, max_batch_span_width, embedding_size]
representing the embedded spans extracted from the batch flattened target tensor.
span_mask: `torch.BoolTensor`
A tensor with shape (batch_size, num_spans, max_batch_span_width) representing the mask on
the returned span embeddings.
"""
# both of shape (batch_size, num_spans, 1)
span_starts, span_ends = spans.split(1, dim=-1)
# shape (batch_size, num_spans, 1)
# These span widths are off by 1, because the span ends are `inclusive`.
span_widths = span_ends - span_starts
# We need to know the maximum span width so we can
# generate indices to extract the spans from the sequence tensor.
# These indices will then get masked below, such that if the length
# of a given span is smaller than the max, the rest of the values
# are masked.
max_batch_span_width = span_widths.max().item() + 1
# Shape: (1, 1, max_batch_span_width)
max_span_range_indices = get_range_vector(max_batch_span_width, get_device_of(target)).view(
1, 1, -1
)
# Shape: (batch_size, num_spans, max_batch_span_width)
# This is a broadcasted comparison - for each span we are considering,
# we are creating a range vector of size max_span_width, but masking values
# which are greater than the actual length of the span.
#
# We're using <= here (and for the mask below) because the span ends are
# inclusive, so we want to include indices which are equal to span_widths rather
# than using it as a non-inclusive upper bound.
span_mask = max_span_range_indices <= span_widths
raw_span_indices = span_ends - max_span_range_indices
# We also don't want to include span indices which are less than zero,
# which happens because some spans near the beginning of the sequence
# have an end index < max_batch_span_width, so we add this to the mask here.
span_mask = span_mask & (raw_span_indices >= 0)
span_indices = torch.nn.functional.relu(raw_span_indices.float()).long()
# Shape: (batch_size, num_spans, max_batch_span_width, embedding_dim)
span_embeddings = batched_index_select(target, span_indices)
return span_embeddings, span_mask
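# Minimal usage sketch (illustrative tensors, assumed on CPU):
#   >>> target = torch.arange(24, dtype=torch.float).view(1, 6, 4)
#   >>> spans = torch.tensor([[[0, 1], [2, 4]]])  # start/end are both inclusive
#   >>> emb, mask = batched_span_select(target, spans)
#   >>> emb.shape, mask.shape
#   (torch.Size([1, 2, 3, 4]), torch.Size([1, 2, 3]))
# The third dimension is padded out to the widest span in the batch; `mask`
# marks which of those positions belong to the actual span.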
|
BLINK-main
|
elq/biencoder/allennlp_span_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import json
import logging
import torch
from tqdm import tqdm, trange
from torch.utils.data import DataLoader, TensorDataset
from pytorch_transformers.tokenization_bert import BertTokenizer
from blink.biencoder.zeshel_utils import world_to_id
from elq.common.params import ENT_START_TAG, ENT_END_TAG, ENT_TITLE_TAG
def select_field_with_padding(data, key1, key2=None, pad_idx=-1):
max_len = 0
selected_list = []
padding_mask = []
for example in data:
if key2 is None:
selected_list.append(example[key1])
max_len = max(max_len, len(example[key1]))
else:
selected_list.append(example[key1][key2])
max_len = max(max_len, len(example[key1][key2]))
for i, entry in enumerate(selected_list):
# pad to max len
pad_list = [1 for _ in range(len(entry))] + [0 for _ in range(max_len - len(entry))]
selected_list[i] += [pad_idx for _ in range(max_len - len(entry))]
assert len(pad_list) == max_len
assert len(selected_list[i]) == max_len
padding_mask.append(pad_list)
return selected_list, padding_mask
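# Minimal sketch of the padding behaviour (hypothetical inputs):
#   >>> data = [{"ids": [1, 2, 3]}, {"ids": [4]}]
#   >>> select_field_with_padding(data, "ids", pad_idx=-1)
#   ([[1, 2, 3], [4, -1, -1]], [[1, 1, 1], [1, 0, 0]])
# The first return value is the padded field, the second a mask with 1 for real
# entries and 0 for padding.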
def select_field(data, key1, key2=None):
if key2 is None:
return [example[key1] for example in data]
else:
return [example[key1][key2] for example in data]
def get_context_representation_single_mention(
sample,
tokenizer,
max_seq_length,
mention_key="mention",
context_key="context",
ent_start_token=ENT_START_TAG,
ent_end_token=ENT_END_TAG,
add_mention_bounds=True,
):
mention_tokens = []
if sample[mention_key] and len(sample[mention_key]) > 0:
mention_tokens = tokenizer.tokenize(sample[mention_key])
to_subtract = 4 if add_mention_bounds else 2
if len(mention_tokens) > max_seq_length - to_subtract:
            # reserve 2 tokens for [CLS] and [SEP], plus 2 for ent_start and ent_end when mention bounds are added
mention_tokens = mention_tokens[:max_seq_length - to_subtract]
if add_mention_bounds:
mention_tokens = [ent_start_token] + mention_tokens + [ent_end_token]
context_left = sample[context_key + "_left"]
context_right = sample[context_key + "_right"]
context_left = tokenizer.tokenize(context_left)
context_right = tokenizer.tokenize(context_right)
left_quota = (max_seq_length - len(mention_tokens)) // 2 - 1
right_quota = max_seq_length - len(mention_tokens) - left_quota - 2
left_add = len(context_left)
right_add = len(context_right)
if left_add <= left_quota:
if right_add > right_quota:
right_quota += left_quota - left_add
else:
if right_add <= right_quota:
left_quota += right_quota - right_add
if left_quota <= 0:
context_left = []
if right_quota <= 0:
context_right = []
context_tokens = (
context_left[-left_quota:] + mention_tokens + context_right[:right_quota]
)
context_tokens = ["[CLS]"] + context_tokens + ["[SEP]"]
mention_idxs = [
len(context_left[-left_quota:]) + 1,
len(context_left[-left_quota:]) + len(mention_tokens) + 1,
]
input_ids = tokenizer.convert_tokens_to_ids(context_tokens)
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
assert len(input_ids) == max_seq_length
return {
"tokens": context_tokens,
"ids": input_ids,
"mention_idxs": mention_idxs,
}
def get_context_representation_multiple_mentions_left_right(
sample,
tokenizer,
max_seq_length,
mention_key="mention",
context_key="context",
ent_start_token=ENT_START_TAG,
ent_end_token=ENT_END_TAG,
):
all_mentions = sample[mention_key]
all_context_lefts = sample[context_key + "_left"]
all_context_rights = sample[context_key + "_right"]
if len(all_mentions[0]) == 0 and len(all_context_lefts[0]) == 0 and len(all_context_rights[0]) == 0: # passed in empty string
context_tokens = ["[CLS]", "[SEP]"]
input_ids = tokenizer.convert_tokens_to_ids(context_tokens)
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
assert len(input_ids) == max_seq_length
return {
"tokens": context_tokens,
"ids": input_ids,
"mention_idxs": [],
}
mention_tokens = []
for mention in all_mentions:
if mention and len(mention) > 0:
mention_token = tokenizer.tokenize(mention)
if len(mention_token) > max_seq_length - 2:
# -2 for [CLS] and [SEP]
mention_token = mention_token[:max_seq_length - 2]
mention_tokens.append(mention_token)
mention_idxs = []
assert len(all_context_lefts) == len(all_context_rights)
assert len(all_context_rights) == len(all_mentions)
context_tokens = None
for c in range(len(all_context_lefts)):
context_left = all_context_lefts[c]
context_right = all_context_rights[c]
context_left = tokenizer.tokenize(context_left)
context_right = tokenizer.tokenize(context_right)
left_quota = (max_seq_length - len(mention_tokens[c])) // 2 - 1
right_quota = max_seq_length - len(mention_tokens[c]) - left_quota - 2
left_add = len(context_left)
right_add = len(context_right)
if left_add <= left_quota:
if right_add > right_quota:
right_quota += left_quota - left_add
else:
if right_add <= right_quota:
left_quota += right_quota - right_add
if left_quota <= 0:
context_left = []
if right_quota <= 0:
context_right = []
context_tokens_itr = (
context_left[-left_quota:] + mention_tokens[c] + context_right[:right_quota]
)
context_tokens_itr = ["[CLS]"] + context_tokens_itr + ["[SEP]"]
if context_tokens is None:
context_tokens = context_tokens_itr
else:
try:
assert context_tokens == context_tokens_itr
except:
import pdb
pdb.set_trace()
mention_idxs.append([
len(context_left[-left_quota:]) + 1,
len(context_left[-left_quota:]) + len(mention_tokens[c]) + 1,
])
input_ids = tokenizer.convert_tokens_to_ids(context_tokens)
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
assert len(input_ids) == max_seq_length
return {
"tokens": context_tokens,
"ids": input_ids,
"mention_idxs": mention_idxs,
}
def sort_mentions(
lst, sort_map=None,
):
"""
sort_map: {orig_idx: idx in new "sorted" array}
"""
new_lst = [0 for _ in range(len(lst))]
for i in range(len(lst)):
new_lst[sort_map[i]] = lst[i]
return new_lst
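# Minimal sketch (hypothetical values): with sort_map = {0: 1, 1: 0, 2: 2}, the
# element at original index 0 moves to sorted position 1, and so on:
#   >>> sort_mentions(["b", "a", "c"], sort_map={0: 1, 1: 0, 2: 2})
#   ['a', 'b', 'c']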
def do_sort(
sample, orig_idx_to_sort_idx,
):
sample['mentions'] = sort_mentions(sample['mentions'], orig_idx_to_sort_idx)
sample['label_id'] = sort_mentions(sample['label_id'], orig_idx_to_sort_idx)
sample['wikidata_id'] = sort_mentions(sample['wikidata_id'], orig_idx_to_sort_idx)
sample['entity'] = sort_mentions(sample['entity'], orig_idx_to_sort_idx)
sample['label'] = sort_mentions(sample['label'], orig_idx_to_sort_idx)
def get_context_representation_multiple_mentions_idxs(
sample, tokenizer, max_seq_length,
mention_key, context_key, ent_start_token, ent_end_token,
):
'''
Also cuts out mentions beyond that context window
ASSUMES MENTION_IDXS ARE SORTED!!!!
Returns:
List of mention bounds that are [inclusive, exclusive) (make both inclusive later)
NOTE: 2nd index of mention bound may be outside of max_seq_length-range (must deal with later)
'''
mention_idxs = sample["tokenized_mention_idxs"]
input_ids = sample["tokenized_text_ids"]
# sort mentions / entities / everything associated
# [[orig_index, [start, end]], ....] --> sort by start, then end
sort_tuples = [[i[0], i[1]] for i in sorted(enumerate(mention_idxs), key=lambda x:(x[1][0], x[1][1]))]
if [tup[1] for tup in sort_tuples] != mention_idxs:
orig_idx_to_sort_idx = {itm[0]: i for i, itm in enumerate(sort_tuples)}
assert [tup[1] for tup in sort_tuples] == sort_mentions(mention_idxs, orig_idx_to_sort_idx)
mention_idxs = [tup[1] for tup in sort_tuples]
sample['tokenized_mention_idxs'] = mention_idxs
do_sort(sample, orig_idx_to_sort_idx)
# TODO SORT EVERYTHING
# fit leftmost mention, then all of the others that can reasonably fit...
all_mention_spans_range = [mention_idxs[0][0], mention_idxs[-1][1]]
while all_mention_spans_range[1] - all_mention_spans_range[0] + 2 > max_seq_length:
if len(mention_idxs) == 1:
# don't cut further
assert mention_idxs[0][1] - mention_idxs[0][0] + 2 > max_seq_length
# truncate mention
mention_idxs[0][1] = max_seq_length + mention_idxs[0][0] - 2
else:
# cut last mention
mention_idxs = mention_idxs[:len(mention_idxs) - 1]
all_mention_spans_range = [mention_idxs[0][0], mention_idxs[-1][1]]
context_left = input_ids[:all_mention_spans_range[0]]
all_mention_tokens = input_ids[all_mention_spans_range[0]:all_mention_spans_range[1]]
context_right = input_ids[all_mention_spans_range[1]:]
left_quota = (max_seq_length - len(all_mention_tokens)) // 2 - 1
right_quota = max_seq_length - len(all_mention_tokens) - left_quota - 2
left_add = len(context_left)
right_add = len(context_right)
if left_add <= left_quota: # tokens left to add <= quota ON THE LEFT
if right_add > right_quota: # add remaining quota to right quota
right_quota += left_quota - left_add
else:
if right_add <= right_quota: # tokens left to add <= quota ON THE RIGHT
left_quota += right_quota - right_add # add remaining quota to left quota
if left_quota <= 0:
left_quota = -len(context_left) # cut entire list (context_left = [])
if right_quota <= 0:
right_quota = 0 # cut entire list (context_right = [])
input_ids_window = context_left[-left_quota:] + all_mention_tokens + context_right[:right_quota]
# shift mention_idxs
if len(input_ids) <= max_seq_length - 2:
try:
assert input_ids == input_ids_window
except:
import pdb
pdb.set_trace()
else:
assert input_ids != input_ids_window
cut_from_left = len(context_left) - len(context_left[-left_quota:])
if cut_from_left > 0:
# must shift mention_idxs
for c in range(len(mention_idxs)):
mention_idxs[c] = [
mention_idxs[c][0] - cut_from_left, mention_idxs[c][1] - cut_from_left,
]
input_ids_window = [101] + input_ids_window + [102]
tokens = tokenizer.convert_ids_to_tokens(input_ids_window)
# +1 for CLS token
mention_idxs = [[mention[0]+1, mention[1]+1] for mention in mention_idxs]
# input_ids = tokenizer.convert_tokens_to_ids(input_ids_window)
padding = [0] * (max_seq_length - len(input_ids_window))
input_ids_window += padding
assert len(input_ids_window) == max_seq_length
return {
"tokens": tokens,
"ids": input_ids_window,
"mention_idxs": mention_idxs,
# "pruned_ents": [1 for i in range(len(all_mentions)) if i < len(mention_idxs) else 0], # pruned last N entities, TODO change if changed
}
def get_candidate_representation(
candidate_desc,
tokenizer,
max_seq_length,
candidate_title=None,
title_tag=ENT_TITLE_TAG,
):
cls_token = tokenizer.cls_token
sep_token = tokenizer.sep_token
cand_tokens = tokenizer.tokenize(candidate_desc)
if candidate_title is not None:
title_tokens = tokenizer.tokenize(candidate_title)
cand_tokens = title_tokens + [title_tag] + cand_tokens
cand_tokens = cand_tokens[: max_seq_length - 2]
cand_tokens = [cls_token] + cand_tokens + [sep_token]
input_ids = tokenizer.convert_tokens_to_ids(cand_tokens)
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
assert len(input_ids) == max_seq_length
return {
"tokens": cand_tokens,
"ids": [input_ids],
}
def process_mention_data(
samples,
tokenizer,
max_context_length,
max_cand_length,
silent,
mention_key="mention",
context_key="context",
label_key="label",
title_key='label_title',
ent_start_token=ENT_START_TAG,
ent_end_token=ENT_END_TAG,
title_token=ENT_TITLE_TAG,
debug=False,
logger=None,
add_mention_bounds=True,
saved_context_dir=None,
candidate_token_ids=None,
params=None,
):
'''
Returns /inclusive/ bounds
'''
extra_ret_values = {}
if saved_context_dir is not None and os.path.exists(os.path.join(saved_context_dir, "tensor_tuple.pt")):
data = torch.load(os.path.join(saved_context_dir, "data.pt"))
tensor_data_tuple = torch.load(os.path.join(saved_context_dir, "tensor_tuple.pt"))
return data, tensor_data_tuple, extra_ret_values
if candidate_token_ids is None and not debug:
candidate_token_ids = torch.load(params["cand_token_ids_path"])
if logger: logger.info("Loaded saved entities info")
extra_ret_values["candidate_token_ids"] = candidate_token_ids
processed_samples = []
if debug:
samples = samples[:200]
if silent:
iter_ = samples
else:
iter_ = tqdm(samples)
use_world = True
ent_start_id = tokenizer.convert_tokens_to_ids(ent_start_token)
ent_end_id = tokenizer.convert_tokens_to_ids(ent_end_token)
cls_token_id = tokenizer.convert_tokens_to_ids("[CLS]")
sep_token_id = tokenizer.convert_tokens_to_ids("[SEP]")
for idx, sample in enumerate(iter_):
assert not add_mention_bounds, "Adding mention bounds, but we have multiple entities per example"
if context_key + "_left" in sample:
context_tokens = get_context_representation_multiple_mentions_left_right(
sample, tokenizer, max_context_length,
mention_key, context_key, ent_start_token, ent_end_token,
)
else:
context_tokens = get_context_representation_multiple_mentions_idxs(
sample, tokenizer, max_context_length,
mention_key, context_key, ent_start_token, ent_end_token,
)
for i in range(len(context_tokens["mention_idxs"])):
context_tokens["mention_idxs"][i][1] -= 1 # make bounds inclusive
label = sample[label_key]
title = sample.get(title_key)
label_ids = sample.get("label_id")
if label is None:
label = [None]
label_ids = [label_ids]
# remove those that got pruned off
if len(label) > len(context_tokens['mention_idxs']):
label = label[:len(context_tokens['mention_idxs'])]
label_ids = sample["label_id"][:len(context_tokens['mention_idxs'])]
if candidate_token_ids is not None:
token_ids = [[candidate_token_ids[label_id].tolist()] for label_id in label_ids]
label_tokens = {
"tokens": "",
"ids": token_ids,
}
elif not params["freeze_cand_enc"]:
label_tokens = [get_candidate_representation(
l, tokenizer, max_cand_length, title[i],
) for i, l in enumerate(label)]
label_tokens = {
k: [label_tokens[l][k] for l in range(len(label_tokens))]
for k in label_tokens[0]}
else:
label_tokens = None
if isinstance(sample["label_id"], list):
# multiple candidates
if len(sample["label_id"]) > len(context_tokens['mention_idxs']):
sample["label_id"] = sample["label_id"][:len(context_tokens['mention_idxs'])]
label_idx = [int(id) for id in sample["label_id"]]
else:
assert isinstance(sample["label_id"], int) or isinstance(sample["label_id"], str)
label_idx = int(sample["label_id"])
record = {
"context": context_tokens,
}
if not params["freeze_cand_enc"]:
record["label"] = label_tokens
record["label_idx"] = label_idx
if "world" in sample:
src = sample["world"]
src = world_to_id[src]
record["src"] = [src]
use_world = True
else:
use_world = False
processed_samples.append(record)
if debug and logger:
logger.info("====Processed samples: ====")
for sample in processed_samples[:5]:
logger.info("Context tokens : " + " ".join(sample["context"]["tokens"]))
logger.info(
"Context ids : " + " ".join([str(v) for v in sample["context"]["ids"]])
)
if not params["freeze_cand_encs"]:
logger.info("Label tokens : " + " ".join(sample["label"]["tokens"]))
logger.info(
"Label ids : " + " ".join([str(v) for v in sample["label"]["ids"]])
)
logger.info("Label_id : %d" % sample["label_idx"])
if use_world:
logger.info("Src : %d" % sample["src"][0])
context_vecs = torch.tensor(
select_field(processed_samples, "context", "ids"), dtype=torch.long,
)
if logger:
logger.info("Created context IDs vector")
if isinstance(processed_samples[0]["context"]["mention_idxs"][0], int):
mention_idx_vecs = torch.tensor(
select_field(processed_samples, "context", "mention_idxs"), dtype=torch.long,
).unsqueeze(1)
mention_idx_mask = torch.ones(mention_idx_vecs.size(0), dtype=torch.bool).unsqueeze(-1)
if logger:
logger.info("Created mention positions vector")
if not params["freeze_cand_enc"]:
cand_vecs = torch.tensor(
select_field(processed_samples, "label", "ids"), dtype=torch.long,
)
if logger:
logger.info("Created candidate IDs vector")
label_idx = torch.tensor(
select_field(processed_samples, "label_idx"), dtype=torch.long,
).unsqueeze(-1)
if logger:
logger.info("Created label IDXs vector")
else:
mention_idx_vecs, mention_idx_mask = select_field_with_padding(
processed_samples, "context", "mention_idxs", pad_idx=[0,1], #ensure is a well-formed span
)
# (bs, max_num_spans, 2)
mention_idx_vecs = torch.tensor(mention_idx_vecs, dtype=torch.long)
# (bs, max_num_spans)
mention_idx_mask = torch.tensor(mention_idx_mask, dtype=torch.bool)
if not params["freeze_cand_enc"]:
cand_vecs, cand_mask = select_field_with_padding(
processed_samples, "label", "ids", pad_idx=[[0 for _ in range(max_cand_length)]],
)
# (bs, max_num_spans, 1, max_cand_length)
cand_vecs = torch.tensor(cand_vecs, dtype=torch.long)
cand_mask = torch.tensor(cand_mask, dtype=torch.bool)
assert (cand_mask == mention_idx_mask).all() or cand_mask.all()
if logger:
logger.info("Created candidate IDs vector")
else:
cand_vecs = torch.Tensor(context_vecs.size())
label_idx_vecs, label_idx_mask = select_field_with_padding(processed_samples, "label_idx", pad_idx=-1)
# (bs, max_num_spans)
label_idx = torch.tensor(label_idx_vecs, dtype=torch.long)
label_idx_mask = torch.tensor(label_idx_mask, dtype=torch.bool)
assert (label_idx_mask == mention_idx_mask).all() or label_idx_mask.all()
if logger:
logger.info("Created label IDXs vector")
# mention_idx_vecs: (bs, max_num_spans, 2), mention_idx_mask: (bs, max_num_spans)
assert len(mention_idx_vecs.size()) == 3
# prune mention_idx_vecs to max_context_length
mention_idx_vecs[mention_idx_vecs >= max_context_length] = (max_context_length - 1)
if use_world:
src_vecs = torch.tensor(
select_field(processed_samples, "src"), dtype=torch.long,
)
if logger:
logger.info("Created source vector")
data = {
"context_vecs": context_vecs,
"mention_idx_vecs": mention_idx_vecs,
"cand_vecs": cand_vecs,
"label_idx": label_idx,
}
if use_world:
data["src"] = src_vecs
tensor_data_tuple = (context_vecs, cand_vecs, src_vecs, label_idx, mention_idx_vecs, mention_idx_mask)
else:
tensor_data_tuple = (context_vecs, cand_vecs, label_idx, mention_idx_vecs, mention_idx_mask)
# save data
if saved_context_dir is not None and not os.path.exists(os.path.join(saved_context_dir, "tensor_tuple.pt")):
os.makedirs(saved_context_dir, exist_ok=True)
torch.save(data, os.path.join(saved_context_dir, "data.pt"))
torch.save(tensor_data_tuple, os.path.join(saved_context_dir, "tensor_tuple.pt"))
return data, tensor_data_tuple, extra_ret_values
|
BLINK-main
|
elq/biencoder/data_process.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import numpy as np
def batch_reshape_mask_left(
input_t, selected, pad_idx=0, left_align_mask=None
):
"""
    Left-aligns all "selected" values in input_t, which is a batch of examples.
- input_t: >=2D tensor (N, M, *)
- selected: 2D torch.Bool tensor, 2 dims same size as first 2 dims of `input_t` (N, M)
- pad_idx represents the padding to be used in the output
- left_align_mask: if already precomputed, pass the alignment mask in
(mask on the output, corresponding to `selected` on the input)
Example:
input_t = [[1,2,3,4],[5,6,7,8]]
selected = [[0,1,0,1],[1,1,0,1]]
output = [[2,4,0],[5,6,8]]
"""
batch_num_selected = selected.sum(1)
max_num_selected = batch_num_selected.max()
# (bsz, 2)
repeat_freqs = torch.stack([batch_num_selected, max_num_selected - batch_num_selected], dim=-1)
# (bsz x 2,)
repeat_freqs = repeat_freqs.view(-1)
if left_align_mask is None:
# (bsz, 2)
left_align_mask = torch.zeros(input_t.size(0), 2).to(input_t.device).bool()
left_align_mask[:,0] = 1
# (bsz x 2,): [1,0,1,0,...]
left_align_mask = left_align_mask.view(-1)
        # (bsz x max_num_selected,): [1 x repeat_freqs[0], 0 x (max_num_selected - repeat_freqs[0]), 1 x repeat_freqs[1], 0 x (max_num_selected - repeat_freqs[1]), ...]
left_align_mask = left_align_mask.repeat_interleave(repeat_freqs)
# (bsz, max_num_selected)
left_align_mask = left_align_mask.view(-1, max_num_selected)
# reshape to (bsz, max_num_selected, *)
input_reshape = torch.Tensor(left_align_mask.size() + input_t.size()[2:]).to(input_t.device, input_t.dtype).fill_(pad_idx)
input_reshape[left_align_mask] = input_t[selected]
# (bsz, max_num_selected, *); (bsz, max_num_selected)
return input_reshape, left_align_mask
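# A runnable version of the docstring example above (tensors assumed on CPU):
#   >>> inp = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]])
#   >>> sel = torch.tensor([[0, 1, 0, 1], [1, 1, 0, 1]]).bool()
#   >>> out, mask = batch_reshape_mask_left(inp, sel)
#   >>> out.tolist()
#   [[2, 4, 0], [5, 6, 8]]
#   >>> mask.tolist()
#   [[True, True, False], [True, True, True]]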
|
BLINK-main
|
elq/biencoder/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import argparse
import faiss
import pickle
import torch
import json
import sys
import io
import random
import time
import traceback
import numpy as np
from scipy.special import softmax, expit
import torch.nn.functional as F
from multiprocessing.pool import ThreadPool
from tqdm import tqdm, trange
from collections import OrderedDict
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from pytorch_transformers.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from pytorch_transformers.optimization import WarmupLinearSchedule
from pytorch_transformers.tokenization_bert import BertTokenizer
from elq.biencoder.biencoder import BiEncoderRanker
from elq.vcg_utils.measures import entity_linking_tp_with_overlap
import logging
import elq.candidate_ranking.utils as utils
from elq.biencoder.data_process import process_mention_data
from blink.biencoder.zeshel_utils import DOC_PATH, WORLDS, world_to_id
from blink.common.optimizer import get_bert_optimizer
from elq.common.params import ElqParser
from elq.index.faiss_indexer import DenseFlatIndexer, DenseHNSWFlatIndexer, DenseIVFFlatIndexer
logger = None
np.random.seed(1234) # reproducible for FAISS indexer
def evaluate(
reranker, eval_dataloader, params, device, logger,
cand_encs=None, faiss_index=None,
get_losses=False,
):
reranker.model.eval()
if params["silent"]:
iter_ = eval_dataloader
else:
iter_ = tqdm(eval_dataloader, desc="Evaluation")
results = {}
eval_num_correct = 0.0
eval_num_p = 0.0
eval_num_g = 0.0
nb_eval_examples = 0
nb_eval_steps = 0
overall_loss = 0.0
if cand_encs is not None and not params["freeze_cand_enc"]:
torch.cuda.empty_cache()
cand_encs = cand_encs.to(device)
for step, batch in enumerate(iter_):
batch = tuple(t.to(device) for t in batch)
context_input = batch[0]
candidate_input = batch[1]
# (bs, num_actual_spans)
label_ids = batch[2].cpu().numpy() if params["freeze_cand_enc"] else None
if params["debug"] and label_ids is not None:
label_ids[label_ids > 199] = 199
mention_idx = batch[-2].cpu().numpy()
mention_idx_mask = batch[-1].cpu().numpy()
with torch.no_grad():
# evaluate with joint mention detection
if params["freeze_cand_enc"]:
context_outs = reranker.encode_context(
context_input,
num_cand_mentions=50,
topK_threshold=-3.5,
)
embedding_context = context_outs['mention_reps'].cpu().numpy()
pred_mention_mask = context_outs['mention_masks'].cpu().numpy()
chosen_mention_bounds = context_outs['mention_bounds'].cpu().numpy()
embedding_ctxt = embedding_context[pred_mention_mask]
# do faiss search for closest entity
# DIM (all_pred_mentions_batch, num_cand_entities); (all_pred_mentions_batch, num_cand_entities)
top_cand_logits_shape, top_cand_indices_shape = faiss_index.search_knn(embedding_ctxt, 10)
                top_cand_logits = np.zeros((pred_mention_mask.shape[0], pred_mention_mask.shape[1], 10), dtype=float)
                top_cand_indices = np.zeros_like(pred_mention_mask, dtype=int)
top_cand_logits[pred_mention_mask] = top_cand_logits_shape
top_cand_indices[pred_mention_mask] = top_cand_indices_shape[:,0]
scores = (np.log(softmax(top_cand_logits, -1)) + torch.sigmoid(context_outs['mention_logits'].unsqueeze(-1)).log().cpu().numpy())[:,:,0]
tmp_num_correct = 0.0
tmp_num_p = 0.0
tmp_num_g = 0.0
for i, ex in enumerate(top_cand_indices):
gold_mb = mention_idx[i][mention_idx_mask[i]]
gold_label_ids = label_ids[i][mention_idx_mask[i]]
overall_score_mask = scores[i][pred_mention_mask[i]] > -2.5
pred_mb = chosen_mention_bounds[i][pred_mention_mask[i]][overall_score_mask]
pred_label_ids = ex[pred_mention_mask[i]][overall_score_mask]
gold_triples = [(str(gold_label_ids[j]), gold_mb[j][0], gold_mb[j][1]) for j in range(len(gold_mb))]
pred_triples = [(str(pred_label_ids[j]), pred_mb[j][0], pred_mb[j][1]) for j in range(len(pred_mb))]
num_overlap_weak, _ = entity_linking_tp_with_overlap(gold_triples, pred_triples)
tmp_num_correct += num_overlap_weak
tmp_num_p += float(len(pred_triples))
tmp_num_g += float(len(gold_triples))
text_encs = embedding_context
else:
loss, logits, mention_logits, mention_bounds = reranker(
context_input, candidate_input,
cand_encs=cand_encs,
gold_mention_bounds=batch[-2],
gold_mention_bounds_mask=batch[-1],
return_loss=True,
)
logits = logits.cpu().numpy()
# Using in-batch negatives, the label ids are diagonal
label_ids = torch.LongTensor(torch.arange(logits.shape[0]))
label_ids = label_ids.cpu().numpy()
tmp_num_correct = utils.accuracy(logits, label_ids)
tmp_num_p = len(batch[-2][batch[-1]])
tmp_num_g = len(batch[-2][batch[-1]])
overall_loss += loss
eval_num_correct += tmp_num_correct
eval_num_p += tmp_num_p
eval_num_g += tmp_num_g
nb_eval_steps += 1
if cand_encs is not None:
cand_encs = cand_encs.to("cpu")
torch.cuda.empty_cache()
if nb_eval_steps > 0 and overall_loss > 0:
normalized_overall_loss = overall_loss / nb_eval_steps
logger.info("Overall loss: %.5f" % normalized_overall_loss)
if eval_num_p > 0:
normalized_eval_p = eval_num_correct / eval_num_p
else:
normalized_eval_p = 0.0
if eval_num_g > 0:
normalized_eval_r = eval_num_correct / eval_num_g
else:
normalized_eval_r = 0.0
logger.info("Precision: %.5f" % normalized_eval_p)
logger.info("Recall: %.5f" % normalized_eval_r)
if normalized_eval_p + normalized_eval_r == 0:
f1 = 0
else:
f1 = 2 * normalized_eval_p * normalized_eval_r / (normalized_eval_p + normalized_eval_r)
logger.info("F1: %.5f" % f1)
results["normalized_f1"] = f1
return results
def get_optimizer(model, params):
return get_bert_optimizer(
[model],
params["type_optimization"],
params["learning_rate"],
fp16=params.get("fp16"),
)
def get_scheduler(params, optimizer, len_train_data, logger):
batch_size = params["train_batch_size"]
grad_acc = params["gradient_accumulation_steps"]
epochs = params["num_train_epochs"]
num_train_steps = int(len_train_data / batch_size / grad_acc) * epochs
num_warmup_steps = int(num_train_steps * params["warmup_proportion"])
scheduler = WarmupLinearSchedule(
optimizer, warmup_steps=num_warmup_steps, t_total=num_train_steps,
)
logger.info(" Num optimization steps = %d" % num_train_steps)
logger.info(" Num warmup steps = %d", num_warmup_steps)
return scheduler
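# Worked example (hypothetical sizes): with 10,000 training examples, batch size 32,
# gradient accumulation 2 and 5 epochs, num_train_steps = int(10000 / 32 / 2) * 5 = 780,
# and the warmup lasts warmup_proportion * 780 steps.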
def main(params):
model_output_path = params["output_path"]
if not os.path.exists(model_output_path):
os.makedirs(model_output_path)
logger = utils.get_logger(params["output_path"])
# Init model
reranker = BiEncoderRanker(params)
tokenizer = reranker.tokenizer
model = reranker.model
device = reranker.device
n_gpu = reranker.n_gpu
if params["gradient_accumulation_steps"] < 1:
raise ValueError(
"Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
params["gradient_accumulation_steps"]
)
)
    # An effective batch size of `x`, when accumulating the gradient across `y` batches, is achieved with a per-step batch size of `z = x / y`
params["train_batch_size"] = (
params["train_batch_size"] // params["gradient_accumulation_steps"]
)
train_batch_size = params["train_batch_size"]
eval_batch_size = params["eval_batch_size"]
grad_acc_steps = params["gradient_accumulation_steps"]
# Fix the random seeds
seed = params["seed"]
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if reranker.n_gpu > 0:
torch.cuda.manual_seed_all(seed)
# Load train data
train_samples = utils.read_dataset("train", params["data_path"])
logger.info("Read %d train samples." % len(train_samples))
logger.info("Finished reading all train samples")
# Load eval data
try:
valid_samples = utils.read_dataset("valid", params["data_path"])
except FileNotFoundError:
valid_samples = utils.read_dataset("dev", params["data_path"])
    # MUST BE DIVISIBLE BY n_gpus
if len(valid_samples) > 1024:
valid_subset = 1024
else:
valid_subset = len(valid_samples) - len(valid_samples) % torch.cuda.device_count()
logger.info("Read %d valid samples, choosing %d subset" % (len(valid_samples), valid_subset))
valid_data, valid_tensor_data, extra_ret_values = process_mention_data(
samples=valid_samples[:valid_subset], # use subset of valid data
tokenizer=tokenizer,
max_context_length=params["max_context_length"],
max_cand_length=params["max_cand_length"],
context_key=params["context_key"],
title_key=params["title_key"],
silent=params["silent"],
logger=logger,
debug=params["debug"],
        add_mention_bounds=(not params["no_mention_bounds"]),
candidate_token_ids=None,
params=params,
)
candidate_token_ids = extra_ret_values["candidate_token_ids"]
valid_tensor_data = TensorDataset(*valid_tensor_data)
valid_sampler = SequentialSampler(valid_tensor_data)
valid_dataloader = DataLoader(
valid_tensor_data, sampler=valid_sampler, batch_size=eval_batch_size
)
# load candidate encodings
cand_encs = None
cand_encs_index = None
if params["freeze_cand_enc"]:
cand_encs = torch.load(params['cand_enc_path'])
logger.info("Loaded saved entity encodings")
if params["debug"]:
cand_encs = cand_encs[:200]
# build FAISS index
cand_encs_index = DenseHNSWFlatIndexer(1)
cand_encs_index.deserialize_from(params['index_path'])
logger.info("Loaded FAISS index on entity encodings")
num_neighbors = 10
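        # number of nearest neighbours retrieved from the FAISS index for each predicted mention;
        # these retrieved entities are used as hard negatives during adversarial training below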
# evaluate before training
results = evaluate(
reranker, valid_dataloader, params,
cand_encs=cand_encs, device=device,
logger=logger, faiss_index=cand_encs_index,
)
number_of_samples_per_dataset = {}
time_start = time.time()
utils.write_to_file(
os.path.join(model_output_path, "training_params.txt"), str(params)
)
logger.info("Starting training")
logger.info(
"device: {} n_gpu: {}, distributed training: {}".format(device, n_gpu, False)
)
num_train_epochs = params["num_train_epochs"]
if params["dont_distribute_train_samples"]:
num_samples_per_batch = len(train_samples)
train_data, train_tensor_data_tuple, extra_ret_values = process_mention_data(
samples=train_samples,
tokenizer=tokenizer,
max_context_length=params["max_context_length"],
max_cand_length=params["max_cand_length"],
context_key=params["context_key"],
title_key=params["title_key"],
silent=params["silent"],
logger=logger,
debug=params["debug"],
            add_mention_bounds=(not params["no_mention_bounds"]),
candidate_token_ids=candidate_token_ids,
params=params,
)
logger.info("Finished preparing training data")
else:
num_samples_per_batch = len(train_samples) // num_train_epochs
trainer_path = params.get("path_to_trainer_state", None)
optimizer = get_optimizer(model, params)
scheduler = get_scheduler(
params, optimizer, num_samples_per_batch,
logger
)
if trainer_path is not None and os.path.exists(trainer_path):
training_state = torch.load(trainer_path)
optimizer.load_state_dict(training_state["optimizer"])
scheduler.load_state_dict(training_state["scheduler"])
logger.info("Loaded saved training state")
model.train()
best_epoch_idx = -1
best_score = -1
logger.info("Num samples per batch : %d" % num_samples_per_batch)
for epoch_idx in trange(params["last_epoch"] + 1, int(num_train_epochs), desc="Epoch"):
tr_loss = 0
results = None
if not params["dont_distribute_train_samples"]:
start_idx = epoch_idx * num_samples_per_batch
end_idx = (epoch_idx + 1) * num_samples_per_batch
train_data, train_tensor_data_tuple, extra_ret_values = process_mention_data(
samples=train_samples[start_idx:end_idx],
tokenizer=tokenizer,
max_context_length=params["max_context_length"],
max_cand_length=params["max_cand_length"],
context_key=params["context_key"],
title_key=params["title_key"],
silent=params["silent"],
logger=logger,
debug=params["debug"],
                add_mention_bounds=(not params["no_mention_bounds"]),
candidate_token_ids=candidate_token_ids,
params=params,
)
logger.info("Finished preparing training data for epoch {}: {} samples".format(epoch_idx, len(train_tensor_data_tuple[0])))
batch_train_tensor_data = TensorDataset(
*list(train_tensor_data_tuple)
)
if params["shuffle"]:
train_sampler = RandomSampler(batch_train_tensor_data)
else:
train_sampler = SequentialSampler(batch_train_tensor_data)
train_dataloader = DataLoader(
batch_train_tensor_data, sampler=train_sampler, batch_size=train_batch_size
)
if params["silent"]:
iter_ = train_dataloader
else:
iter_ = tqdm(train_dataloader, desc="Batch")
for step, batch in enumerate(iter_):
batch = tuple(t.to(device) for t in batch)
context_input = batch[0]
candidate_input = batch[1]
label_ids = batch[2] if params["freeze_cand_enc"] else None
mention_idxs = batch[-2]
mention_idx_mask = batch[-1]
if params["debug"] and label_ids is not None:
label_ids[label_ids > 199] = 199
cand_encs_input = None
label_input = None
mention_reps_input = None
mention_logits = None
mention_bounds = None
hard_negs_mask = None
if params["adversarial_training"]:
assert cand_encs is not None and label_ids is not None # due to params["freeze_cand_enc"] being set
'''
GET CLOSEST N CANDIDATES (AND APPROPRIATE LABELS)
'''
# (bs, num_spans, embed_size)
pos_cand_encs_input = cand_encs[label_ids.to("cpu")]
pos_cand_encs_input[~mention_idx_mask] = 0
context_outs = reranker.encode_context(
context_input, gold_mention_bounds=mention_idxs,
gold_mention_bounds_mask=mention_idx_mask,
get_mention_scores=True,
)
mention_logits = context_outs['all_mention_logits']
mention_bounds = context_outs['all_mention_bounds']
mention_reps = context_outs['mention_reps']
# mention_reps: (bs, max_num_spans, embed_size) -> masked_mention_reps: (all_pred_mentions_batch, embed_size)
masked_mention_reps = mention_reps[context_outs['mention_masks']]
# neg_cand_encs_input_idxs: (all_pred_mentions_batch, num_negatives)
_, neg_cand_encs_input_idxs = cand_encs_index.search_knn(masked_mention_reps.detach().cpu().numpy(), num_neighbors)
neg_cand_encs_input_idxs = torch.from_numpy(neg_cand_encs_input_idxs)
# set "correct" closest entities to -1
# masked_label_ids: (all_pred_mentions_batch)
masked_label_ids = label_ids[mention_idx_mask]
# neg_cand_encs_input_idxs: (max_spans_in_batch, num_negatives)
neg_cand_encs_input_idxs[neg_cand_encs_input_idxs - masked_label_ids.to("cpu").unsqueeze(-1) == 0] = -1
# reshape back tensor (extract num_spans dimension)
# (bs, num_spans, num_negatives)
neg_cand_encs_input_idxs_reconstruct = torch.zeros(label_ids.size(0), label_ids.size(1), neg_cand_encs_input_idxs.size(-1), dtype=neg_cand_encs_input_idxs.dtype)
neg_cand_encs_input_idxs_reconstruct[mention_idx_mask] = neg_cand_encs_input_idxs
neg_cand_encs_input_idxs = neg_cand_encs_input_idxs_reconstruct
# create neg_example_idx (corresponding example (in batch) for each negative)
# neg_example_idx: (bs * num_negatives)
neg_example_idx = torch.arange(neg_cand_encs_input_idxs.size(0)).unsqueeze(-1)
neg_example_idx = neg_example_idx.expand(neg_cand_encs_input_idxs.size(0), neg_cand_encs_input_idxs.size(2))
neg_example_idx = neg_example_idx.flatten()
# flatten and filter -1 (i.e. any correct/positive entities)
# neg_cand_encs_input_idxs: (bs * num_negatives, num_spans)
neg_cand_encs_input_idxs = neg_cand_encs_input_idxs.permute(0,2,1)
neg_cand_encs_input_idxs = neg_cand_encs_input_idxs.reshape(-1, neg_cand_encs_input_idxs.size(-1))
# mask invalid negatives (actually the positive example)
# (bs * num_negatives)
mask = ~((neg_cand_encs_input_idxs == -1).sum(1).bool()) # rows without any -1 entry
# deletes corresponding negative for *all* spans in that example (deletes at most 3 of 10 negatives / example)
# neg_cand_encs_input_idxs: (bs * num_negatives - invalid_negs, num_spans)
neg_cand_encs_input_idxs = neg_cand_encs_input_idxs[mask]
# neg_cand_encs_input_idxs: (bs * num_negatives - invalid_negs)
neg_example_idx = neg_example_idx[mask]
# (bs * num_negatives - invalid_negs, num_spans, embed_size)
neg_cand_encs_input = cand_encs[neg_cand_encs_input_idxs]
# (bs * num_negatives - invalid_negs, num_spans, embed_size)
neg_mention_idx_mask = mention_idx_mask[neg_example_idx]
neg_cand_encs_input[~neg_mention_idx_mask] = 0
# create input tensors (concat [pos examples, neg examples])
# (bs + bs * num_negatives, num_spans, embed_size)
mention_reps_input = torch.cat([
mention_reps, mention_reps[neg_example_idx.to(device)],
])
assert mention_reps.size(0) == pos_cand_encs_input.size(0)
# (bs + bs * num_negatives, num_spans)
label_input = torch.cat([
torch.ones(pos_cand_encs_input.size(0), pos_cand_encs_input.size(1), dtype=label_ids.dtype),
torch.zeros(neg_cand_encs_input.size(0), neg_cand_encs_input.size(1), dtype=label_ids.dtype),
]).to(device)
# (bs + bs * num_negatives, num_spans, embed_size)
cand_encs_input = torch.cat([
pos_cand_encs_input, neg_cand_encs_input,
]).to(device)
hard_negs_mask = torch.cat([mention_idx_mask, neg_mention_idx_mask])
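                # The adversarial-training inputs are now assembled: mention_reps_input,
                # cand_encs_input and label_input each stack the gold (positive) examples first
                # and the FAISS-mined hard negatives after them, while hard_negs_mask marks
                # which span slots in every row correspond to real mentions.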
loss, _, _, _ = reranker(
context_input, candidate_input,
cand_encs=cand_encs_input, text_encs=mention_reps_input,
mention_logits=mention_logits, mention_bounds=mention_bounds,
label_input=label_input, gold_mention_bounds=mention_idxs,
gold_mention_bounds_mask=mention_idx_mask,
hard_negs_mask=hard_negs_mask,
return_loss=True,
)
if grad_acc_steps > 1:
loss = loss / grad_acc_steps
tr_loss += loss.item()
if (step + 1) % (params["print_interval"] * grad_acc_steps) == 0:
logger.info(
"Step {} - epoch {} average loss: {}\n".format(
step,
epoch_idx,
tr_loss / (params["print_interval"] * grad_acc_steps),
)
)
tr_loss = 0
loss.backward()
if (step + 1) % grad_acc_steps == 0:
torch.nn.utils.clip_grad_norm_(
model.parameters(), params["max_grad_norm"]
)
optimizer.step()
scheduler.step()
optimizer.zero_grad()
if (step + 1) % (params["eval_interval"] * grad_acc_steps) == 0:
logger.info("Evaluation on the development dataset")
loss = None # for GPU mem management
mention_reps = None
mention_reps_input = None
label_input = None
cand_encs_input = None
evaluate(
reranker, valid_dataloader, params,
cand_encs=cand_encs, device=device,
logger=logger, faiss_index=cand_encs_index,
get_losses=params["get_losses"],
)
model.train()
logger.info("\n")
logger.info("***** Saving fine - tuned model *****")
epoch_output_folder_path = os.path.join(
model_output_path, "epoch_{}".format(epoch_idx)
)
utils.save_model(model, tokenizer, epoch_output_folder_path)
torch.save({
"optimizer": optimizer.state_dict(),
"scheduler": scheduler.state_dict(),
}, os.path.join(epoch_output_folder_path, "training_state.th"))
output_eval_file = os.path.join(epoch_output_folder_path, "eval_results.txt")
logger.info("Valid data evaluation")
results = evaluate(
reranker, valid_dataloader, params,
cand_encs=cand_encs, device=device,
logger=logger, faiss_index=cand_encs_index,
get_losses=params["get_losses"],
)
logger.info("Train data evaluation")
results = evaluate(
reranker, train_dataloader, params,
cand_encs=cand_encs, device=device,
logger=logger, faiss_index=cand_encs_index,
get_losses=params["get_losses"],
)
ls = [best_score, results["normalized_f1"]]
li = [best_epoch_idx, epoch_idx]
best_score = ls[np.argmax(ls)]
best_epoch_idx = li[np.argmax(ls)]
logger.info("\n")
execution_time = (time.time() - time_start) / 60
utils.write_to_file(
os.path.join(model_output_path, "training_time.txt"),
"The training took {} minutes\n".format(execution_time),
)
logger.info("The training took {} minutes\n".format(execution_time))
# save the best model in the parent_dir
logger.info("Best performance in epoch: {}".format(best_epoch_idx))
params["path_to_model"] = os.path.join(
model_output_path, "epoch_{}".format(best_epoch_idx)
)
utils.save_model(reranker.model, tokenizer, model_output_path)
if params["evaluate"]:
params["path_to_model"] = model_output_path
        evaluate(
            reranker, valid_dataloader, params,
            cand_encs=cand_encs, device=device,
            logger=logger, faiss_index=cand_encs_index,
        )
if __name__ == "__main__":
parser = ElqParser(add_model_args=True)
parser.add_training_args()
args = parser.parse_args()
print(args)
params = args.__dict__
main(params)
|
BLINK-main
|
elq/biencoder/train_biencoder.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from tqdm import tqdm
from collections import OrderedDict
from pytorch_transformers.modeling_bert import (
BertPreTrainedModel,
BertConfig,
BertModel,
)
from pytorch_transformers.tokenization_bert import BertTokenizer
from pytorch_transformers.modeling_utils import CONFIG_NAME, WEIGHTS_NAME
from elq.common.ranker_base import BertEncoder, get_model_obj
from blink.common.optimizer import get_bert_optimizer
from elq.biencoder.allennlp_span_utils import batched_span_select, batched_index_select
from elq.biencoder.utils import batch_reshape_mask_left
def load_biencoder(params):
# Init model
biencoder = BiEncoderRanker(params)
return biencoder
def get_submodel_from_state_dict(state_dict, prefix):
# get only submodel specified with prefix 'prefix' from the state_dict
new_state_dict = OrderedDict()
for key, value in state_dict.items():
if key.startswith(prefix):
key = key[len(prefix)+1:] # +1 for '.'
new_state_dict[key] = value
return new_state_dict
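# Illustrative usage (hypothetical checkpoint path, not from the original file):
#   state_dict = torch.load("pytorch_model.bin")
#   cand_state_dict = get_submodel_from_state_dict(state_dict, "cand_encoder")
#   # keys such as "cand_encoder.bert_model.encoder..." are kept, with the prefix stripped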
class MentionScoresHead(nn.Module):
def __init__(
self, bert_output_dim, scoring_method="qa_linear", max_mention_length=10,
):
super(MentionScoresHead, self).__init__()
self.scoring_method = scoring_method
self.max_mention_length = max_mention_length
if self.scoring_method == "qa_linear":
self.bound_classifier = nn.Linear(bert_output_dim, 3)
elif self.scoring_method == "qa_mlp" or self.scoring_method == "qa": # for back-compatibility
self.bound_classifier = nn.Sequential(
nn.Linear(bert_output_dim, bert_output_dim),
nn.ReLU(),
nn.Dropout(0.1),
nn.Linear(bert_output_dim, 3),
)
else:
raise NotImplementedError()
def forward(self, bert_output, mask_ctxt):
'''
        Returns scores for *inclusive* mention boundaries
'''
# (bs, seqlen, 3)
logits = self.bound_classifier(bert_output)
if self.scoring_method[:2] == "qa":
# (bs, seqlen, 1); (bs, seqlen, 1); (bs, seqlen, 1)
start_logprobs, end_logprobs, mention_logprobs = logits.split(1, dim=-1)
# (bs, seqlen)
start_logprobs = start_logprobs.squeeze(-1)
end_logprobs = end_logprobs.squeeze(-1)
mention_logprobs = mention_logprobs.squeeze(-1)
# impossible to choose masked tokens as starts/ends of spans
start_logprobs[~mask_ctxt] = -float("Inf")
end_logprobs[~mask_ctxt] = -float("Inf")
mention_logprobs[~mask_ctxt] = -float("Inf")
# take sum of log softmaxes:
# log p(mention) = log p(start_pos && end_pos) = log p(start_pos) + log p(end_pos)
# DIM: (bs, starts, ends)
mention_scores = start_logprobs.unsqueeze(2) + end_logprobs.unsqueeze(1)
# (bs, starts, ends)
mention_cum_scores = torch.zeros(mention_scores.size(), dtype=mention_scores.dtype).to(mention_scores.device)
# add ends
mention_logprobs_end_cumsum = torch.zeros(mask_ctxt.size(0), dtype=mention_scores.dtype).to(mention_scores.device)
for i in range(mask_ctxt.size(1)):
mention_logprobs_end_cumsum += mention_logprobs[:,i]
mention_cum_scores[:,:,i] += mention_logprobs_end_cumsum.unsqueeze(-1)
# subtract starts
mention_logprobs_start_cumsum = torch.zeros(mask_ctxt.size(0), dtype=mention_scores.dtype).to(mention_scores.device)
for i in range(mask_ctxt.size(1)-1):
mention_logprobs_start_cumsum += mention_logprobs[:,i]
mention_cum_scores[:,(i+1),:] -= mention_logprobs_start_cumsum.unsqueeze(-1)
# DIM: (bs, starts, ends)
mention_scores += mention_cum_scores
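            # After adding the cumulative-sum correction, the score of span [s, e] equals
            # start_logprobs[s] + end_logprobs[e] + sum(mention_logprobs[s..e]): the
            # log-probability that s starts a mention, e ends it, and every token in between
            # belongs to a mention.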
# DIM: (starts, ends, 2) -- tuples of [start_idx, end_idx]
mention_bounds = torch.stack([
torch.arange(mention_scores.size(1)).unsqueeze(-1).expand(mention_scores.size(1), mention_scores.size(2)), # start idxs
torch.arange(mention_scores.size(1)).unsqueeze(0).expand(mention_scores.size(1), mention_scores.size(2)), # end idxs
], dim=-1).to(mask_ctxt.device)
# DIM: (starts, ends)
mention_sizes = mention_bounds[:,:,1] - mention_bounds[:,:,0] + 1 # (+1 as ends are inclusive)
# Remove invalids (startpos > endpos, endpos > seqlen) and renormalize
# DIM: (bs, starts, ends)
valid_mask = (mention_sizes.unsqueeze(0) > 0) & mask_ctxt.unsqueeze(1)
# DIM: (bs, starts, ends)
mention_scores[~valid_mask] = -float("inf") # invalids have logprob=-inf (p=0)
# DIM: (bs, starts * ends)
mention_scores = mention_scores.view(mention_scores.size(0), -1)
# DIM: (bs, starts * ends, 2)
mention_bounds = mention_bounds.view(-1, 2)
mention_bounds = mention_bounds.unsqueeze(0).expand(mention_scores.size(0), mention_scores.size(1), 2)
if self.max_mention_length is not None:
mention_scores, mention_bounds = self.filter_by_mention_size(
mention_scores, mention_bounds, self.max_mention_length,
)
return mention_scores, mention_bounds
def filter_by_mention_size(self, mention_scores, mention_bounds, max_mention_length):
'''
Filter all mentions > maximum mention length
mention_scores: torch.FloatTensor (bsz, num_mentions)
mention_bounds: torch.LongTensor (bsz, num_mentions, 2)
'''
# (bsz, num_mentions)
mention_bounds_mask = (mention_bounds[:,:,1] - mention_bounds[:,:,0] <= max_mention_length)
# (bsz, num_filtered_mentions)
mention_scores = mention_scores[mention_bounds_mask]
mention_scores = mention_scores.view(mention_bounds_mask.size(0),-1)
# (bsz, num_filtered_mentions, 2)
mention_bounds = mention_bounds[mention_bounds_mask]
mention_bounds = mention_bounds.view(mention_bounds_mask.size(0),-1,2)
return mention_scores, mention_bounds
class GetContextEmbedsHead(nn.Module):
def __init__(self, mention_aggregation_type, ctxt_output_dim, cand_output_dim, dropout=0.1):
"""
mention_aggregation_type
            `all_avg`: average across all tokens in mention
            `fl_avg`: average across first/last tokens in mention
            `{all/fl}_linear`: linear layer over mention
            `{all/fl}_mlp`: MLP over mention
"""
super(GetContextEmbedsHead, self).__init__()
# for aggregating mention outputs of context encoder
self.mention_aggregation_type = mention_aggregation_type.split('_')
self.tokens_to_aggregate = self.mention_aggregation_type[0]
self.aggregate_method = "_".join(self.mention_aggregation_type[1:])
self.dropout = nn.Dropout(dropout)
        # compare the raw string argument; self.mention_aggregation_type has already been split into a list
        if mention_aggregation_type == 'all_avg' or mention_aggregation_type == 'none':
assert ctxt_output_dim == cand_output_dim
if self.aggregate_method == 'linear':
self.mention_agg_linear = nn.Linear(ctxt_output_dim * 2, cand_output_dim)
elif self.aggregate_method == 'avg_linear':
self.mention_agg_linear = nn.Linear(ctxt_output_dim, cand_output_dim)
elif self.aggregate_method == 'mlp':
            self.mention_agg_mlp = nn.Sequential(
                nn.Linear(ctxt_output_dim, ctxt_output_dim),
                nn.ReLU(),
                nn.Dropout(0.1),
                nn.Linear(ctxt_output_dim, cand_output_dim),
            )
else:
self.mention_agg_mlp = None
def forward(self, bert_output, mention_bounds):
'''
bert_output
(bs, seqlen, embed_dim)
mention_bounds: both bounds are inclusive [start, end]
(bs, num_spans, 2)
'''
        # nothing to aggregate when there are no candidate mention spans in the batch
if mention_bounds.size(0) == 0:
return mention_bounds
if self.tokens_to_aggregate == 'all':
(
embedding_ctxt, # (batch_size, num_spans, max_batch_span_width, embedding_size)
mask, # (batch_size, num_spans, max_batch_span_width)
) = batched_span_select(
bert_output, # (batch_size, sequence_length, embedding_size)
mention_bounds, # (batch_size, num_spans, 2)
)
embedding_ctxt[~mask] = 0 # 0 out masked elements
# embedding_ctxt = (batch_size, num_spans, max_batch_span_width, embedding_size)
if self.aggregate_method.startswith('avg'):
embedding_ctxt = embedding_ctxt.sum(2) / mask.sum(2).float().unsqueeze(-1)
# embedding_ctxt = (batch_size, num_spans, embedding_size)
if self.aggregate_method == 'avg_linear':
embedding_ctxt = self.mention_agg_linear(embedding_ctxt)
# embedding_ctxt = (batch_size, num_spans, output_dim)
elif self.tokens_to_aggregate == 'fl':
start_embeddings = batched_index_select(bert_output, mention_bounds[:,:,0])
end_embeddings = batched_index_select(bert_output, mention_bounds[:,:,1])
embedding_ctxt = torch.cat([start_embeddings.unsqueeze(2), end_embeddings.unsqueeze(2)], dim=2)
# embedding_ctxt = (batch_size, num_spans, 2, embedding_size)
if self.aggregate_method == 'avg':
embedding_ctxt = embedding_ctxt.mean(2)
# embedding_ctxt = (batch_size, num_spans, embedding_size)
elif self.aggregate_method == 'linear':
embedding_ctxt = embedding_ctxt.view(embedding_ctxt.size(0), embedding_ctxt.size(1), -1)
# embedding_ctxt = (batch_size, num_spans, 2 * embedding_size)
embedding_ctxt = self.mention_agg_linear(embedding_ctxt)
# embedding_ctxt = (batch_size, num_spans, output_dim)
else:
raise NotImplementedError()
return embedding_ctxt
class BiEncoderModule(torch.nn.Module):
def __init__(self, params):
super(BiEncoderModule, self).__init__()
ctxt_bert = BertModel.from_pretrained(params["bert_model"], output_hidden_states=True)
if params["load_cand_enc_only"]:
bert_model = "bert-large-uncased"
else:
bert_model = params['bert_model']
cand_bert = BertModel.from_pretrained(
bert_model,
output_hidden_states=True,
)
self.context_encoder = BertEncoder(
ctxt_bert,
params["out_dim"],
layer_pulled=params["pull_from_layer"],
add_linear=params["add_linear"],
)
self.cand_encoder = BertEncoder(
cand_bert,
params["out_dim"],
layer_pulled=params["pull_from_layer"],
add_linear=params["add_linear"],
)
if params.get("freeze_cand_enc", False):
for param in self.cand_encoder.parameters():
param.requires_grad = False
self.config = ctxt_bert.config
ctxt_bert_output_dim = ctxt_bert.embeddings.word_embeddings.weight.size(1)
self.mention_aggregation_type = params.get('mention_aggregation_type', None)
self.classification_heads = nn.ModuleDict({})
self.linear_compression = None
if self.mention_aggregation_type is not None:
classification_heads_dict = {'get_context_embeds': GetContextEmbedsHead(
self.mention_aggregation_type,
ctxt_bert_output_dim,
cand_bert.embeddings.word_embeddings.weight.size(1),
)}
classification_heads_dict['mention_scores'] = MentionScoresHead(
ctxt_bert_output_dim,
params["mention_scoring_method"],
params.get("max_mention_length", 10),
)
self.classification_heads = nn.ModuleDict(classification_heads_dict)
elif ctxt_bert_output_dim != cand_bert.embeddings.word_embeddings.weight.size(1):
# mapping to make the output dimensions match for dot-product similarity
self.linear_compression = nn.Linear(ctxt_bert_output_dim, cand_bert.embeddings.word_embeddings.weight.size(1))
def get_raw_ctxt_encoding(
self,
token_idx_ctxt,
segment_idx_ctxt,
mask_ctxt,
):
"""
Gets raw, shared context embeddings from BERT,
to be used by both mention detector and entity linker
Returns:
torch.FloatTensor (bsz, seqlen, embed_dim)
"""
raw_ctxt_encoding, _, _ = self.context_encoder.bert_model(
token_idx_ctxt, segment_idx_ctxt, mask_ctxt,
)
return raw_ctxt_encoding
def get_ctxt_mention_scores(
self,
token_idx_ctxt,
segment_idx_ctxt,
mask_ctxt,
raw_ctxt_encoding = None,
):
"""
Gets mention scores using raw context encodings
Inputs:
raw_ctxt_encoding: torch.FloatTensor (bsz, seqlen, embed_dim)
Returns:
torch.FloatTensor (bsz, num_total_mentions): mention scores/logits
torch.IntTensor (bsz, num_total_mentions): mention boundaries
"""
# (bsz, seqlen, embed_dim)
if raw_ctxt_encoding is None:
raw_ctxt_encoding = self.get_raw_ctxt_encoding(
token_idx_ctxt, segment_idx_ctxt, mask_ctxt,
)
# (num_total_mentions,); (num_total_mentions,)
return self.classification_heads['mention_scores'](
raw_ctxt_encoding, mask_ctxt,
)
def prune_ctxt_mentions(
self,
mention_logits,
mention_bounds,
num_cand_mentions,
threshold,
):
'''
Prunes mentions based on mention scores/logits (by either
        `threshold` or `num_cand_mentions`, whichever yields fewer candidates)
Inputs:
mention_logits: torch.FloatTensor (bsz, num_total_mentions)
mention_bounds: torch.IntTensor (bsz, num_total_mentions)
num_cand_mentions: int
threshold: float
Returns:
torch.FloatTensor(bsz, max_num_pred_mentions): top mention scores/logits
torch.IntTensor(bsz, max_num_pred_mentions, 2): top mention boundaries
torch.BoolTensor(bsz, max_num_pred_mentions): mask on top mentions
torch.BoolTensor(bsz, total_possible_mentions): mask for reshaping from total possible mentions -> max # pred mentions
'''
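        # Illustrative behaviour: with num_cand_mentions=50 and threshold=-4.5 (the defaults used
        # elsewhere in this file), a span is kept if it is among the 50 highest-scoring mentions
        # and its log-sigmoid score exceeds -4.5; if no span in an example clears the threshold,
        # the finite-scoring top-k spans are kept instead so every example retains candidates.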
# (bsz, num_cand_mentions); (bsz, num_cand_mentions)
top_mention_logits, mention_pos = mention_logits.topk(num_cand_mentions, sorted=True)
# (bsz, num_cand_mentions, 2)
# [:,:,0]: index of batch
# [:,:,1]: index into top mention in mention_bounds
mention_pos = torch.stack([torch.arange(mention_pos.size(0)).to(mention_pos.device).unsqueeze(-1).expand_as(mention_pos), mention_pos], dim=-1)
# (bsz, num_cand_mentions)
top_mention_pos_mask = torch.sigmoid(top_mention_logits).log() > threshold
# (total_possible_mentions, 2)
# tuples of [index of batch, index into mention_bounds] of what mentions to include
mention_pos = mention_pos[top_mention_pos_mask | (
# 2nd part of OR: if nothing is > threshold, use topK that are > -inf
((top_mention_pos_mask.sum(1) == 0).unsqueeze(-1)) & (top_mention_logits > -float("inf"))
)]
mention_pos = mention_pos.view(-1, 2)
# (bsz, total_possible_mentions)
# mask of possible logits
mention_pos_mask = torch.zeros(mention_logits.size(), dtype=torch.bool).to(mention_pos.device)
mention_pos_mask[mention_pos[:,0], mention_pos[:,1]] = 1
# (bsz, max_num_pred_mentions, 2)
chosen_mention_bounds, chosen_mention_mask = batch_reshape_mask_left(mention_bounds, mention_pos_mask, pad_idx=0)
# (bsz, max_num_pred_mentions)
chosen_mention_logits, _ = batch_reshape_mask_left(mention_logits, mention_pos_mask, pad_idx=-float("inf"), left_align_mask=chosen_mention_mask)
return chosen_mention_logits, chosen_mention_bounds, chosen_mention_mask, mention_pos_mask
def get_ctxt_embeds(
self,
raw_ctxt_encoding,
mention_bounds,
):
"""
Get candidate scores + embeddings associated with passed-in mention_bounds
Input
raw_ctxt_encoding: torch.FloatTensor (bsz, seqlen, embed_dim)
shared embeddings straight from BERT
mention_bounds: torch.IntTensor (bsz, max_num_pred_mentions, 2)
top mention boundaries
Returns
torch.FloatTensor (bsz, max_num_pred_mentions, embed_dim)
"""
# (bs, max_num_pred_mentions, embed_dim)
embedding_ctxt = self.classification_heads['get_context_embeds'](raw_ctxt_encoding, mention_bounds)
if self.linear_compression is not None:
embedding_ctxt = self.linear_compression(embedding_ctxt)
return embedding_ctxt
def forward_ctxt(
self,
token_idx_ctxt,
segment_idx_ctxt,
mask_ctxt,
gold_mention_bounds=None,
gold_mention_bounds_mask=None,
num_cand_mentions=50,
topK_threshold=-4.5,
get_mention_scores=True,
):
"""
If gold_mention_bounds is set, returns mention embeddings of passed-in mention bounds
Otherwise, uses top-scoring mentions
"""
if self.mention_aggregation_type is None:
'''
OLD system: don't do mention aggregation (use tokens around mention)
'''
embedding_ctxt = self.context_encoder(
token_idx_ctxt, segment_idx_ctxt, mask_ctxt,
)
# linear mapping to correct context length
if self.linear_compression is not None:
embedding_ctxt = self.linear_compression(embedding_ctxt)
return embedding_ctxt, None, None, None
else:
'''
NEW system: aggregate mention tokens
'''
# (bs, seqlen, embed_size)
raw_ctxt_encoding = self.get_raw_ctxt_encoding(
token_idx_ctxt, segment_idx_ctxt, mask_ctxt,
)
top_mention_bounds = None
top_mention_logits = None
extra_rets = {}
if get_mention_scores:
mention_logits, mention_bounds = self.get_ctxt_mention_scores(
token_idx_ctxt, segment_idx_ctxt, mask_ctxt, raw_ctxt_encoding,
)
extra_rets['all_mention_logits'] = mention_logits
extra_rets['all_mention_bounds'] = mention_bounds
if gold_mention_bounds is None:
(
top_mention_logits, top_mention_bounds, top_mention_mask, all_mention_mask,
) = self.prune_ctxt_mentions(
mention_logits, mention_bounds, num_cand_mentions, topK_threshold,
)
extra_rets['mention_logits'] = top_mention_logits.view(-1)
extra_rets['all_mention_mask'] = all_mention_mask
if top_mention_bounds is None:
# use gold mention
top_mention_bounds = gold_mention_bounds
top_mention_mask = gold_mention_bounds_mask
assert top_mention_bounds is not None
assert top_mention_mask is not None
# (bs, num_pred_mentions OR num_gold_mentions, embed_size)
embedding_ctxt = self.get_ctxt_embeds(
raw_ctxt_encoding, top_mention_bounds,
)
# for merging dataparallel, only 1st dimension can differ...
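            # mention_dims records the (per-replica batch size, max_num_pred_mentions) shape so
            # that encode_context() can restore the (bs, num_mentions, *) layout after gathering
            # the flattened outputs from all DataParallel replicas.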
return {
"mention_reps": embedding_ctxt.view(-1, embedding_ctxt.size(-1)),
"mention_bounds": top_mention_bounds.view(-1, top_mention_bounds.size(-1)),
"mention_masks": top_mention_mask.view(-1),
"mention_dims": torch.tensor(top_mention_mask.size()).unsqueeze(0).to(embedding_ctxt.device),
**extra_rets
}
def forward_candidate(
self,
token_idx_cands,
segment_idx_cands,
mask_cands,
):
try:
return self.cand_encoder(
token_idx_cands, segment_idx_cands, mask_cands
)
        except Exception:
            # log the offending tensor shapes and fall back to a random tensor so a single
            # bad batch does not crash the whole forward pass
print(token_idx_cands.size())
print(segment_idx_cands.size())
print(mask_cands.size())
return torch.rand(token_idx_cands.size()).to(token_idx_cands.device)
def forward(
self,
token_idx_ctxt,
segment_idx_ctxt,
mask_ctxt,
token_idx_cands,
segment_idx_cands,
mask_cands,
gold_mention_bounds=None,
gold_mention_bounds_mask=None,
num_cand_mentions=50,
topK_threshold=-4.5,
get_mention_scores=True,
):
"""
If gold_mention_bounds is set, returns mention embeddings of passed-in mention bounds
Otherwise, uses top-scoring mentions
"""
embedding_ctxt = embedding_cands = top_mention_mask = \
top_mention_logits = top_mention_bounds = all_mention_mask = \
all_mention_logits = all_mention_bounds = max_num_pred_mentions = None
context_outs = None
cand_outs = None
if token_idx_ctxt is not None:
context_outs = self.forward_ctxt(
token_idx_ctxt, segment_idx_ctxt, mask_ctxt,
gold_mention_bounds=gold_mention_bounds,
gold_mention_bounds_mask=gold_mention_bounds_mask,
num_cand_mentions=num_cand_mentions, topK_threshold=topK_threshold,
get_mention_scores=get_mention_scores,
)
if token_idx_cands is not None:
cand_outs = self.forward_candidate(
token_idx_cands, segment_idx_cands, mask_cands
)
return context_outs, cand_outs
def upgrade_state_dict_named(self, state_dict):
prefix = ''
current_head_names = [] if not hasattr(self, 'classification_heads') else \
self.classification_heads.keys()
# Handle new classification heads present in the state dict.
keys_to_delete = []
for k in state_dict.keys():
if not k.startswith(prefix + 'classification_heads.'):
continue
head_name = k[len(prefix + 'classification_heads.'):].split('.')[0]
if head_name not in current_head_names:
print(
'WARNING: deleting classification head ({}) from checkpoint '
'not present in current model: {}'.format(head_name, k)
)
keys_to_delete.append(k)
for k in keys_to_delete:
del state_dict[k]
# Copy any newly-added classification heads into the state dict
# with their current weights.
if hasattr(self, 'classification_heads'):
cur_state = self.classification_heads.state_dict()
for k, v in cur_state.items():
if prefix + 'classification_heads.' + k not in state_dict:
print('Overwriting', prefix + 'classification_heads.' + k)
state_dict[prefix + 'classification_heads.' + k] = v
class BiEncoderRanker(torch.nn.Module):
def __init__(self, params, shared=None):
super(BiEncoderRanker, self).__init__()
self.params = params
self.device = torch.device(
"cuda" if torch.cuda.is_available() and not params["no_cuda"] else "cpu"
)
self.n_gpu = torch.cuda.device_count()
# init tokenizer
self.NULL_IDX = 0
self.START_TOKEN = "[CLS]"
self.END_TOKEN = "[SEP]"
self.tokenizer = BertTokenizer.from_pretrained(
params["bert_model"], do_lower_case=params["lowercase"]
)
# init model
self.build_model()
model_path = params.get("path_to_model", None)
if model_path is not None:
self.load_model(
model_path,
cand_enc_only=params.get("load_cand_enc_only", False),
)
self.model = self.model.to(self.device)
self.data_parallel = params.get("data_parallel")
if self.data_parallel:
self.model = torch.nn.DataParallel(self.model)
def load_model(self, fname, cpu=False, cand_enc_only=False):
if cpu or not torch.cuda.is_available():
state_dict = torch.load(fname, map_location=torch.device("cpu"))
else:
state_dict = torch.load(fname)
if cand_enc_only:
cand_state_dict = get_submodel_from_state_dict(state_dict, 'cand_encoder')
self.model.cand_encoder.load_state_dict(cand_state_dict)
else:
self.model.upgrade_state_dict_named(state_dict)
self.model.load_state_dict(state_dict)
def build_model(self):
self.model = BiEncoderModule(self.params)
def save_model(self, output_dir):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = get_model_obj(self.model)
output_model_file = os.path.join(output_dir, WEIGHTS_NAME)
output_config_file = os.path.join(output_dir, CONFIG_NAME)
torch.save(model_to_save.state_dict(), output_model_file)
model_to_save.config.to_json_file(output_config_file)
def get_optimizer(self, optim_states=None, saved_optim_type=None):
return get_bert_optimizer(
[self.model],
self.params["type_optimization"],
self.params["learning_rate"],
fp16=self.params.get("fp16"),
)
def encode_context(
self, cands, gold_mention_bounds=None, gold_mention_bounds_mask=None,
num_cand_mentions=50, topK_threshold=-4.5,
get_mention_scores=True,
):
"""
if gold_mention_bounds specified, selects according to gold_mention_bounds,
otherwise selects according to top-scoring mentions
Returns: Dictionary
mention_reps: torch.FloatTensor (bsz, max_num_pred_mentions, embed_dim): mention embeddings
mention_masks: torch.BoolTensor (bsz, max_num_pred_mentions): mention padding mask
mention_bounds: torch.LongTensor (bsz, max_num_pred_mentions, 2)
(
mention_logits: torch.FloatTensor (bsz, max_num_pred_mentions): mention scores/logits
all_mention_mask: torch.BoolTensor ((bsz, all_cand_mentions)
all_mention_logits: torch.FloatTensor (bsz, all_cand_mentions): all mention scores/logits
all_mention_bounds: torch.LongTensor (bsz, all_cand_mentions, 2): all mention bounds
)
"""
token_idx_cands, segment_idx_cands, mask_cands = to_bert_input(
cands, self.NULL_IDX
)
context_outs, _ = self.model(
token_idx_cands, segment_idx_cands, mask_cands,
None, None, None,
gold_mention_bounds=gold_mention_bounds,
gold_mention_bounds_mask=gold_mention_bounds_mask,
num_cand_mentions=num_cand_mentions,
topK_threshold=topK_threshold,
get_mention_scores=get_mention_scores
)
if context_outs['mention_dims'].size(0) <= 1:
for key in context_outs:
if 'all' in key or key == 'mention_dims':
continue
context_outs[key] = context_outs[key].view([context_outs['mention_dims'][0,0], -1] + list(context_outs[key].size()[1:]))
return context_outs
'''
Reshape to (bs, num_mentions, *), iterating across GPUs
'''
def init_tensor(shape, dtype, init_value):
return init_value * torch.ones(
shape
).to(dtype=dtype, device=context_outs['mention_dims'].device)
bs = cands.size(0)
n_pred_mentions = context_outs['mention_dims'][:,1].max()
context_outs_reshape = {}
for key in context_outs:
if 'all' in key or key == 'mention_dims':
context_outs_reshape[key] = context_outs[key]
continue
# (bsz, max_num_pred_mentions, *)
context_outs_reshape[key] = init_tensor(
[bs, n_pred_mentions] + list(context_outs[key].size()[1:]),
context_outs[key].dtype,
-float("inf") if 'logit' in key else 0,
)
for idx in range(len(context_outs['mention_dims'])):
# reshape
gpu_bs = context_outs['mention_dims'][idx, 0]
b_width = context_outs['mention_dims'][idx, 1]
start_idx = (context_outs['mention_dims'][:idx, 0] * context_outs['mention_dims'][:idx, 1]).sum()
end_idx = start_idx + b_width * gpu_bs
s_reshape = context_outs['mention_dims'][:idx, 0].sum()
e_reshape = s_reshape + gpu_bs
for key in context_outs_reshape:
if 'all' in key or key == 'mention_dims':
continue
if len(context_outs[key].size()) == 1:
target_tensor = context_outs[key][start_idx:end_idx].view(gpu_bs, b_width)
else:
target_tensor = context_outs[key][start_idx:end_idx].view(gpu_bs, b_width, -1)
context_outs_reshape[key][s_reshape:e_reshape, :b_width] = target_tensor
return context_outs_reshape
def encode_candidate(self, cands):
token_idx_cands, segment_idx_cands, mask_cands = to_bert_input(
cands, self.NULL_IDX
)
_, embedding_cands = self.model(
None, None, None,
token_idx_cands, segment_idx_cands, mask_cands
)
return embedding_cands
# Score candidates given context input and label input
# If text_encs/cand_encs is provided (pre-computed), text_vecs/cand_vecs is ignored
def score_candidate(
self,
text_vecs,
cand_vecs,
text_encs=None, # pre-computed mention encoding
cand_encs=None, # pre-computed candidate encoding.
gold_mention_bounds=None,
gold_mention_bounds_mask=None,
num_cand_mentions=50,
mention_threshold=-4.5,
get_mention_scores=True,
hard_negs=False, # (if training) passed in a subset of hard negatives
hard_negs_mask=None, # (if hard negs training) mask for gold candidate mentions on all inputs (pos + negs)
):
"""
text_vecs (bs, max_ctxt_size):
cand_vecs (bs, max_num_gold_mentions, 1, max_cand_size):
text_encs (batch_num_mentions, embed_size): Pre-encoded mention vectors, masked before input
cand_encs (num_ents_to_match [batch_num_total_ents/all_ents], embed_size): Pre-encoded candidate vectors, masked before input
"""
'''
Compute context representations and/or get mention scores
'''
if text_encs is None or get_mention_scores:
# embedding_ctxt: (bs, num_gold_mentions/num_pred_mentions, embed_size)
context_outs = self.encode_context(
text_vecs, gold_mention_bounds=gold_mention_bounds,
gold_mention_bounds_mask=gold_mention_bounds_mask,
num_cand_mentions=num_cand_mentions,
topK_threshold=mention_threshold,
get_mention_scores=get_mention_scores,
)
mention_logits = None
mention_bounds = None
if get_mention_scores:
mention_logits = context_outs['all_mention_logits']
mention_bounds = context_outs['all_mention_bounds']
if text_encs is None:
if gold_mention_bounds is None:
# (all_batch_pred_mentions, embed_size)
embedding_ctxt = context_outs['mention_reps'][context_outs['mention_masks']]
else:
# (all_batch_pred_mentions, embed_size)
embedding_ctxt = context_outs['mention_reps'][gold_mention_bounds_mask]
else:
# Context encoding is given, do not need to re-compute
embedding_ctxt = text_encs
'''
Compute candidate representations
'''
if cand_encs is None:
# Train time: Compute candidates in batch and compare in-batch negatives
# cand_vecs: (bs, num_gold_mentions, 1, cand_width) -> (batch_num_gold_mentions, cand_width)
cand_vecs = cand_vecs[gold_mention_bounds_mask].squeeze(1)
# (batch_num_gold_mentions, embed_dim)
embedding_cands = self.encode_candidate(cand_vecs)
else:
# (batch_num_gold_mentions, embed_dim)
embedding_cands = cand_encs
'''
Do inner-product search, or obtain scores on hard-negative entities
'''
if hard_negs:
assert hard_negs_mask is not None
# (num_mention_in_batch, embed_dim)
embedding_ctxt = embedding_ctxt[hard_negs_mask]
embedding_cands = embedding_cands[hard_negs_mask]
embedding_ctxt = embedding_ctxt.unsqueeze(1) # num_mention_in_batch x 1 x embed_size
embedding_cands = embedding_cands.unsqueeze(2) # num_mention_in_batch x embed_size x 1
scores = torch.bmm(embedding_ctxt, embedding_cands) # num_mention_in_batch x 1 x 1
scores = torch.squeeze(scores)
# (num_mention_in_batch,)
return scores, mention_logits, mention_bounds
else:
# matmul across all cand_encs (in-batch, if cand_encs is None, or across all cand_encs)
# (all_batch_pred_mentions, num_cands)
# similarity score between ctxt i and cand j
all_scores = embedding_ctxt.mm(embedding_cands.t())
return all_scores, mention_logits, mention_bounds
# label_input -- negatives provided
# If label_input is None, train on in-batch negatives
def forward(
self, context_input, cand_input,
text_encs=None, # pre-computed mention encoding.
cand_encs=None, # pre-computed candidate embeddings
mention_logits=None, # pre-computed mention logits
mention_bounds=None, # pre-computed mention bounds
label_input=None, # labels for passed-in (if hard negatives training)
gold_mention_bounds=None,
gold_mention_bounds_mask=None,
hard_negs_mask=None, # should be non-none if we are using negs
return_loss=True,
):
"""
text_encs/cand_encs/label_inputs masked before training
In-batch negs training: cand_encs None, label_inputs None, return_loss True
Hard negs training: cand_encs non-None, label_inputs non-None, return_loss True
cand_encs = all entities in batch + additional hard negatives
Inference: cand_encs non-None, label_inputs None, return_loss False
cand_encs = all entities in DB
cand_encs
non-None: set of candidate encodings to search in
None: compute in-batch candidate vectors (used as negatives if train mode)
label_inputs
non-None: labels to use for hard negatives training
None: random negatives training and/or inference
"""
hard_negs = label_input is not None
'''
GET CANDIDATE SCORES
'''
scores, out_mention_logits, out_mention_bounds = self.score_candidate(
context_input, cand_input,
hard_negs=hard_negs,
cand_encs=cand_encs,
text_encs=text_encs,
gold_mention_bounds=gold_mention_bounds,
gold_mention_bounds_mask=gold_mention_bounds_mask,
hard_negs_mask=hard_negs_mask,
get_mention_scores=(return_loss and (mention_logits is None or mention_bounds is None)),
)
if mention_logits is None:
mention_logits = out_mention_logits
if mention_bounds is None:
mention_bounds = out_mention_bounds
if not return_loss:
return None, scores, mention_logits, mention_bounds
'''
COMPUTE MENTION LOSS (TRAINING MODE)
'''
span_loss = 0
if mention_logits is not None and mention_bounds is not None:
N = context_input.size(0) # batch size
M = gold_mention_bounds.size(1) # num_mentions per instance (just 1, so far)
# 1 value
span_loss = self.get_span_loss(
gold_mention_bounds=gold_mention_bounds,
gold_mention_bounds_mask=gold_mention_bounds_mask,
mention_logits=mention_logits, mention_bounds=mention_bounds,
)
'''
COMPUTE EL LOSS (TRAINING MODE)
'''
if hard_negs:
'''
Hard negatives (negatives passed in)
'''
loss_fct = nn.BCEWithLogitsLoss(reduction="mean")
label_input = label_input[hard_negs_mask]
# scores: (num_mentions_in_batch,); label_input: (num_mentions_in_batch,)
loss = loss_fct(scores, label_input.float()) + span_loss
else:
'''
Random negatives (use in-batch negatives)
'''
# scores: (bs*num_mentions [filtered], bs*num_mentions [filtered])
target = torch.LongTensor(torch.arange(scores.size(1)))
target = target.to(self.device)
# log P(entity|mention) + log P(mention) = log [P(entity|mention)P(mention)]
loss = F.cross_entropy(scores, target, reduction="mean") + span_loss
return loss, scores, mention_logits, mention_bounds
def get_span_loss(
self, gold_mention_bounds, gold_mention_bounds_mask, mention_logits, mention_bounds,
):
"""
gold_mention_bounds (bs, num_mentions, 2)
gold_mention_bounds_mask (bs, num_mentions):
mention_logits (bs, all_mentions)
        mention_bounds (bs, all_mentions, 2)
"""
loss_fct = nn.BCEWithLogitsLoss(reduction="mean")
gold_mention_bounds[~gold_mention_bounds_mask] = -1 # ensure don't select masked to score
# triples of [ex in batch, mention_idx in gold_mention_bounds, idx in mention_bounds]
# use 1st, 2nd to index into gold_mention_bounds, 1st, 3rd to index into mention_bounds
gold_mention_pos_idx = ((
mention_bounds.unsqueeze(1) - gold_mention_bounds.unsqueeze(2) # (bs, num_mentions, start_pos * end_pos, 2)
).abs().sum(-1) == 0).nonzero()
# gold_mention_pos_idx should have 1 entry per masked element
# (num_gold_mentions [~gold_mention_bounds_mask])
gold_mention_pos = gold_mention_pos_idx[:,2]
# (bs, total_possible_spans)
gold_mention_binary = torch.zeros(mention_logits.size(), dtype=mention_logits.dtype).to(gold_mention_bounds.device)
gold_mention_binary[gold_mention_pos_idx[:,0], gold_mention_pos_idx[:,2]] = 1
# prune masked spans
mask = mention_logits != -float("inf")
masked_mention_logits = mention_logits[mask]
masked_gold_mention_binary = gold_mention_binary[mask]
# (bs, total_possible_spans)
span_loss = loss_fct(masked_mention_logits, masked_gold_mention_binary)
return span_loss
def to_bert_input(token_idx, null_idx):
"""
token_idx is a 2D tensor int.
"""
segment_idx = token_idx * 0
mask = token_idx != null_idx
# nullify elements in case self.NULL_IDX was not 0
token_idx = token_idx * mask.long()
return token_idx, segment_idx, mask
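# Illustrative example (hypothetical token ids, not from the original file):
#   token_idx = torch.tensor([[101, 2054, 102, 0, 0]]) with null_idx = 0 gives
#   segment_idx = [[0, 0, 0, 0, 0]], mask = [[True, True, True, False, False]],
#   and token_idx unchanged, since the padding positions already hold null_idx.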
|
BLINK-main
|
elq/biencoder/biencoder.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import io
import sys
import json
import torch
import logging
import numpy as np
from collections import OrderedDict
from pytorch_transformers.modeling_utils import CONFIG_NAME, WEIGHTS_NAME
from tqdm import tqdm
from elq.biencoder.biencoder import BiEncoderRanker
def read_dataset(dataset_name, preprocessed_json_data_parent_folder, debug=False):
file_name = "{}.jsonl".format(dataset_name)
txt_file_path = os.path.join(preprocessed_json_data_parent_folder, file_name)
samples = []
with io.open(txt_file_path, mode="r", encoding="utf-8") as file:
for line in file:
samples.append(json.loads(line.strip()))
if debug and len(samples) > 200:
break
return samples
def accuracy(out, labels):
outputs = np.argmax(out, axis=1)
return np.sum(outputs == labels)
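# Note: despite its name, accuracy() returns the raw count of correct argmax predictions
# rather than a fraction; callers such as evaluate() divide by the number of examples later.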
def remove_module_from_state_dict(state_dict):
new_state_dict = OrderedDict()
for key, value in state_dict.items():
name = "".join(key.split(".module"))
new_state_dict[name] = value
return new_state_dict
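# Illustrative example (hypothetical key name): "model.module.bert.weight" -> "model.bert.weight",
# i.e. the ".module" segment inserted by nn.DataParallel is stripped from every key.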
def save_model(model, tokenizer, output_dir):
"""Saves the model and the tokenizer used in the output directory."""
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model, "module") else model
output_model_file = os.path.join(output_dir, WEIGHTS_NAME)
output_config_file = os.path.join(output_dir, CONFIG_NAME)
torch.save(model_to_save.state_dict(), output_model_file)
model_to_save.config.to_json_file(output_config_file)
tokenizer.save_vocabulary(output_dir)
def get_logger(output_dir=None):
    if output_dir is not None:
os.makedirs(output_dir, exist_ok=True)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
handlers=[
logging.FileHandler(
"{}/log.txt".format(output_dir), mode="a", delay=False
),
logging.StreamHandler(sys.stdout),
],
)
else:
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
handlers=[logging.StreamHandler(sys.stdout)],
)
logger = logging.getLogger('Blink')
logger.setLevel(10)
return logger
def write_to_file(path, string, mode="w"):
with open(path, mode) as writer:
writer.write(string)
def get_biencoder(parameters):
return BiEncoderRanker(parameters)
|
BLINK-main
|
elq/candidate_ranking/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Provide an argument parser and default command line options for using ELQ.
import argparse
import importlib
import os
import sys
import datetime
ENT_START_TAG = "[unused0]"
ENT_END_TAG = "[unused1]"
ENT_TITLE_TAG = "[unused2]"
class ElqParser(argparse.ArgumentParser):
"""
    Provide an opt-producer and CLI argument parser.
    More options can be added by passing this object and calling
    ``add_arg()`` or ``add_argument()`` on it.
:param add_elq_args:
(default True) initializes the default arguments for ELQ package.
:param add_model_args:
(default False) initializes the default arguments for loading models,
including initializing arguments from the model.
"""
def __init__(
self, add_elq_args=True, add_model_args=False,
description='ELQ parser',
):
super().__init__(
description=description,
allow_abbrev=False,
conflict_handler='resolve',
formatter_class=argparse.HelpFormatter,
add_help=add_elq_args,
)
self.elq_home = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
)
os.environ['ELQ_HOME'] = self.elq_home
self.add_arg = self.add_argument
self.overridable = {}
if add_elq_args:
self.add_elq_args()
if add_model_args:
self.add_model_args()
def add_elq_args(self, args=None):
"""
Add common ELQ args across all scripts.
"""
parser = self.add_argument_group("Common Arguments")
parser.add_argument(
"--silent", action="store_true", help="Whether to print progress bars."
)
parser.add_argument(
"--debug",
action="store_true",
help="Whether to run in debug mode with only 200 samples.",
)
parser.add_argument(
"--data_parallel",
action="store_true",
help="Whether to distributed the candidate generation process.",
)
parser.add_argument(
"--no_cuda", action="store_true",
help="Whether not to use CUDA when available",
)
parser.add_argument("--top_k", default=10, type=int)
parser.add_argument(
"--seed", type=int, default=52313, help="random seed for initialization"
)
parser.add_argument(
"--zeshel",
default=True,
type=bool,
help="Whether the dataset is from zeroshot.",
)
def add_model_args(self, args=None):
"""
Add model args.
"""
parser = self.add_argument_group("Model Arguments")
parser.add_argument(
"--max_seq_length",
default=256,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.",
)
parser.add_argument(
"--max_context_length",
default=128,
type=int,
help="The maximum total context input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.",
)
parser.add_argument(
"--max_cand_length",
default=128,
type=int,
help="The maximum total label input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.",
)
parser.add_argument(
"--path_to_model",
default=None,
type=str,
required=False,
help="The full path to the model to load.",
)
parser.add_argument(
"--bert_model",
default="bert-base-uncased",
type=str,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.",
)
parser.add_argument(
"--pull_from_layer", type=int, default=-1, help="Layers to pull from BERT",
)
parser.add_argument(
"--lowercase",
action="store_false",
help="Whether to lower case the input text. True for uncased models, False for cased models.",
)
parser.add_argument("--context_key", default="context", type=str)
parser.add_argument("--title_key", default="entity", type=str)
parser.add_argument(
"--out_dim", type=int, default=1, help="Output dimention of bi-encoders.",
)
parser.add_argument(
"--add_linear",
action="store_true",
help="Whether to add an additonal linear projection on top of BERT.",
)
parser.add_argument(
"--data_path",
default="data/zeshel",
type=str,
help="The path to the train data.",
)
parser.add_argument(
"--output_path",
default=None,
type=str,
required=True,
help="The output directory where generated output file (model, etc.) is to be dumped.",
)
parser.add_argument(
"--mention_aggregation_type",
default=None,
type=str,
help="Type of mention aggregation (None to just use [CLS] token, "
"'all_avg' to average across tokens in mention, 'fl_avg' to average across first/last tokens in mention, "
"'{all/fl}_linear' for linear layer over mention, '{all/fl}_mlp' to MLP over mention)",
)
parser.add_argument(
"--no_mention_bounds",
dest="no_mention_bounds",
action="store_true",
default=False,
help="Don't add tokens around target mention. MUST BE FALSE IF 'mention_aggregation_type' is NONE",
)
parser.add_argument(
"--mention_scoring_method",
dest="mention_scoring_method",
default="qa_linear",
type=str,
help="Method for generating/scoring mentions boundaries (options: 'qa_mlp', 'qa_linear', 'BIO')",
)
parser.add_argument(
"--max_mention_length",
dest="max_mention_length",
default=10,
type=int,
help="Maximum length of span to consider as candidate mention",
)
def add_training_args(self, args=None):
"""
Add model training args.
"""
parser = self.add_argument_group("Model Training Arguments")
parser.add_argument(
"--evaluate", action="store_true", help="Whether to run evaluation."
)
parser.add_argument(
"--output_eval_file",
default=None,
type=str,
help="The txt file where the the evaluation results will be written.",
)
parser.add_argument(
"--train_batch_size", default=8, type=int,
help="Total batch size for training."
)
parser.add_argument(
"--eval_batch_size", default=8, type=int,
help="Total batch size for evaluation.",
)
parser.add_argument("--max_grad_norm", default=1.0, type=float)
parser.add_argument(
"--learning_rate",
default=3e-5,
type=float,
help="The initial learning rate for Adam.",
)
parser.add_argument(
"--num_train_epochs",
default=1,
type=int,
help="Number of training epochs.",
)
parser.add_argument(
"--print_interval", type=int, default=5,
help="Interval of loss printing",
)
parser.add_argument(
"--eval_interval",
type=int,
default=40,
help="Interval for evaluation during training",
)
parser.add_argument(
"--save_interval", type=int, default=1,
help="Interval for model saving"
)
parser.add_argument(
"--warmup_proportion",
default=0.1,
type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumualte before performing a backward/update pass.",
)
parser.add_argument(
"--type_optimization",
type=str,
default="all_encoder_layers",
help="Which type of layers to optimize in BERT",
)
parser.add_argument(
"--shuffle", type=bool, default=False,
help="Whether to shuffle train data",
)
# TODO DELETE LATER!!!
parser.add_argument(
"--start_idx",
default=None,
type=int,
)
parser.add_argument(
"--end_idx",
default=None,
type=int,
)
parser.add_argument(
"--last_epoch",
default=0,
type=int,
help="Epoch to restore from when pretraining",
)
parser.add_argument(
"--path_to_trainer_state",
default=None,
type=str,
required=False,
help="The full path to the last checkpoint's training state to load.",
)
parser.add_argument(
'--dont_distribute_train_samples',
default=False,
action="store_true",
help="Don't distribute all training samples across the epochs (go through all samples every epoch)",
)
parser.add_argument(
"--freeze_cand_enc",
default=False,
action="store_true",
help="Freeze the candidate encoder",
)
parser.add_argument(
"--load_cand_enc_only",
default=False,
action="store_true",
help="Only load the candidate encoder from saved model path",
)
parser.add_argument(
"--cand_enc_path",
default="models/all_entities_large.t7",
type=str,
required=False,
help="Filepath to the saved entity encodings.",
)
parser.add_argument(
"--cand_token_ids_path",
default="models/entity_token_ids_128.t7",
type=str,
required=False,
help="Filepath to the saved tokenized entity descriptions.",
)
parser.add_argument(
"--index_path",
default="models/faiss_hnsw_index.pkl",
type=str,
required=False,
help="Filepath to the HNSW index for adversarial training.",
)
parser.add_argument(
"--adversarial_training",
default=False,
action="store_true",
help="Do adversarial training (only takes effect if `freeze_cand_enc` is set)",
)
parser.add_argument(
"--get_losses",
default=False,
action="store_true",
help="Get losses during evaluation",
)
def add_eval_args(self, args=None):
"""
Add model evaluation args.
"""
parser = self.add_argument_group("Model Evaluation Arguments")
parser.add_argument(
"--mode",
default="valid",
type=str,
help="Train / validation / test",
)
parser.add_argument(
"--save_topk_result",
action="store_true",
help="Whether to save prediction results.",
)
parser.add_argument(
"--encode_batch_size",
default=8,
type=int,
help="Batch size for encoding."
)
parser.add_argument(
"--cand_pool_path",
default=None,
type=str,
help="Path for candidate pool",
)
parser.add_argument(
"--cand_encode_path",
default=None,
type=str,
help="Path for candidate encoding",
)
|
BLINK-main
|
elq/common/params.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from torch import nn
import torch
def get_model_obj(model):
model = model.module if hasattr(model, "module") else model
return model
class BertEncoder(nn.Module):
def __init__(
self, bert_model, output_dim, layer_pulled=-1, add_linear=None,
):
super(BertEncoder, self).__init__()
self.layer_pulled = layer_pulled
bert_output_dim = bert_model.embeddings.word_embeddings.weight.size(1)
self.bert_model = bert_model
self.dropout = nn.Dropout(0.1)
if add_linear:
self.additional_linear = nn.Linear(bert_output_dim, output_dim)
else:
self.additional_linear = None
def forward(self, token_ids, segment_ids, attention_mask, DEBUG=False):
if DEBUG:
import pdb
pdb.set_trace()
try:
output_bert, output_pooler, _ = self.bert_model(
token_ids, segment_ids, attention_mask
)
except RuntimeError as e:
print(token_ids.size())
print(segment_ids.size())
print(attention_mask.size())
print(e)
import pdb
pdb.set_trace()
output_bert, output_pooler, _ = self.bert_model(
token_ids, segment_ids, attention_mask
)
if self.additional_linear is not None:
# embeddings = (batch_size, embedding_size)
embeddings = output_pooler
else:
# embeddings = (batch_size, embedding_size)
embeddings = output_bert[:, 0, :]
# in case of dimensionality reduction
if self.additional_linear is not None:
result = self.additional_linear(self.dropout(embeddings))
else:
result = embeddings
return result
|
BLINK-main
|
elq/common/ranker_base.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
FAISS-based index components. Original from
https://github.com/facebookresearch/DPR/blob/master/dpr/indexer/faiss_indexers.py
"""
import os
import logging
import pickle
import faiss
import numpy as np
logger = logging.getLogger()
class DenseIndexer(object):
def __init__(self, buffer_size: int = 50000):
self.buffer_size = buffer_size
self.index_id_to_db_id = []
self.index = None
def index_data(self, data: np.array):
raise NotImplementedError
def search_knn(self, query_vectors: np.array, top_docs: int):
raise NotImplementedError
def serialize(self, index_file: str):
logger.info("Serializing index to %s", index_file)
faiss.write_index(self.index, index_file)
def deserialize_from(self, index_file: str):
logger.info("Loading index from %s", index_file)
self.index = faiss.read_index(index_file)
logger.info(
"Loaded index of type %s and size %d", type(self.index), self.index.ntotal
)
# DenseFlatIndexer does exact search
class DenseFlatIndexer(DenseIndexer):
def __init__(self, vector_sz: int = 1, buffer_size: int = 50000):
super(DenseFlatIndexer, self).__init__(buffer_size=buffer_size)
self.index = faiss.IndexFlatIP(vector_sz)
def index_data(self, data: np.array):
n = len(data)
# indexing in batches is beneficial for many faiss index types
logger.info("Indexing data, this may take a while.")
cnt = 0
for i in range(0, n, self.buffer_size):
vectors = [np.reshape(t, (1, -1)) for t in data[i : i + self.buffer_size]]
vectors = np.concatenate(vectors, axis=0)
self.index.add(vectors)
cnt += self.buffer_size
logger.info("Total data indexed %d", n)
def search_knn(self, query_vectors, top_k):
scores, indexes = self.index.search(query_vectors, top_k)
return scores, indexes
# DenseIVFFlatIndexer does bucketed exact search
class DenseIVFFlatIndexer(DenseIndexer):
def __init__(self, vector_sz: int = 1, nprobe: int = 10, nlist: int = 100):
super(DenseIVFFlatIndexer, self).__init__()
self.nprobe = nprobe
self.nlist = nlist
quantizer = faiss.IndexFlatL2(vector_sz) # the other index
self.index = faiss.IndexIVFFlat(quantizer, vector_sz, self.nlist, faiss.METRIC_INNER_PRODUCT)
self.index.nprobe = nprobe
def index_data(self, data: np.array):
n = len(data)
# indexing in batches is beneficial for many faiss index types
logger.info("Indexing data, this may take a while.")
self.index.train(data)
self.index.add(data)
logger.info("Total data indexed %d", n)
def search_knn(self, query_vectors, top_k):
scores, indexes = self.index.search(query_vectors, top_k)
return scores, indexes
# DenseHNSWFlatIndexer does approximate search
class DenseHNSWFlatIndexer(DenseIndexer):
"""
    Efficient index for retrieval. Note: default settings are for high accuracy but also high RAM usage
"""
def __init__(
self,
vector_sz: int,
buffer_size: int = 50000,
store_n: int = 128,
ef_search: int = 256,
ef_construction: int = 200,
):
super(DenseHNSWFlatIndexer, self).__init__(buffer_size=buffer_size)
index = faiss.IndexHNSWFlat(vector_sz, store_n, faiss.METRIC_INNER_PRODUCT)
index.hnsw.efSearch = ef_search
index.hnsw.efConstruction = ef_construction
self.index = index
def index_data(self, data: np.array):
n = len(data)
# indexing in batches is beneficial for many faiss index types
logger.info("Indexing data, this may take a while.")
self.index.add(data)
logger.info("Total data indexed %d" % n)
def search_knn(self, query_vectors, top_k):
scores, indexes = self.index.search(query_vectors, top_k)
return scores, indexes
def deserialize_from(self, file: str):
super(DenseHNSWFlatIndexer, self).deserialize_from(file)
# to trigger warning on subsequent indexing
self.phi = 1
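# ----------------------------------------------------------------------------
# Minimal usage sketch (illustrative only; the vector size and random data
# below are made up and not part of the original module).
# ----------------------------------------------------------------------------
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    vectors = rng.rand(1000, 64).astype(np.float32)  # faiss expects float32
    queries = rng.rand(4, 64).astype(np.float32)
    indexer = DenseFlatIndexer(vector_sz=64)
    indexer.index_data(vectors)
    scores, ids = indexer.search_knn(queries, top_k=5)
    print(scores.shape, ids.shape)  # -> (4, 5) (4, 5)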
|
BLINK-main
|
elq/index/faiss_indexer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
def entity_linking_tp_with_overlap(gold, predicted):
"""
Partially adopted from: https://github.com/UKPLab/starsem2018-entity-linking
Counts weak and strong matches
:param gold:
:param predicted:
:return:
>>> entity_linking_tp_with_overlap([('Q7366', 14, 18), ('Q780394', 19, 35)], [('Q7366', 14, 16), ('Q780394', 19, 35)])
2, 1
>>> entity_linking_tp_with_overlap([('Q7366', 14, 18), ('Q780394', 19, 35)], [('Q7366', 14, 16)])
1, 0
>>> entity_linking_tp_with_overlap([(None, 14, 18), ('Q780394', 19, 35)], [('Q7366', 14, 16)])
0, 0
>>> entity_linking_tp_with_overlap([(None, 14, 18), (None, )], [(None,)])
1, 0
>>> entity_linking_tp_with_overlap([('Q7366', ), ('Q780394', )], [('Q7366', 14, 16)])
1, 0
>>> entity_linking_tp_with_overlap([], [('Q7366', 14, 16)])
0, 0
"""
if not gold or not predicted:
return 0, 0
    # Add dummy spans; if no spans are given, everything overlaps by default
if any(len(e) != 3 for e in gold):
gold = [(e[0], 0, 1) for e in gold]
predicted = [(e[0], 0, 1) for e in predicted]
# Replace None KB ids with empty strings
gold = [("",) + e[1:] if e[0] is None else e for e in gold]
predicted = [("",) + e[1:] if e[0] is None else e for e in predicted]
gold = sorted(gold, key=lambda x: x[2])
predicted = sorted(predicted, key=lambda x: x[2])
# tracks weak matches
lcs_matrix_weak = np.zeros((len(gold), len(predicted)), dtype=np.int16)
# tracks strong matches
lcs_matrix_strong = np.zeros((len(gold), len(predicted)), dtype=np.int16)
for g_i in range(len(gold)):
for p_i in range(len(predicted)):
gm = gold[g_i]
pm = predicted[p_i]
# increment lcs_matrix_weak
if not (gm[1] >= pm[2] or pm[1] >= gm[2]) and (gm[0].lower() == pm[0].lower()):
if g_i == 0 or p_i == 0:
lcs_matrix_weak[g_i, p_i] = 1
else:
lcs_matrix_weak[g_i, p_i] = 1 + lcs_matrix_weak[g_i - 1, p_i - 1]
else:
if g_i == 0 and p_i == 0:
lcs_matrix_weak[g_i, p_i] = 0
elif g_i == 0 and p_i != 0:
lcs_matrix_weak[g_i, p_i] = max(0, lcs_matrix_weak[g_i, p_i - 1])
elif g_i != 0 and p_i == 0:
lcs_matrix_weak[g_i, p_i] = max(lcs_matrix_weak[g_i - 1, p_i], 0)
elif g_i != 0 and p_i != 0:
lcs_matrix_weak[g_i, p_i] = max(lcs_matrix_weak[g_i - 1, p_i], lcs_matrix_weak[g_i, p_i - 1])
# increment lcs_matrix_strong
if (gm[1] == pm[1] and pm[2] == gm[2]) and (gm[0].lower() == pm[0].lower()):
if g_i == 0 or p_i == 0:
lcs_matrix_strong[g_i, p_i] = 1
else:
lcs_matrix_strong[g_i, p_i] = 1 + lcs_matrix_strong[g_i - 1, p_i - 1]
else:
if g_i == 0 and p_i == 0:
lcs_matrix_strong[g_i, p_i] = 0
elif g_i == 0 and p_i != 0:
lcs_matrix_strong[g_i, p_i] = max(0, lcs_matrix_strong[g_i, p_i - 1])
elif g_i != 0 and p_i == 0:
lcs_matrix_strong[g_i, p_i] = max(lcs_matrix_strong[g_i - 1, p_i], 0)
elif g_i != 0 and p_i != 0:
lcs_matrix_strong[g_i, p_i] = max(lcs_matrix_strong[g_i - 1, p_i], lcs_matrix_strong[g_i, p_i - 1])
weak_match_count = lcs_matrix_weak[len(gold) - 1, len(predicted) - 1]
strong_match_count = lcs_matrix_strong[len(gold) - 1, len(predicted) - 1]
return weak_match_count, strong_match_count
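# ----------------------------------------------------------------------------
# Minimal usage sketch (illustrative only; the gold/predicted triples below
# are made-up examples, not part of the original module). Precision/recall/F1
# are one common way to turn the weak-match count into a metric.
# ----------------------------------------------------------------------------
if __name__ == "__main__":
    gold = [("Q7366", 14, 18), ("Q780394", 19, 35)]
    predicted = [("Q7366", 14, 16), ("Q780394", 19, 35)]
    weak, strong = entity_linking_tp_with_overlap(gold, predicted)
    precision = weak / len(predicted)
    recall = weak / len(gold)
    f1 = 2 * precision * recall / (precision + recall)
    print(weak, strong, round(f1, 3))  # -> 2 1 1.0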
|
BLINK-main
|
elq/vcg_utils/measures.py
|
import argparse
import json
import logging
import os
import random
import time
import torch
from datetime import timedelta
WORLDS = {
'american_football',
'doctor_who',
'fallout',
'final_fantasy',
'military',
'pro_wrestling',
'starwars',
'world_of_warcraft',
'coronation_street',
'muppets',
'ice_hockey',
'elder_scrolls',
'forgotten_realms',
'lego',
'star_trek',
'yugioh'
}
domain_set = {}
domain_set['val'] = set(['coronation_street', 'muppets', 'ice_hockey', 'elder_scrolls'])
domain_set['test'] = set(['forgotten_realms', 'lego', 'star_trek', 'yugioh'])
domain_set['train'] = set(['american_football', 'doctor_who', 'fallout', 'final_fantasy', 'military', 'pro_wrestling', 'starwars', 'world_of_warcraft'])
class LogFormatter():
def __init__(self):
self.start_time = time.time()
def format(self, record):
elapsed_seconds = round(record.created - self.start_time)
prefix = "%s - %s - %s" % (
record.levelname,
time.strftime("%x %X"),
timedelta(seconds=elapsed_seconds)
)
message = record.getMessage()
message = message.replace('\n', '\n' + ' ' * (len(prefix) + 3))
return "%s - %s" % (prefix, message) if message else ''
log_formatter = LogFormatter()
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
console_handler.setFormatter(log_formatter)
logger = logging.getLogger()
logger.handlers = []
logger.setLevel(logging.INFO)
logger.propagate = False
logger.addHandler(console_handler)
def load_entity_dict(params):
entity_dict = {}
entity_map = {}
for src in WORLDS:
fname = os.path.join(params.document_path, src + ".json")
assert os.path.isfile(fname), "File not found! %s" % fname
cur_dict = {}
doc_map = {}
doc_list = []
with open(fname, 'rt') as f:
for line in f:
line = line.rstrip()
item = json.loads(line)
doc_id = item["document_id"]
title = item["title"]
text = item["text"]
doc_map[doc_id] = len(doc_list)
doc_list.append(item)
logger.info("Load for world %s." % src)
entity_dict[src] = doc_list
entity_map[src] = doc_map
return entity_dict, entity_map
def convert_data(params, entity_dict, entity_map, mode):
if mode == "valid":
fname = os.path.join(params.mention_path, "val.json")
else:
fname = os.path.join(params.mention_path, mode + ".json")
fout = open(os.path.join(params.output_path, mode + ".jsonl"), 'wt')
cnt = 0
max_tok = 128
with open(fname, 'rt') as f:
for line in f:
cnt += 1
line = line.rstrip()
item = json.loads(line)
mention = item["text"].lower()
src = item["corpus"]
label_doc_id = item["label_document_id"]
orig_doc_id = item["context_document_id"]
start = item["start_index"]
end = item["end_index"]
# add context around the mention as well
orig_id = entity_map[src][orig_doc_id]
text = entity_dict[src][orig_id]["text"].lower()
tokens = text.split(" ")
assert mention == ' '.join(tokens[start:end + 1])
tokenized_query = mention
mention_context_left = tokens[max(0, start - max_tok):start]
mention_context_right = tokens[end + 1:min(len(tokens), end + max_tok + 1)]
# entity info
k = entity_map[src][label_doc_id]
ent_title = entity_dict[src][k]['title']
ent_text = entity_dict[src][k]["text"]
example = {}
example["context_left"] = ' '.join(mention_context_left)
example['context_right'] = ' '.join(mention_context_right)
example["mention"] = mention
example["label"] = ent_text
example["label_id"] = k
example['label_title'] = ent_title
example['world'] = src
fout.write(json.dumps(example))
fout.write('\n')
fout.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Zero-shot Entity Linking Dataset')
parser.add_argument(
'--document_path',
default='data/zeshel/documents',
type=str,
)
parser.add_argument(
'--mention_path',
default='data/zeshel/mentions',
type=str,
)
parser.add_argument(
'--output_path',
default='data/zeshel/blink_format',
type=str,
)
params = parser.parse_args()
os.makedirs(params.output_path, exist_ok=True)
entity_dict, entity_map = load_entity_dict(params)
convert_data(params, entity_dict, entity_map, 'train')
convert_data(params, entity_dict, entity_map, 'valid')
convert_data(params, entity_dict, entity_map, 'test')
|
BLINK-main
|
examples/zeshel/create_BLINK_zeshel_data.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from elq.biencoder.biencoder import load_biencoder
import elq.candidate_ranking.utils as utils
import json
import sys
import os
from tqdm import tqdm
import argparse
def encode_candidate(
reranker,
candidate_pool,
encode_batch_size,
silent,
logger,
):
reranker.model.eval()
device = reranker.device
#for cand_pool in candidate_pool:
#logger.info("Encoding candidate pool %s" % src)
sampler = SequentialSampler(candidate_pool)
data_loader = DataLoader(
candidate_pool, sampler=sampler, batch_size=encode_batch_size
)
if silent:
iter_ = data_loader
else:
iter_ = tqdm(data_loader)
cand_encode_list = None
for step, batch in enumerate(iter_):
cands = batch
cands = cands.to(device)
cand_encode = reranker.encode_candidate(cands)
if cand_encode_list is None:
cand_encode_list = cand_encode
else:
cand_encode_list = torch.cat((cand_encode_list, cand_encode))
return cand_encode_list
def load_candidate_pool(
tokenizer,
params,
logger,
cand_pool_path,
):
candidate_pool = None
# try to load candidate pool from file
try:
logger.info("Loading pre-generated candidate pool from: ")
logger.info(cand_pool_path)
candidate_pool = torch.load(cand_pool_path)
except:
logger.info("Loading failed.")
assert candidate_pool is not None
return candidate_pool
parser = argparse.ArgumentParser()
parser.add_argument('--path_to_model_config', type=str, required=True, help='filepath to saved model config')
parser.add_argument('--path_to_model', type=str, required=True, help='filepath to saved model')
parser.add_argument('--entity_dict_path', type=str, required=True, help='filepath to entities to encode (.jsonl file)')
parser.add_argument('--saved_cand_ids', type=str, help='filepath to entities pre-parsed into IDs')
parser.add_argument('--encoding_save_file_dir', type=str, help='directory of file to save generated encodings', default=None)
parser.add_argument('--test', action='store_true', default=False, help='whether to just test encoding subsample of entities')
parser.add_argument('--compare_saved_embeds', type=str, help='compare against these saved embeddings')
parser.add_argument('--batch_size', type=int, default=512, help='batch size for encoding candidate vectors (default 512)')
parser.add_argument('--chunk_start', type=int, default=0, help='example idx to start encoding at (for parallelizing encoding process)')
parser.add_argument('--chunk_end', type=int, default=-1, help='example idx to stop encoding at (for parallelizing encoding process)')
args = parser.parse_args()
try:
with open(args.path_to_model_config) as json_file:
biencoder_params = json.load(json_file)
except json.decoder.JSONDecodeError:
with open(args.path_to_model_config) as json_file:
for line in json_file:
line = line.replace("'", "\"")
line = line.replace("True", "true")
line = line.replace("False", "false")
line = line.replace("None", "null")
biencoder_params = json.loads(line)
break
# model to use
biencoder_params["path_to_model"] = args.path_to_model
# entities to use
biencoder_params["entity_dict_path"] = args.entity_dict_path
biencoder_params["debug"] = False
biencoder_params["data_parallel"] = True
biencoder_params["no_cuda"] = False
biencoder_params["max_context_length"] = 32
biencoder_params["encode_batch_size"] = args.batch_size
saved_cand_ids = getattr(args, 'saved_cand_ids', None)
encoding_save_file_dir = args.encoding_save_file_dir
if encoding_save_file_dir is not None and not os.path.exists(encoding_save_file_dir):
os.makedirs(encoding_save_file_dir, exist_ok=True)
logger = utils.get_logger(biencoder_params.get("model_output_path", None))
biencoder = load_biencoder(biencoder_params)
baseline_candidate_encoding = None
if getattr(args, 'compare_saved_embeds', None) is not None:
baseline_candidate_encoding = torch.load(getattr(args, 'compare_saved_embeds'))
candidate_pool = load_candidate_pool(
biencoder.tokenizer,
biencoder_params,
logger,
getattr(args, 'saved_cand_ids', None),
)
if args.test:
candidate_pool = candidate_pool[:10]
# encode in chunks to parallelize
save_file = None
if getattr(args, 'encoding_save_file_dir', None) is not None:
save_file = os.path.join(
args.encoding_save_file_dir,
"{}_{}.t7".format(args.chunk_start, args.chunk_end),
)
print("Saving in: {}".format(save_file))
if save_file is not None:
f = open(save_file, "w").close() # mark as existing
candidate_encoding = encode_candidate(
biencoder,
candidate_pool[args.chunk_start:args.chunk_end],
biencoder_params["encode_batch_size"],
biencoder_params["silent"],
logger,
)
if save_file is not None:
torch.save(candidate_encoding, save_file)
print(candidate_encoding[0,:10])
if baseline_candidate_encoding is not None:
print(baseline_candidate_encoding[0,:10])
|
BLINK-main
|
scripts/generate_candidates.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import errno
import json
import os
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
from tqdm import tqdm
BEGIN_ENT_TOKEN = "[START_ENT]"
END_ENT_TOKEN = "[END_ENT]"
url2id_cache = {}
def _read_url(url):
with urllib.request.urlopen(url) as response:
html = response.read()
soup = BeautifulSoup(html, features="html.parser")
title = soup.title.string.replace(" - Wikipedia", "").strip()
return title
def _get_pageid_from_api(title, client=None):
pageid = None
title_html = title.strip().replace(" ", "%20")
url = "https://en.wikipedia.org/w/api.php?action=query&titles={}&format=json".format(
title_html
)
try:
# Package the request, send the request and catch the response: r
r = requests.get(url)
# Decode the JSON data into a dictionary: json_data
json_data = r.json()
if len(json_data["query"]["pages"]) > 1:
print("WARNING: more than one result returned from wikipedia api")
for _, v in json_data["query"]["pages"].items():
pageid = v["pageid"]
except:
pass
return pageid
def extract_questions(filename):
# all the datapoints
global_questions = []
# left context so far in the document
left_context = []
# working datapoints for the document
document_questions = []
# is the entity open
open_entity = False
# question id in the document
question_i = 0
with open(filename) as fin:
lines = fin.readlines()
for line in tqdm(lines):
if "-DOCSTART-" in line:
# new document is starting
doc_id = line.split("(")[-1][:-2]
# END DOCUMENT
# check end of entity
if open_entity:
document_questions[-1]["input"].append(END_ENT_TOKEN)
open_entity = False
"""
#DEBUG
for q in document_questions:
pp.pprint(q)
input("...")
"""
# add sentence_questions to global_questions
global_questions.extend(document_questions)
# reset
left_context = []
document_questions = []
question_i = 0
else:
split = line.split("\t")
token = split[0].strip()
if len(split) >= 5:
B_I = split[1]
mention = split[2]
                    # YAGO2_entity = split[3]
Wikipedia_URL = split[4]
Wikipedia_ID = split[5]
                    # Freebase_id = split[6]
if B_I == "I":
pass
elif B_I == "B":
title = Wikipedia_URL.split("/")[-1].replace("_", " ")
if Wikipedia_ID == "000":
if Wikipedia_URL in url2id_cache:
pageid = url2id_cache[Wikipedia_URL]
else:
pageid = _get_pageid_from_api(title)
url2id_cache[Wikipedia_URL] = pageid
Wikipedia_ID = pageid
q = {
"id": "{}:{}".format(doc_id, question_i),
"input": left_context.copy() + [BEGIN_ENT_TOKEN],
"mention": mention,
"Wikipedia_title": title,
"Wikipedia_URL": Wikipedia_URL,
"Wikipedia_ID": Wikipedia_ID,
"left_context": left_context.copy(),
"right_context": [],
}
document_questions.append(q)
open_entity = True
question_i += 1
else:
print("Invalid B_I {}", format(B_I))
sys.exit(-1)
# print(token,B_I,mention,Wikipedia_URL,Wikipedia_ID)
else:
if open_entity:
document_questions[-1]["input"].append(END_ENT_TOKEN)
open_entity = False
left_context.append(token)
for q in document_questions:
q["input"].append(token)
for q in document_questions[:-1]:
q["right_context"].append(token)
if len(document_questions) > 0 and not open_entity:
document_questions[-1]["right_context"].append(token)
# FINAL SENTENCE
if open_entity:
document_questions[-1]["input"].append(END_ENT_TOKEN)
open_entity = False
# add sentence_questions to global_questions
global_questions.extend(document_questions)
return global_questions
# store on file
def store_questions(questions, OUT_FILENAME):
if not os.path.exists(os.path.dirname(OUT_FILENAME)):
try:
os.makedirs(os.path.dirname(OUT_FILENAME))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
with open(OUT_FILENAME, "w+") as fout:
for q in questions:
json.dump(q, fout)
fout.write("\n")
def convert_to_BLINK_format(questions):
data = []
for q in questions:
datapoint = {
"context_left": " ".join(q["left_context"]).strip(),
"mention": q["mention"],
"context_right": " ".join(q["right_context"]).strip(),
"query_id": q["id"],
"label_id": q["Wikipedia_ID"],
"Wikipedia_ID": q["Wikipedia_ID"],
"Wikipedia_URL": q["Wikipedia_URL"],
"Wikipedia_title": q["Wikipedia_title"],
}
data.append(datapoint)
return data
# AIDA-YAGO2
print("AIDA-YAGO2")
in_aida_filename = (
"data/train_and_benchmark_data/basic_data/test_datasets/AIDA/AIDA-YAGO2-dataset.tsv"
)
aida_questions = extract_questions(in_aida_filename)
train = []
testa = []
testb = []
for element in aida_questions:
if "testa" in element["id"]:
testa.append(element)
elif "testb" in element["id"]:
testb.append(element)
else:
train.append(element)
print("train: {}".format(len(train)))
print("testa: {}".format(len(testa)))
print("testb: {}".format(len(testb)))
train_blink = convert_to_BLINK_format(train)
testa_blink = convert_to_BLINK_format(testa)
testb_blink = convert_to_BLINK_format(testb)
out_train_aida_filename = "data/BLINK_benchmark/AIDA-YAGO2_train.jsonl"
store_questions(train_blink, out_train_aida_filename)
out_testa_aida_filename = "data/BLINK_benchmark/AIDA-YAGO2_testa.jsonl"
store_questions(testa_blink, out_testa_aida_filename)
out_testb_aida_filename = "data/BLINK_benchmark/AIDA-YAGO2_testb.jsonl"
store_questions(testb_blink, out_testb_aida_filename)
# ACE 2004
print("ACE 2004")
in_ace_filename = "data/train_and_benchmark_data/basic_data/test_datasets/wned-datasets/ace2004/ace2004.conll"
ace_questions = convert_to_BLINK_format(extract_questions(in_ace_filename))
out_ace_filename = "data/BLINK_benchmark/ace2004_questions.jsonl"
store_questions(ace_questions, out_ace_filename)
print(len(ace_questions))
# aquaint
print("aquaint")
in_aquaint_filename = "data/train_and_benchmark_data/basic_data/test_datasets/wned-datasets/aquaint/aquaint.conll"
aquaint_questions = convert_to_BLINK_format(extract_questions(in_aquaint_filename))
out_aquaint_filename = "data/BLINK_benchmark/aquaint_questions.jsonl"
store_questions(aquaint_questions, out_aquaint_filename)
print(len(aquaint_questions))
# clueweb - WNED-CWEB (CWEB)
print("clueweb - WNED-CWEB (CWEB)")
in_clueweb_filename = "data/train_and_benchmark_data/basic_data/test_datasets/wned-datasets/clueweb/clueweb.conll"
clueweb_questions = convert_to_BLINK_format(extract_questions(in_clueweb_filename))
out_clueweb_filename = "data/BLINK_benchmark/clueweb_questions.jsonl"
store_questions(clueweb_questions, out_clueweb_filename)
print(len(clueweb_questions))
# msnbc
print("msnbc")
in_msnbc_filename = "data/train_and_benchmark_data/basic_data/test_datasets/wned-datasets/msnbc/msnbc.conll"
msnbc_questions = convert_to_BLINK_format(extract_questions(in_msnbc_filename))
out_msnbc_filename = "data/BLINK_benchmark/msnbc_questions.jsonl"
store_questions(msnbc_questions, out_msnbc_filename)
print(len(msnbc_questions))
# wikipedia - WNED-WIKI (WIKI)
print("wikipedia - WNED-WIKI (WIKI)")
in_wnedwiki_filename = "data/train_and_benchmark_data/basic_data/test_datasets/wned-datasets/wikipedia/wikipedia.conll"
wnedwiki_questions = convert_to_BLINK_format(extract_questions(in_wnedwiki_filename))
out_wnedwiki_filename = "data/BLINK_benchmark/wnedwiki_questions.jsonl"
store_questions(wnedwiki_questions, out_wnedwiki_filename)
print(len(wnedwiki_questions))
|
BLINK-main
|
scripts/create_BLINK_benchmark_data.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import json
import os
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--path_to_saved_chunks', type=str, required=True, help='filepath to directory containing saved chunks')
parser.add_argument('--chunk_size', type=int, default=1000000, help='size of each chunk')
args = parser.parse_args()
CHUNK_SIZES = args.chunk_size
all_chunks = []
for fn in range(0, 5903526, CHUNK_SIZES):
f_chunk = os.path.join(
args.path_to_saved_chunks, '{}_{}.t7'.format(fn, fn+CHUNK_SIZES),
)
if not os.path.exists(f_chunk) or os.path.getsize(f_chunk) == 0:
continue
loaded_chunk = torch.load(f_chunk)
all_chunks.append(loaded_chunk[:CHUNK_SIZES])
all_chunks = torch.cat(all_chunks, dim=0)
torch.save(all_chunks, os.path.join(
    args.path_to_saved_chunks, 'all.t7',
))
|
BLINK-main
|
scripts/merge_candidates.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import json
import os
import numpy as np
import torch
from elq.vcg_utils.measures import entity_linking_tp_with_overlap
from tqdm import tqdm
from transformers import BertTokenizer
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
id2title = json.load(open("models/id2title.json"))
def load_dists(all_save_dir, data, split, model, joint_threshold):
save_dir = "{}/{}_{}_{}_joint{}_top50cands_final_joint".format(all_save_dir, data, split, model, joint_threshold)
if not os.path.exists(save_dir):
save_dir += "_0"
with open(os.path.join(save_dir, "biencoder_outs.jsonl")) as f:
examples = f.readlines()
examples = [json.loads(line) for line in examples]
biencoder_indices = np.load(os.path.join(save_dir, "biencoder_nns.npy"), allow_pickle=True) # corresponds to biencoder_dists
biencoder_dists = np.load(os.path.join(save_dir, "biencoder_dists.npy"), allow_pickle=True)
if os.path.exists(os.path.join(save_dir, "biencoder_cand_scores.npy")):
cand_dists = np.load(os.path.join(save_dir, "biencoder_cand_scores.npy"), allow_pickle=True)
else:
cand_dists = np.load(os.path.join(save_dir, "biencoder_cand_dists.npy"), allow_pickle=True)
pred_mention_bounds = np.load(os.path.join(save_dir, "biencoder_mention_bounds.npy"), allow_pickle=True)
if os.path.exists(os.path.join(save_dir, "biencoder_mention_scores.npy")):
mention_dists = np.load(os.path.join(save_dir, "biencoder_mention_scores.npy"), allow_pickle=True)
else:
mention_dists = [biencoder_dists[i] - torch.log_softmax(torch.tensor(cand_dists[i]), 1).numpy() for i in range(len(biencoder_dists))]
# inverse sigmoid
mention_dists = [np.log(md / (1 - md)) for md in mention_dists]
return examples, biencoder_indices, biencoder_dists, cand_dists, pred_mention_bounds, mention_dists
def filter_repeats(pred_triples, pred_scores):
# sort pred_triples and pred_scores by pred_scores
score_sort_ids = sorted(enumerate(pred_scores), key=lambda x: x[1], reverse=True)
pred_triples = [pred_triples[si[0]] for si in score_sort_ids]
pred_scores = [score_sort_id[1] for score_sort_id in score_sort_ids]
all_pred_entities = {}
all_pred_entities_pruned = []
all_pred_scores_pruned = []
for idx, ent in enumerate(pred_triples):
if ent[0] in all_pred_entities:
continue
all_pred_entities_pruned.append(ent)
all_pred_scores_pruned.append(pred_scores[idx])
all_pred_entities[ent[0]] = 0
return all_pred_entities_pruned, all_pred_scores_pruned
def filter_overlaps(tokens, pred_triples, pred_scores):
all_pred_entities_pruned = []
all_pred_scores_pruned = []
mention_masked_utterance = np.zeros(len(tokens))
# ensure well-formed-ness, prune overlaps
# greedily pick highest scoring, then prune all overlapping
for idx, mb in enumerate(pred_triples):
if sum(mention_masked_utterance[mb[1]:mb[2]]) > 0:
continue
all_pred_entities_pruned.append(mb)
all_pred_scores_pruned.append(pred_scores[idx])
mention_masked_utterance[mb[1]:mb[2]] = 1
return all_pred_entities_pruned, all_pred_scores_pruned
def filter_repeat_overlaps(tokens, pred_triples, pred_scores):
all_pred_entities_pruned = []
all_pred_scores_pruned = []
mention_masked_utterance = {triple[0]: np.zeros(len(tokens)) for triple in pred_triples}
# ensure well-formed-ness, prune overlaps
# greedily pick highest scoring, then prune all overlapping
for idx, mb in enumerate(pred_triples):
if sum(mention_masked_utterance[mb[0]][mb[1]:mb[2]]) > 0:
continue
all_pred_entities_pruned.append(mb)
all_pred_scores_pruned.append(pred_scores[idx])
mention_masked_utterance[mb[0]][mb[1]:mb[2]] = 1
return all_pred_entities_pruned, all_pred_scores_pruned
# threshold and sort by score
def get_threshold_mask_and_sort(mention_dists, cand_dists, biencoder_dists, valid_cands_mask, threshold, top_mention_sort=True):
"""
top_mention_sort:
True: sort top candidates per mention only
scores_mask and sorted_idxs has dim (#_valid_examples,)
False: sort ALL candidates (assumes multiple candidates per mention)
scores_mask and sorted_idxs has dim (#_valid_examples, #_cands)
"""
mention_scores = mention_dists[valid_cands_mask]
if len(mention_scores.shape) > 1:
mention_scores = mention_scores[:,0]
scores = torch.log_softmax(torch.tensor(cand_dists[valid_cands_mask]), 1) + torch.sigmoid(torch.tensor(mention_scores)).log().unsqueeze(-1)
if top_mention_sort:
scores_mask = (scores[:,0] > threshold)
# sort...
_, sorted_idxs = scores[:,0][scores_mask].sort(descending=True)
sorted_filtered_scores = scores[scores_mask][sorted_idxs]
else:
scores_mask = (scores > threshold) # GRAPHQUESTIONS BEST
sorted_filtered_scores, sorted_idxs = scores[scores_mask].sort(descending=True)
return scores_mask.numpy(), sorted_idxs.numpy(), sorted_filtered_scores.numpy()
all_save_dir = "saved_preds"
model_type = "finetuned_webqsp" # wiki
if model_type == "wiki":
model = '{0}_all_ents;all_mention_biencoder_all_avg_true_128_true_true_bert_large_qa_linear;15'.format(model_type)
elif model_type == "finetuned_webqsp":
model= '{0}_all_ents;all_mention_biencoder_all_avg_true_128_true_true_bert_large_qa_linear;18'.format(model_type)
get_topk_cands = True
topk = 100
if get_topk_cands:
threshold=-float("inf")
else:
threshold=-5
f1s = []  # collect per-split F1 scores
for data in ["nq", "WebQuestions", "triviaqa"]:
if data == "nq":
splits = ["train0", "train1", "train2", "dev", "test"]
else:
splits = ["train", "dev", "test"]
for split in splits:
(
examples, biencoder_indices, biencoder_dists,
cand_dists, pred_mention_bounds, mention_dists
) = load_dists(all_save_dir, data, split, model, "0.0" if model_type == "wiki" else "-inf")
new_examples = []
num_correct=0
num_predicted=0
num_gold=0
for i, example in enumerate(tqdm(examples)):
# select valid candidates
valid_cands_mask = (biencoder_dists[i][:,0] != -1) & (biencoder_dists[i][:,0] == biencoder_dists[i][:,0])
# get scores and masking/sorting by score
scores_mask, sorted_idxs, sorted_filtered_scores = get_threshold_mask_and_sort(
mention_dists[i], cand_dists[i], biencoder_dists[i], valid_cands_mask, threshold, top_mention_sort=(not get_topk_cands)
)
if get_topk_cands:
# (filtered_examples, #cands, 2)
ex_pred_mention_bounds = np.repeat(np.expand_dims(pred_mention_bounds[i], axis=1), biencoder_indices[i].shape[1], axis=1)
# (filtered_examples, #cands,)
ex_mention_dists = np.repeat(np.expand_dims(mention_dists[i], axis=1), biencoder_indices[i].shape[1], axis=1)
ex_biencoder_indices = biencoder_indices[i]
ex_cand_dists = cand_dists[i]
else:
ex_pred_mention_bounds = pred_mention_bounds[i]
ex_mention_dists = mention_dists[i]
ex_biencoder_indices = biencoder_indices[i] #[:,0]
ex_cand_dists = cand_dists[i] #[:,0]
# output threshold_entities_translate, pred_triples, pred_scores
threshold_entities = ex_biencoder_indices[valid_cands_mask][scores_mask][sorted_idxs] # (filtered_exs, #cands) / (filtered_cands,)
threshold_mention_bounds = ex_pred_mention_bounds[valid_cands_mask][scores_mask][sorted_idxs] # (filtered_exs, 2) / (filtered_cands, 2)
threshold_cand_scores = ex_cand_dists[valid_cands_mask][scores_mask][sorted_idxs] # (filtered_exs, #cands) / (filtered_cands,)
threshold_mention_scores = ex_mention_dists[valid_cands_mask][scores_mask][sorted_idxs] # (filtered_exs,) / (filtered_cands,)
threshold_scores = sorted_filtered_scores # (filtered_exs, #cands) / (filtered_cands,)
threshold_entities_translate = {}
pred_triples = []
pred_scores = []
example['tokens'] = [101] + example['tokens'] + [102]
for m in range(len(threshold_scores)):
mb = threshold_mention_bounds[m].tolist()
mention_text = tokenizer.decode(example['tokens'][mb[0]:mb[1]+1])
threshold_entities_translate[mention_text] = {
"mention_idx": m, "mention_score": float(threshold_mention_scores[m])
}
if len(threshold_entities[m].shape) > 0:
pred_triples.append([str(threshold_entities[m][0]), mb[0], mb[1]+1])
pred_scores.append(float(threshold_scores[m][0]))
threshold_entities_translate[mention_text]["candidate_entities"] = []
threshold_entities_translate[mention_text]["cand_scores"] = threshold_cand_scores[m].tolist()
for id in threshold_entities[m]:
threshold_entities_translate[mention_text]["candidate_entities"].append(id2title[str(id)])
else:
pred_triples.append([str(threshold_entities[m]), mb[0], mb[1]+1])
pred_scores.append(float(threshold_scores[m]))
threshold_entities_translate[mention_text]["candidate_entities"] = id2title[str(threshold_entities[m])]
threshold_entities_translate[mention_text]["cand_scores"] = float(threshold_cand_scores[m])
new_ex = {
"id": example["id"],
"text": example["text"],
"tokens": example["tokens"],
}
if "gold_triples" in example:
all_pred_entities_pruned = pred_triples
all_pred_scores_pruned = pred_scores
if get_topk_cands:
all_pred_entities_pruned, all_pred_scores_pruned = filter_repeats(pred_triples, pred_scores)
all_pred_entities_pruned = all_pred_entities_pruned[:topk]
all_pred_scores_pruned = all_pred_scores_pruned[:topk]
else:
all_pred_entities_pruned, all_pred_scores_pruned = filter_overlaps(example["tokens"], pred_triples, pred_scores)
else:
all_pred_entities_pruned = pred_triples
all_pred_scores_pruned = pred_scores
if get_topk_cands:
all_pred_entities_pruned, all_pred_scores_pruned = filter_repeats(pred_triples, pred_scores)
all_pred_entities_pruned = all_pred_entities_pruned[:topk]
all_pred_scores_pruned = all_pred_scores_pruned[:topk]
else:
all_pred_entities_pruned, all_pred_scores_pruned = filter_overlaps(example["tokens"], pred_triples, pred_scores)
new_ex['pred_mentions'] = threshold_entities_translate
new_ex['pred_triples'] = [[triple[0], triple[1]-1, triple[2]-1] for triple in all_pred_entities_pruned]
new_ex['pred_triples_score'] = all_pred_scores_pruned
new_ex['pred_triples_string'] = [
[id2title[triple[0]], tokenizer.decode(example['tokens'][triple[1]:triple[2]])]
for triple in all_pred_entities_pruned
]
# get scores
if "gold_triples" in example:
gold_triples = example["gold_triples"]
new_ex["gold_triples"] = gold_triples
num_overlap_weak, num_overlap_strong = entity_linking_tp_with_overlap(gold_triples, new_ex['pred_triples'])
num_correct += num_overlap_weak
num_predicted += len(all_pred_entities_pruned)
num_gold += len(gold_triples)
new_examples.append(new_ex)
# compute metrics
if num_predicted > 0 and num_gold > 0:
p = num_correct / num_predicted
r = num_correct / num_gold
f1 = 2*p*r / (p+r)
print(f1)
f1s.append(f1)
if get_topk_cands:
print("Saving {} {} {}".format(data, split, str(topk)))
save_file = "{}_{}_top{}.jsonl".format(split, model_type, str(topk))
else:
print("Saving {} {} {}".format(data, split, str(threshold)))
save_file = "{}_{}_{}.jsonl".format(split, model_type, str(threshold))
# save
with open(os.path.join("/checkpoint/belindali/entity_link/data/{}/saved_preds".format(data), save_file), 'w') as wf:
for new_ex in new_examples:
b=wf.write(json.dumps(new_ex) + "\n")
|
BLINK-main
|
scripts/tune_hyperparams_new.py
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import builtins
import math
import os
import random
import shutil
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
from timm.data.auto_augment import rand_augment_transform
import moco.loader
import moco.builder
model_names = sorted(
name
for name in models.__dict__
if name.islower()
and not name.startswith("__")
and callable(models.__dict__[name])
)
parser = argparse.ArgumentParser(description="PyTorch ImageNet Training")
parser.add_argument("data", metavar="DIR", help="path to dataset")
parser.add_argument(
"-a",
"--arch",
metavar="ARCH",
default="resnet50",
choices=model_names,
help="model architecture: "
+ " | ".join(model_names)
+ " (default: resnet50)",
)
parser.add_argument(
"-j",
"--workers",
default=32,
type=int,
metavar="N",
help="number of data loading workers (default: 32)",
)
parser.add_argument(
"--epochs",
default=100,
type=int,
metavar="N",
help="number of total epochs to run",
)
parser.add_argument(
"--start-epoch",
default=0,
type=int,
metavar="N",
help="manual epoch number (useful on restarts)",
)
parser.add_argument(
"-b",
"--batch-size",
default=256,
type=int,
metavar="N",
help="mini-batch size (default: 256), this is the total "
"batch size of all GPUs on the current node when "
"using Data Parallel or Distributed Data Parallel",
)
parser.add_argument(
"--lr",
"--learning-rate",
default=0.03,
type=float,
metavar="LR",
help="initial learning rate",
dest="lr",
)
parser.add_argument(
"--momentum",
default=0.9,
type=float,
metavar="M",
help="momentum of SGD solver",
)
parser.add_argument(
"--wd",
"--weight-decay",
default=1e-4,
type=float,
metavar="W",
help="weight decay (default: 1e-4)",
dest="weight_decay",
)
parser.add_argument(
"-p",
"--print-freq",
default=10,
type=int,
metavar="N",
help="print frequency (default: 10)",
)
parser.add_argument(
"--resume",
default="",
type=str,
metavar="PATH",
help="path to latest checkpoint (default: none)",
)
parser.add_argument(
"--world-size",
default=-1,
type=int,
help="number of nodes for distributed training",
)
parser.add_argument(
"--rank", default=-1, type=int, help="node rank for distributed training"
)
parser.add_argument(
"--dist-url",
default="tcp://224.66.41.62:23456",
type=str,
help="url used to set up distributed training",
)
parser.add_argument(
"--dist-backend", default="nccl", type=str, help="distributed backend"
)
parser.add_argument(
"--seed", default=None, type=int, help="seed for initializing training. "
)
parser.add_argument("--gpu", default=None, type=int, help="GPU id to use.")
parser.add_argument(
"--multiprocessing-distributed",
action="store_true",
help="Use multi-processing distributed training to launch "
"N processes per node, which has N GPUs. This is the "
"fastest way to use PyTorch for either single node or "
"multi node data parallel training",
)
# moco specific configs:
parser.add_argument(
"--moco-dim", default=128, type=int, help="feature dimension (default: 128)"
)
parser.add_argument(
"--moco-k",
default=65536,
type=int,
help="queue size; number of negative keys (default: 65536)",
)
parser.add_argument(
"--moco-m",
default=0.999,
type=float,
help="moco momentum of updating key encoder (default: 0.999)",
)
parser.add_argument(
"--moco-t",
default=0.2,
type=float,
help="softmax temperature (default: 0.2)",
)
# options for Asymmetry Siamese Representation Learning.
parser.add_argument(
"--enable-scalemix",
default=False,
action="store_true",
help="enable ScaleMix to generate new views of an image by mixing two "
"views of potentially different scales together via binary masking",
)
parser.add_argument(
"--enable-multicrop",
default=False,
action="store_true",
help="enable MultiCrop to take additional views (commonly in lower "
"resolution) from each image per iteration",
)
parser.add_argument(
"--enable-asymm-aug",
default=False,
action="store_true",
help="enable Asymmetrical Augmentation to form an asymmetric augmentation "
"recipes for source and target",
)
parser.add_argument(
"--enable-asym-bn",
default=False,
action="store_true",
help="enable Asymmetrical BN to employ SyncBN to normalizes batch stats "
"over all devices for target decoder",
)
parser.add_argument(
"--enable-mean-encoding",
default=False,
action="store_true",
help="enable Mean Encoding to perform i.i.d. sampling multiple times and "
"take the mean as target encoder output",
)
parser.add_argument(
"--tag",
default="",
type=str,
help="job tag for checkpoint name."
)
def main():
args = parser.parse_args()
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn(
"You have chosen to seed training. "
"This will turn on the CUDNN deterministic setting, "
"which can slow down your training considerably! "
"You may see unexpected behavior when restarting "
"from checkpoints."
)
if args.gpu is not None:
warnings.warn(
"You have chosen a specific GPU. This will completely "
"disable data parallelism."
)
if args.dist_url == "env://" and args.world_size == -1:
args.world_size = int(os.environ["WORLD_SIZE"])
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
ngpus_per_node = torch.cuda.device_count()
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.world_size
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(
main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args)
)
else:
# Simply call main_worker function
main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
args.gpu = gpu
# suppress printing if not master
if args.multiprocessing_distributed and args.gpu != 0:
def print_pass(*args):
pass
builtins.print = print_pass
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
# For multiprocessing distributed training, rank needs to be the
# global rank among all the processes
args.rank = args.rank * ngpus_per_node + gpu
dist.init_process_group(
backend=args.dist_backend,
init_method=args.dist_url,
world_size=args.world_size,
rank=args.rank,
)
# create model
print("=> creating model '{}'".format(args.arch))
model = moco.builder.MoCo(
models.__dict__[args.arch],
args.moco_dim,
args.moco_k,
args.moco_m,
args.moco_t,
args.enable_asym_bn,
)
print(model)
if args.distributed:
# For multiprocessing distributed, DistributedDataParallel constructor
# should always set the single device scope, otherwise,
# DistributedDataParallel will use all available devices.
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
# When using a single GPU per process and per
# DistributedDataParallel, we need to divide the batch size
# ourselves based on the total number of GPUs we have
args.batch_size = int(args.batch_size / ngpus_per_node)
args.workers = int(
(args.workers + ngpus_per_node - 1) / ngpus_per_node
)
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.gpu]
)
else:
model.cuda()
# DistributedDataParallel will divide and allocate batch_size to
# all available GPUs if device_ids are not set
model = torch.nn.parallel.DistributedDataParallel(model)
elif args.gpu is not None:
torch.cuda.set_device(args.gpu)
model = model.cuda(args.gpu)
# comment out the following line for debugging
raise NotImplementedError("Only DistributedDataParallel is supported.")
else:
# AllGather implementation (batch shuffle, queue update, etc.) in
# this code only supports DistributedDataParallel.
raise NotImplementedError("Only DistributedDataParallel is supported.")
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda(args.gpu)
optimizer = torch.optim.SGD(
model.parameters(),
args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay,
)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
if args.gpu is None:
checkpoint = torch.load(args.resume)
else:
# Map model to be loaded to specified single gpu.
loc = "cuda:{}".format(args.gpu)
checkpoint = torch.load(args.resume, map_location=loc)
args.start_epoch = checkpoint["epoch"]
model.load_state_dict(checkpoint["state_dict"])
optimizer.load_state_dict(checkpoint["optimizer"])
print(
"=> loaded checkpoint '{}' (epoch {})".format(
args.resume, checkpoint["epoch"]
)
)
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
# Data loading code
traindir = os.path.join(args.data, "train")
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
)
# MoCo v2's aug: similar to SimCLR https://arxiv.org/abs/2002.05709
if args.enable_multicrop:
ratio_range=(0.14,1.0)
else:
ratio_range=(0.2,1.0)
augmentation = [
transforms.RandomResizedCrop(224, scale=ratio_range),
transforms.RandomApply(
[transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], # not strengthened
p=0.8,
),
transforms.RandomGrayscale(p=0.2),
transforms.RandomApply([moco.loader.GaussianBlur([0.1, 2.0])], p=0.5),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]
"""
# --------------------------------------------------------------------------- #
# Asymmetric Augmentations #
# --------------------------------------------------------------------------- #
Asymmetric augmentation recipes pair a stronger augmentation for the source with
a weaker augmentation for the target. Stronger augmentation introduces higher
variance, which hurts the target but helps the source; the reverse holds for
weaker augmentation.
# --------------------------------------------------------------------------- #
"""
augmentation_stronger = [
transforms.RandomResizedCrop(224, scale=ratio_range),
transforms.RandomApply(
[transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], # not strengthened
p=0.8,
),
transforms.RandomGrayscale(p=0.2),
transforms.RandomApply([moco.loader.GaussianBlur([0.1, 2.0])], p=0.5),
transforms.RandomHorizontalFlip(),
rand_augment_transform(
"rand-m10-n2-mstd0.5", {"translate_const": 100},
),
transforms.ToTensor(),
normalize,
]
augmentation_weaker = [
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]
"""
# --------------------------------------------------------------------------- #
# MultiCrop #
# --------------------------------------------------------------------------- #
Besides the two basic views needed for Siamese learning, MultiCrop takes
additional views from each image per iteration. To alleviate the added
computation cost, a common strategy is to have low-resolution crops
(e.g., 96×96) instead of standard-resolution crops (224×224) as added views.
As a side effect, inputting small crops can potentially increase the variance
for an encoder due to the size and crop-distribution changes.
# --------------------------------------------------------------------------- #
"""
augmentation_mini = [
transforms.RandomResizedCrop(96, scale=(0.05, 0.14)),
transforms.RandomApply(
[transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], # not strengthened
p=0.8,
),
transforms.RandomGrayscale(p=0.2),
transforms.RandomApply([moco.loader.GaussianBlur([0.1, 2.0])], p=0.5),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]
train_dataset = datasets.ImageFolder(
traindir,
moco.loader.CropsTransform(
key_transform=transforms.Compose(augmentation_weaker)
if args.enable_asymm_aug
else transforms.Compose(augmentation),
query_mini_transform=transforms.Compose(augmentation_mini),
query_transform=transforms.Compose(augmentation_stronger)
if args.enable_asymm_aug
else transforms.Compose(augmentation),
enable_scalemix=args.enable_scalemix,
enable_multicrop=args.enable_multicrop,
enable_mean_encoding=args.enable_mean_encoding,
),
)
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(
train_dataset
)
else:
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=args.batch_size,
shuffle=(train_sampler is None),
num_workers=args.workers,
pin_memory=True,
sampler=train_sampler,
drop_last=True,
)
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
adjust_learning_rate(optimizer, epoch, args)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch, args)
if not args.multiprocessing_distributed or (
args.multiprocessing_distributed and args.rank % ngpus_per_node == 0
):
save_checkpoint(
{
"epoch": epoch + 1,
"arch": args.arch,
"state_dict": model.state_dict(),
"optimizer": optimizer.state_dict(),
},
is_best=False,
filename="checkpoint_{}_{:04d}.pth.tar".format(args.tag, epoch),
)
def train(train_loader, model, criterion, optimizer, epoch, args):
batch_time = AverageMeter("Time", ":6.3f")
data_time = AverageMeter("Data", ":6.3f")
losses = AverageMeter("Loss", ":.4e")
top1 = AverageMeter("Acc@1", ":6.2f")
top5 = AverageMeter("Acc@5", ":6.2f")
progress = ProgressMeter(
len(train_loader),
[batch_time, data_time, losses, top1, top5],
prefix="Epoch: [{}]".format(epoch),
)
# switch to train mode
model.train()
end = time.time()
for i, (images, _) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if args.gpu is not None:
for j in range(len(images)):
images[j] = images[j].cuda(args.gpu, non_blocking=True)
if args.enable_mean_encoding:
q_mini_ind, k_ind = 1, -2
else:
q_mini_ind, k_ind = 1, -1
# compute outputs
outputs, targets = model(
im_q=images[:q_mini_ind],
im_q_mini=images[q_mini_ind:k_ind],
im_k=images[k_ind:],
)
loss = criterion(outputs[0], targets[0])
# Loss for mini multi-crops
if args.enable_multicrop:
loss += sum(
map(
lambda crop: criterion(crop[0], crop[1]),
zip(outputs[q_mini_ind:], targets[q_mini_ind:]),
)
) / len(outputs[q_mini_ind:])
# acc1/acc5 are (K+1)-way contrast classifier accuracy
# measure accuracy and record loss
acc1, acc5 = accuracy(outputs[0], targets[0], topk=(1, 5))
losses.update(loss.item(), images[0].size(0))
top1.update(acc1[0], images[0].size(0))
top5.update(acc5[0], images[0].size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
def save_checkpoint(state, is_best, filename="checkpoint.pth.tar"):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, "model_best.pth.tar")
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=":f"):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = "{name} {val" + self.fmt + "} ({avg" + self.fmt + "})"
return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
def __init__(self, num_batches, meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print("\t".join(entries))
def _get_batch_fmtstr(self, num_batches):
num_digits = len(str(num_batches // 1))
fmt = "{:" + str(num_digits) + "d}"
return "[" + fmt + "/" + fmt.format(num_batches) + "]"
def adjust_learning_rate(optimizer, epoch, args):
"""Decay the learning rate based on schedule"""
lr = args.lr
lr *= 0.5 * (1.0 + math.cos(math.pi * epoch / args.epochs))
for param_group in optimizer.param_groups:
param_group["lr"] = lr
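# Worked example of the cosine schedule above (illustrative numbers only,
# assuming the defaults --lr 0.03 and --epochs 100):
#   epoch  0 -> 0.03 * 0.5 * (1 + cos(0))          = 0.03
#   epoch 50 -> 0.03 * 0.5 * (1 + cos(pi / 2))     = 0.015
#   epoch 99 -> 0.03 * 0.5 * (1 + cos(0.99 * pi))  ~ 7.4e-06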
def accuracy(output, target, topk=(1,)):
"""
Computes the accuracy over the k top predictions for the specified values
of k
"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = (
correct[:k].reshape(-1).float().sum(0, keepdim=True)
)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == "__main__":
main()
|
asym-siam-main
|
main_moco.py
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import builtins
import math
import os
import random
import shutil
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
model_names = sorted(
name
for name in models.__dict__
if name.islower() and not name.startswith("__") and callable(models.__dict__[name])
)
parser = argparse.ArgumentParser(description="PyTorch ImageNet Training")
parser.add_argument("data", metavar="DIR", help="path to dataset")
parser.add_argument(
"-a",
"--arch",
metavar="ARCH",
default="resnet50",
choices=model_names,
help="model architecture: " + " | ".join(model_names) + " (default: resnet50)",
)
parser.add_argument(
"-j",
"--workers",
default=32,
type=int,
metavar="N",
help="number of data loading workers (default: 32)",
)
parser.add_argument(
"--epochs", default=90, type=int, metavar="N", help="number of total epochs to run"
)
parser.add_argument(
"--start-epoch",
default=0,
type=int,
metavar="N",
help="manual epoch number (useful on restarts)",
)
parser.add_argument(
"-b",
"--batch-size",
default=4096,
type=int,
metavar="N",
help="mini-batch size (default: 4096), this is the total "
"batch size of all GPUs on the current node when "
"using Data Parallel or Distributed Data Parallel",
)
parser.add_argument(
"--lr",
"--learning-rate",
default=0.1,
type=float,
metavar="LR",
help="initial (base) learning rate",
dest="lr",
)
parser.add_argument("--momentum", default=0.9, type=float, metavar="M", help="momentum")
parser.add_argument(
"--wd",
"--weight-decay",
default=0.0,
type=float,
metavar="W",
help="weight decay (default: 0.)",
dest="weight_decay",
)
parser.add_argument(
"-p",
"--print-freq",
default=10,
type=int,
metavar="N",
help="print frequency (default: 10)",
)
parser.add_argument(
"--resume",
default="",
type=str,
metavar="PATH",
help="path to latest checkpoint (default: none)",
)
parser.add_argument(
"-e",
"--evaluate",
dest="evaluate",
action="store_true",
help="evaluate model on validation set",
)
parser.add_argument(
"--world-size",
default=-1,
type=int,
help="number of nodes for distributed training",
)
parser.add_argument(
"--rank", default=-1, type=int, help="node rank for distributed training"
)
parser.add_argument(
"--dist-url",
default="tcp://224.66.41.62:23456",
type=str,
help="url used to set up distributed training",
)
parser.add_argument(
"--dist-backend", default="nccl", type=str, help="distributed backend"
)
parser.add_argument(
"--seed", default=None, type=int, help="seed for initializing training. "
)
parser.add_argument("--gpu", default=None, type=int, help="GPU id to use.")
parser.add_argument(
"--multiprocessing-distributed",
action="store_true",
help="Use multi-processing distributed training to launch "
"N processes per node, which has N GPUs. This is the "
"fastest way to use PyTorch for either single node or "
"multi node data parallel training",
)
# additional configs:
parser.add_argument(
"--pretrained", default="", type=str, help="path to simsiam pretrained checkpoint"
)
parser.add_argument("--lars", action="store_true", help="Use LARS")
best_acc1 = 0
def main():
args = parser.parse_args()
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn(
"You have chosen to seed training. "
"This will turn on the CUDNN deterministic setting, "
"which can slow down your training considerably! "
"You may see unexpected behavior when restarting "
"from checkpoints."
)
if args.gpu is not None:
warnings.warn(
"You have chosen a specific GPU. This will completely "
"disable data parallelism."
)
if args.dist_url == "env://" and args.world_size == -1:
args.world_size = int(os.environ["WORLD_SIZE"])
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
ngpus_per_node = torch.cuda.device_count()
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.world_size
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
# Simply call main_worker function
main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
global best_acc1
args.gpu = gpu
# suppress printing if not master
if args.multiprocessing_distributed and args.gpu != 0:
def print_pass(*args):
pass
builtins.print = print_pass
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
# For multiprocessing distributed training, rank needs to be the
# global rank among all the processes
args.rank = args.rank * ngpus_per_node + gpu
dist.init_process_group(
backend=args.dist_backend,
init_method=args.dist_url,
world_size=args.world_size,
rank=args.rank,
)
torch.distributed.barrier()
# create model
print("=> creating model '{}'".format(args.arch))
model = models.__dict__[args.arch]()
# freeze all layers but the last fc
for name, param in model.named_parameters():
if name not in ["fc.weight", "fc.bias"]:
param.requires_grad = False
# init the fc layer
model.fc.weight.data.normal_(mean=0.0, std=0.01)
model.fc.bias.data.zero_()
# load from pre-trained, before DistributedDataParallel constructor
if args.pretrained:
if os.path.isfile(args.pretrained):
print("=> loading checkpoint '{}'".format(args.pretrained))
checkpoint = torch.load(args.pretrained, map_location="cpu")
# rename moco pre-trained keys
state_dict = checkpoint["state_dict"]
for k in list(state_dict.keys()):
# retain only encoder_q up to before the embedding layer
if k.startswith("module.encoder_q") and not k.startswith(
"module.encoder_q.fc"
):
# remove prefix
state_dict[k[len("module.encoder_q.") :]] = state_dict[k]
# delete renamed or unused k
del state_dict[k]
args.start_epoch = 0
msg = model.load_state_dict(state_dict, strict=False)
assert set(msg.missing_keys) == {"fc.weight", "fc.bias"}
print("=> loaded pre-trained model '{}'".format(args.pretrained))
else:
print("=> no checkpoint found at '{}'".format(args.pretrained))
# infer learning rate before changing batch size
init_lr = args.lr * args.batch_size / 256
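# For example, with the defaults --lr 0.1 and -b 4096, the linear scaling rule
# above gives init_lr = 0.1 * 4096 / 256 = 1.6.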
if args.distributed:
# For multiprocessing distributed, DistributedDataParallel constructor
# should always set the single device scope, otherwise,
# DistributedDataParallel will use all available devices.
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
# When using a single GPU per process and per
# DistributedDataParallel, we need to divide the batch size
# ourselves based on the total number of GPUs we have
args.batch_size = int(args.batch_size / ngpus_per_node)
args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.gpu]
)
else:
model.cuda()
# DistributedDataParallel will divide and allocate batch_size to all
# available GPUs if device_ids are not set
model = torch.nn.parallel.DistributedDataParallel(model)
elif args.gpu is not None:
torch.cuda.set_device(args.gpu)
model = model.cuda(args.gpu)
else:
# DataParallel will divide and allocate batch_size to all available GPUs
if args.arch.startswith("alexnet") or args.arch.startswith("vgg"):
model.features = torch.nn.DataParallel(model.features)
model.cuda()
else:
model = torch.nn.DataParallel(model).cuda()
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda(args.gpu)
# optimize only the linear classifier
parameters = list(filter(lambda p: p.requires_grad, model.parameters()))
assert len(parameters) == 2 # fc.weight, fc.bias
optimizer = torch.optim.SGD(
parameters, init_lr, momentum=args.momentum, weight_decay=args.weight_decay
)
if args.lars:
print("=> use LARS optimizer.")
from apex.parallel.LARC import LARC
optimizer = LARC(optimizer=optimizer, trust_coefficient=0.001, clip=False)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
if args.gpu is None:
checkpoint = torch.load(args.resume)
else:
# Map model to be loaded to specified single gpu.
loc = "cuda:{}".format(args.gpu)
checkpoint = torch.load(args.resume, map_location=loc)
args.start_epoch = checkpoint["epoch"]
best_acc1 = checkpoint["best_acc1"]
if args.gpu is not None:
# best_acc1 may be from a checkpoint from a different GPU
best_acc1 = best_acc1.to(args.gpu)
model.load_state_dict(checkpoint["state_dict"])
optimizer.load_state_dict(checkpoint["optimizer"])
print(
"=> loaded checkpoint '{}' (epoch {})".format(
args.resume, checkpoint["epoch"]
)
)
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
# Data loading code
traindir = os.path.join(args.data, "train")
valdir = os.path.join(args.data, "val")
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
)
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose(
[
transforms.RandomResizedCrop(224, scale=(0.08, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]
),
)
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
else:
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=args.batch_size,
shuffle=(train_sampler is None),
num_workers=args.workers,
pin_memory=True,
sampler=train_sampler,
drop_last=True,
)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(
valdir,
transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
]
),
),
batch_size=256,
shuffle=False,
num_workers=args.workers,
pin_memory=True,
)
if args.evaluate:
validate(val_loader, model, criterion, args)
return
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
adjust_learning_rate(optimizer, init_lr, epoch, args)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch, args)
# evaluate on validation set
acc1 = validate(val_loader, model, criterion, args)
# remember best acc@1 and save checkpoint
is_best = acc1 > best_acc1
best_acc1 = max(acc1, best_acc1)
if not args.multiprocessing_distributed or (
args.multiprocessing_distributed and args.rank % ngpus_per_node == 0
):
save_checkpoint(
{
"epoch": epoch + 1,
"arch": args.arch,
"state_dict": model.state_dict(),
"best_acc1": best_acc1,
"optimizer": optimizer.state_dict(),
},
is_best,
)
if epoch == args.start_epoch:
sanity_check(model.state_dict(), args.pretrained)
def train(train_loader, model, criterion, optimizer, epoch, args):
batch_time = AverageMeter("Time", ":6.3f")
data_time = AverageMeter("Data", ":6.3f")
losses = AverageMeter("Loss", ":.4e")
top1 = AverageMeter("Acc@1", ":6.2f")
top5 = AverageMeter("Acc@5", ":6.2f")
progress = ProgressMeter(
len(train_loader),
[batch_time, data_time, losses, top1, top5],
prefix="Epoch: [{}]".format(epoch),
)
"""
Switch to eval mode:
Under the protocol of linear classification on frozen features/models,
it is not legitimate to change any part of the pre-trained model.
BatchNorm in train mode may revise running mean/std (even if it receives
no gradient), which are part of the model parameters too.
"""
model.eval()
end = time.time()
for i, (images, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(images)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
def validate(val_loader, model, criterion, args):
batch_time = AverageMeter("Time", ":6.3f")
losses = AverageMeter("Loss", ":.4e")
top1 = AverageMeter("Acc@1", ":6.2f")
top5 = AverageMeter("Acc@5", ":6.2f")
progress = ProgressMeter(
len(val_loader), [batch_time, losses, top1, top5], prefix="Test: "
)
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (images, target) in enumerate(val_loader):
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(images)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
# TODO: this should also be done with the ProgressMeter
print(
" * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}".format(top1=top1, top5=top5)
)
return top1.avg
def save_checkpoint(state, is_best, filename="checkpoint.pth.tar"):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, "model_best.pth.tar")
def sanity_check(state_dict, pretrained_weights):
"""
Linear classifier should not change any weights other than the linear layer.
This sanity check asserts nothing wrong happens (e.g., BN stats updated).
"""
print("=> loading '{}' for sanity check".format(pretrained_weights))
checkpoint = torch.load(pretrained_weights, map_location="cpu")
state_dict_pre = checkpoint["state_dict"]
for k in list(state_dict.keys()):
# only ignore fc layer
if "fc.weight" in k or "fc.bias" in k:
continue
# name in pretrained model
k_pre = (
"module.encoder_q." + k[len("module.") :]
if k.startswith("module.")
else "module.encoder_q." + k
)
assert (
state_dict[k].cpu() == state_dict_pre[k_pre]
).all(), "{} is changed in linear classifier training.".format(k)
print("=> sanity check passed.")
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=":f"):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = "{name} {val" + self.fmt + "} ({avg" + self.fmt + "})"
return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
def __init__(self, num_batches, meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print("\t".join(entries))
def _get_batch_fmtstr(self, num_batches):
num_digits = len(str(num_batches // 1))
fmt = "{:" + str(num_digits) + "d}"
return "[" + fmt + "/" + fmt.format(num_batches) + "]"
def adjust_learning_rate(optimizer, init_lr, epoch, args):
"""Decay the learning rate based on schedule"""
cur_lr = init_lr * 0.5 * (1.0 + math.cos(math.pi * epoch / args.epochs))
for param_group in optimizer.param_groups:
param_group["lr"] = cur_lr
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == "__main__":
main()
|
asym-siam-main
|
main_lincls.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
asym-siam-main
|
moco/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
import torch.nn as nn
class MoCo(nn.Module):
"""
Build a MoCo model with: a query encoder, a key encoder, and a queue
https://arxiv.org/abs/1911.05722
"""
def __init__(
self,
base_encoder,
dim=128,
K=65536,
m=0.999,
T=0.07,
enable_asym_bn=False,
):
"""
dim: feature dimension (default: 128)
K: queue size; number of negative keys (default: 65536)
m: moco momentum of updating key encoder (default: 0.999)
T: softmax temperature (default: 0.07)
"""
super(MoCo, self).__init__()
self.K = K
self.m = m
self.T = T
# create the encoders
# num_classes is the output fc dimension
self.encoder_q = base_encoder(num_classes=dim)
self.encoder_k = base_encoder(num_classes=dim)
dim_mlp = self.encoder_q.fc.weight.shape[1]
self.encoder_q.fc = nn.Sequential(
nn.Linear(dim_mlp, dim_mlp),
nn.BatchNorm1d(dim_mlp),
nn.ReLU(),
nn.Linear(dim_mlp, dim_mlp),
nn.ReLU(),
self.encoder_q.fc,
)
self.encoder_k.fc = nn.Sequential(
nn.Linear(dim_mlp, dim_mlp),
nn.BatchNorm1d(dim_mlp),
nn.ReLU(),
nn.Linear(dim_mlp, dim_mlp),
nn.ReLU(),
self.encoder_k.fc,
)
"""
# --------------------------------------------------------------------------- #
# Sync BatchNorm #
# --------------------------------------------------------------------------- #
Intermediate Sync BatchNorm layers are a way to reduce intra-image variance
in the target encoder. Sync BatchNorm leads to a notable improvement when applied
to the target (referred to as "AsymBN" in our paper) and to degeneration when
applied to the source.
# --------------------------------------------------------------------------- #
"""
if enable_asym_bn:
process_group = create_syncbn_process_group(8)
self.encoder_k.fc = torch.nn.SyncBatchNorm.convert_sync_batchnorm(
self.encoder_k.fc, process_group
)
for param_q, param_k in zip(
self.encoder_q.parameters(), self.encoder_k.parameters()
):
param_k.data.copy_(param_q.data) # initialize
param_k.requires_grad = False # not update by gradient
# create the queue
self.register_buffer("queue", torch.randn(dim, K))
self.queue = nn.functional.normalize(self.queue, dim=0)
self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long))
@torch.no_grad()
def _momentum_update_key_encoder(self):
"""
Momentum update of the key encoder
"""
for param_q, param_k in zip(
self.encoder_q.parameters(), self.encoder_k.parameters()
):
param_k.data = param_k.data * self.m + param_q.data * (1.0 - self.m)
@torch.no_grad()
def _dequeue_and_enqueue(self, keys):
# gather keys before updating queue
keys = concat_all_gather(keys)
batch_size = keys.shape[0]
ptr = int(self.queue_ptr)
assert self.K % batch_size == 0 # for simplicity
# replace the keys at ptr (dequeue and enqueue)
self.queue[:, ptr : ptr + batch_size] = keys.T
ptr = (ptr + batch_size) % self.K # move pointer
self.queue_ptr[0] = ptr
@torch.no_grad()
def _batch_shuffle_ddp(self, x):
"""
Batch shuffle, for making use of BatchNorm.
*** Only support DistributedDataParallel (DDP) model. ***
"""
# gather from all gpus
batch_size_this = x.shape[0]
x_gather = concat_all_gather(x)
batch_size_all = x_gather.shape[0]
num_gpus = batch_size_all // batch_size_this
# random shuffle index
idx_shuffle = torch.randperm(batch_size_all).cuda()
# broadcast to all gpus
torch.distributed.broadcast(idx_shuffle, src=0)
# index for restoring
idx_unshuffle = torch.argsort(idx_shuffle)
# shuffled index for this gpu
gpu_idx = torch.distributed.get_rank()
idx_this = idx_shuffle.view(num_gpus, -1)[gpu_idx]
return x_gather[idx_this], idx_unshuffle
@torch.no_grad()
def _batch_unshuffle_ddp(self, x, idx_unshuffle):
"""
Undo batch shuffle.
*** Only support DistributedDataParallel (DDP) model. ***
"""
# gather from all gpus
batch_size_this = x.shape[0]
x_gather = concat_all_gather(x)
batch_size_all = x_gather.shape[0]
num_gpus = batch_size_all // batch_size_this
# restored index for this gpu
gpu_idx = torch.distributed.get_rank()
idx_this = idx_unshuffle.view(num_gpus, -1)[gpu_idx]
return x_gather[idx_this]
def forward(self, im_q, im_q_mini, im_k):
"""
Input:
im_q: list of batches of large query crops
im_q_mini: list of batches of mini query crops
im_k: list of batches of key crops
Output:
lists of logits and labels, one entry per query crop
"""
# compute query features
q_large = []
for im in im_q:
_q = self.encoder_q(im) # queries: NxC
_q = nn.functional.normalize(_q, dim=1)
q_large.append(_q)
q_mini = []
for im in im_q_mini:
_q_mini = self.encoder_q(im) # queries: NxC
_q_mini = nn.functional.normalize(_q_mini, dim=1)
q_mini.append(_q_mini)
"""
# --------------------------------------------------------------------------- #
# Mean Encoding #
# --------------------------------------------------------------------------- #
Mean Encoding is a direct approach to reduce the variance of a random variable
by performing i.i.d. sampling multiple times and take the mean as the new
variable. Mean Encoding is simply generated by running the same encoder on
multiple augmented views of the same image.
# --------------------------------------------------------------------------- #
"""
crop_num = len(im_k)
# compute key features
with torch.no_grad(): # no gradient to keys
self._momentum_update_key_encoder() # update the key encoder
im_k = torch.cat(im_k, dim=0)
# shuffle for making use of BN
im_k, idx_unshuffle = self._batch_shuffle_ddp(im_k)
k = self.encoder_k(im_k) # keys: NxC
# undo shuffle
k = self._batch_unshuffle_ddp(k, idx_unshuffle)
cur_size, embedding_length = k.shape
k = k.view(crop_num, cur_size // crop_num, embedding_length)
k = nn.functional.normalize(torch.mean(k, dim=0), dim=1)
logits_list = []
labels_list = []
for q in q_large + q_mini:
# compute logits
# Einstein sum is more intuitive
# positive logits: Nx1
l_pos = torch.einsum("nc,nc->n", [q, k]).unsqueeze(-1)
# negative logits: NxK
l_neg = torch.einsum("nc,ck->nk", [q, self.queue.clone().detach()])
# logits: Nx(1+K)
logits = torch.cat([l_pos, l_neg], dim=1)
# apply temperature
logits /= self.T
# labels: positive key indicators
labels = torch.zeros(logits.shape[0], dtype=torch.long).cuda()
logits_list.append(logits)
labels_list.append(labels)
# dequeue and enqueue
self._dequeue_and_enqueue(k)
return logits_list, labels_list
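# Output shape note (illustrative): with a per-GPU batch of N images and queue
# size K, each entry of logits_list has shape [N, 1 + K] and each entry of
# labels_list is a length-N tensor of zeros (the positive key sits at index 0).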
# utils
@torch.no_grad()
def concat_all_gather(tensor):
"""
Performs all_gather operation on the provided tensors.
*** Warning ***: torch.distributed.all_gather has no gradient.
"""
tensors_gather = [
torch.ones_like(tensor)
for _ in range(torch.distributed.get_world_size())
]
torch.distributed.all_gather(tensors_gather, tensor, async_op=False)
output = torch.cat(tensors_gather, dim=0)
return output
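# Shape note (illustrative): under DDP with world_size = 8 and a per-GPU key
# batch of 32, concat_all_gather returns a tensor with 8 * 32 = 256 rows, so
# the queue update in _dequeue_and_enqueue sees the keys from every process.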
def create_syncbn_process_group(num_gpu_per_group):
if num_gpu_per_group == 0:
return None
world_size = torch.distributed.get_world_size()
assert world_size >= num_gpu_per_group
assert world_size % num_gpu_per_group == 0
group = None
for group_num in range(world_size // num_gpu_per_group):
group_ids = range(
group_num * num_gpu_per_group, (group_num + 1) * num_gpu_per_group
)
cur_group = torch.distributed.new_group(ranks=group_ids)
if torch.distributed.get_rank() // num_gpu_per_group == group_num:
group = cur_group
assert group is not None
return group
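# Illustrative grouping: with world_size = 32 and num_gpu_per_group = 8, four
# process groups are created over ranks 0-7, 8-15, 16-23, and 24-31, and each
# rank returns the group it belongs to.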
|
asym-siam-main
|
moco/builder.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from PIL import ImageFilter
import random
import numpy as np
"""
# --------------------------------------------------------------------------- #
# ScaleMix #
# --------------------------------------------------------------------------- #
ScaleMix generates new views of an image by mixing two views of potentially
different scales together via binary masking. The masking strategy follows
CutMix, where an entire region - denoted by a box with randomly sampled
coordinates - is cropped and pasted. Unlike CutMix, ScaleMix only operates on
views from the same image, and the output is a single view of standard size
(224x224). This single view can be regarded as an efficient approximation of
MultiCrop, without the need to process small crops separately.
# --------------------------------------------------------------------------- #
"""
def scalemix(view1, view2):
def random_bbox(lam, H, W):
cut_rat = np.sqrt(1.0 - lam)
cut_w = int(W * cut_rat)
cut_h = int(H * cut_rat)
cx = np.random.randint(W)
cy = np.random.randint(H)
bbx1 = np.clip(cx - cut_w // 2, 0, W)
bby1 = np.clip(cy - cut_h // 2, 0, H)
bbx2 = np.clip(cx + cut_w // 2, 0, W)
bby2 = np.clip(cy + cut_h // 2, 0, H)
return bbx1, bby1, bbx2, bby2
_, h, w = view1.shape
lam = np.random.uniform(low=0.0, high=1.0)
bbx1, bby1, bbx2, bby2 = random_bbox(lam, h, w)
view1[:, bbx1:bbx2, bby1:bby2] = view2[:, bbx1:bbx2, bby1:bby2]
return view1
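# Minimal usage sketch (assumption: inputs are CHW tensors produced by the same
# per-view transform, e.g. 3x224x224 after ToTensor; names are placeholders):
#   v1 = query_transform(img)
#   v2 = query_transform(img)
#   mixed = scalemix(v1, v2)  # v1 with a random box replaced in place by the matching box from v2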
class CropsTransform:
"""Take two random crops of one image as the query and key."""
def __init__(
self,
key_transform,
query_mini_transform,
query_transform,
enable_scalemix=False,
enable_multicrop=False,
enable_mean_encoding=False,
):
self.key_transform = key_transform
self.query_mini_transform = query_mini_transform
self.query_transform = query_transform
self.enable_scalemix = enable_scalemix
self.enable_multicrop = enable_multicrop
self.enable_mean_encoding = enable_mean_encoding
def __call__(self, x):
crops = []
# Query crop
if self.enable_scalemix:
q = scalemix(self.query_transform(x), self.query_transform(x),)
else:
q = self.query_transform(x)
crops.append(q)
# Query mini crops
if self.enable_multicrop:
for i in range(6):
crops.append(self.query_mini_transform(x))
# Key crop
crops.append(self.key_transform(x))
if self.enable_mean_encoding:
crops.append(self.key_transform(x))
return crops
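# Sketch of how CropsTransform is typically wired into a torchvision dataset
# (key_aug / mini_aug / query_aug are placeholder transform pipelines):
#   transform = CropsTransform(
#       key_transform=key_aug,
#       query_mini_transform=mini_aug,
#       query_transform=query_aug,
#       enable_scalemix=True,
#   )
#   dataset = torchvision.datasets.ImageFolder(traindir, transform)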
class GaussianBlur(object):
"""Gaussian blur augmentation in SimCLR https://arxiv.org/abs/2002.05709"""
def __init__(self, sigma=[0.1, 2.0]):
self.sigma = sigma
def __call__(self, x):
sigma = random.uniform(self.sigma[0], self.sigma[1])
x = x.filter(ImageFilter.GaussianBlur(radius=sigma))
return x
|
asym-siam-main
|
moco/loader.py
|
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import argparse, json, os
"""
During rendering, each CLEVR scene file is dumped to disk as a separate JSON
file; this is convenient for distributing rendering across multiple machines.
This script collects all CLEVR scene files stored in a directory and combines
them into a single JSON file. This script also adds the version number, date,
and license to the output file.
"""
parser = argparse.ArgumentParser()
parser.add_argument('--input_dir', default='output/scenes')
parser.add_argument('--output_file', default='output/CLEVR_misc_scenes.json')
parser.add_argument('--version', default='1.0')
parser.add_argument('--date', default='7/8/2017')
parser.add_argument('--license',
    default='Creative Commons Attribution (CC-BY 4.0)')
def main(args):
input_files = os.listdir(args.input_dir)
scenes = []
split = None
for filename in os.listdir(args.input_dir):
if not filename.endswith('.json'):
continue
path = os.path.join(args.input_dir, filename)
with open(path, 'r') as f:
scene = json.load(f)
scenes.append(scene)
if split is not None:
msg = 'Input directory contains scenes from multiple splits'
assert scene['split'] == split, msg
else:
split = scene['split']
scenes.sort(key=lambda s: s['image_index'])
for s in scenes:
print(s['image_filename'])
output = {
'info': {
'date': args.date,
'version': args.version,
'split': split,
'license': args.license,
},
'scenes': scenes
}
with open(args.output_file, 'w') as f:
json.dump(output, f)
if __name__ == '__main__':
args = parser.parse_args()
main(args)
|
clevr-dataset-gen-main
|
image_generation/collect_scenes.py
|
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import print_function
import math, sys, random, argparse, json, os, tempfile
from datetime import datetime as dt
from collections import Counter
"""
Renders random scenes using Blender, each with a random number of objects;
each object has a random size, position, color, and shape. Objects will be
nonintersecting but may partially occlude each other. Output images will be
written to disk as PNGs, and we will also write a JSON file for each image with
ground-truth scene information.
This file expects to be run from Blender like this:
blender --background --python render_images.py -- [arguments to this script]
"""
INSIDE_BLENDER = True
try:
import bpy, bpy_extras
from mathutils import Vector
except ImportError as e:
INSIDE_BLENDER = False
if INSIDE_BLENDER:
try:
import utils
except ImportError as e:
print("\nERROR")
print("Running render_images.py from Blender and cannot import utils.py.")
print("You may need to add a .pth file to the site-packages of Blender's")
print("bundled python with a command like this:\n")
print("echo $PWD >> $BLENDER/$VERSION/python/lib/python3.5/site-packages/clevr.pth")
print("\nWhere $BLENDER is the directory where Blender is installed, and")
print("$VERSION is your Blender version (such as 2.78).")
sys.exit(1)
parser = argparse.ArgumentParser()
# Input options
parser.add_argument('--base_scene_blendfile', default='data/base_scene.blend',
help="Base blender file on which all scenes are based; includes " +
"ground plane, lights, and camera.")
parser.add_argument('--properties_json', default='data/properties.json',
help="JSON file defining objects, materials, sizes, and colors. " +
"The \"colors\" field maps from CLEVR color names to RGB values; " +
"The \"sizes\" field maps from CLEVR size names to scalars used to " +
"rescale object models; the \"materials\" and \"shapes\" fields map " +
"from CLEVR material and shape names to .blend files in the " +
"--object_material_dir and --shape_dir directories respectively.")
parser.add_argument('--shape_dir', default='data/shapes',
help="Directory where .blend files for object models are stored")
parser.add_argument('--material_dir', default='data/materials',
help="Directory where .blend files for materials are stored")
parser.add_argument('--shape_color_combos_json', default=None,
help="Optional path to a JSON file mapping shape names to a list of " +
"allowed color names for that shape. This allows rendering images " +
"for CLEVR-CoGenT.")
# Settings for objects
parser.add_argument('--min_objects', default=3, type=int,
help="The minimum number of objects to place in each scene")
parser.add_argument('--max_objects', default=10, type=int,
help="The maximum number of objects to place in each scene")
parser.add_argument('--min_dist', default=0.25, type=float,
help="The minimum allowed distance between object centers")
parser.add_argument('--margin', default=0.4, type=float,
help="Along all cardinal directions (left, right, front, back), all " +
"objects will be at least this distance apart. This makes resolving " +
"spatial relationships slightly less ambiguous.")
parser.add_argument('--min_pixels_per_object', default=200, type=int,
help="All objects will have at least this many visible pixels in the " +
"final rendered images; this ensures that no objects are fully " +
"occluded by other objects.")
parser.add_argument('--max_retries', default=50, type=int,
help="The number of times to try placing an object before giving up and " +
"re-placing all objects in the scene.")
# Output settings
parser.add_argument('--start_idx', default=0, type=int,
help="The index at which to start for numbering rendered images. Setting " +
"this to non-zero values allows you to distribute rendering across " +
"multiple machines and recombine the results later.")
parser.add_argument('--num_images', default=5, type=int,
help="The number of images to render")
parser.add_argument('--filename_prefix', default='CLEVR',
help="This prefix will be prepended to the rendered images and JSON scenes")
parser.add_argument('--split', default='new',
help="Name of the split for which we are rendering. This will be added to " +
"the names of rendered images, and will also be stored in the JSON " +
"scene structure for each image.")
parser.add_argument('--output_image_dir', default='../output/images/',
help="The directory where output images will be stored. It will be " +
"created if it does not exist.")
parser.add_argument('--output_scene_dir', default='../output/scenes/',
help="The directory where output JSON scene structures will be stored. " +
"It will be created if it does not exist.")
parser.add_argument('--output_scene_file', default='../output/CLEVR_scenes.json',
help="Path to write a single JSON file containing all scene information")
parser.add_argument('--output_blend_dir', default='output/blendfiles',
help="The directory where blender scene files will be stored, if the " +
"user requested that these files be saved using the " +
"--save_blendfiles flag; in this case it will be created if it does " +
"not already exist.")
parser.add_argument('--save_blendfiles', type=int, default=0,
help="Setting --save_blendfiles 1 will cause the blender scene file for " +
"each generated image to be stored in the directory specified by " +
"the --output_blend_dir flag. These files are not saved by default " +
"because they take up ~5-10MB each.")
parser.add_argument('--version', default='1.0',
help="String to store in the \"version\" field of the generated JSON file")
parser.add_argument('--license',
default="Creative Commons Attribution (CC-BY 4.0)",
help="String to store in the \"license\" field of the generated JSON file")
parser.add_argument('--date', default=dt.today().strftime("%m/%d/%Y"),
help="String to store in the \"date\" field of the generated JSON file; " +
"defaults to today's date")
# Rendering options
parser.add_argument('--use_gpu', default=0, type=int,
help="Setting --use_gpu 1 enables GPU-accelerated rendering using CUDA. " +
"You must have an NVIDIA GPU with the CUDA toolkit installed for " +
"to work.")
parser.add_argument('--width', default=320, type=int,
help="The width (in pixels) for the rendered images")
parser.add_argument('--height', default=240, type=int,
help="The height (in pixels) for the rendered images")
parser.add_argument('--key_light_jitter', default=1.0, type=float,
help="The magnitude of random jitter to add to the key light position.")
parser.add_argument('--fill_light_jitter', default=1.0, type=float,
help="The magnitude of random jitter to add to the fill light position.")
parser.add_argument('--back_light_jitter', default=1.0, type=float,
help="The magnitude of random jitter to add to the back light position.")
parser.add_argument('--camera_jitter', default=0.5, type=float,
help="The magnitude of random jitter to add to the camera position")
parser.add_argument('--render_num_samples', default=512, type=int,
help="The number of samples to use when rendering. Larger values will " +
"result in nicer images but will cause rendering to take longer.")
parser.add_argument('--render_min_bounces', default=8, type=int,
help="The minimum number of bounces to use for rendering.")
parser.add_argument('--render_max_bounces', default=8, type=int,
help="The maximum number of bounces to use for rendering.")
parser.add_argument('--render_tile_size', default=256, type=int,
help="The tile size to use for rendering. This should not affect the " +
"quality of the rendered image but may affect the speed; CPU-based " +
"rendering may achieve better performance using smaller tile sizes " +
"while larger tile sizes may be optimal for GPU-based rendering.")
def main(args):
num_digits = 6
prefix = '%s_%s_' % (args.filename_prefix, args.split)
img_template = '%s%%0%dd.png' % (prefix, num_digits)
scene_template = '%s%%0%dd.json' % (prefix, num_digits)
blend_template = '%s%%0%dd.blend' % (prefix, num_digits)
img_template = os.path.join(args.output_image_dir, img_template)
scene_template = os.path.join(args.output_scene_dir, scene_template)
blend_template = os.path.join(args.output_blend_dir, blend_template)
if not os.path.isdir(args.output_image_dir):
os.makedirs(args.output_image_dir)
if not os.path.isdir(args.output_scene_dir):
os.makedirs(args.output_scene_dir)
if args.save_blendfiles == 1 and not os.path.isdir(args.output_blend_dir):
os.makedirs(args.output_blend_dir)
all_scene_paths = []
for i in range(args.num_images):
img_path = img_template % (i + args.start_idx)
scene_path = scene_template % (i + args.start_idx)
all_scene_paths.append(scene_path)
blend_path = None
if args.save_blendfiles == 1:
blend_path = blend_template % (i + args.start_idx)
num_objects = random.randint(args.min_objects, args.max_objects)
render_scene(args,
num_objects=num_objects,
output_index=(i + args.start_idx),
output_split=args.split,
output_image=img_path,
output_scene=scene_path,
output_blendfile=blend_path,
)
# After rendering all images, combine the JSON files for each scene into a
# single JSON file.
all_scenes = []
for scene_path in all_scene_paths:
with open(scene_path, 'r') as f:
all_scenes.append(json.load(f))
output = {
'info': {
'date': args.date,
'version': args.version,
'split': args.split,
'license': args.license,
},
'scenes': all_scenes
}
with open(args.output_scene_file, 'w') as f:
json.dump(output, f)
def render_scene(args,
num_objects=5,
output_index=0,
output_split='none',
output_image='render.png',
output_scene='render_json',
output_blendfile=None,
):
# Load the main blendfile
bpy.ops.wm.open_mainfile(filepath=args.base_scene_blendfile)
# Load materials
utils.load_materials(args.material_dir)
# Set render arguments so we can get pixel coordinates later.
# We use functionality specific to the CYCLES renderer so BLENDER_RENDER
# cannot be used.
render_args = bpy.context.scene.render
render_args.engine = "CYCLES"
render_args.filepath = output_image
render_args.resolution_x = args.width
render_args.resolution_y = args.height
render_args.resolution_percentage = 100
render_args.tile_x = args.render_tile_size
render_args.tile_y = args.render_tile_size
if args.use_gpu == 1:
# Blender changed the API for enabling CUDA at some point
if bpy.app.version < (2, 78, 0):
bpy.context.user_preferences.system.compute_device_type = 'CUDA'
bpy.context.user_preferences.system.compute_device = 'CUDA_0'
else:
cycles_prefs = bpy.context.user_preferences.addons['cycles'].preferences
cycles_prefs.compute_device_type = 'CUDA'
# Some CYCLES-specific stuff
bpy.data.worlds['World'].cycles.sample_as_light = True
bpy.context.scene.cycles.blur_glossy = 2.0
bpy.context.scene.cycles.samples = args.render_num_samples
bpy.context.scene.cycles.transparent_min_bounces = args.render_min_bounces
bpy.context.scene.cycles.transparent_max_bounces = args.render_max_bounces
if args.use_gpu == 1:
bpy.context.scene.cycles.device = 'GPU'
# This will give ground-truth information about the scene and its objects
scene_struct = {
'split': output_split,
'image_index': output_index,
'image_filename': os.path.basename(output_image),
'objects': [],
'directions': {},
}
# Put a plane on the ground so we can compute cardinal directions
bpy.ops.mesh.primitive_plane_add(radius=5)
plane = bpy.context.object
def rand(L):
return 2.0 * L * (random.random() - 0.5)
# Add random jitter to camera position
if args.camera_jitter > 0:
for i in range(3):
bpy.data.objects['Camera'].location[i] += rand(args.camera_jitter)
# Figure out the left, up, and behind directions along the plane and record
# them in the scene structure
camera = bpy.data.objects['Camera']
plane_normal = plane.data.vertices[0].normal
cam_behind = camera.matrix_world.to_quaternion() * Vector((0, 0, -1))
cam_left = camera.matrix_world.to_quaternion() * Vector((-1, 0, 0))
cam_up = camera.matrix_world.to_quaternion() * Vector((0, 1, 0))
plane_behind = (cam_behind - cam_behind.project(plane_normal)).normalized()
plane_left = (cam_left - cam_left.project(plane_normal)).normalized()
plane_up = cam_up.project(plane_normal).normalized()
# Delete the plane; we only used it for normals anyway. The base scene file
# contains the actual ground plane.
utils.delete_object(plane)
# Save all six axis-aligned directions in the scene struct
scene_struct['directions']['behind'] = tuple(plane_behind)
scene_struct['directions']['front'] = tuple(-plane_behind)
scene_struct['directions']['left'] = tuple(plane_left)
scene_struct['directions']['right'] = tuple(-plane_left)
scene_struct['directions']['above'] = tuple(plane_up)
scene_struct['directions']['below'] = tuple(-plane_up)
# Add random jitter to lamp positions
if args.key_light_jitter > 0:
for i in range(3):
bpy.data.objects['Lamp_Key'].location[i] += rand(args.key_light_jitter)
if args.back_light_jitter > 0:
for i in range(3):
bpy.data.objects['Lamp_Back'].location[i] += rand(args.back_light_jitter)
if args.fill_light_jitter > 0:
for i in range(3):
bpy.data.objects['Lamp_Fill'].location[i] += rand(args.fill_light_jitter)
# Now make some random objects
objects, blender_objects = add_random_objects(scene_struct, num_objects, args, camera)
# Render the scene and dump the scene data structure
scene_struct['objects'] = objects
scene_struct['relationships'] = compute_all_relationships(scene_struct)
while True:
try:
bpy.ops.render.render(write_still=True)
break
except Exception as e:
print(e)
with open(output_scene, 'w') as f:
json.dump(scene_struct, f, indent=2)
if output_blendfile is not None:
bpy.ops.wm.save_as_mainfile(filepath=output_blendfile)
def add_random_objects(scene_struct, num_objects, args, camera):
"""
Add random objects to the current blender scene
"""
# Load the property file
with open(args.properties_json, 'r') as f:
properties = json.load(f)
color_name_to_rgba = {}
for name, rgb in properties['colors'].items():
rgba = [float(c) / 255.0 for c in rgb] + [1.0]
color_name_to_rgba[name] = rgba
material_mapping = [(v, k) for k, v in properties['materials'].items()]
object_mapping = [(v, k) for k, v in properties['shapes'].items()]
size_mapping = list(properties['sizes'].items())
shape_color_combos = None
if args.shape_color_combos_json is not None:
with open(args.shape_color_combos_json, 'r') as f:
shape_color_combos = list(json.load(f).items())
positions = []
objects = []
blender_objects = []
for i in range(num_objects):
# Choose a random size
size_name, r = random.choice(size_mapping)
# Try to place the object, ensuring that we don't intersect any existing
# objects and that we are more than the desired margin away from all existing
# objects along all cardinal directions.
num_tries = 0
while True:
# If we try and fail to place an object too many times, then delete all
# the objects in the scene and start over.
num_tries += 1
if num_tries > args.max_retries:
for obj in blender_objects:
utils.delete_object(obj)
return add_random_objects(scene_struct, num_objects, args, camera)
x = random.uniform(-3, 3)
y = random.uniform(-3, 3)
# Check to make sure the new object is further than min_dist from all
# other objects, and further than margin along the four cardinal directions
dists_good = True
margins_good = True
for (xx, yy, rr) in positions:
dx, dy = x - xx, y - yy
dist = math.sqrt(dx * dx + dy * dy)
if dist - r - rr < args.min_dist:
dists_good = False
break
for direction_name in ['left', 'right', 'front', 'behind']:
direction_vec = scene_struct['directions'][direction_name]
assert direction_vec[2] == 0
margin = dx * direction_vec[0] + dy * direction_vec[1]
if 0 < margin < args.margin:
print(margin, args.margin, direction_name)
print('BROKEN MARGIN!')
margins_good = False
break
if not margins_good:
break
if dists_good and margins_good:
break
# Choose random color and shape
if shape_color_combos is None:
obj_name, obj_name_out = random.choice(object_mapping)
color_name, rgba = random.choice(list(color_name_to_rgba.items()))
else:
obj_name_out, color_choices = random.choice(shape_color_combos)
color_name = random.choice(color_choices)
obj_name = [k for k, v in object_mapping if v == obj_name_out][0]
rgba = color_name_to_rgba[color_name]
# For cube, adjust the size a bit
if obj_name == 'Cube':
r /= math.sqrt(2)
# Choose random orientation for the object.
theta = 360.0 * random.random()
# Actually add the object to the scene
utils.add_object(args.shape_dir, obj_name, r, (x, y), theta=theta)
obj = bpy.context.object
blender_objects.append(obj)
positions.append((x, y, r))
# Attach a random material
mat_name, mat_name_out = random.choice(material_mapping)
utils.add_material(mat_name, Color=rgba)
# Record data about the object in the scene data structure
pixel_coords = utils.get_camera_coords(camera, obj.location)
objects.append({
'shape': obj_name_out,
'size': size_name,
'material': mat_name_out,
'3d_coords': tuple(obj.location),
'rotation': theta,
'pixel_coords': pixel_coords,
'color': color_name,
})
# Check that all objects are at least partially visible in the rendered image
all_visible = check_visibility(blender_objects, args.min_pixels_per_object)
if not all_visible:
# If any of the objects are fully occluded then start over; delete all
# objects from the scene and place them all again.
print('Some objects are occluded; replacing objects')
for obj in blender_objects:
utils.delete_object(obj)
return add_random_objects(scene_struct, num_objects, args, camera)
return objects, blender_objects
def compute_all_relationships(scene_struct, eps=0.2):
"""
Computes relationships between all pairs of objects in the scene.
Returns a dictionary mapping string relationship names to lists of lists of
integers, where output[rel][i] gives a list of object indices that have the
relationship rel with object i. For example if j is in output['left'][i] then
object j is left of object i.
"""
all_relationships = {}
for name, direction_vec in scene_struct['directions'].items():
if name == 'above' or name == 'below': continue
all_relationships[name] = []
for i, obj1 in enumerate(scene_struct['objects']):
coords1 = obj1['3d_coords']
related = set()
for j, obj2 in enumerate(scene_struct['objects']):
if obj1 == obj2: continue
coords2 = obj2['3d_coords']
diff = [coords2[k] - coords1[k] for k in [0, 1, 2]]
dot = sum(diff[k] * direction_vec[k] for k in [0, 1, 2])
if dot > eps:
related.add(j)
all_relationships[name].append(sorted(list(related)))
return all_relationships
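# Illustrative return value for a two-object scene where object 1 is to the
# left of object 0 (and hence object 0 is to the right of object 1); 'front'
# and 'behind' are filled in analogously:
#   {'left': [[1], []], 'right': [[], [0]], 'front': [[...], [...]], 'behind': [[...], [...]]}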
def check_visibility(blender_objects, min_pixels_per_object):
"""
Check whether all objects in the scene have some minimum number of visible
pixels; to accomplish this we assign random (but distinct) colors to all
objects, and render using no lighting or shading or antialiasing; this
ensures that each object is just a solid uniform color. We can then count
the number of pixels of each color in the output image to check the visibility
of each object.
Returns True if all objects are visible and False otherwise.
"""
f, path = tempfile.mkstemp(suffix='.png')
object_colors = render_shadeless(blender_objects, path=path)
img = bpy.data.images.load(path)
p = list(img.pixels)
color_count = Counter((p[i], p[i+1], p[i+2], p[i+3])
for i in range(0, len(p), 4))
os.remove(path)
if len(color_count) != len(blender_objects) + 1:
return False
for _, count in color_count.most_common():
if count < min_pixels_per_object:
return False
return True
def render_shadeless(blender_objects, path='flat.png'):
"""
Render a version of the scene with shading disabled and unique materials
assigned to all objects, and return a set of all colors that should be in the
rendered image. The image itself is written to path. This is used to ensure
that all objects will be visible in the final rendered scene.
"""
render_args = bpy.context.scene.render
# Cache the render args we are about to clobber
old_filepath = render_args.filepath
old_engine = render_args.engine
old_use_antialiasing = render_args.use_antialiasing
# Override some render settings to have flat shading
render_args.filepath = path
render_args.engine = 'BLENDER_RENDER'
render_args.use_antialiasing = False
# Move the lights and ground to layer 2 so they don't render
utils.set_layer(bpy.data.objects['Lamp_Key'], 2)
utils.set_layer(bpy.data.objects['Lamp_Fill'], 2)
utils.set_layer(bpy.data.objects['Lamp_Back'], 2)
utils.set_layer(bpy.data.objects['Ground'], 2)
# Add random shadeless materials to all objects
object_colors = set()
old_materials = []
for i, obj in enumerate(blender_objects):
old_materials.append(obj.data.materials[0])
bpy.ops.material.new()
mat = bpy.data.materials['Material']
mat.name = 'Material_%d' % i
while True:
r, g, b = [random.random() for _ in range(3)]
if (r, g, b) not in object_colors: break
object_colors.add((r, g, b))
mat.diffuse_color = [r, g, b]
mat.use_shadeless = True
obj.data.materials[0] = mat
# Render the scene
bpy.ops.render.render(write_still=True)
# Undo the above; first restore the materials to objects
for mat, obj in zip(old_materials, blender_objects):
obj.data.materials[0] = mat
# Move the lights and ground back to layer 0
utils.set_layer(bpy.data.objects['Lamp_Key'], 0)
utils.set_layer(bpy.data.objects['Lamp_Fill'], 0)
utils.set_layer(bpy.data.objects['Lamp_Back'], 0)
utils.set_layer(bpy.data.objects['Ground'], 0)
# Set the render settings back to what they were
render_args.filepath = old_filepath
render_args.engine = old_engine
render_args.use_antialiasing = old_use_antialiasing
return object_colors
if __name__ == '__main__':
if INSIDE_BLENDER:
# Run normally
argv = utils.extract_args()
args = parser.parse_args(argv)
main(args)
elif '--help' in sys.argv or '-h' in sys.argv:
parser.print_help()
else:
print('This script is intended to be called from blender like this:')
print()
print('blender --background --python render_images.py -- [args]')
print()
print('You can also run as a standalone python script to view all')
print('arguments like this:')
print()
print('python render_images.py --help')
|
clevr-dataset-gen-main
|
image_generation/render_images.py
|
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import sys, random, os
import bpy, bpy_extras
"""
Some utility functions for interacting with Blender
"""
def extract_args(input_argv=None):
"""
Pull out command-line arguments after "--". Blender ignores command-line flags
after --, so this lets us forward command line arguments from the blender
invocation to our own script.
"""
if input_argv is None:
input_argv = sys.argv
output_argv = []
if '--' in input_argv:
idx = input_argv.index('--')
output_argv = input_argv[(idx + 1):]
return output_argv
def parse_args(parser, argv=None):
return parser.parse_args(extract_args(argv))
# I wonder if there's a better way to do this?
def delete_object(obj):
""" Delete a specified blender object """
for o in bpy.data.objects:
o.select = False
obj.select = True
bpy.ops.object.delete()
def get_camera_coords(cam, pos):
"""
For a specified point, get both the 3D coordinates and 2D pixel-space
coordinates of the point from the perspective of the camera.
Inputs:
- cam: Camera object
- pos: Vector giving 3D world-space position
Returns a tuple of:
- (px, py, pz): px and py give 2D image-space coordinates; pz gives depth
in the range [-1, 1]
"""
scene = bpy.context.scene
x, y, z = bpy_extras.object_utils.world_to_camera_view(scene, cam, pos)
scale = scene.render.resolution_percentage / 100.0
w = int(scale * scene.render.resolution_x)
h = int(scale * scene.render.resolution_y)
px = int(round(x * w))
py = int(round(h - y * h))
return (px, py, z)
def set_layer(obj, layer_idx):
""" Move an object to a particular layer """
# Set the target layer to True first because an object must always be on
# at least one layer.
obj.layers[layer_idx] = True
for i in range(len(obj.layers)):
obj.layers[i] = (i == layer_idx)
def add_object(object_dir, name, scale, loc, theta=0):
"""
Load an object from a file. We assume that in the directory object_dir, there
is a file named "$name.blend" which contains a single object named "$name"
that has unit size and is centered at the origin.
- scale: scalar giving the size that the object should be in the scene
- loc: tuple (x, y) giving the coordinates on the ground plane where the
object should be placed.
"""
# First figure out how many of this object are already in the scene so we can
# give the new object a unique name
count = 0
for obj in bpy.data.objects:
if obj.name.startswith(name):
count += 1
filename = os.path.join(object_dir, '%s.blend' % name, 'Object', name)
bpy.ops.wm.append(filename=filename)
# Give it a new name to avoid conflicts
new_name = '%s_%d' % (name, count)
bpy.data.objects[name].name = new_name
# Set the new object as active, then rotate, scale, and translate it
x, y = loc
bpy.context.scene.objects.active = bpy.data.objects[new_name]
bpy.context.object.rotation_euler[2] = theta
bpy.ops.transform.resize(value=(scale, scale, scale))
bpy.ops.transform.translate(value=(x, y, scale))
def load_materials(material_dir):
"""
Load materials from a directory. We assume that the directory contains .blend
files with one material each. The file X.blend has a single NodeTree item named
X; this NodeTree item must have a "Color" input that accepts an RGBA value.
"""
for fn in os.listdir(material_dir):
if not fn.endswith('.blend'): continue
name = os.path.splitext(fn)[0]
filepath = os.path.join(material_dir, fn, 'NodeTree', name)
bpy.ops.wm.append(filename=filepath)
def add_material(name, **properties):
"""
Create a new material and assign it to the active object. "name" should be the
name of a material that has been previously loaded using load_materials.
"""
# Figure out how many materials are already in the scene
mat_count = len(bpy.data.materials)
# Create a new material; it is not attached to anything and
# it will be called "Material"
bpy.ops.material.new()
# Get a reference to the material we just created and rename it;
# then the next time we make a new material it will still be called
# "Material" and we will still be able to look it up by name
mat = bpy.data.materials['Material']
mat.name = 'Material_%d' % mat_count
# Attach the new material to the active object
# Make sure it doesn't already have materials
obj = bpy.context.active_object
assert len(obj.data.materials) == 0
obj.data.materials.append(mat)
# Find the output node of the new material
output_node = None
for n in mat.node_tree.nodes:
if n.name == 'Material Output':
output_node = n
break
# Add a new GroupNode to the node tree of the active material,
# and copy the node tree from the preloaded node group to the
# new group node. This copying seems to happen by-value, so
# we can create multiple materials of the same type without them
# clobbering each other
group_node = mat.node_tree.nodes.new('ShaderNodeGroup')
group_node.node_tree = bpy.data.node_groups[name]
# Find and set the "Color" input of the new group node
for inp in group_node.inputs:
if inp.name in properties:
inp.default_value = properties[inp.name]
# Wire the output of the new group node to the input of
# the MaterialOutput node
mat.node_tree.links.new(
group_node.outputs['Shader'],
output_node.inputs['Surface'],
)
|
clevr-dataset-gen-main
|
image_generation/utils.py
|
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import json, os, math
from collections import defaultdict
"""
Utilities for working with function program representations of questions.
Some of the metadata about what question node types are available etc are stored
in a JSON metadata file.
"""
# Handlers for answering questions. Each handler receives the scene structure
# that was output from Blender, the node, and a list of values that were output
# from each of the node's inputs; the handler should return the computed output
# value from this node.
def scene_handler(scene_struct, inputs, side_inputs):
# Just return all objects in the scene
return list(range(len(scene_struct['objects'])))
def make_filter_handler(attribute):
def filter_handler(scene_struct, inputs, side_inputs):
assert len(inputs) == 1
assert len(side_inputs) == 1
value = side_inputs[0]
output = []
for idx in inputs[0]:
atr = scene_struct['objects'][idx][attribute]
if value == atr or value in atr:
output.append(idx)
return output
return filter_handler
def unique_handler(scene_struct, inputs, side_inputs):
assert len(inputs) == 1
if len(inputs[0]) != 1:
return '__INVALID__'
return inputs[0][0]
def vg_relate_handler(scene_struct, inputs, side_inputs):
assert len(inputs) == 1
assert len(side_inputs) == 1
output = set()
for rel in scene_struct['relationships']:
if rel['predicate'] == side_inputs[0] and rel['subject_idx'] == inputs[0]:
output.add(rel['object_idx'])
return sorted(list(output))
def relate_handler(scene_struct, inputs, side_inputs):
assert len(inputs) == 1
assert len(side_inputs) == 1
relation = side_inputs[0]
return scene_struct['relationships'][relation][inputs[0]]
def union_handler(scene_struct, inputs, side_inputs):
assert len(inputs) == 2
assert len(side_inputs) == 0
return sorted(list(set(inputs[0]) | set(inputs[1])))
def intersect_handler(scene_struct, inputs, side_inputs):
assert len(inputs) == 2
assert len(side_inputs) == 0
return sorted(list(set(inputs[0]) & set(inputs[1])))
def count_handler(scene_struct, inputs, side_inputs):
assert len(inputs) == 1
return len(inputs[0])
def make_same_attr_handler(attribute):
def same_attr_handler(scene_struct, inputs, side_inputs):
cache_key = '_same_%s' % attribute
if cache_key not in scene_struct:
cache = {}
for i, obj1 in enumerate(scene_struct['objects']):
same = []
for j, obj2 in enumerate(scene_struct['objects']):
if i != j and obj1[attribute] == obj2[attribute]:
same.append(j)
cache[i] = same
scene_struct[cache_key] = cache
cache = scene_struct[cache_key]
assert len(inputs) == 1
assert len(side_inputs) == 0
return cache[inputs[0]]
return same_attr_handler
def make_query_handler(attribute):
def query_handler(scene_struct, inputs, side_inputs):
assert len(inputs) == 1
assert len(side_inputs) == 0
idx = inputs[0]
obj = scene_struct['objects'][idx]
assert attribute in obj
val = obj[attribute]
if type(val) == list and len(val) != 1:
return '__INVALID__'
elif type(val) == list and len(val) == 1:
return val[0]
else:
return val
return query_handler
def exist_handler(scene_struct, inputs, side_inputs):
assert len(inputs) == 1
assert len(side_inputs) == 0
return len(inputs[0]) > 0
def equal_handler(scene_struct, inputs, side_inputs):
assert len(inputs) == 2
assert len(side_inputs) == 0
return inputs[0] == inputs[1]
def less_than_handler(scene_struct, inputs, side_inputs):
assert len(inputs) == 2
assert len(side_inputs) == 0
return inputs[0] < inputs[1]
def greater_than_handler(scene_struct, inputs, side_inputs):
assert len(inputs) == 2
assert len(side_inputs) == 0
return inputs[0] > inputs[1]
# Register all of the answering handlers here.
# TODO maybe this would be cleaner with a function decorator that takes
# care of registration? Not sure. Also what if we want to reuse the same engine
# for different sets of node types?
execute_handlers = {
'scene': scene_handler,
'filter_color': make_filter_handler('color'),
'filter_shape': make_filter_handler('shape'),
'filter_material': make_filter_handler('material'),
'filter_size': make_filter_handler('size'),
'filter_objectcategory': make_filter_handler('objectcategory'),
'unique': unique_handler,
'relate': relate_handler,
'union': union_handler,
'intersect': intersect_handler,
'count': count_handler,
'query_color': make_query_handler('color'),
'query_shape': make_query_handler('shape'),
'query_material': make_query_handler('material'),
'query_size': make_query_handler('size'),
'exist': exist_handler,
'equal_color': equal_handler,
'equal_shape': equal_handler,
'equal_integer': equal_handler,
'equal_material': equal_handler,
'equal_size': equal_handler,
'equal_object': equal_handler,
'less_than': less_than_handler,
'greater_than': greater_than_handler,
'same_color': make_same_attr_handler('color'),
'same_shape': make_same_attr_handler('shape'),
'same_size': make_same_attr_handler('size'),
'same_material': make_same_attr_handler('material'),
}
def answer_question(question, metadata, scene_struct, all_outputs=False,
cache_outputs=True):
"""
Use structured scene information to answer a structured question. Most of the
heavy lifting is done by the execute handlers defined above.
We cache node outputs in the node itself; this gives a nontrivial speedup
when we want to answer many questions that share nodes on the same scene
(such as during question-generation DFS). This will NOT work if the same
nodes are executed on different scenes.
"""
all_input_types, all_output_types = [], []
node_outputs = []
for node in question['nodes']:
if cache_outputs and '_output' in node:
node_output = node['_output']
else:
node_type = node['type']
msg = 'Could not find handler for "%s"' % node_type
assert node_type in execute_handlers, msg
handler = execute_handlers[node_type]
node_inputs = [node_outputs[idx] for idx in node['inputs']]
side_inputs = node.get('side_inputs', [])
node_output = handler(scene_struct, node_inputs, side_inputs)
if cache_outputs:
node['_output'] = node_output
node_outputs.append(node_output)
if node_output == '__INVALID__':
break
if all_outputs:
return node_outputs
else:
return node_outputs[-1]
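# Illustrative usage (hypothetical program, assuming `metadata` and
# `scene_struct` have already been loaded from their JSON files): a two-node
# program that counts all objects in the scene.
#
#   q = {'nodes': [
#       {'type': 'scene', 'inputs': []},
#       {'type': 'count', 'inputs': [0]},
#   ]}
#   num_objects = answer_question(q, metadata, scene_struct)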
def insert_scene_node(nodes, idx):
# First make a shallow-ish copy of the input
new_nodes = []
for node in nodes:
new_node = {
'type': node['type'],
'inputs': node['inputs'],
}
if 'side_inputs' in node:
new_node['side_inputs'] = node['side_inputs']
new_nodes.append(new_node)
# Replace the specified index with a scene node
new_nodes[idx] = {'type': 'scene', 'inputs': []}
# Search backwards from the last node to see which nodes are actually used
output_used = [False] * len(new_nodes)
idxs_to_check = [len(new_nodes) - 1]
while idxs_to_check:
cur_idx = idxs_to_check.pop()
output_used[cur_idx] = True
idxs_to_check.extend(new_nodes[cur_idx]['inputs'])
# Iterate through nodes, keeping only those whose output is used;
# at the same time build up a mapping from old idxs to new idxs
old_idx_to_new_idx = {}
new_nodes_trimmed = []
for old_idx, node in enumerate(new_nodes):
if output_used[old_idx]:
new_idx = len(new_nodes_trimmed)
new_nodes_trimmed.append(node)
old_idx_to_new_idx[old_idx] = new_idx
# Finally go through the list of trimmed nodes and change the inputs
for node in new_nodes_trimmed:
new_inputs = []
for old_idx in node['inputs']:
new_inputs.append(old_idx_to_new_idx[old_idx])
node['inputs'] = new_inputs
return new_nodes_trimmed
def is_degenerate(question, metadata, scene_struct, answer=None, verbose=False):
"""
A question is degenerate if replacing any of its relate nodes with a scene
node results in a question with the same answer.
"""
if answer is None:
answer = answer_question(question, metadata, scene_struct)
for idx, node in enumerate(question['nodes']):
if node['type'] == 'relate':
new_question = {
'nodes': insert_scene_node(question['nodes'], idx)
}
new_answer = answer_question(new_question, metadata, scene_struct)
if verbose:
print('here is truncated question:')
for i, n in enumerate(new_question['nodes']):
name = n['type']
if 'side_inputs' in n:
name = '%s[%s]' % (name, n['side_inputs'][0])
print(i, name, n['_output'])
print('new answer is: ', new_answer)
if new_answer == answer:
return True
return False
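# Example of the degeneracy check (illustrative): in a scene with exactly one
# cube, "What color is the cube right of the sphere?" is degenerate, because
# replacing the relate node ("right of the sphere") with the whole scene still
# selects the same cube and therefore yields the same answer.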
|
clevr-dataset-gen-main
|
question_generation/question_engine.py
|
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import print_function
import argparse, json, os, itertools, random, shutil
import time
import re
import question_engine as qeng
"""
Generate synthetic questions and answers for CLEVR images. Input is a single
JSON file containing ground-truth scene information for all images, and output
is a single JSON file containing all generated questions, answers, and programs.
Questions are generated by expanding templates. Each template contains a single
program template and one or more text templates, both with the same set of typed
slots; by convention <Z> = Size, <C> = Color, <M> = Material, <S> = Shape.
Program templates may contain special nodes that expand into multiple functions
during instantiation; for example a "filter" node in a program template will
expand into a combination of "filter_size", "filter_color", "filter_material",
and "filter_shape" nodes after instantiation, and a "filter_unique" node in a
template will expand into some combination of filtering nodes followed by a
"unique" node.
Templates are instantiated using depth-first search; we are looking for template
instantiations where (1) each "unique" node actually refers to a single object,
(2) constraints in the template are satisfied, and (3) the answer to the question
passes our rejection sampling heuristics.
To efficiently handle (1) and (2), we keep track of partial evaluations of the
program during each step of template expansion. This together with the use of
composite nodes in program templates (filter_unique, relate_filter_unique) allow
us to efficiently prune the search space and terminate early when we know that
(1) or (2) will be violated.
"""
parser = argparse.ArgumentParser()
# Inputs
parser.add_argument('--input_scene_file', default='../output/CLEVR_scenes.json',
help="JSON file containing ground-truth scene information for all images " +
"from render_images.py")
parser.add_argument('--metadata_file', default='metadata.json',
help="JSON file containing metadata about functions")
parser.add_argument('--synonyms_json', default='synonyms.json',
help="JSON file defining synonyms for parameter values")
parser.add_argument('--template_dir', default='CLEVR_1.0_templates',
help="Directory containing JSON templates for questions")
# Output
parser.add_argument('--output_questions_file',
default='../output/CLEVR_questions.json',
help="The output file to write containing generated questions")
# Control which and how many images to process
parser.add_argument('--scene_start_idx', default=0, type=int,
help="The image at which to start generating questions; this allows " +
"question generation to be split across many workers")
parser.add_argument('--num_scenes', default=0, type=int,
help="The number of images for which to generate questions. Setting to 0 " +
"generates questions for all scenes in the input file starting from " +
"--scene_start_idx")
# Control the number of questions per image; we will attempt to generate
# templates_per_image * instances_per_template questions per image.
parser.add_argument('--templates_per_image', default=10, type=int,
help="The number of different templates that should be instantiated " +
"on each image")
parser.add_argument('--instances_per_template', default=1, type=int,
help="The number of times each template should be instantiated on an image")
# Misc
parser.add_argument('--reset_counts_every', default=250, type=int,
help="How often to reset template and answer counts. Higher values will " +
"result in flatter distributions over templates and answers, but " +
"will result in longer runtimes.")
parser.add_argument('--verbose', action='store_true',
help="Print more verbose output")
parser.add_argument('--time_dfs', action='store_true',
help="Time each depth-first search; must be given with --verbose")
parser.add_argument('--profile', action='store_true',
help="If given then run inside cProfile")
# args = parser.parse_args()
def precompute_filter_options(scene_struct, metadata):
# Keys are tuples (size, color, material, shape) (where some may be None)
# and values are sets of object idxs that match the filter criterion
attribute_map = {}
if metadata['dataset'] == 'CLEVR-v1.0':
attr_keys = ['size', 'color', 'material', 'shape']
else:
assert False, 'Unrecognized dataset'
# Precompute masks
masks = []
for i in range(2 ** len(attr_keys)):
mask = []
for j in range(len(attr_keys)):
mask.append((i // (2 ** j)) % 2)
masks.append(mask)
for object_idx, obj in enumerate(scene_struct['objects']):
if metadata['dataset'] == 'CLEVR-v1.0':
keys = [tuple(obj[k] for k in attr_keys)]
for mask in masks:
for key in keys:
masked_key = []
for a, b in zip(key, mask):
if b == 1:
masked_key.append(a)
else:
masked_key.append(None)
masked_key = tuple(masked_key)
if masked_key not in attribute_map:
attribute_map[masked_key] = set()
attribute_map[masked_key].add(object_idx)
scene_struct['_filter_options'] = attribute_map
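# Worked example of the masking above (illustrative): an object with attributes
# ('small', 'red', 'metal', 'cube') is added under every masked key, e.g.
# (None, 'red', None, 'cube'), ('small', None, None, None) and the all-None
# key, so later filter lookups can be answered directly from this cache.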
def find_filter_options(object_idxs, scene_struct, metadata):
# Keys are tuples (size, color, material, shape) (where some may be None)
# and values are lists of object idxs that match the filter criterion
if '_filter_options' not in scene_struct:
precompute_filter_options(scene_struct, metadata)
attribute_map = {}
object_idxs = set(object_idxs)
for k, vs in scene_struct['_filter_options'].items():
attribute_map[k] = sorted(list(object_idxs & vs))
return attribute_map
def add_empty_filter_options(attribute_map, metadata, num_to_add):
# Add some filtering criteria that do NOT correspond to objects
if metadata['dataset'] == 'CLEVR-v1.0':
attr_keys = ['Size', 'Color', 'Material', 'Shape']
else:
assert False, 'Unrecognized dataset'
attr_vals = [metadata['types'][t] + [None] for t in attr_keys]
if '_filter_options' in metadata:
attr_vals = metadata['_filter_options']
target_size = len(attribute_map) + num_to_add
while len(attribute_map) < target_size:
k = tuple(random.choice(v) for v in attr_vals)  # tuple, so it can be used as a dict key
if k not in attribute_map:
attribute_map[k] = []
def find_relate_filter_options(object_idx, scene_struct, metadata,
unique=False, include_zero=False, trivial_frac=0.1):
options = {}
if '_filter_options' not in scene_struct:
precompute_filter_options(scene_struct, metadata)
# TODO: Right now this is only looking for nontrivial combinations; in some
# cases I may want to add trivial combinations, either where the intersection
# is empty or where the intersection is equal to the filtering output.
trivial_options = {}
for relationship in scene_struct['relationships']:
related = set(scene_struct['relationships'][relationship][object_idx])
for filters, filtered in scene_struct['_filter_options'].items():
intersection = related & filtered
trivial = (intersection == filtered)
if unique and len(intersection) != 1: continue
if not include_zero and len(intersection) == 0: continue
if trivial:
trivial_options[(relationship, filters)] = sorted(list(intersection))
else:
options[(relationship, filters)] = sorted(list(intersection))
N, f = len(options), trivial_frac
num_trivial = int(round(N * f / (1 - f)))
trivial_options = list(trivial_options.items())
random.shuffle(trivial_options)
for k, v in trivial_options[:num_trivial]:
options[k] = v
return options
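# Note on the arithmetic above: choosing num_trivial = N * f / (1 - f) makes
# trivial combinations roughly a fraction trivial_frac of the final option set,
# since num_trivial / (N + num_trivial) = f.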
def node_shallow_copy(node):
new_node = {
'type': node['type'],
'inputs': node['inputs'],
}
if 'side_inputs' in node:
new_node['side_inputs'] = node['side_inputs']
return new_node
def other_heuristic(text, param_vals):
"""
Post-processing heuristic to handle the word "other"
"""
if ' other ' not in text and ' another ' not in text:
return text
target_keys = {
'<Z>', '<C>', '<M>', '<S>',
'<Z2>', '<C2>', '<M2>', '<S2>',
}
if param_vals.keys() != target_keys:
return text
key_pairs = [
('<Z>', '<Z2>'),
('<C>', '<C2>'),
('<M>', '<M2>'),
('<S>', '<S2>'),
]
remove_other = False
for k1, k2 in key_pairs:
v1 = param_vals.get(k1, None)
v2 = param_vals.get(k2, None)
if v1 != '' and v2 != '' and v1 != v2:
print('other has got to go! %s = %s but %s = %s'
% (k1, v1, k2, v2))
remove_other = True
break
if remove_other:
if ' other ' in text:
text = text.replace(' other ', ' ')
if ' another ' in text:
text = text.replace(' another ', ' a ')
return text
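# Illustrative example of the heuristic above: in a template like
# "What size is the other <Z2> <C2> <M2> <S2>?", if the first object was chosen
# with <C>=red and the second with <C2>=blue, then "other" is removed (and
# "another" becomes "a"), since the differing colors already distinguish the
# two objects.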
def instantiate_templates_dfs(scene_struct, template, metadata, answer_counts,
synonyms, max_instances=None, verbose=False):
param_name_to_type = {p['name']: p['type'] for p in template['params']}
initial_state = {
'nodes': [node_shallow_copy(template['nodes'][0])],
'vals': {},
'input_map': {0: 0},
'next_template_node': 1,
}
states = [initial_state]
final_states = []
while states:
state = states.pop()
# Check to make sure the current state is valid
q = {'nodes': state['nodes']}
outputs = qeng.answer_question(q, metadata, scene_struct, all_outputs=True)
answer = outputs[-1]
if answer == '__INVALID__': continue
# Check to make sure constraints are satisfied for the current state
skip_state = False
for constraint in template['constraints']:
if constraint['type'] == 'NEQ':
p1, p2 = constraint['params']
v1, v2 = state['vals'].get(p1), state['vals'].get(p2)
if v1 is not None and v2 is not None and v1 != v2:
if verbose:
print('skipping due to NEQ constraint')
print(constraint)
print(state['vals'])
skip_state = True
break
elif constraint['type'] == 'NULL':
p = constraint['params'][0]
p_type = param_name_to_type[p]
v = state['vals'].get(p)
if v is not None:
skip = False
if p_type == 'Shape' and v != 'thing': skip = True
if p_type != 'Shape' and v != '': skip = True
if skip:
if verbose:
print('skipping due to NULL constraint')
print(constraint)
print(state['vals'])
skip_state = True
break
elif constraint['type'] == 'OUT_NEQ':
i, j = constraint['params']
i = state['input_map'].get(i, None)
j = state['input_map'].get(j, None)
if i is not None and j is not None and outputs[i] == outputs[j]:
if verbose:
print('skipping due to OUT_NEQ constraint')
print(outputs[i])
print(outputs[j])
skip_state = True
break
else:
assert False, 'Unrecognized constraint type "%s"' % constraint['type']
if skip_state:
continue
# We have already checked to make sure the answer is valid, so if we have
# processed all the nodes in the template then the current state is a valid
# question, so add it if it passes our rejection sampling tests.
if state['next_template_node'] == len(template['nodes']):
# Use our rejection sampling heuristics to decide whether we should
# keep this template instantiation
cur_answer_count = answer_counts[answer]
answer_counts_sorted = sorted(answer_counts.values())
median_count = answer_counts_sorted[len(answer_counts_sorted) // 2]
median_count = max(median_count, 5)
if cur_answer_count > 1.1 * answer_counts_sorted[-2]:
if verbose: print('skipping due to second count')
continue
if cur_answer_count > 5.0 * median_count:
if verbose: print('skipping due to median')
continue
# If the template contains a raw relate node then we need to check for
# degeneracy at the end
has_relate = any(n['type'] == 'relate' for n in template['nodes'])
if has_relate:
degen = qeng.is_degenerate(q, metadata, scene_struct, answer=answer,
verbose=verbose)
if degen:
continue
answer_counts[answer] += 1
state['answer'] = answer
final_states.append(state)
if max_instances is not None and len(final_states) == max_instances:
break
continue
# Otherwise fetch the next node from the template
# Make a shallow copy so cached _outputs don't leak ... this is very nasty
next_node = template['nodes'][state['next_template_node']]
next_node = node_shallow_copy(next_node)
special_nodes = {
'filter_unique', 'filter_count', 'filter_exist', 'filter',
'relate_filter', 'relate_filter_unique', 'relate_filter_count',
'relate_filter_exist',
}
if next_node['type'] in special_nodes:
if next_node['type'].startswith('relate_filter'):
unique = (next_node['type'] == 'relate_filter_unique')
include_zero = (next_node['type'] == 'relate_filter_count'
or next_node['type'] == 'relate_filter_exist')
filter_options = find_relate_filter_options(answer, scene_struct, metadata,
unique=unique, include_zero=include_zero)
else:
filter_options = find_filter_options(answer, scene_struct, metadata)
if next_node['type'] == 'filter':
# Remove null filter
filter_options.pop((None, None, None, None), None)
if next_node['type'] == 'filter_unique':
# Get rid of all filter options that don't result in a single object
filter_options = {k: v for k, v in filter_options.items()
if len(v) == 1}
else:
# Add some filter options that do NOT correspond to the scene
if next_node['type'] == 'filter_exist':
# For filter_exist we want an equal number that do and don't
num_to_add = len(filter_options)
elif next_node['type'] == 'filter_count' or next_node['type'] == 'filter':
# For filter_count add nulls equal to the number of singletons
num_to_add = sum(1 for k, v in filter_options.items() if len(v) == 1)
add_empty_filter_options(filter_options, metadata, num_to_add)
filter_option_keys = list(filter_options.keys())
random.shuffle(filter_option_keys)
for k in filter_option_keys:
new_nodes = []
cur_next_vals = {k: v for k, v in state['vals'].items()}
next_input = state['input_map'][next_node['inputs'][0]]
filter_side_inputs = next_node['side_inputs']
if next_node['type'].startswith('relate'):
param_name = next_node['side_inputs'][0] # First one should be relate
filter_side_inputs = next_node['side_inputs'][1:]
param_type = param_name_to_type[param_name]
assert param_type == 'Relation'
param_val = k[0]
k = k[1]
new_nodes.append({
'type': 'relate',
'inputs': [next_input],
'side_inputs': [param_val],
})
cur_next_vals[param_name] = param_val
next_input = len(state['nodes']) + len(new_nodes) - 1
for param_name, param_val in zip(filter_side_inputs, k):
param_type = param_name_to_type[param_name]
filter_type = 'filter_%s' % param_type.lower()
if param_val is not None:
new_nodes.append({
'type': filter_type,
'inputs': [next_input],
'side_inputs': [param_val],
})
cur_next_vals[param_name] = param_val
next_input = len(state['nodes']) + len(new_nodes) - 1
elif param_val is None:
if metadata['dataset'] == 'CLEVR-v1.0' and param_type == 'Shape':
param_val = 'thing'
else:
param_val = ''
cur_next_vals[param_name] = param_val
input_map = {k: v for k, v in state['input_map'].items()}
extra_type = None
if next_node['type'].endswith('unique'):
extra_type = 'unique'
if next_node['type'].endswith('count'):
extra_type = 'count'
if next_node['type'].endswith('exist'):
extra_type = 'exist'
if extra_type is not None:
new_nodes.append({
'type': extra_type,
'inputs': [input_map[next_node['inputs'][0]] + len(new_nodes)],
})
input_map[state['next_template_node']] = len(state['nodes']) + len(new_nodes) - 1
states.append({
'nodes': state['nodes'] + new_nodes,
'vals': cur_next_vals,
'input_map': input_map,
'next_template_node': state['next_template_node'] + 1,
})
elif 'side_inputs' in next_node:
# If the next node has template parameters, expand them out
# TODO: Generalize this to work for nodes with more than one side input
assert len(next_node['side_inputs']) == 1, 'NOT IMPLEMENTED'
# Use metadata to figure out domain of valid values for this parameter.
# Iterate over the values in a random order; then it is safe to bail
# from the DFS as soon as we find the desired number of valid template
# instantiations.
param_name = next_node['side_inputs'][0]
param_type = param_name_to_type[param_name]
param_vals = metadata['types'][param_type][:]
random.shuffle(param_vals)
for val in param_vals:
input_map = {k: v for k, v in state['input_map'].items()}
input_map[state['next_template_node']] = len(state['nodes'])
cur_next_node = {
'type': next_node['type'],
'inputs': [input_map[idx] for idx in next_node['inputs']],
'side_inputs': [val],
}
cur_next_vals = {k: v for k, v in state['vals'].items()}
cur_next_vals[param_name] = val
states.append({
'nodes': state['nodes'] + [cur_next_node],
'vals': cur_next_vals,
'input_map': input_map,
'next_template_node': state['next_template_node'] + 1,
})
else:
input_map = {k: v for k, v in state['input_map'].items()}
input_map[state['next_template_node']] = len(state['nodes'])
next_node = {
'type': next_node['type'],
'inputs': [input_map[idx] for idx in next_node['inputs']],
}
states.append({
'nodes': state['nodes'] + [next_node],
'vals': state['vals'],
'input_map': input_map,
'next_template_node': state['next_template_node'] + 1,
})
# Actually instantiate the template with the solutions we've found
text_questions, structured_questions, answers = [], [], []
for state in final_states:
structured_questions.append(state['nodes'])
answers.append(state['answer'])
text = random.choice(template['text'])
for name, val in state['vals'].items():
if val in synonyms:
val = random.choice(synonyms[val])
text = text.replace(name, val)
text = ' '.join(text.split())
text = replace_optionals(text)
text = ' '.join(text.split())
text = other_heuristic(text, state['vals'])
text_questions.append(text)
return text_questions, structured_questions, answers
def replace_optionals(s):
"""
Each substring of s that is surrounded by square brackets is treated as
optional and is removed with probability 0.5. For example the string
"A [aa] B [bb]"
could become any of
"A aa B bb"
"A B bb"
"A aa B "
"A B "
with probability 1/4.
"""
pat = re.compile(r'\[([^\[]*)\]')
while True:
match = re.search(pat, s)
if not match:
break
i0 = match.start()
i1 = match.end()
if random.random() > 0.5:
s = s[:i0] + match.groups()[0] + s[i1:]
else:
s = s[:i0] + s[i1:]
return s
def main(args):
with open(args.metadata_file, 'r') as f:
metadata = json.load(f)
dataset = metadata['dataset']
if dataset != 'CLEVR-v1.0':
raise ValueError('Unrecognized dataset "%s"' % dataset)
functions_by_name = {}
for f in metadata['functions']:
functions_by_name[f['name']] = f
metadata['_functions_by_name'] = functions_by_name
# Load templates from disk
# Key is (filename, file_idx)
num_loaded_templates = 0
templates = {}
for fn in os.listdir(args.template_dir):
if not fn.endswith('.json'): continue
with open(os.path.join(args.template_dir, fn), 'r') as f:
base = os.path.splitext(fn)[0]
for i, template in enumerate(json.load(f)):
num_loaded_templates += 1
key = (fn, i)
templates[key] = template
print('Read %d templates from disk' % num_loaded_templates)
def reset_counts():
# Maps a template (filename, index) to the number of questions we have
# so far using that template
template_counts = {}
# Maps a template (filename, index) to a dict mapping the answer to the
# number of questions so far of that template type with that answer
template_answer_counts = {}
node_type_to_dtype = {n['name']: n['output'] for n in metadata['functions']}
for key, template in templates.items():
template_counts[key[:2]] = 0
final_node_type = template['nodes'][-1]['type']
final_dtype = node_type_to_dtype[final_node_type]
answers = metadata['types'][final_dtype]
if final_dtype == 'Bool':
answers = [True, False]
if final_dtype == 'Integer':
if metadata['dataset'] == 'CLEVR-v1.0':
answers = list(range(0, 11))
template_answer_counts[key[:2]] = {}
for a in answers:
template_answer_counts[key[:2]][a] = 0
return template_counts, template_answer_counts
template_counts, template_answer_counts = reset_counts()
# Read file containing input scenes
all_scenes = []
with open(args.input_scene_file, 'r') as f:
scene_data = json.load(f)
all_scenes = scene_data['scenes']
scene_info = scene_data['info']
begin = args.scene_start_idx
if args.num_scenes > 0:
end = args.scene_start_idx + args.num_scenes
all_scenes = all_scenes[begin:end]
else:
all_scenes = all_scenes[begin:]
# Read synonyms file
with open(args.synonyms_json, 'r') as f:
synonyms = json.load(f)
questions = []
scene_count = 0
for i, scene in enumerate(all_scenes):
scene_fn = scene['image_filename']
scene_struct = scene
print('starting image %s (%d / %d)'
% (scene_fn, i + 1, len(all_scenes)))
if scene_count % args.reset_counts_every == 0:
print('resetting counts')
template_counts, template_answer_counts = reset_counts()
scene_count += 1
# Order templates by the number of questions we have so far for those
# templates. This is a simple heuristic to give a flat distribution over
# templates.
templates_items = list(templates.items())
templates_items = sorted(templates_items,
key=lambda x: template_counts[x[0][:2]])
num_instantiated = 0
for (fn, idx), template in templates_items:
if args.verbose:
print('trying template ', fn, idx)
if args.time_dfs and args.verbose:
tic = time.time()
ts, qs, ans = instantiate_templates_dfs(
scene_struct,
template,
metadata,
template_answer_counts[(fn, idx)],
synonyms,
max_instances=args.instances_per_template,
verbose=False)
if args.time_dfs and args.verbose:
toc = time.time()
print('that took ', toc - tic)
image_index = int(os.path.splitext(scene_fn)[0].split('_')[-1])
for t, q, a in zip(ts, qs, ans):
questions.append({
'split': scene_info['split'],
'image_filename': scene_fn,
'image_index': image_index,
'image': os.path.splitext(scene_fn)[0],
'question': t,
'program': q,
'answer': a,
'template_filename': fn,
'question_family_index': idx,
'question_index': len(questions),
})
if len(ts) > 0:
if args.verbose:
print('got one!')
num_instantiated += 1
template_counts[(fn, idx)] += 1
elif args.verbose:
print('did not get any =(')
if num_instantiated >= args.templates_per_image:
break
# Change "side_inputs" to "value_inputs" in all functions of all functional
# programs. My original name for these was "side_inputs" but I decided to
# change the name to "value_inputs" for the public CLEVR release. I should
# probably go through all question generation code and templates and rename,
# but that could be tricky and take a while, so instead I'll just do it here.
# To further complicate things, originally functions without value inputs did
# not have a "side_inputs" field at all, and I'm pretty sure this fact is used
# in some of the code above; however in the public CLEVR release all functions
# have a "value_inputs" field, and it's an empty list for functions that take
# no value inputs. Again this should probably be refactored, but the quick and
# dirty solution is to keep the code above as-is, but here make "value_inputs"
# an empty list for those functions that do not have "side_inputs". Gross.
for q in questions:
for f in q['program']:
if 'side_inputs' in f:
f['value_inputs'] = f['side_inputs']
del f['side_inputs']
else:
f['value_inputs'] = []
with open(args.output_questions_file, 'w') as f:
print('Writing output to %s' % args.output_questions_file)
json.dump({
'info': scene_info,
'questions': questions,
}, f)
if __name__ == '__main__':
args = parser.parse_args()
if args.profile:
import cProfile
cProfile.run('main(args)')
else:
main(args)
|
clevr-dataset-gen-main
|
question_generation/generate_questions.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="c3dm", # Replace with your own username
version="1.0.0",
author="Facebook AI Research",
author_email="romansh@fb.com",
description="""Code for the paper: Canonical 3D Deformer Maps: \
Unifying parametric and non-parametric methods for \
dense weakly-supervised category reconstruction\
""",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/facebookresearch/c3dm",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Environment :: GPU :: NVIDIA CUDA :: 10.1",
"Intended Audience :: Science/Research",
"Topic :: Multimedia :: Graphics :: 3D Modeling",
],
python_requires='>=3.6',
install_requires=[
"torch==1.5.1",
"pytorch3d",
"pyyaml>=5.3.1",
"numpy>=1.17",
"pillow>=1.7.2",
"trimesh>=3.7.3",
"matplotlib",
"visdom>=0.1.8.9",
"plotly>=4.8.1",
],
)
|
c3dm-main
|
setup.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os
import torch
from torch import nn as nn
import torch.nn.functional as Fu
import numpy as np
from tools.utils import NumpySeedFix, auto_init_args
from tools.vis_utils import get_visdom_connection, \
denorm_image_trivial, \
show_projections, \
visdom_plot_pointclouds
from tools.functions import masked_kp_mean, avg_l2_dist, \
safe_sqrt, \
argmin_translation, argmin_scale, \
avg_l2_huber, \
find_camera_T
from tools.so3 import so3_exponential_map, rand_rot
from dataset.dataset_configs import STICKS
from PIL import Image
class C3DPO(torch.nn.Module):
def __init__( self, n_keypoints = 17,
shape_basis_size = 10,
mult_shape_by_class_mask = False,
squared_reprojection_loss = False,
n_fully_connected = 1024,
n_layers = 6,
keypoint_rescale = float(1),
keypoint_norm_type = 'to_mean',
projection_type = 'orthographic',
z_augment = True,
z_augment_rot_angle = float(np.pi),
z_equivariance = False,
z_equivariance_rot_angle = float(np.pi)/4, # < 0 means same as z_augment_rot_angle
compose_z_equivariant_rot = True, # TODO: remove this soon!
camera_translation = False,
camera_xy_translation = True,
argmin_translation = False,
argmin_translation_test = False,
argmin_translation_min_depth = 3.,
argmin_to_augmented = False,
camera_scale = False,
argmin_scale = False,
argmin_scale_test = False,
loss_normalization = 'kp_total_count',
independent_phi_for_aug = False,
shape_pred_wd = 1.,
connectivity_setup = 'NONE',
custom_param_groups = False,
use_huber = False,
huber_scaling = 0.1,
alpha_bias = True,
canonicalization = {
'use': False,
'n_layers': 6,
'n_rand_samples': 4,
'rot_angle': float(np.pi),
'n_fully_connected': 1024,
},
linear_instead_of_conv = False,
perspective_depth_threshold = 0.1,
depth_offset = 0.,
replace_keypoints_with_input = False,
root_joint = 0,
loss_weights = { 'l_reprojection': 1.,
'l_canonicalization': 1. },
log_vars = [ \
'objective',
'dist_reprojection',
'l_reprojection',
'l_canonicalization' ],
**kwargs ):
super(C3DPO, self).__init__()
# autoassign constructor params to self
auto_init_args(self)
# factorization net
self.phi = nn.Sequential( \
*make_trunk( dim_in=self.n_keypoints * 3 , # 2 dim loc, 1 dim visibility
n_fully_connected=self.n_fully_connected,
n_layers=self.n_layers ) )
if linear_instead_of_conv:
layer_init_fn = linear_layer
else:
layer_init_fn = conv1x1
# shape coefficient predictor
self.alpha_layer = layer_init_fn( self.n_fully_connected,
self.shape_basis_size,
init='normal0.01',
cnv_args = {'bias': self.alpha_bias,
'kernel_size': 1 } )
# 3D shape predictor
self.shape_layer = layer_init_fn( self.shape_basis_size,
3*n_keypoints,
init='normal0.01' )
# rotation predictor (predicts log-rotation)
self.rot_layer = layer_init_fn(self.n_fully_connected,3,init='normal0.01')
if self.camera_translation:
# camera translation
self.translation_layer = layer_init_fn(self.n_fully_connected,3,init='normal0.01')
if self.camera_scale:
# camera scale (non-negative predictions)
self.scale_layer = nn.Sequential( \
layer_init_fn(self.n_fully_connected,1,init='normal0.01'),
nn.Softplus() )
if self.canonicalization['use']:
# canonicalization net:
self.psi = nn.Sequential( \
*make_trunk( dim_in=self.n_keypoints*3 ,
n_fully_connected=self.canonicalization['n_fully_connected'],
n_layers=self.canonicalization['n_layers'] ) )
self.alpha_layer_psi = conv1x1( \
self.n_fully_connected,
self.shape_basis_size,
init='normal0.01')
# def _get_param_groups(self,lr,wd=0.):
# # make sure to set correct weight decay for the shape predictor
# shape_param_names = [ 'shape_pred_layer.weight', \
# 'shape_pred_layer.bias' ]
# prm_shape = []
# prm_remain = []
# for name,prm in self.named_parameters():
# if not prm.requires_grad: continue
# if name in shape_param_names:
# prm_list = prm_shape
# else:
# prm_list = prm_remain
# prm_list.append(prm)
# p_groups = [ { 'params':prm_remain,'lr':float(lr), \
# 'weight_decay': wd },
# { 'params':prm_shape, 'lr':float(lr), \
# 'weight_decay': float(wd*self.shape_pred_wd) } ]
# return p_groups
def _get_param_groups(self,lr,wd=0.):
assert False
# make sure to set correct weight decay for the shape predictor
shape_param_names = [ 'shape_pred_layer.weight', \
'shape_pred_layer.bias' ]
prm_shape = []
prm_remain = []
for name,prm in self.named_parameters():
if not prm.requires_grad: continue
if name in shape_param_names:
prm_list = prm_shape
else:
prm_list = prm_remain
prm_list.append(prm)
p_groups = [ { 'params':prm_remain,'lr':float(lr), \
'weight_decay': wd },
{ 'params':prm_shape, 'lr':float(lr), \
'weight_decay': float(wd*self.shape_pred_wd) } ]
return p_groups
def forward( self, kp_loc=None, kp_vis=None, \
class_mask=None, K=None, dense_basis=None, \
phi_out = None, dense_basis_mask=None,
shape_coeff_in = None, **kwargs ):
# dictionary with outputs of the fw pass
preds = {}
# input sizes ...
ba,kp_dim,n_kp = kp_loc.shape
dtype = kp_loc.type()
assert kp_dim==2, 'bad input keypoint dim'
assert n_kp==self.n_keypoints, 'bad # of keypoints!'
if self.projection_type=='perspective':
kp_loc_cal = self.calibrate_keypoints(kp_loc, K)
else:
kp_loc_cal = kp_loc
# save for later visualisations ...
kp_loc_norm, kp_mean, kp_scale = \
self.normalize_keypoints( \
kp_loc_cal, kp_vis, rescale=self.keypoint_rescale )
preds['kp_loc_norm'] = kp_loc_norm
preds['kp_mean'], preds['kp_scale'] = kp_mean, kp_scale
# run the shape predictor
if phi_out is not None: # bypass the predictor and use input
preds['phi'] = phi_out
else:
preds['phi'] = self.run_phi(kp_loc_norm, kp_vis, \
class_mask=class_mask, \
shape_coeff_in=shape_coeff_in)
if self.canonicalization['use']:
preds['l_canonicalization' ], preds['psi'] = \
self.canonicalization_loss( preds['phi'], \
class_mask=class_mask )
# 3D->2D project shape to camera
kp_reprojected, depth = self.camera_projection( \
preds['phi']['shape_camera_coord'])
preds['kp_reprojected'] = kp_reprojected
if dense_basis is not None:
preds['phi_dense'] = self.run_phi_dense(dense_basis, preds['phi'])
kp_reprojected_dense, depth_dense = self.camera_projection( \
preds['phi_dense']['shape_camera_coord_dense'])
preds['kp_reprojected_dense'] = kp_reprojected_dense
preds['depth_dense'] = depth_dense
# compute the repro loss for backpropagation
if self.loss_normalization=='kp_count_per_image':
preds['l_reprojection'] = avg_l2_dist( \
kp_reprojected,
kp_loc_norm,
mask=kp_vis,
squared=self.squared_reprojection_loss )
# print(float(preds['l_reprojection']))
elif self.loss_normalization=='kp_total_count':
kp_reprojected_flatten = \
kp_reprojected.permute(1,2,0).contiguous().view(1,2,self.n_keypoints*ba)
kp_loc_norm_flatten = \
kp_loc_norm.permute(1,2,0).contiguous().view(1,2,self.n_keypoints*ba)
kp_vis_flatten = \
kp_vis.permute(1,0).contiguous().view(1,self.n_keypoints*ba)
if self.use_huber:
preds['l_reprojection'] = avg_l2_huber( \
kp_reprojected_flatten,
kp_loc_norm_flatten,
mask=kp_vis_flatten,
scaling=self.huber_scaling )
else:
assert False
preds['l_reprojection'] = avg_l2_dist( \
kp_reprojected_flatten,
kp_loc_norm_flatten,
mask=kp_vis_flatten,
squared=self.squared_reprojection_loss )
else:
raise ValueError('undefined loss normalization %s' % self.loss_normalization)
if self.squared_reprojection_loss:
assert False
# compute the average reprojection distance
# = easier to interpret than the squared repro loss
preds['dist_reprojection'] = avg_l2_dist( \
kp_reprojected,
kp_loc_norm,
mask=kp_vis,
squared=False )
# unnormalize the shape projections
kp_reprojected_image = \
self.unnormalize_keypoints(kp_reprojected, kp_mean, \
rescale=self.keypoint_rescale, kp_scale=kp_scale)
if dense_basis is not None:
kp_reprojected_image_dense = \
self.unnormalize_keypoints( \
preds['kp_reprojected_dense'], kp_mean, \
rescale=self.keypoint_rescale, kp_scale=kp_scale)
preds['kp_reprojected_image_dense'] = kp_reprojected_image_dense
# projections in the image coordinate frame
if self.replace_keypoints_with_input and not self.training:
# use the input points
kp_reprojected_image = (1-kp_vis[:,None,:]) * kp_reprojected_image + \
kp_vis[:,None,:] * kp_loc_cal
preds['kp_reprojected_image'] = kp_reprojected_image
# projected 3D shape in the image space
# = unprojection of kp_reprojected_image
shape_image_coord, depth_image_coord = \
self.camera_unprojection( \
kp_reprojected_image, depth, \
rescale=self.keypoint_rescale, \
kp_scale=kp_scale )
if dense_basis is not None:
shape_image_coord_dense, depth_image_coord_dense = \
self.camera_unprojection( \
kp_reprojected_image_dense, depth_dense, \
rescale=self.keypoint_rescale, \
kp_scale=kp_scale )
if self.projection_type=='perspective':
preds['kp_reprojected_image_cal'] = kp_reprojected_image
preds['shape_image_coord_cal'] = shape_image_coord
preds['shape_image_coord'] = \
self.uncalibrate_keypoints(shape_image_coord, K)
preds['kp_reprojected_image'], _ = \
self.camera_projection(preds['shape_image_coord'])
if dense_basis is not None:
preds['shape_image_coord_cal_dense'] = shape_image_coord_dense
preds['shape_image_coord_dense'] = \
self.uncalibrate_keypoints(shape_image_coord_dense, K)
preds['kp_reprojected_image_dense'], _ = \
self.camera_projection(preds['shape_image_coord_dense'])
# if True:
# preds['shape_image_coord_dense'].register_hook(\
# lambda grad: print(grad.abs().view(-1).topk(10)[0][-1]))
# preds['kp_reprojected_image_dense'].register_hook(\
# lambda grad: print(grad.abs().view(-1).topk(10)[0][-1]))
preds['depth_image_coord_dense'] = depth_image_coord_dense
elif self.projection_type=='orthographic':
preds['shape_image_coord'] = shape_image_coord
preds['depth_image_coord'] = depth_image_coord
if dense_basis is not None:
preds['shape_image_coord_dense'] = shape_image_coord_dense
preds['depth_image_coord_dense'] = depth_image_coord_dense
else:
raise ValueError()
# get the final loss
preds['objective'] = self.get_objective(preds)
assert np.isfinite(preds['objective'].sum().data.cpu().numpy()), "nans!"
return preds
def camera_projection(self, shape):
out = {}
depth = shape[:,2:3]
if self.projection_type=='perspective':
if self.perspective_depth_threshold > 0:
depth = torch.clamp(depth, self.perspective_depth_threshold)
projections = shape[:,0:2] / depth
elif self.projection_type=='orthographic':
projections = shape[:,0:2]
else:
raise ValueError('no such projection type %s' % \
self.projection_type )
return projections, depth
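# In other words (for reference): for a point (x, y, z) in camera coordinates,
# the perspective branch returns (x / z, y / z) with z clamped from below by
# perspective_depth_threshold (when positive), while the orthographic branch
# returns (x, y); both also return z so the shape can later be unprojected.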
def camera_unprojection(self,kp_loc,depth,kp_scale=None,rescale=float(1)):
corr_scale = 1./rescale if kp_scale is None else kp_scale / rescale
if kp_scale is not None:
depth = depth * corr_scale[:,None,None]
else:
depth = depth * corr_scale
if self.projection_type=='perspective':
shape = torch.cat((kp_loc * depth, depth), dim=1)
elif self.projection_type=='orthographic':
shape = torch.cat((kp_loc, depth), dim=1)
else:
raise ValueError('no such projection type %s' % self.projection_type)
return shape, depth
def calibrate_keypoints(self, kp_loc, K):
# undo the projection matrix
assert K is not None
orig_shape = kp_loc.shape
kp_loc = kp_loc.view(orig_shape[0],2,-1) - K[:,0:2,2:3]
focal = torch.stack((K[:,0,0], K[:,1,1]), dim=1)
kp_loc = kp_loc / focal[:,:,None]
kp_loc = kp_loc.view(orig_shape)
return kp_loc
def uncalibrate_keypoints(self, kp_loc, K):
assert K is not None
ba = kp_loc.shape[0]
kp_loc = torch.bmm(K, kp_loc.view(ba,3,-1) ).view(kp_loc.shape)
return kp_loc
def normalize_keypoints( self,
kp_loc,
kp_vis,
rescale=1.,
kp_mean=None ):
if self.keypoint_norm_type=='to_root':
if kp_mean is None:
# center around the root joint
kp_mean = kp_loc[:,:,self.root_joint]
kp_loc_norm = kp_loc - kp_mean[:,:,None]
kp_scale = None
elif self.keypoint_norm_type=='to_mean':
if kp_mean is None:
# calc the mean of visible points
kp_mean = masked_kp_mean( kp_loc, kp_vis )
# remove the mean from the keypoint locations
kp_loc_norm = kp_loc - kp_mean[:,:,None]
kp_scale = None
else:
raise ValueError('no such kp norm %s' % self.keypoint_norm_type)
# rescale
kp_loc_norm = kp_loc_norm * rescale
return kp_loc_norm, kp_mean, kp_scale
def unnormalize_keypoints( self,
kp_loc_norm,
kp_mean,
rescale=1.,
kp_scale=None,
K=None ):
kp_loc = kp_loc_norm * (1. / rescale)
if kp_scale is not None:
kp_loc = kp_loc * kp_scale[:,None,None]
kp_loc = (kp_loc.view(kp_loc.shape[0],2,-1)
+ kp_mean[:, :, None]).view(kp_loc.shape)
return kp_loc
def run_phi(
self,
kp_loc,
kp_vis,
class_mask=None,
shape_coeff_in=None,
):
preds = {}
# batch size
ba = kp_loc.shape[0]
dtype = kp_loc.type()
eps = 1e-4
kp_loc_orig = kp_loc.clone()
if self.z_augment and self.training:
R_rand = rand_rot( ba,
dtype=dtype,
max_rot_angle=float(self.z_augment_rot_angle),
axes=(0,0,1) )
kp_loc_in = torch.bmm(R_rand[:,0:2,0:2],kp_loc)
else:
R_rand = torch.eye(3).type(dtype)[None].repeat( (ba,1,1) )
kp_loc_in = kp_loc_orig
if self.z_equivariance and self.training:
if self.z_equivariance_rot_angle < 0.:
zeq_angle = self.z_augment_rot_angle
else:
zeq_angle = self.z_equivariance_rot_angle
# random xy rot
R_rand_eq = rand_rot( ba,
dtype=dtype,
max_rot_angle=float(zeq_angle),
axes=(0,0,1) )
kp_loc_in = torch.cat( \
( kp_loc_in, \
torch.bmm(R_rand_eq[:,0:2,0:2],
kp_loc_in if self.compose_z_equivariant_rot else kp_loc_orig) \
), dim=0 )
kp_vis_in = kp_vis.repeat( (2,1) )
else:
kp_vis_in = kp_vis
# mask kp_loc by kp_visibility
kp_loc_masked = kp_loc_in * kp_vis_in[:,None,:]
# vectorize
kp_loc_flatten = kp_loc_masked.view(-1, 2*self.n_keypoints)
# concatenate visibilities and kp locations
l1_input = torch.cat( (kp_loc_flatten,kp_vis_in) , dim=1 )
# pass to network
if self.independent_phi_for_aug and l1_input.shape[0]==2*ba:
feats = torch.cat([ self.phi(l1_[:,:,None,None]) for \
l1_ in l1_input.split(ba, dim=0) ], dim=0)
else:
feats = self.phi( l1_input[:,:,None,None] )
# here the network runs once on concatenated input ... maybe split it?
# coefficients into the linear basis
shape_coeff = self.alpha_layer(feats)[:,:,0,0]
if self.z_equivariance and self.training:
# use the shape coeff from the second set of preds
shape_coeff = shape_coeff[ba:]
# take the feats from the first set
feats = feats[:ba]
if shape_coeff_in is not None:
preds['shape_coeff_orig'] = shape_coeff
shape_coeff = shape_coeff_in
# shape prediction is just a linear layer implemented as a conv
shape_canonical = self.shape_layer( \
shape_coeff[:,:,None,None])[:,:,0,0]
shape_canonical = shape_canonical.view(ba,3,self.n_keypoints)
if self.keypoint_norm_type=='to_root':
# make sure we fix the root at 0
root_j = shape_canonical[:,:,self.root_joint]
shape_canonical = shape_canonical - root_j[:,:,None]
# predict camera params
# ... log rotation (exponential representation)
R_log = self.rot_layer(feats)[:,:,0,0]
# convert from the 3D to 3x3 rot matrix
R = so3_exponential_map(R_log)
# T vector of the camera
if self.camera_translation:
T = self.translation_layer(feats)[:,:,0,0]
if self.camera_xy_translation: # kill the last z-dim
T = T * torch.tensor([1.,1.,0.]).type(dtype)[None,:]
else:
T = R_log.new_zeros(ba, 3)
# offset the translation vector of the camera
if self.depth_offset > 0.:
T[:,2] = T[:,2] + self.depth_offset
# scale of the camera
if self.camera_scale:
scale = self.scale_layer(feats)[:,0,0,0]
else:
scale = R_log.new_ones(ba)
# rotated+scaled shape into the camera ( Y = sRX + T )
shape_camera_coord = self.apply_similarity_t(shape_canonical,R,T,scale)
# undo equivariant transformation
if (self.z_equivariance or self.z_augment) and self.training:
R_rand_inv = R_rand.transpose(2,1)
R = torch.bmm(R_rand_inv,R)
T = torch.bmm(R_rand_inv,T[:,:,None])[:,:,0]
shape_camera_coord = torch.bmm(R_rand_inv,shape_camera_coord)
# estimate translation
if self.argmin_translation or \
(self.argmin_translation_test and not self.training) :
if self.projection_type=='orthographic':
projection, _ = self.camera_projection(shape_camera_coord)
if self.argmin_to_augmented:
assert False
T_amin = argmin_translation( projection, kp_loc_in[:ba], v=kp_vis )
else:
T_amin = argmin_translation( projection, kp_loc_orig, v=kp_vis )
T_amin = Fu.pad(T_amin,(0,1),'constant',float(0))
shape_camera_coord = shape_camera_coord + T_amin[:,:,None]
T = T + T_amin
elif self.projection_type=='perspective':
K_ = torch.eye(3).type_as(kp_loc)[None].repeat(ba,1,1)
T = find_camera_T(\
K_, shape_camera_coord, kp_loc_orig, v=kp_vis)
if self.argmin_translation_min_depth > 0.:
T = torch.cat( \
( T[:,0:2], \
torch.clamp(T[:,2:3], self.argmin_translation_min_depth)),
dim = 1 )
shape_camera_coord = shape_camera_coord + T[:,:,None]
else:
raise ValueError(self.projection_type)
# estimate scale
if self.argmin_scale or \
(self.argmin_scale_test and not self.training) :
assert self.projection_type=='orthographic'
# assert False
projection, _ = self.camera_projection(shape_camera_coord)
scale_correct = argmin_scale(projection, kp_loc_orig, v=kp_vis)
scale = scale_correct * scale
shape_camera_coord = scale_correct[:,None,None] * shape_camera_coord
T = scale_correct[:,None] * T
if class_mask is not None and self.mult_shape_by_class_mask:
shape_camera_coord = shape_camera_coord * class_mask[:,None,:]
shape_canonical = shape_canonical * class_mask[:,None,:]
preds['R_log'] = R_log
preds['R'] = R
preds['scale'] = scale
preds['T'] = T
preds['shape_camera_coord'] = shape_camera_coord
preds['shape_coeff'] = shape_coeff
preds['shape_canonical'] = shape_canonical
return preds
def run_phi_dense(self, dense_basis, phi_out):
R, T, scale, shape_coeff = [phi_out[k] for k in ['R', 'T', 'scale', 'shape_coeff']]
preds = {}
ba, dim, he, wi = dense_basis.shape
shape_basis_size = dim // 3
dense_basis_ = dense_basis.view(ba, shape_basis_size, 3*he*wi)
shape_coeff_1 = Fu.pad(shape_coeff, (1,0), value=1.) # mean shape goes first
if False:
dense_basis_decomp = dense_basis_.permute(0, 2, 1).contiguous()
dense_basis_decomp = dense_basis_decomp.view(ba, 3, -1)
# only rotate the basis
dense_basis_decomp_t = \
self.apply_similarity_t(dense_basis_decomp,R,T*0.,scale*0.+1.)
dense_basis_decomp_t = \
dense_basis_decomp_t.view(ba,3,he,wi, shape_basis_size)
dense_basis_decomp_rot = dense_basis_decomp_t.permute(0,4,1,2,3)
preds['dense_basis_rot'] = dense_basis_decomp_rot
shape_canonical_dense = torch.bmm(shape_coeff_1[:,None,:],
dense_basis_).view(ba, 3, -1)
shape_camera_coord_dense = self.apply_similarity_t(shape_canonical_dense,R,T,scale)
preds['shape_canonical_dense'] = shape_canonical_dense.view(ba, 3, he, wi)
preds['shape_camera_coord_dense'] = shape_camera_coord_dense.view(ba, 3, he, wi)
return preds
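# Put differently (illustrative): with a constant 1 prepended to the shape
# coefficients, the first dense-basis element plays the role of a per-pixel
# mean shape, so each pixel's canonical 3D point is X = B_0 + sum_k alpha_k * B_k;
# the result is then mapped to camera coordinates with the same similarity
# transform (R, T, scale) as the sparse keypoints.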
def apply_similarity_t( self, S, R, T, s ):
# Y = s * R @ X + T applied batch-wise: S is (ba, 3, N), R is (ba, 3, 3),
# T is (ba, 3) and s is (ba,).
return torch.bmm( R, s[:,None,None] * S ) + T[:,:,None]
def canonicalization_loss( self, phi_out, class_mask=None ):
shape_canonical = phi_out['shape_canonical']
dtype = shape_canonical.type()
ba = shape_canonical.shape[0]
n_sample = self.canonicalization['n_rand_samples']
# rotate the canonical point cloud
# generate random rotation around all axes
R_rand = rand_rot( ba * n_sample,
dtype=dtype,
max_rot_angle=self.canonicalization['rot_angle'],
axes=(1,1,1) )
unrotated = shape_canonical.repeat(n_sample, 1, 1)
rotated = torch.bmm( R_rand, unrotated )
psi_out = self.run_psi( rotated ) # psi3( Rrand X )
a , b = psi_out['shape_canonical'] , unrotated
if self.use_huber:
l_canonicalization = avg_l2_huber(a, b, \
scaling=self.huber_scaling,
mask=class_mask.repeat(n_sample,1) if class_mask is not None else None)
else:
l_canonicalization = avg_l2_dist(a, b, \
squared=self.squared_reprojection_loss,
mask=class_mask.repeat(n_sample,1) if class_mask is not None else None)
# reshape the outputs in the output list
psi_out = { k : v.view( \
self.canonicalization['n_rand_samples'] , \
ba, *v.shape[1:] ) for k,v in psi_out.items() }
return l_canonicalization, psi_out
def run_psi( self, shape_canonical ):
preds = {}
# batch size
ba = shape_canonical.shape[0]
assert shape_canonical.shape[1]==3, '3d inputs only please'
# reshape and pass to the network ...
l1_input = shape_canonical.view(ba,3*self.n_keypoints)
# pass to network
feats = self.psi( l1_input[:,:,None,None] )
# coefficients into the linear basis
shape_coeff = self.alpha_layer_psi(feats)[:,:,0,0]
preds['shape_coeff'] = shape_coeff
# reuse the shape_layer from the 2d predictor
shape_pred = self.shape_layer( \
shape_coeff[:,:,None,None])[:,:,0,0]
shape_pred = shape_pred.view(ba,3,self.n_keypoints)
preds['shape_canonical'] = shape_pred
return preds
def get_objective(self,preds):
losses_weighted = [ preds[k] * float(w) for k,w in \
self.loss_weights.items() \
if k in preds ]
if not hasattr(self,'_loss_weights_printed') or \
not self._loss_weights_printed:
print('-------\nloss_weights:')
for k,w in self.loss_weights.items():
print('%20s: %1.2e' % (k,w) )
print('-------')
self._loss_weights_printed = True
loss = torch.stack(losses_weighted).sum()
return loss
def get_alpha_mean_complement(self):
delta = self.shape_layer.weight.view(3, -1, self.shape_basis_size)
alpha_bias = self.alpha_layer.bias.data
mu_add = (delta * alpha_bias[None,None,:]).sum(2)
return mu_add
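# Shapes for reference (from the layers defined in __init__): delta is
# (3, n_keypoints, shape_basis_size), alpha_bias is (shape_basis_size,) and
# mu_add is (3, n_keypoints), i.e. mu_add = sum_k delta[:, :, k] * alpha_bias[k],
# the constant contribution of the alpha-layer bias to the mean shape.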
def reparametrize_mean_shape(self):
if self.alpha_layer.bias is None:
print('no alpha bias => skipping reparametrization')
return
else:
print('reparametrizing nrsfm model mean')
mu = self.shape_layer.bias.data.view(3, self.n_keypoints)
mu_add = self.get_alpha_mean_complement()
mu_new = mu + mu_add
self.shape_layer.bias.data = mu_new.view(-1)
self.alpha_layer.bias.data.fill_(0.)
self.reparametrized = True
def get_mean_shape(self):
mu = self.shape_layer.bias.data.view(3, self.n_keypoints)
mu_orig = mu.clone()
if self.alpha_layer.bias is not None:
mu_add = self.get_alpha_mean_complement()
mu = mu + mu_add
if hasattr(self, 'reparametrized') and self.reparametrized:
assert (mu - mu_orig).abs().max() <= 1e-6
return mu
def visualize( self, visdom_env, trainmode, \
preds, stats, clear_env=False ):
viz = get_visdom_connection(server=stats.visdom_server,\
port=stats.visdom_port )
if not viz.check_connection():
print("no visdom server! -> skipping batch vis")
return
if clear_env: # clear visualisations
print(" ... clearing visdom environment")
viz.close(env=visdom_env,win=None)
print('vis into env:\n %s' % visdom_env)
it = stats.it[trainmode]
epoch = stats.epoch
idx_image = 0
title="e%d_it%d_im%d"%(stats.epoch,stats.it[trainmode],idx_image)
# get the connectivity pattern
sticks = STICKS[self.connectivity_setup] if \
self.connectivity_setup in STICKS else None
var_kp = { 'orthographic': 'kp_reprojected_image',
'perspective': 'kp_reprojected_image_uncal'}[self.projection_type]
# show reprojections
p = np.stack( \
[ preds[k][idx_image].detach().cpu().numpy() \
for k in (var_kp, 'kp_loc') ] )
v = preds['kp_vis'][idx_image].detach().cpu().numpy()
show_projections( viz, visdom_env, p, v=v,
title=title, cmap__='gist_ncar',
markersize=50, sticks=sticks,
stickwidth=1, plot_point_order=True,
image_path=preds['image_path'][idx_image],
win='projections' )
# show 3d reconstruction
if True:
var3d = { 'orthographic': 'shape_image_coord',
'perspective': 'shape_image_coord_cal'}[self.projection_type]
pcl = {'pred': preds[var3d][idx_image].detach().cpu().numpy().copy()}
if 'kp_loc_3d' in preds:
pcl['gt'] = preds['kp_loc_3d'][idx_image].detach().cpu().numpy().copy()
if self.projection_type=='perspective':
# for perspective projections, we don't know the scale
# so we estimate it here ...
scale = argmin_scale( torch.from_numpy(pcl['pred'][None]),
torch.from_numpy(pcl['gt'][None]) )
pcl['pred'] = pcl['pred'] * float(scale)
elif self.projection_type=='orthographic':
pcl['pred'] = pcl['pred'] - pcl['pred'].mean(1)
visdom_plot_pointclouds(viz, pcl, visdom_env, title, \
plot_legend=False, markersize=20, \
sticks=sticks, win='3d' )
#TODO: Make these layers nicer + move somewhere else ...
def make_trunk(
n_fully_connected=None,
dim_in=None,
n_layers=None,
use_bn=True,
l2_norm=False,
):
layer1 = ConvBNLayer( dim_in,
n_fully_connected,
use_bn=use_bn,
l2_norm=l2_norm )
layers = [layer1]
for l in range(n_layers):
layers.append(
ResLayer(n_fully_connected, int(n_fully_connected/4),
use_bn=use_bn, l2_norm=l2_norm)
)
# print('made a trunk net:')
# print(layers)
return layers
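# make_trunk only builds the list of layers; callers wrap it themselves, as in
# C3DPO.__init__ above:
#   self.phi = nn.Sequential(*make_trunk(dim_in=self.n_keypoints * 3,
#                                        n_fully_connected=1024, n_layers=6))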
def conv1x1(in_planes, out_planes, init='no', cnv_args={'bias':True,'kernel_size':1},std=0.01):
"""1x1 convolution"""
cnv = nn.Conv2d(in_planes, out_planes, **cnv_args)
# init weights ...
if init=='no':
pass
elif init=='normal0.01':
# print("warning: N(0.0.01) conv weight init (different from previous exps)")
# print('init std = %1.2e' % std)
cnv.weight.data.normal_(0.,std)
if cnv.bias is not None:
cnv.bias.data.fill_(0.)
else:
assert False
return cnv
class ConvBNLayer(nn.Module):
def __init__(self, inplanes, planes, use_bn=True, stride=1, l2_norm=False):
super(ConvBNLayer, self).__init__()
# do a reasonable init
cnv_args = {'kernel_size':1, 'stride':stride, 'bias':True}
self.conv1 = conv1x1(inplanes, planes, init='normal0.01', cnv_args=cnv_args)
self.use_bn = use_bn
self.l2_norm = l2_norm
if use_bn: self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.stride = stride
def forward(self, x):
out = self.conv1(x)
if self.l2_norm: out = Fu.normalize(out, dim=1)
if self.use_bn: out = self.bn1(out)
out = self.relu(out)
return out
class ResLayer(nn.Module):
def __init__(self, inplanes, planes, expansion=4, use_bn=True, l2_norm=False):
super(ResLayer, self).__init__()
self.expansion=expansion
self.conv1 = conv1x1(inplanes, planes,init='normal0.01')
if use_bn: self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = conv1x1(planes, planes, init='normal0.01' )
if use_bn: self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = conv1x1(planes, planes * self.expansion, init='normal0.01')
if use_bn: self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.skip = inplanes==(planes*self.expansion)
self.use_bn = use_bn
self.l2_norm = l2_norm
# print( "reslayer skip = %d" % self.skip )
def forward(self, x):
residual = x
out = self.conv1(x)
if self.l2_norm: out = Fu.normalize(out, dim=1)
if self.use_bn: out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
if self.l2_norm: out = Fu.normalize(out, dim=1)
if self.use_bn: out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
if self.l2_norm: out = Fu.normalize(out, dim=1)
if self.use_bn: out = self.bn3(out)
if self.skip: out += residual
out = self.relu(out)
return out
|
c3dm-main
|
c3dm/c3dpo.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import inspect
import copy
import os
import yaml
import ast
import numpy as np
from tools.attr_dict import nested_attr_dict
from tools.utils import auto_init_args
def convert_to_stringval(cfg_,squeeze=None,stringify_vals=False):
out = {}
convert_to_stringval_rec( [('ROOT',cfg_)], out,
squeeze=squeeze,stringify_vals=stringify_vals)
return out
def convert_to_stringval_rec( flds, output, squeeze=None, stringify_vals=False):
for k,v in flds[-1][1].items():
if isinstance(v,dict):
flds_cp = copy.deepcopy(flds)
flds_cp.append( (k,v) )
convert_to_stringval_rec( flds_cp, output,
squeeze=squeeze, stringify_vals=stringify_vals)
else:
valname = [] ; valname_full = []
for f in flds[1:]:
valname_full.append(squeeze_string(f[0],squeeze))
valname_full.append(squeeze_string(k,squeeze))
valname_full = ".".join(valname_full)
if stringify_vals:
output[valname_full] = str(v)
else:
output[valname_full] = v
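# Illustrative example: convert_to_stringval({'optimizer': {'lr': 0.1}})
# returns {'optimizer.lr': 0.1}; with stringify_vals=True the value becomes the
# string '0.1'.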
def squeeze_key_string(f,squeeze_inter,squeeze_tail):
keys = f.split('.')
tail = keys[-1]
inter = keys[0:-1]
nkeys = len(keys)
if nkeys > 1:
take_from_each = int(np.floor(float(squeeze_inter-nkeys)/float(nkeys-1)))
take_from_each = max(take_from_each,1)
for keyi in range(nkeys-1):
s = inter[keyi]
s = s[0:min(take_from_each,len(s))]
inter[keyi] = s
tail = squeeze_string(tail,squeeze_tail)
inter.append(tail)
out = ".".join( inter )
return out
def squeeze_string(f,squeeze):
if squeeze is None or squeeze > len(f): return f
idx = np.round(np.linspace(0,len(f)-1,squeeze))
idx = idx.astype(int).tolist()
f_short = [ f[i] for i in idx ]
f_short = str("").join(f_short)
return f_short
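# Illustrative example: squeeze_string('keypoints', 4) keeps the characters at
# indices round(linspace(0, 8, 4)) = [0, 3, 5, 8], i.e. 'kpis'.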
def get_default_args(C):
# returns dict of keyword args of a callable C
sig = inspect.signature(C)
kwargs = {}
for pname,defval in dict(sig.parameters).items():
if defval.default==inspect.Parameter.empty:
print('skipping %s' % pname)
continue
else:
kwargs[pname] = copy.deepcopy(defval.default)
return kwargs
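# Illustrative usage with a hypothetical callable:
#   def fn(a, b=2, mode='fast'): ...
#   get_default_args(fn)  # -> {'b': 2, 'mode': 'fast'} ('a' has no default, so it is skipped)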
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def arg_as_list(s):
v = ast.literal_eval(s)
if type(v) is not list:
raise argparse.ArgumentTypeError("Argument \"%s\" is not a list" % (s))
return v
def get_arg_parser(cfg_constructor):
dargs = (get_default_args(cfg_constructor)
if inspect.isclass(cfg_constructor)
else cfg_constructor)
dargs_full_name = convert_to_stringval(dargs,stringify_vals=False)
parser = argparse.ArgumentParser(
description='Auto-initialized argument parser'
)
for darg, val in dargs_full_name.items():
tp = type(val) if val is not None else str
if tp==bool:
parser.add_argument(
'--%s' % darg,
dest=darg,
help=darg,
default=val,
type=str2bool,
)
elif tp == list:
parser.add_argument(
'--%s' % darg,
type=arg_as_list,
default=val,
help=darg)
else:
parser.add_argument(
'--%s' % darg,
dest=darg,
help=darg,
default=val,
type=tp,
)
return parser
def set_config_from_config(cfg,cfg_set):
# cfg_set ... dict with nested options
cfg_dot_separated = convert_to_stringval(cfg_set,stringify_vals=False)
set_config(cfg,cfg_dot_separated)
def set_config_rec(cfg,tgt_key,val,check_only=False):
if len(tgt_key) > 1:
k = tgt_key.pop(0)
if k not in cfg:
#raise ValueError('no such config key %s' % k )
cfg[k] = {}
set_config_rec(cfg[k],tgt_key,val,check_only=check_only)
else:
if check_only:
assert cfg[tgt_key[0]]==val
else:
cfg[tgt_key[0]] = val
def set_config(cfg,cfg_set):
# cfg_set ... dict with .-separated options
for cfg_key,cfg_val in cfg_set.items():
# print('setting %s = %s' % (cfg_key,str(cfg_val)) )
cfg_key_split = [ k for k in cfg_key.split('.') if len(k) > 0 ]
set_config_rec(cfg,copy.deepcopy(cfg_key_split),cfg_val)
set_config_rec(cfg,cfg_key_split,cfg_val,check_only=True)
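# Illustrative usage: set_config(cfg, {'optimizer.lr': 0.01}) sets
# cfg['optimizer']['lr'] = 0.01, creating the intermediate dict if needed, and
# the second set_config_rec call with check_only=True verifies the write.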
def set_config_from_file(cfg,cfg_filename):
# set config from yaml file
with open(cfg_filename, 'r') as f:
yaml_cfg = yaml.load(f, Loader=yaml.FullLoader)  # explicit Loader required by newer PyYAML
set_config_from_config(cfg,yaml_cfg)
def dump_config(cfg):
cfg_filename = os.path.join(cfg.exp_dir,'expconfig.yaml')
with open(cfg_filename, 'w') as yaml_file:
yaml.dump(cfg, yaml_file, default_flow_style=False)
|
c3dm-main
|
c3dm/config.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as Fu
import torchvision
from torchvision import models
from visdom import Visdom
import numpy as np
from tools.utils import auto_init_args
import collections
class HyperColumNet(nn.Module):
def __init__( self,
trunk_arch='resnet50',
n_upsample=2,
hc_layers=[1,2,3,4],
hcdim=512,
pose_confidence=True,
depth_offset=0.,
smooth=False,
encode_input_keypoints = False,
kp_encoding_sig=1.,
dimout=1,
dimout_glob = 0,
dimout_glob_alpha = 0,
n_keypoints=12,
architecture='hypercolumns',
dilate_start=2,
glob_inst_norm=False,
final_std=0.01,
final_bias=-1.,
glob_activation=True,
pretrained=True ):
super().__init__()
auto_init_args(self)
trunk = getattr(torchvision.models,trunk_arch)(pretrained=pretrained)
# nfc = trunk.fc.in_features
self.layer0 = torch.nn.Sequential( trunk.conv1,
trunk.bn1,
trunk.relu,
trunk.maxpool )
if self.architecture=='hypercolumns':
for l in [1, 2, 3, 4]:
lname = 'layer%d'%l
setattr(self, lname, getattr(trunk,lname))
for hcl in hc_layers:
lname = 'hc_layer%d'%hcl
indim = getattr(trunk,'layer%d'%hcl)[-1].conv1.in_channels
# if ((self.dimout_glob + self.dimout_glob_alpha) > 0 \
# and hcl==hc_layers[-1]):
# if not self.smooth:
# glob_layers = [ torch.nn.Conv2d(indim, indim,1,bias=True,padding=0),
# torch.nn.ReLU(),
# nn.Conv2d(indim, self.dimout_glob+self.dimout_glob_alpha, \
# 1, bias=True, padding=0) ]
# if self.glob_activation:
# glob_layers.insert(1, \
# torch.nn.InstanceNorm2d(indim) if self.glob_inst_norm \
# else torch.nn.BatchNorm2d(indim))
# else:
# glob_layers = [ nn.Conv2d(indim, self.dimout_glob+self.dimout_glob_alpha, \
# 1, bias=True, padding=0) ]
# self.final_glob = torch.nn.Sequential(*glob_layers )
if self.encode_input_keypoints:
indim += self.n_keypoints
if not self.smooth:
layer_ = torch.nn.Sequential( \
torch.nn.Conv2d(indim, hcdim, 3, bias=True, padding=1),
torch.nn.BatchNorm2d(hcdim),
torch.nn.ReLU(),
torch.nn.Conv2d(hcdim, hcdim, 3, bias=True, padding=1),
)
else:
layer_ = torch.nn.Sequential( \
torch.nn.Conv2d(indim, hcdim, 3, bias=True, padding=1),
)
setattr(self, lname, layer_)
if not self.smooth:
up_layers = [ torch.nn.Conv2d(hcdim,hcdim,3,bias=True,padding=1),
torch.nn.BatchNorm2d(hcdim),
torch.nn.ReLU(),
nn.Conv2d(hcdim, dimout, 3, bias=True, padding=1) ]
else:
up_layers = [ nn.Conv2d(hcdim, dimout, 3, bias=True, padding=1) ]
llayer = up_layers[-1]
llayer.weight.data = \
llayer.weight.data.normal_(0., self.final_std)
if self.final_bias > -1.:
llayer.bias.data = \
llayer.bias.data.fill_(self.final_bias)
print('hcnet: final bias = %1.2e, final std=%1.2e' % \
(llayer.bias.data.mean(),
llayer.weight.data.std())
)
self.final = torch.nn.Sequential(*up_layers)
elif self.architecture=='dilated':
if self.dimout_glob > 0:
raise NotImplementedError('not done yet')
# for l in [1, 2, 3, 4]:
# lname = 'layer%d'%l
# setattr(self, lname, getattr(trunk,lname))
if self.encode_input_keypoints:
c1 = self.layer0[0]
wsz = list(c1.weight.data.shape)
wsz[1] = self.n_keypoints
c1_add = c1.weight.data.new_zeros( wsz ).normal_(0.,0.0001)
c1.weight.data = torch.cat( (c1.weight.data, c1_add), dim=1 )
c1.in_channels += self.n_keypoints
layers = [self.layer0]
li = 0
for l in [1,2,3,4]:
lname = 'layer%d'%l
m = getattr(trunk,lname)
if l >= self.dilate_start:
for mm in m.modules():
if type(mm) == torch.nn.Conv2d:
mm.stride = (1,1)
if mm.kernel_size==(3,3):
dil = (li+2)**2
mm.dilation = ( dil, dil )
mm.padding = ( dil, dil )
li += 1
layers.append(m)
# setattr(self, lname, m)
for m in layers[-1][-1].modules():
if hasattr(m, 'out_channels'):
lastdim = m.out_channels
if True: # deconv for final layer (2x higher resol)
layers.append( torch.nn.ConvTranspose2d( \
lastdim, dimout, kernel_size=3, \
stride=2, output_padding=1, padding=1, bias=True) )
else: # classic conv
layers.append( torch.nn.Conv2d( \
lastdim, dimout, kernel_size=3, \
stride=1, padding=1, bias=True) )
layers[-1].weight.data = \
layers[-1].weight.data.normal_(0., self.final_std)
self.trunk = torch.nn.Sequential(*layers )
self.mean = torch.FloatTensor([0.485, 0.456, 0.406])
self.std = torch.FloatTensor([0.229, 0.224, 0.225])
def get_last_layer_numchannels(self):
return getattr(self,'layer4')[-1].conv1.in_channels
def norm_image(self, x):
mean = self.mean[None,:,None,None].type_as(x)
std = self.std[None,:,None,None].type_as(x)
return (x - mean) / std
def gkernel( self, sz, rel_scale, mu, sig ):
g = torch.linspace( 0.5, sz-0.5, sz ).type_as(mu)
g = ( (-(g[None,None,:] - mu[:,:,None]*rel_scale)**2) / \
(sig * rel_scale) ).exp()
return g
def make_kp_encoding(self, kp_loc_vis, im_size, grid_size):
rel_scale = [g/i for g,i in zip(grid_size, im_size)]
g_x = self.gkernel( grid_size[1], rel_scale[1], kp_loc_vis[:,0,:],
self.kp_encoding_sig )
g_y = self.gkernel( grid_size[0], rel_scale[0], kp_loc_vis[:,1,:],
self.kp_encoding_sig )
g = g_y[:,:,:,None] * g_x[:,:,None,:]
g *= kp_loc_vis[:,2,:,None, None]
return g
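    # make_kp_encoding builds one Gaussian heatmap per keypoint, shaped
    # B x n_keypoints x grid_H x grid_W: the centres are the keypoint locations
    # rescaled from image to grid resolution, and the maps of invisible keypoints
    # are zeroed via kp_loc_vis[:,2,:]. run_hc concatenates these maps to every
    # hypercolumn level when encode_input_keypoints is set.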
def run_hc(self, images, kp_loc_vis=None, only_glob=False, skip_norm_image=False):
if skip_norm_image:
x = self.layer0(images)
else:
x = self.layer0(self.norm_image(images))
x1 = self.layer1(x)
x2 = self.layer2(x1)
x3 = self.layer3(x2)
x4 = self.layer4(x3)
x4_avg = x4.mean((2,3), keepdim=True) # TODO: keepdim=False
if only_glob:
            return None, x4_avg  # only the pooled global feature is needed here
# if (self.dimout_glob + self.dimout_glob_alpha) > 0:
# out_glob = self.final_glob(x4_avg)
# if only_glob:
# return out_glob
# else:
# assert not only_glob
xs = [x1, x2, x3, x4]
if self.encode_input_keypoints:
# append kp_encoding to all xs
kp_encoding = self.make_kp_encoding( \
kp_loc_vis, images.shape[2:], x.shape[2:] )
for i in range(len(xs)):
kp_up_ = Fu.interpolate( kp_encoding, size=xs[i].shape[2:],
mode='bilinear' )
xs[i] = torch.cat( (xs[i], kp_up_), dim=1 )
hc = 0.
upsize = None
for hcl in self.hc_layers:
            if upsize is None:
upsize = xs[hcl-1].shape[2:]
lname = 'hc_layer%d'%hcl
f = getattr(self, lname)(xs[hcl-1])
fup = Fu.interpolate(f,size=upsize,mode='bilinear')
hc = hc + fup * (1./len(self.hc_layers))
out = self.final(hc)
return out, x4_avg
# if (self.dimout_glob+self.dimout_glob_alpha) > 0:
# return out, out_glob
# else:
# return out, None
def run_dil(self, images, kp_loc_vis=None, only_glob=False, skip_norm_image=False):
assert not only_glob, 'not yet implemented'
if skip_norm_image:
l1in = images
else:
l1in = self.norm_image(images)
if self.encode_input_keypoints:
kp_encoding = self.make_kp_encoding( \
kp_loc_vis, images.shape[2:], images.shape[2:] )
l1in = torch.cat( (l1in, kp_encoding), dim=1 )
return self.trunk(l1in)
def forward(self, images, kp_loc_vis=None, only_glob=False, skip_norm_image=False):
if self.architecture=='dilated':
out = self.run_dil(images, kp_loc_vis=kp_loc_vis, only_glob=only_glob, skip_norm_image=skip_norm_image)
elif self.architecture=='hypercolumns':
out = self.run_hc(images, kp_loc_vis=kp_loc_vis, only_glob=only_glob, skip_norm_image=skip_norm_image)
else:
raise ValueError()
return out
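# Shape-oriented usage sketch (assumed sizes; defined here for illustration only and
# never called by the training code):
def _hypercolumnet_example():
    import torch
    net = HyperColumNet(n_keypoints=12, encode_input_keypoints=True,
                        dimout=3, pretrained=False)
    images = torch.randn(2, 3, 224, 224)
    kp_loc_vis = torch.rand(2, 3, 12)          # rows: x, y, visibility
    out, glob = net(images, kp_loc_vis=kp_loc_vis)
    # `out` is the dense B x dimout x H/4 x W/4 embedding, `glob` the pooled
    # B x C x 1 x 1 trunk feature consumed by the global heads in model.py
    return out.shape, glob.shape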
# taken from FCRN_pytorch on github
class FasterUpProj(nn.Module):
# Faster UpProj decorder using pixelshuffle
class faster_upconv(nn.Module):
def __init__(self, in_channel):
super(FasterUpProj.faster_upconv, self).__init__()
self.conv1_ = nn.Sequential(collections.OrderedDict([
('conv1', nn.Conv2d(in_channel, in_channel // 2, kernel_size=3)),
('bn1', nn.BatchNorm2d(in_channel // 2)),
]))
self.conv2_ = nn.Sequential(collections.OrderedDict([
('conv1', nn.Conv2d(in_channel, in_channel // 2, kernel_size=(2, 3))),
('bn1', nn.BatchNorm2d(in_channel // 2)),
]))
self.conv3_ = nn.Sequential(collections.OrderedDict([
('conv1', nn.Conv2d(in_channel, in_channel // 2, kernel_size=(3, 2))),
('bn1', nn.BatchNorm2d(in_channel // 2)),
]))
self.conv4_ = nn.Sequential(collections.OrderedDict([
('conv1', nn.Conv2d(in_channel, in_channel // 2, kernel_size=2)),
('bn1', nn.BatchNorm2d(in_channel // 2)),
]))
self.ps = nn.PixelShuffle(2)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
# print('Upmodule x size = ', x.size())
x1 = self.conv1_(nn.functional.pad(x, (1, 1, 1, 1)))
x2 = self.conv2_(nn.functional.pad(x, (1, 1, 0, 1)))
x3 = self.conv3_(nn.functional.pad(x, (0, 1, 1, 1)))
x4 = self.conv4_(nn.functional.pad(x, (0, 1, 0, 1)))
# print(x1.size(), x2.size(), x3.size(), x4.size())
x = torch.cat((x1, x2, x3, x4), dim=1)
x = self.ps(x)
return x
class FasterUpProjModule(nn.Module):
def __init__(self, in_channels, smooth=False):
super(FasterUpProj.FasterUpProjModule, self).__init__()
out_channels = in_channels // 2
self.smooth = smooth
self.upper_branch = nn.Sequential(collections.OrderedDict([
('faster_upconv', FasterUpProj.faster_upconv(in_channels)),
('relu', nn.ReLU(inplace=True)),
('conv', nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False)),
('batchnorm', nn.BatchNorm2d(out_channels)),
]))
if self.smooth:
self.bottom_branch = None
else:
self.bottom_branch = FasterUpProj.faster_upconv(in_channels)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x1 = self.upper_branch(x)
if self.smooth:
x2 = Fu.interpolate(x[:,:x1.shape[1],:,:],size=x1.shape[2:],mode='bilinear')
else:
x2 = self.bottom_branch(x)
x = x1 + x2
x = self.relu(x)
return x
def __init__(self, in_channel, n_layers=2, smooth=False, dimout=2):
super(FasterUpProj, self).__init__()
layers = []
for l in range(n_layers):
indim = in_channel // int(2**l)
layers.append(self.FasterUpProjModule(indim,smooth=smooth))
last = nn.Conv2d(indim//2, dimout, 3, padding=1)
layers.append( last )
self.trunk = nn.Sequential(*layers)
def forward(self,x):
return self.trunk(x)
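# Usage sketch (hypothetical feature map; FasterUpProj is an alternative decoder kept
# in this file and is not wired into HyperColumNet above):
def _faster_upproj_example():
    import torch
    dec = FasterUpProj(in_channel=256, n_layers=2, dimout=2)
    feat = torch.randn(1, 256, 14, 14)
    out = dec(feat)          # every FasterUpProjModule doubles the resolution
    return out.shape         # -> torch.Size([1, 2, 56, 56])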
# def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
# """3x3 convolution with padding"""
# return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
# padding=dilation, groups=groups, bias=False, dilation=dilation)
# def upconv(in_planes, out_planes, stride=2, groups=1, dilation=1):
# """up convolution"""
# kernel_size = 2*(stride-1)+1
# pad = int((kernel_size-1)/2)
# return nn.ConvTranspose2d(in_planes, out_planes, kernel_size, stride=stride, \
# padding=pad, output_padding=pad, groups=groups)
# def conv1x1(in_planes, out_planes, stride=1):
# """1x1 convolution"""
# return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
# class UpBottleneck(nn.Module):
# expansion = 4
# def __init__(self, inplanes, planes, stride=1, upfactor=2, groups=1,
# base_width=64, dilation=1, norm_layer=None):
# super(UpBottleneck, self).__init__()
# if norm_layer is None:
# norm_layer = nn.BatchNorm2d
# width = int(planes * (base_width / 64.)) * groups
# # Both self.conv2 and self.downsample layers downsample the input when stride != 1
# self.conv1 = conv1x1(inplanes, width)
# self.bn1 = norm_layer(width)
# self.conv2 = upconv(width, width, upfactor, groups)
# self.bn2 = norm_layer(width)
# self.conv3 = conv1x1(width, planes * self.expansion)
# self.bn3 = norm_layer(planes * self.expansion)
# self.relu = nn.ReLU(inplace=True)
# self.scale = scale
# self.stride = stride
# def forward(self, x):
# identity = x
# out = self.conv1(x)
# out = self.bn1(out)
# out = self.relu(out)
# out = self.conv2(out)
# out = self.bn2(out)
# out = self.relu(out)
# out = self.conv3(out)
# out = self.bn3(out)
# out += identity
# identity = Fu.interpolate(x,size=out.shape[2:],mode='bilinear')
# out = self.relu(out)
# return out
|
c3dm-main
|
c3dm/hypercolumnet.py
|
c3dm-main
|
c3dm/__init__.py
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
from functools import lru_cache
import math
import os
import yaml
import numpy as np
import torch
import torch.nn.functional as Fu
from pytorch3d.renderer import cameras
from pytorch3d.transforms import so3
from visdom import Visdom
import c3dpo
from hypercolumnet import HyperColumNet
from config import get_default_args
from tools import model_io
from tools import so3 as so3int # TODO: move random 2d rot elsewhere; use 6d from pt3d
from tools import vis_utils
import tools.eval_functions as eval_func
import tools.functions as func
from tools.loss_models import AppearanceLoss, GaussianLayer
from tools import utils
from tools.tensor_accumulator import TensorAccumulator
def conv1x1(in_planes, out_planes, init='no', cnv_args={
'bias': True,
'kernel_size': 1,
}, std=0.01):
"""1x1 convolution"""
cnv = torch.nn.Conv2d(in_planes, out_planes, **cnv_args)
# init weights ...
if init == 'no':
pass
elif init == 'normal0.01':
cnv.weight.data.normal_(0., std)
if cnv.bias is not None:
cnv.bias.data.fill_(0.)
else:
assert False
return cnv
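# For reference: with the default resnet50 trunk this is used below roughly as
# conv1x1(2048, dimout, init='normal0.01') to build the GlobalHead prediction layers.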
# Module that predicts shape and texture parameters, along with rotation
class GlobalHead(torch.nn.Module):
def __init__(
self,
input_channels,
alpha_geom_size=0,
alpha_tex_size=0,
camera_code_size=0,
add_shared_layer=True,
glob_inst_norm=False,
):
super(GlobalHead, self).__init__()
if not(alpha_tex_size > 0 or alpha_geom_size >= 0 or camera_code_size > 0):
return
make_fc_layer = lambda dimout: conv1x1(input_channels, dimout, init='normal0.01')
# conv with dimout 0 does not work; use this instead
make_degenerate = lambda feat: feat.new_empty(feat.size()[0], 0, 1, 1)
# shared layer by all global stuff
self.shared_layer = None
if add_shared_layer:
self.shared_layer = torch.nn.Sequential(
make_fc_layer(input_channels),
torch.nn.InstanceNorm2d(input_channels)
if glob_inst_norm
else torch.nn.BatchNorm2d(input_channels),
torch.nn.ReLU(),
)
self.alpha_geom_layer = (
make_fc_layer(alpha_geom_size)
if alpha_geom_size > 0
else make_degenerate if alpha_geom_size == 0 else None
)
self.alpha_tex_layer = make_fc_layer(alpha_tex_size) if alpha_tex_size > 0 else None
self.rot_layer = make_fc_layer(camera_code_size) if camera_code_size else None
def forward(self, feat):
if self.shared_layer is not None:
feat = self.shared_layer(feat)
return tuple([
(head(feat)[:,:,0,0] if head is not None else None)
for head in (self.alpha_geom_layer, self.alpha_tex_layer, self.rot_layer)
])
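# Usage sketch (assumed dimensions; mirrors how Model._make_glob_layers wires this up):
def _global_head_example():
    import torch
    head = GlobalHead(input_channels=2048, alpha_geom_size=10,
                      alpha_tex_size=8, camera_code_size=6)
    feat = torch.randn(4, 2048, 1, 1)          # pooled trunk feature
    alpha_geom, alpha_tex, rot6d = head(feat)  # shapes (4,10), (4,8), (4,6)
    return alpha_geom.shape, alpha_tex.shape, rot6d.shape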
class Model(torch.nn.Module):
def __init__( self,
TRUNK = get_default_args(HyperColumNet),
APPEARANCE_LOSS = get_default_args(AppearanceLoss),
nrsfm_exp_path = '',
huber_scaling_basis = 0.01,
huber_scaling_repro = 0.01,
photo_min_k = 6,
photo_reenact = False,
repro_loss_min_ray_length = 0.0,
app_mask_image = False,
detach_app = True,
uv_model_use_bn = True,
uv_model_l2_norm = False,
sampled_sil_n_samples = 1000,
sampled_sph_chamfer = 0,
spherical_embedding_radius = 1.,
c3dpo_flipped=True,
reparametrize_nrsfm_mean = True,
scale_aug_range = 0.2,
t_aug_range = 0.02,
rot_aug_range = 3.14/12.,
custom_basis_size = -1,
n_images_for_app_model = -1,
min_depth = 0.,
argmin_translation_min_depth = 0.,
argmin_translation_ray_projection = True,
ray_reprojection = False,
dilate_basis_loss = 0.,
EMBED_DB = get_default_args(TensorAccumulator),
embed_db_eval = False,
app_model_mask_gt = False,
loss_weights = {
'loss_basis': 1.,
'loss_alpha': 0.,
'loss_rotation': 0.,
'loss_repro': 0.0,
'loss_vgg': 0.0,
'loss_sph_emb_to_cam': 0.0,
'loss_sph_sample_mask': 0.0,
'loss_vgg_app': 0.0,
'loss_l1_app': 0.0,
'loss_ssim_app': 0.0,
'loss_repro_2d': 0.0,
'loss_repro_ray': 0.0,
},
log_vars=[ 'objective',
'loss_basis',
'loss_alpha',
'loss_rotation',
'loss_repro',
'loss_repro_2d',
'loss_repro_ray',
'loss_vgg',
'loss_sph_emb_to_cam',
'loss_sph_sample_mask',
'loss_vgg_app',
'loss_l1_app',
'loss_ssim_app',
'sig_avg',
# depth error metrics
'pclerr_dist',
],
**kwargs ):
super(Model, self).__init__()
# autoassign constructor params to self
utils.auto_init_args(self)
assert not uv_model_use_bn and uv_model_l2_norm, 'Do not use BN UV network!'
self._load_and_fix_nrsfm()
self.alpha_bias = None
self.basis_size = custom_basis_size if custom_basis_size >= 0 else self.nrsfm_model.shape_basis_size
if self.basis_size == self.nrsfm_model.shape_basis_size:
# will be able to compute basis matching loss
basis = torch.cat((
self.nrsfm_model.shape_layer.bias.data.view(3, -1, 1),
self.nrsfm_model.shape_layer.weight.data.view(3, -1, self.basis_size),
), dim=2)
self.nrsfm_model_basis = basis.permute(2,0,1).detach().cuda(0)
self.alpha_bias = self.nrsfm_model.alpha_layer.bias[None,:,None,None,None].cuda(0)
TRUNK['dimout'] = 3
self.trunk = HyperColumNet(**TRUNK)
self._make_glob_layers()
if self.trunk.dimout_glob > 0:
self._make_texture_model()
self._make_geom_deformation_model()
# appearance loss
self.appearance_loss = AppearanceLoss(**APPEARANCE_LOSS)
# init the embed database
EMBED_DB['db_dim'] = TRUNK['dimout']
self.embed_db = TensorAccumulator(**EMBED_DB)
def _load_and_fix_nrsfm(self):
self.nrsfm_model = load_nrsfm_model(self.nrsfm_exp_path)
self.nrsfm_model.z_augment = False
self.nrsfm_model.z_equivariance = False
self.nrsfm_model.canonicalization.use = False
self.nrsfm_model.perspective_depth_threshold = \
max(self.nrsfm_model.perspective_depth_threshold, self.min_depth)
self.nrsfm_model_kp_rescale = float(self.nrsfm_model.keypoint_rescale)
if self.reparametrize_nrsfm_mean:
self.nrsfm_model.reparametrize_mean_shape()
self.nrsfm_mean_radius = self._get_nrsfm_mean_radius()
for prm in self.nrsfm_model.parameters():
prm.requires_grad = False
self.nrsfm_model_basis = None
self.projection_type = self.nrsfm_model.projection_type
assert self.nrsfm_model.keypoint_rescale == 1.0 or self.projection_type == 'orthographic'
def _make_glob_layers(self):
indim = self.trunk.get_last_layer_numchannels()
# TODO: move the relevant config params from trunk
dimout_alpha_tex = self.trunk.dimout_glob
dimout_alpha_geom = self.basis_size
self.global_head = GlobalHead(
indim,
dimout_alpha_geom,
dimout_alpha_tex,
6,
glob_inst_norm=self.trunk.glob_inst_norm,
)
def _make_texture_model(self):
# make MLP mapping basis vectors + app encoding to colors
app_dim = 3 + self.trunk.dimout_glob
app_layers = c3dpo.make_trunk(
dim_in=app_dim,
n_fully_connected=512,
n_layers=3,
use_bn=self.uv_model_use_bn,
l2_norm=self.uv_model_l2_norm,
)
app_layers.append(torch.nn.Conv2d(512, 3, 1))
self.app_model = torch.nn.Sequential(*app_layers)
def _make_geom_deformation_model(self):
delta_layers = c3dpo.make_trunk(
dim_in=3,
n_fully_connected=512,
n_layers=3,
use_bn=self.uv_model_use_bn,
l2_norm=self.uv_model_l2_norm,
)
dim_out = (self.basis_size+1)*3
delta_layers.append( torch.nn.Conv2d(512, dim_out, 1) )
if self.trunk.final_std != 0.01:
ldelta = delta_layers[-1]
ldelta.weight.data = \
ldelta.weight.data.normal_(0., self.trunk.final_std)
ldelta.bias.data = \
ldelta.bias.data.fill_(self.trunk.final_bias)
print('deltanet: final bias = %1.2e, final std=%1.2e' % \
(ldelta.bias.data.mean(),
ldelta.weight.data.std())
)
# delta vectors predicted from the mean vectors
self.delta_model = torch.nn.Sequential(*delta_layers)
def _get_nrsfm_mean_radius(self):
mu = self.nrsfm_model.get_mean_shape().cuda().detach()
mumu = mu.mean(dim=1, keepdim=True)
return ((mu - mumu) ** 2).mean() ** 0.5
@lru_cache()
def _get_image_grid(self, image_size, grid_size):
imgrid = func.image_meshgrid( ((0, image_size[0]), (0, image_size[1])),
grid_size )
imgrid = imgrid[[1,0]] # convert from yx to xy
return imgrid
def _get_distance_from_grid(self, predicted_coord, image_size,
masks=None, K=None, ray_reprojection=True):
ba = predicted_coord.shape[0]
imgrid = self._get_image_grid(image_size, predicted_coord.size()[2:])
imgrid = imgrid.type_as(predicted_coord)[None].repeat(ba,1,1,1)
if masks is not None:
masks = masks.view(ba, -1)
if ray_reprojection:
#assert self.projection_type=='perspective'
imgrid_proj = func.calc_ray_projection(
predicted_coord.view(ba,3,-1),
imgrid.view(ba,2,-1),
K = K,
min_depth=self.min_depth,
min_r_len=self.repro_loss_min_ray_length,
)
err = func.avg_l2_huber(
imgrid_proj,
predicted_coord.view(ba,3,-1),
scaling=self.huber_scaling_repro,
mask=masks
)
else:
shape_reprojected_image, _ = self.nrsfm_model.camera_projection(
func.clamp_depth(predicted_coord, self.min_depth)
)
if self.projection_type=='perspective':
imgrid = self.nrsfm_model.calibrate_keypoints(imgrid, K)
err = func.avg_l2_huber(
shape_reprojected_image.view(ba,2,-1),
imgrid.view(ba,2,-1),
scaling=self.huber_scaling_repro,
mask=masks,
)
return err
def _get_mean_basis_embed(self, embed):
ba, _, he, wi = embed.shape
embed_re = embed.view(ba, self.basis_size+1, 3, he, wi)
embed_mean = embed_re[:, 0, :, :, :]
# add the bias from the alpha layer!
if self.alpha_bias is not None:
embed_mean_add = (embed_re[:,1:,:,:,:] * self.alpha_bias).sum(1)
embed_mean = embed_mean + embed_mean_add
return embed_mean
def _get_deltas_and_concat(self, embed):
return self.delta_model(embed)
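    # delta_model maps the 3-channel spherical embedding to (basis_size+1)*3 channels
    # per pixel: the first 3 channels are the per-pixel mean shape vector and the
    # remaining basis_size*3 channels are the basis deltas consumed by
    # _get_mean_basis_embed and _get_canonical_shape below.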
def _gather_supervised_embeddings(self, embed, kp_loc, image_size):
# uses grid sampler now (grid of size KP x 1)
# outputs B x C x KP
ba = embed.shape[0]
image_size_tensor = torch.tensor(image_size).type_as(embed).flip(0)
grid_ = 2. * kp_loc / image_size_tensor[None,:,None] - 1.
grid_ = grid_.permute(0,2,1).view(ba, -1, 1, 2)
supervised_embed = Fu.grid_sample(embed, grid_, align_corners=False)[:,:,:,0]
return supervised_embed
def _get_basis_loss(self, kp_loc, kp_vis, embed, alpha, image_size):
assert self.nrsfm_model_basis is not None, "NRSFM basis not compatible."
ba = kp_loc.shape[0]
if self.dilate_basis_loss > 0.:
ga = GaussianLayer(sigma=self.dilate_basis_loss, separated=True).cuda()
embed = ga(embed)
kp_embed_view = self._gather_supervised_embeddings(
embed, kp_loc, image_size
)
gt_basis = self.nrsfm_model_basis.reshape(
-1, self.nrsfm_model.n_keypoints
)[None].repeat(ba,1,1).detach()
return func.avg_l2_huber( gt_basis, kp_embed_view,
scaling=self.huber_scaling_basis,
mask=kp_vis[:,None,:],
reduce_dims=[],
)
def _get_rotation_loss(self, est_rotation, nrsfm_rotation):
rel_rotation = torch.eye(3, 3).expand_as(est_rotation)
return 1.0 - torch.mean(
so3.so3_relative_angle(est_rotation, nrsfm_rotation, cos_angle=True)
)
def _adjust_nrsfm_model_kp_scale(self, orig_image_size, image_size):
if self.projection_type=='perspective':
# dont change ...
pass
elif self.projection_type=='orthographic':
rel_scale = 0.5 * sum( \
float(orig_image_size.mean(0)[i]) / image_size[i] \
for i in (0,1) )
self.nrsfm_model.keypoint_rescale = \
self.nrsfm_model_kp_rescale * rel_scale
else:
raise ValueError(self.projection_type)
def _similarity_aug(self, images, kp_loc, kp_vis, masks=None, depths=None):
"""
augment images, depths, masks and kp_loc using random
similarity transformation
"""
ba, _, he, wi = images.shape
# random scale
r_scl = images.new_zeros(ba,).uniform_(1., 1.+self.scale_aug_range)
r_rot = so3int.random_2d_rotation(ba, images.type(), self.rot_aug_range)
# random translation
imdiag = float(np.sqrt(he * wi))
r_t = images.new_zeros(ba,2).uniform_( \
-imdiag*self.t_aug_range, imdiag*self.t_aug_range)
# orig image grid
grid_ = self._get_image_grid(images.shape[2:], images.shape[2:])
grid_flat = grid_.type_as(images).repeat(ba,1,1,1).view(ba,2,-1)
# 1st transform the keypoints
kp_loc = torch.bmm(r_rot, kp_loc)
kp_loc = kp_loc * r_scl[:,None,None]
kp_loc = kp_loc - r_t[:,:,None]
# adjust the visibilities
ok = (kp_loc[:,0,:] >= 0.) * (kp_loc[:,1,:] >= 0.) * \
(kp_loc[:,0,:] < wi) * (kp_loc[:,1,:] < he)
kp_vis = kp_vis * ok.float()
kp_loc[kp_vis[:, None, :].expand_as(kp_loc) < 0.5] = 0.0
# then the image but with inverse trans
grid_t = torch.bmm(r_rot.permute(0,2,1), grid_flat)
grid_t = grid_t / r_scl[:,None,None]
grid_t = grid_t + r_t[:,:,None]
grid_t = grid_t / torch.FloatTensor([wi,he])[None,:,None].type_as(grid_t) # norm to 0, 1
grid_t = grid_t * 2. - 1. # norm to -1, 1
grid_t = grid_t.view(ba,2,he,wi).permute(0,2,3,1).contiguous()
# sample the images, depth, masks
images = Fu.grid_sample(images, grid_t, mode='bilinear', align_corners=False)
if depths is not None:
depths = Fu.grid_sample(depths, grid_t, mode='nearest', align_corners=False)
if masks is not None:
masks = Fu.grid_sample(masks, grid_t, mode='nearest', align_corners=False)
return images, kp_loc, kp_vis, masks, depths
def run_on_embed_db(self, preds, texture_desc, K, masks=None, image_size=None):
embed = self.embed_db.get_db()
embed = embed[None,:,:,None].repeat(preds['phi']['T'].size()[0], 1, 1, 1)
# we have to downscale the embeds to make everything well-behaved
embed_full = self._get_deltas_and_concat(embed)
phi_out = self._get_shapes_and_projections(embed_full, None, preds['phi'], K)
out = dict(
embed_db_mean=embed_full,
embed_db_shape_canonical=phi_out['shape_canonical_dense'],
embed_db_shape_camera_coord=phi_out['shape_camera_coord_dense'],
)
if texture_desc is not None:
app = self._run_app_model(embed_full, texture_desc, embed, skip_sph_assert=True)
out['embed_db_app'] = app
return out
def _merge_masked_tensors(self, pcl, masks):
c = pcl.size()[1]
pcl = pcl.transpose(0, 1).reshape(1, c, -1)
if masks is not None:
pcl = pcl[..., :, masks.reshape(-1) > 0.5]
return pcl
def _assert_spherical_embed(self, embed):
norms = (embed**2).sum(1).sqrt()
# we assert that the norms are constant (std <= 0.01)
# (in case we want to have different radius of the sphere)
assert (
embed.shape[1]==3
and float(norms.std()) <= 1e-2
), 'This can only run on spherical embeds!'
def _get_sph_embed_towards_camera_loss(self, embed, masks, R, eps=1e-8):
ba = embed.size()[0]
embed = embed.reshape(ba, 3, -1)
masks = masks.reshape(ba, 1, -1)
avg_emb = Fu.normalize((embed * masks).sum(dim=2) / (masks.sum(dim=2) + eps), dim=-1)
# Rotated by R, it should be ideally (0, 0, 1)
# swap - with + for the non-flipped C3DPO
sign = -1.0 if self.c3dpo_flipped else +1.0
loss = 1. + sign * torch.matmul(R, avg_emb[..., None])[:, 2].mean()
return loss
def _calc_depth_pcl_errs(self, pred, gt, masks=None):
# reshape the predicted depth to gt size (and rescale the values too)
pred_up = Fu.interpolate(pred, gt.shape[2:], mode='bilinear')
errs = eval_func.eval_depth_scale_inv(
pred_up.detach(), gt.detach(), masks=masks
)
return {'pclerr_dist': errs.mean()}
def _get_canonical_shape(self, dense_basis, alpha, masks, target_std=2.0):
ba, di, he, wi = dense_basis.size()
basis = dense_basis.reshape(ba, -1, 3*he*wi)
canon = basis[:, :1, :] + torch.bmm(alpha[:, None, :], basis[:, 1:, :])
return canon.reshape(ba, 3, he, wi)
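    # i.e. the canonical shape is evaluated densely as X = B_0 + sum_k alpha_k * B_k,
    # with B_0 the per-pixel mean vector and B_k the predicted basis deltas.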
def _argmin_translation(self, shape_camera_coord, shape_proj, shape_vis, K=None):
if self.projection_type=='orthographic':
projection, _ = self.nrsfm_model.camera_projection(shape_camera_coord)
T_amin = func.argmin_translation(projection, shape_proj, v=shape_vis)
T = Fu.pad(T_amin, (0,1), 'constant', float(0))
elif self.projection_type=='perspective':
ba = shape_camera_coord.size()[0]
if K is None:
K = torch.eye(3).type_as(shape_proj)[None].expand(ba, 3, 3)
if self.argmin_translation_ray_projection:
T = func.find_camera_T(
K, shape_camera_coord, shape_proj, v=shape_vis
)
else:
T = func.minimise_2d_residual_over_T(
K, shape_camera_coord, shape_proj, v=shape_vis
)
else:
raise ValueError(self.projection_type)
return T
def _argmin_camera(self, shape_canonical, masks, grid_normalised, phi):
ba = shape_canonical.size()[0]
centre = torch.sum(
shape_canonical.reshape(ba, 3, -1) * masks.reshape(ba, 1, -1),
dim=(0,2,),
keepdim=True,
) / masks.sum()
shape_centered = shape_canonical.reshape(ba, 3, -1) - centre
assert 'R' in phi, "Rotation should be given for argmin_T"
shape_camera_rotated = torch.bmm(phi['R'], shape_centered)
T = self._argmin_translation(
shape_camera_rotated,
grid_normalised.expand(shape_camera_rotated[:,:2,:].size()),
masks.reshape(ba, -1),
K=None, # ! points already calibrated
)
min_depth = self.argmin_translation_min_depth
if min_depth > 0.:
T = torch.cat((T[:,0:2], torch.clamp(T[:,2:3], min_depth)), dim=1)
T = T - torch.matmul(phi['R'], centre)[:, :, 0]
return T
def _get_shapes_and_projections(
self, dense_basis, masks, global_desc, K, image_repro_gt=None, alpha=None
):
masks = (
masks if masks is not None
else dense_basis.new_ones(dense_basis[:, :1, ...].size())
)
assert len(masks.size()) == 4
ba = dense_basis.size()[0]
kp_mean = global_desc['kp_mean']
phi = copy.copy(global_desc)
rescale = self.nrsfm_model.keypoint_rescale
if alpha is not None:
phi['shape_coeff'] = alpha
if self.projection_type=='perspective':
focal = torch.stack((K[:, 0, 0], K[:, 1, 1]), dim=1)
p0 = K[:, :2, 2]
camera = cameras.SfMPerspectiveCameras(
R=phi['R'].permute(0, 2, 1),
focal_length=focal, principal_point=p0,
device=dense_basis.device,
)
else:
camera = cameras.SfMOrthographicCameras(
R=phi['R'].permute(0, 2, 1),
device=dense_basis.device,
)
shape_canonical = self._get_canonical_shape(
dense_basis, phi['shape_coeff'], masks
)
if 'T' not in phi:
# the grid has to be calibrated (=pre-multiplied by K^{-1}) first!
grid_im_coord = Fu.pad(
image_repro_gt.reshape(1, 2, -1).permute(0,2,1), (0, 1), value=1.0
).repeat(ba, 1, 1)
grid_im_coord = camera.unproject_points(
grid_im_coord, world_coordinates=False
)[:,:,:2].permute(0,2,1)
grid_normalised = (grid_im_coord - kp_mean[:,:,None]) * rescale
phi['T'] = self._argmin_camera(
shape_canonical, masks, grid_normalised, phi
)
camera.T = phi['T']
shape_canonical_pt3d = shape_canonical.reshape(ba, 3, -1).permute(0, 2, 1)
shape_camera_coord = camera.get_world_to_view_transform().transform_points(
shape_canonical_pt3d
)
shape_image_coord_cal_dense = shape_camera_coord
depth_dense = shape_camera_coord[:,:,2:]
shape_proj_image = camera.transform_points(shape_canonical_pt3d)
shape_reprojected_image = shape_proj_image[:, :, :2]
# correct for the kp normalisation
if self.projection_type == 'perspective':
shape_image_coord_cal_dense = shape_image_coord_cal_dense + Fu.pad(
kp_mean[:,None] * shape_camera_coord[:,:,2:], (0, 1), value=0.0
)
shape_reprojected_image = shape_reprojected_image + (kp_mean * focal)[:, None]
else:
assert self.projection_type == 'orthographic'
shape_image_coord_cal_dense = (
shape_image_coord_cal_dense / rescale +
Fu.pad(kp_mean[:,None], (0, 1), value=0.0)
)
shape_reprojected_image = (
shape_reprojected_image / rescale + kp_mean[:, None]
)
return dict(
phi=phi,
shape_canonical_dense=shape_canonical,
shape_camera_coord_dense=shape_camera_coord.permute(0, 2, 1).reshape_as(shape_canonical),
depth_dense=depth_dense.reshape_as(shape_canonical[:, :1]),
shape_reprojected_image=shape_reprojected_image.permute(0, 2, 1).reshape_as(shape_canonical[:, :2]),
shape_image_coord_cal_dense=shape_image_coord_cal_dense.permute(0, 2, 1).reshape_as(shape_canonical),
)
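    # Summary: given the dense basis, masks and the global descriptor (shape
    # coefficients, rotation and, optionally, translation), this returns the canonical
    # shape, the camera-frame shape and depth, and the 2D reprojection used by the
    # reprojection losses; when 'T' is absent it is recovered by _argmin_camera
    # against the (calibrated) image grid.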
def _get_best_scale(self, preds, image_size):
if self.projection_type=='orthographic':
shape_camera_coord = preds['shape_image_coord_cal_dense']
ba = shape_camera_coord.shape[0]
imgrid = self._get_image_grid(image_size, shape_camera_coord.size()[2:])
imgrid = imgrid.type_as(shape_camera_coord)[None].repeat(ba,1,1,1)
projection, depth = self.nrsfm_model.camera_projection(shape_camera_coord)
s, T = func.argmin_translation_scale(projection, imgrid, v=preds['embed_masks'])
shape_best = torch.cat((
s[:, None, None, None] * shape_camera_coord[:, :2] + T[:, :, None, None],
s[:, None, None, None] * shape_camera_coord[:, 2:]
), dim=1)
elif self.projection_type=='perspective':
# no scale opt here, won't help
shape_best = preds['shape_image_coord_cal_dense']
else:
raise ValueError(self.projection_type)
return shape_best
def _get_sampled_sph_loss(self, preds, K, image_size):
masks = preds['embed_masks']
ba = masks.shape[0]
embed_sphere = torch.randn(
size=(ba, 3, self.sampled_sil_n_samples*10, 1),
dtype=masks.dtype, device=masks.device)
embed_sphere = Fu.normalize(
embed_sphere, dim=1) * self.spherical_embedding_radius
# adjust the mean!
embed_full = self._get_deltas_and_concat(embed_sphere)
dense_phi = self._get_shapes_and_projections(embed_full, masks, preds, K)
image_coords = dense_phi['shape_reprojected_image']
shape = dense_phi['shape_image_coord_cal_dense']
image_size_tensor = torch.FloatTensor(
[s for s in image_size]).type_as(embed_sphere).flip(0)
grid = 2. * (image_coords / image_size_tensor[None,:,None,None]) - 1.
grid_prm = grid.permute(0, 2, 3, 1)
# get all scales until the smallest side is <= 5
samples = []
scl = -1
while min(masks.shape[2:]) > 4:
scl += 1
if scl > 0:
masks = (Fu.interpolate(
masks, scale_factor=0.5, mode='bilinear') > 0.).float()
samples.append(Fu.grid_sample(masks, grid_prm, align_corners=False).view(-1))
samples = torch.cat(samples, dim=0)
loss = (1 - samples).mean()
return {
'loss_sph_sample_mask': loss,
'sph_sample_projs': grid,
'sph_sample_3d': shape,
}
def _get_photometric_losses(
self,
images,
image_coords,
basis_embed,
embed_canonical=None,
n_min=5,
masks=None,
texture_desc=None,
):
ba = images.shape[0]
n_min = min(ba-1, n_min)
assert ba > 1, 'batch_size > 1 for photo losses!'
assert not (self.photo_reenact and texture_desc is None)
image_size = list(images.shape[2:])
image_size_render = list(basis_embed.shape[2:])
image_size_tensor = torch.FloatTensor(image_size).type_as(basis_embed).flip(0)
grid = 2. * (image_coords / image_size_tensor[None,:,None,None]) - 1.
grid = grid.permute(0, 2, 3, 1)
# image warping loss
if self.photo_reenact:
images_reenact = self._run_app_model(
basis_embed, texture_desc[0:1].repeat(ba, 1), embed_canonical
)
else:
images_reenact = images
images_reproject = Fu.grid_sample(images_reenact, grid, align_corners=False)
# resample ref image to images_resample resolution
images_ref = Fu.interpolate(images[:1], size=images_reproject.shape[2:])
images_ref = images_ref.expand_as(images_reproject)
loss_vgg, _, _ = self.appearance_loss(images_reproject, images_ref)
loss_vgg = loss_vgg[:, 0]
# transplant the rendered image by tokp pooling
assert (~torch.isnan(loss_vgg)).all(), "Some photometric loss values are NaN."
if masks is not None:
# weight the losses by seg masks
loss_vgg = masks[:1, 0] * loss_vgg
loss_topk, idx_render = torch.topk(loss_vgg[1:], n_min-1, dim=0, largest=False)
# make sure we include the target view
loss_vgg = (loss_topk.sum(0) + loss_vgg[0]) / n_min
idx_render = idx_render[:,None].expand(-1, 3, -1, -1)
im_render = {
'loss_vgg': (
torch.gather(images_reproject, 0, idx_render).sum(0) + images_reproject[0]
) / n_min
}
out = {}
out['loss_vgg'] = loss_vgg.mean()
out['images_reproject'] = images_reproject.detach()
out['images_gt'] = images_ref.detach()
out['image_ref_render'] = im_render
out['images'] = Fu.interpolate(images, size=images_reproject.shape[2:]).detach()
out['images_reenact'] = Fu.interpolate(images_reenact, size=images_reproject.shape[2:]).detach()
return out
def _mask_gt_image(self, image, mask):
avgcol = (image * mask).sum((2, 3)) / mask.sum((2, 3)).clamp(1)
image_m = image * mask + (1-mask) * avgcol[:, :, None, None]
# blur and mix
ga = GaussianLayer(sigma=5., separated=True).cuda()
image_mf = ga(image_m)
image_m = mask * image_m + (1-mask) * image_mf
return image_m
def _run_app_model(self, embed, texture_desc, embed_canonical, skip_sph_assert=False):
# run the appearance model taking as input per-pixel uv-like
# embeddings `embed` and the global appearance descriptor
# `texture_desc`
n_im_use = self.n_images_for_app_model if \
self.n_images_for_app_model > 0 else embed_canonical.size()[0]
texture_desc = texture_desc[:n_im_use]
embed_for_app = embed_canonical[:n_im_use]
if not skip_sph_assert:
self._assert_spherical_embed(embed_for_app)
if self.detach_app:
embed_for_app = embed_for_app.detach()
embed_app = torch.cat((
texture_desc[:,:,None,None].expand(-1,-1,*list(embed.shape[2:])),
embed_for_app,
), dim=1)
app = self.app_model(embed_app)
return app[:, :3] + 0.5
def _get_app_model_losses(
self,
images,
preds_app,
masks=None,
sigma=None,
):
# for now this is the same
images_pred = preds_app
ba = images_pred.shape[0]
image_size = list(images.shape[2:])
image_size_render = list(images_pred.shape[2:])
if masks is not None:
# weight the losses by seg masks
masks = Fu.interpolate(masks[:ba], size=image_size_render, mode='nearest')
# resample ref image to images_resample resolution
images_gt = Fu.interpolate(images[:ba], size=image_size_render)
# mask the images and do NN interp
if self.app_model_mask_gt:
images_gt = self._mask_gt_image(images_gt, masks)
loss_vgg, loss_rgb, _ = \
self.appearance_loss(
images_pred,
images_gt,
sig=sigma,
mask=masks if self.app_mask_image else None
)
if masks is not None:
# weight the losses by seg masks
loss_vgg, loss_rgb = \
[ (masks * l).sum() / torch.clamp(masks.sum(), 1e-1) \
for l in (loss_vgg, loss_rgb,) ]
else:
loss_vgg, loss_rgb = \
[ l.mean() \
for l in (loss_vgg, loss_rgb,) ]
out = {}
out['loss_vgg'] = loss_vgg
out['loss_l1'] = loss_rgb
out['loss_ssim'] = (loss_rgb * 0.0).detach() # not used
out['images_pred'] = images_pred
out['images_pred_clamp'] = torch.clamp(images_pred,0.,1.)
out['images_gt'] = images_gt
out['images'] = images_gt
return out
def forward(
self,
kp_loc=None,
kp_vis=None,
kp_conf=None,
images=None,
epoch_now=None,
orig_image_size=None,
masks=None,
depths=None,
K=None,
**kwargs
):
ba = images.shape[0] # batch size
image_size = images.size()[2:]
# adjust nrsfm model scale
self._adjust_nrsfm_model_kp_scale(orig_image_size, image_size)
preds = {}
preds['nrsfm_mean_shape'] = self.nrsfm_model.get_mean_shape()
if self.training and (
self.scale_aug_range > 0. or
self.t_aug_range > 0. or
self.rot_aug_range > 0.
):
images, kp_loc, kp_vis, masks, depths = \
self._similarity_aug(images, kp_loc, kp_vis,
masks=masks, depths=depths)
preds.update(
{ 'images_aug': images, 'kp_loc_aug': kp_loc,
'depths_aug': depths, 'masks_aug': masks }
)
embed, glob_features = self.trunk(
images, kp_loc_vis = torch.cat((kp_loc, kp_vis[:,None,:]), dim=1)
)
embed = Fu.normalize(embed, dim=1) * self.spherical_embedding_radius
embed_full = self._get_deltas_and_concat(embed)
#embed_masks = (Fu.interpolate(masks, embed.shape[2:], mode='bilinear') > 0.49).float()
embed_masks = Fu.interpolate(masks, embed.shape[2:], mode='nearest')
image_repro_gt = self._get_image_grid(image_size, embed_full.size()[2:])
preds['embed'] = embed
preds['embed_full'] = embed_full
preds['embed_masks'] = embed_masks
preds['embed_mean'] = self._get_mean_basis_embed(embed_full)
preds['image_repro_gt'] = image_repro_gt
alpha_geom, texture_desc, rotation_code = self.global_head(glob_features)
self.nrsfm_model.eval()
preds['nrsfm'] = self.nrsfm_model(
kp_loc=kp_loc,
kp_vis=kp_vis,
dense_basis=None, # estimate dense Phi here
K=K,
)
assert not self.nrsfm_model.camera_scale # so just ones
assert self.nrsfm_model.argmin_translation
#preds['kp_mean'] = preds['nrsfm']['kp_mean'] # TODO: this should go away
# override top-level preds if regressing directly
assert rotation_code is not None
assert alpha_geom is not None
global_desc = dict(
shape_coeff=alpha_geom,
R=so3int.so3_6d_to_rot(rotation_code),
kp_mean=preds['nrsfm']['kp_mean'],
)
preds.update(self._get_shapes_and_projections(
embed_full, embed_masks, global_desc, K, image_repro_gt
))
preds['shape_image_coord_cal'] = self._gather_supervised_embeddings(
preds['shape_image_coord_cal_dense'], # same as uncal for orthographic
kp_loc,
image_size,
)
preds['kp_reprojected_image'] = self._gather_supervised_embeddings(
preds['shape_reprojected_image'],
kp_loc,
image_size,
)
# compute NR-SFM Prior loss
if self.loss_weights['loss_basis'] > 0.:
preds['loss_basis'] = self._get_basis_loss(
kp_loc,
kp_vis,
embed_full,
preds['nrsfm']['phi']['shape_coeff'],
image_size,
)
if self.loss_weights.loss_alpha > 0.:
assert alpha_geom is not None
preds['loss_alpha'] = func.huber( \
(alpha_geom - preds['nrsfm']['phi']['shape_coeff'])**2,
scaling=self.huber_scaling_basis,
).mean()
if self.loss_weights.loss_rotation > 0.:
preds['loss_rotation'] = self._get_rotation_loss(
preds['phi']['R'],
preds['nrsfm']['phi']['R'],
)
# compute reprojection loss
preds['loss_repro_2d'] = self._get_distance_from_grid(
preds['shape_image_coord_cal_dense'],
image_size,
masks=embed_masks,
K=K,
ray_reprojection=False,
)
# preds['loss_repro_ray'] = 0.0
# if self.projection_type == 'perspective':
preds['loss_repro_ray'] = self._get_distance_from_grid(
preds['shape_image_coord_cal_dense'],
image_size,
masks=embed_masks,
K=K,
ray_reprojection=True,
)
preds['loss_repro'] = preds['loss_repro_ray'] if self.ray_reprojection else preds['loss_repro_2d']
# perceptual loss
preds['photo_out'] = None
if self.photo_min_k > 0 and ba > 1:
# use the first im as a loss as a target
basis_embed_ref = embed_full[:1].expand_as(embed_full)
masks_ref = embed_masks[:1].expand_as(embed_masks)
phi_onto_ref = self._get_shapes_and_projections(basis_embed_ref, masks_ref, preds['phi'], K)
preds['photo_out'] = self._get_photometric_losses(
images,
phi_onto_ref['shape_reprojected_image'],
embed_full,
texture_desc=texture_desc,
n_min=self.photo_min_k,
masks=embed_masks,
embed_canonical=embed,
)
preds['loss_vgg'] = preds['photo_out']['loss_vgg']
# embedding-camera alignment loss
if self.loss_weights['loss_sph_emb_to_cam'] > 0.:
preds['loss_sph_emb_to_cam'] = self._get_sph_embed_towards_camera_loss(
preds['embed'], embed_masks, preds['phi']['R'].detach()
)
# mask sampling loss
if self.loss_weights['loss_sph_sample_mask'] > 0.:
preds.update(self._get_sampled_sph_loss(preds, K, images.shape[2:]))
# appearance model
preds['app'] = None
if texture_desc is not None:
n_im_use = (
self.n_images_for_app_model
if self.n_images_for_app_model > 0
else ba
)
preds['app'] = self._run_app_model(
embed_full[:n_im_use], texture_desc[:n_im_use], embed
)
preds['app_out'] = self._get_app_model_losses(
images, preds['app'][:, :3], masks=masks,
)
for k in ('loss_vgg', 'loss_l1', 'loss_ssim'):
preds[k+'_app'] = preds['app_out'][k]
# finally get the optimization objective using self.loss_weights
preds['objective'] = self.get_objective(preds, epoch_now=epoch_now)
# =================
# the rest is only for visualisation/metrics
# run on cached embed_db
if self.embed_db is not None and self.embed_db_eval:
preds.update(self.run_on_embed_db(preds, texture_desc, K,
masks=embed_masks, image_size=image_size))
# accumulate into embed db
self.embed_db(embed, masks=embed_masks)
depth_pcl_metrics = self._calc_depth_pcl_errs(
preds['depth_dense'], depths, masks=masks
)
preds.update(depth_pcl_metrics)
# find the scale of shape_image_coord that minimizes the repro loss
preds['shape_image_coord_best_scale'] = self._get_best_scale(preds, image_size)
preds['nrsfm_shape_image_coord'] = preds['nrsfm'][{
'orthographic': 'shape_image_coord',
'perspective': 'shape_image_coord_cal',
}[self.projection_type]]
# a hack for vis purposes
preds['misc'] = {}
for k in ('images', 'images_app', 'images_geom', 'embed'):
if k in preds:
preds['misc'][k] = preds[k].detach()
elif k in vars():
preds['misc'][k] = vars()[k]
return preds
def get_objective(self, preds, epoch_now=None):
losses_weighted = {
k: preds[k] * float(w)
for k, w in self.loss_weights.items()
if k in preds and w != 0.0 # avoid adding NaN * 0
}
if not hasattr(self,'_loss_weights_printed') or \
not self._loss_weights_printed:
print('-------\nloss_weights:')
for k,w in self.loss_weights.items():
print('%20s: %1.2e' % (k,w) )
print('-------')
print('-------\nweighted losses:')
for k,w in losses_weighted.items():
print('%20s: %1.2e' % (k,w) )
print('-------')
self._loss_weights_printed = True
loss = torch.stack(list(losses_weighted.values())).sum()
return loss
def visualize( self, visdom_env_imgs, trainmode, \
preds, stats, clear_env=False ):
if stats is not None:
it = stats.it[trainmode]
epoch = stats.epoch
viz = vis_utils.get_visdom_connection(
server=stats.visdom_server,
port=stats.visdom_port,
)
else:
it = 0
epoch = 0
viz = vis_utils.get_visdom_connection()
if not viz.check_connection():
print("no visdom server! -> skipping batch vis")
return
idx_image = 0
title="e%d_it%d_im%d"%(epoch,it,idx_image)
imvar = 'images_aug' if 'images_aug' in preds else 'images'
dvar = 'depths_aug' if 'depths_aug' in preds else 'depths'
mvar = 'masks_aug' if 'masks_aug' in preds else 'masks'
# show depth
ds = preds['depth_dense'].cpu().detach().repeat(1,3,1,1)
ims = preds[imvar].cpu().detach()
ims = Fu.interpolate(ims,size=ds.shape[2:])
if mvar in preds: # mask depths, ims by masks
masks = Fu.interpolate(preds[mvar].cpu().detach(),
size=ds.shape[2:], mode='nearest' )
ims *= masks ; ds *= masks
ds = vis_utils.denorm_image_trivial(ds)
if 'pred_mask' in preds:
pred_mask = torch.sigmoid(preds['pred_mask'][:, None].detach()).cpu().expand_as(ims)
ims_ds = torch.cat( (ims, ds, pred_mask), dim=2 )
else:
ims_ds = torch.cat( (ims, ds), dim=2 )
viz.images(ims_ds, env=visdom_env_imgs, opts={'title':title}, win='depth')
# show aug images if present
imss = []
for k in (imvar, 'images_app', 'images_geom'):
if k in preds:
ims = preds[k].cpu().detach()
ims = Fu.interpolate(ims, scale_factor=0.25)
ims = vis_utils.denorm_image_trivial(ims)
R, R_gt = preds['phi']['R'], preds['nrsfm']['phi']['R']
angle_to_0 = np.rad2deg(
so3.so3_relative_angle(R[0].expand_as(R), R).data.cpu().numpy()
)
angle_to_0_gt = np.rad2deg(
so3.so3_relative_angle(R_gt[0].expand_as(R_gt), R_gt).data.cpu().numpy()
)
if ~np.isnan(angle_to_0).any():
ims = np.stack([
vis_utils.write_into_image(
                        (im*255.).astype(np.uint8), "%d° / %d°" % (d, d_gt), color=(255,0,255)
) for im, d, d_gt in zip(ims.data.numpy(), angle_to_0, angle_to_0_gt)
])
else:
ims = (ims.data.numpy()*255.).astype(np.uint8)
imss.append(ims)
if len(imss) > 0:
viz.images(
#torch.cat(imss, dim=2),
np.concatenate(imss, axis=2).astype(np.float32)/255.,
env=visdom_env_imgs,
opts={'title': title},
win='imaug',
)
# show reprojections
p1 = preds['kp_loc_aug' if 'kp_loc_aug' in preds else 'kp_loc'][idx_image]
p2 = preds['kp_reprojected_image'][idx_image,0:2]
p3 = preds['nrsfm']['kp_reprojected_image'][idx_image]
p = np.stack([p_.detach().cpu().numpy() for p_ in (p1, p2, p3)])
v = preds['kp_vis'][idx_image].detach().cpu().numpy()
vis_utils.show_projections( viz, visdom_env_imgs, p, v=v,
title=title, cmap__='rainbow',
markersize=50, sticks=None,
stickwidth=1, plot_point_order=False,
image=preds[imvar][idx_image].detach().cpu().numpy(),
win='projections' )
# dense reprojections
p1 = preds['image_repro_gt'].detach().cpu()
p2 = preds['shape_reprojected_image'][idx_image].detach().cpu()
# override mask with downsampled (augmentation applied if any)
mvar = 'embed_masks'
if mvar in preds:
masks = preds[mvar].detach().cpu()
#masks = Fu.interpolate(masks, size=p2.shape[1:], mode='nearest')
p1 = p1 * masks[idx_image]
p2 = p2 * masks[idx_image]
# TEMP
img = (preds[imvar][idx_image].cpu() * Fu.interpolate(
preds[mvar].cpu()[idx_image:idx_image+1], size=preds[imvar][0, 0].size(), mode='nearest'
)[0]).data.cpu().numpy()
p = np.stack([p_.view(2,-1).numpy() for p_ in (p1, p2)])
vis_utils.show_projections( viz, visdom_env_imgs, p, v=None,
title=title, cmap__='rainbow',
markersize=1, sticks=None,
stickwidth=1, plot_point_order=False,
image=img,
win='projections_dense' )
vis_utils.show_flow(viz, visdom_env_imgs, p,
image=preds[imvar][idx_image].detach().cpu().numpy(),
title='flow ' + title,
linewidth=1,
win='projections_flow',
)
if 'sph_sample_projs' in preds:
p = preds['sph_sample_projs'][idx_image].detach().cpu().view(2, -1)
if 'sph_sample_gt' in preds:
p_ = preds['sph_sample_gt'][idx_image].detach().cpu().view(2, -1)
p_ = p_.repeat(1, math.ceil(p.shape[1]/p_.shape[1]))
p = [p, p_[:, :p.shape[1]]]
else:
p = [p.view(2, -1)]
# p = (torch.stack(p) + 1.) / 2.
p = (torch.stack(p) + 1.) / 2.
imsize = preds[imvar][idx_image].shape[1:]
p[:, 0, :] *= imsize[1]
p[:, 1, :] *= imsize[0]
vis_utils.show_projections(viz, visdom_env_imgs,
p, v=None,
title=title + '_spl_sil',
cmap__='rainbow',
markersize=1, sticks=None,
stickwidth=1, plot_point_order=False,
image=preds[imvar][idx_image].detach().cpu().numpy(),
win='projections_spl_sil'
)
merged_embed = self._merge_masked_tensors(
preds['embed_full'], preds['embed_masks']
)[..., None]
gl_desc_0 = {k: v[:1] for k, v in preds['phi'].items()}
merged_with_pivot_phis = self._get_shapes_and_projections(
merged_embed, None, gl_desc_0, preds['K'][:1]
)
preds['shape_canonical_same_alphas'] = merged_with_pivot_phis[
'shape_canonical_dense'
][0 ,..., 0]
# dense 3d
pcl_show = {}
vis_list = ['dense3d', 'mean_shape', 'embed_db', 'batch_fused', 'sph_embed']
if self.loss_weights['loss_sph_sample_mask'] > 0:
vis_list.append('sph_sample_3d')
for vis in vis_list:
if vis=='canonical':
pcl = preds['shape_canonical_dense']
elif vis=='dense3d':
pcl = preds['shape_image_coord_cal_dense']
elif vis=='batch_fused':
pcl = preds['shape_canonical_same_alphas'].detach().cpu()
pcl = torch.cat((pcl, pcl), dim=0)
pcl[3:5,:] = 0.0
pcl[5,:] = 1.0
elif vis=='mean_shape':
pcl = preds['embed_mean']
elif vis=='mean_c3dpo_shape':
pcl = preds['nrsfm_mean_shape']
elif vis=='shape_canonical':
pcl = preds['shape_canonical_dense']
elif vis == 'sph_embed':
pcl = preds['embed'].detach().clone()
elif vis == 'sph_sample_3d':
pcl = preds['sph_sample_3d'][idx_image].detach().cpu().view(3, -1)
pcl = torch.cat((pcl, pcl.clone()), dim=0)
pcl[4:,:] = 0.0
pcl[3,:] = 1.0
# filtering outliers
pcl[:3] -= pcl[:3].mean(dim=1, keepdim=True) # will be centered anyway
std = pcl[:3].std(dim=1).max()
pcl[:3] = pcl[:3].clamp(-2.5*std, 2.5*std)
elif vis == 'embed_db':
pcl = self.embed_db.get_db(uniform_sphere=False).cpu().detach().view(3, -1)
pcl = torch.cat((pcl, pcl.clone()), dim=0)
pcl[3:5,:] = 0.0
pcl[4,:] = 1.0
else:
raise ValueError(vis)
if vis not in ('mean_c3dpo_shape', 'batch_fused', 'sph_sample_3d', 'embed_db'):
pcl_rgb = preds[imvar].detach().cpu()
#pcl = Fu.interpolate(pcl.detach().cpu(), pcl_rgb.shape[2:], mode='bilinear')
pcl_rgb = Fu.interpolate(pcl_rgb, size=pcl.shape[2:], mode='bilinear')
if (mvar in preds):
masks = preds[mvar].detach().cpu()
masks = Fu.interpolate(masks, \
size=pcl.shape[2:], mode='nearest')
else:
masks = None
pcl = pcl.detach().cpu()[idx_image].view(3,-1)
pcl_rgb = pcl_rgb[idx_image].view(3,-1)
pcl = torch.cat((pcl, pcl_rgb), dim=0)
if masks is not None:
masks = masks[idx_image].view(-1)
pcl = pcl[:,masks>0.]
# if vis == 'sph_embed':
# import pdb; pdb.set_trace()
if pcl.numel()==0:
continue
pcl_show[vis] = pcl.numpy()
vis_utils.visdom_plotly_pointclouds(viz, pcl_show, visdom_env_imgs,
title=title+'_'+vis,
markersize=1,
sticks=None, win=vis,
height=700, width=700 ,
normalise=True,
)
var3d = {
'orthographic': 'shape_image_coord',
'perspective': 'shape_image_coord_cal',
}[self.projection_type]
sparse_pcl = {
'nrsfm': preds['nrsfm'][var3d][idx_image].detach().cpu().numpy().copy(),
'dense': preds['shape_image_coord_cal'][idx_image].detach().cpu().numpy().copy(),
}
if 'kp_loc_3d' in preds:
sparse_pcl['gt'] = preds['kp_loc_3d'][idx_image].detach().cpu().numpy().copy()
if 'class_mask' in preds:
class_mask = preds['class_mask'][idx_image].detach().cpu().numpy()
sparse_pcl = {k: v*class_mask[None] for k,v in sparse_pcl.items()}
vis_utils.visdom_plotly_pointclouds(viz, sparse_pcl, visdom_env_imgs, \
title=title+'_sparse3d', \
markersize=5, \
sticks=None, win='nrsfm_3d',
height=500,
width=500 )
if 'photo_out' in preds and preds['photo_out'] is not None:
# show the source images and their renders
ims_src = preds['photo_out']['images'].detach().cpu()
ims_repro = preds['photo_out']['images_reproject'].detach().cpu()
ims_reenact = preds['photo_out']['images_reenact'].detach().cpu()
ims_gt = preds['photo_out']['images_gt'].detach().cpu()
# cat all the images
ims = torch.cat((ims_src,ims_reenact,ims_repro,ims_gt), dim=2)
ims = torch.clamp(ims,0.,1.)
viz.images(ims, env=visdom_env_imgs, opts={'title':title}, win='imrepro')
im_renders = preds['photo_out']['image_ref_render']
for l in im_renders:
im_gt = preds['photo_out']['images_gt'][0].detach().cpu()
im_render = im_renders[l].detach().cpu()
im = torch.cat((im_gt, im_render), dim=2)
im = torch.clamp(im, 0., 1.)
viz.image(im, env=visdom_env_imgs, \
opts={'title':title+'_min_render_%s' % l}, win='imrender_%s' % l)
if 'app_out' in preds and preds['app_out'] is not None:
# show the source images and their predictions
ims_src = preds['app_out']['images'].detach().cpu()
ims_pred = preds['app_out']['images_pred_clamp'].detach().cpu()
ims = torch.cat((ims_src,ims_pred), dim=2)
viz.images(ims, env=visdom_env_imgs, opts={'title':title}, win='impred')
def load_nrsfm_model(exp_name, get_cfg=False):
from dataset.dataset_configs import C3DPO_MODELS, C3DPO_URLS
if exp_name in C3DPO_MODELS:
exp_path = C3DPO_MODELS[exp_name]
else:
exp_path = exp_name
if not os.path.exists(exp_path):
url = C3DPO_URLS[exp_name]
print('Downloading C3DPO model %s from %s' % (exp_name, url))
utils.untar_to_dir(url, exp_path)
cfg_file = os.path.join(exp_path, 'expconfig.yaml')
assert os.path.isfile(cfg_file), 'no config for NR SFM %s!' % cfg_file
with open(cfg_file, 'r') as f:
        # the dumped experiment config may contain python objects, so explicitly keep
        # the legacy loader behaviour (yaml.load without a Loader fails on recent PyYAML)
        cfg = yaml.load(f, Loader=yaml.Loader)
# exp = ExperimentConfig(cfg_file=cfg_file)
nrsfm_model = c3dpo.C3DPO(**cfg.MODEL)
model_path = model_io.find_last_checkpoint(exp_path)
    assert model_path is not None, "no NR-SFM checkpoint found under %s" % exp_path
print("Loading the model from", model_path)
model_state_dict, _, _ = model_io.load_model(model_path)
nrsfm_model.load_state_dict(model_state_dict, strict=True)
if get_cfg:
return nrsfm_model, cfg
else:
return nrsfm_model
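# Loading sketch (hedged; assumes a pre-trained C3DPO experiment is available either
# as a name registered in dataset.dataset_configs or as a local experiment directory,
# and a GPU on device 0):
#
#   nrsfm = load_nrsfm_model('<c3dpo_experiment_name>')   # placeholder name
#   model = Model(nrsfm_exp_path='<c3dpo_experiment_dir>',
#                 TRUNK=get_default_args(HyperColumNet))
#   model.cuda(0)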
|
c3dm-main
|
c3dm/model.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import copy, os, sys, time
import itertools as itt
import yaml
# torch imports
import numpy as np
import torch
from dataset.batch_samplers import SceneBatchSampler
from dataset.dataset_zoo import dataset_zoo
from dataset.eval_zoo import eval_zoo
from dataset.c3dpo_annotate import run_c3dpo_model_on_dset
from model import Model
from config import set_config_from_file, set_config, \
get_arg_parser, dump_config, get_default_args, auto_init_args
from tools.attr_dict import nested_attr_dict
from tools.utils import get_net_input, pprint_dict
from tools import utils
from tools.stats import Stats
from tools.vis_utils import get_visdom_env
from tools.model_io import find_last_checkpoint, purge_epoch, \
load_model, get_checkpoint, save_model
from tools.cache_preds import cache_preds
def init_model(cfg,force_load=False,clear_stats=False,add_log_vars=None):
# get the model
model = Model(**cfg.MODEL)
# obtain the network outputs that should be logged
if hasattr(model,'log_vars'):
log_vars = copy.deepcopy(model.log_vars)
else:
log_vars = ['objective']
if add_log_vars:
log_vars.extend(copy.deepcopy(add_log_vars))
visdom_env_charts = get_visdom_env(cfg) + "_charts"
# init stats struct
stats = Stats( log_vars, visdom_env=visdom_env_charts, \
verbose=False, visdom_server=cfg.visdom_server, \
visdom_port=cfg.visdom_port )
model_path = None
if cfg.resume_epoch > 0:
model_path = get_checkpoint(cfg.exp_dir,cfg.resume_epoch)
elif cfg.resume_epoch == -1: # find the last checkpoint
model_path = find_last_checkpoint(cfg.exp_dir)
optimizer_state = None
if model_path is None and force_load:
from dataset.dataset_configs import C3DM_URLS
url = C3DM_URLS[cfg.DATASET.dataset_name]
print('Downloading C3DM model %s from %s' % (cfg.DATASET.dataset_name, url))
utils.untar_to_dir(url, cfg.exp_dir)
model_path = find_last_checkpoint(cfg.exp_dir)
if model_path is not None:
print( "found previous model %s" % model_path )
if force_load or cfg.resume:
print( " -> resuming" )
model_state_dict, stats_load, optimizer_state = load_model(model_path)
if not clear_stats:
if stats_load is None:
print(" -> bad stats! -> clearing")
else:
stats = stats_load
else:
print(" -> clearing stats")
try:
model.load_state_dict(model_state_dict, strict=True)
except RuntimeError as e:
print('!!!!! cant load state dict in strict mode:')
print(e)
print('loading in non-strict mode ...')
model.load_state_dict(model_state_dict, strict=False)
model.log_vars = log_vars
else:
print( " -> but not resuming -> starting from scratch" )
elif force_load:
print('!! CANNOT RESUME FROM A CHECKPOINT !!')
# update in case it got lost during load:
stats.visdom_env = visdom_env_charts
stats.visdom_server = cfg.visdom_server
stats.visdom_port = cfg.visdom_port
#stats.plot_file = os.path.join(cfg.exp_dir,'train_stats.pdf')
stats.synchronize_logged_vars(log_vars)
return model, stats, optimizer_state
def init_optimizer(model,optimizer_state,
PARAM_GROUPS=(),
freeze_bn=False,
breed='sgd',
weight_decay=0.0005,
lr_policy='multistep',
lr=0.001,
gamma=0.1,
momentum=0.9,
betas=(0.9,0.999),
milestones=[100,],
max_epochs=300,
):
# init the optimizer
if hasattr(model,'_get_param_groups'): # use the model function
p_groups = model._get_param_groups(lr,wd=weight_decay)
else:
allprm = [prm for prm in model.parameters() if prm.requires_grad]
p_groups = [{'params': allprm, 'lr': lr}]
if breed=='sgd':
optimizer = torch.optim.SGD( p_groups, lr=lr, \
momentum=momentum, \
weight_decay=weight_decay )
elif breed=='adagrad':
optimizer = torch.optim.Adagrad( p_groups, lr=lr, \
weight_decay=weight_decay )
elif breed=='adam':
optimizer = torch.optim.Adam( p_groups, lr=lr, \
betas=betas, \
weight_decay=weight_decay )
else:
raise ValueError("no such solver type %s" % breed)
print(" -> solver type = %s" % breed)
if lr_policy=='multistep':
scheduler = torch.optim.lr_scheduler.MultiStepLR( \
optimizer, milestones=milestones, gamma=gamma)
else:
raise ValueError("no such lr policy %s" % lr_policy)
# add the max epochs here!
scheduler.max_epochs = max_epochs
if optimizer_state is not None:
print(" -> setting loaded optimizer state")
optimizer.load_state_dict(optimizer_state)
optimizer.param_groups[0]['momentum'] = momentum
optimizer.param_groups[0]['dampening'] = 0.0
optimizer.zero_grad()
return optimizer, scheduler
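# Illustrative sketch (added for exposition, not part of the original script):
# build an optimizer/scheduler pair for a toy module using the defaults above.
# The module and hyperparameters below are arbitrary.
def _init_optimizer_example():
	toy = torch.nn.Linear(4, 2)
	optimizer, scheduler = init_optimizer(toy, None, breed='sgd', lr=0.01)
	for _ in range(3):  # a few dummy iterations of one "epoch"
		optimizer.zero_grad()
		loss = toy(torch.randn(8, 4)).pow(2).mean()
		loss.backward()
		optimizer.step()
	scheduler.step()  # the scheduler is stepped once per epoch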
def run_training(cfg):
# run the training loops
# make the exp dir
os.makedirs(cfg.exp_dir,exist_ok=True)
# set the seed
np.random.seed(cfg.seed)
# dump the exp config to the exp dir
dump_config(cfg)
# setup datasets
dset_train, dset_val, dset_test = dataset_zoo(**cfg.DATASET)
# init loaders
if cfg.batch_sampler=='default':
trainloader = torch.utils.data.DataLoader( dset_train,
num_workers=cfg.num_workers, pin_memory=True,
batch_size=cfg.batch_size, shuffle=False )
elif cfg.batch_sampler=='sequence':
trainloader = torch.utils.data.DataLoader( dset_train,
num_workers=cfg.num_workers, pin_memory=True,
batch_sampler=SceneBatchSampler(
torch.utils.data.SequentialSampler(dset_train),
cfg.batch_size,
True,
) )
else:
		raise ValueError('unknown batch_sampler: %s' % cfg.batch_sampler)
if dset_val is not None:
if cfg.batch_sampler=='default':
valloader = torch.utils.data.DataLoader( dset_val,
num_workers=cfg.num_workers, pin_memory=True,
batch_size=cfg.batch_size, shuffle=False )
elif cfg.batch_sampler=='sequence':
valloader = torch.utils.data.DataLoader( dset_val,
num_workers=cfg.num_workers, pin_memory=True,
batch_sampler=SceneBatchSampler( \
torch.utils.data.SequentialSampler(dset_val),
cfg.batch_size,
True,
) )
else:
			raise ValueError('unknown batch_sampler: %s' % cfg.batch_sampler)
else:
valloader = None
# test loaders
if dset_test is not None:
testloader = torch.utils.data.DataLoader(dset_test,
num_workers=cfg.num_workers, pin_memory=True,
batch_size=cfg.batch_size, shuffle=False,
)
_,_,eval_vars = eval_zoo(cfg.DATASET.dataset_name)
else:
testloader = None
eval_vars = None
# init the model
model, stats, optimizer_state = init_model(cfg,add_log_vars=eval_vars)
start_epoch = stats.epoch + 1
# annotate dataset with c3dpo outputs
if cfg.annotate_with_c3dpo_outputs:
for dset in dset_train, dset_val, dset_test:
if dset is not None:
run_c3dpo_model_on_dset(dset, cfg.MODEL.nrsfm_exp_path)
# move model to gpu
model.cuda(0)
# init the optimizer
optimizer, scheduler = init_optimizer(\
model, optimizer_state=optimizer_state, **cfg.SOLVER)
# loop through epochs
scheduler.last_epoch = start_epoch
for epoch in range(start_epoch, cfg.SOLVER.max_epochs):
with stats: # automatic new_epoch and plotting of stats at every epoch start
print("scheduler lr = %1.2e" % float(scheduler.get_lr()[-1]))
# train loop
trainvalidate(model, stats, epoch, trainloader, optimizer, False, \
visdom_env_root=get_visdom_env(cfg), **cfg )
# val loop
if valloader is not None:
trainvalidate(model, stats, epoch, valloader, optimizer, True, \
visdom_env_root=get_visdom_env(cfg), **cfg )
# eval loop (optional)
if testloader is not None:
if cfg.eval_interval >= 0:
if cfg.eval_interval == 0 or \
((epoch % cfg.eval_interval)==0 and epoch > 0):
torch.cuda.empty_cache() # we have memory heavy eval ...
with torch.no_grad():
run_eval(cfg,model,stats,testloader)
assert stats.epoch==epoch, "inconsistent stats!"
# delete previous models if required
if cfg.store_checkpoints_purge > 0 and cfg.store_checkpoints:
for prev_epoch in range(epoch-cfg.store_checkpoints_purge):
period = cfg.store_checkpoints_purge_except_every
if (period > 0 and prev_epoch % period == period - 1):
continue
purge_epoch(cfg.exp_dir,prev_epoch)
# save model
if cfg.store_checkpoints:
outfile = get_checkpoint(cfg.exp_dir,epoch)
save_model(model,stats,outfile,optimizer=optimizer)
scheduler.step()
def run_evaluation(cfg):
np.random.seed(cfg.seed)
# setup datasets
dset_train, dset_val, dset_test = dataset_zoo(**cfg.DATASET)
# test loaders
testloader = torch.utils.data.DataLoader(
dset_test,
num_workers=cfg.num_workers, pin_memory=True,
batch_size=cfg.batch_size, shuffle=False,
)
_, _, eval_vars = eval_zoo(cfg.DATASET.dataset_name)
# init the model
model, _, _ = init_model(cfg, force_load=True, add_log_vars=eval_vars)
model.cuda(0)
model.eval()
# init the optimizer
#optimizer, scheduler = init_optimizer(model, optimizer_state=optimizer_state, **cfg.SOLVER)
# val loop
#trainvalidate(model, stats, 0, valloader, optimizer, True,
# visdom_env_root=get_visdom_env(cfg), **cfg )
with torch.no_grad():
run_eval(cfg, model, None, testloader)
def trainvalidate( model,
stats,
epoch,
loader,
optimizer,
validation,
bp_var='objective',
metric_print_interval=5,
visualize_interval=0,
visdom_env_root='trainvalidate',
**kwargs ):
if validation:
model.eval()
trainmode = 'val'
else:
model.train()
trainmode = 'train'
t_start = time.time()
# clear the visualisations on the first run in the epoch
clear_visualisations = True
# get the visdom env name
visdom_env_imgs = visdom_env_root + "_images_" + trainmode
#loader = itt.islice(loader, 1)
n_batches = len(loader)
for it, batch in enumerate(loader):
last_iter = it==n_batches-1
# move to gpu where possible
net_input = get_net_input(batch)
# add epoch to the set of inputs
net_input['epoch_now'] = int(epoch)
if (not validation):
optimizer.zero_grad()
preds = model(**net_input)
else:
with torch.no_grad():
preds = model(**net_input)
		# make sure we don't overwrite something
assert not any( k in preds for k in net_input.keys() )
preds.update(net_input) # merge everything into one big dict
# update the stats logger
stats.update(preds,time_start=t_start,stat_set=trainmode)
assert stats.it[trainmode]==it, "inconsistent stat iteration number!"
# print textual status update
if (it%metric_print_interval)==0 or last_iter:
stats.print(stat_set=trainmode,max_it=n_batches)
# optimizer step
if (not validation):
loss = preds[bp_var]
loss.backward()
optimizer.step()
# visualize results
if (visualize_interval>0) and (it%visualize_interval)==0:
model.visualize( visdom_env_imgs, trainmode, \
preds, stats, clear_env=clear_visualisations )
clear_visualisations = False
def run_eval(cfg,model,stats,loader):
if hasattr(model, 'embed_db_eval'):
from dataset.dataset_configs import FILTER_DB_SETTINGS
dset_name = cfg['DATASET']['dataset_name']
if dset_name in FILTER_DB_SETTINGS:
filter_settings = FILTER_DB_SETTINGS[dset_name]
else:
filter_settings = FILTER_DB_SETTINGS['default']
print('filter settings: %s' % str(filter_settings))
print('turning embed_db eval on!')
prev_embed_db_eval = copy.deepcopy(model.embed_db_eval)
model.embed_db_eval = True
model.embed_db.filter_db(**filter_settings)
eval_script, cache_vars, eval_vars = eval_zoo(cfg.DATASET.dataset_name)
	cached_preds = cache_preds(model, loader, stats=stats,
	                           cache_vars=cache_vars)
results, _ = eval_script(cached_preds, eval_vars=eval_vars)
if stats is not None:
stats.update(results, stat_set='test') #, log_vars=results.keys())
stats.print(stat_set='test')
if hasattr(model, 'embed_db_eval'):
model.embed_db_eval = prev_embed_db_eval
class ExperimentConfig(object):
def __init__( self,
cfg_file=None,
model_zoo='./data/torch_zoo/',
exp_name='test',
exp_idx=0,
exp_dir='./data/exps/keypoint_densification/default/',
gpu_idx=0,
resume=True,
seed=0,
resume_epoch=-1,
eval_interval=1,
store_checkpoints=True,
store_checkpoints_purge=1,
store_checkpoints_purge_except_every=25,
batch_size=10,
num_workers=8,
visdom_env='',
collect_basis_before_eval=False,
visdom_server='http://localhost',
visdom_port=8097,
metric_print_interval=5,
visualize_interval=0,
mode='trainval',
batch_sampler='sequence',
annotate_with_c3dpo_outputs=True,
SOLVER = get_default_args(init_optimizer),
DATASET = get_default_args(dataset_zoo),
MODEL = get_default_args(Model),
):
self.cfg = get_default_args(ExperimentConfig)
if cfg_file is not None:
set_config_from_file(self.cfg,cfg_file)
else:
auto_init_args(self,tgt='cfg',can_overwrite=True)
self.cfg = nested_attr_dict(self.cfg)
if __name__ == '__main__':
torch.manual_seed(0)
np.random.seed(0)
# init the exp config
exp = ExperimentConfig()
set_config_from_file(exp.cfg, sys.argv[1])
mode = 'train'
if len(sys.argv) > 2 and sys.argv[2] == '--eval':
mode = 'eval'
pprint_dict(exp.cfg)
#with open('freicars.yaml', 'w') as yaml_file:
# yaml.dump(exp.cfg, yaml_file, default_flow_style=False)
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = str(exp.cfg.gpu_idx)
	if exp.cfg.model_zoo is not None:
os.environ["TORCH_MODEL_ZOO"] = exp.cfg.model_zoo
if mode == 'eval':
run_evaluation(exp.cfg)
else:
run_training(exp.cfg)
|
c3dm-main
|
c3dm/experiment.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
import torch.nn.functional as Fu
def image_meshgrid(bounds,resol):
"""
bounds in 3x2
resol in 3x1
"""
# he,wi,de = resol
# minw,maxw = bounds[0]
# minh,maxh = bounds[1]
# mind,maxd = bounds[2]
axis = [ ((torch.arange(sz).float())/(sz-1))*(b[1]-b[0])+b[0] \
for sz,b in zip(resol,bounds) ]
return torch.stack(torch.meshgrid(axis))
def append1(X, mask=1.):
"""
append 1 as the last dim
"""
X = torch.cat( (X, X[:,-2:-1]*0. + mask), dim=1 )
return X
def depth2pcl( D, K, image_size=None, projection_type='perspective' ):
"""
convert depth D in B x 1 x He x Wi
to a point cloud xyz_world in B x 3 x He x Wi
using projection matrix KRT in B x 3 x 7 (K,R,T stacked along dim=2)
the convention is: K[R|T] xyz_world = xyz_camera
"""
grid_size = D.shape[2:4]
ba = D.shape[0]
if image_size is None:
image_size = grid_size
he , wi = image_size
projection_bounds = torch.FloatTensor( \
[ [0.5,he-0.5],
[0.5,wi-0.5], ] )
yx_cam = image_meshgrid(projection_bounds,grid_size).type_as(D)
xy_cam = yx_cam[[1,0],:,:]
xy_cam = xy_cam[None].repeat(ba,1,1,1)
xyz_cam = torch.cat( (xy_cam, D), dim=1 )
if projection_type=='perspective':
xyz_world = unproject_from_camera( \
xyz_cam.view(ba,3,-1), K )
xyz_world = xyz_world.view(ba,3,grid_size[0],grid_size[1])
elif projection_type=='orthographic':
xyz_world = xyz_cam
else:
raise ValueError(projection_type)
return xyz_world
def unproject_from_camera( xyz_cam, K ):
"""
unprojects the points from the camera coordinates xyz_cam to
the world coordinates xyz_world
xyz_cam in (B,3,N), 3rd dimension is depth, first two x,y pixel coords
projection matrix KRT in B x 3 x 7 (K,R,T stacked along dim=2)
"""
# decompose KRT
xy_cam = xyz_cam[:,0:2,:]
depth = xyz_cam[:,2:3,:]
# calibrate the points
xyz_world = torch.bmm(torch.inverse(K),append1(xy_cam))
# append depth and mult by inverse of the transformation
xyz_world = xyz_world * depth
return xyz_world
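# Illustrative sketch (added for exposition, not part of the original file):
# unproject a dummy depth map with an assumed identity calibration matrix K;
# shapes follow the docstrings above (B x 1 x He x Wi -> B x 3 x He x Wi).
def _depth2pcl_example():
	B, He, Wi = 2, 4, 5
	D = torch.rand(B, 1, He, Wi) + 1.  # strictly positive depths
	K = torch.eye(3)[None].repeat(B, 1, 1)
	xyz = depth2pcl(D, K, projection_type='perspective')
	assert xyz.shape == (B, 3, He, Wi)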
|
c3dm-main
|
c3dm/tools/pcl_unproject.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
import torch.nn.functional as Fu
import numpy as np
import collections
import warnings
def clamp_depth(X, min_depth):
xy, depth = X[:,0:2], X[:,2:]
depth = torch.clamp(depth, min_depth)
return torch.cat((xy,depth), dim=1)
def calc_ray_projection(X, Y, K=None, min_r_len=None, min_depth=None):
n = X.shape[2]
ba = X.shape[0]
append1 = lambda x: \
torch.cat((x,x.new_ones(x.shape[0],1,x.shape[2])), dim=1)
if K is None:
# Y is already calibrated
r = append1(Y)
else:
r = torch.bmm(torch.inverse(K), append1(Y))
r = Fu.normalize(r, dim=1)
if min_depth is not None:
X = clamp_depth(X, min_depth)
r_len = (X * r).sum(1, keepdim=True)
if min_r_len is not None:
r_len = torch.clamp(r_len, min_r_len)
r_proj = r_len * r
return r_proj
def minimise_2d_residual_over_T(K, X, Y, v=None):
ba, _, n = X.size()
append1 = lambda x: torch.cat((x, x.new_ones(x[:,:1,:].size())), dim=1)
Y_cam = torch.bmm(torch.inverse(K), append1(Y))
# construct a system AT = b
A_u = torch.cat((Y_cam.new_ones(ba, n, 1), Y_cam.new_zeros(ba, n, 1), -Y_cam[:,:1,:].permute(0,2,1)), dim=2)
A_v = torch.cat((Y_cam.new_zeros(ba, n, 1), Y_cam.new_ones(ba, n, 1), -Y_cam[:,1:2,:].permute(0,2,1)), dim=2)
b_u = (Y_cam[:,0:1,:] * X[:,2:,:] - X[:,0:1,:]).permute(0,2,1)
b_v = (Y_cam[:,1:2,:] * X[:,2:,:] - X[:,1:2,:]).permute(0,2,1)
res = Y_cam.new_empty(ba, 3)
for i in range(ba):
if v is not None:
A = torch.cat((A_u[i, v[i] > 0., :], A_v[i, v[i] > 0., :]), dim=0)
b = torch.cat((b_u[i, v[i] > 0., :], b_v[i, v[i] > 0., :]), dim=0)
else:
A = torch.cat((A_u[i, :, :], A_v[i, :, :]), dim=0)
b = torch.cat((b_u[i, :, :], b_v[i, :, :]), dim=0)
#res[i,:] = torch.lstsq(b, A)[0][:3, 0]
res[i,:] = torch.matmul(torch.pinverse(A), b)[:, 0]
return res
# TODO: if used, extract to test
def test_minimise_2d_residual_over_T():
	K = torch.eye(3)[None,:,:]
	Y = torch.tensor([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]]).t()[None,:,:]
	X = torch.cat((Y, Y.new_ones(1,1,4)), dim=1)
	res = minimise_2d_residual_over_T(K, X, Y)
	assert torch.allclose(res, torch.tensor([[0., 0., 0.]]), atol=1e-5)
	X = torch.cat((Y, 2*Y.new_ones(1,1,4)), dim=1)
	res = minimise_2d_residual_over_T(K, X, Y)
	assert torch.allclose(res, torch.tensor([[0., 0., -1.]]), atol=1e-5)
	X = torch.cat((Y, Y.new_ones(1,1,4)), dim=1)
	Y[:,0,:] += 3
	res = minimise_2d_residual_over_T(K, X, Y)
	assert torch.allclose(res, torch.tensor([[3., 0., 0.]]), atol=1e-5)
def find_camera_T(K, X, Y, v=None, eps=1e-4):
"""
estimate camera translation given 3D-2D correspondences and cal matrix
"""
n = X.shape[2]
ba = X.shape[0]
append1 = lambda x: \
torch.cat((x,x.new_ones(x.shape[0],1,x.shape[2])), dim=1)
# projection rays
r = torch.bmm(torch.inverse(K), append1(Y))
r = Fu.normalize(r, dim=1)
# outer projection ray product (need to permute the array first)
rr = r.permute(0,2,1).contiguous().view(n*ba, 3)
rr = torch.bmm(rr[:,:,None], rr[:,None,:])
# I - rr
Irr = torch.eye(3)[None].type_as(X).repeat(ba*n,1,1) - rr
# [rr - I] x
rrIx = torch.bmm(-Irr, X.permute(0,2,1).contiguous().view(n*ba, 3, 1))
Irr = Irr.view(ba,-1,3,3)
rrIx = rrIx.view(ba,-1,3)
if v is not None:
Irr = Irr * v[:,:,None,None]
rrIx = rrIx * v[:,:,None]
Irr_sum = Irr.sum(1)
rrIx_sum = rrIx.sum(1)
if v is not None:
ok = v.sum(1) > 2 # at least three visible
rrI_sum_i = Irr_sum * 0.
rrI_sum_i[ok] = torch.inverse(Irr_sum[ok])
else:
rrI_sum_i = torch.inverse(Irr_sum)
T = torch.bmm(rrI_sum_i, rrIx_sum[:,:,None])[:,:,0]
return T
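# Illustrative sketch (added for exposition, not from the original source):
# with identity calibration, points expressed in the camera frame as X + T_gt
# project to Y; find_camera_T should then recover T_gt from the 3D-2D pairs.
def _find_camera_T_example():
	ba, n = 1, 8
	X = torch.rand(ba, 3, n) * 4. - 2.
	T_gt = torch.tensor([[0.2, -0.1, 5.0]])
	X_cam = X + T_gt[:, :, None]            # camera-frame points (positive depth)
	Y = X_cam[:, 0:2] / X_cam[:, 2:3]       # perspective projection with K = I
	K = torch.eye(3)[None]
	T = find_camera_T(K, X, Y)
	assert torch.allclose(T, T_gt, atol=1e-3)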
def image_meshgrid(bounds, resol):
"""
bounds in 3x2
resol in 3x1
"""
# he,wi,de = resol
# minw,maxw = bounds[0]
# minh,maxh = bounds[1]
# mind,maxd = bounds[2]
axis = []
for sz, b in zip(resol, bounds):
binw = (b[1]-b[0]) / sz
g = torch.arange(sz).float().cuda() * binw + 0.5 * binw
axis.append(g)
return torch.stack(torch.meshgrid(axis))
def masked_kp_mean(kp_loc,kp_vis):
visibility_mass = torch.clamp(kp_vis.sum(1),1e-4)
kp_mean = (kp_loc*kp_vis[:,None,:]).sum(2)
kp_mean = kp_mean / visibility_mass[:,None]
return kp_mean
def huber(dfsq, scaling=0.03):
loss = (safe_sqrt(1+dfsq/(scaling*scaling),eps=1e-4)-1) * scaling
return loss
def mod1(h):
ge1 = (h > 1.).float()
le0 = (h < 0.).float()
ok = ((h>=0.) * (h<=1.)).float()
rem_ge1 = h - h.long().float()
rem_le0 = 1. - (-h) - (-h).long().float()
h = ge1 * rem_ge1 + le0 * rem_le0 + ok * h
return h
def avg_l2_huber(x, y, mask=None, scaling=0.03, reduce_dims=[1]):
dist = (x - y) ** 2
if reduce_dims:
dist = dist.sum(reduce_dims)
dist = huber(dist, scaling=float(scaling))
if mask is not None:
dist = (dist*mask).sum(1) / \
torch.clamp(mask.sum(1),1.)
else:
if len(dist.shape)==2 and dist.shape[1] > 1:
dist = dist.mean(1)
dist = dist.mean()
return dist
def avg_l2_dist(x,y,squared=False,mask=None,eps=1e-4):
diff = x - y
dist = (diff*diff).sum(1)
if not squared: dist = safe_sqrt(dist,eps=eps)
if mask is not None:
dist = (dist*mask).sum(1) / \
torch.clamp(mask.sum(1),1.)
else:
if len(dist.shape)==2 and dist.shape[1] > 1:
dist = dist.mean(1)
dist = dist.mean()
return dist
def argmin_translation_scale(x, y, v=None):
# find translation/scale "T/s" st. s x + T = y
ba = x.shape[0]
x = x.view(ba, 2, -1)
y = y.view(ba, 2, -1)
if v is not None:
v = v.view(ba, -1)
x_mu = (x * v[:, None]).sum(2) / v.sum(1).clamp(1.)[:, None]
y_mu = (y * v[:, None]).sum(2) / v.sum(1).clamp(1.)[:, None]
else:
x_mu = x.mean(2)
y_mu = y.mean(2)
x = x - x_mu[:, :, None]
y = y - y_mu[:, :, None]
s = argmin_scale(x, y, v=v)
T = -x_mu * s[:, None] + y_mu
return s, T
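# Illustrative sketch (added for exposition): recover a known 2D similarity
# (scale s_gt and translation T_gt) with argmin_translation_scale.
def _argmin_translation_scale_example():
	x = torch.randn(1, 2, 20)
	s_gt, T_gt = 2.5, torch.tensor([[0.3, -1.0]])
	y = s_gt * x + T_gt[:, :, None]
	s, T = argmin_translation_scale(x, y)
	assert torch.allclose(s, torch.tensor([s_gt]), atol=1e-5)
	assert torch.allclose(T, T_gt, atol=1e-5)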
def argmin_translation(x,y,v=None):
# find translation "T" st. x + T = y
x_mu = x.mean(2)
if v is not None:
vmass = torch.clamp(v.sum(1,keepdim=True),1e-4)
x_mu = (v[:,None,:]*x).sum(2) / vmass
y_mu = (v[:,None,:]*y).sum(2) / vmass
T = y_mu - x_mu
return T
def argmin_scale(x,y,v=None):
# find scale "s" st.: sx=y
if v is not None: # mask invisible
x = x * v[:,None,:]
y = y * v[:,None,:]
xtx = (x*x).sum(1).sum(1)
xty = (x*y).sum(1).sum(1)
s = xty / torch.clamp(xtx,1e-4)
return s
def logexploss(x,inv_lbd,coeff=1.,accum=True):
lbd = 1 / inv_lbd
conj = lbd.log()
prob = -x*lbd
	logl = -(prob+coeff*conj) # negative log-likelihood
if accum:
return logl.mean()
else:
return logl
def safe_sqrt(A,eps=float(1e-4)):
"""
performs safe differentiable sqrt
"""
return (torch.clamp(A,float(0))+eps).sqrt()
def rgb2hsv(im, eps=0.0000001):
# img = im * 0.5 + 0.5
img = im
# hue = torch.Tensor(im.shape[0], im.shape[2], im.shape[3]).to(im.device)
hue = im.new_zeros( im.shape[0], im.shape[2], im.shape[3] )
hue[ img[:,2]==img.max(1)[0] ] = 4.0 + ( (img[:,0]-img[:,1]) / ( img.max(1)[0] - img.min(1)[0] + eps) ) [ img[:,2]==img.max(1)[0] ]
hue[ img[:,1]==img.max(1)[0] ] = 2.0 + ( (img[:,2]-img[:,0]) / ( img.max(1)[0] - img.min(1)[0] + eps) ) [ img[:,1]==img.max(1)[0] ]
hue[ img[:,0]==img.max(1)[0] ] = (0.0 + ( (img[:,1]-img[:,2]) / ( img.max(1)[0] - img.min(1)[0] + eps) ) [ img[:,0]==img.max(1)[0] ]) % 6
hue[img.min(1)[0]==img.max(1)[0]] = 0.0
hue = hue/6
saturation = ( img.max(1)[0] - img.min(1)[0] ) / ( img.max(1)[0] + eps )
saturation[ img.max(1)[0]==0 ] = 0
value = img.max(1)[0]
hsv = torch.stack((hue, saturation, value), dim=1)
return hsv
def hsv2rgb(hsv):
C = hsv[:,2] * hsv[:,1]
X = C * ( 1 - ( (hsv[:,0]*6)%2 - 1 ).abs() )
m = hsv[:,2] - C
# zero tensor
z = hsv[:,0] * 0.
h = hsv[:,0]
RGB = \
((h <= 1/6) )[:,None,:,:].float() * torch.stack((C,X,z), dim=1) +\
((h > 1/6) * (h <= 2/6))[:,None,:,:].float() * torch.stack((X,C,z), dim=1) +\
((h > 2/6) * (h <= 3/6))[:,None,:,:].float() * torch.stack((z,C,X), dim=1) +\
((h > 3/6) * (h <= 4/6))[:,None,:,:].float() * torch.stack((z,X,C), dim=1) +\
((h > 4/6) * (h <= 5/6))[:,None,:,:].float() * torch.stack((X,z,C), dim=1) +\
((h > 5/6) * (h <= 6/6))[:,None,:,:].float() * torch.stack((C,z,X), dim=1)
# if self.hsv[0] < 1/6:
# R_hat, G_hat, B_hat = C, X, 0
# elif self.hsv[0] < 2/6:
# R_hat, G_hat, B_hat = X, C, 0
# elif self.hsv[0] < 3/6:
# R_hat, G_hat, B_hat = 0, C, X
# elif self.hsv[0] < 4/6:
# R_hat, G_hat, B_hat = 0, X, C
# elif self.hsv[0] < 5/6:
# R_hat, G_hat, B_hat = X, 0, C
# elif self.hsv[0] <= 6/6:
# R_hat, G_hat, B_hat = C, 0, X
RGB = RGB + m[:,None,:,:]
# R, G, B = (R_hat+m), (G_hat+m), (B_hat+m)
return RGB
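# Illustrative round-trip sketch (added for exposition): converting an RGB
# image in [0, 1] to HSV and back should approximately reproduce the input.
def _hsv_roundtrip_example():
	img = torch.rand(2, 3, 4, 4)
	rec = hsv2rgb(rgb2hsv(img))
	assert torch.allclose(rec, img, atol=1e-3)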
def wmean(x, weight, dim=-1):
return (
x.mean(dim=dim, keepdim=True) if weight is None
else (x*weight[:,None,:]).sum(dim=dim, keepdim=True) /
weight[:,None,:].sum(dim=dim, keepdim=True)
)
def umeyama(X, Y, weight=None, center=True, allow_reflections=False, eps=1e-9):
"""
umeyama finds a rigid motion (rotation R and translation T) between two sets of points X and Y
s.t. RX+T = Y in the least squares sense
Inputs:
X ... Batch x 3 x N ... each column is a 3d point
Y ... Batch x 3 x N ... each column is a 3d point
Outputs:
R ... rotation component of rigid motion
T ... translation component of rigid motion
"""
assert X.shape[1]==Y.shape[1]
assert X.shape[2]==Y.shape[2]
assert X.shape[1]==3
b, _, n = X.size()
if center:
Xmu = wmean(X, weight)
Ymu = wmean(Y, weight)
X = X - Xmu
Y = Y - Ymu
Sxy = (
torch.bmm(Y, X.transpose(2,1)) / n if weight is None
else torch.bmm(Y*weight[:,None,:], X.transpose(2,1)*weight[:,:,None])
/ weight.sum(-1)[:,None,None]
)
U, _, V = torch.svd(Sxy)
R = torch.bmm(U, V.transpose(2,1))
if not allow_reflections:
s = torch.eye(3, dtype=X.dtype, device=X.device).repeat(b, 1, 1)
s[:,-1,-1] = torch.det(R)
# R = torch.matmul(s, R)
R = torch.matmul(torch.matmul(U, s), V.transpose(2,1))
assert torch.all(torch.det(R) >= 0)
T = (
Ymu - torch.bmm(R, Xmu[:,:])
if center else torch.zeros_like(X)
)[:,:,0]
return R, T
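# Illustrative sketch (added for exposition): umeyama should recover a known
# rigid motion R_gt, T_gt (here a rotation about the z-axis and a small shift).
def _umeyama_example():
	b, n = 1, 10
	X = torch.randn(b, 3, n)
	c, s = float(np.cos(0.3)), float(np.sin(0.3))
	R_gt = torch.tensor([[c, -s, 0.], [s, c, 0.], [0., 0., 1.]])[None]
	T_gt = torch.tensor([[0.5, -0.2, 1.0]])
	Y = torch.bmm(R_gt, X) + T_gt[:, :, None]
	R, T = umeyama(X, Y)
	assert torch.allclose(R, R_gt, atol=1e-4) and torch.allclose(T, T_gt, atol=1e-4)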
def get_edm(pts, pts2=None):
dtype = pts.data.type()
ba, dim, N = pts.shape
if pts2 is not None:
edm = torch.bmm(-2. * pts2.transpose(1,2), pts)
fNorm1 = (pts*pts).sum(1,keepdim=True)
fNorm2 = (pts2*pts2).sum(1,keepdim=True)
edm += fNorm2.transpose(1,2) # inplace saves memory
edm += fNorm1
# edm = (fNorm2.transpose(1,2) + fGram) + fNorm1
else:
fGram = torch.bmm(2 * pts.transpose(1,2), pts)
fNorm1 = (pts*pts).sum(1,keepdim=True)
edm = (fNorm1.transpose(1,2) - fGram) + fNorm1
return edm.contiguous()
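# Illustrative check (added for exposition): get_edm returns *squared*
# Euclidean distance matrices, so it should agree with torch.cdist squared.
def _get_edm_example():
	pts = torch.randn(2, 3, 5)
	edm = get_edm(pts)
	ref = torch.cdist(pts.transpose(1, 2), pts.transpose(1, 2)) ** 2
	assert torch.allclose(edm, ref, atol=1e-4)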
def sample_random_xy(xy, mask, n=100):
ba = xy.shape[0]
xy = xy.reshape(ba, 2, -1)
mask = mask.reshape(ba, -1)
xy_sample = []
for m_, xy_ in zip(mask, xy):
ok = torch.nonzero(m_)
if ok.numel() <= 2:
warnings.warn('nothing in the mask!')
ok = torch.nonzero(m_ + 1).squeeze()
ok = ok.squeeze()
sel = torch.randint(low=0, high=len(ok), size=(n,), device=xy.device)
xy_sample.append(xy_[:, ok[sel]])
xy_sample = torch.stack(xy_sample)
return xy_sample
def get_mask_chamfer(xy_rdr, gt_mask, image_size, n=100):
ba = xy_rdr.shape[0]
render_size = gt_mask.shape[2:]
grid_gt = image_meshgrid(((0, 2), (0, 2)), render_size)
grid_gt = grid_gt.type_as(xy_rdr) - 1.
grid_gt = grid_gt[[1, 0]][None].repeat(ba, 1, 1, 1)
# sample random points from gt mask
gt_samples = sample_random_xy(grid_gt, gt_mask, n=n)
# compute chamfer
edm = get_edm(gt_samples, xy_rdr)
edm = huber(edm, scaling=0.1)
loss = 0.5 * (edm.min(dim=1)[0].mean() + edm.min(dim=2)[0].mean())
return loss, gt_samples
|
c3dm-main
|
c3dm/tools/functions.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from tools.utils import auto_init_args
import torch
import torch.nn.functional as Fu
from torch.nn import Parameter
from tools.utils import Timer
class TensorAccumulator(torch.nn.Module):
def __init__(self, db_size=30000, db_dim=3, perc_replace=0.01):
super().__init__()
auto_init_args(self)
db = torch.zeros(db_dim, db_size).float()
self.db = Parameter(db)
self.db.requires_grad = False
self.pointer = 0
self.uniform_sphere_sampling = False
def get_db(self, uniform_sphere=False):
if uniform_sphere or self.uniform_sphere_sampling:
mean_norm = (self.db.data**2).sum(0).sqrt().mean()
db = Fu.normalize(torch.randn_like(self.db), dim=0) * mean_norm
return db
else:
if hasattr(self, 'db_f'):
return self.db_f.clone()
else:
return self.db.data
def filter_db(self, nn=1e-3, perc_keep=0.9, sig=0.01, lap_size=10, lap_alpha=1.):
print('filtering db')
if nn < 1.: nn = int(self.db.shape[1] * nn)
print('nn size = %d' % nn)
db_f = self.density_filter(nn=nn, perc_keep=perc_keep, \
sig=sig, in_db=self.db.data.clone())
if lap_size < 1.: lap_size = int(self.db.shape[1] * lap_size)
db_f = self.lap_filter(lap_size=lap_size, lap_alpha=lap_alpha, in_db=db_f)
self.db_f = db_f
def get_edm(self, pts, pts2=None):
dtype = pts.data.type()
ba, dim, N = pts.shape
		if pts2 is not None:
edm = torch.bmm(-2. * pts2.transpose(1,2), pts)
fNorm1 = (pts*pts).sum(1,keepdim=True)
fNorm2 = (pts2*pts2).sum(1,keepdim=True)
edm += fNorm2.transpose(1,2)
edm += fNorm1
else:
edm = torch.bmm(-2 * pts.transpose(1,2), pts)
fNorm1 = (pts*pts).sum(1,keepdim=True)
edm += fNorm1.transpose(1,2)
edm += fNorm1
return edm.contiguous()
def reset(self):
self.db.data = torch.zeros(self.db_dim, self.db_size).type_as(self.db.data)
self.pointer = 0
def get_nns(self, pts, pts2, nn, bsize=int(1e4)):
# nb = int(np.ceil(pts.shape[1] / bsize))
chunks = torch.split(pts, bsize, dim=1)
indKNN = []
for chunk in chunks:
edm = self.get_edm(pts2[None], chunk[None])[0]
_, indKNN_ = torch.topk(edm, k=nn, dim=1, largest=False)
indKNN.append(indKNN_)
indKNN = torch.cat(indKNN, dim=0)
return indKNN
def density_filter(self, nn=50, perc_keep=0.9, sig=0.01, in_db=None):
print('density filter ...')
if in_db is None:
pcl = self.db.data
else:
pcl = in_db
indKNN = self.get_nns(pcl, pcl, nn=nn)
# edm = self.get_edm(pcl[None])[0]
# _, indKNN = torch.topk(edm, k=nn, dim=0, largest=False)
NNs = pcl[:,indKNN]
dsity = (-((NNs - pcl[:,:,None])**2).sum(0)/sig).exp().sum(1)
thr = torch.topk(dsity, \
int((1.-perc_keep)*dsity.shape[0]), largest=False)[0][-1]
pcl = pcl[:, dsity>=thr]
if in_db is None:
self.db.data = pcl
else:
return pcl
def lap_filter(self, lap_size=10, lap_alpha=1., in_db=None):
print('lap filter ...')
if in_db is None:
pcl = self.db.data
else:
pcl = in_db
indKNN = self.get_nns(pcl, pcl, nn=lap_size)
NNs = pcl[:,indKNN]
pclf = NNs.mean(dim=2)
pcl = lap_alpha * pclf + (1-lap_alpha) * pcl
if in_db is None:
self.db.data = pcl
else:
return pcl
def forward(self, embed=None, masks=None):
if not self.training: # gather only on the train set
return None
ba = embed.shape[0]
embed_flat = embed.view(ba,self.db_dim,-1).detach()
if masks is not None:
mask_flat = masks.view(ba, -1)
else:
mask_flat = embed_flat[:,0,:] * 0. + 1.
# with Timer():
# embed_flat = embed_flat.permute(1,2,0).contiguous().view(1,self.db_dim,-1)
# mask_flat = mask_flat.t().contiguous().view(1,-1)
for bi, (m, e) in enumerate(zip(mask_flat, embed_flat)):
sel = torch.nonzero(m).squeeze()
if sel.numel()<=2:
continue
nsel = max(int(self.db_size * self.perc_replace), 1)
if self.pointer >= self.db_size: # randomly replace
idx = sel[torch.LongTensor(nsel).random_(0, len(sel))]
idx_replace = torch.LongTensor(nsel).random_(0, self.db_size)
embed_sel = e[:,idx].detach().data
self.db.data[:, idx_replace] = embed_sel
else: # keep adding vectors
# print('filling db ...')
nsel = min(nsel, self.db_size - self.pointer)
idx = sel[torch.LongTensor(nsel).random_(0, len(sel))]
embed_sel = e[:,idx].detach().data
self.db.data[:, self.pointer:(self.pointer+nsel)] = embed_sel
self.pointer += nsel
# print(self.pointer)
return None
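# Illustrative sketch (added for exposition, not part of the original file):
# accumulate random per-pixel embeddings into a small database; the sizes are
# arbitrary and the call signature follows forward() above.
def _tensor_accumulator_example():
	acc = TensorAccumulator(db_size=64, db_dim=3, perc_replace=0.1)
	acc.train()  # forward() only accumulates in training mode
	embed = torch.randn(2, 3, 8, 8)
	masks = torch.ones(2, 8, 8)
	acc(embed=embed, masks=masks)
	assert acc.get_db().shape == (3, 64)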
|
c3dm-main
|
c3dm/tools/tensor_accumulator.py
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
"""Pretty-print tabular data."""
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple
from platform import python_version_tuple
import re
import math
if tuple(int(v) for v in python_version_tuple()[:2]) >= (3, 3):
from collections.abc import Iterable
else:
from collections import Iterable
if python_version_tuple()[0] < "3":
from itertools import izip_longest
from functools import partial
_none_type = type(None)
_bool_type = bool
_int_type = int
_long_type = long
_float_type = float
_text_type = unicode
_binary_type = str
def _is_file(f):
return isinstance(f, file)
else:
from itertools import zip_longest as izip_longest
from functools import reduce, partial
_none_type = type(None)
_bool_type = bool
_int_type = int
_long_type = int
_float_type = float
_text_type = str
_binary_type = bytes
basestring = str
import io
def _is_file(f):
return isinstance(f, io.IOBase)
try:
import wcwidth # optional wide-character (CJK) support
except ImportError:
wcwidth = None
__all__ = ["tabulate", "tabulate_formats", "simple_separated_format"]
__version__ = "0.8.4"
# minimum extra space in headers
MIN_PADDING = 2
# Whether or not to preserve leading/trailing whitespace in data.
PRESERVE_WHITESPACE = False
_DEFAULT_FLOATFMT="g"
_DEFAULT_MISSINGVAL=""
# if True, enable wide-character (CJK) support
WIDE_CHARS_MODE = wcwidth is not None
Line = namedtuple("Line", ["begin", "hline", "sep", "end"])
DataRow = namedtuple("DataRow", ["begin", "sep", "end"])
# A table structure is supposed to be:
#
# --- lineabove ---------
# headerrow
# --- linebelowheader ---
# datarow
# --- linebetweenrows ---
# ... (more datarows) ...
# --- linebetweenrows ---
# last datarow
# --- linebelow ---------
#
# TableFormat's line* elements can be
#
# - either None, if the element is not used,
# - or a Line tuple,
# - or a function: [col_widths], [col_alignments] -> string.
#
# TableFormat's *row elements can be
#
# - either None, if the element is not used,
# - or a DataRow tuple,
# - or a function: [cell_values], [col_widths], [col_alignments] -> string.
#
# padding (an integer) is the amount of white space around data values.
#
# with_header_hide:
#
# - either None, to display all table elements unconditionally,
# - or a list of elements not to be displayed if the table has column headers.
#
TableFormat = namedtuple("TableFormat", ["lineabove", "linebelowheader",
"linebetweenrows", "linebelow",
"headerrow", "datarow",
"padding", "with_header_hide"])
def _pipe_segment_with_colons(align, colwidth):
"""Return a segment of a horizontal line with optional colons which
indicate column's alignment (as in `pipe` output format)."""
w = colwidth
if align in ["right", "decimal"]:
return ('-' * (w - 1)) + ":"
elif align == "center":
return ":" + ('-' * (w - 2)) + ":"
elif align == "left":
return ":" + ('-' * (w - 1))
else:
return '-' * w
def _pipe_line_with_colons(colwidths, colaligns):
"""Return a horizontal line with optional colons to indicate column's
alignment (as in `pipe` output format)."""
segments = [_pipe_segment_with_colons(a, w) for a, w in zip(colaligns, colwidths)]
return "|" + "|".join(segments) + "|"
def _mediawiki_row_with_attrs(separator, cell_values, colwidths, colaligns):
alignment = { "left": '',
"right": 'align="right"| ',
"center": 'align="center"| ',
"decimal": 'align="right"| ' }
# hard-coded padding _around_ align attribute and value together
# rather than padding parameter which affects only the value
values_with_attrs = [' ' + alignment.get(a, '') + c + ' '
for c, a in zip(cell_values, colaligns)]
colsep = separator*2
return (separator + colsep.join(values_with_attrs)).rstrip()
def _textile_row_with_attrs(cell_values, colwidths, colaligns):
cell_values[0] += ' '
alignment = { "left": "<.", "right": ">.", "center": "=.", "decimal": ">." }
values = (alignment.get(a, '') + v for a, v in zip(colaligns, cell_values))
return '|' + '|'.join(values) + '|'
def _html_begin_table_without_header(colwidths_ignore, colaligns_ignore):
# this table header will be suppressed if there is a header row
return "\n".join(["<table>", "<tbody>"])
def _html_row_with_attrs(celltag, cell_values, colwidths, colaligns):
alignment = { "left": '',
"right": ' style="text-align: right;"',
"center": ' style="text-align: center;"',
"decimal": ' style="text-align: right;"' }
values_with_attrs = ["<{0}{1}>{2}</{0}>".format(celltag, alignment.get(a, ''), c)
for c, a in zip(cell_values, colaligns)]
rowhtml = "<tr>" + "".join(values_with_attrs).rstrip() + "</tr>"
if celltag == "th": # it's a header row, create a new table header
rowhtml = "\n".join(["<table>",
"<thead>",
rowhtml,
"</thead>",
"<tbody>"])
return rowhtml
def _moin_row_with_attrs(celltag, cell_values, colwidths, colaligns, header=''):
alignment = { "left": '',
"right": '<style="text-align: right;">',
"center": '<style="text-align: center;">',
"decimal": '<style="text-align: right;">' }
values_with_attrs = ["{0}{1} {2} ".format(celltag,
alignment.get(a, ''),
header+c+header)
for c, a in zip(cell_values, colaligns)]
return "".join(values_with_attrs)+"||"
def _latex_line_begin_tabular(colwidths, colaligns, booktabs=False):
alignment = { "left": "l", "right": "r", "center": "c", "decimal": "r" }
tabular_columns_fmt = "".join([alignment.get(a, "l") for a in colaligns])
return "\n".join(["\\begin{tabular}{" + tabular_columns_fmt + "}",
"\\toprule" if booktabs else "\\hline"])
LATEX_ESCAPE_RULES = {r"&": r"\&", r"%": r"\%", r"$": r"\$", r"#": r"\#",
r"_": r"\_", r"^": r"\^{}", r"{": r"\{", r"}": r"\}",
r"~": r"\textasciitilde{}", "\\": r"\textbackslash{}",
r"<": r"\ensuremath{<}", r">": r"\ensuremath{>}"}
def _latex_row(cell_values, colwidths, colaligns, escrules=LATEX_ESCAPE_RULES):
def escape_char(c):
return escrules.get(c, c)
escaped_values = ["".join(map(escape_char, cell)) for cell in cell_values]
rowfmt = DataRow("", "&", "\\\\")
return _build_simple_row(escaped_values, rowfmt)
def _rst_escape_first_column(rows, headers):
def escape_empty(val):
if isinstance(val, (_text_type, _binary_type)) and not val.strip():
return ".."
else:
return val
new_headers = list(headers)
new_rows = []
if headers:
new_headers[0] = escape_empty(headers[0])
for row in rows:
new_row = list(row)
if new_row:
new_row[0] = escape_empty(row[0])
new_rows.append(new_row)
return new_rows, new_headers
_table_formats = {"simple":
TableFormat(lineabove=Line("", "-", " ", ""),
linebelowheader=Line("", "-", " ", ""),
linebetweenrows=None,
linebelow=Line("", "-", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0,
with_header_hide=["lineabove", "linebelow"]),
"plain":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0, with_header_hide=None),
"grid":
TableFormat(lineabove=Line("+", "-", "+", "+"),
linebelowheader=Line("+", "=", "+", "+"),
linebetweenrows=Line("+", "-", "+", "+"),
linebelow=Line("+", "-", "+", "+"),
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"fancy_grid":
                  TableFormat(lineabove=Line("╒", "═", "╤", "╕"),
                              linebelowheader=Line("╞", "═", "╪", "╡"),
                              linebetweenrows=Line("├", "─", "┼", "┤"),
                              linebelow=Line("╘", "═", "╧", "╛"),
                              headerrow=DataRow("│", "│", "│"),
                              datarow=DataRow("│", "│", "│"),
padding=1, with_header_hide=None),
"github":
TableFormat(lineabove=Line("|", "-", "|", "|"),
linebelowheader=Line("|", "-", "|", "|"),
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1,
with_header_hide=["lineabove"]),
"pipe":
TableFormat(lineabove=_pipe_line_with_colons,
linebelowheader=_pipe_line_with_colons,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1,
with_header_hide=["lineabove"]),
"orgtbl":
TableFormat(lineabove=None,
linebelowheader=Line("|", "-", "+", "|"),
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"jira":
TableFormat(lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("||", "||", "||"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"presto":
TableFormat(lineabove=None,
linebelowheader=Line("", "-", "+", ""),
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("", "|", ""),
datarow=DataRow("", "|", ""),
padding=1, with_header_hide=None),
"psql":
TableFormat(lineabove=Line("+", "-", "+", "+"),
linebelowheader=Line("|", "-", "+", "|"),
linebetweenrows=None,
linebelow=Line("+", "-", "+", "+"),
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"rst":
TableFormat(lineabove=Line("", "=", " ", ""),
linebelowheader=Line("", "=", " ", ""),
linebetweenrows=None,
linebelow=Line("", "=", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0, with_header_hide=None),
"mediawiki":
TableFormat(lineabove=Line("{| class=\"wikitable\" style=\"text-align: left;\"",
"", "", "\n|+ <!-- caption -->\n|-"),
linebelowheader=Line("|-", "", "", ""),
linebetweenrows=Line("|-", "", "", ""),
linebelow=Line("|}", "", "", ""),
headerrow=partial(_mediawiki_row_with_attrs, "!"),
datarow=partial(_mediawiki_row_with_attrs, "|"),
padding=0, with_header_hide=None),
"moinmoin":
TableFormat(lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=partial(_moin_row_with_attrs,"||",header="'''"),
datarow=partial(_moin_row_with_attrs,"||"),
padding=1, with_header_hide=None),
"youtrack":
TableFormat(lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|| ", " || ", " || "),
datarow=DataRow("| ", " | ", " |"),
padding=1, with_header_hide=None),
"html":
TableFormat(lineabove=_html_begin_table_without_header,
linebelowheader="",
linebetweenrows=None,
linebelow=Line("</tbody>\n</table>", "", "", ""),
headerrow=partial(_html_row_with_attrs, "th"),
datarow=partial(_html_row_with_attrs, "td"),
padding=0, with_header_hide=["lineabove"]),
"latex":
TableFormat(lineabove=_latex_line_begin_tabular,
linebelowheader=Line("\\hline", "", "", ""),
linebetweenrows=None,
linebelow=Line("\\hline\n\\end{tabular}", "", "", ""),
headerrow=_latex_row,
datarow=_latex_row,
padding=1, with_header_hide=None),
"latex_raw":
TableFormat(lineabove=_latex_line_begin_tabular,
linebelowheader=Line("\\hline", "", "", ""),
linebetweenrows=None,
linebelow=Line("\\hline\n\\end{tabular}", "", "", ""),
headerrow=partial(_latex_row, escrules={}),
datarow=partial(_latex_row, escrules={}),
padding=1, with_header_hide=None),
"latex_booktabs":
TableFormat(lineabove=partial(_latex_line_begin_tabular, booktabs=True),
linebelowheader=Line("\\midrule", "", "", ""),
linebetweenrows=None,
linebelow=Line("\\bottomrule\n\\end{tabular}", "", "", ""),
headerrow=_latex_row,
datarow=_latex_row,
padding=1, with_header_hide=None),
"tsv":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("", "\t", ""),
datarow=DataRow("", "\t", ""),
padding=0, with_header_hide=None),
"textile":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("|_. ", "|_.", "|"),
datarow=_textile_row_with_attrs,
padding=1, with_header_hide=None)}
tabulate_formats = list(sorted(_table_formats.keys()))
# The table formats for which multiline cells will be folded into subsequent
# table rows. The key is the original format specified at the API. The value is
# the format that will be used to represent the original format.
multiline_formats = {
"plain": "plain",
"simple": "simple",
"grid": "grid",
"fancy_grid": "fancy_grid",
"pipe": "pipe",
"orgtbl": "orgtbl",
"jira": "jira",
"presto": "presto",
"psql": "psql",
"rst": "rst",
}
# TODO: Add multiline support for the remaining table formats:
# - mediawiki: Replace \n with <br>
# - moinmoin: TBD
# - youtrack: TBD
# - html: Replace \n with <br>
# - latex*: Use "makecell" package: In header, replace X\nY with
# \thead{X\\Y} and in data row, replace X\nY with \makecell{X\\Y}
# - tsv: TBD
# - textile: Replace \n with <br/> (must be well-formed XML)
_multiline_codes = re.compile(r"\r|\n|\r\n")
_multiline_codes_bytes = re.compile(b"\r|\n|\r\n")
_invisible_codes = re.compile(r"\x1b\[\d+[;\d]*m|\x1b\[\d*\;\d*\;\d*m") # ANSI color codes
_invisible_codes_bytes = re.compile(b"\x1b\[\d+[;\d]*m|\x1b\[\d*\;\d*\;\d*m") # ANSI color codes
def simple_separated_format(separator):
"""Construct a simple TableFormat with columns separated by a separator.
>>> tsv = simple_separated_format("\\t") ; \
tabulate([["foo", 1], ["spam", 23]], tablefmt=tsv) == 'foo \\t 1\\nspam\\t23'
True
"""
return TableFormat(None, None, None, None,
headerrow=DataRow('', separator, ''),
datarow=DataRow('', separator, ''),
padding=0, with_header_hide=None)
def _isconvertible(conv, string):
try:
n = conv(string)
return True
except (ValueError, TypeError):
return False
def _isnumber(string):
"""
>>> _isnumber("123.45")
True
>>> _isnumber("123")
True
>>> _isnumber("spam")
False
>>> _isnumber("123e45678")
False
>>> _isnumber("inf")
True
"""
if not _isconvertible(float, string):
return False
elif isinstance(string, (_text_type, _binary_type)) and (
math.isinf(float(string)) or math.isnan(float(string))):
return string.lower() in ['inf', '-inf', 'nan']
return True
def _isint(string, inttype=int):
"""
>>> _isint("123")
True
>>> _isint("123.45")
False
"""
return type(string) is inttype or\
(isinstance(string, _binary_type) or isinstance(string, _text_type))\
and\
_isconvertible(inttype, string)
def _isbool(string):
"""
>>> _isbool(True)
True
>>> _isbool("False")
True
>>> _isbool(1)
False
"""
return type(string) is _bool_type or\
(isinstance(string, (_binary_type, _text_type))\
and\
string in ("True", "False"))
def _type(string, has_invisible=True, numparse=True):
"""The least generic type (type(None), int, float, str, unicode).
>>> _type(None) is type(None)
True
>>> _type("foo") is type("")
True
>>> _type("1") is type(1)
True
>>> _type('\x1b[31m42\x1b[0m') is type(42)
True
>>> _type('\x1b[31m42\x1b[0m') is type(42)
True
"""
if has_invisible and \
(isinstance(string, _text_type) or isinstance(string, _binary_type)):
string = _strip_invisible(string)
if string is None:
return _none_type
elif hasattr(string, "isoformat"): # datetime.datetime, date, and time
return _text_type
elif _isbool(string):
return _bool_type
elif _isint(string) and numparse:
return int
elif _isint(string, _long_type) and numparse:
return int
elif _isnumber(string) and numparse:
return float
elif isinstance(string, _binary_type):
return _binary_type
else:
return _text_type
def _afterpoint(string):
"""Symbols after a decimal point, -1 if the string lacks the decimal point.
>>> _afterpoint("123.45")
2
>>> _afterpoint("1001")
-1
>>> _afterpoint("eggs")
-1
>>> _afterpoint("123e45")
2
"""
if _isnumber(string):
if _isint(string):
return -1
else:
pos = string.rfind(".")
pos = string.lower().rfind("e") if pos < 0 else pos
if pos >= 0:
return len(string) - pos - 1
else:
return -1 # no point
else:
return -1 # not a number
def _padleft(width, s):
"""Flush right.
>>> _padleft(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430'
True
"""
fmt = "{0:>%ds}" % width
return fmt.format(s)
def _padright(width, s):
"""Flush left.
>>> _padright(6, '\u044f\u0439\u0446\u0430') == '\u044f\u0439\u0446\u0430 '
True
"""
fmt = "{0:<%ds}" % width
return fmt.format(s)
def _padboth(width, s):
"""Center string.
>>> _padboth(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430 '
True
"""
fmt = "{0:^%ds}" % width
return fmt.format(s)
def _padnone(ignore_width, s):
return s
def _strip_invisible(s):
"Remove invisible ANSI color codes."
if isinstance(s, _text_type):
return re.sub(_invisible_codes, "", s)
else: # a bytestring
return re.sub(_invisible_codes_bytes, "", s)
def _visible_width(s):
"""Visible width of a printed string. ANSI color codes are removed.
>>> _visible_width('\x1b[31mhello\x1b[0m'), _visible_width("world")
(5, 5)
"""
# optional wide-character support
if wcwidth is not None and WIDE_CHARS_MODE:
len_fn = wcwidth.wcswidth
else:
len_fn = len
if isinstance(s, _text_type) or isinstance(s, _binary_type):
return len_fn(_strip_invisible(s))
else:
return len_fn(_text_type(s))
def _is_multiline(s):
if isinstance(s, _text_type):
return bool(re.search(_multiline_codes, s))
else: # a bytestring
return bool(re.search(_multiline_codes_bytes, s))
def _multiline_width(multiline_s, line_width_fn=len):
"""Visible width of a potentially multiline content."""
return max(map(line_width_fn, re.split("[\r\n]", multiline_s)))
def _choose_width_fn(has_invisible, enable_widechars, is_multiline):
"""Return a function to calculate visible cell width."""
if has_invisible:
line_width_fn = _visible_width
elif enable_widechars: # optional wide-character support if available
line_width_fn = wcwidth.wcswidth
else:
line_width_fn = len
if is_multiline:
width_fn = lambda s: _multiline_width(s, line_width_fn)
else:
width_fn = line_width_fn
return width_fn
def _align_column_choose_padfn(strings, alignment, has_invisible):
if alignment == "right":
if not PRESERVE_WHITESPACE:
strings = [s.strip() for s in strings]
padfn = _padleft
elif alignment == "center":
if not PRESERVE_WHITESPACE:
strings = [s.strip() for s in strings]
padfn = _padboth
elif alignment == "decimal":
if has_invisible:
decimals = [_afterpoint(_strip_invisible(s)) for s in strings]
else:
decimals = [_afterpoint(s) for s in strings]
maxdecimals = max(decimals)
strings = [s + (maxdecimals - decs) * " "
for s, decs in zip(strings, decimals)]
padfn = _padleft
elif not alignment:
padfn = _padnone
else:
if not PRESERVE_WHITESPACE:
strings = [s.strip() for s in strings]
padfn = _padright
return strings, padfn
def _align_column(strings, alignment, minwidth=0,
has_invisible=True, enable_widechars=False, is_multiline=False):
"""[string] -> [padded_string]"""
strings, padfn = _align_column_choose_padfn(strings, alignment, has_invisible)
width_fn = _choose_width_fn(has_invisible, enable_widechars, is_multiline)
s_widths = list(map(width_fn, strings))
maxwidth = max(max(s_widths), minwidth)
# TODO: refactor column alignment in single-line and multiline modes
if is_multiline:
if not enable_widechars and not has_invisible:
padded_strings = [
"\n".join([padfn(maxwidth, s) for s in ms.splitlines()])
for ms in strings]
else:
# enable wide-character width corrections
s_lens = [max((len(s) for s in re.split("[\r\n]", ms))) for ms in strings]
visible_widths = [maxwidth - (w - l) for w, l in zip(s_widths, s_lens)]
# wcswidth and _visible_width don't count invisible characters;
# padfn doesn't need to apply another correction
padded_strings = ["\n".join([padfn(w, s) for s in (ms.splitlines() or ms)])
for ms, w in zip(strings, visible_widths)]
else: # single-line cell values
if not enable_widechars and not has_invisible:
padded_strings = [padfn(maxwidth, s) for s in strings]
else:
# enable wide-character width corrections
s_lens = list(map(len, strings))
visible_widths = [maxwidth - (w - l) for w, l in zip(s_widths, s_lens)]
# wcswidth and _visible_width don't count invisible characters;
# padfn doesn't need to apply another correction
padded_strings = [padfn(w, s) for s, w in zip(strings, visible_widths)]
return padded_strings
def _more_generic(type1, type2):
types = { _none_type: 0, _bool_type: 1, int: 2, float: 3, _binary_type: 4, _text_type: 5 }
invtypes = { 5: _text_type, 4: _binary_type, 3: float, 2: int, 1: _bool_type, 0: _none_type }
moregeneric = max(types.get(type1, 5), types.get(type2, 5))
return invtypes[moregeneric]
def _column_type(strings, has_invisible=True, numparse=True):
"""The least generic type all column values are convertible to.
>>> _column_type([True, False]) is _bool_type
True
>>> _column_type(["1", "2"]) is _int_type
True
>>> _column_type(["1", "2.3"]) is _float_type
True
>>> _column_type(["1", "2.3", "four"]) is _text_type
True
>>> _column_type(["four", '\u043f\u044f\u0442\u044c']) is _text_type
True
>>> _column_type([None, "brux"]) is _text_type
True
>>> _column_type([1, 2, None]) is _int_type
True
>>> import datetime as dt
>>> _column_type([dt.datetime(1991,2,19), dt.time(17,35)]) is _text_type
True
"""
types = [_type(s, has_invisible, numparse) for s in strings ]
return reduce(_more_generic, types, _bool_type)
def _format(val, valtype, floatfmt, missingval="", has_invisible=True):
"""Format a value accoding to its type.
Unicode is supported:
>>> hrow = ['\u0431\u0443\u043a\u0432\u0430', '\u0446\u0438\u0444\u0440\u0430'] ; \
tbl = [['\u0430\u0437', 2], ['\u0431\u0443\u043a\u0438', 4]] ; \
good_result = '\\u0431\\u0443\\u043a\\u0432\\u0430 \\u0446\\u0438\\u0444\\u0440\\u0430\\n------- -------\\n\\u0430\\u0437 2\\n\\u0431\\u0443\\u043a\\u0438 4' ; \
tabulate(tbl, headers=hrow) == good_result
True
"""
if val is None:
return missingval
if valtype in [int, _text_type]:
return "{0}".format(val)
elif valtype is _binary_type:
try:
return _text_type(val, "ascii")
except TypeError:
return _text_type(val)
elif valtype is float:
is_a_colored_number = has_invisible and isinstance(val, (_text_type, _binary_type))
if is_a_colored_number:
raw_val = _strip_invisible(val)
formatted_val = format(float(raw_val), floatfmt)
return val.replace(raw_val, formatted_val)
else:
return format(float(val), floatfmt)
else:
return "{0}".format(val)
def _align_header(header, alignment, width, visible_width, is_multiline=False, width_fn=None):
"Pad string header to width chars given known visible_width of the header."
if is_multiline:
header_lines = re.split(_multiline_codes, header)
padded_lines = [_align_header(h, alignment, width, width_fn(h)) for h in header_lines]
return "\n".join(padded_lines)
# else: not multiline
ninvisible = len(header) - visible_width
width += ninvisible
if alignment == "left":
return _padright(width, header)
elif alignment == "center":
return _padboth(width, header)
elif not alignment:
return "{0}".format(header)
else:
return _padleft(width, header)
def _prepend_row_index(rows, index):
"""Add a left-most index column."""
if index is None or index is False:
return rows
if len(index) != len(rows):
print('index=', index)
print('rows=', rows)
raise ValueError('index must be as long as the number of data rows')
rows = [[v]+list(row) for v,row in zip(index, rows)]
return rows
def _bool(val):
"A wrapper around standard bool() which doesn't throw on NumPy arrays"
try:
return bool(val)
except ValueError: # val is likely to be a numpy array with many elements
return False
def _normalize_tabular_data(tabular_data, headers, showindex="default"):
"""Transform a supported data type to a list of lists, and a list of headers.
Supported tabular data types:
* list-of-lists or another iterable of iterables
* list of named tuples (usually used with headers="keys")
* list of dicts (usually used with headers="keys")
* list of OrderedDicts (usually used with headers="keys")
* 2D NumPy arrays
* NumPy record arrays (usually used with headers="keys")
* dict of iterables (usually used with headers="keys")
* pandas.DataFrame (usually used with headers="keys")
The first row can be used as headers if headers="firstrow",
column indices can be used as headers if headers="keys".
If showindex="default", show row indices of the pandas.DataFrame.
If showindex="always", show row indices for all types of data.
If showindex="never", don't show row indices for all types of data.
If showindex is an iterable, show its values as row indices.
"""
try:
bool(headers)
is_headers2bool_broken = False
except ValueError: # numpy.ndarray, pandas.core.index.Index, ...
is_headers2bool_broken = True
headers = list(headers)
index = None
if hasattr(tabular_data, "keys") and hasattr(tabular_data, "values"):
# dict-like and pandas.DataFrame?
if hasattr(tabular_data.values, "__call__"):
# likely a conventional dict
keys = tabular_data.keys()
rows = list(izip_longest(*tabular_data.values())) # columns have to be transposed
elif hasattr(tabular_data, "index"):
# values is a property, has .index => it's likely a pandas.DataFrame (pandas 0.11.0)
keys = list(tabular_data)
if tabular_data.index.name is not None:
if isinstance(tabular_data.index.name, list):
keys[:0] = tabular_data.index.name
else:
keys[:0] = [tabular_data.index.name]
vals = tabular_data.values # values matrix doesn't need to be transposed
# for DataFrames add an index per default
index = list(tabular_data.index)
rows = [list(row) for row in vals]
else:
raise ValueError("tabular data doesn't appear to be a dict or a DataFrame")
if headers == "keys":
headers = list(map(_text_type,keys)) # headers should be strings
    else:  # it's usually an iterable of iterables, or a NumPy array
rows = list(tabular_data)
if (headers == "keys" and not rows):
# an empty table (issue #81)
headers = []
elif (headers == "keys" and
hasattr(tabular_data, "dtype") and
getattr(tabular_data.dtype, "names")):
# numpy record array
headers = tabular_data.dtype.names
elif (headers == "keys"
and len(rows) > 0
and isinstance(rows[0], tuple)
and hasattr(rows[0], "_fields")):
# namedtuple
headers = list(map(_text_type, rows[0]._fields))
elif (len(rows) > 0
and isinstance(rows[0], dict)):
# dict or OrderedDict
uniq_keys = set() # implements hashed lookup
keys = [] # storage for set
if headers == "firstrow":
firstdict = rows[0] if len(rows) > 0 else {}
keys.extend(firstdict.keys())
uniq_keys.update(keys)
rows = rows[1:]
for row in rows:
for k in row.keys():
#Save unique items in input order
if k not in uniq_keys:
keys.append(k)
uniq_keys.add(k)
if headers == 'keys':
headers = keys
elif isinstance(headers, dict):
# a dict of headers for a list of dicts
headers = [headers.get(k, k) for k in keys]
headers = list(map(_text_type, headers))
elif headers == "firstrow":
if len(rows) > 0:
headers = [firstdict.get(k, k) for k in keys]
headers = list(map(_text_type, headers))
else:
headers = []
elif headers:
raise ValueError('headers for a list of dicts is not a dict or a keyword')
rows = [[row.get(k) for k in keys] for row in rows]
elif (headers == "keys"
and hasattr(tabular_data, "description")
and hasattr(tabular_data, "fetchone")
and hasattr(tabular_data, "rowcount")):
# Python Database API cursor object (PEP 0249)
# print tabulate(cursor, headers='keys')
headers = [column[0] for column in tabular_data.description]
elif headers == "keys" and len(rows) > 0:
# keys are column indices
headers = list(map(_text_type, range(len(rows[0]))))
# take headers from the first row if necessary
if headers == "firstrow" and len(rows) > 0:
if index is not None:
headers = [index[0]] + list(rows[0])
index = index[1:]
else:
headers = rows[0]
headers = list(map(_text_type, headers)) # headers should be strings
rows = rows[1:]
headers = list(map(_text_type,headers))
rows = list(map(list,rows))
# add or remove an index column
showindex_is_a_str = type(showindex) in [_text_type, _binary_type]
if showindex == "default" and index is not None:
rows = _prepend_row_index(rows, index)
elif isinstance(showindex, Iterable) and not showindex_is_a_str:
rows = _prepend_row_index(rows, list(showindex))
elif showindex == "always" or (_bool(showindex) and not showindex_is_a_str):
if index is None:
index = list(range(len(rows)))
rows = _prepend_row_index(rows, index)
elif showindex == "never" or (not _bool(showindex) and not showindex_is_a_str):
pass
# pad with empty headers for initial columns if necessary
if headers and len(rows) > 0:
nhs = len(headers)
ncols = len(rows[0])
if nhs < ncols:
headers = [""]*(ncols - nhs) + headers
return rows, headers
def tabulate(tabular_data, headers=(), tablefmt="simple",
floatfmt=_DEFAULT_FLOATFMT, numalign="decimal", stralign="left",
missingval=_DEFAULT_MISSINGVAL, showindex="default", disable_numparse=False,
colalign=None):
"""Format a fixed width table for pretty printing.
>>> print(tabulate([[1, 2.34], [-56, "8.999"], ["2", "10001"]]))
--- ---------
1 2.34
-56 8.999
2 10001
--- ---------
The first required argument (`tabular_data`) can be a
list-of-lists (or another iterable of iterables), a list of named
tuples, a dictionary of iterables, an iterable of dictionaries,
a two-dimensional NumPy array, NumPy record array, or a Pandas'
dataframe.
Table headers
-------------
To print nice column headers, supply the second argument (`headers`):
- `headers` can be an explicit list of column headers
- if `headers="firstrow"`, then the first row of data is used
- if `headers="keys"`, then dictionary keys or column indices are used
Otherwise a headerless table is produced.
If the number of headers is less than the number of columns, they
are supposed to be names of the last columns. This is consistent
with the plain-text format of R and Pandas' dataframes.
>>> print(tabulate([["sex","age"],["Alice","F",24],["Bob","M",19]],
... headers="firstrow"))
sex age
----- ----- -----
Alice F 24
Bob M 19
By default, pandas.DataFrame data have an additional column called
row index. To add a similar column to all other types of data,
use `showindex="always"` or `showindex=True`. To suppress row indices
    for all types of data, pass `showindex="never"` or `showindex=False`.
To add a custom row index column, pass `showindex=some_iterable`.
>>> print(tabulate([["F",24],["M",19]], showindex="always"))
- - --
0 F 24
1 M 19
- - --
Column alignment
----------------
`tabulate` tries to detect column types automatically, and aligns
the values properly. By default it aligns decimal points of the
numbers (or flushes integer numbers to the right), and flushes
everything else to the left. Possible column alignments
(`numalign`, `stralign`) are: "right", "center", "left", "decimal"
(only for `numalign`), and None (to disable alignment).
Table formats
-------------
`floatfmt` is a format specification used for columns which
contain numeric data with a decimal point. This can also be
a list or tuple of format strings, one per column.
`None` values are replaced with a `missingval` string (like
`floatfmt`, this can also be a list of values for different
columns):
>>> print(tabulate([["spam", 1, None],
... ["eggs", 42, 3.14],
... ["other", None, 2.7]], missingval="?"))
----- -- ----
spam 1 ?
eggs 42 3.14
other ? 2.7
----- -- ----
Various plain-text table formats (`tablefmt`) are supported:
'plain', 'simple', 'grid', 'pipe', 'orgtbl', 'rst', 'mediawiki',
'latex', 'latex_raw' and 'latex_booktabs'. Variable `tabulate_formats`
contains the list of currently supported formats.
"plain" format doesn't use any pseudographics to draw tables,
it separates columns with a double space:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "plain"))
strings numbers
spam 41.9999
eggs 451
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="plain"))
spam 41.9999
eggs 451
"simple" format is like Pandoc simple_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "simple"))
strings numbers
--------- ---------
spam 41.9999
eggs 451
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="simple"))
---- --------
spam 41.9999
eggs 451
---- --------
"grid" is similar to tables produced by Emacs table.el package or
Pandoc grid_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "grid"))
+-----------+-----------+
| strings | numbers |
+===========+===========+
| spam | 41.9999 |
+-----------+-----------+
| eggs | 451 |
+-----------+-----------+
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="grid"))
+------+----------+
| spam |  41.9999 |
+------+----------+
| eggs | 451      |
+------+----------+
"fancy_grid" draws a grid using box-drawing characters:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "fancy_grid"))
╒═══════════╤═══════════╕
│ strings   │   numbers │
╞═══════════╪═══════════╡
│ spam      │   41.9999 │
├───────────┼───────────┤
│ eggs      │  451      │
╘═══════════╧═══════════╛
"pipe" is like tables in PHP Markdown Extra extension or Pandoc
pipe_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "pipe"))
| strings   |   numbers |
|:----------|----------:|
| spam      |   41.9999 |
| eggs      |  451      |
"presto" is like tables produce by the Presto CLI:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "presto"))
 strings   |   numbers
-----------+-----------
 spam      |   41.9999
 eggs      |  451
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="pipe"))
|:-----|---------:|
| spam |  41.9999 |
| eggs | 451      |
"orgtbl" is like tables in Emacs org-mode and orgtbl-mode. They
are slightly different from "pipe" format by not using colons to
define column alignment, and using a "+" sign to indicate line
intersections:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "orgtbl"))
| strings   |   numbers |
|-----------+-----------|
| spam      |   41.9999 |
| eggs      |  451      |
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="orgtbl"))
| spam |  41.9999 |
| eggs | 451      |
"rst" is like a simple table format from reStructuredText; please
note that reStructuredText accepts also "grid" tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "rst"))
=========  =========
strings      numbers
=========  =========
spam         41.9999
eggs        451
=========  =========
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="rst"))
====  ========
spam   41.9999
eggs  451
====  ========
"mediawiki" produces a table markup used in Wikipedia and on other
MediaWiki-based sites:
>>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
... headers="firstrow", tablefmt="mediawiki"))
{| class="wikitable" style="text-align: left;"
|+ <!-- caption -->
|-
! strings   !! align="right"|   numbers
|-
| spam      || align="right"|   41.9999
|-
| eggs      || align="right"|  451
|}
"html" produces HTML markup:
>>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
... headers="firstrow", tablefmt="html"))
<table>
<thead>
<tr><th>strings  </th><th style="text-align: right;">  numbers</th></tr>
</thead>
<tbody>
<tr><td>spam     </td><td style="text-align: right;">  41.9999</td></tr>
<tr><td>eggs     </td><td style="text-align: right;"> 451     </td></tr>
</tbody>
</table>
"latex" produces a tabular environment of LaTeX document markup:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex"))
\\begin{tabular}{lr}
\\hline
 spam &  41.9999 \\\\
 eggs & 451      \\\\
\\hline
\\end{tabular}
"latex_raw" is similar to "latex", but doesn't escape special characters,
such as backslash and underscore, so LaTeX commands may embedded into
cells' values:
>>> print(tabulate([["spam$_9$", 41.9999], ["\\\\emph{eggs}", "451.0"]], tablefmt="latex_raw"))
\\begin{tabular}{lr}
\\hline
 spam$_9$    &  41.9999 \\\\
 \\emph{eggs} & 451      \\\\
\\hline
\\end{tabular}
"latex_booktabs" produces a tabular environment of LaTeX document markup
using the booktabs.sty package:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex_booktabs"))
\\begin{tabular}{lr}
\\toprule
 spam &  41.9999 \\\\
 eggs & 451      \\\\
\\bottomrule
\\end{tabular}
Number parsing
--------------
By default, anything which can be parsed as a number is a number.
This ensures numbers represented as strings are aligned properly.
This can lead to surprising results for particular strings, such as
specific git SHAs: e.g. "42992e1" will be parsed as the number
429920 and aligned as such.
To completely disable number parsing (and alignment), use
`disable_numparse=True`. For more fine-grained control, pass a list of
column indices to disable number parsing only on those columns,
e.g. `disable_numparse=[0, 2]` disables number parsing only on the
first and third columns.
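For example, with parsing disabled the SHA-like string above is kept as
text and aligned to the left (a minimal illustration):
>>> print(tabulate([["42992e1"]], disable_numparse=True))
-------
42992e1
-------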
"""
if tabular_data is None:
tabular_data = []
list_of_lists, headers = _normalize_tabular_data(
tabular_data, headers, showindex=showindex)
# empty values in the first column of RST tables should be escaped (issue #82)
# "" should be escaped as "\\ " or ".."
if tablefmt == 'rst':
list_of_lists, headers = _rst_escape_first_column(list_of_lists, headers)
# optimization: look for ANSI control codes once,
# enable smart width functions only if a control code is found
plain_text = '\t'.join(['\t'.join(map(_text_type, headers))] + \
['\t'.join(map(_text_type, row)) for row in list_of_lists])
has_invisible = re.search(_invisible_codes, plain_text)
enable_widechars = wcwidth is not None and WIDE_CHARS_MODE
if tablefmt in multiline_formats and _is_multiline(plain_text):
tablefmt = multiline_formats.get(tablefmt, tablefmt)
is_multiline = True
else:
is_multiline = False
width_fn = _choose_width_fn(has_invisible, enable_widechars, is_multiline)
# format rows and columns, convert numeric values to strings
cols = list(izip_longest(*list_of_lists))
numparses = _expand_numparse(disable_numparse, len(cols))
coltypes = [_column_type(col, numparse=np) for col, np in
zip(cols, numparses)]
if isinstance(floatfmt, basestring): #old version
float_formats = len(cols) * [floatfmt] # just duplicate the string to use in each column
else: # if floatfmt is list, tuple etc we have one per column
float_formats = list(floatfmt)
if len(float_formats) < len(cols):
float_formats.extend( (len(cols)-len(float_formats)) * [_DEFAULT_FLOATFMT] )
if isinstance(missingval, basestring):
missing_vals = len(cols) * [missingval]
else:
missing_vals = list(missingval)
if len(missing_vals) < len(cols):
missing_vals.extend( (len(cols)-len(missing_vals)) * [_DEFAULT_MISSINGVAL] )
cols = [[_format(v, ct, fl_fmt, miss_v, has_invisible) for v in c]
for c, ct, fl_fmt, miss_v in zip(cols, coltypes, float_formats, missing_vals)]
# align columns
aligns = [numalign if ct in [int,float] else stralign for ct in coltypes]
if colalign is not None:
assert isinstance(colalign, Iterable)
for idx, align in enumerate(colalign):
aligns[idx] = align
minwidths = [width_fn(h) + MIN_PADDING for h in headers] if headers else [0]*len(cols)
cols = [_align_column(c, a, minw, has_invisible, enable_widechars, is_multiline)
for c, a, minw in zip(cols, aligns, minwidths)]
if headers:
# align headers and add headers
t_cols = cols or [['']] * len(headers)
t_aligns = aligns or [stralign] * len(headers)
minwidths = [max(minw, max(width_fn(cl) for cl in c)) for minw, c in zip(minwidths, t_cols)]
headers = [_align_header(h, a, minw, width_fn(h), is_multiline, width_fn)
for h, a, minw in zip(headers, t_aligns, minwidths)]
rows = list(zip(*cols))
else:
minwidths = [max(width_fn(cl) for cl in c) for c in cols]
rows = list(zip(*cols))
if not isinstance(tablefmt, TableFormat):
tablefmt = _table_formats.get(tablefmt, _table_formats["simple"])
return _format_table(tablefmt, headers, rows, minwidths, aligns, is_multiline)
def _expand_numparse(disable_numparse, column_count):
"""
Return a list of bools of length `column_count`, indicating whether
number parsing should be used on each column.
If `disable_numparse` is a list of indices, the entries at those indices
are False and all other entries are True.
If `disable_numparse` is a bool, every entry of the returned list equals
`not disable_numparse`.
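For example, `_expand_numparse([1], 3)` returns `[True, False, True]`,
and `_expand_numparse(True, 3)` returns `[False, False, False]`.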
"""
if isinstance(disable_numparse, Iterable):
numparses = [True] * column_count
for index in disable_numparse:
numparses[index] = False
return numparses
else:
return [not disable_numparse] * column_count
def _pad_row(cells, padding):
if cells:
pad = " "*padding
padded_cells = [pad + cell + pad for cell in cells]
return padded_cells
else:
return cells
def _build_simple_row(padded_cells, rowfmt):
"Format row according to DataRow format without padding."
begin, sep, end = rowfmt
return (begin + sep.join(padded_cells) + end).rstrip()
def _build_row(padded_cells, colwidths, colaligns, rowfmt):
"Return a string which represents a row of data cells."
if not rowfmt:
return None
if hasattr(rowfmt, "__call__"):
return rowfmt(padded_cells, colwidths, colaligns)
else:
return _build_simple_row(padded_cells, rowfmt)
def _append_basic_row(lines, padded_cells, colwidths, colaligns, rowfmt):
lines.append(_build_row(padded_cells, colwidths, colaligns, rowfmt))
return lines
def _append_multiline_row(lines, padded_multiline_cells, padded_widths, colaligns, rowfmt, pad):
colwidths = [w - 2*pad for w in padded_widths]
cells_lines = [c.splitlines() for c in padded_multiline_cells]
nlines = max(map(len, cells_lines)) # number of lines in the row
# vertically pad cells where some lines are missing
cells_lines = [(cl + [' '*w]*(nlines - len(cl))) for cl, w in zip(cells_lines, colwidths)]
lines_cells = [[cl[i] for cl in cells_lines] for i in range(nlines)]
for ln in lines_cells:
padded_ln = _pad_row(ln, pad)
_append_basic_row(lines, padded_ln, colwidths, colaligns, rowfmt)
return lines
def _build_line(colwidths, colaligns, linefmt):
"Return a string which represents a horizontal line."
if not linefmt:
return None
if hasattr(linefmt, "__call__"):
return linefmt(colwidths, colaligns)
else:
begin, fill, sep, end = linefmt
cells = [fill*w for w in colwidths]
return _build_simple_row(cells, (begin, sep, end))
def _append_line(lines, colwidths, colaligns, linefmt):
lines.append(_build_line(colwidths, colaligns, linefmt))
return lines
def _format_table(fmt, headers, rows, colwidths, colaligns, is_multiline):
"""Produce a plain-text representation of the table."""
lines = []
hidden = fmt.with_header_hide if (headers and fmt.with_header_hide) else []
pad = fmt.padding
headerrow = fmt.headerrow
padded_widths = [(w + 2*pad) for w in colwidths]
if is_multiline:
pad_row = lambda row, _: row # do it later, in _append_multiline_row
append_row = partial(_append_multiline_row, pad=pad)
else:
pad_row = _pad_row
append_row = _append_basic_row
padded_headers = pad_row(headers, pad)
padded_rows = [pad_row(row, pad) for row in rows]
if fmt.lineabove and "lineabove" not in hidden:
_append_line(lines, padded_widths, colaligns, fmt.lineabove)
if padded_headers:
append_row(lines, padded_headers, padded_widths, colaligns, headerrow)
if fmt.linebelowheader and "linebelowheader" not in hidden:
_append_line(lines, padded_widths, colaligns, fmt.linebelowheader)
if padded_rows and fmt.linebetweenrows and "linebetweenrows" not in hidden:
# initial rows with a line below
for row in padded_rows[:-1]:
append_row(lines, row, padded_widths, colaligns, fmt.datarow)
_append_line(lines, padded_widths, colaligns, fmt.linebetweenrows)
# the last row without a line below
append_row(lines, padded_rows[-1], padded_widths, colaligns, fmt.datarow)
else:
for row in padded_rows:
append_row(lines, row, padded_widths, colaligns, fmt.datarow)
if fmt.linebelow and "linebelow" not in hidden:
_append_line(lines, padded_widths, colaligns, fmt.linebelow)
if headers or rows:
return "\n".join(lines)
else: # a completely empty table
return ""
def _main():
"""\
Usage: tabulate [options] [FILE ...]
Pretty-print tabular data.
See also https://bitbucket.org/astanin/python-tabulate
FILE a filename of the file with tabular data;
if "-" or missing, read data from stdin.
Options:
-h, --help show this message
-1, --header use the first row of data as a table header
-o FILE, --output FILE print table to FILE (default: stdout)
-s REGEXP, --sep REGEXP use a custom column separator (default: whitespace)
-F FPFMT, --float FPFMT floating point number format (default: g)
-f FMT, --format FMT set output table format; supported formats:
plain, simple, grid, fancy_grid, pipe, orgtbl,
rst, mediawiki, html, latex, latex_raw,
latex_booktabs, tsv
(default: simple)
"""
import getopt
import sys
import textwrap
usage = textwrap.dedent(_main.__doc__)
try:
opts, args = getopt.getopt(sys.argv[1:],
"h1o:s:F:A:f:",
["help", "header", "output", "sep=", "float=", "align=",
"format="])
except getopt.GetoptError as e:
print(e)
print(usage)
sys.exit(2)
headers = []
floatfmt = _DEFAULT_FLOATFMT
colalign = None
tablefmt = "simple"
sep = r"\s+"
outfile = "-"
for opt, value in opts:
if opt in ["-1", "--header"]:
headers = "firstrow"
elif opt in ["-o", "--output"]:
outfile = value
elif opt in ["-F", "--float"]:
floatfmt = value
elif opt in ["-C", "--colalign"]:
colalign = value.split()
elif opt in ["-f", "--format"]:
if value not in tabulate_formats:
print("%s is not a supported table format" % value)
print(usage)
sys.exit(3)
tablefmt = value
elif opt in ["-s", "--sep"]:
sep = value
elif opt in ["-h", "--help"]:
print(usage)
sys.exit(0)
files = [sys.stdin] if not args else args
with (sys.stdout if outfile == "-" else open(outfile, "w")) as out:
for f in files:
if f == "-":
f = sys.stdin
if _is_file(f):
_pprint_file(f, headers=headers, tablefmt=tablefmt,
sep=sep, floatfmt=floatfmt, file=out,
colalign=colalign)
else:
with open(f) as fobj:
_pprint_file(fobj, headers=headers, tablefmt=tablefmt,
sep=sep, floatfmt=floatfmt, file=out,
colalign=colalign)
def _pprint_file(fobject, headers, tablefmt, sep, floatfmt, file, colalign):
rows = fobject.readlines()
table = [re.split(sep, r.rstrip()) for r in rows if r.strip()]
print(tabulate(table, headers, tablefmt, floatfmt=floatfmt,
colalign=colalign), file=file)
if __name__ == "__main__":
_main()
|
c3dm-main
|
c3dm/tools/tabulate.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
import torch.nn.functional as Fu
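# find_camera_T: least-squares estimate of the camera translation T that brings the
# 3D points X (ba x 3 x n) as close as possible to the camera rays through the 2D
# points Y (ba x 2 x n) under intrinsics K, i.e. T = argmin_T sum_i ||(I - r_i r_i^T)(X_i + T)||^2
# (shapes inferred from the usage below).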
def find_camera_T(K, X, Y):
n = X.shape[2]
ba = X.shape[0]
append1 = lambda x: \
torch.cat((x,x.new_ones(x.shape[0],1,x.shape[2])), dim=1)
# projection rays
r = torch.bmm(torch.inverse(K), append1(Y))
r = Fu.normalize(r, dim=1)
# outer projection ray product (need to permute the array first)
rr = r.permute(0,2,1).contiguous().view(n*ba, 3)
rr = torch.bmm(rr[:,:,None], rr[:,None,:])
# I - rr
Irr = torch.eye(3)[None].repeat(ba*n,1,1) - rr
# [rr - I] x
rrIx = torch.bmm(-Irr, X.permute(0,2,1).contiguous().view(n*ba, 3, 1))
Irr_sum = Irr.view(ba,-1,3,3,).sum(1)
rrIx_sum = rrIx.view(ba,-1,3).sum(1)
rrI_sum_i = torch.inverse(Irr_sum)
T = torch.bmm(rrI_sum_i, rrIx_sum[:,:,None])[:,:,0]
return T
n = 500 # n points
ba = 20 # batch size
# gt 3D points
X = torch.zeros(ba, 3, n).normal_(0., 1.)
for focal in torch.linspace(10.,0.1,20):
# cam K
K = torch.eye(3)
K[0,0] = focal
K[1,1] = focal
K = K[None].repeat(ba,1,1)
if False:
# persp projections - should give 0 error everywhere
T = torch.ones(ba, 3).uniform_()*10.
Y = torch.bmm(K, X + T[:,:,None])
Y = Y[:,0:2,:] / Y[:,2:3,:]
else:
# orth projections - should get higher error with lower focal
Y = X[:,0:2]
T = find_camera_T(K, X, Y)
## test the repro loss
# perspective projections
Yp = torch.bmm(K, X + T[:,:,None])
depth_ = Yp[:,2:3, :]
Yp = Yp[:,0:2, :] / depth_
# the diff between orth and persp
df = ((Y - Yp)**2).sum(1).sqrt().mean(1).mean()
print('focal = %1.2f, repro_df = %1.2e, mean_depth = %1.2f' % \
(focal, df, depth_.mean()) )
|
c3dm-main
|
c3dm/tools/test_orth2pers.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
def nested_attr_dict(dct):
if type(dct) in (dict,AttrDict):
dct = AttrDict(dct)
for k,v in dct.items():
dct[k] = nested_attr_dict(v)
return dct
class AttrDict(dict):
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
elif name in self:
return self[name]
else:
raise AttributeError(name)
def __setattr__(self, name, value):
if name in self.__dict__:
self.__dict__[name] = value
else:
self[name] = value
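# Minimal usage sketch (illustrative only; the names below are made up for the example):
if __name__ == "__main__":
    cfg = nested_attr_dict({"model": {"lr": 0.1, "layers": [64, 64]}})
    assert cfg.model.lr == 0.1          # nested dicts become attribute-accessible
    cfg.model.lr = 0.01                 # attribute assignment writes through to the dict
    assert cfg["model"]["lr"] == 0.01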
|
c3dm-main
|
c3dm/tools/attr_dict.py
|