python_code | repo_name | file_path
---|---|---|
"""Match items in a dictionary using fuzzy matching
Implemented for pywinauto.
This class uses difflib to match strings.
This class uses a linear search to find matching items, as it has to iterate
over every item in the dictionary (otherwise it would not be possible to know
which is the 'best' match).
If the exact key is in the dictionary (so no fuzzy matching is needed), it
skips the linear search and speed should be similar to a standard Python
dictionary.
>>> fuzzywuzzy = FuzzyDict({"hello" : "World", "Hiya" : 2, "Here you are" : 3})
>>> fuzzywuzzy['Me again'] = [1,2,3]
>>>
>>> fuzzywuzzy['Hi']
2
>>>
>>>
>>> # next one doesn't match well enough - so a key error is raised
...
>>> fuzzywuzzy['There']
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "pywinauto\fuzzydict.py", line 125, in __getitem__
raise KeyError(
KeyError: "'There'. closest match: 'hello' with ratio 0.400"
>>>
>>> fuzzywuzzy['you are']
3
>>> fuzzywuzzy['again']
[1, 2, 3]
>>>
"""
__revision__ = "$Rev$"
import difflib
class FuzzyDict(dict):
"Provides a dictionary that performs fuzzy lookup"
def __init__(self, items = None, cutoff = .6):
"""Construct a new FuzzyDict instance
items is a dictionary to copy items from (optional)
cutoff is the match ratio below which matches should not be considered
cutoff needs to be a float between 0 and 1 (where zero is no match
and 1 is a perfect match)"""
super(FuzzyDict, self).__init__()
if items:
self.update(items)
self.cutoff = cutoff
# short wrapper around some super (dict) methods
self._dict_contains = lambda key: \
super(FuzzyDict,self).__contains__(key)
self._dict_getitem = lambda key: \
super(FuzzyDict,self).__getitem__(key)
def _search(self, lookfor, stop_on_first = False):
"""Returns the value whose key best matches lookfor
if stop_on_first is True then the method returns as soon
as it finds the first item
"""
# if the item is in the dictionary then just return it
if self._dict_contains(lookfor):
return True, lookfor, self._dict_getitem(lookfor), 1
# set up the fuzzy matching tool
ratio_calc = difflib.SequenceMatcher()
ratio_calc.set_seq1(lookfor)
# test each key in the dictionary
best_ratio = 0
best_match = None
best_key = None
for key in self:
# if the current key is not a string
# then we just skip it
try:
# set up the SequenceMatcher with other text
ratio_calc.set_seq2(key)
except TypeError:
continue
# we get an error here if the item to look for is not a
# string - if it cannot be fuzzy matched and we are here
# then it is definitely not in the dictionary
try:
# calculate the match value
ratio = ratio_calc.ratio()
except TypeError:
break
# if this is the best ratio so far - save it and the value
if ratio > best_ratio:
best_ratio = ratio
best_key = key
best_match = self._dict_getitem(key)
if stop_on_first and ratio >= self.cutoff:
break
return (
best_ratio >= self.cutoff,
best_key,
best_match,
best_ratio)
def __contains__(self, item):
"Overides Dictionary __contains__ to use fuzzy matching"
if self._search(item, True)[0]:
return True
else:
return False
def __getitem__(self, lookfor):
"Overides Dictionary __getitem__ to use fuzzy matching"
matched, key, item, ratio = self._search(lookfor)
if not matched:
raise KeyError(
"'%s'. closest match: '%s' with ratio %.3f"%
(str(lookfor), str(key), ratio))
return item
if __name__ == '__main__':
import unittest
class FuzzyTestCase(unittest.TestCase):
"Perform some tests"
test_dict = {
'Hiya' : 1,
u'hiy\xe4' : 2,
'test3' : 3,
1: 324}
def testCreation_Empty(self):
"Verify that not specifying any values creates an empty dictionary"
fd = FuzzyDict()
self.assertEquals(fd, {})
def testCreation_Dict(self):
"Test creating a fuzzy dict"
fd = FuzzyDict(self.test_dict)
self.assertEquals(fd, self.test_dict)
self.assertEquals(self.test_dict['Hiya'], fd['hiya'])
fd2 = FuzzyDict(self.test_dict, cutoff = .8)
self.assertEquals(fd, self.test_dict)
self.assertRaises(KeyError, fd2.__getitem__, 'hiya')
def testContains(self):
"Test checking if an item is in a FuzzyDict"
fd = FuzzyDict(self.test_dict)
self.assertEquals(True, fd.__contains__('hiya'))
self.assertEquals(True, fd.__contains__(u'test3'))
self.assertEquals(True, fd.__contains__(u'hiy\xe4'))
self.assertEquals(False, fd.__contains__('FuzzyWuzzy'))
self.assertEquals(True, fd.__contains__(1))
self.assertEquals(False, fd.__contains__(23))
def testGetItem(self):
"Test getting items from a FuzzyDict"
fd = FuzzyDict(self.test_dict)
self.assertEquals(self.test_dict["Hiya"], fd['hiya'])
self.assertRaises(KeyError, fd.__getitem__, 'FuzzyWuzzy')
fd2 = FuzzyDict(self.test_dict, cutoff = .14)
self.assertEquals(1, fd2['FuzzyWuzzy'])
self.assertEquals(324, fd2[1])
self.assertRaises(KeyError, fd2.__getitem__, 23)
unittest.main()
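# NOTE: the following is a minimal added sketch, not part of the original
# module. It shows the difflib ratio that FuzzyDict compares against its
# cutoff, using the strings from the docstring examples above.
import difflib

def match_ratio(lookfor, key):
    """Similarity ratio difflib reports for two strings (0.0 to 1.0)."""
    matcher = difflib.SequenceMatcher()
    matcher.set_seq1(lookfor)
    matcher.set_seq2(key)
    return matcher.ratio()

# match_ratio('Hi', 'Hiya')     -> 0.667, above the default cutoff of 0.6
# match_ratio('There', 'hello') -> 0.400, below it, so __getitem__ raises KeyError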
| dd-genomics-master | archived/fuzzy_string_dict.py |
import ddext
from ddext import SD
def init():
ddext.input('doc_id', 'text')
ddext.input('sent_id', 'int')
ddext.input('words', 'text[]')
ddext.input('lemmas', 'text[]')
ddext.input('poses', 'text[]')
ddext.input('ners', 'text[]')
ddext.returns('doc_id', 'text')
ddext.returns('sent_id', 'int')
ddext.returns('wordidxs', 'int[]')
ddext.returns('mention_id', 'text')
ddext.returns('type', 'text')
ddext.returns('entity', 'text')
ddext.returns('words', 'text[]')
ddext.returns('is_correct', 'boolean')
def run(doc_id, sent_id, words, lemmas, poses, ners):
if 'diseases' in SD:
trie = SD['trie']
diseases = SD['diseases']
diseases_bad = SD['diseases_bad']
genes = SD['genes']
delim_re = SD['delim_re']
else:
import os
APP_HOME = os.environ['DD_GENOMICS_HOME']
import re
diseases = {}
all_diseases = [x.strip().split('\t', 1) for x in open('%s/onto/data/all_diseases.tsv' % APP_HOME)]
diseases_en = set([x.strip() for x in open('%s/onto/data/all_diseases_en.tsv' % APP_HOME)])
diseases_en_good = set([x.strip() for x in open('%s/onto/manual/disease_en_good.tsv' % APP_HOME)])
diseases_bad = set([x.strip() for x in open('%s/onto/manual/disease_bad.tsv' % APP_HOME)])
SD['diseases_bad'] = diseases_bad
diseases_exclude = diseases_bad | diseases_en - diseases_en_good
delim_re = re.compile('[^\w-]+') # NOTE: this also removes apostrophe
SD['delim_re'] = delim_re
diseases_norm = {}
trie = {} # special key '$' means terminal nodes
for phrase, ids in all_diseases:
if phrase in diseases_exclude:
continue
diseases[phrase] = ids
phrase_norm = delim_re.sub(' ', phrase).strip()
# print phrase_norm
tokens = phrase_norm.split()
node = trie
for w in tokens:
if w not in node:
node[w] = {}
node = node[w]
if '$' not in node:
node['$'] = []
node['$'].append((ids, phrase))
if phrase_norm not in diseases_norm:
diseases_norm[phrase_norm] = ids
else:
diseases_norm[phrase_norm] = '|'.join(sorted(set(ids.split('|')) | set(diseases_norm[phrase_norm].split('|'))))
SD['diseases'] = diseases
SD['trie'] = trie
genes = set()
for line in open('%s/onto/data/genes.tsv' % APP_HOME):
#plpy.info(line)
name, synonyms, full_names = line.strip(' \r\n').split('\t')
synonyms = set(synonyms.split('|'))
genes.add(name.lower())
for s in synonyms:
genes.add(s.lower())
SD['genes'] = genes
# TODO: currently we do ignore-case exact match for single words; consider stemming.
# TODO: currently we do exact phrase matches; consider emitting partial matches.
for i in xrange(len(words)):
word = words[i]
iword = word.lower()
# single-token mention
if iword in diseases:
truth = True
mtype = 'ONE'
# http://www.ncbi.nlm.nih.gov/pubmed/23271346
# SCs for Stem Cells
# HFs for hair follicles
if word[-1] == 's' and word[:-1].isupper():
truth = False
mtype = 'PLURAL'
elif iword in genes:
truth = None
mtype = 'GSYM'
entity = diseases[iword] + ' ' + iword
mid = '%s_%s_%d_1' % (doc_id, sent_id, i)
yield doc_id, sent_id, [i], mid, mtype, entity, [word], truth
# multi-token mentions
node = trie
depth = 0
for j in xrange(i, len(words)):
word = words[j]
iword = word.lower()
sword = delim_re.sub(' ', iword).strip()
if not sword:
if j == i:
break
continue
if sword in node:
node = node[sword]
depth += 1
if '$' in node and depth > 1:
for ids, phrase in node['$']:
if phrase in diseases_bad:
continue
entity = ids + ' ' + phrase
mid = '%s_%s_%d_%d' % (doc_id, sent_id, i, j - i + 1)
wordids = range(i, j + 1)
yield doc_id, sent_id, wordids, mid, 'PHRASE', entity, words[i: j + 1], True
else:
break
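# NOTE: the following is a minimal added sketch, not part of the original
# extractor. It illustrates the '$'-terminated trie built in run() above;
# the phrase ids ('ID1', 'ID2') are made up for illustration.
def build_trie(phrases):
    """phrases: iterable of (ids, normalized phrase) pairs."""
    trie = {}
    for ids, phrase in phrases:
        node = trie
        for token in phrase.split():
            node = node.setdefault(token, {})
        node.setdefault('$', []).append((ids, phrase))
    return trie

# build_trie([('ID1', 'hair follicle tumor'), ('ID2', 'hair loss')])
# -> {'hair': {'follicle': {'tumor': {'$': [('ID1', 'hair follicle tumor')]}},
#              'loss': {'$': [('ID2', 'hair loss')]}}}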
| dd-genomics-master | archived/v1/code/pheno_mentions.py |
import ddext
def init():
ddext.input('doc_id', 'text')
ddext.input('sent_id', 'int')
ddext.input('words', 'text[]')
ddext.input('lemmas', 'text[]')
ddext.input('poses', 'text[]')
ddext.input('ners', 'text[]')
ddext.input('dep_paths', 'text[]')
ddext.input('dep_parents', 'int[]')
ddext.input('wordidxs', 'int[]')
ddext.input('relation_id', 'text')
ddext.input('wordidxs_1', 'int[]')
ddext.input('wordidxs_2', 'int[]')
ddext.returns('doc_id', 'text')
ddext.returns('relation_id', 'text')
ddext.returns('feature', 'text')
def run(doc_id, sent_id, words, lemmas, poses, ners, dep_paths, dep_parents, wordidxs, relation_id, wordidxs_1, wordidxs_2):
try:
import ddlib
except:
import os
DD_HOME = os.environ['DEEPDIVE_HOME']
from sys import path
path.append('%s/ddlib' % DD_HOME)
import ddlib
obj = dict()
obj['lemma'] = []
obj['words'] = []
obj['ner'] = []
obj['pos'] = []
obj['dep_graph'] = []
for i in xrange(len(words)):
obj['lemma'].append(lemmas[i])
obj['words'].append(words[i])
obj['ner'].append(ners[i])
obj['pos'].append(poses[i])
obj['dep_graph'].append(
str(int(dep_parents[i])) + "\t" + dep_paths[i] + "\t" + str(i))
word_obj_list = ddlib.unpack_words(
obj, lemma='lemma', pos='pos', ner='ner', words='words', dep_graph='dep_graph')
gene_span = ddlib.get_span(wordidxs_1[0], len(wordidxs_1))
pheno_span = ddlib.get_span(wordidxs_2[0], len(wordidxs_2))
features = set()
for feature in ddlib.get_generic_features_relation(word_obj_list, gene_span, pheno_span):
features.add(feature)
for feature in features:
yield doc_id, relation_id, feature
| dd-genomics-master | archived/v1/code/pair_features.py |
import ddext
from ddext import SD
def init():
ddext.input('doc_id', 'text')
ddext.input('sent_id_1', 'int')
ddext.input('mention_id_1', 'text')
ddext.input('wordidxs_1', 'int[]')
ddext.input('words_1', 'text[]')
ddext.input('entity_1', 'text')
ddext.input('type_1', 'text')
ddext.input('correct_1', 'boolean')
ddext.input('sent_id_2', 'int')
ddext.input('mention_id_2', 'text')
ddext.input('wordidxs_2', 'int[]')
ddext.input('words_2', 'text[]')
ddext.input('entity_2', 'text')
ddext.input('type_2', 'text')
ddext.input('correct_2', 'boolean')
ddext.returns('doc_id', 'text')
ddext.returns('sent_id_1', 'int')
ddext.returns('sent_id_2', 'int')
ddext.returns('relation_id', 'text')
ddext.returns('type', 'text')
ddext.returns('mention_id_1', 'text')
ddext.returns('mention_id_2', 'text')
ddext.returns('wordidxs_1', 'int[]')
ddext.returns('wordidxs_2', 'int[]')
ddext.returns('words_1', 'text[]')
ddext.returns('words_2', 'text[]')
ddext.returns('entity_1', 'text')
ddext.returns('entity_2', 'text')
ddext.returns('is_correct', 'boolean')
def run(doc_id, sent_id_1, mention_id_1, wordidxs_1, words_1, entity_1, mtype_1, correct_1, sent_id_2, mention_id_2, wordidxs_2, words_2, entity_2, mtype_2, correct_2):
if 'pos_pairs' in SD:
pos_pairs = SD['pos_pairs']
else:
import os
APP_HOME = os.environ['DD_GENOMICS_HOME']
pos_pairs = set()
gpheno = [x.strip().split('\t') for x in open('%s/onto/data/hpo_phenotype_genes.tsv' % APP_HOME)]
gdisease = [x.strip().split('\t') for x in open('%s/onto/data/hpo_disease_genes.tsv' % APP_HOME)]
for pheno, gene in gpheno + gdisease:
pos_pairs.add((gene, pheno))
SD['pos_pairs'] = pos_pairs
rid = '%s_%s_g%s_p%s' % (doc_id, sent_id_1,
'%d:%d' % (wordidxs_1[0], wordidxs_1[-1]),
'%d:%d' % (wordidxs_2[0], wordidxs_2[-1]),
)
truth = None
if correct_1 and correct_2:
gene = entity_1
for pheno in entity_2.split()[0].split('|'):
if (gene, pheno) in pos_pairs:
truth = True
elif correct_1 is False or correct_2 is False:
truth = False
yield (doc_id,
sent_id_1,
sent_id_2,
rid,
None,
mention_id_1,
mention_id_2,
wordidxs_1,
wordidxs_2,
words_1,
words_2,
entity_1,
entity_2,
truth
)
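# NOTE: the following is a minimal added sketch, not part of the original
# extractor. It restates the distant-supervision rule used for 'truth' above;
# the gene and phenotype identifiers are made up.
def supervise_pair(gene, pheno_ids, correct_1, correct_2, pos_pairs):
    """True if both mentions are correct and a known (gene, pheno) pair matches,
    False if either mention is known to be incorrect, None otherwise."""
    if correct_1 and correct_2:
        for pheno in pheno_ids:
            if (gene, pheno) in pos_pairs:
                return True
        return None
    if correct_1 is False or correct_2 is False:
        return False
    return None

# supervise_pair('GENE_A', ['HP:X1'], True, True, {('GENE_A', 'HP:X1')}) -> True
# supervise_pair('GENE_A', ['HP:X1'], True, None, set())                 -> None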
| dd-genomics-master | archived/v1/code/gene_pheno_pairs.py |
#! /usr/bin/env python3
#
# This script takes approved symbols, alternate symbols, and approved long names
# from the three dictionaries of genes we currently have, and tries to obtain a
# single dictionary that contains the union of the information available.
#
# The output is a TSV file where the first column is the approved symbols, the
# second column is a list of alternate symbols (separated by '|'), and the
# third is a list of possible long names (separated by '|').
#
import sys
from helper.easierlife import BASE_DIR
HUGO_alternate_symbolS_FILE = BASE_DIR + "/dicts/hugo_synonyms.tsv"
HGNC_APPROVED_NAMES_FILE = BASE_DIR + "/dicts/HGNC_approved_names.txt"
GENES_PHARM_FILE = BASE_DIR + "/dicts/genes_pharm.tsv"
alternate_symbols = dict()
alternate_symbols_inverted = dict()
long_names = dict()
with open(HUGO_alternate_symbolS_FILE, 'rt') as hugo_file:
for line in hugo_file:
tokens = line.rstrip().split("\t")
symbol = tokens[0]
if symbol not in alternate_symbols:
alternate_symbols[symbol] = set()
alternate_symbols[symbol].add(symbol)
alternate_symbols_inverted[symbol] = set([symbol,])
if len(tokens) > 1:
for alternate_symbol in tokens[1].split(","):
if alternate_symbol not in alternate_symbols:
alternate_symbols[alternate_symbol] = set()
alternate_symbols[alternate_symbol].add(symbol)
alternate_symbols_inverted[symbol].add(alternate_symbol)
long_names[symbol] = set()
with open(HGNC_APPROVED_NAMES_FILE, 'rt') as hgnc_file:
for line in hgnc_file:
tokens = line.rstrip().split("\t")
symbol = tokens[0]
if symbol.endswith("withdrawn"):
# TODO XXX (Matteo): or should we take care of withdrawn symbols?
continue
#sys.stderr.write("symbol:{}\n".format(symbol))
hgnc_alternate_symbols = set([x.strip() for x in tokens[1].split(",")])
if symbol in alternate_symbols and symbol in alternate_symbols[symbol]:
# the symbol is a 'main' symbol from hugo, use it as it is.
pass
elif symbol in alternate_symbols and symbol not in alternate_symbols[symbol]:
# the symbol is not a main symbol from hugo, use one of its main
# symbols (the first when the set is converted to list) as main symbol.
# XXX (Matteo) There's no reason to choose the "first", we could
# choose any.
new_symbol = list(alternate_symbols[symbol])[0]
hgnc_alternate_symbols.discard(new_symbol)
hgnc_alternate_symbols.add(symbol)
symbol = new_symbol
elif symbol not in alternate_symbols:
# the symbol did not appear at all in hugo
found_new_symbol = False
for candidate in hgnc_alternate_symbols:
if candidate in alternate_symbols and candidate in alternate_symbols[candidate]:
# we found a symbol that is an alternate symbol in hgnc and
# a main symbol in hugo. Elect it as main symbol for this entry
new_symbol = candidate
hgnc_alternate_symbols.discard(new_symbol)
hgnc_alternate_symbols.add(symbol)
symbol = new_symbol
found_new_symbol = True
break
if not found_new_symbol:
for candidate in hgnc_alternate_symbols:
if candidate in alternate_symbols and \
candidate not in alternate_symbols[candidate]:
# we found a symbol that is an alternate symbol in hgnc
# and an alternate symbol in hugo. Elect the 'first' main
# symbol for this candidate as main symbol for this
# entry
new_symbol = list(alternate_symbols[candidate])[0]
hgnc_alternate_symbols.discard(new_symbol)
hgnc_alternate_symbols.add(symbol)
symbol = new_symbol
found_new_symbol = True
break
if not found_new_symbol:
# the symbol is not in hugo at all and none of its alternate in
# hgnc appears in hugo either. This is a completely new symbol
alternate_symbols[symbol] = set([symbol,])
alternate_symbols_inverted[symbol] = set([symbol,])
long_names[symbol] = set()
else:
sys.stderr.write("WE SHOULDN'T BE HERE\n")
for alternate_symbol in hgnc_alternate_symbols:
if len(alternate_symbol) == 0:
continue
if alternate_symbol not in alternate_symbols:
alternate_symbols[alternate_symbol] = set([symbol,])
alternate_symbols_inverted[symbol].add(alternate_symbol)
# TODO XXX (Matteo) the format of the long names in hgnc is weird
long_names[symbol].add(tokens[2])
with open(GENES_PHARM_FILE, 'rt') as pharm_file:
## skip header
pharm_file.readline()
for line in pharm_file:
tokens = line.rstrip().split("\t")
symbol = tokens[4]
if symbol.endswith("withdrawn"):
# TODO XXX (Matteo): or should we take care of withdrawn symbols?
continue
pharm_alternate_symbols_tokens = tokens[6].split("\"")
pharm_alternate_symbols = set()
for token in pharm_alternate_symbols_tokens:
if token != "," and len(token) > 0:
pharm_alternate_symbols.add(token)
if symbol in alternate_symbols and symbol in alternate_symbols[symbol]:
# the symbol is a 'main' symbol from hugo/hgnc, use it as it is.
pass
elif symbol in alternate_symbols and symbol not in alternate_symbols[symbol]:
# the symbol is not a main symbol from hugo/hgnc, use one of its main
# symbols (the first when the set is converted to list) as main symbol.
# XXX (Matteo) There's no reason to choose the "first", we could
# choose any.
new_symbol = list(alternate_symbols[symbol])[0]
pharm_alternate_symbols.discard(new_symbol)
pharm_alternate_symbols.add(symbol)
symbol = new_symbol
elif symbol not in alternate_symbols:
# the symbol did not appear at all in hugo/hgnc
found_new_symbol = False
for candidate in pharm_alternate_symbols:
if candidate in alternate_symbols and candidate in alternate_symbols[candidate]:
# we found a symbol that is an alternate symbol in pharm and
# a main symbol in hugo/hgnc. Elect it as main symbol for this entry
new_symbol = candidate
pharm_alternate_symbols.discard(new_symbol)
pharm_alternate_symbols.add(symbol)
symbol = new_symbol
found_new_symbol = True
break
if not found_new_symbol:
for candidate in pharm_alternate_symbols:
if candidate in alternate_symbols and \
candidate not in alternate_symbols[candidate]:
# we found a symbol that is an alternate symbol in
# pharm and an alternate symbol in hugo/hgnc. Elect the
# 'first' main symbol for this candidate as main symbol
# for this entry
new_symbol = list(alternate_symbols[candidate])[0]
pharm_alternate_symbols.discard(new_symbol)
pharm_alternate_symbols.add(symbol)
symbol = new_symbol
found_new_symbol = True
break
if not found_new_symbol:
# the symbol is not in hugo/hgnc at all and none of its alternate in
# pharm appears in hugo/hgnc either. This is a completely new symbol
alternate_symbols[symbol] = set([symbol,])
alternate_symbols_inverted[symbol] = set([symbol,])
long_names[symbol] = set()
else:
sys.stderr.write("WE SHOULDN'T BE HERE\n")
for alternate_symbol in pharm_alternate_symbols:
if len(alternate_symbol) == 0:
continue
if alternate_symbol not in alternate_symbols:
alternate_symbols[alternate_symbol] = set([symbol,])
alternate_symbols_inverted[symbol].add(alternate_symbol)
name = tokens[3]
long_names[symbol].add(name)
pharm_alternate_names_tokens = tokens[5].split("\"")
for token in pharm_alternate_names_tokens:
if token != "," and len(token) > 0 and token not in long_names[symbol]:
long_names[symbol].add(token)
for symbol in alternate_symbols:
if len(alternate_symbols[symbol]) > 1:
sys.stderr.write("WARNING: duplicate symbol {}: present as (alternate) symbol of {}\n".format(symbol, ", ".join(alternate_symbols[symbol])))
for symbol in alternate_symbols_inverted:
assert symbol in long_names
alternate_symbols_inverted[symbol].discard(symbol)
print("{}\t{}\t{}".format(symbol,
"|".join(alternate_symbols_inverted[symbol]),
"|".join(long_names[symbol])))
| dd-genomics-master | archived/v0/dicts/merge_gene_dicts.py |
#! /usr/bin/env python3
#
# Look for acronyms defined in a document that look like gene symbols
import fileinput
from dstruct.Sentence import Sentence
from helper.dictionaries import load_dict
from helper.easierlife import get_dict_from_TSVline, list2TSVarray, no_op, \
TSVstring2list
# Return acronyms from sentence
def extract(sentence):
acronyms = []
# First method: Look for a sentence that starts with "Abbreviations"
if len(sentence.words) > 2 and \
sentence.words[0].word.casefold() == "abbreviations" and \
(sentence.words[1].word.casefold() == ":" or
sentence.words[1].word.casefold() == "used"):
words = [x.word for x in sentence.words]
index = 2
while index < len(words):
acronym = dict()
acronym["acronym"] = words[index]
# There are a lot of typos and different separators used in the
# text, this is obviously a best effort ...
try:
comma_index = words.index(",", index + 1)
except:
comma_index = len(words)
try:
semicolon_index = words.index(";", index + 1)
except:
semicolon_index = len(words)
try:
colon_index = words.index(":", index + 1)
except:
colon_index = len(words)
definition_start = min(
[comma_index, semicolon_index, colon_index]) + 1
if definition_start > len(words):
definition_start = index + 1
try:
definition_end = words.index(";", definition_start + 1)
except:
if words[-1] == ".":
definition_end = len(words) - 1
else:
definition_end = len(words)
definition = " ".join(words[definition_start:definition_end])
if words[index] not in merged_genes_dict or \
words[index] in inverted_long_names:
index = definition_end + 1
continue
# If we didn't find a definition, give up
if definition.strip() == "":
index = definition_end + 1
continue
acronym["doc_id"] = sentence.doc_id
acronym["sent_id"] = sentence.sent_id
acronym["word_idx"] = sentence.words[index].in_sent_idx
acronym["definition"] = definition
acronyms.append(acronym)
index = definition_end + 1
else:
# Second method: find 'A Better Example (ABE)' type of definitions.
# Skip first and last word of sentence, to allow for "(" and ")".
for word in sentence.words[1:-1]:
acronym = None
# Look for definition only if
# - this word is in the genes dictionary AND
# - is uppercase AND
# - it only contains letters AND
# - it has length at least 2 AND
# - it comes between "(" and one of ")", ";", ",", "]", or between "[" and "]"
if word.word in merged_genes_dict and \
word.word not in inverted_long_names and \
word.word.isupper() and word.word.isalpha() and \
len(word.word) >= 2 and \
((sentence.words[word.in_sent_idx - 1].word == "(" and
sentence.words[word.in_sent_idx + 1].word in [
")", ";" ",", "]"]) or
(sentence.words[word.in_sent_idx - 1].word == "[" and
sentence.words[word.in_sent_idx + 1].word == "]")):
word_idx = word.in_sent_idx
window_size = len(word.word)
# Look for a sequence of words coming before this one whose
# initials would create this acronym
start_idx = 0
while start_idx + window_size - 1 < word_idx:
window_words = sentence.words[start_idx:(start_idx +
window_size)]
is_definition = True
for window_index in range(window_size):
if window_words[window_index].word[0].lower() != \
word.word[window_index].lower():
is_definition = False
break
definition = " ".join([w.word for w in window_words])
# Only consider this acronym if the definition is valid
if is_definition:
acronym = dict()
acronym["acronym"] = word.word
acronym["definition"] = definition
acronyms.append(acronym)
break
start_idx += 1
return acronyms
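# NOTE: the following is a minimal added sketch, not part of this extractor.
# It isolates the initials test used by the second method above; the example
# strings are made up.
def initials_match(candidate_words, acronym):
    """True when the first letters of candidate_words spell out the acronym."""
    if len(candidate_words) != len(acronym):
        return False
    return all(w[0].lower() == c.lower()
               for w, c in zip(candidate_words, acronym))

# initials_match(["A", "Better", "Example"], "ABE") -> True
# initials_match(["A", "Worse", "Example"], "ABE")  -> False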
# Load the genes dictionary
merged_genes_dict = load_dict("merged_genes")
inverted_long_names = load_dict("inverted_long_names")
if __name__ == "__main__":
# Process the input
with fileinput.input() as input_files:
for line in input_files:
# Parse the TSV line
line_dict = get_dict_from_TSVline(
line,
["doc_id", "sent_ids", "wordidxss", "wordss", "posess",
"nerss", "lemmass", "dep_pathss", "dep_parentss",
"bounding_boxess"],
[no_op, lambda x: TSVstring2list(x, int),
lambda x: TSVstring2list(x,sep='!~!'),
lambda x: TSVstring2list(x,sep='!~!'),
lambda x: TSVstring2list(x,sep='!~!'),
lambda x: TSVstring2list(x,sep='!~!'),
lambda x: TSVstring2list(x,sep='!~!'),
lambda x: TSVstring2list(x,sep='!~!'),
lambda x: TSVstring2list(x,sep='!~!'),
lambda x: TSVstring2list(x,sep='!~!')])
# Acronyms defined in the document
acronyms = dict()
for idx in range(len(line_dict["sent_ids"])):
wordidxs = TSVstring2list(line_dict["wordidxss"][idx], int)
words = TSVstring2list(line_dict["wordss"][idx])
poses = TSVstring2list(line_dict["posess"][idx])
ners = TSVstring2list(line_dict["nerss"][idx])
lemmas = TSVstring2list(line_dict["lemmass"][idx])
dep_paths = TSVstring2list(line_dict["dep_pathss"][idx])
dep_parents = TSVstring2list(line_dict["dep_parentss"][idx],
int)
bounding_boxes = TSVstring2list(line_dict["bounding_boxess"][idx])
# Create the Sentence object
sentence = Sentence(
line_dict["doc_id"], line_dict["sent_ids"][idx], wordidxs,
words, poses, ners, lemmas, dep_paths, dep_parents,
bounding_boxes)
# Extract the acronyms from the sentence
sen_acronyms = extract(sentence)
for acronym in sen_acronyms:
if acronym["acronym"] not in acronyms:
acronyms[acronym["acronym"]] = set()
acronyms[acronym["acronym"]].add(acronym["definition"])
# Classify the acronyms
for acronym in acronyms:
contains_kw = False
is_correct = None
for definition in acronyms[acronym]:
# If the definition is in the gene dictionary, supervise as
# correct
if definition in merged_genes_dict:
is_correct = True
break
else:
# Check if the definition contains some keywords that
# make us suspect that it is probably a gene/protein.
# This list is incomplete, and it would be good to add
# to it.
if contains_kw:
continue
for word in definition.split():
if word.endswith("ase") and len(word) > 5:
contains_kw = True
break
if " gene" in definition or \
"protein" in definition or \
"factor" in definition or \
"ligand" in definition or \
"enzyme" in definition or \
"receptor" in definition or \
"pseudogene" in definition:
contains_kw = True
# If no significant keyword in any definition, supervise as not
# correct
if not contains_kw and not is_correct:
is_correct = False
is_correct_str = "\\N"
if is_correct is not None:
is_correct_str = is_correct.__repr__()
print("\t".join(
(line_dict["doc_id"], acronym,
list2TSVarray(list(acronyms[acronym]), quote=True),
is_correct_str)))
| dd-genomics-master | archived/v0/code/ext_gene_find_acronyms.py |
#! /usr/bin/env python3
#
# Extract, add features to, and supervise mentions extracted from geneRifs.
#
import fileinput
from dstruct.Sentence import Sentence
from extract_gene_mentions import extract
from helper.easierlife import get_dict_from_TSVline, TSVstring2list, no_op
from helper.dictionaries import load_dict
if __name__ == "__main__":
# Load the merged genes dictionary
merged_genes_dict = load_dict("merged_genes")
# Process the input
with fileinput.input() as input_files:
for line in input_files:
# Parse the TSV line
line_dict = get_dict_from_TSVline(
line, ["doc_id", "sent_id", "wordidxs", "words", "gene"],
[no_op, int, lambda x: TSVstring2list(x, int), TSVstring2list,
no_op])
# Create the Sentence object
null_list = [None, ] * len(line_dict["wordidxs"])
sentence = Sentence(
line_dict["doc_id"], line_dict["sent_id"],
line_dict["wordidxs"], line_dict["words"], null_list,
null_list, null_list, null_list, null_list, null_list)
# This is the 'labelled' gene that we know is in the sentence
gene = line_dict["gene"]
# Get the main symbol (or list of symbols) for the labelled gene
if gene in merged_genes_dict:
gene = merged_genes_dict[gene]
else:
gene = [gene, ]
# Skip sentences that are "( GENE )", as they give no info about
# anything.
if (sentence.words[0].word == "-LRB-" and
sentence.words[-1].word == "-RRB-") or \
(sentence.words[0].word == "-LSB-" and
sentence.words[-1].word == "-RSB-"):
continue
# Extract mentions from sentence. This also adds the features
mentions = extract(sentence)
# Find the candidate(s) containing the "labelled" gene either in
# the words or in the entity, and supervise as True and print.
not_main_mentions = []
for mention in mentions:
mention.type = "GENERIFS"
for g in gene:
# If we find the labelled symbol in the words of the
# candidate, supervise as true and print
if g in mention.words[0].word or \
g in mention.entity.split("|"):
mention.is_correct = True
print(mention.tsv_dump())
break
| dd-genomics-master | archived/v0/code/ext_geneRifs_candidates.py |
#! /usr/bin/env python3
#
# Map phenotype abnormalities entities to mentions
import sys
from nltk.stem.snowball import SnowballStemmer
from helper.dictionaries import load_dict
ORDINALS = frozenset(
["1st", "2nd", "3rd", "4th", "5th", "6th", "7th", "8th", "9th", "first",
"second", "third", "fourth", "fifth", "sixth", "seventh", "eighth",
"ninth"])
def main():
# Load the dictionaries we need
stopwords_dict = load_dict("stopwords")
hpoterm_phenotype_abnormalities = load_dict(
"hpoterm_phenotype_abnormalities")
# Load the stemmer from NLTK
stemmer = SnowballStemmer("english")
if len(sys.argv) != 2:
sys.stderr.write("USAGE: {} DICT\n".format(sys.argv[0]))
sys.exit(1)
with open(sys.argv[1], 'rt') as dict_file:
for line in dict_file:
# Skip empty lines
if line.strip() == "":
continue
hpo_id, name, definition = line.strip().split("\t")
# Skip if this is not a phenotypic abnormality
if hpo_id not in hpoterm_phenotype_abnormalities:
continue
tokens = name.split()
if len(tokens) == 1:
name_stems = [tokens[0].casefold(), ]
else:
# Compute the stems of the name
name_stems = set()
for word in tokens:
# Remove parenthesis and commas and colons
if word[0] == "(":
word = word[1:]
if word[-1] == ")":
word = word[:-1]
if word[-1] == ",":
word = word[:-1]
if word[-1] == ":":
word = word[:-1]
# Only process words that are neither stopwords nor ordinals, plus single letters
if (word.casefold() not in stopwords_dict and word not in
ORDINALS) or len(word) == 1:
# split words that contain a "/"
if word.find("/") != - 1:
for part in word.split("/"):
name_stems.add(stemmer.stem(part))
else:
name_stems.add(stemmer.stem(word))
print("\t".join([hpo_id, name, "|".join(name_stems)]))
if __name__ == "__main__":
sys.exit(main())
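# NOTE: the following is a minimal added sketch, separate from the script
# above. It shows the stemming step on a made-up term name with a tiny
# stand-in stopword set.
from nltk.stem.snowball import SnowballStemmer

def name_stems(name, stopwords=frozenset(["of", "the"])):
    """Return the set of stems of the non-stopword tokens of a term name."""
    stemmer = SnowballStemmer("english")
    return set(stemmer.stem(w) for w in name.split()
               if w.casefold() not in stopwords)

# name_stems("Abnormality of the kidneys") -> e.g. {'abnorm', 'kidney'}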
| dd-genomics-master | archived/v0/code/hpoterms2mentions.py |
#! /usr/bin/env python3
#
# Extract gene mention candidates and perform distant supervision
#
import fileinput
from dstruct.Mention import Mention
from dstruct.Sentence import Sentence
from helper.dictionaries import load_dict
from helper.easierlife import get_all_phrases_in_sentence, \
get_dict_from_TSVline, TSVstring2list, no_op
DOC_ELEMENTS = frozenset(
["figure", "table", "figures", "tables", "fig", "fig.", "figs", "figs.",
"file", "movie"])
INDIVIDUALS = frozenset(["individual", "individuals", "patient"])
TYPES = frozenset(["group", "type", "class", "method"])
# Load the dictionaries that we need
merged_genes_dict = load_dict("merged_genes")
inverted_long_names = load_dict("inverted_long_names")
hpoterms_with_gene = load_dict("hpoterms_with_gene")
english_dict = load_dict("english")
# Max mention length. We won't look at subsentences longer than this.
max_mention_length = 0
for key in merged_genes_dict:
length = len(key.split())
if length > max_mention_length:
max_mention_length = length
# doubling to take into account commas and who knows what
max_mention_length *= 2
# Supervise the candidates.
def supervise(mentions, sentence):
phrase = " ".join([x.word for x in sentence.words])
new_mentions = []
for mention in mentions:
new_mentions.append(mention)
if mention.is_correct is not None:
continue
# The candidate is a long name.
if " ".join([word.word for word in mention.words]) in \
inverted_long_names:
mention.is_correct = True
mention.type = "GENE_SUP_long"
continue
# The candidate is a MIM entry
if mention.words[0].word == "MIM":
mention_word_idx = mention.words[0].in_sent_idx
if mention_word_idx < len(sentence.words) - 1:
next_word = sentence.words[mention_word_idx + 1].word
if next_word.casefold() in ["no", "no.", "#", ":"] and \
mention_word_idx + 2 < len(sentence.words):
next_word = sentence.words[mention_word_idx + 2].word
try:
int(next_word)
mention.is_correct = False
mention.type = "GENE_SUP_MIM"
continue
except ValueError:
pass
# The candidate is an entry in Gene Ontology
if len(mention.words) == 1 and mention.words[0].word == "GO":
try:
if sentence.words[mention.words[0].in_sent_idx + 1][0] == ":":
mention.is_correct = False
mention.type = "GENE_SUP_go"
except:
pass
continue
# The phrase starts with words that are indicative of the candidate not
# being a mention of a gene
# We add a feature for this, as it is a context property
if phrase.startswith("Performed the experiments :") or \
phrase.startswith("Wrote the paper :") or \
phrase.startswith("W'rote the paper :") or \
phrase.startswith("Wlrote the paper") or \
phrase.startswith("Contributed reagents") or \
phrase.startswith("Analyzed the data :") or \
phrase.casefold().startswith("address"):
# An unsupervised copy with the special feature
# unsuper_enriched = Mention(
# "GENE_dontsup", mention.entity, mention.words)
# unsuper_enriched.features = mention.features.copy()
# unsuper_enriched.add_feature("IN_CONTRIB_PHRASE")
# new_mentions.append(unsuper_enriched)
# This candidate contain only the 'special' feature.
# super_spec = Mention(
# "GENE_SUP_contr_2", mention.entity, mention.words)
# super_spec.is_correct = False
# super_spec.add_feature("IN_CONTRIB_PHRASE")
# new_mentions.append(super_spec)
# Set is_correct and type.
mention.is_correct = False
mention.type = "GENE_SUP_contr_1"
continue
# Index of the word on the left
idx = mention.wordidxs[0] - 1
if idx >= 0:
# The candidate is preceded by a "%" (it's probably a quantity)
if sentence.words[idx].word == "%":
mention.is_correct = False
mention.type = "GENE_SUP_%"
continue
# The candidate comes after a "document element" (e.g., table, or
# figure)
if sentence.words[idx].word.casefold() in DOC_ELEMENTS:
mention.is_correct = False
mention.type = "GENE_SUP_doc"
continue
# The candidate comes after an "individual" word (e.g.,
# "individual")
if sentence.words[idx].word.casefold() in INDIVIDUALS and \
not mention.words[0].word.isalpha() and \
not len(mention.words[0].word) > 4:
mention.is_correct = False
mention.type = "GENE_SUP_indiv"
continue
# The candidate comes after a "type" word, and it is made only of
# the letters "I" and "V"
if sentence.words[idx].lemma.casefold() in TYPES and \
set(mention.words[0].word).issubset(set(["I", "V"])):
mention.is_correct = False
mention.type = "GENE_SUP_type"
continue
# Index of the word on the right
idx = mention.wordidxs[-1] + 1
if idx < len(sentence.words):
# The candidate is followed by a "=" (it's probably a quantity)
if sentence.words[idx].word == "=":
mention.is_correct = False
mention.type = "GENE_SUP_="
continue
# The candidate is followed by a ":" and the word after it is a
# number (it's probably a quantity)
if sentence.words[idx].word == ":":
try:
float(sentence.words[idx + 1].word)
mention.is_correct = False
mention.type = "GENE_SUP_:"
except: # both ValueError and IndexError
pass
continue
# The candidate comes before "et"
if sentence.words[idx].word == "et":
mention.is_correct = False
mention.type = "GENE_SUP_et"
continue
# The candidate is a DNA triplet
# We check this by looking at whether the word before or after is also
# a DNA triplet.
if len(mention.words) == 1 and len(mention.words[0].word) == 3 and \
set(mention.words[0].word) <= set("ACGT"):
done = False
idx = mention.wordidxs[0] - 1
if idx > 0:
if set(sentence.words[idx].word) <= set("ACGT"):
mention.is_correct = False
mention.type = "GENE_SUP_dna"
continue
idx = mention.wordidxs[-1] + 1
if not done and idx < len(sentence.words):
if set(sentence.words[idx].word) <= set("ACGT"):
mention.is_correct = False
mention.type = "GENE_SUP_dna"
continue
# If it's "II", it's most probably wrong.
if mention.words[0].word == "II":
mention.is_correct = False
mention.type = "GENE_SUP_ii"
continue
# The candidate comes after an organization, or a location, or a
# person. We skip commas as they may trick us.
comes_after = None
loc_idx = mention.wordidxs[0] - 1
while loc_idx >= 0 and sentence.words[loc_idx].lemma == ",":
loc_idx -= 1
if loc_idx >= 0 and \
sentence.words[loc_idx].ner in \
["ORGANIZATION", "LOCATION", "PERSON"] and \
sentence.words[loc_idx].word not in merged_genes_dict:
comes_after = sentence.words[loc_idx].ner
# The candidate comes before an organization, or a location, or a
# person. We skip commas, as they may trick us.
comes_before = None
loc_idx = mention.wordidxs[-1] + 1
while loc_idx < len(sentence.words) and \
sentence.words[loc_idx].lemma == ",":
loc_idx += 1
if loc_idx < len(sentence.words) and sentence.words[loc_idx].ner in \
["ORGANIZATION", "LOCATION", "PERSON"] and \
sentence.words[loc_idx].word not in merged_genes_dict:
comes_before = sentence.words[loc_idx].ner
# Not correct if it's most probably a person name.
if comes_before and comes_after:
mention.is_correct = False
mention.type = "GENE_SUP_name"
continue
# Comes after person and before "," or ":", so it's probably a person
# name
if comes_after == "PERSON" and \
mention.words[-1].in_sent_idx + 1 < len(sentence.words) and \
sentence.words[mention.words[-1].in_sent_idx + 1].word \
in [",", ":"]:
mention.is_correct = False
mention.type = "GENE_SUP_name2"
continue
if comes_after == "PERSON" and mention.words[0].ner == "PERSON":
mention.is_correct = False
mention.type = "GENE_SUP_name3"
continue
# Is a location and comes before a location so it's probably wrong
if comes_before == "LOCATION" and mention.words[0].ner == "LOCATION":
mention.is_correct = False
mention.type = "GENE_SUP_loc"
continue
return new_mentions
# Return a list of mention candidates extracted from the sentence
def extract(sentence):
mentions = []
# Skip the sentence if there are no English words in the sentence
no_english_words = True
for word in sentence.words:
if len(word.word) > 2 and \
(word.word in english_dict or
word.word.casefold() in english_dict):
no_english_words = False
break
if no_english_words:
return [] # Stop iteration
sentence_is_upper = False
if " ".join([x.word for x in sentence.words]).isupper():
sentence_is_upper = True
# The following set keeps a list of indexes we already looked at and which
# contained a mention
history = set()
words = sentence.words
# Scan all subsequences of the sentence of length up to max_mention_length
for start, end in get_all_phrases_in_sentence(sentence,
max_mention_length):
if start in history or end in history:
continue
phrase = " ".join([word.word for word in words[start:end]])
if sentence_is_upper: # XXX This may not be a great idea...
phrase = phrase.casefold()
mention = None
# If the phrase is a hpoterm name containing a gene, then it is a
# mention candidate to supervise as negative
if phrase in hpoterms_with_gene:
mention = Mention("GENE_SUP_HPO", phrase, words[start:end])
mention.is_correct = False
mentions.append(mention)
for i in range(start, end):
history.add(i)
# If the phrase is in the gene dictionary, then is a mention candidate
if len(phrase) > 1 and phrase in merged_genes_dict:
# The entity is a list of all the main symbols that could have the
# phrase as symbol. They're separated by "|".
mention = Mention("GENE",
"|".join(merged_genes_dict[phrase]),
words[start:end])
# Add mention to the list
mentions.append(mention)
# Add indexes to history so that they are not used for another
# mention
for i in range(start, end):
history.add(i)
return mentions
if __name__ == "__main__":
# Process the input
with fileinput.input() as input_files:
for line in input_files:
# Parse the TSV line
line_dict = get_dict_from_TSVline(
line,
["doc_id", "sent_id", "wordidxs", "words", "poses", "ners",
"lemmas"],
[no_op, int, lambda x: TSVstring2list(x, int), TSVstring2list,
TSVstring2list, TSVstring2list, TSVstring2list])
# Create the sentence object
null_list = [None, ] * len(line_dict["wordidxs"])
sentence = Sentence(
line_dict["doc_id"], line_dict["sent_id"],
line_dict["wordidxs"], line_dict["words"], line_dict["poses"],
line_dict["ners"], line_dict["lemmas"], null_list, null_list,
null_list)
# Skip weird sentences
if sentence.is_weird():
continue
# Get list of mentions candidates in this sentence
mentions = extract(sentence)
# Supervise them
new_mentions = supervise(mentions, sentence)
# Print!
for mention in new_mentions:
print(mention.tsv_dump())
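# NOTE: the following is a minimal added sketch, not part of this extractor.
# It isolates the DNA-triplet test used by the supervision rules above (the
# actual rule also requires a neighbouring all-ACGT word); tokens are made up.
def is_dna_triplet(token):
    """True for a three-letter token made only of A, C, G, T."""
    return len(token) == 3 and set(token) <= set("ACGT")

# is_dna_triplet("GAT")  -> True   (looks like a codon, not a gene symbol)
# is_dna_triplet("TP53") -> False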
| dd-genomics-master | archived/v0/code/ext_gene_candidates.py |
| dd-genomics-master | archived/v0/code/__init__.py |
#! /usr/bin/env python3
#
# Takes one directory containing parser output files and, for each file in that
# directory, emits TSV lines that can be loaded in the 'sentences' table
# using the PostgreSQL COPY FROM command.
#
# Parser output files contain "blocks" which are separated by blank lines. Each
# "block" is a sentence. Each sentence spans over one or more lines. Each line
# represents a "word" in the sentence (it can be punctuation, a symbol or
# anything). Each word line has *nine* fields:
# 1: index of the word in the sentence, starting from 1.
# 2: the text of the word as it appears in the document
# 3: Part of Speech (POS) tag of the word (see
# http://www.computing.dcu.ie/~acahill/tagset.html for a list)
# 4: Named Entity Recognition (NER) tag of the word
# 5: the lemmatized word
# 6: the label on the edge in dependency path between the parent of this word
# and the word
# 7: the word index of the *parent* of this word in the dependency path. 0
# means root
# 8: the sentence ID, unique in the document
# 9: the bounding box containing this word in the PDF document. The format is
# "[pXXXlXXXtXXXrXXXbXXX]," for page, left, top, right, bottom.
# Alternatively, the bounding box may have format
# "[pXXXlXXXtXXXrXXXbXXX, pYYYlYYYtYYYrYYYbYYY]" when the word is split
# between two lines
# An example line is:
# 1 Genome NNP O Genome nn 3 SENT_1 [p1l1669t172r1943b234],
#
# This script outputs TSV lines or JSON objects, one per sentence.
#
# Each TSV line has nine columns. The text in the columns is formatted so that
# the output can be given in input to the PostgreSQL 'COPY FROM' command. The
# columns are the following (between parentheses is the PostgreSQL type for the column):
# 1: document ID (text)
# 2: sentence ID (int)
# 3: word indexes (int[]). They now start from 0, like an array.
# 4: words, (text[])
# 5: POSes (text[])
# 6: NERs (text[])
# 7: dependency paths (text[])
# 8: dependency parent (int[]) -1 means root, so that each of them is an array index
# 9: bounding boxes (text[])
#
# This script can spawn subprocesses to increase parallelism, which can be
# useful when having to convert a lot of files.
#
import json
import os
import os.path
import sys
from multiprocessing import Process
from helper.easierlife import list2TSVarray
def process_files(proc_id, input_files, input_dir, output_dir, mode):
with open(os.path.realpath("{}/sentences-{}.{}".format(output_dir, proc_id, mode)), 'wt') as out_file:
for filename in input_files:
# Docid assumed to be the filename.
docid = filename
with open(os.path.realpath(input_dir + "/" + filename), 'rt') as curr_file:
atEOF = False
# Check if the file is empty (we are at End of File)
curr_pos = curr_file.tell()
curr_file.read(1)
new_pos = curr_file.tell()
if new_pos == curr_pos:
atEOF = True
else:
curr_file.seek(curr_pos)
# One iteration of the following loop corresponds to one sentence
while not atEOF:
sent_id = -1
wordidxs = []
words = []
poses = []
ners = []
lemmas = []
dep_paths = []
dep_parents = []
bounding_boxes = []
curr_line = curr_file.readline().strip()
# Sentences are separated by empty lines in the parser output file
while curr_line != "":
tokens = curr_line.split("\t")
if len(tokens) != 9:
sys.stderr.write("ERROR: malformed line (wrong number of fields): {}\n".format(curr_line))
return 1
word_idx, word, pos, ner, lemma, dep_path, dep_parent, word_sent_id, bounding_box = tokens
# Normalize sentence id
word_sent_id = int(word_sent_id.replace("SENT_", ""))
# assign sentence id if this is the first word of the sentence
if sent_id == -1:
sent_id = word_sent_id
# sanity check for word_sent_id
elif sent_id != word_sent_id:
sys.stderr.write("ERROR: found word with mismatching sent_id w.r.t. sentence: {} != {}\n".format(word_sent_id, sent_id))
return 1
# Normalize bounding box, stripping initial '[' and
# final '],' and concatenating components
bounding_box = bounding_box[1:-2]
bounding_box = bounding_box.replace(", ", "-")
# Append contents of this line to the sentence arrays
wordidxs.append(int(word_idx) - 1) # Start from 0
words.append(word)
poses.append(pos)
ners.append(ner)
lemmas.append(lemma)
dep_paths.append(dep_path)
# Now "-1" means root and the rest correspond to array indices
dep_parents.append(int(dep_parent) - 1)
bounding_boxes.append(bounding_box)
# Read the next line
curr_line = curr_file.readline().strip()
# Write sentence to output
if mode == "tsv":
out_file.write("{}\n".format("\t".join([docid, str(sent_id),
list2TSVarray(wordidxs), list2TSVarray(words,
quote=True), list2TSVarray(poses, quote=True),
list2TSVarray(ners), list2TSVarray(lemmas, quote=True),
list2TSVarray(dep_paths, quote=True),
list2TSVarray(dep_parents),
list2TSVarray(bounding_boxes)])))
elif mode == "json":
out_file.write("{}\n".format(json.dumps({ "doc_id": docid, "sent_id": sent_id,
"wordidxs": wordidxs, "words": words, "poses": poses,
"ners": ners, "lemmas": lemmas, "dep_paths": dep_paths,
"dep_parents": dep_parents, "bounding_boxes":
bounding_boxes})))
# Check if we are at End of File
curr_pos = curr_file.tell()
curr_file.read(1)
new_pos = curr_file.tell()
if new_pos == curr_pos:
atEOF = True
else:
curr_file.seek(curr_pos)
# Process the input files. Output can be either tsv or json
def main():
script_name = os.path.basename(__file__)
# Check
if len(sys.argv) != 5:
sys.stderr.write("USAGE: {} MODE PARALLELISM INPUTDIR OUTPUTDIR\n".format(script_name))
return 1
parser_files = os.listdir(os.path.abspath(os.path.realpath(sys.argv[3])))
parallelism = int(sys.argv[2])
mode = sys.argv[1]
for i in range(parallelism):
files = []
for j in range(len(parser_files)):
if j % parallelism == i:
files.append(parser_files[j])
p = Process(target = process_files, args = (i, files,
os.path.abspath(os.path.realpath(sys.argv[3])),
os.path.abspath(os.path.realpath(sys.argv[4])), mode))
p.start()
return 0
if __name__ == "__main__":
sys.exit(main())
| dd-genomics-master | archived/v0/code/parser2sentences.py |
#! /usr/bin/env python3
#
# Takes as first and only argument a dump obtained using get_dump.sql and
# remove the entries where the gene symbol can be used to express multiple
# genes.
import sys
if len(sys.argv) != 2:
sys.stderr.write("USAGE: {} dump.tsv\n".format(sys.argv[0]))
sys.exit(1)
with open(sys.argv[1], 'rt') as dump:
skipped = 0
for line in dump:
tokens = line.strip().split("\t")
gene_entity = tokens[1]
if "|" not in gene_entity and "\\N" not in gene_entity:
print(line.strip())
else:
skipped += 1
sys.stderr.write("skipped: {}\n".format(skipped))
| dd-genomics-master | archived/v0/code/filter_out_uncertain_genes.py |
#! /usr/bin/env python3
#
# Extract gene mention candidates and perform distant supervision
#
import fileinput
import re
from dstruct.Sentence import Sentence
from helper.dictionaries import load_dict
from helper.easierlife import get_dict_from_TSVline, TSVstring2list, no_op, \
print_feature, BASE_DIR
import ddlib
def add_features_generic(mention_id, gene_words, sentence):
# Use the generic feature library (ONLY!)
# Load dictionaries for keywords
ddlib.load_dictionary(BASE_DIR + "/dicts/features/gene_var.tsv", "VARKW")
ddlib.load_dictionary(
BASE_DIR + "/dicts/features/gene_knock.tsv", "KNOCKKW")
ddlib.load_dictionary(
BASE_DIR + "/dicts/features/gene_amino.tsv", "AMINOKW")
ddlib.load_dictionary(
BASE_DIR + "/dicts/features/gene_antigene.tsv", "ANTIGENEKW")
ddlib.load_dictionary(BASE_DIR + "/dicts/features/gene_dna.tsv", "DNAKW")
ddlib.load_dictionary(
BASE_DIR + "/dicts/features/gene_downregulation.tsv", "DOWNREGKW")
ddlib.load_dictionary(
BASE_DIR + "/dicts/features/gene_upregulation.tsv", "UPREGKW")
ddlib.load_dictionary(
BASE_DIR + "/dicts/features/gene_tumor.tsv", "TUMORKW")
ddlib.load_dictionary(
BASE_DIR + "/dicts/features/gene_gene.tsv", "GENEKW")
ddlib.load_dictionary(
BASE_DIR + "/dicts/features/gene_expression.tsv", "EXPRESSKW")
# Create the objects used by ddlib. ddlib interface is so ugly.
obj = dict()
obj['lemma'] = []
obj['words'] = []
obj['ner'] = []
obj['pos'] = []
obj['dep_graph'] = []
for word in sentence.words:
obj['lemma'].append(word.lemma)
obj['words'].append(word.word)
obj['ner'].append(word.ner)
obj['pos'].append(word.pos)
obj['dep_graph'].append(
str(word.dep_parent + 1) + "\t" + word.dep_path + "\t" +
str(word.in_sent_idx + 1))
word_obj_list = ddlib.unpack_words(
obj, lemma='lemma', pos='pos', ner='ner', words='words',
dep_graph='dep_graph', dep_graph_parser=ddlib.dep_graph_parser_triplet)
gene_span = ddlib.get_span(gene_words[0].in_sent_idx, len(gene_words))
features = set()
for feature in ddlib.get_generic_features_mention(
word_obj_list, gene_span):
features.add(feature)
for feature in features:
print_feature(sentence.doc_id, mention_id, feature)
# Keywords that are often associated with genes
VAR_KWS = frozenset([
"acetylation", "activate", "activation", "adaptor", "agonist", "alignment",
"allele", "antagonist", "antibody", "asynonymous", "backbone", "binding",
"biomarker", "breakdown", "cell", "cleavage", "cluster", "cnv",
"coactivator", "co-activator", "complex", "dd-genotype", "DD-genotype",
"deletion", "determinant", "domain", "duplication", "dysfunction",
"effector", "enhancer", "enrichment", "enzyme", "excision", "factor",
"family", "function", "functionality", "genotype",
"growth", "haplotype", "haplotypes", "heterozygous", "hexons", "hexon",
"histone", "homologue", "homology", "homozygous" "human",
"hypermetylation", "hybridization", "induce", "inducer", "induction",
"inhibitor", "inhibition", "intron", "interaction", "isoform", "isoforms",
"kinase", "kinesin", "level", "ligand", "location", "locus",
"mammalian", "marker", "methilation", "modification", "moiety", "molecule",
"molecules", "morpheein", "motif", "mutant", "mutation",
"mutations", "nonsynonymous", "non-synonymous", "nucleotide",
"oligomerization", "oncoprotein", "pathway", "peptide",
"pharmacokinetic", "pharmacodynamic", "pharmacogenetic" "phosphorylation",
"polymorphism", "proliferation", "promoter", "protein", "receptor",
"receptors", "recruitment", "region", "regulator", "release", "repressor",
"resistance", "retention", "ribonuclease", "role", "sequence",
"sequences", "sequestration", "serum", "signaling", "SNP", "SNPs",
"staining", "sumoylation", "synonymous", "target", "T-cell", "transducer",
"translocation", "transcribe", "transcript", "transcription",
"transporter", "variant", "variation", "vivo", "vitro"
])
KNOCK_KWS = frozenset([
"knockdown", "knock-down", "knock-out", "knockout", "KO"])
AMINO_ACID_KWS = frozenset(["amino-acid", "aminoacid"])
ANTIGENE_KWS = frozenset(["antigen", "antigene", "anti-gen", "anti-gene"])
DNA_KWS = frozenset([
"cdna", "cDNA", "dna", "mrna", "mRNA", "rna", "rrna", "sirnas", "sirna",
"siRNA", "siRNAs"])
DOWNREGULATION_KWS = frozenset(["down-regulation", "downregulation"])
UPREGULATION_KWS = frozenset(["up-regulation", "upregulation"])
TUMOR_KWS = frozenset([
"tumor", "tumours", "tumour", "cancer", "carcinoma", "fibrosarcoma",
"sarcoma", "lymphoma"])
GENE_KWS = frozenset([
"gene", "oncogene", "protooncogene", "proto-oncogene", "pseudogene",
"transgene"])
COEXPRESSION_KWS = frozenset([
"expression", "overexpression", "over-expression", "co-expression",
"coexpression"])
KEYWORDS = VAR_KWS | KNOCK_KWS | AMINO_ACID_KWS | ANTIGENE_KWS | DNA_KWS | \
DOWNREGULATION_KWS | UPREGULATION_KWS | TUMOR_KWS | GENE_KWS | \
COEXPRESSION_KWS
# Load the dictionaries that we need
merged_genes_dict = load_dict("merged_genes")
long_names_dict = load_dict("long_names")
inverted_long_names = load_dict("inverted_long_names")
hpoterms_with_gene = load_dict("hpoterms_with_gene")
stopwords_dict = load_dict("stopwords")
# Add features to a gene mention candidate
def add_features(mention_id, mention_words, sentence):
# The verb closest to the candidate, with the path to it.
minl = 100
minp = None
minw = None
for word in mention_words:
for word2 in sentence.words:
if word2.lemma.isalpha() and re.search('^VB[A-Z]*$', word2.pos) \
and word2.lemma != 'be':
# Ignoring "be" comes from pharm (Emily)
(p, l) = sentence.get_word_dep_path(
word.in_sent_idx, word2.in_sent_idx)
if l < minl:
minl = l
minp = p
minw = word2.lemma
if minw:
print_feature(
sentence.doc_id, mention_id, 'VERB_[' + minw + ']' + minp)
# The keywords that appear in the sentence with the mention
minl = 100
minp = None
minw = None
for word in mention_words:
for word2 in sentence.words:
if word2.lemma in KEYWORDS:
(p, l) = sentence.get_word_dep_path(
word.in_sent_idx, word2.in_sent_idx)
kw = word2.lemma
if word2.lemma in KNOCK_KWS:
kw = "_KNOCKOUT"
elif word2.lemma in ANTIGENE_KWS:
kw = "_ANTIGENE"
elif word2.lemma in AMINO_ACID_KWS:
kw = "_AMINOACID"
# elif word2.lemma in DNA_KWS:
# kw = "_DNA"
elif word2.lemma in DOWNREGULATION_KWS:
kw = "_DOWNREGULATION"
elif word2.lemma in UPREGULATION_KWS:
kw = "_UPREGULATION"
# elif word2.lemma in TUMOR_KWS:
# kw = "_TUMOR"
# elif word2.lemma in GENE_KWS:
# kw = "_GENE"
# elif word2.lemma in COEXPRESSION_KWS:
# ke = "_COEXPRESSION"
if l < minl:
minl = l
minp = p
minw = kw
if len(p) < 100:
print_feature(
sentence.doc_id, mention_id,
"KEYWORD_[" + kw + "]" + p)
# Special features for the keyword on the shortest dependency path
if minw:
print_feature(
sentence.doc_id, mention_id,
'EXT_KEYWORD_MIN_[' + minw + ']' + minp)
print_feature(
sentence.doc_id, mention_id, 'KEYWORD_MIN_[' + minw + ']')
# If another gene is present in the sentence, add a feature with that gene
# and the path to it. This comes from pharm.
minl = 100
minp = None
minw = None
mention_wordidxs = []
for word in mention_words:
mention_wordidxs.append(word.in_sent_idx)
for word in mention_words:
for word2 in sentence.words:
if word2.in_sent_idx not in mention_wordidxs and \
word2.word in merged_genes_dict:
(p, l) = sentence.get_word_dep_path(
word.in_sent_idx, word2.in_sent_idx)
if l < minl:
minl = l
minp = p
minw = word2.lemma
if minw:
print_feature(
sentence.doc_id, mention_id, 'OTHER_GENE_['+minw+']' + minp)
# print_feature(sentence.doc_id, mention_id, 'OTHER_GENE_['+minw+']')
# The lemma on the left of the candidate, whatever it is
try:
left = sentence.words[mention_words[0].in_sent_idx-1].lemma
try:
float(left)
left = "_NUMBER"
except ValueError:
pass
print_feature(
sentence.doc_id, mention_id, "NGRAM_LEFT_1_[" + left + "]")
except IndexError:
pass
# The lemma on the right of the candidate, whatever it is
try:
right = sentence.words[mention_words[-1].in_sent_idx+1].lemma
try:
float(right)
right = "_NUMBER"
except ValueError:
pass
print_feature(
sentence.doc_id, mention_id, "NGRAM_RIGHT_1_[" + right + "]")
except IndexError:
pass
# We now check whether the lemma on the left and on the right are
# "special", for example a year or a gene.
# The concept of left or right is a little tricky here, as we are actually
# looking at the first word that contains only letters and is not a
# stopword.
idx = mention_words[0].in_sent_idx - 1
gene_on_left = None
gene_on_right = None
while idx >= 0 and \
((((not sentence.words[idx].lemma.isalnum() and not
sentence.words[idx] in merged_genes_dict) or
(not sentence.words[idx].word.isupper() and
sentence.words[idx].lemma in stopwords_dict)) and
not re.match("^[0-9]+(.[0-9]+)?$", sentence.words[idx].word)
and not sentence.words[idx] in merged_genes_dict) or
len(sentence.words[idx].lemma) == 1):
idx -= 1
if idx >= 0:
if sentence.words[idx].word in merged_genes_dict and \
len(sentence.words[idx].word) > 3:
gene_on_left = sentence.words[idx].word
try:
year = float(sentence.words[idx].word)
if round(year) == year and year > 1950 and year <= 2014:
print_feature(sentence.doc_id, mention_id, "IS_YEAR_LEFT")
except:
pass
# The word on the right of the mention, if present, provided it's
# alphanumeric but not a number
idx = mention_words[-1].in_sent_idx + 1
while idx < len(sentence.words) and \
((((not sentence.words[idx].lemma.isalnum() and not
sentence.words[idx] in merged_genes_dict) or
(not sentence.words[idx].word.isupper() and
sentence.words[idx].lemma in stopwords_dict)) and
not re.match("^[0-9]+(.[0-9]+)?$", sentence.words[idx].word)
and not sentence.words[idx] in merged_genes_dict) or
len(sentence.words[idx].lemma) == 1):
idx += 1
if idx < len(sentence.words):
if sentence.words[idx].word in merged_genes_dict and \
len(sentence.words[idx].word) > 3:
gene_on_right = sentence.words[idx].word
try:
year = float(sentence.words[idx].word)
if round(year) == year and year > 1950 and year <= 2014:
print_feature(sentence.doc_id, mention_id, "IS_YEAR_RIGHT")
except:
pass
if gene_on_left and gene_on_right:
print_feature(sentence.doc_id, mention_id, "IS_BETWEEN_GENES")
elif gene_on_left:
print_feature(sentence.doc_id, mention_id, "GENE_ON_LEFT")
elif gene_on_right:
print_feature(sentence.doc_id, mention_id, "GENE_ON_RIGHT")
# The candidate is a single word that appears many times (more than 4) in
# the sentence
if len(mention_words) == 1 and \
[w.word for w in sentence.words].count(mention_words[0].word) > 4:
print_feature(
sentence.doc_id, mention_id, "APPEARS_MANY_TIMES_IN_SENTENCE")
# There are many PERSONs/ORGANIZATIONs/LOCATIONs in the sentence
# for ner in ["PERSON", "ORGANIZATION", "LOCATION"]:
# if [x.ner for x in sentence.words].count(ner) > 4:
# print_feature(
# sentence.doc_id, mention_id, "MANY_{}_IN_SENTENCE".format(ner))
if __name__ == "__main__":
# Process the input
with fileinput.input() as input_files:
for line in input_files:
# Parse the TSV line
line_dict = get_dict_from_TSVline(
line, ["doc_id", "sent_id", "wordidxs", "words", "poses",
"ners", "lemmas", "dep_paths", "dep_parents",
"mention_id", "mention_wordidxs"],
[no_op, int, lambda x: TSVstring2list(x, int), TSVstring2list,
TSVstring2list, TSVstring2list, TSVstring2list,
TSVstring2list, lambda x: TSVstring2list(x, int),
no_op, lambda x: TSVstring2list(x, int)])
# Create the sentence object
null_list = [None, ] * len(line_dict["wordidxs"])
sentence = Sentence(
line_dict["doc_id"], line_dict["sent_id"],
line_dict["wordidxs"], line_dict["words"], line_dict["poses"],
line_dict["ners"], line_dict["lemmas"], line_dict["dep_paths"],
line_dict["dep_parents"], null_list)
if sentence.is_weird():
continue
mention_words = []
for mention_wordidx in line_dict["mention_wordidxs"]:
mention_words.append(sentence.words[mention_wordidx])
add_features(line_dict["mention_id"], mention_words, sentence)
# add_features_generic( line_dict["mention_id"], mention_words,
# sentence)
|
dd-genomics-master
|
archived/v0/code/ext_gene_features.py
|
#! /usr/bin/env python3
#
# Canonicalize a dump using the HPO dag
#
# Use the output of filter_out_uncertain_genes.py
import sys
from helper.dictionaries import load_dict
if len(sys.argv) != 2:
sys.stderr.write("USAGE: {} dump.tsv\n".format(sys.argv[0]))
sys.exit(1)
hpoancestors = load_dict("hpoancestors")
with open(sys.argv[1], 'rt') as dump:
for line in dump:
tokens = line.strip().split("\t")
relation_id = tokens[0]
gene_entity = tokens[1]
hpo_entity = tokens[3]
if "|" not in hpo_entity:
continue
hpo_id = hpo_entity.split("|")[0]
if hpo_id not in hpoancestors:
continue
print("{}\t{}\t{}".format(relation_id, gene_entity, hpo_entity))
for ancestor in hpoancestors[hpo_id]:
print("{}\t{}\t{}".format(relation_id, gene_entity, ancestor))
|
dd-genomics-master
|
archived/v0/code/canonicalize.py
|
#! /usr/bin/env python3
from helper.dictionaries import load_dict
if __name__ == "__main__":
merged_genes_dict = load_dict("merged_genes")
inverted_long_names = load_dict("inverted_long_names")
hpoterms_orig = load_dict("hpoterms_orig")
for hpoterm_name in hpoterms_orig:
for long_name in inverted_long_names:
if hpoterm_name in long_name.split() and \
                hpoterm_name.casefold() != long_name.casefold():
print("\t".join((hpoterm_name, long_name)))
|
dd-genomics-master
|
archived/v0/code/find_hpoterms_in_genes.py
|
#! /usr/bin/env python3
import fileinput
import random
import re
from nltk.stem.snowball import SnowballStemmer
from dstruct.Mention import Mention
from dstruct.Sentence import Sentence
from helper.easierlife import get_all_phrases_in_sentence, \
get_dict_from_TSVline, TSVstring2list, no_op
from helper.dictionaries import load_dict
max_mention_length = 8 # This is somewhat arbitrary
NEG_PROB = 0.005 # Probability of generating a random negative mention
# Load the dictionaries that we need
english_dict = load_dict("english")
stopwords_dict = load_dict("stopwords")
inverted_hpoterms = load_dict("hpoterms_inverted")
hponames_to_ids = load_dict("hponames_to_ids")
genes_with_hpoterm = load_dict("genes_with_hpoterm")
# hpodag = load_dict("hpoparents")
stems = set()
for hpo_name in inverted_hpoterms:
stem_set = inverted_hpoterms[hpo_name]
stems |= stem_set
stems = frozenset(stems)
# The keys of the following dictionary are sets of stems, and the values are
# sets of hpoterms whose name, without stopwords, gives origin to the
# corresponding set of stems (as key)
hpoterms_dict = load_dict("hpoterms")
# Initialize the stemmer
stemmer = SnowballStemmer("english")
# Perform the supervision
def supervise(mentions, sentence):
for mention in mentions:
# Skip if we already supervised it (e.g., random mentions or
# gene long names)
if mention.is_correct is not None:
continue
# The next word is 'gene' or 'protein', so it's actually a gene
if mention.words[-1].in_sent_idx < len(sentence.words) - 1:
next_word = sentence.words[mention.words[-1].in_sent_idx + 1].word
if next_word.casefold() in ["gene", "protein"]:
mention.is_correct = False
mention.type = "PHENO_SUP_GENE"
continue
mention_lemmas = set([x.lemma.casefold() for x in mention.words])
name_words = set([x.casefold() for x in
mention.entity.split("|")[1].split()])
# The mention is exactly the HPO name
if mention_lemmas == name_words and \
mention.words[0].lemma != "pneunomiae":
mention.is_correct = True
mention.type = "PHENO_SUP_FULL"
return mentions
# Return a list of mention candidates extracted from the sentence
def extract(sentence):
mentions = []
mention_ids = set()
# If there are no English words in the sentence, we skip it.
no_english_words = True
for word in sentence.words:
word.stem = stemmer.stem(word.word) # Here so all words have stem
if len(word.word) > 2 and \
(word.word in english_dict or
word.word.casefold() in english_dict):
no_english_words = False
if no_english_words:
return mentions
history = set()
# Iterate over each phrase of length at most max_mention_length
for start, end in get_all_phrases_in_sentence(sentence,
max_mention_length):
if start in history or end - 1 in history:
continue
phrase = " ".join([word.word for word in sentence.words[start:end]])
# If the phrase is a gene long name containing a phenotype name, create
# a candidate that we supervise as negative
if len(phrase) > 1 and phrase in genes_with_hpoterm:
mention = Mention("HPOTERM_SUP_GENEL",
phrase,
sentence.words[start:end])
mention.is_correct = False
mentions.append(mention)
for word in sentence.words[start:end]:
history.add(word.in_sent_idx)
continue
# Iterate over each phrase of length at most max_mention_length
for start, end in get_all_phrases_in_sentence(sentence,
max_mention_length):
should_continue = False
for i in range(start, end):
if i in history:
should_continue = True
break
if should_continue:
continue
phrase = " ".join([word.word for word in sentence.words[start:end]])
# The list of stems in the phrase (not from stopwords or symbols, and
# not already used for a mention)
phrase_stems = []
for word in sentence.words[start:end]:
if not re.match("^(_|\W)+$", word.word) and \
(len(word.word) == 1 or
word.lemma.casefold() not in stopwords_dict):
phrase_stems.append(word.stem)
phrase_stems_set = frozenset(phrase_stems)
if phrase_stems_set in hpoterms_dict:
# Find the word objects of that match
mention_words = []
mention_lemmas = []
mention_stems = []
for word in sentence.words[start:end]:
if word.stem in phrase_stems_set and \
word.lemma.casefold() not in mention_lemmas and \
word.stem not in mention_stems:
mention_lemmas.append(word.lemma.casefold())
mention_words.append(word)
mention_stems.append(word.stem)
if len(mention_words) == len(phrase_stems_set):
break
entity = list(hpoterms_dict[phrase_stems_set])[0]
mention = Mention(
"PHENO", hponames_to_ids[entity] + "|" + entity,
mention_words)
# The following is a way to avoid duplicates.
# It's ugly and not perfect
if mention.id() in mention_ids:
continue
mention_ids.add(mention.id())
mentions.append(mention)
for word in mention_words:
history.add(word.in_sent_idx)
    # Generate some negative candidates at random, if this sentence didn't
# contain any other candidate. We want the candidates to be nouns.
if len(mentions) == 0 and random.random() <= NEG_PROB:
index = random.randint(0, len(sentence.words) - 1)
# We may not get a noun at random, so we try again if we don't.
tries = 10
while not sentence.words[index].pos.startswith("NN") and tries > 0:
index = random.randint(0, len(sentence.words) - 1)
tries -= 1
if sentence.words[index].pos.startswith("NN"):
mention = Mention(
"PHENO_SUP_rand", sentence.words[index].lemma.casefold(),
sentence.words[index:index+1])
mention.is_correct = False
mentions.append(mention)
return mentions
if __name__ == "__main__":
# Process the input
with fileinput.input() as input_files:
for line in input_files:
# Parse the TSV line
line_dict = get_dict_from_TSVline(
line,
["doc_id", "sent_id", "wordidxs", "words", "poses", "lemmas"],
[no_op, int, lambda x: TSVstring2list(x, int), TSVstring2list,
TSVstring2list, TSVstring2list])
# Create the sentence object
null_list = [None, ] * len(line_dict["wordidxs"])
sentence = Sentence(
line_dict["doc_id"], line_dict["sent_id"],
line_dict["wordidxs"], line_dict["words"], line_dict["poses"],
null_list, line_dict["lemmas"], null_list, null_list,
null_list)
# Skip weird sentences
if sentence.is_weird():
continue
# Get list of mentions candidates in this sentence
mentions = extract(sentence)
# Supervise them
new_mentions = supervise(mentions, sentence)
# Print!
for mention in new_mentions:
print(mention.tsv_dump())
|
dd-genomics-master
|
archived/v0/code/ext_pheno_candidates.py
|
#! /usr/bin/env python3
#
# Convert a geneRifs file to a file that can be given as input to the NLPparser
# extractor.
import fileinput
import json
import sys
if len(sys.argv) < 2:
sys.stderr.write("USAGE: {} FILE [FILE [FILE [...]]]\n".format(sys.argv[0]))
sys.exit(1)
DOCUMENT_ID = "geneRifs-"
i = 0
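# Illustrative example (hypothetical geneRIFs line; the text is assumed to be
# in the third tab-separated column): the line
# "672\t9606\tBRCA1 interacts with BARD1.\t2010-01-21" produces
# {"id": "geneRifs-0", "text": "BRCA1 interacts with BARD1."} on the first
# iteration.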
with fileinput.input() as input_files:
for line in input_files:
tokens = line.split("\t")
line_dict = {"id": DOCUMENT_ID + str(i), "text": tokens[2]}
print(json.dumps(line_dict))
i += 1
|
dd-genomics-master
|
archived/v0/code/geneRifs2NLPparser.py
|
#! /usr/bin/env python3
import fileinput
import re
from dstruct.Sentence import Sentence
from helper.easierlife import get_dict_from_TSVline, TSVstring2list, no_op, \
print_feature
import ddlib
def add_features_generic(relation_id, gene_words, pheno_words, sentence):
# Use the generic feature library (ONLY!)
obj = dict()
obj['lemma'] = []
obj['words'] = []
obj['ner'] = []
obj['pos'] = []
obj['dep_graph'] = []
for word in sentence.words:
obj['lemma'].append(word.lemma)
obj['words'].append(word.word)
obj['ner'].append(word.ner)
obj['pos'].append(word.pos)
obj['dep_graph'].append(
str(word.dep_parent + 1) + "\t" + word.dep_path + "\t" +
str(word.in_sent_idx + 1))
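    # Each dep_graph entry built above is "parent<TAB>label<TAB>child" with
    # 1-based indexes, so a dep_parent of -1 (the root) becomes 0; this is the
    # tab-separated triplet format handed to ddlib.dep_graph_parser_triplet.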
word_obj_list = ddlib.unpack_words(
obj, lemma='lemma', pos='pos', ner='ner', words='words',
dep_graph='dep_graph', dep_graph_parser=ddlib.dep_graph_parser_triplet)
gene_span = ddlib.get_span(gene_words[0].in_sent_idx, len(gene_words))
    pheno_span = ddlib.get_span(pheno_words[0].in_sent_idx, len(pheno_words))
features = set()
for feature in ddlib.get_generic_feature_relation(
word_obj_list, gene_span, pheno_span):
features.add(feature)
for feature in features:
print_feature(sentence.doc_id, relation_id, feature)
# Add features (few)
def add_features_few(relation, gene_mention, hpoterm_mention, sentence):
# Find the start/end indices of the mentions composing the relation
gene_start = gene_mention.wordidxs[0]
hpoterm_start = hpoterm_mention.wordidxs[0]
gene_end = gene_mention.wordidxs[-1]
hpoterm_end = hpoterm_mention.wordidxs[-1]
limits = sorted((gene_start, hpoterm_start, gene_end, hpoterm_end))
start = limits[0]
betw_start = limits[1]
betw_end = limits[2]
# If the gene comes first, we do not prefix, otherwise we do.
if start == gene_start:
inv = ""
else:
inv = "INV_"
# The following features are only added if the two mentions are "close
# enough" to avoid overfitting. The concept of "close enough" is somewhat
# arbitrary.
if betw_end - betw_start - 1 < 15:
# The sequence of lemmas between the two mentions and the sequence of
# lemmas between the two mentions but using the NERs, if present, and
# the sequence of POSes between the mentions
seq_list_ners = []
seq_list_lemmas = []
seq_list_poses = []
for word in sentence.words[betw_start+1:betw_end]:
if word.ner != "O":
seq_list_ners.append(word.ner)
else:
seq_list_ners.append(word.lemma)
seq_list_lemmas.append(word.lemma)
seq_list_poses.append(word.pos)
seq_ners = " ".join(seq_list_ners)
seq_lemmas = " ".join(seq_list_lemmas)
seq_poses = "_".join(seq_list_poses)
relation.add_feature(inv + "WORD_SEQ_[" + seq_lemmas + "]")
if seq_ners != seq_lemmas:
relation.add_feature(inv + "WORD_SEQ_NER_[" + seq_ners + "]")
relation.add_feature(inv + "POS_SEQ_[" + seq_poses + "]")
else:
relation.add_feature(inv + "WORD_SEQ_[TOO_FAR_AWAY]")
# relation.add_feature(inv + "WORD_SEQ_NER_[TOO_FAR_AWAY]")
# relation.add_feature(inv + "POS_SEQ_[TOO_FAR_AWAY]")
# Shortest dependency path between the two mentions
(dep_path, dep_path_len) = sentence.dep_path(gene_mention, hpoterm_mention)
if dep_path_len < 10: # XXX 10 is arbitrary
relation.add_feature(inv + "DEP_PATH_[" + dep_path + "]")
(dep_path_pos, dep_path_pos_len) = sentence.dep_path(
gene_mention, hpoterm_mention, use_pos=True)
relation.add_feature(inv + "DEP_PATH_POS_[" + dep_path_pos + "]")
else:
relation.add_feature(inv + "DEP_PATH_[TOO_FAR_AWAY]")
# relation.add_feature(inv + "DEP_PATH_POS_[TOO_FAR_AWAY]")
# For each verb in the sentence compute the dependency path from the
# mentions to the verb
for i in range(len(sentence.words)):
# The filtering of the brackets and commas is from Emily's code.
if re.search('^VB[A-Z]*$', sentence.words[i].pos) and \
sentence.words[i].word.isalpha():
# sentence.words[i].word not in ["{", "}", "(", ")", "[", "]"]
# and "," not in sentence.words[i].word:
min_len_g = 10000
min_path_g = None
min_path_pos_g = None
for wordidx in gene_mention.wordidxs:
(path, length) = sentence.get_word_dep_path(
wordidx, sentence.words[i].in_sent_idx)
if length < min_len_g:
min_path_g = path
min_len_g = length
(min_path_pos_g, l) = sentence.get_word_dep_path(
wordidx, sentence.words[i].in_sent_idx, use_pos=True)
min_len_h = 10000
min_path_h = None
min_path_pos_h = None
for wordidx in hpoterm_mention.wordidxs:
(path, length) = sentence.get_word_dep_path(
wordidx, sentence.words[i].in_sent_idx)
if length < min_len_h:
min_path_h = path
min_len_h = length
(min_path_pos_h, l) = sentence.get_word_dep_path(
wordidx, sentence.words[i].in_sent_idx, use_pos=True)
if min_len_g < 5 and min_len_h < 5:
relation.add_feature(
inv + "VERB_DEP_PATH_[" + sentence.words[i].lemma + "]_[" +
min_path_g + "]_[" + min_path_h + "]")
relation.add_feature(
inv + "VERB_DEP_PATH_POS_[" + sentence.words[i].lemma +
"]_[" + min_path_pos_g + "]_[" + min_path_pos_h + "]")
# Add features
def add_features(relation_id, gene_words, pheno_words, sentence):
# Find the start/end indices of the mentions composing the relation
gene_start = gene_words[0].in_sent_idx
pheno_start = pheno_words[0].in_sent_idx
gene_end = gene_words[-1].in_sent_idx
pheno_end = pheno_words[-1].in_sent_idx
limits = sorted((gene_start, pheno_start, gene_end, pheno_end))
start = limits[0]
betw_start = limits[1]
betw_end = limits[2]
end = limits[3]
# If the gene comes first, we do not prefix, otherwise we do.
if start == gene_start:
inv = ""
else:
inv = "INV_"
# Verbs between the mentions
verbs_between = []
minl_gene = 100
minp_gene = None
minw_gene = None
mini_gene = None
minl_pheno = 100
# minp_pheno = None
minw_pheno = None
mini_pheno = None
neg_found = False
    # Look at all the words, since in the dependency path there could be words
    # that are close to both mentions but not between them
for i in range(len(sentence.words)):
# The filtering of the brackets and commas is from Emily's code.
if re.search('^VB[A-Z]*$', sentence.words[i].pos) and \
sentence.words[i].word not in ["{", "}", "(", ")", "[", "]"] \
and "," not in sentence.words[i].word:
(p_gene, l_gene) = sentence.get_word_dep_path(
betw_start, sentence.words[i].in_sent_idx)
(p_pheno, l_pheno) = sentence.get_word_dep_path(
sentence.words[i].in_sent_idx, betw_end)
if l_gene < minl_gene:
minl_gene = l_gene
minp_gene = p_gene
minw_gene = sentence.words[i].lemma
mini_gene = sentence.words[i].in_sent_idx
if l_pheno < minl_pheno:
minl_pheno = l_pheno
# minp_pheno = p_pheno
minw_pheno = sentence.words[i].lemma
mini_pheno = sentence.words[i].in_sent_idx
# Look for negation.
if i > 0 and sentence.words[i-1].lemma in \
["no", "not", "neither", "nor"]:
if i < betw_end - 2:
neg_found = True
                    print_feature(
                        sentence.doc_id, relation_id,
                        inv + "NEG_VERB_[" + sentence.words[i-1].word + "]-" +
                        sentence.words[i].lemma)
else:
verbs_between.append(sentence.words[i])
if len(verbs_between) == 1 and not neg_found:
print_feature(
sentence.doc_id, relation_id,
inv + "SINGLE_VERB_[%s]" % verbs_between[0].lemma)
else:
for verb in verbs_between:
if verb.in_sent_idx > betw_start and \
verb.in_sent_idx < betw_end:
print_feature(
sentence.doc_id, relation_id,
inv + "VERB_[%s]" % verb.lemma)
if mini_pheno == mini_gene and mini_gene is not None and \
len(minp_gene) < 50: # and "," not in minw_gene:
# feature = inv + 'MIN_VERB_[' + minw_gene + ']' + minp_gene
# features.append(feature)
feature = inv + 'MIN_VERB_[' + minw_gene + ']'
print_feature(sentence.doc_id, relation_id, feature)
else:
feature = inv
if mini_gene is not None:
# feature = 'MIN_VERB_GENE_[' + minw_gene + ']' + minp_gene
# print_feature(sentence.doc_id, relation_id, feature)
feature += 'MIN_VERB_GENE_[' + minw_gene + ']'
else:
feature += 'MIN_VERB_GENE_[NULL]'
if mini_pheno is not None:
# feature = 'MIN_VERB_pheno_[' + minw_pheno + ']' + minp_pheno)
# print_feature(sentence.doc_id, relation_id, feature)
feature += '_pheno_[' + minw_pheno + ']'
else:
feature += '_pheno_[NULL]'
print_feature(sentence.doc_id, relation_id, feature)
# The following features are only added if the two mentions are "close
# enough" to avoid overfitting. The concept of "close enough" is somewhat
# arbitrary.
neg_word_index = -1
if betw_end - betw_start - 1 < 8:
for i in range(betw_start+1, betw_end):
# Feature for separation between entities.
# TODO Think about merging these?
# I think these should be some kind of supervision rule instead?
if "while" == sentence.words[i].lemma:
print_feature(sentence.doc_id, relation_id, "SEP_BY_[while]")
if "whereas" == sentence.words[i].lemma:
print_feature(sentence.doc_id, relation_id, "SEP_BY_[whereas]")
if sentence.words[i].lemma in ["no", "not", "neither", "nor"]:
neg_word_index = i
# Features for the negative words
# TODO: We would probably need distant supervision for these
if neg_word_index > -1:
gene_p = None
gene_l = 100
for word in sentence.words[gene_start:gene_end+1]:
(p, l) = sentence.get_word_dep_path(
word.in_sent_idx, neg_word_index)
if l < gene_l:
gene_p = p
gene_l = l
if gene_p:
print_feature(
sentence.doc_id, relation_id, inv + "NEG_[" + gene_p + "]")
# pheno_p = None
# pheno_l = 100
# for word in sentence.words[pheno_start:pheno_end+1]:
# p = sentence.get_word_dep_path(
# word.in_sent_idx, neg_word_index)
# if len(p) < pheno_l:
# pheno_p = p
# pheno_l = len(p)
# if pheno_p:
# print_feature(
# relation_id, inv + "pheno_TO_NEG_[" + pheno_p + "]")
# The sequence of lemmas between the two mentions and the sequence of
# lemmas between the two mentions but using the NERs, if present, and
# the sequence of POSes between the mentions
seq_list_ners = []
seq_list_lemmas = []
seq_list_poses = []
for word in sentence.words[betw_start+1:betw_end]:
if word.ner != "O":
seq_list_ners.append(word.ner)
else:
seq_list_ners.append(word.lemma)
seq_list_lemmas.append(word.lemma)
seq_list_poses.append(word.pos)
seq_ners = " ".join(seq_list_ners)
seq_lemmas = " ".join(seq_list_lemmas)
seq_poses = "_".join(seq_list_poses)
print_feature(
sentence.doc_id, relation_id,
inv + "WORD_SEQ_[" + seq_lemmas + "]")
print_feature(
sentence.doc_id, relation_id,
inv + "WORD_SEQ_NER_[" + seq_ners + "]")
print_feature(
sentence.doc_id, relation_id, inv + "POS_SEQ_[" + seq_poses + "]")
# Shortest dependency path between the two mentions
(path, length) = sentence.dep_path(gene_words, pheno_words)
print_feature(
sentence.doc_id, relation_id, inv + "DEP_PATH_[" + path + "]")
# Number of words between the mentions
# TODO I think this should be some kind of supervision rule instead?
# print_feature(sentence.doc_id, relation_id,
# inv + "WORD_SEQ_LEN_[" + str(betw_end - betw_start - 1) + "]")
# 2-gram between the mentions
    if betw_end - betw_start - 1 > 4 and betw_end - betw_start - 1 < 15:
for i in range(betw_start + 1, betw_end - 1):
print_feature(
sentence.doc_id, relation_id,
"BETW_2_GRAM_[" + sentence.words[i].lemma + "_" +
sentence.words[i+1].lemma + "]")
# Lemmas on the exterior of the mentions and on the interior
feature = inv
if start > 0:
feature += "EXT_NGRAM_[" + sentence.words[start - 1].lemma + "]"
else:
feature += "EXT_NGRAM_[NULL]"
if end < len(sentence.words) - 1:
feature += "_[" + sentence.words[end + 1].lemma + "]"
else:
feature += "_[NULL]"
print_feature(sentence.doc_id, relation_id, feature)
feature = inv + "INT_NGRAM_[" + sentence.words[betw_start + 1].lemma + \
"]" + "_[" + sentence.words[betw_end - 1].lemma + "]"
print_feature(sentence.doc_id, relation_id, feature)
if __name__ == "__main__":
# Process the input
with fileinput.input() as input_files:
for line in input_files:
# Parse the TSV line
line_dict = get_dict_from_TSVline(
line, ["doc_id", "sent_id", "wordidxs", "words", "poses",
"ners", "lemmas", "dep_paths", "dep_parents",
"relation_id", "gene_wordidxs", "pheno_wordidxs"],
[no_op, int, lambda x: TSVstring2list(x, int), TSVstring2list,
TSVstring2list, TSVstring2list, TSVstring2list,
TSVstring2list, lambda x: TSVstring2list(x, int),
no_op, lambda x: TSVstring2list(x, int), lambda x:
TSVstring2list(x, int)])
# Create the sentence object
null_list = [None, ] * len(line_dict["wordidxs"])
sentence = Sentence(
line_dict["doc_id"], line_dict["sent_id"],
line_dict["wordidxs"], line_dict["words"], line_dict["poses"],
line_dict["ners"], line_dict["lemmas"], line_dict["dep_paths"],
line_dict["dep_parents"], null_list)
if sentence.is_weird():
continue
gene_words = []
for gene_wordidx in line_dict["gene_wordidxs"]:
gene_words.append(sentence.words[gene_wordidx])
pheno_words = []
for pheno_wordidx in line_dict["pheno_wordidxs"]:
pheno_words.append(sentence.words[pheno_wordidx])
add_features(
line_dict["relation_id"], gene_words, pheno_words, sentence)
|
dd-genomics-master
|
archived/v0/code/ext_genepheno_features.py
|
#! /usr/bin/env python3
import fileinput
import random
import re
from dstruct.Mention import Mention
from dstruct.Sentence import Sentence
from dstruct.Relation import Relation
from helper.dictionaries import load_dict
from helper.easierlife import get_dict_from_TSVline, no_op, TSVstring2list
# Load the gene<->hpoterm dictionary
genehpoterms_dict = load_dict("genehpoterms")
# Supervise the candidates
def supervise(relation, gene_mention, hpoterm_mention, sentence):
# One of the two mentions is labelled as False
if gene_mention.is_correct is False and \
hpoterm_mention.is_correct is not False:
relation.is_correct = False
relation.type = "GENEPHENO_SUP_F_G"
elif hpoterm_mention.is_correct is False and \
gene_mention.is_correct is not False:
relation.is_correct = False
relation.type = "GENEPHENO_SUP_F_H"
elif hpoterm_mention.is_correct is False and \
gene_mention.is_correct is False:
relation.is_correct = False
relation.type = "GENEPHENO_SUP_F_GH"
else:
# Present in the existing HPO mapping
in_mapping = False
hpo_entity_id = hpoterm_mention.entity.split("|")[0]
if frozenset([gene_mention.words[0].word, hpo_entity_id]) in \
genehpoterms_dict:
in_mapping = True
else:
for gene in gene_mention.entity.split("|"):
if frozenset([gene, hpo_entity_id]) in \
genehpoterms_dict:
in_mapping = True
break
if in_mapping:
relation.is_correct = True
relation.type = "GENEPHENO_SUP_MAP"
if __name__ == "__main__":
# Process input
with fileinput.input() as input_files:
for line in input_files:
# Parse the TSV line
line_dict = get_dict_from_TSVline(
line, ["doc_id", "sent_id", "wordidxs", "words", "poses",
"ners", "lemmas", "dep_paths", "dep_parents",
"bounding_boxes", "gene_entities", "gene_wordidxss",
"gene_is_corrects", "gene_types",
"hpoterm_entities", "hpoterm_wordidxss",
"hpoterm_is_corrects", "hpoterm_types"],
[no_op, int, lambda x: TSVstring2list(x, int), TSVstring2list,
TSVstring2list, TSVstring2list, TSVstring2list,
TSVstring2list, lambda x: TSVstring2list(x, int),
TSVstring2list, # these are for the sentence
TSVstring2list, lambda x: TSVstring2list(x, sep="!~!"),
TSVstring2list, TSVstring2list, # these are for the genes
TSVstring2list, lambda x: TSVstring2list(x, sep="!~!"),
TSVstring2list, TSVstring2list, # these are for the HPO
])
# Remove the genes that are unsupervised copies or duplicates
supervised_idxs = set()
unsupervised_idxs = set()
for i in range(len(line_dict["gene_is_corrects"])):
if line_dict["gene_is_corrects"][i] == "n":
unsupervised_idxs.add(i)
else:
if line_dict["gene_types"][i] != "GENE_SUP_contr_2":
# The above condition is to avoid duplicates
supervised_idxs.add(i)
survived_unsuperv_idxs = set()
for i in unsupervised_idxs:
wordidxs = line_dict["gene_wordidxss"][i]
found = False
for j in supervised_idxs:
if line_dict["gene_wordidxss"][j] == wordidxs:
found = True
break
if not found:
survived_unsuperv_idxs.add(i)
to_keep = sorted(survived_unsuperv_idxs | supervised_idxs)
new_gene_entities = []
new_gene_wordidxss = []
new_gene_is_corrects = []
new_gene_types = []
for i in to_keep:
new_gene_entities.append(line_dict["gene_entities"][i])
new_gene_wordidxss.append(line_dict["gene_wordidxss"][i])
new_gene_is_corrects.append(line_dict["gene_is_corrects"][i])
new_gene_types.append(line_dict["gene_types"][i])
line_dict["gene_entities"] = new_gene_entities
line_dict["gene_wordidxss"] = new_gene_wordidxss
line_dict["gene_is_corrects"] = new_gene_is_corrects
line_dict["gene_types"] = new_gene_types
# Remove the hpoterms that are unsupervised copies
supervised_idxs = set()
unsupervised_idxs = set()
for i in range(len(line_dict["hpoterm_is_corrects"])):
if line_dict["hpoterm_is_corrects"][i] == "n":
unsupervised_idxs.add(i)
else:
supervised_idxs.add(i)
survived_unsuperv_idxs = set()
for i in unsupervised_idxs:
wordidxs = line_dict["hpoterm_wordidxss"][i]
found = False
for j in supervised_idxs:
if line_dict["hpoterm_wordidxss"][j] == wordidxs:
found = True
break
if not found:
survived_unsuperv_idxs.add(i)
to_keep = sorted(survived_unsuperv_idxs | supervised_idxs)
new_hpoterm_entities = []
new_hpoterm_wordidxss = []
new_hpoterm_is_corrects = []
new_hpoterm_types = []
for i in to_keep:
new_hpoterm_entities.append(line_dict["hpoterm_entities"][i])
new_hpoterm_wordidxss.append(line_dict["hpoterm_wordidxss"][i])
new_hpoterm_is_corrects.append(
line_dict["hpoterm_is_corrects"][i])
new_hpoterm_types.append(line_dict["hpoterm_types"][i])
line_dict["hpoterm_entities"] = new_hpoterm_entities
line_dict["hpoterm_wordidxss"] = new_hpoterm_wordidxss
line_dict["hpoterm_is_corrects"] = new_hpoterm_is_corrects
line_dict["hpoterm_types"] = new_hpoterm_types
# Create the sentence object where the two mentions appear
sentence = Sentence(
line_dict["doc_id"], line_dict["sent_id"],
line_dict["wordidxs"], line_dict["words"], line_dict["poses"],
line_dict["ners"], line_dict["lemmas"], line_dict["dep_paths"],
line_dict["dep_parents"], line_dict["bounding_boxes"])
# Skip weird sentences
if sentence.is_weird():
continue
gene_mentions = []
hpoterm_mentions = []
positive_relations = []
gene_wordidxs = set()
hpoterm_wordidxs = set()
# Iterate over each pair of (gene,phenotype) mentions
for g_idx in range(len(line_dict["gene_is_corrects"])):
g_wordidxs = TSVstring2list(
line_dict["gene_wordidxss"][g_idx], int)
for idx in g_wordidxs:
gene_wordidxs.add(idx)
gene_mention = Mention(
"GENE", line_dict["gene_entities"][g_idx],
[sentence.words[j] for j in g_wordidxs])
if line_dict["gene_is_corrects"][g_idx] == "n":
gene_mention.is_correct = None
elif line_dict["gene_is_corrects"][g_idx] == "f":
gene_mention.is_correct = False
elif line_dict["gene_is_corrects"][g_idx] == "t":
gene_mention.is_correct = True
else:
assert False
gene_mention.type = line_dict["gene_types"][g_idx]
assert not gene_mention.type.endswith("_UNSUP")
gene_mentions.append(gene_mention)
for h_idx in range(len(line_dict["hpoterm_is_corrects"])):
h_wordidxs = TSVstring2list(
line_dict["hpoterm_wordidxss"][h_idx], int)
for idx in h_wordidxs:
hpoterm_wordidxs.add(idx)
hpoterm_mention = Mention(
"hpoterm", line_dict["hpoterm_entities"][h_idx],
[sentence.words[j] for j in h_wordidxs])
if line_dict["hpoterm_is_corrects"][h_idx] == "n":
hpoterm_mention.is_correct = None
elif line_dict["hpoterm_is_corrects"][h_idx] == "f":
hpoterm_mention.is_correct = False
elif line_dict["hpoterm_is_corrects"][h_idx] == "t":
hpoterm_mention.is_correct = True
else:
assert False
hpoterm_mention.type = line_dict["hpoterm_types"][h_idx]
assert not hpoterm_mention.type.endswith("_UNSUP")
hpoterm_mentions.append(hpoterm_mention)
                    # Skip if the word indexes overlap
if set(g_wordidxs) & set(h_wordidxs):
continue
# Skip if the mentions are too far away
gene_start = gene_mention.wordidxs[0]
hpoterm_start = hpoterm_mention.wordidxs[0]
gene_end = gene_mention.wordidxs[-1]
hpoterm_end = hpoterm_mention.wordidxs[-1]
limits = sorted(
(gene_start, hpoterm_start, gene_end, hpoterm_end))
start = limits[0]
betw_start = limits[1]
betw_end = limits[2]
if betw_end - betw_start > 50:
continue
relation = Relation(
"GENEPHENO", gene_mention, hpoterm_mention)
# Supervise
supervise(relation, gene_mention, hpoterm_mention,
sentence)
if relation.is_correct:
positive_relations.append(
(gene_mention, hpoterm_mention))
# Print!
print(relation.tsv_dump())
# Create some artificial negative examples:
# for each (gene, phenotype) pair that is labelled as positive
# example, select one word w in the same sentence that (1) is not a
# gene mention candidate and (2) is not a phenotype mention
            # candidate, and add (gene, w) and (w, phenotype) as negative examples
avail_wordidxs = (
set(line_dict["wordidxs"]) - set(hpoterm_wordidxs)) - \
set(gene_wordidxs)
avail_wordidxs = list(avail_wordidxs)
if len(avail_wordidxs) > 0:
fake_rels = []
for (gene_mention, hpoterm_mention) in positive_relations:
other_word = sentence.words[random.choice(avail_wordidxs)]
fake_gene_mention = Mention(
"FAKE_GENE", other_word.lemma, [other_word, ])
fake_hpo_mention = Mention(
"FAKE_HPOTERM", other_word.lemma, [other_word, ])
fake_rel_1 = Relation(
"GENEPHENO_SUP_POSFAKEGENE", fake_gene_mention,
hpoterm_mention)
fake_rel_2 = Relation(
"GENEPHENO_SUP_POSFAKEHPO", gene_mention,
fake_hpo_mention)
fake_rel_1.is_correct = False
fake_rel_2.is_correct = False
# Print!
print(fake_rel_1.tsv_dump())
print(fake_rel_2.tsv_dump())
# Create more artificial negative examples:
# for each gene candidate G in the sentence, if the pattern G
# <Verb> X appears in the same sentence and X is not a phenotype
# mention candidate, add (gene, X) as negative examples
for gene_mention in gene_mentions:
try:
next_word = sentence.words[gene_mention.wordidxs[-1] + 1]
except IndexError:
continue
if re.search('^VB[A-Z]*$', next_word.pos) and \
next_word.word not in ["{", "}", "(", ")", "[", "]"]:
try:
after_next_word = sentence.words[
next_word.in_sent_idx + 1]
except IndexError:
continue
if after_next_word.in_sent_idx in hpoterm_wordidxs:
continue
fake_hpo_mention = Mention(
"FAKE_HPOTERM", after_next_word.lemma,
[after_next_word, ])
fake_rel = Relation(
"GENEPHENO_SUP_FAKEHPO", gene_mention,
fake_hpo_mention)
fake_rel.is_correct = False
print(fake_rel.tsv_dump())
# Create more artificial negative examples:
# as before but for phenotypes
for hpo_mention in hpoterm_mentions:
try:
next_word = sentence.words[hpo_mention.wordidxs[-1] + 1]
except IndexError:
continue
if re.search('^VB[A-Z]*$', next_word.pos) and \
next_word.word not in ["{", "}", "(", ")", "[", "]"]:
try:
after_next_word = sentence.words[
next_word.in_sent_idx + 1]
except IndexError:
continue
if after_next_word.in_sent_idx in gene_wordidxs:
continue
fake_gene_mention = Mention(
"FAKE_GENE", after_next_word.lemma,
[after_next_word, ])
fake_rel = Relation(
"GENEPHENO_SUP_FAKEGENE", fake_gene_mention,
hpo_mention)
fake_rel.is_correct = False
print(fake_rel.tsv_dump())
|
dd-genomics-master
|
archived/v0/code/ext_genepheno_candidates.py
|
#! /usr/bin/env python3
import fileinput
import re
from dstruct.Sentence import Sentence
from helper.easierlife import get_dict_from_TSVline, TSVstring2list, no_op, \
print_feature, BASE_DIR
import ddlib
def add_features_generic(mention_id, pheno_words, sentence):
# Use the generic feature library (ONLY!)
# Load dictionaries for keywords
ddlib.load_dictionary(BASE_DIR + "/dicts/features/pheno_var.tsv", "VARKW")
ddlib.load_dictionary(
BASE_DIR + "/dicts/features/pheno_patient.tsv", "PATIENTKW")
# Create the objects used by ddlib. ddlib interface is so ugly.
obj = dict()
obj['lemma'] = []
obj['words'] = []
obj['ner'] = []
obj['pos'] = []
obj['dep_graph'] = []
for word in sentence.words:
obj['lemma'].append(word.lemma)
obj['words'].append(word.word)
obj['ner'].append(word.ner)
obj['pos'].append(word.pos)
obj['dep_graph'].append(
str(word.dep_parent + 1) + "\t" + word.dep_path + "\t" +
str(word.in_sent_idx + 1))
word_obj_list = ddlib.unpack_words(
obj, lemma='lemma', pos='pos', ner='ner', words='words',
dep_graph='dep_graph', dep_graph_parser=ddlib.dep_graph_parser_triplet)
    pheno_span = ddlib.get_span(pheno_words[0].in_sent_idx, len(pheno_words))
    features = set()
    for feature in ddlib.get_generic_features_mention(
            word_obj_list, pheno_span):
features.add(feature)
for feature in features:
print_feature(sentence.doc_id, mention_id, feature)
# Add features
def add_features(mention_id, mention_words, sentence):
mention_wordidxs = []
for word in mention_words:
mention_wordidxs.append(word.in_sent_idx)
    # The first alphanumeric lemma on the left of the mention, if present
idx = mention_words[0].in_sent_idx - 1
left_lemma_idx = -1
left_lemma = ""
while idx >= 0 and not sentence.words[idx].word.isalnum():
idx -= 1
try:
left_lemma = sentence.words[idx].lemma
try:
float(left_lemma)
left_lemma = "_NUMBER"
except ValueError:
pass
left_lemma_idx = idx
print_feature(
sentence.doc_id, mention_id, "NGRAM_LEFT_1_[{}]".format(
left_lemma))
except IndexError:
pass
    # The first alphanumeric lemma on the right of the mention, if present
idx = mention_wordidxs[-1] + 1
right_lemma_idx = -1
right_lemma = ""
while idx < len(sentence.words) and not sentence.words[idx].word.isalnum():
idx += 1
try:
right_lemma = sentence.words[idx].lemma
try:
float(right_lemma)
right_lemma = "_NUMBER"
except ValueError:
pass
right_lemma_idx = idx
print_feature(
sentence.doc_id, mention_id, "NGRAM_RIGHT_1_[{}]".format(
right_lemma))
except IndexError:
pass
# The lemma "two on the left" of the mention, if present
try:
print_feature(sentence.doc_id, mention_id, "NGRAM_LEFT_2_[{}]".format(
sentence.words[left_lemma_idx - 1].lemma))
print_feature(
sentence.doc_id, mention_id, "NGRAM_LEFT_2_C_[{} {}]".format(
sentence.words[left_lemma_idx - 1].lemma, left_lemma))
except IndexError:
pass
# The lemma "two on the right" on the left of the mention, if present
try:
print_feature(
sentence.doc_id, mention_id, "NGRAM_RIGHT_2_[{}]".format(
sentence.words[right_lemma_idx + 1].lemma))
print_feature(
sentence.doc_id, mention_id, "NGRAM_RIGHT_2_C_[{} {}]".format(
right_lemma, sentence.words[right_lemma_idx + 1].lemma))
except IndexError:
pass
# The keywords that appear in the sentence with the mention
minl = 100
minp = None
minw = None
for word in mention_words:
for word2 in sentence.words:
if word2.lemma in KEYWORDS:
(p, l) = sentence.get_word_dep_path(
word.in_sent_idx, word2.in_sent_idx)
kw = word2.lemma
if word2.lemma in PATIENT_KWS:
kw = "_HUMAN"
print_feature(
sentence.doc_id, mention_id, "KEYWORD_[" + kw + "]" + p)
if l < minl:
minl = l
minp = p
minw = kw
# Special feature for the keyword on the shortest dependency path
if minw:
print_feature(
sentence.doc_id, mention_id,
'EXT_KEYWORD_MIN_[' + minw + ']' + minp)
print_feature(
sentence.doc_id, mention_id, 'KEYWORD_MIN_[' + minw + ']')
# The verb closest to the candidate
minl = 100
minp = None
minw = None
for word in mention_words:
for word2 in sentence.words:
if word2.word.isalpha() and re.search('^VB[A-Z]*$', word2.pos) \
and word2.lemma != 'be':
(p, l) = sentence.get_word_dep_path(
word.in_sent_idx, word2.in_sent_idx)
if l < minl:
minl = l
minp = p
minw = word2.lemma
if minw:
print_feature(
sentence.doc_id, mention_id, 'VERB_[' + minw + ']' + minp)
# Keywords that seem to appear with phenotypes
VAR_KWS = frozenset([
"abnormality", "affect", "apoptosis", "association", "cancer", "carcinoma",
"case", "cell", "chemotherapy", "clinic", "clinical", "chromosome",
"cronic", "deletion", "detection", "diagnose", "diagnosis", "disease",
"drug", "family", "gene", "genome", "genomic", "genotype", "give", "grade",
"group", "history", "infection", "inflammatory", "injury", "mutation",
"pathway", "phenotype", "polymorphism", "prevalence", "protein", "risk",
"severe", "stage", "symptom", "syndrome", "therapy", "therapeutic",
"treat", "treatment", "variant" "viruses", "virus"])
PATIENT_KWS = frozenset(
["boy", "girl", "man", "woman", "men", "women", "patient", "patients"])
KEYWORDS = VAR_KWS | PATIENT_KWS
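# Illustrative example of the keyword features above: if the sentence contains
# "patients", its lemma "patient" is in PATIENT_KWS, so it is rewritten to
# "_HUMAN" and a "KEYWORD_[_HUMAN]<dep path>" feature is printed; the keyword
# with the shortest dependency path to the mention additionally yields
# "EXT_KEYWORD_MIN_[_HUMAN]<dep path>" and "KEYWORD_MIN_[_HUMAN]".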
if __name__ == "__main__":
# Process the input
with fileinput.input() as input_files:
for line in input_files:
# Parse the TSV line
line_dict = get_dict_from_TSVline(
line, ["doc_id", "sent_id", "wordidxs", "words", "poses",
"ners", "lemmas", "dep_paths", "dep_parents",
"mention_id", "mention_wordidxs"],
[no_op, int, lambda x: TSVstring2list(x, int), TSVstring2list,
TSVstring2list, TSVstring2list, TSVstring2list,
TSVstring2list, lambda x: TSVstring2list(x, int),
no_op, lambda x: TSVstring2list(x, int)])
# Create the sentence object
null_list = [None, ] * len(line_dict["wordidxs"])
sentence = Sentence(
line_dict["doc_id"], line_dict["sent_id"],
line_dict["wordidxs"], line_dict["words"], line_dict["poses"],
line_dict["ners"], line_dict["lemmas"], line_dict["dep_paths"],
line_dict["dep_parents"], null_list)
if sentence.is_weird():
continue
mention_words = []
for mention_wordidx in line_dict["mention_wordidxs"]:
mention_words.append(sentence.words[mention_wordidx])
add_features(line_dict["mention_id"], mention_words, sentence)
|
dd-genomics-master
|
archived/v0/code/ext_pheno_features.py
|
#! /usr/bin/env python3
from helper.dictionaries import load_dict
if __name__ == "__main__":
merged_genes_dict = load_dict("merged_genes")
inverted_long_names = load_dict("inverted_long_names")
hpoterms_orig = load_dict("hpoterms_orig")
for long_name in inverted_long_names:
for hpoterm_name in hpoterms_orig:
if long_name in hpoterm_name.split() and \
long_name.casefold() != hpoterm_name.casefold():
print("\t".join((long_name, hpoterm_name)))
|
dd-genomics-master
|
archived/v0/code/find_genes_in_hpoterms.py
|
#! /usr/bin/env python3
#
# Perform comparison between the existing HPO mapping and the dump from DeepDive
#
# Take the output from canonicalize.py
import sys
if len(sys.argv) != 3:
sys.stderr.write("USAGE: {} hpo dump\n".format(sys.argv[0]))
sys.exit(1)
hpo_genes = set()
hpo_ids = set()
hpo_mappings = set()
with open(sys.argv[1], 'rt') as hpo:
for line in hpo:
tokens = line.strip().split("\t")
gene = tokens[0]
hpo_id = tokens[1]
assert hpo_id.startswith("HP:")
hpo_genes.add(gene)
hpo_ids.add(hpo_id)
hpo_mappings.add("_".join((gene, hpo_id)))
dump_genes = set()
dump_ids = set()
dump_mappings = set()
relation_ids = dict()
with open(sys.argv[2], 'rt') as dump:
for line in dump:
tokens = line.strip().split("\t")
relation_id = tokens[0]
gene = tokens[1]
assert "\\N" not in gene
hpo_id = tokens[2]
assert hpo_id.startswith("HP:")
dump_genes.add(gene)
dump_ids.add(hpo_id)
rel_string = "_".join((gene, hpo_id))
dump_mappings.add(rel_string)
if rel_string not in relation_ids:
relation_ids[rel_string] = set()
relation_ids[rel_string].add(relation_id)
print("### HPO (existing mapping) ###")
print("Non-zero Entries: {}".format(len(hpo_mappings)))
print("\"Covered\" Genes: {}".format(len(hpo_genes)))
print("\"Covered\" Phenotypes: {}".format(len(hpo_ids)))
print("### DeepDive Dump ###")
print("Non-zero Entries: {}".format(len(dump_mappings)))
print("\"Covered\" Genes: {}".format(len(dump_genes)))
print("\"Covered\" Phenotypes: {}".format(len(dump_ids)))
print("### Comparison ###")
print("Non-zero Entries in both: {}".format(
len(hpo_mappings & dump_mappings)))
print("Non-zero Entries only in HPO: {}".format(
len(hpo_mappings - dump_mappings)))
print("Non-zero Entries only in DD Dump: {}".format(
len(dump_mappings - hpo_mappings)))
print("\"Covered\" Genes in both: {}".format(len(hpo_genes & dump_genes)))
print("\"Covered\" Genes only in HPO: {}".format(len(hpo_genes - dump_genes)))
print("\"Covered\" Genes only in DD Dump: {}".format(
len(dump_genes - hpo_genes)))
print("\"Covered\" Phenotypes in both: {}".format(len(hpo_ids & dump_ids)))
print("\"Covered\" Phenotypes only in HPO: {}".format(len(hpo_ids - dump_ids)))
print("\"Covered\" Phenotypes only in DD Dump: {}".format(
len(dump_ids - hpo_ids)))
print("#######")
printed_rels = set()
for new_rel in dump_mappings - hpo_mappings:
for rel_id in set(relation_ids[new_rel]) - printed_rels:
print(rel_id)
printed_rels.add(rel_id)
|
dd-genomics-master
|
archived/v0/code/compare_dump_to_hpo.py
|
#! /usr/bin/env python3
#
# Take the JSON output of the NLPparser extractor and convert it to TSV that
# we can feed to the database using COPY FROM. The schema of the table is the
# same as the 'sentences' table except for an additional column at the end
# which is the gene that we know the geneRif contains.
import fileinput
import json
import sys
from helper.easierlife import list2TSVarray
if len(sys.argv) < 2:
sys.stderr.write("USAGE: {} GENE_RIFS_DICT PARSER_OUTPUT [PARSER_OUTPUT [PARSER_OUTPUT [...]]]\n".format(sys.argv[0]))
sys.exit(1)
genes = []
with open(sys.argv[1], 'rt') as gene_rifs_dict_file:
for line in gene_rifs_dict_file:
tokens = line.strip().split("\t")
genes.append(tokens[1])
with fileinput.input(sys.argv[2:]) as input_files:
for line in input_files:
line_dict = json.loads(line)
doc_id = line_dict["doc_id"]
sent_id = line_dict["sent_id"]
words = line_dict["words"]
wordidxs = [x for x in range(len(words))]
poses = line_dict["poses"]
ners = line_dict["ners"]
lemmas = line_dict["lemmas"]
dep_paths_orig = line_dict["dep_paths"]
bounding_boxes = ["empty"] * len(words)
gene_index = int(doc_id.split("-")[-1])
# Compute dependency path edge labels and node parents
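        # Illustrative example (assuming Stanford-style dependency strings):
        # "nsubj(interacts-3, BRCA1-1)" yields dep_paths[0] = "nsubj" and
        # dep_parents[0] = 2, i.e. the 0-based index of "interacts".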
dep_paths = ["_"] * len(words)
dep_parents = [0] * len(words)
for dep_path in dep_paths_orig:
tokens = dep_path.split("(")
dep_parent = int((tokens[1].split(", ")[0]).split("-")[-1]) - 1
dep_child = int((tokens[1].split(", ")[-1]).split("-")[-1][:-1]) - 1
dep_paths[dep_child] = tokens[0]
dep_parents[dep_child] = dep_parent
print("{}".format("\t".join([doc_id, str(sent_id),
list2TSVarray(wordidxs), list2TSVarray(words,
quote=True), list2TSVarray(poses, quote=True),
list2TSVarray(ners), list2TSVarray(lemmas, quote=True),
list2TSVarray(dep_paths, quote=True),
list2TSVarray(dep_parents),
list2TSVarray(bounding_boxes), genes[gene_index]])))
|
dd-genomics-master
|
archived/v0/code/parser2geneRifs.py
|
#! /usr/bin/env python3
import fileinput
import re
from dstruct.Mention import Mention
from dstruct.Sentence import Sentence
from dstruct.Relation import Relation
from helper.dictionaries import load_dict
from helper.easierlife import get_dict_from_TSVline, no_op, TSVstring2bool, \
TSVstring2list
# Add features
# 20141125 As of now, we use the same features from the gene/hpoterm relation
# extractor.
# TODO Look at Emily's code to understand what features to add
def add_features(relation, gene_1_mention, gene_2_mention, sentence):
# Find the start/end indices of the mentions composing the relation
gene_1_start = gene_1_mention.wordidxs[0]
gene_2_start = gene_2_mention.wordidxs[0]
gene_1_end = gene_1_mention.wordidxs[-1]
gene_2_end = gene_2_mention.wordidxs[-1]
limits = sorted((gene_1_start, gene_2_start, gene_1_end, gene_2_end))
start = limits[0]
betw_start = limits[1]
betw_end = limits[2]
end = limits[3]
# If the gene comes first, we do not prefix, otherwise we do.
# TODO We should think about it because it may not be necessary: the order
# shouldn't really matter, but I may be wrong.
# TODO We should also be careful when specifying the input query, to avoid
# creating first on one order and then on the other.
if start == gene_1_start:
inv = ""
else:
inv = "INV_"
# Verbs between the mentions
# A lot of this comes from Emily's code
ws = []
verbs_between = []
minl_gene_1 = 100
minp_gene_1 = None
minw_gene_1 = None
mini_gene_1 = None
minl_gene_2 = 100
minp_gene_2 = None
minw_gene_2 = None
mini_gene_2 = None
neg_found = False
    # Emily's code was only looking at the words between the mentions, but it
    # is more correct (in my opinion) to look at all the words, since in the
    # dependency path there could be words that are close to both mentions
    # but not between them
#for i in range(betw_start+1, betw_end):
    for i in range(len(sentence.words)):
if "," not in sentence.words[i].lemma:
ws.append(sentence.words[i].lemma)
# Feature for separation between entities
# TODO Think about merging these?
if "while" == sentence.words[i].lemma:
relation.add_feature("SEP_BY_[while]")
if "whereas" == sentence.words[i].lemma:
relation.add_feature("SEP_BY_[whereas]")
# The filtering of the brackets and commas is from Emily's code. I'm
# not sure it is actually needed, but it won't hurt.
if re.search('^VB[A-Z]*', sentence.words[i].pos) and \
sentence.words[i].word != "{" and \
sentence.words[i].word != "}" and \
"," not in sentence.words[i].word:
p_gene_1 = sentence.get_word_dep_path(betw_start,
sentence.words[i].in_sent_idx)
p_gene_2 = sentence.get_word_dep_path(
sentence.words[i].in_sent_idx, betw_end)
if len(p_gene_1) < minl_gene_1:
minl_gene_1 = len(p_gene_1)
minp_gene_1 = p_gene_1
minw_gene_1 = sentence.words[i].lemma
mini_gene_1 = sentence.words[i].in_sent_idx
if len(p_gene_2) < minl_gene_2:
minl_gene_2 = len(p_gene_2)
minp_gene_2 = p_gene_2
minw_gene_2 = sentence.words[i].lemma
mini_gene_2 = sentence.words[i].in_sent_idx
# Look for negation.
if i > 0:
if sentence.words[i-1].lemma in ["no", "not", "neither", "nor"]:
if i < betw_end - 2:
neg_found = True
relation.add_feature(inv + "NEG_VERB_[" +
sentence.words[i-1].word + "]-" +
sentence.words[i].lemma)
elif sentence.words[i] != "{" and sentence.words[i] != "}":
verbs_between.append(sentence.words[i].lemma)
# TODO This idea of 'high_quality_verb' is taken from Emily's code, but
# it's still not clear to me what it implies
high_quality_verb = False
if len(verbs_between) == 1 and not neg_found:
relation.add_feature(inv + "SINGLE_VERB_[%s]" % verbs_between[0])
if verbs_between[0] in ["interact", "associate", "bind", "regulate", "phosporylate", "phosphorylated"]:
high_quality_verb = True
else:
for verb in verbs_between:
relation.add_feature(inv + "VERB_[%s]" % verb)
    if mini_gene_2 == mini_gene_1 and mini_gene_1 is not None and \
            len(minp_gene_1) < 50:  # and "," not in minw_gene_1:
# feature = inv + 'MIN_VERB_[' + minw_gene_1 + ']' + minp_gene_1
# features.append(feature)
feature = inv + 'MIN_VERB_[' + minw_gene_1 + ']'
relation.add_feature(feature)
else:
        if mini_gene_1 is not None:
# feature = 'MIN_VERB_gene_1_[' + minw_gene_1 + ']' + minp_gene_1
# relation.add_feature(feature)
feature = inv + 'MIN_VERB_GENE_1_[' + minw_gene_1 + ']'
relation.add_feature(feature)
        if mini_gene_2 is not None:
# feature = 'MIN_VERB_gene_2_[' + minw_gene_2 + ']' + minp_gene_2)
# relation.add_feature(feature)
feature = inv + 'MIN_VERB_GENE_2_[' + minw_gene_2 + ']'
relation.add_feature(feature)
# Shortest dependency path between the two mentions
relation.add_feature(inv + "DEP_PATH_[" + sentence.dep_path(gene_1_mention,
gene_2_mention) + "]")
# The sequence of lemmas between the two mentions
if len(ws) < 7 and len(ws) > 0 and "{" not in ws and "}" not in ws and \
"\"" not in ws and "/" not in ws and "\\" not in ws and \
"," not in ws and \
" ".join(ws) not in ["_ and _", "and", "or", "_ or _"]:
relation.add_feature(inv + "WORD_SEQ_[%s]" % " ".join(ws))
# Number of words between the mentions
relation.add_feature(inv + "WORD_SEQ_LEN_[%d]" % len(ws))
# The sequence of lemmas between the two mentions but using the NERs, if
# present
seq_list = []
for word in sentence.words[betw_start+1:betw_end]:
if word.ner != "O":
seq_list.append(word.ner)
else:
seq_list.append(word.lemma)
seq = "_".join(seq_list)
relation.add_feature(inv + "WORD_SEQ_NER_[" + seq + "]")
# Lemmas on the left and on the right
if gene_1_start > 0:
relation.add_feature("GENE_1_NGRAM_LEFT_1_[" +
sentence.words[gene_1_start-1].lemma + "]")
if gene_1_end < len(sentence.words) - 1:
relation.add_feature("GENE_1_NGRAM_RIGHT_1_[" +
sentence.words[gene_1_end+1].lemma + "]")
if gene_2_start > 0:
relation.add_feature("GENE_2_NGRAM_LEFT_1_[" +
sentence.words[gene_2_start-1].lemma + "]")
if gene_2_end < len(sentence.words) - 1:
relation.add_feature("GENE_2_NGRAM_RIGHT_1_[" +
sentence.words[gene_2_end+1].lemma + "]")
if __name__ == "__main__":
# Process input
with fileinput.input() as input_files:
for line in input_files:
# Parse the TSV line
line_dict = get_dict_from_TSVline(
line, ["doc_id", "sent_id", "wordidxs", "words", "poses",
"ners", "lemmas", "dep_paths", "dep_parents",
"bounding_boxes", "gene_1_entity", "gene_1_wordidxs",
"gene_1_is_correct", "gene_1_type",
"gene_2_entity", "gene_2_wordidxs",
"gene_2_is_correct", "gene_2_type"],
[no_op, int, lambda x: TSVstring2list(x, int), TSVstring2list,
TSVstring2list, TSVstring2list, TSVstring2list,
TSVstring2list, lambda x: TSVstring2list(x, int),
TSVstring2list, no_op, lambda x: TSVstring2list(x, int),
TSVstring2bool, no_op, no_op, lambda x: TSVstring2list(x,
int), TSVstring2bool, no_op])
# Create the sentence object where the two mentions appear
sentence = Sentence(
line_dict["doc_id"], line_dict["sent_id"],
line_dict["wordidxs"], line_dict["words"], line_dict["poses"],
line_dict["ners"], line_dict["lemmas"], line_dict["dep_paths"],
line_dict["dep_parents"], line_dict["bounding_boxes"])
# Create the mentions
gene_1_mention = Mention(
"GENE", line_dict["gene_1_entity"],
[sentence.words[j] for j in line_dict["gene_1_wordidxs"]])
gene_1_mention.is_correct = line_dict["gene_1_is_correct"]
gene_1_mention.type = line_dict["gene_1_type"]
gene_2_mention = Mention(
"GENE", line_dict["gene_2_entity"],
[sentence.words[j] for j in line_dict["gene_2_wordidxs"]])
gene_2_mention.is_correct = line_dict["gene_2_is_correct"]
gene_2_mention.type = line_dict["gene_2_type"]
# If the word indexes do not overlap, create the relation candidate
# TODO there may be other cases. Check with Emily.
if not set(line_dict["gene_1_wordidxs"]) & \
set(line_dict["gene_2_wordidxs"]):
relation = Relation(
"GENEGENE", gene_1_mention, gene_2_mention)
# Add features
add_features(relation, gene_1_mention, gene_2_mention,
sentence)
# Supervise
# One of the two mentions (or both) is labelled as False
# We do not create a copy in this case because there will
# already be an unsupervised copy built on the unsupervised
# copies of the mentions.
if gene_1_mention.is_correct is False or \
gene_2_mention.is_correct is False:
relation.is_correct = False
relation.type = "GENEGENE_SUP_F"
# TODO Check in Emily's code how to supervise as True
# Print!
print(relation.tsv_dump())
|
dd-genomics-master
|
archived/v0/code/gene_gene_relations.py
|
#! /usr/bin/env python3
""" An object representing a relation
"""
import json
from helper.easierlife import list2TSVarray
class Relation(object):
doc_id = None
sent_id_1 = None
sent_id_2 = None
type = None
mention_1_id = None
mention_2_id = None
mention_1_words = None
mention_2_words = None
is_correct = None
def __init__(self, _type, mention_1, mention_2):
self.doc_id = mention_1.words[0].doc_id
self.sent_id_1 = mention_1.words[0].sent_id
self.sent_id_2 = mention_2.words[0].sent_id
self.mention_1_id = mention_1.id()
self.mention_2_id = mention_2.id()
self.type = _type
self.mention_1_words = mention_1.words
self.mention_2_words = mention_2.words
def id(self):
return "RELATION_{}_{}_{}_{}_{}_{}_{}_{}".format(
self.type, self.doc_id, self.sent_id_1, self.sent_id_2,
self.mention_1_words[0].in_sent_idx,
self.mention_1_words[-1].in_sent_idx,
self.mention_2_words[0].in_sent_idx,
self.mention_2_words[-1].in_sent_idx)
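    # For example (illustrative): a GENEPHENO relation in document "doc1",
    # sentence 7 (both mentions in the same sentence), spanning words 2-3 and
    # 10-12, gets the id "RELATION_GENEPHENO_doc1_7_7_2_3_10_12".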
def json_dump(self):
return json.dumps(
{"id": None, "doc_id": self.doc_id, "sent_id_1": self.sent_id_1,
"sent_id_2": self.sent_id_2, "relation_id": self.id(),
"type": self.type, "mention_id_1": self.mention_1_id,
"mention_id_2": self.mention_2_id,
"wordidxs_1": [x.in_sent_idx for x in self.mention_1_words],
"wordidxs_2": [x.in_sent_idx for x in self.mention_2_words],
"words_1": [x.word for x in self.mention_1_words],
"words_2": [x.word for x in self.mention_2_words],
"is_correct": self.is_correct})
def tsv_dump(self):
is_correct_str = "\\N"
if self.is_correct is not None:
is_correct_str = self.is_correct.__repr__()
return "\t".join(
["\\N", self.doc_id, str(self.sent_id_1), str(self.sent_id_2),
self.id(), self.type, self.mention_1_id, self.mention_2_id,
list2TSVarray([x.in_sent_idx for x in self.mention_1_words]),
list2TSVarray([x.in_sent_idx for x in self.mention_2_words]),
list2TSVarray([x.word for x in self.mention_1_words], True),
list2TSVarray([x.word for x in self.mention_2_words], True),
is_correct_str])
|
dd-genomics-master
|
archived/v0/code/dstruct/Relation.py
|
dd-genomics-master
|
archived/v0/code/dstruct/__init__.py
|
|
#! /usr/bin/env python3
""" A Sentence class
Basically a container for an array of Word objects, plus doc_id and sent_id.
Originally obtained from the 'pharm' repository, but modified.
"""
from dstruct.Word import Word
class Sentence(object):
    # to avoid bad parse trees that have self-recursion
_MAX_DEP_PATH_LEN = 1000
doc_id = None
sent_id = None
words = []
def __init__(self, _doc_id, _sent_id, _wordidxs, _words, _poses, _ners,
_lemmas, _dep_paths, _dep_parents, _bounding_boxes):
self.doc_id = _doc_id
self.sent_id = _sent_id
wordidxs = _wordidxs
words = _words
poses = _poses
ners = _ners
lemmas = _lemmas
dep_paths = _dep_paths
dep_parents = _dep_parents
bounding_boxes = _bounding_boxes
self.words = []
if _wordidxs: # checking for None
for i in range(len(wordidxs)):
word = Word(self.doc_id, self.sent_id, wordidxs[i], words[i],
poses[i], ners[i], lemmas[i], dep_paths[i],
dep_parents[i], bounding_boxes[i])
self.words.append(word)
# Return a list of the indexes of all words in the dependency path from
# the word at index word_index to the root
def get_path_till_root(self, word_index):
path = []
c = word_index
MAX_DEP_PATH_LEN = self._MAX_DEP_PATH_LEN
while MAX_DEP_PATH_LEN > 0:
MAX_DEP_PATH_LEN = MAX_DEP_PATH_LEN - 1
try:
# c == -1 means we found the root
if c == -1:
break
path.append(c)
c = self.words[c].dep_parent
except:
break
return path
# Given two paths returned by get_path_till_root, find the least common
# ancestor, i.e., the one farthest away from the root. If there is no
# common ancestor, return None
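    # For example (illustrative): with path1 = [4, 2, 0] and path2 = [5, 2, 0]
    # the two paths share [0, 2] from the root, so 2 is returned.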
def get_common_ancestor(self, path1, path2):
# The paths are sorted from leaf to root, so reverse them
path1_rev = path1[:]
path1_rev.reverse()
path2_rev = path2[:]
path2_rev.reverse()
i = 0
while i < min(len(path1_rev), len(path2_rev)) and \
path1_rev[i] == path2_rev[i]:
i += 1
if path1_rev[i-1] != path2_rev[i-1]:
# No common ancestor found
return None
else:
return path1_rev[i-1]
# XXX (Matteo) The following is the function as it was in pharma.
# The logic seemed more complicated to understand for me.
# parent = None
# for i in range(max(len(path1), len(path2))):
# tovisit = 0 - i - 1
# if i >= len(path1) or i >= len(path2):
# break
# if path1[tovisit] != path2[tovisit]:
# break
# parent = path1[tovisit]
# return parent
    # Given two word indexes idx1 and idx2, where idx2 is an ancestor of idx1,
    # return, for each word 'w' on the dependency path from idx1 to idx2, the
    # label on the edge to 'w' followed by the NER tag of 'w', or by its lemma
    # if the NER tag is 'O' (see Word.get_feature()), together with the length
    # of the path
def get_direct_dependency_path_between_words(
self, idx1, idx2, use_pos=False):
words_on_path = []
c = idx1
length = 0
MAX_DEP_PATH_LEN = self._MAX_DEP_PATH_LEN
while MAX_DEP_PATH_LEN > 0:
MAX_DEP_PATH_LEN -= 1
try:
if c == -1:
break
elif c == idx2:
break
elif c == idx1:
# we do not include the NER tag/lemma for idx1
words_on_path.append(str(self.words[c].dep_path))
else:
words_on_path.append(str(self.words[c].dep_path) + "|" +
self.words[c].get_feature(use_pos))
c = self.words[c].dep_parent
length += 1
except:
break
return (words_on_path, length)
    # Given two word indexes idx1 and idx2, return the dependency path feature
    # between them, together with its length
def get_word_dep_path(self, idx1, idx2, use_pos=False):
path1 = self.get_path_till_root(idx1)
path2 = self.get_path_till_root(idx2)
parent = self.get_common_ancestor(path1, path2)
(words_from_idx1_to_parents, length_1) = \
self.get_direct_dependency_path_between_words(
idx1, parent, use_pos)
(words_from_idx2_to_parents, length_2) = \
self.get_direct_dependency_path_between_words(
idx2, parent, use_pos)
if parent is None:
root_str = "@ROOT@"
else:
root_str = "@"
return ("-".join(words_from_idx1_to_parents) + root_str +
"-".join(words_from_idx2_to_parents), length_1 + length_2)
# Given a mention, return the word before the first word of the mention,
# if present
def get_prev_wordobject(self, mention):
begin = mention.words[0].in_sent_idx
if begin - 1 < 0:
return None
else:
return self.words[begin - 1]
# Given a mention, return the word after the last word of the mention, if
# present
def get_next_wordobject(self, mention):
end = mention.words[-1].in_sent_idx
if end == len(self.words) - 1:
return None
else:
return self.words[end + 1]
def dep_parent(self, mention):
begin = mention.words[0].in_sent_idx
end = mention.words[-1].in_sent_idx
paths = []
for i in range(begin, end+1):
for j in range(0, len(self.words)):
if j >= begin and j <= end:
continue
(path, length) = self.get_word_dep_path(i, j)
paths.append(path)
return sorted(paths, key=len)[0:min(5, len(paths))]
    # Given two entities, return the feature of the shortest dependency path
    # between a word of one entity and a word of the other.
def dep_path(self, entity1_words, entity2_words, use_pos=False):
begin1 = entity1_words[0].in_sent_idx
end1 = entity1_words[-1].in_sent_idx
begin2 = entity2_words[0].in_sent_idx
end2 = entity2_words[-1].in_sent_idx
min_len = 10000000000
min_p = None
for idx1 in range(begin1, end1+1):
for idx2 in range(begin2, end2+1):
(path, length) = self.get_word_dep_path(idx1, idx2, use_pos)
if length < min_len:
min_p = path
min_len = length
return (min_p, min_len)
# Return True if the sentence is 'weird', according to the following
# criteria:
# 1) It contains more than 12 floats
# 2) It contains many "no" / "yes" / "na"
# 3) It contains many "—"
# 4) It contains many ";"
# 5) It is longer than 150 words
def is_weird(self):
if len(self.words) > 150:
return True
count_floats = 0
count_NA = 0
count_minus = 0
count_semicolon = 0
for word in self.words:
try:
float(word.word)
count_floats += 1
except ValueError:
pass
if word.word in ["NA", "Yes", "No"]:
count_NA += 1
elif word.word == "—":
count_minus += 1
elif word.word == ";":
count_semicolon += 1
if count_floats > 12 or count_NA > 6 or count_minus > 10 or \
count_semicolon > 6:
return True
else:
return False
|
dd-genomics-master
|
archived/v0/code/dstruct/Sentence.py
|
#! /usr/bin/env python3
""" A generic Mention class
Originally obtained from the 'pharm' repository, but modified.
"""
import json
from helper.easierlife import list2TSVarray
class Mention(object):
doc_id = None
sent_id = None
wordidxs = None
type = None
entity = None
words = None
is_correct = None
right_lemma = ""
left_lemma = ""
def __init__(self, _type, _entity, _words):
self.doc_id = _words[0].doc_id
self.sent_id = _words[0].sent_id
self.wordidxs = sorted([word.in_sent_idx for word in _words])
self.type = _type
self.entity = _entity
# These are Word objects
self.words = _words
self.is_correct = None
def __repr__(self):
return " ".join([w.word for w in self.words])
def id(self):
return "MENTION_{}_{}_{}_{}_{}".format(
self.type, self.doc_id, self.sent_id, self.wordidxs[0],
self.wordidxs[-1])
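    # Illustrative example (hypothetical values, not from the original code):
    # for a mention with type "GENE", doc_id "DOC123", sent_id 5 and
    # wordidxs [7, 8], id() returns "MENTION_GENE_DOC123_5_7_8".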
# Dump self to a json object
def json_dump(self):
json_obj = {"id": None, "doc_id": self.doc_id, "sent_id": self.sent_id,
"wordidxs": self.wordidxs, "mention_id": self.id(), "type":
self.type, "entity": self.entity,
"words": [w.word for w in self.words],
"is_correct": self.is_correct}
return json.dumps(json_obj)
# Dump self to a TSV line
def tsv_dump(self):
is_correct_str = "\\N"
if self.is_correct is not None:
is_correct_str = self.is_correct.__repr__()
tsv_line = "\t".join(
["\\N", self.doc_id, str(self.sent_id),
list2TSVarray(self.wordidxs), self.id(), self.type,
self.entity, list2TSVarray(self.words, quote=True),
is_correct_str])
return tsv_line
|
dd-genomics-master
|
archived/v0/code/dstruct/Mention.py
|
#! /usr/bin/env python3
""" A Word class
Originally obtained from the 'pharm' repository, but modified.
"""
class Word(object):
doc_id = None
sent_id = None
in_sent_idx = None
word = None
pos = None
ner = None
lemma = None
dep_path = None
dep_parent = None
box = None
def __init__(self, _doc_id, _sent_id, _in_sent_idx, _word, _pos, _ner,
_lemma, _dep_path, _dep_parent, _box):
self.doc_id = _doc_id
self.sent_id = _sent_id
self.in_sent_idx = _in_sent_idx
self.word = _word
self.pos = _pos
self.ner = _ner
self.dep_parent = _dep_parent
self.dep_path = _dep_path
self.box = _box
self.lemma = _lemma
        # If we do not do the following, outputting an Array in the language
        # will crash
# XXX (Matteo) This was in the pharm code, not sure what it means
# I actually don't think this should go here.
# self.lemma = self.lemma.replace('"', "''")
# self.lemma = self.lemma.replace('\\', "_")
def __repr__(self):
return self.word
# Return the NER tag if different than 'O', otherwise return the lemma
def get_feature(self, use_pos=False):
if use_pos:
return self.pos
elif self.ner == 'O':
return self.lemma
else:
return self.ner
|
dd-genomics-master
|
archived/v0/code/dstruct/Word.py
|
dd-genomics-master
|
archived/v0/code/helper/__init__.py
|
|
#! /usr/bin/env python3
""" Helper functions to make our life easier.
Originally obtained from the 'pharm' repository, but modified.
"""
import fileinput
import json
import os.path
import sys
from dstruct.Sentence import Sentence
# BASE_DIR denotes the application directory
BASE_DIR, throwaway = os.path.split(os.path.realpath(__file__))
BASE_DIR = os.path.realpath(BASE_DIR + "/../..")
# Return the start and end indexes of all subsets of words in the sentence
# sent, with size at most max_phrase_length
def get_all_phrases_in_sentence(sent, max_phrase_length):
for start in range(len(sent.words)):
for end in reversed(range(start + 1, min(
len(sent.words), start + 1 + max_phrase_length))):
yield (start, end)
# Return Sentence objects from input lines
def get_input_sentences(input_files=sys.argv[1:]):
with fileinput.input(files=input_files) as f:
for line in f:
sent_dict = json.loads(line)
yield Sentence(
sent_dict["doc_id"], sent_dict["sent_id"],
sent_dict["wordidxs"], sent_dict["words"], sent_dict["poses"],
sent_dict["ners"], sent_dict["lemmas"], sent_dict["dep_paths"],
sent_dict["dep_parents"], sent_dict["bounding_boxes"])
# Given a TSV line, a list of keys, and a list of functions, return a dict
# like the one returned by json.loads()
def get_dict_from_TSVline(line, keys, funcs):
assert len(keys) == len(funcs)
line_dict = dict()
while line[-1] == '\n':
line = line[:-1]
tokens = line.split("\t")
assert len(tokens) == len(keys)
for i in range(len(tokens)):
token = tokens[i]
# skip the token if it's NULL
if token != "":
try:
line_dict[keys[i]] = funcs[i](token)
except:
pass
return line_dict
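# Illustrative sketch (hypothetical input, not from the original code):
#   get_dict_from_TSVline("DOC1\t1|^|2\n", ["doc_id", "ids"],
#                         [no_op, lambda x: TSVstring2list(x, int)])
# would return {"doc_id": "DOC1", "ids": [1, 2]}.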
# Return the argument
def no_op(x):
return x
# Transform a TSV string into a boolean
def TSVstring2bool(string):
if string == "t":
return True
elif string == "f":
return False
else:
return None
# Transform a string obtained by postgresql array_str() into a list.
# The parameter func() gets applied to the elements of the list
def TSVstring2list(string, func=(lambda x: x), sep="|^|"):
tokens = string.split(sep)
return [func(x) for x in tokens]
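# Illustrative sketch (hypothetical inputs, not from the original code):
#   TSVstring2list("1|^|2|^|3", func=int) returns [1, 2, 3], and
#   TSVstring2list("a|^|b") returns ["a", "b"].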
# Transform a string obtained by postgresql into a dictionary whose keys are in
# the form "TSV_X" where X is an integer from 0. Each value of the dict is a
# list obtained using TSVstring2list.
def TSVstring2dict(string, func=(lambda x: x), sep="|^^"):
tsv_dict = dict()
i = 0
tokens = string.split(sep)
for token in tokens:
tsv_dict["TSV_" + str(i)] = TSVstring2list(token)
i += 1
return tsv_dict
# Convert a list to a string that can be used in a TSV column and interpreted
# as an array by the PostgreSQL COPY FROM command.
# If 'quote' is True, then double quote the string representation of the
# elements of the list, and escape double quotes and backslashes.
def list2TSVarray(a_list, quote=False):
if quote:
for index in range(len(a_list)):
if "\\" in str(a_list[index]):
# Replace '\' with '\\\\"' to be accepted by COPY FROM
a_list[index] = str(a_list[index]).replace("\\", "\\\\\\\\")
            # This must happen after the previous substitution
if "\"" in str(a_list[index]):
# Replace '"' with '\\"' to be accepted by COPY FROM
a_list[index] = str(a_list[index]).replace("\"", "\\\\\"")
string = ",".join(list(map(lambda x: "\"" + str(x) + "\"", a_list)))
else:
string = ",".join(list(map(lambda x: str(x), a_list)))
return "{" + string + "}"
def print_feature(doc_id, mention_id, feature):
if "\\" in feature:
# Replace '\' with '\\\\"' to be accepted by COPY FROM
feature = feature.replace("\\", "\\\\\\\\")
    # This must happen after the previous substitution
if "\"" in feature:
# Replace '"' with '\\"' to be accepted by COPY FROM
feature = feature.replace("\"", "\\\\\"")
print(doc_id + "\t" + mention_id + "\t\"" + feature + "\"")
|
dd-genomics-master
|
archived/v0/code/helper/easierlife.py
|
#! /usr/bin/env python3
from helper.easierlife import BASE_DIR
# Load an example dictionary
# 1st column is doc id, 2nd is sentence ids (separated by '|'), 3rd is entity
def load_examples_dictionary(filename):
examples = dict()
with open(filename, 'rt') as examples_dict_file:
for line in examples_dict_file:
tokens = line.rstrip().split("\t")
sent_ids = frozenset(tokens[1].split("|"))
examples[frozenset([tokens[0], tokens[2]])] = sent_ids
if tokens[1] == "":
examples[frozenset([tokens[0], tokens[2]])] = None
return examples
# Load the merged genes dictionary
def load_merged_genes_dictionary(filename):
merged_genes_dict = dict()
with open(filename, 'rt') as merged_genes_dict_file:
for line in merged_genes_dict_file:
tokens = line[:-1].split("\t")
symbol = tokens[0]
alternate_symbols = tokens[1].split("|")
names = tokens[2].split("|")
for sym in [symbol, ] + alternate_symbols + names:
if sym not in merged_genes_dict:
merged_genes_dict[sym] = []
merged_genes_dict[sym].append(symbol)
return merged_genes_dict
# Load the genes dictionary
def load_genes_dictionary(filename):
genes_dict = dict()
with open(filename, 'rt') as genes_dict_file:
for line in genes_dict_file:
tokens = line.strip().split("\t")
# first token is symbol, second is csv list of synonyms
symbol = tokens[0]
genes_dict[symbol] = symbol
for synonym in tokens[1].split(","):
genes_dict[synonym] = symbol
return genes_dict
# Load the gene long names dictionary
def load_long_names_dictionary(filename):
long_names_dict = dict()
with open(filename, 'rt') as long_names_dict_file:
for line in long_names_dict_file:
tokens = line[:-1].split("\t")
symbol = tokens[0]
alternate_symbols = tokens[1].split("|")
names = tokens[2].split("|")
for sym in [symbol, ] + alternate_symbols:
if sym not in long_names_dict:
long_names_dict[sym] = []
long_names_dict[sym] += names
return long_names_dict
# Load the inverted gene long names dictionary
def load_inverted_long_names_dictionary(filename):
long_names_dict = dict()
with open(filename, 'rt') as long_names_dict_file:
for line in long_names_dict_file:
tokens = line[:-1].split("\t")
symbol = tokens[0]
names = tokens[2].split("|")
for name in names:
if name not in long_names_dict:
long_names_dict[name] = []
long_names_dict[name].append(symbol)
return long_names_dict
def load_genes_in_hpoterms_dictionary(filename):
genes_in_hpoterms_dict = dict()
with open(filename, 'rt') as dict_file:
for line in dict_file:
tokens = line.strip().split("\t")
if tokens[0] not in genes_in_hpoterms_dict:
genes_in_hpoterms_dict[tokens[0]] = set()
genes_in_hpoterms_dict[tokens[0]].add(tokens[1])
return genes_in_hpoterms_dict
def load_hpoterms_with_gene_dictionary(filename):
hpoterms_with_gene_dict = dict()
with open(filename, 'rt') as dict_file:
for line in dict_file:
tokens = line.strip().split("\t")
hpoterms_with_gene_dict[tokens[1]] = tokens[0]
return hpoterms_with_gene_dict
def load_hpoterms_in_genes_dictionary(filename):
hpoterms_in_genes_dict = dict()
with open(filename, 'rt') as dict_file:
for line in dict_file:
tokens = line.strip().split("\t")
if tokens[0] not in hpoterms_in_genes_dict:
hpoterms_in_genes_dict[tokens[0]] = set()
hpoterms_in_genes_dict[tokens[0]].add(tokens[1])
return hpoterms_in_genes_dict
def load_genes_with_hpoterm_dictionary(filename):
genes_with_hpoterm_dict = dict()
with open(filename, 'rt') as dict_file:
for line in dict_file:
tokens = line.strip().split("\t")
genes_with_hpoterm_dict[tokens[1]] = tokens[0]
return genes_with_hpoterm_dict
# Load the HPO term levels
def load_hpoterm_levels_dictionary(filename):
hpo_level_dict = dict()
with open(filename, 'rt') as hpo_level_dict_file:
for line in hpo_level_dict_file:
hpo_id, name, c, level = line.strip().split("\t")
level = int(level)
if level not in hpo_level_dict:
hpo_level_dict[level] = set()
hpo_level_dict[level].add(hpo_id)
return hpo_level_dict
# Load the HPO parents
def load_hpoparents_dictionary(filename):
hpoparents_dict = dict()
with open(filename, 'rt') as hpoparents_dict_file:
for line in hpoparents_dict_file:
child, is_a, parent = line.strip().split("\t")
if child not in hpoparents_dict:
hpoparents_dict[child] = set()
hpoparents_dict[child].add(parent)
# Add 'All'
hpoparents_dict["HP:0000001"] = set(["HP:0000001", ])
return hpoparents_dict
# Load the HPO ancestors
def load_hpoancestors_dictionary(filename):
hpoparents_dict = load_hpoparents_dictionary(filename)
def get_ancestors(key):
if hpoparents_dict[key] == set([key, ]):
return hpoparents_dict[key]
else:
parents = hpoparents_dict[key]
ancestors = set(parents)
for parent in parents:
ancestors |= get_ancestors(parent)
return ancestors
hpoancestors_dict = dict()
with open(filename, 'rt') as hpoancestors_dict_file:
for line in hpoancestors_dict_file:
child, is_a, parent = line.strip().split("\t")
if child not in hpoancestors_dict:
hpoancestors_dict[child] = get_ancestors(child)
# Add 'All'
hpoancestors_dict["HP:0000001"] = set(["HP:0000001", ])
return hpoancestors_dict
# Load the HPO children
def load_hpochildren_dictionary(filename):
hpochildren_dict = dict()
with open(filename, 'rt') as hpochildren_dict_file:
for line in hpochildren_dict_file:
child, is_a, parent = line.strip().split("\t")
if parent not in hpochildren_dict:
hpochildren_dict[parent] = set()
hpochildren_dict[parent].add(child)
return hpochildren_dict
# Load the HPOterms original dictionary
# Terms are converted to lower case
def load_hpoterms_orig_dictionary(filename):
hpoterms_dict = dict()
with open(filename, 'rt') as hpoterms_dict_file:
for line in hpoterms_dict_file:
tokens = line.strip().split("\t")
# 1st token is name, 2nd is description, 3rd is 'C' and 4th is
# (presumably) the distance from the root of the DAG.
name = tokens[0]
description = tokens[1]
# Skip "All"
# XXX (Matteo) There may be more generic terms that we want to skip
if description == "All":
continue
description_words = description.split()
variants = get_variants(description_words)
for variant in variants:
hpoterms_dict[variant.casefold()] = name
return hpoterms_dict
# Load the HPOterms 'mentions' dictionary (output of hpoterms2mentions.py)
# Maps stem sets to hpo names
def load_hpoterms_dictionary(filename):
_hpoterms_dict = dict()
with open(filename, 'rt') as _hpoterms_dict_file:
for line in _hpoterms_dict_file:
hpoterm_id, name, stems = line[:-1].split("\t")
stems_set = frozenset(stems.split("|"))
if stems_set not in _hpoterms_dict:
_hpoterms_dict[stems_set] = set()
_hpoterms_dict[stems_set].add(name)
return _hpoterms_dict
# Load the inverted HPOterms 'mentions' dictionary
# Map hpo names to stem sets
def load_hpoterms_inverted_dictionary(filename):
_hpoterms_dict = dict()
with open(filename, 'rt') as _hpoterms_dict_file:
for line in _hpoterms_dict_file:
hpoterm_id, name, stems = line[:-1].split("\t")
stems_set = frozenset(stems.split("|"))
_hpoterms_dict[name] = stems_set
return _hpoterms_dict
# Load the HPO "name" to "id" dictionary
def load_hponames_to_ids_dictionary(filename):
_hpoterms_dict = dict()
with open(filename, 'rt') as _hpoterms_dict_file:
for line in _hpoterms_dict_file:
hpoterm_id, name, stems = line[:-1].split("\t")
_hpoterms_dict[name] = hpoterm_id
return _hpoterms_dict
# Load the medical acronyms dictionary
def load_medacrons_dictionary(filename):
medacrons_dict = dict()
with open(filename, 'rt') as medacrons_dict_file:
for line in medacrons_dict_file:
tokens = line.strip().split("\t")
# 1st token is acronym, 2nd is definition
name = tokens[0]
definition = tokens[1].casefold()
medacrons_dict[definition] = name
return medacrons_dict
# Load a dictionary which is a set.
def load_set(filename):
_set = set()
with open(filename, 'rt') as set_file:
for line in set_file:
line = line.rstrip()
_set.add(line)
return _set
# Load a dictionary which is a set, but convert the entries to lower case
def load_set_lower_case(filename):
case_set = load_set(filename)
lower_case_set = set()
for entry in case_set:
lower_case_set.add(entry.casefold())
return lower_case_set
# Load a dictionary which is a set of pairs, where the pairs are frozensets
def load_set_pairs(filename):
pair_set = set()
with open(filename, 'rt') as set_file:
for line in set_file:
tokens = line.rstrip().split("\t")
pair_set.add(frozenset(tokens[0:2]))
return pair_set
# Dictionaries
GENES_DICT_FILENAME = BASE_DIR + "/dicts/hugo_synonyms.tsv"
GENES_IN_HPOTERMS_DICT_FILENAME = BASE_DIR + "/dicts/genes_in_hpoterms.tsv"
ENGLISH_DICT_FILENAME = BASE_DIR + "/dicts/english_words.tsv"
GENEHPOTERM_DICT_FILENAME = BASE_DIR + \
"/dicts/genes_to_hpo_terms_with_synonyms.tsv"
HPOPARENTS_DICT_FILENAME = BASE_DIR + "/dicts/hpo_dag.tsv"
HPOTERMS_ORIG_DICT_FILENAME = BASE_DIR + "/dicts/hpo_terms.tsv"
# NON PRUNED HPOTERMS_DICT_FILENAME = BASE_DIR + "/dicts/hpoterm_mentions.tsv"
HPOTERMS_DICT_FILENAME = BASE_DIR + "/dicts/hpoterm_abnormalities_mentions.tsv"
HPOTERM_PHENOTYPE_ABNORMALITIES_DICT_FILENAME = BASE_DIR + \
"/dicts/hpoterm_phenotype_abnormalities.tsv"
HPOTERMS_IN_GENES_DICT_FILENAME = BASE_DIR + "/dicts/hpoterms_in_genes.tsv"
MED_ACRONS_DICT_FILENAME = BASE_DIR + "/dicts/med_acronyms_pruned.tsv"
MERGED_GENES_DICT_FILENAME = BASE_DIR + "/dicts/merged_genes_dict.tsv"
NIH_GRANTS_DICT_FILENAME = BASE_DIR + "/dicts/grant_codes_nih.tsv"
NSF_GRANTS_DICT_FILENAME = BASE_DIR + "/dicts/grant_codes_nsf.tsv"
STOPWORDS_DICT_FILENAME = BASE_DIR + "/dicts/english_stopwords.tsv"
POS_GENE_MENTIONS_DICT_FILENAME = BASE_DIR + \
"/dicts/positive_gene_mentions.tsv"
NEG_GENE_MENTIONS_DICT_FILENAME = BASE_DIR + \
"/dicts/negative_gene_mentions.tsv"
# Dictionary of dictionaries. First argument is the filename, second is the
# function to call to load the dictionary. The function must take the filename
# as input and return an object like a dictionary, or a set, or a list, ...
dictionaries = dict()
dictionaries["genes"] = [GENES_DICT_FILENAME, load_genes_dictionary]
dictionaries["genes_in_hpoterms"] = [GENES_IN_HPOTERMS_DICT_FILENAME,
load_genes_in_hpoterms_dictionary]
dictionaries["genes_with_hpoterm"] = [HPOTERMS_IN_GENES_DICT_FILENAME,
load_genes_with_hpoterm_dictionary]
dictionaries["english"] = [ENGLISH_DICT_FILENAME, load_set_lower_case]
dictionaries["genehpoterms"] = [GENEHPOTERM_DICT_FILENAME, load_set_pairs]
dictionaries["hpoparents"] = [HPOPARENTS_DICT_FILENAME,
load_hpoparents_dictionary]
dictionaries["hpoancestors"] = [HPOPARENTS_DICT_FILENAME,
load_hpoancestors_dictionary]
dictionaries["hpochildren"] = [HPOPARENTS_DICT_FILENAME,
load_hpochildren_dictionary]
dictionaries["hpolevels"] = [HPOTERMS_ORIG_DICT_FILENAME,
load_hpoterm_levels_dictionary]
dictionaries["hponames_to_ids"] = [HPOTERMS_DICT_FILENAME,
load_hponames_to_ids_dictionary]
dictionaries["hpoterms"] = [HPOTERMS_DICT_FILENAME, load_hpoterms_dictionary]
dictionaries["hpoterms_inverted"] = [HPOTERMS_DICT_FILENAME,
load_hpoterms_inverted_dictionary]
dictionaries["hpoterm_phenotype_abnormalities"] = [
HPOTERM_PHENOTYPE_ABNORMALITIES_DICT_FILENAME, load_set]
dictionaries["hpoterms_orig"] = [HPOTERMS_ORIG_DICT_FILENAME,
load_hpoterms_orig_dictionary]
dictionaries["hpoterms_in_genes"] = [HPOTERMS_IN_GENES_DICT_FILENAME,
load_hpoterms_in_genes_dictionary]
dictionaries["hpoterms_with_gene"] = [GENES_IN_HPOTERMS_DICT_FILENAME,
load_hpoterms_with_gene_dictionary]
dictionaries["nih_grants"] = [NIH_GRANTS_DICT_FILENAME, load_set]
dictionaries["nsf_grants"] = [NSF_GRANTS_DICT_FILENAME, load_set]
dictionaries["med_acrons"] = [MED_ACRONS_DICT_FILENAME,
load_medacrons_dictionary]
dictionaries["merged_genes"] = [MERGED_GENES_DICT_FILENAME,
load_merged_genes_dictionary]
dictionaries["long_names"] = [MERGED_GENES_DICT_FILENAME,
load_long_names_dictionary]
dictionaries["inverted_long_names"] = [MERGED_GENES_DICT_FILENAME,
load_inverted_long_names_dictionary]
dictionaries["stopwords"] = [STOPWORDS_DICT_FILENAME, load_set]
dictionaries["pos_gene_mentions"] = [POS_GENE_MENTIONS_DICT_FILENAME,
load_examples_dictionary]
dictionaries["neg_gene_mentions"] = [NEG_GENE_MENTIONS_DICT_FILENAME,
load_examples_dictionary]
# Load a dictionary using the appropriate filename and load function
def load_dict(dict_name):
filename = dictionaries[dict_name][0]
load = dictionaries[dict_name][1]
return load(filename)
# Given a list of words, return a list of variants built by splitting words
# that contain the separator.
# An example is more valuable:
# let words = ["the", "cat/dog", "is", "mine"], the function would return ["the
# cat is mine", "the dog is mine"]
# XXX (Matteo) Maybe goes in a different module
def get_variants(words, separator="/"):
if len(words) == 0:
return []
variants = []
base = []
i = 0
# Look for a word containing a "/"
while words[i].find(separator) == -1:
base.append(words[i])
i += 1
if i == len(words):
break
# If we found a word containing a "/", call recursively
if i < len(words):
        variants_starting_words = words[i].split(separator)
        following_variants = get_variants(words[i+1:], separator)
for variant_starting_word in variants_starting_words:
variant_base = base + [variant_starting_word]
if len(following_variants) > 0:
for following_variant in following_variants:
variants.append(" ".join(variant_base +
[following_variant]))
else:
variants.append(" ".join(variant_base))
else:
variants = [" ".join(base)]
return variants
|
dd-genomics-master
|
archived/v0/code/helper/dictionaries.py
|
#!/usr/bin/env python
# A script for seeing basic statistics about the number and type of gene mentions extracted
# Author: Alex Ratner <ajratner@stanford.edu>
# Created: 2015-01-25
import sys
import csv
import re
# Kinds of statistics tracked automatically by postgres "ANALYZE" command
# https://github.com/postgres/postgres/blob/master/src/include/catalog/pg_statistic.h
STAT_KINDS = {
1: 'most_common_values',
2: 'histogram',
3: 'correlation_to_row_order',
4: 'most_common_elements',
5: 'distinct_elements_count_histogram',
6: 'range_length_histogram',
7: 'bounds_histogram'}
def unnest(row):
return re.split(r'\"*,\"*', re.sub(r'^\"*\{\"*|\"*\}\"*$', '', row))
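# Illustrative sketch (hypothetical input, not from the original code):
#   unnest('{"foo","bar"}') and unnest('{1,2,3}') both strip the braces and
#   quotes and split on commas, returning ['foo', 'bar'] and ['1', '2', '3'].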
def process_pg_statistics(path_to_data, root_path_out):
with open(path_to_data, 'rb') as csvf:
csv_reader = csv.reader(csvf)
NUM_STATS = 0
STA_START = 0
for i,row in enumerate(csv_reader):
# figure out how many different statistics are contained in the table
if i == 0:
for j,cell in enumerate(row):
cell_match = re.match(r'stakind(\d+)', cell)
if cell_match is not None:
NUM_STATS = int(cell_match.group(1))
if NUM_STATS == 1:
STA_START = j
continue
# get the column this row is referring to
col = int(row[1])
            # for each row, unpack each separate statistic and output to file
for s in range(NUM_STATS):
# for each of the rows, identify which type of statistic
stat_kind = int(row[STA_START + s])
                if stat_kind in STAT_KINDS:
stat_label = STAT_KINDS[stat_kind]
elif stat_kind == 0:
continue
else:
stat_label = "Other_%s" % (stat_kind,)
# unpack and zip value / stat lists
vals = unnest(row[STA_START+(3*NUM_STATS)+s])
stats = unnest(row[STA_START+(2*NUM_STATS)+s])
data = zip(vals, stats)
if len(data) < 2:
continue
# output as csv file
with open("%s_%s_%s.csv" % (root_path_out, col, stat_label), 'wb') as data_out:
csv_writer = csv.writer(data_out)
for d in data:
csv_writer.writerow(d)
|
dd-genomics-master
|
archived/analysis/util/dd_analysis_utils.py
|
#!/usr/bin/env python
# Author: Alex Ratner <ajratner@stanford.edu>
# Created: 2015-01-25
import sys
import os
import csv
from collections import defaultdict
# there are 14 columns: 1 group_by col + 1 total_count + 1 labeled_true + 1 labeled_false + 10 bucket_n
N_COLS = 14
if __name__ == '__main__':
if len(sys.argv) < 4:
print "Process.py: Insufficient arguments"
else:
# get the correct dict
if sys.argv[3] == 'gene_mentions':
dict_name = 'merged_genes_dict'
elif sys.argv[3] == 'pheno_mentions' or sys.argv[3] == 'hpoterm_mentions':
dict_name = 'hpo_terms'
else:
dict_name = None
if dict_name is not None:
# load the data & mark seen entities
data = []
seen = defaultdict(lambda: None)
with open(sys.argv[1], 'rb') as f_in:
csv_reader = csv.reader(f_in)
for row in csv_reader:
data.append(row)
seen[row[0]] = True
# load the dict & append zero values to data for unseen entities
# NOTE: assume entity names in col 0
DICT_PATH = "%s/dicts/%s.tsv" % (os.environ['GDD_HOME'], dict_name)
with open(DICT_PATH, 'rb') as f_in:
tsv_reader = csv.reader(f_in, delimiter='\t')
for row in tsv_reader:
if seen[row[0]] is None:
data.append([row[0]] + [0]*(N_COLS-1))
# output the appended data
with open("%s_appended.csv" % (sys.argv[2],), 'wb') as f_out:
csv_writer = csv.writer(f_out)
for row in data:
csv_writer.writerow(row)
|
dd-genomics-master
|
archived/analysis/analyses/mentions-by-entity/process.py
|
#!/usr/bin/env python
# A script for seeing basic statistics about the number and type of gene mentions extracted
# Author: Alex Ratner <ajratner@stanford.edu>
# Created: 2015-01-25
import sys
from dd_analysis_utils import process_pg_statistics
if __name__ == '__main__':
if len(sys.argv) < 3:
print "Process.py: Insufficient arguments"
else:
process_pg_statistics(sys.argv[1], sys.argv[2])
|
dd-genomics-master
|
archived/analysis/analyses/postgres-stats/process.py
|
#!/usr/bin/env python
# Author: Alex Ratner <ajratner@stanford.edu>
# Created: 2015-01-25
import sys
import os
import csv
from collections import defaultdict
# there are 14 columns: 1 group_by col + 1 total_count + 1 labeled_true + 1 labeled_false + 10 bucket_n
N_COLS = 14
if __name__ == '__main__':
if len(sys.argv) < 4:
print "Process.py: Insufficient arguments"
else:
# get the correct dict
if sys.argv[3] == 'gene_mentions':
dict_name = 'merged_genes_dict'
elif sys.argv[3] == 'pheno_mentions' or sys.argv[3] == 'hpoterm_mentions':
dict_name = 'hpo_terms'
else:
dict_name = None
if dict_name is not None:
# load the data & mark seen entities
data = []
seen = defaultdict(lambda: None)
with open(sys.argv[1], 'rb') as f_in:
csv_reader = csv.reader(f_in)
for row in csv_reader:
data.append(row)
seen[row[0]] = True
# load the dict & append zero values to data for unseen entities
# NOTE: assume entity names in col 0
DICT_PATH = "%s/dicts/%s.tsv" % (os.environ['GDD_HOME'], dict_name)
with open(DICT_PATH, 'rb') as f_in:
tsv_reader = csv.reader(f_in, delimiter='\t')
for row in tsv_reader:
if seen[row[0]] is None:
data.append([row[0]] + [0]*(N_COLS-1))
# output the appended data
with open("%s_appended.csv" % (sys.argv[2],), 'wb') as f_out:
csv_writer = csv.writer(f_out)
for row in data:
csv_writer.writerow(row)
|
dd-genomics-master
|
archived/analysis/analyses/docs-by-entity/process.py
|
#!/bin/python
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
#
# This is the, uh, setup for librarian
#
setup(name="librarian",
version="0.0.dev1",
description="The Librarian maintains important datasets",
author="Jaeho, Abhinav, Mike",
author_email="michael.cafarella@gmail.com",
packages=["librarian"],
install_requires=["boto", "MySQL-python"],
#packages=["boto", "distutils"],
entry_points={
'console_scripts': ['librarian=librarian.librarian:main']
},
)
|
librarian-master
|
setup.py
|
import os
def list_files(directory):
if os.path.isfile(directory):
yield directory
        return
for f in os.listdir(directory):
name = directory + '/' + f
if os.path.isfile(name):
yield name
elif os.path.isdir(name):
for f in list_files(name):
yield f
if __name__ == '__main__':
for f in list_files(os.getcwd()):
print f[len(os.getcwd()):]
|
librarian-master
|
librarian/listfiles.py
|
######################################################################################
# #
# A temporary implementation which uses google sheets as the database for librarian  #
# #
# #
# Written by - Abhinav Rastogi, 02/16/2015 #
# #
######################################################################################
import os
from urllib2 import urlopen
from subprocess import call
import json
import re
import string
read_key = os.getenv('SHEETSDB_KEY_READ')
LIBRARIAN_SCHEMA = {
'Engagements':
['id', 'name', 'date_originated', 'owner', 'comments'],
'IncomingData':
        ['engagement_id', 'name', 'version', 'part', 'incoming_id', 'date_ingested', 'checksum', 's3path', 'comments'],
'OutgoingData':
['engagement_id', 'name', 'version', 'outgoing_id', 'date_produced', 'checksum', 's3path', 'published_report_s3path', 'comments'],
'ActivityLog':
['action_id', 'action_type', 'date', 'description', 'engagement_id', 'incoming_id', 'outoging_id']
}
def colname(i):
    if i < 26:
        return chr(ord('A') + i)
    else:
        # recurse for multi-letter column names and return the result
        return colname(i // 26 - 1) + colname(i % 26)
def col_names(num):
for c1 in [''] + list(string.ascii_uppercase):
for c2 in list(string.ascii_uppercase):
if num > 0:
yield c1 + c2
num -= 1
else:
                return
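# Illustrative sketch (not in the original code): col_names(4) yields the
# spreadsheet column letters 'A', 'B', 'C', 'D'; after 'Z' it continues with
# two-letter names 'AA', 'AB', ... These letters are used by query() below
# to refer to sheet columns.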
# Insert into sheet
def insert(table, **kwargs):
row = dict(kwargs)
if table not in LIBRARIAN_SCHEMA:
raise Exception("Table not found.")
if not set(row.keys()) <= set(LIBRARIAN_SCHEMA[table]):
raise Exception("Schema doesn't match. Insert aborted.")
cmd = ['./sheetsdb', 'insert', table]
for k, v in row.iteritems():
cmd.append(k + '=' + str(v))
call(cmd)
# Query from sheet based on an attribute
def query(table, **kwargs):
row = dict(kwargs)
schema = LIBRARIAN_SCHEMA[table]
col_dict = dict(zip(schema, col_names(len(schema))))
params = []
for k, v in row.iteritems():
params.append(col_dict[k] + '%3D' + repr(v))
query = 'select%20*'
if len(params) > 0:
query += '%20where%20' + '%20and%20'.join(params)
url = 'https://docs.google.com/spreadsheets/d/' + read_key + \
'/gviz/tq?tq=' + query + '&sheet=' + table
resp = urlopen(url)
res = resp.read()[39:-2]
# Parse the javascript dates
hasDate = re.search(r'new Date\([0-9,]*\)', res)
while hasDate:
a,b = hasDate.span()
y, m, d = map(int, res[a+9:b-1].split(','))
res = res[:a]+'"'+str(m+1)+'-'+str(d)+'-'+str(y)+'"'+res[b:]
hasDate = re.search(r'new Date\([0-9,]*\)', res)
# load the json object
res = json.loads(res)
cols = map(lambda x:x[u'label'], res[u'table'][u'cols'])
rows = [map(lambda x:x[u'v'] if x else None, r[u'c']) for r in res[u'table'][u'rows']]
return cols, rows
# id, name, date_originated, owner, comments
def add_Engagement(id, name, date, owner, comments):
args_dict = {'id':id, 'name':name, 'date_originated':date,
'owner':owner, 'comments':comments}
insert('Engagements', **args_dict)
# engagement_id, name, version, part, incoming_id, date_ingested, checksum, s3path, comments
def add_Incoming(id, name, ver, part, in_id, date, checksum, s3, comments):
args_dict = {'engagement_id':id, 'name':name, 'version':ver,
'part':part, 'incoming_id':in_id, 'date_ingested':date,
'checksum':checksum, 's3path':s3, 'comments':comments}
insert('IncomingData', **args_dict)
# engagement_id, name, version, outgoing_id, date_produced, checksum, s3path, published_report_s3path, comments
def add_Outgoing(id, name, ver, out_id, date, checksum, s3, report, comments):
args_dict = {'engagement_id':id, 'name':name, 'version':ver, 'outgoing_id':out_id,
'date_produced':date, 'checksum':checksum, 's3path':s3,
'published_report_s3path':report, 'comments':comments}
insert('OutgoingData', **args_dict)
# action_id, action_type, date, description, engagement_id, incoming_id, outoging_id
def add_ActivityLog(act_id, act_type, date, desc, eng_id, inc_id, out_id):
args_dict = {'action_id':act_id, 'action_type':act_type, 'date':date,
'description':desc, 'engagement_id':eng_id, 'incoming_id':inc_id,
'outoging_id':out_id}
insert('ActivityLog', **args_dict)
if __name__=='__main__':
# add_Engagement(0, 'memex', 'Sept-1-2014', 'mjc', 'This will be big!')
# add_Incoming(0, 'small sample of backpage', 0, 0, 0, 'Sept-5-2014', 'MD5...', 's3:dd-incoming/memex/small_sample_of_backpage/v_0000/data.tsv', 'more data expected later')
# add_Outgoing(0, 'Memex ht basic attrs', 0, 0, 'Nov-30-2014', 'MD5:...', 's3:dd-outgoing/memex/memex_ht_basic_attrs/v_0000/data_0000.tsv', 's3:dd-outgoing/memex/memex_ht_basic_attrs/v_0000/report.pdf', 'First shipment! This reflects input backpage data from the large-backpage-crawl up through Nov-1-2014')
# add_ActivityLog(0, 'CREATE', 'Jan-20-2015', 'Add new engagement', 3, 'NULL', 'NULL')
print query('ActivityLog', action_id=1)
|
librarian-master
|
librarian/database.py
|
#!/usr/bin/env python
"""Librarian Client Version 0.01
Librarian takes care of all files that leave/enter engagements. When
a partner provides a new datafile (as with Memex ads), they get added
to Librarian. When we ship extracted data elsewhere, they get added
to Librarian.
It can also be used to track standard utility files, like Wikipedia or
Freebase dumps.
It should NOT be used to hold temporary or working files.
"""
import argparse, boto, json, os.path, dbconn
##
# GLOBALS
##
configFilename = os.path.abspath(os.path.expanduser("~/.librarian"))
configDict = {"credentials":{}}
###############################################
class ConfigError(Exception):
"""ConfigError is a basic Exception wrapper class for this application"""
def __init__(self, msg):
self.msg = msg
##################################################
def loadConfig():
"""Grab config info from the ondisk file."""
if not os.path.exists(configFilename):
raise ConfigError("Librarian config file does not exist. Invoke librarian --init to create")
configFile = open(configFilename)
try:
global configDict
configDict = json.loads(configFile.read())
finally:
configFile.close()
def saveConfig():
"""Save config info to disk file."""
configFile = open(configFilename, "w")
try:
configFile.write(json.dumps(configDict))
finally:
configFile.close()
def configInit():
"""Create the local .librarian config file, if none previously existed"""
if os.path.exists(configFilename):
raise ConfigError("Cannot init. Librarian config file already exists at", configFilename)
saveConfig()
def addCredentials(credentialName, **credential):
"""Add a new credential to the config file for later use"""
loadConfig()
global configDict
configDict["credentials"][credentialName] = credential
saveConfig()
def checkS3():
"""Ensure we have valid S3 access"""
loadConfig()
def checkMetadata():
"""Ensure we have valid access to the Librarian metadata."""
loadConfig()
global dbconn
c = configDict["credentials"]["mysql"]
dbconn = dbconn.DBConn(c["user"], c["password"], c["host"], c["port"])
def put(fname, project, comment):
"""Check a file into Librarian"""
checkMetadata()
print "Put a file called", fname, "into project", project, "with comment", comment
def get(fname, project):
"""Get a file from Librarian"""
checkMetadata()
print "Get a file called", fname, "from project", project
def projectLs():
"""List all Librarian projects"""
checkMetadata()
print "List of all known projects:"
for name in dbconn.projectLs():
print name
def ls(projectname):
"""List all Librarian files for a single project"""
checkMetadata()
print "List of all datasets in a project named", projectname
for name, version, urls in dbconn.ls(projectname):
print name, version, urls
#
# main()
#
def main():
usage = "usage: %prog [options]"
# Setup cmdline parsing
parser = argparse.ArgumentParser(description="Librarian stores data")
parser.add_argument("--put", nargs=3, metavar=("filename", "project", "comment"), help="Puts a <filename> into a <project> with a <comment>")
parser.add_argument("--get", nargs=2, metavar=("filename", "project"), help="Gets a <filename> from a <project>")
parser.add_argument("--config", nargs=1, metavar=("configfile"), help="Location of the Librarian config file")
parser.add_argument("--lscreds", action="store_true", help="List all known credentials")
parser.add_argument("--set_aws_cred", nargs=2, metavar=("aws_access_key_id", "aws_secret_access_key"), help="Stores an AWS credential pair in the Librarian config file")
parser.add_argument("--set_mysql_cred", nargs=4, metavar=("mysql_host", "mysql_port", "mysql_user", "mysql_password"), help="Stores a MySQL connection info quadruple in the Librarian config file")
parser.add_argument("--ls", nargs=1, metavar=("project"), help="List all the files in a <project>")
parser.add_argument("--pls", action="store_true", default=False, help="List all projects")
parser.add_argument("--init", action="store_true", default=False, help="Create the initial config file")
parser.add_argument("--version", action="version", version="%(prog)s 0.1")
# Invoke either get() or put()
args = parser.parse_args()
try:
        if args.config is not None:
            # --config uses nargs=1, so args.config is a one-element list;
            # rebind the module-level configFilename used by loadConfig()
            global configFilename
            configFilename = os.path.abspath(args.config[0])
if args.init:
configInit()
elif args.set_aws_cred is not None and len(args.set_aws_cred) == 2:
addCredentials("aws", access_key_id=args.set_aws_cred[0]
, secret_access_key=args.set_aws_cred[1]
)
elif args.set_mysql_cred is not None and len(args.set_mysql_cred) == 4:
addCredentials("mysql", host=args.set_mysql_cred[0]
, port=args.set_mysql_cred[1]
, user=args.set_mysql_cred[2]
, password=args.set_mysql_cred[3]
)
elif args.pls:
projectLs()
elif args.ls is not None:
ls(args.ls[0])
elif args.put is not None and len(args.put) > 0:
put(args.put[0], args.put[1], args.put[2])
elif args.get is not None and len(args.get) > 0:
get(args.get[0], args.get[1])
elif args.lscreds:
loadConfig()
print "There are", len(configDict["credentials"]), "credential(s) available"
for name, cred in configDict["credentials"].iteritems():
# TODO print prettier
print " ", name, cred
else:
parser.print_help()
except ConfigError as e:
print e.msg
if __name__ == "__main__":
main()
|
librarian-master
|
librarian/librarian.py
|
librarian-master
|
librarian/__init__.py
|
|
#!/bin/python
"""Database connectivity for Librarian.
This module contains all the classes and miscellany necessary for
Librarian to connect to a shared backend RDBMS for metadata. It
is not designed to hold raw content, just the file names, version
history, checksums, etc.
Schema:
'Engagements': ['id', 'name', 'date_started', 'owner', 'comments'],
'IncomingData': ['id', 'project', 'name', 'version', 'timestamp',
'urls', 'checksums', 'metadata_url', 'comments', 'user', 'hostname']
'OutgoingData': ['id', 'project', 'name', 'version', 'timestamp',
'urls', 'checksums', 'metadata_url', 'comments', 'user', 'hostname']
"""
import MySQLdb, datetime
class DBConn:
"""Represents a live database conn with Librarian-specific operators."""
def __init__(self, username, password, host, port = 3306):
self.user = username
self.pswd = password
self.host = host
self.port = int(port)
try:
self.db = MySQLdb.connect(host=self.host, port=self.port, user=self.user,
passwd=self.pswd, db='librarian')
except:
raise Exception('Invalid credentials for librarian database')
def projectLs(self):
''' returns a generator listing all librarian projects '''
c = self.db.cursor()
try:
for _ in xrange(c.execute( '''select name from Engagements''')):
yield c.fetchone()[0]
except:
raise Exception('Database not available')
c.close()
def ls(self, project):
''' returns a generator listing all datasets and their versions in
a librarian project. The generator yield a (name, version) tuple.
'''
c = self.db.cursor()
def datasetQueryFor(table):
return '''
select ds.name, ds.version, ds.urls
from %s ds
join Engagements on ds.project = Engagements.id
where Engagements.name = %%s
''' % (table)
try:
for _ in xrange(c.execute(datasetQueryFor('IncomingData') +
' union ' + datasetQueryFor('OutgoingData'),
(project, project))):
yield c.fetchone()
except Exception as e:
raise Exception('Database not available', e)
c.close()
def createProject(self, project, comments=''):
''' Creates a new project in the database. This function should be called
after appropriate space has been allocated on the S3 bucket
'''
        if project in self.projectLs():
raise Exception('Project already exists!')
c = self.db.cursor()
date = datetime.date.today()
owner = self.user
c.execute('''insert into Engagements values (%s, %s, %s, %s)''',
(project, date, owner, comments))
c.close()
|
librarian-master
|
librarian/dbconn.py
|
#!/usr/bin/env python
# upload-s3.py -- Librarian script that takes care of uploading data to AWS S3
import boto
import boto.s3.connection
import os
import datetime
def list_files(directory):
''' Generator to recursively list all the files in a directory. '''
if os.path.isfile(directory):
yield directory
        return
for f in os.listdir(directory):
name = directory + '/' + f
if os.path.isfile(name):
yield name
elif os.path.isdir(name):
for f in list_files(name):
yield f
def upload(local_paths, project, dataset, timestamp):
''' Upload a file/directory to S3 path maintaining the directory
structure
'''
# get the s3 access keys
access_key = os.getenv('AWS_ACCESS_KEY_ID')
secret_key = os.getenv('AWS_SECRET_ACCESS_KEY')
conn = boto.s3.connection.S3Connection(
aws_access_key_id = access_key,
aws_secret_access_key = secret_key,
calling_format = boto.s3.connection.OrdinaryCallingFormat(),
)
# get the storage bucket
bucket = conn.get_bucket('librarian_upload_test')
# make a separate directory for the installation
store_dir = '/'.join([project, dataset, timestamp])
urls = []
checksums = []
# Upload all the files and directories pointed to by local_path
for local_path in local_paths:
for f in list_files(local_path):
key = bucket.new_key(store_dir + f[len(local_path):])
key.set_contents_from_filename(f)
key.set_acl('public-read')
url = key.generate_url(expires_in=0, query_auth=False)
urls.append(url)
checksums.append('') # TODO
# return the urls and checksums for uploaded objects
return urls, checksums
if __name__=='__main__':
print upload('/home/abhinav/Dropbox/github/librarian', 'folder_test')
|
librarian-master
|
librarian/storage_s3.py
|
librarian-master
|
tests/__init__.py
|
|
from nose.tools import *
import librarian
def setup():
print "SETUP!"
def teardown():
print "TEAR DOWN!"
def test_basic():
print "I RAN!"
|
librarian-master
|
tests/librarian_tests.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import cv2
import sys
import numpy as np
from utils import (
initialize_render, merge_meshes,
load_motion
)
import torch
from PIL import Image
from model import JOHMRLite
import os
import glob
import json
from pathlib import Path
import argparse
import re
import matplotlib.pyplot as plt
global model, index, alpha
index = 0
alpha = 0.5
def isfloat(x):
try:
a = float(x)
except (TypeError, ValueError):
return False
else:
return True
def isint(x):
try:
a = float(x)
b = int(a)
except (TypeError, ValueError):
return False
else:
return a == b
def find_files(folder, extension):
return sorted([Path(os.path.join(folder, f)) for f in os.listdir(folder) if f.endswith(extension)])
def read_data():
"""
Load all annotated data for visualization
"""
# load gt part motion values (degree or cm)
gt_partmotion = []
fp = open(os.path.join(args.data_folder, 'jointstate.txt'))
for i, line in enumerate(fp):
line = line.strip('\n')
if isfloat(line) or isint(line):
gt_partmotion.append(float(line))
gt_partmotion = np.asarray(gt_partmotion)
with open(os.path.join(args.data_folder, '3d_info.txt')) as myfile:
gt_data = [next(myfile).strip('\n') for x in range(14)]
# GT global object rotation
gt_pitch = float(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[3])[0])
gt_yaw = float(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[4])[0])
gt_roll = float(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[5])[0])
# GT global object translation (cm)
gt_x = float(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[6])[0])
gt_y = float(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[7])[0])
gt_z = float(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[8])[0])
# GT object dimension (cm)
gt_xdim = float(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[9])[0])
gt_ydim = float(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[9])[1])
gt_zdim = float(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[9])[2])
gt_cad = re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[10])[0]
gt_part = int(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[11])[0])
gt_focalX = int(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[-2])[0])
gt_focalY = int(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[-1])[0])
assert gt_focalX == gt_focalY
data = {'part_motion': gt_partmotion,
'pitch': gt_pitch,
'yaw': gt_yaw,
'roll': gt_roll,
'x_offset': gt_x,
'y_offset': gt_y,
'z_offset': gt_z,
'obj_size': [gt_xdim, gt_ydim, gt_zdim],
'cad': gt_cad,
'part': gt_part,
'focal': gt_focalX}
return data
def create_model(gt_data):
"""
create initial models
"""
global model, index, alpha
x_offset = gt_data['x_offset']
y_offset = gt_data['y_offset']
z_offset = gt_data['z_offset']
yaw = gt_data['yaw']
pitch = gt_data['pitch']
roll = gt_data['roll']
part_motion = gt_data['part_motion']
obj_size = gt_data['obj_size'] # length, height, width (x, y, z), cm
focal_x = gt_data['focal']
focal_y = gt_data['focal']
device = torch.device("cuda:0")
obj_path = os.path.join(args.cad_folder, gt_data['cad'])
verts, faces, vertexSegs, faceSegs = merge_meshes(obj_path, device)
verts[:,1:] *= -1 # pytorch3d -> world coordinate
obj_verts = verts.to(device)
obj_faces = faces.to(device)
# load motion json file
with open(os.path.join(args.cad_folder, gt_data['cad'], 'motion.json')) as json_file:
motions = json.load(json_file)
assert len(motions) + 2 == len(vertexSegs)
rot_o, rot_axis, rot_type, limit_a, limit_b, contact_list = load_motion(motions, device)
frames = find_files(os.path.join(args.data_folder, 'frames'), '.jpg')
image_bg = np.array(Image.open(frames[index]))/255.0
img_h = image_bg.shape[0]
img_w = image_bg.shape[1]
img_square = max(img_h, img_w)
img_small = 256
# render
_, phong_renderer = initialize_render(device, focal_x, focal_y, img_square, img_small)
# Model >_<
model = JOHMRLite(x_offset, y_offset, z_offset, yaw, pitch, roll, part_motion, obj_size, \
obj_verts, obj_faces, phong_renderer, gt_data['part'], rot_o, rot_axis, \
vertexSegs, rot_type)
return len(frames)
def display_img():
global model, index, alpha
frames = find_files(os.path.join(args.data_folder, 'frames'), '.jpg')
image_bg = np.array(Image.open(frames[index]))/255.0
img_h = image_bg.shape[0]
img_w = image_bg.shape[1]
img_square = max(img_h, img_w)
img_small = 256
with torch.no_grad():
image = model(index)
rgb_mask = image_bg.astype(np.float32) #cv2.addWeighted(objmask.astype(np.float32), 0.5, image_bg.astype(np.float32), 0.5, 0.0)
frame_img = np.zeros((img_square, img_square,3))
start = int((max(img_h, img_w) - min(img_h, img_w))/2) - 1
end = start + min(img_h, img_w)
if img_h > img_w:
frame_img[:, start:end, :] = rgb_mask
else:
frame_img[start:end, :, :] = rgb_mask
rgb_mask = frame_img
alpha = min(1.0, max(0.0,alpha))
img_blend = cv2.addWeighted(image.astype(np.float32), alpha, rgb_mask.astype(np.float32), 1-alpha, 0.0)
img_blend = cv2.resize(img_blend, dsize=(800, 800), interpolation=cv2.INTER_NEAREST)
return img_blend
parser = argparse.ArgumentParser()
parser.add_argument("--data_folder", type=str, help="annotation data folder")
parser.add_argument("--cad_folder", type=str, help="cad data folder")
args = parser.parse_args()
gt_data = read_data()
num_frames = create_model(gt_data)
for index in range(num_frames):
img_blend = display_img()
plt.imshow(img_blend)
plt.show()
|
d3d-hoi-main
|
visualization/visualize_data.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import torch.nn as nn
import torch
import numpy as np
from pytorch3d.renderer import (
look_at_view_transform, TexturesVertex
)
from pytorch3d.structures import Meshes
from utils import rotation_matrix
from pytorch3d.io import save_obj
from pytorch3d.transforms import (
RotateAxisAngle, matrix_to_euler_angles
)
from pytorch3d.transforms.rotation_conversions import (
rotation_6d_to_matrix, matrix_to_rotation_6d
)
import os
from pytorch3d.transforms import (
euler_angles_to_matrix
)
class JOHMRLite(nn.Module):
def __init__(self, x_offset, y_offset, z_offset, yaw, pitch, roll, part_motion, obj_size, \
obj_verts, obj_faces, vis_render, part_idx, rot_o, axis, vertexSegs, rot_type):
super().__init__()
self.device = obj_verts.device
self.vis_render = vis_render
self.obj_verts = obj_verts.detach()
self.obj_faces = obj_faces.detach()
self.rot_type = rot_type
self.x_offset = x_offset
self.y_offset = y_offset
self.z_offset = z_offset
self.part_motion = part_motion
# camera is almost at the center (distance can't be zero for diff render)
self.R, self.T = look_at_view_transform(0.1, 0.0, 0.0,device=self.device)
self.T[0,2] = 0.0 # manually set to zero
x_diff = torch.max(obj_verts[:,0]) - torch.min(obj_verts[:,0])
self.x_ratio = float(obj_size[0]) / x_diff
y_diff = torch.max(obj_verts[:,1]) - torch.min(obj_verts[:,1])
self.y_ratio = float(obj_size[1]) / y_diff
z_diff = torch.max(obj_verts[:,2]) - torch.min(obj_verts[:,2])
self.z_ratio = float(obj_size[2]) / z_diff
# predefined object CAD part and axis
self.vertexStart = vertexSegs[part_idx]
self.vertexEnd = vertexSegs[part_idx+1]
self.rot_o = rot_o[part_idx]
self.axis = axis[part_idx]
# pytorch3d -> world coordinate
self.rot_o[1:] *= -1
self.axis[1:] *= -1
# rescale object
self.obj_verts[:, 0] *= self.x_ratio
self.obj_verts[:, 1] *= self.y_ratio
self.obj_verts[:, 2] *= self.z_ratio
self.rot_o[0] *= self.x_ratio
self.rot_o[1] *= self.y_ratio
self.rot_o[2] *= self.z_ratio
euler_angle = torch.tensor([pitch, yaw, roll]).reshape(1,3)
self.objR = euler_angles_to_matrix(euler_angle, ["X","Y","Z"]).to(self.device)[0]
return
def forward(self, index):
partmotion = self.part_motion[index]
obj_verts = self.obj_verts.clone()
# part motion
if self.rot_type[0] == 'prismatic':
part_state = torch.tensor(partmotion).to(self.device)
obj_verts_t1 = obj_verts[self.vertexStart:self.vertexEnd, :] - self.rot_o
obj_verts_t2 = obj_verts_t1 + self.axis * part_state #/float(annotation['obj_dim'][2]) * z_ratio
obj_verts[self.vertexStart:self.vertexEnd, :] = obj_verts_t2 + self.rot_o
else:
part_state = torch.tensor(partmotion*0.0174533)
part_rot_mat = rotation_matrix(self.axis, part_state)
obj_verts_t1 = obj_verts[self.vertexStart:self.vertexEnd, :] - self.rot_o
obj_verts_t2 = torch.mm(part_rot_mat.to(self.device), obj_verts_t1.permute(1,0)).permute(1,0)
obj_verts[self.vertexStart:self.vertexEnd, :] = obj_verts_t2 + self.rot_o
# step 3: object orientation
obj_verts = torch.mm(self.objR, obj_verts.permute(1,0)).permute(1,0)
# step 4: object offset
obj_verts[:, 0] += 100.0*self.x_offset
obj_verts[:, 1] += 100.0*self.y_offset
obj_verts[:, 2] += 100.0*self.z_offset
obj_verts[:,1:] *= -1
# create object mesh for diff render and visualization
tex = torch.ones_like(obj_verts).unsqueeze(0)
tex[:, :, 0] = 0
tex[:, :, 1] = 1
tex[:, :, 2] = 0
textures = TexturesVertex(verts_features=tex).to(self.device)
self.obj_mesh = Meshes(verts=[obj_verts],faces=[self.obj_faces],textures=textures)
vis_image = self.vis_render(meshes_world=self.obj_mesh, R=self.R, T=self.T)
silhouette = vis_image[0,:,:,:3]
return silhouette.detach().cpu().numpy()
|
d3d-hoi-main
|
visualization/model.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
import torch
import natsort
import glob
import open3d as o3d
# rendering components
from pytorch3d.renderer import (
FoVPerspectiveCameras,RasterizationSettings,
MeshRenderer, MeshRasterizer, BlendParams,
SoftSilhouetteShader, HardPhongShader, PointLights, DirectionalLights,
PerspectiveCameras
)
from pytorch3d.io import save_obj
import math
import cv2
import matplotlib.pyplot as plt
import os
import imageio
from decimal import Decimal
import pdb
import json
from mpl_toolkits import mplot3d
from mpl_toolkits.mplot3d import axes3d
from scipy.ndimage.filters import gaussian_filter1d
from numpy.linalg import svd
from multiprocessing import Pool, Manager, cpu_count
from pytorch3d.transforms import Rotate, Translate
from matplotlib.image import imsave
from pathlib import Path
def planeFit(points):
"""
p, n = planeFit(points)
Given an array, points, of shape (d,...)
representing points in d-dimensional space,
fit an d-dimensional plane to the points.
Return a point, p, on the plane (the point-cloud centroid),
and the normal, n.
"""
    points = np.reshape(points, (np.shape(points)[0], -1)) # Collapse trailing dimensions
assert points.shape[0] <= points.shape[1], "There are only {} points in {} dimensions.".format(points.shape[1], points.shape[0])
ctr = points.mean(axis=1)
x = points - ctr[:,np.newaxis]
M = np.dot(x, x.T) # Could also use np.cov(x) here.
return ctr, svd(M)[0][:,-1]
def initialize_render(device, focal_x, focal_y, img_square_size, img_small_size):
""" initialize camera, rasterizer, and shader. """
# Initialize an OpenGL perspective camera.
#cameras = FoVPerspectiveCameras(znear=1.0, zfar=9000.0, fov=20, device=device)
#cameras = FoVPerspectiveCameras(device=device)
#cam_proj_mat = cameras.get_projection_transform()
img_square_center = int(img_square_size/2)
shrink_ratio = int(img_square_size/img_small_size)
focal_x_small = int(focal_x/shrink_ratio)
focal_y_small = int(focal_y/shrink_ratio)
img_small_center = int(img_small_size/2)
camera_sfm = PerspectiveCameras(
focal_length=((focal_x, focal_y),),
principal_point=((img_square_center, img_square_center),),
image_size = ((img_square_size, img_square_size),),
device=device)
camera_sfm_small = PerspectiveCameras(
focal_length=((focal_x_small, focal_y_small),),
principal_point=((img_small_center, img_small_center),),
image_size = ((img_small_size, img_small_size),),
device=device)
# To blend the 100 faces we set a few parameters which control the opacity and the sharpness of
# edges. Refer to blending.py for more details.
blend_params = BlendParams(sigma=1e-4, gamma=1e-4)
# Define the settings for rasterization and shading. Here we set the output image to be of size
# 256x256. To form the blended image we use 100 faces for each pixel. We also set bin_size and max_faces_per_bin to None which ensure that
# the faster coarse-to-fine rasterization method is used. Refer to rasterize_meshes.py for
# explanations of these parameters. Refer to docs/notes/renderer.md for an explanation of
# the difference between naive and coarse-to-fine rasterization.
raster_settings = RasterizationSettings(
image_size=img_small_size,
blur_radius=np.log(1. / 1e-4 - 1.) * blend_params.sigma,
faces_per_pixel=100,
)
# Create a silhouette mesh renderer by composing a rasterizer and a shader.
silhouette_renderer = MeshRenderer(
rasterizer=MeshRasterizer(
cameras=camera_sfm_small,
raster_settings=raster_settings
),
shader=SoftSilhouetteShader(blend_params=blend_params)
)
# We will also create a phong renderer. This is simpler and only needs to render one face per pixel.
raster_settings = RasterizationSettings(
image_size=img_square_size,
blur_radius=0.0,
faces_per_pixel=1,
)
# We can add a point light in front of the object.
lights = PointLights(device=device, location=((2.0, 2.0, -2.0),))
#lights = DirectionalLights(device=device, direction=((0, 0, 1),))
phong_renderer = MeshRenderer(
rasterizer=MeshRasterizer(
cameras=camera_sfm,
raster_settings=raster_settings
),
shader=HardPhongShader(device=device, cameras=camera_sfm, lights=lights)
)
return silhouette_renderer, phong_renderer
def merge_meshes(obj_path, device):
""" helper function for loading and merging meshes. """
verts_list = torch.empty(0,3)
faces_list = torch.empty(0,3).long()
num_vtx = [0]
num_faces = [0]
# merge meshes, load in ascending order
meshes = natsort.natsorted(glob.glob(obj_path+'/final/*_rescaled_sapien.obj'))
for part_mesh in meshes:
#print('loading %s' %part_mesh)
mesh = o3d.io.read_triangle_mesh(part_mesh)
verts = torch.from_numpy(np.asarray(mesh.vertices)).float()
faces = torch.from_numpy(np.asarray(mesh.triangles)).long()
faces = faces + verts_list.shape[0]
verts_list = torch.cat([verts_list, verts])
faces_list = torch.cat([faces_list, faces])
num_vtx.append(verts_list.shape[0])
num_faces.append(faces_list.shape[0])
verts_list = verts_list.to(device)
faces_list = faces_list.to(device)
return verts_list, faces_list, num_vtx, num_faces
def load_motion(motions, device):
""" load rotation axis, origin, and limit. """
rot_origin = []
rot_axis = []
rot_type = []
limit_a = []
limit_b = []
contact_list = []
# load all meta data
for idx, key in enumerate(motions.keys()):
jointData = motions[key]
# if contains movable parts
if jointData is not None:
origin = torch.FloatTensor(jointData['axis']['origin']).to(device)
axis = torch.FloatTensor(jointData['axis']['direction']).to(device)
mobility_type = jointData['type']
if 'contact' in jointData:
contact_list.append(jointData['contact'])
# convert to radians if necessary
if mobility_type == 'revolute':
mobility_a = math.pi*jointData['limit']['a'] / 180.0
mobility_b = math.pi*jointData['limit']['b'] / 180.0
else:
assert mobility_type == 'prismatic'
mobility_a = jointData['limit']['a']
mobility_b = jointData['limit']['b']
rot_origin.append(origin)
rot_axis.append(axis)
rot_type.append(mobility_type)
limit_a.append(mobility_a)
limit_b.append(mobility_b)
return rot_origin, rot_axis, rot_type, limit_a, limit_b, contact_list
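# Example usage (a minimal sketch; the joint dictionary below is a hypothetical stand-in for
# the motion.json files that ship with the SAPIEN CAD models):
def _example_load_motion():
    device = torch.device('cpu')
    motions = {
        'door': {
            'axis': {'origin': [0.0, 0.0, 0.0], 'direction': [0.0, 1.0, 0.0]},
            'type': 'revolute',
            'limit': {'a': 0.0, 'b': 90.0},   # degrees; converted to radians inside
        },
        'base': None,  # fixed part, skipped by the loop above
    }
    origins, axes, types, lim_a, lim_b, contacts = load_motion(motions, device)
    print(types, lim_a, lim_b)  # ['revolute'] [0.0] [~1.5708]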
def visualize(mask, image, alpha):
#mask = np.repeat(mask[:,:,np.newaxis], 3, axis=2)
mask_img_blend = cv2.addWeighted(mask, alpha, image.astype(np.float32), 1.0-alpha, 0)
mask_img_blend = mask_img_blend*mask + image*(1-mask)
return mask_img_blend
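# Example usage (a minimal sketch; assumes the mask is already 3-channel float32 so that
# cv2.addWeighted sees matching shapes -- a single-channel mask would first need
# np.repeat(mask[..., None], 3, axis=2), cf. the commented-out line above):
def _example_visualize():
    image = np.zeros((64, 64, 3), dtype=np.float32)
    mask = np.zeros((64, 64, 3), dtype=np.float32)
    mask[16:48, 16:48, :] = 1.0           # a square silhouette
    blended = visualize(mask, image, 0.8)
    print(blended.shape, blended.max())   # (64, 64, 3), ~0.8 inside the square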
def visualize_curve(data, output, save_folder, title):
mask_model = output['obj_mask']
spin_points = output['spin_points']
# plot curve
obj_curve = output['obj_curve']
spin_curve = output['spin_curve']
x_offset = spin_curve[0,0] - obj_curve[0,0]
y_offset = spin_curve[0,1] - obj_curve[0,1]
z_offset = spin_curve[0,2] - obj_curve[0,2]
obj_curve[:,0] += x_offset
obj_curve[:,1] += y_offset
obj_curve[:,2] += z_offset
fig = plt.figure()
ax = plt.axes(projection='3d')
#obj_curves = obj_curve_norm
ax.scatter(spin_curve[0,0], spin_curve[0,1], spin_curve[0,2], color='red')
ax.scatter(obj_curve[0,0], obj_curve[0,1], obj_curve[0,2], color='red')
ax.plot(obj_curve[:,0], obj_curve[:,1], obj_curve[:,2], label='object curve')
ax.plot(spin_curve[:,0], spin_curve[:,1], spin_curve[:,2], label='hand curve')
ax.legend()
ax.set_xlabel("X")
ax.set_ylabel("Y")
ax.set_zlabel("Z")
plt.show()
def save_mesh(id):
global obj_verts_dict
global obj_faces_dict
global human_verts_dict
global human_faces_dict
global save_path_mesh
verts1 = obj_verts_dict[str(id+1)]
verts2 = human_verts_dict[str(id+1)]
faces1 = obj_faces_dict[str(id+1)]
faces2 = human_faces_dict[str(id+1)]
verts = np.concatenate((verts1, verts2), axis=0)
faces = np.concatenate((faces1, faces2 + verts1.shape[0]), axis=0)
path = os.path.join(save_path_mesh, str(id+1)+'_object.obj')
save_obj(path, torch.from_numpy(verts1), torch.from_numpy(faces1))
path = os.path.join(save_path_mesh, str(id+1)+'_person.obj')
save_obj(path, torch.from_numpy(verts2), torch.from_numpy(faces2))
path = os.path.join(save_path_mesh, str(id+1)+'_joint.obj')
save_obj(path, torch.from_numpy(verts), torch.from_numpy(faces))
def save_meshes(meshes, save_folder, video_name, title):
global obj_verts_dict
global obj_faces_dict
global human_verts_dict
global human_faces_dict
global save_path_mesh
save_path_mesh = os.path.join(save_folder, title)
if not os.path.exists(save_path_mesh):
os.makedirs(save_path_mesh)
obj_meshes = meshes['obj_mesh']
spin_meshes = meshes['spin_mesh']
# merge object + SPIN meshes
obj_verts = {}
obj_faces = {}
human_verts = {}
human_faces = {}
for idx in range(len(obj_meshes)):
obj_verts[str(idx+1)] = obj_meshes[idx].verts_list()[0].detach().cpu().numpy()
obj_faces[str(idx+1)] = obj_meshes[idx].faces_list()[0].detach().cpu().numpy()
human_verts[str(idx+1)] = spin_meshes[idx].verts_list()[0].detach().cpu().numpy()
human_faces[str(idx+1)] = spin_meshes[idx].faces_list()[0].detach().cpu().numpy()
manager = Manager()
obj_verts_dict = manager.dict(obj_verts)
obj_faces_dict = manager.dict(obj_faces)
human_verts_dict = manager.dict(human_verts)
human_faces_dict = manager.dict(human_faces)
ids = [item for item in range(len(obj_meshes))]
pool = Pool(processes=12)
pool.map(save_mesh, ids)
'''
eft_cmd = 'python -m demo.demo_bodymocap --render wire --bg rgb --videoname '+video_name+' --vPath '+save_folder
os.chdir('/home/xuxiangx/research/eft')
os.system(eft_cmd)
save_path = os.path.join(save_folder, 'eft', 'front')
ffmpeg_cmd = 'ffmpeg -r 3 -i '+save_path+'/scene_%08d.jpg '+save_folder+'/frontview.mp4'
os.system(ffmpeg_cmd)
save_path = os.path.join(save_folder, 'eft', 'side')
ffmpeg_cmd = 'ffmpeg -r 3 -i '+save_path+'/scene_%08d.jpg '+save_folder+'/sideview.mp4'
os.system(ffmpeg_cmd)
'''
return
def save_parameters(model, save_folder, title):
save_path = os.path.join(save_folder, title)
if not os.path.exists(save_path):
os.makedirs(save_path)
obj_offset = 1000.0*model.obj_offset.detach().cpu().numpy() #(3,)
smpl_offset = 1000.0*model.smpl_offset.detach().cpu().numpy() #(bs,3)
obj_scale = 3000.0*model.obj_scale
smpl_scale = 3000.0
focal_len = model.focal
part_rot_angle = model.part_rot_params.detach().cpu().numpy() #(bs,)
obj_rot_mat = model.obj_rot_angle_mat[0].detach().cpu().numpy() #(3,3)
part_rot_mat = model.part_rot_mat.detach().cpu().numpy() #(bs,3,3)
K_mat = model.K.detach().cpu().numpy() #(3,3)
rot_o = model.rot_o.detach().cpu().numpy() #(3,)
rot_axis = model.axis.detach().cpu().numpy() #(3,)
parameters = {}
parameters['obj_offset'] = obj_offset
parameters['smpl_offset'] = smpl_offset
parameters['obj_scale'] = obj_scale
parameters['smpl_scale'] = smpl_scale
parameters['focal_length'] = focal_len
parameters['part_rot_angle'] = part_rot_angle
parameters['obj_rot_matrix'] = obj_rot_mat
parameters['part_rot_matrix'] = part_rot_mat
parameters['K_matrix'] = K_mat
parameters['rot_origin'] = rot_o
parameters['rot_axis'] = rot_axis
np.save(os.path.join(save_path, 'parameters.npy'), parameters)
return
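# Reading the file back (a sketch; save_folder and title are whatever was passed to
# save_parameters above -- np.save pickles the dict, so allow_pickle is required):
def _example_load_parameters(save_folder, title):
    params = np.load(os.path.join(save_folder, title, 'parameters.npy'),
                     allow_pickle=True).item()
    print(params['focal_length'], params['obj_scale'])
    return params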
def save_img(idx):
global shared_dict1
global shared_dict2
global save_path
roi_image = shared_dict1['image'].permute(0,2,3,1)[idx].numpy()
silhouette = shared_dict1['objmask'].permute(0,2,3,1)[idx]
mask_model = shared_dict2['obj_mask']
gt_points = shared_dict1['smplv2d']
spin_points = shared_dict2['spin_points']
silhouette_init = mask_model.detach().cpu().squeeze()[idx].numpy()
mask_img_blend = visualize(silhouette_init, roi_image, 0.8)
# save image
#plt.subplots_adjust(hspace = 0.2, left=0.01, right=0.99, top=0.95, bottom=0.05)
imsave(os.path.join(save_path, str(idx)+'.png'), mask_img_blend)
return
def save_imgs(data, output, save_folder):
global shared_dict1
global shared_dict2
global save_path
save_path = save_folder
manager = Manager()
shared_dict1 = manager.dict(data)
shared_dict1 = data
shared_dict2 = manager.dict(output)
shared_dict2 = output
ids = [item for item in range(data['image'].shape[0])]
pool = Pool(processes=12)
pool.map(save_img, ids)
#sceneflow = shared_dict2['sceneflow']
#objSurfaceFlow = shared_dict2['objSurfaceFlow']
#synFlow = shared_dict2['synFlow']
#sceneflowMaskSquareShrink = shared_dict2['sceneflowMaskSquareShrink']
#part_diff_images = shared_dict2['part_diff_images']
    # save object part surface RAFT flow visualization
#save_path = os.path.join(save_folder, title, 'objSurfaceFlow')
#if not os.path.exists(save_path):
#os.makedirs(save_path)
#for idx in range(objSurfaceFlow.shape[0]):
#imsave(os.path.join(save_path, str(idx)+'.png'), objSurfaceFlow[idx])
#ffmpeg_cmd = 'ffmpeg -r 3 -i '+save_path+'/%d.png '+save_folder+'/objSurfaceFlow.mp4'
#os.system(ffmpeg_cmd)
# save synthetic rendering flow visualization
#save_path = os.path.join(save_folder, title, 'synFlow')
#if not os.path.exists(save_path):
#os.makedirs(save_path)
#for idx in range(synFlow.shape[0]):
#imsave(os.path.join(save_path, str(idx)+'.png'), synFlow[idx])
#ffmpeg_cmd = 'ffmpeg -r 3 -i '+save_path+'/%d.png '+save_folder+'/synFlow.mp4'
#os.system(ffmpeg_cmd)
# save visualize images
#for idx in range(data['image'].shape[0]):
#save_img(idx, shared_dict1, shared_dict2)
#save_path = os.path.join(save_folder, title, 'render')
#ffmpeg_cmd = 'ffmpeg -r 3 -i '+save_path+'/%d.png '+save_folder+'/render.mp4'
#os.system(ffmpeg_cmd)
#vid1 = os.path.join(save_folder, 'objSurfaceFlow.mp4')
#vid2 = os.path.join(save_folder, 'synFlow.mp4')
#vid3 = os.path.join(save_folder, 'visual.mp4')
#ffmpeg_cmd = 'ffmpeg -i '+vid1+' -i '+vid2+' -filter_complex hstack=inputs=2 '+vid3
#os.system(ffmpeg_cmd)
return
def rotation_matrix(axis, theta):
"""
Return the rotation matrix associated with counterclockwise rotation about
the given axis by theta radians.
"""
axis = axis / torch.sqrt(torch.dot(axis, axis))
a = torch.cos(theta / 2.0)
b, c, d = -axis * torch.sin(theta / 2.0)
aa, bb, cc, dd = a * a, b * b, c * c, d * d
bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
rot_mat = torch.empty(3,3)
rot_mat[0,0] = aa + bb - cc - dd
rot_mat[0,1] = 2 * (bc + ad)
rot_mat[0,2] = 2 * (bd - ac)
rot_mat[1,0] = 2 * (bc - ad)
rot_mat[1,1] = aa + cc - bb - dd
rot_mat[1,2] = 2 * (cd + ab)
rot_mat[2,0] = 2 * (bd + ac)
rot_mat[2,1] = 2 * (cd - ab)
rot_mat[2,2] = aa + dd - bb - cc
return rot_mat
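# Example usage (a minimal sketch): rotating the x-axis by 90 degrees counterclockwise about
# the z-axis should map it onto the y-axis, up to numerical precision:
def _example_rotation_matrix():
    axis = torch.tensor([0.0, 0.0, 1.0])
    theta = torch.tensor(math.pi / 2.0)
    R = rotation_matrix(axis, theta)
    v = torch.tensor([1.0, 0.0, 0.0])
    print(torch.mv(R, v))   # approximately tensor([0., 1., 0.])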
def rotation_matrix_batch(axis, theta):
"""
Return the rotation matrix associated with counterclockwise rotation about
the given axis by theta radians.
"""
axis = axis / torch.sqrt(torch.dot(axis, axis))
a = torch.cos(theta / 2.0)
b = -axis[0] * torch.sin(theta / 2.0)
c = -axis[1] * torch.sin(theta / 2.0)
d = -axis[2] * torch.sin(theta / 2.0)
aa, bb, cc, dd = a * a, b * b, c * c, d * d
bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
rot_mat = torch.empty(aa.shape[0],3,3)
rot_mat[:,0,0] = aa + bb - cc - dd
rot_mat[:,0,1] = 2 * (bc + ad)
rot_mat[:,0,2] = 2 * (bd - ac)
rot_mat[:,1,0] = 2 * (bc - ad)
rot_mat[:,1,1] = aa + cc - bb - dd
rot_mat[:,1,2] = 2 * (cd + ab)
rot_mat[:,2,0] = 2 * (bd + ac)
rot_mat[:,2,1] = 2 * (cd - ab)
rot_mat[:,2,2] = aa + dd - bb - cc
return rot_mat
def flow_confidence(threshold, forwardFlow, backwardFlow, img_w, img_h):
    # I_t -> I_(t+1), warp with forward flow
It1 = forwardFlow.clone()
It1[:,:,0] += torch.arange(img_w)
It1[:,:,1] += torch.arange(img_h).unsqueeze(1) # (u, v) coordinate
It1 = torch.round(It1)
withinFrameMask = (It1[:,:,0] < img_w) * (It1[:,:,0] > 0) *\
(It1[:,:,1] < img_h) * (It1[:,:,1] > 0)
withinFrameCoord = torch.nonzero(withinFrameMask==1) # (x, y) coordinate of within frame flow
nextCoord = It1[withinFrameCoord[:, 0], withinFrameCoord[:,1]].astype(int) # u, v order
    # I_(t+1) -> I_t, warp back with backward flow
nextCoordBackwardFlow = backwardFlow[nextCoord[:,1], nextCoord[:,0],:]
nextbackCoord = nextCoord + nextCoordBackwardFlow # u, v coord
nextbackCoord[:,[1,0]] = nextbackCoord[:,[0,1]] # swap to x,y coord
# filter out noisy flow
stableFlowMask = np.sum(np.abs(nextbackCoord - withinFrameCoord), 1) < threshold
stableFlowCoord = withinFrameCoord[stableFlowMask] # (x,y) coord
return stableFlowCoord
def make_colorwheel():
"""
Generates a color wheel for optical flow visualization as presented in:
Baker et al. "A Database and Evaluation Methodology for Optical Flow" (ICCV, 2007)
URL: http://vision.middlebury.edu/flow/flowEval-iccv07.pdf
Code follows the original C++ source code of Daniel Scharstein.
    Code follows the Matlab source code of Deqing Sun.
Returns:
np.ndarray: Color wheel
"""
RY = 15
YG = 6
GC = 4
CB = 11
BM = 13
MR = 6
ncols = RY + YG + GC + CB + BM + MR
colorwheel = np.zeros((ncols, 3))
col = 0
# RY
colorwheel[0:RY, 0] = 255
colorwheel[0:RY, 1] = np.floor(255*np.arange(0,RY)/RY)
col = col+RY
# YG
colorwheel[col:col+YG, 0] = 255 - np.floor(255*np.arange(0,YG)/YG)
colorwheel[col:col+YG, 1] = 255
col = col+YG
# GC
colorwheel[col:col+GC, 1] = 255
colorwheel[col:col+GC, 2] = np.floor(255*np.arange(0,GC)/GC)
col = col+GC
# CB
colorwheel[col:col+CB, 1] = 255 - np.floor(255*np.arange(CB)/CB)
colorwheel[col:col+CB, 2] = 255
col = col+CB
# BM
colorwheel[col:col+BM, 2] = 255
colorwheel[col:col+BM, 0] = np.floor(255*np.arange(0,BM)/BM)
col = col+BM
# MR
colorwheel[col:col+MR, 2] = 255 - np.floor(255*np.arange(MR)/MR)
colorwheel[col:col+MR, 0] = 255
return colorwheel
def flow_uv_to_colors(u, v, convert_to_bgr=False):
"""
Applies the flow color wheel to (possibly clipped) flow components u and v.
According to the C++ source code of Daniel Scharstein
According to the Matlab source code of Deqing Sun
Args:
u (np.ndarray): Input horizontal flow of shape [H,W]
v (np.ndarray): Input vertical flow of shape [H,W]
convert_to_bgr (bool, optional): Convert output image to BGR. Defaults to False.
Returns:
np.ndarray: Flow visualization image of shape [H,W,3]
"""
flow_image = np.zeros((u.shape[0], u.shape[1], 3), np.uint8)
colorwheel = make_colorwheel() # shape [55x3]
ncols = colorwheel.shape[0]
rad = np.sqrt(np.square(u) + np.square(v))
a = np.arctan2(-v, -u)/np.pi
fk = (a+1) / 2*(ncols-1)
k0 = np.floor(fk).astype(np.int32)
k1 = k0 + 1
k1[k1 == ncols] = 0
f = fk - k0
for i in range(colorwheel.shape[1]):
tmp = colorwheel[:,i]
col0 = tmp[k0] / 255.0
col1 = tmp[k1] / 255.0
col = (1-f)*col0 + f*col1
idx = (rad <= 1)
col[idx] = 1 - rad[idx] * (1-col[idx])
col[~idx] = col[~idx] * 0.75 # out of range
# Note the 2-i => BGR instead of RGB
ch_idx = 2-i if convert_to_bgr else i
flow_image[:,:,ch_idx] = np.floor(255 * col)
return flow_image
def flow_to_image(flow_uv, clip_flow=None, convert_to_bgr=False):
"""
    Expects a two dimensional flow image of shape [H,W,2].
Args:
flow_uv (np.ndarray): Flow UV image of shape [H,W,2]
clip_flow (float, optional): Clip maximum of flow values. Defaults to None.
convert_to_bgr (bool, optional): Convert output image to BGR. Defaults to False.
Returns:
np.ndarray: Flow visualization image of shape [H,W,3]
"""
assert flow_uv.ndim == 3, 'input flow must have three dimensions'
assert flow_uv.shape[2] == 2, 'input flow must have shape [H,W,2]'
if clip_flow is not None:
flow_uv = np.clip(flow_uv, 0, clip_flow)
u = flow_uv[:,:,0]
v = flow_uv[:,:,1]
rad = np.sqrt(np.square(u) + np.square(v))
    rad_max = 40.0  # fixed normalization constant
epsilon = 1e-5
u = u / (rad_max + epsilon)
v = v / (rad_max + epsilon)
return flow_uv_to_colors(u, v, convert_to_bgr)
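# Example usage (a minimal sketch): a synthetic flow field where every pixel moves 10 px to
# the right maps to a single uniform colour, since flow_to_image divides by the fixed rad_max:
def _example_flow_to_image():
    flow = np.zeros((64, 64, 2), dtype=np.float32)
    flow[..., 0] = 10.0          # u component (horizontal); v stays zero
    rgb = flow_to_image(flow)
    print(rgb.shape, rgb.dtype)  # (64, 64, 3) uint8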
|
d3d-hoi-main
|
visualization/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import cv2
import sys
from PyQt5 import QtCore, QtGui, QtWidgets
import numpy as np
from utils import (
initialize_render, merge_meshes,
load_motion
)
import torch
from PIL import Image
from natsort import natsorted
from model import JOHMRLite
import os
import json
import pdb
import scipy.misc
import matplotlib.pyplot as plt
import open3d as o3d
import torch.optim as optim
from pytorch3d.io import save_obj
# load cad model
device = torch.device("cuda:0")
obj_path = 'processed_cads/storagefurniture/45132'
verts, faces, part_segs, _ = merge_meshes(obj_path, device)
verts[:,1:] *= -1 # pytorch3d -> world coordinate
obj_verts = verts.to(device)
obj_faces = faces.to(device)
obj_size = np.asarray([74.0, 77.5, 68.5]) # length, height, width (x, y, z), cm
# fridge (home) np.asarray([600, 1450, 620])
# dishwasher (yang) obj_size = np.asarray([600, 800, 600])
# laptop (large) obj_size = np.asarray([415, 15, 280])
# fridge (small) obj_size = np.asarray([450, 825, 472])
# trashcan (outside) obj_size = np.asarray([650, 1050, 635])
# 677 x 1100 x 665
# load image
img_path = 'storagefurniture/b008-0109/frames/images-0001.jpg'
focal_x = 983#1505 #1680
focal_y = 983#1505 #1680
img_square = 1920
img_small = 256
# render
_, phong_renderer = initialize_render(device, focal_x, focal_y, img_square, img_small)
global x_offset, y_offset, z_offset, yaw, pitch, roll, rot_alpha, rot_beta, rot_gamma
x_offset = 0.0
y_offset = 0.0
z_offset = 2.0
yaw = 0.0
pitch = 0.0
roll = 0.0
valstep = 0.01
# initialize model
image_bg = np.array(Image.open(img_path))/255.0
img_h = image_bg.shape[0]
img_w = image_bg.shape[1]
model = JOHMRLite(obj_verts, obj_faces, phong_renderer, img_h, img_w)
def display_img(model, alpha):
image_bg = np.array(Image.open(img_path))/255.0
img_h = image_bg.shape[0]
img_w = image_bg.shape[1]
global x_offset, y_offset, z_offset, yaw, pitch, roll, rot_alpha, rot_beta, rot_gamma
vis_image, rot_alpha, rot_beta, rot_gamma = model(obj_size, x_offset, y_offset, z_offset, yaw, pitch, roll)
image = vis_image.detach().cpu().numpy().squeeze()
#objmask = np.array(Image.open(mask_path))#/255.0 # object mask
#objmask = np.repeat(objmask[:,:,np.newaxis], 3, axis=2)
#objmask[:,:,0] *= 0
#objmask[:,:,1] *= 0
rgb_mask = image_bg.astype(np.float32) #cv2.addWeighted(objmask.astype(np.float32), 0.5, image_bg.astype(np.float32), 0.5, 0.0)
frame_img = np.zeros((img_square, img_square,3))
start = int((max(img_h, img_w) - min(img_h, img_w))/2) - 1
end = start + min(img_h, img_w)
if img_h > img_w:
frame_img[:, start:end, :] = rgb_mask
else:
frame_img[start:end, :, :] = rgb_mask
rgb_mask = frame_img
alpha = min(1.0, max(0.0,alpha))
img_blend = cv2.addWeighted(image.astype(np.float32), alpha, rgb_mask.astype(np.float32), 1-alpha, 0.0)
img_blend = cv2.resize(img_blend, dsize=(800, 800), interpolation=cv2.INTER_NEAREST)
return img_blend
img_blend = display_img(model, 0.5)
h,w,_ = img_blend.shape
img_blend = np.uint8(img_blend*255)
qimage = QtGui.QImage(img_blend.data, w, h, 3*w, QtGui.QImage.Format_RGB888)
class Annotate(QtWidgets.QWidget):
def __init__(self):
super(Annotate, self).__init__()
self.initUI()
def initUI(self):
QtWidgets.QToolTip.setFont(QtGui.QFont('Test', 10))
# Show image
self.pic = QtWidgets.QLabel(self)
self.pic.setGeometry(10, 10, 800, 800)
self.pic.setPixmap(QtGui.QPixmap(qimage))
self.alpha = 0.5
# Show button
btn1 = QtWidgets.QPushButton('Offset Z-', self)
btn1.resize(btn1.sizeHint())
btn1.clicked.connect(lambda: self.fun('dec_oz'))
btn1.move(900, 10)
btn2 = QtWidgets.QPushButton('Offset Z+', self)
btn2.resize(btn2.sizeHint())
btn2.clicked.connect(lambda: self.fun('inc_oz'))
btn2.move(1000, 10)
self.textbox1 = QtWidgets.QLineEdit(self)
self.textbox1.move(1150, 10)
self.textbox1.resize(100,25)
self.textbox1.setText(str(z_offset))
btn7 = QtWidgets.QPushButton('Offset X-', self)
btn7.resize(btn7.sizeHint())
btn7.clicked.connect(lambda: self.fun('dec_ox'))
btn7.move(900, 150)
btn8 = QtWidgets.QPushButton('Offset X+', self)
btn8.resize(btn8.sizeHint())
btn8.clicked.connect(lambda: self.fun('inc_ox'))
btn8.move(1000, 150)
self.textbox4 = QtWidgets.QLineEdit(self)
self.textbox4.move(1150, 150)
self.textbox4.resize(100,25)
self.textbox4.setText(str(x_offset))
btn9 = QtWidgets.QPushButton('Offset Y-', self)
btn9.resize(btn9.sizeHint())
btn9.clicked.connect(lambda: self.fun('dec_oy'))
btn9.move(900, 190)
btn10 = QtWidgets.QPushButton('Offset Y+', self)
btn10.resize(btn10.sizeHint())
btn10.clicked.connect(lambda: self.fun('inc_oy'))
btn10.move(1000, 190)
self.textbox5 = QtWidgets.QLineEdit(self)
self.textbox5.move(1150, 190)
self.textbox5.resize(100,25)
self.textbox5.setText(str(y_offset))
btn11 = QtWidgets.QPushButton('Yaw-', self)
btn11.resize(btn11.sizeHint())
btn11.clicked.connect(lambda: self.fun('dec_yaw'))
btn11.move(900, 250)
btn12 = QtWidgets.QPushButton('Yaw+', self)
btn12.resize(btn12.sizeHint())
btn12.clicked.connect(lambda: self.fun('inc_yaw'))
btn12.move(1000, 250)
self.textbox6 = QtWidgets.QLineEdit(self)
self.textbox6.move(1150, 250)
self.textbox6.resize(100,25)
self.textbox6.setText(str(yaw))
btn13 = QtWidgets.QPushButton('Pitch-', self)
btn13.resize(btn13.sizeHint())
btn13.clicked.connect(lambda: self.fun('dec_pitch'))
btn13.move(900, 290)
btn14 = QtWidgets.QPushButton('Pitch+', self)
btn14.resize(btn14.sizeHint())
btn14.clicked.connect(lambda: self.fun('inc_pitch'))
btn14.move(1000, 290)
self.textbox7 = QtWidgets.QLineEdit(self)
self.textbox7.move(1150, 290)
self.textbox7.resize(100,25)
self.textbox7.setText(str(pitch))
btn15 = QtWidgets.QPushButton('Roll-', self)
btn15.resize(btn15.sizeHint())
btn15.clicked.connect(lambda: self.fun('dec_roll'))
btn15.move(900, 330)
btn16 = QtWidgets.QPushButton('Roll+', self)
btn16.resize(btn16.sizeHint())
btn16.clicked.connect(lambda: self.fun('inc_roll'))
btn16.move(1000, 330)
self.textbox8 = QtWidgets.QLineEdit(self)
self.textbox8.move(1150, 330)
self.textbox8.resize(100,25)
self.textbox8.setText(str(roll))
btn22 = QtWidgets.QPushButton('Vis-', self)
btn22.resize(btn22.sizeHint())
btn22.clicked.connect(lambda: self.fun('dec_vis'))
btn22.move(900, 550)
btn23 = QtWidgets.QPushButton('Vis+', self)
btn23.resize(btn23.sizeHint())
btn23.clicked.connect(lambda: self.fun('inc_vis'))
btn23.move(1000, 550)
btn21 = QtWidgets.QPushButton('Save', self)
btn21.resize(btn21.sizeHint())
btn21.clicked.connect(lambda: self.fun('save'))
btn21.move(1000, 500)
self.setGeometry(300, 300, 2000, 1500)
self.setWindowTitle('JOHMR Annotation Tool -- Sam Xu')
self.show()
# Connect button to image updating
def fun(self, arguments):
global x_offset, y_offset, z_offset, yaw, pitch, roll, rot_alpha, rot_beta, rot_gamma
if arguments == 'dec_oz':
z_offset -= valstep
elif arguments == 'inc_oz':
z_offset += valstep
elif arguments == 'dec_ox':
x_offset -= valstep
elif arguments == 'inc_ox':
x_offset += valstep
elif arguments == 'dec_oy':
y_offset -= valstep
elif arguments == 'inc_oy':
y_offset += valstep
elif arguments == 'dec_yaw':
yaw -= valstep
elif arguments == 'inc_yaw':
yaw += valstep
elif arguments == 'dec_pitch':
pitch -= valstep
elif arguments == 'inc_pitch':
pitch += valstep
elif arguments == 'dec_roll':
roll -= valstep
elif arguments == 'inc_roll':
roll += valstep
elif arguments == 'save':
# save obj orientation
text_file = './3d_info.txt'
with open(text_file, "w") as myfile:
myfile.write('yaw: '+str(round(yaw,3))+'\n')
myfile.write('pitch: '+str(round(pitch,3))+'\n')
myfile.write('roll: '+str(round(roll,3))+'\n')
myfile.write('rot_alpha: '+str(round(rot_alpha,3))+'\n')
myfile.write('rot_beta: '+str(round(rot_beta,3))+'\n')
myfile.write('rot_gamma: '+str(round(rot_gamma,3))+'\n')
myfile.write('x_offset: '+str(round(x_offset,3))+'\n')
myfile.write('y_offset: '+str(round(y_offset,3))+'\n')
myfile.write('z_offset: '+str(round(z_offset,3))+'\n')
myfile.write('obj_size: '+str(obj_size[0])+','+str(obj_size[1])+','+str(obj_size[2])+'\n')
myfile.write('\n')
# save human & obj meshes
#save_hverts = model.smpl_verts_output.detach().cpu().numpy()
#save_hfaces = hfaces.detach().cpu().numpy()
save_objverts = model.obj_verts_output.detach().cpu().numpy()
save_objfaces = obj_faces.detach().cpu().numpy()
#verts = np.concatenate((save_hverts, save_objverts), axis=0)
#faces = np.concatenate((save_hfaces, save_objfaces + save_hverts.shape[0]), axis=0)
save_obj('./object.obj', torch.from_numpy(save_objverts), torch.from_numpy(save_objfaces))
#save_obj('annotate/person.obj', torch.from_numpy(save_hverts), torch.from_numpy(save_hfaces))
#save_obj('annotate/joint.obj', torch.from_numpy(verts), torch.from_numpy(faces))
elif arguments == 'dec_vis':
self.alpha -= 0.1
elif arguments == 'inc_vis':
self.alpha += 0.1
else:
print('not implemented')
self.textbox4.setText(str(round(x_offset,3)))
self.textbox5.setText(str(round(y_offset,3)))
self.textbox6.setText(str(round(yaw,3)))
self.textbox7.setText(str(round(pitch,3)))
self.textbox8.setText(str(round(roll,3)))
self.textbox1.setText(str(round(z_offset,3)))
img = display_img(model, self.alpha)
img = np.uint8(img*255)
h,w,_ = img.shape
        qimage = QtGui.QImage(img.data, w, h, 3*w, QtGui.QImage.Format_RGB888)
self.pic.setPixmap(QtGui.QPixmap(qimage))
def main():
app = QtWidgets.QApplication(sys.argv)
ex = Annotate()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
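# Usage (a sketch): edit obj_path, img_path, obj_size and the focal lengths above for the
# sequence being annotated, then run
#   python qt.py
# The buttons change the object pose in steps of valstep; 'Save' writes ./3d_info.txt and
# ./object.obj into the current working directory. A CUDA device and PyQt5 are required.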
|
d3d-hoi-main
|
visualization/annotation/qt.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import torch.nn as nn
import torch
import pdb
import numpy as np
from pytorch3d.renderer import (
look_at_view_transform, TexturesVertex
)
import math
from pytorch3d.structures import Meshes
import cv2
import matplotlib.pyplot as plt
from utils import rotation_matrix
from scipy.ndimage.filters import gaussian_filter1d
from pytorch3d.io import save_obj
from pytorch3d.transforms import (
RotateAxisAngle, matrix_to_euler_angles
)
from pytorch3d.transforms.rotation_conversions import (
rotation_6d_to_matrix, matrix_to_rotation_6d
)
from utils import (
flow_to_image, flow_confidence
)
import time
from matplotlib.image import imsave
import os
from torch.autograd import Variable
from pytorch3d.transforms import (
euler_angles_to_matrix
)
from utils import (
rotation_matrix
)
class JOHMRLite(nn.Module):
def __init__(self, obj_verts, obj_faces, vis_render, img_h, img_w):
super().__init__()
self.device = obj_verts.device
self.vis_render = vis_render
self.obj_verts = obj_verts.detach()
self.obj_faces = obj_faces.detach()
self.img_h = img_h
self.img_w = img_w
# camera is almost at the center (distance can't be zero for diff render)
self.R, self.T = look_at_view_transform(0.1, 0.0, 0.0,device=self.device)
self.T[0,2] = 0.0 # manually set to zero
return
def forward(self, obj_size, x_offset, y_offset, z_offset, yaw, pitch, roll):
obj_verts = self.obj_verts.clone()
# step 1: rescale object
x_diff = torch.max(obj_verts[:,0]) - torch.min(obj_verts[:,0])
x_ratio = float(obj_size[0]) / x_diff
y_diff = torch.max(obj_verts[:,1]) - torch.min(obj_verts[:,1])
y_ratio = float(obj_size[1]) / y_diff
z_diff = torch.max(obj_verts[:,2]) - torch.min(obj_verts[:,2])
z_ratio = float(obj_size[2]) / z_diff
obj_verts[:, 0] *= x_ratio
obj_verts[:, 1] *= y_ratio
obj_verts[:, 2] *= z_ratio
# step 2: part motion
#part_state = torch.tensor(90 * (math.pi/180)).cuda()
#axis = torch.tensor([0, -0.9999999999999999, -0]).cuda().float()
#rot_o = torch.tensor([0.37487859368179954*x_ratio, -0.859491*y_ratio, -0.24141621508844158*z_ratio]).cuda()
#assert(part_state>=0) # non negative value
#start = 380
#end = 380+198
#partrot_mat = rotation_matrix(axis, part_state).cuda() # part rotation matrix
#obj_verts_part = obj_verts[start:end, :] - rot_o
#obj_verts_part2 = torch.mm(partrot_mat, obj_verts_part.permute(1,0)).permute(1,0)
#obj_verts[start:end, :] = obj_verts_part2 + rot_o
# step 3: object orientation
euler_angle = torch.tensor([pitch, yaw, roll]).reshape(1,3)
objrot_mat = euler_angles_to_matrix(euler_angle, ["X","Y","Z"]).to(self.device)
rot_alpha, rot_beta, rot_gamma = matrix_to_euler_angles(objrot_mat, ["X","Y","Z"])[0]
rot_alpha = float(rot_alpha)
rot_beta = float(rot_beta)
rot_gamma = float(rot_gamma)
objrot_mat = objrot_mat[0]
obj_verts = torch.mm(objrot_mat, obj_verts.permute(1,0)).permute(1,0)
# step 4: object offset
obj_verts[:, 0] += 100.0*x_offset
obj_verts[:, 1] += 100.0*y_offset
obj_verts[:, 2] += 100.0*z_offset
self.obj_verts_output = obj_verts.clone()
obj_verts[:,1:] *= -1
# create object mesh for diff render and visualization
tex = torch.ones_like(obj_verts).unsqueeze(0)
textures = TexturesVertex(verts_features=tex).to(self.device)
self.obj_mesh = Meshes(verts=[obj_verts],faces=[self.obj_faces],textures=textures)
vis_image = self.vis_render(meshes_world=self.obj_mesh, R=self.R, T=self.T)
return vis_image[...,:3], rot_alpha, rot_beta, rot_gamma
|
d3d-hoi-main
|
visualization/annotation/model.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
import torch
import natsort
import glob
import open3d as o3d
# rendering components
from pytorch3d.renderer import (
FoVPerspectiveCameras,RasterizationSettings,
MeshRenderer, MeshRasterizer, BlendParams,
SoftSilhouetteShader, HardPhongShader, PointLights, DirectionalLights,
PerspectiveCameras
)
from pytorch3d.io import save_obj
import math
import cv2
import matplotlib.pyplot as plt
import os
import imageio
from decimal import Decimal
import pdb
import json
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
from mpl_toolkits.mplot3d import axes3d
from scipy.ndimage.filters import gaussian_filter1d
from numpy.linalg import svd
from multiprocessing import Pool, Manager, cpu_count
from pytorch3d.transforms import Rotate, Translate
from matplotlib.image import imsave
from pathlib import Path
def planeFit(points):
"""
p, n = planeFit(points)
Given an array, points, of shape (d,...)
representing points in d-dimensional space,
    fit a d-dimensional plane to the points.
Return a point, p, on the plane (the point-cloud centroid),
and the normal, n.
"""
    points = np.reshape(points, (np.shape(points)[0], -1)) # Collapse trailing dimensions
assert points.shape[0] <= points.shape[1], "There are only {} points in {} dimensions.".format(points.shape[1], points.shape[0])
ctr = points.mean(axis=1)
x = points - ctr[:,np.newaxis]
M = np.dot(x, x.T) # Could also use np.cov(x) here.
return ctr, svd(M)[0][:,-1]
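# Example usage (a minimal sketch): fitting noisy samples of the z = 0 plane should recover a
# centroid near the origin and a normal close to +/- [0, 0, 1]:
def _example_planeFit():
    rng = np.random.default_rng(0)
    pts = rng.uniform(-1.0, 1.0, size=(3, 500))   # points as columns, shape (d, N)
    pts[2] = 0.01 * rng.standard_normal(500)      # nearly planar in z
    ctr, normal = planeFit(pts)
    print(ctr, normal)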
def initialize_render(device, focal_x, focal_y, img_square_size, img_small_size):
""" initialize camera, rasterizer, and shader. """
# Initialize an OpenGL perspective camera.
#cameras = FoVPerspectiveCameras(znear=1.0, zfar=9000.0, fov=20, device=device)
#cameras = FoVPerspectiveCameras(device=device)
#cam_proj_mat = cameras.get_projection_transform()
img_square_center = int(img_square_size/2)
shrink_ratio = int(img_square_size/img_small_size)
focal_x_small = int(focal_x/shrink_ratio)
focal_y_small = int(focal_y/shrink_ratio)
img_small_center = int(img_small_size/2)
camera_sfm = PerspectiveCameras(
focal_length=((focal_x, focal_y),),
principal_point=((img_square_center, img_square_center),),
image_size = ((img_square_size, img_square_size),),
device=device)
camera_sfm_small = PerspectiveCameras(
focal_length=((focal_x_small, focal_y_small),),
principal_point=((img_small_center, img_small_center),),
image_size = ((img_small_size, img_small_size),),
device=device)
# To blend the 100 faces we set a few parameters which control the opacity and the sharpness of
# edges. Refer to blending.py for more details.
blend_params = BlendParams(sigma=1e-4, gamma=1e-4)
    # Define the settings for rasterization and shading. Here the output image is of size
    # img_small_size x img_small_size. To form the blended silhouette we use 100 faces per pixel.
    # bin_size and max_faces_per_bin are left at their defaults so that the faster coarse-to-fine
    # rasterization method is used. Refer to rasterize_meshes.py for explanations of these
    # parameters, and to docs/notes/renderer.md for the difference between naive and
    # coarse-to-fine rasterization.
raster_settings = RasterizationSettings(
image_size=img_small_size,
blur_radius=np.log(1. / 1e-4 - 1.) * blend_params.sigma,
faces_per_pixel=100,
)
# Create a silhouette mesh renderer by composing a rasterizer and a shader.
silhouette_renderer = MeshRenderer(
rasterizer=MeshRasterizer(
cameras=camera_sfm_small,
raster_settings=raster_settings
),
shader=SoftSilhouetteShader(blend_params=blend_params)
)
# We will also create a phong renderer. This is simpler and only needs to render one face per pixel.
raster_settings = RasterizationSettings(
image_size=img_square_size,
blur_radius=0.0,
faces_per_pixel=1,
)
# We can add a point light in front of the object.
lights = PointLights(device=device, location=((2.0, 2.0, -2.0),))
#lights = DirectionalLights(device=device, direction=((0, 0, 1),))
phong_renderer = MeshRenderer(
rasterizer=MeshRasterizer(
cameras=camera_sfm,
raster_settings=raster_settings
),
shader=HardPhongShader(device=device, cameras=camera_sfm, lights=lights)
)
return silhouette_renderer, phong_renderer
def merge_meshes(obj_path, device):
""" helper function for loading and merging meshes. """
verts_list = torch.empty(0,3)
faces_list = torch.empty(0,3).long()
num_vtx = [0]
num_faces = [0]
# merge meshes, load in ascending order
meshes = natsort.natsorted(glob.glob(obj_path+'/final/*_rescaled_sapien.obj'))
for part_mesh in meshes:
#print('loading %s' %part_mesh)
mesh = o3d.io.read_triangle_mesh(part_mesh)
verts = torch.from_numpy(np.asarray(mesh.vertices)).float()
faces = torch.from_numpy(np.asarray(mesh.triangles)).long()
faces = faces + verts_list.shape[0]
verts_list = torch.cat([verts_list, verts])
faces_list = torch.cat([faces_list, faces])
num_vtx.append(verts_list.shape[0])
num_faces.append(faces_list.shape[0])
verts_list = verts_list.to(device)
faces_list = faces_list.to(device)
return verts_list, faces_list, num_vtx, num_faces
def load_motion(motions, device):
""" load rotation axis, origin, and limit. """
rot_origin = []
rot_axis = []
rot_type = []
limit_a = []
limit_b = []
contact_list = []
# load all meta data
for idx, key in enumerate(motions.keys()):
jointData = motions[key]
# if contains movable parts
if jointData is not None:
origin = torch.FloatTensor(jointData['axis']['origin']).to(device)
axis = torch.FloatTensor(jointData['axis']['direction']).to(device)
mobility_type = jointData['type']
if 'contact' in jointData:
contact_list.append(jointData['contact'])
# convert to radians if necessary
if mobility_type == 'revolute':
mobility_a = math.pi*jointData['limit']['a'] / 180.0
mobility_b = math.pi*jointData['limit']['b'] / 180.0
else:
assert mobility_type == 'prismatic'
mobility_a = jointData['limit']['a']
mobility_b = jointData['limit']['b']
rot_origin.append(origin)
rot_axis.append(axis)
rot_type.append(mobility_type)
limit_a.append(mobility_a)
limit_b.append(mobility_b)
return rot_origin, rot_axis, rot_type, limit_a, limit_b, contact_list
def visualize(mask, image, alpha):
#mask = np.repeat(mask[:,:,np.newaxis], 3, axis=2)
mask_img_blend = cv2.addWeighted(mask, alpha, image.astype(np.float32), 1.0-alpha, 0)
mask_img_blend = mask_img_blend*mask + image*(1-mask)
return mask_img_blend
def visualize_curve(data, output, save_folder, title):
mask_model = output['obj_mask']
spin_points = output['spin_points']
# plot curve
obj_curve = output['obj_curve']
spin_curve = output['spin_curve']
x_offset = spin_curve[0,0] - obj_curve[0,0]
y_offset = spin_curve[0,1] - obj_curve[0,1]
z_offset = spin_curve[0,2] - obj_curve[0,2]
obj_curve[:,0] += x_offset
obj_curve[:,1] += y_offset
obj_curve[:,2] += z_offset
fig = plt.figure()
ax = plt.axes(projection='3d')
#obj_curves = obj_curve_norm
ax.scatter(spin_curve[0,0], spin_curve[0,1], spin_curve[0,2], color='red')
ax.scatter(obj_curve[0,0], obj_curve[0,1], obj_curve[0,2], color='red')
ax.plot(obj_curve[:,0], obj_curve[:,1], obj_curve[:,2], label='object curve')
ax.plot(spin_curve[:,0], spin_curve[:,1], spin_curve[:,2], label='hand curve')
ax.legend()
ax.set_xlabel("X")
ax.set_ylabel("Y")
ax.set_zlabel("Z")
plt.show()
def save_mesh(id):
global obj_verts_dict
global obj_faces_dict
global human_verts_dict
global human_faces_dict
global save_path_mesh
verts1 = obj_verts_dict[str(id+1)]
verts2 = human_verts_dict[str(id+1)]
faces1 = obj_faces_dict[str(id+1)]
faces2 = human_faces_dict[str(id+1)]
verts = np.concatenate((verts1, verts2), axis=0)
faces = np.concatenate((faces1, faces2 + verts1.shape[0]), axis=0)
path = os.path.join(save_path_mesh, str(id+1)+'_object.obj')
save_obj(path, torch.from_numpy(verts1), torch.from_numpy(faces1))
path = os.path.join(save_path_mesh, str(id+1)+'_person.obj')
save_obj(path, torch.from_numpy(verts2), torch.from_numpy(faces2))
path = os.path.join(save_path_mesh, str(id+1)+'_joint.obj')
save_obj(path, torch.from_numpy(verts), torch.from_numpy(faces))
def save_meshes(meshes, save_folder, video_name, title):
global obj_verts_dict
global obj_faces_dict
global human_verts_dict
global human_faces_dict
global save_path_mesh
save_path_mesh = os.path.join(save_folder, title)
if not os.path.exists(save_path_mesh):
os.makedirs(save_path_mesh)
obj_meshes = meshes['obj_mesh']
spin_meshes = meshes['spin_mesh']
# merge object + SPIN meshes
obj_verts = {}
obj_faces = {}
human_verts = {}
human_faces = {}
for idx in range(len(obj_meshes)):
obj_verts[str(idx+1)] = obj_meshes[idx].verts_list()[0].detach().cpu().numpy()
obj_faces[str(idx+1)] = obj_meshes[idx].faces_list()[0].detach().cpu().numpy()
human_verts[str(idx+1)] = spin_meshes[idx].verts_list()[0].detach().cpu().numpy()
human_faces[str(idx+1)] = spin_meshes[idx].faces_list()[0].detach().cpu().numpy()
manager = Manager()
obj_verts_dict = manager.dict(obj_verts)
obj_faces_dict = manager.dict(obj_faces)
human_verts_dict = manager.dict(human_verts)
human_faces_dict = manager.dict(human_faces)
ids = [item for item in range(len(obj_meshes))]
pool = Pool(processes=12)
pool.map(save_mesh, ids)
'''
eft_cmd = 'python -m demo.demo_bodymocap --render wire --bg rgb --videoname '+video_name+' --vPath '+save_folder
os.chdir('/home/xuxiangx/research/eft')
os.system(eft_cmd)
save_path = os.path.join(save_folder, 'eft', 'front')
ffmpeg_cmd = 'ffmpeg -r 3 -i '+save_path+'/scene_%08d.jpg '+save_folder+'/frontview.mp4'
os.system(ffmpeg_cmd)
save_path = os.path.join(save_folder, 'eft', 'side')
ffmpeg_cmd = 'ffmpeg -r 3 -i '+save_path+'/scene_%08d.jpg '+save_folder+'/sideview.mp4'
os.system(ffmpeg_cmd)
'''
return
def save_parameters(model, save_folder, title):
save_path = os.path.join(save_folder, title)
if not os.path.exists(save_path):
os.makedirs(save_path)
obj_offset = 1000.0*model.obj_offset.detach().cpu().numpy() #(3,)
smpl_offset = 1000.0*model.smpl_offset.detach().cpu().numpy() #(bs,3)
obj_scale = 3000.0*model.obj_scale
smpl_scale = 3000.0
focal_len = model.focal
part_rot_angle = model.part_rot_params.detach().cpu().numpy() #(bs,)
obj_rot_mat = model.obj_rot_angle_mat[0].detach().cpu().numpy() #(3,3)
part_rot_mat = model.part_rot_mat.detach().cpu().numpy() #(bs,3,3)
K_mat = model.K.detach().cpu().numpy() #(3,3)
rot_o = model.rot_o.detach().cpu().numpy() #(3,)
rot_axis = model.axis.detach().cpu().numpy() #(3,)
parameters = {}
parameters['obj_offset'] = obj_offset
parameters['smpl_offset'] = smpl_offset
parameters['obj_scale'] = obj_scale
parameters['smpl_scale'] = smpl_scale
parameters['focal_length'] = focal_len
parameters['part_rot_angle'] = part_rot_angle
parameters['obj_rot_matrix'] = obj_rot_mat
parameters['part_rot_matrix'] = part_rot_mat
parameters['K_matrix'] = K_mat
parameters['rot_origin'] = rot_o
parameters['rot_axis'] = rot_axis
np.save(os.path.join(save_path, 'parameters.npy'), parameters)
return
def save_img(idx):
global shared_dict1
global shared_dict2
global save_path
roi_image = shared_dict1['image'].permute(0,2,3,1)[idx].numpy()
silhouette = shared_dict1['objmask'].permute(0,2,3,1)[idx]
mask_model = shared_dict2['obj_mask']
gt_points = shared_dict1['smplv2d']
spin_points = shared_dict2['spin_points']
silhouette_init = mask_model.detach().cpu().squeeze()[idx].numpy()
mask_img_blend = visualize(silhouette_init, roi_image, 0.8)
# save image
#plt.subplots_adjust(hspace = 0.2, left=0.01, right=0.99, top=0.95, bottom=0.05)
imsave(os.path.join(save_path, str(idx)+'.png'), mask_img_blend)
return
def save_imgs(data, output, save_folder):
global shared_dict1
global shared_dict2
global save_path
save_path = save_folder
manager = Manager()
shared_dict1 = manager.dict(data)
shared_dict1 = data
shared_dict2 = manager.dict(output)
shared_dict2 = output
ids = [item for item in range(data['image'].shape[0])]
pool = Pool(processes=12)
pool.map(save_img, ids)
#sceneflow = shared_dict2['sceneflow']
#objSurfaceFlow = shared_dict2['objSurfaceFlow']
#synFlow = shared_dict2['synFlow']
#sceneflowMaskSquareShrink = shared_dict2['sceneflowMaskSquareShrink']
#part_diff_images = shared_dict2['part_diff_images']
    # save object part surface RAFT flow visualization
#save_path = os.path.join(save_folder, title, 'objSurfaceFlow')
#if not os.path.exists(save_path):
#os.makedirs(save_path)
#for idx in range(objSurfaceFlow.shape[0]):
#imsave(os.path.join(save_path, str(idx)+'.png'), objSurfaceFlow[idx])
#ffmpeg_cmd = 'ffmpeg -r 3 -i '+save_path+'/%d.png '+save_folder+'/objSurfaceFlow.mp4'
#os.system(ffmpeg_cmd)
# save synthetic rendering flow visualization
#save_path = os.path.join(save_folder, title, 'synFlow')
#if not os.path.exists(save_path):
#os.makedirs(save_path)
#for idx in range(synFlow.shape[0]):
#imsave(os.path.join(save_path, str(idx)+'.png'), synFlow[idx])
#ffmpeg_cmd = 'ffmpeg -r 3 -i '+save_path+'/%d.png '+save_folder+'/synFlow.mp4'
#os.system(ffmpeg_cmd)
# save visualize images
#for idx in range(data['image'].shape[0]):
#save_img(idx, shared_dict1, shared_dict2)
#save_path = os.path.join(save_folder, title, 'render')
#ffmpeg_cmd = 'ffmpeg -r 3 -i '+save_path+'/%d.png '+save_folder+'/render.mp4'
#os.system(ffmpeg_cmd)
#vid1 = os.path.join(save_folder, 'objSurfaceFlow.mp4')
#vid2 = os.path.join(save_folder, 'synFlow.mp4')
#vid3 = os.path.join(save_folder, 'visual.mp4')
#ffmpeg_cmd = 'ffmpeg -i '+vid1+' -i '+vid2+' -filter_complex hstack=inputs=2 '+vid3
#os.system(ffmpeg_cmd)
return
def rotation_matrix(axis, theta):
"""
Return the rotation matrix associated with counterclockwise rotation about
the given axis by theta radians.
"""
axis = axis / torch.sqrt(torch.dot(axis, axis))
a = torch.cos(theta / 2.0)
b, c, d = -axis * torch.sin(theta / 2.0)
aa, bb, cc, dd = a * a, b * b, c * c, d * d
bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
rot_mat = torch.empty(3,3)
rot_mat[0,0] = aa + bb - cc - dd
rot_mat[0,1] = 2 * (bc + ad)
rot_mat[0,2] = 2 * (bd - ac)
rot_mat[1,0] = 2 * (bc - ad)
rot_mat[1,1] = aa + cc - bb - dd
rot_mat[1,2] = 2 * (cd + ab)
rot_mat[2,0] = 2 * (bd + ac)
rot_mat[2,1] = 2 * (cd - ab)
rot_mat[2,2] = aa + dd - bb - cc
return rot_mat
def rotation_matrix_batch(axis, theta):
"""
Return the rotation matrix associated with counterclockwise rotation about
the given axis by theta radians.
"""
axis = axis / torch.sqrt(torch.dot(axis, axis))
a = torch.cos(theta / 2.0)
b = -axis[0] * torch.sin(theta / 2.0)
c = -axis[1] * torch.sin(theta / 2.0)
d = -axis[2] * torch.sin(theta / 2.0)
aa, bb, cc, dd = a * a, b * b, c * c, d * d
bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
rot_mat = torch.empty(aa.shape[0],3,3)
rot_mat[:,0,0] = aa + bb - cc - dd
rot_mat[:,0,1] = 2 * (bc + ad)
rot_mat[:,0,2] = 2 * (bd - ac)
rot_mat[:,1,0] = 2 * (bc - ad)
rot_mat[:,1,1] = aa + cc - bb - dd
rot_mat[:,1,2] = 2 * (cd + ab)
rot_mat[:,2,0] = 2 * (bd + ac)
rot_mat[:,2,1] = 2 * (cd - ab)
rot_mat[:,2,2] = aa + dd - bb - cc
return rot_mat
def flow_confidence(threshold, forwardFlow, backwardFlow, img_w, img_h):
    # I_t -> I_(t+1), warp with forward flow
It1 = forwardFlow.clone()
It1[:,:,0] += torch.arange(img_w)
It1[:,:,1] += torch.arange(img_h).unsqueeze(1) # (u, v) coordinate
It1 = torch.round(It1)
withinFrameMask = (It1[:,:,0] < img_w) * (It1[:,:,0] > 0) *\
(It1[:,:,1] < img_h) * (It1[:,:,1] > 0)
withinFrameCoord = torch.nonzero(withinFrameMask==1) # (x, y) coordinate of within frame flow
nextCoord = It1[withinFrameCoord[:, 0], withinFrameCoord[:,1]].astype(int) # u, v order
    # I_(t+1) -> I_t, warp back with backward flow
nextCoordBackwardFlow = backwardFlow[nextCoord[:,1], nextCoord[:,0],:]
nextbackCoord = nextCoord + nextCoordBackwardFlow # u, v coord
nextbackCoord[:,[1,0]] = nextbackCoord[:,[0,1]] # swap to x,y coord
# filter out noisy flow
stableFlowMask = np.sum(np.abs(nextbackCoord - withinFrameCoord), 1) < threshold
stableFlowCoord = withinFrameCoord[stableFlowMask] # (x,y) coord
return stableFlowCoord
def make_colorwheel():
"""
Generates a color wheel for optical flow visualization as presented in:
Baker et al. "A Database and Evaluation Methodology for Optical Flow" (ICCV, 2007)
URL: http://vision.middlebury.edu/flow/flowEval-iccv07.pdf
Code follows the original C++ source code of Daniel Scharstein.
    Code follows the Matlab source code of Deqing Sun.
Returns:
np.ndarray: Color wheel
"""
RY = 15
YG = 6
GC = 4
CB = 11
BM = 13
MR = 6
ncols = RY + YG + GC + CB + BM + MR
colorwheel = np.zeros((ncols, 3))
col = 0
# RY
colorwheel[0:RY, 0] = 255
colorwheel[0:RY, 1] = np.floor(255*np.arange(0,RY)/RY)
col = col+RY
# YG
colorwheel[col:col+YG, 0] = 255 - np.floor(255*np.arange(0,YG)/YG)
colorwheel[col:col+YG, 1] = 255
col = col+YG
# GC
colorwheel[col:col+GC, 1] = 255
colorwheel[col:col+GC, 2] = np.floor(255*np.arange(0,GC)/GC)
col = col+GC
# CB
colorwheel[col:col+CB, 1] = 255 - np.floor(255*np.arange(CB)/CB)
colorwheel[col:col+CB, 2] = 255
col = col+CB
# BM
colorwheel[col:col+BM, 2] = 255
colorwheel[col:col+BM, 0] = np.floor(255*np.arange(0,BM)/BM)
col = col+BM
# MR
colorwheel[col:col+MR, 2] = 255 - np.floor(255*np.arange(MR)/MR)
colorwheel[col:col+MR, 0] = 255
return colorwheel
def flow_uv_to_colors(u, v, convert_to_bgr=False):
"""
Applies the flow color wheel to (possibly clipped) flow components u and v.
According to the C++ source code of Daniel Scharstein
According to the Matlab source code of Deqing Sun
Args:
u (np.ndarray): Input horizontal flow of shape [H,W]
v (np.ndarray): Input vertical flow of shape [H,W]
convert_to_bgr (bool, optional): Convert output image to BGR. Defaults to False.
Returns:
np.ndarray: Flow visualization image of shape [H,W,3]
"""
flow_image = np.zeros((u.shape[0], u.shape[1], 3), np.uint8)
colorwheel = make_colorwheel() # shape [55x3]
ncols = colorwheel.shape[0]
rad = np.sqrt(np.square(u) + np.square(v))
a = np.arctan2(-v, -u)/np.pi
fk = (a+1) / 2*(ncols-1)
k0 = np.floor(fk).astype(np.int32)
k1 = k0 + 1
k1[k1 == ncols] = 0
f = fk - k0
for i in range(colorwheel.shape[1]):
tmp = colorwheel[:,i]
col0 = tmp[k0] / 255.0
col1 = tmp[k1] / 255.0
col = (1-f)*col0 + f*col1
idx = (rad <= 1)
col[idx] = 1 - rad[idx] * (1-col[idx])
col[~idx] = col[~idx] * 0.75 # out of range
# Note the 2-i => BGR instead of RGB
ch_idx = 2-i if convert_to_bgr else i
flow_image[:,:,ch_idx] = np.floor(255 * col)
return flow_image
def flow_to_image(flow_uv, clip_flow=None, convert_to_bgr=False):
"""
    Expects a two dimensional flow image of shape [H,W,2].
Args:
flow_uv (np.ndarray): Flow UV image of shape [H,W,2]
clip_flow (float, optional): Clip maximum of flow values. Defaults to None.
convert_to_bgr (bool, optional): Convert output image to BGR. Defaults to False.
Returns:
np.ndarray: Flow visualization image of shape [H,W,3]
"""
assert flow_uv.ndim == 3, 'input flow must have three dimensions'
assert flow_uv.shape[2] == 2, 'input flow must have shape [H,W,2]'
if clip_flow is not None:
flow_uv = np.clip(flow_uv, 0, clip_flow)
u = flow_uv[:,:,0]
v = flow_uv[:,:,1]
rad = np.sqrt(np.square(u) + np.square(v))
    rad_max = 40.0  # fixed normalization constant
epsilon = 1e-5
u = u / (rad_max + epsilon)
v = v / (rad_max + epsilon)
return flow_uv_to_colors(u, v, convert_to_bgr)
|
d3d-hoi-main
|
visualization/annotation/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
import os
from model import JOHMRModel
from utils import (
initialize_render, merge_meshes,
load_motion,
save_meshes, save_parameters
)
import json
import tqdm
from matplotlib.image import imsave
import matplotlib.pyplot as plt
import cv2
import re
import numpy as np
from PIL import Image
import glob
from dataloader import MyOwnDataset
import torch.nn as nn
import torch.optim as optim
import argparse
import itertools
def isfloat(x):
try:
a = float(x)
except (TypeError, ValueError):
return False
else:
return True
def isint(x):
try:
a = float(x)
b = int(a)
except (TypeError, ValueError):
return False
else:
return a == b
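# How the two helpers classify entries of jointstate.txt (a sketch):
#   isfloat('30.5') -> True    isint('30.5') -> False
#   isfloat('30')   -> True    isint('30')   -> True
#   isfloat('None') -> False   isint('None') -> False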
#################
# training code #
#################
def run_exp(inputvideo):
# Initialize gpu device
assert torch.cuda.is_available()
device = torch.device("cuda:"+str(args.device))
global available_category
vidpath = inputvideo[1]
video_name = inputvideo[0]
if(video_name[:4]=='b001'):
video_category = 'dishwasher'
elif(video_name[:4]=='b003'):
video_category = 'laptop'
elif(video_name[:4]=='b004'):
video_category = 'microwave'
elif(video_name[:4]=='b005'):
video_category = 'refrigerator'
elif(video_name[:4]=='b006'):
video_category = 'trashcan'
elif(video_name[:4]=='b007'):
video_category = 'washingmachine'
elif(video_name[:4]=='b008'):
video_category = 'storage_revolute'
elif(video_name[:4]=='b108'):
video_category = 'storage_prismatic'
elif(video_name[:4]=='b009'):
video_category = 'oven'
    else:
        print('not available category...')
        return False
print('processing '+video_name+' for category '+video_category)
# load gt annotation, find the correct object size, cad model, part id, and focal len
with open(os.path.join(vidpath, '3d_info.txt')) as myfile:
gt_data = [next(myfile).strip('\n') for x in range(14)]
# Initialize object scale (x, y, z)
obj_sizeX = float(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[9])[0])
obj_sizeY = float(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[9])[1])
obj_sizeZ = float(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[9])[2])
obj_dimension = [obj_sizeX, obj_sizeY, obj_sizeZ] # in cm
# initialize object cad model and part id
if args.use_gt_objmodel:
if args.use_gt_objpart:
cad_object = os.path.join(args.cadpath, video_category, re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[10])[0])
cad_part = int(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[11])[0])
cad_models = [(cad_object, cad_part)]
else:
cad_models = []
cad_object = os.path.join(args.cadpath, video_category, re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[10])[0])
with open(os.path.join(cad_object, 'motion.json')) as json_file:
cad_parts = len(json.load(json_file))
for cad_part in range(cad_parts):
cad_models.append((cad_object,cad_part))
else:
# iter through all cad models in that category
cad_models = []
cad_objects = [f.path for f in os.scandir(os.path.join(args.cadpath, video_category))]
for cad_object in cad_objects:
with open(os.path.join(cad_object, 'motion.json')) as json_file:
cad_parts = len(json.load(json_file))
for cad_part in range(cad_parts):
cad_models.append((cad_object,cad_part))
# initialize focal len
focal_len = float(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[12])[0])
assert(focal_len == float(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[13])[0])) # in pixel (for 1280x720 only)
    # initialize data loader (use all frames at once)
dataset = MyOwnDataset(inputvideo[1])
img_square, img_small = dataset.correct_image_size(200,300)
if img_small <= 0:
print('can not find small image size')
return False
trainloader = torch.utils.data.DataLoader(dataset, batch_size=len(dataset), pin_memory=True, shuffle=False, num_workers=4)
# initialize render
silhouette_renderer, phong_renderer = initialize_render(device, focal_len, focal_len, img_square, img_small)
# load all data per video
for idx, data in enumerate(trainloader):
imgs = data['image'].permute(0,2,3,1).to(device)
batch_size = imgs.shape[0]
img_h = imgs.shape[1]
img_w = imgs.shape[2]
points = data['smplv2d'].to(device).float()
smpl_verts = data['ver'].to(device).float()
smpl_faces = data['f'].to(device).float()
joints = data['joint3d'].to(device).float()
normal = data['normal'].to(device).float()
normal2 = data['normal2'].to(device).float()
objmask = data['objmask'].permute(0,2,3,1).to(device).float()
print('data loaded...')
# load gt part motion
gt_partmotion = []
fp = open(os.path.join(vidpath, 'jointstate.txt'))
for i, line in enumerate(fp):
line = line.strip('\n')
if isfloat(line) or isint(line):
gt_partmotion.append(float(line))
gt_partmotion = np.asarray(gt_partmotion)
# Infer HOI snippet from GT part motions
diff = gt_partmotion[:-1] - gt_partmotion[1:] # (i - i+1)
if video_category == 'storage_prismatic':
large_diff = np.where(abs(diff)>0.5)[0]
else:
large_diff = np.where(abs(diff)>2)[0]
care_idx = np.union1d(large_diff, large_diff+1)
care_idx = np.clip(care_idx, 0, len(gt_partmotion)-1)
# compute object mask center
obj_x_center = 0
obj_y_center = 0
count = 1e-5
for mask in objmask:
if torch.sum(mask) > 0:
count += 1
small_img = mask.squeeze().detach().cpu().numpy()
large_img = cv2.resize(small_img, dsize=(img_square, img_square), interpolation=cv2.INTER_NEAREST)
x, y, w, h = cv2.boundingRect(np.uint8(large_img))
obj_x_center += int(x+0.5*w)
obj_y_center += int(y+0.5*h)
obj_x_center /= count
obj_y_center /= count
###############################################
# optimize different cad model configurations #
###############################################
final_losses = []
folders = []
for (obj_path, part_idx) in cad_models:
cad_name = re.findall(r'\d+', obj_path)[-1]
# load object mesh
verts, faces, vertexSegs, faceSegs = merge_meshes(obj_path)
verts[:,1:] *= -1 # pytorch3d -> world coordinate
if args.use_gt_objscale:
# compute object rescale value if using gt dimension (cm)
x_diff = torch.max(verts[:,0]) - torch.min(verts[:,0])
x_ratio = obj_dimension[0] / x_diff
y_diff = torch.max(verts[:,1]) - torch.min(verts[:,1])
y_ratio = obj_dimension[1] / y_diff
z_diff = torch.max(verts[:,2]) - torch.min(verts[:,2])
z_ratio = obj_dimension[2] / z_diff
else:
if video_category == 'laptop':
initial_dim = 5.0
elif cad_name == '10797':
initial_dim = 20.0 # small fridge
elif video_category == 'refrigerator':
initial_dim = 100.0 # large fridge
else:
initial_dim = 50.0
x_diff = torch.max(verts[:,0]) - torch.min(verts[:,0])
x_ratio = x_diff * initial_dim
y_diff = torch.max(verts[:,1]) - torch.min(verts[:,1])
y_ratio = y_diff * initial_dim
z_diff = torch.max(verts[:,2]) - torch.min(verts[:,2])
z_ratio = z_diff * initial_dim
obj_verts = verts.to(device)
obj_faces = faces.to(device)
# load motion json file
with open(os.path.join(obj_path, 'motion.json')) as json_file:
motions = json.load(json_file)
assert len(motions) + 2 == len(vertexSegs)
rot_origin, rot_axis, rot_type, limit_a, limit_b, contact_list = load_motion(motions, device)
# Hand, object contact vertex id
handcontact = [2005, 5466] # left, right hand from SMPL
objcontact = contact_list[part_idx]
# Optimize for all possible settings
for handcontact_v in handcontact:
for objcontact_v in objcontact:
meta_info = str(part_idx)+'_'+str(objcontact_v)+'_'+str(handcontact_v)
                    # initialize model
model = JOHMRModel(imgs.detach(), obj_verts.detach(), obj_faces.detach(),
smpl_verts.detach(), smpl_faces.detach(), points.detach(),
silhouette_renderer, phong_renderer, normal.detach(), normal2.detach(), objmask.detach(),
rot_origin, rot_axis, rot_type, vertexSegs, faceSegs, limit_a, limit_b,
img_small ,focal_len, joints.detach())
# initialize optimizer
optimizer = optim.Adam(model.parameters(), lr=0.05) # 0.05
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=int(0.75*args.iter), gamma=0.1)
# Start optimizing
for iteration in range(args.iter):
loss, loss_meta = model(iteration, args, cad_name, care_idx, part_idx,
handcontact_v, objcontact_v, obj_x_center, obj_y_center,
x_ratio, y_ratio, z_ratio)
if loss_meta is not None:
print('Iteration %d lr %.4f, total loss %.4f, smpl %.4f, mask %.4f, hfacing %.4f, depth %.4f, gamma %.4f, alpha %.4f, size %.3f, contact %.4f'
% (iteration, optimizer.param_groups[0]['lr'], loss.data, loss_meta['l_points'], loss_meta['l_mask'], loss_meta['l_direction'],
loss_meta['l_depth'],loss_meta['l_gamma'],loss_meta['l_alpha'], loss_meta['l_prior'],loss_meta['l_contact']))
optimizer.zero_grad()
loss.backward()
optimizer.step()
scheduler.step()
# save results
param_path = os.path.join(args.exp_name, 'params', video_name,cad_name, meta_info)
save_parameters(model, param_path)
final_losses.append(loss_meta['final_loss'])
folders.append(param_path)
# Only keep best result
best_run = final_losses.index(min(final_losses))
folders.remove(folders[best_run])
for folder in folders:
os.system('rm -r '+folder)
if __name__ == "__main__":
global available_category
parser = argparse.ArgumentParser()
parser.add_argument('--iter', type=int)
parser.add_argument('--use_gt_objscale', action='store_true')
parser.add_argument('--use_gt_objmodel', action='store_true')
parser.add_argument('--use_gt_objpart', action='store_true')
parser.add_argument('--objmask', type=float)
parser.add_argument('--hfacing', type=float)
parser.add_argument('--depth', type=float)
parser.add_argument('--gamma', type=float)
parser.add_argument('--alpha', type=float)
parser.add_argument('--range', type=float)
parser.add_argument('--smpl', type=float)
parser.add_argument('--contact', type=float)
parser.add_argument('--size', type=float)
parser.add_argument('--center', type=float)
parser.add_argument('--smooth', type=float)
parser.add_argument('--scale', type=float)
parser.add_argument('--category', type=str, help="which category to run")
parser.add_argument('--exp_name', type=str, help="experiment main folder")
parser.add_argument('--datapath', type=str, help="experiment data folder")
parser.add_argument('--cadpath', type=str, help="experiment data folder")
parser.add_argument("--device", type=int, help="CUDA Device Index")
args = parser.parse_args()
available_category = ['dishwasher', 'laptop', 'microwave', 'refrigerator', 'trashcan', 'washingmachine', 'oven', 'storage_revolute', 'storage_prismatic']
if args.category not in available_category and args.category!='all':
        print('please choose a valid category')
# create main exp folder
args.exp_path = os.path.join(os.getcwd(), args.exp_name)
if not os.path.exists(args.exp_path):
os.makedirs(args.exp_path)
videopath = []
# run on single object category
if args.category != 'all':
videopath = sorted([(f.name,f.path) for f in os.scandir(os.path.join(args.datapath, args.category))])
# run on all object categories
else:
videopath = []
for obj_class in available_category:
videopath.append(sorted([(f.name, f.path) for f in os.scandir(os.path.join(args.datapath, obj_class))]))
videopath = sorted(list(itertools.chain.from_iterable(videopath)))
print('total of '+str(len(videopath))+' experiments...')
# run locally
for i in range(len(videopath)):
run_exp(videopath[i])
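# Example invocation (a sketch; the paths and loss weights below are illustrative placeholders,
# not values prescribed by the repository):
#   python optimize.py --category laptop --exp_name exp_laptop --device 0 --iter 200 \
#       --datapath /path/to/videos --cadpath /path/to/processed_cads \
#       --use_gt_objmodel --use_gt_objpart --use_gt_objscale \
#       --smpl 1.0 --objmask 1.0 --hfacing 1.0 --depth 1.0 --gamma 1.0 --alpha 1.0 \
#       --range 1.0 --contact 1.0 --size 1.0 --center 1.0 --smooth 1.0 --scale 1.0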
|
d3d-hoi-main
|
optimization/optimize.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import torch.nn as nn
import torch
import numpy as np
from pytorch3d.renderer import (
look_at_view_transform, TexturesVertex
)
import math
from pytorch3d.structures import Meshes
import cv2
import matplotlib.pyplot as plt
from utils import rotation_matrix_batch
from scipy.ndimage.filters import gaussian_filter1d
from pytorch3d.io import save_obj
from pytorch3d.transforms import (
RotateAxisAngle, matrix_to_euler_angles, euler_angles_to_matrix
)
from pytorch3d.transforms.rotation_conversions import (
rotation_6d_to_matrix, matrix_to_rotation_6d
)
import time
from matplotlib.image import imsave
import os
from torch.autograd import Variable
import open3d as o3d
class JOHMRModel(nn.Module):
""" Differentiable render for fitting CAD model based on silhouette and human. """
def __init__(self, imgs, obj_verts, obj_faces, smpl_verts, smpl_faces, points,
diff_render, vis_render, normal, normal2, objmask,
rot_o, axis, rot_type, vertexSegs, faceSegs, limit_a, limit_b,
img_size_small ,focal_len, joints):
super(JOHMRModel, self).__init__()
self.imgs = imgs
self.objmask = objmask[..., 0]
self.objmask.requires_grad = False
self.device = smpl_verts.device
self.diff_render = diff_render
self.vis_render = vis_render
self.obj_verts_orig = obj_verts
self.obj_faces = obj_faces
self.smpl_verts_orig = smpl_verts
self.smpl_faces = smpl_faces
self.points = points
self.rot_origs = rot_o
self.rot_axises = axis
self.vertexSegs = vertexSegs
self.faceSegs = faceSegs
self.limit_as = limit_a
self.limit_bs = limit_b
self.rot_type = rot_type
self.bs = self.imgs.shape[0]
self.normal = normal
self.normal2 = normal2
self.img_h = self.imgs.shape[1]
self.img_w = self.imgs.shape[2]
self.new_s = int((max(self.img_h, self.img_w) - min(self.img_h, self.img_w))/2)-1
self.img_small = img_size_small
self.focal = focal_len
self.joints = joints
self.normalize = 1.0/(0.5*(self.img_h+self.img_w))
K = torch.from_numpy(np.array([[self.focal, 0, self.img_w/2],
[0, self.focal, self.img_h/2],
[0,0,1]]))
self.K = K.float().to(self.device)
# camera is at the center
self.R, self.T = look_at_view_transform(0.1, 0.0, 0.0, device=self.device)
self.T[0,2] = 0.0 # manually set to zero
# object CAD x, y, z offset in 3D
obj_offset = np.array([0.0, 0.0, 2.5], dtype=np.float32)
self.obj_offset = nn.Parameter(torch.from_numpy(obj_offset).to(self.device))
# object CAD scale in 3D
obj_scale = np.array([1.0, 1.0, 1.0], dtype=np.float32)
self.obj_scale = nn.Parameter(torch.from_numpy(obj_scale).to(self.device))
# SPIN mesh x, y, z offset in 3D
smpl_offset = np.zeros((self.bs,3), dtype=np.float32)
smpl_offset[:,0] = 0.0
smpl_offset[:,1] = 0.0
smpl_offset[:,2] = 2.5
self.smpl_offset = nn.Parameter(torch.from_numpy(smpl_offset).to(self.device))
# local rotation angle or translation offset for the parts
part_motion = 0.0*np.ones(self.bs, dtype=np.float32)
self.part_motion = nn.Parameter(torch.from_numpy(part_motion).to(self.device))
# global rotation angle for the object CAD
        yaw_degree = 0.0  # initial global object rotation, in degrees
rot_mat = RotateAxisAngle(yaw_degree, axis='X').get_matrix()
rot_mat = rot_mat[0,:3,:3].unsqueeze(0)
ortho6d = matrix_to_rotation_6d(rot_mat)
self.obj_rot_angle = nn.Parameter(ortho6d.to(self.device))
# curve rotation in 3D
        yaw_degree2 = 0.0  # initial curve rotation, in degrees
rot_mat2 = RotateAxisAngle(yaw_degree2, axis='Y').get_matrix()
rot_mat2 = rot_mat2[0,:3,:3].unsqueeze(0)
ortho6d2 = matrix_to_rotation_6d(rot_mat2)
self.curve_rot_angle = nn.Parameter(ortho6d2.to(self.device))
curve_offset = np.array([0.0, 0.0, 0.0], dtype=np.float32)
self.curve_offset = nn.Parameter(torch.from_numpy(curve_offset).to(self.device))
self.cos = nn.CosineSimilarity(dim=1, eps=1e-6)
self.relu = nn.ReLU()
return
def forward(self, iteration, args, cad_name, care_idx, part_idx, handcontact_v, objcontact_v,
obj_x_center, obj_y_center, x_ratio, y_ratio, z_ratio):
# Predefined CAD segmentation and motion axis from SAPIEN
self.vertexStart = self.vertexSegs[part_idx]
self.vertexEnd = self.vertexSegs[part_idx+1]
faceStart = self.faceSegs[part_idx]
faceEnd = self.faceSegs[part_idx+1]
self.rot_o = self.rot_origs[part_idx].clone().to(self.device).detach()
self.axis = self.rot_axises[part_idx].clone().to(self.device).detach()
limit_a = self.limit_as[part_idx]
limit_b = self.limit_bs[part_idx]
self.rot_o.requires_grad = False
self.axis.requires_grad = False
# Transform pytorch3d -> world coordinate
self.rot_o[1:] *= -1
self.axis[1:] *= -1
####################
## fit human mesh ##
####################
self.smpl_verts = self.smpl_verts_orig.clone()
# Resize human mesh
smplmesh_calibrate_path = 'smplmesh-calibrate.obj'
smplmesh_calibrate = o3d.io.read_triangle_mesh(smplmesh_calibrate_path) # load smpl mesh
hverts_cal = torch.from_numpy(np.asarray(smplmesh_calibrate.vertices)).float()
human_height = 175 #cm
h_diff = torch.max(hverts_cal[:,1]) - torch.min(hverts_cal[:,1])
h_ratio = (human_height / h_diff).detach()
self.smpl_verts *= h_ratio
# Add x y z offsets to SMPL mesh (camera looking at positive depth z)
smpl_offset = self.smpl_offset.reshape(-1,1,3).repeat(1,self.smpl_verts_orig.shape[1],1) # (bs, 6890, 3)
self.smpl_verts[:,:,0] += args.scale*smpl_offset[:,:,0]
self.smpl_verts[:,:,1] += args.scale*smpl_offset[:,:,1]
self.smpl_verts[:,:,2] += args.scale*smpl_offset[:,:,2] #smpl_offset[:,:,2] #smpl_offset[0,:,2]
# Compute projection matrix K
K_batch = self.K.expand(self.smpl_verts.shape[0],-1,-1)
# Prespective projection
points_out_v = torch.bmm(self.smpl_verts, K_batch.permute(0,2,1))
self.smpl_2d = points_out_v[...,:2] / points_out_v[...,2:]
# Human fitting error
l_points = torch.mean(self.normalize*(self.points - self.smpl_2d)**2)
#####################
## optimize object ##
#####################
self.obj_rot_mat = rotation_6d_to_matrix(self.obj_rot_angle)[0].to(self.device)
# pitch, yaw, roll
alpha,beta,gamma = matrix_to_euler_angles(rotation_6d_to_matrix(self.obj_rot_angle), ["X","Y","Z"])[0]
obj_verts_batch = self.obj_verts_orig.reshape(1,-1,3).repeat(self.bs,1,1) # (bs, ver, 3)
# Step 1: rescale object and rotation orig
if not args.use_gt_objscale:
sx = self.obj_scale[0] * x_ratio
sy = self.obj_scale[1] * y_ratio
sz = self.obj_scale[2] * z_ratio
else:
sx = x_ratio
sy = y_ratio
sz = z_ratio
obj_verts_batch[:,:,0] *= sx
obj_verts_batch[:,:,1] *= sy
obj_verts_batch[:,:,2] *= sz
self.rot_o[0] *= sx
self.rot_o[1] *= sy
self.rot_o[2] *= sz
        # Object real-world dimensions after scaling
self.x_dim = torch.max(obj_verts_batch[0,:,0]) - torch.min(obj_verts_batch[0,:,0])
self.y_dim = torch.max(obj_verts_batch[0,:,1]) - torch.min(obj_verts_batch[0,:,1])
self.z_dim = torch.max(obj_verts_batch[0,:,2]) - torch.min(obj_verts_batch[0,:,2])
# Step 2: add part motion (prismatic or revolute)
if cad_name == '45261' or cad_name == '45132':
obj_verts_batch_t1 = obj_verts_batch[:, self.vertexStart:self.vertexEnd, :] - self.rot_o
self.part_motion_scaled = self.part_motion * args.scale
batch_offset = self.axis.unsqueeze(0).repeat(self.bs,1) * self.part_motion_scaled.unsqueeze(-1).repeat(1,3)
obj_verts_batch_t2 = obj_verts_batch_t1 + batch_offset.unsqueeze(1).repeat(1,obj_verts_batch_t1.shape[1], 1)
obj_verts_batch[:, self.vertexStart:self.vertexEnd, :] = obj_verts_batch_t2 + self.rot_o
else:
self.part_rot_mat = rotation_matrix_batch(self.axis, self.part_motion, self.device)
obj_verts_batch_t1 = obj_verts_batch[:, self.vertexStart:self.vertexEnd, :] - self.rot_o
obj_verts_batch_t2 = torch.bmm(self.part_rot_mat, obj_verts_batch_t1.permute(0,2,1)).permute(0,2,1)
obj_verts_batch[:, self.vertexStart:self.vertexEnd, :] = obj_verts_batch_t2 + self.rot_o
# Step 3: add global object rotation
obj_verts_batch = torch.bmm(self.obj_rot_mat.reshape(1,3,3).repeat(self.bs,1,1),
obj_verts_batch.permute(0,2,1)).permute(0,2,1)
# Step 4: add global object translation
self.obj_verts_batch = obj_verts_batch + args.scale*self.obj_offset
# Object center error
obj_2d = torch.bmm(self.obj_verts_batch, K_batch.permute(0,2,1))
self.obj_2d = (obj_2d[...,:2] / (obj_2d[...,2:])) # (bs, objV, 2)
obj_2d_x_center = torch.mean(self.obj_2d[:,:,0])
obj_2d_y_center = torch.mean(self.obj_2d[:,:,1])
if self.img_w > self.img_h:
obj_2d_y_center += self.new_s
else:
obj_2d_x_center += self.new_s
l_mask_center = self.normalize*(obj_y_center - obj_2d_y_center)**2 + self.normalize*(obj_x_center - obj_2d_x_center)**2
# Object & human orientation error
if '10213' in cad_name or '9968' in cad_name:
# Difficult to predefine orientation for laptop
# Use CAD base part
front_vertex = self.obj_verts_orig[645+581].detach()
top_vertex = self.obj_verts_orig[645+285].detach()
base_center = self.obj_verts_orig[self.vertexSegs[-2]:self.vertexSegs[-1]].detach()
obj_norm = torch.mean(base_center, 0) - front_vertex
obj_norm_rot = torch.mm(self.obj_rot_mat, obj_norm.float().reshape(-1,1)).permute(1,0)
output = self.cos(self.normal, obj_norm_rot.repeat(self.bs, 1))
l_direction = torch.mean((1.0 - output)[care_idx])
obj_norm2 = top_vertex - torch.mean(base_center, 0)
obj_norm_rot2 = torch.mm(self.obj_rot_mat, obj_norm2.float().reshape(-1,1)).permute(1,0)
output2 = self.cos(self.normal2, obj_norm_rot2.repeat(self.bs, 1))
l_direction2 = torch.mean((1.0 - output2))
else:
obj_norm = torch.from_numpy(np.asarray([0,0,1])).to(self.device)
obj_norm2 = torch.from_numpy(np.asarray([0,-1,0])).to(self.device)
obj_norm_rot = torch.mm(self.obj_rot_mat, obj_norm.float().reshape(-1,1)).permute(1,0)
obj_norm_rot2 = torch.mm(self.obj_rot_mat, obj_norm2.float().reshape(-1,1)).permute(1,0)
output = self.cos(self.normal, obj_norm_rot.repeat(self.bs, 1))
output2 = self.cos(self.normal2, obj_norm_rot2.repeat(self.bs, 1))
l_direction = torch.mean((1.0 - output)[care_idx])
l_direction2 = torch.mean((1.0 - output2))
# Differentiable mask error
diff_images = []
for index in range(self.bs):
# convert object mesh for diff render, opengl -> pytorch3d
p3d_obj_verts = self.obj_verts_batch[index].clone()
p3d_obj_verts[:,1] *= -1
p3d_obj_verts[:,2] *= -1
# pass through diff render
tex = torch.ones_like(p3d_obj_verts).unsqueeze(0)
textures = TexturesVertex(verts_features=tex).to(self.device)
obj_mesh = Meshes(verts=[p3d_obj_verts],faces=[self.obj_faces],textures=textures)
diff_img = self.diff_render(meshes_world=obj_mesh, R=self.R, T=self.T)
diff_img = diff_img[..., 3:]
diff_img = diff_img.permute(0,3,1,2)[0,0,:,:] #(h,w)
diff_images.append(diff_img)
diff_images = torch.stack(diff_images) #(bs,h,w)
mask2 = (diff_images>0).detach()
l_gtMask = torch.mean(self.objmask*(diff_images-self.objmask)**2)
l_rendMask = torch.mean(mask2*((diff_images-self.objmask)**2))
mask_diff = torch.mean((diff_images-self.objmask)**2)
l_mask = 0.3*l_rendMask + 0.6*l_gtMask + 0.1*mask_diff
# Hand & object 3D contact error
self.curve_rot_angle_mat = rotation_6d_to_matrix(self.curve_rot_angle)[0].to(self.device)
l_contact = torch.zeros(1).to(self.device)
if '102156' not in cad_name and '103635' not in cad_name:
obj_contact_curve = self.obj_verts_batch[care_idx, objcontact_v, :].clone()
smpl_contact_curve = self.smpl_verts[care_idx, handcontact_v, :].clone().detach()
obj_contact_curve_after = torch.t(torch.mm(self.curve_rot_angle_mat, torch.t(obj_contact_curve))) + 5.0*self.curve_offset
l_contact = self.normalize * torch.mean((obj_contact_curve_after- smpl_contact_curve)**2)
# Smoothing error
nomotion_idx = list(set(list(range(0, len(self.part_motion)-1))) - set(care_idx.tolist()))
partrot_first = self.part_motion[:-1]
partrot_second = self.part_motion[1:]
l_smooth = torch.mean((partrot_first - partrot_second)[np.array(nomotion_idx)]**2)
# Motion range error
l_range = torch.mean(self.relu(limit_a - self.part_motion) + self.relu(self.part_motion-limit_b))
# Roll, pitch constraint (except for laptop)
if '10213' in cad_name or '9968' in cad_name:
l_gamma = torch.zeros(1).to(self.device)
l_alpha = torch.zeros(1).to(self.device)
else:
l_alpha = self.relu(-alpha-0.2)**2
l_gamma = self.relu(torch.abs(gamma)-0.2)**2
# Depth constraint
l_depth = torch.mean((self.smpl_offset[care_idx,2].detach() - self.obj_offset[2])**2)
# Object size constraint
#l_size = torch.sum(self.relu(0.1 - self.obj_scale))
l_size = torch.mean(self.relu(self.obj_scale-0.1)**2)
# Overall error
overall_loss = args.smpl*l_points + args.objmask*l_mask +\
args.depth*l_depth + args.smooth*l_smooth + args.range*l_range +\
args.gamma*l_gamma + args.alpha*l_alpha +\
args.hfacing*(l_direction+l_direction2) + args.contact*l_contact
if iteration <= int(0.5*args.iter):
overall_loss += args.center*l_mask_center
if iteration > int(0.5*args.iter):
overall_loss += (args.size*l_size )
loss_meta = {}
loss_meta['l_mask'] = args.objmask*l_mask.data.detach().cpu().numpy()
loss_meta['l_center'] = args.center*l_mask_center.data.detach().cpu().numpy()
loss_meta['l_contact'] = args.contact*l_contact.data.detach().cpu().numpy()
loss_meta['l_depth'] = args.depth*l_depth.data.detach().cpu().numpy()
loss_meta['l_gamma'] = args.gamma*l_gamma.data.detach().cpu().numpy()
loss_meta['l_alpha'] = args.alpha*l_alpha.data.detach().cpu().numpy()
        loss_meta['l_range'] = args.range*l_range.data.detach().cpu().numpy()
        loss_meta['l_smooth'] = args.smooth*l_smooth.data.detach().cpu().numpy()
loss_meta['l_prior'] = args.size*l_size.data.detach().cpu().numpy()
loss_meta['l_direction'] = args.hfacing*(l_direction.data.detach().cpu().numpy() + l_direction2.data.detach().cpu().numpy() )
loss_meta['l_points'] = args.smpl*l_points.data.detach().cpu().numpy()
loss_meta['overall_loss'] = overall_loss.data.detach().cpu().item()
loss_meta['final_loss'] = loss_meta['l_mask'] + 0.3*loss_meta['l_contact'] + loss_meta['l_depth'] + loss_meta['l_range'] + loss_meta['l_smooth'] +\
loss_meta['l_gamma'] + loss_meta['l_alpha'] + loss_meta['l_prior'] + 0.3*loss_meta['l_direction']
return overall_loss, loss_meta
def render(self, save_folder=None):
obj_meshes = []
smpl_meshes = []
for index in range(self.bs):
smpl_verts = self.smpl_verts[index]
obj_verts = self.obj_verts_batch[index]
# create SPIN mesh (opengl)
tex = torch.ones_like(smpl_verts).unsqueeze(0)
textures = TexturesVertex(verts_features=tex).to(self.device)
smpl_mesh = Meshes(verts=[smpl_verts],faces=[self.smpl_faces[index]],textures=textures).detach()
smpl_meshes.append(smpl_mesh)
# create object mesh for diff render and visualization
tex = torch.ones_like(obj_verts).unsqueeze(0)
textures = TexturesVertex(verts_features=tex).to(self.device)
obj_mesh = Meshes(verts=[obj_verts],faces=[self.obj_faces],textures=textures).detach()
obj_meshes.append(obj_mesh)
meshes = {'obj_mesh':obj_meshes, 'spin_mesh':smpl_meshes}
return meshes
|
d3d-hoi-main
|
optimization/model.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
import torch
import natsort
import glob
import open3d as o3d
# rendering components
from pytorch3d.renderer import (
FoVPerspectiveCameras,RasterizationSettings,
MeshRenderer, MeshRasterizer, BlendParams,
SoftSilhouetteShader, HardPhongShader, PointLights, DirectionalLights,
PerspectiveCameras
)
from pytorch3d.io import save_obj, load_obj
import math
import cv2
import matplotlib.pyplot as plt
import os
import imageio
from decimal import Decimal
import json
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
from mpl_toolkits.mplot3d import axes3d
from scipy.ndimage.filters import gaussian_filter1d
from numpy.linalg import svd
from multiprocessing import Pool, Manager, cpu_count
from pytorch3d.transforms import Rotate, Translate
from matplotlib.image import imsave
from pathlib import Path
def planeFit(points):
"""
p, n = planeFit(points)
Given an array, points, of shape (d,...)
representing points in d-dimensional space,
fit an d-dimensional plane to the points.
Return a point, p, on the plane (the point-cloud centroid),
and the normal, n.
"""
    points = np.reshape(points, (np.shape(points)[0], -1)) # Collapse trailing dimensions
assert points.shape[0] <= points.shape[1], "There are only {} points in {} dimensions.".format(points.shape[1], points.shape[0])
ctr = points.mean(axis=1)
x = points - ctr[:,np.newaxis]
M = np.dot(x, x.T) # Could also use np.cov(x) here.
return ctr, svd(M)[0][:,-1]
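# Minimal usage sketch for planeFit (hypothetical data): sample points that lie
# close to the z = 0 plane and recover the centroid and unit normal. The (d, N)
# layout matches the docstring above; the returned normal is only defined up to sign.
def _planefit_example():
    pts = np.random.randn(3, 200)
    pts[2] *= 0.01  # squash the z axis so the best-fit plane is roughly z = 0
    ctr, normal = planeFit(pts)
    return ctr, normal  # normal is approximately +/- [0, 0, 1]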
def initialize_render(device, focal_x, focal_y, img_square_size, img_small_size):
""" initialize camera, rasterizer, and shader. """
# Initialize an OpenGL perspective camera.
#cameras = FoVPerspectiveCameras(znear=1.0, zfar=9000.0, fov=20, device=device)
#cameras = FoVPerspectiveCameras(device=device)
#cam_proj_mat = cameras.get_projection_transform()
img_square_center = int(img_square_size/2)
shrink_ratio = int(img_square_size/img_small_size)
focal_x_small = int(focal_x/shrink_ratio)
focal_y_small = int(focal_y/shrink_ratio)
img_small_center = int(img_small_size/2)
camera_sfm = PerspectiveCameras(
focal_length=((focal_x, focal_y),),
principal_point=((img_square_center, img_square_center),),
image_size = ((img_square_size, img_square_size),),
device=device)
camera_sfm_small = PerspectiveCameras(
focal_length=((focal_x_small, focal_y_small),),
principal_point=((img_small_center, img_small_center),),
image_size = ((img_small_size, img_small_size),),
device=device)
    # Blending parameters control the opacity and the sharpness of edges.
    # Refer to blending.py for more details.
    blend_params = BlendParams(sigma=1e-4, gamma=1e-4)
    # Rasterization settings for the soft silhouette renderer: rasterize at the
    # downsampled img_small_size resolution and blend the 50 closest faces per
    # pixel. Refer to rasterize_meshes.py and docs/notes/renderer.md for details.
raster_settings = RasterizationSettings(
image_size=img_small_size,
blur_radius=np.log(1. / 1e-4 - 1.) * blend_params.sigma,
faces_per_pixel=50,
)
# Create a silhouette mesh renderer by composing a rasterizer and a shader.
silhouette_renderer = MeshRenderer(
rasterizer=MeshRasterizer(
cameras=camera_sfm_small,
raster_settings=raster_settings
),
shader=SoftSilhouetteShader(blend_params=blend_params)
)
# We will also create a phong renderer. This is simpler and only needs to render one face per pixel.
raster_settings = RasterizationSettings(
image_size=img_square_size,
blur_radius=0.0,
faces_per_pixel=1,
)
# We can add a point light in front of the object.
lights = PointLights(device=device, location=((2.0, 2.0, -2.0),))
#lights = DirectionalLights(device=device, direction=((0, 0, 1),))
phong_renderer = MeshRenderer(
rasterizer=MeshRasterizer(
cameras=camera_sfm,
raster_settings=raster_settings
),
shader=HardPhongShader(device=device, cameras=camera_sfm, lights=lights)
)
return silhouette_renderer, phong_renderer
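# Hedged usage sketch for initialize_render (values are illustrative):
#   device = torch.device('cuda:0')
#   diff_render, vis_render = initialize_render(device, focal_x=1000, focal_y=1000,
#                                               img_square_size=1920, img_small_size=240)
# Both renderers are called the same way as elsewhere in this code base:
#   silhouette = diff_render(meshes_world=mesh, R=R, T=T)[..., 3]   # soft alpha mask
#   rgb = vis_render(meshes_world=mesh, R=R, T=T)                   # hard Phong rendering
# The silhouette renderer rasterizes at the downsampled resolution, which is why
# its focal length is divided by the shrink ratio and its principal point is the
# small-image center.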
def merge_meshes(obj_path):
""" helper function for loading and merging meshes. """
verts_list = torch.empty(0,3)
faces_list = torch.empty(0,3).long()
num_vtx = [0]
num_faces = [0]
# merge meshes, load in ascending order
meshes = natsort.natsorted(glob.glob(obj_path+'/final/*_rescaled_sapien.obj'))
for part_mesh in meshes:
verts, faces, aux = load_obj(part_mesh)
faces = faces.verts_idx
faces = faces + verts_list.shape[0]
verts_list = torch.cat([verts_list, verts])
faces_list = torch.cat([faces_list, faces])
num_vtx.append(verts_list.shape[0])
num_faces.append(faces_list.shape[0])
return verts_list, faces_list, num_vtx, num_faces
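# Minimal sketch of the directory layout merge_meshes expects (hypothetical
# path): every part mesh lives under <obj_path>/final/ and is named so that
# natural sorting preserves the SAPIEN part order, e.g. 0_rescaled_sapien.obj,
# 1_rescaled_sapien.obj, ...
#   verts, faces, num_vtx, num_faces = merge_meshes('cad_sapien/laptop/10213')
#   first_part_verts = verts[num_vtx[0]:num_vtx[1]]   # vertices of part 0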
def load_motion(motions, device):
""" load rotation axis, origin, and limit. """
rot_origin = []
rot_axis = []
rot_type = []
limit_a = []
limit_b = []
contact_list = []
# load all meta data
for idx, key in enumerate(motions.keys()):
jointData = motions[key]
# if contains movable parts
if jointData is not None:
origin = torch.FloatTensor(jointData['axis']['origin']).to(device)
axis = torch.FloatTensor(jointData['axis']['direction']).to(device)
mobility_type = jointData['type']
contact_list.append(jointData['contact'])
# convert to radians if necessary
if mobility_type == 'revolute':
mobility_a = math.pi*jointData['limit']['a'] / 180.0
mobility_b = math.pi*jointData['limit']['b'] / 180.0
else:
assert mobility_type == 'prismatic'
mobility_a = jointData['limit']['a']
mobility_b = jointData['limit']['b']
rot_origin.append(origin)
rot_axis.append(axis)
rot_type.append(mobility_type)
limit_a.append(mobility_a)
limit_b.append(mobility_b)
return rot_origin, rot_axis, rot_type, limit_a, limit_b, contact_list
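# Hedged sketch of the motion dictionary load_motion expects (field names taken
# from the code above, values illustrative): one entry per part, None for a
# static part, otherwise an axis origin/direction, a joint type, a limit and a
# contact vertex list. Revolute limits are given in degrees and converted to
# radians here; prismatic limits are kept as-is.
#   motions = {
#       "0": None,                                              # fixed base
#       "1": {"type": "revolute",
#             "axis": {"origin": [0.0, 0.5, 0.0],
#                      "direction": [1.0, 0.0, 0.0]},
#             "limit": {"a": 0.0, "b": 90.0},
#             "contact": [123]},
#   }
#   rot_o, axis, rot_type, lim_a, lim_b, contacts = load_motion(motions, device)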
def save_object(id):
global obj_verts_dict
global obj_faces_dict
global save_path_mesh
verts = obj_verts_dict[str(id+1)]
faces = obj_faces_dict[str(id+1)]
path = os.path.join(save_path_mesh, str(id+1)+'_object.obj')
save_obj(path, torch.from_numpy(verts), torch.from_numpy(faces))
def save_human(id):
global human_verts_dict
global human_faces_dict
global save_path_mesh
verts = human_verts_dict[str(id+1)]
faces = human_faces_dict[str(id+1)]
path = os.path.join(save_path_mesh, str(id+1)+'_person.obj')
save_obj(path, torch.from_numpy(verts), torch.from_numpy(faces))
def save_meshes(meshes, save_folder, video_name, title):
global obj_verts_dict
global obj_faces_dict
global human_verts_dict
global human_faces_dict
global save_path_mesh
save_path_mesh = os.path.join(save_folder, title)
if not os.path.exists(save_path_mesh):
os.makedirs(save_path_mesh)
obj_meshes = meshes['obj_mesh']
spin_meshes = meshes['spin_mesh']
# merge object + SPIN meshes
obj_verts = {}
obj_faces = {}
human_verts = {}
human_faces = {}
for idx in range(len(obj_meshes)):
path = os.path.join(save_path_mesh, str(idx+1)+'_person.obj')
save_obj(path, spin_meshes[idx].verts_list()[0], spin_meshes[idx].faces_list()[0])
path = os.path.join(save_path_mesh, str(idx+1)+'_object.obj')
save_obj(path, obj_meshes[idx].verts_list()[0], obj_meshes[idx].faces_list()[0])
eft_cmd = 'python -m demo.demo_bodymocapnewnew --render solid --videoname '+video_name+' --vPath '+save_folder
os.chdir('/local-scratch/projects/d3dhoi/eft')
os.system(eft_cmd)
os.chdir('/local-scratch/projects/d3dhoi')
'''
save_path = os.path.join(save_folder, 'eft', 'front')
ffmpeg_cmd = 'ffmpeg -r 3 -i '+save_path+'/scene_%08d.jpg '+save_folder+'/frontview.mp4'
os.system(ffmpeg_cmd)
'''
return
def save_parameters(model, save_path):
if not os.path.exists(save_path):
os.makedirs(save_path)
obj_offset = model.obj_offset.detach().cpu().numpy()
x_dim = model.x_dim.item()
y_dim = model.y_dim.item()
z_dim = model.z_dim.item()
obj_rot_angle = model.obj_rot_angle.detach().cpu().numpy() #(3,3)
part_motion = model.part_motion.detach().cpu().numpy()
parameters = {}
parameters['obj_offset'] = obj_offset
parameters['obj_dim'] = [x_dim, y_dim, z_dim]
parameters['obj_rot_angle'] = obj_rot_angle
parameters['part_motion'] = part_motion
np.save(os.path.join(save_path, 'params.npy'), parameters)
return
def rotation_matrix(axis, theta):
"""
Return the rotation matrix associated with counterclockwise rotation about
the given axis by theta radians.
"""
axis = axis / torch.sqrt(torch.dot(axis, axis))
a = torch.cos(theta / 2.0)
b, c, d = -axis * torch.sin(theta / 2.0)
aa, bb, cc, dd = a * a, b * b, c * c, d * d
bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
rot_mat = torch.empty(3,3)
rot_mat[0,0] = aa + bb - cc - dd
rot_mat[0,1] = 2 * (bc + ad)
rot_mat[0,2] = 2 * (bd - ac)
rot_mat[1,0] = 2 * (bc - ad)
rot_mat[1,1] = aa + cc - bb - dd
rot_mat[1,2] = 2 * (cd + ab)
rot_mat[2,0] = 2 * (bd + ac)
rot_mat[2,1] = 2 * (cd - ab)
rot_mat[2,2] = aa + dd - bb - cc
return rot_mat
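# Minimal sanity-check sketch for rotation_matrix (hypothetical values): a 90
# degree counterclockwise rotation about the z axis should map the x axis onto
# the y axis.
def _rotation_matrix_example():
    axis = torch.tensor([0.0, 0.0, 1.0])
    theta = torch.tensor(math.pi / 2.0)
    rot = rotation_matrix(axis, theta)
    x_axis = torch.tensor([1.0, 0.0, 0.0])
    return torch.mv(rot, x_axis)  # approximately [0, 1, 0]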
def rotation_matrix_batch(axis, theta, device):
"""
Return the rotation matrix associated with counterclockwise rotation about
the given axis by theta radians.
"""
axis = axis / torch.sqrt(torch.dot(axis, axis))
a = torch.cos(theta / 2.0)
b = -axis[0] * torch.sin(theta / 2.0)
c = -axis[1] * torch.sin(theta / 2.0)
d = -axis[2] * torch.sin(theta / 2.0)
aa, bb, cc, dd = a * a, b * b, c * c, d * d
bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
rot_mat = torch.empty(aa.shape[0],3,3).to(device)
rot_mat[:,0,0] = aa + bb - cc - dd
rot_mat[:,0,1] = 2 * (bc + ad)
rot_mat[:,0,2] = 2 * (bd - ac)
rot_mat[:,1,0] = 2 * (bc - ad)
rot_mat[:,1,1] = aa + cc - bb - dd
rot_mat[:,1,2] = 2 * (cd + ab)
rot_mat[:,2,0] = 2 * (bd + ac)
rot_mat[:,2,1] = 2 * (cd - ab)
rot_mat[:,2,2] = aa + dd - bb - cc
return rot_mat
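# Hedged batch usage sketch (hypothetical values): one 3x3 rotation matrix per
# angle in a batch of part motions, all about the same axis.
#   axis = torch.tensor([0.0, 1.0, 0.0]).to(device)
#   thetas = torch.linspace(0.0, math.pi, steps=8).to(device)
#   rot_batch = rotation_matrix_batch(axis, thetas, device)   # shape (8, 3, 3)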
|
d3d-hoi-main
|
optimization/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import os
import numpy as np
import cv2
import matplotlib.pyplot as plt
import torch
from pytorch3d.transforms import (
so3_relative_angle,
euler_angles_to_matrix
)
from scipy.spatial.distance import cdist
import json
from utils import (
load_motion,
)
import re
import argparse
from pytorch3d.transforms.rotation_conversions import (
rotation_6d_to_matrix
)
def isfloat(x):
try:
a = float(x)
except (TypeError, ValueError):
return False
else:
return True
def isint(x):
try:
a = float(x)
b = int(a)
except (TypeError, ValueError):
return False
else:
return a == b
parser = argparse.ArgumentParser()
parser.add_argument('--cad_path', type=str, help="experiment cad folder")
parser.add_argument('--result_folder', type=str, help="experiment result folder")
parser.add_argument('--data_path', type=str, help="experiment data folder")
parser.add_argument('--scale', type=float)
args = parser.parse_args()
cad_path = args.cad_path
result_folder = args.result_folder
anno_path = args.data_path
videos = sorted([(f.name, f.path) for f in os.scandir(result_folder)])
results = {}
# loop through all videos
for idx, video in enumerate(videos):
vidname = video[0]
vidpath = video[1]
cads = sorted([(f.name, f.path) for f in os.scandir(vidpath)])
if(vidname[:4]=='b001'):
category = 'dishwasher'
elif(vidname[:4]=='b003'):
category = 'laptop'
elif(vidname[:4]=='b004'):
category = 'microwave'
elif(vidname[:4]=='b005'):
category = 'refrigerator'
elif(vidname[:4]=='b006'):
category = 'trashcan'
elif(vidname[:4]=='b007'):
category = 'washingmachine'
elif(vidname[:4]=='b008'):
category = 'storage_revolute'
elif(vidname[:4]=='b108'):
category = 'storage_prismatic'
elif(vidname[:4]=='b009'):
category = 'oven'
# loop through all cad models
for cad in cads:
cadname = cad[0]
cadpath = cad[1]
settings = sorted([(f.name, f.path) for f in os.scandir(cadpath)])
# loop through all settings
for setting in settings:
expname = setting[0]
exppath = setting[1]
partid = int(setting[0][0])
# load experiment meta
if not os.path.exists(os.path.join(exppath, 'params.npy')):
print('missing '+vidname +' for setting '+expname)
continue
expmeta = np.load(os.path.join(exppath, 'params.npy'), allow_pickle=True)
expmeta = expmeta.item()
# load estimated global object rotation
exp_rot_angle = torch.from_numpy(expmeta['obj_rot_angle'])
exp_rot_mat = rotation_6d_to_matrix(exp_rot_angle)
# load estimated global object translation (cm)
exp_t = expmeta['obj_offset'] * args.scale
# load estimated object dimension (cm)
exp_dim = expmeta['obj_dim']
# load estimated part motion (degree or cm)
if cadname == '45132' or cadname == '45261':
exp_partmotion = expmeta['part_motion'] * args.scale
else:
exp_partmotion = expmeta['part_motion'] * 57.296
# load gt part motion values (degree or cm)
gt_partmotion = []
fp = open(os.path.join(anno_path, category, vidname, 'jointstate.txt'))
for i, line in enumerate(fp):
line = line.strip('\n')
if isfloat(line) or isint(line):
gt_partmotion.append(float(line))
gt_partmotion = np.asarray(gt_partmotion)
with open(os.path.join(anno_path, category, vidname, '3d_info.txt')) as myfile:
gt_data = [next(myfile).strip('\n') for x in range(14)]
# GT global object rotation
gt_alpha = float(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[3])[0])
gt_beta = float(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[4])[0])
gt_gamma = float(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[5])[0])
gt_alpha_tensor = torch.tensor(gt_alpha).reshape(-1)
gt_beta_tensor = torch.tensor(gt_beta).reshape(-1)
gt_gamma_tensor = torch.tensor(gt_gamma).reshape(-1)
euler_angle = torch.cat((gt_alpha_tensor,gt_beta_tensor,gt_gamma_tensor),0).reshape(1,3)
rot_mat_gt = euler_angles_to_matrix(euler_angle, ["X","Y","Z"])
# GT global object translation (cm)
gt_x = float(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[6])[0])*100.0
gt_y = float(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[7])[0])*100.0
gt_z = float(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[8])[0])*100.0
# GT object dimension (cm)
gt_xdim = float(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[9])[0])
gt_ydim = float(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[9])[1])
gt_zdim = float(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[9])[2])
gt_cad = re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[10])[0]
gt_part = int(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[11])[0])
# CAD model correctness
correctness = gt_cad==cadname #and gt_part == partid
# Avg part motion abs error (degree or cm)
motion_error = np.mean(np.abs(gt_partmotion - exp_partmotion))
            # Global object rotation error: relative angle (in degrees) between the rotation matrices in SO(3)
R_dist = (so3_relative_angle(rot_mat_gt, exp_rot_mat, cos_angle=False).numpy()*57.296)[0]
# Global object translation error (in cm)
x_error = np.square(gt_x - exp_t[0])
y_error = np.square(gt_y - exp_t[1])
z_error = np.square(gt_z - exp_t[2])
T_dist = np.sqrt(x_error+y_error+z_error)
# Avg object dimension abs error (in cm)
xdim_error = np.abs(gt_xdim - exp_dim[0])
ydim_error = np.abs(gt_ydim - exp_dim[1])
zdim_error = np.abs(gt_zdim - exp_dim[2])
dim_error = (xdim_error + ydim_error + zdim_error)/3.0
# print per video result
with open(os.path.join(os.path.dirname(result_folder),"result.txt"), 'a') as f:
print(vidname+': ', file=f)
print('model: '+str(cadname)+', part: '+str(partid), file=f)
print('correctness: '+str(correctness), file=f)
print('orientation (degree): '+str(round(R_dist,4)), file=f)
print('location (cm): '+str(round(T_dist,4)), file=f)
if cadname == '45132' or cadname == '45261':
print('motion (cm): '+str(round(motion_error,4)), file=f)
else:
print('motion (degree): '+str(round(motion_error,4)), file=f)
print('dimension (cm): '+str(round(dim_error,4)), file=f)
print('--------------------------', file=f)
if not category in results:
results[category] = {}
results[category]['correctness'] = []
results[category]['orientation'] = []
results[category]['location'] = []
results[category]['motion'] = []
results[category]['dimension'] = []
results[category]['correctness'].append(int(correctness))
if not correctness:
continue
results[category]['orientation'].append(R_dist)
results[category]['location'].append(T_dist)
results[category]['motion'].append(motion_error)
results[category]['dimension'].append(dim_error)
# per-category results:
for key, value in results.items():
correct_percent = sum(value['correctness'])/len(value['correctness'])*100.0
motion_mean = sum(value['motion'])/len(value['motion'])
oriens_mean = sum(value['orientation'])/len(value['orientation'])
locs_mean = sum(value['location'])/len(value['location'])
dims_mean = sum(value['dimension'])/len(value['dimension'])
with open(os.path.join(os.path.dirname(result_folder),"result.txt"), 'a') as f:
print('--------------------------', file=f)
print(key+' model correctness: '+str(correct_percent)+'%', file=f)
print('motion_mean: '+str(motion_mean), file=f)
print('orientation_mean: '+str(oriens_mean), file=f)
print('location_mean: '+str(locs_mean), file=f)
print('dimension_mean: '+str(dims_mean), file=f)
print('--------------------------', file=f)
|
d3d-hoi-main
|
optimization/evaluate.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from skimage import io
from torch.utils.data import Dataset
import json
import os
import numpy as np
import torch
import matplotlib.pyplot as plt
import torchvision.transforms as transforms
from PIL import Image
import cv2
from natsort import natsorted
from utils import planeFit
from numpy.linalg import norm
import glob
import pdb
from pytorch3d.io import load_obj
class MyOwnDataset(Dataset):
""" My Own data loader. """
def __init__(self, root_dir):
"""
Args:
            root_dir (string): Directory with all the images, masks and meta files.
"""
self.img_paths = sorted(glob.glob(os.path.join(root_dir, 'frames', '*.jpg')))
self.smplv2d_paths = sorted(glob.glob(os.path.join(root_dir, 'smplv2d', '*.npy')))
self.smplmesh_paths = sorted(glob.glob(os.path.join(root_dir, 'smplmesh', '*.obj')))
self.joint3d_paths = sorted(glob.glob(os.path.join(root_dir, 'joints3d', '*.npy')))
self.objmask_paths = sorted(glob.glob(os.path.join(root_dir, 'gt_mask', '*object_mask.npy')))
# transformations
transform_list = [transforms.ToTensor()]
self.transforms = transforms.Compose(transform_list)
def correct_image_size(self,low,high):
# automatically finds a good ratio in the given range
image = np.array(Image.open(self.img_paths[0]))
img_h = image.shape[0]
img_w = image.shape[1]
img_square = max(img_h,img_w)
img_small = -1
for i in range(low, high):
if img_square % i == 0:
img_small = i
break
return img_square, img_small
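    # Hedged example of correct_image_size (illustrative numbers): for 1920x1080
    # frames the padded square side is 1920, and correct_image_size(200, 300)
    # returns (1920, 240), since 240 is the first divisor of 1920 in that range.
    # The smaller size is intended for the downsampled silhouette renderer.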
def __len__(self):
return len(self.img_paths)
def getImgPath(self):
return self.img_paths
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
image = np.array(Image.open(self.img_paths[idx])) # load image
img_h = image.shape[0]
img_w = image.shape[1]
square_len = max(img_h, img_w) # compute add in region for square
new_s = int((max(img_h, img_w) - min(img_h, img_w))/2)-1
add_l = min(img_h, img_w)
objmask = np.load(self.objmask_paths[idx]).astype(np.uint8)
smplv2d = np.load(self.smplv2d_paths[idx]) # load 2D points
joint3d = np.load(self.joint3d_paths[idx])
if (joint3d.shape[0] == 147):
pdb.set_trace()
            # no available frame
normal = np.zeros((3))
normal2 = np.zeros((3))
else:
# estimate the body fitting plane and its normal vector
joints_np = np.transpose(joint3d) # (3xN)
lhip_to_rShoulder = joint3d[33] - joint3d[28]
rhip_to_lShoulder = joint3d[34] - joint3d[27]
normal = np.cross(lhip_to_rShoulder, rhip_to_lShoulder)
normal = normal / np.sqrt(np.sum(normal**2))
arm = joint3d[31,:] - joint3d[33,:]
cos_sim = np.inner(normal, arm)/(norm(normal)*norm(arm))
if cos_sim < 0:
normal *= -1
lankle_to_rtoe = joint3d[22] - joint3d[30]
rankle_to_ltoe = joint3d[19] - joint3d[25]
normal2 = np.cross(lankle_to_rtoe, rankle_to_ltoe)
normal2 = normal2 / np.sqrt(np.sum(normal2**2))
leg = joint3d[29,:] - joint3d[30,:]
cos_sim2 = np.inner(normal2, leg)/(norm(normal2)*norm(leg))
if cos_sim2 < 0:
normal2 *= -1
# SMPL mesh
verts, faces, aux = load_obj(self.smplmesh_paths[idx])
faces = faces.verts_idx
verts = verts.float()
faces = faces.long()
joints = torch.from_numpy(joint3d).float()
normal = torch.from_numpy(normal).float()
normal2 = torch.from_numpy(normal2).float()
# apply transformations
image = self.transforms(np.uint8(image))
objmask = self.transforms(np.uint8(objmask))
objmask[objmask>0.0] = 1.0
data = {'image': image, 'objmask': objmask,
'smplv2d': smplv2d, 'ver': verts, 'f': faces,
'normal': normal, 'normal2': normal2, 'joint3d': joints}
return data
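# Minimal usage sketch (hypothetical paths): wrap one video folder and batch its
# frames with a standard DataLoader; the default collate turns the numpy entries
# into tensors.
#   dataset = MyOwnDataset('data/laptop/b003_0001')
#   loader = torch.utils.data.DataLoader(dataset, batch_size=len(dataset))
#   batch = next(iter(loader))
#   images, masks = batch['image'], batch['objmask']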
|
d3d-hoi-main
|
optimization/dataloader.py
|
import os
import argparse
import ntpath
import common
import pdb
import open3d as o3d
import numpy as np
class Simplification:
"""
Perform simplification of watertight meshes.
"""
def __init__(self):
"""
Constructor.
"""
parser = self.get_parser()
self.options = parser.parse_args()
self.simplification_script = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'simplification.mlx')
def get_parser(self):
"""
Get parser of tool.
:return: parser
"""
        parser = argparse.ArgumentParser(description='Simplify a set of watertight meshes stored as OFF files.')
parser.add_argument('--in_dir', type=str, help='Path to input directory.')
parser.add_argument('--out_dir', type=str, help='Path to output directory; files within are overwritten!')
return parser
def read_directory(self, directory):
"""
Read directory.
:param directory: path to directory
:return: list of files
"""
files = []
for filename in os.listdir(directory):
files.append(os.path.normpath(os.path.join(directory, filename)))
return files
def run(self):
"""
Run simplification.
"""
if not os.path.exists(self.options.in_dir):
return
common.makedir(self.options.out_dir)
files = self.read_directory(self.options.in_dir)
# count number of faces
num_faces = []
for filepath in files:
mesh = o3d.io.read_triangle_mesh(filepath)
faces = np.asarray(mesh.triangles).shape[0]
num_faces.append(faces)
num_faces = np.array(num_faces)
total_faces = np.sum(num_faces)
num_faces = np.around(2500 * (num_faces / (total_faces+0.0))).astype(int) # total 2500 faces
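        # Worked example of the face budget above (hypothetical part sizes): with
        # two parts of 4000 and 1000 faces, total_faces is 5000, so the targets
        # become round(2500 * 4000/5000) = 2000 and round(2500 * 1000/5000) = 500.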
for idx, filepath in enumerate(files):
# write new simply mlx file
with open(os.path.join(self.options.out_dir,'tmp.mlx'), 'w') as out_file:
with open(self.simplification_script, 'r') as in_file:
Lines = in_file.readlines()
for count, line in enumerate(Lines):
# modify target face number according to ratio
if count == 3:
front = line[:51]
back = line[57:]
line = front+"\""+str(num_faces[idx])+"\""+back
out_file.write(line)
os.system('meshlabserver -i %s -o %s -s %s' % (
filepath,
os.path.join(self.options.out_dir, ntpath.basename(filepath)),
os.path.join(self.options.out_dir,'tmp.mlx')
))
if __name__ == '__main__':
app = Simplification()
app.run()
|
d3d-hoi-main
|
preprocess/3_simplify.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import math
import os
import torch
import numpy as np
from tqdm import tqdm_notebook
import imageio
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
from skimage import img_as_ubyte
import pdb
import glob
import natsort
from torch.autograd import Variable
import trimesh
import copy
import re
# io utils
from pytorch3d.io import load_obj, save_obj, save_ply, load_ply
# datastructures
from pytorch3d.structures import Meshes
# 3D transformations functions
from pytorch3d.transforms import Rotate, Translate
# rendering components
from pytorch3d.renderer import (
OpenGLPerspectiveCameras, look_at_view_transform, look_at_rotation,
RasterizationSettings, MeshRenderer, MeshRasterizer, BlendParams,
SoftSilhouetteShader, HardPhongShader, PointLights, HardFlatShader, DirectionalLights, cameras
)
import json
import csv
import open3d as o3d
device = torch.device("cuda:0")
torch.cuda.set_device(device)
# helper function for computing rotation matrix in 3D
def rotation_matrix(axis, theta):
"""
Return the rotation matrix associated with counterclockwise rotation about
the given axis by theta radians.
"""
axis = axis / torch.sqrt(torch.dot(axis, axis))
a = torch.cos(theta / 2.0)
b, c, d = -axis * torch.sin(theta / 2.0)
aa, bb, cc, dd = a * a, b * b, c * c, d * d
bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
rot_mat = torch.empty(3,3)
rot_mat[0,0] = aa + bb - cc - dd
rot_mat[0,1] = 2 * (bc + ad)
rot_mat[0,2] = 2 * (bd - ac)
rot_mat[1,0] = 2 * (bc - ad)
rot_mat[1,1] = aa + cc - bb - dd
rot_mat[1,2] = 2 * (cd + ab)
rot_mat[2,0] = 2 * (bd + ac)
rot_mat[2,1] = 2 * (cd - ab)
rot_mat[2,2] = aa + dd - bb - cc
return rot_mat
# helper function for loading and merging meshes
def merge_meshes(obj_path):
verts_list = torch.empty(0,3)
faces_list = torch.empty(0,3).long()
num_vtx = [0]
# merge meshes, load in ascending order
meshes = natsort.natsorted(glob.glob(obj_path+'/final/*_rescaled_sapien.obj'))
for part_mesh in meshes:
print('loading %s' %part_mesh)
mesh = o3d.io.read_triangle_mesh(part_mesh)
verts = torch.from_numpy(np.asarray(mesh.vertices)).float()
faces = torch.from_numpy(np.asarray(mesh.triangles)).long()
faces = faces + verts_list.shape[0]
verts_list = torch.cat([verts_list, verts])
faces_list = torch.cat([faces_list, faces])
num_vtx.append(verts_list.shape[0])
verts_list = verts_list.to(device)
faces_list = faces_list.to(device)
return verts_list, faces_list, num_vtx
cad_folder = 'test' # cad data folder (after mesh fusion)
cad_classes = [f.name for f in os.scandir(cad_folder)]
for cad_category in cad_classes:
folder_path = os.path.join(cad_folder, cad_category)
object_paths = [f.path for f in os.scandir(folder_path)]
for obj_path in object_paths:
print('processing %s' % obj_path)
# load merged mesh and number of vtx for each part
verts_list, faces_list, num_vtx = merge_meshes(obj_path)
# load motion json file
with open(os.path.join(obj_path, 'motion.json')) as json_file:
motion = json.load(json_file)
# create gif writer
filename_output = os.path.join(obj_path, 'motion.gif')
writer = imageio.get_writer(filename_output, mode='I', duration=0.3)
vis = o3d.visualization.Visualizer()
vis.create_window(height=500, width=500)
distance = 2.4 # distance from camera to the object
elevation = 25 # angle of elevation in degrees
        azimuth = 20 # horizontal rotation of the camera, in degrees
# at least render one frame
if len(motion) == 0:
motion['placeholder'] = {}
# rotate or translate individual part
for idx, key in enumerate(motion.keys()):
jointData = motion[key]
# rotation part
if jointData and jointData['type'] == 'revolute':
start = num_vtx[idx]
end = num_vtx[idx+1]
rot_orig = torch.FloatTensor(jointData['axis']['origin']).to(device)
rot_axis = torch.FloatTensor(jointData['axis']['direction']).to(device)
aa = math.pi*jointData['limit']['a'] / 180.0
bb = math.pi*jointData['limit']['b'] / 180.0
print(aa)
print(bb)
rot_angles = np.linspace(aa, bb, num=5)
rot_angles_rev = np.linspace(bb, aa, num=5)
angles = np.concatenate((rot_angles, rot_angles_rev),0)
for angle in angles:
verts = verts_list.clone()
faces = faces_list.clone()
# world coordinate to local coordinate (rotation origin)
verts[start:end, 0] -= rot_orig[0]
verts[start:end, 1] -= rot_orig[1]
verts[start:end, 2] -= rot_orig[2]
# rotate around local axis [-1 0 0]
init_value = torch.tensor([angle])
theta = Variable(init_value.cuda())
rot_mat = rotation_matrix(rot_axis, theta).float() # 3x3
verts[start:end,:] = torch.t(torch.mm(rot_mat.to(device),
torch.t(verts[start:end,:])))
# local coordinate to world coordinate
verts[start:end, 0] += rot_orig[0]
verts[start:end, 1] += rot_orig[1]
verts[start:end, 2] += rot_orig[2]
R, T = look_at_view_transform(distance, elevation, azimuth, device=device)
T = Translate(T, device=T.device)
R = Rotate(R, device=R.device)
MM = R.compose(T)
opt_mesh = o3d.geometry.TriangleMesh()
# transform
tmp = MM.transform_points(verts).detach().cpu().numpy()
tmp[:,0] *= -1
tmp[:,2] *= -1
# visualize
opt_mesh.vertices = o3d.utility.Vector3dVector(tmp)
opt_mesh.triangles = o3d.utility.Vector3iVector(faces_list.cpu().numpy())
opt_mesh.compute_vertex_normals()
vis.clear_geometries()
vis.add_geometry(opt_mesh)
vis.poll_events()
img = np.asarray(vis.capture_screen_float_buffer(True))
image = img_as_ubyte(img)
writer.append_data(image)
# translation part
elif jointData and jointData['type'] == 'prismatic':
start = num_vtx[idx]
end = num_vtx[idx+1]
trans_orig = torch.FloatTensor(jointData['axis']['origin']).to(device)
trans_axis = torch.FloatTensor(jointData['axis']['direction']).to(device)
aa = jointData['limit']['a']
bb = jointData['limit']['b']
trans_len = np.linspace(aa, bb, num=5)
trans_len_rev = np.linspace(bb, aa, num=5)
trans_lens = np.concatenate((trans_len, trans_len_rev),0)
for tran_len in trans_lens:
verts = verts_list.clone()
faces = faces_list.clone()
# world coordinate to local coordinate (rotation origin)
verts[start:end, 0] -= trans_orig[0]
verts[start:end, 1] -= trans_orig[1]
verts[start:end, 2] -= trans_orig[2]
# add value in translation direction
verts[start:end, 0] += (trans_axis[0] * tran_len)
verts[start:end, 1] += (trans_axis[1] * tran_len)
verts[start:end, 2] += (trans_axis[2] * tran_len)
# local coordinate to world coordinate
verts[start:end, 0] += trans_orig[0]
verts[start:end, 1] += trans_orig[1]
verts[start:end, 2] += trans_orig[2]
R, T = look_at_view_transform(distance, elevation, azimuth, device=device)
T = Translate(T, device=T.device)
R = Rotate(R, device=R.device)
MM = R.compose(T)
opt_mesh = o3d.geometry.TriangleMesh()
# transform
tmp = MM.transform_points(verts).detach().cpu().numpy()
tmp[:,0] *= -1
tmp[:,2] *= -1
# visualize
opt_mesh.vertices = o3d.utility.Vector3dVector(tmp)
opt_mesh.triangles = o3d.utility.Vector3iVector(faces_list.cpu().numpy())
opt_mesh.compute_vertex_normals()
vis.clear_geometries()
vis.add_geometry(opt_mesh)
vis.poll_events()
img = np.asarray(vis.capture_screen_float_buffer(True))
image = img_as_ubyte(img)
writer.append_data(image)
# no motion
else:
assert not jointData
# world --> view coordinate
R, T = look_at_view_transform(distance, elevation, azimuth, device=device)
T = Translate(T, device=T.device)
R = Rotate(R, device=R.device)
MM = R.compose(T)
opt_mesh = o3d.geometry.TriangleMesh()
# transform
tmp = MM.transform_points(verts_list).detach().cpu().numpy()
tmp[:,0] *= -1
tmp[:,2] *= -1
# visualize
opt_mesh.vertices = o3d.utility.Vector3dVector(tmp)
opt_mesh.triangles = o3d.utility.Vector3iVector(faces_list.cpu().numpy())
opt_mesh.compute_vertex_normals()
vis.clear_geometries()
vis.add_geometry(opt_mesh)
vis.poll_events()
img = np.asarray(vis.capture_screen_float_buffer(True))
image = img_as_ubyte(img)
writer.append_data(image)
vis.destroy_window()
writer.close()
|
d3d-hoi-main
|
preprocess/visualize_data.py
|
import math
import numpy as np
import os
from scipy import ndimage
import common
import argparse
import ntpath
# Import shipped libraries.
import librender
import libmcubes
use_gpu = True
if use_gpu:
import libfusiongpu as libfusion
from libfusiongpu import tsdf_gpu as compute_tsdf
else:
import libfusioncpu as libfusion
from libfusioncpu import tsdf_cpu as compute_tsdf
class Fusion:
"""
Performs TSDF fusion.
"""
def __init__(self):
"""
Constructor.
"""
parser = self.get_parser()
self.options = parser.parse_args()
        self.render_intrinsics = np.array([
            self.options.focal_length_x,
            self.options.focal_length_y,
            self.options.principal_point_x,
            self.options.principal_point_y
        ], dtype=float)
# Essentially the same as above, just a slightly different format.
self.fusion_intrisics = np.array([
[self.options.focal_length_x, 0, self.options.principal_point_x],
[0, self.options.focal_length_y, self.options.principal_point_y],
[0, 0, 1]
])
self.image_size = np.array([
self.options.image_height,
self.options.image_width,
], dtype=np.int32)
# Mesh will be centered at (0, 0, 1)!
self.znf = np.array([
1 - 0.75,
1 + 0.75
], dtype=float)
# Derive voxel size from resolution.
self.voxel_size = 1./self.options.resolution
self.truncation = self.options.truncation_factor*self.voxel_size
def get_parser(self):
"""
Get parser of tool.
:return: parser
"""
        parser = argparse.ArgumentParser(description='Render and fuse depth maps for a set of meshes stored as OFF files.')
parser.add_argument('--mode', type=str, default='render', help='Operation mode: render or fuse.')
parser.add_argument('--in_dir', type=str, help='Path to input directory.')
parser.add_argument('--depth_dir', type=str, help='Path to depth directory; files are overwritten!')
parser.add_argument('--out_dir', type=str, help='Path to output directory; files within are overwritten!')
parser.add_argument('--n_views', type=int, default=100, help='Number of views per model.')
parser.add_argument('--image_height', type=int, default=640, help='Depth image height.')
parser.add_argument('--image_width', type=int, default=640, help='Depth image width.')
parser.add_argument('--focal_length_x', type=float, default=640, help='Focal length in x direction.')
parser.add_argument('--focal_length_y', type=float, default=640, help='Focal length in y direction.')
parser.add_argument('--principal_point_x', type=float, default=320, help='Principal point location in x direction.')
parser.add_argument('--principal_point_y', type=float, default=320, help='Principal point location in y direction.')
parser.add_argument('--depth_offset_factor', type=float, default=1.5, help='The depth maps are offsetted using depth_offset_factor*voxel_size.')
parser.add_argument('--resolution', type=float, default=256, help='Resolution for fusion.')
parser.add_argument('--truncation_factor', type=float, default=10, help='Truncation for fusion is derived as truncation_factor*voxel_size.')
return parser
def read_directory(self, directory):
"""
Read directory.
:param directory: path to directory
:return: list of files
"""
files = []
for filename in os.listdir(directory):
files.append(os.path.normpath(os.path.join(directory, filename)))
return files
def get_points(self):
"""
See https://stackoverflow.com/questions/9600801/evenly-distributing-n-points-on-a-sphere.
        :return: list of self.options.n_views points on the unit sphere
        :rtype: numpy.ndarray
"""
rnd = 1.
points = []
offset = 2. / self.options.n_views
        increment = math.pi * (3. - math.sqrt(5.))
        for i in range(self.options.n_views):
            y = ((i * offset) - 1) + (offset / 2)
r = math.sqrt(1 - pow(y, 2))
phi = ((i + rnd) % self.options.n_views) * increment
x = math.cos(phi) * r
z = math.sin(phi) * r
points.append([x, y, z])
# visualization.plot_point_cloud(np.array(points))
return np.array(points)
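    # Hedged sanity check for the Fibonacci-style sampling above (illustrative):
    #   points = self.get_points()               # shape (n_views, 3)
    #   radii = np.linalg.norm(points, axis=1)   # every entry is ~1.0 (unit sphere)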
def get_views(self):
"""
        Generate a set of views to generate depth maps from, one per sampled point.
        :return: rotation matrices
        :rtype: [numpy.ndarray]
"""
Rs = []
points = self.get_points()
for i in range(points.shape[0]):
# https://math.stackexchange.com/questions/1465611/given-a-point-on-a-sphere-how-do-i-find-the-angles-needed-to-point-at-its-ce
longitude = - math.atan2(points[i, 0], points[i, 1])
latitude = math.atan2(points[i, 2], math.sqrt(points[i, 0] ** 2 + points[i, 1] ** 2))
R_x = np.array([[1, 0, 0], [0, math.cos(latitude), -math.sin(latitude)], [0, math.sin(latitude), math.cos(latitude)]])
R_y = np.array([[math.cos(longitude), 0, math.sin(longitude)], [0, 1, 0], [-math.sin(longitude), 0, math.cos(longitude)]])
R = R_y.dot(R_x)
Rs.append(R)
return Rs
def render(self, mesh, Rs):
"""
Render the given mesh using the generated views.
:param base_mesh: mesh to render
:type base_mesh: mesh.Mesh
:param Rs: rotation matrices
:type Rs: [numpy.ndarray]
:return: depth maps
:rtype: numpy.ndarray
"""
depthmaps = []
for i in range(len(Rs)):
np_vertices = Rs[i].dot(mesh.vertices.astype(np.float64).T)
np_vertices[2, :] += 1
np_faces = mesh.faces.astype(np.float64)
np_faces += 1
depthmap, mask, img = librender.render(np_vertices.copy(), np_faces.T.copy(), self.render_intrinsics, self.znf, self.image_size)
            # This is mainly the result of experimentation.
# The core idea is that the volume of the object is enlarged slightly
# (by subtracting a constant from the depth map).
# Dilation additionally enlarges thin structures (e.g. for chairs).
depthmap -= self.options.depth_offset_factor * self.voxel_size
depthmap = ndimage.morphology.grey_erosion(depthmap, size=(3, 3))
depthmaps.append(depthmap)
return depthmaps
def fusion(self, depthmaps, Rs):
"""
Fuse the rendered depth maps.
:param depthmaps: depth maps
:type depthmaps: numpy.ndarray
:param Rs: rotation matrices corresponding to views
:type Rs: [numpy.ndarray]
:return: (T)SDF
:rtype: numpy.ndarray
"""
Ks = self.fusion_intrisics.reshape((1, 3, 3))
Ks = np.repeat(Ks, len(depthmaps), axis=0).astype(np.float32)
Ts = []
for i in range(len(Rs)):
Rs[i] = Rs[i]
Ts.append(np.array([0, 0, 1]))
Ts = np.array(Ts).astype(np.float32)
Rs = np.array(Rs).astype(np.float32)
depthmaps = np.array(depthmaps).astype(np.float32)
views = libfusion.PyViews(depthmaps, Ks, Rs, Ts)
# Note that this is an alias defined as libfusiongpu.tsdf_gpu or libfusioncpu.tsdf_cpu!
return compute_tsdf(views, self.options.resolution, self.options.resolution, self.options.resolution,
self.voxel_size, self.truncation, False)
def run(self):
"""
Run the tool.
"""
if self.options.mode == 'render':
self.run_render()
elif self.options.mode == 'fuse':
self.run_fuse()
else:
            print('Invalid mode, choose render or fuse.')
exit()
def run_render(self):
"""
Run rendering.
"""
assert os.path.exists(self.options.in_dir)
common.makedir(self.options.depth_dir)
files = self.read_directory(self.options.in_dir)
timer = common.Timer()
Rs = self.get_views()
for filepath in files:
timer.reset()
mesh = common.Mesh.from_off(filepath)
depths = self.render(mesh, Rs)
depth_file = os.path.join(self.options.depth_dir, os.path.basename(filepath) + '.h5')
common.write_hdf5(depth_file, np.array(depths))
print('[Data] wrote %s (%f seconds)' % (depth_file, timer.elapsed()))
def run_fuse(self):
"""
Run fusion.
"""
assert os.path.exists(self.options.depth_dir)
common.makedir(self.options.out_dir)
files = self.read_directory(self.options.depth_dir)
timer = common.Timer()
Rs = self.get_views()
for filepath in files:
# As rendering might be slower, we wait for rendering to finish.
# This allows to run rendering and fusing in parallel (more or less).
depths = common.read_hdf5(filepath)
timer.reset()
tsdf = self.fusion(depths, Rs)
tsdf = tsdf[0]
vertices, triangles = libmcubes.marching_cubes(-tsdf, 0)
vertices /= self.options.resolution
vertices -= 0.5
off_file = os.path.join(self.options.out_dir, ntpath.basename(filepath)[:-3])
libmcubes.export_off(vertices, triangles, off_file)
print('[Data] wrote %s (%f seconds)' % (off_file, timer.elapsed()))
if __name__ == '__main__':
app = Fusion()
app.run()
|
d3d-hoi-main
|
preprocess/2_fusion.py
|
import os
import subprocess
from tqdm import tqdm
from multiprocessing import Pool
def convert(obj_path):
try:
load_folder = os.path.join(obj_path, 'parts_ply')
save_folder = os.path.join(obj_path, 'parts_off')
part_paths = [f.path for f in os.scandir(load_folder)]
if not os.path.exists(save_folder):
os.makedirs(save_folder)
for part in part_paths:
target_mesh = save_folder+'/'+part[-5:-3]+'off'
subprocess.run(["meshlabserver", "-i", part, "-o", target_mesh])
except Exception as ex:
return
cad_folder = './cad_sapien'
cad_classes = [f.name for f in os.scandir(cad_folder)]
for cad_category in cad_classes:
folder_path = os.path.join(cad_folder, cad_category)
object_paths = [f.path for f in os.scandir(folder_path)]
# Parallel
threads = 16 # number of threads in your computer
convert_iter = Pool(threads).imap(convert, object_paths)
for _ in tqdm(convert_iter, total=len(object_paths)):
pass
|
d3d-hoi-main
|
preprocess/convert_off.py
|
import os
import pdb
import subprocess
import scandir
from multiprocessing import Pool
import json
import common
def remesh(obj_path):
in_dir = os.path.join(obj_path, 'parts_off/')
scaled_dir = os.path.join(obj_path, 'parts_scaled_off/')
depth_dir = os.path.join(obj_path, 'parts_depth_off/')
fused_dir = os.path.join(obj_path, 'parts_watertight_off/')
out_dir = os.path.join(obj_path, 'parts_out_off/')
final_dir = os.path.join(obj_path, 'final/')
rescale_dir = os.path.join(obj_path, 'rescale/')
# scale to .5 cube
subprocess.call(["python", "1_scale.py", "--in_dir", in_dir, "--out_dir", scaled_dir])
# re-mesh using tsdf
subprocess.call(["python", "2_fusion.py", "--mode", "render", "--in_dir", scaled_dir, "--depth_dir", depth_dir, "--out_dir", fused_dir])
subprocess.call(["python", "2_fusion.py", "--mode", "fuse", "--in_dir", scaled_dir, "--depth_dir", depth_dir, "--out_dir", fused_dir])
# simplify mesh
subprocess.call(["python", "3_simplify.py", "--in_dir", fused_dir, "--out_dir", out_dir])
if not os.path.exists(final_dir):
os.makedirs(final_dir)
for file in os.listdir(rescale_dir):
if file.endswith("rescale.json"):
with open(os.path.join(rescale_dir, file)) as json_file:
# load rescale value
rescale_dict = json.load(json_file)
scales = (1.0/rescale_dict['scales'][0], 1.0/rescale_dict['scales'][1], 1.0/rescale_dict['scales'][2])
translation = (-rescale_dict['translation'][2], -rescale_dict['translation'][1], -rescale_dict['translation'][0])
# load mesh
mesh = common.Mesh.from_off(os.path.join(out_dir, file[0]+'.off'))
# apply rescaling
mesh.scale(scales)
mesh.translate(translation)
mesh.to_off(os.path.join(final_dir, file[0]+'_rescaled.off'))
# change axis
apply_script = "change_axis.mlx"
source_mesh = os.path.join(final_dir, file[0]+'_rescaled.off')
target_mesh = os.path.join(final_dir, file[0]+'_rescaled_sapien.off')
subprocess.call(["meshlabserver", "-i", source_mesh, "-o", target_mesh, "-s", apply_script])
# convert to obj
source_mesh = os.path.join(final_dir, file[0]+'_rescaled_sapien.off')
target_mesh = os.path.join(final_dir, file[0]+'_rescaled_sapien.obj')
subprocess.call(["meshlabserver", "-i", source_mesh, "-o", target_mesh])
return
cad_folder = 'test' # cad data path (after convert_off)
cad_classes = [f.name for f in scandir.scandir(cad_folder)]
Processors = 10 # n of processors you want to use
for cad_category in cad_classes:
folder_path = os.path.join(cad_folder, cad_category)
object_paths = [f.path for f in scandir.scandir(folder_path)]
pool = Pool(processes=Processors)
pool.map(remesh, object_paths)
print('All jobs finished...')
|
d3d-hoi-main
|
preprocess/re-meshing.py
|
"""
Some I/O utilities.
"""
import os
import time
import h5py
import math
import numpy as np
def write_hdf5(file, tensor, key = 'tensor'):
"""
    Write a simple tensor, i.e. numpy array, to HDF5.
:param file: path to file to write
:type file: str
:param tensor: tensor to write
:type tensor: numpy.ndarray
:param key: key to use for tensor
:type key: str
"""
assert type(tensor) == np.ndarray, 'expects numpy.ndarray'
h5f = h5py.File(file, 'w')
chunks = list(tensor.shape)
if len(chunks) > 2:
chunks[2] = 1
if len(chunks) > 3:
chunks[3] = 1
if len(chunks) > 4:
chunks[4] = 1
h5f.create_dataset(key, data = tensor, chunks = tuple(chunks), compression = 'gzip')
h5f.close()
def read_hdf5(file, key = 'tensor'):
"""
Read a tensor, i.e. numpy array, from HDF5.
:param file: path to file to read
:type file: str
:param key: key to read
:type key: str
:return: tensor
:rtype: numpy.ndarray
"""
assert os.path.exists(file), 'file %s not found' % file
h5f = h5py.File(file, 'r')
assert key in h5f.keys(), 'key %s not found in file %s' % (key, file)
tensor = h5f[key][()]
h5f.close()
return tensor
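# --- Illustrative usage sketch (added for clarity, not part of the original module) ---
# Round-trips a random tensor through write_hdf5/read_hdf5; the temporary
# file path below is created only for this demo.
if __name__ == '__main__':
    import tempfile
    _h5_path = os.path.join(tempfile.mkdtemp(), 'demo_tensor.h5')
    _demo = np.random.rand(4, 3, 8, 8).astype(np.float32)
    write_hdf5(_h5_path, _demo, key='tensor')
    _back = read_hdf5(_h5_path, key='tensor')
    assert _back.shape == _demo.shape and np.allclose(_back, _demo)
    print('[Demo] HDF5 round trip OK:', _back.shape)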
def write_off(file, vertices, faces):
"""
Writes the given vertices and faces to OFF.
:param vertices: vertices as tuples of (x, y, z) coordinates
:type vertices: [(float)]
:param faces: faces as tuples of (num_vertices, vertex_id_1, vertex_id_2, ...)
:type faces: [(int)]
"""
num_vertices = len(vertices)
num_faces = len(faces)
assert num_vertices > 0
assert num_faces > 0
with open(file, 'w') as fp:
fp.write('OFF\n')
fp.write(str(num_vertices) + ' ' + str(num_faces) + ' 0\n')
for vertex in vertices:
assert len(vertex) == 3, 'invalid vertex with %d dimensions found (%s)' % (len(vertex), file)
fp.write(str(vertex[0]) + ' ' + str(vertex[1]) + ' ' + str(vertex[2]) + '\n')
for face in faces:
assert face[0] == 3, 'only triangular faces supported (%s)' % file
assert len(face) == 4, 'faces need to have 3 vertices, but found %d (%s)' % (len(face), file)
for i in range(len(face)):
                # face[0] is the vertex count, not a vertex index
                if i > 0:
                    assert face[i] >= 0 and face[i] < num_vertices, 'invalid vertex index %d (of %d vertices) (%s)' % (face[i], num_vertices, file)
fp.write(str(face[i]))
if i < len(face) - 1:
fp.write(' ')
fp.write('\n')
# add empty line to be sure
fp.write('\n')
def read_off(file):
"""
Reads vertices and faces from an off file.
:param file: path to file to read
:type file: str
:return: vertices and faces as lists of tuples
:rtype: [(float)], [(int)]
"""
assert os.path.exists(file), 'file %s not found' % file
with open(file, 'r') as fp:
lines = fp.readlines()
lines = [line.strip() for line in lines]
        # Fix for the ModelNet bug where 'OFF' and the vertex/face counts
        # are all on the first line.
if len(lines[0]) > 3:
assert lines[0][:3] == 'OFF' or lines[0][:3] == 'off', 'invalid OFF file %s' % file
parts = lines[0][3:].split(' ')
assert len(parts) == 3
num_vertices = int(parts[0])
assert num_vertices > 0
num_faces = int(parts[1])
assert num_faces > 0
start_index = 1
# This is the regular case!
else:
assert lines[0] == 'OFF' or lines[0] == 'off', 'invalid OFF file %s' % file
parts = lines[1].split(' ')
assert len(parts) == 3
num_vertices = int(parts[0])
assert num_vertices > 0
num_faces = int(parts[1])
assert num_faces > 0
start_index = 2
vertices = []
for i in range(num_vertices):
vertex = lines[start_index + i].split(' ')
vertex = [float(point.strip()) for point in vertex if point != '']
assert len(vertex) == 3
vertices.append(vertex)
faces = []
for i in range(num_faces):
face = lines[start_index + num_vertices + i].split(' ')
face = [index.strip() for index in face if index != '']
# check to be sure
for index in face:
assert index != '', 'found empty vertex index: %s (%s)' % (lines[start_index + num_vertices + i], file)
face = [int(index) for index in face]
            assert face[0] == len(face) - 1, 'face should have %d vertices but has %d (%s)' % (face[0], len(face) - 1, file)
assert face[0] == 3, 'only triangular meshes supported (%s)' % file
for index in face:
assert index >= 0 and index < num_vertices, 'vertex %d (of %d vertices) does not exist (%s)' % (index, num_vertices, file)
assert len(face) > 1
faces.append(face)
return vertices, faces
assert False, 'could not open %s' % file
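# --- Illustrative usage sketch (added for clarity, not part of the original module) ---
# Writes two triangles to OFF and reads them back. OFF faces are stored as
# (vertex_count, v1, v2, v3), which is why each face below starts with a 3.
if __name__ == '__main__':
    import tempfile
    _off_path = os.path.join(tempfile.mkdtemp(), 'demo.off')
    _verts = [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0)]
    _faces = [(3, 0, 1, 2), (3, 0, 2, 3)]
    write_off(_off_path, _verts, _faces)
    _rv, _rf = read_off(_off_path)
    assert len(_rv) == 4 and len(_rf) == 2 and _rf[0] == [3, 0, 1, 2]
    print('[Demo] OFF round trip OK')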
def write_obj(file, vertices, faces):
"""
Writes the given vertices and faces to OBJ.
:param vertices: vertices as tuples of (x, y, z) coordinates
:type vertices: [(float)]
    :param faces: faces as tuples of (vertex_id_1, vertex_id_2, vertex_id_3)
    :type faces: [(int)]
"""
num_vertices = len(vertices)
num_faces = len(faces)
assert num_vertices > 0
assert num_faces > 0
with open(file, 'w') as fp:
for vertex in vertices:
assert len(vertex) == 3, 'invalid vertex with %d dimensions found (%s)' % (len(vertex), file)
fp.write('v' + ' ' + str(vertex[0]) + ' ' + str(vertex[1]) + ' ' + str(vertex[2]) + '\n')
for face in faces:
assert len(face) == 3, 'only triangular faces supported (%s)' % file
fp.write('f ')
for i in range(len(face)):
assert face[i] >= 0 and face[i] < num_vertices, 'invalid vertex index %d (of %d vertices) (%s)' % (face[i], num_vertices, file)
# face indices are 1-based
fp.write(str(face[i] + 1))
if i < len(face) - 1:
fp.write(' ')
fp.write('\n')
# add empty line to be sure
fp.write('\n')
def read_obj(file):
"""
Reads vertices and faces from an obj file.
:param file: path to file to read
:type file: str
:return: vertices and faces as lists of tuples
:rtype: [(float)], [(int)]
"""
assert os.path.exists(file), 'file %s not found' % file
with open(file, 'r') as fp:
lines = fp.readlines()
lines = [line.strip() for line in lines if line.strip()]
vertices = []
faces = []
for line in lines:
parts = line.split(' ')
parts = [part.strip() for part in parts if part]
if parts[0] == 'v':
assert len(parts) == 4, \
'vertex should be of the form v x y z, but found %d parts instead (%s)' % (len(parts), file)
assert parts[1] != '', 'vertex x coordinate is empty (%s)' % file
assert parts[2] != '', 'vertex y coordinate is empty (%s)' % file
assert parts[3] != '', 'vertex z coordinate is empty (%s)' % file
vertices.append([float(parts[1]), float(parts[2]), float(parts[3])])
elif parts[0] == 'f':
assert len(parts) == 4, \
                    'face should be of the form f v1/vt1/vn1 v2/vt2/vn2 v3/vt3/vn3, but found %d parts (%s) instead (%s)' % (len(parts), line, file)
components = parts[1].split('/')
assert len(components) >= 1 and len(components) <= 3, \
'face component should have the forms v, v/vt or v/vt/vn, but found %d components instead (%s)' % (len(components), file)
assert components[0].strip() != '', \
'face component is empty (%s)' % file
v1 = int(components[0])
components = parts[2].split('/')
assert len(components) >= 1 and len(components) <= 3, \
'face component should have the forms v, v/vt or v/vt/vn, but found %d components instead (%s)' % (len(components), file)
assert components[0].strip() != '', \
'face component is empty (%s)' % file
v2 = int(components[0])
components = parts[3].split('/')
assert len(components) >= 1 and len(components) <= 3, \
'face component should have the forms v, v/vt or v/vt/vn, but found %d components instead (%s)' % (len(components), file)
assert components[0].strip() != '', \
'face component is empty (%s)' % file
v3 = int(components[0])
#assert v1 != v2 and v2 != v3 and v3 != v2, 'degenerate face detected: %d %d %d (%s)' % (v1, v2, v3, file)
if v1 == v2 or v2 == v3 or v1 == v3:
print('[Info] skipping degenerate face in %s' % file)
else:
faces.append([v1 - 1, v2 - 1, v3 - 1]) # indices are 1-based!
else:
assert False, 'expected either vertex or face but got line: %s (%s)' % (line, file)
return vertices, faces
assert False, 'could not open %s' % file
def makedir(dir):
"""
Creates directory if it does not exist.
:param dir: directory path
:type dir: str
"""
if not os.path.exists(dir):
os.makedirs(dir)
class Mesh:
"""
Represents a mesh.
"""
def __init__(self, vertices = [[]], faces = [[]]):
"""
Construct a mesh from vertices and faces.
:param vertices: list of vertices, or numpy array
:type vertices: [[float]] or numpy.ndarray
:param faces: list of faces or numpy array, i.e. the indices of the corresponding vertices per triangular face
        :type faces: [[int]] or numpy.ndarray
"""
self.vertices = np.array(vertices, dtype = float)
""" (numpy.ndarray) Vertices. """
self.faces = np.array(faces, dtype = int)
""" (numpy.ndarray) Faces. """
assert self.vertices.shape[1] == 3
assert self.faces.shape[1] == 3
def extents(self):
"""
Get the extents.
:return: (min_x, min_y, min_z), (max_x, max_y, max_z)
:rtype: (float, float, float), (float, float, float)
"""
min = [0]*3
max = [0]*3
for i in range(3):
min[i] = np.min(self.vertices[:, i])
max[i] = np.max(self.vertices[:, i])
return tuple(min), tuple(max)
def switch_axes(self, axis_1, axis_2):
"""
        Switch two axes; this is usually useful for swapping the y and z axes.
:param axis_1: index of first axis
:type axis_1: int
:param axis_2: index of second axis
:type axis_2: int
"""
temp = np.copy(self.vertices[:, axis_1])
self.vertices[:, axis_1] = self.vertices[:, axis_2]
self.vertices[:, axis_2] = temp
def mirror(self, axis):
"""
Mirror given axis.
:param axis: axis to mirror
:type axis: int
"""
self.vertices[:, axis] *= -1
def scale(self, scales):
"""
Scale the mesh in all dimensions.
:param scales: tuple of length 3 with scale for (x, y, z)
:type scales: (float, float, float)
"""
assert len(scales) == 3
for i in range(3):
self.vertices[:, i] *= scales[i]
def translate(self, translation):
"""
Translate the mesh.
:param translation: translation as (x, y, z)
:type translation: (float, float, float)
"""
assert len(translation) == 3
for i in range(3):
self.vertices[:, i] += translation[i]
def _rotate(self, R):
self.vertices = np.dot(R, self.vertices.T)
self.vertices = self.vertices.T
def rotate(self, rotation):
"""
Rotate the mesh.
:param rotation: rotation in (angle_x, angle_y, angle_z); angles in radians
        :type rotation: (float, float, float)
"""
assert len(rotation) == 3
x = rotation[0]
y = rotation[1]
z = rotation[2]
# rotation around the x axis
R = np.array([[1, 0, 0], [0, math.cos(x), -math.sin(x)], [0, math.sin(x), math.cos(x)]])
self._rotate(R)
# rotation around the y axis
R = np.array([[math.cos(y), 0, math.sin(y)], [0, 1, 0], [-math.sin(y), 0, math.cos(y)]])
self._rotate(R)
# rotation around the z axis
R = np.array([[math.cos(z), -math.sin(z), 0], [math.sin(z), math.cos(z), 0], [0, 0, 1]])
self._rotate(R)
def inv_rotate(self, rotation):
"""
        Rotate the mesh by the transposed per-axis rotations, applied in the same x, y, z order as rotate().
        :param rotation: rotation in (angle_x, angle_y, angle_z); angles in radians
        :type rotation: (float, float, float)
"""
assert len(rotation) == 3
x = rotation[0]
y = rotation[1]
z = rotation[2]
# rotation around the x axis
R = np.array([[1, 0, 0], [0, math.cos(x), -math.sin(x)], [0, math.sin(x), math.cos(x)]])
R = R.T
self._rotate(R)
# rotation around the y axis
R = np.array([[math.cos(y), 0, math.sin(y)], [0, 1, 0], [-math.sin(y), 0, math.cos(y)]])
R = R.T
self._rotate(R)
# rotation around the z axis
R = np.array([[math.cos(z), -math.sin(z), 0], [math.sin(z), math.cos(z), 0], [0, 0, 1]])
R = R.T
self._rotate(R)
def copy(self):
"""
Copy the mesh.
:return: copy of the mesh
:rtype: Mesh
"""
mesh = Mesh(self.vertices.copy(), self.faces.copy())
return mesh
@staticmethod
def from_off(filepath):
"""
Read a mesh from OFF.
:param filepath: path to OFF file
:type filepath: str
:return: mesh
:rtype: Mesh
"""
vertices, faces = read_off(filepath)
real_faces = []
for face in faces:
assert len(face) == 4
real_faces.append([face[1], face[2], face[3]])
return Mesh(vertices, real_faces)
def to_off(self, filepath):
"""
Write mesh to OFF.
:param filepath: path to write file to
:type filepath: str
"""
faces = np.ones((self.faces.shape[0], 4), dtype = int)*3
faces[:, 1:4] = self.faces[:, :]
write_off(filepath, self.vertices.tolist(), faces.tolist())
@staticmethod
def from_obj(filepath):
"""
Read a mesh from OBJ.
        :param filepath: path to OBJ file
:type filepath: str
:return: mesh
:rtype: Mesh
"""
vertices, faces = read_obj(filepath)
return Mesh(vertices, faces)
def to_obj(self, filepath):
"""
Write mesh to OBJ file.
:param filepath: path to OBJ file
:type filepath: str
"""
write_obj(filepath, self.vertices.tolist(), self.faces.tolist())
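# --- Illustrative usage sketch (added for clarity, not part of the original module) ---
# Builds a tiny Mesh from Python lists and applies the same scale/translate
# operations the preprocessing scripts use; all numbers are arbitrary demo values.
if __name__ == '__main__':
    _m = Mesh([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]],
              [[0, 1, 2], [0, 2, 3]])
    _m.scale((2.0, 2.0, 2.0))
    _m.translate((-1.0, 0.0, 0.0))
    print('[Demo] extents after scale/translate:', _m.extents())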
class Timer:
    """
    Simple timer based on time.perf_counter() (time.clock() was removed in Python 3.8).
    """
    def __init__(self):
        """
        Initialize and start timer.
        """
        self.start = time.perf_counter()
        """ (float) Seconds. """
    def reset(self):
        """
        Reset timer.
        """
        self.start = time.perf_counter()
    def elapsed(self):
        """
        Get elapsed time in seconds
        :return: elapsed time in seconds
        :rtype: float
        """
        return (time.perf_counter() - self.start)
|
d3d-hoi-main
|
preprocess/common.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import math
import os
import torch
import numpy as np
from tqdm import tqdm_notebook
import imageio
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
from skimage import img_as_ubyte
from tqdm import tqdm
import re
import open3d as o3d
import itertools
# io utils
from pytorch3d.io import load_obj, save_obj
# datastructures
from pytorch3d.structures import Meshes
# 3D transformations functions
from pytorch3d.transforms import Rotate, Translate
# rendering components
from pytorch3d.renderer import (
OpenGLPerspectiveCameras, look_at_view_transform, look_at_rotation,
RasterizationSettings, MeshRenderer, MeshRasterizer, BlendParams,
SoftSilhouetteShader, HardPhongShader, PointLights
)
import json
import csv
SAPIEN_FOLDER = './partnet-mobility-v0'
OUT_FOLDER = './cad_sapien'
# helper function for computing a rotation matrix in 3D
def rotation_matrix(axis, theta):
"""
Return the rotation matrix associated with counterclockwise rotation about
the given axis by theta radians.
"""
axis = np.asarray(axis)
axis = axis / math.sqrt(np.dot(axis, axis))
a = math.cos(theta / 2.0)
b, c, d = -axis * math.sin(theta / 2.0)
aa, bb, cc, dd = a * a, b * b, c * c, d * d
bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],
[2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],
[2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])
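# Illustrative sanity check (added for clarity, not part of the original pipeline):
# rotating the x unit vector by pi/2 about the z axis should yield the y unit vector.
_R_demo = rotation_matrix([0, 0, 1], math.pi / 2.0)
assert np.allclose(_R_demo.dot(np.array([1.0, 0.0, 0.0])), [0.0, 1.0, 0.0])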
# helper function for traversing a tree
def traverse_tree(current_node, mesh_dict):
# further traverse the tree if not at leaf node yet
if 'children' in current_node.keys():
for idx in range(len(current_node['children'])):
traverse_tree(current_node['children'][idx], mesh_dict)
else:
        # insert meshes associated with a unique part id
assert current_node['id'] not in mesh_dict.keys()
mesh_dict[current_node['id']] = current_node['objs']
return
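# Illustrative sketch (added for clarity, not part of the original pipeline):
# traverse_tree flattens the nested result.json structure into {leaf_id: [obj names]};
# the two-leaf tree below is a made-up example of that layout.
_demo_tree = {'id': 0, 'children': [{'id': 1, 'objs': ['original-1']},
                                    {'id': 2, 'objs': ['original-2', 'original-3']}]}
_demo_dict = {}
traverse_tree(_demo_tree, _demo_dict)
assert _demo_dict == {1: ['original-1'], 2: ['original-2', 'original-3']}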
# helper function for loading and merging meshes
def merge_meshes(save_folder, ids, mesh_dict):
for count, part_ids in enumerate(ids):
part_meshes = [mesh_dict[x] for x in part_ids]
part_meshes = list(itertools.chain(*part_meshes))
verts_list = np.empty((0,3))
        faces_list = np.empty((0, 3), dtype=np.int64)  # keep face indices integral for Vector3iVector
for part_mesh in part_meshes:
obj_path = os.path.join(part_folder, 'textured_objs', part_mesh,)+'.obj'
# check if mesh exist
if not os.path.exists(obj_path):
print(obj_path)
continue
mesh = o3d.io.read_triangle_mesh(obj_path)
verts = np.asarray(mesh.vertices)
faces = np.asarray(mesh.triangles)
faces = faces + verts_list.shape[0]
verts_list = np.concatenate([verts_list, verts])
faces_list = np.concatenate([faces_list, faces])
mesh = o3d.geometry.TriangleMesh(vertices=o3d.utility.Vector3dVector(verts_list),
triangles=o3d.utility.Vector3iVector(faces_list))
mesh.compute_vertex_normals()
save_path = os.path.join(save_folder, 'parts_ply')
if not os.path.exists(save_path):
os.makedirs(save_path)
o3d.io.write_triangle_mesh(save_path+'/'+str(count)+'.ply', mesh)
return
part_home = SAPIEN_FOLDER
save_home = OUT_FOLDER
classes = ['StorageFurniture','Microwave','Laptop','WashingMachine','TrashCan','Oven',
'Dishwasher','Refrigerator'] # 8 categories
# Manually verify the part category
careParts = {}
careParts['Refrigerator'] = ['door', 'other_leaf', 'display_panel', 'door_frame',
'control_panel', 'glass']
careParts['Microwave'] = ['door']
careParts['Laptop'] = ['shaft', 'other_leaf', 'screen_side', 'screen', 'screen_frame']
careParts['WashingMachine'] = ['door']
careParts['TrashCan'] = ['opener', 'lid', 'drawer', 'cover', 'cover_lid',
'frame_vertical_bar', 'container', 'other_leaf']
careParts['Oven'] = ['door', 'door_frame']
careParts['Dishwasher'] = ['door', 'shelf', 'display_panel', 'door_frame']
careParts['StorageFurniture'] = ['cabinet_door', 'mirror', 'drawer', 'drawer_box',
'door', 'shelf', 'handle', 'glass', 'cabinet_door_surface',
'other_leaf', 'countertop']
careParts['Toilet'] = ['lid', 'seat']
#careParts['Table'] = ['drawer', 'cabinet_door_surface', 'drawer_box', 'handle',
#'drawer_front', 'board', 'cabinet_door', 'shelf', 'keyboard_tray_surface']
#careParts['Box'] = ['rotation_lid', 'drawer', 'countertop', 'lid_surface'] # font on top
#careParts['FoldingChair'] = ['seat']
#careParts['Suitcase'] = ['lid', 'pull-out_handle']
count = 0
# all dirIDs within this class
with open('partnetsim.models.csv', 'r') as file:
reader = csv.DictReader(file)
for row in reader:
if row['category'] in classes:
part_dir = row['category']
part_id = row['dirId']
part_folder = os.path.join(part_home, str(part_id))
save_folder = os.path.join(save_home, part_dir, str(part_id))
if not os.path.exists(save_folder):
os.makedirs(save_folder)
count+=1
            # load the result.json file that references each part's meshes
if not os.path.isfile(os.path.join(part_folder, 'result.json')):
continue
with open(os.path.join(part_folder, 'result.json')) as json_file:
part_meshes = json.load(json_file)
# traverse through a tree
mesh_dict = {}
root = part_meshes[0]
traverse_tree(root, mesh_dict)
types = []
with open(os.path.join(part_folder, 'mobility.urdf')) as f:
our_lines = f.readlines()
for line in our_lines:
myString = re.sub('\s+',' ',line)
if '<joint name=' in myString:
m_type = myString.split("type=",1)[1][1:-3]
types.append(m_type)
type_idx = 0
details = {}
details_saved = {}
# load mobility_v2 json file
with open(os.path.join(part_folder, 'mobility_v2.json')) as json_file:
mobility_parts = json.load(json_file)
print('processing %s' % part_folder)
part_div = []
for idx, joint_part in enumerate(mobility_parts):
# visual names belonging to one joint part
joint_part_names = joint_part['parts']
assert(joint_part_names) # make sure not empty
# parse ids for each part
ids = [x['id'] for x in joint_part_names]
part_div.append(ids)
# save motion information
details[str(idx)] = joint_part['jointData'].copy()
details_saved[str(idx)] = joint_part['jointData'].copy()
# set type for care part
if type_idx<len(types):
if joint_part['name'] in careParts[part_dir]:
details[str(idx)]['type'] = types[type_idx]
details_saved[str(idx)]['type'] = types[type_idx]
type_idx += 1
else:
if details[str(idx)]:
assert type_idx>=len(types)
assert joint_part['name'] not in careParts[part_dir]
# remove non-care part
if not joint_part['jointData'] or joint_part['name'] not in careParts[part_dir]:
details[str(idx)] = {}
details_saved.pop(str(idx), None)
with open(os.path.join(save_folder, 'motion.json'), 'w') as outfile:
json.dump(details_saved, outfile)
assert len(details) == len(part_div)
part_idx = 0
fix_part = []
parts = []
for key, value in details.items():
if value == {}:
fix_part.append(part_div[part_idx])
else:
parts.append(part_div[part_idx])
part_idx += 1
fix_part = list(itertools.chain(*fix_part))
parts.append(fix_part)
# load, merge, and save part mesh file
merge_meshes(save_folder, parts, mesh_dict)
print(count)
print('all done...')
|
d3d-hoi-main
|
preprocess/process_data.py
|
import os
import common
import argparse
import numpy as np
import json
class Scale:
"""
Scales a bunch of meshes.
"""
def __init__(self):
"""
Constructor.
"""
parser = self.get_parser()
self.options = parser.parse_args()
def get_parser(self):
"""
Get parser of tool.
:return: parser
"""
parser = argparse.ArgumentParser(description='Scale a set of meshes stored as OFF files.')
parser.add_argument('--in_dir', type=str, help='Path to input directory.')
parser.add_argument('--out_dir', type=str, help='Path to output directory; files within are overwritten!')
parser.add_argument('--padding', type=float, default=0.1, help='Relative padding applied on each side.')
return parser
def read_directory(self, directory):
"""
Read directory.
:param directory: path to directory
:return: list of files
"""
files = []
for filename in os.listdir(directory):
files.append(os.path.normpath(os.path.join(directory, filename)))
return files
def run(self):
"""
Run the tool, i.e. scale all found OFF files.
"""
assert os.path.exists(self.options.in_dir)
common.makedir(self.options.out_dir)
files = self.read_directory(self.options.in_dir)
for filepath in files:
mesh = common.Mesh.from_off(filepath)
# Get extents of model.
min, max = mesh.extents()
total_min = np.min(np.array(min))
total_max = np.max(np.array(max))
# Set the center (although this should usually be the origin already).
centers = (
(min[0] + max[0]) / 2,
(min[1] + max[1]) / 2,
(min[2] + max[2]) / 2
)
# Scales all dimensions equally.
sizes = (
total_max - total_min,
total_max - total_min,
total_max - total_min
)
translation = (
-centers[0],
-centers[1],
-centers[2]
)
scales = (
1 / (sizes[0] + 2 * self.options.padding * sizes[0]),
1 / (sizes[1] + 2 * self.options.padding * sizes[1]),
1 / (sizes[2] + 2 * self.options.padding * sizes[2])
)
mesh.translate(translation)
mesh.scale(scales)
print('[Data] %s extents before %f - %f, %f - %f, %f - %f' % (os.path.basename(filepath), min[0], max[0], min[1], max[1], min[2], max[2]))
min, max = mesh.extents()
print('[Data] %s extents after %f - %f, %f - %f, %f - %f' % (os.path.basename(filepath), min[0], max[0], min[1], max[1], min[2], max[2]))
# May also switch axes if necessary.
#mesh.switch_axes(1, 2)
mesh.to_off(os.path.join(self.options.out_dir, os.path.basename(filepath)))
# save parameters
rescale = {}
rescale['scales'] = scales
rescale['translation'] = translation
if not os.path.exists(self.options.out_dir[:-18]+'/rescale'):
os.makedirs(self.options.out_dir[:-18]+'/rescale')
path = self.options.out_dir[:-18]+'/rescale/'+os.path.basename(filepath)[0]+'_rescale.json'
with open(path, 'w') as outfile:
json.dump(rescale, outfile)
if __name__ == '__main__':
app = Scale()
app.run()
|
d3d-hoi-main
|
preprocess/1_scale.py
|
import re
# Defining labels
ABSTAIN = 0
ABNORMAL = 1
NORMAL= 2
def LF_report_is_short(report):
"""
Checks if report is short.
"""
return NORMAL if len(report) < 280 else ABSTAIN
negative_inflection_words = ["but", "however", "otherwise"]
def LF_negative_inflection_words_in_report(report):
return ABNORMAL if any(word in report.lower() \
for word in negative_inflection_words) else ABSTAIN
def LF_is_seen_or_noted_in_report(report):
return ABNORMAL if any(word in report.lower() \
for word in ["is seen", "noted"]) else ABSTAIN
def LF_disease_in_report(report):
return ABNORMAL if "disease" in report.lower() else ABSTAIN
def LF_recommend_in_report(report):
return ABNORMAL if "recommend" in report.lower() else ABSTAIN
def LF_mm_in_report(report):
return ABNORMAL if any(word in report.lower() \
for word in ["mm", "cm"]) else ABSTAIN
abnormal_mesh_terms = ["opacity", "cardiomegaly", "calcinosis",
"hypoinflation", "calcified granuloma",
"thoracic vertebrae", "degenerative",
"hyperdistention", "catheters",
"granulomatous", "nodule", "fracture"
"surgical", "instruments", "emphysema"]
def LF_abnormal_mesh_terms_in_report(report):
if any(mesh in report.lower() for mesh in abnormal_mesh_terms):
return ABNORMAL
else:
return ABSTAIN
words_indicating_normalcy = ['clear', 'no', 'normal', 'unremarkable',
'free', 'midline']
def LF_consistency_in_report(report):
'''
The words 'clear', 'no', 'normal', 'free', 'midline' in
findings section of the report
'''
    findings = report[report.find('FINDINGS:'):]
    findings = findings[:findings.find('IMPRESSION:')]
    sents = findings.split('.')
    num_sents_without_normal = 0
for sent in sents:
sent = sent.lower()
if not any(word in sent for word in words_indicating_normalcy):
num_sents_without_normal += 1
elif 'not' in sent:
num_sents_without_normal += 1
return NORMAL if num_sents_without_normal < 2 else ABNORMAL
categories = ['normal','opacity','cardiomegaly','calcinosis',
'lung/hypoinflation','calcified granuloma',
'thoracic vertebrae/degenerative','lung/hyperdistention',
'spine/degenerative','catheters, indwelling',
'granulomatous disease','nodule','surgical instruments',
'scoliosis', 'osteophyte', 'spondylosis','fractures, bone']
def LF_normal(report):
r = re.compile('No acute cardiopulmonary abnormality',re.IGNORECASE)
for s in report.split("."):
if r.search(s):
return NORMAL
return ABSTAIN
reg_equivocation = re.compile('unlikely|likely|suggests|questionable|concerning|possibly|potentially|could represent|may represent|may relate|cannot exclude|can\'t exclude|may be',re.IGNORECASE)
def LF_positive_MeshTerm(report):
for idx in range(1,len(categories)):
reg_pos = re.compile(categories[idx],re.IGNORECASE)
reg_neg = re.compile('(No|without|resolution)\\s([a-zA-Z0-9\-,_]*\\s){0,10}'+categories[idx],re.IGNORECASE)
for s in report.split("."):
if reg_pos.search(s) and (not reg_neg.search(s)) and (not reg_equivocation.search(s)):
return ABNORMAL
return ABSTAIN
def LF_fracture(report):
reg_pos = re.compile('fracture',re.IGNORECASE)
reg_neg = re.compile('(No|without|resolution)\\s([a-zA-Z0-9\-,_]*\\s){0,10}fracture',re.IGNORECASE)
for s in report.split("."):
if reg_pos.search(s) and (not reg_neg.search(s)) and (not reg_equivocation.search(s)):
return ABNORMAL
return ABSTAIN
def LF_calcinosis(report):
reg_01 = re.compile('calc',re.IGNORECASE)
reg_02 = re.compile('arter|aorta|muscle|tissue',re.IGNORECASE)
for s in report.split("."):
if reg_01.search(s) and reg_02.search(s):
return ABNORMAL
return ABSTAIN
def LF_degen_spine(report):
reg_01 = re.compile('degen',re.IGNORECASE)
reg_02 = re.compile('spine',re.IGNORECASE)
for s in report.split("."):
if reg_01.search(s) and reg_02.search(s):
return ABNORMAL
return ABSTAIN
def LF_lung_hypoinflation(report):
#reg_01 = re.compile('lung|pulmonary',re.IGNORECASE)
reg_01 = re.compile('hypoinflation|collapse|(low|decrease|diminish)\\s([a-zA-Z0-9\-,_]*\\s){0,4}volume',re.IGNORECASE)
for s in report.split("."):
if reg_01.search(s):
return ABNORMAL
return ABSTAIN
def LF_lung_hyperdistention(report):
#reg_01 = re.compile('lung|pulmonary',re.IGNORECASE)
reg_01 = re.compile('increased volume|hyperexpan|inflated',re.IGNORECASE)
for s in report.split("."):
if reg_01.search(s):
return ABNORMAL
return ABSTAIN
def LF_catheters(report):
reg_01 = re.compile(' line|catheter|PICC',re.IGNORECASE)
for s in report.split("."):
if reg_01.search(s):
return ABNORMAL
return ABSTAIN
def LF_surgical(report):
reg_01 = re.compile('clip',re.IGNORECASE)
for s in report.split("."):
if reg_01.search(s):
return ABNORMAL
return ABSTAIN
def LF_granuloma(report):
reg_01 = re.compile('granuloma',re.IGNORECASE)
for s in report.split("."):
if reg_01.search(s):
return ABNORMAL
return ABSTAIN
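# --- Illustrative usage sketch (added for clarity, not part of the original module) ---
# The LFs above map a raw report string to ABSTAIN / ABNORMAL / NORMAL votes;
# the toy report below is invented purely to show the calling convention.
if __name__ == '__main__':
    _toy_report = ("FINDINGS: A 5 mm calcified granuloma is seen in the right lung. "
                   "IMPRESSION: Granulomatous disease.")
    for _lf in (LF_report_is_short, LF_mm_in_report,
                LF_abnormal_mesh_terms_in_report, LF_positive_MeshTerm):
        print('[Demo]', _lf.__name__, '->', _lf(_toy_report))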
|
cross-modal-ws-demo-master
|
openi_demo/labeling_functions.py
|
import os
import warnings
import numpy as np
import torch
import torchvision.transforms as transforms
try:
from PIL import Image as pil_image
except ImportError:
pil_image = None
def load_ids(filename):
fin = open(filename, "r")
return [_.strip() for _ in fin]
class StdNormalize(object):
"""
Normalize torch tensor to have zero mean and unit std deviation
"""
def __call__(self, *inputs):
outputs = []
for idx, _input in enumerate(inputs):
_input = _input.sub(_input.mean()).div(_input.std())
outputs.append(_input)
        return outputs if len(outputs) > 1 else outputs[0]
def transform(input_size):
return transforms.Compose(
[transforms.Resize(input_size), transforms.ToTensor(), StdNormalize()]
)
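# --- Illustrative sketch (added for clarity, not part of the original module) ---
# StdNormalize standardizes a whole tensor to zero mean and unit std; the
# random tensor below is just a stand-in input.
if __name__ == '__main__':
    _t = torch.rand(3, 224, 224)
    _n = StdNormalize()(_t)
    print('[Demo] mean %.4f, std %.4f' % (_n.mean().item(), _n.std().item()))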
# default xray loader from png
def default_xray_loader(xray_path, img_rows=224, img_cols=224):
xray = load_img(
xray_path, color_mode="grayscale", target_size=(img_rows, img_cols, 1)
)
xray = img_to_array(xray)
xray = np.dstack([xray, xray, xray])
xray = array_to_img(xray, "channels_last")
return xray
class CXRFileList(torch.utils.data.Dataset):
def __init__(self, paths, label=None, transform=None, loader=default_xray_loader, ref=None, lfs=None, slice_mode=None, get_slice_labels=False):
self.paths = paths
self.label = label
self.transform = transform
self.loader = loader
self.ref = ref
# Note: slice_labels and labels in same order!
if lfs is not None:
self.lfs = torch.from_numpy(np.array(lfs).astype(np.float32))
def __getitem__(self, index):
idx = 0
if self.ref is not None and isinstance(self.paths[index],list):
for i in range(len(self.paths[index])):
impath = self.paths[index][i]
if impath in self.ref:
idx=i
break
else:
impath = self.paths[index]
y = self.label[index]
img = self.loader(impath)
if self.transform is not None:
img = self.transform(img)
return img, y
def __len__(self):
return len(self.paths)
def get_data_loader(
paths,
labels,
batch_size=32,
input_size=224,
shuffle=False,
):
# Load front image index
fin=open('./data/front_view_ids.txt', "r")
front_view_ids = [_.strip() for _ in fin]
fin.close()
dataset = CXRFileList(
paths=paths,
label=labels,
transform=transform(input_size),
ref=front_view_ids,
)
# Build data loader
data_loader = torch.utils.data.DataLoader(
dataset,
sampler=None,
batch_size=batch_size,
shuffle=shuffle,
)
return data_loader
def img_to_array(img, data_format="channels_last", dtype="float32"):
"""Converts a PIL Image instance to a Numpy array.
# Arguments
img: PIL Image instance.
data_format: Image data format,
either "channels_first" or "channels_last".
dtype: Dtype to use for the returned array.
# Returns
A 3D Numpy array.
# Raises
ValueError: if invalid `img` or `data_format` is passed.
"""
if data_format not in {"channels_first", "channels_last"}:
raise ValueError("Unknown data_format: %s" % data_format)
# Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but original PIL image has format (width, height, channel)
x = np.asarray(img, dtype=dtype)
if len(x.shape) == 3:
if data_format == "channels_first":
x = x.transpose(2, 0, 1)
elif len(x.shape) == 2:
if data_format == "channels_first":
x = x.reshape((1, x.shape[0], x.shape[1]))
else:
x = x.reshape((x.shape[0], x.shape[1], 1))
else:
raise ValueError("Unsupported image shape: %s" % (x.shape,))
return x
def array_to_img(x, data_format="channels_last", scale=True, dtype="float32"):
"""Converts a 3D Numpy array to a PIL Image instance.
# Arguments
x: Input Numpy array.
data_format: Image data format.
either "channels_first" or "channels_last".
scale: Whether to rescale image values
to be within `[0, 255]`.
dtype: Dtype to use.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if invalid `x` or `data_format` is passed.
"""
if pil_image is None:
raise ImportError(
"Could not import PIL.Image. " "The use of `array_to_img` requires PIL."
)
x = np.asarray(x, dtype=dtype)
if x.ndim != 3:
raise ValueError(
"Expected image array to have rank 3 (single image). "
"Got array with shape: %s" % (x.shape,)
)
if data_format not in {"channels_first", "channels_last"}:
raise ValueError("Invalid data_format: %s" % data_format)
# Original Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but target PIL image has format (width, height, channel)
if data_format == "channels_first":
x = x.transpose(1, 2, 0)
if scale:
x = x + max(-np.min(x), 0)
x_max = np.max(x)
if x_max != 0:
x /= x_max
x *= 255
if x.shape[2] == 4:
# RGBA
return pil_image.fromarray(x.astype("uint8"), "RGBA")
elif x.shape[2] == 3:
# RGB
return pil_image.fromarray(x.astype("uint8"), "RGB")
elif x.shape[2] == 1:
# grayscale
return pil_image.fromarray(x[:, :, 0].astype("uint8"), "L")
else:
raise ValueError("Unsupported channel number: %s" % (x.shape[2],))
if pil_image is not None:
_PIL_INTERPOLATION_METHODS = {
"nearest": pil_image.NEAREST,
"bilinear": pil_image.BILINEAR,
"bicubic": pil_image.BICUBIC,
}
# These methods were only introduced in version 3.4.0 (2016).
if hasattr(pil_image, "HAMMING"):
_PIL_INTERPOLATION_METHODS["hamming"] = pil_image.HAMMING
if hasattr(pil_image, "BOX"):
_PIL_INTERPOLATION_METHODS["box"] = pil_image.BOX
# This method is new in version 1.1.3 (2013).
if hasattr(pil_image, "LANCZOS"):
_PIL_INTERPOLATION_METHODS["lanczos"] = pil_image.LANCZOS
def load_img(
path, grayscale=False, color_mode="rgb", target_size=None, interpolation="nearest"
):
"""Loads an image into PIL format.
# Arguments
path: Path to image file.
color_mode: One of "grayscale", "rgb", "rgba". Default: "rgb".
The desired image format.
target_size: Either `None` (default to original size)
or tuple of ints `(img_height, img_width)`.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if interpolation method is not supported.
"""
if grayscale is True:
warnings.warn("grayscale is deprecated. Please use " 'color_mode = "grayscale"')
color_mode = "grayscale"
if pil_image is None:
raise ImportError(
"Could not import PIL.Image. " "The use of `array_to_img` requires PIL."
)
img = pil_image.open(path)
if color_mode == "grayscale":
if img.mode != "L":
img = img.convert("L")
elif color_mode == "rgba":
if img.mode != "RGBA":
img = img.convert("RGBA")
elif color_mode == "rgb":
if img.mode != "RGB":
img = img.convert("RGB")
else:
raise ValueError('color_mode must be "grayscale", "rgb", or "rgba"')
if target_size is not None:
width_height_tuple = (target_size[1], target_size[0])
if img.size != width_height_tuple:
if interpolation not in _PIL_INTERPOLATION_METHODS:
raise ValueError(
"Invalid interpolation method {} specified. Supported "
"methods are {}".format(
interpolation, ", ".join(_PIL_INTERPOLATION_METHODS.keys())
)
)
resample = _PIL_INTERPOLATION_METHODS[interpolation]
img = img.resize(width_height_tuple, resample)
return img
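# --- Illustrative sketch (added for clarity, not part of the original module) ---
# Round-trips a synthetic 32x32 grayscale array through array_to_img/img_to_array;
# the array values are random and only serve as a demo input.
if __name__ == '__main__' and pil_image is not None:
    _arr = (np.random.rand(32, 32, 1) * 255).astype('float32')
    _img = array_to_img(_arr, data_format='channels_last', scale=True)
    _back = img_to_array(_img, data_format='channels_last')
    print('[Demo] PIL round trip shapes:', _arr.shape, '->', _back.shape)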
|
cross-modal-ws-demo-master
|
openi_demo/utils.py
|
import re
import spacy
spacy_en = spacy.load('en_core_web_sm')
# Setting LF output values
ABSTAIN_VAL = 0
SEIZURE_VAL = 1
NO_SEIZURE_VAL = -1
######################################################################################################
##### HELPFUL REGEXES AND ONTOLOGIES
######################################################################################################
# Defining useful regular expressions.
SIMPLE_NORMAL_RE = re.compile('\snormal\s', re.IGNORECASE)
# Nouns indicating an EEG
EEGSYN = r'(EEG|study|record|electroencephalogram|ambulatory\s+EEG|video.EEG\sstudy)'
# Phrases indicating a normal study
NORMAL_STUDY_PHRASES = re.compile(rf'\snormal\s+{EEGSYN}'
rf'|\snormal\s+awake\s+and\s+asleep\s+{EEGSYN}'
rf'|\snormal\s+awake\s+{EEGSYN}'
rf'|\snormal\s+awake\s+and\s+drowsy\s+{EEGSYN}'
rf'|\snormal\s+asleep\s+{EEGSYN}'
rf'|\s{EEGSYN}\s+(is|was)\s+normal'
rf'|\srange\s+of\s+normal' # generous
rf'|\s(is|was)\s+normal\s+for\s+age'
#rf'|(EEG|study|record)\s+(is|was)\s+normal\s+for\s+age'
#rf'|(EEG|study|record)\s+(is|was)\s+normal\s+for\s+age'
rf'|{EEGSYN}\s+(is|was)\s+within\s+normal\s+'
                                  rf'|{EEGSYN}\s+(is|was)\s+borderline\s+normal'
rf'|{EEGSYN}\s+(is|was)\s+at\s+the\s+borderline\s+of\s+being\s+normal'
rf'|{EEGSYN}\s+capturing\s+wakefulness\s+and\s+sleep\s+(is|was)\s+normal'
rf'|{EEGSYN}\s+capturing\s+wakefulness\s+(is|was)\s+normal',
re.IGNORECASE)
# Regex for abnormal
ABNORMAL_RE = re.compile(r'abnormal', re.IGNORECASE)
# Regex for seizure synonyms
SEIZURE_SYNONYMS = r'seizure|seizures|spasm|spasms|status\sepilepticus|epilepsia\spartialis\scontinua|drop\sattack'
SEIZURE_SYNONYMS_RE = re.compile(SEIZURE_SYNONYMS, re.IGNORECASE|re.UNICODE)
# Regex for negation
NEG_DET = ['no', 'not', 'without']
# Regex for no seizure in study
NEG_SEIZURE = r'no seizures|no epileptiform activity or seizures'.replace(' ','\s')
NEG_SEIZURE_RE = re.compile(NEG_SEIZURE, re.IGNORECASE)
# Alternate section keys for INTERPRETATION section of report
candidate_interps = ['INTERPRETATION', 'Interpretation', 'Summary', 'impression', 'IMPRESSION', 'conclusion', 'conclusions']
CANDIDATE_INTERPS_LOWER = list({ss.lower() for ss in candidate_interps})
# Alternate regex for no seizures
NOSEIZURE_PHRASE_RE = re.compile(r'\bno seizures\b|\bno\sepileptiform\sactivity\sor\sseizures\b'
r'|\bno findings to indicate seizures\b'
r'|no findings to indicate'
r'|no new seizures'
r'|with no seizures'
r'|no evidence to support seizures'
r'|nonepileptic'
r'|non-epileptic'
,
re.IGNORECASE|re.UNICODE)
# Defining negexes
NEG_DET= r'(\bno\b|\bnot\b|\bwithout\sfurther\b|\bno\sfurther\b|without|neither)'
BASIC_NEGEX_RE = re.compile(NEG_DET + '.*('+ SEIZURE_SYNONYMS + ')', re.IGNORECASE|re.UNICODE)
REVERSED_NEGEX_RE = re.compile('('+ SEIZURE_SYNONYMS + ').*' + NEG_DET, re.IGNORECASE|re.UNICODE)
######################################################################################################
##### HELPER FUNCTIONS
######################################################################################################
def is_not_abnormal_interp(interp):
"""
Check text of interpretation for abnormal mentions
"""
m = ABNORMAL_RE.search(interp)
if not m:
return True
else:
return False
def abnormal_interp_with_seizure(interp_text):
"""
Tests for abnormal interpretation with seizure synonym
"""
if ABNORMAL_RE.search(interp_text):
if SEIZURE_SYNONYMS_RE.search(interp_text):
return SEIZURE_VAL
else:
return NO_SEIZURE_VAL
else:
return NO_SEIZURE_VAL
def abnormal_interp_test(interp_text):
"""
Tests for abnormal text
"""
return ABNORMAL_RE.search(interp_text)
def eval_interp_with_negex(interp):
"""
Looks at each sentence, if a sentence says there is a seizure,
then that overrides all the negative sentences
"""
if is_not_abnormal_interp(interp):
return NO_SEIZURE_VAL
parsed_interp = spacy_en(interp)
neg_found = 0
seizure_found_and_no_neg = 0
for sent in parsed_interp.sents:
s = str(sent)
m1 = BASIC_NEGEX_RE.search(s)
if m1:
neg_found=1
m2 = REVERSED_NEGEX_RE.search(s)
if m2:
neg_found =2
if not neg_found:
m3 = SEIZURE_SYNONYMS_RE.search(s)
if m3:
seizure_found_and_no_neg = 1
if neg_found and not seizure_found_and_no_neg:
return NO_SEIZURE_VAL
elif seizure_found_and_no_neg:
return SEIZURE_VAL
return NO_SEIZURE_VAL
def get_section_with_name(section_names, doc):
"""
Check exact matches for keys in section_names;
this presumes a certain structure in EEGNote doc object
"""
text = ''
for section in section_names:
try:
text = ' '.join([text, doc.sections[section]['text']])
except:
pass
try:
text = ' '.join([text, doc.sections['narrative'][section]])
except:
pass
try:
text = ' '.join([text, doc.sections['findings'][section]])
except:
pass
return ' '.join(text.split())
######################################################################################################
##### LABELING FUNCTIONS (LFs)
######################################################################################################
def lf_normal_interp_not_seizure(report):
"""
    This LF looks for a top-level interpretation section; a normal-study phrasing there votes no seizure
"""
for keyinterp in CANDIDATE_INTERPS_LOWER:
if keyinterp in report.sections.keys():
interpretation = report.sections[keyinterp]
interp_text = interpretation['text']
if SIMPLE_NORMAL_RE.search(interp_text):
if NORMAL_STUDY_PHRASES.search(interp_text):
return NO_SEIZURE_VAL
else:
return ABSTAIN_VAL
else:
return ABSTAIN_VAL
return ABSTAIN_VAL
def lf_abnormal_interp_with_seizure(report):
"""
Searching for abnormal interpretation section with seizure synonym
"""
if 'interpretation' in report.sections.keys():
interpretation = report.sections['interpretation']
interp_text = interpretation['text']
return abnormal_interp_with_seizure(interp_text)
elif 'summary' in report.sections:
return abnormal_interp_with_seizure(report.sections['summary']['text'])
elif 'findings' in report.sections: # fall back to look in the findings
if 'summary' in report.sections['findings']: # fall back to look for a summary instead
return abnormal_interp_with_seizure(report.sections['findings']['summary'])
if 'impression' in report.sections['findings']:
return abnormal_interp_with_seizure(report.sections['findings']['impression'])
return ABSTAIN_VAL
elif 'narrative' in report.sections: # fall back to look in the findings
ky = 'narrative'
if 'summary' in report.sections[ky]: # fall back to look for a summary instead
return abnormal_interp_with_seizure(report.sections[ky]['summary'])
if 'impression' in report.sections[ky]:
return abnormal_interp_with_seizure(report.sections[ky]['impression'])
return ABSTAIN_VAL
else:
return ABSTAIN_VAL
def lf_findall_interp_with_seizure(report):
"""
Check if interpretation sections are abnormal,
then look for words indicating a seizure
"""
if 'interpretation' in report.sections.keys():
interpretation = report.sections['interpretation']
interp_text = interpretation['text']
return abnormal_interp_with_seizure(interp_text)
else:
candtext = get_section_with_name(CANDIDATE_INTERPS_LOWER, report)
if candtext:
return abnormal_interp_with_seizure(candtext)
else:
return ABSTAIN_VAL
def lf_findall_abnl_interp_without_seizure(report):
"""
Check if interpretation sections are abnormal,
then look for words indicating NO seizure
"""
if 'interpretation' in report.sections.keys():
interpretation = report.sections['interpretation']
interp_text = interpretation['text']
if abnormal_interp_test(interp_text):
if NOSEIZURE_PHRASE_RE.search(interp_text):
return NO_SEIZURE_VAL
else:
return ABSTAIN_VAL
else:
return ABSTAIN_VAL
else:
candtext = get_section_with_name(CANDIDATE_INTERPS_LOWER, report)
if candtext:
if abnormal_interp_test(candtext):
if NOSEIZURE_PHRASE_RE.search(candtext):
return NO_SEIZURE_VAL
else:
return ABSTAIN_VAL
else:
return ABSTAIN_VAL
else:
return ABSTAIN_VAL
def lf_abnl_interp_negexsp_seizure(report):
"""
Check if top interpretation section is abnormal and if so,
use negex to find indications that there is no seizure
"""
for topkey in CANDIDATE_INTERPS_LOWER:
if topkey in report.sections.keys():
interpretation = report.sections[topkey]
interp_text = interpretation['text']
return eval_interp_with_negex(interp_text)
return ABSTAIN_VAL
def lf_findall_interp_negex_seizure(report):
"""
Check if lower sections have abnormal text and if so,
use negex to find indications of no seizure
"""
candtext = get_section_with_name(CANDIDATE_INTERPS_LOWER, report)
if candtext:
return eval_interp_with_negex(candtext)
else:
return ABSTAIN_VAL
def lf_seizure_section(report):
"""
Checking to see if there is a 'seizure' section in the report
"""
if 'findings' in report.sections.keys():
seizure_keys = [key for key in report.sections['findings'].keys() if 'seizure' in key ]
if not seizure_keys:
return ABSTAIN_VAL
else:
for ky in seizure_keys:
seizure_text = report.sections['findings'][ky]
if 'None' in seizure_text:
return NO_SEIZURE_VAL
elif 'Many' in seizure_text:
return SEIZURE_VAL
elif len(seizure_text.split()) > 30:
return SEIZURE_VAL
else:
return NO_SEIZURE_VAL
else:
return ABSTAIN_VAL
def lf_impression_section_negative(report):
"""
Getting impression section, checking for specific terms
"""
impression_words = ['impression','interpretation','comments']
impression = get_section_with_name(impression_words, report)
reg_normal = ['no epileptiform', 'absence of epileptiform', 'not epileptiform',
'normal EEG', 'normal aEEG','benign','non-specific','nonepileptic','idiopathic',
'no seizures','EEG is normal','normal study']
if any([re.search(reg, impression, re.IGNORECASE) for reg in reg_normal] ):
return NO_SEIZURE_VAL
else:
return ABSTAIN_VAL
def lf_impression_section_positive(report):
"""
Getting impression section, checking for specific terms
"""
impression_words = ['impression','interpretation','comments']
impression = get_section_with_name(impression_words, report)
reg_abnormal = ['status epilepticus','spasms','abnormal continuous',
'tonic','subclinical','spike-wave', 'markedly abnormal']
if any([re.search(reg, impression, re.IGNORECASE) for reg in reg_abnormal] ):
return SEIZURE_VAL
else:
return ABSTAIN_VAL
def lf_spikes_in_impression(report):
"""
Checking for indications of spikes in the impression section
"""
impression_words = ['impression','interpretation','comments']
impression = get_section_with_name(impression_words, report)
if re.search('spike',impression,re.IGNORECASE):
return SEIZURE_VAL
else:
return ABSTAIN_VAL
def lf_extreme_words_in_impression(report):
"""
Checking for words indicating extreme events in the impression section
"""
impression_words = ['impression','interpretation','comments']
impression = get_section_with_name(impression_words, report)
reg_abnormal = ['excessive','frequent']
if any([re.search(reg, impression, re.IGNORECASE) for reg in reg_abnormal] ):
return SEIZURE_VAL
else:
return ABSTAIN_VAL
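# --- Illustrative usage sketch (added for clarity, not part of the original module) ---
# The negex-style helpers operate on plain interpretation strings, so they can be
# exercised without a full EEGNote object; both example sentences are invented.
if __name__ == '__main__':
    print('[Demo]', eval_interp_with_negex(
        'Abnormal EEG due to frequent electrographic seizures.'))              # expect SEIZURE_VAL
    print('[Demo]', eval_interp_with_negex(
        'Abnormal EEG, but no seizures or epileptiform activity were seen.'))  # expect NO_SEIZURE_VAL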
|
cross-modal-ws-demo-master
|
lfs/lfs_eeg.py
|
import re
# Setting LF output values
ABSTAIN_VAL = 0
ABNORMAL_VAL = 1
NORMAL_VAL = -1
######################################################################################################
##### HELPFUL REGEXES AND ONTOLOGIES
######################################################################################################
reg_equivocation = re.compile('unlikely|likely|suggests|questionable|concerning|possibly|potentially|could represent|may represent|may relate|cannot exclude|can\'t exclude|may be',re.IGNORECASE)
######################################################################################################
##### LABELING FUNCTIONS (LFs)
######################################################################################################
def LF_no_degenerative(report):
"""
Checking for degenerative change
"""
r = re.compile('No significant degenerative change',re.IGNORECASE)
for s in report.report_text.text.split("."):
if r.search(s):
return NORMAL_VAL
return ABSTAIN_VAL
def LF_degen_spine(report):
"""
Checking for degenerative spine
"""
reg_01 = re.compile('degen',re.IGNORECASE)
reg_02 = re.compile('spine',re.IGNORECASE)
for s in report.report_text.text.split("."):
if reg_01.search(s) and reg_02.search(s):
return ABNORMAL_VAL
return ABSTAIN_VAL
def LF_fracture_general(report):
reg_pos = re.compile('fracture',re.IGNORECASE)
reg_neg = re.compile('(No|without|resolution)\\s([a-zA-Z0-9\-,_]*\\s){0,10}fracture',re.IGNORECASE)
for s in report.report_text.text.split("."):
if reg_pos.search(s) and (not reg_neg.search(s)) and (not reg_equivocation.search(s)):
return ABNORMAL_VAL
return ABSTAIN_VAL
def LF_fracture_1(report):
reg_pos = re.compile('(linear|curvilinear)\\slucency',re.IGNORECASE)
reg_neg = re.compile('(No|without|resolution)\\s([a-zA-Z0-9\-,_]*\\s){0,10}fracture',re.IGNORECASE)
for s in report.report_text.text.split("."):
if reg_pos.search(s) and (not reg_neg.search(s)) and (not reg_equivocation.search(s)):
return ABNORMAL_VAL
return ABSTAIN_VAL
def LF_fracture_2(report):
reg_pos = re.compile('(impaction|distraction|diastasis|displaced|foreshortened|angulation|rotation)',re.IGNORECASE)
reg_neg = re.compile('(No|without|resolution)\\s([a-zA-Z0-9\-,_]*\\s){0,10}fracture',re.IGNORECASE)
for s in report.report_text.text.split("."):
if reg_pos.search(s) and (not reg_neg.search(s)) and (not reg_equivocation.search(s)):
return ABNORMAL_VAL
return ABSTAIN_VAL
def LF_fracture_3(report):
reg_pos = re.compile('(transverse|oblique)',re.IGNORECASE)
reg_neg = re.compile('(No|without|resolution)\\s([a-zA-Z0-9\-,_]*\\s){0,10}fracture',re.IGNORECASE)
for s in report.report_text.text.split("."):
if reg_pos.search(s) and (not reg_neg.search(s)) and (not reg_equivocation.search(s)):
return ABNORMAL_VAL
return ABSTAIN_VAL
def LF_lesion_1(report):
reg_pos = re.compile('(moth-eaten|permeative|chondroid|ground-glass|lucent|sclerotic)',re.IGNORECASE)
reg_neg = re.compile('(No|without|resolution)\\s([a-zA-Z0-9\-,_]*\\s){0,10}(lesion|tumor|mass)',re.IGNORECASE)
for s in report.report_text.text.split("."):
if reg_pos.search(s) and (not reg_neg.search(s)) and (not reg_equivocation.search(s)):
return ABNORMAL_VAL
return ABSTAIN_VAL
def LF_lesion_2(report):
reg_pos = re.compile('(margin|circumscribed|indistinct)',re.IGNORECASE)
reg_neg = re.compile('(No|without|resolution)\\s([a-zA-Z0-9\-,_]*\\s){0,10}(lesion|tumor|mass)',re.IGNORECASE)
for s in report.report_text.text.split("."):
if reg_pos.search(s) and (not reg_neg.search(s)) and (not reg_equivocation.search(s)):
return ABNORMAL_VAL
return ABSTAIN_VAL
def LF_lesion_3(report):
reg_pos = re.compile('(non-linear|lucency)',re.IGNORECASE)
reg_neg = re.compile('(No|without|resolution)\\s([a-zA-Z0-9\-,_]*\\s){0,10}(lesion|tumor|mass)',re.IGNORECASE)
for s in report.report_text.text.split("."):
if reg_pos.search(s) and (not reg_neg.search(s)) and (not reg_equivocation.search(s)):
return ABNORMAL_VAL
return ABSTAIN_VAL
def LF_surgical(report):
"""
Checking for post-surgical change
"""
reg_01 = re.compile('surgical',re.IGNORECASE)
for s in report.report_text.text.split("."):
if reg_01.search(s):
return ABNORMAL_VAL
return ABSTAIN_VAL
def LF_no_significant(c):
"""
Checking for indications of negligible issues
"""
report = c.report_text.text
report = report[:report.find("SUMMARY:")]
if "no significant" in report.lower()\
or "no immediate" in report.lower()\
or "demonstrate no" in report.lower():
return NORMAL_VAL
else:
return ABSTAIN_VAL
def LF_no_evidence(c):
"""
Checking for evidence of fracture
"""
if "no evidence of fracture" in c.report_text.text.lower():
return NORMAL_VAL
else:
return ABSTAIN_VAL
def LF_report_length(c):
"""
Separating based on report length
"""
long_cut = 600
short_cut = 500
if len(c.report_text.text) < short_cut:
return NORMAL_VAL
elif len(c.report_text.text) > long_cut:
return ABNORMAL_VAL
else :
return ABSTAIN_VAL
def LF_negative_quantifiers_in_report(c):
"""
    Searching for indications of multiple or severe pathologies
"""
negative_quantifiers = ["severe", "multiple"]
return ABNORMAL_VAL if any(word in c.report_text.text.lower() \
for word in negative_quantifiers) else ABSTAIN_VAL
def LF_disease_in_report(c):
"""
Checking for word "disease"
"""
return ABNORMAL_VAL if "disease" in c.report_text.text.lower() else ABSTAIN_VAL
def LF_positive_disease_term(report):
"""
Checking for positive disease term
"""
categories = ['normal','opacity','cardiomegaly','calcinosis',
'lung/hypoinflation','calcified granuloma',
'thoracic vertebrae/degenerative','lung/hyperdistention',
'spine/degenerative','catheters, indwelling',
'granulomatous disease','nodule','surgical instruments',
'scoliosis', 'osteophyte', 'spondylosis','fractures, bone']
for idx in range(1,len(categories)):
reg_pos = re.compile(categories[idx],re.IGNORECASE)
reg_neg = re.compile('(No|without|resolution)\\s([a-zA-Z0-9\-,_]*\\s){0,10}'+categories[idx],re.IGNORECASE)
for s in report.report_text.text.split("."):
if reg_pos.search(s) and (not reg_neg.search(s)) and (not reg_equivocation.search(s)):
return ABNORMAL_VAL
return ABSTAIN_VAL
def LF_consistency_in_report(c):
"""
    Checking for words indicating normalcy ('clear', 'no', 'normal',
    'unremarkable', 'preserved', 'mild') in the report text
"""
words_indicating_normalcy = ['clear', 'no', 'normal', 'unremarkable',
'preserved', 'mild']
report = c.report_text.text
findings = report
sents = findings.split('.')
num_sents_without_normal = 0
for sent in sents:
sent = sent.lower()
if not any(word in sent for word in words_indicating_normalcy):
num_sents_without_normal += 1
elif 'not' in sent:
num_sents_without_normal += 1
if len(sents) - num_sents_without_normal < 2:
return ABNORMAL_VAL
else:
return ABSTAIN_VAL
def LF_screw(c):
"""
Checking if a screw is mentioned
"""
return ABNORMAL_VAL if "screw" in c.report_text.text.lower() else ABSTAIN_VAL
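# --- Illustrative usage sketch (added for clarity, not part of the original module) ---
# Each MSK LF expects an object exposing report_text.text; SimpleNamespace is used
# here as a stand-in for the real report class, and the text is invented.
if __name__ == '__main__':
    from types import SimpleNamespace
    _toy = SimpleNamespace(report_text=SimpleNamespace(
        text='There is an oblique fracture of the distal radius with mild displacement.'))
    for _lf in (LF_fracture_general, LF_fracture_3, LF_degen_spine, LF_no_evidence):
        print('[Demo]', _lf.__name__, '->', _lf(_toy))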
|
cross-modal-ws-demo-master
|
lfs/lfs_msk.py
|
import re
# Setting LF output values
ABSTAIN_VAL = 0
ABNORMAL_VAL = 1
NORMAL_VAL = -1
######################################################################################################
##### HELPFUL REGEXES AND ONTOLOGIES
######################################################################################################
# Equivocation terms
equiv_str = 'unlikely|likely|suggests|questionable|concerning|possibly|potentially|could represent|may represent|may relate|cannot exclude|can\'t exclude|may be'
equiv_lst = equiv_str.split('|')
reg_equivocation = re.compile(equiv_str,re.IGNORECASE)
# Terms indicating followup required
followup_terms = ["followup","referred", "paged", "contacted","contact"]
# Things rarely mentioned unless they exist...
abnormal_disease_terms = ["opacity", "cardiomegaly","hypoinflation","hyperdistention",\
"edema", "nodule", "fracture",\
"emphysema","empyema","dissection","pneumomediastinum",\
"pneumoperitineum","widening of the mediastinum","abcess",\
"hemorrhage","malpositioned","greater than",\
"asymmetric","urgent","mediastinal shift","Fleischner","foreign body"\
"differential"]
# Longer phrases indicating normalcy
normal_words_and_phrases = ["no acute cardiopulmonary","no significant interval change",\
"cardiomediastinal silhouette is within normal limits","without evidence of ",\
"no pleural effusion or pneumothorax","without evidence of","gross"]
# Words indicating normalcy
words_indicating_normalcy = ['clear', 'no', 'normal', 'unremarkable',
'free', 'midline','without evidence of','absent','gross']
# Disease categories
categories = ['normal','opacity','cardiomegaly','calcinosis',
'lung/hypoinflation','calcified granuloma',
'thoracic vertebrae/degenerative','lung/hyperdistention',
'spine/degenerative','catheters, indwelling',
'granulomatous disease','nodule','surgical instruments',
'scoliosis', 'osteophyte', 'spondylosis','fractures, bone']
# Words with negative inflection
negative_inflection_words = ["but", "however", "otherwise",equiv_str]
######################################################################################################
##### LABELING FUNCTIONS (LFs)
######################################################################################################
def LF_report_length(c):
"""
Separating by report length
"""
short_cutoff = 425
long_cutoff = 700
ln = len(c.report_text.text)
if ln<short_cutoff:
return NORMAL_VAL
elif ln>long_cutoff:
return ABNORMAL_VAL
else:
return ABSTAIN_VAL
def LF_equivocation(c):
"""
Checking for equivocation
"""
return ABNORMAL_VAL if any(word in c.report_text.text \
for word in equiv_lst) else ABSTAIN_VAL
def LF_negative_inflection_words_in_report(c):
"""
Checking for negative inflection words
"""
return ABNORMAL_VAL if any(word in c.report_text.text \
for word in negative_inflection_words) else ABSTAIN_VAL
def LF_is_seen_or_noted_in_report(c):
"""
    Checking for phrases indicating that a finding was seen or noted
"""
return ABNORMAL_VAL if any(word in c.report_text.text \
for word in ["seen", "noted","observed"]) else ABSTAIN_VAL
def LF_disease_in_report(c):
"""
Checking for mentions of disease
"""
return ABNORMAL_VAL if "disease" in c.report_text.text else ABSTAIN_VAL
def LF_recommend_in_report(c):
"""
Checking for recommended followup
"""
return ABNORMAL_VAL if "recommend" in c.report_text.text else ABSTAIN_VAL
def LF_mm_in_report(c):
"""
Checking for anything that was measured
"""
return ABNORMAL_VAL if any(word in c.report_text.text \
for word in ["mm", "cm","millimeter","centimeter"]) else ABSTAIN_VAL
def LF_abnormal_disease_terms_in_report(c):
"""
Checking for abnormal disease terms
"""
if any(mesh in c.report_text.text for mesh in abnormal_disease_terms):
return ABNORMAL_VAL
else:
return ABSTAIN_VAL
def LF_consistency_in_report(c):
"""
Checking for the words 'clear', 'no', 'normal', 'free', 'midline' in
findings section of the report
"""
report = c.report_text.text
findings = report[report.find('FINDINGS:'):]
findings = findings[:findings.find('IMPRESSION:')]
sents = findings.split('.')
num_sents_without_normal = 0
for sent in sents:
sent = sent.lower()
if not any(word in sent for word in words_indicating_normalcy):
num_sents_without_normal += 1
elif 'not' in sent:
num_sents_without_normal += 1
norm_cut = 1
if num_sents_without_normal<norm_cut:
return NORMAL_VAL
elif num_sents_without_normal>norm_cut:
return ABNORMAL_VAL
else:
return ABSTAIN_VAL
def LF_gross(report):
"""
Checking for word "gross"
"""
reg_pos = re.compile('gross',re.IGNORECASE)
for s in report.report_text.text.split("."):
if reg_pos.search(s):
return ABNORMAL_VAL
return ABSTAIN_VAL
def LF_normal(report):
"""
Checking for various standard indications of normality
"""
r = re.compile('No acute cardiopulmonary abnormality',re.IGNORECASE)
r2 = re.compile('normal chest X-ray',re.IGNORECASE)
for s in report.report_text.text.split("."):
if r.search(s) or r2.search(s): # or r3.search(s) or r4.search(s):
return NORMAL_VAL
return ABSTAIN_VAL
def LF_positive_MeshTerm(report):
"""
Looking for positive mesh terms
"""
for idx in range(1,len(categories)):
reg_pos = re.compile(categories[idx],re.IGNORECASE)
reg_neg = re.compile('(No|without|resolution)\\s([a-zA-Z0-9\-,_]*\\s){0,10}'+categories[idx],re.IGNORECASE)
for s in report.report_text.text.split("."):
if reg_pos.search(s) and (not reg_neg.search(s)) and (not reg_equivocation.search(s)):
return ABNORMAL_VAL
return ABSTAIN_VAL
def LF_fracture(report):
"""
Looking for evidence of fracture
"""
reg_pos = re.compile('fracture',re.IGNORECASE)
reg_neg = re.compile('(No|without|resolution)\\s([a-zA-Z0-9\-,_]*\\s){0,10}fracture',re.IGNORECASE)
for s in report.report_text.text.split("."):
if reg_pos.search(s) and (not reg_neg.search(s)) and (not reg_equivocation.search(s)):
return ABNORMAL_VAL
return ABSTAIN_VAL
def LF_calcinosis(report):
"""
Looking for evidence of calcinosis
"""
reg_01 = re.compile('calc',re.IGNORECASE)
reg_02 = re.compile('arter|aorta|muscle|tissue',re.IGNORECASE)
for s in report.report_text.text.split("."):
if reg_01.search(s) and reg_02.search(s):
return ABNORMAL_VAL
return ABSTAIN_VAL
def LF_degen_spine(report):
"""
Looking for degenerative spinal disease
"""
reg_01 = re.compile('degen',re.IGNORECASE)
reg_02 = re.compile('spine',re.IGNORECASE)
for s in report.report_text.text.split("."):
if reg_01.search(s) and reg_02.search(s):
return ABNORMAL_VAL
return ABSTAIN_VAL
def LF_lung_hypoinflation(report):
"""
Looking for lung hypoinflation
"""
#reg_01 = re.compile('lung|pulmonary',re.IGNORECASE)
reg_01 = re.compile('hypoinflation|collapse|(low|decrease|diminish)\\s([a-zA-Z0-9\-,_]*\\s){0,4}volume',re.IGNORECASE)
for s in report.report_text.text.split("."):
if reg_01.search(s):
return ABNORMAL_VAL
return ABSTAIN_VAL
def LF_lung_hyperdistention(report):
"""
Looking for lung hyperdistention
"""
#reg_01 = re.compile('lung|pulmonary',re.IGNORECASE)
reg_01 = re.compile('increased volume|hyperexpan|inflated',re.IGNORECASE)
for s in report.report_text.text.split("."):
if reg_01.search(s):
return ABNORMAL_VAL
return ABSTAIN_VAL
def LF_catheters(report):
"""
Looking for mentions of catheters
"""
reg_01 = re.compile(' line|catheter|PICC',re.IGNORECASE)
for s in report.report_text.text.split("."):
if reg_01.search(s):
return ABNORMAL_VAL
return ABSTAIN_VAL
def LF_surgical(report):
"""
Looking for mentions of surgical hardware
"""
reg_01 = re.compile('clip',re.IGNORECASE)
for s in report.report_text.text.split("."):
if reg_01.search(s):
return ABNORMAL_VAL
return ABSTAIN_VAL
def LF_granuloma(report):
"""
Looking for instances of granuloma
"""
reg_01 = re.compile('granuloma',re.IGNORECASE)
for s in report.report_text.text.split("."):
if reg_01.search(s):
return ABNORMAL_VAL
return ABSTAIN_VAL
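# A minimal sketch of how the labeling functions above could be applied in
# bulk. It assumes only the candidate interface the LFs themselves use
# (`c.report_text.text`); `demo_cxr_label_matrix` is an illustrative helper,
# not part of the original labeling pipeline.
def demo_cxr_label_matrix(candidates):
    """Apply each CXR labeling function to each candidate; one row per
    candidate, one LF vote per column."""
    lfs = [LF_negative_inflection_words_in_report,
           LF_is_seen_or_noted_in_report, LF_disease_in_report,
           LF_recommend_in_report, LF_mm_in_report,
           LF_abnormal_disease_terms_in_report, LF_consistency_in_report,
           LF_gross, LF_normal, LF_positive_MeshTerm, LF_fracture,
           LF_calcinosis, LF_degen_spine, LF_lung_hypoinflation,
           LF_lung_hyperdistention, LF_catheters, LF_surgical, LF_granuloma]
    return [[lf(c) for lf in lfs] for c in candidates]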
|
cross-modal-ws-demo-master
|
lfs/lfs_cxr.py
|
import re
# Setting LF output values
ABSTAIN_VAL = 0
HEMORRHAGE_VAL = 1
NO_HEMORRHAGE_VAL = -1
######################################################################################################
##### LABELING FUNCTIONS (LFs)
######################################################################################################
def LF_normal_V01(report):
"""
Checking for specific normal phrase
"""
r = re.compile('Normal CT of the Head',re.IGNORECASE)
for s in report.report.sentences:
if r.search(s.text):
return NO_HEMORRHAGE_VAL
return ABSTAIN_VAL
def LF_normal_V02(report):
"""
Checking for specific normal phrase
"""
r = re.compile('No acute intracranial abnormality',re.IGNORECASE)
for s in report.report.sentences:
if r.search(s.text):
return NO_HEMORRHAGE_VAL
return ABSTAIN_VAL
def LF_normal_V03(report):
"""
Checking for specific normal phrase
"""
r = re.compile('Normal noncontrast and postcontrast CT',re.IGNORECASE)
for s in report.report.sentences:
if r.search(s.text):
return NO_HEMORRHAGE_VAL
return ABSTAIN_VAL
def LF_normal_V04(report):
"""
Checking for specific normal phrase
"""
r = re.compile('Negative acute CT of the head',re.IGNORECASE)
for s in report.report.sentences:
if r.search(s.text):
return NO_HEMORRHAGE_VAL
return ABSTAIN_VAL
def LF_positive_hemorrhage(report):
"""
Checking for words indicating hemorrhage
"""
r1 = re.compile('(No|without|resolution)\\s([\S]*\\s){0,10}(hemorrhage)',re.IGNORECASE)
r = re.compile('hemorrhage',re.IGNORECASE)
for s in report.report.sentences:
if r.search(s.text) and (not r1.search(s.text)):
return HEMORRHAGE_VAL
return ABSTAIN_VAL
def LF_positive_hematoma(report):
"""
Checking for words indicating hematoma
"""
r1 = re.compile('(No|without|resolution|scalp|subgaleal)\\s([\S]*\\s){0,10}(hematoma)',re.IGNORECASE)
r = re.compile('hematoma',re.IGNORECASE)
for s in report.report.sentences:
if r.search(s.text) and (not r1.search(s.text)):
return HEMORRHAGE_VAL
return ABSTAIN_VAL
def LF_hemorrhage_hi_cover(report):
"""
Checking for both hemorrhage and hematoma
"""
if LF_positive_hemorrhage(report) == 0 and LF_positive_hematoma(report) == 0:
return NO_HEMORRHAGE_VAL
return HEMORRHAGE_VAL
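# A minimal sketch of applying the head-CT labeling functions to a single
# report candidate. It assumes the same `report.report.sentences` interface
# the LFs above use; `demo_hct_votes` is an illustrative helper.
def demo_hct_votes(report):
    """Collect one vote per labeling function for a head-CT report."""
    lfs = [LF_normal_V01, LF_normal_V02, LF_normal_V03, LF_normal_V04,
           LF_positive_hemorrhage, LF_positive_hematoma,
           LF_hemorrhage_hi_cover]
    return [lf(report) for lf in lfs]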
|
cross-modal-ws-demo-master
|
lfs/lfs_hct.py
|
import os, requests, sys, unittest
sys.path.insert(1, os.path.join(sys.path[0], '..'))
import cPickle
from snorkel.parser import *
ROOT = os.environ['SNORKELHOME']
class TestParsers(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.sp = SentenceParser()
@classmethod
def tearDownClass(cls):
cls.sp._kill_pserver()
def test_parser_1(self):
"""Tests the XMLDocParser and SentenceParser subclasses"""
# Load correct parses
with open(ROOT + '/test/data/CDR_TestSet_docs.pkl', 'rb') as f:
gold_docs = cPickle.load(f)
with open(ROOT + '/test/data/CDR_TestSet_sents.pkl', 'rb') as f:
gold_sents = cPickle.load(f)
# Set up the doc parser
xml_parser = XMLDocParser(
path=ROOT + '/test/data/CDR_TestSet.xml',
doc='.//document',
text='.//passage/text/text()',
id='.//id/text()',
keep_xml_tree=False)
sent_parser = SentenceParser()
corpus = Corpus(xml_parser, sent_parser, max_docs=20)
print len(corpus.get_docs())
print len(corpus.get_sentences())
self.assertEqual(corpus.get_docs(), gold_docs)
self.assertEqual(corpus.get_sentences(), gold_sents)
if __name__ == '__main__':
unittest.main()
|
ddbiolib-master
|
test/parser_tests.py
|
"""
Bioinformatics Tools for Data Programming
==================================
ddioblib is a library for creating and interaction with
onotologies to create forms for weak supervision for
machine learning systems like DeepDive and Snorkel
See http://deepdive.stanford.edu/ for more information
"""
__version__ = "0.10.dev"
|
ddbiolib-master
|
ddbiolib/__init__.py
|
from .serialization import *
|
ddbiolib-master
|
ddbiolib/parsers/__init__.py
|
import os
import sys
import glob
import codecs
import cPickle
import numpy as np
#################################################
# Parser Serializers
#################################################
class SerializedParser(object):
def __init__(self,parser,encoding="utf-8"):
'''Interface for persisting mark-up data generated by parser object
Parameters
----------
parser : object
DDlite parser object.
encoding: string, "utf-8" by default
String encoding
'''
self.parser = parser
self.encoding = encoding
self.doc_cache = {}
def _write(self,sents,doc_id):
raise NotImplementedError()
def _read(self,doc_id):
raise NotImplementedError()
def parse(self,s,doc_id):
'''Parse provided string or use cached version'''
if doc_id in self.doc_cache:
sents = self._read(doc_id)
else:
try:
sents = [s for s in self.parser.parse(s,doc_id=doc_id)]
except Exception as e:
print "Fatal Parser Error {} {}".format(doc_id,e)
sys.exit()
self._write(sents,doc_id)
self.doc_cache[doc_id] = 0
return sents
class PickleSerializedParser(SerializedParser):
def __init__(self,parser,rootdir):
super(PickleSerializedParser, self).__init__(parser)
self.rootdir = rootdir
# initialize previously cached documents
filelist = glob.glob("{}*.pkl".format(self.rootdir))
self.doc_cache = {n.split("/")[-1].split(".")[0]:0 for n in filelist}
def _write(self,sents,doc_id):
'''write pickle file to disk'''
fname = "%s/%s.pkl" % (self.rootdir, doc_id)
with open(fname, 'w+') as f:
cPickle.dump(sents,f)
def _read(self,doc_id):
fname = "%s/%s.pkl" % (self.rootdir, doc_id)
with open(fname, 'rb') as f:
return cPickle.load(f)
class TextSerializedParser(SerializedParser):
'''Write results to offset indexed text file'''
def __init__(self,parser,rootdir):
super(TextSerializedParser, self).__init__(parser)
self.idx_dtype = np.int64
self.rootdir = rootdir
self._load_index()
self.datfile = "{}/docs.dat".format(self.rootdir)
def _load_index(self):
'''Loading index of doc_id to file offsets'''
fname = "{}/docs.idx".format(self.rootdir)
if os.path.exists(fname):
tmp = open(fname,"rU").readlines()
self.doc_idx = dict([line.strip().split("\t") for line in tmp])
self.doc_idx = {uid:int(idx) for uid,idx in self.doc_idx.items()}
self.doc_cache = {uid:0 for uid in self.doc_idx}
else:
self.doc_cache = {}
self.doc_idx = {}
def __del__(self):
try:
# store previous version if it exists
fname = "{}/docs.idx".format(self.rootdir)
if os.path.exists(fname):
os.rename(fname,fname+".bak")
# write index to disk
with open(fname,"w") as f:
for uid,idx in self.doc_idx.items():
f.write("{}\t{}\n".format(uid,idx))
except Exception as e:
print "Error writing index to disk {}".format(e)
def _write(self, sents, doc_id):
with codecs.open(self.datfile,"a+",self.encoding) as f:
self.doc_idx[doc_id] = f.tell()
f.write("{}\n".format(sents))
def _read(self,doc_id):
idx = self.doc_idx[doc_id]
with codecs.open(self.datfile,"r",self.encoding) as f:
f.seek(idx)
return eval(f.readline())
class DatabaseSerializedParser(SerializedParser):
def __init__(self, parser, host, user, database, password=""):
super(DatabaseSerializedParser, self).__init__(parser)
self.params = (host,user,database,password)
def __getitem__(self,doc_id):
'''query database'''
raise NotImplementedError()
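# A minimal sketch of the caching behaviour, assuming a toy parser object that
# exposes parse(text, doc_id=...). `WhitespaceParser` and `demo_cached_parse`
# are illustrative names only; any parser with that interface should work.
class WhitespaceParser(object):
    """Toy parser: split on '.' and yield (doc_id, index, sentence) tuples."""
    def parse(self, s, doc_id=None):
        for i, sent in enumerate(s.split(".")):
            if sent.strip():
                yield (doc_id, i, sent.strip())
def demo_cached_parse(rootdir="/tmp/parser_cache/"):
    """First call parses and writes a pickle; later calls with the same
    doc_id are served from the on-disk cache."""
    if not os.path.exists(rootdir):
        os.makedirs(rootdir)
    parser = PickleSerializedParser(WhitespaceParser(), rootdir=rootdir)
    return parser.parse("First sentence. Second sentence.", doc_id="doc001")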
|
ddbiolib-master
|
ddbiolib/parsers/serialization.py
|
from .umls import *
from .bioportal import *
from .ctd import *
from .specialist import *
|
ddbiolib-master
|
ddbiolib/ontologies/__init__.py
|
from .base import *
|
ddbiolib-master
|
ddbiolib/ontologies/bioportal/__init__.py
|
import unicodecsv
def load_bioportal_dictionary(filename, ignore_case=True):
'''BioPortal Ontologies
http://bioportal.bioontology.org/'''
reader = unicodecsv.reader(open(filename,"rb"), delimiter=',', quotechar='"', encoding='utf-8')
d = [line for line in reader]
dictionary = {}
for line in d[1:]:
#line = line.strip()
if not line:
continue
row = dict(zip(d[0],line))
pref_label = row["Preferred Label"].lower() if ignore_case else row["Preferred Label"]
dictionary[pref_label] = 1
dictionary.update({t.lower() if ignore_case else t:1 for t in row["Synonyms"].split("|")})
# HACK
if "" in dictionary:
del dictionary[""]
return dictionary
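# A minimal usage sketch: the CSV path below is a hypothetical BioPortal
# export with "Preferred Label" and "Synonyms" columns, as expected by
# load_bioportal_dictionary above.
if __name__ == "__main__":
    disease_dict = load_bioportal_dictionary("data/DOID.csv", ignore_case=True)
    print(len(disease_dict))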
|
ddbiolib-master
|
ddbiolib/ontologies/bioportal/base.py
|
from .base import *
|
ddbiolib-master
|
ddbiolib/ontologies/specialist/__init__.py
|
'''
The SPECIALIST Lexicon
"The SPECIALIST lexicon is a large syntactic lexicon of biomedical and general English,
designed/developed to provide the lexical information needed for the SPECIALIST
Natural Language Processing System (NLP) which includes SemRep, MetaMap, and the
Lexical Tools. It is intended to be a general English lexicon that includes many
biomedical terms. Coverage includes both commonly occurring English words and
biomedical vocabulary from a variety of sources. These include (not limited to)
MEDLINE citation records, terms in the Dorland's Illustrated Medical dictionary,
the 10,000 most frequent words listed in the American Heritage Word Frequency book
and the 2,000 lexical items used in the controlled definitions of Longman's Dictionary
of Contemporary English. The lexicon entry for each lexical item (word or term) records
the syntactic, morphological (inflection and derivation), and orthographic
(spelling variants) information needed by the SPECIALIST NLP System."
https://lexsrv3.nlm.nih.gov/LexSysGroup/Projects/lexicon/current/web/index.html
@author: jason-fries [at] stanford [dot] edu
'''
import os
from ...utils import download
from urllib2 import urlopen, URLError, HTTPError
def download_dataset(version="2016"):
'''Download SPECIALIST files.
TODO: get full dataset. for now only get abbreviations'''
url = "https://lexsrv3.nlm.nih.gov/LexSysGroup/Projects/lexicon/{}/release/LEX/LRABR"
url = url.format(version)
outfname = "{}/data/{}".format(os.path.dirname(__file__),os.path.basename(url))
if not os.path.exists(outfname):
download(url,outfname)
class SpecialistLexicon(object):
def __init__(self):
download_dataset()
self._abbrv2text,self._text2abbrv = self._parse_lrabr()
def _parse_lrabr(self):
'''Load UMLS SPECIALIST Lexicon of abbreviations. Format:
E0000048|AA|acronym|E0006859|achievement age|
'''
fname = "{}/data/{}".format(os.path.dirname(__file__),"LRABR")
d = [line.strip().strip("|").split("|") for line in open(fname, 'rb').readlines()]
abbrv2text,text2abbrv = {},{}
for row in d:
uid1,abbrv,atype,uid2,text = row
text = text.lower()
if atype not in ["acronym","abbreviation"]:
continue
if abbrv not in abbrv2text:
abbrv2text[abbrv] = {}
if text not in text2abbrv:
text2abbrv[text] = {}
abbrv2text[abbrv][text] = 1
text2abbrv[text][abbrv] = 1
return abbrv2text,text2abbrv
@property
def abbrv2text(self):
return self._abbrv2text
@property
def text2abbrv(self):
return self._text2abbrv
if __name__ == "__main__":
s = SpecialistLexicon()
print len(s.abbrv2text)
print len(s.text2abbrv)
print s.abbrv2text["AMOX"]
|
ddbiolib-master
|
ddbiolib/ontologies/specialist/base.py
|
# Comparative Toxicogenomics Database
# http://ctdbase.org/downloads/
from .base import *
|
ddbiolib-master
|
ddbiolib/ontologies/ctd/__init__.py
|
import codecs
def load_ctd_dictionary(filename, ignore_case=True):
'''Comparative Toxicogenomics Database'''
d = {}
header = ['DiseaseName', 'DiseaseID', 'AltDiseaseIDs', 'Definition',
'ParentIDs', 'TreeNumbers', 'ParentTreeNumbers', 'Synonyms',
'SlimMappings']
synonyms = {}
dnames = {}
with codecs.open(filename,"rU","utf-8",errors="ignore") as fp:
for i,line in enumerate(fp):
line = line.strip()
if line[0] == "#":
continue
row = line.split("\t")
if len(row) != 9:
continue
row = dict(zip(header,row))
synonyms.update( dict.fromkeys(row["Synonyms"].strip().split("|")))
dnames[row["DiseaseName"].strip()] = 1
return {term.lower() if ignore_case else term:1 for term in synonyms.keys()+dnames.keys() if term}
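# A minimal usage sketch: the path below is a hypothetical local copy of the
# CTD disease vocabulary (tab-separated, see http://ctdbase.org/downloads/).
if __name__ == "__main__":
    ctd_diseases = load_ctd_dictionary("data/CTD_diseases.tsv")
    print(len(ctd_diseases))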
|
ddbiolib-master
|
ddbiolib/ontologies/ctd/base.py
|
from collections import namedtuple
DatabaseConfig = namedtuple("DatabaseConfig",["host","username","dbname","password"])
DEFAULT_UMLS_CONFIG = DatabaseConfig(host="127.0.0.1",
username="umls",
dbname="2014AB",
password="")
STANFORD_UMLS_CONFIG = DatabaseConfig(host="rocky.stanford.edu",
username="umls_guest",
dbname="publicUMLSsnapshot",
password="umls")
|
ddbiolib-master
|
ddbiolib/ontologies/umls/config.py
|
import os
import networkx as nx
from ...utils import database
class SemanticNetwork(object):
"""
The UMLS Semantic Network defines 133 semantic types and 54 relationships
found in the UMLS Metathesaurus. There are two branches: Entity and Event
https://www.ncbi.nlm.nih.gov/books/NBK9679/
"""
def __init__(self, config=None):
self.conn = database.MySqlConn(host=config.host,
user=config.username,
database=config.dbname,
password=config.password)
self.conn.connect()
self._networks = {}
self.abbrv, self.groups = self.__load_sem_groups() # load semantic group definitions
def __load_sem_groups(self):
'''UMLS Semantic Groups '''
module_path = os.path.dirname(__file__)
fname = "%s/data/SemGroups.txt" % (module_path)
abbrvs = {}
isas = {}
with open(fname,"rU") as f:
for line in f:
# format: ACTI|Activities & Behaviors|T051|Event
abbrv,parent,tid,child = line.strip().split("|")
abbrvs[abbrv] = parent
if parent not in isas:
isas[parent] = {}
isas[parent][child] = 1
isas = {parent:isas[parent].keys() for parent in isas}
return abbrvs,isas
def __build_semantic_network(self, relation="isa", directed=True,
simulate_root=True):
"""Load semantic network structure for a given relation."""
sql_tmpl = """SELECT C.STY_RL1,C.RL,C.STY_RL2 FROM SRSTR AS C INNER
JOIN SRDEF AS A ON C.STY_RL1=A.STY_RL
INNER JOIN SRDEF AS B ON C.STY_RL2=B.STY_RL
WHERE A.RT='STY' AND B.RT='STY' AND C.RL='%s'"""
query = sql_tmpl % (relation)
results = self.conn.query(query)
G = nx.DiGraph() if directed else nx.Graph()
for row in results:
child,_,parent = map(str,row)
G.add_edge(parent,child)
# Some concept graphs lack a shared root, so add one.
root_nodes = [node for node in G if not G.predecessors(node)]
if len(root_nodes) > 1 and simulate_root:
root = "ROOT"
for child in root_nodes:
G.add_edge(root,child)
return G
def graph(self, relation="isa", directed=True, simulate_root=True):
"""Build a semantic network (graph) given the provided relation"""
if relation not in self._networks:
self._networks[relation] = self.__build_semantic_network(relation,directed)
return self._networks[relation]
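# A minimal usage sketch: build the "isa" hierarchy and list the direct
# children of a semantic type. `config` is any DatabaseConfig pointing at a
# reachable UMLS MySQL instance (see config.py); `demo_isa_children` is an
# illustrative helper.
def demo_isa_children(config, sty="Disease or Syndrome"):
    network = SemanticNetwork(config=config)
    isa = network.graph("isa")
    return isa.successors(sty)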
|
ddbiolib-master
|
ddbiolib/ontologies/umls/semantic_network.py
|
from .config import *
from .metathesaurus import *
from .semantic_network import *
from .lf_factory import *
from .dictionary import *
|
ddbiolib-master
|
ddbiolib/ontologies/umls/__init__.py
|
'''
Noise-aware Dictionary
Create a snapshot of all UMLS terminology, broken down by
semantic type (STY) and source vocabulary (SAB).
Treat these as competing experts, generating labeling
functions for each
@author: jason-fries [at] stanford [dot] edu
'''
import os
import re
import bz2
import sys
import glob
import codecs
import itertools
import subprocess
from functools import partial
from collections import defaultdict
from ...utils import database
from .config import DEFAULT_UMLS_CONFIG
from .metathesaurus import MetaNorm
def dict_function_factory(dictionary,rvalue,name,ignore_case=True):
'''Dynamically create a labeling function object'''
def function_template(m):
mention = " ".join(m.get_attrib_tokens('words')).lower() if ignore_case else " ".join(m.get_attrib_tokens('words'))
return rvalue if mention in dictionary else 0
function_template.__name__ = name
return function_template
def build_umls_dictionaries(config,min_occur=1):
    '''Create UMLS dictionaries broken down by semantic type and
    source vocabulary. Term types (TTY) are used to filter out
    obsolete entries and to separate abbreviations from full terms.'''
module_path = os.path.dirname(__file__)
filelist = glob.glob("{}/data/cache/*/*.txt".format(module_path))
if len(filelist) > 0:
return
#mn = MetaNorm()
abbrv_tty = dict.fromkeys(['AA','AB','ACR'])
not_term_tty = dict.fromkeys(['AA','AB','ACR','AUN'])
conn = database.MySqlConn(config.host, config.username,
config.dbname, config.password)
conn.connect()
sql_tmpl = "{}/sql_tmpl/sty_sab_dictionaries.sql".format(module_path)
sql = "".join(open(sql_tmpl,"rU").readlines())
results = conn.query(sql)
abbrv = defaultdict(partial(defaultdict, defaultdict))
terms = defaultdict(partial(defaultdict, defaultdict))
for row in results:
text,sty,sab,tty = row
sty = sty.lower().replace(" ","_")
        # TODO: Move to interface so that this is done on reload
#text = mn.normalize(text) # normalize to remove junk characters
if tty in abbrv_tty:
abbrv[sty][sab][text] = 1
elif tty not in not_term_tty:
terms[sty][sab][text] = 1
for sty in abbrv:
for sab in abbrv[sty]:
outfname = "{}/data/cache/abbrvs/{}.{}.abbrv.txt".format(module_path,sty,sab)
t = sorted(abbrv[sty][sab].keys())
if len(t) < min_occur:
continue
with codecs.open(outfname,"w","utf-8",errors="ignore") as f:
f.write("\n".join(t))
subprocess.call(["bzip2", outfname]) # compress file
for sty in terms:
for sab in terms[sty]:
outfname = "{}/data/cache/terms/{}.{}.txt".format(module_path,sty,sab)
t = sorted(terms[sty][sab].keys())
if len(t) < min_occur:
continue
with codecs.open(outfname,"w","utf-8",errors="ignore") as f:
f.write("\n".join(t))
subprocess.call(["bzip2", outfname]) # compress file
class UmlsNoiseAwareDict(object):
'''Use UMLS semantic types and source vocabulary information
to create labeling functions for providing supervision for
tagging tasks'''
def __init__(self, positive=[], negative=[], name="", rm_sab=[],
rootdir=None, ignore_case=True, normalize=True):
module_path = os.path.dirname(__file__)
self.rootdir = rootdir if rootdir else "{}/data/cache/{}/".format(module_path,name)
self.positive = [self._norm_sty_name(x) for x in positive]
self.negative = [self._norm_sty_name(x) for x in negative]
self.name = name
self.encoding = "utf-8"
self.ignore_case = ignore_case
self._dictionary = self._load_dictionaries(normalize)
self.rm_sab = rm_sab
def _norm_sty_name(self,s):
return s.lower().replace(" ","_")
def _load_dictionaries(self,normalize):
        '''Load dictionaries based on the provided positive/negative
        entity examples'''
d = defaultdict(defaultdict)
filelist = glob.glob("{}*.txt.bz2".format(self.rootdir))
mn = MetaNorm()
for fpath in filelist:
fname = fpath.split("/")[-1].rstrip(".txt.bz2")
i = fname.index(".")
sty,sab = fname[0:i], fname[i+1:].rstrip(".abbrv")
dict_type = "terms" if "abbrv" not in fname else "abbrv"
            # skip semantic types we don't flag as positive or negative
if sty not in self.positive and sty not in self.negative:
continue
terms = []
with bz2.BZ2File(fpath,"rb") as f:
for line in f:
if not line.strip():
continue
try:
line = line.strip().decode('utf-8')
t = line.strip().lower() if self.ignore_case else line.strip()
if normalize:
t = mn.normalize(t)
terms += [t]
except:
print>>sys.stderr,"Warning: unicode conversion error"
if sty in d and sab in d[sty]:
d[sty][sab].update(dict.fromkeys(terms))
else:
d[sty][sab] = dict.fromkeys(terms)
return d
def get_semantic_types(self,term):
'''Return all matching semantic types for this term '''
stys = {}
for sty in self._dictionary:
for sab in self._dictionary[sty]:
if term in self._dictionary[sty][sab]:
stys[sty] = stys.get(sty,0) + 1
return stys
def get_dictionary(self,semantic_types=[],source_vocab=[], min_size=1):
return self.dictionary(semantic_types,source_vocab, min_size)
def dictionary(self, semantic_types=[], source_vocab=[], min_size=1,
positive_only=True):
        '''Create a single dictionary from the provided semantic types
        and source vocabularies. If either list is empty, all available
        types/vocabularies are used.
'''
# normalize semantic type names
semantic_types = [self._norm_sty_name(x) for x in semantic_types]
semantic_types = self._dictionary if not semantic_types else semantic_types
sabs = list(itertools.chain.from_iterable([self._dictionary[sty].keys() for sty in self._dictionary]))
source_vocab = dict.fromkeys(sabs) if not source_vocab else source_vocab
# filter ontologies by size and source
d = []
for sty in self._dictionary:
if sty not in self.positive and positive_only:
continue
if sty not in semantic_types:
continue
for sab in self._dictionary[sty]:
if sab not in source_vocab:
continue
if len(self._dictionary[sty][sab]) <= min_size:
continue
if "ears" in self._dictionary[sty][sab]:
print sty,sab
d += self._dictionary[sty][sab].keys()
return dict.fromkeys(d)
def lfs(self,min_size=0):
'''Create labeling functions for each semantic type/source vocabulary'''
for sty in self._dictionary:
for sab in self._dictionary[sty]:
if sab in self.rm_sab:
continue
if len(self._dictionary[sty][sab]) <= min_size:
continue
label = "pos" if sty in self.positive else "neg"
prefix = "{}_".format(self.name) if self.name else ""
func_name = "LF_{}{}_{}_{}".format(prefix,sty,sab,label)
rvalue = 1 if label=="pos" else -1
yield dict_function_factory(self._dictionary[sty][sab],rvalue,
func_name,self.ignore_case)
if __name__ == "__main__":
# create dictionary cache
build_umls_dictionaries(DEFAULT_UMLS_CONFIG)
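# A minimal sketch of generating dictionary-backed labeling functions for a
# disease-tagging task from the cached UMLS dictionaries built above. The
# semantic-type names and min_size threshold are illustrative choices.
def demo_disease_lfs():
    umls_terms = UmlsNoiseAwareDict(positive=["Disease or Syndrome"],
                                    negative=["Gene or Genome"],
                                    name="terms", ignore_case=True)
    return list(umls_terms.lfs(min_size=100))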
|
ddbiolib-master
|
ddbiolib/ontologies/umls/lf_factory.py
|
import re
import os
import networkx as nx
from ...utils import database
from .config import DEFAULT_UMLS_CONFIG
from .semantic_network import SemanticNetwork
class Metathesaurus(object):
"""
This class hides a bunch of messy SQL queries that interface with a UMLS
Metathesaurus database instance, snapshots available at:
https://www.nlm.nih.gov/research/umls/licensedcontent/umlsknowledgesources.html
The NLM also provides a REST API for similar functionality, but since we
    look at global term sets, network structures, etc., it's better to query
    from a local UMLS database instance.
All source abbreviations:
https://www.nlm.nih.gov/research/umls/knowledge_sources/metathesaurus/release/abbreviations.html
TODO: optimize queries. make less hacky overall
"""
def __init__(self, config, source_vocab=[]):
self.conn = database.MySqlConn(host=config.host,
user=config.username,
database=config.dbname,
password=config.password)
self.conn.connect()
self.norm = MetaNorm()
self.semantic_network = SemanticNetwork(config)
self.source_vocab = source_vocab # source vocabularies (SAB)
self._concepts = {}
self._networks = {}
self.term_types = None
self.source_vocab_defs = None
self._sql_tmpl = {}
def _load_sql_tmpl(self,fname):
'''Load SQL templates'''
if fname in self._sql_tmpl:
return self._sql_tmpl[fname]
module_path = os.path.dirname(__file__)
with open("{}/sql_tmpl/{}".format(module_path,fname),"rU") as f:
tmpl = " ".join(f.readlines())
self._sql_tmpl[fname] = tmpl
return tmpl
def concept_graph(self, level="CUI", relation=["CHD"],
source_vocab=[], simulate_root=True):
"""Build a concept graph for the target relation.
The REL field contains the relation type and RELA encodes the relation
attribute (e.g., may_treat, part_of). Note - relationships are
incomplete and under-specified in many cases.
Parameters
----------
level : string, optional (default=CUI)
Define nodes as concepts (CUI) or atoms (AUI).
relation : array, optional (default=["PAR","CHD"])
            Relationship type. Common relation sets include hierarchical
            relations: PAR/CHD (parent/child) and RB/RN (related broadly/narrowly)
            or non-hierarchical SY/RQ (synonym).
source_vocab : array, optional (default=[])
Choice of source vocabulary (SAB) impacts network structure. By
default, use all UMLS source vocabularies. Useful isolated sets
include: MSH, SNOMEDCT_US, RXNORM
simulate_root : Some concept graphs lack a shared root node which
breaks several measures. This adds a simulated root.
"""
# load cached graph
key = "%s_%s_%s" % (level,".".join(source_vocab),".".join(relation))
if key in self._networks:
return self._networks[key]
# source vocabulary (override class default)
sab = self._source_vocab_sql(source_vocab) if source_vocab else \
self._source_vocab_sql(self.source_vocab)
sab = "" if not sab else sab + " AND"
rel_types ="(%s)" % " OR ".join(["REL='%s'" % rel for rel in relation])
sql = """SELECT %s1,%s2,REL,RELA FROM MRREL
WHERE %s %s;"""
sql = sql % (level,level,sab,rel_types)
results = self.conn.query(sql)
G = nx.DiGraph()
for row in results:
parent,child,rel,rela = row
G.add_edge(parent,child,rel=rel,attribute=rela)
self._networks[key] = G
return G
def _source_vocab_sql(self,source_vocab):
"""Build source vocabulary sql"""
if source_vocab:
sab = " OR ".join(["SAB='%s'" % x for x in source_vocab])
sab = "(%s)" % sab
else:
sab = ""
return sab
def get_source_vocabulary_defs(self):
"""Return dictionary of UMLS source vocabularies descriptions."""
if self.source_vocab_defs:
return self.source_vocab_defs
sql = "SELECT RSAB,SON,SF,SVER,CENC FROM MRSAB"
results = self.conn.query(sql)
summary = {}
for row in results:
sab,name,key,ver,enc = row
summary[sab] = summary.get(sab,[]) + [name]
self.source_vocab_defs = summary
return summary
def get_relations_list(self, counts=False):
""""Get distinct UMLS relation types and their occurrence count."""
sql = "SELECT DISTINCT(RELA),count(RELA) FROM MRREL %s GROUP BY RELA"
# restrict source vocabularies
if self.source_vocab:
sab = " OR ".join(["SAB='%s'" % x for x in self.source_vocab])
sql = sql % ("WHERE (%s)" % sab)
else:
sql = sql % ""
results = self.conn.query(sql)
return results if counts else zip(*results)[0]
def get_tty_list(self,ignore=['OAS','OAP','OAF','FN','OF',
'MTH_OF','MTH_IS','LPN','AUN']):
if self.term_types:
return self.term_types
sql = "SELECT distinct TTY FROM MRCONSO;"
results = self.conn.query(sql)
ignore = dict.fromkeys(ignore)
        self.term_types = [tty[0] for tty in results if tty[0] not in ignore]
return self.term_types
def get_semtypes_list(self, counts=False):
""" Get distinct UMLS semantic types and their occurrence counts."""
sql = "SELECT DISTINCT(STY),count(STY) FROM MRSTY GROUP BY STY"
results = self.conn.query(sql)
return results if counts else zip(*results)[0]
def match_concepts(self, s, source_vocab=[], match_substring=False):
"""Find exact matches to provided string and return CUI set"""
if not match_substring:
sql = "SELECT DISTINCT(CUI) FROM MRCONSO WHERE %s STR='%s'"
else:
sql = "SELECT DISTINCT(CUI) FROM MRCONSO WHERE %s STR LIKE '%s%%'"
# override object default source vocabulary?
if source_vocab:
sab = self._source_vocab_sql(source_vocab)
else:
sab = self._source_vocab_sql(self.source_vocab)
sab = "" if not sab else sab + " AND"
sql = sql % (sab,s)
results = self.conn.query(sql)
return [cui[0] for cui in results]
def dictionary(self, semantic_type, source_vocab=[], cui_dict=False,
include_children=True, exclude_subtrees=[],
term_types=[]):
"""Build dictionary of UMLS entities
Parameters
----------
semantic_type: string
Target UMLS semantic type root
source_vocab: array
Override object source vocabularies (SAB) used for building
lexical variations dictionary.
exclude_subtrees: array
List of subtree root nodes to remove from ontology
cui_dict: boolean
Instead of strings, return dictionary of CUIs
include_children: boolean
Include all child nodes from target semantic type. This should
always remain True
term_types: array
Ignore certain term types (see docs/concept_schema.txt)
"""
tmpl = self._load_sql_tmpl("dictionary.sql")
term_types = term_types if term_types else self.get_tty_list()
network = self.semantic_network.graph("isa")
# exclude subtrees
if include_children:
children = [node for node in nx.bfs_tree(network, semantic_type)]
else:
children = [semantic_type]
if exclude_subtrees:
rm = [nx.bfs_tree(network, subtree_root).nodes() for subtree_root in exclude_subtrees]
children = [node for node in children if node not in reduce(lambda x,y:x+y,rm)]
children = "STY IN ({})".format(",".join(map(lambda x:"'%s'" % x, children)))
# override object default source vocabulary?
if source_vocab:
sab = self._source_vocab_sql(source_vocab)
else:
sab = self._source_vocab_sql(self.source_vocab)
tty = "TTY IN (%s)" % ",".join(map(lambda x:"'%s'" % x, term_types))
sql = tmpl.format(sab,tty,children)
results = self.conn.query(sql)
# collapse to unique strings
if not cui_dict:
vocab = {self.norm.normalize(row[2]):1 for row in results}
if "" in vocab:
del vocab[""]
else:
vocab = {row[0]:1 for row in results}
return vocab.keys()
def concept(self, cui, source_vocab=[]):
"""Build UMLS concept, including abbreviations, synonyms, and preferred
forms."""
if cui in self._concepts:
return self._concepts[cui]
else:
return Concept(cui, self.conn, source_vocab)
def relations_on_cui(self, cui, source_vocab=[]):
"""Return set of relations associated with this concept."""
sql = """SELECT RUI,SL,RELA,CUI1,CUI2 FROM MRREL R
WHERE %s (R.CUI1="%s" OR R.CUI2="%s")
AND RELA!='NULL';"""
# override object default source vocabulary?
if source_vocab:
sab = self._source_vocab_sql(source_vocab)
else:
sab = self._source_vocab_sql(self.source_vocab)
sab = "" if not sab else sab + " AND"
sql = sql % (sab,cui,cui)
results = self.conn.query(sql)
return results
def relations_between_cui(self, cui1, cui2):
"""
Return set of relations between provided concepts
TODO
"""
pass
def relations(self, sty1, sty2, rela, source_vocab=[]):
"""Return set of relations between provided semantic types"""
# collect descendant/child types for each semantic type
network = self.semantic_network.graph("isa")
sty1 = [node for node in nx.bfs_tree(network, sty1)]
sty2 = [node for node in nx.bfs_tree(network, sty2)]
sty1 = " OR ".join(map(lambda x:"STY='%s'" % x, sty1))
sty2 = " OR ".join(map(lambda x:"STY='%s'" % x, sty2))
# override object default source vocabulary?
if source_vocab:
sab = self._source_vocab_sql(source_vocab)
else:
sab = self._source_vocab_sql(self.source_vocab)
sab = "" if not sab else sab + " AND"
sql = """
SELECT DISTINCT CUI2,CUI1 FROM
(SELECT * FROM MRREL WHERE RELA='%s') AS R,
(SELECT L.CUI FROM MRCONSO AS L, MRSTY AS LS WHERE (%s) AND L.CUI=LS.CUI) AS LARG,
(SELECT R.CUI FROM MRCONSO AS R, MRSTY AS RS WHERE (%s) AND R.CUI=RS.CUI) AS RARG
WHERE %s ((LARG.CUI=CUI2) AND (RARG.CUI=CUI1));"""
sql = sql % (rela,sty1,sty2,sab)
results = self.conn.query(sql)
return results
class TextNorm(object):
def __init__(self, function=lambda x:x):
self.function = function
def normalize(self,s):
# custom normalize function
s = self.function(s)
return s
def apply(self,s):
return self.normalize(s)
class MetaNorm(TextNorm):
"""
Normalize UMLS Metathesaurus concept strings.
"""
def __init__(self, function=lambda x:x):
super(MetaNorm, self).__init__(function)
# TTY in [OF,FN] suffixes
suffixes = ['qualifier value', 'life style', 'cell structure',
'context\\-dependent category', 'inactive concept',
'navigational concept', 'lck', 'record artifact',
'core metadata concept', 'substance', 'event',
'organism', 'person', 'attribute', 'procedure',
'tumor staging', 'a', 'cell', 'chloroaniline',
'product', 'specimen', 'observable entity',
'racial group', 'si', 'namespace concept',
'environment', 'social concept', 'ras',
'special concept', 'staging scale', 'disorder',
'geographic location', 'occupation', 'ethnic group',
'body structure', 'situation', 'physical force',
'trans', 'finding', 'epoxymethano', 'linkage concept',
'assessment scale', 'metadata', 'link assertion',
'dithiocarbamates', 'foundation metadata concept',
'morphologic abnormality', 'physical object']
self.of_fn_rgx = "\(({})\)$".format("|".join(suffixes))
def normalize(self,s):
"""Heuristics for stripping non-essential UMLS string clutter"""
s = s.replace("--"," ")
s = re.sub("[(\[<].+[>)\]]$", "", s)
s = re.sub("(\[brand name\]|[,]* NOS)+","", s).strip()
s = s.strip().strip("_").strip(":")
s = re.sub("(\[.{1}\])+","", s).strip()
s = re.sub("\-RETIRED\-$","",s).strip()
# normalize TTY in [OF,FN]
s = re.sub(self.of_fn_rgx,"",s).strip()
# custom normalize function
s = self.function(s)
return s
class Concept(object):
def __init__(self, cui, conn, source_vocab=[]):
self.cui = cui
self.source_vocab = source_vocab
self.conn = conn
# see docs/concept_schema for an explanation of these term type flags
self.ignore_tty ={x:0 for x in ['OAS','OAP','OAF','OAS','FN','OF',
'MTH_OF','MTH_IS','CSN',
'PCE','N1','AUN','IS']}
self.synset = {x:0 for x in ['SY','SYN','SS','VSY','USY','RSY']}
self.abbrvset = {x:0 for x in ['AA','AB','ACR']}
self.term_types = ",".join(["'%s'" % tty for tty in self.ignore_tty])
sql = """SELECT TTY,STR,ISPREF FROM MRCONSO
WHERE CUI='%s' AND TTY NOT IN (%s)"""
sql = sql % (self.cui,self.term_types)
results = self.conn.query(sql)
self._definition = {}
self._preferred, self._terms = {},{}
for row in results:
tty,string,ispref = row
self._terms[string] = tty
def __repr__(self):
return "[{}] {}".format(self.cui, self.preferred_term()[0])
def definition(self,source_vocab=[]):
"""There are often multiple definitions conditioned on source vocabulary."""
source_vocab = self.source_vocab if not source_vocab else source_vocab
sab = "(%s)" % (" OR ".join(["SAB='%s'" % x for x in source_vocab]))
sab = "" if not source_vocab else sab + " AND"
sql = "SELECT DEF FROM MRDEF WHERE %s CUI='%s'" % (sab,self.cui)
results = self.conn.query(sql)
return results
def preferred_term(self):
"""Preferred name. Don't know what this practically translates too since
it isn't unique with concepts, atoms or source ontologies."""
sql = """SELECT STR FROM MRCONSO WHERE CUI='%s' AND
STT='PF' AND ISPREF='Y' AND TS='P';""" % self.cui
results = self.conn.query(sql)
return [x[0] for x in results]
def synonyms(self):
"""UMLS defines several classes of synonymy, use only subset"""
return [s for s in self._terms
if self._terms[s] in self.synset]
def abbrvs(self):
"""Abbreviations and acronyms"""
return [s for s in self._terms
if self._terms[s] in self.abbrvset]
def all_terms(self):
"""All unique terms linked to this concept"""
return list(set(self._terms))
def print_summary(self):
"""Ugly function to print concept object attributes."""
print("-----------------------------")
# use longest string for description
definition = self.definition()
if definition:
definition = sorted(definition,key=lambda x:len(x[0]),reverse=1)[0][0]
else:
definition = "N/A"
fmt = '{0:16} {1:>1}'
print(fmt.format("CUI:",self.cui))
print(fmt.format("Definition:", definition))
print(fmt.format("Preferred Term:", ", ".join(self.preferred_term())))
print(fmt.format("Synonyms:", ", ".join(self.synonyms())))
print(fmt.format("Abbreviations:", ", ".join(self.abbrvs())))
print(fmt.format("TERMS:", ", ".join(self.all_terms()) ))
print("-----------------------------")
if __name__ == "__main__":
meta = Metathesaurus(config=DEFAULT_UMLS_CONFIG)
print meta.concept(cui="C0699142")
|
ddbiolib-master
|
ddbiolib/ontologies/umls/metathesaurus.py
|
'''
UMLS Dictionary
TODO: all dictionaries should be persisted in Snorkel's
eventual "context" ORM interface
@author: jason-fries [at] stanford [dot] edu
'''
import os
import re
import bz2
import sys
import glob
import codecs
import itertools
from functools import partial
from collections import defaultdict
from .metathesaurus import TextNorm
def dict_lf_factory(dictionary, rvalue, name, ignore_case=True):
'''
    Dynamically create a labeling function object from a dictionary
'''
def func_template(m):
mention = m.sentence["text"][m.sent_char_start:m.sent_char_end+1]
if ignore_case:
mention = mention.lower()
#mention = " ".join(m.get_attrib_tokens('words')).lower()
# mention = m.sentence["text"][m.sent_char_start:m.sent_char_end+1]
return rvalue if mention in dictionary else 0
func_template.__name__ = name
return func_template
class UmlsDict(object):
def __init__(self, term_type, sem_types=[],
source_vocabs=[], rootdir=None,
ignore_case=False, normalizer=TextNorm()):
module_path = os.path.dirname(__file__)
self.rootdir = rootdir if rootdir else "{}/data/cache/{}/".format(module_path,term_type)
self.term_type = term_type
self.sem_types = [self._norm_sty_name(s) for s in sem_types]
self.source_vocabs = source_vocabs
self.encoding = "utf-8"
self.ignore_case = ignore_case
self._dictionary = self._load_dictionaries()
self.normalizer = normalizer
def _norm_sty_name(self,s):
return s.lower().replace(" ","_")
def _load_dictionaries(self):
'''
Load dictionaries
'''
d = defaultdict(defaultdict)
filelist = glob.glob("{}*.txt.bz2".format(self.rootdir))
for fpath in filelist:
fname = fpath.split("/")[-1].rstrip(".txt.bz2")
i = fname.index(".")
sty,sab = fname[0:i], fname[i+1:].rstrip(".abbrv")
#print sty,sab
# only include specified semantic types and source vocabularies
if self.sem_types and sty not in self.sem_types:
continue
if self.source_vocabs and sab not in self.source_vocabs:
continue
terms = []
with bz2.BZ2File(fpath,"rb") as f:
for line in f:
if not line.strip():
continue
try:
line = line.strip().decode('utf-8')
terms += [line.strip().lower() if self.ignore_case else line.strip()]
except:
print>>sys.stderr,"Warning: unicode conversion error"
if sty in d and sab in d[sty]:
d[sty][sab].update(dict.fromkeys(terms))
else:
d[sty][sab] = dict.fromkeys(terms)
return d
def __iter__(self):
for sty in self._dictionary:
for sab in self._dictionary[sty]:
for term in self._dictionary[sty][sab]:
t = self.normalizer.apply(term)
yield t.lower() if self.ignore_case else t
def __getitem__(self,key):
        '''Return a single normalized term dictionary for the given semantic
        type, collapsed across all loaded source vocabularies.'''
d = {}
key = self._norm_sty_name(key)
# collapse into single dictionary by semantic type
for sab in self._dictionary[key]:
d.update(self._dictionary[key][sab])
# apply normalization
d = {self.normalizer.apply(term).lower() if self.ignore_case else self.normalizer.apply(term):1 for term in d}
return d
def get_dictionary(self, sem_types=[]):
'''
Collapse into single dictionary
'''
d = []
sem_types = [self._norm_sty_name(s) for s in sem_types]
for sty in self._dictionary:
if sem_types and sty not in sem_types:
continue
for sab in self._dictionary[sty]:
d += self._dictionary[sty][sab].keys()
d = [self.normalizer.apply(term).lower() if self.ignore_case else \
self.normalizer.apply(term) for term in d]
return dict.fromkeys(d)
def get_lfs(self, rvalue_default, sem_types=[],
rvalue_map={}, min_size=1):
'''
Given a dictionary of dictionaries from the UMLS,
create a set of labeling functions.
'''
sem_types = [self._norm_sty_name(s) for s in sem_types]
for sty in self._dictionary:
if sem_types and sty not in sem_types:
continue
for sab in self._dictionary[sty]:
if len(self._dictionary[sty][sab]) <= min_size:
continue
rvalue = rvalue_default if (sty,sab) not in rvalue_map else rvalue_map[(sty,sab)]
label = "pos" if rvalue == 1 else "neg"
prefix = "{}_".format(self.term_type) if self.term_type else ""
func_name = "LF_{}{}_{}_{}".format(prefix,sty,sab,label)
yield dict_lf_factory(self._dictionary[sty][sab], rvalue,
func_name, self.ignore_case)
# OLD
class UmlsDictionary(object):
def __init__(self, term_type="*", sem_types=[],
source_vocabs=[], rootdir=None, ignore_case=False):
'''UMLS dictionary
Load cached dictionary files broken down by semantic type (sty),
source vocabulary (sab), and term type (abbrvs|terms)
Parameters
----------
term_type : str
abbrvs|terms|*, Default * is to load both
sem_types : array, optional
List of semantic types to load. Default is to load all.
source_vocabs : array, optional
Source input vocabularies. Default is to load all.
rootdir : string, optional
Source directory for cached dictionaries
ignore_case : boolean, optional
Lowercase all text if True, default = False
Attributes
----------
Examples
--------
'''
module_path = os.path.dirname(__file__)
self.rootdir = rootdir if rootdir else "{}/data/cache/{}/".format(module_path,term_type)
self.term_type = term_type
self.sem_types = [self._norm_sty_name(s) for s in sem_types]
self.source_vocabs = source_vocabs
self.encoding = "utf-8"
self.ignore_case = ignore_case
self._dictionary = self._load_dictionaries()
def _norm_sty_name(self,s):
return s.lower().replace(" ","_")
def _load_dictionaries(self):
'''Load dictionaries'''
d = defaultdict(defaultdict)
filelist = glob.glob("{}*.txt.bz2".format(self.rootdir))
for fpath in filelist:
fname = fpath.split("/")[-1].rstrip(".txt.bz2")
i = fname.index(".")
sty,sab = fname[0:i], fname[i+1:].rstrip(".abbrv")
# only include specified semantic types and source vocabularies
if self.sem_types and sty not in self.sem_types:
continue
if self.source_vocabs and sab not in self.source_vocabs:
continue
terms = []
with bz2.BZ2File(fpath,"rb") as f:
for line in f:
if not line.strip():
continue
try:
line = line.strip().decode('utf-8')
terms += [line.strip().lower() if self.ignore_case else line.strip()]
except:
print>>sys.stderr,"Warning: unicode conversion error"
if sty in d and sab in d[sty]:
d[sty][sab].update(dict.fromkeys(terms))
else:
d[sty][sab] = dict.fromkeys(terms)
return d
def get_sem_types(self,term):
'''Return all matching semantic types for this term '''
stys = {}
for sty in self._dictionary:
for sab in self._dictionary[sty]:
if term in self._dictionary[sty][sab]:
stys[sty] = stys.get(sty,0) + 1
return stys
def coverage(self,terms,ignore_case=True):
'''Score a list of terms by source dictionary coverage. We're
not doing a set cover optimization, just returning a ranked list
of percent covered by dictionary sty/sab'''
scores = {}
terms = [t.lower() if ignore_case else t for t in terms]
for sty in self._dictionary:
for sab in self._dictionary[sty]:
#dictionary = [t.lower() if ignore_case else t for t in self._dictionary[sty][sab]]
dictionary = self._dictionary[sty][sab]
intersection = sum([1 for t in terms if t in dictionary])
if intersection > 0:
scores[(sty,sab)] = intersection / float(len(terms))
return sorted(scores.items(),key=lambda x:x[1], reverse=1)
def get_dictionary(self):
'''Collapse into single dictionary'''
# normalize semantic type names
d = [[self._dictionary[sty][sab].keys() for sab in self._dictionary[sty]] for sty in self._dictionary]
d = map(lambda x:list(itertools.chain.from_iterable(x)),d)
return dict.fromkeys(itertools.chain.from_iterable(d))
if __name__ == "__main__":
abbrvs = UmlsDictionary("abbrvs",sem_types=["Disease or Syndrome"])
terms = UmlsDictionary("terms",sem_types=["Disease or Syndrome"],
source_vocabs=['SNOMEDCT_US'])
d = terms.get_dictionary()
print len(d)
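# A minimal sketch of turning the cached UMLS term dictionaries into labeling
# functions with a per-(semantic type, source vocabulary) polarity override.
# The semantic types, vocabularies, and rvalue_map shown are illustrative.
def demo_umls_lfs():
    umls = UmlsDict("terms", sem_types=["Disease or Syndrome"],
                    source_vocabs=["SNOMEDCT_US", "MSH"], ignore_case=True)
    rvalue_map = {("disease_or_syndrome", "MSH"): 1}
    return list(umls.get_lfs(rvalue_default=1, rvalue_map=rvalue_map,
                             min_size=10))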
|
ddbiolib-master
|
ddbiolib/ontologies/umls/dictionary.py
|
import os
import sys
import glob
import codecs
import subprocess
from collections import namedtuple
from ..utils import download
from ..corpora import Corpus,Document,DocParser
from ..parsers import PickleSerializedParser
class NcbiDiseaseParser(DocParser):
'''
The NCBI disease corpus is fully annotated at the mention and concept level
to serve as a research resource for the biomedical natural language processing
community. -- from http://www.ncbi.nlm.nih.gov/CBBresearch/Dogan/DISEASE/
793 PubMed abstracts
6,892 disease mentions
790 unique disease concepts
'''
def __init__(self, inputpath=None):
super(NcbiDiseaseParser, self).__init__(inputpath, "utf-8")
if not inputpath:
self.inputpath = "{}/data/ncbi_disease_corpus/".format(os.path.dirname(__file__))
else:
self.inputpath = inputpath
self._docs = {}
self._download()
self._preload()
def _download(self):
'''If corpus files aren't available, automatically download them'''
url = "http://www.ncbi.nlm.nih.gov/CBBresearch/Dogan/DISEASE/"
filelist = ["NCBItrainset_corpus.zip","NCBItestset_corpus.zip","NCBIdevelopset_corpus.zip"]
for fname in filelist:
outfname = "{}{}".format(self.inputpath,fname)
if os.path.exists(outfname):
continue
print("Downloading NCBI Disease Corpus dataset [{}]...".format(os.path.basename(outfname)))
download(url+fname,outfname)
cwd = os.getcwd()
os.chdir(self.inputpath)
subprocess.call(["unzip", outfname])
os.chdir(cwd)
def _preload(self):
'''Load corpus into memory'''
Annotation = namedtuple('Annotation', ['text_type','start','end','text','mention_type'])
# holdout set definitions
cvdefs = {"NCBIdevelopset_corpus.txt":"development",
"NCBItestset_corpus.txt":"testing",
"NCBItrainset_corpus.txt":"training"}
filelist = glob.glob("%s/*.txt" % self.inputpath)
for fname in filelist:
setname = cvdefs[fname.split("/")[-1]]
documents = []
with codecs.open(fname,"rU",self.encoding) as f:
doc = []
for line in f:
row = line.strip()
if not row and doc:
documents += [doc]
doc = []
elif row:
row = row.split("|") if (len(row.split("|")) > 1 and
row.split("|")[1] in ["t","a"]) else row.split("\t")
doc += [row]
documents += [doc]
for doc in documents:
pmid,title,abstract = doc[0][0],doc[0][2],doc[1][2]
text = "%s %s" % (title, abstract)
attributes = {"set":setname,"title":title,"abstract":abstract}
attributes["annotations"] = []
# load annotation tuples
for row in doc[2:]:
pmid, start, end, mention, mention_type, duid = row
start,end = int(start),int(end)
text_type = "T" if end <= len(title) else "A"
label = Annotation(text_type, start, end, mention, mention_type)
attributes["annotations"] += [label]
# warning if PMID is already loaded
if pmid in self._docs:
print >> sys.stderr, "WARNING: Duplicate PMID {} found".format(pmid)
doc = Document(pmid,text,attributes=attributes)
self._docs[pmid] = doc
def __getitem__(self,key):
return self._docs[key]
def _load(self, filename):
for pmid in self._docs:
yield self._docs[pmid]
def load_corpus(parser):
'''Load NCBI Disease Corpus
'''
# init cache directory and parsers
cache_dir = "{}/data/ncbi_disease_corpus/cache/".format(os.path.dirname(__file__))
doc_parser = NcbiDiseaseParser()
text_parser = PickleSerializedParser(parser,rootdir=cache_dir)
# create cross-validation set information
attributes = {"sets":{"testing":[],"training":[],"development":[]}}
for pmid in doc_parser._docs:
setname = doc_parser._docs[pmid].attributes["set"]
attributes["sets"][setname] += [pmid]
return Corpus(doc_parser,text_parser,attributes)
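# A minimal sketch of inspecting the NCBI Disease Corpus annotations using
# only NcbiDiseaseParser defined above (no sentence parser required).
# `demo_ncbi_annotation_counts` is an illustrative helper.
def demo_ncbi_annotation_counts():
    parser = NcbiDiseaseParser()
    counts = {}
    for pmid, doc in parser._docs.items():
        setname = doc.attributes["set"]
        counts[setname] = counts.get(setname, 0) + len(doc.attributes["annotations"])
    return counts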
|
ddbiolib-master
|
ddbiolib/datasets/ncbi_disease.py
|
from .ncbi_disease import *
from .ncbi_legacy import *
|
ddbiolib-master
|
ddbiolib/datasets/__init__.py
|
import os
import sys
import glob
import codecs
import subprocess
from collections import namedtuple
from ..utils import download
from ..corpora import Corpus,Document,DocParser
from ..parsers import PickleSerializedParser
class CdrParser(DocParser):
'''
The CDR disease corpus
-- f
1500 PubMed abstracts
X disease mentions
X chemical mentions
'''
def __init__(self, inputpath=None, entity_type="Disease"):
super(CdrParser, self).__init__(inputpath, "utf-8")
if not inputpath:
self.inputpath = "{}/data/CDR.Corpus.v010516/".format(os.path.dirname(__file__))
else:
self.inputpath = inputpath
self._docs = {}
# download CDR data
if not os.path.exists(self.inputpath):
self._download()
self._preload(entity_type)
def _download(self):
print>>sys.stderr,"CDR files require a Biocreative account. See http://www.biocreative.org/accounts/register/"
def _preload(self, et):
'''Load entire corpus into memory'''
Annotation = namedtuple('Annotation', ['text_type','start','end','text','mention_type'])
cvdefs = {"CDR_DevelopmentSet.PubTator.txt":"development",
"CDR_TestSet.PubTator.txt":"testing",
"CDR_TrainingSet.PubTator.txt":"training"}
filelist = glob.glob("%s/*.txt" % self.inputpath)
for fname in filelist:
setname = cvdefs[fname.split("/")[-1]]
documents = []
with codecs.open(fname,"rU",self.encoding) as f:
doc = []
for line in f:
row = line.strip()
if not row and doc:
documents += [doc]
doc = []
elif row:
row = row.split("|") if (len(row.split("|")) > 1 and
row.split("|")[1] in ["t","a"]) else row.split("\t")
doc += [row]
if doc:
documents += [doc]
for doc in documents:
pmid,title,abstract = doc[0][0],doc[0][2],doc[1][2]
text = "%s %s" % (title, abstract)
attributes = {"set":setname,"title":title,"abstract":abstract}
attributes["annotations"] = []
# load annotation tuples
for row in doc[2:]:
# relation
# ----------------------------
if len(row) <= 4:# or row[4] == "Chemical":
pmid,rela,m1,m2 = row
continue
# entity
# ----------------------------
if len(row) == 6:
pmid, start, end, mention, mention_type, duid = row
norm_names = []
elif len(row) == 7:
pmid, start, end, mention, mention_type, duid, norm_names = row
duid = duid.split("|")
start,end = int(start),int(end)
text_type = "T" if end <= len(title) else "A"
if mention_type != et:
continue
label = Annotation(text_type, start, end, mention, mention_type)
attributes["annotations"] += [label]
doc = Document(pmid,text,attributes=attributes)
self._docs[pmid] = doc
def __getitem__(self,key):
return self._docs[key]
def _load(self, filename):
for pmid in self._docs:
yield self._docs[pmid]
def load_corpus(parser,entity_type="Disease"):
'''Load CDR Disease Corpus
'''
# init cache directory and parsers
cache_dir = "{}/data/CDR.Corpus.v010516/cache/".format(os.path.dirname(__file__))
doc_parser = CdrParser(entity_type=entity_type)
text_parser = PickleSerializedParser(parser,rootdir=cache_dir)
# create cross-validation set information
attributes = {"sets":{"testing":[],"training":[],"development":[]}}
for pmid in doc_parser._docs:
setname = doc_parser._docs[pmid].attributes["set"]
attributes["sets"][setname]+= [pmid]
return Corpus(doc_parser,text_parser,attributes)
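# A minimal sketch of counting documents per holdout split using only
# CdrParser defined above; it assumes the manually downloaded PubTator files
# described in _download() are already in place.
def demo_cdr_split_sizes(entity_type="Disease"):
    parser = CdrParser(entity_type=entity_type)
    sizes = {"training": 0, "development": 0, "testing": 0}
    for pmid, doc in parser._docs.items():
        sizes[doc.attributes["set"]] += 1
    return sizes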
|
ddbiolib-master
|
ddbiolib/datasets/cdr.py
|
'''
DEPRECATED
Included only for backwards compatibility with TACL experiments
'''
import os
import re
import sys
import glob
import codecs
import cPickle
import operator
import itertools
import numpy as np
from collections import namedtuple
Annotation = namedtuple('Annotation', ['text_type','start','end','text','mention_type'])
CdrAnnotation = namedtuple('Annotation', ['text_type','start','end','text','mention_type', "mesh_ids", "mesh_names"])
Document = namedtuple('Document',['doc_id','title','body','sentences'])
class Corpus(object):
def __init__(self, path, parser, encoding="utf-8"):
self.path = path
self.parser = parser
self.encoding = encoding
def _get_files(self):
if os.path.isfile(self.path):
return [self.path]
elif os.path.isdir(self.path):
return [os.path.join(self.path, f) for f in os.listdir(self.path)]
else:
return glob.glob(self.path)
class PlainTextCorpus(Corpus):
def __init__(self, path, parser, cache_path=None):
'''
PubMed abstracts corpus. File format assumed to be
PMID title body
'''
super(PlainTextCorpus, self).__init__(path, parser)
self.documents = {}
self._load_files()
self.cache_path = cache_path
def __iter__(self):
for pmid in self.documents:
yield self.__getitem__(pmid)
def __getitem__(self, uid):
pkl_file = "%s/%s.pkl" % (self.cache_path, uid)
# load cached parse if it exists
if os.path.exists(pkl_file):
with open(pkl_file, 'rb') as f:
self.documents[uid] = cPickle.load(f)
else:
sentences = [s for s in self.parser.parse(self.documents[uid]["text"])]
self.documents[uid]["sentences"] = sentences
with open(pkl_file, 'w+') as f:
cPickle.dump(self.documents[uid], f)
return self.documents[uid]
def _load_files(self):
filelist = glob.glob("{}/*.txt".format(self.path))
for fname in filelist:
uid = fname.split("/").rstrip(".txt")
self.documents[uid] = {}
self.documents[uid]["text"] = "".join(codecs.open(self.path,"r").readlines())
def overlaps(a,b):
return len(set(a).intersection(b)) != 0
def unescape_penn_treebank(words):
'''Replace PennTreeBank tags and other CoreNLP modifications. This is
pretty much a hack. '''
repl = dict([('-LRB-',u'('), ('-RRB-',u')'), ('-LCB-',u'{'), ('-RCB-',u'}'),("`",u"'"),
('-LSB-',u'['),('-RSB-',u']')]) #,("``",'"'),("''",'"')
words = [repl[w] if w in repl else w for w in words]
# deal with quotation marks
rm = False
for i in range(0,len(words)):
if words[i] == "``":
rm = True
words[i] = '"'
if rm and words[i] == "''":
words[i] = '"'
rm = False
return words
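# A minimal sketch of what unescape_penn_treebank recovers: PTB bracket tags
# back to brackets and CoreNLP-style quote pairs back to double quotes.
def demo_unescape():
    tokens = ["-LRB-", "p", "-RRB-", "``", "quoted", "''"]
    return unescape_penn_treebank(tokens)  # ['(', 'p', ')', '"', 'quoted', '"']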
class NcbiDiseaseCorpus(Corpus):
'''The NCBI disease corpus is fully annotated at the mention and concept level
to serve as a research resource for the biomedical natural language processing
community.
-- from http://www.ncbi.nlm.nih.gov/CBBresearch/Dogan/DISEASE/
793 PubMed abstracts
6,892 disease mentions
790 unique disease concepts
'''
def __init__(self, path, parser, cache_path="/tmp/"):
super(NcbiDiseaseCorpus, self).__init__(path, parser)
self.path = path
self.cv = {"training":{},"development":{},"testing":{}}
self.documents = {}
self.annotations = {}
self._load_files()
self.cache_path = cache_path
def __getitem__(self,pmid):
"""Use PMID as key and load parsed document object"""
pkl_file = "%s/%s.pkl" % (self.cache_path, pmid)
# load cached parse if it exists
if os.path.exists(pkl_file):
with open(pkl_file, 'rb') as f:
self.documents[pmid] = cPickle.load(f)
else:
self.documents[pmid]["title"] = self.documents[pmid]["title"]
self.documents[pmid]["body"] = self.documents[pmid]["body"]
# align gold annotations
title = self.documents[pmid]["title"]
body = self.documents[pmid]["body"]
doc_str = "%s %s" % (title,body)
self.documents[pmid]["sentences"] = [s for s in self.parser.parse(doc_str,doc_id=pmid)]
self.documents[pmid]["tags"] = []
if pmid in self.annotations:
self.documents[pmid]["tags"] = self._label(self.annotations[pmid],self.documents[pmid]["sentences"])
else:
self.documents[pmid]["tags"] += [[] for _ in range(len(self.documents[pmid]["sentences"]))]
with open(pkl_file, 'w+') as f:
cPickle.dump(self.documents[pmid], f)
return self.documents[pmid]
def _ground_truth(self,doc_ids):
'''Build ground truth (doc_id,sent_id,char_offset) mentions'''
ground_truth = []
for pmid in doc_ids:
doc = self.__getitem__(pmid)["sentences"]
labels = self.__getitem__(pmid)["tags"]
for sent_id in range(0,len(doc)):
for tag in labels[sent_id]:
# assess ground truth on token
label = tag[0] # gold standard annotation text
span = tag[-1]
char_idx = doc[sent_id].char_offsets[span[0]]
char_span = tuple([char_idx, char_idx+len(label)])
ground_truth += [(pmid, sent_id, tuple(range(*span)), char_span, label.replace(" ",""))]
#ground_truth += [(pmid, sent_id, tuple(range(*span)),label.replace(" ",""))]
return ground_truth
def gold_labels(self,candidates):
'''Given a set of candidates, generate -1,1 labels
using internal gold label data'''
doc_ids = {c.doc_id:1 for c in candidates}
true_labels = set(self._ground_truth(doc_ids))
gold = [0] * len(candidates)
for idx,c in enumerate(candidates):
text = "".join([c.words[i] for i in c.idxs])
char_span = [c.char_offsets[i] for i in c.idxs]
char_span = (char_span[0], char_span[-1] + len(c.words[c.idxs[-1]]))
char_span = tuple(char_span)
mention = (c.doc_id, c.sent_id, tuple(c.idxs), char_span, text)
#mention = (c.doc_id, c.sent_id, tuple(c.idxs), text)
gold[idx] = 1 if mention in true_labels else -1
return np.array(gold)
def score(self, candidates, prediction, doc_ids=None):
'''Given a set of candidates, compute true precision, recall, f1
using gold labeled benchmark data (this includes non-candidate entities,
        which aren't captured by ddlite metrics). If doc_ids (a list of
        document PMIDs) is provided, use that as the document collection for scoring.
'''
print "Candidates N:{}".format(len(candidates))
# create doc set from candidate pool or a provided doc_id set
doc_ids = {c.doc_id:1 for c in candidates} if not doc_ids else dict.fromkeys(doc_ids)
# compute original document character offsets for each mention
mentions = {}
for i,c in enumerate(candidates):
if c.doc_id not in doc_ids:
continue
if prediction[i] != 1:
continue
mentions[self.getkey(c)] = 1
# score
mentions = set(mentions.keys())
true_labels = set(self._ground_truth(doc_ids))
tp = true_labels.intersection(mentions)
fp = mentions.difference(tp)
fn = true_labels.difference(tp)
print "-----------------------------"
print "TP:{} FP:{} FN:{} True_N:{}".format(len(tp),len(fp),len(fn),len(true_labels))
print "-----------------------------"
r = len(tp) / float(len(true_labels))
p = len(tp) / float(len(tp) + len(fp))
f1 = 2.0 * (p * r) / (p + r)
return {"precision":p, "recall":r,"f1":f1,
"tp":len(tp), "fp":len(fp), "fn":len(fn)}
def classification_errors(self, candidates, prediction, doc_ids=None):
# create doc set from candidate pool or a provided doc_id set
doc_ids = {c.doc_id:1 for c in candidates} if not doc_ids else dict.fromkeys(doc_ids)
# compute original document character offsets for each mention
mentions = {}
for i,c in enumerate(candidates):
if c.doc_id not in doc_ids:
continue
if prediction[i] != 1:
continue
mentions[self.getkey(c)] = 1
#score
mentions = set(mentions.keys())
true_labels = set(self._ground_truth(doc_ids))
tp = true_labels.intersection(mentions)
fp = mentions.difference(tp)
fn = true_labels.difference(tp)
return (tp,fp,fn)
def _label_index(self,doc_ids):
'''Ground truth annotation index'''
label_idx = {}
for pmid in doc_ids:
label_idx[pmid] = {}
doc = self.__getitem__(pmid)
for sentence,tags in zip(doc["sentences"],doc["tags"]):
if sentence.sent_id not in label_idx[pmid]:
label_idx[pmid][sentence.sent_id] = {}
for text,offset in tags:
label_idx[pmid][sentence.sent_id][offset] = text
return label_idx
def _candidate_index(self,candidates):
candidate_idx = {}
for i,c in enumerate(candidates):
if c.doc_id not in candidate_idx:
candidate_idx[c.doc_id] = {}
if c.sent_id not in candidate_idx[c.doc_id]:
candidate_idx[c.doc_id][c.sent_id] = {}
span = (min(c.idxs),max(c.idxs)+1)
candidate_idx[c.doc_id][c.sent_id][span] = c
return candidate_idx
def match(self, label, candidates, c_index=None, partial=True):
'''Return candidates whose token span exactly matches the gold label and,
if partial=True, candidates whose span overlaps it.'''
m = []
c_index = self._candidate_index(candidates) if not c_index else c_index
doc_id,sent_id,idxs,_,_ = label
if doc_id in c_index and sent_id in c_index[doc_id]:
lspan = (min(idxs),max(idxs)+1)
if lspan in c_index[doc_id][sent_id]:
m += [c_index[doc_id][sent_id][lspan]]
if partial:
for cspan in c_index[doc_id][sent_id]:
if overlaps(range(*lspan),range(*cspan)) and lspan!=cspan:
m += [c_index[doc_id][sent_id][cspan]]
return m
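# --- Illustrative sketch ---
# match() relies on an overlaps() helper defined elsewhere in this module;
# the assumption here is that it tests whether two token-index ranges share
# any position. An equivalent check would be:
def _spans_overlap(a, b):
    '''True if two index ranges (e.g. range(2,5) and range(4,6)) intersect'''
    return len(set(a) & set(b)) > 0

# _spans_overlap(range(2, 5), range(4, 6))  -> True
# _spans_overlap(range(2, 5), range(5, 7))  -> False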
def getkey(self,c):
'''Generate unique key for mention so that our evaluation
can use set semantics'''
mention = unescape_penn_treebank(c.mention())
txt = " ".join(mention)
char_span = [c.char_offsets[i] for i in c.idxs]
#char_span = (min(char_span),min(char_span)+len(txt))
char_span = (min(char_span), max(char_span) + len(mention[-1]))
return (c.doc_id, c.sent_id, tuple(c.idxs), char_span, "".join(mention))
def force_longest_match(self, candidates, probability, doc_ids=None):
'''Only use longest correct match for any set of overlapping or
adjoining mentions'''
c_index = self._candidate_index(candidates)
true_labels = set(self._ground_truth(doc_ids))
pred_idx = {self.getkey(c):i for i,c in enumerate(candidates)}
mapping = {}
for label in true_labels:
mapping[label] = self.match(label,candidates,c_index)
for label in mapping:
lengths = [len(c.mention()) for c in mapping[label]]
proba = [probability[pred_idx[self.getkey(c)]] for c in mapping[label]]
# rank matches by probability, then length; keep only positive predictions
scores = [c for p,l,c in sorted(zip(proba,lengths,mapping[label]),reverse=1) if p > 0.499]
if not scores:
continue
mapping[label].remove(scores[0])
probability[pred_idx[self.getkey(scores[0])]] = 1
for c in mapping[label]:
probability[pred_idx[self.getkey(c)]] = -1
def error_analysis_v1(self, candidates, prediction, doc_ids=None):
c_index = self._candidate_index(candidates)
true_labels = set(self._ground_truth(doc_ids))
mapping,claimed = {},{}
for label in true_labels:
# NOTE: candidates can touch multiple labels
mapping[label] = self.match(label,candidates,c_index)
claimed.update({self.getkey(c):1 for c in mapping[label]})
#pred_idx = {self.getkey(c):prediction[i] for i,c in enumerate(candidates)}
def error_analysis(self, candidates, prediction, doc_ids=None):
c_index = self._candidate_index(candidates)
l_index = self._label_index(doc_ids)
true_labels = set(self._ground_truth(doc_ids))
mapping,claimed = {},{}
for label in true_labels:
# NOTE: candidates can touch multiple labels
mapping[label] = self.match(label,candidates,c_index)
claimed.update({self.getkey(c):1 for c in mapping[label]})
partial,complete = [],[]
for label in true_labels:
matches = self.match(label, candidates, c_index)
mentions = [" ".join(c.mention()) for c in mapping[label]]
# true label
doc_id,sent_id,idxs,char_span,_ = label
span = (min(idxs),max(idxs)+1)
mtext = l_index[doc_id][sent_id][span]
# partial match
if mtext not in mentions and len(matches) != 0:
partial += [list(label)[0:-1] + [mtext]]
# missed entirely
elif mtext not in mentions:
complete += [list(label)[0:-1] + [mtext]]
return (partial,complete)
'''
print "-----------------------------------"
print "FN: Partial Matches"
print "-----------------------------------"
print len(partial)
for item in partial:
print item
print "-----------------------------------"
print "FN: Complete Misses"
print "-----------------------------------"
print len(complete)
for item in complete:
print item
'''
def conll(self,doc_ids):
'''Export docs to CoNLL format'''
outstr = []
for doc_id in doc_ids:
doc = self.__getitem__(doc_id)
tagged = zip(doc["sentences"], doc["tags"])
for sentence,labels in tagged:
# create label index
idx = {}
for term,(i,j) in labels:
if i not in idx:
idx[i] = {}
idx[i][term] = j
# fix overlapping gold entity spans (due to tokenization errors)
words = sentence.words
tags = [u'O'] * len(words)
for i in idx:
for label in idx[i]:
bio2 = [u"<B-DISEASE>"]
bio2 += [u"<I-DISEASE>"] * (len(label.split())-1)
tags[i:idx[i][label]] = bio2
s = zip(words,tags)
for word,tag in s:
outstr += [u"{} {}".format(word,tag)]
outstr += [u""]
return "\n".join(outstr)
def _label(self,annotations,sentences):
'''Convert annotations from NCBI offsets to parsed token offsets.
NOTE: This isn't perfect, since tokenization can fail to correctly split
some tags.
'''
tags = [[] for i,_ in enumerate(sentences)]
sents = {min(sent.char_offsets):sent for sent in sentences}
sent_offsets = sorted(sents)
for label in annotations:
# find target sentence
for i in range(len(sent_offsets)):
start = sent_offsets[i]
end = sents[start].char_offsets[-1] + 1
# determine span match (assume potentially overlapping spans)
if label.start >= start and label.start <= end:
span = [label.start, label.start + len(label.text)]
idx = len(sents[start].words)-1
for j in range(0,len(sents[start].words)-1):
if span[0] >= sents[start].char_offsets[j] and span[0] < sents[start].char_offsets[j+1]:
idx = j
break
s_start = idx
s_end = len(sents[start].words)
for j in range(idx,len(sents[start].words)):
if span[1] > sents[start].char_offsets[j]:
s_end = j + 1
else:
break
tags[i] += [ (label.text, (s_start,s_end)) ]
return tags
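# --- Illustrative standalone sketch (separate from the class above) ---
# _label() maps gold character offsets onto token indices using each
# sentence's char_offsets. A simplified version of the same alignment idea,
# with hypothetical names:
def char_span_to_token_span(char_offsets, words, start, end):
    '''Return an (i, j) token slice covering the character span [start, end)'''
    i = len(words) - 1
    for k in range(len(words) - 1):
        if char_offsets[k] <= start < char_offsets[k + 1]:
            i = k
            break
    j = len(words)
    for k in range(i, len(words)):
        if end <= char_offsets[k]:
            j = k
            break
    return (i, j)

# e.g. words ["Breast", "cancer", "risk"] with char_offsets [0, 7, 14]:
# char_span_to_token_span([0, 7, 14], ["Breast", "cancer", "risk"], 0, 13) -> (0, 2)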
def __iter__(self):
for pmid in self.documents:
yield self.__getitem__(pmid)
def _load_files(self):
'''Load the NCBI disease corpus text files, building the document,
gold annotation, and cross-validation split indexes.'''
cvdefs = {"NCBIdevelopset_corpus.txt":"development",
"NCBItestset_corpus.txt":"testing",
"NCBItrainset_corpus.txt":"training"}
filelist = glob.glob("%s/*.txt" % self.path)
for fname in filelist:
setname = cvdefs[fname.split("/")[-1]]
documents = []
with codecs.open(fname,"rU",self.encoding) as f:
doc = []
for line in f:
row = line.strip()
if not row and doc:
documents += [doc]
doc = []
elif row:
row = row.split("|") if (len(row.split("|")) > 1 and row.split("|")[1] in ["t","a"]) else row.split("\t")
doc += [row]
documents += [doc]
for doc in documents:
pmid,title,body = doc[0][0],doc[0][2],doc[1][2]
if pmid in self.documents:
print "Warning: duplicate {} PMID {}".format(setname,pmid)
self.cv[setname][pmid] = 1
self.documents[pmid] = {"title":title,"body":body}
doc_str = "%s %s" % (title, body)
for row in doc[2:]:
pmid, start, end, text, mention_type, duid = row
start = int(start)
end = int(end)
# title or abstract mention?
text_type = "T" if end <= len(title) else "A"
if pmid not in self.annotations:
self.annotations[pmid] = []
label = Annotation(text_type, start, end, text, mention_type)
self.annotations[pmid] += [label]
# validate there are no duplicate annotations
labels = [ map(lambda x:(pmid,x), self.annotations[pmid]) for pmid in self.cv[setname]]
labels = list(itertools.chain.from_iterable(labels))
print setname,len(labels)
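# --- Illustrative sketch ---
# _load_files() reads the PubTator-style NCBI corpus layout: title and abstract
# lines use "PMID|t|..." / "PMID|a|...", and annotation lines are tab-separated
# "PMID<TAB>start<TAB>end<TAB>text<TAB>type<TAB>ID". A minimal standalone
# parser for a single blank-line-delimited document block (hypothetical helper):
def parse_pubtator_block(lines):
    '''Parse one PubTator document block into title, abstract, and annotations'''
    doc = {"pmid": None, "title": "", "abstract": "", "annotations": []}
    for line in lines:
        parts = line.split("|", 2)
        if len(parts) == 3 and parts[1] in ("t", "a"):
            doc["pmid"] = parts[0]
            key = "title" if parts[1] == "t" else "abstract"
            doc[key] = parts[2]
        else:
            pmid, start, end, text, mtype, duid = line.split("\t")
            doc["annotations"] += [(int(start), int(end), text, mtype, duid)]
    return doc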
|
ddbiolib-master
|
ddbiolib/datasets/ncbi_legacy.py
|
ddbiolib-master
|
ddbiolib/datasets/chemdner.py
|
|
import psycopg2
import mysql.connector
class DatabaseI(object):
'''Simple database wrapper. This mostly mirrors psycopg2 / mysql.connector
functionality with some assurances built in for closing connections
upon object destruction.
TODO: check if this is actually required'''
def __init__(self, host, user, database, password="", encoding='latin1'):
self.host = host
self.user = user
self.password = password
self.database = database
self.encoding = encoding
self.conn = None
def connect(self):
raise NotImplementedError()
def closed(self):
return False
def cursor(self):
raise NotImplementedError()
def query(self, sql):
raise NotImplementedError()
def __enter__(self):
return self
def __exit__(self, _type, value, traceback):
pass
def __del__(self):
pass
class PostgresSqlConn(DatabaseI):
def connect(self):
'''Generate a connection to a PostgreSQL database.
'''
conn_string = "host='%s' database='%s'" % (self.host, self.database)
self.conn = psycopg2.connect(conn_string)
self.conn.set_client_encoding(self.encoding)
def closed(self):
# guard against connect() never having been called
return self.conn is None or self.conn.closed
def cursor(self):
return self.conn.cursor()
def __exit__(self, _type, value, traceback):
if not self.closed():
self.conn.close()
def __del__(self):
if not self.closed():
self.conn.close()
class MySqlConn(DatabaseI):
def connect(self):
'''Generate a connection to a MySQL database.
'''
#self.conn.set_client_encoding(self.encoding)
self.conn = mysql.connector.connect(user=self.user, host=self.host,
password=self.password,
database=self.database)
def closed(self):
'''Return True if the connection was never opened or is no longer connected'''
# mysql.connector connections expose is_connected()
return self.conn is None or not self.conn.is_connected()
def cursor(self):
return self.conn.cursor()
def query(self, sql):
cursor = self.conn.cursor()
cursor.execute(sql)
return [row for row in cursor.fetchall()]
def __exit__(self, _type, value, traceback):
if not self.closed():
print("exiting")
self.conn.close()
def __del__(self):
if not self.closed():
self.conn.close()
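# --- Illustrative usage sketch ---
# The wrappers above are intended to be used as context managers so that
# connections are closed on exit. The connection parameters below are
# hypothetical and assume a reachable PostgreSQL instance:
#
# with PostgresSqlConn(host="localhost", user="me", database="pubmed") as db:
#     db.connect()
#     cur = db.cursor()
#     cur.execute("SELECT 1")
#     print(cur.fetchall())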
|
ddbiolib-master
|
ddbiolib/utils/database.py
|
from .base import *
from .database import *
|
ddbiolib-master
|
ddbiolib/utils/__init__.py
|
import os
from urllib2 import urlopen, URLError, HTTPError
def download(url,outfname):
try:
data = urlopen(url)
with open(outfname, "wb") as f:
f.write(data.read())
except HTTPError, e:
print "HTTP Error:", e.code, url
except URLError, e:
print "URL Error:", e.reason, url
def unescape_penn_treebank(words):
'''Replace PennTreeBank tags and other CoreNLP modifications. This is
pretty much a hack. '''
repl = dict([('-LRB-',u'('), ('-RRB-',u')'), ('-LCB-',u'{'), ('-RCB-',u'}'),("`",u"'"),
('-LSB-',u'['),('-RSB-',u']')]) #,("``",'"'),("''",'"')
words = [repl[w] if w in repl else w for w in words]
# deal with quotation marks
rm = False
for i in range(0,len(words)):
if words[i] == "``":
rm = True
words[i] = '"'
if rm and words[i] == "''":
words[i] = '"'
rm = False
return words
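# Quick illustrative check of the PTB unescaping above (expected output worked
# out by hand from the replacement table and the quotation-mark handling):
if __name__ == "__main__":
    sample = ["``", "hello", "''", "-LRB-", "world", "-RRB-"]
    print(unescape_penn_treebank(sample))
    # -> ['"', 'hello', '"', '(', 'world', ')']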
|
ddbiolib-master
|
ddbiolib/utils/base.py
|
#from .base import *
from .base_snorkel import *
|
ddbiolib-master
|
ddbiolib/versioning/__init__.py
|
'''
Snorkel candidate set versioning
'''
import os
import sys
import glob
import hashlib
import cPickle
from datetime import datetime
def dict2str(d):
'''Convert dictionary to tuple pair string'''
return str(d).encode("utf-8",errors="ignore")
def checksum(s):
'''Create checksum for input object'''
if type(s) is dict:
s = dict2str(s)
elif type(s) in [list,tuple]:
s = "|".join(sorted(list(s)))
m = hashlib.md5()
m.update(s)
return m.hexdigest()
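# Illustrative example: checksum() normalizes dicts and lists/tuples to a
# canonical string before hashing, so logically equal inputs hash identically.
# >>> checksum(["b", "a"]) == checksum(("a", "b"))   # both normalize to "a|b"
# True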
def cands2str(candidates):
'''Convert DeepDive Relations object to string'''
convert = lambda x:x.encode("utf-8",errors="ignore")
rela_func = lambda x:["{}:{}".format(x.sentence["doc_id"], x.sentence["sent_id"])] + map(convert,x.mention1("words")) + map(convert,x.mention2("words"))
entity_func = lambda x:["{}:{}".format(x.sentence["doc_id"], x.sentence["sent_id"])] + map(convert,x.get_span())
get_row = rela_func if type(candidates).__name__ == "Relations" else entity_func
# create string versions of candidates
s = [":".join(get_row(c)) for c in candidates]
return "|".join(sorted(s))
class CandidateVersioner(object):
'''Create unique version ID for candidate set while saving to disk'''
def __init__(self, rootdir, prefix="", dicts={}):
self.rootdir = rootdir
self.prefix = prefix
self.dicts = dicts
self._candidates = {}
self.filename = None
self.checksum = None
def snapshot(self, name, candidates):
self._candidates[name] = candidates
def save(self):
'''Save checksummed version of candidate set. This computes
checksums based on dictionaries, input documents, and final
candidate set'''
candidates = reduce(lambda x,y:x+y, self._candidates.values())
manifest = self._checksums(candidates, self.dicts)
# dump candidates and log file
ctype = "RELATIONS." if str(type(self._candidates)) == "Relations" else "ENTITIES."
prefix = self.prefix + "." if self.prefix else ""
self.filename = "{}/{}{}{}".format(self.rootdir,prefix,ctype,manifest["uid"])
cPickle.dump(self._candidates, open("{}.pkl".format(self.filename),"wb"))
self._write_log(self.filename,manifest)
self.checksum = manifest["uid"]
def load(self,checksum):
filelist = glob.glob("{}*{}.pkl".format(self.rootdir,checksum))
if len(filelist) > 1:
print>>sys.stderr,"Warning: multiple matching checksums"
elif not len(filelist):
print>>sys.stderr,"Error: snapshot not found"
return {}
fname = filelist[0]
self._candidates = cPickle.load(open(fname,"rb"))
self.filename = os.path.splitext(fname)[0]
self.checksum = checksum
return self._candidates
def _checksums(self, candidates, dicts):
'''Compute MD5 checksums for all assets used to
create this candidate set'''
manifest = {}
# dictionary checksums
for name,d in dicts.items():
manifest["dictionary:{}".format(name)] = checksum(d)
# doc and candidate checksum
doc_ids = sorted(set([c.sentence["doc_id"] for c in candidates]))
manifest["doc_ids"] = checksum(doc_ids)
manifest["candidates"] = checksum(cands2str(candidates))
# some count data about candidates
manifest["num_docs"] = len(doc_ids)
manifest["num_candidates"] = len(candidates)
# create unique checksum ID
_,values = zip(*sorted(manifest.items()))
values = map(str,values)
manifest["uid"] = checksum(reduce(lambda x,y:x+y,values))
return manifest
def _write_log(self,filename,manifest):
# write checksums to text file
ts = datetime.now()
outfile = "{}.checksums".format(filename)
with open(outfile,'w') as f:
f.write("{0:<22}{1:^11}{2:<32}\n".format("ts","=",str(ts)))
for key,value in sorted(manifest.items()):
f.write("{0:<22}{1:^11}{2:<32}\n".format(key,"=",value))
|
ddbiolib-master
|
ddbiolib/versioning/base_snorkel.py
|
import os
import random
import hashlib
from datetime import datetime
from ddlite import *
def dict2str(d):
'''Convert dictionary to tuple pair string'''
return str(d).encode("utf-8",errors="ignore")
def checksum(s):
'''Create checksum for input object'''
if type(s) is dict:
s = dict2str(s)
elif type(s) in [list,tuple]:
s = "|".join(sorted(list(s)))
m = hashlib.md5()
m.update(s)
return m.hexdigest()
def cands2str(candidates):
'''Convert DeepDive Relations object to string'''
convert = lambda x:x.encode("utf-8",errors="ignore")
rela_func = lambda x:["{}:{}".format(x.doc_id, x.sent_id)] + map(convert,x.mention1("words")) + map(convert,x.mention2("words"))
entity_func = lambda x:["{}:{}".format(x.doc_id, x.sent_id)] + map(convert,x.mention("words"))
get_row = rela_func if type(candidates) is Relations else entity_func
# create string versions of candidates
s = [":".join(get_row(c)) for c in candidates]
return "|".join(sorted(s))
class CandidateVersioner(object):
'''Create unique version ID for candidate set while saving to disk'''
def __init__(self,rootdir,prefix=""):
self.rootdir = rootdir
self.prefix = prefix
self.filename = None
def dump_candidates(self, candidates, dicts):
'''Save checksummed version of candidate set. This computes
checksums based on dictionaries, input documents, and final
candidate set'''
manifest = self._checksums(candidates, dicts)
# dump candidates and log file
ctype = "RELATIONS." if type(candidates) is Relations else "ENTITIES."
prefix = self.prefix + "." if self.prefix else ""
self.filename = "{}/{}{}{}".format(self.rootdir,prefix,ctype,manifest["uid"])
candidates.dump_candidates("{}.pkl".format(self.filename))
self._write_log(self.filename,manifest)
def _checksums(self, candidates, dicts):
'''Compute MD5 checksums for all assets used to
create this candidate set'''
manifest = {}
# dictionary checksums
for name,d in dicts.items():
manifest["dictionary:{}".format(name)] = checksum(d)
# doc and candidate checksum
doc_ids = sorted(set([c.doc_id for c in candidates]))
manifest["doc_ids"] = checksum(doc_ids)
manifest["candidates"] = checksum(cands2str(candidates))
# some count data about candidates
manifest["num_docs"] = len(doc_ids)
manifest["num_candidates"] = len(candidates)
# create unique checksum ID
_,values = zip(*sorted(manifest.items()))
values = map(str,values)
manifest["uid"] = checksum(reduce(lambda x,y:x+y,values))
return manifest
def _write_log(self,filename,manifest):
# write checksums to text file
ts = datetime.now()
outfile = "{}.checksums".format(filename)
with open(outfile,'w') as f:
f.write("{0:<22}{1:^11}{2:<32}\n".format("ts","=",str(ts)))
for key,value in sorted(manifest.items()):
f.write("{0:<22}{1:^11}{2:<32}\n".format(key,"=",value))
|
ddbiolib-master
|
ddbiolib/versioning/base.py
|
from .base import *
from .doc_parsers import *
from .utils import *
|
ddbiolib-master
|
ddbiolib/corpora/__init__.py
|
import re
# Originally from http://effbot.org/zone/unicode-gremlins.htm
# Replaced definitions to conform to:
# ftp://ftp.unicode.org/Public/MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1250.TXT
# http://www.microsoft.com/typography/unicode/1252.htm
cp1252 = {
u"\x80": u"\u20AC", # EURO SIGN
u"\x81": u"", # UNDEFINED
u"\x82": u"\u201A", # SINGLE LOW-9 QUOTATION MARK
u"\x83": u"", # UNDEFINED
u"\x84": u"\u201E", # DOUBLE LOW-9 QUOTATION MARK
u"\x85": u"\u2026", # HORIZONTAL ELLIPSIS
u"\x86": u"\u2020", # DAGGER
u"\x87": u"\u2021", # DOUBLE DAGGER
u"\x88": u"", # UNDEFINED
u"\x89": u"\u2030", # PER MILLE SIGN
u"\x8A": u"\u0160", # LATIN CAPITAL LETTER S WITH CARON
u"\x8B": u"\u2039", # SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u"\x8C": u"\u015A", # LATIN CAPITAL LETTER S WITH ACUTE
u"\x8D": u"\u0164", # LATIN CAPITAL LETTER T WITH CARON
u"\x8E": u"\u017D", #LATIN CAPITAL LETTER Z WITH CARON
u"\x8F": u"\u0179", # LATIN CAPITAL LETTER Z WITH ACUTE
u"\x90": u"", # UNDEFINED
u"\x91": u"\u2018", # LEFT SINGLE QUOTATION MARK
u"\x92": u"\u2019", # RIGHT SINGLE QUOTATION MARK
u"\x93": u"\u201C", # LEFT DOUBLE QUOTATION MARK
u"\x94": u"\u201D", # RIGHT DOUBLE QUOTATION MARK
u"\x95": u"\u2022", # BULLET
u"\x96": u"\u2013", # EN DASH
u"\x97": u"\u2014", # EM DASH
u"\x98": u"", # UNDEFINED
u"\x99": u"\u2122", # TRADE MARK SIGN
u"\x9A": u"\u0161", # LATIN SMALL LETTER S WITH CARON
u"\x9B": u"\u203A", # SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u"\x9C": u"\u015B", # LATIN SMALL LETTER S WITH ACUTE
u"\x9D": u"\u0165", # LATIN SMALL LETTER T WITH CARON
u"\x9E": u"\u017E", # LATIN SMALL LETTER Z WITH CARON
u"\x9F": u"\u017A", # LATIN SMALL LETTER Z WITH ACUTE
}
def kill_gremlins(text):
# map cp1252 gremlins to real unicode characters
if re.search(u"[\x80-\x9f]", text):
def fixup(m):
s = m.group(0)
return cp1252.get(s, s)
if isinstance(text, type("")):
# make sure we have a unicode string
text = unicode(text, "iso-8859-1")
text = re.sub(u"[\x80-\x9f]", fixup, text)
return text
# remove ASCII control chars
def rm_ascii_control_chars(t):
return re.sub(u"[\x00-\x1F]|[\x7F]", u"", t)
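# Illustrative examples (worked out by hand):
# >>> kill_gremlins(u"\x93smart quotes\x94")
# u'\u201csmart quotes\u201d'
# >>> rm_ascii_control_chars(u"tab\tand\x07bell")
# u'tabandbell'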
def to_conll(corpus, labels, label_key, doc_ids=None):
'''
Given a corpus and label key, generate a CoNLL format document
'''
# index labels by doc/sent
idx = {}
for l in labels:
doc_id = l.sentence["doc_id"]
sent_id = l.sentence["sent_id"]
if doc_id not in idx:
idx[doc_id] = {}
if sent_id not in idx[doc_id]:
idx[doc_id][sent_id] = []
idx[doc_id][sent_id] += [l.idxs]
n = 0
sentences = []
for doc in corpus:
if doc_ids and doc.doc_id not in doc_ids:
continue
for i,sent in enumerate(doc.sentences):
tags = len(sent.words) * ['O']
if sent.doc_id in idx and i in idx[sent.doc_id]:
labels = idx[sent.doc_id][i]
for lbl in labels:
t = [u"B-{}".format(label_key)]
t += [u"I-{}".format(label_key)] * (len(lbl)-1)
tags[min(lbl):max(lbl)+1] = t
n += 1
# HACK unicode whitespace character fix (for conlleval script compatibility)
words = map(lambda x:x.replace(u'\xa0',u'_'), sent.words)
sentences += [zip(words,tags) ]
outstr = []
for sent in sentences:
outstr += ["\n".join(map(lambda x: u"{} {}".format(*x),sent))]
outstr += [""]
print n
return "\n".join(outstr)
|
ddbiolib-master
|
ddbiolib/corpora/utils.py
|