import os
import glob
import codecs
class Document(object):
'''A single document: raw text plus parsed sentences and arbitrary metadata.'''
def __init__(self, doc_id, text, sentences=None, attributes=None):
self.doc_id = doc_id
self.text = text
# avoid shared mutable defaults across Document instances
self.sentences = sentences if sentences is not None else []
self.attributes = attributes if attributes is not None else {}
def __repr__(self):
return "<Document(id={} text={}...)>".format(self.doc_id,self.text[0:50])
class DocParser(object):
'''Base class for document parsers; yields Document objects loaded from one or more input files.'''
def __init__(self, inputpath, encoding="utf-8"):
self.inputpath = inputpath
self.encoding=encoding
def __iter__(self):
for fpath in self._get_files(self.inputpath):
for doc in self._load(fpath):
yield doc
def __getitem__(self,key):
raise NotImplementedError
def _load(self, source):
raise NotImplementedError
def _get_files(self, file_input):
if type(file_input) is list:
return file_input
elif os.path.isfile(file_input):
return [file_input]
else:
return glob.glob(file_input)
def _filename2uid(self, s):
return os.path.basename(s).split(".")[0]
class TextFileParser(DocParser):
'''Parse plain text documents, assuming one document per file'''
def __init__(self, inputpath, doc_id_func=None, encoding="utf-8"):
super(TextFileParser, self).__init__(inputpath, encoding)
self.doc_id_func = self._filename2uid if not doc_id_func else doc_id_func
def _load(self, source):
uid = self.doc_id_func(source)
text = u''.join(codecs.open(source,"rU",self.encoding).readlines())
yield Document(doc_id=uid, text=text)
class RowParser(DocParser):
'''One document per tab delimited row'''
def __init__(self, inputpath, doc_id_func=None, delimiter="\t", header=False,
text_columns=['text'], encoding="utf-8"):
super(RowParser, self).__init__(inputpath, encoding)
self.header = header
self.doc_id_func = (lambda row:row[0]) if not doc_id_func else doc_id_func
self.delimiter = delimiter
self.text_columns = text_columns
def _load(self, source):
with codecs.open(source,"rU",self.encoding) as f:
for i,line in enumerate(f):
# strip the trailing newline so the last column parses cleanly
row = line.rstrip("\r\n").split(self.delimiter)
if i == 0 and self.header:
colnames = row
continue
uid = self.doc_id_func(row)
text = u' '.join([row[col if not self.header else colnames.index(col)] \
for col in self.text_columns])
attributes = dict(zip(colnames if self.header else range(len(row)),row))
yield Document(doc_id=uid, text=text, attributes=attributes)
class BioCParser(DocParser):
'''
"BioC is a simple format to share text data and annotations. It allows a large
number of different annotations to be represented. We provide simple code to
hold this data, read it and write it back to XML, and perform some
sample processing."
http://bioc.sourceforge.net/
'''
pass
# ---- end of file: ddbiolib-master/ddbiolib/corpora/doc_parsers.py ----
import itertools
class Corpus(object):
'''Simple iterator class for loading and parsing documents'''
def __init__(self, doc_parser, text_parser=None, attributes={}):
self.doc_parser = doc_parser
self.text_parser = text_parser
self.attributes = attributes
def __getitem__(self,key):
doc = self.doc_parser.__getitem__(key)
doc.sentences = self.text_parser.parse(doc.text,doc.doc_id) if self.text_parser else []
return doc
def __iter__(self):
for doc in self.doc_parser:
doc.sentences = self.text_parser.parse(doc.text,doc.doc_id) if self.text_parser else []
yield doc
def get_sentences(self,doc_ids=None):
sentences = [doc.sentences for doc in self if not doc_ids or doc.doc_id in doc_ids]
return list(itertools.chain.from_iterable(sentences))
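# A minimal usage sketch (hypothetical paths; assumes TextFileParser from
# doc_parsers.py and a text parser exposing parse(text, doc_id), e.g. the
# ddlite SentenceParser used in the demos below):
#
#   from ddlite import SentenceParser
#   from doc_parsers import TextFileParser
#   docs = TextFileParser("data/abstracts/*.txt")
#   corpus = Corpus(docs, text_parser=SentenceParser())
#   for doc in corpus:
#       print doc.doc_id, len(doc.sentences)
#   all_sentences = corpus.get_sentences()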
# ---- end of file: ddbiolib-master/ddbiolib/corpora/base.py ----
# -*- coding: utf-8 -*-
import bz2
import sys
import codecs
import re
import itertools
from ddlite import *
from datasets import CdrCorpus
def build_hyphenated_dict(labels, stopwords={}):
'''Count the suffixes of hyphenated mentions (e.g. "...-induced") and return them sorted by frequency.'''
hyphenated = {}
for label in labels:
pmid,sent_id,idxs,span,text = label
mention = [corpus[pmid]["sentences"][sent_id].words[i] for i in idxs]
m = re.search("\w+[-]([A-Za-z]{3,})"," ".join(mention))
if m:
t = m.group(1).lower()
if t in stopwords:
continue
hyphenated[t] = hyphenated.get(t,0) + 1
return [phrase for phrase,n in sorted(hyphenated.items(),key=lambda x:x[1],reverse=True)]
def load_dictionary(filename, col_idx=0, delimiter="\t", stopwords={}):
'''Load dictionary file (optionally compressed with bzip2)'''
if filename.split("/")[-1].split(".")[-1] == "bz2":
d = {line.strip().split(delimiter)[col_idx]:1 for line in
bz2.BZ2File(filename, 'rb').readlines()}
else:
d = {line.strip().split(delimiter)[col_idx]:1 for line in
codecs.open(filename, 'rb',"utf-8", errors="ignore").readlines()}
d = {word:1 for word in d if not word.isupper() and word.lower() not in stopwords}
return d
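# Sketch of the expected dictionary layout (hypothetical rows): one term per line,
# tab-delimited, with the term read from column col_idx and ALL-CAPS or stopword
# entries dropped, e.g.
#
#   aspirin<TAB>CHEBI:15365
#   ibuprofen<TAB>CHEBI:5855
#
#   load_dictionary("names.tsv", col_idx=0)  # -> {u'aspirin': 1, u'ibuprofen': 1}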
####################################################################
# Load/Parse Corpus
####################################################################
SET = "training"
corpus = CdrCorpus(path="corpus/", cache_path="corpus/cache/")
sentences = [corpus[doc_id]["sentences"] for doc_id in corpus.cv[SET]]
sentences = list(itertools.chain.from_iterable(sentences))
'''
terms = []
for doc_id in corpus.cv[SET]:
terms += [t.text for t in corpus.annotations[doc_id]]
oracle = dict.fromkeys(terms)
'''
####################################################################
# Match Candidates
####################################################################
stopwords = load_dictionary("dicts/stopwords.txt")
stopwords.update(dict.fromkeys(["V","IV","III","II","I","cm","mg","pH"]))
#ChEBI chemical ontology and UMLS substances
dictionary = load_dictionary("dicts/chemicals/names.tsv", col_idx=5, stopwords=stopwords)
dictionary.update(load_dictionary("dicts/chemicals/umls.all.substances.bz2",
stopwords=stopwords))
dictionary.update(load_dictionary("dicts/chemicals/umls.all.substances.penn-treebank.txt",
stopwords=stopwords))
# filter out some noisy dictionary matches
for phrase in dictionary.keys():
if re.search("(^[A-Za-z0-9][.-]*$)",phrase):
del dictionary[phrase]
matcher = DictionaryMatch(label='Ch', dictionary=dictionary, ignore_case=True)
candidates = Entities(sentences, matcher)
num_cands = candidates.num_candidates()
num_sents = len(sentences)
outfname = "candidates/chemicals/chemical-candidates.s{}.c{}.pkl".format(num_sents,num_cands)
candidates.dump_candidates(outfname)
print("Wrote {}".format(outfname))
####################################################################
# Candidate Recall
####################################################################
holdout_ids = corpus.cv[SET].keys()
prediction = [1] * len(candidates)
scores = corpus.score(candidates, prediction, "chemicals", holdout_ids )
print "Candidate Recall: {:.3f}".format(scores["recall"])
tp,fp,fn = corpus.classification_errors(candidates, prediction, "chemicals", holdout_ids)
print "TP: {} FP: {} FN: {}".format(len(tp),len(fp),len(fn))
# print out some missed entities
for label in fn:
pmid,sent_id,idxs,span,text = label
mention = [corpus[pmid]["sentences"][sent_id].words[i] for i in idxs]
#print text, mention
# ---- end of file: ddbiolib-master/demos/relations/cdr/cdr_chemical_extraction.py ----
'''
Created on Jun 17, 2016
@author: fries
'''
import bz2
import sys
import csv
import re
import os
import numpy as np
import itertools
import cPickle
import ddlite
from ddlite import SentenceParser,Entities
from ddlite import Union, DictionaryMatch, RegexNgramMatch
from utils import unescape_penn_treebank
from datasets import CdrCorpus
def parse_record(r):
d = {}
for line in r:
if re.search("^\w+\s+=\s+",line):
m = re.split("(^\w+)\s+=\s+",line.strip())[1:]
key,value = m
value = value.split("|")
if key in d:
d[key] += value
else:
d[key] = value
return d
def load_mesh_defs(filename="d2015.bin"):
mesh = {}
record = []
for line in open(filename,"rU"):
line = line.strip()
if not line:
r = parse_record(record)
mesh[r["UI"][0]] = r
record = []
continue
record += [line]
return mesh
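# Sketch of the MeSH ASCII ("d2015.bin") record layout these helpers assume:
# blank-line separated records of "KEY = value" pairs, with multi-valued fields
# joined by "|", e.g. (abridged):
#
#   *NEWRECORD
#   MH = Aspirin
#   ENTRY = Acetylsalicylic Acid|...
#   UI = D001241
#
# parse_record() maps each record to {"MH": ["Aspirin"], "UI": ["D001241"], ...}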
mesh = load_mesh_defs("/Users/fries/Dropbox/Datasets/mesh_headings/2015/d2015.bin")
mesh_ids = {uid:mesh[uid]["MH"][0] for uid in mesh}
mesh_names = {name.lower():uid for uid,name in mesh_ids.items()}
# --------------------------------------------
# Load/Parse Corpus
# --------------------------------------------
cache = "/Users/fries/Desktop/CDR_cache/"
infile = "/Users/fries/Desktop/CDR_Data/CDR.Corpus.v010516/"
parser = SentenceParser()
corpus = CdrCorpus(infile, parser, cache_path=cache)
n,N = [],[]
print corpus
for pmid in corpus.annotations:
for mention in corpus.annotations[pmid]:
if len(mention.mesh_ids) > 1:
t = mention.text.lower()
N += [t]
if "and" in t or "or" in t or "and/or" in t:
n += [t]
# multi-ids
print len(n),len(N)
print len(set(N).difference(n))
print "DONE"
n = 0
N = 0
for pmid in corpus.annotations:
for mention in corpus.annotations[pmid]:
N += 1
if mention.text.lower() in mesh_names:
n += 1.0
print n,N, n/N * 100
# ---- end of file: ddbiolib-master/demos/relations/cdr/cdr_disease_extraction.py ----
'''
This demo requires access to the "BioCreative V Chemical-Disease Relation (CDR) Task Corpus"
(http://www.biocreative.org/resources/corpora/biocreative-v-cdr-corpus/) available after
signing up for an account at http://www.biocreative.org
This script extracts candidate relations from sentences. It assumes we
already have an input candidate set of entities for diseases and chemicals.
This can be either:
1) an oracle set (all gold/true mentions), or
2) a pre-tagged set produced by PubTator, DNorm, tmChem, etc.
@author: jason-fries [at] stanford [dot] edu
'''
import sys
import itertools
from ddlite import *
from datasets import CdrCorpus
class AnnotationMatcher(CandidateExtractor):
'''Use a set of pre-tagged named entities to create candidate matches.'''
def init(self):
# Load opts -- this is from the kwargs dict
self.label = self.opts['label']
self.annotations = self.opts['annotations']
def _build_label_index(self, corpus):
'''Index the ground-truth annotations as doc_id -> sent_id -> offset -> text'''
label_idx = {}
for doc_id in self.annotations:
label_idx[doc_id] = {}
doc = corpus[doc_id]
for sentence,tags in zip(doc["sentences"],doc["tags"]):
if sentence.sent_id not in label_idx[doc_id]:
label_idx[doc_id][sentence.sent_id] = {}
for text,offset in tags:
label_idx[doc_id][sentence.sent_id][offset] = text
return label_idx
def _apply(self, s, idxs=None):
'''Only emit candidates for sentences whose document has pre-tagged annotations.'''
if s.doc_id not in self.annotations:
return
SET = "training"
ORACLE = False
corpus = CdrCorpus(path="corpus/", cache_path="corpus/cache/")
for doc_id in corpus.cv[SET]:
print corpus[doc_id]["tags"]
print corpus[doc_id]["relations"]
for s in corpus[doc_id]["sentences"]:
print s
#for label in corpus.annotations[doc_id]:
# print label
sys.exit()
sentences = [corpus[doc_id]["sentences"] for doc_id in corpus.cv[SET]]
sentences = list(itertools.chain.from_iterable(sentences))
for s in sentences:
print s.doc_id, s.sent_id, s.words
break
#if ORACLE:
# ---- end of file: ddbiolib-master/demos/relations/cdr/cdr_extraction.py ----
import bz2
import sys
import csv
import codecs
import numpy as np
import itertools
import cPickle
from ddlite import SentenceParser,DictionaryMatch,Entities,Union
from utils import unescape_penn_treebank
from datasets import PubMedCentralCorpus
def load_stopwords():
dictfile = "dicts/stopwords.txt"
return [line.strip().split("\t")[0] for line in open(dictfile).readlines()]
def load_dictionary(filename, stopwords=[]):
if filename.split("/")[-1].split(".")[-1] == "bz2":
d = {line.strip().split("\t")[0]:1 for line in bz2.BZ2File(filename, 'rb').readlines()}
else:
d = {line.strip().split("\t")[0]:1 for line in codecs.open(filename, 'rb',"utf-8", errors="ignore").readlines()}
d = {word:1 for word in d if not word.isupper() and word.lower() not in stopwords}
return d
def load_acronym_dictionary(filename):
if filename.split("/")[-1].split(".")[-1] == "bz2":
d = {line.strip().split("\t")[0]:1 for line in bz2.BZ2File(filename, 'rb').readlines()}
else:
d = {line.strip().split("\t")[0]:1 for line in codecs.open(filename, 'rb',"utf-8", errors="ignore").readlines()}
# filter by char length and stopwords
d = {word:1 for word in d if len(word) > 1 and word.isupper()}
return d
# --------------------------------------------
# Load/Parse Corpus
# --------------------------------------------
cache = "/Users/fries/Desktop/pmc_cache/"
inputdir = "/Users/fries/Desktop/articles.I-N/"
# orthopedics subset
cache = "/Users/fries/workspace/dd-bio-examples/data/documents/pmc-ortho-cache/"
inputdir = "/Users/fries/workspace/dd-bio-examples/data/documents/pmc-ortho/"
parser = SentenceParser()
corpus = PubMedCentralCorpus(inputdir,parser,cache_path=cache)
sentences = [corpus[uid]["sentences"] for uid in corpus.documents.keys()[0:1000]] #
sentences = list(itertools.chain.from_iterable(sentences))
# --------------------------------------------
# Match Candidates
# --------------------------------------------
stopwords = load_stopwords()
entities = load_dictionary("dicts/fma_human_anatomy.bz2",stopwords)
acronyms = load_acronym_dictionary("dicts/umls_human_anatomy.bz2")
matcher_d = DictionaryMatch(label='A', dictionary=entities, ignore_case=True)
matcher_a = DictionaryMatch(label='A', dictionary=acronyms, ignore_case=False)
matcher = Union(matcher_a, matcher_d)
candidates = Entities(sentences, matcher)
candidates.dump_candidates("candidates/ortho-candidates.{}.pkl".format(len(sentences)))
# ---- end of file: ddbiolib-master/demos/taggers/anatomy/anatomy_extraction.py ----
# -*- coding: utf-8 -*-
import sys
import cPickle
from ddlite import *
from datasets import *
# ---------------------------------------------------------------------
#
# I. Load Candidates
#
# ---------------------------------------------------------------------
candidates = Entities("cache/pmc-ortho-candidates.pkl")
model = CandidateModel(candidates)
msg = "Extracted {} features for each of {} mentions"
print msg.format(model.num_feats(), model.num_candidates())
# ---- end of file: ddbiolib-master/demos/taggers/chemicals/chemical_learning.py ----
# -*- coding: utf-8 -*-
import sys
import codecs
import operator
import re
import itertools
from ddlite import *
from datasets import *
import ontologies.umls
from ddlite.ddbiolib.utils import unescape_penn_treebank
from lexicons import RuleTokenizedDictionaryMatch
def rule_tokenizer(s):
s = re.sub("([,?!:;] )",r" \1",s)
s = re.sub("([.]$)",r" .",s)
return s.split()
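# Quick sanity check of the rule tokenizer above:
#   rule_tokenizer("aspirin, taken daily.")  ->  ['aspirin', ',', 'taken', 'daily', '.']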
# ---------------------------------------------------------------------
#
# I. Load and Parse Corpus
#
# ---------------------------------------------------------------------
DICT_ROOT = "../../datasets/dictionaries/"
CORPUS_ROOT = "../../datasets/chemdner_corpus/"
parser = SentenceParser()
corpus = ChemdnerCorpus(CORPUS_ROOT, parser=parser,
cache_path="/tmp/")
# ChemDNER has pre-defined cross-validation folds
dev_set = corpus.cv["development"].keys()
# load training documents and collapse all sentences into a single list
documents = {doc_id:(corpus[doc_id]["sentences"],corpus[doc_id]["tags"]) for doc_id in dev_set}
sentences, gold_entities = zip(*documents.values())
sentences = list(itertools.chain.from_iterable(sentences))
gold_entities = list(itertools.chain.from_iterable(gold_entities))
# summary statistics
gold_entity_n = len(list(itertools.chain.from_iterable(gold_entities)))
word_n = sum([len(sent.words) for sent in sentences])
print("%d PubMed abstracts" % len(documents))
print("%d ChemNDER gold entities" % gold_entity_n)
print("%d tokens" % word_n)
# ---------------------------------------------------------------------
#
# II. Build Matchers
#
# ---------------------------------------------------------------------
# dictionaries from tmChem & the UMLS
dict_fnames = ["%s/chemdner/mention_chemical.txt",
"%s/chemdner/chebi.txt",
"%s/chemdner/addition.txt",
"%s/umls/substance-sab-all.txt",
"%s/chemdner/train.chemdner.vocab.txt"]
dict_fnames = [s % DICT_ROOT for s in dict_fnames]
chemicals = []
for fname in dict_fnames:
chemicals += [line.strip().split("\t")[0] for line in open(fname,"rU").readlines()]
chemicals = {term:1 for term in chemicals}
# create matchers and extract candidates
extr1 = DictionaryMatch('C', chemicals, ignore_case=True)
extr2 = RuleTokenizedDictionaryMatch('C', chemicals, ignore_case=True, tokenizer=rule_tokenizer)
extr3 = RegexMatch('C', "[αβΓγΔδεϝζηΘθικΛλμνΞξοΠπρΣστυΦφχΨψΩω]+[-]+[A-Za-z]+", ignore_case=True)
extr4 = RegexMatch('C', "([-]*(\d[,]*)+[-])", ignore_case=True)
extr5 = RegexMatch('C', "([-]*(\d[,]*)+[-])", ignore_case=True)
matcher = MultiMatcher(extr1,extr2,extr3,extr4,extr5)
# ---------------------------------------------------------------------
#
# III. Extract Candidates
#
# ---------------------------------------------------------------------
candidates = Entities(sentences, matcher)
# Crude recall estimate (ignores actual span match and tokenization problems)
mentions = [" ".join(unescape_penn_treebank([e.words[i] for i in e.idxs])) for e in candidates]
gold_mentions = list(zip(*itertools.chain.from_iterable(gold_entities))[0])
for m in mentions:
if m in gold_mentions:
gold_mentions.remove(m)
tp = gold_entity_n - len(gold_mentions)
print("Found %d candidate entities" % len(candidates))
print("Candidates: %.2f%% of all tokens" % (len(candidates)/float(word_n) * 100) )
print("Annotations %.2f%% of all tokens" % (gold_entity_n/float(word_n) * 100) )
print("~recall: %.2f (%d/%d)" % (float(tp) / gold_entity_n, tp, gold_entity_n))
candidates.dump_candidates("cache/candidates.pkl")
# ---------------------------------------------------------------------
#
# IV. Error Analysis
#
# ---------------------------------------------------------------------
# What are we missing due to tokenization errors?
regexes = [re.compile("[αβΓγΔδεϝζηΘθικΛλμνΞξοΠπρΣστυΦφχΨψΩω]+[-]+[A-Za-z]+")]
regexes += [re.compile("([-]*(\d[,]*)+[-])")]
def regex_match(t):
for regex in regexes:
if regex.search(t):
return True
return False
tokenization_errors = [term for term in gold_mentions if term in chemicals or regex_match(term)]
tokenization_errors = {term:tokenization_errors.count(term) for term in tokenization_errors}
oov_errors = [term for term in gold_mentions if term not in tokenization_errors]
oov_errors = {term:oov_errors.count(term) for term in oov_errors}
print("Tokenization Errors: %d" % (sum(tokenization_errors.values())))
print("Out-of-vocabulary Errors: %d" % (sum(oov_errors.values())))
for term in sorted(oov_errors.items(),key=operator.itemgetter(1),reverse=1):
print("%s: %d" % (term[0], oov_errors[term[0]]))
# ---- end of file: ddbiolib-master/demos/taggers/chemicals/chemical_extraction.py ----
from __future__ import print_function
import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '..'))
import itertools
import numpy as np
#from ddlite import *
from ddlite_candidates import Candidates
from ddlite_candidates import Ngrams,Ngram
from ddlite_matchers import DictionaryMatch,Union,Concat,RegexMatchSpan
from ddbiolib.datasets import ncbi_disease
from versioning import CandidateVersioner
from ontologies.umls import UmlsNoiseAwareDict
from ontologies.ctd import load_ctd_dictionary
from ontologies.bioportal import load_bioportal_csv_dictionary
# load_acronym_dictionary (used below) is defined in tools.py alongside this script
from tools import load_acronym_dictionary
def clean_dictionary(dictionary ,stopwords, ignore_case=True):
'''Remove stopwords'''
rm = []
for term in dictionary:
t = term.lower() if ignore_case else term
if t in stopwords:
rm += [term]
for t in rm:
del dictionary[t]
def get_stopwords():
dictfiles = ["../data/dicts/diseases/cell_molecular_dysfunction.txt",
"../data/dicts/diseases/umls_geographic_areas.txt",
"../data/dicts/diseases/stopwords.txt"]
stopwords = {}
for fname in dictfiles:
d = [line.strip().split("\t")[0].lower() for line in open(fname).readlines()]
d = [t for t in d if len(t) > 1 and t]
stopwords.update(dict.fromkeys(d))
return stopwords
common_disease_acronyms = ["AAPC", "AAS", "ABCD syndrome", "ACC", "ACS", "ACTH deficiency", "AD", "AD", "ADD",
"ADD-RT", "ADEM", "ADHD", "AF", "AGS", "AHC", "AIDS", "AIP", "ALA DD", "ALI", "ALS",
"AMD", "AOS", "APA", "APS", "ARBD", "ARD", "ARDS", "Acute respiratory distress syndrome",
"ARND", "AS", "ASD", "ASDs", "A-T", "AVMs", "B-NHL", "BBS", "BEB", "BD", "BEH", "BFIC", "BH", "BMD", "BPD",
"BPH", "BS", "BSE", "BSS", "BV", "CACH", "CAD", "CADSIL", "CAPD", "CCD", "CCHF", "CCHS",
"CCM", "CDG", "CDGS", "CEP", "CES", "CESD", "CF", "CFIDS", "CFS", "CGBD", "CHD", "CHF", "CHS"
"CIDP", "CIPA", "CJD", "CLD", "COFS", "COPD", "CP", "CP/CPPS", "CPDD", "CPM", "CPPS", "CRF",
"CRKP", "CRPS", "CSA", "CSD", "CVD", "DAS", "DBA", "DBMD", "DD", "DEF", "DF", "DH",
"DHF", "DiG", "DLB", "DM", "DMD", "DP", "DRSP disease", "DS", "DSPS", "DTs", "DVD",
"DVT", "ED", "EDS", "EEE", "EHK", "EMH", "EMR", "ENS", "EPM", "EPP", "ESRD", "ESS", "EVA"
"FAE", "FASDs", "FFI", "FMA", "FMD", "FMF", "FMS", "FNDI", "FSP", "FTD", "FVS", "FXS",
"GAD", "GAN", "GAS disease", "GBS", "GBS disease", "GCE", "GERD", "GI", "GIB", "GN",
"GRMD", "GSS disease", "GT/LD", "GVHD", "GWD", "HAS", "HBL", "HCP", "HD", "HDL2", "HEELP syndrome",
"HFA", "HFMD", "HFRS", "HI", "HiB disease", "HMSN Type III", "HMS", "HOH", "HTN",
"HPRT deficiency", "HPS", "HPV Infection", "HSP", "IBD", "IBIDS syndrome", "IBM",
"IBS", "IBS", "IC/PBS", "ICF syndrome", "IDMS", "IHA", "IED", "IFAP syndrome", "INAD", "IP",
"IRD", "IS", "ITP", "JAS", "JE", "JHD", "JRA", "JT", "KS", "KSS", "KTW Syndrome",
"LCM", "LEMS", "LFA", "LGV", "LKS", "LNS", "LP", "MAC", "MBD", "MCS", "MD",
"MDD", "MDR TB", "MEF", "MHP", "MID", "MJD", "ML", "MLD", "MMA", "MMR", "MMRV", "MND", "MOH", "MPD",
"MPS I", "MPS II", "MPS III", "MPS IV", "MPS VI", "MPS VII", "MR/DD", "MS", "MSA",
"MSDD", "NAS", "NBIA", "NCL", "NF1", "NF2", "NKH", "NLD", "NMDs", "NMO", "NMS", "NP",
"NPC1", "NPH", "NTD", "NTDs", "OA", "OCD", "ODD", "OMA", "ON", "OPC", "OPCA", "OSA",
"PBC", "PBD", "PCOS", "PCT", "PDD", "PDD-NOS", "PDD/NOS", "PKAN", "PLMD", "PLS",
"PMD", "PML", "PMS", "POTS", "PPMA", "PPS", "PSC", "PSP", "PVL", "PW", "Q fever", "RA",
"RAD", "RIND", "RLF", "RLS", "RMDs", "ROP", "RS", "RSD", "RTI", "SARS", "SB", "SBS", "SC",
"SIDS", "SIS", "SLE", "SM", "SMA", "SMEI", "SMS", "SOD", "SPS", "SSPE", "STEMI",
"STD", "SUNCT", "SWS", "TAC", "TB", "TBI", "TCD", "TCS", "TEF", "TIA", "TMH", "TMJ/TMD", "TMR",
"TN", "TOS", "TS", "TS", "TSC", "TSEs", "TSP", "TTH", "TTP", "UCPPS", "UDA", "UTIs", "UC",
"URI", "VCFS", "vCJD", "VD", "VHF", "VP", "VSD", "VVC", "VWM disease",
"WAGR syndrome", "WD", "WEE", "WFS", "WS", "XLT", "XDR TB", "XLDCM", "XLSA", "XP", "YSS", "YVS",
"ZBLS", "SCA1", "SCA2", "C1D", "C3D", "C4D", "C5D", "C6D", "C7D", "CCALD", "CL/P",
"SJS type 2"]
def clean_dictionary(d,stopwords,ignore_case=True):
'''Remove some stopwords'''
rm = []
for term in d:
t = term.lower() if ignore_case else term
if t in stopwords:
rm += [term]
for t in rm:
del d[t]
def get_stopwords():
dictfile = "dicts/cell_molecular_dysfunction.txt"
stopwords = dict.fromkeys([line.strip().split("\t")[0].lower() for line in open(dictfile).readlines()])
#stopwords = {}
dictfile = "dicts/umls_geographic_areas.txt"
terms = [line.strip().split("\t")[0].lower() for line in open(dictfile).readlines()]
stopwords.update(dict.fromkeys(terms))
dictfile = "dicts/stopwords.txt"
terms = [line.strip().split("\t")[0].lower() for line in open(dictfile).readlines()]
stopwords.update(dict.fromkeys(terms))
stopwords[""] = 1
stopwords["a"] = 1
return stopwords
def get_gold_labels(corpus,doc_ids=None):
'''Generate gold labels for the provided corpus. Note: requires an "annotations"
attribute'''
labels = []
for doc in corpus:
if not doc_ids or doc.doc_id not in doc_ids:
continue
sent_offsets = [s._asdict()["char_offsets"][0] for s in doc.sentences]
for label in doc.attributes["annotations"]:
sidx = -1
for i in range(len(sent_offsets) - 1):
if label.start >= sent_offsets[i] and label.end - 1 < sent_offsets[i+1]:
sidx = i
break
if label.start >= sent_offsets[-1]:
sidx = len(sent_offsets) - 1
# label crosses multiple sentence boundaries
if sidx == -1:
print("WARNING sentence boundary error",file=sys.stderr)
metadata = {"mention_type":label.mention_type}
labels += [Ngram(label.start, label.end - 1, doc.sentences[sidx], metadata=metadata)]
if labels[-1].get_span() == "":
print(labels[-1])
print(label)
print(doc.sentences[sidx])
print("--------")
return labels
# --------------------------------------------
# Load/Parse Corpus
# --------------------------------------------
corpus = ncbi_disease.load_corpus()
# --------------------------------------------
# Load Dictionaries / Ontologies
# --------------------------------------------
# UMLS semantic types that map to diseases or disorders
positive = ["Acquired Abnormality",
"Anatomical Abnormality",
"Congenital Abnormality",
"Disease or Syndrome",
"Experimental Model of Disease",
"Finding",
"Injury or Poisoning",
"Mental or Behavioral Dysfunction",
"Neoplastic Process",
"Pathologic Function",
"Sign or Symptom"]
negative = ["Physiologic Function","Molecular Function","Genetic Function",
"Cell Function","Organ or Tissue Function","Organism Function", "Food",
"Mental Process","Molecular Sequence","Nucleotide Sequence", "Animal",
"Carbohydrate Sequence","Amino Acid Sequence","Body Substance",
"Cell","Gene or Genome","Cell Component","Functional Concept",
"Spatial Concept","Molecular Biology Research Technique",
"Laboratory or Test Result","Laboratory or Test Result",
"Animal","Therapeutic or Preventive Procedure","Bacterium","Phenomenon or Process",
"Quantitative Concept","Temporal Concept","Natural Phenomenon or Process",
"Body Part, Organ, or Organ Component","Body Location or Region",
"Body Space or Junction", "Pathologic Function"]
umls_terms = UmlsNoiseAwareDict(positive=positive, name="terms", ignore_case=False)
umls_abbrv = UmlsNoiseAwareDict(positive=positive, name="abbrvs", ignore_case=False)
#umls_stopwords = UmlsNoiseAwareDict(negative=negative, name="terms", ignore_case=True)
diseases = umls_terms.dictionary() # create stand alone dictionaries
#abbrvs = umls_abbrv.dictionary()
#stopwords = umls_stopwords.dictionary()
#diseases = load_disease_dictionary()
abbrvs = load_acronym_dictionary()
abbrvs.update(dict.fromkeys(common_disease_acronyms))
# Load various other disease ontologies
ordo = load_bioportal_csv_dictionary("dicts/ordo.csv")
doid = load_bioportal_csv_dictionary("dicts/DOID.csv")
ctd = load_ctd_dictionary("dicts/CTD_diseases.tsv") # CTD's MEDIC disease vocabulary
diseases.update(ordo)
diseases.update(doid)
diseases.update(ctd)
clean_dictionary(diseases,get_stopwords())
print("DICTIONARY Disease Terms: {}".format(len(diseases)))
print("DICTIONARY Abbreviation/Acronym Terms: {}".format(len(abbrvs)))
# --------------------------------------------
# Match Candidates
# --------------------------------------------
# Define a candidate space
ngrams = Ngrams(n_max=8)
longest_match = True
dict_diseases = DictionaryMatch(label='disease', d=diseases,
ignore_case=True, longest_match=longest_match)
dict_abbrvs = DictionaryMatch(label='disease_acronyms', d=abbrvs,
ignore_case=False, longest_match=longest_match)
stem_forms = DictionaryMatch(label='disease_stems', d=dict.fromkeys(diseases.keys() + abbrvs.keys()),
ignore_case=False, longest_match=longest_match)
# suffix concatenation matchers (e.g. "<stem> deficiency")
suffixes = DictionaryMatch(label="prefixes", d=['deficiency', 'deficient', 'deficienty', 'syndrome'],
ignore_case=True, longest_match=longest_match)
disease_deficiency = Concat(stem_forms, suffixes)
# disease types
digits = map(unicode,range(1,10))
types = DictionaryMatch(label="prefixes", d=['type', 'class', 'stage', 'factor'],
ignore_case=True, longest_match=longest_match)
type_nums = DictionaryMatch(label="prefixes", d=['i', 'ii', 'iii', 'iv', 'v', 'vi', '1a', 'iid', 'a', 'b', 'c', 'd'] + digits,
ignore_case=True, longest_match=longest_match)
disease_types_right = Concat(stem_forms, Concat(types,type_nums))
disease_types_left = Concat(Concat(types,type_nums),stem_forms)
# deficiency of
prefixes = DictionaryMatch(label="prefixes",d=['deficiency of',"inherited"],
ignore_case=True, longest_match=longest_match)
deficiency_of = Concat(prefixes,stem_forms)
# sex_linked
gene_linked = DictionaryMatch(label="x-linked", d=["x-linked","x linked","x linked recessive","semidominant",
"hereditary",
"recessive","x-linked recessive", "x-linked dominant",
"dominant",'autosomal recessive',"autosomal dominant",
"autossomal recessive"],
ignore_case=True, longest_match=longest_match)
gene_linked = Concat(gene_linked,stem_forms)
cancers = RegexMatchSpan(label="cancers",rgx="(^[A-Za-z]+ (and|and/or|or) [A-Za-z]+ cancer[s]*$)|(^[A-Za-z]+[\/-][A-Za-z]+ cancer[s]*$)")
deficient = RegexMatchSpan(label="deficient",rgx="^[A-Za-z0-9]+[-]deficient$")
matcher = Union(dict_diseases, dict_abbrvs, disease_deficiency, deficient, cancers,
disease_types_left, disease_types_right, deficiency_of, gene_linked)
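# Illustrative (not exhaustive) spans each component above is intended to capture,
# assuming the disease stems appear in the dictionaries:
#   disease_deficiency  : "ornithine transcarbamylase deficiency"
#   disease_types_right : "diabetes mellitus type ii"
#   disease_types_left  : "type ii diabetes mellitus"
#   deficiency_of       : "deficiency of antithrombin"
#   gene_linked         : "x-linked retinitis pigmentosa"
#   cancers             : "breast and ovarian cancers"
#   deficient           : "HPRT-deficient"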
holdouts = ["training","development"]#,"testing"]
for setname in holdouts:
doc_ids = corpus.attributes["sets"][setname]
cs = Candidates(ngrams, matcher, corpus.get_sentences(doc_ids))
candidates = cs.get_candidates()
gold = frozenset(get_gold_labels(corpus,doc_ids))
print("----------------------------------")
print(setname)
print("----------------------------------")
print("%d PubMed abstracts" % len(doc_ids))
print("%d Disease gold entities" % len(gold))
print("%d Candidates" % len(candidates))
cs.gold_stats(gold)
fn = []
for c in gold:
if c not in candidates:
fn += [c.get_span()]
fn = {term:fn.count(term) for term in set(fn)}
for item in sorted(fn.items(),key=lambda x:x[1],reverse=0):
print(item)
#for c in candidates:
# print(c)
'''
for label in gold:
if label not in candidates:
print(label.get_span(), label.get_span().lower() in diseases)
'''
sys.exit()
# ---- end of file: ddbiolib-master/demos/taggers/diseases/disease_extraction.py ----
import bz2
from ontologies.bioportal import load_bioportal_csv_dictionary
def load_disease_dictionary():
# UMLS SemGroup Disorders
#dictfile = "dicts/umls_disorders.bz2"
#dictfile = "dicts/umls_disorders_snomed_msh_mth.bz2"
dictfile = "dicts/umls_disorders_v2.bz2"
diseases = {line.strip().split("\t")[0]:1 for line in bz2.BZ2File(dictfile, 'rb').readlines()}
diseases = {word:1 for word in diseases if not word.isupper()}
# Orphanet Rare Disease Ontology
ordo = load_bioportal_csv_dictionary("dicts/ordo.csv")
ordo = {word:1 for word in ordo if not word.isupper()}
diseases.update(ordo)
# Human Disease Ontology
doid = load_bioportal_csv_dictionary("dicts/DOID.csv")
doid = {word:1 for word in doid if not word.isupper()}
diseases.update(doid)
# ------------------------------------------------------------
# remove cell dysfunction terms
dictfile = "dicts/cell_molecular_dysfunction.txt"
terms = [line.strip().split("\t")[0] for line in open(dictfile).readlines()]
diseases = {word:1 for word in diseases if word not in terms}
dictfile = "dicts/umls_geographic_areas.txt"
terms = [line.strip().split("\t")[0] for line in open(dictfile).readlines()]
diseases = {word:1 for word in diseases if word not in terms}
# ------------------------------------------------------------
# remove stopwords
dictfile = "dicts/stopwords.txt"
stopwords = [line.strip().split("\t")[0] for line in open(dictfile).readlines()]
diseases = {word:1 for word in diseases if word.lower() not in stopwords}
return diseases
def load_acronym_dictionary():
dictfile = "dicts/umls_disorders_v2.bz2"
diseases = {line.strip().split("\t")[0]:1 for line in bz2.BZ2File(dictfile, 'rb').readlines()}
diseases = {word:1 for word in diseases if word.isupper()}
# Orphanet Rare Disease Ontology
ordo = load_bioportal_csv_dictionary("dicts/ordo.csv")
ordo = {word:1 for word in ordo if word.isupper()}
diseases.update(ordo)
# Human Disease Ontology
doid = load_bioportal_csv_dictionary("dicts/DOID.csv")
doid = {word:1 for word in doid if word.isupper()}
diseases.update(doid)
# filter by char length
diseases = {word:1 for word in diseases if len(word) > 1}
return diseases
# ---- end of file: ddbiolib-master/demos/taggers/diseases/tools.py ----
import bz2
from ontologies.umls import UmlsNoiseAwareDict
from ontologies.ctd import load_ctd_dictionary
from ontologies.bioportal import load_bioportal_csv_dictionary
#from tools import load_disease_dictionary,load_acronym_dictionary
def search(term,dictionary):
m = {}
for sty in dictionary:
for sab in dictionary[sty]:
if term in dictionary[sty][sab]:
m[sty] = 1
return m.keys()
# UMLS semantic types that map to diseases or disorders
positive = ["Acquired Abnormality",
"Anatomical Abnormality",
"Congenital Abnormality",
"Disease or Syndrome",
"Experimental Model of Disease",
"Finding",
"Injury or Poisoning",
"Mental or Behavioral Dysfunction",
"Neoplastic Process",
"Pathologic Function",
"Sign or Symptom"]
umls_terms = UmlsNoiseAwareDict(positive=positive, prefix="terms")
umls_abbrv = UmlsNoiseAwareDict(positive=positive, prefix="abbrvs", ignore_case=False)
diseases = umls_terms.dictionary() # create stand alone dictionaries
abbrvs = umls_abbrv.dictionary()
print len(diseases)
dictfile = "dicts/umls_disorders_v2.bz2"
disorders = {line.strip().split("\t")[0]:1 for line in bz2.BZ2File(dictfile, 'rb').readlines()}
#disorders = {word:1 for word in disorders if word.isupper()}
disorders = {word.lower():1 for word in disorders if not word.isupper()}
print len(disorders)
for term in diseases:
if term not in disorders:
print term, search(term,umls_terms._dictionary)
# ---- end of file: ddbiolib-master/demos/taggers/diseases/dictionary_tests.py ----
import bisect, re
from ddlite_matchers import *
from collections import defaultdict
class NcbiDiseaseDictionaryMatch(NgramMatcher):
"""Selects according to ngram-matching against a dictionary i.e. list of words"""
def init(self):
# Load opts- this is from the kwargs dict
self.label = self.opts['label']
self.dictionary = self.opts['d']
self.match_attrib = self.opts.get('match_attrib', 'words')
self.ignore_case = self.opts.get('ignore_case', True)
self.longest_match = self.opts.get('longest_match', True)
# Split the dictionary up by phrase length (i.e. # of tokens)
self.dl = defaultdict(lambda : set())
for phrase in self.dictionary:
self.dl[len(phrase.split())].add(phrase.lower() if self.ignore_case else phrase)
self.dl.update((k, frozenset(v)) for k,v in self.dl.iteritems())
# Get the *DESC order* ngram range for this dictionary
self.ngr = range(max(1, min(self.dl.keys())), max(self.dl.keys())+1)[::-1]
def _apply(self, s, idxs=None):
"""
Take in an object or dictionary which contains match_attrib
and get the index lists of matching phrases
If idxs=None, consider all indices, otherwise constrain to subset of idxs
"""
seq = self._get_attrib_seq(s)
# If idxs=None, consider the full index range, otherwise only subseqs of idxs
start = 0 if idxs is None else min(idxs)
end = len(seq) if idxs is None else max(idxs)+1
L = len(seq) if idxs is None else len(idxs)
# NOTE: We assume that idxs is a range of consecutive indices!
if L != end - start:
raise ValueError("Candidates must be over consecutive spans of indices")
# Keep track of indexes we've already matched so that we can e.g. keep longest match only
matched_seqs = []
# Loop over all ngrams
for l in filter(lambda n : n <= L, self.ngr):
for i in range(start, end-l+1):
ssidx = range(i, i+l)
# If we are only taking the longest match, skip if a subset of already-tagged idxs
if self.longest_match and any(set(ssidx) <= ms for ms in matched_seqs):
continue
phrase = ' '.join(seq[i:i+l])
if phrase.lower() == 'deficiency' and i+l<L and seq[i+l].lower() in ['in', 'of']:
continue
phrase = phrase.lower() if self.ignore_case else phrase
if phrase in self.dl[l]:
min_idx = min(ssidx)
max_idx = max(ssidx)
if (max_idx+1<L and seq[max_idx+1].lower() in ['deficiency', 'deficient', 'deficienty', 'syndrome']):
ssidx = range(min_idx, max_idx+2)
min_idx = min(ssidx)
max_idx = max(ssidx)
if (max_idx+2<L and seq[max_idx+1].lower() in ['type', 'class', 'stage', 'factor'] and (seq[max_idx+2].lower() in ['i', 'ii', 'iii', 'iv', 'v', 'vi', '1a', 'iid', 'a', 'b', 'c', 'd'] or seq[max_idx+2].isdigit())):
ssidx = range(min_idx, max_idx+3)
min_idx = min(ssidx)
max_idx = max(ssidx)
if (min_idx-2>=0 and seq[min_idx-2].lower() in ['type', 'class', 'stage', 'factor'] and (seq[min_idx-1].lower() in ['i', 'ii', 'iii', 'iv', 'v', 'vi', '1a', 'iid', 'a', 'b', 'c', 'd'] or seq[min_idx-1].isdigit())):
ssidx = range(min_idx-2, max_idx+1)
min_idx = min(ssidx)
max_idx = max(ssidx)
if (min_idx-2>=0 and seq[min_idx-2].lower() in ['deficiency'] and seq[min_idx-1].lower() in ['of']):
ssidx = range(min_idx-2, max_idx+1)
min_idx = min(ssidx)
max_idx = max(ssidx)
if (min_idx-1>=0 and seq[min_idx-1].lower() in ['inherited']):
ssidx = range(min_idx-1, max_idx+1)
if any(set(ssidx) <= ms for ms in matched_seqs):
continue
matched_seqs.append(frozenset(ssidx))
yield list(ssidx), self.label
checkbox = []
matched_seqs = sorted(matched_seqs, key=lambda x:min(x))
for i in range(len(matched_seqs)):
if i in checkbox: continue
min_idx = min(list(matched_seqs[i]))
max_idx = max(list(matched_seqs[i]))
haveone = True
checked = False
checkbox.append(i)
while haveone:
haveone = False
for j in range(len(matched_seqs)):
if j not in checkbox and (max_idx + 1 == min(list(matched_seqs[j])) or (max_idx + 2 == min(list(matched_seqs[j])) and seq[max_idx + 1].lower() in ['and/or', 'of', 'and', 'or'])):
checkbox.append(j)
max_idx = max(list(matched_seqs[j]))
haveone = True
checked = True
break
if checked:
pass
#yield list(range(min_idx, max_idx + 1)), self.label
# ---- end of file: ddbiolib-master/demos/taggers/diseases/matchers.py ----
import bz2
import sys
import csv
import re
import codecs
import numpy as np
import itertools
import cPickle
import ddlite
from ddlite import SentenceParser,Entities
from ddlite import Union, DictionaryMatch, RegexNgramMatch, CandidateExtractor
from utils import unescape_penn_treebank
from datasets import NcbiDiseaseCorpus
class NounPhraseMatcher(CandidateExtractor):
def init(self):
self.pattern = re.compile("(NN[SP]*|JJ[S]*|DT|CC)")
self.label = self.opts['label']
self.match_attrib = self.opts.get('match_attrib', 'words')
self.ignore_case = self.opts.get('ignore_case', True)
self.longest_match = self.opts.get('longest_match', True)
def _apply(self, s, idxs=None):
noun_phrases = []
np = []
for i,tag in enumerate(s.poses):
if self.pattern.search(tag):
np += [(i,tag)]
elif np:
noun_phrases += [np]
np = []
for np in noun_phrases:
while np and re.search("(CC|[PW]*DT)",np[0][1]):
np.pop(0)
while np and re.search("(CC|[PW]*DT)",np[-1][1]):
np.pop()
if not np or re.search("JJ[RS]*",np[0][1]):
continue
idxs,poses = zip(*np)
#print np, s.words[min(idxs):max(idxs)+1]
#print "----"
yield list(idxs), 'x'
np_regex = re.compile("^((NN[SP]*|JJ[S]*|DT|CC)\s*)+$")
pos_regex = re.compile("(NN[SP]*|JJ[S]*|DT|CC)")
np_regex = re.compile("^((NN[SP]*|JJ[S]*|DT)\s*)+$")
pos_regex = re.compile("(NN[SP]*|JJ[S]*|DT)")
np_regex = re.compile("^((NN[SP]*|JJ[S]*|DT)\s*)+$")
pos_regex = re.compile("(NN[SP]*|JJ[S]*|DT)")
def is_noun_phrase(t):
seq = " ".join(t)
return np_regex.search(seq) is not None
def multiset_intersection(a,b):
a = [t for t in a]
b = [t for t in b]
c = []
while len(a) > 0 and len(b) > 0:
i = a.pop(0)
if i in b:
c += [i]
b.remove(i)
return c
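# e.g. multiset_intersection(['a','b','b','c'], ['b','b','d']) -> ['b','b']
# (matched elements are consumed from b, so duplicates are counted correctly)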
def noun_phrases(s):
noun_phrases = []
np = []
for i,tag in enumerate(s.poses):
if pos_regex.search(tag):
np += [(i,tag)]
elif np:
noun_phrases += [np]
np = []
for np in noun_phrases:
while np and re.search("(CC|[PW]*DT)",np[0][1]):
np.pop(0)
while np and re.search("(CC|[PW]*DT)",np[-1][1]):
np.pop()
if not np:
continue
idxs,_ = zip(*np)
# remove stopwords
idxs = list(idxs)
while idxs and s.lemmas[idxs[0]] in left_args:
idxs.pop(0)
while idxs and s.lemmas[idxs[-1]] in right_args:
idxs.pop()
if not idxs:
continue
yield [s.words[i] for i in idxs]
ROOT = "/Users/fries/dd-bio-examples/"
INDIR = "/Users/fries/Desktop/ncbi/"
OUTDIR = "/Users/fries/Desktop/ncbi/candidates/"
pubterms = ["background","methods","introduction"]
left_args = ["novel","symptomatic","common","severe","major","primary",
"distinct","gross","progressive","late-onset","classical",
"recurrent","significant","moderate","mild","subsequent"]
left_args += ["dog","mouse","human"]
left_args += pubterms
right_args = ["gene","mutation","locus","allele","cell","line", "heterozygotes","pairs",
"type","protein","phenotype","deletion", "region","codon", "inhibition","inhibitor",
"chromosome", "gene", "expression","suppressor","wild-type"]
right_args += ["individual","patient","carrier","parent",'offspring',"cases"]
right_args += ["dog","mouse","human","female","male","model"]
right_args += pubterms
#consistent, associated, caused,characterized
# <Qualitative Concept>
# --------------------------------------------
# Load/Parse Corpus
# --------------------------------------------
cache = "{}/cache/".format(INDIR)
infile = "{}/corpora/".format(INDIR)
holdouts = ["training"]
corpus = NcbiDiseaseCorpus(infile, parser=None, cache_path=cache)
dev_set = list(itertools.chain.from_iterable([corpus.cv[setdef].keys() for setdef in holdouts]))
tagged = [(corpus[doc_id]["sentences"],corpus[doc_id]["tags"]) for doc_id in dev_set]
n,p,N = 0.0,0.0,0.0
num_noun_phrases = 0
for doc,labels in tagged:
sentences = zip(doc,labels)
for s,tags in zip(doc,labels):
np_ners,ners = [],[]
for text,span in tags:
seq = s.poses[min(span):max(span)]
if is_noun_phrase(seq):
phrase = s.words[min(span):max(span)]
np_ners += [phrase]
n += 1
ners += [s.words[min(span):max(span)]]
N += 1
# extract sentence noun phrases
np = [np for np in noun_phrases(s)]
np = map(tuple,np)
num_noun_phrases += len(np)
# NERs vs. NP NERs
np_ners = map(tuple,np_ners)
ners = map(tuple,ners)
# exact NER / NP sequence match
intersection = multiset_intersection(np,np_ners)
p += len(intersection)
# missed noun phrases
mismatched = [x for x in np_ners if x not in intersection]
difference = [x for x in np if x not in intersection]
if mismatched:
print "MISSED", mismatched
print "FP NP ", difference
print zip(s.words, s.poses)
print "----------"
#Population Group
#Family Group
print n/N, n, N
print p/N, p, N
print num_noun_phrases
# --------------------------------------------
# Match Candidates
# --------------------------------------------
longest_match = True
matcher = NounPhraseMatcher(label='NP')
gold_labels = []
scores = {"num_candidates":0, "num_cand_tokens":0,"class_balance":{}}
for cv_set in ["training"]:
sentences = [corpus[doc_id]["sentences"] for doc_id in corpus.cv[cv_set]]
sentences = list(itertools.chain.from_iterable(sentences))
# ---- end of file: ddbiolib-master/demos/taggers/diseases/old/candidate_generation.py ----
import bz2
import sys
import csv
import re
import codecs
import numpy as np
import itertools
import cPickle
import ddlite
from ddlite import SentenceParser,Entities
from ddlite import Union, DictionaryMatch, RegexNgramMatch, CandidateExtractor
from utils import unescape_penn_treebank
from datasets import NcbiDiseaseCorpus
class PrefixDictionaryMatcher(RegexNgramMatch):
def init(self):
super(PrefixDictionaryMatcher, self).init()
self.dictionary = self.opts['dictionary']
self.ignore_case = self.opts.get('ignore_case', True)
def _apply(self, s, idxs=None):
candidates = [c for c in super(PrefixDictionaryMatcher, self)._apply(s,idxs)]
for idxs,label in candidates:
matched = False
words = unescape_penn_treebank([s.words[i] for i in idxs])
matched = reduce(lambda x,y:x or y,[t in self.dictionary for t in words])
'''
for i in range(len(idxs)):
for j in range(i+1,len(idxs)):
phrase = " ".join(unescape_penn_treebank(s.words[idxs[i]:idxs[j]+1]))
phrase = phrase.lower() if self.ignore_case else phrase
print phrase
if phrase in self.dictionary:
matched = True
#break
'''
if matched:
yield idxs, label
class HackDictionaryMatcher(RegexNgramMatch):
def init(self):
super(HackDictionaryMatcher, self).init()
self.dictionary = self.opts['dictionary']
self.ignore_case = self.opts.get('ignore_case', True)
def _apply(self, s, idxs=None):
candidates = [c for c in super(HackDictionaryMatcher, self)._apply(s,idxs)]
for idxs,label in candidates:
matched = False
words = unescape_penn_treebank([s.words[i] for i in idxs])
matched = reduce(lambda x,y:x or y,[t in self.dictionary for t in words])
s = " ".join(words)
if matched and (" in the " in s or " of the " in s):
yield idxs, label
'''
def _apply(self, s, idxs=None):
candidates = [c for c in super(HackDictionaryMatcher, self)._apply(s,idxs)]
for idxs,label in candidates:
matched = False
yield idxs, label
words = unescape_penn_treebank([s.words[i] for i in idxs])
matched = reduce(lambda x,y:x or y,[t in self.dictionary for t in words])
s = " ".join(words)
if re.search(" (of|in) the ", s) != None and matched:
yield idxs, label
'''
class NounPhraseMatcher(CandidateExtractor):
""" Match Noun Phrases, L/R stripped of DTs and CCs"""
def init(self):
self.pattern = re.compile("(NN[SP]*|JJ[S]*|DT|CC)")
self.label = self.opts['label']
self.match_attrib = self.opts.get('match_attrib', 'words')
self.ignore_case = self.opts.get('ignore_case', True)
self.longest_match = self.opts.get('longest_match', True)
def _apply(self, s, idxs=None):
noun_phrases = []
np = []
for i,tag in enumerate(s.poses):
if self.pattern.search(tag):
np += [(i,tag)]
elif np:
noun_phrases += [np]
np = []
for np in noun_phrases:
while np and re.search("(CC|[PW]*DT)",np[0][1]):
np.pop(0)
while np and re.search("(CC|[PW]*DT)",np[-1][1]):
np.pop()
if not np or re.search("JJ[RS]*",np[0][1]):
continue
idxs,poses = zip(*np)
yield list(idxs), 'x'
def load_bioportal_csv_dictionary(filename):
'''BioPortal Ontologies
http://bioportal.bioontology.org/'''
reader = csv.reader(open(filename,"rU"),delimiter=',', quotechar='"')
d = [line for line in reader]
dictionary = {}
for line in d[1:]:
row = dict(zip(d[0],line))
dictionary[row["Preferred Label"]] = 1
dictionary.update({t:1 for t in row["Synonyms"].split("|")})
return dictionary
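# The BioPortal CSV export is assumed to have a header row with at least a
# "Preferred Label" column and a pipe-delimited "Synonyms" column, e.g. (abridged):
#
#   Class ID,Preferred Label,Synonyms,...
#   http://purl...,Marfan syndrome,Marfan's syndrome|MFS,...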
def load_disease_dictionary(rootdir):
# UMLS SemGroup Disorders
#dictfile = "dicts/umls_disorders.bz2"
#dictfile = "dicts/umls_disorders_snomed_msh_mth.bz2"
dictfile = "dicts/umls_disorders_v2.bz2"
diseases = {line.strip().split("\t")[0]:1 for line in bz2.BZ2File(dictfile, 'rb').readlines()}
diseases = {word:1 for word in diseases if not word.isupper()}
# Orphanet Rare Disease Ontology
ordo = load_bioportal_csv_dictionary("dicts/ordo.csv")
ordo = {word:1 for word in ordo if not word.isupper()}
diseases.update(ordo)
# Human Disease Ontology
doid = load_bioportal_csv_dictionary("dicts/DOID.csv")
doid = {word:1 for word in doid if not word.isupper()}
diseases.update(doid)
# ------------------------------------------------------------
# remove cell dysfunction terms
dictfile = "dicts/cell_molecular_dysfunction.txt"
terms = [line.strip().split("\t")[0] for line in open(dictfile).readlines()]
diseases = {word:1 for word in diseases if word not in terms}
dictfile = "dicts/umls_geographic_areas.txt"
terms = [line.strip().split("\t")[0] for line in open(dictfile).readlines()]
diseases = {word:1 for word in diseases if word not in terms}
# ------------------------------------------------------------
# NCBI training set vocabulary
dictfile = "dicts/ncbi_training_diseases.txt"
terms = [line.strip().split("\t")[0] for line in open(dictfile).readlines()]
terms = {word:1 for word in terms if not word.isupper()}
diseases.update(terms)
# remove stopwords
dictfile = "dicts/stopwords.txt".format(rootdir)
stopwords = [line.strip().split("\t")[0] for line in open(dictfile).readlines()]
diseases = {word:1 for word in diseases if word.lower() not in stopwords}
return diseases
def load_acronym_dictionary(rootdir):
#dictfile = "dicts/umls_disorders.bz2"
#dictfile = "dicts/umls_disorders_snomed_msh_mth.bz2" # candidate recall: 74.59% (587/787)
dictfile = "dicts/umls_disorders_v2.bz2"
diseases = {line.strip().split("\t")[0]:1 for line in bz2.BZ2File(dictfile, 'rb').readlines()}
diseases = {word:1 for word in diseases if word.isupper()}
# Orphanet Rare Disease Ontology
ordo = load_bioportal_csv_dictionary("dicts/ordo.csv")
ordo = {word:1 for word in ordo if word.isupper()}
diseases.update(ordo)
# Human Disease Ontology
doid = load_bioportal_csv_dictionary("dicts/DOID.csv")
doid = {word:1 for word in doid if word.isupper()}
diseases.update(doid)
dictfile = "dicts/ncbi_training_diseases.txt".format(rootdir)
terms = [line.strip().split("\t")[0] for line in open(dictfile).readlines()]
terms = {word:1 for word in terms if word.isupper()}
diseases.update(terms)
# filter by char length
diseases = {word:1 for word in diseases if len(word) > 1}
return diseases
def create_corpus_dict(corpus, setdef="training"):
'''Create dictionary using annotated corpus data'''
dev_set = list(itertools.chain.from_iterable([corpus.cv[setdef].keys() for setdef in [setdef]]))
documents = [(doc_id,corpus[doc_id]["sentences"],corpus[doc_id]["tags"]) for doc_id in dev_set]
print len(dev_set),len(corpus.documents)
d = {}
for pmid,doc,labels in documents:
for i in range(0,len(doc)):
for tag in labels[i]:
mention = doc[i].words[tag[-1][0]:tag[-1][1]]
v1 = "".join(unescape_penn_treebank(mention))
v2 = tag[0].replace(" ","")
if v1 != v2:
# problem with tokenization
#print " ".join(unescape_penn_treebank(mention)), tag
pass
else:
d[" ".join(mention)] = 1
return d
ROOT = "/Users/fries/dd-bio-examples/"
INDIR = "/Users/fries/Desktop/ncbi/"
OUTDIR = "/Users/fries/Desktop/ncbi/candidates/"
# --------------------------------------------
# Load/Parse Corpus
# --------------------------------------------
cache = "{}/cache/".format(INDIR)
infile = "{}/corpora/".format(INDIR)
holdouts = ["training","development","testing"]
#parser = SentenceParser()
corpus = NcbiDiseaseCorpus(infile, parser=None, cache_path=cache)
'''
for doc_id in ['10923035']:# corpus.cv["training"]:
doc = corpus[doc_id]
print doc_id
for sentence in doc["sentences"]:
print sentence.parse
print "--------"
sys.exit()
'''
#oracle = create_corpus_dict(corpus,"development")
#oracle.update(create_corpus_dict(corpus,"testing"))
#dev_set = list(itertools.chain.from_iterable([corpus.cv[setdef].keys() for setdef in holdouts]))
#documents, gold_entities = zip(*[(corpus[doc_id]["sentences"],corpus[doc_id]["tags"]) for doc_id in dev_set])
dev_set = list(itertools.chain.from_iterable([corpus.cv[setdef].keys() for setdef in holdouts]))
documents, gold_entities = zip(*[(corpus[doc_id]["sentences"],corpus[doc_id]["tags"]) for doc_id in dev_set])
'''
# summary statistics
num_gold_entities = sum([len(s) for s in list(itertools.chain.from_iterable(gold_entities))])
num_tokens = sum([len(sent.words) for sent in list(itertools.chain.from_iterable(documents))])
print("%d PubMed abstracts" % len(documents))
print("%d Disease gold entities" % num_gold_entities)
print("%d tokens" % num_tokens)
'''
# --------------------------------------------
# Match Candidates
# --------------------------------------------
longest_match = True
'''
diseases = load_disease_dictionary(ROOT)
acronyms = load_acronym_dictionary(ROOT)
matcher_d = DictionaryMatch(label='disease', dictionary=diseases, ignore_case=True, longest_match=longest_match)
matcher_a = DictionaryMatch(label='disease_acronyms', dictionary=acronyms, ignore_case=False, longest_match=longest_match)
#matcher_oracle = DictionaryMatch(label='disease_oracle', dictionary=oracle, ignore_case=False, longest_match=longest_match)
pattern = re.compile("((NN[SP]*|JJ)\s*){2,}")
disease_terms = diseases.keys()
disease_terms = {t:1 for t in disease_terms if len(t.split()) == 1}
matcher_jj = PrefixDictionaryMatcher(label="D",match_attrib="poses",
dictionary=disease_terms, longest_match=longest_match,
regex_pattern=pattern,ignore_case=False)
'''
'''
pattern = re.compile("((NN[SP]*|JJ)\s*){2,}")
matcher_np = RegexNgramMatch(label="D",match_attrib="poses",
longest_match=longest_match,
regex_pattern=pattern,ignore_case=False)
'''
'''
pattern = re.compile("((NN[SP]*|JJ[S]*|DT|CC)\s*){2,}")
matcher_np = RegexNgramMatch(label="D",match_attrib="poses",
longest_match=longest_match,
regex_pattern=pattern,ignore_case=False)
'''
matcher_np = NounPhraseMatcher(label='NP') #matcher_np #Union(matcher_a, matcher_d, matcher_oracle)
print "matcher"
matcher = matcher_np
#matcher = Union(matcher_a,matcher_d,matcher_np)
gold_labels = []
scores = {"num_candidates":0, "num_cand_tokens":0,"class_balance":{}}
for cv_set in ["training"]:# holdouts:
scores["class_balance"][cv_set] = {}
sentences = [corpus[doc_id]["sentences"] for doc_id in corpus.cv[cv_set]]
sentences = list(itertools.chain.from_iterable(sentences))
candidates = Entities(sentences, matcher)
gold_labels = corpus.gold_labels(candidates)
pred = [1] * len(candidates)
scores[cv_set] = corpus.score(candidates, pred)
scores["num_candidates"] += len(candidates)
scores["num_cand_tokens"] += sum([len(c.idxs) for c in candidates])
tp,fp,fn = corpus.classification_errors(candidates, pred)
scores["class_balance"][cv_set]["T"] = len(tp)
scores["class_balance"][cv_set]["F"] = len(candidates) - len(tp)
# training (1650+1351)/4993 60 %
# dev (231+328)/773 72 %
# testing (286+237)/943 55 %
'''
for item in fn:
doc_id,sent_id,idxs,char_span,txt = item
print "FN"
print [corpus[doc_id]['sentences'][sent_id].words[i] for i in idxs]
print [corpus[doc_id]['sentences'][sent_id].poses[i] for i in idxs]
print
'''
np.save("{}/{}-ncbi-diseases-gold.npy".format(OUTDIR,cv_set), gold_labels)
candidates.dump_candidates("{}/{}-ncbi-candidates.pkl".format(OUTDIR,cv_set))
partial,complete = corpus.error_analysis(candidates, pred, doc_ids=corpus.cv[cv_set])
print cv_set, len(tp),len(fp),len(fn)
print len(partial)
print len(complete)
for item in complete:
uid,sent_id,idxs,char_span,text = item
s = corpus[uid]["sentences"][sent_id]
print text
print s.text
print s.poses
print
print "--------------------"
num_tokens = 1
# candidate recall
print("Found %d candidate entities (%.2f%% of all tokens)" % (scores["num_candidates"],
scores["num_cand_tokens"]/float(num_tokens)*100))
for cv_set in ["training","development","testing"]:
print "Positives:{} Negatives:{}".format(scores["class_balance"][cv_set]["T"],scores["class_balance"][cv_set]["F"])
print("[{0}] candidate recall: {1:0.2f}% ({2}/{3})".format(cv_set.upper(),scores[cv_set]["recall"]*100,
scores[cv_set]["tp"],
scores[cv_set]["tp"]+ scores[cv_set]["fn"]))
# ---- end of file: ddbiolib-master/demos/taggers/diseases/old/disease_extraction_trees.py ----
import bz2
import sys
import csv
import re
import os
import numpy as np
import itertools
import cPickle
import ddlite
from ddlite import SentenceParser,Entities
from ddlite import Union, DictionaryMatch, RegexNgramMatch
from utils import unescape_penn_treebank
from datasets import PubMedAbstractCorpus
def load_bioportal_csv_dictionary(filename):
'''BioPortal Ontologies
http://bioportal.bioontology.org/'''
reader = csv.reader(open(filename,"rU"),delimiter=',', quotechar='"')
d = [line for line in reader]
dictionary = {}
for line in d[1:]:
row = dict(zip(d[0],line))
dictionary[row["Preferred Label"]] = 1
dictionary.update({t:1 for t in row["Synonyms"].split("|")})
return dictionary
def load_disease_dictionary():
# UMLS SemGroup Disorders
#dictfile = "dicts/umls_disorders.bz2"
#dictfile = "dicts/umls_disorders_snomed_msh_mth.bz2"
dictfile = "dicts/umls_disorders_v2.bz2"
diseases = {line.strip().split("\t")[0]:1 for line in bz2.BZ2File(dictfile, 'rb').readlines()}
diseases = {word:1 for word in diseases if not word.isupper()}
# Orphanet Rare Disease Ontology
ordo = load_bioportal_csv_dictionary("dicts/ordo.csv")
ordo = {word:1 for word in ordo if not word.isupper()}
diseases.update(ordo)
# Human Disease Ontology
doid = load_bioportal_csv_dictionary("dicts/DOID.csv")
doid = {word:1 for word in doid if not word.isupper()}
diseases.update(doid)
# ------------------------------------------------------------
# remove cell dysfunction terms
dictfile = "dicts/cell_molecular_dysfunction.txt"
terms = [line.strip().split("\t")[0] for line in open(dictfile).readlines()]
diseases = {word:1 for word in diseases if word not in terms}
dictfile = "dicts/umls_geographic_areas.txt"
terms = [line.strip().split("\t")[0] for line in open(dictfile).readlines()]
diseases = {word:1 for word in diseases if word not in terms}
# ------------------------------------------------------------
# NCBI training set vocabulary
dictfile = "dicts/ncbi_training_diseases.txt"
terms = [line.strip().split("\t")[0] for line in open(dictfile).readlines()]
terms = {word:1 for word in terms if not word.isupper()}
diseases.update(terms)
# remove stopwords
dictfile = "dicts/stopwords.txt"
stopwords = [line.strip().split("\t")[0] for line in open(dictfile).readlines()]
diseases = {word:1 for word in diseases if word.lower() not in stopwords}
return diseases
def load_acronym_dictionary():
#dictfile = "dicts/umls_disorders.bz2"
#dictfile = "dicts/umls_disorders_snomed_msh_mth.bz2" # candidate recall: 74.59% (587/787)
dictfile = "dicts/umls_disorders_v2.bz2"
diseases = {line.strip().split("\t")[0]:1 for line in bz2.BZ2File(dictfile, 'rb').readlines()}
diseases = {word:1 for word in diseases if word.isupper()}
# Orphanet Rare Disease Ontology
ordo = load_bioportal_csv_dictionary("dicts/ordo.csv")
ordo = {word:1 for word in ordo if word.isupper()}
diseases.update(ordo)
# Human Disease Ontology
doid = load_bioportal_csv_dictionary("dicts/DOID.csv")
doid = {word:1 for word in doid if word.isupper()}
diseases.update(doid)
dictfile = "dicts/ncbi_training_diseases.txt"
terms = [line.strip().split("\t")[0] for line in open(dictfile).readlines()]
terms = {word:1 for word in terms if word.isupper()}
diseases.update(terms)
# filter by char length
diseases = {word:1 for word in diseases if len(word) > 1}
return diseases
def create_corpus_dict(corpus, setdef="training"):
'''Create dictionary using annotated corpus data'''
dev_set = list(itertools.chain.from_iterable([corpus.cv[setdef].keys() for setdef in [setdef]]))
documents = [(doc_id,corpus[doc_id]["sentences"],corpus[doc_id]["tags"]) for doc_id in dev_set]
print len(dev_set),len(corpus.documents)
d = {}
for pmid,doc,labels in documents:
for i in range(0,len(doc)):
for tag in labels[i]:
mention = doc[i].words[tag[-1][0]:tag[-1][1]]
v1 = "".join(unescape_penn_treebank(mention))
v2 = tag[0].replace(" ","")
if v1 != v2:
# problem with tokenization
#print " ".join(unescape_penn_treebank(mention)), tag
pass
else:
d[" ".join(mention)] = 1
return d
# --------------------------------------------
# Load/Parse Corpus
# --------------------------------------------
OUTDIR = "/Users/fries/Desktop/pubmed_abstracts/"
cache = "/Users/fries/Desktop/pubmed_abstract_cache/"
infile = "../../../datasets/chemdner_corpus/silver.abstracts.txt"
parser = SentenceParser()
corpus = PubMedAbstractCorpus(infile, parser, cache_path=cache)
# --------------------------------------------
# Match Candidates
# --------------------------------------------
diseases = load_disease_dictionary()
acronyms = load_acronym_dictionary()
matcher_d = DictionaryMatch(label='D', dictionary=diseases, ignore_case=True)
matcher_a = DictionaryMatch(label='D', dictionary=acronyms, ignore_case=False)
matcher = Union(matcher_a, matcher_d)
#pattern = re.compile("((JJ|VBN)\s)+(NN[SP]*\s*)+")
#pattern = re.compile("((NN[SP]*|JJ)\s*){2,}")
#matcher_tags = RegexNgramMatch(label="D",match_attrib="poses",regex_pattern=pattern,ignore_case=False)
matcher = Union(matcher_a, matcher_d)#, matcher_tags)
N = 1000
sentences = [doc["sentences"] for i,doc in enumerate(corpus) if i < N]
sentences = list(itertools.chain.from_iterable(sentences))
candidates = Entities(sentences, matcher)
candidates.dump_candidates("{}/v4/pubmed-candidates-sample-{}.pkl".format(OUTDIR,N))
# candidate recall
print("Found {} candidate entities".format(len(candidates)))
|
ddbiolib-master
|
demos/taggers/diseases/old/disease_extraction_pmc.py
|
import bz2
import sys
import csv
import re
import codecs
import numpy as np
import itertools
import cPickle
import ddlite
from ddlite import SentenceParser,Entities
from ddlite import Union, DictionaryMatch, RegexNgramMatch, CandidateExtractor
from utils import unescape_penn_treebank
from datasets import NcbiDiseaseCorpus
class PrefixDictionaryMatcher(RegexNgramMatch):
def init(self):
super(PrefixDictionaryMatcher, self).init()
self.dictionary = self.opts['dictionary']
self.ignore_case = self.opts.get('ignore_case', True)
def _apply(self, s, idxs=None):
candidates = [c for c in super(PrefixDictionaryMatcher, self)._apply(s,idxs)]
for idxs,label in candidates:
matched = False
words = unescape_penn_treebank([s.words[i] for i in idxs])
matched = reduce(lambda x,y:x or y,[t in self.dictionary for t in words])
'''
for i in range(len(idxs)):
for j in range(i+1,len(idxs)):
phrase = " ".join(unescape_penn_treebank(s.words[idxs[i]:idxs[j]+1]))
phrase = phrase.lower() if self.ignore_case else phrase
print phrase
if phrase in self.dictionary:
matched = True
#break
'''
if matched:
yield idxs, label
class HackDictionaryMatcher(RegexNgramMatch):
def init(self):
super(HackDictionaryMatcher, self).init()
self.dictionary = self.opts['dictionary']
self.ignore_case = self.opts.get('ignore_case', True)
def _apply(self, s, idxs=None):
candidates = [c for c in super(HackDictionaryMatcher, self)._apply(s,idxs)]
for idxs,label in candidates:
matched = False
words = unescape_penn_treebank([s.words[i] for i in idxs])
matched = reduce(lambda x,y:x or y,[t in self.dictionary for t in words])
s = " ".join(words)
if matched and (" in the " in s or " of the " in s):
yield idxs, label
'''
def _apply(self, s, idxs=None):
candidates = [c for c in super(HackDictionaryMatcher, self)._apply(s,idxs)]
for idxs,label in candidates:
matched = False
yield idxs, label
words = unescape_penn_treebank([s.words[i] for i in idxs])
matched = reduce(lambda x,y:x or y,[t in self.dictionary for t in words])
s = " ".join(words)
if re.search(" (of|in) the ", s) != None and matched:
yield idxs, label
'''
def load_bioportal_csv_dictionary(filename):
'''BioPortal Ontologies
http://bioportal.bioontology.org/'''
reader = csv.reader(open(filename,"rU"),delimiter=',', quotechar='"')
d = [line for line in reader]
dictionary = {}
for line in d[1:]:
row = dict(zip(d[0],line))
dictionary[row["Preferred Label"]] = 1
dictionary.update({t:1 for t in row["Synonyms"].split("|")})
return dictionary
def load_disease_dictionary(rootdir):
# UMLS SemGroup Disorders
#dictfile = "dicts/umls_disorders.bz2"
#dictfile = "dicts/umls_disorders_snomed_msh_mth.bz2"
dictfile = "dicts/umls_disorders_v2.bz2"
diseases = {line.strip().split("\t")[0]:1 for line in bz2.BZ2File(dictfile, 'rb').readlines()}
diseases = {word:1 for word in diseases if not word.isupper()}
# Orphanet Rare Disease Ontology
ordo = load_bioportal_csv_dictionary("dicts/ordo.csv")
ordo = {word:1 for word in ordo if not word.isupper()}
diseases.update(ordo)
# Human Disease Ontology
doid = load_bioportal_csv_dictionary("dicts/DOID.csv")
doid = {word:1 for word in doid if not word.isupper()}
diseases.update(doid)
# ------------------------------------------------------------
# remove cell dysfunction terms
dictfile = "dicts/cell_molecular_dysfunction.txt"
terms = [line.strip().split("\t")[0] for line in open(dictfile).readlines()]
diseases = {word:1 for word in diseases if word not in terms}
dictfile = "dicts/umls_geographic_areas.txt"
terms = [line.strip().split("\t")[0] for line in open(dictfile).readlines()]
diseases = {word:1 for word in diseases if word not in terms}
# ------------------------------------------------------------
# NCBI training set vocabulary
dictfile = "dicts/ncbi_training_diseases.txt"
terms = [line.strip().split("\t")[0] for line in open(dictfile).readlines()]
terms = {word:1 for word in terms if not word.isupper()}
diseases.update(terms)
# remove stopwords
dictfile = "dicts/stopwords.txt".format(rootdir)
stopwords = [line.strip().split("\t")[0] for line in open(dictfile).readlines()]
diseases = {word:1 for word in diseases if word.lower() not in stopwords}
return diseases
def load_acronym_dictionary(rootdir):
#dictfile = "dicts/umls_disorders.bz2"
#dictfile = "dicts/umls_disorders_snomed_msh_mth.bz2" # candidate recall: 74.59% (587/787)
dictfile = "dicts/umls_disorders_v2.bz2"
diseases = {line.strip().split("\t")[0]:1 for line in bz2.BZ2File(dictfile, 'rb').readlines()}
diseases = {word:1 for word in diseases if word.isupper()}
# Orphanet Rare Disease Ontology
ordo = load_bioportal_csv_dictionary("dicts/ordo.csv")
ordo = {word:1 for word in ordo if word.isupper()}
diseases.update(ordo)
# Human Disease Ontology
doid = load_bioportal_csv_dictionary("dicts/DOID.csv")
doid = {word:1 for word in doid if word.isupper()}
diseases.update(doid)
dictfile = "dicts/ncbi_training_diseases.txt".format(rootdir)
terms = [line.strip().split("\t")[0] for line in open(dictfile).readlines()]
terms = {word:1 for word in terms if word.isupper()}
diseases.update(terms)
# filter by char length
diseases = {word:1 for word in diseases if len(word) > 1}
return diseases
def create_corpus_dict(corpus, setdef="training"):
'''Create dictionary using annotated corpus data'''
dev_set = list(itertools.chain.from_iterable([corpus.cv[setdef].keys() for setdef in [setdef]]))
documents = [(doc_id,corpus[doc_id]["sentences"],corpus[doc_id]["tags"]) for doc_id in dev_set]
print len(dev_set),len(corpus.documents)
d = {}
for pmid,doc,labels in documents:
for i in range(0,len(doc)):
for tag in labels[i]:
mention = doc[i].words[tag[-1][0]:tag[-1][1]]
v1 = "".join(unescape_penn_treebank(mention))
v2 = tag[0].replace(" ","")
if v1 != v2:
# problem with tokenization
#print " ".join(unescape_penn_treebank(mention)), tag
pass
else:
d[" ".join(mention)] = 1
return d
ROOT = "/Users/fries/dd-bio-examples/"
INDIR = "/Users/fries/Desktop/dnorm/"
#OUTDIR = "/users/fries/desktop/dnorm/"
OUTDIR = "/Users/fries/Desktop/dnorm/candidates/v4-oracle/"
# --------------------------------------------
# Load/Parse Corpus
# --------------------------------------------
cache = "{}/cache3/".format(INDIR)
infile = "{}/disease_names/".format(INDIR)
holdouts = ["training","development","testing"]
parser = SentenceParser()
corpus = NcbiDiseaseCorpus(infile, parser, cache_path=cache)
'''
d = create_corpus_dict(corpus,"development")
fname = "dicts/ncbi_development_diseases.txt"
with codecs.open(fname,"w",'utf-8') as f:
f.write("\n".join(d.keys()))
d = create_corpus_dict(corpus,"testing")
fname = "dicts/ncbi_testing_diseases.txt"
with codecs.open(fname,"w",'utf-8') as f:
f.write("\n".join(d.keys()))
sys.exit()
'''
oracle = create_corpus_dict(corpus,"development")
oracle.update(create_corpus_dict(corpus,"testing"))
#dev_set = list(itertools.chain.from_iterable([corpus.cv[setdef].keys() for setdef in holdouts]))
#documents, gold_entities = zip(*[(corpus[doc_id]["sentences"],corpus[doc_id]["tags"]) for doc_id in dev_set])
dev_set = list(itertools.chain.from_iterable([corpus.cv[setdef].keys() for setdef in holdouts]))
documents, gold_entities = zip(*[(corpus[doc_id]["sentences"],corpus[doc_id]["tags"]) for doc_id in dev_set])
# summary statistics
num_gold_entities = sum([len(s) for s in list(itertools.chain.from_iterable(gold_entities))])
num_tokens = sum([len(sent.words) for sent in list(itertools.chain.from_iterable(documents))])
print("%d PubMed abstracts" % len(documents))
print("%d Disease gold entities" % num_gold_entities)
print("%d tokens" % num_tokens)
# --------------------------------------------
# Match Candidates
# --------------------------------------------
diseases = load_disease_dictionary(ROOT)
acronyms = load_acronym_dictionary(ROOT)
longest_match = True
matcher_d = DictionaryMatch(label='disease', dictionary=diseases, ignore_case=True, longest_match=longest_match)
matcher_a = DictionaryMatch(label='disease_acronyms', dictionary=acronyms, ignore_case=False, longest_match=longest_match)
matcher_oracle = DictionaryMatch(label='disease_oracle', dictionary=oracle, ignore_case=False, longest_match=longest_match)
pattern = re.compile("((NN[SP]*|JJ)\s*){2,}")
disease_terms = diseases.keys()
disease_terms = {t:1 for t in disease_terms if len(t.split()) == 1}
matcher_jj = PrefixDictionaryMatcher(label="D",match_attrib="poses",
dictionary=disease_terms, longest_match=longest_match,
regex_pattern=pattern,ignore_case=False)
pattern = re.compile("((NN[SP]*|JJ)\s*){2,}")
matcher_np = RegexNgramMatch(label="D",match_attrib="poses",
longest_match=longest_match,
regex_pattern=pattern,ignore_case=False)
matcher = Union(matcher_a, matcher_d, matcher_oracle)
gold_labels = []
scores = {"num_candidates":0, "num_cand_tokens":0,"class_balance":{}}
for cv_set in holdouts:
scores["class_balance"][cv_set] = {}
sentences = [corpus[doc_id]["sentences"] for doc_id in corpus.cv[cv_set]]
sentences = list(itertools.chain.from_iterable(sentences))
candidates = Entities(sentences, matcher)
gold_labels = corpus.gold_labels(candidates)
pred = [1] * len(candidates)
scores[cv_set] = corpus.score(candidates, pred)
scores["num_candidates"] += len(candidates)
scores["num_cand_tokens"] += sum([len(c.idxs) for c in candidates])
tp,fp,fn = corpus.classification_errors(candidates, pred)
scores["class_balance"][cv_set]["T"] = len(tp)
scores["class_balance"][cv_set]["F"] = len(candidates) - len(tp)
for item in fn:
doc_id,sent_id,idxs,char_span,txt = item
print "FN"
print [corpus[doc_id]['sentences'][sent_id].words[i] for i in idxs]
print [corpus[doc_id]['sentences'][sent_id].poses[i] for i in idxs]
print
np.save("{}/{}-ncbi-diseases-gold.npy".format(OUTDIR,cv_set), gold_labels)
candidates.dump_candidates("{}/{}-ncbi-candidates.pkl".format(OUTDIR,cv_set))
# candidate recall
print("Found %d candidate entities (%.2f%% of all tokens)" % (scores["num_candidates"],
scores["num_cand_tokens"]/float(num_tokens)*100))
for cv_set in ["training","development","testing"]:
print "Positives:{} Negatives:{}".format(scores["class_balance"][cv_set]["T"],scores["class_balance"][cv_set]["F"])
print("[{0}] candidate recall: {1:0.2f}% ({2}/{3})".format(cv_set.upper(),scores[cv_set]["recall"]*100,
scores[cv_set]["tp"],
scores[cv_set]["tp"]+ scores[cv_set]["fn"]))
|
ddbiolib-master
|
demos/taggers/diseases/old/disease_extraction.py
|
import bz2
import sys
import cPickle
import numpy as np
import itertools
from ddlite import SentenceParser,DictionaryMatch,Entities,CandidateModel
from utils import unescape_penn_treebank
from datasets import NcbiDiseaseCorpus
from sklearn.metrics import precision_score,recall_score
def find_duplicates(candidates):
for i in range(0,len(candidates)):
for j in range(i+1,len(candidates)):
a = candidates[i]
b = candidates[j]
if a.idxs==b.idxs and a.doc_id==b.doc_id and a.sent_id==b.sent_id:
print a.doc_id, a.sent_id, a.mention()
print b.doc_id, b.sent_id, b.mention()
print
def corpus_mention_summary(corpus):
'''The raw corpus doesn't match the statistics provided at
http://www.ncbi.nlm.nih.gov/CBBresearch/Dogan/DISEASE/corpus.html:
they report 5148/791/961 mentions, but we get 5145/787/960. The original
stats are either from a different version of the corpus or simply wrong. Known error:
1) duplicate document in the training data: PMID 8528200 (11 labels)
'''
# verify number of annotations
for holdout in corpus.cv:
doc_ids = corpus.cv[holdout]
num_mentions = sum([len(corpus.annotations[doc_id]) for doc_id in doc_ids])
print holdout,num_mentions, len(doc_ids)
ROOT = "../../../"
OUTDIR = "/users/fries/desktop/dnorm/"
#CANDIDATE_DIR = "/Users/fries/debug/dd-bio-examples/candidates/jason/diseases/v4-oracle/"
CANDIDATE_DIR = "/Users/fries/debug/dd-bio-examples/candidates/jason/diseases/v4-subsequences/"
cache = "/Users/fries/Desktop/dnorm/cache3/"
infile = "/Users/fries/Desktop/dnorm/disease_names/"
parser = None# SentenceParser()
corpus = NcbiDiseaseCorpus(infile, parser, cache_path=cache)
num_training = len(Entities("{}{}-ncbi-candidates.pkl".format(CANDIDATE_DIR,"training")))
num_development = len(Entities("{}{}-ncbi-candidates.pkl".format(CANDIDATE_DIR,"development")))
num_testing = len(Entities("{}{}-ncbi-candidates.pkl".format(CANDIDATE_DIR,"testing")))
print num_training, num_development
#candidates/jason/diseases/{}/training-ncbi-candidates.pkl
'''
# -------------------------------------------------------
# candidate recall
print "CANDIDATE RECALL"
prediction = len(candidates) * [1]
scores = corpus.score(candidates,prediction,holdout)
print scores
# -------------------------------------------------------
'''
cv = "development"
candidates = Entities("{}{}-ncbi-candidates.pkl".format(CANDIDATE_DIR,cv))
holdout = corpus.cv[cv].keys()
prediction = np.load("/users/fries/desktop/ncbi-dev-predictions.npy")
probability = np.load("/users/fries/desktop/ncbi-dev-probability.npy")
#prediction = np.load("/users/fries/desktop/ncbi-test-predictions.npy")
prediction = prediction[num_training:]
#prediction = prediction[num_training+num_development:]
gold_labels = corpus.gold_labels(candidates)
#prediction = [1] * len(candidates)
#print len(candidates._candidates), len(prediction), len(gold_labels)
'''
# sklearn sanity check (should match ddlite scores)
# -------------------------------------------------------
gold_labels = [1 if x==1 else 0 for x in gold_labels]
prediction = [1 if x==1 else 0 for x in prediction]
print "ddlite precision:", precision_score(gold_labels, prediction)
print "ddlite recall: ", recall_score(gold_labels, prediction)
# -------------------------------------------------------
'''
scores = corpus.score(candidates,prediction,holdout)
print scores
#corpus.error_analysis(candidates,prediction,holdout)
corpus.force_longest_match(candidates,probability,holdout)
scores = corpus.score(candidates,probability,holdout)
print scores
#774
#TP:637 FP:137 FN:150 True_N:787
|
ddbiolib-master
|
demos/taggers/diseases/old/disease_learning.py
|
import re
import os
import sys
import bz2
import csv
import codecs
import cPickle
import itertools
from operator import itemgetter
from itertools import groupby
sys.path.insert(1, "/users/fries/code/")
#import ddlite
from ddlite import *
rootdir = "/Users/fries/Code/HILDA-2016/candidates/"
#
# With training vocabulary
#
fname = "{}sen_v4/training-ncbi-candidates.pkl".format(rootdir)
#candidates = cPickle.load(open(fname,"rb"))
candidates = Entities(fname)
train_vocab = {}
for c in candidates:
term = " ".join(c.mention()).lower()
train_vocab[term] = train_vocab.get(term,0) + 1
#
# Without training vocabulary
#
fname = "{}sen_sans_training_v4/training-ncbi-candidates.pkl".format(rootdir)
#candidates = cPickle.load(open(fname,"rb"))
candidates = Entities(fname)
no_train_vocab = {}
for c in candidates:
term = " ".join(c.mention()).lower()
no_train_vocab[term] = no_train_vocab.get(term,0) + 1
missing = set(train_vocab.keys()).difference(set(no_train_vocab.keys()))
for item in missing:
print item
|
ddbiolib-master
|
demos/taggers/diseases/old/debug_dictionaries.py
|
'''
Given a parse and aligned gold annotations, export
CoNLL format files
'''
import sys
import codecs
import numpy as np
import cPickle
from ddlite import SentenceParser
from datasets import NcbiDiseaseCorpus
INDIR = "/Users/fries/Desktop/dnorm/"
cache = "{}/cache3/".format(INDIR)
infile = "{}/disease_names/".format(INDIR)
holdouts = ["training","development","testing"]
parser = None#SentenceParser()
corpus = NcbiDiseaseCorpus(infile, parser, cache_path=cache)
for cvset in holdouts:
doc_ids = corpus.cv[cvset].keys()
print len(doc_ids)
conll = corpus.conll(doc_ids)
outfile = "data/{}.conll.txt".format(cvset)
with codecs.open(outfile,"w","utf-8") as f:
f.write(conll)
|
ddbiolib-master
|
demos/taggers/diseases/old/export_conll.py
|
from nltk.tree import Tree
import re
s = '''
(ROOT
(S
(S
(NP (PRP It))
(VP (VBD was)
(UCP
(ADJP (JJ rare))
(, ,)
(CONJP (IN as) (RB not))
(VP (VBN observed)
(PP (IN in)
(NP (NNS controls)))))))
(, ,)
(CC but)
(S (RB not)
(VP (VBG segregating)
(PP (IN with)
(NP (DT the) (NN BFIC) (NN phenotype)))))
(. .)))
'''
def parse_tree(t):
#if t[0]=="(" and t[-1]==")":
m = re.search("(^\(.+)\s+(.+\)$)",t.strip(),re.DOTALL)
if m:
print t.replace(m.group(1),"")
#parse_tree(m.group(0))
#print ""
#print s
#parse_tree(s)
m = re.search("(^\(.+)\s+(.+\)$)",s.strip(),re.DOTALL)
print m.group(1)
#m = re.search("^\((.+)\)$",t,re.DOTALL)
|
ddbiolib-master
|
demos/taggers/diseases/old/tree_parse.py
|
'''
Created on Jun 17, 2016
@author: fries
'''
import bz2
import sys
import csv
import re
import os
import numpy as np
import itertools
import cPickle
import ddlite
from ddlite import SentenceParser,Entities
from ddlite import Union, DictionaryMatch, RegexNgramMatch
from utils import unescape_penn_treebank
from datasets import CdrCorpus
# --------------------------------------------
# Load/Parse Corpus
# --------------------------------------------
cache = "/Users/fries/Desktop/CDR_cache/"
infile = "/Users/fries/Desktop/CDR_Data/CDR.Corpus.v010516/"
parser = SentenceParser()
corpus = CdrCorpus(infile, parser, cache_path=cache)
for doc in corpus:
print doc["annotations"]
print "DONE"
|
ddbiolib-master
|
demos/taggers/diseases/old/naive_entity_linking.py
|
#!/usr/bin/env python
'''
<SAB>.<ACRONYMS>.<STY>.txt
snomedct.terms.enzyme.txt
snomedct.abbrv.enzyme.txt
'''
from __future__ import print_function
import os
import re
import sys
import codecs
import argparse
import ddbiolib.ontologies.umls as umls
def create_dictionaries(term_types,outdir,dict_type="",min_size=250):
metathesaurus = umls.Metathesaurus()
semantic_network = metathesaurus.semantic_network.graph("isa")
source_vocabs = metathesaurus.get_source_vocabulary_defs()
for sty in semantic_network.nodes():
for sab in source_vocabs:
sty_str = sty.lower().replace(" ","_")
dict_type_str = ".{}".format(dict_type) if dict_type else ""
outfname = "{}{}.{}{}.txt".format(args.outdir,sty_str,sab,dict_type_str)
if os.path.exists(outfname):
print("{} exists, skipping".format(outfname))
continue
d = metathesaurus.dictionary(sty, source_vocab=[sab], term_types=term_types,
include_children=False)
if len(d) < min_size:
print("SKIPPING",sty,sab,len(d))
continue
with codecs.open(outfname,"w","utf-8",errors="ignore") as f:
f.write("\n".join(d))
print(outfname)
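# Hedged usage sketch (the output directory below is a placeholder): one file is
# written per (semantic type, source vocabulary) pair, named
# <semantic_type>.<SAB>[.<dict_type>].txt, and pairs whose dictionaries contain
# fewer than min_size terms are skipped.
#
#   create_dictionaries(['AA', 'AB', 'ACR'], "dicts/", dict_type="abbrv", min_size=100)
#   # -> e.g. dicts/enzyme.SNOMEDCT_US.abbrv.txt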
def main(args):
# UMLS metathesaurus
#meta = umls.Metathesaurus()
#norm = umls.MetaNorm(function=lambda x:x.lower()) if args.normalize else umls.MetaNorm(function=lambda x:x)
#semantic_network = meta.semantic_network.graph("isa")
#source_vocabs = meta.get_source_vocabulary_defs()
min_size = 100
# acronyms, abbreviations
term_types = ['AA','AB','ACR']
create_dictionaries(term_types,args.outdir,dict_type="abbrv",min_size=min_size)
# all terms
#term_types = [tty for tty in umls.Metathesaurus().get_tty_list() if tty not in ['AA','AB','ACR']]
#create_dictionaries(term_types,args.outdir,min_size=min_size)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-t","--target", type=str, help="Target entity types, delimit list by |", default=None)
parser.add_argument("-s","--source_vocab", type=str,
help="limit to source vocabularies, delimit list by |", default=None)
parser.add_argument("-n","--normalize", type=bool,
help="make lowercase", default=False)
parser.add_argument("-o","--outdir", type=bool,
help="output directory", default=False)
parser.add_argument("-r","--exclude_subtrees", type=str,
help="List of subtree root nodes to remove from the target ontology, delimit list by |", default=None)
parser.add_argument("-e","--embeddings", type=str, help="word embeddings (default: none)", default=None)
parser.add_argument("-k","--knn", type=int, help="expand dictionary with k nearest neighbors (default: none)", default=None)
args = parser.parse_args()
main(args)
|
ddbiolib-master
|
demos/umls/noise_aware_dict.py
|
'''
UMLS Metathesaurus
Current Functionality
+ Simple Concept object (interface for definitions, term sets, etc.)
+ Build dictionaries of UMLS semantic types
+ Build positive examples of UMLS relations
There are many other tools that do similar things, though none have a
distant supervision focus (and there is a bias towards Perl-based tools)
UMLS::Interface http://search.cpan.org/~btmcinnes/UMLS-Interface-1.47/
UMLS::Similarity http://search.cpan.org/~btmcinnes/UMLS-Similarity-1.45/
This functionality should be eventually incorporated into bio-ddlib, esp.
the graph and path-based similarity measures.
TODO
+ implement similarity and relatedness measures from UMLS::Similarity
+ path-based feature generator
+ keyword expansion/similarity using word embeddings
@author Jason Alan Fries
'''
from __future__ import print_function
import sys
import ontologies.umls as umls
from ontologies.umls.graph import pprint_tree
meta = umls.Metathesaurus()
norm = umls.MetaNorm(function=lambda x:x.lower())
#
# UMLS Concepts and Relations
#
# UMLS Semantic Network
taxonomy = meta.semantic_network.graph(relation="isa")
root_nodes = ([node for node in taxonomy if not taxonomy.predecessors(node)])
for node in root_nodes:
pprint_tree(taxonomy,node)
# UMLS semantic groups
print( meta.semantic_network.groups["Anatomy"] )
# UMLS normalizes concepts with a CUI (Concept Unique Identifier)
# e.g., C0002645 is 'Amoxycillin'
concept = meta.concept(cui='C3539739')
concept.print_summary()
# List all relations on a specific concept
relations = meta.relations_on_cui("C0002645",source_vocab=["SNOMEDCT_US"])
rel = list(set([rela[2] for rela in relations]))
print(rel)
# Search for concepts (CUIs) by string. This is a noisy match. We don't perform
# any disambiguation so CUIs are not guaranteed to actually be related.
matches = meta.match_concepts("Cu", match_substring=False)
print("Found %d string->concept matches" % len(matches))
for cui in matches:
concept = meta.concept(cui=cui)
concept.print_summary()
#
# Building Dictionaries and Relation Tuples
#
'''
# List all semantic types, relations, and source vocabularies
print(meta.get_source_vocabulary_defs())
print(sorted(meta.get_semtypes_list()))
print(sorted(meta.get_relations_list()))
'''
'''
# Build dictionaries for a given semantic type (i.e., entity)
d = meta.dictionary("Disease or Syndrome")
d = map(norm.normalize,d)
for term in sorted(d,key=lambda x:len(x.split()),reverse=1):
print(term)
'''
# Generate relation examples from given semantic types. This is
# highly dependent on the choice of source vocabulary.
attribute ="may_prevent"
relations = meta.relations("Substance","Biologic Function",attribute)
print("Found %d distinct relation pairs" % len(relations))
# Print concept terms (this can be expanded by using concept.all_terms()
# rather than just the preferred term).
for cui1,cui2 in relations:
terms_cui1 = map(norm.normalize, meta.concept(cui1).all_terms())
terms_cui2 = map(norm.normalize, meta.concept(cui2).all_terms())
terms_cui1 = "|".join(terms_cui1)
terms_cui2 = "|".join(terms_cui2)
row= [cui1, terms_cui1, attribute, cui2, terms_cui2 ]
print("\t".join(row).encode("utf-8",errors="ignore"))
|
ddbiolib-master
|
demos/umls/umls_demo.py
|
'''
Simple demonstration of instantiating a concept graph and
computing some concept similarity measures
'''
from __future__ import print_function
import networkx as nx
from ddbiolib.ontologies.umls import Metathesaurus
from ddbiolib.ontologies.umls.config import DatabaseConfig
def pprint_path(path, ontology):
"""Print UMLS CUI paths using preferred terms"""
terms = []
for cui in path:
terms += [ "%s (%s)" % (cui, ontology.concept(cui).preferred_term()[0]) ]
print("=>".join(terms))
# local database connection configuration
config = DatabaseConfig(host="127.0.0.1",username="umls",
dbname="2014AB", password="")
meta = Metathesaurus(config)
# some example unique identifiers for UMLS concepts
cui1 = "C0016129" # finger
cui2 = "C0446516" # arm
cui1 = "C0028643" # numbness
cui2 = "C0030193" # pain
cui2 = "C0085624" # burning
c1 = meta.concept(cui="C0016129") # Finger
c1.print_summary()
c2 = meta.concept(cui="C0446516") # Arm
c2.print_summary()
# build CUI-level concept graph using MeSH (Medical Subject Headings)
cui_graph = meta.concept_graph(level="CUI",source_vocab=["MSH","RXNORM","SNOMEDCT-US"])
# shortest path connecting concepts
# Finger and Arm
path = nx.shortest_path(nx.Graph(cui_graph), c1.cui, c2.cui)
pprint_path(path,meta)
|
ddbiolib-master
|
demos/umls/kb_demo.py
|
#!/usr/bin/env python
'''
Simple UMLS Metathesaurus Dictionary Builder
Build dictionaries of UMLS semantic types.
See umls/docs for list of UMLS Semantic Types
Example usage:
python create_dictionary.py -t "Disease or Syndrome" -s "SNOMEDCT_US" > outfile.txt
'''
from __future__ import print_function
import re
import sys
import ontologies.umls as umls
import argparse
from sklearn.neighbors import *
from gensim.models.word2vec import Word2Vec
def term_expansion(fpath, terms, knn):
'''Expand term list by creating list of nearest neighbors in provided embeddings
representation. This is usually very noisy and there is a fuzzy distinction between
semantic similarity and "relatedness". Bacteria names, for example, often neighbor
diseases caused by those organisms.
'''
model = Word2Vec.load(fpath)
model.init_sims()
nbrs = NearestNeighbors(n_neighbors=knn+1, algorithm='ball_tree', metric='l2')
nbrs.fit(model.syn0norm)
expansion = []
for phrase in terms:
# space replaced with underscore in PMC/PubMed embeddings
phrase = phrase.replace(" ","_")
if phrase not in model.vocab:
continue
idx = model.vocab[phrase].index
vec = model.syn0norm[idx]
_,indices = nbrs.kneighbors(vec)
neighbors = [model.index2word[j] for j in indices.flatten()]
neighbors.remove(phrase)
expansion += neighbors
# transform words back to whitespace separators
return map(lambda x:x.replace("_"," "), expansion)
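# Hedged usage sketch (illustration only; "PubMed-w2v.bin" is a placeholder for
# whatever gensim Word2Vec model is passed via --embeddings): each input phrase is
# looked up with spaces replaced by underscores, its knn nearest neighbors are
# collected, and the expansion is returned with underscores mapped back to spaces.
#
#   seeds = ["breast cancer", "diabetes mellitus"]
#   expansion = term_expansion("PubMed-w2v.bin", seeds, knn=5)
#   print("\n".join(expansion))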
def main(args):
meta = umls.Metathesaurus()
norm = umls.MetaNorm(function=lambda x:x.lower()) if args.normalize else umls.MetaNorm(function=lambda x:x)
# Build dictionaries for a given a set of semantic types (i.e., entities)
dictionary = []
for sty in args.target:
d = meta.dictionary(sty, source_vocab=args.source_vocab, exclude_subtrees=args.exclude_subtrees)
dictionary += map(norm.normalize,d)
dictionary = {t:1 for t in dictionary}
print(len(dictionary), file=sys.stderr)
# Optionally expand the dictionary using word-embedding nearest neighbors
if args.embeddings:
terms = term_expansion(args.embeddings, dictionary, args.knn)
dictionary = {t:1 for t in terms if t not in dictionary and t.lower() not in dictionary}.keys()
# remove terms that are just digits
dictionary = [term for term in dictionary if not re.match("^(\d+[.]*\d*)|([;:\.!?\-\+]+)$",term)]
for term in sorted(dictionary,key=lambda x:len(x.split()),reverse=1):
print(term.encode("utf-8"))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-t","--target", type=str, help="Target entity types, delimit list by |", default=None)
parser.add_argument("-s","--source_vocab", type=str,
help="limit to source vocabularies, delimit list by |", default=None)
parser.add_argument("-n","--normalize", type=bool,
help="make lowercase", default=False)
parser.add_argument("-r","--exclude_subtrees", type=str,
help="List of subtree root nodes to remove from the target ontology, delimit list by |", default=None)
parser.add_argument("-e","--embeddings", type=str, help="word embeddings (default: none)", default=None)
parser.add_argument("-k","--knn", type=int, help="expand dictionary with k nearest neighbors (default: none)", default=None)
args = parser.parse_args()
if not args.target:
parser.print_help()
else:
args.target = args.target.split("|")
args.source_vocab = args.source_vocab.split("|") if args.source_vocab else []
args.exclude_subtrees = args.exclude_subtrees.split("|") if args.exclude_subtrees else []
main(args)
|
ddbiolib-master
|
demos/umls/create_dictionary.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sys, os
import time
import re
mode="analyze"
m=3
pre_k=1
main_k=5
def run_infer(infer_out, k, model, quiet):
total_time, total_alarms = 0, 0
try:
if not os.path.isdir(infer_out):
print(f' * Error: infer-out does not exist for {infer_out}')
exit(1)
else:
start_t = time.time()
use_model = f'--pulse-join-select {model}'
threads = "-j 1"
threads = ""
verbose_opt = ""
if quiet:
verbose_opt = " -q 2>&1 > /dev/null"
cmd = f'infer analyze {threads} --pulse-only --pulse-max-disjuncts {str(k)} -o {infer_out} {use_model} {verbose_opt}'
print(f" - cmd: {cmd}", file=sys.stderr)
os.system(cmd)
end_t = time.time()
elapsed_time = end_t - start_t
report = os.path.join(infer_out, "report.txt")
f = open (report, "r")
text = f.read().replace('\n', '')
issues = re.findall('Found ([0-9]+) issue', text)
if len(issues) == 0:
issues_count = 0
else:
issues_count = int(issues[0])
total_time = total_time + elapsed_time
total_alarms = total_alarms + issues_count
return total_time, total_alarms
except:
print(f"Skipping {p} due to unknown exceptions")
return 0, 0
def run_infer_pre(path, k, model, quiet):
return run_infer(path, k, model, quiet)
def run_infer_main(path, k, model, quiet):
return run_infer(path, k, model, quiet)
def pre_analysis(pgm, pre_k, models):
opt_model = None
opt_alarms = -1
pt = 0
if len(models) == 1:
return models[0], 0, 0
for model in models:
print(f" * Pre-analysis with model {model}")
t, a = run_infer_pre(pgm, pre_k, model, True)
print(f" # time(sec): {t}")
print(f" # alarms(#): {a}")
if opt_alarms < a:
opt_alarms = a
opt_model = model
pt = pt + t
return opt_model, opt_alarms, pt
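# Illustrative flow (paths and model names are hypothetical): pre_analysis runs a
# cheap pass (pre_k disjuncts) once per candidate model and keeps the one that
# reports the most issues; run_dd_infer below then re-analyzes with main_k
# disjuncts using only that winner.
#
#   model, alarms, t = pre_analysis("~/infer-outs/gawk-5.1.0", 1, ["a.model", "b.model"])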
def run_dd_infer(path, pre_k, main_k, models):
print("* Pre-analysis")
model, alarms, pretime = pre_analysis(path, pre_k, models)
print(f"# total pre-time(sec): {pretime}")
print("* Main analysis")
maintime, mainalarms = run_infer_main(path, main_k, model, False)
print(f"# Main analysis time(sec): {maintime}")
print(f"# alarms(#): {mainalarms}")
def main(target_path, model_path):
files = os.listdir(f'{model_path}/{m}')
pattern = ".*\.model"
models = [f'{model_path}/{m}/{s}' for s in files if re.match(pattern, s)]
print(f"prek = {pre_k}, maink = {main_k}, models = {models}", flush=True)
run_dd_infer(target_path, pre_k, main_k, models)
def usage():
print("usage:")
print("python DDInfer.py ~/best_models ~/infer-outs/gawk-5.1.0")
if len(sys.argv) < 3:
usage()
exit(1)
model_path = sys.argv[1]
target_path = sys.argv[2]
if not os.path.isdir(model_path):
print(f'Cannot find a model in {model_path}')
usage()
exit(1)
if not os.path.isdir(target_path):
print(f'Cannot find a captured target in {target_path}')
usage()
exit(1)
main(target_path, model_path)
|
data_driven_infer-main
|
bin/DDInfer.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sys, os
import time
import re
def run(path, p, k, model):
total_time, total_alarms = 0, 0
try:
infer_out = path + p
if not os.path.isdir(infer_out):
print(" * Error: infer-out does not exist for " + p)
exit(1)
else:
start_t = time.time()
use_model = ""
if model != None:
use_model = "--pulse-join-select " + model
os.system("infer analyze -q -j 1 --pulse-only --pulse-max-disjuncts " + str(k) + " -o " + infer_out + " " + use_model)
end_t = time.time()
elapsed_time = end_t - start_t
report = path + p + "/report.txt"
f = open (report, "r")
text = f.read().replace('\n', '')
issues = re.findall('Found ([0-9]+) issue', text)
if len(issues) == 0:
issues_count = 0
else:
issues_count = int(issues[0])
os.system(f"cp {infer_out}/report.json ./data/{p}_1_{k}.json")
total_time = total_time + elapsed_time
total_alarms = total_alarms + issues_count
return total_time, total_alarms
except:
print(f"Skipping {p} due to unkonwn exceptions")
return 0, 0
pgm = None
k = 10
model = None
filename = None
if len(sys.argv) < 4:
print("Insufficient arguments")
exit(1)
elif len(sys.argv) == 4:
filename = sys.argv[1]
model = sys.argv[2]
k = sys.argv[3]
else:
print("Invalid arguments")
exit(1)
path = "/home/vagrant/infer-outs/"
f = open(filename, "r")
pgms_str = f.read().replace('\n', ' ')
pgms = pgms_str.split()[:]
for pgm in pgms:
t0, a0 = run(path, pgm, k, model)
print(f'** {pgm}\t{a0}\t{t0}')
|
data_driven_infer-main
|
Table2/bin/eval_ml_infer.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import sklearn
from sklearn.ensemble import GradientBoostingClassifier
import pickle
import itertools
import sys, os
import time
import random
## usage) python3 collect.py programs_training.txt training_data 20   (use the pgms listed in 'programs_training.txt', write training data under './training_data', with k=20)
training_data_folder = ""
if len(sys.argv) < 4:
print("Insufficient arguments")
exit(1)
elif len(sys.argv) == 4:
filename = str(sys.argv[1])
trials = 1
training_data_folder = str(sys.argv[2])
k = str(sys.argv[3])
else:
print("Invalid arguments")
exit(1)
if not os.path.isdir(f'./{training_data_folder}'):
os.system(f'mkdir ./{training_data_folder}')
f = open(filename, "r")
pgms_str = f.read().replace('\n', ' ')
pgms = pgms_str.split()[:]
print(pgms)
random.seed()
pre_classifier = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1, max_depth=2)
def read_file(data_file):
X, y = [], []
with open(data_file, "r") as f:
for line in f:
data = list(map(lambda x: int(x), line.split()))
fv = data[:len(data)-1]
label = data[len(data)-1]
X.append(fv)
y.append(label)
return X, y
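# Assumed on-disk format (illustration only): each line of the training data is a
# whitespace-separated integer feature vector whose last column is the 0/1 label,
# e.g. a file containing
#   1 0 3 1
#   0 2 5 0
# is read back as X == [[1, 0, 3], [0, 2, 5]] and y == [1, 0].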
def make_balance(X, y):
lst = list(zip(X, y))
pos = [(x,y) for (x,y) in lst if y == 1]
neg = [(x,y) for (x,y) in lst if y == 0]
#print(f'Original: #pos = {len(pos)}, #neg = {len(neg)}')
assert (len(neg) >= len(pos))
random.shuffle(neg)
neg = neg[:len(pos)]
#print(f'Balanced: #pos = {len(pos)}, #neg = {len(neg)}')
pos.extend(neg)
return zip(*pos)
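# Illustrative behaviour (values assumed): negatives are shuffled and truncated to
# the number of positives, so
#   X, y = make_balance([[1], [2], [3], [4]], [1, 0, 0, 0])
# yields one positive plus one randomly kept negative (X and y of length 2).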
def unique_sorted(values):
"Return a sorted list of the given values, without duplicates."
values = sorted(values)
if not values:
return []
consecutive_pairs = zip(values, itertools.islice(values, 1, len(values)))
result = [a for (a, b) in consecutive_pairs if a != b]
result.append(values[-1])
return result
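# Minimal sanity check (illustrative; not part of the training pipeline):
#   assert unique_sorted([3, 1, 3, 2]) == [1, 2, 3]
#   assert unique_sorted([]) == []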
def uniq(X, y):
lst = list(zip(X, y))
lst_uniq = unique_sorted(lst)
print(f'before: {len(lst)}, uniq: {len(lst_uniq)}')
return zip(*lst_uniq)
def preprocess(X, y):
X, y = make_balance(X, y)
return X, y
def trim_data(X, y, r):
lst = list(zip(X, y))
res = []
for e in lst:
if random.random() <= r:
res.append(e)
return zip(*res)
def train_clf (clf, X, y):
X = np.array(X)
y = np.array(y)
clf.fit(X, y)
return clf
def train_run(X, y, model_name):
trained_clf = train_clf (pre_classifier, X, y)
pickle.dump(trained_clf, open(model_name, "wb"))
def model(accum, model_path):
train_X, train_y = read_file(accum)
if (train_X == []):
return
train_X, train_y = preprocess(train_X, train_y)
train_X, train_y = trim_data(train_X, train_y, 0.7)
train_run(train_X, train_y, model_path)
mode = " --pulse-random-mode"
if os.path.isfile(f"./{training_data_folder}/acc.model"):
#mode = f' --pulse-random-mode --pulse-cover-load history.txt'
#mode = f' --pulse-join-train ./{training_data_folder}/acc.model --pulse-cover-load history.txt'
mode = f' --pulse-join-train ./{training_data_folder}/acc.model --pulse-cover-load history.txt --pulse-repeat-mode'
def train(path, pgms, k):
total_time = 0
os.system(f'touch ./{training_data_folder}/accum.txt')
for p in pgms:
print(f"Training for {p}")
infer_out = path + p
if not os.path.isdir(infer_out):
print("Error: infer-out does not exist for " + p)
continue
else:
if os.path.isfile(f'./{training_data_folder}/{p}/history.dat'):
os.system(f'mv ./{training_data_folder}/{p}/history.dat ./history.txt')
start_t = time.time()
cmd = ("infer analyze -j 1 --pulse-train-mode --pulse-only --pulse-max-disjuncts " + str(k) + " -o " + infer_out + " --pulse-cover-mode" + mode)
print(cmd)
os.system(cmd)
end_t = time.time()
elapsed_time = end_t - start_t
if not os.path.isfile("./train.txt"):
print("Error: train.txt does not exist for " + p)
continue
os.system(f'sort -u -r ./{training_data_folder}/accum.txt train.txt -o temp.txt')
os.system(f'mv temp.txt ./{training_data_folder}/accum.txt')
r = random.randint(1,100000)
if not os.path.isdir(f'./{training_data_folder}/{p}'):
os.system(f'mkdir ./{training_data_folder}/{p}')
os.system(f'mv train.txt ./{training_data_folder}/{p}/{r}.txt')
os.system(f'mv history.txt ./{training_data_folder}/{p}/history.dat')
path = "/home/vagrant/infer-outs/"
train(path, pgms, k)
model(f'./{training_data_folder}/accum.txt', f'./{training_data_folder}/acc.model')
|
data_driven_infer-main
|
Table2/bin/collect.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from multiprocessing import Manager, Process
import sklearn
from sklearn.ensemble import GradientBoostingClassifier
import sys
import random
import time
import pickle
import os
import itertools
from os.path import exists
from infer import *
random.seed()
#m0 |-> GradientBoostingClassifier(learning_rate=0.01, max_depth=1)
#m1 |-> GradientBoostingClassifier(learning_rate=0.01, max_depth=2)
#m2 |-> GradientBoostingClassifier(learning_rate=0.01, max_depth=4)
#m3 |-> GradientBoostingClassifier(max_depth=1)
#m4 |-> GradientBoostingClassifier(max_depth=2)
#m5 |-> GradientBoostingClassifier(max_depth=4)
#m6 |-> GradientBoostingClassifier(learning_rate=1.0, max_depth=1)
#m7 |-> GradientBoostingClassifier(learning_rate=1.0, max_depth=2)
#m8 |-> GradientBoostingClassifier(learning_rate=1.0, max_depth=4)
#m9 |-> GradientBoostingClassifier(learning_rate=0.01, max_depth=1, n_estimators=200)
#m10 |-> GradientBoostingClassifier(learning_rate=0.01, max_depth=2, n_estimators=200)
#m11 |-> GradientBoostingClassifier(learning_rate=0.01, max_depth=4, n_estimators=200)
#m12 |-> GradientBoostingClassifier(max_depth=1, n_estimators=200)
#m13 |-> GradientBoostingClassifier(max_depth=2, n_estimators=200)
#m14 |-> GradientBoostingClassifier(max_depth=4, n_estimators=200)
#m15 |-> GradientBoostingClassifier(learning_rate=1.0, max_depth=1, n_estimators=200)
#m16 |-> GradientBoostingClassifier(learning_rate=1.0, max_depth=2, n_estimators=200)
#m17 |-> GradientBoostingClassifier(learning_rate=1.0, max_depth=4, n_estimators=200)
classifiers = [GradientBoostingClassifier(n_estimators=ns, learning_rate=lr, max_depth=md)
for ns in [100, 200]
for lr in [0.01, 0.1, 1.0]
for md in [1, 2, 4]]
classifiers = list(zip (range(len(classifiers)), classifiers))
for idx, clf in classifiers:
print(f'm{idx} |-> {clf}')
def get_model_filename (folder, model_id):
filename = folder + "/" + str(model_id) + ".model"
return filename
def train_and_save (model_id, clf, X, y, folder):
filename = get_model_filename (folder, model_id)
if exists(filename):
print(f'Skip training {model_id} {clf}: model already exists in {filename}')
else:
start_t = time.time()
trained_clf = train_clf (clf, X, y)
end_t = time.time()
print(f'Training {model_id} {clf} finishes in {end_t-start_t} seconds', flush=True)
pickle.dump(trained_clf, open(filename, "wb"))
return
def split_list(a, n):
k, m = divmod(len(a), n)
return list(a[i*k+min(i, m):(i+1)*k+min(i+1, m)] for i in range(n))
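# Illustrative example (values assumed): split_list cuts a list into n chunks whose
# sizes differ by at most one, which is how work is spread across CPUs below, e.g.
#   split_list([1, 2, 3, 4, 5], 2) == [[1, 2, 3], [4, 5]]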
def train_and_save_models (classifiers, X, y, folder):
for model_id, clf in classifiers:
train_and_save (model_id, clf, X, y, folder)
def train_parallel (classifiers, X, y, folder, cpus):
clfs = classifiers[:]
random.shuffle(clfs)
splits = split_list(clfs, cpus)
i = 0
for split in splits:
i = i + 1
print(f'CPU {i} : {split}')
jobs = []
for i in range(cpus):
th = Process(target=train_and_save_models, args=(splits[i], X, y, folder))
jobs.append(th)
for i in range(cpus):
jobs[i].start()
for i in range(cpus):
jobs[i].join()
def get_pgm_name(fullpath):
basename = os.path.basename(fullpath)
assert (basename.endswith(".merged.txt"))
return basename[:-11]
def unique_sorted(values):
"Return a sorted list of the given values, without duplicates."
values = sorted(values)
if not values:
return []
consecutive_pairs = zip(values, itertools.islice(values, 1, len(values)))
result = [a for (a, b) in consecutive_pairs if a != b]
result.append(values[-1])
return result
def train_clf (clf, X, y):
X = np.array(X)
y = np.array(y)
clf.fit(X, y)
return clf
def read_file(data_file):
X, y = [], []
with open(data_file, "r") as f:
for line in f:
data = list(map(lambda x: int(x), line.split()))
fv = data[:len(data)-1]
label = data[len(data)-1]
X.append(fv)
y.append(label)
return X, y
def read_files(data_files):
X, y = [], []
for data_file in data_files:
_X, _y = read_file(data_file)
X.extend(_X)
y.extend(_y)
return X, y
def get_pos_neg(X, y):
lst = list(zip(X, y))
pos = [(x,y) for (x,y) in lst if y == 1]
neg = [(x,y) for (x,y) in lst if y == 0]
return pos, neg
def make_balance(X, y):
lst = list(zip(X, y))
pos = [(x,y) for (x,y) in lst if y == 1]
neg = [(x,y) for (x,y) in lst if y == 0]
assert (len(neg) >= len(pos))
random.shuffle(neg)
neg = neg[:len(pos)]
pos.extend(neg)
return zip(*pos)
def uniq(X, y):
lst = list(zip(X, y))
lst_uniq = unique_sorted(lst)
print(f'before: {len(lst)}, uniq: {len(lst_uniq)}')
return zip(*lst_uniq)
def preprocess(X, y):
X, y = uniq(X, y)
X, y = make_balance(X, y)
return X, y
def trim_data(X, y, r):
lst = list(zip(X, y))
res = []
for e in lst:
if random.random() <= r:
res.append(e)
return zip(*res)
def get_train_data(train_files, ratio):
train_X, train_y = read_files(train_files)
assert (train_X != [])
train_X, train_y = preprocess(train_X, train_y)
pos, neg = get_pos_neg (train_X, train_y)
print(f'#pos : {len(pos)}, #neg : {len(neg)}')
train_X, train_y = trim_data(train_X, train_y, ratio)
pos, neg = get_pos_neg (train_X, train_y)
print(f'#pos : {len(pos)}, #neg : {len(neg)}')
return train_X, train_y
def evaluate_clf_for_parallel(clf, valid_files, return_dict):
for valid_file in valid_files:
valid_X, valid_y = read_file(valid_file)
try:
valid_X, valid_y = make_balance(valid_X, valid_y)
except:
return_dict[valid_file] = (0, 0, 0, 0, 0)
return
predict_y = clf.predict(valid_X)
pgm = get_pgm_name(valid_file)
path = "/home/vagrant/infer-outs/"
model = f"/tmp/model_tmp_{pgm}.model"
if exists(model):
os.system(f"rm {model}")
pickle.dump(clf, open(model, "wb"))
infer_time, infer_alarms = run_infer_main(path, pgm, 5, model, True)
TP, FN, FP, TN = 0, 0, 0, 0
for (gt, predict) in zip(valid_y, predict_y):
if gt == 1:
if predict == 1:
TP = TP + 1
else:
FN = FN + 1
else:
if predict == 1:
FP = FP + 1
else:
TN = TN + 1
n_pos = sum(predict_y)
n_neg = len(predict_y) - n_pos
if TP + FN == 0:
recall = -1
else:
recall = int(TP / (TP + FN) * 100)
f1score = 0
if TP + FP == 0:
precision = -1
f1score = 0
else:
precision = int(TP / (TP + FP) * 100)
if precision + recall > 0:
f1score = int(2 * recall * precision / (precision + recall))
else:
f1score = 0
print(f' - validation on {valid_file}', flush=True)
print(f' - predict: #pos={n_pos}, #neg={n_neg}')
print(f' - TP={TP}, FN={FN}, FP={FP}, TN={TN}')
print(f' - Recall={recall}, Precision={precision}, f1score={f1score}')
print(f' - Infer alarms={infer_alarms}, Infer time={infer_time}')
return_dict[valid_file] = (TP, FN, FP, TN, infer_alarms)
def evaluate_clf_parallel(clf, valid_files, cpus):
files = valid_files[:]
random.shuffle(files)
splits = split_list(files, cpus)
print(splits)
i = 0
for split in splits:
i = i + 1
print(f'CPU {i} : {split}')
jobs = []
manager = Manager()
return_dict = manager.dict()
for i in range(cpus):
th = Process(target=evaluate_clf_for_parallel, args=(clf, splits[i], return_dict))
jobs.append(th)
for i in range(cpus):
jobs[i].start()
for i in range(cpus):
jobs[i].join()
return return_dict
def report(header, TP, FN, FP, TN, IA):
sensitivity = 0
if TP + FN != 0:
sensitivity = TP / (TP + FN)
recall = sensitivity
specificity = 0
if TN + FP != 0:
specificity = TN / (TN + FP)
precision = 0
if TP + FP != 0:
precision = TP / (TP + FP)
accuracy = 0
if TP + FP + FN + TN != 0:
accuracy = (TP + TN) / (TP + FP + FN + TN)
f1score = 0
if recall + precision != 0:
f1score = 2 * (recall * precision) / (recall + precision)
print()
print("**********************************")
print(header)
print("**********************************")
print(f'TP={TP}, FP={FP}, FN={FN}, TN={TN}')
print("Sensitivity/Recall = TP / (TP + FN) = %.2f" % sensitivity)
print("Specificity = TN / (TN + FP) = %.2f" % specitivity)
print("Precision = TP / (TP + FP) = %.2f" % precision)
print("Accuracy = (TP + TN) / (TP+FP+FN+TN) = %.2f" % accuracy)
print("F1-score = %.2f" % f1score)
print("Infer alarms = %d" % IA)
print("**********************************")
def load_clf(clf_id, folder):
model_file = get_model_filename (folder, clf_id)
clf = pickle.load(open(model_file, 'rb'))
return clf
def run_cv(data_files, folder_to_save_models, cpus, ratio_data, b_eval):
train_files = data_files[:int(len(data_files)*0.7)]
valid_files = [f for f in data_files if not f in train_files]
print(f'training programs ({len(train_files)}) = {train_files}')
print(f'validation programs ({len(valid_files)}) = {valid_files}')
print(f'Processing training data', flush=True)
start = time.time()
train_X, train_y = get_train_data(train_files, ratio_data)
end = time.time()
print(f'Processing training data finishes in {end-start}s', flush=True)
print(f'Training begins', flush=True)
start = time.time()
train_parallel(classifiers, train_X, train_y, folder_to_save_models, cpus)
end = time.time()
print(f'Training finished in {end-start} seconds', flush=True)
result = []
i = 0
if b_eval == False:
return result
for clf_idx, clf in classifiers:
i = i + 1
TP, FN, FP, TN, IA = 0, 0, 0, 0, 0
print()
print(f'Evaluating {clf_idx} {clf}', flush=True)
clf = load_clf(clf_idx, folder_to_save_models)
log = {}
log = evaluate_clf_parallel(clf, valid_files, cpus)
result.append(((clf_idx, clf), log))
return result
### use the result of Infer as metric
def clf_metric(TP, FN, FP, TN, IA):
return IA
def alarms_of_model (pgms, m, M):
s = 0
for p in pgms:
s = s + M[m][p]
return s
def max_alarms (p, models, M):
max = 0
for m in models:
if M[m][p] > max:
max = M[m][p]
return max
def sum_of_max_alarms (pgms, models, M):
sum = 0
for p in pgms:
sum = sum + max_alarms (p, models, M)
return sum
def best_model (pgms, models, M):
max_alarms = 0
max_model = None
for m in models:
alarms = alarms_of_model (pgms, m, M)
if max_alarms < alarms:
max_alarms = alarms
max_model = m
return max_model, max_alarms
def opt_model_comb (k, pgms, models, M):
combs = list(itertools.combinations (models, k))
opt_comb = None
opt_alarms = 0
for comb in combs:
alarms = sum_of_max_alarms (pgms, comb, M)
if opt_alarms < alarms:
opt_alarms = alarms
opt_comb = comb
return opt_comb, opt_alarms
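# Worked example (numbers are hypothetical): with the alarm matrix
#   M = {"m0": {"p1": 5, "p2": 0}, "m1": {"p1": 3, "p2": 4}}
# best_model(["p1", "p2"], ["m0", "m1"], M) returns ("m1", 7) because 3+4 > 5+0,
# while opt_model_comb(2, ...) keeps both models, since taking the per-program
# maximum gives 5 + 4 = 9 alarms.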
def select_models(result, folder_to_save_models):
print()
best_clf = None
best_clf_metric = -1
dic = {}
for (clf_idx, clf),log in result:
print(f'{clf_idx}. {clf}')
TP, FN, FP, TN, IA = 0, 0, 0, 0, 0
for pgm,(tp,fn,fp,tn,ia) in log.items():
print(f' - {pgm}: TP={tp}, FN={fn}, FP={fp}, TN={tn}, IA={ia}')
TP, FN, FP, TN, IA = TP + tp, FN + fn, FP + fp, TN + tn, IA + ia
subdic = dic.get(pgm, {})
subdic[(clf_idx, clf)] = (tp, fn, fp, tn, ia)
dic[pgm] = subdic
if clf_metric(TP, FN, FP, TN, IA) > best_clf_metric:
best_clf_metric = clf_metric(TP, FN, FP, TN, IA)
best_clf = ((clf_idx, clf), TP, FN, FP, TN, IA)
print()
print("----------------------------------------------------")
print(" Best model")
print("----------------------------------------------------")
(clf_idx, clf), TP, FN, FP, TN, IA = best_clf
report(f"best clf : {clf_idx}. {clf}", TP, FN, FP, TN, IA)
os.system("mkdir best_models")
pickle.dump(clf, open("./best_models/best.model", "wb"))
print("----------------------------------------------------")
print(" Best models per program")
print("----------------------------------------------------")
best_alarms_sum = 0
alarms = {}
for pgm, subdic in dic.items():
print(pgm)
best_clf = None
best_clf_metric = -1
alarms[pgm] = {}
for (clf_idx, clf), (TP, FN, FP, TN, IA) in subdic.items():
alarms[pgm][clf_idx] = IA
if clf_metric(TP, FN, FP, TN, IA) > best_clf_metric:
best_clf_metric = clf_metric(TP, FN, FP, TN, IA)
best_clf = ((clf_idx, clf), TP, FN, FP, TN, IA)
(clf_idx, clf), TP, FN, FP, TN, IA = best_clf
basename = os.path.basename(pgm)
report(f'best clf for {basename} : {clf_idx} {clf}', TP, FN, FP, TN, IA)
pickle.dump(clf, open(f"./best_models/{basename}.model", "wb"))
best_alarms_sum = best_alarms_sum + IA
print()
print("----------------------------------------------------")
print(f'#Alarms of optimal Infer: {best_alarms_sum}')
print("----------------------------------------------------")
M = {}
pgms = []
models = []
for pgm in alarms:
pgms.append(pgm)
for (clf_id, _) in classifiers:
models.append(clf_id)
print(f'pgms : {pgms}')
print(f'models: {models}')
for m in models:
M[m] = {}
for p in pgms:
if p in alarms and m in alarms[p]:
M[m][p] = alarms[p][m]
else:
M[m][p] = 0
bm, ba = best_model (pgms, models, M)
print("-----------------------------------")
print(f'best model: {bm}, #alarms: {ba}')
print("-----------------------------------")
for k in range(1, 4):
opt_comb, opt_alarms = opt_model_comb(k, pgms, models, M)
print(f'comb size: {k}, optimal combination: {opt_comb}, #alarms: {opt_alarms}')
folder = folder_to_save_models + "/" + str(k)
os.system("mkdir " + folder)
for m in opt_comb:
mfile = get_model_filename (folder_to_save_models, m)
os.system("cp " + mfile + " " + folder)
for pgm in alarms:
for clf_idx in alarms[pgm]:
basename = get_pgm_name(pgm)
print(f'{basename} # m{clf_idx} # {alarms[pgm][clf_idx]}')
for p in pgms:
for m in models:
print(f'M[{m}][{p}] : {M[m][p]}')
if len(sys.argv) < 5:
print("Error: insufficient arguments")
exit(1)
folder_to_save_models = sys.argv[1]
ratio_of_data_to_use = float(sys.argv[2])
num_of_cpus = int(sys.argv[3])
filename = sys.argv[4]
f = open(filename, "r")
pgms_str = f.read().replace('\n', ' ')
pgms = pgms_str.split()[:]
print(pgms)
data_files = []
for p in pgms:
name="./merged_training_data/" + p + ".merged.txt"
if exists(name):
data_files.append(name)
if not exists(folder_to_save_models):
os.system(f"mkdir {folder_to_save_models}")
b_eval = True
print(f'save models in {folder_to_save_models}, using {num_of_cpus} cpus')
result = run_cv(data_files, folder_to_save_models, num_of_cpus, ratio_of_data_to_use, b_eval)
if b_eval:
select_models(result, folder_to_save_models)
|
data_driven_infer-main
|
Table2/bin/learn_classifier.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sys, os
import infer
if len(sys.argv) < 6:
print("usage:")
print("python run_ml_infer.py bin/programs_test.txt 1 5 1 models")
exit(1)
filename = sys.argv[1]
pre_k = int(sys.argv[2])
main_k = int(sys.argv[3])
ncpus = int(sys.argv[4])
models = []
for model in sys.argv[5:]:
models.append(model)
path = "/home/vagrant/infer-outs/"
if os.path.exists(filename):
txtfile = open(filename, "r")
pgms = txtfile.read().splitlines()
else:
pgms = [filename]
print(f"prek = {pre_k}, maink = {main_k}, models = {models}", flush=True)
t, pret, a = infer.run_dd_infer_parallel(path, pgms, pre_k, main_k, models, ncpus, True)
print(f"k: {pre_k} {main_k}, alarms: {a}, pre_time: {pret}, main_time: {t}, total_time: {t+pret}, with model: {models}", flush=True)
|
data_driven_infer-main
|
Table2/bin/run_ml_infer.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sys, os
import time
import re
import random
from multiprocessing import Process, Queue, Manager
def split_list(a, n):
k, m = divmod(len(a), n)
return list(a[i*k+min(i, m):(i+1)*k+min(i+1, m)] for i in range(n))
def run_random_infer(path, p, k, quiet):
total_time, total_alarms = 0, 0
try:
infer_out = path + p
if not os.path.isdir(infer_out):
print(" * Error: infer-out does not exist for " + p)
exit(1)
else:
start_t = time.time()
verbose_opt = ""
if quiet:
verbose_opt = " 2>&1 > /dev/null"
os.system("infer analyze -q -j 1 --pulse-random-mode --pulse-only --pulse-max-disjuncts " + str(k) + " -o " + infer_out + " --pulse-cg-load " + "/vagrant/cgs/" + p + " " + verbose_opt)
end_t = time.time()
elapsed_time = end_t - start_t
report = path + p + "/report.txt"
f = open (report, "r")
text = f.read().replace('\n', '')
issues = re.findall('Found ([0-9]+) issue', text)
if len(issues) == 0:
issues_count = 0
else:
issues_count = int(issues[0])
total_time = total_time + elapsed_time
total_alarms = total_alarms + issues_count
return total_time, total_alarms
except:
print(f"Skipping {p} due to unkonwn exceptions")
return 0, 0
def run_infer(path, p, k, model, quiet, limit_fn, threads=1):
total_time, total_alarms = 0, 0
try:
infer_out = os.path.join(path, p)
if not os.path.isdir(infer_out):
print(" * Error: infer-out does not exist for " + infer_out)
exit(1)
else:
start_t = time.time()
use_model = ""
if model != None:
use_model = "--pulse-join-select " + model
limit_functions = ""
if limit_fn != 0:
limit_functions = " --pulse-limit-fn " + str(limit_fn) + " "
verbose_opt = ""
if quiet:
verbose_opt = " 2>&1 > /dev/null"
print(p, model)
cmd = f"infer analyze -q -j {threads} --pulse-only --pulse-max-disjuncts " + str(k) + " -o " + infer_out + " " + use_model + limit_functions + verbose_opt
print(cmd, file=sys.stderr)
os.system(cmd)
end_t = time.time()
elapsed_time = end_t - start_t
report = path + p + "/report.txt"
f = open (report, "r")
text = f.read().replace('\n', '')
issues = re.findall('Found ([0-9]+) issue', text)
if len(issues) == 0:
issues_count = 0
else:
issues_count = int(issues[0])
total_time = total_time + elapsed_time
total_alarms = total_alarms + issues_count
return total_time, total_alarms
except:
print(f"Skipping {p} due to unkonwn exceptions")
return 0, 0
def run_infer_main(path, p, k, model, quiet, threads=1):
return run_infer(path, p, k, model, quiet, 0, threads=threads)
def run_infer_pre(path, p, k, model, quiet, limit_fn, threads=1):
return run_infer(path, p, k, model, quiet, limit_fn, threads=threads)
def work(path, pgms, k, model, quiet, return_dict):
for pgm in pgms:
t, a = run_infer_main(path, pgm, k, model, quiet)
return_dict[pgm] = (a, t)
def run_infer_parallel(path, pgms, k, model, ncpus, quiet):
pgms_orig = pgms[:]
random.shuffle(pgms)
splits = split_list(pgms, ncpus)
for i in range(ncpus):
print(f'cpu {i}: {splits[i]}', flush=True)
manager = Manager()
return_dict = manager.dict()
jobs = []
for i in range(ncpus):
th = Process(target=work, args=(path, splits[i], k, model, quiet, return_dict))
jobs.append(th)
th.start()
for job in jobs:
job.join()
t_sum, a_sum = 0, 0
for pgm in return_dict:
a, t = return_dict[pgm]
t_sum = t_sum + t
a_sum = a_sum + a
for pgm in pgms_orig:
a, t = return_dict[pgm]
print(f'** {pgm}\t\t{a}\t\t{t}', flush=True)
return t_sum, a_sum
def pre_analysis(path, pgm, pre_k, models, threads=1):
opt_model = None
opt_alarms = -1
pt = 0
if len(models) == 1:
return models[0], 0, 0
for model in models:
t, a = run_infer_pre(path, pgm, pre_k, model, True, 0, threads=threads)
if opt_alarms < a:
opt_alarms = a
opt_model = model
pt = pt + t
return opt_model, opt_alarms, pt
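# Note (sketch of how the pre-analysis is used below): each candidate
# --pulse-join-select model is tried with the small pre_k disjunct budget, the
# model reporting the most issues wins, and work_dd then reuses it for the full
# run with the larger main_k budget.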
def work_dd(path, pgms, pre_k, main_k, models, quiet, threads, return_dict):
for pgm in pgms:
model, alarms, pretime = pre_analysis(path, pgm, pre_k, models, threads=threads)
t, a = run_infer_main(path, pgm, main_k, model, quiet, threads=threads)
infer_out = os.path.join(path, pgm)
os.system(f"cp {infer_out}/report.json ./data/{pgm}_3_{pre_k}_{main_k}.json")
return_dict[pgm] = (a, t, pretime)
def run_dd_infer_parallel(path, pgms, pre_k, main_k, models, ncpus, quiet, threads=1):
pgms_orig = pgms[:]
random.shuffle(pgms)
splits = split_list(pgms, ncpus)
for i in range(ncpus):
print(f'cpu {i}: {splits[i]}', flush=True)
manager = Manager()
return_dict = manager.dict()
jobs = []
for i in range(ncpus):
th = Process(target=work_dd, args=(path, splits[i], pre_k, main_k, models, quiet, threads, return_dict))
jobs.append(th)
th.start()
for job in jobs:
job.join()
t_sum, pret_sum, a_sum = 0, 0, 0
for pgm in return_dict:
a, t, pret = return_dict[pgm]
t_sum = t_sum + t
a_sum = a_sum + a
pret_sum = pret_sum + pret
for pgm in pgms_orig:
a, t, pret = return_dict[pgm]
print(f'** {pgm}\t{a}\t{t}\t{pret}\t{pret+t}', flush=True)
return t_sum, pret_sum, a_sum
def work_random(path, pgms, k, quiet, return_dict):
for pgm in pgms:
t, a = run_random_infer(path, pgm, k, quiet)
return_dict[pgm] = (a, t)
def run_random_infer_parallel(path, pgms, k, ncpus, quiet):
pgms_orig = pgms[:]
random.shuffle(pgms)
splits = split_list(pgms, ncpus)
for i in range(ncpus):
print(f'cpu {i}: {splits[i]}', flush=True)
manager = Manager()
return_dict = manager.dict()
jobs = []
for i in range(ncpus):
th = Process(target=work_random, args=(path, splits[i], k, quiet, return_dict))
jobs.append(th)
th.start()
for job in jobs:
job.join()
t_sum, a_sum = 0, 0
for pgm in return_dict:
a, t = return_dict[pgm]
t_sum = t_sum + t
a_sum = a_sum + a
for pgm in pgms_orig:
a, t = return_dict[pgm]
print(f'** {pgm}\t\t{a}\t\t{t}', flush=True)
return t_sum, a_sum
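# Minimal driver sketch (illustrative; `path`, `pgms` and `models` are assumed to
# be provided by the calling script, and pre_k/main_k/ncpus are example values):
#   t, pret, a = run_dd_infer_parallel(path, pgms, pre_k=20, main_k=50,
#                                      models=models, ncpus=8, quiet=True)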
|
data_driven_infer-main
|
Table2/bin/infer.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# Modified from github.com/openai/CLIP
from collections import OrderedDict
import numpy as np
import timm
import torch
from torch import nn
import torch.nn.functional as F
import torch.distributed as dist
import losses
import utils
import vit
def drop_path(x, drop_prob: float = 0., training: bool = False, scale_by_keep: bool = True):
if drop_prob == 0. or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = x.new_empty(shape).bernoulli_(keep_prob)
if keep_prob > 0.0 and scale_by_keep:
random_tensor.div_(keep_prob)
return x * random_tensor
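# Illustrative behavior: with drop_prob=0.2 and scale_by_keep=True, roughly 20%
# of the samples in a batch have their residual branch zeroed, and the kept
# samples are divided by keep_prob=0.8 so the output matches the input in
# expectation (standard stochastic-depth scaling).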
class DropPath(nn.Module):
def __init__(self, drop_prob: float = 0., scale_by_keep: bool = True):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
self.scale_by_keep = scale_by_keep
def forward(self, x):
return drop_path(x, self.drop_prob, self.training, self.scale_by_keep)
def extra_repr(self):
return f'drop_prob={round(self.drop_prob,3):0.3f}'
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x: torch.Tensor):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None, dropout_prob=0.0, drop_path_prob=0.0):
super().__init__()
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_model * 4)),
("gelu", QuickGELU()),
("c_proj", nn.Linear(d_model * 4, d_model))
]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
self.dropout = nn.Dropout(p=dropout_prob, inplace=True)
self.drop_path = DropPath(drop_prob=drop_path_prob)
def attention(self, x: torch.Tensor):
self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
def forward(self, inp):
x, mode = inp
if mode == 'local':
self.dropout(x)
x = x + self.drop_path(self.attention(self.ln_1(x)))
x = x + self.drop_path(self.mlp(self.ln_2(x)))
else:
x = x + self.attention(self.ln_1(x))
x = x + self.mlp(self.ln_2(x))
return (x, mode)
class Transformer(nn.Module):
def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None, dropout_prob=0.0, drop_path_prob=0.0):
super().__init__()
self.width = width
self.layers = layers
self.resblocks = nn.Sequential(*[
ResidualAttentionBlock(
width,
heads,
attn_mask,
dropout_prob,
drop_path_prob
) for _ in range(layers)
])
def forward(self, x: torch.Tensor, mode='global'):
return self.resblocks((x, mode))[0]
class CLIP(nn.Module):
def __init__(
self,
embed_dim: int,
# vision
vision_width: int,
vision_model: nn.Module,
# text
context_length: int,
vocab_size: int,
transformer_width: int,
transformer_heads: int,
transformer_layers: int = 12,
detach_proj: bool = False,
no_share_token=False,
clip_proj_type='linear',
clip_hidden_dim=4096,
global_text_mask_prob=1.0,
local_text_mask_prob=0.5,
text_dropout_prob=0.0,
text_drop_path_prob=0.0,
**kwargs,
):
super().__init__()
self.context_length = context_length
self.vision_width = vision_width
self.transformer_width = transformer_width
self.embed_dim = embed_dim
self.detach_proj = detach_proj
self.clip_proj_type = clip_proj_type
self.visual = vision_model
self.no_share_token = no_share_token
self.transformer = Transformer(
width=transformer_width,
layers=transformer_layers,
heads=transformer_heads,
attn_mask=self.build_attention_mask(),
dropout_prob=text_dropout_prob,
drop_path_prob=text_drop_path_prob,
)
self.vocab_size = vocab_size
self.local_text_mask_prob = local_text_mask_prob
self.global_text_mask_prob = global_text_mask_prob
self.token_embedding = nn.Embedding(vocab_size, transformer_width)
self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width))
self.ln_final = LayerNorm(transformer_width)
if clip_proj_type == 'mlp':
self.image_projector = self._build_mlp(
in_dim=self.vision_width,
mlp_dim=clip_hidden_dim,
out_dim=embed_dim
)
self.text_projector = self._build_mlp(
in_dim=self.transformer_width,
mlp_dim=clip_hidden_dim,
out_dim=embed_dim
)
else:
self.image_projector = nn.Linear(self.vision_width, embed_dim, bias=False)
self.text_projector = nn.Linear(self.transformer_width, embed_dim, bias=False)
self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
self.initialize_parameters()
def _build_mlp(self, in_dim, mlp_dim, out_dim, num_layers=3):
mlp = [
("layer1", nn.Linear(in_dim, mlp_dim)),
("bn1", utils.infer_batchnorm_class()(mlp_dim)),
("relu1", nn.ReLU(inplace=True))
]
i = 1
for i in range(2, num_layers):
mlp.extend([
(f"layer{i}", nn.Linear(mlp_dim, mlp_dim)),
(f"bn{i}", utils.infer_batchnorm_class()(mlp_dim)),
(f"relu{i}", nn.ReLU(inplace=True))
])
mlp.append((f"layer{i+1}", nn.Linear(mlp_dim, out_dim)))
return nn.Sequential(OrderedDict(mlp))
@torch.no_grad()
def initialize_parameters(self):
nn.init.normal_(self.token_embedding.weight, std=0.02)
nn.init.normal_(self.positional_embedding, std=0.01)
proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
attn_std = self.transformer.width ** -0.5
fc_std = (2 * self.transformer.width) ** -0.5
for block in self.transformer.resblocks:
nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
if self.clip_proj_type == 'linear':
nn.init.normal_(self.image_projector.weight, std=self.vision_width ** -0.5)
nn.init.normal_(self.text_projector.weight, std=self.transformer.width ** -0.5)
def build_attention_mask(self):
# lazily create causal attention mask, with full attention between the vision tokens
# pytorch uses additive attention mask; fill with -inf
mask = torch.empty(self.context_length, self.context_length)
mask.fill_(float("-inf"))
mask.triu_(1) # zero out the lower diagonal
return mask
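    # Example (illustrative): for context_length = 4 the additive mask is
    #   [[0, -inf, -inf, -inf],
    #    [0,    0, -inf, -inf],
    #    [0,    0,    0, -inf],
    #    [0,    0,    0,    0]]
    # i.e. each text token attends only to itself and to earlier positions.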
def encode_image(self, image):
feats = self.visual(image)
z = self.image_projector(feats)
return {'feats_image': feats, 'z_image': z}
def encode_text(self, text, mode='global', forward_proj=True):
range_index = torch.arange(text.size(0))
eot_index = text.argmax(dim=-1)
x = self.token_embedding(text) # [batch_size, n_ctx, d_model]
x = x + self.positional_embedding
x = x.permute(1, 0, 2) # NLD -> LND
x = self.transformer(x, mode=mode)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_final(x)
# x.shape = [batch_size, n_ctx, transformer.width]
# take features from the eot embedding (eot_token is the highest number in each sequence)
feats = x[range_index, eot_index]
out = {'feats_text': feats}
if forward_proj:
out['z_text'] = self.text_projector(feats.detach() if self.detach_proj else feats)
return out
def forward(self, image, text):
out_image = self.encode_image(image)
out_text = self.encode_text(text)
return {**out_image, **out_text, 'logit_scale': self.logit_scale.exp()}
@torch.no_grad()
def predict_zeroshot(self, image_feats, text_feats):
z_image = image_feats['z_image']
z_text = text_feats['z_text']
z_image = z_image / z_image.norm(dim=-1, keepdim=True)
z_text = z_text / z_text.norm(dim=-1, keepdim=True)
similarity = z_image @ z_text.t()
return {'z_sim': similarity}
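    # Minimal zero-shot usage sketch (illustrative; `images` and `class_prompts`
    # are assumed to be an image batch and tokenized text prompts):
    #   image_feats = model.encode_image_val(images)
    #   text_feats = model.encode_text_val(class_prompts)
    #   sims = model.predict_zeroshot(image_feats, text_feats)['z_sim']
    #   pred = sims.argmax(dim=-1)  # best-matching prompt per image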
def encode_image_val(self, image):
out = self.encode_image(image)
return out
def encode_text_val(self, text):
out = self.encode_text(text)
return out
class CL2L(CLIP):
def __init__(self, separate_proj=False, cl2l_txt_proj_type='mlp', cl2l_img_proj_type='mlp', **kwargs):
super().__init__(separate_proj=False, **kwargs)
self.separate_proj = separate_proj
if separate_proj:
self.l2l_logit_scale = nn.Parameter(
torch.ones([]) * np.log(1 / 0.1))
if cl2l_img_proj_type == 'mlp':
self.l2l_image_projector = self._build_mlp(
in_dim=self.vision_width,
mlp_dim=4096,
out_dim=self.embed_dim
)
else:
self.l2l_image_projector = nn.Linear(self.vision_width, self.embed_dim, bias=False)
if cl2l_txt_proj_type == 'mlp':
self.l2l_text_projector = self._build_mlp(
in_dim=self.transformer_width,
mlp_dim=4096,
out_dim=self.embed_dim
)
else:
self.l2l_text_projector = nn.Linear(self.transformer_width, self.embed_dim, bias=False)
else:
self.l2l_image_projector = self.image_projector
self.l2l_text_projector = self.text_projector
def encode_image_val(self, image):
out = self.encode_image(image)
out['h_image'] = self.l2l_image_projector(out['feats_image'])
return out
def encode_text_val(self, text):
out = super().encode_text(text)
out['h_text'] = self.l2l_text_projector(out['feats_text'])
return out
def forward(self, image_global, text, *image_local):
text_global, *text_local = text.unbind(1)
out = super().forward(image_global, text_global)
# forward backbone
out['feats_image_local'] = [self.visual(l) for l in image_local]
out['feats_text_local'] = [
self.encode_text(t, mode='local', forward_proj=False)['feats_text']
for t in text_local
]
# forward projector
out['h_image_local'] = [self.l2l_image_projector(l) for l in out['feats_image_local']]
out['h_text_local'] = [self.l2l_text_projector(l) for l in out['feats_text_local']]
# fix names
out['z_image_global'] = out.pop('z_image')
out['z_text_global'] = out.pop('z_text')
out['h_logit_scale'] = self.l2l_logit_scale.exp() if self.separate_proj else out['logit_scale']
return out
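    # Note (illustrative): `text` is expected to stack one global caption and one
    # or more local captions along dim 1, so text.unbind(1) above yields
    # text_global plus the text_local list.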
@torch.no_grad()
def predict_zeroshot(self, image_feats, text_feats):
outs = super().predict_zeroshot(image_feats, text_feats)
z_image = image_feats['h_image']
z_text = text_feats['h_text']
z_image = z_image / z_image.norm(dim=-1, keepdim=True)
z_text = z_text / z_text.norm(dim=-1, keepdim=True)
similarity = z_image @ z_text.t()
return {**outs, 'h_sim': similarity}
class BARLIP(CL2L):
def __init__(self, barlip_proj_dim, barlip_hidden_dim, **kwargs):
super().__init__(**kwargs)
self.barlip_image_projector_global = nn.Sequential(
nn.Linear(kwargs['vision_width'], barlip_hidden_dim),
utils.infer_batchnorm_class()(barlip_hidden_dim),
nn.ReLU(inplace=True),
nn.Linear(barlip_hidden_dim, barlip_hidden_dim),
utils.infer_batchnorm_class()(barlip_hidden_dim),
nn.ReLU(inplace=True),
nn.Linear(barlip_hidden_dim, barlip_proj_dim),
utils.infer_batchnorm_class()(barlip_proj_dim)
)
self.barlip_text_projector_global = nn.Sequential(
nn.Linear(kwargs['transformer_width'], barlip_hidden_dim),
utils.infer_batchnorm_class()(barlip_hidden_dim),
nn.ReLU(inplace=True),
nn.Linear(barlip_hidden_dim, barlip_hidden_dim),
utils.infer_batchnorm_class()(barlip_hidden_dim),
nn.ReLU(inplace=True),
nn.Linear(barlip_hidden_dim, barlip_proj_dim),
utils.infer_batchnorm_class()(barlip_proj_dim)
)
if 'separate_proj_child' in kwargs and kwargs['separate_proj_child']:
self.barlip_image_projector_local = nn.Sequential(
nn.Linear(kwargs['vision_width'], barlip_hidden_dim),
utils.infer_batchnorm_class()(barlip_hidden_dim),
nn.ReLU(inplace=True),
nn.Linear(barlip_hidden_dim, barlip_hidden_dim),
utils.infer_batchnorm_class()(barlip_hidden_dim),
nn.ReLU(inplace=True),
nn.Linear(barlip_hidden_dim, barlip_proj_dim),
utils.infer_batchnorm_class()(barlip_proj_dim)
)
self.barlip_text_projector_local = nn.Sequential(
nn.Linear(kwargs['transformer_width'], barlip_hidden_dim),
utils.infer_batchnorm_class()(barlip_hidden_dim),
nn.ReLU(inplace=True),
nn.Linear(barlip_hidden_dim, barlip_hidden_dim),
utils.infer_batchnorm_class()(barlip_hidden_dim),
nn.ReLU(inplace=True),
nn.Linear(barlip_hidden_dim, barlip_proj_dim),
utils.infer_batchnorm_class()(barlip_proj_dim)
)
else:
self.barlip_image_projector_local = self.barlip_image_projector_global
self.barlip_text_projector_local = self.barlip_text_projector_global
def forward(self, image, text, *image_local):
out = super().forward(image, text, *image_local)
out['v_image'] = self.barlip_image_projector_global(out['feats_image'])
out['v_text'] = self.barlip_text_projector_global(out['feats_text'])
out['v_image_local'] = [self.barlip_image_projector_local(l) for l in out['feats_image_local']]
out['v_text_local'] = [self.barlip_text_projector_local(l) for l in out['feats_text_local']]
return out
class SIAMLIP(CL2L):
def __init__(self, siamlip_proj_dim, siamlip_hidden_dim, siamlip_no_last_bn, **kwargs):
super().__init__(**kwargs)
self.siamlip_image_projector_global = nn.Sequential(
nn.Linear(kwargs['vision_width'], siamlip_hidden_dim, bias=False),
utils.infer_batchnorm_class()(siamlip_hidden_dim),
nn.ReLU(inplace=True),
# nn.Linear(siamlip_hidden_dim, siamlip_hidden_dim, bias=False),
# utils.infer_batchnorm_class()(siamlip_hidden_dim),
# nn.ReLU(inplace=True),
nn.Linear(siamlip_hidden_dim, siamlip_proj_dim, bias=False),
)
self.siamlip_text_projector_global = nn.Sequential(
nn.Linear(kwargs['transformer_width'], siamlip_hidden_dim, bias=False),
utils.infer_batchnorm_class()(siamlip_hidden_dim),
nn.ReLU(inplace=True),
# nn.Linear(siamlip_hidden_dim, siamlip_hidden_dim, bias=False),
# utils.infer_batchnorm_class()(siamlip_hidden_dim),
# nn.ReLU(inplace=True),
nn.Linear(siamlip_hidden_dim, siamlip_proj_dim, bias=False),
)
if 'separate_proj_child' in kwargs and kwargs['separate_proj_child']:
self.siamlip_image_projector_local = nn.Sequential(
nn.Linear(kwargs['vision_width'], siamlip_hidden_dim, bias=False),
utils.infer_batchnorm_class()(siamlip_hidden_dim),
nn.ReLU(inplace=True),
# nn.Linear(siamlip_hidden_dim, siamlip_hidden_dim, bias=False),
# utils.infer_batchnorm_class()(siamlip_hidden_dim),
# nn.ReLU(inplace=True),
nn.Linear(siamlip_hidden_dim, siamlip_proj_dim, bias=False),
)
self.siamlip_text_projector_local = nn.Sequential(
nn.Linear(kwargs['transformer_width'], siamlip_hidden_dim, bias=False),
utils.infer_batchnorm_class()(siamlip_hidden_dim),
nn.ReLU(inplace=True),
# nn.Linear(siamlip_hidden_dim, siamlip_hidden_dim, bias=False),
# utils.infer_batchnorm_class()(siamlip_hidden_dim),
# nn.ReLU(inplace=True),
nn.Linear(siamlip_hidden_dim, siamlip_proj_dim, bias=False),
)
else:
self.siamlip_image_projector_local = self.siamlip_image_projector_global
self.siamlip_text_projector_local = self.siamlip_text_projector_global
if not siamlip_no_last_bn:
self.siamlip_image_projector_global = nn.Sequential(
self.siamlip_image_projector_global,
utils.infer_batchnorm_class()(siamlip_proj_dim, affine=False)
)
self.siamlip_text_projector_global = nn.Sequential(
self.siamlip_text_projector_global,
utils.infer_batchnorm_class()(siamlip_proj_dim, affine=False)
)
self.siamlip_image_projector_local = nn.Sequential(
self.siamlip_image_projector_local,
utils.infer_batchnorm_class()(siamlip_proj_dim, affine=False)
)
self.siamlip_text_projector_local = nn.Sequential(
self.siamlip_text_projector_local,
utils.infer_batchnorm_class()(siamlip_proj_dim, affine=False)
)
# predictors
self.image_text_predictor_global = nn.Sequential(
nn.Linear(siamlip_proj_dim, siamlip_hidden_dim, bias=False),
utils.infer_batchnorm_class()(siamlip_hidden_dim),
nn.ReLU(inplace=True),
nn.Linear(siamlip_hidden_dim, siamlip_proj_dim),
)
self.text_image_predictor_global = nn.Sequential(
nn.Linear(siamlip_proj_dim, siamlip_hidden_dim, bias=False),
utils.infer_batchnorm_class()(siamlip_hidden_dim),
nn.ReLU(inplace=True),
nn.Linear(siamlip_hidden_dim, siamlip_proj_dim),
)
if 'separate_proj_child' in kwargs and kwargs['separate_proj_child']:
self.image_text_predictor_local = nn.Sequential(
nn.Linear(siamlip_proj_dim, siamlip_hidden_dim, bias=False),
utils.infer_batchnorm_class()(siamlip_hidden_dim),
nn.ReLU(inplace=True),
nn.Linear(siamlip_hidden_dim, siamlip_proj_dim),
)
self.text_image_predictor_local = nn.Sequential(
nn.Linear(siamlip_proj_dim, siamlip_hidden_dim, bias=False),
utils.infer_batchnorm_class()(siamlip_hidden_dim),
nn.ReLU(inplace=True),
nn.Linear(siamlip_hidden_dim, siamlip_proj_dim),
)
else:
self.image_text_predictor_local = self.image_text_predictor_global
self.text_image_predictor_local = self.text_image_predictor_global
def forward(self, image, text, *image_local):
out = super().forward(image, text, *image_local)
out['v_image'] = self.siamlip_image_projector_global(out['feats_image'])
out['p_image'] = self.image_text_predictor_global(out['v_image'])
out['v_text'] = self.siamlip_text_projector_global(out['feats_text'])
out['p_text'] = self.text_image_predictor_global(out['v_text'])
out['v_image_local'] = [self.siamlip_image_projector_local(l) for l in out['feats_image_local']]
out['p_image_local'] = [self.image_text_predictor_local(l) for l in out['v_image_local']]
out['v_text_local'] = [self.siamlip_text_projector_local(l) for l in out['feats_text_local']]
out['p_text_local'] = [self.text_image_predictor_local(l) for l in out['v_text_local']]
return out
class SWALIPV1(CLIP):
def __init__(
self,
swalip_proj_dim,
swalip_hidden_dim,
swalip_num_proto,
swalip_no_shared_proto,
swalip_temperature,
swalip_learn_temperature,
**kwargs
):
super().__init__(**kwargs)
self.swalip_image_projector = nn.Sequential(
nn.Linear(kwargs['vision_width'], swalip_hidden_dim),
utils.infer_batchnorm_class()(swalip_hidden_dim),
nn.ReLU(inplace=True),
nn.Linear(swalip_hidden_dim, swalip_proj_dim)
)
self.swalip_text_projector = nn.Sequential(
nn.Linear(kwargs['transformer_width'], swalip_hidden_dim),
utils.infer_batchnorm_class()(swalip_hidden_dim),
nn.ReLU(inplace=True),
nn.Linear(swalip_hidden_dim, swalip_proj_dim)
)
# prototypes
if swalip_no_shared_proto:
self.image_prototypes = self.create_prototypes(swalip_proj_dim, swalip_num_proto)
self.text_prototypes = self.create_prototypes(swalip_proj_dim, swalip_num_proto)
else:
self.image_prototypes = self.create_prototypes(swalip_proj_dim, swalip_num_proto)
self.text_prototypes = self.image_prototypes
self.swalip_logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / swalip_temperature))
self.swalip_logit_scale.requires_grad = swalip_learn_temperature
def create_prototypes(self, swalip_proj_dim, swalip_num_proto):
prototypes = nn.utils.weight_norm(nn.Linear(swalip_proj_dim, swalip_num_proto, bias=False))
prototypes.weight_g.data.fill_(1)
prototypes.weight_g.requires_grad = False
return prototypes
def encode_image(self, image):
out = super().encode_image(image)
h_image = self.swalip_image_projector(out['feats_image'])
p_image = self.image_prototypes(F.normalize(h_image))
return {**out, 'h_image': h_image, 'p_image': p_image}
def encode_text(self, text):
out = super().encode_text(text)
h_text = self.swalip_text_projector(out['feats_text'])
p_text = self.text_prototypes(F.normalize(h_text))
return {**out, 'h_text': h_text, 'p_text': p_text}
def forward(self, image, text):
return {
**super().forward(image, text),
'swalip_logit_scale': self.swalip_logit_scale.exp(),
}
def get_model(args, **kwargs):
arch, model_name = args.model.rsplit('_', 1)
model_class = {
'BARLIP': BARLIP,
'SWALIP': CL2L,
'SWALIPV1': SWALIPV1,
'SIAMLIP': SIAMLIP,
'CLIP': CLIP,
'CL2L': CL2L,
}[model_name]
model = globals()[arch](model_class, **vars(args), **kwargs)
return model
def get_loss(args):
if args.model.startswith('CLIP'):
if args.model.endswith('SWALIPV1'):
return losses.SwALIPV1Loss(
sk_iters=args.sk_iters,
target_epsilon=args.target_epsilon,
swalip_weight=args.swalip_weight,
temperature=args.swalip_temperature,
)
else:
return losses.CLIPLoss()
if args.model.startswith('CL2L'):
if args.model.endswith('BARLIP'):
return losses.BarLIPLoss(
loss_avg_or_sum=args.loss_avg_or_sum,
label_smoothing=args.label_smoothing,
lamb=args.barlip_lamb,
scale_loss=args.barlip_scale_loss,
)
elif args.model.endswith('SIAMLIP'):
return losses.SiamLIPLoss(
loss_avg_or_sum=args.loss_avg_or_sum,
label_smoothing=args.label_smoothing,
)
elif args.model.endswith('SWALIP'):
return losses.SwALIPLoss(
loss_avg_or_sum=args.loss_avg_or_sum,
label_smoothing=args.label_smoothing,
sk_iters=args.sk_iters,
target_epsilon=args.target_epsilon,
swalip_weight=args.swalip_weight,
)
else:
return losses.CL2LLoss(
loss_avg_or_sum=args.loss_avg_or_sum,
label_smoothing=args.label_smoothing
)
def get_metric_names(model):
parent_model, _, child_model = model.split('_')
parent_metric_names = {
'CL2L': ['loss', 'clip_loss', 'clip_loss_image', 'clip_loss_text', 'clip_loss_image_global', 'clip_loss_text_global', 'clip_loss_image_local', 'clip_loss_text_local', 'clip_acc', 'clip_acc_image_local', 'clip_acc_text_local', 'clip_acc_image_global', 'clip_acc_text_global', 'h_logit_scale'],
'CLIP': ['loss', 'clip_loss', 'clip_acc'],
}[parent_model]
child_metric_names = {
'BARLIP': ['barlip_loss'],
'SWALIP': ['swalip_loss'],
'SIAMLIP': ['siamlip_loss'],
'CLIP': ['clip_loss', 'clip_acc'],
'CL2L': ['clip_loss', 'clip_acc'],
}[child_model]
return sorted(set(parent_metric_names + child_metric_names))
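# Example (illustrative): for model = 'CL2L_VITB16_BARLIP' the name splits into
# parent 'CL2L' and child 'BARLIP', so the result is the sorted union of the
# CL2L metric names and ['barlip_loss'].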
@timm.models.registry.register_model
def vit_small_mocov3_patch16_224(**kwargs):
model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=12, **kwargs)
model = vit._create_vision_transformer('vit_small_patch16_224', **model_kwargs)
return model
@timm.models.registry.register_model
def vit_tiny_patch16_224(pretrained=False, **kwargs):
""" ViT-Tiny (Vit-Ti/16)
"""
model_kwargs = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs)
model = vit._create_vision_transformer('vit_tiny_patch16_224', pretrained=pretrained, **model_kwargs)
return model
@timm.models.registry.register_model
def vit_tiny_patch16_384(pretrained=False, **kwargs):
""" ViT-Tiny (Vit-Ti/16) @ 384x384.
"""
model_kwargs = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs)
model = vit._create_vision_transformer('vit_tiny_patch16_384', pretrained=pretrained, **model_kwargs)
return model
@timm.models.registry.register_model
def vit_small_patch32_224(pretrained=False, **kwargs):
""" ViT-Small (ViT-S/32)
"""
model_kwargs = dict(patch_size=32, embed_dim=384, depth=12, num_heads=6, **kwargs)
model = vit._create_vision_transformer('vit_small_patch32_224', pretrained=pretrained, **model_kwargs)
return model
@timm.models.registry.register_model
def vit_small_patch32_384(pretrained=False, **kwargs):
""" ViT-Small (ViT-S/32) at 384x384.
"""
model_kwargs = dict(patch_size=32, embed_dim=384, depth=12, num_heads=6, **kwargs)
model = vit._create_vision_transformer('vit_small_patch32_384', pretrained=pretrained, **model_kwargs)
return model
@timm.models.registry.register_model
def vit_small_patch16_224(pretrained=False, **kwargs):
""" ViT-Small (ViT-S/16)
NOTE I've replaced my previous 'small' model definition and weights with the small variant from the DeiT paper
"""
model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs)
model = vit._create_vision_transformer('vit_small_patch16_224', pretrained=pretrained, **model_kwargs)
return model
@timm.models.registry.register_model
def vit_small_patch16_384(pretrained=False, **kwargs):
""" ViT-Small (ViT-S/16)
NOTE I've replaced my previous 'small' model definition and weights with the small variant from the DeiT paper
"""
model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs)
model = vit._create_vision_transformer('vit_small_patch16_384', pretrained=pretrained, **model_kwargs)
return model
@timm.models.registry.register_model
def vit_base_patch32_224(pretrained=False, **kwargs):
""" ViT-Base (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = vit._create_vision_transformer('vit_base_patch32_224', pretrained=pretrained, **model_kwargs)
return model
@timm.models.registry.register_model
def vit_base_patch32_384(pretrained=False, **kwargs):
""" ViT-Base model (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = vit._create_vision_transformer('vit_base_patch32_384', pretrained=pretrained, **model_kwargs)
return model
@timm.models.registry.register_model
def vit_base_patch16_224(pretrained=False, **kwargs):
""" ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = vit._create_vision_transformer('vit_base_patch16_224', pretrained=pretrained, **model_kwargs)
return model
@timm.models.registry.register_model
def vit_base_patch16_384(pretrained=False, **kwargs):
""" ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = vit._create_vision_transformer('vit_base_patch16_384', pretrained=pretrained, **model_kwargs)
return model
@timm.models.registry.register_model
def vit_base_patch8_224(pretrained=False, **kwargs):
""" ViT-Base (ViT-B/8) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(patch_size=8, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = vit._create_vision_transformer('vit_base_patch8_224', pretrained=pretrained, **model_kwargs)
return model
@timm.models.registry.register_model
def vit_large_patch32_224(pretrained=False, **kwargs):
""" ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). No pretrained weights.
"""
model_kwargs = dict(patch_size=32, embed_dim=1024, depth=24, num_heads=16, **kwargs)
model = vit._create_vision_transformer('vit_large_patch32_224', pretrained=pretrained, **model_kwargs)
return model
@timm.models.registry.register_model
def vit_large_patch32_384(pretrained=False, **kwargs):
""" ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(patch_size=32, embed_dim=1024, depth=24, num_heads=16, **kwargs)
model = vit._create_vision_transformer('vit_large_patch32_384', pretrained=pretrained, **model_kwargs)
return model
@timm.models.registry.register_model
def vit_large_patch16_224(pretrained=False, **kwargs):
""" ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs)
model = vit._create_vision_transformer('vit_large_patch16_224', pretrained=pretrained, **model_kwargs)
return model
@timm.models.registry.register_model
def vit_large_patch16_384(pretrained=False, **kwargs):
""" ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs)
model = vit._create_vision_transformer('vit_large_patch16_384', pretrained=pretrained, **model_kwargs)
return model
@timm.models.registry.register_model
def vit_large_patch14_224(pretrained=False, **kwargs):
""" ViT-Large model (ViT-L/14)
"""
model_kwargs = dict(patch_size=14, embed_dim=1024, depth=24, num_heads=16, **kwargs)
model = vit._create_vision_transformer('vit_large_patch14_224', pretrained=pretrained, **model_kwargs)
return model
@timm.models.registry.register_model
def vit_huge_patch14_224(pretrained=False, **kwargs):
""" ViT-Huge model (ViT-H/14) from original paper (https://arxiv.org/abs/2010.11929).
"""
model_kwargs = dict(patch_size=14, embed_dim=1280, depth=32, num_heads=16, **kwargs)
model = vit._create_vision_transformer('vit_huge_patch14_224', pretrained=pretrained, **model_kwargs)
return model
@timm.models.registry.register_model
def vit_giant_patch14_224(pretrained=False, **kwargs):
""" ViT-Giant (little-g) model (ViT-g/14) from `Scaling Vision Transformers` - https://arxiv.org/abs/2106.04560
"""
model_kwargs = dict(patch_size=14, embed_dim=1408, mlp_ratio=48/11, depth=40, num_heads=16, **kwargs)
model = vit._create_vision_transformer('vit_giant_patch14_224', pretrained=pretrained, **model_kwargs)
return model
@timm.models.registry.register_model
def vit_gigantic_patch14_224(pretrained=False, **kwargs):
""" ViT-Gigantic (big-G) model (ViT-G/14) from `Scaling Vision Transformers` - https://arxiv.org/abs/2106.04560
"""
model_kwargs = dict(patch_size=14, embed_dim=1664, mlp_ratio=64/13, depth=48, num_heads=16, **kwargs)
    model = vit._create_vision_transformer('vit_gigantic_patch14_224', pretrained=pretrained, **model_kwargs)
return model
def CL2L_CNEXTT(model_class, **kwargs):
vision_model = timm.create_model('convnext_tiny', num_classes=0)
if dist.is_available() and dist.is_initialized():
vision_model = nn.SyncBatchNorm.convert_sync_batchnorm(vision_model)
model = model_class(vision_width=vision_model.num_features, vision_model=vision_model, context_length=77, vocab_size=49408,
transformer_width=512, transformer_heads=8, **kwargs)
return model
def CL2L_VITS16MOCO(model_class, attn_layer, **kwargs):
vision_model = timm.create_model('vit_small_mocov3_patch16_224', num_classes=0, attn_layer=attn_layer)
model = model_class(vision_width=384, vision_model=vision_model, context_length=77, vocab_size=49408,
transformer_width=512, transformer_heads=8, **kwargs)
return model
def CL2L_VITS16(model_class, attn_layer, **kwargs):
vision_model = timm.create_model('vit_small_patch16_224', num_classes=0, attn_layer=attn_layer)
model = model_class(vision_width=384, vision_model=vision_model, context_length=77, vocab_size=49408,
transformer_width=512, transformer_heads=8, **kwargs)
return model
def CL2L_VITS32(model_class, attn_layer, **kwargs):
vision_model = timm.create_model('vit_small_patch32_224', num_classes=0, attn_layer=attn_layer)
model = model_class(vision_width=384, vision_model=vision_model, context_length=77, vocab_size=49408,
transformer_width=512, transformer_heads=8, **kwargs)
return model
def CL2L_R50(model_class, **kwargs):
vision_model = timm.create_model('resnet50', num_classes=0)
if dist.is_available() and dist.is_initialized():
vision_model = nn.SyncBatchNorm.convert_sync_batchnorm(vision_model)
model = model_class(vision_width=2048, vision_model=vision_model, context_length=77, vocab_size=49408,
transformer_width=512, transformer_heads=8, **kwargs)
return model
def CLIP_R50(model_class, **kwargs):
vision_model = timm.create_model('resnet50', num_classes=0)
if dist.is_available() and dist.is_initialized():
vision_model = nn.SyncBatchNorm.convert_sync_batchnorm(vision_model)
model = model_class(vision_width=2048, vision_model=vision_model, context_length=77, vocab_size=49408,
transformer_width=512, transformer_heads=8, **kwargs)
return model
def CL2L_CNEXTS(model_class, **kwargs):
vision_model = timm.create_model('convnext_small', num_classes=0)
if dist.is_available() and dist.is_initialized():
vision_model = nn.SyncBatchNorm.convert_sync_batchnorm(vision_model)
model = model_class(vision_width=vision_model.num_features, vision_model=vision_model, context_length=77, vocab_size=49408,
transformer_width=512, transformer_heads=8, **kwargs)
return model
def CLIP_VITB16(model_class, attn_layer, **kwargs):
vision_model = timm.create_model('vit_base_patch16_224', num_classes=0, attn_layer=attn_layer)
model = model_class(vision_width=768, vision_model=vision_model, context_length=77, vocab_size=49408,
transformer_width=512, transformer_heads=8, **kwargs)
return model
def CL2L_VITB32(model_class, attn_layer, embed_dim=512, **kwargs):
vision_model = timm.create_model('vit_base_patch32_224', num_classes=0, attn_layer=attn_layer)
model = model_class(embed_dim=embed_dim, vision_width=768, vision_model=vision_model, context_length=77, vocab_size=49408,
transformer_width=512, transformer_heads=8, **kwargs)
return model
def CL2L_VITB16(model_class, attn_layer, embed_dim=512, **kwargs):
vision_model = timm.create_model('vit_base_patch16_224', num_classes=0, attn_layer=attn_layer)
model = model_class(embed_dim=embed_dim, vision_width=768, vision_model=vision_model, context_length=77, vocab_size=49408,
transformer_width=512, transformer_heads=8, **kwargs)
return model
def CLIP_VITL16(model_class, attn_layer, embed_dim=512, **kwargs):
vision_model = timm.create_model('vit_large_patch16_224', num_classes=0, attn_layer=attn_layer)
model = model_class(embed_dim=embed_dim, vision_width=1024, vision_model=vision_model, context_length=77, vocab_size=49408,
transformer_width=512, transformer_heads=8, **kwargs)
return model
def CL2L_VITL16(model_class, attn_layer, embed_dim=512, **kwargs):
vision_model = timm.create_model('vit_large_patch16_224', num_classes=0, attn_layer=attn_layer)
model = model_class(embed_dim=embed_dim, vision_width=1024, vision_model=vision_model, context_length=77, vocab_size=49408,
transformer_width=512, transformer_heads=8, **kwargs)
return model
|
clip-rocket-main
|
models.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
A script to run multinode training with submitit.
"""
import argparse
import os
import uuid
from pathlib import Path
import main as main_slip
import submitit
def parse_args():
parser = main_slip.get_args_parser()
parser = argparse.ArgumentParser("Submitit for CL2L pre-training", parents=[parser])
parser.add_argument("--ngpus", default=8, type=int, help="Number of gpus to request on each node")
parser.add_argument("--nodes", default=8, type=int, help="Number of nodes to request")
parser.add_argument("--timeout", default=2800, type=int, help="Duration of the job")
parser.add_argument("--job_dir", default="", type=str, help="Job dir. Leave empty for automatic.")
parser.add_argument("--partition", default="", type=str, help="Partition where to submit")
parser.add_argument("--use_volta32", action='store_true', help="Big models? Use this")
parser.add_argument('--comment', default="", type=str,
help='Comment to pass to scheduler, e.g. priority message')
return parser.parse_args()
def get_shared_folder() -> Path:
user = os.getenv("USER")
if Path("/checkpoint/").is_dir():
p = Path(f"/checkpoint/{user}/experiments/cl2l")
p.mkdir(exist_ok=True)
return p
raise RuntimeError("No shared folder available")
def get_init_file():
    # Init file must not exist, but its parent directory must exist.
os.makedirs(str(get_shared_folder()), exist_ok=True)
init_file = get_shared_folder() / f"{uuid.uuid4().hex}_init"
if init_file.exists():
os.remove(str(init_file))
return init_file
class Trainer(object):
def __init__(self, args):
self.args = args
def __call__(self):
import main as main_slip
self._setup_gpu_args()
main_slip.main(self.args)
def checkpoint(self):
import os
import submitit
self.args.dist_url = get_init_file().as_uri()
print("Requeuing ", self.args)
empty_trainer = type(self)(self.args)
return submitit.helpers.DelayedSubmission(empty_trainer)
def _setup_gpu_args(self):
import submitit
from pathlib import Path
job_env = submitit.JobEnvironment()
self.args.output_dir = Path(str(self.args.output_dir).replace("%j", str(job_env.job_id)))
self.args.gpu = job_env.local_rank
self.args.rank = job_env.global_rank
self.args.world_size = job_env.num_tasks
print(f"Process group: {job_env.num_tasks} tasks, rank: {job_env.global_rank}")
def main():
args = parse_args()
if args.job_dir == "":
args.job_dir = get_shared_folder() / "%j"
# Note that the folder will depend on the job_id, to easily track experiments
executor = submitit.AutoExecutor(folder=args.job_dir, slurm_max_num_timeout=30)
num_gpus_per_node = args.ngpus
nodes = args.nodes
timeout_min = args.timeout
partition = args.partition
kwargs = {}
if args.use_volta32:
kwargs['slurm_constraint'] = 'volta32gb'
if args.comment:
kwargs['slurm_comment'] = args.comment
executor.update_parameters(
mem_gb=40 * num_gpus_per_node,
gpus_per_node=num_gpus_per_node,
tasks_per_node=num_gpus_per_node, # one task per GPU
cpus_per_task=10,
nodes=nodes,
timeout_min=timeout_min, # max is 60 * 72
# Below are cluster dependent parameters
slurm_partition=partition,
slurm_signal_delay_s=120,
**kwargs
)
executor.update_parameters(name="slip")
args.dist_url = get_init_file().as_uri()
args.output_dir = args.job_dir
trainer = Trainer(args)
job = executor.submit(trainer)
print("Submitted job_id:", job.job_id)
if __name__ == "__main__":
main()
|
clip-rocket-main
|
run_with_submitit.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from collections import defaultdict
import json
import os
import pickle
import zipfile
import numpy as np
from PIL import Image, ImageFile
import torch
from torchvision import transforms
from torchvision import datasets as t_datasets
from torchvision.datasets import ImageFolder
import utils
ImageFile.LOAD_TRUNCATED_IMAGES = True
def pil_loader(path):
# open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
with open(path, 'rb') as f:
img = Image.open(f)
return img.convert('RGB')
def yfcc_loader(root, index):
index = format(index, "0>8d")
repo = index[:2]
z = index[2: 5]
file_img = index[5:] + '.jpg'
path_zip = os.path.join(root, 'images', repo, z) + '.zip'
with zipfile.ZipFile(path_zip, 'r') as myzip:
img = Image.open(myzip.open(file_img))
return img.convert('RGB')
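# Example (illustrative): index 123 is zero-padded to '00000123', which maps to
# <root>/images/00/000.zip and the member file '123.jpg' inside that archive.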
class ImageCaptionDatasetBase(torch.utils.data.Dataset):
def __init__(self, dataset, root, metadata, caption_sampling='single'):
self.dataset = dataset
self.root = root
self.caption_sampling = caption_sampling
if self.dataset == 'yfcc15m':
with open(metadata, 'rb') as f:
self.samples = pickle.load(f)
elif self.dataset == 'coco':
samples = defaultdict(list)
with open(metadata) as f:
annotations = json.load(f)['annotations']
for ann in annotations:
samples[ann['image_id']].append(ann['caption'])
self.samples = [(k, v) for k, v in samples.items()]
elif self.dataset == 'cc12m' or self.dataset == 'cc3m':
self.samples = np.load(metadata, allow_pickle=True)
elif self.dataset == 'merged_opendata':
self.samples = []
self.roots = []
for md, r in zip(metadata.split("---"), root.split("---")):
self.samples.append(np.load(md, allow_pickle=True))
self.roots.append(r)
elif self.dataset == 'redcaps':
with open(metadata) as f:
annotations = json.load(f)
self.samples = [(ann['image_id'], ann['subreddit'], ann['caption']) for ann in annotations]
def get_raw_item(self, i):
if self.dataset == 'yfcc15m':
index, title, desc = self.samples[i]
caption = [c for c in [title, desc] if c != '']
caption = [''] if len(caption) == 0 else caption
caption = tuple(caption if self.caption_sampling == 'multi' else [np.random.choice(caption)])
img = yfcc_loader(self.root, index)
elif self.dataset == 'coco':
index, captions = self.samples[i]
path = os.path.join(self.root, 'train2017', '{:012d}.jpg'.format(index))
img = pil_loader(path)
caption = tuple(captions if self.caption_sampling == 'multi' else [np.random.choice(captions)])
elif self.dataset == 'cc3m':
ann = self.samples[i]
filename, captions = ann['image_id'], ann['captions']
path = os.path.join(self.root, str(filename))
img = pil_loader(path)
caption = tuple(captions if self.caption_sampling == 'multi' else [np.random.choice(captions)])
elif self.dataset == 'cc12m':
ann = self.samples[i]
filename, captions = ann['image_name'], ann['captions']
path = os.path.join(self.root, filename)
img = pil_loader(path)
caption = tuple(captions if self.caption_sampling == 'multi' else [np.random.choice(captions)])
elif self.dataset == 'merged_opendata':
datasets = ['cc3m', 'cc12m', 'yfcc15m']
cum_lens = np.array([len(s) for s in self.samples]).cumsum()
d_idx = [idx for idx, l in enumerate(cum_lens) if i < l][0]
offset = cum_lens[d_idx - 1] if d_idx > 0 else 0
samples_list = self.samples
self.samples = self.samples[d_idx]
self.dataset = datasets[d_idx]
self.root = self.roots[d_idx]
img, caption = self.get_raw_item(i - offset)
self.dataset = 'merged_opendata'
self.samples = samples_list
elif self.dataset == 'redcaps':
image_id, subreddit, caption = self.samples[i]
path = os.path.join(self.root, subreddit, f"{image_id}.jpg")
img = pil_loader(path)
elif 'pmd' in self.dataset:
img, captions = self.pmd[i]
# if isinstance(captions, str):
# caption = captions
assert isinstance(captions, list)
caption = tuple(captions if self.caption_sampling == 'multi' else [np.random.choice(captions)])
return img, caption
def __getitem__(self, i):
raise NotImplementedError
def __len__(self):
if 'pmd' in self.dataset:
return len(self.pmd)
elif 'merged_opendata' in self.dataset:
return sum([len(s) for s in self.samples])
else:
return len(self.samples)
class ImageCaptionDatasetCLIP(ImageCaptionDatasetBase):
def __init__(self, dataset, root, metadata, transform=None, tokenizer=None):
super().__init__(dataset, root, metadata)
self.transform = transform
self.tokenizer = tokenizer
def __getitem__(self, i):
img, caption = self.get_raw_item(i)
# apply transformation
if self.transform is not None:
image = self.transform(img)
# tokenize caption
if self.tokenizer is not None:
caption = self.tokenizer(caption)
return image, caption
class ImageCaptionDatasetCL2L(ImageCaptionDatasetBase):
def __init__(
self,
dataset,
root,
metadata,
transform,
augment,
num_augs=2,
tokenizer=None,
augs_only=False,
caption_sampling='single'
):
super().__init__(dataset, root, metadata, caption_sampling=caption_sampling)
self.transform = transform
self.num_augs = num_augs
self.augment = augment if isinstance(augment, list) else [augment] * num_augs
self.tokenizer = tokenizer
self.augs_only = augs_only
def __getitem__(self, i):
img, caption = self.get_raw_item(i)
augs = [self.augment[i](img) for i in range(self.num_augs)]
if self.augs_only:
return augs
image = self.transform(img)
# tokenize caption
if self.tokenizer is not None:
caption = self.tokenizer(caption)
return image, caption, *augs
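# Note (illustrative): with the default num_augs=2 and augs_only=False, each item
# is (image, caption, aug_1, aug_2), where `image` comes from `transform` and the
# augs come from the (typically stronger) multi-crop `augment` pipeline(s).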
class FileListDataset(torch.utils.data.Dataset):
def __init__(self, images, labels, transform=None, target_transform=None):
self.transform = transform
self.target_transform = target_transform
self.images = np.load(images)
self.labels = np.load(labels)
def __getitem__(self, index):
img = pil_loader(self.images[index])
target = self.labels[index]
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self):
return len(self.images)
def get_downstream_dataset(catalog, name, is_train, transform):
entry = catalog[name]
root = entry['path']
if entry['type'] == 'imagefolder':
dataset = t_datasets.ImageFolder(os.path.join(root, entry['train'] if is_train else entry['test']),
transform=transform)
elif entry['type'] == 'special':
if name == 'cifar10':
dataset = t_datasets.CIFAR10(root, train=is_train,
transform=transform, download=True)
elif name == 'cifar100':
dataset = t_datasets.CIFAR100(root, train=is_train,
transform=transform, download=True)
elif name == 'stl10':
dataset = t_datasets.STL10(root, split='train' if is_train else 'test',
transform=transform, download=True)
elif name == 'mnist':
dataset = t_datasets.MNIST(root, train=is_train,
transform=transform, download=True)
elif entry['type'] == 'filelist':
path = entry['train'] if is_train else entry['test']
val_images = os.path.join(root, path + '_images.npy')
val_labels = os.path.join(root, path + '_labels.npy')
if name == 'clevr_counts':
target_transform = lambda x: ['count_10', 'count_3', 'count_4', 'count_5', 'count_6', 'count_7', 'count_8', 'count_9'].index(x)
else:
target_transform = None
dataset = FileListDataset(val_images, val_labels, transform, target_transform)
else:
raise Exception('Unknown dataset')
return dataset
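# Hypothetical catalog entry consumed above (illustrative names and paths only):
#   "food101": {"type": "imagefolder", "path": "/datasets/food101",
#               "train": "train", "test": "test"}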
def get_train_dataset(args, tokenizer, metadata, augs_only=False):
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
)
train_transform = transforms.Compose([
transforms.RandomResizedCrop(
224,
scale=(args.weak_min_scale, 1.0),
interpolation=transforms.InterpolationMode.BICUBIC
),
transforms.ToTensor(),
normalize
])
augment = transforms.Compose([
transforms.RandomResizedCrop(
args.multicrop_resize,
scale=(0.08, args.multicrop_max_scale),
interpolation=transforms.InterpolationMode.BICUBIC
),
transforms.RandomApply([
transforms.ColorJitter(0.4, 0.4, 0.4, 0.1) # not strengthened
], p=0.8),
transforms.RandomGrayscale(p=args.grayscale_prob),
transforms.RandomApply([utils.GaussianBlur([.1, 2.])], p=args.blur_prob),
transforms.RandomApply([utils.Solarization()], p=args.solarize_prob),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])
if args.byol_augment:
assert args.num_augs == 2
augment = []
asym_blur_prob = [1.0, 0.1]
asym_solarize_prob = [0.0, 0.2]
for blur_prob, solarize_prob in zip(asym_blur_prob, asym_solarize_prob):
augment.append(transforms.Compose([
transforms.RandomResizedCrop(
args.multicrop_resize,
scale=(0.08, args.multicrop_max_scale),
interpolation=transforms.InterpolationMode.BICUBIC
),
transforms.RandomApply([
transforms.ColorJitter(0.4, 0.4, 0.2, 0.1)
], p=0.8),
transforms.RandomGrayscale(p=0.2),
transforms.RandomApply([utils.GaussianBlur([.1, 2.])], p=blur_prob),
transforms.RandomApply([utils.Solarization()], p=solarize_prob),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]))
assert not (args.weak_augment and args.strong_augment)
if args.weak_augment:
augment = train_transform
if args.strong_augment:
train_transform = augment
if args.randaugment:
train_transform = transforms.RandomChoice([train_transform, augment])
if args.model.startswith('CLIP'):
return ImageCaptionDatasetCLIP(args.dataset, args.root, metadata, train_transform, tokenizer)
elif args.model.startswith('CL2L'):
return ImageCaptionDatasetCL2L(
args.dataset,
args.root,
metadata,
train_transform,
augment,
args.num_augs,
tokenizer=tokenizer,
augs_only=augs_only,
caption_sampling=args.caption_sampling
)
def get_val_dataset():
val_transform = transforms.Compose([
transforms.Resize(224),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
cwd = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(cwd, 'dataset_catalog.json')) as f:
root = json.load(f)['imagenet']['path']
return ImageFolder(os.path.join(root, 'val'), val_transform)
|
clip-rocket-main
|
datasets.py
|
# Taken from https://github.com/rwightman/timm
""" Vision Transformer (ViT) in PyTorch
A PyTorch implement of Vision Transformers as described in:
'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale'
- https://arxiv.org/abs/2010.11929
`How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers`
- https://arxiv.org/abs/2106.10270
The official jax code is released and available at https://github.com/google-research/vision_transformer
Acknowledgments:
* The paper authors for releasing code and weights, thanks!
* I fixed my class token impl based on Phil Wang's https://github.com/lucidrains/vit-pytorch ... check it out
for some einops/einsum fun
* Simple transformer style inspired by Andrej Karpathy's https://github.com/karpathy/minGPT
* Bert reference code checks against Huggingface Transformers and Tensorflow Bert
Hacked together by / Copyright 2020, Ross Wightman
"""
import math
from functools import partial
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint
from timm.models.helpers import build_model_with_cfg, resolve_pretrained_cfg, named_apply, adapt_input_conv, checkpoint_seq
from timm.models.layers import PatchEmbed, Mlp, DropPath, trunc_normal_, lecun_normal_
from xformers.ops import memory_efficient_attention, unbind
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):
super().__init__()
assert dim % num_heads == 0, 'dim should be divisible by num_heads'
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv.unbind(0)
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class MemEffAttention(nn.Module):
def __init__(
self,
dim: int,
num_heads: int = 8,
qkv_bias: bool = False,
attn_drop: float = 0.0,
proj_drop: float = 0.0,
) -> None:
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim**-0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads)
q, k, v = unbind(qkv, 2)
x = memory_efficient_attention(q, k, v)
x = x.reshape([B, N, C])
x = self.proj(x)
x = self.proj_drop(x)
return x
class LayerScale(nn.Module):
def __init__(self, dim, init_values=1e-5, inplace=False):
super().__init__()
self.inplace = inplace
self.gamma = nn.Parameter(init_values * torch.ones(dim))
def forward(self, x):
return x.mul_(self.gamma) if self.inplace else x * self.gamma
class Block(nn.Module):
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.,
qkv_bias=False,
drop=0.,
attn_drop=0.,
init_values=None,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
attn_layer=Attention
):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = attn_layer(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)
self.ls1 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop)
self.ls2 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
x = x + self.drop_path1(self.ls1(self.attn(self.norm1(x))))
x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))
return x
class ResPostBlock(nn.Module):
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.,
qkv_bias=False,
drop=0.,
attn_drop=0.,
init_values=None,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm
):
super().__init__()
self.init_values = init_values
self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)
self.norm1 = norm_layer(dim)
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop)
self.norm2 = norm_layer(dim)
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.init_weights()
def init_weights(self):
# NOTE this init overrides that base model init with specific changes for the block type
if self.init_values is not None:
nn.init.constant_(self.norm1.weight, self.init_values)
nn.init.constant_(self.norm2.weight, self.init_values)
def forward(self, x):
x = x + self.drop_path1(self.norm1(self.attn(x)))
x = x + self.drop_path2(self.norm2(self.mlp(x)))
return x
class ParallelBlock(nn.Module):
def __init__(
self,
dim,
num_heads,
num_parallel=2,
mlp_ratio=4.,
qkv_bias=False,
init_values=None,
drop=0.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm
):
super().__init__()
self.num_parallel = num_parallel
self.attns = nn.ModuleList()
self.ffns = nn.ModuleList()
for _ in range(num_parallel):
self.attns.append(nn.Sequential(OrderedDict([
('norm', norm_layer(dim)),
('attn', Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)),
('ls', LayerScale(dim, init_values=init_values) if init_values else nn.Identity()),
('drop_path', DropPath(drop_path) if drop_path > 0. else nn.Identity())
])))
self.ffns.append(nn.Sequential(OrderedDict([
('norm', norm_layer(dim)),
('mlp', Mlp(dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop)),
('ls', LayerScale(dim, init_values=init_values) if init_values else nn.Identity()),
('drop_path', DropPath(drop_path) if drop_path > 0. else nn.Identity())
])))
def _forward_jit(self, x):
x = x + torch.stack([attn(x) for attn in self.attns]).sum(dim=0)
x = x + torch.stack([ffn(x) for ffn in self.ffns]).sum(dim=0)
return x
@torch.jit.ignore
def _forward(self, x):
x = x + sum(attn(x) for attn in self.attns)
x = x + sum(ffn(x) for ffn in self.ffns)
return x
def forward(self, x):
if torch.jit.is_scripting() or torch.jit.is_tracing():
return self._forward_jit(x)
else:
return self._forward(x)
class VisionTransformer(nn.Module):
""" Vision Transformer
A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale`
- https://arxiv.org/abs/2010.11929
"""
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
num_classes=1000,
global_pool='token',
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4.,
qkv_bias=True,
init_values=None,
class_token=True,
no_embed_class=False,
pre_norm=False,
fc_norm=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
weight_init='',
embed_layer=PatchEmbed,
norm_layer=None,
act_layer=None,
block_fn=Block,
attn_layer=Attention
):
"""
Args:
img_size (int, tuple): input image size
patch_size (int, tuple): patch size
in_chans (int): number of input channels
num_classes (int): number of classes for classification head
global_pool (str): type of global pooling for final sequence (default: 'token')
embed_dim (int): embedding dimension
depth (int): depth of transformer
num_heads (int): number of attention heads
            mlp_ratio (float): ratio of mlp hidden dim to embedding dim
            qkv_bias (bool): enable bias for qkv if True
            init_values (float): layer-scale init values
class_token (bool): use class token
fc_norm (Optional[bool]): pre-fc norm after pool, set if global_pool == 'avg' if None (default: None)
drop_rate (float): dropout rate
attn_drop_rate (float): attention dropout rate
drop_path_rate (float): stochastic depth rate
weight_init (str): weight init scheme
embed_layer (nn.Module): patch embedding layer
            norm_layer (nn.Module): normalization layer
            act_layer (nn.Module): MLP activation layer
"""
super().__init__()
assert global_pool in ('', 'avg', 'token')
assert class_token or global_pool != 'token'
use_fc_norm = global_pool == 'avg' if fc_norm is None else fc_norm
norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
act_layer = act_layer or nn.GELU
self.num_classes = num_classes
self.global_pool = global_pool
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.num_prefix_tokens = 1 if class_token else 0
self.no_embed_class = no_embed_class
self.grad_checkpointing = False
self.patch_embed = embed_layer(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
bias=not pre_norm, # disable bias if pre-norm is used (e.g. CLIP)
)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if class_token else None
embed_len = num_patches if no_embed_class else num_patches + self.num_prefix_tokens
self.pos_embed = nn.Parameter(torch.randn(1, embed_len, embed_dim) * .02)
self.pos_drop = nn.Dropout(p=drop_rate)
self.norm_pre = norm_layer(embed_dim) if pre_norm else nn.Identity()
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.Sequential(*[
block_fn(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
init_values=init_values,
drop=drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
act_layer=act_layer,
attn_layer=attn_layer
)
for i in range(depth)])
self.norm = norm_layer(embed_dim) if not use_fc_norm else nn.Identity()
# Classifier Head
self.fc_norm = norm_layer(embed_dim) if use_fc_norm else nn.Identity()
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
if weight_init != 'skip':
self.init_weights(weight_init)
def init_weights(self, mode=''):
assert mode in ('jax', 'jax_nlhb', 'moco', '')
head_bias = -math.log(self.num_classes) if 'nlhb' in mode else 0.
trunc_normal_(self.pos_embed, std=.02)
if self.cls_token is not None:
nn.init.normal_(self.cls_token, std=1e-6)
named_apply(get_init_weights_vit(mode, head_bias), self)
def _init_weights(self, m):
# this fn left here for compat with downstream users
init_weights_vit_timm(m)
@torch.jit.ignore()
def load_pretrained(self, checkpoint_path, prefix=''):
_load_weights(self, checkpoint_path, prefix)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token', 'dist_token'}
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^cls_token|pos_embed|patch_embed', # stem and embed
blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes: int, global_pool=None):
self.num_classes = num_classes
if global_pool is not None:
assert global_pool in ('', 'avg', 'token')
self.global_pool = global_pool
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
def _pos_embed(self, x):
if self.no_embed_class:
# deit-3, updated JAX (big vision)
# position embedding does not overlap with class token, add then concat
x = x + self.pos_embed
if self.cls_token is not None:
x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1)
else:
# original timm, JAX, and deit vit impl
# pos_embed has entry for class token, concat then add
if self.cls_token is not None:
x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1)
x = x + self.pos_embed
return self.pos_drop(x)
def forward_features(self, x):
x = self.patch_embed(x)
x = self._pos_embed(x)
x = self.norm_pre(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x)
else:
x = self.blocks(x)
x = self.norm(x)
return x
def forward_head(self, x, pre_logits: bool = False):
if self.global_pool:
x = x[:, self.num_prefix_tokens:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0]
x = self.fc_norm(x)
return x if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
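# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch added for clarity, not part of the
# original clip-rocket/timm source. It shows one plausible way to instantiate
# the VisionTransformer defined above with deliberately tiny, hypothetical
# hyper-parameters and push a random batch through it. It assumes the default
# PatchEmbed/Block/Attention layers defined earlier in this file.
def _example_tiny_vit_forward():
    model = VisionTransformer(
        img_size=32, patch_size=8, embed_dim=64, depth=2, num_heads=2,
        num_classes=10)
    images = torch.randn(2, 3, 32, 32)  # batch of 2 random RGB images
    logits = model(images)              # expected shape: (2, 10)
    return logits.shape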
def init_weights_vit_timm(module: nn.Module, name: str = ''):
""" ViT weight initialization, original timm impl (for reproducibility) """
if isinstance(module, nn.Linear):
trunc_normal_(module.weight, std=.02)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif hasattr(module, 'init_weights'):
module.init_weights()
def init_weights_vit_jax(module: nn.Module, name: str = '', head_bias: float = 0.):
""" ViT weight initialization, matching JAX (Flax) impl """
if isinstance(module, nn.Linear):
if name.startswith('head'):
nn.init.zeros_(module.weight)
nn.init.constant_(module.bias, head_bias)
else:
nn.init.xavier_uniform_(module.weight)
if module.bias is not None:
nn.init.normal_(module.bias, std=1e-6) if 'mlp' in name else nn.init.zeros_(module.bias)
elif isinstance(module, nn.Conv2d):
lecun_normal_(module.weight)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif hasattr(module, 'init_weights'):
module.init_weights()
def init_weights_vit_moco(module: nn.Module, name: str = ''):
""" ViT weight initialization, matching moco-v3 impl minus fixed PatchEmbed """
if isinstance(module, nn.Linear):
if 'qkv' in name:
# treat the weights of Q, K, V separately
val = math.sqrt(6. / float(module.weight.shape[0] // 3 + module.weight.shape[1]))
nn.init.uniform_(module.weight, -val, val)
else:
nn.init.xavier_uniform_(module.weight)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif hasattr(module, 'init_weights'):
module.init_weights()
def get_init_weights_vit(mode='jax', head_bias: float = 0.):
if 'jax' in mode:
return partial(init_weights_vit_jax, head_bias=head_bias)
elif 'moco' in mode:
return init_weights_vit_moco
else:
return init_weights_vit_timm
@torch.no_grad()
def _load_weights(model: VisionTransformer, checkpoint_path: str, prefix: str = ''):
""" Load weights from .npz checkpoints for official Google Brain Flax implementation
"""
import numpy as np
def _n2p(w, t=True):
if w.ndim == 4 and w.shape[0] == w.shape[1] == w.shape[2] == 1:
w = w.flatten()
if t:
if w.ndim == 4:
w = w.transpose([3, 2, 0, 1])
elif w.ndim == 3:
w = w.transpose([2, 0, 1])
elif w.ndim == 2:
w = w.transpose([1, 0])
return torch.from_numpy(w)
w = np.load(checkpoint_path)
if not prefix and 'opt/target/embedding/kernel' in w:
prefix = 'opt/target/'
if hasattr(model.patch_embed, 'backbone'):
# hybrid
backbone = model.patch_embed.backbone
stem_only = not hasattr(backbone, 'stem')
stem = backbone if stem_only else backbone.stem
stem.conv.weight.copy_(adapt_input_conv(stem.conv.weight.shape[1], _n2p(w[f'{prefix}conv_root/kernel'])))
stem.norm.weight.copy_(_n2p(w[f'{prefix}gn_root/scale']))
stem.norm.bias.copy_(_n2p(w[f'{prefix}gn_root/bias']))
if not stem_only:
for i, stage in enumerate(backbone.stages):
for j, block in enumerate(stage.blocks):
bp = f'{prefix}block{i + 1}/unit{j + 1}/'
for r in range(3):
getattr(block, f'conv{r + 1}').weight.copy_(_n2p(w[f'{bp}conv{r + 1}/kernel']))
getattr(block, f'norm{r + 1}').weight.copy_(_n2p(w[f'{bp}gn{r + 1}/scale']))
getattr(block, f'norm{r + 1}').bias.copy_(_n2p(w[f'{bp}gn{r + 1}/bias']))
if block.downsample is not None:
block.downsample.conv.weight.copy_(_n2p(w[f'{bp}conv_proj/kernel']))
block.downsample.norm.weight.copy_(_n2p(w[f'{bp}gn_proj/scale']))
block.downsample.norm.bias.copy_(_n2p(w[f'{bp}gn_proj/bias']))
embed_conv_w = _n2p(w[f'{prefix}embedding/kernel'])
else:
embed_conv_w = adapt_input_conv(
model.patch_embed.proj.weight.shape[1], _n2p(w[f'{prefix}embedding/kernel']))
model.patch_embed.proj.weight.copy_(embed_conv_w)
model.patch_embed.proj.bias.copy_(_n2p(w[f'{prefix}embedding/bias']))
model.cls_token.copy_(_n2p(w[f'{prefix}cls'], t=False))
pos_embed_w = _n2p(w[f'{prefix}Transformer/posembed_input/pos_embedding'], t=False)
if pos_embed_w.shape != model.pos_embed.shape:
pos_embed_w = resize_pos_embed( # resize pos embedding when different size from pretrained weights
pos_embed_w,
model.pos_embed,
getattr(model, 'num_prefix_tokens', 1),
model.patch_embed.grid_size
)
model.pos_embed.copy_(pos_embed_w)
model.norm.weight.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/scale']))
model.norm.bias.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/bias']))
if isinstance(model.head, nn.Linear) and model.head.bias.shape[0] == w[f'{prefix}head/bias'].shape[-1]:
model.head.weight.copy_(_n2p(w[f'{prefix}head/kernel']))
model.head.bias.copy_(_n2p(w[f'{prefix}head/bias']))
# NOTE representation layer has been removed, not used in latest 21k/1k pretrained weights
# if isinstance(getattr(model.pre_logits, 'fc', None), nn.Linear) and f'{prefix}pre_logits/bias' in w:
# model.pre_logits.fc.weight.copy_(_n2p(w[f'{prefix}pre_logits/kernel']))
# model.pre_logits.fc.bias.copy_(_n2p(w[f'{prefix}pre_logits/bias']))
for i, block in enumerate(model.blocks.children()):
block_prefix = f'{prefix}Transformer/encoderblock_{i}/'
mha_prefix = block_prefix + 'MultiHeadDotProductAttention_1/'
block.norm1.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/scale']))
block.norm1.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/bias']))
block.attn.qkv.weight.copy_(torch.cat([
_n2p(w[f'{mha_prefix}{n}/kernel'], t=False).flatten(1).T for n in ('query', 'key', 'value')]))
block.attn.qkv.bias.copy_(torch.cat([
_n2p(w[f'{mha_prefix}{n}/bias'], t=False).reshape(-1) for n in ('query', 'key', 'value')]))
block.attn.proj.weight.copy_(_n2p(w[f'{mha_prefix}out/kernel']).flatten(1))
block.attn.proj.bias.copy_(_n2p(w[f'{mha_prefix}out/bias']))
for r in range(2):
getattr(block.mlp, f'fc{r + 1}').weight.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/kernel']))
getattr(block.mlp, f'fc{r + 1}').bias.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/bias']))
block.norm2.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/scale']))
block.norm2.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/bias']))
def resize_pos_embed(posemb, posemb_new, num_prefix_tokens=1, gs_new=()):
# Rescale the grid of position embeddings when loading from state_dict. Adapted from
# https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224
ntok_new = posemb_new.shape[1]
if num_prefix_tokens:
posemb_prefix, posemb_grid = posemb[:, :num_prefix_tokens], posemb[0, num_prefix_tokens:]
ntok_new -= num_prefix_tokens
else:
posemb_prefix, posemb_grid = posemb[:, :0], posemb[0]
gs_old = int(math.sqrt(len(posemb_grid)))
if not len(gs_new): # backwards compatibility
gs_new = [int(math.sqrt(ntok_new))] * 2
assert len(gs_new) >= 2
posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
posemb_grid = F.interpolate(posemb_grid, size=gs_new, mode='bicubic', align_corners=False)
posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_new[0] * gs_new[1], -1)
posemb = torch.cat([posemb_prefix, posemb_grid], dim=1)
return posemb
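# Editor's note: illustrative sketch (added, not original code) showing how
# resize_pos_embed adapts a pretrained position embedding to a new grid size.
# The shapes below are hypothetical: a 14x14 pretrained grid with one class
# token is interpolated down to a 7x7 grid.
def _example_resize_pos_embed():
    pretrained = torch.randn(1, 1 + 14 * 14, 8)
    target = torch.zeros(1, 1 + 7 * 7, 8)
    resized = resize_pos_embed(pretrained, target, num_prefix_tokens=1, gs_new=(7, 7))
    return resized.shape  # torch.Size([1, 50, 8])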
def _convert_openai_clip(state_dict, model):
out_dict = {}
swaps = [
('visual.', ''), ('conv1', 'patch_embed.proj'), ('positional_embedding', 'pos_embed'),
('transformer.resblocks.', 'blocks.'), ('ln_pre', 'norm_pre'), ('ln_post', 'norm'), ('ln_', 'norm'),
('in_proj_', 'qkv.'), ('out_proj', 'proj'), ('mlp.c_fc', 'mlp.fc1'), ('mlp.c_proj', 'mlp.fc2'),
]
for k, v in state_dict.items():
if not k.startswith('visual.'):
continue
for sp in swaps:
k = k.replace(sp[0], sp[1])
if k == 'proj':
k = 'head.weight'
v = v.transpose(0, 1)
out_dict['head.bias'] = torch.zeros(v.shape[0])
elif k == 'class_embedding':
k = 'cls_token'
v = v.unsqueeze(0).unsqueeze(1)
elif k == 'pos_embed':
v = v.unsqueeze(0)
if v.shape[1] != model.pos_embed.shape[1]:
# To resize pos embedding when using model at different size from pretrained weights
v = resize_pos_embed(
v,
model.pos_embed,
0 if getattr(model, 'no_embed_class') else getattr(model, 'num_prefix_tokens', 1),
model.patch_embed.grid_size
)
out_dict[k] = v
return out_dict
def checkpoint_filter_fn(state_dict, model, adapt_layer_scale=False):
""" convert patch embedding weight from manual patchify + linear proj to conv"""
import re
out_dict = {}
if 'model' in state_dict:
# For deit models
state_dict = state_dict['model']
if 'visual.class_embedding' in state_dict:
return _convert_openai_clip(state_dict, model)
for k, v in state_dict.items():
if 'patch_embed.proj.weight' in k and len(v.shape) < 4:
# For old models that I trained prior to conv based patchification
O, I, H, W = model.patch_embed.proj.weight.shape
v = v.reshape(O, -1, H, W)
elif k == 'pos_embed' and v.shape[1] != model.pos_embed.shape[1]:
# To resize pos embedding when using model at different size from pretrained weights
v = resize_pos_embed(
v,
model.pos_embed,
0 if getattr(model, 'no_embed_class') else getattr(model, 'num_prefix_tokens', 1),
model.patch_embed.grid_size
)
elif adapt_layer_scale and 'gamma_' in k:
# remap layer-scale gamma into sub-module (deit3 models)
k = re.sub(r'gamma_([0-9])', r'ls\1.gamma', k)
elif 'pre_logits' in k:
# NOTE representation layer removed as not used in latest 21k/1k pretrained weights
continue
out_dict[k] = v
return out_dict
def _create_vision_transformer(variant, pretrained=False, **kwargs):
if kwargs.get('features_only', None):
raise RuntimeError('features_only not implemented for Vision Transformer models.')
kwargs['attn_layer'] = MemEffAttention if kwargs['attn_layer'] == "flash" else Attention
pretrained_cfg = resolve_pretrained_cfg(variant, pretrained_cfg=kwargs.pop('pretrained_cfg', None))
model = build_model_with_cfg(
VisionTransformer, variant, pretrained,
pretrained_cfg=pretrained_cfg,
pretrained_filter_fn=checkpoint_filter_fn,
pretrained_custom_load='npz' in pretrained_cfg['url'],
**kwargs)
return model
|
clip-rocket-main
|
vit.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# Modified from github.com/openai/CLIP
import gzip
import html
import os
from functools import lru_cache
import ftfy
import regex as re
import torch
from textaugment import EDA
import random
from nltk.tokenize import word_tokenize
import nltk
@lru_cache()
def default_bpe():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz")
@lru_cache()
def bytes_to_unicode():
"""
Returns list of utf-8 byte and a corresponding list of unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
    This is a significant percentage of your normal, say, 32K bpe vocab.
    To avoid that, we want lookup tables between utf-8 bytes and unicode strings
    that avoid mapping to whitespace/control characters the bpe code barfs on.
"""
bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8+n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
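# Editor's note: small added sanity-check sketch, not in the original file.
# It illustrates the property the docstring describes: bytes_to_unicode() is a
# bijection from all 256 byte values onto printable unicode characters, so the
# byte decoder used later can invert it exactly.
def _example_bytes_to_unicode_roundtrip():
    byte_encoder = bytes_to_unicode()
    byte_decoder = {v: k for k, v in byte_encoder.items()}
    assert len(byte_encoder) == 256 and len(set(byte_encoder.values())) == 256
    return all(byte_decoder[byte_encoder[b]] == b for b in range(256))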
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
def basic_clean(text):
text = ftfy.fix_text(text)
text = html.unescape(html.unescape(text))
return text.strip()
def whitespace_clean(text):
text = re.sub(r'\s+', ' ', text)
text = text.strip()
return text
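# Editor's note: illustrative usage sketch (added, not original). basic_clean
# repairs mojibake and HTML entities via ftfy/html.unescape, and
# whitespace_clean collapses whitespace runs; together they normalize raw
# captions before tokenization. The input string below is a hypothetical example.
def _example_clean_caption():
    return whitespace_clean(basic_clean("A  photo &amp; a   dog\n"))  # "A photo & a dog"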
class SimpleTokenizer(object):
def __init__(
self,
bpe_path: str = default_bpe(),
text_augment=False,
no_text_augment_prob=0.0,
remove_stopwords_prob=0.5,
synonym_replacement_prob=0.2,
random_swap_prob=0.2,
random_deletion_prob=0.1,
clean_before_augment=False,
num_augs=2,
):
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
merges = merges[1:49152-256-2+1]
merges = [tuple(merge.split()) for merge in merges]
vocab = list(bytes_to_unicode().values())
vocab = vocab + [v+'</w>' for v in vocab]
for merge in merges:
vocab.append(''.join(merge))
vocab.extend(['<|startoftext|>', '<|endoftext|>'])
self.encoder = dict(zip(vocab, range(len(vocab))))
self.decoder = {v: k for k, v in self.encoder.items()}
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)
self.clean_before_augment = clean_before_augment
self.remove_stopwords_prob = remove_stopwords_prob
self.stopwords = set(nltk.corpus.stopwords.words('english'))
self.remove_stopwords = lambda x: " ".join([w for w in word_tokenize(x) if w.lower() not in self.stopwords])
if text_augment:
eda = EDA()
identity = lambda x: x
self.text_augment = lambda x: random.choices(
[
identity,
eda.synonym_replacement,
eda.random_swap,
eda.random_deletion
],
weights=[
no_text_augment_prob,
synonym_replacement_prob,
random_swap_prob,
random_deletion_prob
],
k=1
)[0](x)
else:
self.text_augment = None
self.num_augs = num_augs
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token[:-1]) + ( token[-1] + '</w>',)
pairs = get_pairs(word)
if not pairs:
return token+'</w>'
while True:
bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
                except ValueError:  # first does not occur in the rest of the word
new_word.extend(word[i:])
break
if word[i] == first and i < len(word)-1 and word[i+1] == second:
new_word.append(first+second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
text = whitespace_clean(basic_clean(text)).lower()
for token in re.findall(self.pat, text):
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
return bpe_tokens
def decode(self, tokens):
text = ''.join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
return text
def weak_augment(self, text):
if len(text) == 0:
return text
if random.random() < self.remove_stopwords_prob:
stripped_texts = self.remove_stopwords(text)
text = stripped_texts if len(stripped_texts) > 0 else text
return text
def strong_augment(self, text):
if len(text) == 0:
return text
if random.random() < self.remove_stopwords_prob:
stripped_texts = self.remove_stopwords(text)
text = stripped_texts if len(stripped_texts) > 0 else text
if self.text_augment is not None:
augmented_texts = self.text_augment(text)
augmented_texts = augmented_texts[0] if isinstance(augmented_texts, list) else augmented_texts
text = augmented_texts if len(augmented_texts) > 0 else text
return text
def __call__(self, texts, context_length=77):
if isinstance(texts, tuple): # training time
texts = list(texts)
if self.clean_before_augment:
for i, txt in enumerate(texts):
texts[i] = whitespace_clean(basic_clean(txt)).lower()
texts = [
self.weak_augment(random.choice(texts)),
*[self.strong_augment(random.choice(texts)) for _ in range(self.num_augs)],
]
sot_token = self.encoder["<|startoftext|>"]
eot_token = self.encoder["<|endoftext|>"]
all_tokens = [[sot_token] + self.encode(text) + [eot_token] for text in texts]
result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
for i, tokens in enumerate(all_tokens):
tokens = tokens[:context_length]
if tokens[-1] != eot_token:
tokens[-1] = eot_token
result[i, :len(tokens)] = torch.tensor(tokens)
if len(result) == 1:
return result[0]
return result
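# Editor's note: illustrative sketch (not part of the original file) showing a
# plain encode/decode round trip with SimpleTokenizer. It assumes the bundled
# bpe_simple_vocab_16e6.txt.gz returned by default_bpe() and the NLTK
# "stopwords" corpus are available locally; text augmentation is left disabled.
def _example_tokenizer_roundtrip():
    tokenizer = SimpleTokenizer()
    token_ids = tokenizer.encode("a photo of a dog")
    text = tokenizer.decode(token_ids)
    return token_ids, text.strip()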
|
clip-rocket-main
|
tokenizer.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import os
import random
import shutil
import torch
import torch.distributed as dist
import torch.autograd as autograd
from PIL import ImageFilter, ImageOps
def get_model_parallel(model):
if isinstance(model, torch.nn.DataParallel) \
or isinstance(model, torch.nn.parallel.DistributedDataParallel):
return model.module
else:
return model
def setup_for_distributed(is_master):
"""
    This function disables printing when not in the master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def save_on_master(state, is_best, output_dir):
if is_main_process():
ckpt_path = f'{output_dir}/checkpoint.pt'
best_path = f'{output_dir}/checkpoint_best.pt'
torch.save(state, ckpt_path)
if is_best:
shutil.copyfile(ckpt_path, best_path)
def init_distributed_mode(args):
if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ['WORLD_SIZE'])
args.gpu = int(os.environ['LOCAL_RANK'])
elif 'SLURM_PROCID' in os.environ:
args.rank = int(os.environ['SLURM_PROCID'])
args.gpu = args.rank % torch.cuda.device_count()
else:
print('Not using distributed mode')
args.distributed = False
return
args.distributed = True
torch.cuda.set_device(args.gpu)
args.dist_backend = 'nccl'
print('| distributed init (rank {}): {}'.format(
args.rank, args.dist_url), flush=True)
torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
torch.distributed.barrier()
setup_for_distributed(args.rank == 0)
def scaled_all_reduce(tensors, is_scale=True):
"""Performs the scaled all_reduce operation on the provided tensors.
The input tensors are modified in-place. Currently supports only the sum
reduction operator. The reduced values are scaled by the inverse size of the
world size.
"""
world_size = get_world_size()
# There is no need for reduction in the single-proc case
if world_size == 1:
return tensors
# Queue the reductions
reductions = []
for tensor in tensors:
reduction = dist.all_reduce(tensor, async_op=True)
reductions.append(reduction)
# Wait for reductions to finish
for reduction in reductions:
reduction.wait()
# Scale the results
if is_scale:
for tensor in tensors:
tensor.mul_(1.0 / world_size)
return tensors
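# Editor's note: added illustrative sketch, not original code. In a single
# process scaled_all_reduce is a no-op; under torch.distributed each tensor
# becomes the across-rank mean (sum divided by the world size).
def _example_scaled_all_reduce_single_process():
    t = torch.ones(4)
    (out,) = scaled_all_reduce([t])
    return out  # unchanged when world_size == 1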
def all_gather_batch(tensors):
"""
Performs all_gather operation on the provided tensors.
"""
# Queue the gathered tensors
world_size = get_world_size()
# There is no need for reduction in the single-proc case
if world_size == 1:
return tensors
tensor_list = []
output_tensor = []
for tensor in tensors:
tensor_all = [torch.ones_like(tensor) for _ in range(world_size)]
dist.all_gather(
tensor_all,
tensor,
            async_op=False  # wait for the gather to finish before concatenating
)
tensor_list.append(tensor_all)
for tensor_all in tensor_list:
output_tensor.append(torch.cat(tensor_all, dim=0))
return output_tensor
class GatherLayer(autograd.Function):
"""
Gather tensors from all workers with support for backward propagation:
This implementation does not cut the gradients as torch.distributed.all_gather does.
"""
@staticmethod
def forward(ctx, x):
output = [torch.zeros_like(x) for _ in range(dist.get_world_size())]
dist.all_gather(output, x)
return tuple(output)
@staticmethod
def backward(ctx, *grads):
all_gradients = torch.stack(grads)
dist.all_reduce(all_gradients)
return all_gradients[dist.get_rank()]
def all_gather_batch_with_grad(tensors):
"""
Performs all_gather operation on the provided tensors.
Graph remains connected for backward grad computation.
"""
# Queue the gathered tensors
world_size = get_world_size()
# There is no need for reduction in the single-proc case
if world_size == 1:
return tensors
tensor_list = []
output_tensor = []
for tensor in tensors:
tensor_all = GatherLayer.apply(tensor)
tensor_list.append(tensor_all)
for tensor_all in tensor_list:
output_tensor.append(torch.cat(tensor_all, dim=0))
return output_tensor
def cosine_scheduler(base_value, final_value, epochs, niter_per_ep, warmup_epochs=0, start_warmup_value=0):
warmup_schedule = np.array([])
warmup_iters = warmup_epochs * niter_per_ep
if warmup_epochs > 0:
warmup_schedule = np.linspace(start_warmup_value, base_value, warmup_iters)
iters = np.arange(epochs * niter_per_ep - warmup_iters)
schedule = final_value + 0.5 * (base_value - final_value) * (1 + np.cos(np.pi * iters / len(iters)))
schedule = np.concatenate((warmup_schedule, schedule))
assert len(schedule) == epochs * niter_per_ep
return schedule
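# Editor's note: illustrative sketch (added for clarity, not original code)
# building a per-iteration learning-rate schedule with a linear warmup followed
# by a cosine decay. All values are hypothetical, not taken from this repo's
# training configs.
def _example_cosine_schedule():
    schedule = cosine_scheduler(
        base_value=3e-3, final_value=1e-5, epochs=25, niter_per_ep=100,
        warmup_epochs=1, start_warmup_value=1e-6)
    assert len(schedule) == 25 * 100
    return schedule[0], schedule.max(), schedule[-1]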
class GaussianBlur(object):
"""Gaussian blur augmentation in SimCLR https://arxiv.org/abs/2002.05709"""
def __init__(self, sigma=[.1, 2.]):
self.sigma = sigma
def __call__(self, x):
sigma = random.uniform(self.sigma[0], self.sigma[1])
x = x.filter(ImageFilter.GaussianBlur(radius=sigma))
return x
class Solarization:
"""Solarization as a callable object."""
def __call__(self, img):
return ImageOps.solarize(img)
@torch.no_grad()
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.reshape(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
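# Editor's note: illustrative sketch (added, not original) showing how the
# top-k accuracy helper is called on a random batch of logits and targets.
def _example_accuracy():
    logits = torch.randn(8, 10)
    targets = torch.randint(0, 10, (8,))
    top1, top5 = accuracy(logits, targets, topk=(1, 5))
    return top1.item(), top5.item()  # percentages in [0, 100]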
def infer_batchnorm_class():
if dist.is_available() and dist.is_initialized():
return torch.nn.SyncBatchNorm
else:
return torch.nn.BatchNorm1d
def cycle(iterable, sampler=None):
epoch = 0
while True:
if sampler is not None:
sampler.set_epoch(epoch)
for x in iterable:
yield x
epoch += 1
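# Editor's note: added illustrative sketch, not original code. cycle() turns a
# finite iterable (typically a DataLoader) into an endless stream, bumping the
# DistributedSampler epoch at every pass when a sampler is provided.
def _example_cycle():
    stream = cycle([1, 2, 3])
    return [next(stream) for _ in range(5)]  # [1, 2, 3, 1, 2]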
|
clip-rocket-main
|
utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from collections import OrderedDict, defaultdict
import json
import os
from sklearn import metrics
import numpy as np
from tqdm import tqdm
import torch
import torch.backends.cudnn as cudnn
import torch.utils.data
import torchvision.transforms as transforms
import datasets
import models
from tokenizer import SimpleTokenizer
import utils
def get_args_parser():
parser = argparse.ArgumentParser(description='SLIP 0-shot evaluations', add_help=False)
parser.add_argument('--output-dir', default='./', type=str, help='output dir')
parser.add_argument('--batch-size', default=256, type=int, help='batch_size')
parser.add_argument('-j', '--workers', default=10, type=int, metavar='N',
help='number of data loading workers per process')
parser.add_argument('--resume', default='', type=str, help='path to latest checkpoint')
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument('--model-name', type=str, default='')
return parser
def main(args):
# optionally resume from a checkpoint (takes precedence over autoresume)
if args.resume:
ckpt_path = args.resume
elif os.path.isfile(os.path.join(args.output_dir, 'checkpoint_best.pt')):
ckpt_path = os.path.join(args.output_dir, 'checkpoint_best.pt')
else:
raise Exception('no checkpoint found')
ckpt = torch.load(ckpt_path, map_location='cpu')
state_dict = OrderedDict()
for k, v in ckpt['state_dict'].items():
state_dict[k.replace('module.', '')] = v
# create model
old_args = ckpt['args']
print("=> creating model: {}".format(old_args.model))
model = models.get_model(old_args, rand_embed=False)
model.cuda()
msg = model.load_state_dict(state_dict, strict=False)
print(msg)
print("=> loaded resume checkpoint '{}' (epoch {})".format(args.resume, ckpt['epoch']))
cudnn.benchmark = True
cwd = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(cwd, 'dataset_catalog.json')) as f:
catalog = json.load(f)
with open(os.path.join(cwd, 'templates.json')) as f:
all_templates = json.load(f)
with open(os.path.join(cwd, 'labels.json')) as f:
all_labels = json.load(f)
# Data loading code
print("=> creating dataset")
tokenizer = SimpleTokenizer(bpe_path=old_args.bpe_path)
val_transform = transforms.Compose([
transforms.Resize(224),
transforms.CenterCrop(224),
lambda x: x.convert('RGB'),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
results = {}
for d in catalog:
if d == 'kinetics700_frames':
continue
print('Evaluating {}'.format(d))
val_dataset = datasets.get_downstream_dataset(catalog, name=d, is_train=False, transform=val_transform)
val_loader = torch.utils.data.DataLoader(
val_dataset, batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True, drop_last=False)
templates = all_templates[d]
labels = all_labels[d]
accs = validate_zeroshot(val_loader, templates, labels, model, tokenizer, d, old_args)
results[d] = accs
print('metric:', accs)
print('All results:')
for d, x in results.items():
print('{}:\n{}'.format(d, x))
res_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'zeroshot_results')
os.makedirs(res_dir, exist_ok=True)
exp_id = os.path.basename(args.output_dir)
ckpt_name = os.path.basename(ckpt_path).rsplit('.', 1)[0]
with open('{}/{}_{}_{}.txt'.format(res_dir, args.model_name, exp_id, ckpt_name), 'w') as f:
f.write(json.dumps(results))
def validate_zeroshot(val_loader, templates, labels, model, tokenizer, dataset_name, args):
# switch to evaluate mode
model.eval()
is_acc = dataset_name not in ['aircraft', 'pets', 'caltech101', 'flowers', 'kinetics700_frames', 'hateful_memes']
print('is_acc:', is_acc)
ensemble_weights = np.linspace(0.1, 0.9, 9).round(decimals=1)
results = defaultdict(lambda: defaultdict(int if is_acc else list))
with torch.no_grad():
print('=> encoding captions')
text_features = defaultdict(list)
for label in tqdm(labels):
if isinstance(label, list):
texts = [t.format(l) for t in templates for l in label]
else:
texts = [t.format(label) for t in templates]
texts = tokenizer(texts).cuda(non_blocking=True)
texts = texts.view(-1, 77).contiguous()
class_embeddings = utils.get_model_parallel(model).encode_text_val(texts)
embed_names = {'z_text', 'h_text'}.intersection(class_embeddings.keys())
for embed_name in embed_names:
cls_embed = class_embeddings[embed_name]
cls_embed = cls_embed / cls_embed.norm(dim=-1, keepdim=True)
cls_embed = cls_embed.mean(dim=0)
cls_embed = cls_embed / cls_embed.norm(dim=-1, keepdim=True)
text_features[embed_name].append(cls_embed)
text_features = {k: torch.stack(v, dim=0) for k, v in text_features.items()}
print('=> encoding images')
for images, target in tqdm(val_loader):
images = images.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
# encode images
image_features = utils.get_model_parallel(model).encode_image_val(images)
# cosine similarity as logits
similarities = utils.get_model_parallel(model).predict_zeroshot(image_features, text_features)
# measure accuracy
for name, similarity in similarities.items():
if is_acc:
# measure accuracy and record loss
pred = similarity.argmax(dim=1)
correct = pred.eq(target).sum()
results[f'acc1_{name[0]}']['correct'] += correct.item()
results[f'acc1_{name[0]}']['total'] += images.size(0)
else:
results[name[0]]['outputs'].append(similarity.cpu())
results[name[0]]['targets'].append(target.cpu())
if is_acc and not args.model.startswith('CLIP'):
# ensemble accuracy
for w in ensemble_weights:
similarity = w * similarities['z_sim'] + (1-w) * similarities['h_sim']
# measure accuracy and record loss
pred = similarity.argmax(dim=1)
correct = pred.eq(target).sum()
results[f'acc1_zh_{w}']['correct'] += correct.item()
results[f'acc1_zh_{w}']['total'] += images.size(0)
if is_acc:
return {k: 100 * d['correct'] / d['total'] for k, d in results.items()}
else:
results = {
k: (torch.cat(d['outputs']), torch.cat(d['targets']))
for k, d in results.items()
}
results = {**results, **{
f'zh_{w}': (w * results['z'][0] + (1-w) * results['h'][0], results['z'][1])
for w in ensemble_weights
}}
if dataset_name in ['aircraft', 'pets', 'caltech101', 'flowers']:
return {k: mean_per_class(*r) for k, r in results.items()}
elif dataset_name == 'kinetics700_frames':
return {k: (sum(accuracy(*r, topk=(1, 5))) / 2).item() for k, r in results.items()}
elif dataset_name == 'hateful_memes':
return {k: roc_auc(*r) for k, r in results.items()}
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.reshape(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def mean_per_class(outputs, targets):
pred = outputs.argmax(1)
confusion_matrix = metrics.confusion_matrix(targets, pred)
per_classes = confusion_matrix.diagonal() / confusion_matrix.sum(axis=1)
return 100 * per_classes.mean()
def roc_auc(outputs, targets):
pos_score = outputs[:, 1] - outputs[:, 0]
metric = metrics.roc_auc_score(targets, pos_score)
return 100 * metric
if __name__ == '__main__':
parser = argparse.ArgumentParser('SLIP 0-shot evaluations', parents=[get_args_parser()])
args = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
main(args)
|
clip-rocket-main
|
eval_zeroshot.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist
import utils
class CLIPLoss(nn.Module):
def __init__(self):
super().__init__()
self.labels = None
self.last_local_batch_size = None
def forward(self, outputs):
image_embed = outputs['z_image']
text_embed = outputs['z_text']
logit_scale = outputs['logit_scale']
local_batch_size = image_embed.size(0)
if local_batch_size != self.last_local_batch_size:
self.labels = local_batch_size * utils.get_rank() + torch.arange(
local_batch_size, device=image_embed.device
)
self.last_local_batch_size = local_batch_size
# normalized features
image_embed = F.normalize(image_embed, dim=-1, p=2)
text_embed = F.normalize(text_embed, dim=-1, p=2)
# gather features from all GPUs
image_embed_all, text_embed_all = \
utils.all_gather_batch([image_embed, text_embed])
# cosine similarity as logits
logits_per_image = logit_scale * image_embed @ text_embed_all.t()
logits_per_text = logit_scale * text_embed @ image_embed_all.t()
loss = (F.cross_entropy(logits_per_image, self.labels) + \
F.cross_entropy(logits_per_text, self.labels)) / 2
# compute accuracy
with torch.no_grad():
pred = torch.argmax(logits_per_image, dim=-1)
correct = pred.eq(self.labels).sum()
acc = 100 * correct / local_batch_size
return {'loss': loss, 'clip_loss': loss, 'clip_acc': acc}
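# Editor's note: illustrative sketch (added for clarity, not part of the
# original losses.py). It drives CLIPLoss with a hypothetical single-process
# outputs dict; the embedding dimension, batch size and logit scale are arbitrary.
def _example_clip_loss_single_process():
    outputs = {
        'z_image': torch.randn(4, 16),
        'z_text': torch.randn(4, 16),
        'logit_scale': torch.tensor(100.0),
    }
    stats = CLIPLoss()(outputs)
    return stats['clip_loss'], stats['clip_acc']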
class CL2LLoss(nn.Module):
def __init__(self, loss_avg_or_sum, label_smoothing):
super().__init__()
self.labels = None
self.last_local_batch_size = None
self.loss_avg_or_sum = loss_avg_or_sum
self.label_smoothing = label_smoothing
def forward(self, outputs):
z_image_global = outputs['z_image_global']
z_text_global = outputs['z_text_global']
h_image_local = outputs['h_image_local']
h_text_local = outputs['h_text_local']
logit_scale = outputs['logit_scale']
h_logit_scale = outputs['h_logit_scale']
local_batch_size = z_image_global.size(0)
assert len(h_image_local) == len(h_text_local)
num_augs = len(h_image_local)
if local_batch_size != self.last_local_batch_size:
self.labels = local_batch_size * utils.get_rank() + torch.arange(
local_batch_size, device=z_image_global.device
)
self.last_local_batch_size = local_batch_size
# normalized features
z_image_global = F.normalize(z_image_global)
z_text_global = F.normalize(z_text_global)
h_image_local = [F.normalize(z) for z in h_image_local]
h_text_local = [F.normalize(z) for z in h_text_local]
# gather features from all GPUs
z_image_global_all, z_text_global_all = utils.all_gather_batch([z_image_global, z_text_global])
h_image_local_all = utils.all_gather_batch(h_image_local)
h_text_local_all = utils.all_gather_batch(h_text_local)
# compute global loss
image_global_logits = logit_scale * z_image_global @ z_text_global_all.t()
text_global_logits = logit_scale * z_text_global @ z_image_global_all.t()
clip_loss_image_global = F.cross_entropy(image_global_logits, self.labels)
clip_loss_text_global = F.cross_entropy(text_global_logits, self.labels)
# compute local loss
clip_loss_image_local, clip_loss_text_local = 0, 0
if num_augs > 0:
image_local_logits = []
text_local_logits = []
for i in range(num_augs):
image_local_logits += [h_logit_scale * h @ h_text_local_all[i].t() for h in h_image_local]
text_local_logits += [h_logit_scale * h @ h_image_local_all[i].t() for h in h_text_local]
clip_loss_image_local = sum([F.cross_entropy(l, self.labels, label_smoothing=self.label_smoothing) for l in image_local_logits]) / len(image_local_logits)
clip_loss_text_local = sum([F.cross_entropy(l, self.labels, label_smoothing=self.label_smoothing) for l in text_local_logits]) / len(text_local_logits)
# compute total losses
clip_loss_image = (clip_loss_image_global + clip_loss_image_local * num_augs) / (1 + num_augs)
clip_loss_text = (clip_loss_text_global + clip_loss_text_local * num_augs) / (1 + num_augs)
clip_loss = (clip_loss_image + clip_loss_text) / 2
# compute accuracy
with torch.no_grad():
pred = torch.argmax(image_global_logits, dim=-1)
correct = pred.eq(self.labels).sum()
clip_acc_image_global = 100 * correct / local_batch_size
pred = torch.argmax(text_global_logits, dim=-1)
correct = pred.eq(self.labels).sum()
clip_acc_text_global = 100 * correct / local_batch_size
clip_acc_image_local, clip_acc_text_local = 0, 0
if num_augs > 0:
for aug_logits in image_local_logits:
pred = torch.argmax(aug_logits, dim=-1)
correct = pred.eq(self.labels).sum()
clip_acc_image_local += 100 * correct / local_batch_size
clip_acc_image_local /= len(image_local_logits)
for aug_logits in text_local_logits:
pred = torch.argmax(aug_logits, dim=-1)
correct = pred.eq(self.labels).sum()
clip_acc_text_local += 100 * correct / local_batch_size
                clip_acc_text_local /= len(text_local_logits)
loss = clip_loss * (2 if self.loss_avg_or_sum == 'sum' else 1)
clip_local_dict = {
'clip_loss_image_local': clip_loss_image_local,
'clip_loss_text_local': clip_loss_text_local,
'clip_acc_image_local': clip_acc_image_local,
'clip_acc_text_local': clip_acc_text_local,
} if num_augs > 0 else {}
return {
'loss': loss,
'clip_loss_image': clip_loss_image,
'clip_loss_text': clip_loss_text,
'clip_loss_image_global': clip_loss_image_global,
'clip_loss_text_global': clip_loss_text_global,
'clip_loss': clip_loss,
'clip_acc': clip_acc_image_global,
'clip_acc_image_global': clip_acc_image_global,
'clip_acc_text_global': clip_acc_text_global,
'h_logit_scale': h_logit_scale,
**clip_local_dict,
}
class BarLIPLoss(CL2LLoss):
def __init__(self, loss_avg_or_sum, label_smoothing, lamb=5e-3, scale_loss=0.025):
super().__init__(loss_avg_or_sum, label_smoothing)
self.lamb = lamb
self.scale_loss = scale_loss
def barlip_loss(self, z1, z2):
N, D = z1.size()
corr = torch.einsum("bi, bj -> ij", z1, z2) / N
if dist.is_available() and dist.is_initialized():
dist.all_reduce(corr)
world_size = dist.get_world_size()
corr /= world_size
diag = torch.eye(D, device=corr.device)
cdif = (corr - diag).pow(2)
cdif[~diag.bool()] *= self.lamb
loss = self.scale_loss * cdif.sum()
return loss
def forward(self, outputs):
# global to global
num_losses = 1
barlip_loss = self.barlip_loss(outputs['v_image'], outputs['v_text'])
# local to local
for v_image in outputs['v_image_local']:
for v_text in outputs['v_text_local']:
barlip_loss += self.barlip_loss(v_image, v_text)
num_losses += 1
barlip_loss /= num_losses
# online eval with clip loss
clip_loss_out = super().forward(outputs)
loss = barlip_loss + clip_loss_out.pop('loss')
return {
'loss': loss,
'barlip_loss': barlip_loss,
**clip_loss_out
}
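# Editor's note: added illustrative sketch, not original code. It calls the
# Barlow-Twins-style cross-correlation term in isolation on hypothetical
# standardized embeddings; identical, decorrelated views drive this term toward
# zero, which is the behaviour the full BarLIP loss optimizes for.
def _example_barlip_term():
    loss_fn = BarLIPLoss(loss_avg_or_sum='avg', label_smoothing=0.0)
    v = torch.randn(64, 8)
    v = (v - v.mean(dim=0)) / v.std(dim=0)  # standardize each dimension
    return loss_fn.barlip_loss(v, v)  # small scalar tensor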
class SiamLIPLoss(CL2LLoss):
def __init__(self, loss_avg_or_sum, label_smoothing):
super().__init__(loss_avg_or_sum, label_smoothing)
def negative_cosine_similarity(self, p, v):
p = F.normalize(p, dim=-1)
v = F.normalize(v, dim=-1)
return 2 - 2 * (p * v.detach()).sum(dim=1).mean()
def forward(self, outputs):
p_image_global = outputs['p_image']
p_text_global = outputs['p_text']
p_image_local = outputs['p_image_local']
p_text_local = outputs['p_text_local']
if any('momentum' in k for k in outputs):
v_image_global = outputs['v_image_momentum']
v_text_global = outputs['v_text_momentum']
v_image_local = outputs['v_image_local_momentum']
v_text_local = outputs['v_text_local_momentum']
else:
v_image_global = outputs['v_image']
v_text_global = outputs['v_text']
v_image_local = outputs['v_image_local']
v_text_local = outputs['v_text_local']
# global to global
num_losses = 2
siamlip_loss = (
self.negative_cosine_similarity(p_image_global, v_text_global.detach()) + \
self.negative_cosine_similarity(p_text_global, v_image_global.detach())
)
# local to local
for p in p_image_local:
for v in v_text_local:
siamlip_loss += self.negative_cosine_similarity(p, v.detach())
num_losses += 1
for p in p_text_local:
for v in v_image_local:
siamlip_loss += self.negative_cosine_similarity(p, v.detach())
num_losses += 1
siamlip_loss /= num_losses
# online eval with clip loss
clip_loss_out = super().forward(outputs)
loss = siamlip_loss + clip_loss_out.pop('loss')
return {
'loss': loss,
'siamlip_loss': siamlip_loss,
**clip_loss_out
}
class SwALIPLoss(CL2LLoss):
def __init__(
self,
loss_avg_or_sum,
label_smoothing,
sk_iters=3,
target_epsilon=0.05,
swalip_weight=0.2,
):
assert label_smoothing == 0.0
super().__init__(loss_avg_or_sum, label_smoothing)
self.sk_iters = sk_iters
self.target_epsilon = target_epsilon
self.swalip_weight = swalip_weight
self.labels = None
self.last_local_batch_size = None
self.set_world_size()
def set_world_size(self):
if dist.is_available() and dist.is_initialized():
self.world_size = dist.get_world_size()
else:
self.world_size = 1
@torch.no_grad()
def sinkhorn_knopp(self, Q: torch.Tensor) -> torch.Tensor:
"""Produces assignments using Sinkhorn-Knopp algorithm.
        Applies the entropy regularization, normalizes the Q matrix, and then normalizes rows and
        columns in an alternating fashion for sk_iters iterations. Before returning, it normalizes
        the columns again so that the output is an assignment of samples to prototypes.
        A single-process sanity check is sketched after this class.
Args:
Q (torch.Tensor): cosine similarities between the features of the
samples and the prototypes.
Returns:
torch.Tensor: assignment of samples to prototypes according to optimal transport.
"""
Q = torch.exp(Q / self.target_epsilon).t()
B = Q.shape[1] * self.world_size
K = Q.shape[0] # num prototypes
# make the matrix sum to 1
sum_Q = torch.sum(Q)
if dist.is_available() and dist.is_initialized():
dist.all_reduce(sum_Q)
Q /= sum_Q
for _ in range(self.sk_iters):
# normalize each row: total weight per prototype must be 1/K
sum_of_rows = torch.sum(Q, dim=1, keepdim=True)
if dist.is_available() and dist.is_initialized():
dist.all_reduce(sum_of_rows)
Q /= sum_of_rows
Q /= K
# normalize each column: total weight per sample must be 1/B
Q /= torch.sum(Q, dim=0, keepdim=True)
Q /= B
        Q *= B  # the columns must sum to 1 so that Q is an assignment
return Q.t()
def cross_entropy(self, logits, targets):
return -torch.mean(torch.sum(targets * torch.log_softmax(logits, dim=1), dim=1))
def forward(self, outputs):
# online eval with clip loss
clip_loss_out = super().forward(outputs)
# cl2l
h_image_local = [F.normalize(h) for h in outputs['h_image_local']]
h_text_local = [F.normalize(h) for h in outputs['h_text_local']]
h_logit_scale = outputs['h_logit_scale']
num_augs = len(h_image_local)
h_image_local_all = utils.all_gather_batch(h_image_local)
h_text_local_all = utils.all_gather_batch(h_text_local)
logits_per_image_local = [[h @ h_all.t() for h_all in h_text_local_all] for h in h_image_local]
logits_per_text_local = [[h @ h_all.t() for h_all in h_image_local_all] for h in h_text_local]
# generate pseudo-label
with torch.no_grad():
targets_per_image_local = [[self.sinkhorn_knopp(t.detach()) for t in i] for i in logits_per_image_local]
targets_per_text_local = [[self.sinkhorn_knopp(i.detach()) for i in t] for t in logits_per_text_local]
# compute the loss between all views
swalip_loss = 0
for l1 in range(2):
for l2 in range(2):
t1, t2 = abs(l1 - 1), abs(l2 - 1)
swalip_loss += (
self.cross_entropy(logits_per_image_local[l1][l2] * h_logit_scale, targets_per_image_local[t1][t2]) + \
self.cross_entropy(logits_per_text_local[l1][l2] * h_logit_scale, targets_per_text_local[t1][t2])
) / 2
swalip_loss /= num_augs ** 2
loss = self.swalip_weight * swalip_loss + clip_loss_out.pop('loss')
return {**clip_loss_out, 'loss': loss, 'swalip_loss': swalip_loss}
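# Editor's note: illustrative sketch (added, not original). It checks the key
# property of sinkhorn_knopp described in its docstring: on a single process
# the returned matrix is a soft assignment, i.e. every sample's row sums to 1.
# The 8x32 similarity matrix below is hypothetical.
def _example_sinkhorn_assignment():
    loss_fn = SwALIPLoss(loss_avg_or_sum='avg', label_smoothing=0.0)
    similarities = torch.rand(8, 32)  # 8 samples x 32 prototypes, scores in [0, 1)
    assignment = loss_fn.sinkhorn_knopp(similarities)
    return torch.allclose(assignment.sum(dim=1), torch.ones(8))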
class SwALIPV1Loss(nn.Module):
def __init__(self, sk_iters, target_epsilon, temperature, swalip_weight=1.0):
super().__init__()
self.sk_iters = sk_iters
self.target_epsilon = target_epsilon
self.temperature = temperature
self.swalip_weight = swalip_weight
self.clip_loss = CLIPLoss()
self.set_world_size()
def set_world_size(self):
if dist.is_available() and dist.is_initialized():
self.world_size = dist.get_world_size()
else:
self.world_size = 1
@torch.no_grad()
def sinkhorn_knopp(self, Q: torch.Tensor) -> torch.Tensor:
"""Produces assignments using Sinkhorn-Knopp algorithm.
        Applies the entropy regularization, normalizes the Q matrix, and then normalizes rows and
        columns in an alternating fashion for sk_iters iterations. Before returning, it normalizes
        the columns again so that the output is an assignment of samples to prototypes.
Args:
Q (torch.Tensor): cosine similarities between the features of the
samples and the prototypes.
Returns:
torch.Tensor: assignment of samples to prototypes according to optimal transport.
"""
Q = torch.exp(Q / self.target_epsilon).t()
B = Q.shape[1] * self.world_size
K = Q.shape[0] # num prototypes
        # make the matrix sum to 1
sum_Q = torch.sum(Q)
if dist.is_available() and dist.is_initialized():
dist.all_reduce(sum_Q)
Q /= sum_Q
for _ in range(self.sk_iters):
# normalize each row: total weight per prototype must be 1/K
sum_of_rows = torch.sum(Q, dim=1, keepdim=True)
if dist.is_available() and dist.is_initialized():
dist.all_reduce(sum_of_rows)
Q /= sum_of_rows
Q /= K
# normalize each column: total weight per sample must be 1/B
Q /= torch.sum(Q, dim=0, keepdim=True)
Q /= B
        Q *= B  # the columns must sum to 1 so that Q is an assignment
return Q.t()
def cross_entropy(self, logits, assign):
return -torch.mean(torch.sum(assign * torch.log_softmax(logits, dim=1), dim=1))
def forward(self, outputs):
image_logits = outputs['p_image']
text_logits = outputs['p_text']
logit_scale = outputs['swalip_logit_scale']
if any('momentum' in k for k in outputs):
image_targets = outputs['p_image_momentum']
text_targets = outputs['p_text_momentum']
else:
image_targets = outputs['p_image'].detach()
text_targets = outputs['p_text'].detach()
image_assign = self.sinkhorn_knopp(image_targets.detach())
text_assign = self.sinkhorn_knopp(text_targets.detach())
image_logits *= logit_scale
text_logits *= logit_scale
swalip_loss = (
self.cross_entropy(image_logits, text_assign) + \
self.cross_entropy(text_logits, image_assign)
) / 2
# online eval with clip loss
clip_loss_out = self.clip_loss(outputs)
loss = self.swalip_weight * swalip_loss + clip_loss_out.pop('loss')
return {
'loss': loss,
'swalip_loss': swalip_loss,
'swalip_logit_scale': logit_scale,
**clip_loss_out
}
|
clip-rocket-main
|
losses.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from collections import OrderedDict, defaultdict
import json
import math
import os
import sys
import time
try:
import wandb
except ImportError:
print("wandb not found")
import numpy as np
import torch
import torch.cuda.amp as amp
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
import datasets
import models
import utils
from tokenizer import SimpleTokenizer
def get_args_parser():
parser = argparse.ArgumentParser(description='CL2L training and evaluation', add_help=False)
# Data
parser.add_argument('--dataset', default='yfcc15m', type=str, choices=['yfcc15m', 'cc3m', 'cc12m', 'merged_opendata'])
parser.add_argument('--root', default='datasets/yfcc100m', type=str,
help='path to dataset root')
parser.add_argument('--metadata', default='datasets/yfcc15m_v1/yfcc15m.pkl', type=str,
help='path to metadata file (see README for details)')
parser.add_argument('--metadata-unpaired', default='datasets/yfcc15m_v1/yfcc85m.pkl', type=str,
help='path to metadata file (see README for details)')
parser.add_argument('--bpe-path', default='datasets/yfcc15m_v1/bpe_simple_vocab_16e6.txt.gz', type=str,
help='path to the bpe file (see README for details)')
parser.add_argument('--output-dir', default='./', type=str, help='output dir')
# Model
parser.add_argument('--model', default='CL2L_VITB16', type=str)
parser.add_argument('--attn-layer', default='flash', type=str, choices=["flash", "standard"])
parser.add_argument('--no-share-token', action='store_true')
parser.add_argument('--semi-paired', action='store_true')
parser.add_argument('--unpaired-ratio', default=4, type=int)
parser.add_argument('--embed-dim', default=256, type=int,
help='output dim for the language-image projector')
parser.add_argument('--clip-hidden-dim', default=4096, type=int,
help='hidden dim of CLIP mlp projection head')
parser.add_argument('--ssl-scale', default=1.0, type=float,
help='loss scale for SimCLR objective')
parser.add_argument('--ssl-temp', default=0.1, type=float,
help='softmax temperature for SimCLR objective')
parser.add_argument('--resume', default='', type=str, help='path to resume from')
parser.add_argument('--init-text-encoder', default=None, type=str, help='path to init from')
parser.add_argument('--detach-proj', action='store_true',
help='whether or not to detach the clip projector')
parser.add_argument('--momentum', action='store_true',
help='whether or not to use the momentum encoder')
    parser.add_argument('--momentum-tau', type=float, default=0.99,
                        help='EMA coefficient for the momentum encoder')
parser.add_argument('--transformer-layers', default=12, type=int,
help='number of layers for the text transformer')
parser.add_argument('--clip-proj-type', default='linear', type=str,
choices=['mlp', 'linear'], help='type of projector for clip')
parser.add_argument('--cl2l-txt-proj-type', default='mlp', type=str,
choices=['mlp', 'linear'], help='type of text projector for cl2l')
parser.add_argument('--cl2l-img-proj-type', default='mlp', type=str,
choices=['mlp', 'linear'], help='type of vision projector for cl2l')
parser.add_argument('--separate-proj', default=False, action='store_true',
help='different heads')
parser.add_argument('--separate-proj-child', default=False, action='store_true',
help='different heads in non-contrastive stream')
# BarLIP
parser.add_argument('--barlip-proj-dim', default=8192, type=int,
help='output dim for the barlip projector')
parser.add_argument('--barlip-hidden-dim', default=3000, type=int,
help='hidden dim for the barlip projector')
parser.add_argument('--barlip-lamb', default=5e-3, type=float,
help='lambda for BarLIP loss')
parser.add_argument('--barlip-scale-loss', default=0.025, type=float,
help='loss scaling factor for BarLIP')
# SwALIP
parser.add_argument('--swalip-proj-dim', default=128, type=int,
help='output dim for the swalip projector')
    parser.add_argument('--swalip-hidden-dim', default=2048, type=int,
                        help='hidden dim for the swalip projector')
parser.add_argument('--swalip-num-proto', default=3000, type=int,
help='number of prototypes for swalip')
parser.add_argument('--swalip-temperature', default=0.1, type=float,
help='softmax temperature for swalip')
parser.add_argument('--swalip-learn-temperature', action='store_true',
help='whether to learn softmax temperature for swalip')
    parser.add_argument('--sk-iters', default=3, type=int,
                        help='number of Sinkhorn-Knopp iterations')
    parser.add_argument('--target-epsilon', default=0.05, type=float,
                        help='entropy regularization epsilon for the Sinkhorn-Knopp targets')
parser.add_argument('--swalip-freeze-proto-iters', default=100, type=int,
help='number of iters to freeze swalip prototypes')
parser.add_argument('--swalip-no-shared-proto', action='store_true',
help='whether or not to share prototypes between modalities')
parser.add_argument('--swalip-weight', default=0.2, type=float,
help='weight for the swalip loss')
# SiamLIP
parser.add_argument('--siamlip-proj-dim', default=128, type=int,
help='output dim for the siamlip projector')
    parser.add_argument('--siamlip-hidden-dim', default=2048, type=int,
                        help='hidden dim for the siamlip projector')
parser.add_argument('--siamlip-no-last-bn', action='store_true',
help='whether to use batchnorm at the end of the proj')
# Image Augmentations
parser.add_argument('--num-augs', default=2, type=int,
help='number of augmentations in cl2l')
parser.add_argument('--multicrop-resize', default=224, type=int)
parser.add_argument('--multicrop-max-scale', default=1.0, type=float)
parser.add_argument('--weak-min-scale', default=0.5, type=float)
parser.add_argument('--blur-prob', default=0.5, type=float)
parser.add_argument('--solarize-prob', default=0.0, type=float)
parser.add_argument('--grayscale-prob', default=0.2, type=float)
parser.add_argument('--byol-augment', default=False, action='store_true',
help='byol-like asymmetric augment. It overrides other augment probs')
parser.add_argument('--weak-augment', default=False, action='store_true',
help='make all augmentations weak, including the ones of cl2l')
parser.add_argument('--strong-augment', default=False, action='store_true',
help='make all augmentations strong, including the one of baseline clip')
parser.add_argument('--randaugment', default=False, action='store_true',
help='add randaugment to base augmentations')
# Text Augmentations
parser.add_argument('--caption-sampling', default='single', type=str,
choices=['single', 'multi'], help='how to sample captions')
parser.add_argument('--text-dropout-prob', default=0.0, type=float,
help='dropout probability')
    parser.add_argument('--text-drop-path-prob', default=0.0, type=float,
                        help='drop path probability for the text encoder')
parser.add_argument('--label-smoothing', default=0.0, type=float,
help='label smoothing')
parser.add_argument('--text-augment', default=False, action='store_true',
help='text augmentations')
parser.add_argument('--clean-before-augment', default=False, action='store_true',
help='clean before text augmentations')
parser.add_argument('--no-text-augment-prob', default=0.0, type=float,
help='prob not to augment text')
parser.add_argument('--remove-stopwords-prob', default=0.8, type=float,
help='prob to remove stopwords from text')
parser.add_argument('--synonym-replacement-prob', default=0.4, type=float,
help='prob to replace synonym in text')
parser.add_argument('--random-swap-prob', default=0.4, type=float,
help='prob to randomly swap in text')
parser.add_argument('--random-deletion-prob', default=0.2, type=float,
help='prob to randomly delete text')
# Training
parser.add_argument('--epochs', default=25, type=int)
parser.add_argument('--warmup-epochs', default=1, type=int)
parser.add_argument('--start-epoch', default=0, type=int)
parser.add_argument('--batch-size', default=64, type=int,
help='number of samples per-device/per-gpu')
parser.add_argument('--lr', default=3e-3, type=float)
parser.add_argument('--lr-start', default=1e-6, type=float,
help='initial warmup lr')
parser.add_argument('--lr-end', default=1e-5, type=float,
help='minimum final lr')
parser.add_argument('--update-freq', default=1, type=int,
help='optimizer update frequency (i.e. gradient accumulation steps)')
parser.add_argument('--wd', default=0.1, type=float)
parser.add_argument('--betas', default=(0.9, 0.98), nargs=2, type=float)
parser.add_argument('--eps', default=1e-8, type=float)
parser.add_argument('--eval-freq', default=1, type=int)
parser.add_argument('--disable-amp', action='store_true',
help='disable mixed-precision training (requires more memory and compute)')
parser.add_argument('--loss-avg-or-sum', default='avg', type=str)
parser.add_argument('--checkpoint-grad', action='store_true',
help='enable gradient checkpointing')
# System
parser.add_argument('--print-freq', default=25, type=int, help='print frequency')
parser.add_argument('-j', '--workers', default=10, type=int, metavar='N',
help='number of data loading workers per process')
    parser.add_argument('--workers-unpaired', default=5, type=int, metavar='N',
                        help='number of data loading workers per process for the unpaired dataset')
parser.add_argument('--evaluate', action='store_true', help='eval only')
parser.add_argument('--world-size', default=1, type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank', default=0, type=int,
help='node rank for distributed training')
parser.add_argument("--local_rank", type=int, default=0)
parser.add_argument('--dist-url', default='env://', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str)
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--gpu', default=None, type=int, help='GPU id to use.')
parser.add_argument('--wandb', action='store_true', help='Enable WandB logging')
parser.add_argument('--offline', action='store_true', help='WandB will not log online')
parser.add_argument('--name', default='CLIP_ROCKET', type=str)
return parser
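# Editor's note (hedged, not part of the original file): an illustrative launch
# command using only flags defined in the parser above; the launcher, GPU count,
# and values are placeholders and depend on how utils.init_distributed_mode
# reads the environment.
#   torchrun --nproc_per_node=8 main.py --epochs 25 --batch-size 64 --lr 3e-3 \
#       --workers 10 --wandb --name CLIP_ROCKET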
best_acc1 = 0
def main(args):
utils.init_distributed_mode(args)
global best_acc1
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
# create model
print("=> creating model: {}".format(args.model))
model = models.get_model(args)
model.visual.set_grad_checkpointing(enable=args.checkpoint_grad)
model.cuda(args.gpu)
print(model)
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.gpu], bucket_cap_mb=200, static_graph=True)
if args.momentum:
momentum_model = models.get_model(args, is_momentum=True)
momentum_model.cuda(args.gpu)
if args.distributed:
momentum_model = torch.nn.parallel.DistributedDataParallel(
momentum_model, device_ids=[args.gpu], bucket_cap_mb=200, static_graph=True)
msg = utils.get_model_parallel(momentum_model).load_state_dict(
utils.get_model_parallel(model).state_dict(), strict=False)
print(msg)
for p in momentum_model.parameters():
p.requires_grad = False
# define loss function (criterion) and optimizer
criterion = models.get_loss(args).cuda(args.gpu)
p_wd, p_non_wd = [], []
for n, p in model.named_parameters():
if not p.requires_grad:
continue # frozen weights
if p.ndim < 2 or 'bias' in n or 'ln' in n or 'bn' in n:
p_non_wd.append(p)
else:
p_wd.append(p)
optim_params = [{"params": p_wd, "weight_decay": args.wd},
{"params": p_non_wd, "weight_decay": 0}]
optimizer = torch.optim.AdamW(optim_params, lr=args.lr, betas=args.betas,
eps=args.eps, weight_decay=args.wd)
scaler = amp.GradScaler(enabled=not args.disable_amp)
# optionally load pre-trained text encoder
if args.init_text_encoder is not None:
cp_text_encoder = torch.load(args.init_text_encoder)['state_dict']
cp_text_encoder = {k.replace('module.', ''): v for k, v in cp_text_encoder.items() if 'transformer' in k}
result = utils.get_model_parallel(model).load_state_dict(cp_text_encoder, strict=False)
print(result)
del cp_text_encoder
# optionally resume from a checkpoint (takes precedence over autoresume)
if args.resume:
if os.path.isfile(args.resume):
print("=> loading resume checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume, map_location='cpu')
epoch = checkpoint['epoch'] if 'epoch' in checkpoint else 0
args.start_epoch = epoch
result = model.load_state_dict(
checkpoint['state_dict'], strict=False)
print(result)
if args.momentum:
print("=> loading momentum encoder from '{}'".format(args.resume))
result = momentum_model.load_state_dict(
checkpoint['momentum_state_dict'], strict=False)
print(result)
optimizer.load_state_dict(checkpoint['optimizer']) if 'optimizer' in checkpoint else ()
scaler.load_state_dict(checkpoint['scaler']) if 'scaler' in checkpoint else ()
best_acc1 = checkpoint['best_acc1']
print("=> loaded resume checkpoint '{}' (epoch {})"
.format(args.resume, epoch))
del checkpoint
else:
print("=> no checkpoint found at '{}'".format(args.resume))
else:
# auto-resume from latest checkpoint in output directory
latest = os.path.join(args.output_dir, 'checkpoint.pt')
if os.path.isfile(latest):
print("=> loading latest checkpoint '{}'".format(latest))
latest_checkpoint = torch.load(latest, map_location='cpu')
args.start_epoch = latest_checkpoint['epoch']
model.load_state_dict(latest_checkpoint['state_dict'])
if args.momentum:
momentum_model.load_state_dict(latest_checkpoint['momentum_state_dict'])
optimizer.load_state_dict(latest_checkpoint['optimizer'])
scaler.load_state_dict(latest_checkpoint['scaler'])
best_acc1 = latest_checkpoint['best_acc1']
print("=> loaded latest checkpoint '{}' (epoch {})"
.format(latest, latest_checkpoint['epoch']))
del latest_checkpoint
cudnn.benchmark = True
# build tokenizer
tokenizer = SimpleTokenizer(
bpe_path=args.bpe_path,
text_augment=args.text_augment,
no_text_augment_prob=args.no_text_augment_prob,
remove_stopwords_prob=args.remove_stopwords_prob,
synonym_replacement_prob=args.synonym_replacement_prob,
random_swap_prob=args.random_swap_prob,
random_deletion_prob=args.random_deletion_prob,
clean_before_augment=args.clean_before_augment,
num_augs=args.num_augs,
)
# build datasets
print("=> creating paired datasets")
train_dataset = datasets.get_train_dataset(args, tokenizer, metadata=args.metadata)
val_dataset = datasets.get_val_dataset()
# dist eval resamples data to pad uneven batch sizes
# make sure num_samples = 0 mod num_gpus for exact acc
train_sampler = DistributedSampler(train_dataset) if args.distributed else None
val_sampler = DistributedSampler(val_dataset) if args.distributed else None
train_loader = DataLoader(
train_dataset,
batch_size=args.batch_size,
shuffle=(train_sampler is None),
num_workers=args.workers,
pin_memory=True,
sampler=train_sampler,
drop_last=True
)
val_loader = DataLoader(
val_dataset,
batch_size=args.batch_size,
shuffle=(val_sampler is None),
num_workers=args.workers,
pin_memory=True,
sampler=val_sampler,
drop_last=False
)
# optionally also load unpaired data
if args.semi_paired:
print("=> creating unpaired dataset")
unpaired_dataset = datasets.get_train_dataset(
args,
tokenizer,
metadata=args.metadata_unpaired,
augs_only=False
)
unpaired_sampler = DistributedSampler(unpaired_dataset) if args.distributed else None
unpaired_loader = DataLoader(
unpaired_dataset,
batch_size=args.batch_size // args.unpaired_ratio,
shuffle=(unpaired_sampler is None),
num_workers=args.workers_unpaired,
pin_memory=True,
sampler=unpaired_sampler,
drop_last=True
)
unpaired_iterable = utils.cycle(unpaired_loader, unpaired_sampler)
if args.evaluate:
zero_stats = validate_zeroshot(val_loader, model, tokenizer, args)
if utils.is_main_process():
with open(os.path.join(args.output_dir, 'eval_log.txt'), 'a') as f:
f.write(json.dumps(zero_stats) + '\n')
return
lr_schedule = utils.cosine_scheduler(args.lr, args.lr_end, args.epochs,
len(train_loader) // args.update_freq, warmup_epochs=args.warmup_epochs, start_warmup_value=args.lr_start)
if utils.is_main_process() and args.output_dir != './':
with open(os.path.join(args.output_dir, 'command.txt'), 'w') as f:
f.write(' '.join(sys.argv))
json.dump(
vars(args),
open(os.path.join(args.output_dir, 'args.json'), 'w'),
default=lambda o: "<not serializable>",
indent=4
)
if args.wandb:
wandb_id = os.path.split(args.output_dir)[-1]
wandb.init(
project='clip-rocket',
id=wandb_id,
config=args,
resume='allow',
name=args.name,
save_code=True,
notes=' '.join(sys.argv),
mode='offline' if args.offline else 'online',
dir=args.output_dir
)
wandb.watch(model)
print(args)
print("=> beginning training")
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
# train for one epoch
train_stats = train(
train_loader,
model, criterion,
optimizer,
scaler,
epoch,
lr_schedule,
args,
momentum_model if args.momentum else None,
unpaired_iterable if args.semi_paired else None
)
if (epoch + 1) % args.eval_freq != 0:
continue
val_stats = validate_zeroshot(val_loader, model, tokenizer, args)
acc1 = val_stats['acc1_z']
is_best = acc1 > best_acc1
best_acc1 = max(acc1, best_acc1)
print("=> saving checkpoint")
checkpoint_dict = {
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'optimizer' : optimizer.state_dict(),
'scaler': scaler.state_dict(),
'best_acc1': best_acc1,
'args': args,
}
if args.momentum:
checkpoint_dict['momentum_state_dict'] = momentum_model.state_dict()
utils.save_on_master(checkpoint_dict, is_best, args.output_dir)
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
**{f'test_{k}': v for k, v in val_stats.items()},
'epoch': epoch}
if utils.is_main_process():
if args.wandb:
wandb.log(log_stats)
with open(os.path.join(args.output_dir, 'log.txt'), 'a') as f:
f.write(json.dumps(log_stats) + '\n')
    if utils.is_main_process() and args.wandb:
        wandb.finish()
def train(
train_loader,
model,
criterion,
optimizer,
scaler,
epoch,
lr_schedule,
args,
momentum_model=None,
unpaired_iterable=None,
):
batch_time = AverageMeter('Time', ':6.2f')
data_time = AverageMeter('Data', ':6.2f')
mem = AverageMeter('Mem (GB)', ':6.1f')
metric_names = models.get_metric_names(args.model)
iters_per_epoch = len(train_loader) // args.update_freq
metrics = OrderedDict([(name, AverageMeter(name, ':.2e')) for name in metric_names])
progress = ProgressMeter(
iters_per_epoch,
[batch_time, data_time, mem, *metrics.values()],
prefix="Epoch: [{}]".format(epoch))
assert (momentum_model is not None) == args.momentum
# switch to train mode
model.train()
if args.momentum:
momentum_model.train()
end = time.time()
for data_iter, inputs in enumerate(train_loader):
optim_iter = data_iter // args.update_freq
# optionally load unpaired data
if args.semi_paired:
inputs_unpaired = next(unpaired_iterable)
inputs = [
torch.cat([inputs[0], inputs_unpaired[0]]),
inputs[1],
*[torch.cat([inputs[a+2], inputs_unpaired[a+2]])
for a in range(args.num_augs)]
]
# measure data loading time
data_time.update(time.time() - end)
# update weight decay and learning rate according to their schedule
it = iters_per_epoch * epoch + optim_iter # global training iteration
for k, param_group in enumerate(optimizer.param_groups):
param_group['lr'] = lr_schedule[it]
inputs = [t.cuda(args.gpu, non_blocking=True) for t in inputs]
# compute output
with amp.autocast(enabled=not args.disable_amp):
outputs = model(*inputs)
if args.momentum:
with torch.no_grad():
momentum_outputs = momentum_model(*inputs)
momentum_outputs = {k + '_momentum' : v for k, v in momentum_outputs.items()}
outputs = {**outputs, **momentum_outputs}
loss_dict = criterion(outputs)
loss = loss_dict['loss']
loss /= args.update_freq
if not math.isfinite(loss.item()):
torch.save(
{"inputs": utils.all_gather_batch(inputs), "outputs": utils.all_gather_batch(outputs), "losses": loss_dict, "state_dict": model.state_dict()},
os.path.join(args.output_dir, "dump_loss_nan.pgz")
)
print("Loss is {}, stopping training".format(loss.item()))
time.sleep(5)
sys.exit(1)
scaler.scale(loss).backward()
if (data_iter + 1) % args.update_freq != 0:
continue
if args.model.endswith('SWALIPV1') and it < args.swalip_freeze_proto_iters:
for name, p in model.named_parameters():
if "prototypes" in name:
p.grad = None
# compute gradient and do SGD step
scaler.step(optimizer)
scaler.update()
model.zero_grad(set_to_none=True)
# momentum update
if args.momentum:
with torch.no_grad():
m = args.momentum_tau
for p, p_mom in zip(
utils.get_model_parallel(model).parameters(),
utils.get_model_parallel(momentum_model).parameters()
):
p_mom.data.mul_(m).add_((1 - m) * p.detach().data)
        # clamp the (log-space) logit scale so that exp(logit_scale) stays in [1, 100] (4.6052 ~= ln 100)
utils.get_model_parallel(model).logit_scale.data.clamp_(0, 4.6052)
logit_scale = utils.get_model_parallel(model).logit_scale.exp().item()
utils.get_model_parallel(model).l2l_logit_scale.data.clamp_(0, 4.6052)
for k in loss_dict:
metrics[k].update(loss_dict[k].item(), args.batch_size)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
mem.update(torch.cuda.max_memory_allocated() // 1e9)
if optim_iter % args.print_freq == 0:
if utils.is_main_process() and args.wandb:
wandb.log({**{k: v.item() for k, v in loss_dict.items()},
'scaler': scaler.get_scale(),
'logit': logit_scale})
progress.display(optim_iter)
progress.synchronize()
return {**{k: v.avg for k, v in metrics.items()},
'lr': optimizer.param_groups[0]['lr'],
'logit_scale': logit_scale}
def validate_zeroshot(val_loader, model, tokenizer, args):
batch_time = AverageMeter('Time', ':6.3f')
if args.model.startswith('SLIP') or args.model.startswith('CLIP'):
metrics = {
'acc1_z': AverageMeter('Acc@1_z', ':6.2f'),
'acc5_z': AverageMeter('Acc@5_z', ':6.2f')
}
else:
ensemble_weights = np.linspace(0.1, 0.9, 9).round(decimals=1)
metric_suffixes = ['z', 'h'] + [f'zh_{w}' for w in ensemble_weights]
metrics = {
f'acc{k}_{s}': AverageMeter(f'Acc@{k}_{s}', ':6.2f')
for s in metric_suffixes for k in [1, 5]
}
progress = ProgressMeter(
len(val_loader),
[batch_time, *metrics.values()],
prefix='Test: '
)
# switch to evaluate mode
model.eval()
print('=> encoding captions')
cwd = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(cwd, 'templates.json')) as f:
templates = json.load(f)['imagenet']
with open(os.path.join(cwd, 'labels.json')) as f:
labels = json.load(f)['imagenet']
with torch.no_grad():
text_features = defaultdict(list)
for l in labels:
texts = [t.format(l) for t in templates]
texts = tokenizer(texts).cuda(args.gpu, non_blocking=True)
class_embeddings = utils.get_model_parallel(model).encode_text_val(texts)
embed_names = {'z_text', 'h_text', 'p_text'}.intersection(class_embeddings.keys())
for embed_name in embed_names:
cls_embed = class_embeddings[embed_name]
cls_embed = cls_embed / cls_embed.norm(dim=-1, keepdim=True)
cls_embed = cls_embed.mean(dim=0)
cls_embed = cls_embed / cls_embed.norm(dim=-1, keepdim=True)
text_features[embed_name].append(cls_embed)
text_features = {k: torch.stack(v, dim=0) for k, v in text_features.items()}
end = time.time()
for i, (images, target) in enumerate(val_loader):
images = images.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# encode images
image_features = utils.get_model_parallel(model).encode_image_val(images)
# compute similarities
similarities = utils.get_model_parallel(model).predict_zeroshot(image_features, text_features)
# measure accuracy
for name, similarity in similarities.items():
acc1, acc5 = utils.accuracy(similarity, target, topk=(1, 5))
acc1, acc5 = utils.scaled_all_reduce([acc1, acc5])
metrics[f'acc1_{name[0]}'].update(acc1.item(), images.size(0))
metrics[f'acc5_{name[0]}'].update(acc5.item(), images.size(0))
if not (args.model.startswith('SLIP')) and not (args.model.startswith('CLIP')):
# ensemble accuracy
for w in ensemble_weights:
similarity = w * similarities['z_sim'] + (1-w) * similarities['h_sim']
acc1, acc5 = utils.accuracy(similarity, target, topk=(1, 5))
acc1, acc5 = utils.scaled_all_reduce([acc1, acc5])
metrics[f'acc1_zh_{w}'].update(acc1.item(), images.size(0))
metrics[f'acc5_zh_{w}'].update(acc5.item(), images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
progress.synchronize()
print(f'0-shot * Acc@1 {metrics["acc1_z"].avg:.3f} Acc@5 {metrics["acc5_z"].avg:.3f}')
return {k: v.avg for k, v in metrics.items()}
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def synchronize(self):
if not utils.is_dist_avail_and_initialized():
return
t = torch.tensor([self.sum, self.count], dtype=torch.float64, device='cuda')
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.sum = int(t[0])
self.count = t[1]
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
def __init__(self, num_batches, meters=None, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print('\t'.join(entries))
def synchronize(self):
for meter in self.meters:
if meter.count == 0:
continue
meter.synchronize()
def _get_batch_fmtstr(self, num_batches):
num_digits = len(str(num_batches // 1))
fmt = '{:' + str(num_digits) + 'd}'
return '[' + fmt + '/' + fmt.format(num_batches) + ']'
if __name__ == '__main__':
parser = argparse.ArgumentParser('SLIP training and evaluation', parents=[get_args_parser()])
args = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
main(args)
|
clip-rocket-main
|
main.py
|
import math
import numpy as np
def subsampled_dense_grid(d, D, gamma, deg=8):
"""Points and weights for the kernel feature map for the RBF kernel
exp(-gamma ||x - y||^2) using subsampled dense grid.
Parameters:
d: input dimension
D: number of features
gamma: parameter of the RBF kernel
deg: degree of Gaussian quadrature used
Return:
points: shape (D x d)
weights: shape (D, )
"""
points, weights = np.polynomial.hermite.hermgauss(deg)
points *= 2 * math.sqrt(gamma)
weights /= math.sqrt(math.pi)
# Now @weights must sum to 1.0, as the kernel value at 0 is 1.0
subsampled_points = np.random.choice(points, size=(D, d), replace=True, p=weights)
subsampled_weights = np.ones(D) / math.sqrt(D)
return subsampled_points, subsampled_weights
def rbf_feature_map(x, points, weights):
"""RBF kernel feature map, given points and weights
Parameters:
x: input of shape (batch_size x d)
points: shape (D x d)
weights: shape (D, )
Return:
x_transformed: shape (batch_size x 2D)
"""
prod = x @ points.T
return np.hstack((weights * np.cos(prod), weights * np.sin(prod)))
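# Editor's note (hedged, illustrative only; not part of the original file): a
# minimal sanity check that the feature map approximates the RBF kernel, i.e.
# phi(x) . phi(y) ~= exp(-gamma * ||x - y||^2). The function name and default
# values below are assumptions chosen for the demo.
def _check_kernel_approximation(d=5, D=2000, gamma=0.1, seed=0):
    rng = np.random.RandomState(seed)
    x = rng.randn(2, d)
    points, weights = subsampled_dense_grid(d, D, gamma)
    phi = rbf_feature_map(x, points, weights)                     # shape (2, 2D)
    approx = float(phi[0] @ phi[1])                               # feature-map inner product
    exact = math.exp(-gamma * float(np.sum((x[0] - x[1]) ** 2)))  # exact kernel value
    print('approx kernel: %.4f  exact kernel: %.4f' % (approx, exact))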
def main():
batch_size = 64
d = 40
D = 100
x = np.random.rand(batch_size, d)
points, weights = subsampled_dense_grid(d, D, gamma=0.01)
x_transformed = rbf_feature_map(x, points, weights)
assert x_transformed.shape == (batch_size, 2 * D)
if __name__ == '__main__':
main()
|
quadrature-features-master
|
dense_grid.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from absl import app
from absl import flags
import cv2
import os.path as osp
import sys
sys.path.insert(0,'third_party')
import pdb
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
cudnn.benchmark = True
from nnutils.train_utils import v2s_trainer
opts = flags.FLAGS
def main(_):
torch.cuda.set_device(opts.local_rank)
world_size = opts.ngpu
torch.distributed.init_process_group(
'nccl',
init_method='env://',
world_size=world_size,
rank=opts.local_rank,
)
print('%d/%d'%(world_size,opts.local_rank))
torch.manual_seed(0)
torch.cuda.manual_seed(1)
torch.manual_seed(0)
trainer = v2s_trainer(opts)
data_info = trainer.init_dataset()
trainer.define_model(data_info)
trainer.init_training()
trainer.train()
if __name__ == '__main__':
app.run(main)
|
banmo-main
|
main.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from absl import flags, app
import sys
sys.path.insert(0,'third_party')
import numpy as np
import torch
import os
import glob
import pdb
import cv2
import trimesh
from scipy.spatial.transform import Rotation as R
import imageio
from utils.io import save_vid, str_to_frame, save_bones
from utils.colors import label_colormap
from nnutils.train_utils import v2s_trainer
from nnutils.geom_utils import obj_to_cam, tensor2array, vec_to_sim3, obj_to_cam
from ext_utils.util_flow import write_pfm
from ext_utils.flowlib import cat_imgflo
opts = flags.FLAGS
def save_output(rendered_seq, aux_seq, seqname, save_flo):
save_dir = '%s/'%(opts.model_path.rsplit('/',1)[0])
length = len(aux_seq['mesh'])
mesh_rest = aux_seq['mesh_rest']
len_max = (mesh_rest.vertices.max(0) - mesh_rest.vertices.min(0)).max()
mesh_rest.export('%s/mesh-rest.obj'%save_dir)
if 'mesh_rest_skin' in aux_seq.keys():
aux_seq['mesh_rest_skin'].export('%s/mesh-rest-skin.obj'%save_dir)
if 'bone_rest' in aux_seq.keys():
bone_rest = aux_seq['bone_rest']
save_bones(bone_rest, len_max, '%s/bone-rest.obj'%save_dir)
flo_gt_vid = []
flo_p_vid = []
for i in range(length):
impath = aux_seq['impath'][i]
seqname = impath.split('/')[-2]
save_prefix = '%s/%s'%(save_dir,seqname)
idx = int(impath.split('/')[-1].split('.')[-2])
mesh = aux_seq['mesh'][i]
rtk = aux_seq['rtk'][i]
        # convert bones to meshes TODO: wrap in a function
if 'bone' in aux_seq.keys() and len(aux_seq['bone'])>0:
bones = aux_seq['bone'][i]
bone_path = '%s-bone-%05d.obj'%(save_prefix, idx)
save_bones(bones, len_max, bone_path)
mesh.export('%s-mesh-%05d.obj'%(save_prefix, idx))
np.savetxt('%s-cam-%05d.txt' %(save_prefix, idx), rtk)
img_gt = rendered_seq['img'][i]
flo_gt = rendered_seq['flo'][i]
mask_gt = rendered_seq['sil'][i][...,0]
flo_gt[mask_gt<=0] = 0
img_gt[mask_gt<=0] = 1
if save_flo: img_gt = cat_imgflo(img_gt, flo_gt)
else: img_gt*=255
cv2.imwrite('%s-img-gt-%05d.jpg'%(save_prefix, idx), img_gt[...,::-1])
flo_gt_vid.append(img_gt)
img_p = rendered_seq['img_coarse'][i]
flo_p = rendered_seq['flo_coarse'][i]
mask_gt = cv2.resize(mask_gt, flo_p.shape[:2][::-1]).astype(bool)
flo_p[mask_gt<=0] = 0
img_p[mask_gt<=0] = 1
if save_flo: img_p = cat_imgflo(img_p, flo_p)
else: img_p*=255
cv2.imwrite('%s-img-p-%05d.jpg'%(save_prefix, idx), img_p[...,::-1])
flo_p_vid.append(img_p)
flo_gt = cv2.resize(flo_gt, flo_p.shape[:2])
flo_err = np.linalg.norm( flo_p - flo_gt ,2,-1)
flo_err_med = np.median(flo_err[mask_gt])
flo_err[~mask_gt] = 0.
cv2.imwrite('%s-flo-err-%05d.jpg'%(save_prefix, idx),
128*flo_err/flo_err_med)
img_gt = rendered_seq['img'][i]
img_p = rendered_seq['img_coarse'][i]
img_gt = cv2.resize(img_gt, img_p.shape[:2][::-1])
img_err = np.power(img_gt - img_p,2).sum(-1)
img_err_med = np.median(img_err[mask_gt])
img_err[~mask_gt] = 0.
cv2.imwrite('%s-img-err-%05d.jpg'%(save_prefix, idx),
128*img_err/img_err_med)
# fps = 1./(5./len(flo_p_vid))
upsample_frame = min(30, len(flo_p_vid))
save_vid('%s-img-p' %(save_prefix), flo_p_vid, upsample_frame=upsample_frame)
save_vid('%s-img-gt' %(save_prefix),flo_gt_vid,upsample_frame=upsample_frame)
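# Editor's note (hedged, summarizing the code above): for each call, save_output
# writes mesh-rest.obj (plus mesh-rest-skin.obj / bone-rest.obj when present) and,
# per frame, <seq>-mesh-*.obj, <seq>-cam-*.txt, optional <seq>-bone-*.obj,
# ground-truth/predicted renders, flow and image error maps, and the two videos
# <seq>-img-p and <seq>-img-gt.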
def transform_shape(mesh,rtk):
"""
(deprecated): absorb rt into mesh vertices,
"""
vertices = torch.Tensor(mesh.vertices)
Rmat = torch.Tensor(rtk[:3,:3])
Tmat = torch.Tensor(rtk[:3,3])
vertices = obj_to_cam(vertices, Rmat, Tmat)
rtk[:3,:3] = np.eye(3)
rtk[:3,3] = 0.
mesh = trimesh.Trimesh(vertices.numpy(), mesh.faces)
return mesh, rtk
def main(_):
trainer = v2s_trainer(opts, is_eval=True)
data_info = trainer.init_dataset()
trainer.define_model(data_info)
seqname=opts.seqname
dynamic_mesh = opts.flowbw or opts.lbs
idx_render = str_to_frame(opts.test_frames, data_info)
# idx_render[0] += 50
# idx_render[0] += 374
# idx_render[0] += 292
# idx_render[0] += 10
# idx_render[0] += 340
# idx_render[0] += 440
# idx_render[0] += 540
# idx_render[0] += 640
# idx_render[0] += trainer.model.data_offset[4]-4 + 37
# idx_render[0] += 36
trainer.model.img_size = opts.render_size
chunk = opts.frame_chunk
for i in range(0, len(idx_render), chunk):
rendered_seq, aux_seq = trainer.eval(idx_render=idx_render[i:i+chunk],
dynamic_mesh=dynamic_mesh)
rendered_seq = tensor2array(rendered_seq)
save_output(rendered_seq, aux_seq, seqname, save_flo=opts.use_corresp)
#TODO merge the outputs
if __name__ == '__main__':
app.run(main)
|
banmo-main
|
extract.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import cv2
import glob
import numpy as np
import pdb
import os
import shutil
import detectron2
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor
from detectron2.utils.visualizer import Visualizer, ColorMode
from detectron2.data import MetadataCatalog
coco_metadata = MetadataCatalog.get("coco_2017_val")
import torch
import torch.nn.functional as F
import torchvision
import sys
curr_dir = os.path.abspath(os.getcwd())
sys.path.insert(0,curr_dir)
try:
detbase='./third_party/detectron2/'
sys.path.insert(0,'%s/projects/PointRend/'%detbase)
import point_rend
except:
detbase='./third_party/detectron2_old/'
sys.path.insert(0,'%s/projects/PointRend/'%detbase)
import point_rend
sys.path.insert(0,'third_party/ext_utils')
from utils.io import save_vid
from util_flow import write_pfm
seqname=sys.argv[1]
ishuman=sys.argv[2] # 'y/n'
datadir='tmp/%s/images/'%seqname
odir='database/DAVIS/'
imgdir= '%s/JPEGImages/Full-Resolution/%s'%(odir,seqname)
maskdir='%s/Annotations/Full-Resolution/%s'%(odir,seqname)
#if os.path.exists(imgdir): shutil.rmtree(imgdir)
#if os.path.exists(maskdir): shutil.rmtree(maskdir)
#os.mkdir(imgdir)
#os.mkdir(maskdir)
cfg = get_cfg()
point_rend.add_pointrend_config(cfg)
cfg.merge_from_file('%s/projects/PointRend/configs/InstanceSegmentation/pointrend_rcnn_X_101_32x8d_FPN_3x_coco.yaml'%(detbase))
cfg.MODEL.WEIGHTS ='https://dl.fbaipublicfiles.com/detectron2/PointRend/InstanceSegmentation/pointrend_rcnn_X_101_32x8d_FPN_3x_coco/28119989/model_final_ba17b9.pkl'
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST=0.9
predictor = DefaultPredictor(cfg)
counter=0
frames = []
for i,path in enumerate(sorted(glob.glob('%s/*'%datadir))):
print(path)
img = cv2.imread(path)
h,w = img.shape[:2]
# store at most 1080p videos
scale = np.sqrt(1920*1080/(h*w))
if scale<1:
img = cv2.resize(img, (int(w*scale), int(h*scale)) )
h,w = img.shape[:2]
# resize to some empirical size
if h>w: h_rszd,w_rszd = 1333, 1333*w//h
else: h_rszd,w_rszd = 1333*h//w, 1333
img_rszd = cv2.resize(img,(w_rszd,h_rszd))
# pad borders to make sure detection works when obj is out-of-frame
pad=100
img_rszd = cv2.copyMakeBorder(img_rszd,pad,pad,pad,pad,cv2.BORDER_REPLICATE)
# pointrend
outputs = predictor(img_rszd)
outputs = outputs['instances'].to('cpu')
mask_rszd = np.zeros((h_rszd+pad*2,w_rszd+pad*2))
for it,ins_cls in enumerate(outputs.pred_classes):
print(ins_cls)
#if ins_cls ==15: # cat
#if ins_cls==0 or (ins_cls >= 14 and ins_cls <= 23):
if ishuman=='y':
if ins_cls ==0:
mask_rszd += np.asarray(outputs.pred_masks[it])
else:
if ins_cls >= 14 and ins_cls <= 23:
mask_rszd += np.asarray(outputs.pred_masks[it])
nb_components, output, stats, centroids = \
cv2.connectedComponentsWithStats(mask_rszd.astype(np.uint8), connectivity=8)
if nb_components>1:
max_label, max_size = max([(i, stats[i, cv2.CC_STAT_AREA]) for i in range(1, nb_components)], key=lambda x: x[1])
mask_rszd = output == max_label
mask_rszd = mask_rszd.astype(bool).astype(int)
if (mask_rszd.sum())<1000: continue
mask_rszd = mask_rszd [pad:-pad,pad:-pad]
img_rszd = img_rszd [pad:-pad,pad:-pad]
outputs.pred_masks=outputs.pred_masks[:,pad:-pad,pad:-pad]
outputs.pred_boxes.tensor[:,0:2] -= pad
outputs.pred_boxes.tensor[:,2:4] -= 2*pad
mask_rszd = np.concatenate([mask_rszd[:,:,np.newaxis]* 128,
np.zeros((h_rszd, w_rszd, 1)),
np.zeros((h_rszd, w_rszd, 1))],-1)
mask = cv2.resize(mask_rszd,(w,h))
cv2.imwrite('%s/%05d.jpg'%(imgdir,counter), img)
cv2.imwrite('%s/%05d.png'%(maskdir,counter), mask)
# vis
v = Visualizer(img_rszd, coco_metadata, scale=1, instance_mode=ColorMode.IMAGE_BW)
#outputs.remove('pred_masks')
vis = v.draw_instance_predictions(outputs)
vis = vis.get_image()
cv2.imwrite('%s/vis-%05d.jpg'%(maskdir,counter), vis)
counter+=1
frames.append(vis[:,:,::-1])
save_vid('%s/vis'%maskdir, frames, suffix='.mp4')
save_vid('%s/vis'%maskdir, frames, suffix='.gif')
|
banmo-main
|
preprocess/mask.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
"""
python img2lines.py --seqname xx
"""
from absl import flags, app
import sys
sys.path.insert(0,'third_party')
sys.path.insert(0,'./')
import numpy as np
import torch
import os
import glob
import pdb
import cv2
import trimesh
from scipy.spatial.transform import Rotation as R
import imageio
from utils.io import save_vid, str_to_frame, save_bones
from utils.colors import label_colormap
from nnutils.train_utils import v2s_trainer
from nnutils.geom_utils import obj_to_cam, tensor2array, vec_to_sim3, obj_to_cam
from utils.io import mkdir_p
from ext_utils.util_flow import write_pfm
from ext_utils.flowlib import cat_imgflo
from utils.io import config_to_dataloader
from torch.utils.data import DataLoader
from nnutils.geom_utils import tensor2array
opts = flags.FLAGS
def dict2pix(dict_array, idy):
dict_px = {}
dict_px['img'] = dict_array['img'][...,idy,:]
dict_px['mask'] = dict_array['mask'][...,idy,:]
dict_px['vis2d'] = dict_array['vis2d'][...,idy,:]
dict_px['flow'] = dict_array['flow'][...,idy,:]
dict_px['occ'] = dict_array['occ'][...,idy,:]
dict_px['dp'] = dict_array['dp'][...,idy,:]
dict_px['dp_feat_rsmp'] = dict_array['dp_feat_rsmp'][...,idy,:]
return dict_px
def dict2rtk(dict_array):
dict_out = {}
dict_out['rtk'] = dict_array['rtk']
dict_out['kaug'] = dict_array['kaug']
return dict_out
def main(_):
seqname=opts.seqname
trainer = v2s_trainer(opts, is_eval=True)
data_info = trainer.init_dataset()
impaths = data_info['impath']
data_offset = data_info['offset']
opts_dict = {}
opts_dict['seqname'] = opts.seqname
opts_dict['img_size'] = opts.img_size
opts_dict['rtk_path'] = opts.rtk_path
opts_dict['batch_size'] = 1
opts_dict['ngpu'] = 1
opts_dict['preload'] = False
opts_dict['dframe'] = [1,2,4,8,16,32]
dataset = config_to_dataloader(opts_dict,is_eval=True)
#dataset = config_to_dataloader(opts_dict,is_eval=False)
dataset = DataLoader(dataset,
batch_size= 1, num_workers=0, drop_last=False,
pin_memory=True, shuffle=False)
for dat in dataset.dataset.datasets:
dat.spec_dt = 1
#TODO
#overwrite=False
overwrite=True
# hardcoded path
base_path = 'database/DAVIS/Pixels/Full-Resolution/'
for i, batch in enumerate(dataset):
frameid = batch['frameid']
dataid = batch['dataid']
dt = frameid[0,1] - frameid[0,0]
frameid = frameid + data_offset[dataid[0,0].long()]
        if dt<0: continue # only save forward pair (backward pair is equivalent)
impath = impaths[frameid.long()[0,0]]
seqname_sub = impath.split('/')[-2]
frameid_sub = impath.split('/')[-1].split('.')[0]
save_dir = '%s/%s'%(base_path, seqname_sub)
save_dir_t = '%s/%d_%s'%(save_dir, dt, frameid_sub)
print(save_dir_t)
if (not overwrite) and os.path.exists(save_dir_t):
continue
mkdir_p(save_dir_t)
dict_array = tensor2array(batch)
# save each pixel: 00_00000/0000.npy # t,h
dict_rtk = dict2rtk(dict_array)
save_path_rtk = '%s/rtk.npy'%(save_dir_t)
np.save(save_path_rtk, dict_rtk)
for idy in range(opts.img_size):
save_path = '%s/%04d.npy'%(save_dir_t, idy)
dict_px = dict2pix(dict_array, idy)
np.save(save_path, dict_px)
if __name__ == '__main__':
app.run(main)
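# Editor's note (hedged, summarizing main() above): each forward image pair is
# dumped to database/DAVIS/Pixels/Full-Resolution/<seq>/<dt>_<frameid>/ as
# rtk.npy plus one <row>.npy per image row; LineDataset in utils/io.py expects
# exactly this layout.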
|
banmo-main
|
preprocess/img2lines.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import configparser
import cv2
import glob
import pdb
import sys
seqname_pre=sys.argv[1]
ishuman=sys.argv[2] # 'y/n'
silroot='database/DAVIS/Annotations/Full-Resolution/'
config = configparser.ConfigParser()
config['data'] = {
'dframe': '1',
'init_frame': '0',
'end_frame': '-1',
'can_frame': '-1'}
seqname_all = sorted(glob.glob('%s/%s[0-9][0-9][0-9]'%(silroot, seqname_pre)))
total_vid = 0
for i,seqname in enumerate(seqname_all):
seqname = seqname.split('/')[-1]
img = cv2.imread('%s/%s/00000.png'%(silroot,seqname),0)
if img is None:continue
num_fr = len(glob.glob('%s/%s/*.png'%(silroot,seqname)))
if num_fr < 16:continue
fl = max(img.shape)
px = img.shape[1]//2
py = img.shape[0]//2
camtxt = [fl,fl,px,py]
config['data_%d'%total_vid] = {
'ishuman': ishuman,
'ks': ' '.join( [str(i) for i in camtxt] ),
'datapath': 'database/DAVIS/JPEGImages/Full-Resolution/%s/'%seqname,
}
total_vid += 1
with open('configs/%s.config'%(seqname_pre), 'w') as configfile:
config.write(configfile)
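# Editor's note (hedged, illustrative only; not part of the original file): the
# generated configs/<seqname_pre>.config roughly looks like the sketch below.
# Section names and keys come from the code above; the sequence name and the
# camera intrinsics are made-up placeholder values (fl fl px py).
#
# [data]
# dframe = 1
# init_frame = 0
# end_frame = -1
# can_frame = -1
#
# [data_0]
# ishuman = n
# ks = 1920 1920 960 540
# datapath = database/DAVIS/JPEGImages/Full-Resolution/<seqname000>/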
|
banmo-main
|
preprocess/write_config.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import cv2
import glob
import numpy as np
import pdb
import os
import shutil
import detectron2
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor
from detectron2.utils.visualizer import Visualizer, ColorMode
from detectron2.data import MetadataCatalog
coco_metadata = MetadataCatalog.get("coco_2017_val")
import torch
import torch.nn.functional as F
import torchvision
import sys
curr_dir = os.path.abspath(os.getcwd())
sys.path.insert(0,curr_dir)
try:
detbase='./third_party/detectron2/'
sys.path.insert(0,'%s/projects/DensePose/'%detbase)
from utils.cselib import create_cse, run_cse
except:
detbase='./third_party/detectron2_old/'
sys.path.insert(0,'%s/projects/DensePose/'%detbase)
from utils.cselib import create_cse, run_cse
sys.path.insert(0,'third_party/ext_utils')
from utils.io import save_vid, visObj
from util_flow import write_pfm
seqname=sys.argv[1]
ishuman=sys.argv[2] # 'y/n'
odir='database/DAVIS/'
imgdir= '%s/JPEGImages/Full-Resolution/%s'%(odir,seqname)
maskdir='%s/Annotations/Full-Resolution/%s'%(odir,seqname)
dpdir='%s/Densepose/Full-Resolution/%s'%(odir,seqname)
if os.path.exists(dpdir): shutil.rmtree(dpdir)
os.mkdir(dpdir)
if ishuman=='y':
#human
config_path = '%s/projects/DensePose/configs/cse/densepose_rcnn_R_101_FPN_DL_soft_s1x.yaml'%(detbase)
weight_path = 'https://dl.fbaipublicfiles.com/densepose/cse/densepose_rcnn_R_101_FPN_DL_soft_s1x/250713061/model_final_1d3314.pkl'
mesh_name = 'smpl_27554'
elif ishuman=='n':
#quadrupeds
config_path = '%s/projects/DensePose/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_CA_finetune_4k.yaml'%(detbase)
weight_path = 'https://dl.fbaipublicfiles.com/densepose/cse/densepose_rcnn_R_50_FPN_soft_animals_CA_finetune_4k/253498611/model_final_6d69b7.pkl'
mesh_name = 'sheep_5004'
else:
print('y/n, exiting')
exit()
predictor_dp, embedder, mesh_vertex_embeddings = create_cse(config_path,
weight_path)
counter=0
frames = []
for i,path in enumerate(sorted(glob.glob('%s/*'%imgdir))):
print(path)
img = cv2.imread(path)
msk = cv2.imread(path.replace('JPEGImages', 'Annotations').replace('.jpg', '.png'),0)
h,w = img.shape[:2]
    # recompute mask
msk = msk/np.sort(np.unique(msk))[1]
occluder = msk==255
msk[occluder] = 0
# resize to some empirical size
if h>w: h_rszd,w_rszd = 1333, 1333*w//h
else: h_rszd,w_rszd = 1333*h//w, 1333
img_rszd = cv2.resize(img,(w_rszd,h_rszd))
msk_rszd = cv2.resize(msk,(w_rszd,h_rszd))
# densepose
clst_verts, image_bgr1, embedding, embedding_norm, bbox = run_cse(
predictor_dp, embedder,
mesh_vertex_embeddings,
img_rszd, msk_rszd,
mesh_name=mesh_name)
# resize to original size
bbox[0] *= w / clst_verts.shape[1]
bbox[2] *= w / clst_verts.shape[1]
bbox[1] *= h / clst_verts.shape[0]
bbox[3] *= h / clst_verts.shape[0]
np.savetxt( '%s/bbox-%05d.txt'%(dpdir,counter) , bbox)
clst_verts = cv2.resize(clst_verts, (w,h), interpolation=cv2.INTER_NEAREST)
    # assume at most ~10k vertex ids; divide by 50 so values stay below ~200
    clst_verts = (clst_verts/50.).astype(np.float32)
write_pfm( '%s/%05d.pfm'%(dpdir,counter), clst_verts)
embedding_norm = cv2.resize(embedding_norm, (w,h))
write_pfm( '%s/norm-%05d.pfm'%(dpdir,counter), embedding_norm)
embedding = embedding.reshape((-1,embedding.shape[-1]))
write_pfm( '%s/feat-%05d.pfm'%(dpdir,counter), embedding)
# vis
#v = Visualizer(img_rszd, coco_metadata, scale=1, instance_mode=ColorMode.IMAGE_BW)
#outvis = visObj()
#outvis.image_height = h
#outvis.image_width = w
#outvis._fields = {}
#outvis._fields["pred_boxes"] = np.asarray([[0,0,h,w,1.]])
#vis = v.draw_instance_predictions(outvis)
#vis = vis.get_image()
vis=img_rszd
alpha_mask = 0.8*(msk_rszd>0)[...,None]
mask_result = vis*(1-alpha_mask) + image_bgr1 * alpha_mask
cv2.imwrite('%s/vis-%05d.jpg'%(dpdir,counter), mask_result)
counter+=1
frames.append(mask_result[:,:,::-1])
save_vid('%s/vis'%dpdir, frames, suffix='.mp4')
save_vid('%s/vis'%dpdir, frames, suffix='.gif')
|
banmo-main
|
preprocess/compute_dp.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import os
import errno
from typing import Any, Dict, List, Tuple, Union
import cv2
import pdb
import configparser
import torch
import numpy as np
import imageio
import trimesh
import glob
import matplotlib.cm
import torch.nn.functional as F
from scipy.spatial.transform import Rotation as R
from torch.utils.data import Dataset
import sys
sys.path.insert(0,'third_party')
import dataloader.vidbase as base_data
from ext_utils.flowlib import flow_to_image
from utils.colors import label_colormap
def draw_lines(img, xy1s, xy2s):
device = img.device
colormap = label_colormap()
len_colormap = colormap.shape[0]
img = img.permute(1,2,0).cpu().numpy()*255
img = img.astype(np.uint8)[:,:,::-1].copy()
for i in range(len(xy1s)):
color = tuple([int(x) for x in colormap[i%len_colormap]])
p1 = tuple(xy1s[i].detach().cpu().numpy())
p2 = tuple(xy2s[i].detach().cpu().numpy())
cv2.circle(img,p1,3, color)
cv2.circle(img,p2,3, color)
cv2.line(img, p1, p2, color, thickness=1)
#pdb.set_trace()
#cv2.imwrite('tmp/0.png', img)
#img = torch.Tensor(img).to(device).permute(2,0,1)[None]
return img
def draw_pts(img, xys):
device = img.device
img = img.permute(1,2,0).cpu().numpy()*255
img = img.astype(np.uint8)[:,:,::-1].copy()
for point in xys:
point = point.detach().cpu().numpy()
cv2.circle(img,tuple(point),1,(0,0,255))
#pdb.set_trace()
#cv2.imwrite('tmp/0.png', img)
#img = torch.Tensor(img).to(device).permute(2,0,1)[None]
return img
def save_bones(bones, len_max, path):
B = len(bones)
elips_list = []
elips = trimesh.creation.uv_sphere(radius=len_max/20,count=[16, 16])
# remove identical vertices
elips = trimesh.Trimesh(vertices=elips.vertices, faces=elips.faces)
N_elips = len(elips.vertices)
for bone in bones:
center = bone[None,:3]
orient = bone[3:7] # real first
orient = orient / np.linalg.norm(orient, 2,-1)
orient = orient[[1,2,3,0]]
orient = R.from_quat(orient).as_matrix() # real first
orient = orient.T # transpose R
scale = np.exp(bone[None, 7:10])
elips_verts = elips.vertices
elips_verts = elips_verts / scale
elips_verts = elips_verts.dot(orient)
elips_verts = elips_verts+center
elips_list.append( trimesh.Trimesh(vertices = elips_verts,
faces=elips.faces) )
elips = trimesh.util.concatenate(elips_list)
colormap = label_colormap()[:B]
colormap= np.tile(colormap[:,None], (1,N_elips,1)).reshape((-1,3))
elips.visual.vertex_colors[:len(colormap),:3] = colormap
elips.export(path)
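# Editor's note (hedged, read off the code above): each bone is a 10-D vector:
# bone[0:3] = center, bone[3:7] = orientation quaternion (real part first),
# bone[7:10] = log of the per-axis scale of the ellipsoid.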
def vis_match(results, masks, imgs, bs,img_size,ndepth):
# show error images
bs = imgs.shape[0]
for i in range(bs):
mask_rszd = F.interpolate(masks[None],(img_size,img_size))[0,i].bool()
img_rszd = F.interpolate(imgs ,(img_size,img_size))[i].permute(1,2,0)
img_mskd = img_rszd[mask_rszd].cpu().numpy()
if 'feat_err' in results.keys():
feat_errs = results['feat_err']
feat_err = feat_errs[i].view(img_size,img_size)
feat_err[~mask_rszd] = 0.
med = feat_err[mask_rszd].median()
print('%d-median:%f' %(i,med))
cv2.imwrite('tmp/match_err-%d.png'%i, (feat_err/med).cpu().numpy()*128)
# draw lines
if 'xyz_camera_vis' in results.keys() and 'pts_exp_vis' in results.keys():
mask_rszd = F.interpolate(masks[None],(img_size,img_size))[0,0].bool()
img_rszd = F.interpolate(imgs ,(img_size,img_size))[0].permute(1,2,0)
xyz_coarse_frame = results['xyz_camera_vis']
color_plane = torch.stack([img_rszd, torch.ones_like(img_rszd)],0).view(-1,3)
color_plane = color_plane.cpu().numpy()
near_plane= xyz_coarse_frame.view(bs,-1,ndepth,3)[0,:,0]
d_near = near_plane[:,2].mean()
near_plane[...,-1] -= d_near*0.01
far_plane = xyz_coarse_frame.view(bs,-1,ndepth,3)[0,:,-1]
nf_plane = torch.cat([near_plane, far_plane],0)
#trimesh.Trimesh(nf_plane.cpu().numpy(), vertex_colors=color_plane).\
trimesh.Trimesh(near_plane.cpu().numpy(), vertex_colors=img_rszd.view(-1,3).cpu().numpy()).\
export('tmp/match_plane.obj')
near_plane_mskd = near_plane[mask_rszd.view(-1)].cpu()
pts_pred = results['pts_pred_vis']
pts_pred = pts_pred[0].view(img_size,img_size,3)[mask_rszd].cpu().numpy()
draw_lines_ray_canonical(near_plane_mskd, pts_pred,img_mskd,
'tmp/match_line_pred.obj')
pts_exp = results['pts_exp_vis']
pts_exp = pts_exp[0].view(img_size,img_size,3)[mask_rszd].cpu().numpy()
draw_lines_ray_canonical(pts_pred, pts_exp,img_mskd,
'tmp/match_line_exp.obj')
#pts_pred_col=results['pts_pred'][0][mask_rszd].cpu().numpy()
#pts_exp_col = results['pts_exp'][0][mask_rszd].cpu().numpy()
#trimesh.Trimesh(pts_pred, vertex_colors=img_mskd).export('tmp/viser_pred.obj')
#trimesh.Trimesh(pts_exp ,vertex_colors=img_mskd).export('tmp/viser_exp.obj')
def draw_lines_ray_canonical(near_plane_mskd, pts_exp, img_mskd, path):
colormap = label_colormap()
len_color = len(colormap)
meshes = []
idx=0
num_pts = len(near_plane_mskd)
for i in range(0,num_pts, num_pts//50): # display 50 points
## only plot idx=5
#if idx!=5:
# idx+=1
# continue
segment = np.stack([near_plane_mskd[i], pts_exp[i]])
line = trimesh.creation.cylinder(0.0001,
segment=segment,sections=5, vertex_colors=colormap[idx%len_color])
meshes.append(line)
idx+=1
meshes = trimesh.util.concatenate(meshes)
meshes.export(path)
def merge_dict(dict_list):
out_dict = {}
for k in dict_list[0].keys():
out_dict[k] = []
for i in range(len(dict_list)):
for k in out_dict.keys():
out_dict[k] += dict_list[i][k]
return out_dict
def render_root_txt(cam_dir, cap_frame):
# read all the data
camlist = load_root(cam_dir, cap_frame)
# construct camera mesh
mesh = draw_cams(camlist)
save_dir,seqname=cam_dir.rsplit('/',1)
mesh.export('%s/mesh-%s.obj'%(save_dir, seqname))
def load_sils(root_dir, cap_frame):
"""
load all the imgs with
input is ...-(00000.png)
"""
imglist = []
img_path = '%s*.png'%(root_dir)
#img_path = '%s0*.png'%(root_dir)
all_path = sorted(glob.glob(img_path))
if cap_frame>0:
all_path = all_path[:cap_frame]
for idx,path in enumerate(all_path):
img = cv2.imread(path,0)
imglist.append(img)
imglist = np.asarray(imglist)
return imglist
def load_root(root_dir, cap_frame):
"""
load all the root se(3)
input is ...-(00000.txt)
"""
camlist = []
#cam_path = '%s0*.txt'%(root_dir)
cam_path = '%s*.txt'%(root_dir)
all_path = sorted(glob.glob(cam_path))
if cap_frame>0:
all_path = all_path[:cap_frame]
for idx,path in enumerate(all_path):
rtk = np.loadtxt(path)
camlist.append(rtk)
camlist = np.asarray(camlist)
return camlist
def draw_cams(all_cam, color='cool', axis=True,
color_list = None):
"""
all_cam: a list of 4x4 cameras
"""
# scale: the scene bound
cmap = matplotlib.cm.get_cmap(color)
all_cam = np.asarray(all_cam)
trans_norm = np.linalg.norm(all_cam[:,:3,3],2,-1)
valid_cams = trans_norm>0
trans_max = np.median(trans_norm[valid_cams])
scale=trans_max
traj_len = len(all_cam)
cam_list = []
if color_list is None:
color_list = np.asarray(range(traj_len))/float(traj_len)
for j in range(traj_len):
cam_rot = all_cam[j][:3,:3].T
cam_tran = -cam_rot.dot(all_cam[j][:3,3:])[:,0]
radius = 0.02*scale
cam = trimesh.creation.uv_sphere(radius=radius,count=[2, 2])
if axis:
#TODO draw axis
extents = np.asarray([radius*20, radius*10, radius*0.1])
axis = trimesh.creation.axis(origin_size = radius,
origin_color = cmap(color_list[j]),
axis_radius = radius* 0.1,
axis_length = radius*5)
#extents=extents)
#axis.vertices[:,2] += radius * 5
#cam = trimesh.util.concatenate([elips, axis])
cam = axis
#cam.vertices = cam.vertices + cam_tran
cam.vertices = cam.vertices.dot(cam_rot.T) + cam_tran
#cam.visual.vertex_colors = cmap(float(j)/traj_len)
cam_list.append(cam)
mesh_cam = trimesh.util.concatenate(cam_list)
return mesh_cam
def draw_cams_pair(cam1,cam2, color='cool', axis=True,
color_list = None):
frame_num = cam1.shape[0]
cam_mesh1 = draw_cams(cam1, color=color,axis=axis,color_list=color_list)
cam_mesh2 = draw_cams(cam2, color=color,axis=axis,color_list=color_list)
# draw line
lines = []
for i in range(frame_num):
cam1_c = -cam1[i,:3,:3].T.dot(cam1[i,:3,3:])[:,0]
cam2_c = -cam2[i,:3,:3].T.dot(cam2[i,:3,3:])[:,0]
segment = np.stack([cam1_c, cam2_c])
line = trimesh.creation.cylinder(0.001,segment=segment,sections=5)
lines.append(line)
lines = trimesh.util.concatenate(lines)
return cam_mesh1, cam_mesh2, lines
def save_vid(outpath, frames, suffix='.gif',upsample_frame=150., fps=10,
is_flow=False):
"""
save frames to video
frames: n,h,w,1 or n.
"""
# convert to 150 frames
if upsample_frame<1: upsample_frame = len(frames)
frame_150=[]
for i in range(int(upsample_frame)):
fid = int(i/upsample_frame*len(frames))
frame = frames[fid]
if is_flow:
frame = flow_to_image(frame)
if frame.max()<=1:
frame=frame*255
frame = frame.astype(np.uint8)
if suffix=='.gif':
h,w=frame.shape[:2]
fxy = np.sqrt(4e5/(h*w))
frame = cv2.resize(frame,None,fx=fxy, fy=fxy)
frame_150.append(frame)
imageio.mimsave('%s%s'%(outpath,suffix), frame_150, fps=fps)
class visObj(object):
"""
a class for detectron2 vis
"""
def has(self, name: str) -> bool:
return name in self._fields
def __getattr__(self, name: str) -> Any:
if name == "_fields" or name not in self._fields:
raise AttributeError("Cannot find field '{}' in the given Instances!".format(name))
return self._fields[name]
def config_to_dataloader(opts, is_eval=False):
"""
from a dict of options {seqname, batch_size, ngpu} to a pytorch dataloader
"""
config = configparser.RawConfigParser()
config.read('configs/%s.config'%opts['seqname'])
numvid = len(config.sections())-1
datalist = []
for i in range(numvid):
dataset = get_config_info(opts, config, 'data_%d'%i, i, is_eval=is_eval)
datalist = datalist + dataset
dataset = torch.utils.data.ConcatDataset(datalist)
return dataset
def get_config_info(opts, config, name, dataid, is_eval=False):
def load_attr(attrs, config, dataname):
try:attrs['datapath'] = '%s'%(str(config.get(dataname, 'datapath')))
except:pass
try:attrs['dframe'] = [int(i) for i in config.get(dataname, 'dframe').split(',')]
except:pass
try:attrs['can_frame']= int(config.get(dataname, 'can_frame'))
except:pass
try:attrs['init_frame']=int(config.get(dataname, 'init_frame'))
except:pass
try:attrs['end_frame'] =int(config.get(dataname, 'end_frame'))
except:pass
try:attrs['rtk_path'] =config.get(dataname, 'rtk_path')
except:pass
return
attrs={}
attrs['rtk_path'] = None
load_attr(attrs, config, 'data')
load_attr(attrs, config, name)
datapath = attrs['datapath']
if 'dframe' in opts.keys():
dframe = opts['dframe'] # only in preload
else:
dframe = attrs['dframe']
can_frame =attrs['can_frame']
init_frame=attrs['init_frame']
end_frame= attrs['end_frame']
rtk_path=opts['rtk_path']
numvid = len(config.sections())-1
if numvid==1 and not config.has_option(name, 'datapath'):
datapath='%s/%s'%(datapath, opts['seqname'])
# opts rtk_path
if rtk_path =='':
# rtk path from config
rtk_path= attrs['rtk_path']
elif not os.path.isfile('%s-00000.txt'%rtk_path):
print('loading cameras from init-cam')
rtk_path = '%s/%s'%(rtk_path, datapath.strip('/').split('/')[-1])
imglist = sorted(glob.glob('%s/*'%datapath))
try: flip=int(config.get(name, 'flip'))
except: flip=0
if end_frame >0:
imglist = imglist[:end_frame]
print('init:%d, end:%d'%(init_frame, end_frame))
# load dataset
datasets = []
for df in dframe:
if 'lineload' in opts.keys() and opts['lineload']:
# per-line loader
#TODO
dataset= LineDataset(opts, imglist = imglist, can_frame = can_frame,
dframe=df, init_frame=init_frame,
dataid=dataid, numvid=numvid, flip=flip, is_eval=is_eval,
rtk_path=rtk_path)
else:
# per-image loader
try:
dataset = VidDataset(opts, imglist = imglist, can_frame = can_frame,
dframe=df, init_frame=init_frame,
dataid=dataid, numvid=numvid, flip=flip, is_eval=is_eval,
rtk_path=rtk_path)
except: continue
if rtk_path is None:
dataset.has_prior_cam = False
else:
dataset.has_prior_cam = True
# whether to use preloaded data
if 'preload' in opts.keys():
dataset.preload = opts['preload']
else:
dataset.preload = False
if 'multiply' in opts.keys():
# duplicate such that it goes more than 200 iters
dup_num = 200/(len(dataset)/opts['ngpu']/opts['batch_size'])
if 'accu_steps' in opts.keys():
dup_num = dup_num*opts['accu_steps']
dup_num = int(dup_num)+1
for i in range(dup_num):
datasets.append(dataset)
else:
datasets.append(dataset)
return datasets
class LineDataset(Dataset):
'''
'''
def __init__(self, opts, filter_key=None, imglist=None, can_frame=0,
dframe=1,init_frame=0, dataid=0, numvid=1, flip=0,
is_eval=False, rtk_path=None):
super(LineDataset, self).__init__()
self.crop_factor = 1.2
self.imglist = imglist
self.img_size = opts['img_size']
self.num_lines = (len(imglist)-1) * self.img_size # last img not saved
seqname = imglist[0].split('/')[-2]
if rtk_path is not None:
self.rtklist =['%s-%05d.txt'%(rtk_path, i) for i in range(len(self.imglist))]
else:
self.rtklist =[i.replace('JPEGImages', 'Cameras').replace('.jpg', '.txt') for i in self.imglist]
# Load the annotation file.
self.dataid = dataid
print('%d lines' % self.num_lines)
def __len__(self):
return self.num_lines
def __getitem__(self, index):
try:dataid = self.dataid
except: dataid=0
        #TODO localize file
idt = index // self.img_size# idt, idy
idy = index % self.img_size# idt, idy
save_dir = self.imglist[0].replace('JPEGImages', 'Pixels').rsplit('/',1)[0]
dframe_list = [2,4,8,16,32]
max_id = len(self.imglist)-1
dframe_list = [1] + [i for i in dframe_list if (idt%i==0) and \
int(idt+i) <= max_id]
dframe = np.random.choice(dframe_list)
data_path = '%s/%d_%05d/%04d.npy'%(save_dir, dframe, idt, idy)
elem = np.load(data_path,allow_pickle=True).item()
# modify dataid according to training time ones
# reload rtk based on rtk predictions
# add RTK: [R_3x3|T_3x1]
# [fx,fy,px,py], to the ndc space
# always forward flow
idtn = idt + dframe
try:
rtk_path = self.rtklist[idt]
rtk = np.loadtxt(rtk_path)
rtkn_path = self.rtklist[idtn]
rtkn = np.loadtxt(rtkn_path)
rtk = np.stack([rtk, rtkn])
except:
print('warning: loading empty camera')
print(rtk_path)
rtk = np.zeros((4,4))
rtk[:3,:3] = np.eye(3)
rtk[:3, 3] = np.asarray([0,0,10])
rtk[3, :] = np.asarray([512,512,256,256])
rtkn = rtk.copy()
rtk = np.stack([rtk, rtkn])
kaug_path = '%s/%d_%05d/rtk.npy'%(save_dir, dframe, idt)
kaug = np.load(kaug_path,allow_pickle=True).item()['kaug']
#TODO fill elems
elem['rtk'] = rtk[None] # 1,2,x
elem['kaug'] = kaug
elem['dataid'] = np.stack([dataid, dataid])[None]
elem['frameid'] = np.stack([idt, idtn])[None]
elem['lineid'] = np.stack([idy, idy])[None]
return elem
class VidDataset(base_data.BaseDataset):
'''
'''
def __init__(self, opts, filter_key=None, imglist=None, can_frame=0,
dframe=1,init_frame=0, dataid=0, numvid=1, flip=0,
is_eval=False, rtk_path=None):
super(VidDataset, self).__init__(opts, filter_key=filter_key)
self.flip=flip
self.imglist = imglist
self.can_frame = can_frame
self.dframe = dframe
seqname = imglist[0].split('/')[-2]
self.masklist = [i.replace('JPEGImages', 'Annotations').replace('.jpg', '.png') for i in self.imglist]
self.camlist = [i.replace('JPEGImages', 'Camera').replace('.jpg', '.txt') for i in self.imglist]
if dframe==1:
self.flowfwlist = [i.replace('JPEGImages', 'FlowFW').replace('.jpg', '.pfm').replace('.png', '.pfm').replace('%s/'%seqname, '%s/flo-'%seqname) for i in self.imglist]
self.flowbwlist = [i.replace('JPEGImages', 'FlowBW').replace('.jpg', '.pfm').replace('.png', '.pfm').replace('%s/'%seqname, '%s/flo-'%seqname) for i in self.imglist]
else:
self.flowfwlist = [i.replace('JPEGImages', 'FlowFW').replace('.jpg', '.pfm').replace('.png', '.pfm').replace('%s/'%seqname, '%s/flo-'%(seqname)) for i in self.imglist]
self.flowbwlist = [i.replace('JPEGImages', 'FlowBW').replace('.jpg', '.pfm').replace('.png', '.pfm').replace('%s/'%seqname, '%s/flo-'%(seqname)) for i in self.imglist]
self.featlist = [i.replace('JPEGImages', 'Densepose').replace('.jpg', '.pfm').replace('.png', '.pfm').replace('%s/'%seqname, '%s/feat-'%seqname) for i in self.imglist]
self.featlist = ['%s/feat-%05d.pfm'%(i.rsplit('/',1)[0], int(i.split('feat-')[-1].split('.pfm')[0])) for i in self.featlist]
self.bboxlist = ['%s/bbox-%05d.txt'%(i.rsplit('/',1)[0], int(i.split('feat-')[-1].split('.pfm')[0])) for i in self.featlist]
self.kplist = [i.replace('JPEGImages', 'KP').replace('.jpg', '_keypoints.json').replace('.png', '_keypoints.json') for i in self.imglist]
self.dplist = [i.replace('JPEGImages', 'Densepose').replace('.jpg', '.pfm').replace('.png', '.pfm') for i in self.imglist]
if rtk_path is not None:
self.rtklist =['%s-%05d.txt'%(rtk_path, i) for i in range(len(self.imglist))]
else:
self.rtklist =[i.replace('JPEGImages', 'Cameras').replace('.jpg', '.txt') for i in self.imglist]
self.baselist = [i for i in range(len(self.imglist)-self.dframe)] + [i+self.dframe for i in range(len(self.imglist)-self.dframe)]
self.directlist = [1] * (len(self.imglist)-self.dframe) + [0]* (len(self.imglist)-self.dframe)
# to skip frames
self.odirectlist = self.directlist.copy()
len_list = len(self.baselist)//2
self.fw_list = self.baselist[:len_list][init_frame::self.dframe]
self.bw_list = self.baselist[len_list:][init_frame::self.dframe]
self.dir_fwlist = self.directlist[:len_list][init_frame::self.dframe]
self.dir_bwlist = self.directlist[len_list:][init_frame::self.dframe]
if is_eval:
self.baselist = self.fw_list
self.directlist = self.dir_fwlist
else:
self.baselist = self.fw_list + self.bw_list
self.directlist = self.dir_fwlist + self.dir_bwlist
self.baselist = [self.baselist[0]] + self.baselist + [self.baselist[-1]]
self.directlist = [self.directlist[0]] + self.directlist + [self.directlist[-1]]
fac = (opts['batch_size']*opts['ngpu']*200)//len(self.directlist) // numvid
if fac==0: fac=1
self.directlist = self.directlist*fac
self.baselist = self.baselist*fac
# Load the annotation file.
self.num_imgs = len(self.directlist)
self.dataid = dataid
print('%d pairs of images' % self.num_imgs)
def str_to_frame(test_frames, data_info):
if test_frames[0]=='{':
# render a list of videos
idx_render = []
for i in test_frames[1:-1].split(','):
vid_idx = int(i)
idx_render += range(data_info['offset'][vid_idx]-vid_idx,
data_info['offset'][vid_idx+1]-vid_idx-1)
else:
test_frames = int(test_frames)
if test_frames==0:
test_frames = data_info['len_evalloader']-1
# render specific number of frames
idx_render = np.linspace(0,data_info['len_evalloader']-1,
test_frames, dtype=int)
return idx_render
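# Editor's note (hedged, illustrative only; not part of the original file):
# str_to_frame accepts either a frame count or a brace-enclosed list of video
# indices; the calls below are placeholders:
#   str_to_frame('0', data_info)      # ~all frames (len_evalloader-1 evenly spaced)
#   str_to_frame('30', data_info)     # 30 evenly spaced frames
#   str_to_frame('{0,2}', data_info)  # every frame of videos 0 and 2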
def extract_data_info(loader):
data_info = {}
dataset_list = loader.dataset.datasets
data_offset = [0]
impath = []
for dataset in dataset_list:
impath += dataset.imglist
data_offset.append(len(dataset.imglist))
data_info['offset'] = np.asarray(data_offset).cumsum()
data_info['impath'] = impath
data_info['len_evalloader'] = len(loader)
return data_info
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def get_vertex_colors(model, mesh, frame_idx=0, view_dir=None):
# assign color to mesh verts according to current frame
xyz_query = torch.cuda.FloatTensor(mesh.vertices, device=model.device)
xyz_embedded = model.embedding_xyz(xyz_query) # (N, embed_xyz_channels)
# use env code of the first frame
env_code = model.env_code(torch.Tensor([frame_idx]).long().to(model.device))
env_code = env_code.expand(xyz_query.shape[0],-1)
if view_dir is None:
# use view direction of (0,0,-1)
dir_query = torch.zeros_like(xyz_query)
dir_query[:,2] = -1
else:
dir_query = F.normalize(view_dir, 2,-1)
dir_embedded = model.embedding_dir(dir_query) # (N, embed_xyz_channels)
xyz_embedded = torch.cat([xyz_embedded, dir_embedded, env_code],-1)
#xyz_embedded = torch.cat([xyz_embedded, env_code],-1)
vis = model.nerf_coarse(xyz_embedded)[:,:3].cpu().numpy()
vis = np.clip(vis, 0, 1)
return vis
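# Editor's sketch (hedged, illustrative only): writing the colors returned by
# get_vertex_colors back onto a trimesh mesh, as the trainer does at export
# time. `model`, `mesh` and `frame_idx` are placeholders supplied by the caller.
def _example_color_mesh(model, mesh, frame_idx=0):
    import numpy as np
    vis = get_vertex_colors(model, mesh, frame_idx=frame_idx)  # (N, 3) in [0, 1]
    mesh.visual.vertex_colors[:, :3] = (vis * 255).astype(np.uint8)
    return mesh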
|
banmo-main
|
utils/io.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import pickle
import cv2
import numpy as np
import os
import torch
import torch.nn.functional as F
import pdb
import trimesh
from detectron2.config import get_cfg
from detectron2.modeling import build_model
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.structures import Boxes as create_boxes
import sys
try:
sys.path.insert(0,'./third_party/detectron2//projects/DensePose/')
from densepose import add_densepose_config
from densepose.modeling.cse.utils import squared_euclidean_distance_matrix
from densepose.data.build import get_class_to_mesh_name_mapping
from densepose.modeling import build_densepose_embedder
from densepose.vis.densepose_outputs_vertex import get_xyz_vertex_embedding
from densepose.vis.base import Boxes, Image, MatrixVisualizer
except:
sys.path.insert(0,'./third_party/detectron2_old//projects/DensePose/')
from densepose import add_densepose_config
from densepose.modeling.cse.utils import squared_euclidean_distance_matrix
from densepose.data.build import get_class_to_mesh_name_mapping
from densepose.modeling import build_densepose_embedder
from densepose.vis.densepose_outputs_vertex import get_xyz_vertex_embedding
from densepose.vis.base import Boxes, Image, MatrixVisualizer
# load model
def create_cse(config_fpath, weights_fpath):
cfg = get_cfg()
add_densepose_config(cfg)
cfg.merge_from_file(config_fpath)
cfg.MODEL.WEIGHTS = weights_fpath
model = build_model(cfg) # returns a torch.nn.Module
DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS) # load a file, usually from cfg.MODEL.WEIGHTS
embedder = build_densepose_embedder(cfg)
class_to_mesh_name = get_class_to_mesh_name_mapping(cfg)
mesh_vertex_embeddings = {
mesh_name: embedder(mesh_name).cuda()
for mesh_name in class_to_mesh_name.values()
if embedder.has_embeddings(mesh_name)
}
return model, embedder, mesh_vertex_embeddings
def run_cse(model, embedder, mesh_vertex_embeddings, image, mask, mesh_name='smpl_27554'):
h,w,_=image.shape
# resize
max_size=1333
if h>w:
h_rszd, w_rszd = max_size, max_size*w//h
else:
h_rszd, w_rszd = max_size*h//w, max_size
image = cv2.resize(image, (w_rszd, h_rszd))
mask = cv2.resize(mask.astype(float), (w_rszd, h_rszd)).astype(np.uint8)
# pad
h_pad = (1+h_rszd//32)*32
w_pad = (1+w_rszd//32)*32
image_tmp = np.zeros((h_pad,w_pad,3)).astype(np.uint8)
mask_tmp = np.zeros((h_pad,w_pad)).astype(np.uint8)
image_tmp[:h_rszd,:w_rszd] = image
mask_tmp[:h_rszd,:w_rszd] = mask
image = image_tmp
mask = mask_tmp
image_raw = image.copy()
# preprocess image and box
indices = np.where(mask>0); xid = indices[1]; yid = indices[0]
center = ( (xid.max()+xid.min())//2, (yid.max()+yid.min())//2)
length = ( int((xid.max()-xid.min())*1.//2), int((yid.max()-yid.min())*1.//2))
bbox = [center[0]-length[0], center[1]-length[1],length[0]*2, length[1]*2]
bboxw = bbox[2]
bboxh = bbox[3]
bbox = [max(0,bbox[0]),
max(0,bbox[1]),
min(w_pad, bbox[0]+bbox[2]),
min(h_pad, bbox[1]+bbox[3])]
image=torch.Tensor(image).cuda().permute(2,0,1)[None]
image = torch.stack([(x - model.pixel_mean) / model.pixel_std for x in image])
pred_boxes = torch.Tensor([bbox]).cuda()
pred_boxes = create_boxes(pred_boxes)
# inference
model.eval()
with torch.no_grad():
features = model.backbone(image)
features = [features[f] for f in model.roi_heads.in_features]
features = [model.roi_heads.decoder(features)]
features_dp = model.roi_heads.densepose_pooler(features, [pred_boxes])
densepose_head_outputs = model.roi_heads.densepose_head(features_dp)
densepose_predictor_outputs = model.roi_heads.densepose_predictor(densepose_head_outputs)
coarse_segm_resized = densepose_predictor_outputs.coarse_segm[0]
embedding_resized = densepose_predictor_outputs.embedding[0]
# use input mask
x, y, xx, yy= bbox
mask_box = mask[y:yy, x:xx]
mask_box = torch.Tensor(mask_box).cuda()[None,None]
mask_box = F.interpolate(mask_box, coarse_segm_resized.shape[1:3], mode='bilinear')[0,0]>0
# find closest match (in the cropped/resized coordinate)
clst_verts_pad = torch.zeros(h_pad, w_pad).long().cuda()
clst_verts_box = torch.zeros(mask_box.shape, dtype=torch.long).cuda()
all_embeddings = embedding_resized[:, mask_box].t()
assign_mat = squared_euclidean_distance_matrix(all_embeddings, mesh_vertex_embeddings[mesh_name])
clst_verts_box[mask_box] = assign_mat.argmin(dim=1)
clst_verts_box = F.interpolate(clst_verts_box[None,None].float(), (yy-y,xx-x),mode='nearest')[0,0].long()
clst_verts_pad[y:yy,x:xx] = clst_verts_box
# output embedding
embedding = embedding_resized # size does not matter for a image code
embedding = embedding * mask_box.float()[None]
# embedding norm
embedding_norm = embedding.norm(2,0)
embedding_norm_pad = torch.zeros(h_rszd, w_rszd).cuda()
embedding_norm_box = F.interpolate(embedding_norm[None,None], (yy-y,xx-x),mode='bilinear')[0,0]
embedding_norm_pad[y:yy,x:xx] = embedding_norm_box
embedding_norm = embedding_norm_pad[:h_rszd, :w_rszd]
embedding_norm = F.interpolate(embedding_norm[None,None], (h,w),mode='bilinear')[0][0]
embedding = embedding.cpu().numpy()
embedding_norm = embedding_norm.cpu().numpy()
# visualization
embed_map = get_xyz_vertex_embedding(mesh_name, 'cuda')
vis = (embed_map[clst_verts_pad].clip(0, 1) * 255.0).cpu().numpy()
mask_visualizer = MatrixVisualizer(
inplace=False, cmap=cv2.COLORMAP_JET, val_scale=1.0, alpha=0.7
)
image_bgr = mask_visualizer.visualize(image_raw, mask, vis, [0,0,w_pad,h_pad])
image_bgr = image_bgr[:h_rszd,:w_rszd]
image_bgr = cv2.resize(image_bgr, (w,h))
clst_verts =clst_verts_pad[:h_rszd, :w_rszd]
clst_verts = F.interpolate(clst_verts[None,None].float(), (h,w),mode='nearest')[0,0].long()
clst_verts =clst_verts.cpu().numpy()
return clst_verts, image_bgr, embedding, embedding_norm, bbox
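# Editor's sketch (hedged): typical wiring of create_cse/run_cse above. The
# config/weight paths, input image (HxWx3 uint8) and mask are placeholders to
# be supplied by the caller; both functions expect a CUDA device.
def _example_cse_pipeline(config_fpath, weights_fpath, image, mask):
    model, embedder, mesh_vertex_embeddings = create_cse(config_fpath, weights_fpath)
    return run_cse(model, embedder, mesh_vertex_embeddings, image, mask,
                   mesh_name='smpl_27554')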
|
banmo-main
|
utils/cselib.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import numpy as np
def label_colormap():
"""
colormap for visualizing bones
"""
return np.asarray(
[[155, 122, 157],
[ 45, 245, 50],
[ 71, 25, 64],
[231, 176, 35],
[125, 249, 245],
[ 32, 75, 253],
[241, 31, 111],
[218, 71, 252],
[248, 220, 197],
[ 34, 194, 198],
[108, 178, 96],
[ 33, 101, 119],
[125, 100, 26],
[209, 235, 102],
[116, 105, 241],
[100, 50, 147],
[193, 159, 222],
[ 95, 254, 138],
[197, 130, 75],
[144, 31, 211],
[ 46, 150, 26],
[242, 90, 174],
[179, 41, 38],
[118, 204, 174],
[145, 209, 38],
[188, 74, 125],
[ 95, 158, 210],
[237, 152, 130],
[ 53, 151, 157],
[ 69, 86, 193],
[ 60, 204, 122],
[251, 77, 58],
[174, 248, 170],
[ 28, 81, 36],
[252, 134, 243],
[ 62, 254, 193],
[ 68, 209, 254],
[ 44, 25, 184],
[131, 58, 80],
[188, 251, 27],
[156, 25, 132],
[248, 36, 225],
[ 95, 130, 63],
[222, 204, 244],
[185, 186, 134],
[160, 146, 44],
[244, 196, 89],
[ 39, 60, 87],
[134, 239, 87],
[ 25, 166, 97],
[ 79, 36, 229],
[ 45, 130, 216],
[177, 90, 200],
[ 86, 218, 30],
[ 97, 115, 165],
[159, 104, 99],
[168, 220, 219],
[134, 76, 180],
[ 31, 238, 157],
[ 79, 140, 253],
[124, 23, 27],
[245, 234, 46],
[188, 30, 174],
[253, 246, 148],
[228, 94, 92],]
)
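# Editor's sketch (hedged, illustrative only): picking one colormap row per
# bone and tiling the map when more bones are requested than it has rows,
# mirroring how the trainer repeats this map before blending it with skinning
# weights.
def _example_bone_colors(num_bones):
    import numpy as np
    cmap = label_colormap()
    cmap = np.tile(cmap, (1 + num_bones // len(cmap), 1))  # repeat if needed
    return cmap[:num_bones]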
|
banmo-main
|
utils/colors.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import os
import os.path as osp
import sys
sys.path.insert(0,'third_party')
import time
import pdb
import numpy as np
from absl import flags
import cv2
import time
import mcubes
from nnutils import banmo
import subprocess
from torch.utils.tensorboard import SummaryWriter
from kmeans_pytorch import kmeans
import torch.distributed as dist
import torch.nn.functional as F
import trimesh
import torchvision
from torch.autograd import Variable
from collections import defaultdict
from pytorch3d import transforms
from torch.nn.utils import clip_grad_norm_
from matplotlib.pyplot import cm
from nnutils.geom_utils import lbs, reinit_bones, warp_bw, warp_fw, vec_to_sim3,\
obj_to_cam, get_near_far, near_far_to_bound, \
compute_point_visibility, process_so3_seq, \
ood_check_cse, align_sfm_sim3, gauss_mlp_skinning, \
correct_bones
from nnutils.nerf import grab_xyz_weights
from ext_utils.flowlib import flow_to_image
from utils.io import mkdir_p
from nnutils.vis_utils import image_grid
from dataloader import frameloader
from utils.io import save_vid, draw_cams, extract_data_info, merge_dict,\
render_root_txt, save_bones, draw_cams_pair, get_vertex_colors
from utils.colors import label_colormap
class DataParallelPassthrough(torch.nn.parallel.DistributedDataParallel):
"""
for multi-gpu access
"""
def __getattr__(self, name):
try:
return super().__getattr__(name)
except AttributeError:
return getattr(self.module, name)
def __delattr__(self, name):
try:
return super().__delattr__(name)
except AttributeError:
return delattr(self.module, name)
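# Editor's note (hedged): the passthrough above lets attribute access such as
# `self.model.near_far` resolve to `self.model.module.near_far` under DDP, so
# the same code path works with and without the distributed wrapper.
# Illustrative usage (assumes an initialized process group, not set up here):
#   ddp_net = DataParallelPassthrough(net, device_ids=[rank], output_device=rank)
#   ddp_net.some_attr  # forwarded to net.some_attr via __getattr__ if DDP lacks it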
class v2s_trainer():
def __init__(self, opts, is_eval=False):
self.opts = opts
self.is_eval=is_eval
self.local_rank = opts.local_rank
self.save_dir = os.path.join(opts.checkpoint_dir, opts.logname)
self.accu_steps = opts.accu_steps
# write logs
if opts.local_rank==0:
if not os.path.exists(self.save_dir): os.makedirs(self.save_dir)
log_file = os.path.join(self.save_dir, 'opts.log')
if not self.is_eval:
if os.path.exists(log_file):
os.remove(log_file)
opts.append_flags_into_file(log_file)
def define_model(self, data_info):
opts = self.opts
self.device = torch.device('cuda:{}'.format(opts.local_rank))
self.model = banmo.banmo(opts, data_info)
self.model.forward = self.model.forward_default
self.num_epochs = opts.num_epochs
# load model
if opts.model_path!='':
self.load_network(opts.model_path, is_eval=self.is_eval)
if self.is_eval:
self.model = self.model.to(self.device)
else:
# ddp
self.model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(self.model)
self.model = self.model.to(self.device)
self.model = DataParallelPassthrough(
self.model,
device_ids=[opts.local_rank],
output_device=opts.local_rank,
find_unused_parameters=True,
)
return
def init_dataset(self):
opts = self.opts
opts_dict = {}
opts_dict['n_data_workers'] = opts.n_data_workers
opts_dict['batch_size'] = opts.batch_size
opts_dict['seqname'] = opts.seqname
opts_dict['img_size'] = opts.img_size
opts_dict['ngpu'] = opts.ngpu
opts_dict['local_rank'] = opts.local_rank
opts_dict['rtk_path'] = opts.rtk_path
opts_dict['preload']= False
opts_dict['accu_steps'] = opts.accu_steps
if self.is_eval and opts.rtk_path=='' and opts.model_path!='':
# automatically load cameras in the logdir
model_dir = opts.model_path.rsplit('/',1)[0]
cam_dir = '%s/init-cam/'%model_dir
if os.path.isdir(cam_dir):
opts_dict['rtk_path'] = cam_dir
self.dataloader = frameloader.data_loader(opts_dict)
if opts.lineload:
opts_dict['lineload'] = True
opts_dict['multiply'] = True # multiple samples in dataset
self.trainloader = frameloader.data_loader(opts_dict)
opts_dict['lineload'] = False
del opts_dict['multiply']
else:
opts_dict['multiply'] = True
self.trainloader = frameloader.data_loader(opts_dict)
del opts_dict['multiply']
opts_dict['img_size'] = opts.render_size
self.evalloader = frameloader.eval_loader(opts_dict)
# compute data offset
data_info = extract_data_info(self.evalloader)
return data_info
def init_training(self):
opts = self.opts
# set as module attributes since they do not change across gpus
self.model.module.final_steps = self.num_epochs * \
min(200,len(self.trainloader)) * opts.accu_steps
# ideally should be greater than 200 batches
params_nerf_coarse=[]
params_nerf_beta=[]
params_nerf_feat=[]
params_nerf_beta_feat=[]
params_nerf_fine=[]
params_nerf_unc=[]
params_nerf_flowbw=[]
params_nerf_skin=[]
params_nerf_vis=[]
params_nerf_root_rts=[]
params_nerf_body_rts=[]
params_root_code=[]
params_pose_code=[]
params_env_code=[]
params_vid_code=[]
params_bones=[]
params_skin_aux=[]
params_ks=[]
params_nerf_dp=[]
params_csenet=[]
for name,p in self.model.named_parameters():
if 'nerf_coarse' in name and 'beta' not in name:
params_nerf_coarse.append(p)
elif 'nerf_coarse' in name and 'beta' in name:
params_nerf_beta.append(p)
elif 'nerf_feat' in name and 'beta' not in name:
params_nerf_feat.append(p)
elif 'nerf_feat' in name and 'beta' in name:
params_nerf_beta_feat.append(p)
elif 'nerf_fine' in name:
params_nerf_fine.append(p)
elif 'nerf_unc' in name:
params_nerf_unc.append(p)
elif 'nerf_flowbw' in name or 'nerf_flowfw' in name:
params_nerf_flowbw.append(p)
elif 'nerf_skin' in name:
params_nerf_skin.append(p)
elif 'nerf_vis' in name:
params_nerf_vis.append(p)
elif 'nerf_root_rts' in name:
params_nerf_root_rts.append(p)
elif 'nerf_body_rts' in name:
params_nerf_body_rts.append(p)
elif 'root_code' in name:
params_root_code.append(p)
elif 'pose_code' in name or 'rest_pose_code' in name:
params_pose_code.append(p)
elif 'env_code' in name:
params_env_code.append(p)
elif 'vid_code' in name:
params_vid_code.append(p)
elif 'module.bones' == name:
params_bones.append(p)
elif 'module.skin_aux' == name:
params_skin_aux.append(p)
elif 'module.ks_param' == name:
params_ks.append(p)
elif 'nerf_dp' in name:
params_nerf_dp.append(p)
elif 'csenet' in name:
params_csenet.append(p)
else: continue
if opts.local_rank==0:
print('optimized params: %s'%name)
self.optimizer = torch.optim.AdamW(
[{'params': params_nerf_coarse},
{'params': params_nerf_beta},
{'params': params_nerf_feat},
{'params': params_nerf_beta_feat},
{'params': params_nerf_fine},
{'params': params_nerf_unc},
{'params': params_nerf_flowbw},
{'params': params_nerf_skin},
{'params': params_nerf_vis},
{'params': params_nerf_root_rts},
{'params': params_nerf_body_rts},
{'params': params_root_code},
{'params': params_pose_code},
{'params': params_env_code},
{'params': params_vid_code},
{'params': params_bones},
{'params': params_skin_aux},
{'params': params_ks},
{'params': params_nerf_dp},
{'params': params_csenet},
],
lr=opts.learning_rate,betas=(0.9, 0.999),weight_decay=1e-4)
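        # Editor's note (hedged): the per-group max learning rates handed to
        # OneCycleLR below follow the exact ordering of the param groups above;
        # adding or reordering a group requires updating both lists together.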
if self.model.root_basis=='exp':
lr_nerf_root_rts = 10
elif self.model.root_basis=='cnn':
lr_nerf_root_rts = 0.2
elif self.model.root_basis=='mlp':
lr_nerf_root_rts = 1
elif self.model.root_basis=='expmlp':
lr_nerf_root_rts = 1
else: print('error'); exit()
self.scheduler = torch.optim.lr_scheduler.OneCycleLR(self.optimizer,\
[opts.learning_rate, # params_nerf_coarse
opts.learning_rate, # params_nerf_beta
opts.learning_rate, # params_nerf_feat
10*opts.learning_rate, # params_nerf_beta_feat
opts.learning_rate, # params_nerf_fine
opts.learning_rate, # params_nerf_unc
opts.learning_rate, # params_nerf_flowbw
opts.learning_rate, # params_nerf_skin
opts.learning_rate, # params_nerf_vis
lr_nerf_root_rts*opts.learning_rate, # params_nerf_root_rts
opts.learning_rate, # params_nerf_body_rts
lr_nerf_root_rts*opts.learning_rate, # params_root_code
opts.learning_rate, # params_pose_code
opts.learning_rate, # params_env_code
opts.learning_rate, # params_vid_code
opts.learning_rate, # params_bones
10*opts.learning_rate, # params_skin_aux
10*opts.learning_rate, # params_ks
opts.learning_rate, # params_nerf_dp
opts.learning_rate, # params_csenet
],
int(self.model.module.final_steps/self.accu_steps),
pct_start=2./self.num_epochs, # use 2 epochs to warm up
cycle_momentum=False,
anneal_strategy='linear',
final_div_factor=1./5, div_factor = 25,
)
def save_network(self, epoch_label, prefix=''):
if self.opts.local_rank==0:
param_path = '%s/%sparams_%s.pth'%(self.save_dir,prefix,epoch_label)
save_dict = self.model.state_dict()
torch.save(save_dict, param_path)
var_path = '%s/%svars_%s.npy'%(self.save_dir,prefix,epoch_label)
latest_vars = self.model.latest_vars.copy()
del latest_vars['fp_err']
del latest_vars['flo_err']
del latest_vars['sil_err']
del latest_vars['flo_err_hist']
np.save(var_path, latest_vars)
return
@staticmethod
def rm_module_prefix(states, prefix='module'):
new_dict = {}
for i in states.keys():
v = states[i]
if i[:len(prefix)] == prefix:
i = i[len(prefix)+1:]
new_dict[i] = v
return new_dict
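    # Editor's note (hedged): rm_module_prefix turns DDP-style keys such as
    # 'module.nerf_coarse.beta' into 'nerf_coarse.beta' so checkpoints saved
    # from the wrapped model load into the bare one, e.g.
    #   rm_module_prefix({'module.bones': t}) -> {'bones': t}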
def load_network(self,model_path=None, is_eval=True, rm_prefix=True):
opts = self.opts
states = torch.load(model_path,map_location='cpu')
if rm_prefix: states = self.rm_module_prefix(states)
var_path = model_path.replace('params', 'vars').replace('.pth', '.npy')
latest_vars = np.load(var_path,allow_pickle=True)[()]
if is_eval:
# load variables
self.model.latest_vars = latest_vars
# if size mismatch, delete all related variables
if rm_prefix and states['near_far'].shape[0] != self.model.near_far.shape[0]:
print('!!!deleting video specific dicts due to size mismatch!!!')
self.del_key( states, 'near_far')
self.del_key( states, 'root_code.weight') # only applies to root_basis=mlp
self.del_key( states, 'pose_code.weight')
self.del_key( states, 'pose_code.basis_mlp.weight')
self.del_key( states, 'nerf_body_rts.0.weight')
self.del_key( states, 'nerf_body_rts.0.basis_mlp.weight')
self.del_key( states, 'nerf_root_rts.0.weight')
self.del_key( states, 'nerf_root_rts.root_code.weight')
self.del_key( states, 'nerf_root_rts.root_code.basis_mlp.weight')
self.del_key( states, 'nerf_root_rts.delta_rt.0.basis_mlp.weight')
self.del_key( states, 'nerf_root_rts.base_rt.se3')
self.del_key( states, 'nerf_root_rts.delta_rt.0.weight')
self.del_key( states, 'env_code.weight')
self.del_key( states, 'env_code.basis_mlp.weight')
if 'vid_code.weight' in states.keys():
self.del_key( states, 'vid_code.weight')
if 'ks_param' in states.keys():
self.del_key( states, 'ks_param')
# delete pose basis(backbones)
if not opts.keep_pose_basis:
del_key_list = []
for k in states.keys():
if 'nerf_body_rts' in k or 'nerf_root_rts' in k:
del_key_list.append(k)
for k in del_key_list:
print(k)
self.del_key( states, k)
if rm_prefix and opts.lbs and states['bones'].shape[0] != self.model.bones.shape[0]:
self.del_key(states, 'bones')
states = self.rm_module_prefix(states, prefix='nerf_skin')
states = self.rm_module_prefix(states, prefix='nerf_body_rts')
# load some variables
# this is important for volume matching
if latest_vars['obj_bound'].size==1:
latest_vars['obj_bound'] = latest_vars['obj_bound'] * np.ones(3)
self.model.latest_vars['obj_bound'] = latest_vars['obj_bound']
# load nerf_coarse, nerf_bone/root (not code), nerf_vis, nerf_feat, nerf_unc
        #TODO somehow, loading will reset the batch stats of
        # a pretrained cse model; to keep those, we manually copy them into states
if opts.ft_cse and \
'csenet.net.backbone.fpn_lateral2.weight' not in states.keys():
self.add_cse_to_states(self.model, states)
self.model.load_state_dict(states, strict=False)
return
@staticmethod
def add_cse_to_states(model, states):
states_init = model.state_dict()
for k in states_init.keys():
v = states_init[k]
if 'csenet' in k:
states[k] = v
def eval_cam(self, idx_render=None):
"""
        idx_render: list of frame indices to render
"""
opts = self.opts
with torch.no_grad():
self.model.eval()
# load data
for dataset in self.evalloader.dataset.datasets:
dataset.load_pair = False
batch = []
for i in idx_render:
batch.append( self.evalloader.dataset[i] )
batch = self.evalloader.collate_fn(batch)
for dataset in self.evalloader.dataset.datasets:
dataset.load_pair = True
#TODO can be further accelerated
self.model.convert_batch_input(batch)
if opts.unc_filter:
                # process densepose features
valid_list, error_list = ood_check_cse(self.model.dp_feats,
self.model.dp_embed,
self.model.dps.long())
valid_list = valid_list.cpu().numpy()
error_list = error_list.cpu().numpy()
else:
valid_list = np.ones( len(idx_render))
error_list = np.zeros(len(idx_render))
self.model.convert_root_pose()
rtk = self.model.rtk
kaug = self.model.kaug
#TODO may need to recompute after removing the invalid predictions
# need to keep this to compute near-far planes
self.model.save_latest_vars()
# extract mesh sequences
aux_seq = {
'is_valid':[],
'err_valid':[],
'rtk':[],
'kaug':[],
'impath':[],
'masks':[],
}
for idx,_ in enumerate(idx_render):
frameid=self.model.frameid[idx]
if opts.local_rank==0:
print('extracting frame %d'%(frameid.cpu().numpy()))
aux_seq['rtk'].append(rtk[idx].cpu().numpy())
aux_seq['kaug'].append(kaug[idx].cpu().numpy())
aux_seq['masks'].append(self.model.masks[idx].cpu().numpy())
aux_seq['is_valid'].append(valid_list[idx])
aux_seq['err_valid'].append(error_list[idx])
impath = self.model.impath[frameid.long()]
aux_seq['impath'].append(impath)
return aux_seq
def eval(self, idx_render=None, dynamic_mesh=False):
"""
        idx_render: list of frame indices to render
dynamic_mesh: whether to extract canonical shape, or dynamic shape
"""
opts = self.opts
with torch.no_grad():
self.model.eval()
# run marching cubes on canonical shape
mesh_dict_rest = self.extract_mesh(self.model, opts.chunk, \
opts.sample_grid3d, opts.mc_threshold)
            # choose a grid image or the whole video
if idx_render is None: # render 9 frames
idx_render = np.linspace(0,len(self.evalloader)-1, 9, dtype=int)
# render
chunk=opts.rnd_frame_chunk
rendered_seq = defaultdict(list)
aux_seq = {'mesh_rest': mesh_dict_rest['mesh'],
'mesh':[],
'rtk':[],
'impath':[],
'bone':[],}
for j in range(0, len(idx_render), chunk):
batch = []
idx_chunk = idx_render[j:j+chunk]
for i in idx_chunk:
batch.append( self.evalloader.dataset[i] )
batch = self.evalloader.collate_fn(batch)
rendered = self.render_vid(self.model, batch)
for k, v in rendered.items():
rendered_seq[k] += [v]
hbs=len(idx_chunk)
sil_rszd = F.interpolate(self.model.masks[:hbs,None],
(opts.render_size, opts.render_size))[:,0,...,None]
rendered_seq['img'] += [self.model.imgs.permute(0,2,3,1)[:hbs]]
rendered_seq['sil'] += [self.model.masks[...,None] [:hbs]]
rendered_seq['flo'] += [self.model.flow.permute(0,2,3,1)[:hbs]]
rendered_seq['dpc'] += [self.model.dp_vis[self.model.dps.long()][:hbs]]
rendered_seq['occ'] += [self.model.occ[...,None] [:hbs]]
rendered_seq['feat']+= [self.model.dp_feats.std(1)[...,None][:hbs]]
rendered_seq['flo_coarse'][-1] *= sil_rszd
rendered_seq['img_loss_samp'][-1] *= sil_rszd
if 'frame_cyc_dis' in rendered_seq.keys() and \
len(rendered_seq['frame_cyc_dis'])>0:
rendered_seq['frame_cyc_dis'][-1] *= 255/rendered_seq['frame_cyc_dis'][-1].max()
rendered_seq['frame_rigloss'][-1] *= 255/rendered_seq['frame_rigloss'][-1].max()
if opts.use_embed:
rendered_seq['pts_pred'][-1] *= sil_rszd
rendered_seq['pts_exp'] [-1] *= rendered_seq['sil_coarse'][-1]
rendered_seq['feat_err'][-1] *= sil_rszd
rendered_seq['feat_err'][-1] *= 255/rendered_seq['feat_err'][-1].max()
if opts.use_proj:
rendered_seq['proj_err'][-1] *= sil_rszd
rendered_seq['proj_err'][-1] *= 255/rendered_seq['proj_err'][-1].max()
if opts.use_unc:
rendered_seq['unc_pred'][-1] -= rendered_seq['unc_pred'][-1].min()
rendered_seq['unc_pred'][-1] *= 255/rendered_seq['unc_pred'][-1].max()
# extract mesh sequences
for idx in range(len(idx_chunk)):
frameid=self.model.frameid[idx].long()
embedid=self.model.embedid[idx].long()
print('extracting frame %d'%(frameid.cpu().numpy()))
# run marching cubes
if dynamic_mesh:
if not opts.queryfw:
mesh_dict_rest=None
mesh_dict = self.extract_mesh(self.model,opts.chunk,
opts.sample_grid3d, opts.mc_threshold,
embedid=embedid, mesh_dict_in=mesh_dict_rest)
mesh=mesh_dict['mesh']
if mesh_dict_rest is not None and opts.ce_color:
mesh.visual.vertex_colors = mesh_dict_rest['mesh'].\
visual.vertex_colors # assign rest surface color
else:
# get view direction
obj_center = self.model.rtk[idx][:3,3:4]
cam_center = -self.model.rtk[idx][:3,:3].T.matmul(obj_center)[:,0]
view_dir = torch.cuda.FloatTensor(mesh.vertices, device=self.device) \
- cam_center[None]
vis = get_vertex_colors(self.model, mesh_dict_rest['mesh'],
frame_idx=idx, view_dir=view_dir)
mesh.visual.vertex_colors[:,:3] = vis*255
# save bones
if 'bones' in mesh_dict.keys():
bone = mesh_dict['bones'][0].cpu().numpy()
aux_seq['bone'].append(bone)
else:
mesh=mesh_dict_rest['mesh']
aux_seq['mesh'].append(mesh)
# save cams
aux_seq['rtk'].append(self.model.rtk[idx].cpu().numpy())
# save image list
impath = self.model.impath[frameid]
aux_seq['impath'].append(impath)
# save canonical mesh and extract skinning weights
mesh_rest = aux_seq['mesh_rest']
if len(mesh_rest.vertices)>100:
self.model.latest_vars['mesh_rest'] = mesh_rest
if opts.lbs:
bones_rst = self.model.bones
bones_rst,_ = correct_bones(self.model, bones_rst)
# compute skinning color
if mesh_rest.vertices.shape[0]>100:
rest_verts = torch.Tensor(mesh_rest.vertices).to(self.device)
nerf_skin = self.model.nerf_skin if opts.nerf_skin else None
rest_pose_code = self.model.rest_pose_code(torch.Tensor([0])\
.long().to(self.device))
skins = gauss_mlp_skinning(rest_verts[None],
self.model.embedding_xyz,
bones_rst, rest_pose_code,
nerf_skin, skin_aux=self.model.skin_aux)[0]
skins = skins.cpu().numpy()
num_bones = skins.shape[-1]
colormap = label_colormap()
# TODO use a larger color map
colormap = np.repeat(colormap[None],4,axis=0).reshape(-1,3)
colormap = colormap[:num_bones]
colormap = (colormap[None] * skins[...,None]).sum(1)
mesh_rest_skin = mesh_rest.copy()
mesh_rest_skin.visual.vertex_colors = colormap
aux_seq['mesh_rest_skin'] = mesh_rest_skin
aux_seq['bone_rest'] = bones_rst.cpu().numpy()
# draw camera trajectory
suffix_id=0
if hasattr(self.model, 'epoch'):
suffix_id = self.model.epoch
if opts.local_rank==0:
mesh_cam = draw_cams(aux_seq['rtk'])
mesh_cam.export('%s/mesh_cam-%02d.obj'%(self.save_dir,suffix_id))
mesh_path = '%s/mesh_rest-%02d.obj'%(self.save_dir,suffix_id)
mesh_rest.export(mesh_path)
if opts.lbs:
bone_rest = aux_seq['bone_rest']
bone_path = '%s/bone_rest-%02d.obj'%(self.save_dir,suffix_id)
save_bones(bone_rest, 0.1, bone_path)
# save images
for k,v in rendered_seq.items():
rendered_seq[k] = torch.cat(rendered_seq[k],0)
##TODO
#if opts.local_rank==0:
# print('saving %s to gif'%k)
# is_flow = self.isflow(k)
# upsample_frame = min(30,len(rendered_seq[k]))
# save_vid('%s/%s'%(self.save_dir,k),
# rendered_seq[k].cpu().numpy(),
# suffix='.gif', upsample_frame=upsample_frame,
# is_flow=is_flow)
return rendered_seq, aux_seq
def train(self):
opts = self.opts
if opts.local_rank==0:
log = SummaryWriter('%s/%s'%(opts.checkpoint_dir,opts.logname), comment=opts.logname)
else: log=None
self.model.module.total_steps = 0
self.model.module.progress = 0
torch.manual_seed(8) # do it again
torch.cuda.manual_seed(1)
# disable bones before warmup epochs are finished
if opts.lbs:
self.model.num_bone_used = 0
del self.model.module.nerf_models['bones']
if opts.lbs and opts.nerf_skin:
del self.model.module.nerf_models['nerf_skin']
# warmup shape
if opts.warmup_shape_ep>0:
self.warmup_shape(log)
# CNN pose warmup or load CNN
if opts.warmup_pose_ep>0 or opts.pose_cnn_path!='':
self.warmup_pose(log, pose_cnn_path=opts.pose_cnn_path)
else:
# save cameras to latest vars and file
if opts.use_rtk_file:
self.model.module.use_cam=True
self.extract_cams(self.dataloader)
self.model.module.use_cam=opts.use_cam
else:
self.extract_cams(self.dataloader)
#TODO train mlp
if opts.warmup_rootmlp:
# set se3 directly
rmat = torch.Tensor(self.model.latest_vars['rtk'][:,:3,:3])
quat = transforms.matrix_to_quaternion(rmat).to(self.device)
self.model.module.nerf_root_rts.base_rt.se3.data[:,3:] = quat
# clear buffers for pytorch1.10+
try: self.model._assign_modules_buffers()
except: pass
# set near-far plane
if opts.model_path=='':
self.reset_nf()
# reset idk in latest_vars
self.model.module.latest_vars['idk'][:] = 0.
#TODO save loaded wts of posecs
if opts.freeze_coarse:
self.model.module.shape_xyz_wt = \
grab_xyz_weights(self.model.module.nerf_coarse, clone=True)
self.model.module.skin_xyz_wt = \
grab_xyz_weights(self.model.module.nerf_skin, clone=True)
self.model.module.feat_xyz_wt = \
grab_xyz_weights(self.model.module.nerf_feat, clone=True)
#TODO reset beta
if opts.reset_beta:
self.model.module.nerf_coarse.beta.data[:] = 0.1
# start training
for epoch in range(0, self.num_epochs):
self.model.epoch = epoch
# evaluation
torch.cuda.empty_cache()
self.model.module.img_size = opts.render_size
rendered_seq, aux_seq = self.eval()
self.model.module.img_size = opts.img_size
if epoch==0: self.save_network('0') # to save some cameras
if opts.local_rank==0: self.add_image_grid(rendered_seq, log, epoch)
self.reset_hparams(epoch)
torch.cuda.empty_cache()
            ## TODO hard-coded
#if opts.freeze_proj:
# if self.model.module.progress<0.8:
# #opts.nsample=64
# opts.ndepth=2
# else:
# #opts.nsample = nsample
# opts.ndepth = self.model.module.ndepth_bk
self.train_one_epoch(epoch, log)
print('saving the model at the end of epoch {:d}, iters {:d}'.\
format(epoch, self.model.module.total_steps))
self.save_network('latest')
self.save_network(str(epoch+1))
@staticmethod
def save_cams(opts,aux_seq, save_prefix, latest_vars,datasets, evalsets, obj_scale,
trainloader=None, unc_filter=True):
"""
save cameras to dir and modify dataset
"""
mkdir_p(save_prefix)
dataset_dict={dataset.imglist[0].split('/')[-2]:dataset for dataset in datasets}
evalset_dict={dataset.imglist[0].split('/')[-2]:dataset for dataset in evalsets}
if trainloader is not None:
line_dict={dataset.imglist[0].split('/')[-2]:dataset for dataset in trainloader}
length = len(aux_seq['impath'])
valid_ids = aux_seq['is_valid']
idx_combine = 0
for i in range(length):
impath = aux_seq['impath'][i]
seqname = impath.split('/')[-2]
rtk = aux_seq['rtk'][i]
if unc_filter:
                # in the same sequence, find the closest valid frame and replace it
seq_idx = np.asarray([seqname == i.split('/')[-2] \
for i in aux_seq['impath']])
valid_ids_seq = np.where(valid_ids * seq_idx)[0]
if opts.local_rank==0 and i==0:
print('%s: %d frames are valid'%(seqname, len(valid_ids_seq)))
if len(valid_ids_seq)>0 and not aux_seq['is_valid'][i]:
closest_valid_idx = valid_ids_seq[np.abs(i-valid_ids_seq).argmin()]
rtk[:3,:3] = aux_seq['rtk'][closest_valid_idx][:3,:3]
# rescale translation according to input near-far plane
rtk[:3,3] = rtk[:3,3]*obj_scale
rtklist = dataset_dict[seqname].rtklist
idx = int(impath.split('/')[-1].split('.')[-2])
save_path = '%s/%s-%05d.txt'%(save_prefix, seqname, idx)
np.savetxt(save_path, rtk)
rtklist[idx] = save_path
evalset_dict[seqname].rtklist[idx] = save_path
if trainloader is not None:
line_dict[seqname].rtklist[idx] = save_path
#save to rtraw
latest_vars['rt_raw'][idx_combine] = rtk[:3,:4]
latest_vars['rtk'][idx_combine,:3,:3] = rtk[:3,:3]
if idx==len(rtklist)-2:
# to cover the last
save_path = '%s/%s-%05d.txt'%(save_prefix, seqname, idx+1)
if opts.local_rank==0: print('writing cam %s'%save_path)
np.savetxt(save_path, rtk)
rtklist[idx+1] = save_path
evalset_dict[seqname].rtklist[idx+1] = save_path
if trainloader is not None:
line_dict[seqname].rtklist[idx+1] = save_path
idx_combine += 1
latest_vars['rt_raw'][idx_combine] = rtk[:3,:4]
latest_vars['rtk'][idx_combine,:3,:3] = rtk[:3,:3]
idx_combine += 1
def extract_cams(self, full_loader):
# store cameras
opts = self.opts
idx_render = range(len(self.evalloader))
chunk = 50
aux_seq = []
for i in range(0, len(idx_render), chunk):
aux_seq.append(self.eval_cam(idx_render=idx_render[i:i+chunk]))
aux_seq = merge_dict(aux_seq)
aux_seq['rtk'] = np.asarray(aux_seq['rtk'])
aux_seq['kaug'] = np.asarray(aux_seq['kaug'])
aux_seq['masks'] = np.asarray(aux_seq['masks'])
aux_seq['is_valid'] = np.asarray(aux_seq['is_valid'])
aux_seq['err_valid'] = np.asarray(aux_seq['err_valid'])
save_prefix = '%s/init-cam'%(self.save_dir)
trainloader=self.trainloader.dataset.datasets
self.save_cams(opts,aux_seq, save_prefix,
self.model.module.latest_vars,
full_loader.dataset.datasets,
self.evalloader.dataset.datasets,
self.model.obj_scale, trainloader=trainloader,
unc_filter=opts.unc_filter)
        dist.barrier() # wait until all have finished
if opts.local_rank==0:
# draw camera trajectory
for dataset in full_loader.dataset.datasets:
seqname = dataset.imglist[0].split('/')[-2]
render_root_txt('%s/%s-'%(save_prefix,seqname), 0)
def reset_nf(self):
opts = self.opts
# save near-far plane
shape_verts = self.model.dp_verts_unit / 3 * self.model.near_far.mean()
shape_verts = shape_verts * 1.2
# save object bound if first stage
if opts.model_path=='' and opts.bound_factor>0:
shape_verts = shape_verts*opts.bound_factor
self.model.module.latest_vars['obj_bound'] = \
shape_verts.abs().max(0)[0].detach().cpu().numpy()
if self.model.near_far[:,0].sum()==0: # if no valid nf plane loaded
self.model.near_far.data = get_near_far(self.model.near_far.data,
self.model.latest_vars,
pts=shape_verts.detach().cpu().numpy())
save_path = '%s/init-nf.txt'%(self.save_dir)
save_nf = self.model.near_far.data.cpu().numpy() * self.model.obj_scale
np.savetxt(save_path, save_nf)
def warmup_shape(self, log):
opts = self.opts
# force using warmup forward, dataloader, cnn root
self.model.module.forward = self.model.module.forward_warmup_shape
full_loader = self.trainloader # store original loader
self.trainloader = range(200)
self.num_epochs = opts.warmup_shape_ep
# training
self.init_training()
for epoch in range(0, opts.warmup_shape_ep):
self.model.epoch = epoch
self.train_one_epoch(epoch, log, warmup=True)
self.save_network(str(epoch+1), 'mlp-')
# restore dataloader, rts, forward function
self.model.module.forward = self.model.module.forward_default
self.trainloader = full_loader
self.num_epochs = opts.num_epochs
# start from low learning rate again
self.init_training()
self.model.module.total_steps = 0
self.model.module.progress = 0.
def warmup_pose(self, log, pose_cnn_path):
opts = self.opts
# force using warmup forward, dataloader, cnn root
self.model.module.root_basis = 'cnn'
self.model.module.use_cam = False
self.model.module.forward = self.model.module.forward_warmup
full_loader = self.dataloader # store original loader
self.dataloader = range(200)
original_rp = self.model.module.nerf_root_rts
self.model.module.nerf_root_rts = self.model.module.dp_root_rts
del self.model.module.dp_root_rts
self.num_epochs = opts.warmup_pose_ep
self.model.module.is_warmup_pose=True
if pose_cnn_path=='':
# training
self.init_training()
for epoch in range(0, opts.warmup_pose_ep):
self.model.epoch = epoch
self.train_one_epoch(epoch, log, warmup=True)
self.save_network(str(epoch+1), 'cnn-')
# eval
#_,_ = self.model.forward_warmup(None)
# rendered_seq = self.model.warmup_rendered
# if opts.local_rank==0: self.add_image_grid(rendered_seq, log, epoch)
else:
pose_states = torch.load(opts.pose_cnn_path, map_location='cpu')
pose_states = self.rm_module_prefix(pose_states,
prefix='module.nerf_root_rts')
self.model.module.nerf_root_rts.load_state_dict(pose_states,
strict=False)
# extract camera and near far planes
self.extract_cams(full_loader)
# restore dataloader, rts, forward function
self.model.module.root_basis=opts.root_basis
self.model.module.use_cam = opts.use_cam
self.model.module.forward = self.model.module.forward_default
self.dataloader = full_loader
del self.model.module.nerf_root_rts
self.model.module.nerf_root_rts = original_rp
self.num_epochs = opts.num_epochs
self.model.module.is_warmup_pose=False
# start from low learning rate again
self.init_training()
self.model.module.total_steps = 0
self.model.module.progress = 0.
def train_one_epoch(self, epoch, log, warmup=False):
"""
        training loop for one epoch
"""
opts = self.opts
self.model.train()
dataloader = self.trainloader
if not warmup: dataloader.sampler.set_epoch(epoch) # necessary for shuffling
for i, batch in enumerate(dataloader):
if i==200*opts.accu_steps:
break
if opts.debug:
if 'start_time' in locals().keys():
torch.cuda.synchronize()
print('load time:%.2f'%(time.time()-start_time))
if not warmup:
self.model.module.progress = float(self.model.total_steps) /\
self.model.final_steps
self.select_loss_indicator(i)
self.update_root_indicator(i)
self.update_body_indicator(i)
self.update_shape_indicator(i)
self.update_cvf_indicator(i)
# rtk_all = self.model.module.compute_rts()
# self.model.module.rtk_all = rtk_all.clone()
#
# # change near-far plane for all views
# if self.model.module.progress>=opts.nf_reset:
# rtk_all = rtk_all.detach().cpu().numpy()
# valid_rts = self.model.module.latest_vars['idk'].astype(bool)
# self.model.module.latest_vars['rtk'][valid_rts,:3] = rtk_all[valid_rts]
# self.model.module.near_far.data = get_near_far(
# self.model.module.near_far.data,
# self.model.module.latest_vars)
#
# self.optimizer.zero_grad()
total_loss,aux_out = self.model(batch)
total_loss = total_loss/self.accu_steps
if opts.debug:
if 'start_time' in locals().keys():
torch.cuda.synchronize()
print('forward time:%.2f'%(time.time()-start_time))
total_loss.mean().backward()
if opts.debug:
if 'start_time' in locals().keys():
torch.cuda.synchronize()
print('forward back time:%.2f'%(time.time()-start_time))
if (i+1)%self.accu_steps == 0:
self.clip_grad(aux_out)
self.optimizer.step()
self.scheduler.step()
self.optimizer.zero_grad()
if aux_out['nerf_root_rts_g']>1*opts.clip_scale and \
self.model.total_steps>200*self.accu_steps:
latest_path = '%s/params_latest.pth'%(self.save_dir)
self.load_network(latest_path, is_eval=False, rm_prefix=False)
for i,param_group in enumerate(self.optimizer.param_groups):
aux_out['lr_%02d'%i] = param_group['lr']
self.model.module.total_steps += 1
self.model.module.counter_frz_rebone -= 1./self.model.final_steps
aux_out['counter_frz_rebone'] = self.model.module.counter_frz_rebone
if opts.local_rank==0:
self.save_logs(log, aux_out, self.model.module.total_steps,
epoch)
if opts.debug:
if 'start_time' in locals().keys():
torch.cuda.synchronize()
print('total step time:%.2f'%(time.time()-start_time))
torch.cuda.synchronize()
start_time = time.time()
def update_cvf_indicator(self, i):
"""
        whether to update canonical volume features
0: update all
1: freeze
"""
opts = self.opts
# during kp reprojection optimization
if (opts.freeze_proj and self.model.module.progress >= opts.proj_start and \
self.model.module.progress < (opts.proj_start+opts.proj_end)):
self.model.module.cvf_update = 1
else:
self.model.module.cvf_update = 0
# freeze shape after rebone
if self.model.module.counter_frz_rebone > 0:
self.model.module.cvf_update = 1
if opts.freeze_cvf:
self.model.module.cvf_update = 1
def update_shape_indicator(self, i):
"""
whether to update shape
0: update all
1: freeze shape
"""
opts = self.opts
# incremental optimization
# or during kp reprojection optimization
if (opts.model_path!='' and \
self.model.module.progress < opts.warmup_steps)\
or (opts.freeze_proj and self.model.module.progress >= opts.proj_start and \
self.model.module.progress <(opts.proj_start + opts.proj_end)):
self.model.module.shape_update = 1
else:
self.model.module.shape_update = 0
# freeze shape after rebone
if self.model.module.counter_frz_rebone > 0:
self.model.module.shape_update = 1
if opts.freeze_shape:
self.model.module.shape_update = 1
def update_root_indicator(self, i):
"""
whether to update root pose
1: update
0: freeze
"""
opts = self.opts
if (opts.freeze_proj and \
opts.root_stab and \
self.model.module.progress >=(opts.frzroot_start) and \
self.model.module.progress <=(opts.proj_start + opts.proj_end+0.01))\
            : # to stabilize
self.model.module.root_update = 0
else:
self.model.module.root_update = 1
# freeze shape after rebone
if self.model.module.counter_frz_rebone > 0:
self.model.module.root_update = 0
        if opts.freeze_root: # to stabilize
self.model.module.root_update = 0
def update_body_indicator(self, i):
"""
        whether to update body pose
1: update
0: freeze
"""
opts = self.opts
if opts.freeze_proj and \
self.model.module.progress <=opts.frzbody_end:
self.model.module.body_update = 0
else:
self.model.module.body_update = 1
def select_loss_indicator(self, i):
"""
0: flo
1: flo/sil/rgb
"""
opts = self.opts
if not opts.root_opt or \
self.model.module.progress > (opts.warmup_steps):
self.model.module.loss_select = 1
elif i%2 == 0:
self.model.module.loss_select = 0
else:
self.model.module.loss_select = 1
#self.model.module.loss_select=1
def reset_hparams(self, epoch):
"""
reset hyper-parameters based on current geometry / cameras
"""
opts = self.opts
mesh_rest = self.model.latest_vars['mesh_rest']
# reset object bound, for feature matching
if epoch>int(self.num_epochs*(opts.bound_reset)):
if mesh_rest.vertices.shape[0]>100:
self.model.latest_vars['obj_bound'] = 1.2*np.abs(mesh_rest.vertices).max(0)
# reinit bones based on extracted surface
# only reinit for the initialization phase
if opts.lbs and opts.model_path=='' and \
(epoch==int(self.num_epochs*opts.reinit_bone_steps) or\
epoch==0 or\
epoch==int(self.num_epochs*opts.warmup_steps)//2):
reinit_bones(self.model.module, mesh_rest, opts.num_bones)
self.init_training() # add new params to optimizer
if epoch>0:
# freeze weights of root pose in the following 1% iters
self.model.module.counter_frz_rebone = 0.01
#reset error stats
self.model.module.latest_vars['fp_err'] [:]=0
self.model.module.latest_vars['flo_err'] [:]=0
self.model.module.latest_vars['sil_err'] [:]=0
self.model.module.latest_vars['flo_err_hist'][:]=0
# need to add bones back at 2nd opt
if opts.model_path!='':
self.model.module.nerf_models['bones'] = self.model.module.bones
# add nerf-skin when the shape is good
if opts.lbs and opts.nerf_skin and \
epoch==int(self.num_epochs*opts.dskin_steps):
self.model.module.nerf_models['nerf_skin'] = self.model.module.nerf_skin
self.broadcast()
def broadcast(self):
"""
broadcast variables to other models
"""
dist.barrier()
if self.opts.lbs:
dist.broadcast_object_list(
[self.model.module.num_bones,
self.model.module.num_bone_used,],
0)
dist.broadcast(self.model.module.bones,0)
dist.broadcast(self.model.module.nerf_body_rts[1].rgb[0].weight, 0)
dist.broadcast(self.model.module.nerf_body_rts[1].rgb[0].bias, 0)
dist.broadcast(self.model.module.near_far,0)
def clip_grad(self, aux_out):
"""
gradient clipping
"""
is_invalid_grad=False
grad_nerf_coarse=[]
grad_nerf_beta=[]
grad_nerf_feat=[]
grad_nerf_beta_feat=[]
grad_nerf_fine=[]
grad_nerf_unc=[]
grad_nerf_flowbw=[]
grad_nerf_skin=[]
grad_nerf_vis=[]
grad_nerf_root_rts=[]
grad_nerf_body_rts=[]
grad_root_code=[]
grad_pose_code=[]
grad_env_code=[]
grad_vid_code=[]
grad_bones=[]
grad_skin_aux=[]
grad_ks=[]
grad_nerf_dp=[]
grad_csenet=[]
for name,p in self.model.named_parameters():
try:
pgrad_nan = p.grad.isnan()
if pgrad_nan.sum()>0:
print(name)
is_invalid_grad=True
except: pass
if 'nerf_coarse' in name and 'beta' not in name:
grad_nerf_coarse.append(p)
elif 'nerf_coarse' in name and 'beta' in name:
grad_nerf_beta.append(p)
elif 'nerf_feat' in name and 'beta' not in name:
grad_nerf_feat.append(p)
elif 'nerf_feat' in name and 'beta' in name:
grad_nerf_beta_feat.append(p)
elif 'nerf_fine' in name:
grad_nerf_fine.append(p)
elif 'nerf_unc' in name:
grad_nerf_unc.append(p)
elif 'nerf_flowbw' in name or 'nerf_flowfw' in name:
grad_nerf_flowbw.append(p)
elif 'nerf_skin' in name:
grad_nerf_skin.append(p)
elif 'nerf_vis' in name:
grad_nerf_vis.append(p)
elif 'nerf_root_rts' in name:
grad_nerf_root_rts.append(p)
elif 'nerf_body_rts' in name:
grad_nerf_body_rts.append(p)
elif 'root_code' in name:
grad_root_code.append(p)
elif 'pose_code' in name or 'rest_pose_code' in name:
grad_pose_code.append(p)
elif 'env_code' in name:
grad_env_code.append(p)
elif 'vid_code' in name:
grad_vid_code.append(p)
elif 'module.bones' == name:
grad_bones.append(p)
elif 'module.skin_aux' == name:
grad_skin_aux.append(p)
elif 'module.ks_param' == name:
grad_ks.append(p)
elif 'nerf_dp' in name:
grad_nerf_dp.append(p)
elif 'csenet' in name:
grad_csenet.append(p)
else: continue
# freeze root pose when using re-projection loss only
if self.model.module.root_update == 0:
self.zero_grad_list(grad_root_code)
self.zero_grad_list(grad_nerf_root_rts)
if self.model.module.body_update == 0:
self.zero_grad_list(grad_pose_code)
self.zero_grad_list(grad_nerf_body_rts)
if self.opts.freeze_body_mlp:
self.zero_grad_list(grad_nerf_body_rts)
if self.model.module.shape_update == 1:
self.zero_grad_list(grad_nerf_coarse)
self.zero_grad_list(grad_nerf_beta)
self.zero_grad_list(grad_nerf_vis)
#TODO add skinning
self.zero_grad_list(grad_bones)
self.zero_grad_list(grad_nerf_skin)
self.zero_grad_list(grad_skin_aux)
if self.model.module.cvf_update == 1:
self.zero_grad_list(grad_nerf_feat)
self.zero_grad_list(grad_nerf_beta_feat)
self.zero_grad_list(grad_csenet)
if self.opts.freeze_coarse:
# freeze shape
# this include nerf_coarse, nerf_skin (optional)
grad_coarse_mlp = []
grad_coarse_mlp += self.find_nerf_coarse(\
self.model.module.nerf_coarse)
grad_coarse_mlp += self.find_nerf_coarse(\
self.model.module.nerf_skin)
grad_coarse_mlp += self.find_nerf_coarse(\
self.model.module.nerf_feat)
self.zero_grad_list(grad_coarse_mlp)
#self.zero_grad_list(grad_nerf_coarse) # freeze shape
# freeze skinning
self.zero_grad_list(grad_bones)
self.zero_grad_list(grad_skin_aux)
#self.zero_grad_list(grad_nerf_skin) # freeze fine shape
## freeze pose mlp
#self.zero_grad_list(grad_nerf_body_rts)
# add vis
self.zero_grad_list(grad_nerf_vis)
#print(self.model.module.nerf_coarse.xyz_encoding_1[0].weight[0,:])
clip_scale=self.opts.clip_scale
#TODO don't clip root pose
aux_out['nerf_coarse_g'] = clip_grad_norm_(grad_nerf_coarse, 1*clip_scale)
aux_out['nerf_beta_g'] = clip_grad_norm_(grad_nerf_beta, 1*clip_scale)
aux_out['nerf_feat_g'] = clip_grad_norm_(grad_nerf_feat, .1*clip_scale)
aux_out['nerf_beta_feat_g']= clip_grad_norm_(grad_nerf_beta_feat,.1*clip_scale)
aux_out['nerf_fine_g'] = clip_grad_norm_(grad_nerf_fine, .1*clip_scale)
aux_out['nerf_unc_g'] = clip_grad_norm_(grad_nerf_unc, .1*clip_scale)
aux_out['nerf_flowbw_g'] = clip_grad_norm_(grad_nerf_flowbw, .1*clip_scale)
aux_out['nerf_skin_g'] = clip_grad_norm_(grad_nerf_skin, .1*clip_scale)
aux_out['nerf_vis_g'] = clip_grad_norm_(grad_nerf_vis, .1*clip_scale)
aux_out['nerf_root_rts_g'] = clip_grad_norm_(grad_nerf_root_rts,100*clip_scale)
aux_out['nerf_body_rts_g'] = clip_grad_norm_(grad_nerf_body_rts,100*clip_scale)
aux_out['root_code_g']= clip_grad_norm_(grad_root_code, .1*clip_scale)
aux_out['pose_code_g']= clip_grad_norm_(grad_pose_code, 100*clip_scale)
aux_out['env_code_g'] = clip_grad_norm_(grad_env_code, .1*clip_scale)
aux_out['vid_code_g'] = clip_grad_norm_(grad_vid_code, .1*clip_scale)
aux_out['bones_g'] = clip_grad_norm_(grad_bones, 1*clip_scale)
aux_out['skin_aux_g'] = clip_grad_norm_(grad_skin_aux, .1*clip_scale)
aux_out['ks_g'] = clip_grad_norm_(grad_ks, .1*clip_scale)
aux_out['nerf_dp_g'] = clip_grad_norm_(grad_nerf_dp, .1*clip_scale)
aux_out['csenet_g'] = clip_grad_norm_(grad_csenet, .1*clip_scale)
#if aux_out['nerf_root_rts_g']>10:
# is_invalid_grad = True
if is_invalid_grad:
self.zero_grad_list(self.model.parameters())
@staticmethod
def find_nerf_coarse(nerf_model):
"""
zero grad for coarse component connected to inputs,
and return intermediate params
"""
param_list = []
input_layers=[0]+nerf_model.skips
input_wt_names = []
for layer in input_layers:
input_wt_names.append(f"xyz_encoding_{layer+1}.0.weight")
for name,p in nerf_model.named_parameters():
if name in input_wt_names:
                # get the weights according to the coarse positional encodings
# 63 = 3 + 60
# 60 = (num_freqs, 2, 3)
out_dim = p.shape[0]
pos_dim = nerf_model.in_channels_xyz-nerf_model.in_channels_code
# TODO
num_coarse = 8 # out of 10
#num_coarse = 10 # out of 10
#num_coarse = 1 # out of 10
# p.grad[:,:3] = 0 # xyz
# p.grad[:,3:pos_dim].view(out_dim,-1,6)[:,:num_coarse] = 0 # xyz-coarse
p.grad[:,pos_dim:] = 0 # others
else:
param_list.append(p)
return param_list
@staticmethod
def render_vid(model, batch):
opts=model.opts
model.set_input(batch)
rtk = model.rtk
kaug=model.kaug.clone()
embedid=model.embedid
rendered, _ = model.nerf_render(rtk, kaug, embedid, ndepth=opts.ndepth)
if 'xyz_camera_vis' in rendered.keys(): del rendered['xyz_camera_vis']
if 'xyz_canonical_vis' in rendered.keys(): del rendered['xyz_canonical_vis']
if 'pts_exp_vis' in rendered.keys(): del rendered['pts_exp_vis']
if 'pts_pred_vis' in rendered.keys(): del rendered['pts_pred_vis']
rendered_first = {}
for k,v in rendered.items():
if v.dim()>0:
bs=v.shape[0]
rendered_first[k] = v[:bs//2] # remove loss term
return rendered_first
@staticmethod
def extract_mesh(model,chunk,grid_size,
#threshold = -0.005,
threshold = -0.002,
#threshold = 0.,
embedid=None,
mesh_dict_in=None):
opts = model.opts
mesh_dict = {}
if model.near_far is not None:
bound = model.latest_vars['obj_bound']
else: bound=1.5*np.asarray([1,1,1])
if mesh_dict_in is None:
ptx = np.linspace(-bound[0], bound[0], grid_size).astype(np.float32)
pty = np.linspace(-bound[1], bound[1], grid_size).astype(np.float32)
ptz = np.linspace(-bound[2], bound[2], grid_size).astype(np.float32)
query_yxz = np.stack(np.meshgrid(pty, ptx, ptz), -1) # (y,x,z)
#pts = np.linspace(-bound, bound, grid_size).astype(np.float32)
#query_yxz = np.stack(np.meshgrid(pts, pts, pts), -1) # (y,x,z)
query_yxz = torch.Tensor(query_yxz).to(model.device).view(-1, 3)
query_xyz = torch.cat([query_yxz[:,1:2], query_yxz[:,0:1], query_yxz[:,2:3]],-1)
query_dir = torch.zeros_like(query_xyz)
bs_pts = query_xyz.shape[0]
out_chunks = []
for i in range(0, bs_pts, chunk):
query_xyz_chunk = query_xyz[i:i+chunk]
query_dir_chunk = query_dir[i:i+chunk]
# backward warping
if embedid is not None and not opts.queryfw:
query_xyz_chunk, mesh_dict = warp_bw(opts, model, mesh_dict,
query_xyz_chunk, embedid)
if opts.symm_shape:
#TODO set to x-symmetric
query_xyz_chunk[...,0] = query_xyz_chunk[...,0].abs()
xyz_embedded = model.embedding_xyz(query_xyz_chunk) # (N, embed_xyz_channels)
out_chunks += [model.nerf_coarse(xyz_embedded, sigma_only=True)]
vol_o = torch.cat(out_chunks, 0)
vol_o = vol_o.view(grid_size, grid_size, grid_size)
#vol_o = F.softplus(vol_o)
if not opts.full_mesh:
#TODO set density of non-observable points to small value
if model.latest_vars['idk'].sum()>0:
vis_chunks = []
for i in range(0, bs_pts, chunk):
query_xyz_chunk = query_xyz[i:i+chunk]
if opts.nerf_vis:
                        # this leaves no room for hallucination and is not what we want
xyz_embedded = model.embedding_xyz(query_xyz_chunk) # (N, embed_xyz_channels)
vis_chunk_nerf = model.nerf_vis(xyz_embedded)
vis_chunk = vis_chunk_nerf[...,0].sigmoid()
else:
#TODO deprecated!
vis_chunk = compute_point_visibility(query_xyz_chunk.cpu(),
model.latest_vars, model.device)[None]
vis_chunks += [vis_chunk]
vol_visi = torch.cat(vis_chunks, 0)
vol_visi = vol_visi.view(grid_size, grid_size, grid_size)
vol_o[vol_visi<0.5] = -1
## save color of sampled points
#cmap = cm.get_cmap('cool')
##pts_col = cmap(vol_visi.float().view(-1).cpu())
#pts_col = cmap(vol_o.sigmoid().view(-1).cpu())
#mesh = trimesh.Trimesh(query_xyz.view(-1,3).cpu(), vertex_colors=pts_col)
#mesh.export('0.obj')
#pdb.set_trace()
print('fraction occupied:', (vol_o > threshold).float().mean())
vertices, triangles = mcubes.marching_cubes(vol_o.cpu().numpy(), threshold)
vertices = (vertices - grid_size/2)/grid_size*2*bound[None, :]
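        # Editor's note (hedged): the line above maps marching-cubes grid
        # indices in [0, grid_size) back to world coordinates in
        # [-bound, bound] per axis: x_world = (idx - grid_size/2) / grid_size * 2 * bound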
mesh = trimesh.Trimesh(vertices, triangles)
# mesh post-processing
if len(mesh.vertices)>0:
if opts.use_cc:
# keep the largest mesh
mesh = [i for i in mesh.split(only_watertight=False)]
mesh = sorted(mesh, key=lambda x:x.vertices.shape[0])
mesh = mesh[-1]
# assign color based on canonical location
vis = mesh.vertices
try:
model.module.vis_min = vis.min(0)[None]
model.module.vis_len = vis.max(0)[None] - vis.min(0)[None]
except: # test time
model.vis_min = vis.min(0)[None]
model.vis_len = vis.max(0)[None] - vis.min(0)[None]
vis = vis - model.vis_min
vis = vis / model.vis_len
if not opts.ce_color:
vis = get_vertex_colors(model, mesh, frame_idx=0)
mesh.visual.vertex_colors[:,:3] = vis*255
# forward warping
if embedid is not None and opts.queryfw:
mesh = mesh_dict_in['mesh'].copy()
vertices = mesh.vertices
vertices, mesh_dict = warp_fw(opts, model, mesh_dict,
vertices, embedid)
mesh.vertices = vertices
mesh_dict['mesh'] = mesh
return mesh_dict
def save_logs(self, log, aux_output, total_steps, epoch):
for k,v in aux_output.items():
self.add_scalar(log, k, aux_output,total_steps)
def add_image_grid(self, rendered_seq, log, epoch):
for k,v in rendered_seq.items():
grid_img = image_grid(rendered_seq[k],3,3)
if k=='depth_rnd':scale=True
elif k=='occ':scale=True
elif k=='unc_pred':scale=True
elif k=='proj_err':scale=True
elif k=='feat_err':scale=True
else: scale=False
self.add_image(log, k, grid_img, epoch, scale=scale)
def add_image(self, log,tag,timg,step,scale=True):
"""
        timg: image tensor of shape (H, W) or (H, W, C)
"""
if self.isflow(tag):
timg = timg.detach().cpu().numpy()
timg = flow_to_image(timg)
elif scale:
timg = (timg-timg.min())/(timg.max()-timg.min())
else:
timg = torch.clamp(timg, 0,1)
if len(timg.shape)==2:
formats='HW'
elif timg.shape[0]==3:
formats='CHW'
print('error'); pdb.set_trace()
else:
formats='HWC'
log.add_image(tag,timg,step,dataformats=formats)
@staticmethod
def add_scalar(log,tag,data,step):
if tag in data.keys():
log.add_scalar(tag, data[tag], step)
@staticmethod
def del_key(states, key):
if key in states.keys():
del states[key]
@staticmethod
def isflow(tag):
flolist = ['flo_coarse', 'fdp_coarse', 'flo', 'fdp', 'flo_at_samp']
if tag in flolist:
return True
else:
return False
@staticmethod
def zero_grad_list(paramlist):
"""
Clears the gradients of all optimized :class:`torch.Tensor`
"""
for p in paramlist:
if p.grad is not None:
p.grad.detach_()
p.grad.zero_()
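# Editor's sketch (hedged): minimal, self-contained illustration of the
# per-group gradient-clipping pattern used in v2s_trainer.clip_grad above,
# shown on a toy linear layer rather than the banmo model.
def _example_groupwise_clip():
    import torch
    from torch.nn.utils import clip_grad_norm_
    net = torch.nn.Linear(4, 2)
    net(torch.randn(8, 4)).pow(2).mean().backward()
    weights = [p for n, p in net.named_parameters() if 'weight' in n]
    biases = [p for n, p in net.named_parameters() if 'bias' in n]
    return {'weight_g': clip_grad_norm_(weights, 1.0),   # looser clip
            'bias_g': clip_grad_norm_(biases, 0.1)}      # tighter clip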
|
banmo-main
|
nnutils/train_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# adapted from nerf-pl
import numpy as np
import pdb
import torch
import torch.nn.functional as F
from pytorch3d import transforms
from nnutils.geom_utils import lbs, Kmatinv, mat2K, pinhole_cam, obj_to_cam,\
vec_to_sim3, rtmat_invert, rot_angle, mlp_skinning,\
bone_transform, skinning, vrender_flo, \
gauss_mlp_skinning, diff_flo
from nnutils.loss_utils import elastic_loss, visibility_loss, feat_match_loss,\
kp_reproj_loss, compute_pts_exp, kp_reproj, evaluate_mlp
def render_rays(models,
embeddings,
rays,
N_samples=64,
use_disp=False,
perturb=0,
noise_std=1,
chunk=1024*32,
obj_bound=None,
use_fine=False,
img_size=None,
progress=None,
opts=None,
render_vis=False,
):
"""
Render rays by computing the output of @model applied on @rays
Inputs:
models: list of NeRF models (coarse and fine) defined in nerf.py
embeddings: list of embedding models of origin and direction defined in nerf.py
rays: (N_rays, 3+3+2), ray origins, directions and near, far depth bounds
N_samples: number of coarse samples per ray
use_disp: whether to sample in disparity space (inverse depth)
perturb: factor to perturb the sampling position on the ray (for coarse model only)
noise_std: factor to perturb the model's prediction of sigma
chunk: the chunk size in batched inference
Outputs:
result: dictionary containing final rgb and depth maps for coarse and fine models
"""
if use_fine: N_samples = N_samples//2 # use half samples to importance sample
# Extract models from lists
embedding_xyz = embeddings['xyz']
embedding_dir = embeddings['dir']
# Decompose the inputs
rays_o = rays['rays_o']
rays_d = rays['rays_d'] # both (N_rays, 3)
near = rays['near']
far = rays['far'] # both (N_rays, 1)
N_rays = rays_d.shape[0]
# Embed direction
rays_d_norm = rays_d / rays_d.norm(2,-1)[:,None]
dir_embedded = embedding_dir(rays_d_norm) # (N_rays, embed_dir_channels)
# Sample depth points
z_steps = torch.linspace(0, 1, N_samples, device=rays_d.device) # (N_samples)
if not use_disp: # use linear sampling in depth space
z_vals = near * (1-z_steps) + far * z_steps
else: # use linear sampling in disparity space
z_vals = 1/(1/near * (1-z_steps) + 1/far * z_steps)
z_vals = z_vals.expand(N_rays, N_samples)
if perturb > 0: # perturb sampling depths (z_vals)
z_vals_mid = 0.5 * (z_vals[: ,:-1] + z_vals[: ,1:]) # (N_rays, N_samples-1) interval mid points
# get intervals between samples
upper = torch.cat([z_vals_mid, z_vals[: ,-1:]], -1)
lower = torch.cat([z_vals[: ,:1], z_vals_mid], -1)
perturb_rand = perturb * torch.rand(z_vals.shape, device=rays_d.device)
z_vals = lower + (upper - lower) * perturb_rand
# zvals are not optimized
# produce points in the root body space
xyz_sampled = rays_o.unsqueeze(1) + \
rays_d.unsqueeze(1) * z_vals.unsqueeze(2) # (N_rays, N_samples, 3)
if use_fine: # sample points for fine model
# output:
# loss: 'img_coarse', 'sil_coarse', 'feat_err', 'proj_err'
# 'vis_loss', 'flo/fdp_coarse', 'flo/fdp_valid',
# not loss: 'depth_rnd', 'pts_pred', 'pts_exp'
with torch.no_grad():
_, weights_coarse = inference_deform(xyz_sampled, rays, models,
chunk, N_samples,
N_rays, embedding_xyz, rays_d, noise_std,
obj_bound, dir_embedded, z_vals,
img_size, progress,opts,fine_iter=False)
# reset N_importance
N_importance = N_samples
z_vals_mid = 0.5 * (z_vals[: ,:-1] + z_vals[: ,1:])
z_vals_ = sample_pdf(z_vals_mid, weights_coarse[:, 1:-1],
N_importance, det=(perturb==0)).detach()
        # detach so that grad doesn't propagate to weights_coarse from here
z_vals, _ = torch.sort(torch.cat([z_vals, z_vals_], -1), -1)
xyz_sampled = rays_o.unsqueeze(1) + \
rays_d.unsqueeze(1) * z_vals.unsqueeze(2)
N_samples = N_samples + N_importance # get back to original # of samples
result, _ = inference_deform(xyz_sampled, rays, models,
chunk, N_samples,
N_rays, embedding_xyz, rays_d, noise_std,
obj_bound, dir_embedded, z_vals,
img_size, progress,opts,render_vis=render_vis)
return result
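# Editor's sketch (hedged): the minimal `rays` dictionary read at the top of
# render_rays above; keys and shapes follow its docstring. The deformation
# path (inference_deform) may read further entries not shown here, and the
# origins/directions are placeholders supplied by the caller.
def _example_make_rays(rays_o, rays_d, near=0.1, far=10.0):
    import torch
    n = rays_o.shape[0]
    return {'rays_o': rays_o,                                        # (N_rays, 3)
            'rays_d': rays_d,                                        # (N_rays, 3)
            'near': torch.full((n, 1), near, device=rays_o.device),  # (N_rays, 1)
            'far': torch.full((n, 1), far, device=rays_o.device)}    # (N_rays, 1)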
def inference(models, embedding_xyz, xyz_, dir_, dir_embedded, z_vals,
N_rays, N_samples,chunk, noise_std,
env_code=None, weights_only=False, clip_bound = None, vis_pred=None):
"""
Helper function that performs model inference.
Inputs:
models: dict of NeRF models; the 'coarse' entry provides color and sigma
embedding_xyz: embedding module for xyz
xyz_: (N_rays, N_samples_, 3) sampled positions
N_samples_ is the number of sampled points in each ray;
= N_samples for coarse model
= N_samples+N_importance for fine model
dir_: (N_rays, 3) ray directions
dir_embedded: (N_rays, embed_dir_channels) embedded directions
z_vals: (N_rays, N_samples_) depths of the sampled positions
weights_only: do inference on sigma only or not
Outputs:
rgb_final: (N_rays, 3) the final rgb image
feat_final: (N_rays, num_feat) the rendered feature descriptors
depth_final: (N_rays) depth map
weights: (N_rays, N_samples_): weights of each sample
visibility: (N_rays, N_samples_): accumulated transmittance before each sample
"""
nerf_sdf = models['coarse']
N_samples_ = xyz_.shape[1]
# Embed directions
xyz_ = xyz_.view(-1, 3) # (N_rays*N_samples_, 3)
if not weights_only:
dir_embedded = torch.repeat_interleave(dir_embedded, repeats=N_samples_, dim=0)
# (N_rays*N_samples_, embed_dir_channels)
# Perform model inference to get rgb and raw sigma
chunk_size=4096
B = xyz_.shape[0]
xyz_input = xyz_.view(N_rays,N_samples,3)
out = evaluate_mlp(nerf_sdf, xyz_input,
embed_xyz = embedding_xyz,
dir_embedded = dir_embedded.view(N_rays,N_samples,-1),
code=env_code,
chunk=chunk_size, sigma_only=weights_only).view(B,-1)
rgbsigma = out.view(N_rays, N_samples_, 4)
rgbs = rgbsigma[..., :3] # (N_rays, N_samples_, 3)
sigmas = rgbsigma[..., 3] # (N_rays, N_samples_)
if 'nerf_feat' in models.keys():
nerf_feat = models['nerf_feat']
feat = evaluate_mlp(nerf_feat, xyz_input,
embed_xyz = embedding_xyz,
chunk=chunk_size).view(N_rays,N_samples_,-1)
else:
feat = torch.zeros_like(rgbs)
# Convert these values using volume rendering (Section 4)
deltas = z_vals[:, 1:] - z_vals[:, :-1] # (N_rays, N_samples_-1)
# a hacky way to ensure the probabilities sum up to 1:
# the last bin is given infinite length, so its probability no longer corresponds to a real interval
delta_inf = 1e10 * torch.ones_like(deltas[:, :1]) # (N_rays, 1) the last delta is infinity
deltas = torch.cat([deltas, delta_inf], -1) # (N_rays, N_samples_)
# Multiply each distance by the norm of its corresponding direction ray
# to convert to real world distance (accounts for non-unit directions).
deltas = deltas * torch.norm(dir_.unsqueeze(1), dim=-1)
noise = torch.randn(sigmas.shape, device=sigmas.device) * noise_std
# compute alpha by the formula (3)
sigmas = sigmas+noise
#sigmas = F.softplus(sigmas)
#sigmas = torch.relu(sigmas)
ibetas = 1/(nerf_sdf.beta.abs()+1e-9)
#ibetas = 100
sdf = -sigmas
sigmas = (0.5 + 0.5 * sdf.sign() * torch.expm1(-sdf.abs() * ibetas)) # 0-1
# alternative:
#sigmas = F.sigmoid(-sdf*ibetas)
sigmas = sigmas * ibetas
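# The two lines above implement a VolSDF-style density: the raw MLP output is
# treated as a negated signed distance and mapped through the CDF of a Laplace
# distribution with learned scale beta, i.e. density = Psi_beta(-sdf) / beta.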
alphas = 1-torch.exp(-deltas*sigmas) # (N_rays, N_samples_), p_i
#set out-of-bound and nonvisible alphas to zero
if clip_bound is not None:
clip_bound = torch.Tensor(clip_bound).to(xyz_.device)[None,None]
oob = (xyz_.abs()>clip_bound).sum(-1).view(N_rays,N_samples)>0
alphas[oob]=0
if vis_pred is not None:
alphas[vis_pred<0.5] = 0
alphas_shifted = \
torch.cat([torch.ones_like(alphas[:, :1]), 1-alphas+1e-10], -1) # [1, a1, a2, ...]
alpha_prod = torch.cumprod(alphas_shifted, -1)[:, :-1]
weights = alphas * alpha_prod # (N_rays, N_samples_)
weights_sum = weights.sum(1) # (N_rays), the accumulated opacity along the rays
# equals "1 - (1-a1)(1-a2)...(1-an)" mathematically
visibility = alpha_prod.detach() # transmittance before each sample: [1, q_0, q_0*q_1, ...], q_i = 1-alpha_i
# compute final weighted outputs
rgb_final = torch.sum(weights.unsqueeze(-1)*rgbs, -2) # (N_rays, 3)
feat_final = torch.sum(weights.unsqueeze(-1)*feat, -2) # (N_rays, num_feat)
depth_final = torch.sum(weights*z_vals, -1) # (N_rays)
return rgb_final, feat_final, depth_final, weights, visibility
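# Illustrative sketch (not part of the original banmo code): the alpha-compositing
# rule used in inference() above, applied to a single toy ray with 5 samples.
def _demo_volume_render_weights():
    import torch
    sigmas = torch.tensor([[0.0, 0.2, 5.0, 5.0, 0.1]])   # densities, (1 ray, 5 samples)
    deltas = torch.tensor([[0.1, 0.1, 0.1, 0.1, 1e10]])  # interval lengths, last one "infinite"
    alphas = 1 - torch.exp(-deltas * sigmas)              # opacity of each interval
    alphas_shifted = torch.cat([torch.ones_like(alphas[:, :1]), 1 - alphas + 1e-10], -1)
    transmittance = torch.cumprod(alphas_shifted, -1)[:, :-1]
    weights = alphas * transmittance                      # per-sample contribution
    return weights                                        # sums to ~1 on this opaque ray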
def inference_deform(xyz_coarse_sampled, rays, models, chunk, N_samples,
N_rays, embedding_xyz, rays_d, noise_std,
obj_bound, dir_embedded, z_vals,
img_size, progress,opts, fine_iter=True,
render_vis=False):
"""
fine_iter: whether to render loss-related terms
render_vis: used for novel view synthesis
"""
is_training = models['coarse'].training
xys = rays['xys']
# root space point correspondence in t2
if opts.dist_corresp:
xyz_coarse_target = xyz_coarse_sampled.clone()
xyz_coarse_dentrg = xyz_coarse_sampled.clone()
xyz_coarse_frame = xyz_coarse_sampled.clone()
# free deform
if 'flowbw' in models.keys():
model_flowbw = models['flowbw']
model_flowfw = models['flowfw']
time_embedded = rays['time_embedded'][:,None]
xyz_coarse_embedded = embedding_xyz(xyz_coarse_sampled)
flow_bw = evaluate_mlp(model_flowbw, xyz_coarse_embedded,
chunk=chunk//N_samples, xyz=xyz_coarse_sampled, code=time_embedded)
xyz_coarse_sampled=xyz_coarse_sampled + flow_bw
if fine_iter:
# cycle loss (in the joint canonical space)
xyz_coarse_embedded = embedding_xyz(xyz_coarse_sampled)
flow_fw = evaluate_mlp(model_flowfw, xyz_coarse_embedded,
chunk=chunk//N_samples, xyz=xyz_coarse_sampled,code=time_embedded)
frame_cyc_dis = (flow_bw+flow_fw).norm(2,-1)
# rigidity loss
frame_disp3d = flow_fw.norm(2,-1)
if "time_embedded_target" in rays.keys():
time_embedded_target = rays['time_embedded_target'][:,None]
flow_fw = evaluate_mlp(model_flowfw, xyz_coarse_embedded,
chunk=chunk//N_samples, xyz=xyz_coarse_sampled,code=time_embedded_target)
xyz_coarse_target=xyz_coarse_sampled + flow_fw
if "time_embedded_dentrg" in rays.keys():
time_embedded_dentrg = rays['time_embedded_dentrg'][:,None]
flow_fw = evaluate_mlp(model_flowfw, xyz_coarse_embedded,
chunk=chunk//N_samples, xyz=xyz_coarse_sampled,code=time_embedded_dentrg)
xyz_coarse_dentrg=xyz_coarse_sampled + flow_fw
elif 'bones' in models.keys():
bones_rst = models['bones_rst']
bone_rts_fw = rays['bone_rts']
skin_aux = models['skin_aux']
rest_pose_code = models['rest_pose_code']
rest_pose_code = rest_pose_code(torch.Tensor([0]).long().to(bones_rst.device))
if 'nerf_skin' in models.keys():
# compute delta skinning weights of bs, N, B
nerf_skin = models['nerf_skin']
else:
nerf_skin = None
time_embedded = rays['time_embedded'][:,None]
# coords after deform
bones_dfm = bone_transform(bones_rst, bone_rts_fw, is_vec=True)
skin_backward = gauss_mlp_skinning(xyz_coarse_sampled, embedding_xyz,
bones_dfm, time_embedded, nerf_skin, skin_aux=skin_aux)
# backward skinning
xyz_coarse_sampled, bones_dfm = lbs(bones_rst,
bone_rts_fw,
skin_backward,
xyz_coarse_sampled,
)
if fine_iter:
#if opts.dist_corresp:
skin_forward = gauss_mlp_skinning(xyz_coarse_sampled, embedding_xyz,
bones_rst,rest_pose_code, nerf_skin, skin_aux=skin_aux)
# cycle loss (in the joint canonical space)
xyz_coarse_frame_cyc,_ = lbs(bones_rst, bone_rts_fw,
skin_forward, xyz_coarse_sampled, backward=False)
frame_cyc_dis = (xyz_coarse_frame - xyz_coarse_frame_cyc).norm(2,-1)
# rigidity loss (not used as optimization objective)
num_bone = bones_rst.shape[0]
bone_fw_reshape = bone_rts_fw.view(-1,num_bone,12)
bone_trn = bone_fw_reshape[:,:,9:12]
bone_rot = bone_fw_reshape[:,:,0:9].view(-1,num_bone,3,3)
frame_rigloss = bone_trn.pow(2).sum(-1)+rot_angle(bone_rot)
if opts.dist_corresp and 'bone_rts_target' in rays.keys():
bone_rts_target = rays['bone_rts_target']
xyz_coarse_target,_ = lbs(bones_rst, bone_rts_target,
skin_forward, xyz_coarse_sampled,backward=False)
if opts.dist_corresp and 'bone_rts_dentrg' in rays.keys():
bone_rts_dentrg = rays['bone_rts_dentrg']
xyz_coarse_dentrg,_ = lbs(bones_rst, bone_rts_dentrg,
skin_forward, xyz_coarse_sampled,backward=False)
# nerf shape/rgb
model_coarse = models['coarse']
if 'env_code' in rays.keys():
env_code = rays['env_code']
else:
env_code = None
# set out of bounds weights to zero
if render_vis:
clip_bound = obj_bound
xyz_embedded = embedding_xyz(xyz_coarse_sampled)
vis_pred = evaluate_mlp(models['nerf_vis'],
xyz_embedded, chunk=chunk)[...,0].sigmoid()
else:
clip_bound = None
vis_pred = None
if opts.symm_shape:
##TODO set to x-symmetric here
symm_ratio = 0.5
xyz_x = xyz_coarse_sampled[...,:1].clone()
symm_mask = torch.rand_like(xyz_x) < symm_ratio
xyz_x[symm_mask] = -xyz_x[symm_mask]
xyz_input = torch.cat([xyz_x, xyz_coarse_sampled[...,1:3]],-1)
else:
xyz_input = xyz_coarse_sampled
rgb_coarse, feat_rnd, depth_rnd, weights_coarse, vis_coarse = \
inference(models, embedding_xyz, xyz_input, rays_d,
dir_embedded, z_vals, N_rays, N_samples, chunk, noise_std,
weights_only=False, env_code=env_code,
clip_bound=clip_bound, vis_pred=vis_pred)
sil_coarse = weights_coarse[:,:-1].sum(1)
result = {'img_coarse': rgb_coarse,
'depth_rnd': depth_rnd,
'sil_coarse': sil_coarse,
}
# render visibility scores
if render_vis:
result['vis_pred'] = (vis_pred * weights_coarse).sum(-1)
if fine_iter:
if opts.use_corresp:
# for flow rendering
pts_exp = compute_pts_exp(weights_coarse, xyz_coarse_sampled)
pts_target = kp_reproj(pts_exp, models, embedding_xyz, rays,
to_target=True) # N,1,2
# viser feature matching
if 'feats_at_samp' in rays.keys():
feats_at_samp = rays['feats_at_samp']
nerf_feat = models['nerf_feat']
xyz_coarse_sampled_feat = xyz_coarse_sampled
weights_coarse_feat = weights_coarse
pts_pred, pts_exp, feat_err = feat_match_loss(nerf_feat, embedding_xyz,
feats_at_samp, xyz_coarse_sampled_feat, weights_coarse_feat,
obj_bound, is_training=is_training)
# 3d-2d projection
proj_err = kp_reproj_loss(pts_pred, xys, models,
embedding_xyz, rays)
proj_err = proj_err/img_size * 2
result['pts_pred'] = pts_pred
result['pts_exp'] = pts_exp
result['feat_err'] = feat_err # will be used as loss
result['proj_err'] = proj_err # will be used as loss
if opts.dist_corresp and 'rtk_vec_target' in rays.keys():
# compute correspondence: root space to target view space
# RT: root space to camera space
rtk_vec_target = rays['rtk_vec_target']
Rmat = rtk_vec_target[:,0:9].view(N_rays,1,3,3)
Tmat = rtk_vec_target[:,9:12].view(N_rays,1,3)
Kinv = rtk_vec_target[:,12:21].view(N_rays,1,3,3)
K = mat2K(Kmatinv(Kinv))
xyz_coarse_target = obj_to_cam(xyz_coarse_target, Rmat, Tmat)
xyz_coarse_target = pinhole_cam(xyz_coarse_target,K)
if opts.dist_corresp and 'rtk_vec_dentrg' in rays.keys():
# compute correspondence: root space to dentrg view space
# RT: root space to camera space
rtk_vec_dentrg = rays['rtk_vec_dentrg']
Rmat = rtk_vec_dentrg[:,0:9].view(N_rays,1,3,3)
Tmat = rtk_vec_dentrg[:,9:12].view(N_rays,1,3)
Kinv = rtk_vec_dentrg[:,12:21].view(N_rays,1,3,3)
K = mat2K(Kmatinv(Kinv))
xyz_coarse_dentrg = obj_to_cam(xyz_coarse_dentrg, Rmat, Tmat)
xyz_coarse_dentrg = pinhole_cam(xyz_coarse_dentrg,K)
# raw 3d points for visualization
result['xyz_camera_vis'] = xyz_coarse_frame
if 'flowbw' in models.keys() or 'bones' in models.keys():
result['xyz_canonical_vis'] = xyz_coarse_sampled
if 'feats_at_samp' in rays.keys():
result['pts_exp_vis'] = pts_exp
result['pts_pred_vis'] = pts_pred
if 'flowbw' in models.keys() or 'bones' in models.keys():
# cycle loss (in the joint canonical space)
#if opts.dist_corresp:
result['frame_cyc_dis'] = (frame_cyc_dis * weights_coarse.detach()).sum(-1)
#else:
# pts_exp_reg = pts_exp[:,None].detach()
# skin_forward = gauss_mlp_skinning(pts_exp_reg, embedding_xyz,
# bones_rst,rest_pose_code, nerf_skin, skin_aux=skin_aux)
# pts_exp_fw,_ = lbs(bones_rst, bone_rts_fw,
# skin_forward, pts_exp_reg, backward=False)
# skin_backward = gauss_mlp_skinning(pts_exp_fw, embedding_xyz,
# bones_dfm, time_embedded, nerf_skin, skin_aux=skin_aux)
# pts_exp_fwbw,_ = lbs(bones_rst, bone_rts_fw,
# skin_backward,pts_exp_fw)
# frame_cyc_dis = (pts_exp_fwbw - pts_exp_reg).norm(2,-1)
# result['frame_cyc_dis'] = sil_coarse.detach() * frame_cyc_dis[...,-1]
if 'flowbw' in models.keys():
result['frame_rigloss'] = (frame_disp3d * weights_coarse.detach()).sum(-1)
# only evaluated when gradients are enabled
if xyz_coarse_frame.requires_grad:
# elastic energy
result['elastic_loss'] = elastic_loss(model_flowbw, embedding_xyz,
xyz_coarse_frame, time_embedded)
else:
result['frame_rigloss'] = (frame_rigloss).mean(-1)
### script to plot sigmas/weights
#from matplotlib import pyplot as plt
#plt.ioff()
#sil_rays = weights_coarse[rays['sil_at_samp'][:,0]>0]
#plt.plot(sil_rays[::1000].T.cpu().numpy(),'*-')
#plt.savefig('tmp/probs.png')
#plt.cla()
if is_training and 'nerf_vis' in models.keys():
result['vis_loss'] = visibility_loss(models['nerf_vis'], embedding_xyz,
xyz_coarse_sampled, vis_coarse, obj_bound, chunk)
# render flow
if 'rtk_vec_target' in rays.keys():
if opts.dist_corresp:
flo_coarse, flo_valid = vrender_flo(weights_coarse, xyz_coarse_target,
xys, img_size)
else:
flo_coarse = diff_flo(pts_target, xys, img_size)
flo_valid = torch.ones_like(flo_coarse[...,:1])
result['flo_coarse'] = flo_coarse
result['flo_valid'] = flo_valid
if 'rtk_vec_dentrg' in rays.keys():
if opts.dist_corresp:
fdp_coarse, fdp_valid = vrender_flo(weights_coarse,
xyz_coarse_dentrg, xys, img_size)
else:
fdp_coarse = diff_flo(pts_dentrg, xys, img_size)
fdp_valid = torch.ones_like(fdp_coarse[...,:1])
result['fdp_coarse'] = fdp_coarse
result['fdp_valid'] = fdp_valid
if 'nerf_unc' in models.keys():
# xys: bs,nsample,2
# t: bs
nerf_unc = models['nerf_unc']
ts = rays['ts']
vid_code = rays['vid_code']
# change according to K
xysn = rays['xysn']
xyt = torch.cat([xysn, ts],-1)
xyt_embedded = embedding_xyz(xyt)
xyt_code = torch.cat([xyt_embedded, vid_code],-1)
unc_pred = nerf_unc(xyt_code)
#TODO add activation function
#unc_pred = F.softplus(unc_pred)
result['unc_pred'] = unc_pred
if 'img_at_samp' in rays.keys():
# compute other losses
img_at_samp = rays['img_at_samp']
sil_at_samp = rays['sil_at_samp']
vis_at_samp = rays['vis_at_samp']
flo_at_samp = rays['flo_at_samp']
cfd_at_samp = rays['cfd_at_samp']
# img loss
img_loss_samp = (rgb_coarse - img_at_samp).pow(2).mean(-1)[...,None]
# sil loss, weight sil loss based on # points
if is_training and sil_at_samp.sum()>0 and (1-sil_at_samp).sum()>0:
pos_wt = vis_at_samp.sum()/ sil_at_samp[vis_at_samp>0].sum()
neg_wt = vis_at_samp.sum()/(1-sil_at_samp[vis_at_samp>0]).sum()
sil_balance_wt = 0.5*pos_wt*sil_at_samp + 0.5*neg_wt*(1-sil_at_samp)
else: sil_balance_wt = 1
sil_loss_samp = (sil_coarse[...,None] - sil_at_samp).pow(2) * sil_balance_wt
sil_loss_samp = sil_loss_samp * vis_at_samp
# flo loss, confidence weighting: 30x normalized distance - 0.1x pixel error
flo_loss_samp = (flo_coarse - flo_at_samp).pow(2).sum(-1)
# hard-threshold cycle error
sil_at_samp_flo = (sil_at_samp>0)\
& (flo_valid==1)
sil_at_samp_flo[cfd_at_samp==0] = False
if sil_at_samp_flo.sum()>0:
cfd_at_samp = cfd_at_samp / cfd_at_samp[sil_at_samp_flo].mean()
flo_loss_samp = flo_loss_samp[...,None] * cfd_at_samp
result['img_at_samp'] = img_at_samp
result['sil_at_samp'] = sil_at_samp
result['vis_at_samp'] = vis_at_samp
result['sil_at_samp_flo'] = sil_at_samp_flo
result['flo_at_samp'] = flo_at_samp
result['img_loss_samp'] = img_loss_samp
result['sil_loss_samp'] = sil_loss_samp
result['flo_loss_samp'] = flo_loss_samp
# exclude error outside mask
result['img_loss_samp']*=sil_at_samp
result['flo_loss_samp']*=sil_at_samp
if 'feats_at_samp' in rays.keys():
# feat loss
feats_at_samp=rays['feats_at_samp']
feat_rnd = F.normalize(feat_rnd, 2,-1)
frnd_loss_samp = (feat_rnd - feats_at_samp).pow(2).mean(-1)
result['frnd_loss_samp'] = frnd_loss_samp * sil_at_samp[...,0]
return result, weights_coarse
def sample_pdf(bins, weights, N_importance, det=False, eps=1e-5):
"""
Sample @N_importance samples from @bins with distribution defined by @weights.
Inputs:
bins: (N_rays, N_samples_+1) where N_samples_ is "the number of coarse samples per ray - 2"
weights: (N_rays, N_samples_)
N_importance: the number of samples to draw from the distribution
det: deterministic or not
eps: a small number to prevent division by zero
Outputs:
samples: (N_rays, N_importance) depth values drawn from the distribution
"""
N_rays, N_samples_ = weights.shape
weights = weights + eps # prevent division by zero (don't do inplace op!)
pdf = weights / torch.sum(weights, -1, keepdim=True) # (N_rays, N_samples_)
cdf = torch.cumsum(pdf, -1) # (N_rays, N_samples), cumulative distribution function
cdf = torch.cat([torch.zeros_like(cdf[: ,:1]), cdf], -1) # (N_rays, N_samples_+1)
# padded to 0~1 inclusive
if det:
u = torch.linspace(0, 1, N_importance, device=bins.device)
u = u.expand(N_rays, N_importance)
else:
u = torch.rand(N_rays, N_importance, device=bins.device)
u = u.contiguous()
inds = torch.searchsorted(cdf, u, right=True)
below = torch.clamp_min(inds-1, 0)
above = torch.clamp_max(inds, N_samples_)
inds_sampled = torch.stack([below, above], -1).view(N_rays, 2*N_importance)
cdf_g = torch.gather(cdf, 1, inds_sampled).view(N_rays, N_importance, 2)
bins_g = torch.gather(bins, 1, inds_sampled).view(N_rays, N_importance, 2)
denom = cdf_g[...,1]-cdf_g[...,0]
denom[denom<eps] = 1 # denom equals 0 means a bin has weight 0, in which case it will not be sampled
# anyway, therefore any value for it is fine (set to 1 here)
samples = bins_g[...,0] + (u-cdf_g[...,0])/denom * (bins_g[...,1]-bins_g[...,0])
return samples
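# Illustrative sketch (not part of the original banmo code): importance sampling
# with sample_pdf(); note that bins has one more column than weights.
def _demo_sample_pdf():
    import torch
    bins = torch.linspace(0.1, 1.0, 7).expand(2, 7)       # (N_rays=2, N_samples_+1=7)
    weights = torch.tensor([[0., 0., 5., 1., 0., 0.],
                            [1., 0., 0., 0., 0., 5.]])    # (N_rays=2, N_samples_=6)
    samples = sample_pdf(bins, weights, N_importance=4, det=True)
    return samples   # (2, 4); samples cluster around the high-weight bins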
|
banmo-main
|
nnutils/rendering.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import numpy as np
import pdb
import torch
from torch import nn
import torch.nn.functional as F
import torchvision
from pytorch3d import transforms
import trimesh
from nnutils.geom_utils import fid_reindex
class Embedding(nn.Module):
def __init__(self, in_channels, N_freqs, logscale=True, alpha=None):
"""
adapted from https://github.com/kwea123/nerf_pl/blob/master/models/nerf.py
Defines a function that embeds x to (x, sin(2^k x), cos(2^k x), ...)
in_channels: number of input channels (3 for both xyz and direction)
"""
super(Embedding, self).__init__()
self.N_freqs = N_freqs
self.in_channels = in_channels
self.funcs = [torch.sin, torch.cos]
self.nfuncs = len(self.funcs)
self.out_channels = in_channels*(len(self.funcs)*N_freqs+1)
if alpha is None:
self.alpha = self.N_freqs
else: self.alpha = alpha
if logscale:
self.freq_bands = 2**torch.linspace(0, N_freqs-1, N_freqs)
else:
self.freq_bands = torch.linspace(1, 2**(N_freqs-1), N_freqs)
def forward(self, x):
"""
Embeds x to (x, sin(2^k x), cos(2^k x), ...)
Different from the paper, "x" is also in the output
See https://github.com/bmild/nerf/issues/12
Inputs:
x: (B, self.in_channels)
Outputs:
out: (B, self.out_channels)
"""
# sine/cosine features
if self.N_freqs>0:
shape = x.shape
bs = shape[0]
input_dim = shape[-1]
output_dim = input_dim*(1+self.N_freqs*self.nfuncs)
out_shape = shape[:-1] + ((output_dim),)
device = x.device
x = x.view(-1,input_dim)
out = []
for freq in self.freq_bands:
for func in self.funcs:
out += [func(freq*x)]
out = torch.cat(out, -1)
## Apply the window w = 0.5*( 1+cos(pi + pi clip(alpha-j)) )
out = out.view(-1, self.N_freqs, self.nfuncs, input_dim)
window = self.alpha - torch.arange(self.N_freqs).to(device)
window = torch.clamp(window, 0.0, 1.0)
window = 0.5 * (1 + torch.cos(np.pi * window + np.pi))
window = window.view(1,-1, 1, 1)
out = window * out
out = out.view(-1,self.N_freqs*self.nfuncs*input_dim)
out = torch.cat([x, out],-1)
out = out.view(out_shape)
else: out = x
return out
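# Illustrative sketch (not part of the original banmo code): with N_freqs=10 the
# positional encoding of a 3-vector has 3*(2*10+1)=63 channels, matching the
# in_channels_xyz=63 default of the NeRF MLP below.
def _demo_embedding():
    embed = Embedding(3, 10)
    xyz = torch.rand(5, 3)
    return embed(xyz)    # (5, 63): [x, sin(2^k x), cos(2^k x), ...]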
class NeRF(nn.Module):
def __init__(self,
D=8, W=256,
in_channels_xyz=63, in_channels_dir=27,
out_channels=3,
skips=[4], raw_feat=False, init_beta=1./100,
activation=nn.ReLU(True), in_channels_code=0):
"""
adapted from https://github.com/kwea123/nerf_pl/blob/master/models/nerf.py
D: number of layers for density (sigma) encoder
W: number of hidden units in each layer
in_channels_xyz: number of input channels for xyz (3+3*10*2=63 by default)
in_channels_dir: number of input channels for direction (3+3*4*2=27 by default)
skips: add skip connection in the Dth layer
in_channels_code: only used for nerf_skin,
"""
super(NeRF, self).__init__()
self.D = D
self.W = W
self.in_channels_xyz = in_channels_xyz
self.in_channels_dir = in_channels_dir
self.in_channels_code = in_channels_code
self.skips = skips
self.use_xyz = False
# xyz encoding layers
self.weights_reg = []
for i in range(D):
if i == 0:
layer = nn.Linear(in_channels_xyz, W)
self.weights_reg.append(f"xyz_encoding_{i+1}")
elif i in skips:
layer = nn.Linear(W+in_channels_xyz, W)
self.weights_reg.append(f"xyz_encoding_{i+1}")
else:
layer = nn.Linear(W, W)
layer = nn.Sequential(layer, activation)
setattr(self, f"xyz_encoding_{i+1}", layer)
self.xyz_encoding_final = nn.Linear(W, W)
# direction encoding layers
self.dir_encoding = nn.Sequential(
nn.Linear(W+in_channels_dir, W//2),
activation)
# output layers
self.sigma = nn.Linear(W, 1)
self.rgb = nn.Sequential(
nn.Linear(W//2, out_channels),
)
self.raw_feat = raw_feat
self.beta = torch.Tensor([init_beta]) # volsdf beta; used via beta.abs() in rendering.inference
self.beta = nn.Parameter(self.beta)
# for m in self.modules():
# if isinstance(m, nn.Linear):
# if hasattr(m.weight,'data'):
# nn.init.xavier_uniform_(m.weight)
def forward(self, x ,xyz=None, sigma_only=False):
"""
Encodes input (xyz+dir) to rgb+sigma (not ready to render yet).
For rendering this ray, please see rendering.py
Inputs:
x: (B, self.in_channels_xyz(+self.in_channels_dir))
the embedded vector of position and direction
sigma_only: whether to infer sigma only. If True,
x is of shape (B, self.in_channels_xyz)
raw_feat: does not apply sigmoid
Outputs:
if sigma_only:
sigma: (B, 1) sigma
else:
out: (B, 4), rgb and sigma
"""
if not sigma_only:
input_xyz, input_dir = \
torch.split(x, [self.in_channels_xyz, self.in_channels_dir], dim=-1)
else:
input_xyz, input_dir = \
torch.split(x, [self.in_channels_xyz, 0], dim=-1)
xyz_ = input_xyz
for i in range(self.D):
if i in self.skips:
xyz_ = torch.cat([input_xyz, xyz_], -1)
xyz_ = getattr(self, f"xyz_encoding_{i+1}")(xyz_)
sigma = self.sigma(xyz_)
if sigma_only:
return sigma
xyz_encoding_final = self.xyz_encoding_final(xyz_)
dir_encoding_input = torch.cat([xyz_encoding_final, input_dir], -1)
dir_encoding = self.dir_encoding(dir_encoding_input)
rgb = self.rgb(dir_encoding)
if self.raw_feat:
out = rgb
else:
rgb = rgb.sigmoid()
out = torch.cat([rgb, sigma], -1)
return out
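# Illustrative sketch (not part of the original banmo code): one forward pass
# through the MLP with the default channel sizes (63 for xyz, 27 for viewdir).
def _demo_nerf_forward():
    model = NeRF(in_channels_xyz=63, in_channels_dir=27)
    x = torch.rand(8, 63 + 27)                           # embedded position + direction
    out = model(x)                                       # (8, 4): sigmoid(rgb) and raw sigma
    sigma = model(torch.rand(8, 63), sigma_only=True)    # (8, 1)
    return out, sigma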
class Transhead(NeRF):
"""
translation head
"""
def __init__(self, **kwargs):
super(Transhead, self).__init__(**kwargs)
def forward(self, x, xyz=None,sigma_only=False):
flow = super(Transhead, self).forward(x, sigma_only=sigma_only)
flow = flow*0.1
return flow
class SE3head(NeRF):
"""
modify the output to be rigid transforms per point
modified from Nerfies
"""
def __init__(self, **kwargs):
super(SE3head, self).__init__(**kwargs)
self.use_xyz=True
def forward(self, x, xyz=None,sigma_only=False):
x = super(SE3head, self).forward(x, sigma_only=sigma_only)
x = x.view(-1,9)
rotation, pivot, translation = x.split([3,3,3],-1)
pivot = pivot*0.1
translation = translation*0.1
shape = xyz.shape
warped_points = xyz.view(-1,3).clone()
warped_points = warped_points + pivot
rotmat = transforms.so3_exponential_map(rotation)
warped_points = rotmat.matmul(warped_points[...,None])[...,0]
warped_points = warped_points - pivot
warped_points = warped_points + translation
flow = warped_points.view(shape) - xyz
return flow
class RTHead(NeRF):
"""
modify the output to be rigid transforms
"""
def __init__(self, use_quat, **kwargs):
super(RTHead, self).__init__(**kwargs)
# use quaternion when estimating full rotation
# use exponential map when estimating delta rotation
self.use_quat=use_quat
if self.use_quat: self.num_output=7
else: self.num_output=6
for m in self.modules():
if isinstance(m, nn.Linear):
if hasattr(m.bias,'data'):
m.bias.data.zero_()
def forward(self, x):
# output: NxBx(9 rotation + 3 translation)
x = super(RTHead, self).forward(x)
bs = x.shape[0]
rts = x.view(-1,self.num_output) # bs B,x
B = rts.shape[0]//bs
tmat= rts[:,0:3] *0.1
if self.use_quat:
rquat=rts[:,3:7]
rquat=F.normalize(rquat,2,-1)
rmat=transforms.quaternion_to_matrix(rquat)
else:
rot=rts[:,3:6]
rmat = transforms.so3_exponential_map(rot)
rmat = rmat.view(-1,9)
rts = torch.cat([rmat,tmat],-1)
rts = rts.view(bs,1,-1)
return rts
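# Illustrative sketch (not part of the original banmo code): RTHead maps a pose
# code to rigid transforms packed as (bs, B, 9 rotation + 3 translation).
def _demo_rthead():
    head = RTHead(use_quat=True, in_channels_xyz=128, in_channels_dir=0,
                  out_channels=7, raw_feat=True)
    code = torch.rand(4, 128)
    return head(code)    # (4, 1, 12)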
class FrameCode(nn.Module):
"""
frame index and video index to code
"""
def __init__(self, num_freq, embedding_dim, vid_offset, scale=1):
super(FrameCode, self).__init__()
self.vid_offset = vid_offset
self.num_vids = len(vid_offset)-1
# number of frequencies is set by the longest video, e.g. 64-127 frames => 10 freqs
max_ts = (self.vid_offset[1:] - self.vid_offset[:-1]).max()
self.num_freq = 2*int(np.log2(max_ts))-2
# self.num_freq = num_freq
self.fourier_embed = Embedding(1,num_freq,alpha=num_freq)
self.basis_mlp = nn.Linear(self.num_vids*self.fourier_embed.out_channels,
embedding_dim)
self.scale = scale # input scale factor
def forward(self, fid):
"""
fid->code: N->N,embedding_dim
"""
bs = fid.shape[0]
vid, tid = fid_reindex(fid, self.num_vids, self.vid_offset)
tid = tid*self.scale
tid = tid.view(bs,1)
vid = vid.view(bs,1)
coeff = self.fourier_embed(tid) # N, n_channels
vid = F.one_hot(vid, num_classes=self.num_vids) # N, 1, num_vids
# place the fourier coefficients in the slot of the current video, zeros elsewhere
coeff = coeff[...,None] * vid # N, n_channels, num_vids
coeff = coeff.view(bs, -1)
code = self.basis_mlp(coeff)
return code
class RTExplicit(nn.Module):
"""
index rigid transforms from a dictionary
"""
def __init__(self, max_t, delta=False, rand=True):
super(RTExplicit, self).__init__()
self.max_t = max_t
self.delta = delta
# initialize rotation
trans = torch.zeros(max_t, 3)
if delta:
rot = torch.zeros(max_t, 3)
else:
if rand:
rot = torch.rand(max_t, 4) * 2 - 1
else:
rot = torch.zeros(max_t, 4)
rot[:,0] = 1
se3 = torch.cat([trans, rot],-1)
self.se3 = nn.Parameter(se3)
self.num_output = se3.shape[-1]
def forward(self, x):
# output: NxBx(9 rotation + 3 translation)
bs = x.shape[0]
x = self.se3[x] # bs B,x
rts = x.view(-1,self.num_output)
B = rts.shape[0]//bs
tmat= rts[:,0:3] *0.1
if self.delta:
rot=rts[:,3:6]
rmat = transforms.so3_exponential_map(rot)
else:
rquat=rts[:,3:7]
rquat=F.normalize(rquat,2,-1)
rmat=transforms.quaternion_to_matrix(rquat)
rmat = rmat.view(-1,9)
rts = torch.cat([rmat,tmat],-1)
rts = rts.view(bs,1,-1)
return rts
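# Illustrative sketch (not part of the original banmo code): with rand=False the
# table starts from identity rotations and zero translations, indexed by frame id.
def _demo_rtexplicit():
    table = RTExplicit(max_t=10, delta=False, rand=False)
    fid = torch.tensor([0, 3, 7])
    return table(fid)    # (3, 1, 12)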
class RTExpMLP(nn.Module):
"""
index rigid transforms from a dictionary
"""
def __init__(self, max_t, num_freqs, t_embed_dim, data_offset, delta=False):
super(RTExpMLP, self).__init__()
#self.root_code = nn.Embedding(max_t, t_embed_dim)
self.root_code = FrameCode(num_freqs, t_embed_dim, data_offset, scale=0.1)
self.base_rt = RTExplicit(max_t, delta=delta,rand=False)
#self.base_rt = RTHead(use_quat=True,
# D=2, W=64,
# in_channels_xyz=t_embed_dim,in_channels_dir=0,
# out_channels=7, raw_feat=True)
#self.base_rt = nn.Sequential(self.root_code, self.base_rt)
self.mlp_rt = RTHead(use_quat=False,
in_channels_xyz=t_embed_dim,in_channels_dir=0,
out_channels=6, raw_feat=True)
self.delta_rt = nn.Sequential(self.root_code, self.mlp_rt)
def forward(self, x):
# output: NxBx(9 rotation + 3 translation)
base_rts = self.base_rt(x)
delt_rts = self.delta_rt(x)
# magnify gradient by 10x
base_rts = base_rts * 10 - (base_rts*9).detach()
rmat = base_rts[:,0,:9].view(-1,3,3)
tmat = base_rts[:,0,9:12]
delt_rmat = delt_rts[:,0,:9].view(-1,3,3)
delt_tmat = delt_rts[:,0,9:12]
tmat = tmat + rmat.matmul(delt_tmat[...,None])[...,0]
rmat = rmat.matmul(delt_rmat)
rmat = rmat.view(-1,9)
rts = torch.cat([rmat,tmat],-1)
rts = rts.view(-1,1,12)
return rts
class ScoreHead(NeRF):
"""
modify the output to be rigid transforms
"""
def __init__(self, recursion_level, **kwargs):
super(ScoreHead, self).__init__(**kwargs)
grid= generate_healpix_grid(recursion_level=recursion_level)
self.register_buffer('grid', grid)
self.num_scores = self.grid.shape[0]
def forward(self, x):
# output: NxBx(9 rotation + 3 translation)
x = super(ScoreHead, self).forward(x)
bs = x.shape[0]
x = x.view(-1,self.num_scores+3) # bs B,x
# do not use tmat since it is not trained
tmat = x[:,0:3]*0.
scores=x[:,3:]
if self.training:
return scores, self.grid
else:
scores = scores.view(bs,-1,1)
rmat = self.grid[None].repeat(bs,1,1,1)
tmat = tmat[:,None].repeat(1,self.num_scores,1)
rmat = rmat.view(bs,-1,9)
rts = torch.cat([scores,rmat, tmat],-1)
rts = rts.view(bs,self.num_scores,-1)
return rts
class NeRFUnc(NeRF):
"""
nerf uncertainty
"""
def __init__(self, **kwargs):
super(NeRFUnc, self).__init__(**kwargs)
def forward(self, x, xyz=None,sigma_only=False):
unc = super(NeRFUnc, self).forward(x, sigma_only=sigma_only)
return unc
class ResNetConv(nn.Module):
"""
adapted from https://github.com/shubhtuls/factored3d/blob/master/nnutils/net_blocks.py
"""
def __init__(self, in_channels):
super(ResNetConv, self).__init__()
self.resnet = torchvision.models.resnet18(pretrained=True)
if in_channels!=3:
self.resnet.conv1 = nn.Conv2d(in_channels, 64, kernel_size=(7, 7),
stride=(2, 2), padding=(3, 3), bias=False)
self.resnet.fc=None
def forward(self, x):
x = self.resnet.conv1(x)
x = self.resnet.bn1(x)
x = self.resnet.relu(x)
x = self.resnet.maxpool(x)
x = self.resnet.layer1(x)
x = self.resnet.layer2(x)
x = self.resnet.layer3(x)
x = self.resnet.layer4(x)
return x
class Encoder(nn.Module):
"""
adapted from https://github.com/shubhtuls/factored3d/blob/master/nnutils/net_blocks.py
Current:
ResNet-18 backbone (4 residual stages, x32 spatial reduction),
followed by a 3x3 conv to 128 channels,
then 4x4 max pooling and flattening to the output feature.
"""
def __init__(self, input_shape, in_channels=3,out_channels=128, batch_norm=True):
super(Encoder, self).__init__()
self.resnet_conv = ResNetConv(in_channels=in_channels)
self.conv1 = conv2d(batch_norm, 512, 128, stride=1, kernel_size=3)
#net_init(self.conv1)
def forward(self, img):
feat = self.resnet_conv.forward(img) # 512,4,4
feat = self.conv1(feat) # 128,4,4
feat = F.max_pool2d(feat, 4, 4)
feat = feat.view(img.size(0), -1)
return feat
## 2D convolution layers
def conv2d(batch_norm, in_planes, out_planes, kernel_size=3, stride=1):
"""
adapted from https://github.com/shubhtuls/factored3d/blob/master/nnutils/net_blocks.py
"""
if batch_norm:
return nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=(kernel_size-1)//2, bias=True),
nn.BatchNorm2d(out_planes),
nn.LeakyReLU(0.2,inplace=True)
)
else:
return nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=(kernel_size-1)//2, bias=True),
nn.LeakyReLU(0.2,inplace=True)
)
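# Illustrative sketch (not part of the original banmo code): with stride=1 the
# conv block keeps the spatial resolution thanks to the (kernel_size-1)//2 padding.
def _demo_conv2d_block():
    block = conv2d(True, 3, 16, kernel_size=3, stride=1)
    feat = block(torch.randn(2, 3, 32, 32))
    return feat    # (2, 16, 32, 32)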
def grab_xyz_weights(nerf_model, clone=False):
"""
grab the weights of the layers directly connected to the xyz input
(first layer and skip layers) and return them as a list; used to keep
the coarse component frozen via a weight-consistency loss
"""
param_list = []
input_layers=[0]+nerf_model.skips
input_wt_names = []
for layer in input_layers:
input_wt_names.append(f"xyz_encoding_{layer+1}.0.weight")
for name,p in nerf_model.named_parameters():
if name in input_wt_names:
# equiv since the wt after pos_dim does not change
if clone:
param_list.append(p.detach().clone())
else:
param_list.append(p)
## get the weights according to the coarse pose code
## 63 = 3 + 60
## 60 = (num_freqs, 2, 3)
#out_dim = p.shape[0]
#pos_dim = nerf_model.in_channels_xyz-nerf_model.in_channels_code
#param_list.append(p[:,:pos_dim]) #
return param_list
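# Illustrative sketch (not part of the original banmo code): for a default NeRF
# the input-connected weights are the first layer and the skip layer.
def _demo_grab_xyz_weights():
    model = NeRF()
    params = grab_xyz_weights(model)
    return [p.shape for p in params]    # [torch.Size([256, 63]), torch.Size([256, 319])]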
|
banmo-main
|
nnutils/nerf.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
from collections import defaultdict
import os
import os.path as osp
import pickle
import sys
sys.path.insert(0, 'third_party')
import cv2, numpy as np, time, torch, torchvision
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import trimesh, pytorch3d, pytorch3d.loss, pdb
from pytorch3d import transforms
import configparser
from nnutils.nerf import Embedding, NeRF, RTHead, SE3head, RTExplicit, Encoder,\
ScoreHead, Transhead, NeRFUnc, \
grab_xyz_weights, FrameCode, RTExpMLP
from nnutils.geom_utils import K2mat, mat2K, Kmatinv, K2inv, raycast, sample_xy,\
chunk_rays, generate_bones,\
canonical2ndc, obj_to_cam, vec_to_sim3, \
near_far_to_bound, compute_flow_geodist, \
compute_flow_cse, fb_flow_check, pinhole_cam, \
render_color, mask_aug, bbox_dp2rnd, resample_dp, \
vrender_flo, get_near_far, array2tensor, rot_angle, \
rtk_invert, rtk_compose, bone_transform, correct_bones,\
correct_rest_pose, fid_reindex
from nnutils.rendering import render_rays
from nnutils.loss_utils import eikonal_loss, rtk_loss, \
feat_match_loss, kp_reproj_loss, grad_update_bone, \
loss_filter, loss_filter_line, compute_xyz_wt_loss,\
compute_root_sm_2nd_loss, shape_init_loss
from utils.io import draw_pts
# distributed data parallel
flags.DEFINE_integer('local_rank', 0, 'for distributed training')
flags.DEFINE_integer('ngpu', 1, 'number of gpus to use')
# data io
flags.DEFINE_integer('accu_steps', 1, 'how many steps to do gradient accumulation')
flags.DEFINE_string('seqname', 'syn-spot-40', 'name of the sequence')
flags.DEFINE_string('logname', 'exp_name', 'Experiment Name')
flags.DEFINE_string('checkpoint_dir', 'logdir/', 'Root directory for output files')
flags.DEFINE_string('model_path', '', 'load model path')
flags.DEFINE_string('pose_cnn_path', '', 'path to pre-trained pose cnn')
flags.DEFINE_string('rtk_path', '', 'path to rtk files')
flags.DEFINE_bool('lineload',False,'whether to use pre-computed data per line')
flags.DEFINE_integer('n_data_workers', 1, 'Number of data loading workers')
flags.DEFINE_boolean('use_rtk_file', False, 'whether to use input rtk files')
flags.DEFINE_boolean('debug', False, 'debug mode')
# model: shape, appearance, and feature
flags.DEFINE_bool('use_human', False, 'whether to use human cse model')
flags.DEFINE_bool('symm_shape', False, 'whether to set geometry to x-symmetry')
flags.DEFINE_bool('env_code', True, 'whether to use environment code for each video')
flags.DEFINE_bool('env_fourier', True, 'whether to use fourier basis for env')
flags.DEFINE_bool('use_unc',False, 'whether to use uncertainty sampling')
flags.DEFINE_bool('nerf_vis', True, 'use visibility volume')
flags.DEFINE_bool('anneal_freq', False, 'whether to use frequency annealing')
flags.DEFINE_integer('alpha', 10, 'maximum frequency for fourier features')
flags.DEFINE_bool('use_cc', True, 'whether to use connected component for mesh')
# model: motion
flags.DEFINE_bool('lbs', True, 'use lbs for backward warping 3d flow')
flags.DEFINE_integer('num_bones', 25, 'maximum number of bones')
flags.DEFINE_bool('nerf_skin', True, 'use mlp skinning function')
flags.DEFINE_integer('t_embed_dim', 128, 'dimension of the pose code')
flags.DEFINE_bool('frame_code', True, 'whether to use frame code')
flags.DEFINE_bool('flowbw', False, 'use backward warping 3d flow')
flags.DEFINE_bool('se3_flow', False, 'whether to use se3 field for 3d flow')
# model: cameras
flags.DEFINE_bool('use_cam', False, 'whether to use pre-defined camera pose')
flags.DEFINE_string('root_basis', 'expmlp', 'which root pose basis to use {mlp, cnn, exp, expmlp}')
flags.DEFINE_bool('root_opt', True, 'whether to optimize root body poses')
flags.DEFINE_bool('ks_opt', True, 'whether to optimize camera intrinsics')
# optimization: hyperparams
flags.DEFINE_integer('num_epochs', 1000, 'Number of epochs to train')
flags.DEFINE_float('learning_rate', 5e-4, 'learning rate')
flags.DEFINE_integer('batch_size', 2, 'size of minibatches')
flags.DEFINE_integer('img_size', 512, 'image size for optimization')
flags.DEFINE_integer('nsample', 6, 'num of samples per image at optimization time')
flags.DEFINE_float('perturb', 1.0, 'factor to perturb depth sampling points')
flags.DEFINE_float('noise_std', 0., 'std dev of noise added to regularize sigma')
flags.DEFINE_float('nactive', 0.5, 'num of samples per image at optimization time')
flags.DEFINE_integer('ndepth', 128, 'num of depth samples per px at optimization time')
flags.DEFINE_float('clip_scale', 100, 'grad clip scale')
flags.DEFINE_float('warmup_steps', 0.4, 'steps used to increase sil loss')
flags.DEFINE_float('reinit_bone_steps', 0.667, 'steps to initialize bones')
flags.DEFINE_float('dskin_steps', 0.8, 'steps to add delta skinning weights')
flags.DEFINE_float('init_beta', 0.1, 'initial value for transparency beta')
flags.DEFINE_bool('reset_beta', False, 'reset volsdf beta to 0.1')
flags.DEFINE_float('fine_steps', 1.1, 'by default, not using fine samples')
flags.DEFINE_float('nf_reset', 0.5, 'by default, start resetting near-far plane at 50%')
flags.DEFINE_float('bound_reset', 0.5, 'by default, start resetting bound from 50%')
flags.DEFINE_float('bound_factor', 2, 'by default, use a loose bound')
# optimization: initialization
flags.DEFINE_bool('init_ellips', False, 'whether to init shape as ellips')
flags.DEFINE_integer('warmup_pose_ep', 0, 'epochs to pre-train cnn pose predictor')
flags.DEFINE_integer('warmup_shape_ep', 0, 'epochs to pre-train nerf')
flags.DEFINE_bool('warmup_rootmlp', False, 'whether to preset base root pose (compatible with expmlp root basis only)')
flags.DEFINE_bool('unc_filter', True, 'whether to filter root poses init with low uncertainty')
# optimization: fine-tuning
flags.DEFINE_bool('keep_pose_basis', True, 'keep pose basis when loading models at train time')
flags.DEFINE_bool('freeze_coarse', False, 'whether to freeze coarse posec of MLP')
flags.DEFINE_bool('freeze_root', False, 'whether to freeze root body pose')
flags.DEFINE_bool('root_stab', True, 'whether to stabilize root at fine-tuning')
flags.DEFINE_bool('freeze_cvf', False, 'whether to freeze canonical features')
flags.DEFINE_bool('freeze_shape',False, 'whether to freeze canonical shape')
flags.DEFINE_bool('freeze_proj', False, 'whether to freeze some params w/ proj loss')
flags.DEFINE_bool('freeze_body_mlp', False, 'whether to freeze body pose mlp')
flags.DEFINE_float('proj_start', 0.0, 'steps to start projection opt')
flags.DEFINE_float('frzroot_start', 0.0, 'steps to start fixing root pose')
flags.DEFINE_float('frzbody_end', 0.0, 'steps to end fixing body pose')
flags.DEFINE_float('proj_end', 0.2, 'steps to end projection opt')
# CSE fine-tuning (turned off by default)
flags.DEFINE_bool('ft_cse', False, 'whether to fine-tune cse features')
flags.DEFINE_bool('mt_cse', True, 'whether to maintain cse features')
flags.DEFINE_float('mtcse_steps', 0.0, 'only distill cse before several epochs')
flags.DEFINE_float('ftcse_steps', 0.0, 'finetune cse after several epochs')
# render / eval
flags.DEFINE_integer('render_size', 64, 'size used for eval visualizations')
flags.DEFINE_integer('frame_chunk', 20, 'chunk size to split the input frames')
flags.DEFINE_integer('chunk', 32*1024, 'chunk size to split the input to avoid OOM')
flags.DEFINE_integer('rnd_frame_chunk', 3, 'chunk size to render eval images')
flags.DEFINE_bool('queryfw', True, 'use forward warping to query deformed shape')
flags.DEFINE_float('mc_threshold', -0.002, 'marching cubes threshold')
flags.DEFINE_bool('full_mesh', False, 'extract surface without visibility check')
flags.DEFINE_bool('ce_color', True, 'assign mesh color as canonical surface mapping or radiance')
flags.DEFINE_integer('sample_grid3d', 64, 'resolution for mesh extraction from nerf')
flags.DEFINE_string('test_frames', '9', 'a list of video index or num of frames, {0,1,2}, 30')
# losses
flags.DEFINE_bool('use_embed', True, 'whether to use feature consistency losses')
flags.DEFINE_bool('use_proj', True, 'whether to use reprojection loss')
flags.DEFINE_bool('use_corresp', True, 'whether to render and compare correspondence')
flags.DEFINE_bool('dist_corresp', True, 'whether to render distributed corresp')
flags.DEFINE_float('total_wt', 1, 'by default, multiply total loss by 1')
flags.DEFINE_float('sil_wt', 0.1, 'weight for silhouette loss')
flags.DEFINE_float('img_wt', 0.1, 'weight for image reconstruction loss')
flags.DEFINE_float('feat_wt', 0., 'by default, multiply feat loss by 1')
flags.DEFINE_float('frnd_wt', 1., 'by default, multiply feature rendering loss by 1')
flags.DEFINE_float('proj_wt', 0.02, 'by default, multiply proj loss by 1')
flags.DEFINE_float('flow_wt', 1, 'by default, multiply flow loss by 1')
flags.DEFINE_float('cyc_wt', 1, 'by default, multiply cyc loss by 1')
flags.DEFINE_bool('rig_loss', False,'whether to use globally rigid loss')
flags.DEFINE_bool('root_sm', True, 'whether to use smooth loss for root pose')
flags.DEFINE_float('eikonal_wt', 0., 'weight of eikonal loss')
flags.DEFINE_float('bone_loc_reg', 0.1, 'use bone location regularization')
flags.DEFINE_bool('loss_flt', True, 'whether to use loss filter')
flags.DEFINE_bool('rm_novp', True,'whether to remove loss on non-overlapping pxs')
# for scripts/visualize/match.py
flags.DEFINE_string('match_frames', '0 1', 'a list of frame index')
class banmo(nn.Module):
def __init__(self, opts, data_info):
super(banmo, self).__init__()
self.opts = opts
self.device = torch.device("cuda:%d"%opts.local_rank)
self.config = configparser.RawConfigParser()
self.config.read('configs/%s.config'%opts.seqname)
self.alpha=torch.Tensor([opts.alpha])
self.alpha=nn.Parameter(self.alpha)
self.loss_select = 1 # by default, use all losses
self.root_update = 1 # by default, update root pose
self.body_update = 1 # by default, update body pose
self.shape_update = 0 # by default, update all
self.cvf_update = 0 # by default, update all
self.progress = 0. # also reset in the optimizer
self.counter_frz_rebone = 0. # counter to freeze params for reinit bones
self.use_fine = False # by default not using fine samples
#self.ndepth_bk = opts.ndepth # original ndepth
self.root_basis = opts.root_basis
self.use_cam = opts.use_cam
self.is_warmup_pose = False # by default not warming up
self.img_size = opts.img_size # current rendering size,
# have to be consistent with dataloader,
# eval/train has different size
embed_net = nn.Embedding
# multi-video mode
self.num_vid = len(self.config.sections())-1
self.data_offset = data_info['offset']
self.num_fr=self.data_offset[-1]
self.max_ts = (self.data_offset[1:] - self.data_offset[:-1]).max()
self.impath = data_info['impath']
self.latest_vars = {}
# only used in get_near_far: rtk, idk
# only used in visibility: rtk, vis, idx (deprecated)
# raw rot/trans estimated by pose net
self.latest_vars['rt_raw'] = np.zeros((self.data_offset[-1], 3,4)) # from data
# rtk raw scaled and refined
self.latest_vars['rtk'] = np.zeros((self.data_offset[-1], 4,4))
self.latest_vars['idk'] = np.zeros((self.data_offset[-1],))
self.latest_vars['mesh_rest'] = trimesh.Trimesh()
if opts.lineload:
#TODO todo, this should be idx512,-1
self.latest_vars['fp_err'] = np.zeros((self.data_offset[-1]*opts.img_size,2)) # feat, proj
self.latest_vars['flo_err'] = np.zeros((self.data_offset[-1]*opts.img_size,6))
self.latest_vars['sil_err'] = np.zeros((self.data_offset[-1]*opts.img_size,))
self.latest_vars['flo_err_hist'] = np.zeros((self.data_offset[-1]*opts.img_size,6,10))
else:
self.latest_vars['fp_err'] = np.zeros((self.data_offset[-1],2)) # feat, proj
self.latest_vars['flo_err'] = np.zeros((self.data_offset[-1],6))
self.latest_vars['sil_err'] = np.zeros((self.data_offset[-1],))
self.latest_vars['flo_err_hist'] = np.zeros((self.data_offset[-1],6,10))
# get near-far plane
self.near_far = np.zeros((self.data_offset[-1],2))
self.near_far[...,1] = 6.
self.near_far = self.near_far.astype(np.float32)
self.near_far = torch.Tensor(self.near_far).to(self.device)
self.obj_scale = float(near_far_to_bound(self.near_far)) / 0.3 # to 0.3
self.near_far = self.near_far / self.obj_scale
self.near_far_base = self.near_far.clone() # used for create_base_se3()
self.near_far = nn.Parameter(self.near_far)
# object bound
self.latest_vars['obj_bound'] = np.asarray([1.,1.,1.])
self.latest_vars['obj_bound'] *= near_far_to_bound(self.near_far)
self.vis_min=np.asarray([[0,0,0]])
self.vis_len=self.latest_vars['obj_bound']/2
# set shape/appearance model
self.num_freqs = 10
in_channels_xyz=3+3*self.num_freqs*2
in_channels_dir=27
if opts.env_code:
# add video-specific environment lighting embedding
env_code_dim = 64
if opts.env_fourier:
self.env_code = FrameCode(self.num_freqs, env_code_dim, self.data_offset, scale=1)
else:
self.env_code = embed_net(self.num_fr, env_code_dim)
else:
env_code_dim = 0
self.nerf_coarse = NeRF(in_channels_xyz=in_channels_xyz,
in_channels_dir=in_channels_dir+env_code_dim,
init_beta=opts.init_beta)
self.embedding_xyz = Embedding(3,self.num_freqs,alpha=self.alpha.data[0])
self.embedding_dir = Embedding(3,4, alpha=self.alpha.data[0])
self.embeddings = {'xyz':self.embedding_xyz, 'dir':self.embedding_dir}
self.nerf_models= {'coarse':self.nerf_coarse}
# set motion model
t_embed_dim = opts.t_embed_dim
if opts.frame_code:
self.pose_code = FrameCode(self.num_freqs, t_embed_dim, self.data_offset)
else:
self.pose_code = embed_net(self.num_fr, t_embed_dim)
if opts.flowbw:
if opts.se3_flow:
flow3d_arch = SE3head
out_channels=9
else:
flow3d_arch = Transhead
out_channels=3
self.nerf_flowbw = flow3d_arch(in_channels_xyz=in_channels_xyz+t_embed_dim,
D=5, W=128,
out_channels=out_channels,in_channels_dir=0, raw_feat=True)
self.nerf_flowfw = flow3d_arch(in_channels_xyz=in_channels_xyz+t_embed_dim,
D=5, W=128,
out_channels=out_channels,in_channels_dir=0, raw_feat=True)
self.nerf_models['flowbw'] = self.nerf_flowbw
self.nerf_models['flowfw'] = self.nerf_flowfw
elif opts.lbs:
self.num_bones = opts.num_bones
bones= generate_bones(self.num_bones, self.num_bones, 0, self.device)
self.bones = nn.Parameter(bones)
self.nerf_models['bones'] = self.bones
self.num_bone_used = self.num_bones # bones used in the model
self.nerf_body_rts = nn.Sequential(self.pose_code,
RTHead(use_quat=False,
#D=5,W=128,
in_channels_xyz=t_embed_dim,in_channels_dir=0,
out_channels=6*self.num_bones, raw_feat=True))
#TODO scale+constant parameters
skin_aux = torch.Tensor([0,self.obj_scale])
self.skin_aux = nn.Parameter(skin_aux)
self.nerf_models['skin_aux'] = self.skin_aux
if opts.nerf_skin:
self.nerf_skin = NeRF(in_channels_xyz=in_channels_xyz+t_embed_dim,
# D=5,W=128,
D=5,W=64,
in_channels_dir=0, out_channels=self.num_bones,
raw_feat=True, in_channels_code=t_embed_dim)
self.rest_pose_code = embed_net(1, t_embed_dim)
self.nerf_models['nerf_skin'] = self.nerf_skin
self.nerf_models['rest_pose_code'] = self.rest_pose_code
# set visibility nerf
if opts.nerf_vis:
self.nerf_vis = NeRF(in_channels_xyz=in_channels_xyz, D=5, W=64,
out_channels=1, in_channels_dir=0,
raw_feat=True)
self.nerf_models['nerf_vis'] = self.nerf_vis
# optimize camera
if opts.root_opt:
if self.use_cam:
use_quat=False
out_channels=6
else:
use_quat=True
out_channels=7
# train a cnn pose predictor for warmup
cnn_in_channels = 16
cnn_head = RTHead(use_quat=True, D=1,
in_channels_xyz=128,in_channels_dir=0,
out_channels=7, raw_feat=True)
self.dp_root_rts = nn.Sequential(
Encoder((112,112), in_channels=cnn_in_channels,
out_channels=128), cnn_head)
if self.root_basis == 'cnn':
self.nerf_root_rts = nn.Sequential(
Encoder((112,112), in_channels=cnn_in_channels,
out_channels=128),
RTHead(use_quat=use_quat, D=1,
in_channels_xyz=128,in_channels_dir=0,
out_channels=out_channels, raw_feat=True))
elif self.root_basis == 'exp':
self.nerf_root_rts = RTExplicit(self.num_fr, delta=self.use_cam)
elif self.root_basis == 'expmlp':
self.nerf_root_rts = RTExpMLP(self.num_fr,
self.num_freqs,t_embed_dim,self.data_offset,
delta=self.use_cam)
elif self.root_basis == 'mlp':
self.root_code = embed_net(self.num_fr, t_embed_dim)
output_head = RTHead(use_quat=use_quat,
in_channels_xyz=t_embed_dim,in_channels_dir=0,
out_channels=out_channels, raw_feat=True)
self.nerf_root_rts = nn.Sequential(self.root_code, output_head)
else: print('error'); exit()
# intrinsics
ks_list = []
for i in range(self.num_vid):
fx,fy,px,py=[float(i) for i in \
self.config.get('data_%d'%i, 'ks').split(' ')]
ks_list.append([fx,fy,px,py])
self.ks_param = torch.Tensor(ks_list).to(self.device)
if opts.ks_opt:
self.ks_param = nn.Parameter(self.ks_param)
# densepose
detbase='./third_party/detectron2/'
if opts.use_human:
canonical_mesh_name = 'smpl_27554'
config_path = '%s/projects/DensePose/configs/cse/densepose_rcnn_R_101_FPN_DL_soft_s1x.yaml'%(detbase)
weight_path = 'https://dl.fbaipublicfiles.com/densepose/cse/densepose_rcnn_R_101_FPN_DL_soft_s1x/250713061/model_final_1d3314.pkl'
else:
canonical_mesh_name = 'sheep_5004'
config_path = '%s/projects/DensePose/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_CA_finetune_4k.yaml'%(detbase)
weight_path = 'https://dl.fbaipublicfiles.com/densepose/cse/densepose_rcnn_R_50_FPN_soft_animals_CA_finetune_4k/253498611/model_final_6d69b7.pkl'
canonical_mesh_path = 'mesh_material/%s_sph.pkl'%canonical_mesh_name
with open(canonical_mesh_path, 'rb') as f:
dp = pickle.load(f)
self.dp_verts = dp['vertices']
self.dp_faces = dp['faces'].astype(int)
self.dp_verts = torch.Tensor(self.dp_verts).cuda(self.device)
self.dp_faces = torch.Tensor(self.dp_faces).cuda(self.device).long()
self.dp_verts -= self.dp_verts.mean(0)[None]
self.dp_verts /= self.dp_verts.abs().max()
self.dp_verts_unit = self.dp_verts.clone()
self.dp_verts *= (self.near_far[:,1] - self.near_far[:,0]).mean()/2
# visualize
self.dp_vis = self.dp_verts.detach()
self.dp_vmin = self.dp_vis.min(0)[0][None]
self.dp_vis = self.dp_vis - self.dp_vmin
self.dp_vmax = self.dp_vis.max(0)[0][None]
self.dp_vis = self.dp_vis / self.dp_vmax
# save colorvis
if not os.path.isdir('tmp'): os.mkdir('tmp')
trimesh.Trimesh(self.dp_verts_unit.cpu().numpy(),
dp['faces'],
vertex_colors = self.dp_vis.cpu().numpy())\
.export('tmp/%s.obj'%canonical_mesh_name)
if opts.unc_filter:
from utils.cselib import create_cse
# load surface embedding
_, _, mesh_vertex_embeddings = create_cse(config_path,
weight_path)
self.dp_embed = mesh_vertex_embeddings[canonical_mesh_name]
# add densepose mlp
if opts.use_embed:
self.num_feat = 16
# TODO change this to D-8
self.nerf_feat = NeRF(in_channels_xyz=in_channels_xyz, D=5, W=128,
out_channels=self.num_feat,in_channels_dir=0, raw_feat=True, init_beta=1.)
self.nerf_models['nerf_feat'] = self.nerf_feat
if opts.ft_cse:
from nnutils.cse import CSENet
self.csenet = CSENet(ishuman=opts.use_human)
# add uncertainty MLP
if opts.use_unc:
# input, (x,y,t)+code, output, (1)
vid_code_dim=32 # add video-specific code
self.vid_code = embed_net(self.num_vid, vid_code_dim)
#self.nerf_unc = NeRFUnc(in_channels_xyz=in_channels_xyz, D=5, W=128,
self.nerf_unc = NeRFUnc(in_channels_xyz=in_channels_xyz, D=8, W=256,
out_channels=1,in_channels_dir=vid_code_dim, raw_feat=True, init_beta=1.)
self.nerf_models['nerf_unc'] = self.nerf_unc
if opts.warmup_pose_ep>0:
# soft renderer
import soft_renderer as sr
self.mesh_renderer = sr.SoftRenderer(image_size=256, sigma_val=1e-12,
camera_mode='look_at',perspective=False, aggr_func_rgb='hard',
light_mode='vertex', light_intensity_ambient=1.,light_intensity_directionals=0.)
self.resnet_transform = torchvision.transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
def forward_default(self, batch):
opts = self.opts
# get root poses
rtk_all = self.compute_rts()
# change near-far plane for all views
if self.progress>=opts.nf_reset:
rtk_np = rtk_all.clone().detach().cpu().numpy()
valid_rts = self.latest_vars['idk'].astype(bool)
self.latest_vars['rtk'][valid_rts,:3] = rtk_np[valid_rts]
self.near_far.data = get_near_far(
self.near_far.data,
self.latest_vars)
if opts.debug:
torch.cuda.synchronize()
start_time = time.time()
if opts.lineload:
bs = self.set_input(batch, load_line=True)
else:
bs = self.set_input(batch)
if opts.debug:
torch.cuda.synchronize()
print('set input time:%.2f'%(time.time()-start_time))
rtk = self.rtk
kaug= self.kaug
embedid=self.embedid
aux_out={}
# Render
rendered, rand_inds = self.nerf_render(rtk, kaug, embedid,
nsample=opts.nsample, ndepth=opts.ndepth)
if opts.debug:
torch.cuda.synchronize()
print('set input + render time:%.2f'%(time.time()-start_time))
# image and silhouette loss
sil_at_samp = rendered['sil_at_samp']
sil_at_samp_flo = rendered['sil_at_samp_flo']
vis_at_samp = rendered['vis_at_samp']
if opts.loss_flt:
# frame-level rejection of bad segmentations
if opts.lineload:
invalid_idx = loss_filter_line(self.latest_vars['sil_err'],
self.errid.long(),self.frameid.long(),
rendered['sil_loss_samp']*opts.sil_wt,
opts.img_size)
else:
sil_err, invalid_idx = loss_filter(self.latest_vars['sil_err'],
rendered['sil_loss_samp']*opts.sil_wt,
sil_at_samp>-1, scale_factor=10)
self.latest_vars['sil_err'][self.errid.long()] = sil_err
if self.progress > (opts.warmup_steps):
rendered['sil_loss_samp'][invalid_idx] *= 0.
if invalid_idx.sum()>0:
print('%d removed from sil'%(invalid_idx.sum()))
img_loss_samp = opts.img_wt*rendered['img_loss_samp']
if opts.loss_flt:
img_loss_samp[invalid_idx] *= 0
img_loss = img_loss_samp
if opts.rm_novp:
img_loss = img_loss * rendered['sil_coarse'].detach()
img_loss = img_loss[sil_at_samp[...,0]>0].mean() # eval on valid pts
sil_loss_samp = opts.sil_wt*rendered['sil_loss_samp']
sil_loss = sil_loss_samp[vis_at_samp>0].mean()
aux_out['sil_loss'] = sil_loss
aux_out['img_loss'] = img_loss
total_loss = img_loss
total_loss = total_loss + sil_loss
# feat rnd loss
frnd_loss_samp = opts.frnd_wt*rendered['frnd_loss_samp']
if opts.loss_flt:
frnd_loss_samp[invalid_idx] *= 0
if opts.rm_novp:
frnd_loss_samp = frnd_loss_samp * rendered['sil_coarse'].detach()
feat_rnd_loss = frnd_loss_samp[sil_at_samp[...,0]>0].mean() # eval on valid pts
aux_out['feat_rnd_loss'] = feat_rnd_loss
total_loss = total_loss + feat_rnd_loss
# flow loss
if opts.use_corresp:
flo_loss_samp = rendered['flo_loss_samp']
if opts.loss_flt:
flo_loss_samp[invalid_idx] *= 0
if opts.rm_novp:
flo_loss_samp = flo_loss_samp * rendered['sil_coarse'].detach()
# eval on valid pts
flo_loss = flo_loss_samp[sil_at_samp_flo[...,0]].mean() * 2
#flo_loss = flo_loss_samp[sil_at_samp_flo[...,0]].mean()
flo_loss = flo_loss * opts.flow_wt
# warm up by only using flow loss to optimize root pose
if self.loss_select == 0:
total_loss = total_loss*0. + flo_loss
else:
total_loss = total_loss + flo_loss
aux_out['flo_loss'] = flo_loss
# viser loss
if opts.use_embed:
feat_err_samp = rendered['feat_err']* opts.feat_wt
if opts.loss_flt:
feat_err_samp[invalid_idx] *= 0
feat_loss = feat_err_samp
if opts.rm_novp:
feat_loss = feat_loss * rendered['sil_coarse'].detach()
feat_loss = feat_loss[sil_at_samp>0].mean()
total_loss = total_loss + feat_loss
aux_out['feat_loss'] = feat_loss
aux_out['beta_feat'] = self.nerf_feat.beta.clone().detach()[0]
if opts.use_proj:
proj_err_samp = rendered['proj_err']* opts.proj_wt
if opts.loss_flt:
proj_err_samp[invalid_idx] *= 0
proj_loss = proj_err_samp[sil_at_samp>0].mean()
aux_out['proj_loss'] = proj_loss
if opts.freeze_proj:
total_loss = total_loss + proj_loss
## warm up by only using projection loss to optimize bones
warmup_weight = (self.progress - opts.proj_start)/(opts.proj_end-opts.proj_start)
warmup_weight = (warmup_weight - 0.8) * 5 # [-4,1]
warmup_weight = np.clip(warmup_weight, 0,1)
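# the ramp above keeps warmup_weight at 0 for the first 80% of
# [proj_start, proj_end] (so the 10x projection loss dominates below) and then
# blends back to the full objective over the remaining 20%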
if (self.progress > opts.proj_start and \
self.progress < opts.proj_end):
total_loss = total_loss*warmup_weight + \
10*proj_loss*(1-warmup_weight)
else:
# only add it after feature volume is trained well
total_loss = total_loss + proj_loss
# regularization
if 'frame_cyc_dis' in rendered.keys():
# cycle loss
cyc_loss = rendered['frame_cyc_dis'].mean()
total_loss = total_loss + cyc_loss * opts.cyc_wt
#total_loss = total_loss + cyc_loss*0
aux_out['cyc_loss'] = cyc_loss
# globally rigid prior
rig_loss = 0.0001*rendered['frame_rigloss'].mean()
if opts.rig_loss:
total_loss = total_loss + rig_loss
else:
total_loss = total_loss + rig_loss*0
aux_out['rig_loss'] = rig_loss
# elastic energy for se3 field / translation field
if 'elastic_loss' in rendered.keys():
elastic_loss = rendered['elastic_loss'].mean() * 1e-3
total_loss = total_loss + elastic_loss
aux_out['elastic_loss'] = elastic_loss
# regularization of root poses
if opts.root_sm:
root_sm_loss = compute_root_sm_2nd_loss(rtk_all, self.data_offset)
aux_out['root_sm_loss'] = root_sm_loss
total_loss = total_loss + root_sm_loss
if opts.eikonal_wt > 0:
ekl_loss = opts.eikonal_wt * eikonal_loss(self.nerf_coarse, self.embedding_xyz,
rendered['xyz_canonical_vis'], self.latest_vars['obj_bound'])
aux_out['ekl_loss'] = ekl_loss
total_loss = total_loss + ekl_loss
# bone location regularization: pull bones away from empty space (low sdf)
if opts.lbs and opts.bone_loc_reg>0:
bones_rst = self.bones
bones_rst,_ = correct_bones(self, bones_rst)
mesh_rest = self.latest_vars['mesh_rest']
if len(mesh_rest.vertices)>100: # not a degenerate mesh
# issue #4 the following causes error on certain archs for torch110+cu113
# seems to be a conflict between geomloss and pytorch3d
# mesh_rest = pytorch3d.structures.meshes.Meshes(
# verts=torch.Tensor(mesh_rest.vertices[None]),
# faces=torch.Tensor(mesh_rest.faces[None]))
# an ugly workaround
mesh_verts = [torch.Tensor(mesh_rest.vertices)]
mesh_faces = [torch.Tensor(mesh_rest.faces).long()]
try:
mesh_rest = pytorch3d.structures.meshes.Meshes(verts=mesh_verts, faces=mesh_faces)
except:
mesh_rest = pytorch3d.structures.meshes.Meshes(verts=mesh_verts, faces=mesh_faces)
shape_samp = pytorch3d.ops.sample_points_from_meshes(mesh_rest,
1000, return_normals=False)
shape_samp = shape_samp[0].to(self.device)
from geomloss import SamplesLoss
samploss = SamplesLoss(loss="sinkhorn", p=2, blur=.05)
bone_loc_loss = samploss(bones_rst[:,:3]*10, shape_samp*10)
bone_loc_loss = opts.bone_loc_reg*bone_loc_loss
total_loss = total_loss + bone_loc_loss
aux_out['bone_loc_loss'] = bone_loc_loss
# visibility loss
if 'vis_loss' in rendered.keys():
vis_loss = 0.01*rendered['vis_loss'].mean()
total_loss = total_loss + vis_loss
aux_out['visibility_loss'] = vis_loss
# uncertainty MLP inference
if opts.use_unc:
# uncertainty MLP loss: || |img-img_r|*sil - unc_pred ||^2
unc_pred = rendered['unc_pred']
unc_rgb = sil_at_samp[...,0]*img_loss_samp.mean(-1)
unc_feat= (sil_at_samp*feat_err_samp)[...,0]
unc_proj= (sil_at_samp*proj_err_samp)[...,0]
unc_sil = sil_loss_samp[...,0]
#unc_accumulated = unc_feat + unc_proj
#unc_accumulated = unc_feat + unc_proj + unc_rgb*0.1
# unc_accumulated = unc_feat + unc_proj + unc_rgb
unc_accumulated = unc_rgb
# unc_accumulated = unc_rgb + unc_sil
unc_loss = (unc_accumulated.detach() - unc_pred[...,0]).pow(2)
unc_loss = unc_loss.mean()
aux_out['unc_loss'] = unc_loss
total_loss = total_loss + unc_loss
# cse feature tuning
if opts.ft_cse and opts.mt_cse:
csenet_loss = (self.csenet_feats - self.csepre_feats).pow(2).sum(1)
csenet_loss = csenet_loss[self.dp_feats_mask].mean()* 1e-5
if self.progress < opts.mtcse_steps:
total_loss = total_loss*0 + csenet_loss
else:
total_loss = total_loss + csenet_loss
aux_out['csenet_loss'] = csenet_loss
if opts.freeze_coarse:
# compute nerf xyz wt loss
shape_xyz_wt_curr = grab_xyz_weights(self.nerf_coarse)
shape_xyz_wt_loss = 100*compute_xyz_wt_loss(self.shape_xyz_wt,
shape_xyz_wt_curr)
skin_xyz_wt_curr = grab_xyz_weights(self.nerf_skin)
skin_xyz_wt_loss = 100*compute_xyz_wt_loss(self.skin_xyz_wt,
skin_xyz_wt_curr)
feat_xyz_wt_curr = grab_xyz_weights(self.nerf_feat)
feat_xyz_wt_loss = 100*compute_xyz_wt_loss(self.feat_xyz_wt,
feat_xyz_wt_curr)
aux_out['shape_xyz_wt_loss'] = shape_xyz_wt_loss
aux_out['skin_xyz_wt_loss'] = skin_xyz_wt_loss
aux_out['feat_xyz_wt_loss'] = feat_xyz_wt_loss
total_loss = total_loss + shape_xyz_wt_loss + skin_xyz_wt_loss\
+ feat_xyz_wt_loss
# save some variables
if opts.lbs:
aux_out['skin_scale'] = self.skin_aux[0].clone().detach()
aux_out['skin_const'] = self.skin_aux[1].clone().detach()
total_loss = total_loss * opts.total_wt
aux_out['total_loss'] = total_loss
aux_out['beta'] = self.nerf_coarse.beta.clone().detach()[0]
if opts.debug:
torch.cuda.synchronize()
print('set input + render + loss time:%.2f'%(time.time()-start_time))
return total_loss, aux_out
def forward_warmup_rootmlp(self, batch):
"""
the batch variable is never used here
"""
# render ground-truth data
opts = self.opts
device = self.device
# loss
aux_out={}
self.rtk = torch.zeros(self.num_fr,4,4).to(device)
self.frameid = torch.Tensor(range(self.num_fr)).to(device)
self.dataid,_ = fid_reindex(self.frameid, self.num_vid, self.data_offset)
self.convert_root_pose()
rtk_gt = torch.Tensor(self.latest_vars['rtk']).to(device)
_ = rtk_loss(self.rtk, rtk_gt, aux_out)
root_sm_loss = compute_root_sm_2nd_loss(self.rtk, self.data_offset)
total_loss = 0.1*aux_out['rot_loss'] + 0.01*root_sm_loss
aux_out['warmup_root_sm_loss'] = root_sm_loss
del aux_out['trn_loss']
return total_loss, aux_out
def forward_warmup_shape(self, batch):
"""
the batch variable is never used here
"""
# render ground-truth data
opts = self.opts
# loss
shape_factor = 0.1
aux_out={}
total_loss = shape_init_loss(self.dp_verts_unit*shape_factor,self.dp_faces, \
self.nerf_coarse, self.embedding_xyz,
bound_factor=opts.bound_factor * 1.2, use_ellips=opts.init_ellips)
aux_out['shape_init_loss'] = total_loss
return total_loss, aux_out
def forward_warmup(self, batch):
"""
the batch variable is never used here
"""
# render ground-truth data
opts = self.opts
bs_rd = 16
with torch.no_grad():
vertex_color = self.dp_embed
dp_feats_rd, rtk_raw = self.render_dp(self.dp_verts_unit,
self.dp_faces, vertex_color, self.near_far, self.device,
self.mesh_renderer, bs_rd)
aux_out={}
# predict delta se3
root_rts = self.nerf_root_rts(dp_feats_rd)
root_rmat = root_rts[:,0,:9].view(-1,3,3)
root_tmat = root_rts[:,0,9:12]
# construct base se3
rtk = torch.zeros(bs_rd, 4,4).to(self.device)
rtk[:,:3] = self.create_base_se3(bs_rd, self.device)
# compose se3
rmat = rtk[:,:3,:3]
tmat = rtk[:,:3,3]
tmat = tmat + rmat.matmul(root_tmat[...,None])[...,0]
rmat = rmat.matmul(root_rmat)
rtk[:,:3,:3] = rmat
rtk[:,:3,3] = tmat.detach() # do not train translation
# loss
total_loss = rtk_loss(rtk, rtk_raw, aux_out)
aux_out['total_loss'] = total_loss
return total_loss, aux_out
def nerf_render(self, rtk, kaug, embedid, nsample=256, ndepth=128):
opts=self.opts
# render rays
if opts.debug:
torch.cuda.synchronize()
start_time = time.time()
# 2bs,...
Rmat, Tmat, Kinv = self.prepare_ray_cams(rtk, kaug)
bs = Kinv.shape[0]
# for batch:2bs, nsample+x
# for line: 2bs*(nsample+x),1
rand_inds, rays, frameid, errid = self.sample_pxs(bs, nsample, Rmat, Tmat, Kinv,
self.dataid, self.frameid, self.frameid_sub, self.embedid,self.lineid,self.errid,
self.imgs, self.masks, self.vis2d, self.flow, self.occ, self.dp_feats)
self.frameid = frameid # only used in loss filter
self.errid = errid
if opts.debug:
torch.cuda.synchronize()
print('prepare rays time: %.2f'%(time.time()-start_time))
bs_rays = rays['bs'] * rays['nsample'] # over pixels
results=defaultdict(list)
for i in range(0, bs_rays, opts.chunk):
rays_chunk = chunk_rays(rays,i,opts.chunk)
# decide whether to use fine samples
if self.progress > opts.fine_steps:
self.use_fine = True
else:
self.use_fine = False
rendered_chunks = render_rays(self.nerf_models,
self.embeddings,
rays_chunk,
N_samples = ndepth,
use_disp=False,
perturb=opts.perturb,
noise_std=opts.noise_std,
chunk=opts.chunk, # chunk size is effective in val mode
obj_bound=self.latest_vars['obj_bound'],
use_fine=self.use_fine,
img_size=self.img_size,
progress=self.progress,
opts=opts,
)
for k, v in rendered_chunks.items():
results[k] += [v]
for k, v in results.items():
if v[0].dim()==0: # loss
v = torch.stack(v).mean()
else:
v = torch.cat(v, 0)
if self.training:
v = v.view(rays['bs'],rays['nsample'],-1)
else:
v = v.view(bs,self.img_size, self.img_size, -1)
results[k] = v
if opts.debug:
torch.cuda.synchronize()
print('rendering time: %.2f'%(time.time()-start_time))
# viser feature matching
if opts.use_embed:
results['pts_pred'] = (results['pts_pred'] - torch.Tensor(self.vis_min[None]).\
to(self.device)) / torch.Tensor(self.vis_len[None]).to(self.device)
results['pts_exp'] = (results['pts_exp'] - torch.Tensor(self.vis_min[None]).\
to(self.device)) / torch.Tensor(self.vis_len[None]).to(self.device)
results['pts_pred'] = results['pts_pred'].clamp(0,1)
results['pts_exp'] = results['pts_exp'].clamp(0,1)
if opts.debug:
torch.cuda.synchronize()
print('compute flow time: %.2f'%(time.time()-start_time))
return results, rand_inds
@staticmethod
def render_dp(dp_verts_unit, dp_faces, dp_embed, near_far, device,
mesh_renderer, bs):
"""
render a pair of (densepose feature of shape bs x 16 x 112 x 112, se3)
inputs are the densepose surface model and the near-far plane
"""
verts = dp_verts_unit
faces = dp_faces
dp_embed = dp_embed
num_verts, embed_dim = dp_embed.shape
img_size = 256
crop_size = 112
focal = 2
std_rot = 6.28 # rotation std
std_dep = 0.5 # depth std
# scale geometry and translation based on near-far plane
d_mean = near_far.mean()
verts = verts / 3 * d_mean # scale based on mean depth
dep_rand = 1 + np.random.normal(0,std_dep,bs)
dep_rand = torch.Tensor(dep_rand).to(device)
d_obj = d_mean * dep_rand
d_obj = torch.max(d_obj, 1.2*1/3 * d_mean)
# set cameras
rot_rand = np.random.normal(0,std_rot,(bs,3))
rot_rand = torch.Tensor(rot_rand).to(device)
Rmat = transforms.axis_angle_to_matrix(rot_rand)
Tmat = torch.cat([torch.zeros(bs, 2).to(device), d_obj[:,None]],-1)
K = torch.Tensor([[focal,focal,0,0]]).to(device).repeat(bs,1)
# add RTK: [R_3x3|T_3x1]
# [fx,fy,px,py], to the ndc space
Kimg = torch.Tensor([[focal*img_size/2.,focal*img_size/2.,img_size/2.,
img_size/2.]]).to(device).repeat(bs,1)
rtk = torch.zeros(bs,4,4).to(device)
rtk[:,:3,:3] = Rmat
rtk[:,:3, 3] = Tmat
rtk[:,3, :] = Kimg
# repeat mesh
verts = verts[None].repeat(bs,1,1)
faces = faces[None].repeat(bs,1,1)
dp_embed = dp_embed[None].repeat(bs,1,1)
# obj-cam transform
verts = obj_to_cam(verts, Rmat, Tmat)
# perspective projection
verts = pinhole_cam(verts, K)
# render sil+rgb
rendered = []
for i in range(0,embed_dim,3):
dp_chunk = dp_embed[...,i:i+3]
dp_chunk_size = dp_chunk.shape[-1]
if dp_chunk_size<3:
dp_chunk = torch.cat([dp_chunk,
dp_embed[...,:(3-dp_chunk_size)]],-1)
rendered_chunk = render_color(mesh_renderer, verts, faces,
dp_chunk, texture_type='vertex')
rendered_chunk = rendered_chunk[:,:3]
rendered.append(rendered_chunk)
rendered = torch.cat(rendered, 1)
rendered = rendered[:,:embed_dim]
# resize to bounding box
rendered_crops = []
for i in range(bs):
mask = rendered[i].max(0)[0]>0
mask = mask.cpu().numpy()
indices = np.where(mask>0); xid = indices[1]; yid = indices[0]
center = ( (xid.max()+xid.min())//2, (yid.max()+yid.min())//2)
length = ( int((xid.max()-xid.min())*1.//2 ),
int((yid.max()-yid.min())*1.//2 ))
left,top,w,h = [center[0]-length[0], center[1]-length[1],
length[0]*2, length[1]*2]
rendered_crop = torchvision.transforms.functional.resized_crop(\
rendered[i], top,left,h,w,(50,50))
# mask augmentation
rendered_crop = mask_aug(rendered_crop)
rendered_crops.append( rendered_crop)
#cv2.imwrite('%d.png'%i, rendered_crop.std(0).cpu().numpy()*1000)
rendered_crops = torch.stack(rendered_crops,0)
rendered_crops = F.interpolate(rendered_crops, (crop_size, crop_size),
mode='bilinear')
rendered_crops = F.normalize(rendered_crops, 2,1)
return rendered_crops, rtk
@staticmethod
def create_base_se3(bs, device):
"""
create a base se3 based on near-far plane
"""
rt = torch.zeros(bs,3,4).to(device)
rt[:,:3,:3] = torch.eye(3)[None].repeat(bs,1,1).to(device)
rt[:,:2,3] = 0.
rt[:,2,3] = 0.3
return rt
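# usage sketch (illustrative; assumes an instance of this model class):
# rt = self.create_base_se3(2, 'cpu') # (2,3,4) with rt[:,:3,:3]=I and rt[:,2,3]=0.3,
# i.e. the object is placed 0.3 units in front of an identity camera.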
@staticmethod
def prepare_ray_cams(rtk, kaug):
"""
in: rtk, kaug
out: Rmat, Tmat, Kinv
"""
Rmat = rtk[:,:3,:3]
Tmat = rtk[:,:3,3]
Kmat = K2mat(rtk[:,3,:])
Kaug = K2inv(kaug) # p = Kaug Kmat P
Kinv = Kmatinv(Kaug.matmul(Kmat))
return Rmat, Tmat, Kinv
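# note (not in the original file): Kinv is the inverse of the augmented intrinsics
# Kaug @ Kmat, so homogeneous pixels [x, y, 1] map to normalized camera rays via
# xyz = [x, y, 1] @ Kinv^T, which is how raycast() consumes it.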
def sample_pxs(self, bs, nsample, Rmat, Tmat, Kinv,
dataid, frameid, frameid_sub, embedid, lineid,errid,
imgs, masks, vis2d, flow, occ, dp_feats):
"""
make sure attributes of self are not modified inside this function
xys: bs, nsample, 2
rand_inds: bs, nsample
"""
opts = self.opts
Kinv_in=Kinv.clone()
dataid_in=dataid.clone()
frameid_sub_in = frameid_sub.clone()
# sample 1x points, sample 4x points for further selection
nsample_a = 4*nsample
rand_inds, xys = sample_xy(self.img_size, bs, nsample+nsample_a, self.device,
return_all= not(self.training), lineid=lineid)
if self.training and opts.use_unc and \
self.progress >= (opts.warmup_steps):
is_active=True
nsample_s = int(opts.nactive * nsample) # active
nsample = int(nsample*(1-opts.nactive)) # uniform
else:
is_active=False
if self.training:
rand_inds_a, xys_a = rand_inds[:,-nsample_a:].clone(), xys[:,-nsample_a:].clone()
rand_inds, xys = rand_inds[:,:nsample].clone(), xys[:,:nsample].clone()
if opts.lineload:
# expand frameid, Rmat,Tmat, Kinv
frameid_a= frameid[:,None].repeat(1,nsample_a)
frameid_sub_a=frameid_sub[:,None].repeat(1,nsample_a)
dataid_a= dataid[:,None].repeat(1,nsample_a)
errid_a= errid[:,None].repeat(1,nsample_a)
Rmat_a = Rmat[:,None].repeat(1,nsample_a,1,1)
Tmat_a = Tmat[:,None].repeat(1,nsample_a,1)
Kinv_a = Kinv[:,None].repeat(1,nsample_a,1,1)
# expand
frameid = frameid[:,None].repeat(1,nsample)
frameid_sub = frameid_sub[:,None].repeat(1,nsample)
dataid = dataid[:,None].repeat(1,nsample)
errid = errid[:,None].repeat(1,nsample)
Rmat = Rmat[:,None].repeat(1,nsample,1,1)
Tmat = Tmat[:,None].repeat(1,nsample,1)
Kinv = Kinv[:,None].repeat(1,nsample,1,1)
batch_map = torch.Tensor(range(bs)).to(self.device)[:,None].long()
batch_map_a = batch_map.repeat(1,nsample_a)
batch_map = batch_map.repeat(1,nsample)
# importance sampling
if is_active:
with torch.no_grad():
# run uncertainty estimation
ts = frameid_sub_in.to(self.device) / self.max_ts * 2 -1
ts = ts[:,None,None].repeat(1,nsample_a,1)
dataid_in = dataid_in.long().to(self.device)
vid_code = self.vid_code(dataid_in)[:,None].repeat(1,nsample_a,1)
# convert to normalized coords
xysn = torch.cat([xys_a, torch.ones_like(xys_a[...,:1])],2)
xysn = xysn.matmul(Kinv_in.permute(0,2,1))[...,:2]
xyt = torch.cat([xysn, ts],-1)
xyt_embedded = self.embedding_xyz(xyt)
xyt_code = torch.cat([xyt_embedded, vid_code],-1)
unc_pred = self.nerf_unc(xyt_code)[...,0]
# preprocess to format 2,bs,w
if opts.lineload:
unc_pred = unc_pred.view(2,-1)
xys = xys.view(2,-1,2)
xys_a = xys_a.view(2,-1,2)
rand_inds = rand_inds.view(2,-1)
rand_inds_a = rand_inds_a.view(2,-1)
frameid = frameid.view(2,-1)
frameid_a = frameid_a.view(2,-1)
frameid_sub = frameid_sub.view(2,-1)
frameid_sub_a = frameid_sub_a.view(2,-1)
dataid = dataid.view(2,-1)
dataid_a = dataid_a.view(2,-1)
errid = errid.view(2,-1)
errid_a = errid_a.view(2,-1)
batch_map = batch_map.view(2,-1)
batch_map_a = batch_map_a.view(2,-1)
Rmat = Rmat.view(2,-1,3,3)
Rmat_a = Rmat_a.view(2,-1,3,3)
Tmat = Tmat.view(2,-1,3)
Tmat_a = Tmat_a.view(2,-1,3)
Kinv = Kinv.view(2,-1,3,3)
Kinv_a = Kinv_a.view(2,-1,3,3)
nsample_s = nsample_s * bs//2
bs=2
# merge top nsamples
topk_samp = unc_pred.topk(nsample_s,dim=-1)[1] # bs,nsamp
# use the first imgs (in a pair) sampled index
xys_a = torch.stack( [xys_a[i][topk_samp[0]] for i in range(bs)],0)
rand_inds_a = torch.stack( [rand_inds_a[i][topk_samp[0]] for i in range(bs)],0)
frameid_a = torch.stack( [frameid_a[i][topk_samp[0]] for i in range(bs)],0)
frameid_sub_a=torch.stack( [frameid_sub_a[i][topk_samp[0]] for i in range(bs)],0)
dataid_a = torch.stack( [dataid_a[i][topk_samp[0]] for i in range(bs)],0)
errid_a = torch.stack( [errid_a[i][topk_samp[0]] for i in range(bs)],0)
batch_map_a = torch.stack( [batch_map_a[i][topk_samp[0]] for i in range(bs)],0)
Rmat_a = torch.stack( [Rmat_a[i][topk_samp[0]] for i in range(bs)],0)
Tmat_a = torch.stack( [Tmat_a[i][topk_samp[0]] for i in range(bs)],0)
Kinv_a = torch.stack( [Kinv_a[i][topk_samp[0]] for i in range(bs)],0)
xys = torch.cat([xys,xys_a],1)
rand_inds = torch.cat([rand_inds,rand_inds_a],1)
frameid = torch.cat([frameid,frameid_a],1)
frameid_sub=torch.cat([frameid_sub,frameid_sub_a],1)
dataid = torch.cat([dataid,dataid_a],1)
errid = torch.cat([errid,errid_a],1)
batch_map = torch.cat([batch_map,batch_map_a],1)
Rmat = torch.cat([Rmat,Rmat_a],1)
Tmat = torch.cat([Tmat,Tmat_a],1)
Kinv = torch.cat([Kinv,Kinv_a],1)
else:
topk_samp = unc_pred.topk(nsample_s,dim=-1)[1] # bs,nsamp
xys_a = torch.stack( [xys_a[i][topk_samp[i]] for i in range(bs)],0)
rand_inds_a = torch.stack([rand_inds_a[i][topk_samp[i]] for i in range(bs)],0)
xys = torch.cat([xys,xys_a],1)
rand_inds = torch.cat([rand_inds,rand_inds_a],1)
# for line: reshape to 2*bs, 1,...
if self.training and opts.lineload:
frameid = frameid.view(-1)
frameid_sub = frameid_sub.view(-1)
dataid = dataid.view(-1)
errid = errid.view(-1)
batch_map = batch_map.view(-1)
xys = xys.view(-1,1,2)
rand_inds = rand_inds.view(-1,1)
Rmat = Rmat.view(-1,3,3)
Tmat = Tmat.view(-1,3)
Kinv = Kinv.view(-1,3,3)
near_far = self.near_far[frameid.long()]
rays = raycast(xys, Rmat, Tmat, Kinv, near_far)
# need to reshape dataid, frameid_sub, embedid #TODO embedid equiv to frameid
self.update_rays(rays, bs>1, dataid, frameid_sub, frameid, xys, Kinv)
if 'bones' in self.nerf_models.keys():
# update delta rts fw
self.update_delta_rts(rays)
# for line: 2bs*nsamp,1
# for batch:2bs,nsamp
#TODO reshape imgs, masks, etc.
if self.training and opts.lineload:
self.obs_to_rays_line(rays, rand_inds, imgs, masks, vis2d, flow, occ,
dp_feats, batch_map)
else:
self.obs_to_rays(rays, rand_inds, imgs, masks, vis2d, flow, occ, dp_feats)
# TODO visualize samples
#pdb.set_trace()
#self.imgs_samp = []
#for i in range(bs):
# self.imgs_samp.append(draw_pts(self.imgs[i], xys_a[i]))
#self.imgs_samp = torch.stack(self.imgs_samp,0)
return rand_inds, rays, frameid, errid
def obs_to_rays_line(self, rays, rand_inds, imgs, masks, vis2d,
flow, occ, dp_feats,batch_map):
"""
convert imgs, masks, flow, occ, dp_feats to rays
rand_map: map pixel index to original batch index
rand_inds: bs,
"""
opts = self.opts
rays['img_at_samp']=torch.gather(imgs[batch_map][...,0], 2,
rand_inds[:,None].repeat(1,3,1))[:,None][...,0]
rays['sil_at_samp']=torch.gather(masks[batch_map][...,0], 2,
rand_inds[:,None].repeat(1,1,1))[:,None][...,0]
rays['vis_at_samp']=torch.gather(vis2d[batch_map][...,0], 2,
rand_inds[:,None].repeat(1,1,1))[:,None][...,0]
rays['flo_at_samp']=torch.gather(flow[batch_map][...,0], 2,
rand_inds[:,None].repeat(1,2,1))[:,None][...,0]
rays['cfd_at_samp']=torch.gather(occ[batch_map][...,0], 2,
rand_inds[:,None].repeat(1,1,1))[:,None][...,0]
if opts.use_embed:
rays['feats_at_samp']=torch.gather(dp_feats[batch_map][...,0], 2,
rand_inds[:,None].repeat(1,16,1))[:,None][...,0]
def obs_to_rays(self, rays, rand_inds, imgs, masks, vis2d,
flow, occ, dp_feats):
"""
convert imgs, masks, flow, occ, dp_feats to rays
"""
opts = self.opts
bs = imgs.shape[0]
rays['img_at_samp'] = torch.stack([imgs[i].view(3,-1).T[rand_inds[i]]\
for i in range(bs)],0) # bs,ns,3
rays['sil_at_samp'] = torch.stack([masks[i].view(-1,1)[rand_inds[i]]\
for i in range(bs)],0) # bs,ns,1
rays['vis_at_samp'] = torch.stack([vis2d[i].view(-1,1)[rand_inds[i]]\
for i in range(bs)],0) # bs,ns,1
rays['flo_at_samp'] = torch.stack([flow[i].view(2,-1).T[rand_inds[i]]\
for i in range(bs)],0) # bs,ns,2
rays['cfd_at_samp'] = torch.stack([occ[i].view(-1,1)[rand_inds[i]]\
for i in range(bs)],0) # bs,ns,1
if opts.use_embed:
feats_at_samp = [dp_feats[i].view(16,-1).T\
[rand_inds[i].long()] for i in range(bs)]
feats_at_samp = torch.stack(feats_at_samp,0) # bs,ns,num_feat
rays['feats_at_samp'] = feats_at_samp
def update_delta_rts(self, rays):
"""
change bone_rts_fw to delta fw
"""
opts = self.opts
bones_rst, bone_rts_rst = correct_bones(self, self.nerf_models['bones'])
self.nerf_models['bones_rst']=bones_rst
# delta rts
rays['bone_rts'] = correct_rest_pose(opts, rays['bone_rts'], bone_rts_rst)
if 'bone_rts_target' in rays.keys():
rays['bone_rts_target'] = correct_rest_pose(opts,
rays['bone_rts_target'], bone_rts_rst)
if 'bone_rts_dentrg' in rays.keys():
rays['bone_rts_dentrg'] = correct_rest_pose(opts,
rays['bone_rts_dentrg'], bone_rts_rst)
def update_rays(self, rays, is_pair, dataid, frameid_sub, embedid, xys, Kinv):
"""
"""
opts = self.opts
# append target frame rtk
embedid = embedid.long().to(self.device)
if is_pair:
rtk_vec = rays['rtk_vec'] # bs, N, 21
rtk_vec_target = rtk_vec.view(2,-1).flip(0)
rays['rtk_vec_target'] = rtk_vec_target.reshape(rays['rtk_vec'].shape)
embedid_target = embedid.view(2,-1).flip(0).reshape(-1)
if opts.flowbw:
time_embedded_target = self.pose_code(embedid_target)[:,None]
rays['time_embedded_target'] = time_embedded_target.repeat(1,
rays['nsample'],1)
elif opts.lbs and self.num_bone_used>0:
bone_rts_target = self.nerf_body_rts(embedid_target)
rays['bone_rts_target'] = bone_rts_target.repeat(1,rays['nsample'],1)
# pass time-dependent inputs
time_embedded = self.pose_code(embedid)[:,None]
rays['time_embedded'] = time_embedded.repeat(1,rays['nsample'],1)
if opts.lbs and self.num_bone_used>0:
bone_rts = self.nerf_body_rts(embedid)
rays['bone_rts'] = bone_rts.repeat(1,rays['nsample'],1)
if opts.env_code:
rays['env_code'] = self.env_code(embedid)[:,None]
rays['env_code'] = rays['env_code'].repeat(1,rays['nsample'],1)
#rays['env_code'] = self.env_code(dataid.long().to(self.device))
#rays['env_code'] = rays['env_code'][:,None].repeat(1,rays['nsample'],1)
if opts.use_unc:
ts = frameid_sub.to(self.device) / self.max_ts * 2 -1
ts = ts[:,None,None].repeat(1,rays['nsample'],1)
rays['ts'] = ts
dataid = dataid.long().to(self.device)
vid_code = self.vid_code(dataid)[:,None].repeat(1,rays['nsample'],1)
rays['vid_code'] = vid_code
xysn = torch.cat([xys, torch.ones_like(xys[...,:1])],2)
xysn = xysn.matmul(Kinv.permute(0,2,1))[...,:2]
rays['xysn'] = xysn
def convert_line_input(self, batch):
device = self.device
opts = self.opts
# convert to float
for k,v in batch.items():
batch[k] = batch[k].float()
bs=batch['dataid'].shape[0]
self.imgs = batch['img'] .view(bs,2,3, -1).permute(1,0,2,3).reshape(bs*2,3, -1,1).to(device)
self.masks = batch['mask'] .view(bs,2,1, -1).permute(1,0,2,3).reshape(bs*2,1, -1,1).to(device)
self.vis2d = batch['vis2d'] .view(bs,2,1, -1).permute(1,0,2,3).reshape(bs*2,1, -1,1).to(device)
self.flow = batch['flow'] .view(bs,2,2, -1).permute(1,0,2,3).reshape(bs*2,2, -1,1).to(device)
self.occ = batch['occ'] .view(bs,2,1, -1).permute(1,0,2,3).reshape(bs*2,1, -1,1).to(device)
self.dps = batch['dp'] .view(bs,2,1, -1).permute(1,0,2,3).reshape(bs*2,1, -1,1).to(device)
self.dp_feats = batch['dp_feat_rsmp'].view(bs,2,16,-1).permute(1,0,2,3).reshape(bs*2,16,-1,1).to(device)
self.dp_feats = F.normalize(self.dp_feats, 2,1)
self.rtk = batch['rtk'] .view(bs,-1,4,4).permute(1,0,2,3).reshape(-1,4,4) .to(device)
self.kaug = batch['kaug'] .view(bs,-1,4).permute(1,0,2).reshape(-1,4) .to(device)
self.frameid = batch['frameid'] .view(bs,-1).permute(1,0).reshape(-1).cpu()
self.dataid = batch['dataid'] .view(bs,-1).permute(1,0).reshape(-1).cpu()
self.lineid = batch['lineid'] .view(bs,-1).permute(1,0).reshape(-1).to(device)
self.frameid_sub = self.frameid.clone() # id within a video
self.embedid = self.frameid + self.data_offset[self.dataid.long()]
self.frameid = self.frameid + self.data_offset[self.dataid.long()]
self.errid = self.frameid*opts.img_size + self.lineid.cpu() # for err filter
self.rt_raw = self.rtk.clone()[:,:3]
# process silhouette
self.masks = (self.masks*self.vis2d)>0
self.masks = self.masks.float()
def convert_batch_input(self, batch):
device = self.device
opts = self.opts
if batch['img'].dim()==4:
bs,_,h,w = batch['img'].shape
else:
bs,_,_,h,w = batch['img'].shape
# convert to float
for k,v in batch.items():
batch[k] = batch[k].float()
img_tensor = batch['img'].view(bs,-1,3,h,w).permute(1,0,2,3,4).reshape(-1,3,h,w)
input_img_tensor = img_tensor.clone()
for b in range(input_img_tensor.size(0)):
input_img_tensor[b] = self.resnet_transform(input_img_tensor[b])
self.input_imgs = input_img_tensor.to(device)
self.imgs = img_tensor.to(device)
self.masks = batch['mask'] .view(bs,-1,h,w).permute(1,0,2,3).reshape(-1,h,w) .to(device)
self.vis2d = batch['vis2d'] .view(bs,-1,h,w).permute(1,0,2,3).reshape(-1,h,w) .to(device)
self.dps = batch['dp'] .view(bs,-1,h,w).permute(1,0,2,3).reshape(-1,h,w) .to(device)
dpfd = 16
dpfs = 112
self.dp_feats = batch['dp_feat'] .view(bs,-1,dpfd,dpfs,dpfs).permute(1,0,2,3,4).reshape(-1,dpfd,dpfs,dpfs).to(device)
self.dp_bbox = batch['dp_bbox'] .view(bs,-1,4).permute(1,0,2).reshape(-1,4) .to(device)
if opts.use_embed and opts.ft_cse and (not self.is_warmup_pose):
self.dp_feats_mask = self.dp_feats.abs().sum(1)>0
self.csepre_feats = self.dp_feats.clone()
# unnormalized features
self.csenet_feats, self.dps = self.csenet(self.imgs, self.masks)
# for visualization
self.dps = self.dps * self.dp_feats_mask.float()
if self.progress > opts.ftcse_steps:
self.dp_feats = self.csenet_feats
else:
self.dp_feats = self.csenet_feats.detach()
self.dp_feats = F.normalize(self.dp_feats, 2,1)
self.rtk = batch['rtk'] .view(bs,-1,4,4).permute(1,0,2,3).reshape(-1,4,4) .to(device)
self.kaug = batch['kaug'] .view(bs,-1,4).permute(1,0,2).reshape(-1,4) .to(device)
self.frameid = batch['frameid'] .view(bs,-1).permute(1,0).reshape(-1).cpu()
self.dataid = batch['dataid'] .view(bs,-1).permute(1,0).reshape(-1).cpu()
self.frameid_sub = self.frameid.clone() # id within a video
self.embedid = self.frameid + self.data_offset[self.dataid.long()]
self.frameid = self.frameid + self.data_offset[self.dataid.long()]
self.errid = self.frameid # for err filter
self.rt_raw = self.rtk.clone()[:,:3]
# process silhouette
self.masks = (self.masks*self.vis2d)>0
self.masks = self.masks.float()
self.flow = batch['flow'].view(bs,-1,2,h,w).permute(1,0,2,3,4).reshape(-1,2,h,w).to(device)
self.occ = batch['occ'].view(bs,-1,h,w).permute(1,0,2,3).reshape(-1,h,w) .to(device)
self.lineid = None
def convert_root_pose(self):
"""
assumes self.{rtk, frameid, dp_feats, dps, masks, kaug} have been set
updates self.rtk (root pose and intrinsics row) in place
"""
opts = self.opts
bs = self.rtk.shape[0]
device = self.device
# scale initial poses
if self.use_cam:
self.rtk[:,:3,3] = self.rtk[:,:3,3] / self.obj_scale
else:
self.rtk[:,:3] = self.create_base_se3(bs, device)
# compute delta pose
if self.opts.root_opt:
if self.root_basis == 'cnn':
frame_code = self.dp_feats
elif self.root_basis == 'mlp' or self.root_basis == 'exp'\
or self.root_basis == 'expmlp':
frame_code = self.frameid.long().to(device)
else: print('unknown root_basis: %s'%self.root_basis); exit()
root_rts = self.nerf_root_rts(frame_code)
self.rtk = self.refine_rt(self.rtk, root_rts)
self.rtk[:,3,:] = self.ks_param[self.dataid.long()] #TODO kmat
@staticmethod
def refine_rt(rt_raw, root_rts):
"""
input: rt_raw representing the initial root poses (after scaling)
input: root_rts representing delta se3
output: current estimate of rtks for all frames
"""
rt_raw = rt_raw.clone()
root_rmat = root_rts[:,0,:9].view(-1,3,3)
root_tmat = root_rts[:,0,9:12]
rmat = rt_raw[:,:3,:3].clone()
tmat = rt_raw[:,:3,3].clone()
tmat = tmat + rmat.matmul(root_tmat[...,None])[...,0]
rmat = rmat.matmul(root_rmat)
rt_raw[:,:3,:3] = rmat
rt_raw[:,:3,3] = tmat
return rt_raw
def compute_rts(self):
"""
Assumptions
- use_cam
- use mlp or exp root pose
input: rt_raw representing the initial root poses
output: current estimate of rtks for all frames
"""
device = self.device
opts = self.opts
frameid = torch.Tensor(range(self.num_fr)).to(device).long()
if self.use_cam:
# scale initial poses
rt_raw = torch.Tensor(self.latest_vars['rt_raw']).to(device)
rt_raw[:,:3,3] = rt_raw[:,:3,3] / self.obj_scale
else:
rt_raw = self.create_base_se3(self.num_fr, device)
# compute mlp rts
if opts.root_opt:
if self.root_basis == 'mlp' or self.root_basis == 'exp'\
or self.root_basis == 'expmlp':
root_rts = self.nerf_root_rts(frameid)
else: print('unknown root_basis: %s'%self.root_basis); exit()
rt_raw = self.refine_rt(rt_raw, root_rts)
return rt_raw
def save_latest_vars(self):
"""
in: self.
{rtk, kaug, frameid, vis2d}
out: self.
{latest_vars}
these are only used in get_near_far_plane and compute_visibility
"""
rtk = self.rtk.clone().detach()
Kmat = K2mat(rtk[:,3])
Kaug = K2inv(self.kaug) # p = Kaug Kmat P
rtk[:,3] = mat2K(Kaug.matmul(Kmat))
# TODO don't want to save k at eval time (due to different intrinsics)
self.latest_vars['rtk'][self.frameid.long()] = rtk.cpu().numpy()
self.latest_vars['rt_raw'][self.frameid.long()] = self.rt_raw.cpu().numpy()
self.latest_vars['idk'][self.frameid.long()] = 1
def set_input(self, batch, load_line=False):
device = self.device
opts = self.opts
if load_line:
self.convert_line_input(batch)
else:
self.convert_batch_input(batch)
bs = self.imgs.shape[0]
self.convert_root_pose()
self.save_latest_vars()
if opts.lineload and self.training:
self.dp_feats = self.dp_feats
else:
self.dp_feats = resample_dp(self.dp_feats,
self.dp_bbox, self.kaug, self.img_size)
if self.training and self.opts.anneal_freq:
alpha = self.num_freqs * \
self.progress / (opts.warmup_steps)
#if alpha>self.alpha.data[0]:
self.alpha.data[0] = min(max(6, alpha),self.num_freqs) # alpha from 6 to 10
self.embedding_xyz.alpha = self.alpha.data[0]
self.embedding_dir.alpha = self.alpha.data[0]
return bs
|
banmo-main
|
nnutils/banmo.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import cv2, pdb, os, sys, numpy as np, torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
curr_dir = os.path.abspath(os.getcwd())
sys.path.insert(0, curr_dir)
detbase = './third_party/detectron2/'
sys.path.insert(0, '%s/projects/DensePose/' % detbase)
from detectron2.structures import Boxes
from detectron2.config import get_cfg
from detectron2.modeling import build_model
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.structures import Boxes
from densepose import add_densepose_config
from densepose.modeling.cse.utils import squared_euclidean_distance_matrix
from utils.cselib import create_cse, run_cse
class CSENet(nn.Module):
def __init__(self, ishuman):
super(CSENet, self).__init__()
if ishuman:
config_path = '%s/projects/DensePose/configs/cse/densepose_rcnn_R_101_FPN_DL_soft_s1x.yaml' % detbase
weight_path = 'https://dl.fbaipublicfiles.com/densepose/cse/densepose_rcnn_R_101_FPN_DL_soft_s1x/250713061/model_final_1d3314.pkl'
self.mesh_name = 'smpl_27554'
else:
config_path = '%s/projects/DensePose/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_CA_finetune_4k.yaml' % detbase
weight_path = 'https://dl.fbaipublicfiles.com/densepose/cse/densepose_rcnn_R_50_FPN_soft_animals_CA_finetune_4k/253498611/model_final_6d69b7.pkl'
self.mesh_name = 'sheep_5004'
self.net, self.embedder, self.mesh_vertex_embeddings = create_cse(config_path, weight_path)
def forward(self, img, msk):
bs = img.shape[0]
h = img.shape[2]
device = img.device
img = img * 255
img = torch.flip(img, [1])
pad = h
img = F.pad(img, (pad, pad, pad, pad))
msk = F.pad(msk, (pad, pad, pad, pad))
img = F.interpolate(img, (384, 384),mode='bilinear')
msk = F.interpolate(msk[:,None], (384, 384),mode='nearest')[:,0]
bboxes = []
for i in range(bs):
indices = torch.where(msk[i]>0);
xid = indices[1]; yid = indices[0]
bbox = [xid.min(), yid.min(),
xid.max(), yid.max()]
bbox = torch.Tensor([bbox]).to(device)
bbox = Boxes(bbox)
bboxes.append(bbox)
#dps = []
#feats = []
#for i in range(bs):
# img_sub = img[i].permute(1, 2, 0).cpu().numpy()
# msk_sub = msk[i].cpu().numpy()
# # put into a bigger image: out size 112/512
# dp, img_bgr, feat, feat_norm, bbox = run_cse((self.net), (self.embedder), (self.mesh_vertex_embeddings),
# img_sub,
# msk_sub,
# mesh_name=(self.mesh_name))
# pdb.set_trace()
# dp = torch.Tensor(dp).to(device)
# feat = torch.Tensor(feat).to(device)
# dps.append(dp)
# feats.append(feat)
#dps = torch.stack(dps, 0)
#feats = torch.stack(feats, 0)
#pdb.set_trace()
self.net.eval()
with torch.no_grad():
img = torch.stack([(x - self.net.pixel_mean) / self.net.pixel_std\
for x in img])
features = self.net.backbone(img)
features = [features[f] for f in self.net.roi_heads.in_features]
features = [self.net.roi_heads.decoder(features)]
features_dp = self.net.roi_heads.densepose_pooler(features, bboxes).detach()
densepose_head_outputs = self.net.roi_heads.densepose_head(features_dp)
densepose_predictor_outputs = self.net.roi_heads.densepose_predictor(densepose_head_outputs)
feats = densepose_predictor_outputs.embedding # (xxx,112,112)
with torch.no_grad():
dps = []
for i in range(bs):
assign_mat = squared_euclidean_distance_matrix(feats[i].view(16,-1).T,
self.mesh_vertex_embeddings[self.mesh_name])
dp = assign_mat.argmin(dim=1).view(112,112)
dps.append(dp)
dps = torch.stack(dps,0)
return feats, dps
|
banmo-main
|
nnutils/cse.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import torch
def image_grid(img, row, col):
"""
img: N,h,w,x
collage: 1,.., x
"""
bs,h,w,c=img.shape
device = img.device
collage = torch.zeros(h*row, w*col, c).to(device)
for i in range(row):
for j in range(col):
collage[i*h:(i+1)*h,j*w:(j+1)*w] = img[i*col+j]
return collage
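# usage sketch (illustrative): tile 6 images of shape (h,w,c) into a 2x3 collage
# imgs = torch.rand(6, 64, 64, 3)
# collage = image_grid(imgs, 2, 3) # -> (128, 192, 3)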
|
banmo-main
|
nnutils/vis_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import pdb
import time
import cv2
import numpy as np
import trimesh
from pytorch3d import transforms
import torch
import torch.nn as nn
import torch.nn.functional as F
from scipy.spatial.transform import Rotation as R
import sys
sys.path.insert(0, 'third_party')
from ext_utils.flowlib import warp_flow, cat_imgflo
def evaluate_mlp(model, xyz_embedded, embed_xyz=None, dir_embedded=None,
chunk=32*1024,
xyz=None,
code=None, sigma_only=False):
"""
embed_xyz: embedding function
chunk is the point-level chunk divided by number of bins
"""
B,nbins,_ = xyz_embedded.shape
out_chunks = []
for i in range(0, B, chunk):
embedded = xyz_embedded[i:i+chunk]
if embed_xyz is not None:
embedded = embed_xyz(embedded)
if dir_embedded is not None:
embedded = torch.cat([embedded,
dir_embedded[i:i+chunk]], -1)
if code is not None:
code_chunk = code[i:i+chunk]
if code_chunk.dim() == 2:
code_chunk = code_chunk[:,None]
code_chunk = code_chunk.repeat(1,nbins,1)
embedded = torch.cat([embedded,code_chunk], -1)
if xyz is not None:
xyz_chunk = xyz[i:i+chunk]
else: xyz_chunk = None
out_chunks += [model(embedded, sigma_only=sigma_only, xyz=xyz_chunk)]
out = torch.cat(out_chunks, 0)
return out
def bone_transform(bones_in, rts, is_vec=False):
"""
bones_in: 1,B,10 - B gaussian ellipsoids of bone coordinates
rts: ...,B,3,4 - B rigid transforms
rts are applied to bone coordinate transforms (left multiply)
is_vec: whether rts are stored as r1...9,t1...3 vector form
"""
B = bones_in.shape[-2]
bones = bones_in.view(-1,B,10).clone()
if is_vec:
rts = rts.view(-1,B,12)
else:
rts = rts.view(-1,B,3,4)
bs = rts.shape[0]
center = bones[:,:,:3]
orient = bones[:,:,3:7] # real first
scale = bones[:,:,7:10]
if is_vec:
Rmat = rts[:,:,:9].view(-1,B,3,3)
Tmat = rts[:,:,9:12].view(-1,B,3,1)
else:
Rmat = rts[:,:,:3,:3]
Tmat = rts[:,:,:3,3:4]
# move bone coordinates (left multiply)
center = Rmat.matmul(center[...,None])[...,0]+Tmat[...,0]
Rquat = transforms.matrix_to_quaternion(Rmat)
orient = transforms.quaternion_multiply(Rquat, orient)
scale = scale.repeat(bs,1,1)
bones = torch.cat([center,orient,scale],-1)
return bones
def rtmat_invert(Rmat, Tmat):
"""
Rmat: ...,3,3 - rotations
Tmat: ...,3 - translations
"""
rts = torch.cat([Rmat, Tmat[...,None]],-1)
rts_i = rts_invert(rts)
Rmat_i = rts_i[...,:3,:3] # bs, B, 3,3
Tmat_i = rts_i[...,:3,3]
return Rmat_i, Tmat_i
def rtk_invert(rtk_in, B):
"""
rtk_in: ... (rot 1...9, trans 1...3)
"""
rtk_shape = rtk_in.shape
rtk_in = rtk_in.view(-1,B,12)# B,12
rmat=rtk_in[:,:,:9]
rmat=rmat.view(-1,B,3,3)
tmat= rtk_in[:,:,9:12]
rts_fw = torch.cat([rmat,tmat[...,None]],-1)
rts_fw = rts_fw.view(-1,B,3,4)
rts_bw = rts_invert(rts_fw)
rvec = rts_bw[...,:3,:3].reshape(-1,9)
tvec = rts_bw[...,:3,3] .reshape(-1,3)
rtk = torch.cat([rvec,tvec],-1).view(rtk_shape)
return rtk
def rts_invert(rts_in):
"""
rts: ...,3,4 - B rigid transforms
"""
rts = rts_in.view(-1,3,4).clone()
Rmat = rts[:,:3,:3] # bs, B, 3,3
Tmat = rts[:,:3,3:]
Rmat_i=Rmat.permute(0,2,1)
Tmat_i=-Rmat_i.matmul(Tmat)
rts_i = torch.cat([Rmat_i, Tmat_i],-1)
rts_i = rts_i.view(rts_in.shape)
return rts_i
def rtk_to_4x4(rtk):
"""
rtk: ...,12
"""
device = rtk.device
bs = rtk.shape[0]
zero_one = torch.Tensor([[0,0,0,1]]).to(device).repeat(bs,1)
rmat=rtk[:,:9]
rmat=rmat.view(-1,3,3)
tmat=rtk[:,9:12]
rts = torch.cat([rmat,tmat[...,None]],-1)
rts = torch.cat([rts,zero_one[:,None]],1)
return rts
def rtk_compose(rtk1, rtk2):
"""
rtk ...
"""
rtk_shape = rtk1.shape
rtk1 = rtk1.view(-1,12)# ...,12
rtk2 = rtk2.view(-1,12)# ...,12
rts1 = rtk_to_4x4(rtk1)
rts2 = rtk_to_4x4(rtk2)
rts = rts1.matmul(rts2)
rvec = rts[...,:3,:3].reshape(-1,9)
tvec = rts[...,:3,3].reshape(-1,3)
rtk = torch.cat([rvec,tvec],-1).view(rtk_shape)
return rtk
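# consistency sketch (illustrative, B=1): composing a transform with its inverse
# recovers the identity transform up to numerical error
# rtk = torch.cat([torch.eye(3).view(1,9), torch.Tensor([[0.1, 0.2, 0.3]])], -1) # 1,12
# rtk_id = rtk_compose(rtk, rtk_invert(rtk, 1)) # ~ [I | 0]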
def vec_to_sim3(vec):
"""
vec: ...,10
center: ...,3
orient: ...,3,3
scale: ...,3
"""
center = vec[...,:3]
orient = vec[...,3:7] # real first
orient = F.normalize(orient, 2,-1)
orient = transforms.quaternion_to_matrix(orient) # real first
scale = vec[...,7:10].exp()
return center, orient, scale
def gauss_mlp_skinning(xyz, embedding_xyz, bones,
pose_code, nerf_skin, skin_aux=None):
"""
xyz: N_rays, ndepth, 3
bones: ... nbones, 10
pose_code: ...,1, nchannel
"""
N_rays = xyz.shape[0]
#TODO hacky way to make code compatible with noqueryfw
if pose_code.dim() == 2 and pose_code.shape[0]!=N_rays:
pose_code = pose_code[None].repeat(N_rays, 1,1)
xyz_embedded = embedding_xyz(xyz)
dskin = mlp_skinning(nerf_skin, pose_code, xyz_embedded)
skin = skinning(bones, xyz, dskin, skin_aux=skin_aux) # bs, N, B
return skin
def mlp_skinning(mlp, code, pts_embed):
"""
code: bs, D - N D-dimensional pose code
pts_embed: bs,N,x - N point positional embeddings
dskin: bs,N,B - delta skinning matrix
"""
if mlp is None:
dskin = None
else:
dskin = evaluate_mlp(mlp, pts_embed, code=code, chunk=8*1024)
return dskin
def axis_rotate(orient, mdis):
bs,N,B,_,_ = mdis.shape
mdis = (orient * mdis.view(bs,N,B,1,3)).sum(4)[...,None] # faster
#mdis = orient.matmul(mdis) # bs,N,B,3,1 # slower
return mdis
def skinning_chunk(bones, pts, dskin=None, skin_aux=None):
#def skinning(bones, pts, dskin=None, skin_aux=None):
"""
bone: bs,B,10 - B gaussian ellipsoids
pts: bs,N,3 - N 3d points, usually N=num points per ray, b~=2034
skin: bs,N,B - skinning matrix
"""
device = pts.device
log_scale= skin_aux[0]
w_const = skin_aux[1]
bs,N,_ = pts.shape
B = bones.shape[-2]
if bones.dim()==2: bones = bones[None].repeat(bs,1,1)
bones = bones.view(-1,B,10)
center, orient, scale = vec_to_sim3(bones)
orient = orient.permute(0,1,3,2) # transpose R
# mahalanobis distance [(p-v)^TR^T]S[R(p-v)]
# transform a vector to the local coordinate
mdis = center.view(bs,1,B,3) - pts.view(bs,N,1,3) # bs,N,B,3
mdis = axis_rotate(orient.view(bs,1,B,3,3), mdis[...,None])
mdis = mdis[...,0]
mdis = scale.view(bs,1,B,3) * mdis.pow(2)
# log_scale (being optimized) controls the temperature of the skinning weight softmax
# multiply 1000 to make the weights more concentrated initially
inv_temperature = 1000 * log_scale.exp()
mdis = (-inv_temperature * mdis.sum(3)) # bs,N,B
if dskin is not None:
mdis = mdis+dskin
skin = mdis.softmax(2)
return skin
def skinning(bones, pts, dskin=None, skin_aux=None):
"""
bone: ...,B,10 - B gaussian ellipsoids
pts: bs,N,3 - N 3d points
skin: bs,N,B - skinning matrix
"""
chunk=4096
bs,N,_ = pts.shape
B = bones.shape[-2]
if bones.dim()==2: bones = bones[None].repeat(bs,1,1)
bones = bones.view(-1,B,10)
skin = []
for i in range(0,bs,chunk):
if dskin is None:
dskin_chunk = None
else:
dskin_chunk = dskin[i:i+chunk]
skin_chunk = skinning_chunk(bones[i:i+chunk], pts[i:i+chunk], \
dskin=dskin_chunk, skin_aux=skin_aux)
skin.append( skin_chunk )
skin = torch.cat(skin,0)
return skin
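# shape sketch (illustrative): for bs=2, N=64 samples per ray and B=25 bones,
# skinning(bones, pts, skin_aux=skin_aux) maps (2,25,10) bones and (2,64,3) points to a
# (2,64,25) soft assignment that sums to 1 over the bone dimension (softmax above).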
def blend_skinning_chunk(bones, rts, skin, pts):
#def blend_skinning(bones, rts, skin, pts):
"""
bone: bs,B,10 - B gaussian ellipsoids
rts: bs,B,3,4 - B rigid transforms, applied to bone coordinates (points attached to bones in world coords)
pts: bs,N,3 - N 3d points
skin: bs,N,B - skinning matrix
apply rts to bone coordinates, while computing blending globally
"""
B = rts.shape[-3]
N = pts.shape[-2]
pts = pts.view(-1,N,3)
rts = rts.view(-1,B,3,4)
Rmat = rts[:,:,:3,:3] # bs, B, 3,3
Tmat = rts[:,:,:3,3]
device = Tmat.device
## convert from bone to root transforms
#bones = bones.view(-1,B,10)
#bs = Rmat.shape[0]
#center = bones[:,:,:3]
#orient = bones[:,:,3:7] # real first
#orient = F.normalize(orient, 2,-1)
#orient = transforms.quaternion_to_matrix(orient) # real first
#gmat = torch.eye(4)[None,None].repeat(bs, B, 1, 1).to(device)
#
## root to bone
#gmat_r2b = gmat.clone()
#gmat_r2b[:,:,:3,:3] = orient.permute(0,1,3,2)
#gmat_r2b[:,:,:3,3] = -orient.permute(0,1,3,2).matmul(center[...,None])[...,0]
## bone to root
#gmat_b2r = gmat.clone()
#gmat_b2r[:,:,:3,:3] = orient
#gmat_b2r[:,:,:3,3] = center
## bone to bone
#gmat_b = gmat.clone()
#gmat_b[:,:,:3,:3] = Rmat
#gmat_b[:,:,:3,3] = Tmat
#gmat = gmat_b2r.matmul(gmat_b.matmul(gmat_r2b))
#Rmat = gmat[:,:,:3,:3]
#Tmat = gmat[:,:,:3,3]
# Gi=sum(wbGb), V=RV+T
Rmat_w = (skin[...,None,None] * Rmat[:,None]).sum(2) # bs,N,B,3
Tmat_w = (skin[...,None] * Tmat[:,None]).sum(2) # bs,N,B,3
pts = Rmat_w.matmul(pts[...,None]) + Tmat_w[...,None]
pts = pts[...,0]
return pts
def blend_skinning(bones, rts, skin, pts):
"""
bone: bs,B,10 - B gaussian ellipsoids
rts: bs,B,3,4 - B rigid transforms, applied to bone coordinates
pts: bs,N,3 - N 3d points
skin: bs,N,B - skinning matrix
apply rts to bone coordinates, while computing blending globally
"""
chunk=4096
B = rts.shape[-3]
N = pts.shape[-2]
bones = bones.view(-1,B,10)
pts = pts.view(-1,N,3)
rts = rts.view(-1,B,3,4)
bs = pts.shape[0]
pts_out = []
for i in range(0,bs,chunk):
pts_chunk = blend_skinning_chunk(bones[i:i+chunk], rts[i:i+chunk],
skin[i:i+chunk], pts[i:i+chunk])
pts_out.append(pts_chunk)
pts = torch.cat(pts_out,0)
return pts
def lbs(bones, rts_fw, skin, xyz_in, backward=True):
"""
bones: bs,B,10 - B gaussian ellipsoids indicating rest bone coordinates
rts_fw: bs,B,12 - B rigid transforms, applied to the rest bones
xyz_in: bs,N,3 - N 3d points after transforms in the root coordinates
"""
B = bones.shape[-2]
N = xyz_in.shape[-2]
bs = rts_fw.shape[0]
bones = bones.view(-1,B,10)
xyz_in = xyz_in.view(-1,N,3)
rts_fw = rts_fw.view(-1,B,12)# B,12
rmat=rts_fw[:,:,:9]
rmat=rmat.view(bs,B,3,3)
tmat= rts_fw[:,:,9:12]
rts_fw = torch.cat([rmat,tmat[...,None]],-1)
rts_fw = rts_fw.view(-1,B,3,4)
if backward:
bones_dfm = bone_transform(bones, rts_fw) # bone coordinates after deform
rts_bw = rts_invert(rts_fw)
xyz = blend_skinning(bones_dfm, rts_bw, skin, xyz_in)
else:
xyz = blend_skinning(bones.repeat(bs,1,1), rts_fw, skin, xyz_in)
bones_dfm = bone_transform(bones, rts_fw) # bone coordinates after deform
return xyz, bones_dfm
def obj_to_cam(in_verts, Rmat, Tmat):
"""
verts: ...,N,3
Rmat: ...,3,3
Tmat: ...,3
"""
verts = in_verts.clone()
if verts.dim()==2: verts=verts[None]
verts = verts.view(-1,verts.shape[1],3)
Rmat = Rmat.view(-1,3,3).permute(0,2,1) # left multiply
Tmat = Tmat.view(-1,1,3)
verts = verts.matmul(Rmat) + Tmat
verts = verts.reshape(in_verts.shape)
return verts
def obj2cam_np(pts, Rmat, Tmat):
"""
a wrapper for numpy array
pts: ..., 3
Rmat: 1,3,3
Tmat: 1,3,3
"""
pts_shape = pts.shape
pts = torch.Tensor(pts).cuda().reshape(1,-1,3)
pts = obj_to_cam(pts, Rmat,Tmat)
return pts.view(pts_shape).cpu().numpy()
def K2mat(K):
"""
K: ...,4
"""
K = K.view(-1,4)
device = K.device
bs = K.shape[0]
Kmat = torch.zeros(bs, 3, 3, device=device)
Kmat[:,0,0] = K[:,0]
Kmat[:,1,1] = K[:,1]
Kmat[:,0,2] = K[:,2]
Kmat[:,1,2] = K[:,3]
Kmat[:,2,2] = 1
return Kmat
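# usage sketch (illustrative intrinsics): K2mat packs [fx,fy,px,py] into a 3x3 matrix
# K = torch.Tensor([[500., 500., 256., 256.]])
# Kmat = K2mat(K) # (1,3,3): [[500,0,256],[0,500,256],[0,0,1]]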
def mat2K(Kmat):
"""
Kmat: ...,3,3
"""
shape=Kmat.shape[:-2]
Kmat = Kmat.view(-1,3,3)
device = Kmat.device
bs = Kmat.shape[0]
K = torch.zeros(bs, 4, device=device)
K[:,0] = Kmat[:,0,0]
K[:,1] = Kmat[:,1,1]
K[:,2] = Kmat[:,0,2]
K[:,3] = Kmat[:,1,2]
K = K.view(shape+(4,))
return K
def Kmatinv(Kmat):
"""
Kmat: ...,3,3
"""
K = mat2K(Kmat)
Kmatinv = K2inv(K)
Kmatinv = Kmatinv.view(Kmat.shape)
return Kmatinv
def K2inv(K):
"""
K: ...,4
"""
K = K.view(-1,4)
device = K.device
bs = K.shape[0]
Kmat = torch.zeros(bs, 3, 3, device=device)
Kmat[:,0,0] = 1./K[:,0]
Kmat[:,1,1] = 1./K[:,1]
Kmat[:,0,2] = -K[:,2]/K[:,0]
Kmat[:,1,2] = -K[:,3]/K[:,1]
Kmat[:,2,2] = 1
return Kmat
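# sanity sketch (illustrative): K2inv(K) is the matrix inverse of K2mat(K), so
# K2inv(K).matmul(K2mat(K)) is (numerically) the identity; Kmatinv above applies the
# same construction to an existing 3x3 intrinsics matrix via mat2K.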
def pinhole_cam(in_verts, K):
"""
in_verts: ...,N,3
K: ...,4
verts: ...,N,3 in (x,y,Z)
"""
verts = in_verts.clone()
verts = verts.view(-1,verts.shape[1],3)
K = K.view(-1,4)
Kmat = K2mat(K)
Kmat = Kmat.permute(0,2,1)
verts = verts.matmul(Kmat)
verts_z = verts[:,:,2:3]
verts_xy = verts[:,:,:2] / (1e-6+verts_z) # deal with neg z
verts = torch.cat([verts_xy,verts_z],-1)
verts = verts.reshape(in_verts.shape)
return verts
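# worked example (illustrative): with K=[fx,fy,px,py]=[500,500,256,256], the camera-space
# point (0.2, 0.0, 2.0) maps to x = 500*0.2/2 + 256 = 306, y = 256, z kept as 2.0
# out = pinhole_cam(torch.Tensor([[[0.2, 0.0, 2.0]]]), torch.Tensor([[500.,500.,256.,256.]]))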
def render_color(renderer, in_verts, faces, colors, texture_type='vertex'):
"""
verts in ndc
in_verts: ...,N,3/4
faces: ...,N,3
rendered: ...,4,...
"""
import soft_renderer as sr
verts = in_verts.clone()
verts = verts.view(-1,verts.shape[-2],3)
faces = faces.view(-1,faces.shape[-2],3)
if texture_type=='vertex': colors = colors.view(-1,colors.shape[-2],3)
elif texture_type=='surface': colors = colors.view(-1,colors.shape[1],colors.shape[2],3)
device=verts.device
offset = torch.Tensor( renderer.transform.transformer._eye).to(device)[np.newaxis,np.newaxis]
verts_pre = verts[:,:,:3]-offset
verts_pre[:,:,1] = -1*verts_pre[:,:,1] # pre-flip
rendered = renderer.render_mesh(sr.Mesh(verts_pre,faces,textures=colors,texture_type=texture_type))
return rendered
def render_flow(renderer, verts, faces, verts_n):
"""
rasterization
verts in ndc
verts: ...,N,3/4
verts_n: ...,N,3/4
faces: ...,N,3
"""
verts = verts.view(-1,verts.shape[1],3)
verts_n = verts_n.view(-1,verts_n.shape[1],3)
faces = faces.view(-1,faces.shape[1],3)
device=verts.device
rendered_ndc_n = render_color(renderer, verts, faces, verts_n)
_,_,h,w = rendered_ndc_n.shape
rendered_sil = rendered_ndc_n[:,-1]
ndc = np.meshgrid(range(w), range(h))
ndc = torch.Tensor(ndc).to(device)[None]
ndc[:,0] = ndc[:,0]*2 / (w-1) - 1
ndc[:,1] = ndc[:,1]*2 / (h-1) - 1
flow = rendered_ndc_n[:,:2] - ndc
flow = flow.permute(0,2,3,1) # x,h,w,2
flow = torch.cat([flow, rendered_sil[...,None]],-1)
flow[rendered_sil<1]=0.
flow[...,-1]=0. # discard the last channel
return flow
def force_type(varlist):
for i in range(len(varlist)):
varlist[i] = varlist[i].type(varlist[0].dtype)
return varlist
def tensor2array(tdict):
adict={}
for k,v in tdict.items():
adict[k] = v.detach().cpu().numpy()
return adict
def array2tensor(adict, device='cpu'):
tdict={}
for k,v in adict.items():
try:
tdict[k] = torch.Tensor(v)
if device != 'cpu': tdict[k] = tdict[k].to(device)
except: pass # trimesh object
return tdict
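# usage sketch (illustrative): tensor2array / array2tensor round-trip a dict of tensors
# through numpy, e.g. array2tensor(tensor2array({'rtk': torch.eye(4)}), device='cpu')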
def raycast(xys, Rmat, Tmat, Kinv, near_far):
"""
assuming xys and Rmat have same num of bs
xys: bs, N, 3
Rmat:bs, ...,3,3
Tmat:bs, ...,3, camera to root coord transform
Kinv:bs, ...,3,3
near_far:bs,2
"""
Rmat, Tmat, Kinv, xys = force_type([Rmat, Tmat, Kinv, xys])
Rmat = Rmat.view(-1,3,3)
Tmat = Tmat.view(-1,1,3)
Kinv = Kinv.view(-1,3,3)
bs,nsample,_ = xys.shape
device = Rmat.device
xy1s = torch.cat([xys, torch.ones_like(xys[:,:,:1])],2)
xyz3d = xy1s.matmul(Kinv.permute(0,2,1))
ray_directions = xyz3d.matmul(Rmat) # transpose -> right multiply
ray_origins = -Tmat.matmul(Rmat) # transpose -> right multiply
if near_far is not None:
znear= (torch.ones(bs,nsample,1).to(device) * near_far[:,0,None,None])
zfar = (torch.ones(bs,nsample,1).to(device) * near_far[:,1,None,None])
else:
lbound, ubound=[-1.5,1.5]
znear= Tmat[:,:,-1:].repeat(1,nsample,1)+lbound
zfar = Tmat[:,:,-1:].repeat(1,nsample,1)+ubound
znear[znear<1e-5]=1e-5
ray_origins = ray_origins.repeat(1,nsample,1)
rmat_vec = Rmat.reshape(-1,1,9)
tmat_vec = Tmat.reshape(-1,1,3)
kinv_vec = Kinv.reshape(-1,1,9)
rtk_vec = torch.cat([rmat_vec, tmat_vec, kinv_vec],-1) # x,21
rtk_vec = rtk_vec.repeat(1,nsample,1)
rays={'rays_o': ray_origins,
'rays_d': ray_directions,
'near': znear,
'far': zfar,
'rtk_vec': rtk_vec,
'xys': xys,
'nsample': nsample,
'bs': bs,
}
return rays
def sample_xy(img_size, bs, nsample, device, return_all=False, lineid=None):
"""
rand_inds: bs, ns
xys: bs, ns, 2
"""
xygrid = np.meshgrid(range(img_size), range(img_size)) # w,h->hxw
xygrid = torch.Tensor(xygrid).to(device) # (x,y)
xygrid = xygrid.permute(1,2,0).reshape(1,-1,2) # 1,..., 2
if return_all:
xygrid = xygrid.repeat(bs,1,1) # bs,..., 2
nsample = xygrid.shape[1]
rand_inds=torch.Tensor(range(nsample))
rand_inds=rand_inds[None].repeat(bs,1)
xys = xygrid
else:
if lineid is None:
probs = torch.ones(img_size**2).to(device) # 512*512 vs 128*64
rand_inds = torch.multinomial(probs, bs*nsample, replacement=False)
rand_inds = rand_inds.view(bs,nsample)
xys = torch.stack([xygrid[0][rand_inds[i]] for i in range(bs)],0) # bs,ns,2
else:
probs = torch.ones(img_size).to(device) # 512*512 vs 128*64
rand_inds = torch.multinomial(probs, bs*nsample, replacement=True)
rand_inds = rand_inds.view(bs,nsample)
xys = torch.stack([xygrid[0][rand_inds[i]] for i in range(bs)],0) # bs,ns,2
xys[...,1] = xys[...,1] + lineid[:,None]
rand_inds = rand_inds.long()
return rand_inds, xys
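# usage sketch (illustrative): draw 8 random pixels per image for a batch of 2
# rand_inds, xys = sample_xy(64, 2, 8, 'cpu') # rand_inds: (2,8), xys: (2,8,2) pixel coords
# with return_all=True the full 64*64 grid is returned instead of a random subset.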
def chunk_rays(rays,start,delta):
"""
rays: a dictionary
"""
rays_chunk = {}
for k,v in rays.items():
if torch.is_tensor(v):
v = v.view(-1, v.shape[-1])
rays_chunk[k] = v[start:start+delta]
return rays_chunk
def generate_bones(num_bones_x, num_bones, bound, device):
"""
num_bones_x: bones along one direction
bones: x**3,9
"""
center = torch.linspace(-bound, bound, num_bones_x).to(device)
center =torch.meshgrid(center, center, center)
center = torch.stack(center,0).permute(1,2,3,0).reshape(-1,3)
center = center[:num_bones]
orient = torch.Tensor([[1,0,0,0]]).to(device)
orient = orient.repeat(num_bones,1)
scale = torch.zeros(num_bones,3).to(device)
bones = torch.cat([center, orient, scale],-1)
return bones
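# usage sketch (illustrative): place up to num_bones ellipsoids on a 3x3x3 grid inside
# [-0.5, 0.5]; each bone is (center 3, identity quaternion 4, zero log-scales 3)
# bones = generate_bones(3, 25, 0.5, 'cpu') # -> (25, 10)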
def reinit_bones(model, mesh, num_bones):
"""
update the data of bones and nerf_body_rts[1].rgb without adding new parameters
num_bones: number of bones on the surface
mesh: trimesh
warning: ddp does not support adding/deleting parameters after construction
"""
#TODO find another way to add/delete bones
from kmeans_pytorch import kmeans
device = model.device
points = torch.Tensor(mesh.vertices).to(device)
rthead = model.nerf_body_rts[1].rgb
# reinit
num_in = rthead[0].weight.shape[1]
rthead = nn.Sequential(nn.Linear(num_in, 6*num_bones)).to(device)
torch.nn.init.xavier_uniform_(rthead[0].weight, gain=0.5)
torch.nn.init.zeros_(rthead[0].bias)
if points.shape[0]<100:
bound = model.latest_vars['obj_bound']
bound = torch.Tensor(bound)[None]
center = torch.rand(num_bones, 3) * bound*2 - bound
else:
_, center = kmeans(X=points, num_clusters=num_bones, iter_limit=100,
tqdm_flag=False, distance='euclidean', device=device)
center=center.to(device)
orient = torch.Tensor([[1,0,0,0]]).to(device)
orient = orient.repeat(num_bones,1)
scale = torch.zeros(num_bones,3).to(device)
bones = torch.cat([center, orient, scale],-1)
model.num_bones = num_bones
num_output = model.nerf_body_rts[1].num_output
bias_reinit = rthead[0].bias.data
weight_reinit=rthead[0].weight.data
model.nerf_body_rts[1].rgb[0].bias.data[:num_bones*num_output] = bias_reinit
model.nerf_body_rts[1].rgb[0].weight.data[:num_bones*num_output] = weight_reinit
bones,_ = correct_bones(model, bones, inverse=True)
model.bones.data[:num_bones] = bones
model.nerf_models['bones'] = model.bones
return
def correct_bones(model, bones_rst, inverse=False):
# bones=>bones_rst
bones_rst = bones_rst.clone()
rest_pose_code = model.rest_pose_code
rest_pose_code = rest_pose_code(torch.Tensor([0]).long().to(model.device))
rts_head = model.nerf_body_rts[1]
bone_rts_rst = rts_head(rest_pose_code)[0] # 1,B*12
if inverse:
bone_rts_rst = rtk_invert(bone_rts_rst, model.opts.num_bones)
bones_rst = bone_transform(bones_rst, bone_rts_rst, is_vec=True)[0]
return bones_rst, bone_rts_rst
def correct_rest_pose(opts, bone_rts_fw, bone_rts_rst):
# delta rts
bone_rts_fw = bone_rts_fw.clone()
rts_shape = bone_rts_fw.shape
bone_rts_rst_inv = rtk_invert(bone_rts_rst, opts.num_bones)
bone_rts_rst_inv = bone_rts_rst_inv.repeat(rts_shape[0],rts_shape[1],1)
bone_rts_fw = rtk_compose(bone_rts_rst_inv, bone_rts_fw)
return bone_rts_fw
def warp_bw(opts, model, rt_dict, query_xyz_chunk, embedid):
"""
only used in mesh extraction
embedid: embedding id
"""
chunk = query_xyz_chunk.shape[0]
query_time = torch.ones(chunk,1).to(model.device)*embedid
query_time = query_time.long()
if opts.flowbw:
# flowbw
xyz_embedded = model.embedding_xyz(query_xyz_chunk)
time_embedded = model.pose_code(query_time)[:,0]
xyztime_embedded = torch.cat([xyz_embedded, time_embedded],1)
flowbw_chunk = model.nerf_flowbw(xyztime_embedded, xyz=query_xyz_chunk)
query_xyz_chunk += flowbw_chunk
elif opts.lbs:
# backward skinning
bones_rst = model.bones
bone_rts_fw = model.nerf_body_rts(query_time)
# update bones
bones_rst, bone_rts_rst = correct_bones(model, bones_rst)
bone_rts_fw = correct_rest_pose(opts, bone_rts_fw, bone_rts_rst)
query_xyz_chunk = query_xyz_chunk[:,None]
if opts.nerf_skin:
nerf_skin = model.nerf_skin
else:
nerf_skin = None
time_embedded = model.pose_code(query_time)
bones_dfm = bone_transform(bones_rst, bone_rts_fw, is_vec=True)
skin_backward = gauss_mlp_skinning(query_xyz_chunk, model.embedding_xyz,
bones_dfm, time_embedded, nerf_skin, skin_aux=model.skin_aux )
query_xyz_chunk,bones_dfm = lbs(bones_rst,
bone_rts_fw,
skin_backward,
query_xyz_chunk)
query_xyz_chunk = query_xyz_chunk[:,0]
rt_dict['bones'] = bones_dfm
return query_xyz_chunk, rt_dict
def warp_fw(opts, model, rt_dict, vertices, embedid):
"""
only used in mesh extraction
"""
num_pts = vertices.shape[0]
query_time = torch.ones(num_pts,1).long().to(model.device)*embedid
pts_can=torch.Tensor(vertices).to(model.device)
if opts.flowbw:
# forward flow
pts_can_embedded = model.embedding_xyz(pts_can)
time_embedded = model.pose_code(query_time)[:,0]
ptstime_embedded = torch.cat([pts_can_embedded, time_embedded],1)
pts_dfm = pts_can + model.nerf_flowfw(ptstime_embedded, xyz=pts_can)
elif opts.lbs:
# forward skinning
pts_can = pts_can[:,None]
bones_rst = model.bones
bone_rts_fw = model.nerf_body_rts(query_time)
bones_rst, bone_rts_rst = correct_bones(model, bones_rst)
bone_rts_fw = correct_rest_pose(opts, bone_rts_fw, bone_rts_rst)
if opts.nerf_skin:
nerf_skin = model.nerf_skin
else:
nerf_skin = None
rest_pose_code = model.rest_pose_code
rest_pose_code = rest_pose_code(torch.Tensor([0]).long().to(bones_rst.device))
skin_forward = gauss_mlp_skinning(pts_can, model.embedding_xyz, bones_rst,
rest_pose_code, nerf_skin, skin_aux=model.skin_aux)
pts_dfm,bones_dfm = lbs(bones_rst, bone_rts_fw, skin_forward,
pts_can,backward=False)
pts_dfm = pts_dfm[:,0]
rt_dict['bones'] = bones_dfm
vertices = pts_dfm.cpu().numpy()
return vertices, rt_dict
def canonical2ndc(model, dp_canonical_pts, rtk, kaug, embedid):
"""
dp_canonical_pts: 5004,3, pts in the canonical space of each video
dp_px: bs, 5004, 3
"""
Rmat = rtk[:,:3,:3]
Tmat = rtk[:,:3,3]
Kmat = K2mat(rtk[:,3,:])
Kaug = K2inv(kaug) # p = Kaug Kmat P
Kinv = Kmatinv(Kaug.matmul(Kmat))
K = mat2K(Kmatinv(Kinv))
bs = Kinv.shape[0]
npts = dp_canonical_pts.shape[0]
# projection
dp_canonical_pts = dp_canonical_pts[None]
if model.opts.flowbw:
time_embedded = model.pose_code(embedid)
time_embedded = time_embedded.repeat(1,npts, 1)
dp_canonical_embedded = model.embedding_xyz(dp_canonical_pts)[None]
dp_canonical_embedded = dp_canonical_embedded.repeat(bs,1,1)
dp_canonical_embedded = torch.cat([dp_canonical_embedded, time_embedded], -1)
dp_deformed_flo = model.nerf_flowfw(dp_canonical_embedded, xyz=dp_canonical_pts)
dp_deformed_pts = dp_canonical_pts + dp_deformed_flo
else:
dp_deformed_pts = dp_canonical_pts.repeat(bs,1,1)
dp_cam_pts = obj_to_cam(dp_deformed_pts, Rmat, Tmat)
dp_px = pinhole_cam(dp_cam_pts,K)
return dp_px
def get_near_far(near_far, vars_np, tol_fac=1.2, pts=None):
"""
pts: point coordinate N,3
near_far: near and far plane M,2
rtk: object to camera transform, M,4,4
idk: indicator of observed or not, M
tol_fac: tolerance factor
"""
if pts is None:
#pts = vars_np['mesh_rest'].vertices
# turn points to bounding box
pts = trimesh.bounds.corners(vars_np['mesh_rest'].bounds)
device = near_far.device
rtk = torch.Tensor(vars_np['rtk']).to(device)
idk = torch.Tensor(vars_np['idk']).to(device)
pts = pts_to_view(pts, rtk, device)
pmax = pts[...,-1].max(-1)[0]
pmin = pts[...,-1].min(-1)[0]
delta = (pmax - pmin)*(tol_fac-1)
near= pmin-delta
far = pmax+delta
near_far[idk==1,0] = torch.clamp(near[idk==1], min=1e-3)
near_far[idk==1,1] = torch.clamp( far[idk==1], min=1e-3)
return near_far
def pts_to_view(pts, rtk, device):
"""
object to camera coordinates
pts: point coordinate N,3
rtk: object to camera transform, M,4,4
idk: indicator of observed or not, M
"""
M = rtk.shape[0]
out_pts = []
chunk=100
for i in range(0,M,chunk):
rtk_sub = rtk[i:i+chunk]
pts_sub = torch.Tensor(np.tile(pts[None],
(len(rtk_sub),1,1))).to(device) # M,N,3
pts_sub = obj_to_cam(pts_sub, rtk_sub[:,:3,:3],
rtk_sub[:,:3,3])
pts_sub = pinhole_cam(pts_sub, rtk_sub[:,3])
out_pts.append(pts_sub)
out_pts = torch.cat(out_pts, 0)
return out_pts
def compute_point_visibility(pts, vars_np, device):
"""
pts: point coordinate N,3
rtk: object to camera transform, M,4,4
idk: indicator of observed or not, M
**deprecated**: the intrinsics K in vars_tensor['rtk'] may not be consistent
"""
vars_tensor = array2tensor(vars_np, device=device)
rtk = vars_tensor['rtk']
idk = vars_tensor['idk']
vis = vars_tensor['vis']
pts = pts_to_view(pts, rtk, device) # T, N, 3
h,w = vis.shape[1:]
vis = vis[:,None]
xy = pts[:,None,:,:2]
xy[...,0] = xy[...,0]/w*2 - 1
xy[...,1] = xy[...,1]/h*2 - 1
# grab the visibility value in the mask and sum over frames
vis = F.grid_sample(vis, xy)[:,0,0]
vis = (idk[:,None]*vis).sum(0)
vis = (vis>0).float() # at least seen in one view
return vis
def near_far_to_bound(near_far):
"""
near_far: T, 2 on cuda
bound: float
this can only be used for a single video (and for approximation)
"""
bound=(near_far[:,1]-near_far[:,0]).mean() / 2
bound = bound.detach().cpu().numpy()
return bound
def rot_angle(mat):
"""
rotation angle of rotation matrix
rmat: ..., 3,3
"""
eps=1e-4
cos = ( mat[...,0,0] + mat[...,1,1] + mat[...,2,2] - 1 )/2
cos = cos.clamp(-1+eps,1-eps)
angle = torch.acos(cos)
return angle
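# worked example (illustrative): rot_angle(torch.eye(3)) is ~0 (bounded below by eps),
# while a 180-degree rotation about z, torch.diag(torch.Tensor([-1.,-1.,1.])),
# gives trace=-1 -> cos=-1 -> angle ~ pi.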
def match2coords(match, w_rszd):
tar_coord = torch.cat([match[:,None]%w_rszd, match[:,None]//w_rszd],-1)
tar_coord = tar_coord.float()
return tar_coord
def match2flo(match, w_rszd, img_size, warp_r, warp_t, device):
ref_coord = sample_xy(w_rszd, 1, 0, device, return_all=True)[1].view(-1,2)
ref_coord = ref_coord.matmul(warp_r[:2,:2]) + warp_r[None,:2,2]
tar_coord = match2coords(match, w_rszd)
tar_coord = tar_coord.matmul(warp_t[:2,:2]) + warp_t[None,:2,2]
flo_dp = (tar_coord - ref_coord) / img_size * 2 # [-2,2]
flo_dp = flo_dp.view(w_rszd, w_rszd, 2)
flo_dp = flo_dp.permute(2,0,1)
xygrid = sample_xy(w_rszd, 1, 0, device, return_all=True)[1] # scale to img_size
xygrid = xygrid * float(img_size/w_rszd)
warp_r_inv = Kmatinv(warp_r)
xygrid = xygrid.matmul(warp_r_inv[:2,:2]) + warp_r_inv[None,:2,2]
xygrid = xygrid / w_rszd * 2 - 1
flo_dp = F.grid_sample(flo_dp[None], xygrid.view(1,w_rszd,w_rszd,2))[0]
return flo_dp
def compute_flow_cse(cse_a,cse_b, warp_a, warp_b, img_size):
"""
compute the flow between two frames under cse feature matching
assuming two feature images have the same dimension (also rectangular)
cse: 16,h,w, feature image
flo_dp: 2,h,w
"""
_,_,w_rszd = cse_a.shape
hw_rszd = w_rszd*w_rszd
device = cse_a.device
cost = (cse_b[:,None,None] * cse_a[...,None,None]).sum(0)
_,match_a = cost.view(hw_rszd, hw_rszd).max(1)
_,match_b = cost.view(hw_rszd, hw_rszd).max(0)
flo_a = match2flo(match_a, w_rszd, img_size, warp_a, warp_b, device)
flo_b = match2flo(match_b, w_rszd, img_size, warp_b, warp_a, device)
return flo_a, flo_b
def compute_flow_geodist(dp_refr,dp_targ, geodists):
"""
compute the flow between two frames under geodesic distance matching
dps: h,w, canonical surface mapping index
geodists N,N, distance matrix
flo_dp: 2,h,w
"""
h_rszd,w_rszd = dp_refr.shape
hw_rszd = h_rszd*w_rszd
device = dp_refr.device
chunk = 1024
# match: hw**2
match = torch.zeros(hw_rszd).to(device)
for i in range(0,hw_rszd,chunk):
chunk_size = len(dp_refr.view(-1,1)[i:i+chunk] )
dp_refr_sub = dp_refr.view(-1,1)[i:i+chunk].repeat(1,hw_rszd).view(-1,1)
dp_targ_sub = dp_targ.view(1,-1) .repeat(chunk_size,1).view(-1,1)
match_sub = geodists[dp_refr_sub, dp_targ_sub]
dis_geo_sub,match_sub = match_sub.view(-1, hw_rszd).min(1)
#match_sub[dis_geo_sub>0.1] = 0
match[i:i+chunk] = match_sub
# cx,cy
tar_coord = match2coords(match, w_rszd)
ref_coord = sample_xy(w_rszd, 1, 0, device, return_all=True)[1].view(-1,2)
ref_coord = ref_coord.view(h_rszd, w_rszd, 2)
tar_coord = tar_coord.view(h_rszd, w_rszd, 2)
flo_dp = (tar_coord - ref_coord) / w_rszd * 2 # [-2,2]
match = match.view(h_rszd, w_rszd)
flo_dp[match==0] = 0
flo_dp = flo_dp.permute(2,0,1)
return flo_dp
def compute_flow_geodist_old(dp_refr,dp_targ, geodists):
"""
compute the flow between two frames under geodesic distance matching
dps: h,w, canonical surface mapping index
geodists N,N, distance matrix
flo_dp: 2,h,w
"""
h_rszd,w_rszd = dp_refr.shape
hw_rszd = h_rszd*w_rszd
device = dp_refr.device
dp_refr = dp_refr.view(-1,1).repeat(1,hw_rszd).view(-1,1)
dp_targ = dp_targ.view(1,-1).repeat(hw_rszd,1).view(-1,1)
match = geodists[dp_refr, dp_targ]
dis_geo,match = match.view(hw_rszd, hw_rszd).min(1)
#match[dis_geo>0.1] = 0
# cx,cy
tar_coord = match2coords(match, w_rszd)
ref_coord = sample_xy(w_rszd, 1, 0, device, return_all=True)[1].view(-1,2)
ref_coord = ref_coord.view(h_rszd, w_rszd, 2)
tar_coord = tar_coord.view(h_rszd, w_rszd, 2)
flo_dp = (tar_coord - ref_coord) / w_rszd * 2 # [-2,2]
match = match.view(h_rszd, w_rszd)
flo_dp[match==0] = 0
flo_dp = flo_dp.permute(2,0,1)
return flo_dp
def fb_flow_check(flo_refr, flo_targ, img_refr, img_targ, dp_thrd,
save_path=None):
"""
apply forward backward consistency check on flow fields
flo_refr: 2,h,w forward flow
flo_targ: 2,h,w backward flow
fberr: h,w forward backward error
"""
h_rszd, w_rszd = flo_refr.shape[1:]
# clean up flow
flo_refr = flo_refr.permute(1,2,0).cpu().numpy()
flo_targ = flo_targ.permute(1,2,0).cpu().numpy()
flo_refr_mask = np.linalg.norm(flo_refr,2,-1)>0 # this also removes 0 flows
flo_targ_mask = np.linalg.norm(flo_targ,2,-1)>0
flo_refr_px = flo_refr * w_rszd / 2
flo_targ_px = flo_targ * w_rszd / 2
#fb check
x0,y0 =np.meshgrid(range(w_rszd),range(h_rszd))
hp0 = np.stack([x0,y0],-1) # screen coord
flo_fb = warp_flow(hp0 + flo_targ_px, flo_refr_px) - hp0
flo_fb = 2*flo_fb/w_rszd
fberr_fw = np.linalg.norm(flo_fb, 2,-1)
fberr_fw[~flo_refr_mask] = 0
flo_bf = warp_flow(hp0 + flo_refr_px, flo_targ_px) - hp0
flo_bf = 2*flo_bf/w_rszd
fberr_bw = np.linalg.norm(flo_bf, 2,-1)
fberr_bw[~flo_targ_mask] = 0
if save_path is not None:
# vis
thrd_vis = 0.01
img_refr = F.interpolate(img_refr, (h_rszd, w_rszd), mode='bilinear')[0]
img_refr = img_refr.permute(1,2,0).cpu().numpy()[:,:,::-1]
img_targ = F.interpolate(img_targ, (h_rszd, w_rszd), mode='bilinear')[0]
img_targ = img_targ.permute(1,2,0).cpu().numpy()[:,:,::-1]
flo_refr[:,:,0] = (flo_refr[:,:,0] + 2)/2
flo_targ[:,:,0] = (flo_targ[:,:,0] - 2)/2
flo_refr[fberr_fw>thrd_vis]=0.
flo_targ[fberr_bw>thrd_vis]=0.
flo_refr[~flo_refr_mask]=0.
flo_targ[~flo_targ_mask]=0.
img = np.concatenate([img_refr, img_targ], 1)
flo = np.concatenate([flo_refr, flo_targ], 1)
imgflo = cat_imgflo(img, flo)
imgcnf = np.concatenate([fberr_fw, fberr_bw],1)
imgcnf = np.clip(imgcnf, 0, dp_thrd)*(255/dp_thrd)
imgcnf = np.repeat(imgcnf[...,None],3,-1)
imgcnf = cv2.resize(imgcnf, imgflo.shape[::-1][1:])
imgflo_cnf = np.concatenate([imgflo, imgcnf],0)
cv2.imwrite(save_path, imgflo_cnf)
return fberr_fw, fberr_bw
def mask_aug(rendered):
lb = 0.1; ub = 0.3
_,h,w=rendered.shape
if np.random.binomial(1,0.5):
sx = int(np.random.uniform(lb*w,ub*w))
sy = int(np.random.uniform(lb*h,ub*h))
cx = int(np.random.uniform(sx,w-sx))
cy = int(np.random.uniform(sy,h-sy))
feat_mean = rendered.mean(-1).mean(-1)[:,None,None]
rendered[:,cx-sx:cx+sx,cy-sy:cy+sy] = feat_mean
return rendered
def process_so3_seq(rtk_seq, vis=False, smooth=True):
"""
rtk_seq: bs, N, 29 including
{scores x1, rotations x9, translations x3, raw rtk x16}
"""
from utils.io import draw_cams
scores =rtk_seq[...,0]
bs,N = scores.shape
rmat = rtk_seq[...,1:10]
tmat = rtk_seq[:,0,10:13]
rtk_raw = rtk_seq[:,0,13:29].reshape((-1,4,4))
distribution = torch.Tensor(scores).softmax(1)
entropy = (-distribution.log() * distribution).sum(1)
if vis:
# draw distribution
obj_scale = 3
cam_space = obj_scale * 0.2
tmat_raw = np.tile(rtk_raw[:,None,:3,3], (1,N,1))
scale_factor = obj_scale/tmat_raw[...,-1].mean()
tmat_raw *= scale_factor
tmat_raw = tmat_raw.reshape((bs,12,-1,3))
tmat_raw[...,-1] += np.linspace(-cam_space, cam_space,12)[None,:,None]
tmat_raw = tmat_raw.reshape((bs,-1,3))
# bs, tiltxae
all_rts = np.concatenate([rmat, tmat_raw],-1)
all_rts = np.transpose(all_rts.reshape(bs,N,4,3), [0,1,3,2])
for i in range(bs):
top_idx = scores[i].argsort()[-30:]
top_rt = all_rts[i][top_idx]
top_score = scores[i][top_idx]
top_score = (top_score - top_score.min())/(top_score.max()-top_score.min())
mesh = draw_cams(top_rt, color_list = top_score)
mesh.export('tmp/%d.obj'%(i))
if smooth:
# graph cut scores, bsxN
import pydensecrf.densecrf as dcrf
from pydensecrf.utils import unary_from_softmax
graph = dcrf.DenseCRF2D(bs, 1, N) # width, height, nlabels
unary = unary_from_softmax(distribution.numpy().T.copy())
graph.setUnaryEnergy(unary)
grid = rmat[0].reshape((N,3,3))
drot = np.matmul(grid[None], np.transpose(grid[:,None], (0,1,3,2)))
drot = rot_angle(torch.Tensor(drot))
compat = (-2*(drot).pow(2)).exp()*10
compat = compat.numpy()
graph.addPairwiseGaussian(sxy=10, compat=compat)
Q = graph.inference(100)
scores = np.asarray(Q).T
# argmax
idx_max = scores.argmax(-1)
rmat = rmat[0][idx_max]
rmat = rmat.reshape((-1,9))
rts = np.concatenate([rmat, tmat],-1)
rts = rts.reshape((bs,1,-1))
# post-process se3
root_rmat = rts[:,0,:9].reshape((-1,3,3))
root_tmat = rts[:,0,9:12]
rmat = rtk_raw[:,:3,:3]
tmat = rtk_raw[:,:3,3]
tmat = tmat + np.matmul(rmat, root_tmat[...,None])[...,0]
rmat = np.matmul(rmat, root_rmat)
rtk_raw[:,:3,:3] = rmat
rtk_raw[:,:3,3] = tmat
if vis:
# draw again
pdb.set_trace()
rtk_vis = rtk_raw.copy()
rtk_vis[:,:3,3] *= scale_factor
mesh = draw_cams(rtk_vis)
mesh.export('tmp/final.obj')
return rtk_raw
def align_sim3(rootlist_a, rootlist_b, is_inlier=None, err_valid=None):
"""
nx4x4 matrices
is_inlier: n
"""
# ta = np.matmul(-np.transpose(rootlist_a[:,:3,:3],[0,2,1]),
# rootlist_a[:,:3,3:4])
# ta = ta[...,0].T
# tb = np.matmul(-np.transpose(rootlist_b[:,:3,:3],[0,2,1]),
# rootlist_b[:,:3,3:4])
# tb = tb[...,0].T
# dso3,dtrn,dscale=umeyama_alignment(tb, ta,with_scale=False)
#
# dscale = np.linalg.norm(rootlist_a[0,:3,3],2,-1) /\
# np.linalg.norm(rootlist_b[0,:3,3],2,-1)
# rootlist_b[:,:3,:3] = np.matmul(rootlist_b[:,:3,:3], dso3.T[None])
# rootlist_b[:,:3,3:4] = rootlist_b[:,:3,3:4] - \
# np.matmul(rootlist_b[:,:3,:3], dtrn[None,:,None])
dso3 = np.matmul(np.transpose(rootlist_b[:,:3,:3],(0,2,1)),
rootlist_a[:,:3,:3])
dscale = np.linalg.norm(rootlist_a[:,:3,3],2,-1)/\
np.linalg.norm(rootlist_b[:,:3,3],2,-1)
# select inliers to fit
if is_inlier is not None:
if is_inlier.sum() == 0:
is_inlier[np.argmin(err_valid)] = True
dso3 = dso3[is_inlier]
dscale = dscale[is_inlier]
dso3 = R.from_matrix(dso3).mean().as_matrix()
rootlist_b[:,:3,:3] = np.matmul(rootlist_b[:,:3,:3], dso3[None])
dscale = dscale.mean()
rootlist_b[:,:3,3] = rootlist_b[:,:3,3] * dscale
so3_err = np.matmul(rootlist_a[:,:3,:3],
np.transpose(rootlist_b[:,:3,:3],[0,2,1]))
so3_err = rot_angle(torch.Tensor(so3_err))
so3_err = so3_err / np.pi*180
so3_err_max = so3_err.max()
so3_err_mean = so3_err.mean()
so3_err_med = np.median(so3_err)
so3_err_std = np.asarray(so3_err.std())
print(so3_err)
print('max so3 error (deg): %.1f'%(so3_err_max))
print('med so3 error (deg): %.1f'%(so3_err_med))
print('mean so3 error (deg): %.1f'%(so3_err_mean))
print('std so3 error (deg): %.1f'%(so3_err_std))
return rootlist_b
def align_sfm_sim3(aux_seq, datasets):
from utils.io import draw_cams, load_root
for dataset in datasets:
seqname = dataset.imglist[0].split('/')[-2]
# only process dataset with rtk_path input
if dataset.has_prior_cam:
root_dir = dataset.rtklist[0][:-9]
root_sfm = load_root(root_dir, 0)[:-1] # excluding the last
# split predicted root into multiple sequences
seq_idx = [seqname == i.split('/')[-2] for i in aux_seq['impath']]
root_pred = aux_seq['rtk'][seq_idx]
is_inlier = aux_seq['is_valid'][seq_idx]
err_valid = aux_seq['err_valid'][seq_idx]
# only use certain ones to match
#pdb.set_trace()
#mesh = draw_cams(root_sfm, color='gray')
#mesh.export('0.obj')
# pre-align the center according to cat mask
root_sfm = visual_hull_align(root_sfm,
aux_seq['kaug'][seq_idx],
aux_seq['masks'][seq_idx])
root_sfm = align_sim3(root_pred, root_sfm,
is_inlier=is_inlier, err_valid=err_valid)
# only modify rotation
#root_pred[:,:3,:3] = root_sfm[:,:3,:3]
root_pred = root_sfm
aux_seq['rtk'][seq_idx] = root_pred
aux_seq['is_valid'][seq_idx] = True
else:
print('not aligning %s, no rtk path in config file'%seqname)
def visual_hull_align(rtk, kaug, masks):
"""
input: array
output: array
"""
rtk = torch.Tensor(rtk)
kaug = torch.Tensor(kaug)
masks = torch.Tensor(masks)
num_view,h,w = masks.shape
grid_size = 64
if rtk.shape[0]!=num_view:
print('rtk size mismatch: %d vs %d'%(rtk.shape[0], num_view))
rtk = rtk[:num_view]
rmat = rtk[:,:3,:3]
tmat = rtk[:,:3,3:]
Kmat = K2mat(rtk[:,3])
Kaug = K2inv(kaug) # p = Kaug Kmat P
kmat = mat2K(Kaug.matmul(Kmat))
rmatc = rmat.permute((0,2,1))
tmatc = -rmatc.matmul(tmat)
bound = tmatc.norm(2,-1).mean()
pts = np.linspace(-bound, bound, grid_size).astype(np.float32)
query_yxz = np.stack(np.meshgrid(pts, pts, pts), -1) # (y,x,z)
query_yxz = torch.Tensor(query_yxz).view(-1, 3)
query_xyz = torch.cat([query_yxz[:,1:2], query_yxz[:,0:1], query_yxz[:,2:3]],-1)
score_xyz = []
chunk = 1000
for i in range(0,len(query_xyz),chunk):
query_xyz_chunk = query_xyz[None, i:i+chunk].repeat(num_view, 1,1)
query_xyz_chunk = obj_to_cam(query_xyz_chunk, rmat, tmat)
query_xyz_chunk = pinhole_cam(query_xyz_chunk, kmat)
query_xy = query_xyz_chunk[...,:2]
query_xy[...,0] = query_xy[...,0]/w*2-1
query_xy[...,1] = query_xy[...,1]/h*2-1
# sum over time
score = F.grid_sample(masks[:,None], query_xy[:,None])[:,0,0]
score = score.sum(0)
score_xyz.append(score)
# align the center
score_xyz = torch.cat(score_xyz)
center = query_xyz[score_xyz>0.8*num_view]
print('%d points used to align center'% (len(center)) )
center = center.mean(0)
tmatc = tmatc - center[None,:,None]
tmat = np.matmul(-rmat, tmatc)
rtk[:,:3,3:] = tmat
return rtk
def ood_check_cse(dp_feats, dp_embed, dp_idx):
"""
dp_feats: bs,16,h,w
dp_idx: bs, h,w
dp_embed: N,16
valid_list bs
"""
bs,_,h,w = dp_feats.shape
N,_ = dp_embed.shape
device = dp_feats.device
dp_idx = F.interpolate(dp_idx.float()[None], (h,w), mode='nearest').long()[0]
## dot product
#pdb.set_trace()
#err_list = []
#err_threshold = 0.05
#for i in range(bs):
# err = 1- (dp_embed[dp_idx[i]]*dp_feats[i].permute(1,2,0)).sum(-1)
# err_list.append(err)
# fb check
err_list = []
err_threshold = 12
# TODO no fb check
#err_threshold = 100
for i in range(bs):
# use chunk
chunk = 5000
max_idx = torch.zeros(N).to(device)
for j in range(0,N,chunk):
costmap = (dp_embed.view(N,16,1)[j:j+chunk]*\
dp_feats[i].view(1,16,h*w)).sum(-2)
max_idx[j:j+chunk] = costmap.argmax(-1) # N
rpj_idx = max_idx[dp_idx[i]]
rpj_coord = torch.stack([rpj_idx % w, rpj_idx//w],-1)
ref_coord = sample_xy(w, 1, 0, device, return_all=True)[1].view(h,w,2)
err = (rpj_coord - ref_coord).norm(2,-1)
err_list.append(err)
valid_list = []
error_list = []
for i in range(bs):
err = err_list[i]
mean_error = err[dp_idx[i]!=0].mean()
is_valid = mean_error < err_threshold
error_list.append( mean_error)
valid_list.append( is_valid )
#cv2.imwrite('tmp/%05d.png'%i, (err/mean_error).cpu().numpy()*100)
#print(i); print(mean_error)
error_list = torch.stack(error_list,0)
valid_list = torch.stack(valid_list,0)
return valid_list, error_list
def bbox_dp2rnd(bbox, kaug):
"""
bbox: bs, 4
kaug: bs, 4
cropab2: bs, 3,3, transformation from dp bbox to rendered bbox coords
"""
cropa2im = torch.cat([(bbox[:,2:] - bbox[:,:2]) / 112.,
bbox[:,:2]],-1)
cropa2im = K2mat(cropa2im)
im2cropb = K2inv(kaug)
cropa2b = im2cropb.matmul(cropa2im)
return cropa2b
def resample_dp(dp_feats, dp_bbox, kaug, target_size):
"""
dp_feats: bs, 16, h,w
dp_bbox: bs, 4
kaug: bs, 4
"""
# if dp_bbox are all zeros, just do the resizing
if dp_bbox.abs().sum()==0:
dp_feats_rsmp = F.interpolate(dp_feats, (target_size, target_size),
mode='bilinear')
else:
dp_size = dp_feats.shape[-1]
device = dp_feats.device
dp2rnd = bbox_dp2rnd(dp_bbox, kaug)
rnd2dp = Kmatinv(dp2rnd)
xygrid = sample_xy(target_size, 1, 0, device, return_all=True)[1]
xygrid = xygrid.matmul(rnd2dp[:,:2,:2]) + rnd2dp[:,None,:2,2]
xygrid = xygrid / dp_size * 2 - 1
dp_feats_rsmp = F.grid_sample(dp_feats, xygrid.view(-1,target_size,target_size,2))
return dp_feats_rsmp
def vrender_flo(weights_coarse, xyz_coarse_target, xys, img_size):
"""
weights_coarse: ..., ndepth
xyz_coarse_target: ..., ndepth, 3
flo_coarse: ..., 2
flo_valid: ..., 1
"""
# render flow
weights_coarse = weights_coarse.clone()
xyz_coarse_target = xyz_coarse_target.clone()
# bs, nsamp, -1, x
weights_shape = weights_coarse.shape
xyz_coarse_target = xyz_coarse_target.view(weights_shape+(3,))
xy_coarse_target = xyz_coarse_target[...,:2]
# deal with negative z
invalid_ind = torch.logical_or(xyz_coarse_target[...,-1]<1e-5,
xy_coarse_target.norm(2,-1).abs()>2*img_size)
weights_coarse[invalid_ind] = 0.
xy_coarse_target[invalid_ind] = 0.
# renormalize
weights_coarse = weights_coarse/(1e-9+weights_coarse.sum(-1)[...,None])
# candidate motion vector
xys_unsq = xys.view(weights_shape[:-1]+(1,2))
flo_coarse = xy_coarse_target - xys_unsq
flo_coarse = weights_coarse[...,None] * flo_coarse
flo_coarse = flo_coarse.sum(-2)
## candidate target point
#xys_unsq = xys.view(weights_shape[:-1]+(2,))
#xy_coarse_target = weights_coarse[...,None] * xy_coarse_target
#xy_coarse_target = xy_coarse_target.sum(-2)
#flo_coarse = xy_coarse_target - xys_unsq
flo_coarse = flo_coarse/img_size * 2
flo_valid = (invalid_ind.sum(-1)==0).float()[...,None]
return flo_coarse, flo_valid
def diff_flo(pts_target, xys, img_size):
"""
pts_target: ..., 1, 2
xys: ..., 2
flo_coarse: ..., 2
flo_valid: ..., 1
"""
# candidate motion vector
pts_target = pts_target.view(xys.shape)
flo_coarse = pts_target - xys
flo_coarse = flo_coarse/img_size * 2
return flo_coarse
def fid_reindex(fid, num_vids, vid_offset):
"""
re-index absolute frameid {0,....N} to subsets of video id and relative frameid
fid: N absolution id
vid: N video id
tid: N relative id
"""
tid = torch.zeros_like(fid).float()
vid = torch.zeros_like(fid)
max_ts = (vid_offset[1:] - vid_offset[:-1]).max()
for i in range(num_vids):
assign = torch.logical_and(fid>=vid_offset[i],
fid<vid_offset[i+1])
vid[assign] = i
tid[assign] = fid[assign].float() - vid_offset[i]
doffset = vid_offset[i+1] - vid_offset[i]
tid[assign] = (tid[assign] - doffset/2)/max_ts*2
#tid[assign] = 2*(tid[assign] / doffset)-1
#tid[assign] = (tid[assign] - doffset/2)/1000.
return vid, tid
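# Hedged usage sketch (hypothetical helper, not in the original file): two videos
# with 3 and 4 frames give offsets [0, 3, 7]; fid_reindex maps absolute frame ids
# to per-video ids and to a relative time roughly in [-1, 1].
def _example_fid_reindex():
    vid_offset = torch.LongTensor([0, 3, 7])
    fid = torch.arange(7)
    vid, tid = fid_reindex(fid, 2, vid_offset)
    # vid -> [0,0,0,1,1,1,1]; tid is centered per video and scaled by the
    # longest video length
    return vid, tid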
|
banmo-main
|
nnutils/geom_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import pdb
import trimesh
import cv2
import numpy as np
import torch
from nnutils.geom_utils import rot_angle, mat2K, Kmatinv, obj_to_cam, \
pinhole_cam, lbs, gauss_mlp_skinning, evaluate_mlp
import torch.nn.functional as F
def nerf_gradient(mlp, embed, pts, use_xyz=False,code=None, sigma_only=False):
"""
gradient of mlp params wrt pts
"""
pts.requires_grad_(True)
pts_embedded = embed(pts)
if use_xyz: xyz=pts
else: xyz=None
y = evaluate_mlp(mlp, pts_embedded, chunk=pts.shape[0],
xyz=xyz,code=code,sigma_only=sigma_only)
sdf = -y
ibetas = 1/(mlp.beta.abs()+1e-9)
sigmas = (0.5 + 0.5 * sdf.sign() * torch.expm1(-sdf.abs() * ibetas))
# get gradient for each size-1 output
gradients = []
for i in range(y.shape[-1]):
y_sub = y [...,i:i+1]
d_output = torch.ones_like(y_sub, requires_grad=False, device=y.device)
gradient = torch.autograd.grad(
outputs=y_sub,
inputs=pts,
grad_outputs=d_output,
create_graph=True,
retain_graph=True,
only_inputs=True)[0]
gradients.append( gradient[...,None] )
gradients = torch.cat(gradients,-1) # ...,input-dim, output-dim
return gradients, sigmas
def eikonal_loss(mlp, embed, pts, bound):
"""
pts: X* backward warped points
"""
# make it more efficient
bs = pts.shape[0]
sample_size = 1000
if bs>sample_size:
probs = torch.ones(bs)
rand_inds = torch.multinomial(probs, sample_size, replacement=False)
pts = pts[rand_inds]
pts = pts.view(-1,3).detach()
nsample = pts.shape[0]
device = next(mlp.parameters()).device
bound = torch.Tensor(bound)[None].to(device)
inbound_idx = ((bound - pts.abs()) > 0).sum(-1) == 3
pts = pts[inbound_idx]
pts = pts[None]
g,sigmas_unit = nerf_gradient(mlp, embed, pts, sigma_only=True)
g = g[...,0]
grad_norm = g.norm(2, dim=-1)
eikonal_loss = (grad_norm - 1) ** 2
eikonal_loss = eikonal_loss.mean()
return eikonal_loss
def elastic_loss(mlp, embed, xyz, time_embedded):
xyz = xyz.detach().clone()
time_embedded = time_embedded.detach().clone()
g,_ = nerf_gradient(mlp, embed, xyz, use_xyz=mlp.use_xyz,code=time_embedded)
jacobian = g+torch.eye(3)[None,None].to(g.device)
sign, log_svals = jacobian.slogdet()
log_svals = log_svals.clone()
log_svals[sign<=0] = 0.
elastic_loss = log_svals**2
return elastic_loss
def bone_density_loss(mlp, embed, bones):
pts = bones[:,:3]
pts_embedded = embed(pts)
y = evaluate_mlp(mlp, pts_embedded, pts.shape[0], sigma_only=True)
# the original returned the function object itself; assuming the intent is to
# penalize bones placed in empty space (negative density / sdf), mirroring
# grad_update_bone below
bone_loss = F.relu(-y).mean()
return bone_loss
def visibility_loss(mlp, embed, xyz_pos, w_pos, bound, chunk):
"""
w_pos: num_points x num_samples, visibility values returned by nerf
bound: scalar, used to sample negative samples
"""
device = next(mlp.parameters()).device
xyz_pos = xyz_pos.detach().clone()
w_pos = w_pos.detach().clone()
# negative examples
nsample = w_pos.shape[0]*w_pos.shape[1]
bound = torch.Tensor(bound)[None,None]
xyz_neg = torch.rand(1,nsample,3)*2*bound-bound
xyz_neg = xyz_neg.to(device)
xyz_neg_embedded = embed(xyz_neg)
vis_neg_pred = evaluate_mlp(mlp, xyz_neg_embedded, chunk=chunk)[...,0]
vis_loss_neg = -F.logsigmoid(-vis_neg_pred).sum()*0.1/nsample
# positive examples
xyz_pos_embedded = embed(xyz_pos)
vis_pos_pred = evaluate_mlp(mlp, xyz_pos_embedded, chunk=chunk)[...,0]
vis_loss_pos = -(F.logsigmoid(vis_pos_pred) * w_pos).sum()/nsample
vis_loss = vis_loss_pos + vis_loss_neg
return vis_loss
def rtk_loss(rtk, rtk_raw, aux_out):
rot_pred = rtk[:,:3,:3]
rot_gt = rtk_raw[:,:3,:3]
rot_loss = rot_angle(rot_pred.matmul(rot_gt.permute(0,2,1))).mean()
rot_loss = 0.01*rot_loss
trn_pred = rtk[:,:3,3]
trn_gt = rtk_raw[:,:3,3]
trn_loss = (trn_pred - trn_gt).pow(2).sum(-1).mean()
total_loss = rot_loss + trn_loss
aux_out['rot_loss'] = rot_loss
aux_out['trn_loss'] = trn_loss
return total_loss
def compute_pts_exp(pts_prob, pts):
"""
pts: ..., ndepth, 3
pts_prob: ..., ndepth
"""
ndepth = pts_prob.shape[-1]
pts_prob = pts_prob.clone()
pts_prob = pts_prob.view(-1, ndepth,1)
pts_prob = pts_prob/(1e-9+pts_prob.sum(1)[:,None])
pts_exp = (pts * pts_prob).sum(1)
return pts_exp
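# Illustrative sketch (hypothetical helper, not part of the original file):
# with one ray, two depth samples and equal probabilities, compute_pts_exp
# returns the midpoint of the two sampled points.
def _example_compute_pts_exp():
    pts = torch.Tensor([[[0., 0., 0.], [2., 0., 0.]]])  # ..., ndepth, 3
    pts_prob = torch.Tensor([[1., 1.]])                 # ..., ndepth
    return compute_pts_exp(pts_prob, pts)               # -> [[1., 0., 0.]]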
def feat_match_loss(nerf_feat, embedding_xyz, feats, pts, pts_prob, bound,
is_training=True):
"""
feats: ..., num_feat
pts: ..., ndepth, 3
pts_prob: ..., ndepth
loss: ..., 1
"""
pts = pts.clone()
base_shape = feats.shape[:-1] # bs, ns
nfeat = feats.shape[-1]
ndepth = pts_prob.shape[-1]
feats= feats.view(-1, nfeat)
pts = pts.view(-1, ndepth,3)
# part1: compute expected pts
pts_exp = compute_pts_exp(pts_prob, pts)
## part2: matching
pts_pred = feat_match(nerf_feat, embedding_xyz, feats,
bound,grid_size=20,is_training=is_training)
# part3: compute loss
feat_err = (pts_pred - pts_exp).norm(2,-1) # n,ndepth
# rearrange outputs
pts_pred = pts_pred.view(base_shape+(3,))
pts_exp = pts_exp .view(base_shape+(3,))
feat_err = feat_err .view(base_shape+(1,))
return pts_pred, pts_exp, feat_err
def kp_reproj_loss(pts_pred, xys, models, embedding_xyz, rays):
"""
pts_pred, ...,3
xys, ...,2
out, ...,1 same as pts_pred
gcc loss is only used to update root/body pose and skinning weights
"""
xys = xys.view(-1,1,2)
xy_reproj = kp_reproj(pts_pred, models, embedding_xyz, rays)
proj_err = (xys - xy_reproj[...,:2]).norm(2,-1)
proj_err = proj_err.view(pts_pred.shape[:-1]+(1,))
return proj_err
def kp_reproj(pts_pred, models, embedding_xyz, rays, to_target=False):
"""
pts_pred, ...,3
out, ...,1,3 same as pts_pred
to_target whether reproject to target frame
"""
N = pts_pred.view(-1,3).shape[0]
xyz_coarse_sampled = pts_pred.view(-1,1,3)
# detach grad since reproj-loss would not benefit feature learning
# (due to ambiguity)
#xyz_coarse_sampled = xyz_coarse_sampled.detach()
# TODO wrap flowbw and lbs into the same module
# TODO include loss for flowbw
if to_target: rtk_vec = rays['rtk_vec_target']
else: rtk_vec = rays['rtk_vec']
rtk_vec = rtk_vec.view(N,-1) # bs, ns, 21
if 'bones' in models.keys():
if to_target: bone_rts_fw = rays['bone_rts_target']
else: bone_rts_fw = rays['bone_rts']
bone_rts_fw = bone_rts_fw.view(N,-1) # bs, ns,-1
if 'nerf_skin' in models.keys():
nerf_skin = models['nerf_skin']
else: nerf_skin = None
bones = models['bones_rst']
skin_aux = models['skin_aux']
rest_pose_code = models['rest_pose_code']
rest_pose_code = rest_pose_code(torch.Tensor([0]).long().to(bones.device))
skin_forward = gauss_mlp_skinning(xyz_coarse_sampled, embedding_xyz, bones,
rest_pose_code, nerf_skin, skin_aux=skin_aux)
xyz_coarse_sampled,_ = lbs(bones, bone_rts_fw,
skin_forward, xyz_coarse_sampled, backward=False)
Rmat = rtk_vec[:,0:9] .view(N,1,3,3)
Tmat = rtk_vec[:,9:12] .view(N,1,3)
Kinv = rtk_vec[:,12:21].view(N,1,3,3)
K = mat2K(Kmatinv(Kinv))
xyz_coarse_sampled = obj_to_cam( xyz_coarse_sampled, Rmat, Tmat)
xyz_coarse_sampled = pinhole_cam(xyz_coarse_sampled,K)
xy_coarse_sampled = xyz_coarse_sampled[...,:2]
return xy_coarse_sampled
def feat_match(nerf_feat, embedding_xyz, feats, bound,
grid_size=20,is_training=True, init_pts=None, rt_entropy=False):
"""
feats: -1, num_feat
"""
if is_training:
chunk_pts = 8*1024
else:
chunk_pts = 1024
chunk_pix = 4096
nsample,_ = feats.shape
device = feats.device
feats = F.normalize(feats,2,-1)
# sample model on a regular 3d grid, and correlate with feature, nkxkxk
#p1d = np.linspace(-bound, bound, grid_size).astype(np.float32)
#query_yxz = np.stack(np.meshgrid(p1d, p1d, p1d), -1) # (y,x,z)
pxd = np.linspace(-bound[0], bound[0], grid_size).astype(np.float32)
pyd = np.linspace(-bound[1], bound[1], grid_size).astype(np.float32)
pzd = np.linspace(-bound[2], bound[2], grid_size).astype(np.float32)
query_yxz = np.stack(np.meshgrid(pyd, pxd, pzd), -1) # (y,x,z)
query_yxz = torch.Tensor(query_yxz).to(device).view(-1, 3)
query_xyz = torch.cat([query_yxz[:,1:2], query_yxz[:,0:1], query_yxz[:,2:3]],-1)
if init_pts is not None:
query_xyz = query_xyz[None] + init_pts[:,None]
else:
# N x Ns x 3
query_xyz = query_xyz[None]
# inject some noise at training time
if is_training and init_pts is None:
bound = torch.Tensor(bound)[None,None].to(device)
query_xyz = query_xyz + torch.randn_like(query_xyz) * bound * 0.05
cost_vol = []
for i in range(0,grid_size**3,chunk_pts):
if init_pts is None:
query_xyz_chunk = query_xyz[0,i:i+chunk_pts]
xyz_embedded = embedding_xyz(query_xyz_chunk)[:,None] # (N,1,...)
vol_feat_subchunk = evaluate_mlp(nerf_feat, xyz_embedded)[:,0] # (chunk, num_feat)
# normalize vol feat
vol_feat_subchunk = F.normalize(vol_feat_subchunk,2,-1)[None]
cost_chunk = []
for j in range(0,nsample,chunk_pix):
feats_chunk = feats[j:j+chunk_pix] # (chunk pix, num_feat)
if init_pts is not None:
# only query 3d grid according to each px when they are diff
# vol feature
query_xyz_chunk = query_xyz[j:j+chunk_pix,i:i+chunk_pts].clone()
xyz_embedded = embedding_xyz(query_xyz_chunk)
vol_feat_subchunk = evaluate_mlp(nerf_feat, xyz_embedded)
# normalize vol feat
vol_feat_subchunk = F.normalize(vol_feat_subchunk,2,-1)
# cpix, cpts
# distance metric
cost_subchunk = (vol_feat_subchunk * \
feats_chunk[:,None]).sum(-1) * (nerf_feat.beta.abs()+1e-9)
cost_chunk.append(cost_subchunk)
cost_chunk = torch.cat(cost_chunk,0) # (nsample, cpts)
cost_vol.append(cost_chunk)
cost_vol = torch.cat(cost_vol,-1) # (nsample, k**3)
prob_vol = cost_vol.softmax(-1)
# regress to the true location, n,3
if not is_training: torch.cuda.empty_cache()
# n, ns, 1 * n, ns, 3
pts_pred = (prob_vol[...,None] * query_xyz).sum(1)
if rt_entropy:
# compute normalized entropy
match_unc = (-prob_vol * prob_vol.clamp(1e-9,1-1e-9).log()).sum(1)[:,None]
match_unc = match_unc/np.log(grid_size**3)
return pts_pred, match_unc
else:
return pts_pred
def grad_update_bone(bones,embedding_xyz, nerf_vis, learning_rate):
"""
#TODO need to update bones locally
"""
device = bones.device
bones_data = bones.data.detach()
bones_data.requires_grad_(True)
bone_xyz_embed = embedding_xyz(bones_data[:,None,:3])
sdf_at_bone = evaluate_mlp(nerf_vis, bone_xyz_embed)
bone_loc_loss = F.relu(-sdf_at_bone).mean()
# compute gradient wrt bones
d_output = torch.ones_like(bone_loc_loss, requires_grad=False, device=device)
gradient = torch.autograd.grad(
outputs=bone_loc_loss,
inputs=bones_data,
grad_outputs=d_output,
create_graph=True,
retain_graph=True,
only_inputs=True)[0]
bones.data = bones.data-gradient*learning_rate
return bone_loc_loss
def loss_filter_line(sil_err, errid, frameid, sil_loss_samp, img_size, scale_factor=10):
"""
sil_err: Tx512
errid: N
"""
sil_loss_samp = sil_loss_samp.detach().cpu().numpy().reshape(-1)
sil_err[errid] = sil_loss_samp
sil_err = sil_err.reshape(-1,img_size)
sil_err = sil_err.sum(-1) / (1e-9+(sil_err>0).astype(float).sum(-1))
sil_err_med = np.median(sil_err[sil_err>0])
invalid_frame = sil_err > sil_err_med*scale_factor
invalid_idx = invalid_frame[frameid]
sil_err[:] = 0
return invalid_idx
def loss_filter(g_floerr, flo_loss_samp, sil_at_samp_flo, scale_factor=10):
"""
g_floerr: T,
flo_loss_samp: bs,N,1
sil_at_samp_flo:bs,N,1
"""
bs = sil_at_samp_flo.shape[0]
# find history median
g_floerr = g_floerr[g_floerr>0]
# to be updated as history value
#flo_err = []
#for i in range(bs):
# flo_err_sub =flo_loss_samp[i][sil_at_samp_flo[i]]
# if len(flo_err_sub) >0:
# #flo_err_sub = flo_err_sub.median().detach().cpu().numpy()
# flo_err_sub = flo_err_sub.mean().detach().cpu().numpy()
# else:
# flo_err_sub = 0
# flo_err.append(flo_err_sub)
#flo_err = np.stack(flo_err)
# vectorized version but uses mean to update
flo_err = (flo_loss_samp * sil_at_samp_flo).sum(1) /\
(1e-9+sil_at_samp_flo.sum(1)) # bs, N, 1
flo_err = flo_err.detach().cpu().numpy()[...,0]
# find invalid idx
invalid_idx = flo_err > np.median(g_floerr)*scale_factor
return flo_err, invalid_idx
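# Hedged usage sketch (hypothetical helper and made-up numbers, not part of the
# original file): frames whose flow error exceeds 10x the historical median are
# flagged as invalid.
def _example_loss_filter():
    g_floerr = np.array([0.010, 0.012, 0.011])            # history, per frame
    flo_loss_samp = torch.Tensor([[[0.01]], [[0.50]]])    # bs=2, N=1, 1
    sil_at_samp_flo = torch.ones(2, 1, 1)                 # all samples valid
    flo_err, invalid_idx = loss_filter(g_floerr, flo_loss_samp, sil_at_samp_flo)
    return flo_err, invalid_idx                           # second frame flagged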
def compute_xyz_wt_loss(gt_list, curr_list):
loss = []
for i in range(len(gt_list)):
loss.append( (gt_list[i].detach() - curr_list[i]).pow(2).mean() )
loss = torch.stack(loss).mean()
return loss
def compute_root_sm_2nd_loss(rtk_all, data_offset):
"""
2nd order loss
"""
rot_sm_loss = []
trn_sm_loss = []
for didx in range(len(data_offset)-1):
stt_idx = data_offset[didx]
end_idx = data_offset[didx+1]
stt_rtk = rtk_all[stt_idx:end_idx-2]
mid_rtk = rtk_all[stt_idx+1:end_idx-1]
end_rtk = rtk_all[stt_idx+2:end_idx]
rot_sub1 = stt_rtk[:,:3,:3].matmul(mid_rtk[:,:3,:3].permute(0,2,1))
rot_sub2 = mid_rtk[:,:3,:3].matmul(end_rtk[:,:3,:3].permute(0,2,1))
trn_sub1 = stt_rtk[:,:3,3] - mid_rtk[:,:3,3]
trn_sub2 = mid_rtk[:,:3,3] - end_rtk[:,:3,3]
rot_sm_sub = rot_sub1.matmul(rot_sub2.permute(0,2,1))
trn_sm_sub = trn_sub1 - trn_sub2
rot_sm_loss.append(rot_sm_sub)
trn_sm_loss.append(trn_sm_sub)
rot_sm_loss = torch.cat(rot_sm_loss,0)
rot_sm_loss = rot_angle(rot_sm_loss).mean()*1e-1
trn_sm_loss = torch.cat(trn_sm_loss,0)
trn_sm_loss = trn_sm_loss.norm(2,-1).mean()
root_sm_loss = rot_sm_loss + trn_sm_loss
root_sm_loss = root_sm_loss * 0.1
return root_sm_loss
def compute_root_sm_loss(rtk_all, data_offset):
rot_sm_loss = []
trans_sm_loss = []
for didx in range(len(data_offset)-1):
stt_idx = data_offset[didx]
end_idx = data_offset[didx+1]
rot_sm_sub = rtk_all[stt_idx:end_idx-1,:3,:3].matmul(
rtk_all[stt_idx+1:end_idx,:3,:3].permute(0,2,1))
trans_sm_sub = rtk_all[stt_idx:end_idx-1,:3,3] - \
rtk_all[stt_idx+1:end_idx,:3,3]
rot_sm_loss.append(rot_sm_sub)
trans_sm_loss.append(trans_sm_sub)
rot_sm_loss = torch.cat(rot_sm_loss,0)
rot_sm_loss = rot_angle(rot_sm_loss).mean()*1e-3
trans_sm_loss = torch.cat(trans_sm_loss,0)
trans_sm_loss = trans_sm_loss.norm(2,-1).mean()*0.1
root_sm_loss = rot_sm_loss + trans_sm_loss
return root_sm_loss
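# Illustrative sketch (hypothetical helper, not part of the original file): a
# constant camera trajectory yields a (numerically) zero smoothness loss; the
# tiny residual comes from the clamp inside rot_angle.
def _example_compute_root_sm_loss():
    rtk_all = torch.eye(4)[None].repeat(5, 1, 1)  # 5 identical cameras
    data_offset = np.array([0, 5])                # a single video of 5 frames
    return compute_root_sm_loss(rtk_all, data_offset)  # ~0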
def shape_init_loss(pts, faces, mlp, embed, bound_factor, use_ellips=True):
# compute sdf loss wrt to a mesh
# construct mesh
mesh = trimesh.Trimesh(pts.cpu(), faces=faces.cpu())
device = next(mlp.parameters()).device
# Sample points
nsample =10000
obj_bound = pts.abs().max(0)[0][None,None]
bound = obj_bound * bound_factor
pts_samp = torch.rand(1,nsample,3).to(device)*2*bound-bound
# outside: positive
if use_ellips:
# signed distance to a ellipsoid
dis = (pts_samp/obj_bound).pow(2).sum(2).view(-1)
dis = torch.sqrt(dis)
dis = dis - 1
dis = dis * obj_bound.mean()
else:
# signed distance to a sphere
dis = (pts_samp).pow(2).sum(2).view(-1)
dis = torch.sqrt(dis)
dis = dis - obj_bound.min()
# compute sdf
pts_embedded = embed(pts_samp)
y = evaluate_mlp(mlp, pts_embedded, chunk=pts_samp.shape[0],
xyz=None,code=None,sigma_only=True)
sdf = -y.view(-1) # positive: outside
shape_loss = (sdf - dis).pow(2).mean()
return shape_loss
|
banmo-main
|
nnutils/loss_utils.py
|
"""
MIT License
Copyright (c) 2019 ThibaultGROUEIX
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import torch
def fscore(dist1, dist2, threshold=0.001):
"""
Calculates the F-score between two point clouds with the corresponding threshold value.
:param dist1: Batch, N-Points
:param dist2: Batch, N-Points
:param th: float
:return: fscore, precision, recall
"""
# NB: In this repo, dist1 and dist2 are squared pointcloud euclidean distances, so you should adapt the threshold accordingly.
precision_1 = torch.mean((dist1 < threshold).float(), dim=1)
precision_2 = torch.mean((dist2 < threshold).float(), dim=1)
fscore = 2 * precision_1 * precision_2 / (precision_1 + precision_2)
fscore[torch.isnan(fscore)] = 0
return fscore, precision_1, precision_2
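# Hedged usage sketch (hypothetical helper, not part of the original file):
# fscore expects *squared* distances; when every distance is below the
# threshold, precision, recall and F-score are all 1.
def _example_fscore():
    d1 = torch.full((1, 100), 1e-6)  # squared distances, prediction -> gt
    d2 = torch.full((1, 100), 1e-6)  # squared distances, gt -> prediction
    f, p, r = fscore(d1, d2, threshold=0.001)
    return f, p, r                   # -> tensors of ones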
|
banmo-main
|
third_party/fscore.py
|
# MIT license
# Copyright (c) 2019 LI RUOTENG
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# modified from https://github.com/liruoteng/OpticalFlowToolkit
import png
import numpy as np
import matplotlib.colors as cl
import matplotlib.pyplot as plt
from PIL import Image
import cv2
import pdb
UNKNOWN_FLOW_THRESH = 1e7
SMALLFLOW = 0.0
LARGEFLOW = 1e8
def warp_flow(img, flow, normed=False):
h, w = flow.shape[:2]
flow = flow.copy().astype(np.float32)
if normed:
flow[:,:,0] = flow[:,:,0] * w / 2.
flow[:,:,1] = flow[:,:,1] * h / 2.
flow[:,:,0] += np.arange(w)
flow[:,:,1] += np.arange(h)[:,np.newaxis]
res = cv2.remap(img, flow, None, cv2.INTER_LINEAR)
return res
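# Illustrative sketch (hypothetical helper, not part of the original toolkit):
# warping with an all-zero flow field returns (approximately) the input image.
def _example_warp_flow():
    img = np.random.rand(4, 4, 3).astype(np.float32)
    flow = np.zeros((4, 4, 2), np.float32)
    out = warp_flow(img, flow)
    return np.allclose(out, img, atol=1e-5)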
def cat_imgflo(img, flo):
"""
img in (0,1)
flo in normalized coordinate
"""
img = img.copy() * 255
h,w = img.shape[:2]
flo = flo.copy()
flo[:,:,0] = flo[:,:,0] * 0.5 * w
flo[:,:,1] = flo[:,:,1] * 0.5 * h
imgflo = point_vec(img, flo)
return imgflo
def point_vec(img,flow,skip=10):
skip=10
maxsize=500.
extendfac=1.
#resize_factor = 2
resize_factor = max(1,max(maxsize/img.shape[0], maxsize/img.shape[1]))
dispimg = cv2.resize(img.copy(), None,fx=resize_factor,fy=resize_factor)
flow = cv2.resize(flow.copy(), None, fx=resize_factor, fy=resize_factor,interpolation=cv2.INTER_NEAREST) * resize_factor
meshgrid = np.meshgrid(range(dispimg.shape[1]),range(dispimg.shape[0]))
colorflow = flow_to_image(flow).astype(int)
for i in range(dispimg.shape[1]): # x
for j in range(dispimg.shape[0]): # y
if flow.shape[-1]==3 and flow[j,i,2] != 1: continue
if j%skip!=0 or i%skip!=0: continue
xend = int((meshgrid[0][j,i]+extendfac*flow[j,i,0]))
yend = int((meshgrid[1][j,i]+extendfac*flow[j,i,1]))
leng = np.linalg.norm(flow[j,i,:2]*extendfac)
if leng<1:continue
dispimg = cv2.arrowedLine(dispimg, (meshgrid[0][j,i],meshgrid[1][j,i]),\
(xend,yend),
(int(colorflow[j,i,2]),int(colorflow[j,i,1]),int(colorflow[j,i,0])),1,tipLength=4/leng,line_type=cv2.LINE_AA)
return dispimg
def flow_to_image(flow):
"""
Convert flow into middlebury color code image
:param flow: optical flow map
:return: optical flow image in middlebury color
"""
u = flow[:, :, 0]
v = flow[:, :, 1]
maxu = -999.
maxv = -999.
minu = 999.
minv = 999.
idxUnknow = (abs(u) > UNKNOWN_FLOW_THRESH) | (abs(v) > UNKNOWN_FLOW_THRESH)
u[idxUnknow] = 0
v[idxUnknow] = 0
maxu = max(maxu, np.max(u))
minu = min(minu, np.min(u))
maxv = max(maxv, np.max(v))
minv = min(minv, np.min(v))
rad = np.sqrt(u ** 2 + v ** 2)
maxrad = max(-1, np.max(rad))
u = u/(maxrad + np.finfo(float).eps)
v = v/(maxrad + np.finfo(float).eps)
img = compute_color(u, v)
idx = np.repeat(idxUnknow[:, :, np.newaxis], 3, axis=2)
img[idx] = 0
return np.uint8(img)
def compute_color(u, v):
"""
compute optical flow color map
:param u: optical flow horizontal map
:param v: optical flow vertical map
:return: optical flow in color code
"""
[h, w] = u.shape
img = np.zeros([h, w, 3])
nanIdx = np.isnan(u) | np.isnan(v)
u[nanIdx] = 0
v[nanIdx] = 0
colorwheel = make_color_wheel()
ncols = np.size(colorwheel, 0)
rad = np.sqrt(u**2+v**2)
a = np.arctan2(-v, -u) / np.pi
fk = (a+1) / 2 * (ncols - 1) + 1
k0 = np.floor(fk).astype(int)
k1 = k0 + 1
k1[k1 == ncols+1] = 1
f = fk - k0
for i in range(0, np.size(colorwheel,1)):
tmp = colorwheel[:, i]
col0 = tmp[k0-1] / 255
col1 = tmp[k1-1] / 255
col = (1-f) * col0 + f * col1
idx = rad <= 1
col[idx] = 1-rad[idx]*(1-col[idx])
notidx = np.logical_not(idx)
col[notidx] *= 0.75
img[:, :, i] = np.uint8(np.floor(255 * col*(1-nanIdx)))
return img
def make_color_wheel():
"""
Generate color wheel according to the Middlebury color code
:return: Color wheel
"""
RY = 15
YG = 6
GC = 4
CB = 11
BM = 13
MR = 6
ncols = RY + YG + GC + CB + BM + MR
colorwheel = np.zeros([ncols, 3])
col = 0
# RY
colorwheel[0:RY, 0] = 255
colorwheel[0:RY, 1] = np.transpose(np.floor(255*np.arange(0, RY) / RY))
col += RY
# YG
colorwheel[col:col+YG, 0] = 255 - np.transpose(np.floor(255*np.arange(0, YG) / YG))
colorwheel[col:col+YG, 1] = 255
col += YG
# GC
colorwheel[col:col+GC, 1] = 255
colorwheel[col:col+GC, 2] = np.transpose(np.floor(255*np.arange(0, GC) / GC))
col += GC
# CB
colorwheel[col:col+CB, 1] = 255 - np.transpose(np.floor(255*np.arange(0, CB) / CB))
colorwheel[col:col+CB, 2] = 255
col += CB
# BM
colorwheel[col:col+BM, 2] = 255
colorwheel[col:col+BM, 0] = np.transpose(np.floor(255*np.arange(0, BM) / BM))
col += BM
# MR
colorwheel[col:col+MR, 2] = 255 - np.transpose(np.floor(255 * np.arange(0, MR) / MR))
colorwheel[col:col+MR, 0] = 255
return colorwheel
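# Hedged usage sketch (hypothetical helper, not part of the original toolkit):
# color-coding a synthetic, purely horizontal flow field with flow_to_image.
def _example_flow_to_image():
    flow = np.zeros((8, 8, 2), np.float32)
    flow[..., 0] = 1.0               # one pixel of rightward motion everywhere
    img = flow_to_image(flow)        # uint8 h,w,3 Middlebury color coding
    return img.shape, img.dtype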
|
banmo-main
|
third_party/ext_utils/flowlib.py
|
# MIT License
#
# Copyright (c) 2019 Carnegie Mellon University
# Copyright (c) 2021 Google LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import math
import png
import struct
import array
import numpy as np
import cv2
import pdb
import sys
import re
from io import *
def readPFM(file):
file = open(file, 'rb')
color = None
width = None
height = None
scale = None
endian = None
header = file.readline().rstrip()
if (sys.version[0]) == '3':
header = header.decode('utf-8')
if header == 'PF':
color = True
elif header == 'Pf':
color = False
else:
raise Exception('Not a PFM file.')
if (sys.version[0]) == '3':
dim_match = re.match(r'^(\d+)\s(\d+)\s$', file.readline().decode('utf-8'))
else:
dim_match = re.match(r'^(\d+)\s(\d+)\s$', file.readline())
if dim_match:
width, height = map(int, dim_match.groups())
else:
raise Exception('Malformed PFM header.')
if (sys.version[0]) == '3':
scale = float(file.readline().rstrip().decode('utf-8'))
else:
scale = float(file.readline().rstrip())
if scale < 0: # little-endian
endian = '<'
scale = -scale
else:
endian = '>' # big-endian
data = np.fromfile(file, endian + 'f')
shape = (height, width, 3) if color else (height, width)
data = np.reshape(data, shape)
data = np.flipud(data)
return data, scale
def write_pfm(path, image, scale=1):
"""Write pfm file.
Args:
path (str): path to file
image (array): data
scale (int, optional): Scale. Defaults to 1.
"""
# cast to h x w x 3 when a 2-channel (flow) image is given
if image.ndim == 3 and image.shape[-1]==2:
image = np.concatenate([image, np.zeros(image.shape[:2] + (1,))],-1)
image = image.astype(np.float32)
with open(path, "wb") as file:
color = None
if image.dtype.name != "float32":
raise Exception("Image dtype must be float32.")
image = np.flipud(image)
if len(image.shape) == 3 and image.shape[2] == 3: # color image
color = True
elif (
len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1
): # greyscale
color = False
else:
raise Exception("Image must have H x W x 3, H x W x 1 or H x W dimensions.")
file.write("PF\n".encode() if color else "Pf\n".encode())
file.write("%d %d\n".encode() % (image.shape[1], image.shape[0]))
endian = image.dtype.byteorder
if endian == "<" or endian == "=" and sys.byteorder == "little":
scale = -scale
file.write("%f\n".encode() % scale)
image.tofile(file)
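# Illustrative round trip (hypothetical helper and temporary path, not part of
# the original file): data written with write_pfm should be recovered by readPFM.
def _example_pfm_roundtrip(tmp_path='/tmp/_example.pfm'):
    data = np.random.rand(4, 5, 3).astype(np.float32)
    write_pfm(tmp_path, data)
    out, scale = readPFM(tmp_path)
    return np.allclose(out, data), scale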
|
banmo-main
|
third_party/ext_utils/util_flow.py
|
from setuptools import setup, find_packages
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
CUDA_FLAGS = []
gencodes = [
'-gencode', 'arch=compute_52,code=sm_52',
'-gencode', 'arch=compute_60,code=sm_60',
'-gencode', 'arch=compute_61,code=sm_61',
'-gencode', 'arch=compute_70,code=sm_70',
'-gencode', 'arch=compute_75,code=sm_75',
'-gencode', 'arch=compute_75,code=compute_75',]
extra_compile_flags = {'cxx': [], 'nvcc': []}
extra_compile_flags['nvcc'] += gencodes
ext_modules=[
CUDAExtension('soft_renderer.cuda.load_textures', [
'soft_renderer/cuda/load_textures_cuda.cpp',
'soft_renderer/cuda/load_textures_cuda_kernel.cu',
],extra_compile_args=extra_compile_flags,),
CUDAExtension('soft_renderer.cuda.create_texture_image', [
'soft_renderer/cuda/create_texture_image_cuda.cpp',
'soft_renderer/cuda/create_texture_image_cuda_kernel.cu',
],extra_compile_args=extra_compile_flags,),
CUDAExtension('soft_renderer.cuda.soft_rasterize', [
'soft_renderer/cuda/soft_rasterize_cuda.cpp',
'soft_renderer/cuda/soft_rasterize_cuda_kernel.cu',
],extra_compile_args=extra_compile_flags,),
CUDAExtension('soft_renderer.cuda.voxelization', [
'soft_renderer/cuda/voxelization_cuda.cpp',
'soft_renderer/cuda/voxelization_cuda_kernel.cu',
],extra_compile_args=extra_compile_flags,),
]
INSTALL_REQUIREMENTS = ['numpy', 'torch', 'torchvision', 'scikit-image', 'tqdm', 'imageio']
setup(
description='PyTorch implementation of "Soft Rasterizer"',
author='Shichen Liu',
author_email='liushichen95@gmail.com',
license='MIT License',
version='1.0.0',
name='soft_renderer',
packages=['soft_renderer', 'soft_renderer.cuda', 'soft_renderer.functional'],
install_requires=INSTALL_REQUIREMENTS,
ext_modules=ext_modules,
cmdclass = {'build_ext': BuildExtension}
)
|
banmo-main
|
third_party/softras/setup.py
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy
import soft_renderer as sr
class Renderer(nn.Module):
def __init__(self, image_size=256, background_color=[0,0,0], near=1, far=100,
anti_aliasing=True, fill_back=True, eps=1e-6,
camera_mode='projection',
P=None, dist_coeffs=None, orig_size=512,
perspective=True, viewing_angle=30, viewing_scale=1.0,
eye=None, camera_direction=[0,0,1],
light_mode='surface',
light_intensity_ambient=0.5, light_color_ambient=[1,1,1],
light_intensity_directionals=0.5, light_color_directionals=[1,1,1],
light_directions=[0,1,0]):
super(Renderer, self).__init__()
# light
self.lighting = sr.Lighting(light_mode,
light_intensity_ambient, light_color_ambient,
light_intensity_directionals, light_color_directionals,
light_directions)
# camera
self.transform = sr.Transform(camera_mode,
P, dist_coeffs, orig_size,
perspective, viewing_angle, viewing_scale,
eye, camera_direction)
# rasterization
self.rasterizer = sr.Rasterizer(image_size, background_color, near, far,
anti_aliasing, fill_back, eps)
def forward(self, mesh, mode=None):
mesh = self.lighting(mesh)
mesh = self.transform(mesh)
return self.rasterizer(mesh, mode)
class SoftRenderer(nn.Module):
def __init__(self, image_size=256, background_color=[0,0,0], near=1, far=100,
anti_aliasing=False, fill_back=True, eps=1e-3,
sigma_val=1e-5, dist_func='euclidean', dist_eps=1e-4,
gamma_val=1e-4, aggr_func_rgb='softmax', aggr_func_alpha='prod',
texture_type='surface',
camera_mode='projection',
P=None, dist_coeffs=None, orig_size=512,
perspective=True, viewing_angle=30, viewing_scale=1.0,
eye=None, camera_direction=[0,0,1],
light_mode='surface',
light_intensity_ambient=0.5, light_color_ambient=[1,1,1],
light_intensity_directionals=0.5, light_color_directionals=[1,1,1],
light_directions=[0,1,0]):
super(SoftRenderer, self).__init__()
# light
self.lighting = sr.Lighting(light_mode,
light_intensity_ambient, light_color_ambient,
light_intensity_directionals, light_color_directionals,
light_directions)
# camera
self.transform = sr.Transform(camera_mode,
P, dist_coeffs, orig_size,
perspective, viewing_angle, viewing_scale,
eye, camera_direction)
# rasterization
self.rasterizer = sr.SoftRasterizer(image_size, background_color, near, far,
anti_aliasing, fill_back, eps,
sigma_val, dist_func, dist_eps,
gamma_val, aggr_func_rgb, aggr_func_alpha,
texture_type)
def set_sigma(self, sigma):
self.rasterizer.sigma_val = sigma
def set_gamma(self, gamma):
self.rasterizer.gamma_val = gamma
def set_texture_mode(self, mode):
assert mode in ['vertex', 'surface'], 'Mode only support surface and vertex'
self.lighting.light_mode = mode
self.rasterizer.texture_type = mode
def render_mesh(self, mesh, mode=None):
self.set_texture_mode(mesh.texture_type)
mesh = self.lighting(mesh)
mesh = self.transform(mesh)
return self.rasterizer(mesh, mode)
def forward(self, vertices, faces, textures=None, mode=None, texture_type='surface'):
mesh = sr.Mesh(vertices, faces, textures=textures, texture_type=texture_type)
return self.render_mesh(mesh, mode)
|
banmo-main
|
third_party/softras/soft_renderer/renderer.py
|
from . import functional
from .mesh import Mesh
from .renderer import Renderer, SoftRenderer
from .transform import Projection, LookAt, Look, Transform
from .lighting import AmbientLighting, DirectionalLighting, Lighting
from .rasterizer import SoftRasterizer
from .losses import LaplacianLoss, FlattenLoss
__version__ = '1.0.0'
|
banmo-main
|
third_party/softras/soft_renderer/__init__.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import soft_renderer.functional as srf
class Mesh(object):
'''
A simple class for creating and manipulating trimesh objects
'''
def __init__(self, vertices, faces, textures=None, texture_res=1, texture_type='surface'):
'''
vertices, faces and textures(if not None) are expected to be Tensor objects
'''
self._vertices = vertices
self._faces = faces
if isinstance(self._vertices, np.ndarray):
self._vertices = torch.from_numpy(self._vertices).float().cuda()
if isinstance(self._faces, np.ndarray):
self._faces = torch.from_numpy(self._faces).int().cuda()
if self._vertices.ndimension() == 2:
self._vertices = self._vertices[None, :, :]
if self._faces.ndimension() == 2:
self._faces = self._faces[None, :, :]
self.device = self._vertices.device
self.texture_type = texture_type
self.batch_size = self._vertices.shape[0]
self.num_vertices = self._vertices.shape[1]
self.num_faces = self._faces.shape[1]
self._face_vertices = None
self._face_vertices_update = True
self._surface_normals = None
self._surface_normals_update = True
self._vertex_normals = None
self._vertex_normals_update = True
self._fill_back = False
# create textures
if textures is None:
if texture_type == 'surface':
self._textures = torch.ones(self.batch_size, self.num_faces, texture_res**2, 3,
dtype=torch.float32).to(self.device)
self.texture_res = texture_res
elif texture_type == 'vertex':
self._textures = torch.ones(self.batch_size, self.num_vertices, 3,
dtype=torch.float32).to(self.device)
self.texture_res = 1
else:
if isinstance(textures, np.ndarray):
textures = torch.from_numpy(textures).float().cuda()
if textures.ndimension() == 3 and texture_type == 'surface':
textures = textures[None, :, :, :]
if textures.ndimension() == 2 and texture_type == 'vertex':
textures = textures[None, :, :]
self._textures = textures
self.texture_res = int(np.sqrt(self._textures.shape[2]))
self._origin_vertices = self._vertices
self._origin_faces = self._faces
self._origin_textures = self._textures
@property
def faces(self):
return self._faces
@faces.setter
def faces(self, faces):
# need check tensor
self._faces = faces
self.num_faces = self._faces.shape[1]
self._face_vertices_update = True
self._surface_normals_update = True
self._vertex_normals_update = True
@property
def vertices(self):
return self._vertices
@vertices.setter
def vertices(self, vertices):
# need check tensor
self._vertices = vertices
self.num_vertices = self._vertices.shape[1]
self._face_vertices_update = True
self._surface_normals_update = True
self._vertex_normals_update = True
@property
def textures(self):
return self._textures
@textures.setter
def textures(self, textures):
# need check tensor
self._textures = textures
@property
def face_vertices(self):
if self._face_vertices_update:
self._face_vertices = srf.face_vertices(self.vertices, self.faces)
self._face_vertices_update = False
return self._face_vertices
@property
def surface_normals(self):
if self._surface_normals_update:
v10 = self.face_vertices[:, :, 0] - self.face_vertices[:, :, 1]
v12 = self.face_vertices[:, :, 2] - self.face_vertices[:, :, 1]
self._surface_normals = F.normalize(torch.cross(v12, v10), p=2, dim=2, eps=1e-6)
self._surface_normals_update = False
return self._surface_normals
@property
def vertex_normals(self):
if self._vertex_normals_update:
self._vertex_normals = srf.vertex_normals(self.vertices, self.faces)
self._vertex_normals_update = False
return self._vertex_normals
@property
def face_textures(self):
if self.texture_type in ['surface']:
return self.textures
elif self.texture_type in ['vertex']:
return srf.face_vertices(self.textures, self.faces)
else:
raise ValueError('texture type not applicable')
def fill_back_(self):
if not self._fill_back:
self.faces = torch.cat((self.faces, self.faces[:, :, [2, 1, 0]]), dim=1)
self.textures = torch.cat((self.textures, self.textures), dim=1)
self._fill_back = True
def reset_(self):
self.vertices = self._origin_vertices
self.faces = self._origin_faces
self.textures = self._origin_textures
self._fill_back = False
@classmethod
def from_obj(cls, filename_obj, normalization=False, load_texture=False, texture_res=1, texture_type='surface'):
'''
Create a Mesh object from a .obj file
'''
if load_texture:
vertices, faces, textures = srf.load_obj(filename_obj,
normalization=normalization,
texture_res=texture_res,
load_texture=True,
texture_type=texture_type)
else:
vertices, faces = srf.load_obj(filename_obj,
normalization=normalization,
texture_res=texture_res,
load_texture=False)
textures = None
return cls(vertices, faces, textures, texture_res, texture_type)
def save_obj(self, filename_obj, save_texture=False, texture_res_out=16):
if self.batch_size != 1:
raise ValueError('Could not save when batch size != 1')
if save_texture:
srf.save_obj(filename_obj, self.vertices[0], self.faces[0],
textures=self.textures[0],
texture_res=texture_res_out, texture_type=self.texture_type)
else:
srf.save_obj(filename_obj, self.vertices[0], self.faces[0], textures=None)
def voxelize(self, voxel_size=32):
face_vertices_norm = self.face_vertices * voxel_size / (voxel_size - 1) + 0.5
return srf.voxelization(face_vertices_norm, voxel_size, False)
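# Hedged usage sketch (hypothetical helper, not part of the original API):
# building a single-triangle Mesh from CPU tensors; passing numpy arrays
# instead would move the data to CUDA in __init__.
def _example_mesh():
    verts = torch.Tensor([[[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]]])  # 1,3,3
    faces = torch.IntTensor([[[0, 1, 2]]])                              # 1,1,3
    mesh = Mesh(verts, faces, texture_type='vertex')
    return mesh.num_vertices, mesh.num_faces, mesh.textures.shape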
|
banmo-main
|
third_party/softras/soft_renderer/mesh.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import soft_renderer.functional as srf
class AmbientLighting(nn.Module):
def __init__(self, light_intensity=0.5, light_color=(1,1,1)):
super(AmbientLighting, self).__init__()
self.light_intensity = light_intensity
self.light_color = light_color
def forward(self, light):
return srf.ambient_lighting(light, self.light_intensity, self.light_color)
class DirectionalLighting(nn.Module):
def __init__(self, light_intensity=0.5, light_color=(1,1,1), light_direction=(0,1,0)):
super(DirectionalLighting, self).__init__()
self.light_intensity = light_intensity
self.light_color = light_color
self.light_direction = light_direction
def forward(self, light, normals):
return srf.directional_lighting(light, normals,
self.light_intensity, self.light_color,
self.light_direction)
class Lighting(nn.Module):
def __init__(self, light_mode='surface',
intensity_ambient=0.5, color_ambient=[1,1,1],
intensity_directionals=0.5, color_directionals=[1,1,1],
directions=[0,1,0]):
super(Lighting, self).__init__()
if light_mode not in ['surface', 'vertex']:
raise ValueError('Lighting mode only support surface and vertex')
self.light_mode = light_mode
self.ambient = AmbientLighting(intensity_ambient, color_ambient)
self.directionals = nn.ModuleList([DirectionalLighting(intensity_directionals,
color_directionals,
directions)])
def forward(self, mesh):
if self.light_mode == 'surface':
light = torch.zeros_like(mesh.faces, dtype=torch.float32).to(mesh.device)
light = light.contiguous()
light = self.ambient(light)
for directional in self.directionals:
light = directional(light, mesh.surface_normals)
mesh.textures = mesh.textures * light[:, :, None, :]
elif self.light_mode == 'vertex':
light = torch.zeros_like(mesh.vertices, dtype=torch.float32).to(mesh.device)
light = light.contiguous()
light = self.ambient(light)
for directional in self.directionals:
light = directional(light, mesh.vertex_normals)
mesh.textures = mesh.textures * light
return mesh
|
banmo-main
|
third_party/softras/soft_renderer/lighting.py
|
import math
import numpy as np
import torch
import torch.nn as nn
import soft_renderer.functional as srf
class Projection(nn.Module):
def __init__(self, P, dist_coeffs=None, orig_size=512):
super(Projection, self).__init__()
self.P = P
self.dist_coeffs = dist_coeffs
self.orig_size = orig_size
if isinstance(self.P, np.ndarray):
self.P = torch.from_numpy(self.P).cuda()
if self.P is None or self.P.ndimension() != 3 or self.P.shape[1] != 3 or self.P.shape[2] != 4:
raise ValueError('You need to provide a valid (batch_size)x3x4 projection matrix')
if dist_coeffs is None:
self.dist_coeffs = torch.cuda.FloatTensor([[0., 0., 0., 0., 0.]]).repeat(self.P.shape[0], 1)
def forward(self, vertices):
vertices = srf.projection(vertices, self.P, self.dist_coeffs, self.orig_size)
return vertices
class LookAt(nn.Module):
def __init__(self, perspective=True, viewing_angle=30, viewing_scale=1.0, eye=None):
super(LookAt, self).__init__()
self.perspective = perspective
self.viewing_angle = viewing_angle
self.viewing_scale = viewing_scale
self._eye = eye
if self._eye is None:
self._eye = [0, 0, -(1. / math.tan(math.radians(self.viewing_angle)) + 1)]
def forward(self, vertices):
vertices = srf.look_at(vertices, self._eye)
# perspective transformation
if self.perspective:
vertices = srf.perspective(vertices, angle=self.viewing_angle)
else:
vertices = srf.orthogonal(vertices, scale=self.viewing_scale)
return vertices
class Look(nn.Module):
def __init__(self, camera_direction=[0,0,1], perspective=True, viewing_angle=30, viewing_scale=1.0, eye=None):
super(Look, self).__init__()
self.perspective = perspective
self.viewing_angle = viewing_angle
self.viewing_scale = viewing_scale
self._eye = eye
self.camera_direction = camera_direction
if self._eye is None:
self._eye = [0, 0, -(1. / math.tan(math.radians(self.viewing_angle)) + 1)]
def forward(self, vertices):
vertices = srf.look(vertices, self._eye, self.camera_direction)
# perspective transformation
if self.perspective:
vertices = srf.perspective(vertices, angle=self.viewing_angle)
else:
vertices = srf.orthogonal(vertices, scale=self.viewing_scale)
return vertices
class Transform(nn.Module):
def __init__(self, camera_mode='projection', P=None, dist_coeffs=None, orig_size=512,
perspective=True, viewing_angle=30, viewing_scale=1.0,
eye=None, camera_direction=[0,0,1]):
super(Transform, self).__init__()
self.camera_mode = camera_mode
if self.camera_mode == 'projection':
self.transformer = Projection(P, dist_coeffs, orig_size)
elif self.camera_mode == 'look':
self.transformer = Look(perspective, viewing_angle, viewing_scale, eye, camera_direction)
elif self.camera_mode == 'look_at':
self.transformer = LookAt(perspective, viewing_angle, viewing_scale, eye)
else:
raise ValueError('Camera mode has to be one of projection, look or look_at')
def forward(self, mesh):
mesh.vertices = self.transformer(mesh.vertices)
return mesh
def set_eyes_from_angles(self, distances, elevations, azimuths):
if self.camera_mode not in ['look', 'look_at']:
raise ValueError('Projection does not need to set eyes')
self.transformer._eye = srf.get_points_from_angles(distances, elevations, azimuths)
def set_eyes(self, eyes):
if self.camera_mode not in ['look', 'look_at']:
raise ValueError('Projection does not need to set eyes')
self.transformer._eye = eyes
@property
def eyes(self):
        return self.transformer._eye
|
banmo-main
|
third_party/softras/soft_renderer/transform.py
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import soft_renderer.functional as srf
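# SoftRasterizer is a thin nn.Module wrapper around srf.soft_rasterize; when
# anti_aliasing is enabled it renders at twice the resolution and average-pools back down.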
class SoftRasterizer(nn.Module):
def __init__(self, image_size=256, background_color=[0, 0, 0], near=1, far=100,
anti_aliasing=False, fill_back=False, eps=1e-3,
sigma_val=1e-5, dist_func='euclidean', dist_eps=1e-4,
gamma_val=1e-4, aggr_func_rgb='softmax', aggr_func_alpha='prod',
texture_type='surface'):
super(SoftRasterizer, self).__init__()
        if dist_func not in ['hard', 'euclidean', 'barycentric']:
            raise ValueError("Distance function only supports 'hard', 'euclidean' and 'barycentric'")
        if aggr_func_rgb not in ['hard', 'softmax']:
            raise ValueError("Aggregate function (rgb) only supports 'hard' and 'softmax'")
        if aggr_func_alpha not in ['hard', 'prod', 'sum']:
            raise ValueError("Aggregate function (alpha) only supports 'hard', 'prod' and 'sum'")
        if texture_type not in ['surface', 'vertex']:
            raise ValueError("Texture type only supports 'surface' and 'vertex'")
self.image_size = image_size
self.background_color = background_color
self.near = near
self.far = far
self.anti_aliasing = anti_aliasing
self.eps = eps
self.fill_back = fill_back
self.sigma_val = sigma_val
self.dist_func = dist_func
self.dist_eps = dist_eps
self.gamma_val = gamma_val
self.aggr_func_rgb = aggr_func_rgb
self.aggr_func_alpha = aggr_func_alpha
self.texture_type = texture_type
def forward(self, mesh, mode=None):
image_size = self.image_size * (2 if self.anti_aliasing else 1)
images = srf.soft_rasterize(mesh.face_vertices, mesh.face_textures, image_size,
self.background_color, self.near, self.far,
self.fill_back, self.eps,
self.sigma_val, self.dist_func, self.dist_eps,
self.gamma_val, self.aggr_func_rgb, self.aggr_func_alpha,
self.texture_type)
if self.anti_aliasing:
images = F.avg_pool2d(images, kernel_size=2, stride=2)
return images
|
banmo-main
|
third_party/softras/soft_renderer/rasterizer.py
|
import torch
import torch.nn as nn
import numpy as np
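# LaplacianLoss builds a row-normalized uniform graph Laplacian from the face
# connectivity and penalizes the squared offset of each vertex from the mean of
# its neighbors, encouraging a smooth mesh.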
class LaplacianLoss(nn.Module):
def __init__(self, vertex, faces, average=False):
super(LaplacianLoss, self).__init__()
self.nv = vertex.size(0)
self.nf = faces.size(0)
self.average = average
laplacian = np.zeros([self.nv, self.nv]).astype(np.float32)
laplacian[faces[:, 0], faces[:, 1]] = -1
laplacian[faces[:, 1], faces[:, 0]] = -1
laplacian[faces[:, 1], faces[:, 2]] = -1
laplacian[faces[:, 2], faces[:, 1]] = -1
laplacian[faces[:, 2], faces[:, 0]] = -1
laplacian[faces[:, 0], faces[:, 2]] = -1
r, c = np.diag_indices(laplacian.shape[0])
laplacian[r, c] = -laplacian.sum(1)
for i in range(self.nv):
laplacian[i, :] /= laplacian[i, i]
self.register_buffer('laplacian', torch.from_numpy(laplacian))
def forward(self, x):
batch_size = x.size(0)
x = torch.matmul(self.laplacian, x)
dims = tuple(range(x.ndimension())[1:])
x = x.pow(2).sum(dims)
if self.average:
return x.sum() / batch_size
else:
return x
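# FlattenLoss gathers pairs of faces sharing an edge and penalizes (cos(theta) + 1)^2
# of the angle between them, which is zero when the two faces are coplanar
# (a 180-degree dihedral angle).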
class FlattenLoss(nn.Module):
def __init__(self, faces, average=False):
super(FlattenLoss, self).__init__()
self.nf = faces.size(0)
self.average = average
faces = faces.detach().cpu().numpy()
vertices = list(set([tuple(v) for v in np.sort(np.concatenate((faces[:, 0:2], faces[:, 1:3]), axis=0))]))
v0s = np.array([v[0] for v in vertices], 'int32')
v1s = np.array([v[1] for v in vertices], 'int32')
v2s = []
v3s = []
for v0, v1 in zip(v0s, v1s):
count = 0
for face in faces:
if v0 in face and v1 in face:
v = np.copy(face)
v = v[v != v0]
v = v[v != v1]
if count == 0:
v2s.append(int(v[0]))
count += 1
else:
v3s.append(int(v[0]))
v2s = np.array(v2s, 'int32')
v3s = np.array(v3s, 'int32')
self.register_buffer('v0s', torch.from_numpy(v0s).long())
self.register_buffer('v1s', torch.from_numpy(v1s).long())
self.register_buffer('v2s', torch.from_numpy(v2s).long())
self.register_buffer('v3s', torch.from_numpy(v3s).long())
def forward(self, vertices, eps=1e-6):
# make v0s, v1s, v2s, v3s
batch_size = vertices.size(0)
v0s = vertices[:, self.v0s, :]
v1s = vertices[:, self.v1s, :]
v2s = vertices[:, self.v2s, :]
v3s = vertices[:, self.v3s, :]
a1 = v1s - v0s
b1 = v2s - v0s
a1l2 = a1.pow(2).sum(-1)
b1l2 = b1.pow(2).sum(-1)
a1l1 = (a1l2 + eps).sqrt()
b1l1 = (b1l2 + eps).sqrt()
ab1 = (a1 * b1).sum(-1)
cos1 = ab1 / (a1l1 * b1l1 + eps)
sin1 = (1 - cos1.pow(2) + eps).sqrt()
c1 = a1 * (ab1 / (a1l2 + eps))[:, :, None]
cb1 = b1 - c1
cb1l1 = b1l1 * sin1
a2 = v1s - v0s
b2 = v3s - v0s
a2l2 = a2.pow(2).sum(-1)
b2l2 = b2.pow(2).sum(-1)
a2l1 = (a2l2 + eps).sqrt()
b2l1 = (b2l2 + eps).sqrt()
ab2 = (a2 * b2).sum(-1)
cos2 = ab2 / (a2l1 * b2l1 + eps)
sin2 = (1 - cos2.pow(2) + eps).sqrt()
c2 = a2 * (ab2 / (a2l2 + eps))[:, :, None]
cb2 = b2 - c2
cb2l1 = b2l1 * sin2
cos = (cb1 * cb2).sum(-1) / (cb1l1 * cb2l1 + eps)
dims = tuple(range(cos.ndimension())[1:])
loss = (cos + 1).pow(2).sum(dims)
if self.average:
return loss.sum() / batch_size
else:
return loss
|
banmo-main
|
third_party/softras/soft_renderer/losses.py
|
banmo-main
|
third_party/softras/soft_renderer/cuda/__init__.py
|
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Function
import soft_renderer.cuda.voxelization as voxelization_cuda
def voxelize_sub1(faces, size, dim):
bs = faces.size(0)
nf = faces.size(1)
if dim == 0:
faces = faces[:, :, :, [2, 1, 0]].contiguous()
elif dim == 1:
faces = faces[:, :, :, [0, 2, 1]].contiguous()
voxels = torch.zeros(bs, size, size, size).int().cuda()
return voxelization_cuda.voxelize_sub1(faces, voxels)[0].transpose(dim + 1, -1)
def voxelize_sub2(faces, size):
bs = faces.size(0)
nf = faces.size(1)
voxels = torch.zeros(bs, size, size, size).int().cuda()
return voxelization_cuda.voxelize_sub2(faces, voxels)[0]
def voxelize_sub3(faces, voxels):
bs = voxels.size(0)
vs = voxels.size(1)
visible = torch.zeros_like(voxels, dtype=torch.int32).cuda()
voxels, visible = voxelization_cuda.voxelize_sub3(faces, voxels, visible)
sum_visible = visible.sum()
while True:
voxels, visible = voxelization_cuda.voxelize_sub4(faces, voxels, visible)
if visible.sum() == sum_visible:
break
else:
sum_visible = visible.sum()
return 1 - visible
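# voxelization rasterizes the mesh into a size^3 occupancy grid: three axis-aligned
# passes (voxelize_sub1) plus voxelize_sub2 mark surface voxels, then
# voxelize_sub3/voxelize_sub4 flood-fill the visible exterior to recover the solid interior.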
def voxelization(faces, size, normalize=False):
faces = faces.clone()
if normalize:
pass
else:
faces *= size
voxels0 = voxelize_sub1(faces, size, 0)
voxels1 = voxelize_sub1(faces, size, 1)
voxels2 = voxelize_sub1(faces, size, 2)
voxels3 = voxelize_sub2(faces, size)
voxels = voxels0 + voxels1 + voxels2 + voxels3
voxels = (voxels > 0).int()
voxels = voxelize_sub3(faces, voxels)
return voxels
|
banmo-main
|
third_party/softras/soft_renderer/functional/voxelization.py
|
import numpy as np
import torch
import torch.nn.functional as F
def look_at(vertices, eye, at=[0, 0, 0], up=[0, 1, 0]):
"""
"Look at" transformation of vertices.
"""
if (vertices.ndimension() != 3):
raise ValueError('vertices Tensor should have 3 dimensions')
device = vertices.device
# if list or tuple convert to numpy array
if isinstance(at, list) or isinstance(at, tuple):
at = torch.tensor(at, dtype=torch.float32, device=device)
# if numpy array convert to tensor
elif isinstance(at, np.ndarray):
at = torch.from_numpy(at).to(device)
elif torch.is_tensor(at):
        at = at.to(device)
if isinstance(up, list) or isinstance(up, tuple):
up = torch.tensor(up, dtype=torch.float32, device=device)
elif isinstance(up, np.ndarray):
up = torch.from_numpy(up).to(device)
elif torch.is_tensor(up):
        up = up.to(device)
if isinstance(eye, list) or isinstance(eye, tuple):
eye = torch.tensor(eye, dtype=torch.float32, device=device)
elif isinstance(eye, np.ndarray):
eye = torch.from_numpy(eye).to(device)
elif torch.is_tensor(eye):
eye = eye.to(device)
batch_size = vertices.shape[0]
if eye.ndimension() == 1:
eye = eye[None, :].repeat(batch_size, 1)
if at.ndimension() == 1:
at = at[None, :].repeat(batch_size, 1)
if up.ndimension() == 1:
up = up[None, :].repeat(batch_size, 1)
# create new axes
# eps is chosen as 1e-5 to match the chainer version
z_axis = F.normalize(at - eye, eps=1e-5)
x_axis = F.normalize(torch.cross(up, z_axis), eps=1e-5)
y_axis = F.normalize(torch.cross(z_axis, x_axis), eps=1e-5)
# create rotation matrix: [bs, 3, 3]
r = torch.cat((x_axis[:, None, :], y_axis[:, None, :], z_axis[:, None, :]), dim=1)
# apply
# [bs, nv, 3] -> [bs, nv, 3] -> [bs, nv, 3]
if vertices.shape != eye.shape:
eye = eye[:, None, :]
vertices = vertices - eye
vertices = torch.matmul(vertices, r.transpose(1,2))
return vertices
|
banmo-main
|
third_party/softras/soft_renderer/functional/look_at.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
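# Adds a simple Lambertian directional term:
# light += light_intensity * light_color * max(0, <normal, light_direction>).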
def directional_lighting(light, normals, light_intensity=0.5, light_color=(1,1,1),
light_direction=(0,1,0)):
# normals: [nb, :, 3]
device = light.device
if isinstance(light_color, tuple) or isinstance(light_color, list):
light_color = torch.tensor(light_color, dtype=torch.float32, device=device)
elif isinstance(light_color, np.ndarray):
light_color = torch.from_numpy(light_color).float().to(device)
if isinstance(light_direction, tuple) or isinstance(light_direction, list):
light_direction = torch.tensor(light_direction, dtype=torch.float32, device=device)
elif isinstance(light_direction, np.ndarray):
light_direction = torch.from_numpy(light_direction).float().to(device)
if light_color.ndimension() == 1:
light_color = light_color[None, :]
if light_direction.ndimension() == 1:
light_direction = light_direction[None, :] #[nb, 3]
    cosine = F.relu(torch.sum(normals * light_direction, dim=2)) # [nb, :]
light += light_intensity * (light_color[:, None, :] * cosine[:, :, None])
return light #[nb, :, 3]
|
banmo-main
|
third_party/softras/soft_renderer/functional/directional_lighting.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
def ambient_lighting(light, light_intensity=0.5, light_color=(1,1,1)):
device = light.device
if isinstance(light_color, tuple) or isinstance(light_color, list):
light_color = torch.tensor(light_color, dtype=torch.float32, device=device)
elif isinstance(light_color, np.ndarray):
light_color = torch.from_numpy(light_color).float().to(device)
if light_color.ndimension() == 1:
light_color = light_color[None, :]
light += light_intensity * light_color[:, None, :]
return light #[nb, :, 3]
|
banmo-main
|
third_party/softras/soft_renderer/functional/ambient_lighting.py
|
import math
import torch
def perspective(vertices, angle=30.):
'''
    Apply a perspective divide for a given viewing angle (in degrees)
'''
if (vertices.ndimension() != 3):
raise ValueError('vertices Tensor should have 3 dimensions')
device = vertices.device
angle = torch.tensor(angle / 180 * math.pi, dtype=torch.float32, device=device)
angle = angle[None]
width = torch.tan(angle)
width = width[:, None]
z = vertices[:, :, 2]
x = vertices[:, :, 0] / z / width
y = vertices[:, :, 1] / z / width
vertices = torch.stack((x,y,z), dim=2)
return vertices
|
banmo-main
|
third_party/softras/soft_renderer/functional/perspective.py
|
import os
import torch
import numpy as np
from skimage.io import imread
import soft_renderer.cuda.load_textures as load_textures_cuda
def load_mtl(filename_mtl):
'''
load color (Kd) and filename of textures from *.mtl
'''
texture_filenames = {}
colors = {}
material_name = ''
with open(filename_mtl) as f:
for line in f.readlines():
if len(line.split()) != 0:
if line.split()[0] == 'newmtl':
material_name = line.split()[1]
if line.split()[0] == 'map_Kd':
texture_filenames[material_name] = line.split()[1]
if line.split()[0] == 'Kd':
colors[material_name] = np.array(list(map(float, line.split()[1:4])))
return colors, texture_filenames
def load_textures(filename_obj, filename_mtl, texture_res):
# load vertices
vertices = []
with open(filename_obj) as f:
lines = f.readlines()
for line in lines:
if len(line.split()) == 0:
continue
if line.split()[0] == 'vt':
vertices.append([float(v) for v in line.split()[1:3]])
vertices = np.vstack(vertices).astype(np.float32)
# load faces for textures
faces = []
material_names = []
material_name = ''
for line in lines:
if len(line.split()) == 0:
continue
if line.split()[0] == 'f':
vs = line.split()[1:]
nv = len(vs)
if '/' in vs[0] and '//' not in vs[0]:
v0 = int(vs[0].split('/')[1])
else:
v0 = 0
for i in range(nv - 2):
if '/' in vs[i + 1] and '//' not in vs[i + 1]:
v1 = int(vs[i + 1].split('/')[1])
else:
v1 = 0
if '/' in vs[i + 2] and '//' not in vs[i + 2]:
v2 = int(vs[i + 2].split('/')[1])
else:
v2 = 0
faces.append((v0, v1, v2))
material_names.append(material_name)
if line.split()[0] == 'usemtl':
material_name = line.split()[1]
faces = np.vstack(faces).astype(np.int32) - 1
faces = vertices[faces]
faces = torch.from_numpy(faces).cuda()
faces[1 < faces] = faces[1 < faces] % 1
colors, texture_filenames = load_mtl(filename_mtl)
textures = torch.ones(faces.shape[0], texture_res**2, 3, dtype=torch.float32)
textures = textures.cuda()
#
for material_name, color in list(colors.items()):
color = torch.from_numpy(color).cuda()
for i, material_name_f in enumerate(material_names):
if material_name == material_name_f:
textures[i, :, :] = color[None, :]
for material_name, filename_texture in list(texture_filenames.items()):
filename_texture = os.path.join(os.path.dirname(filename_obj), filename_texture)
image = imread(filename_texture).astype(np.float32) / 255.
# texture image may have one channel (grey color)
if len(image.shape) == 2:
image = np.stack((image,)*3, -1)
        # or it may have an extra alpha channel, which we ignore for now
if image.shape[2] == 4:
image = image[:, :, :3]
# pytorch does not support negative slicing for the moment
image = image[::-1, :, :]
image = torch.from_numpy(image.copy()).cuda()
is_update = (np.array(material_names) == material_name).astype(np.int32)
is_update = torch.from_numpy(is_update).cuda()
textures = load_textures_cuda.load_textures(image, faces, textures, is_update)
return textures
def load_obj(filename_obj, normalization=False, load_texture=False, texture_res=4, texture_type='surface'):
"""
Load Wavefront .obj file.
This function only supports vertices (v x x x) and faces (f x x x).
"""
assert texture_type in ['surface', 'vertex']
# load vertices
vertices = []
with open(filename_obj) as f:
lines = f.readlines()
for line in lines:
if len(line.split()) == 0:
continue
if line.split()[0] == 'v':
vertices.append([float(v) for v in line.split()[1:4]])
vertices = torch.from_numpy(np.vstack(vertices).astype(np.float32)).cuda()
# load faces
faces = []
for line in lines:
if len(line.split()) == 0:
continue
if line.split()[0] == 'f':
vs = line.split()[1:]
nv = len(vs)
v0 = int(vs[0].split('/')[0])
for i in range(nv - 2):
v1 = int(vs[i + 1].split('/')[0])
v2 = int(vs[i + 2].split('/')[0])
faces.append((v0, v1, v2))
faces = torch.from_numpy(np.vstack(faces).astype(np.int32)).cuda() - 1
# load textures
if load_texture and texture_type == 'surface':
textures = None
for line in lines:
if line.startswith('mtllib'):
filename_mtl = os.path.join(os.path.dirname(filename_obj), line.split()[1])
textures = load_textures(filename_obj, filename_mtl, texture_res)
if textures is None:
raise Exception('Failed to load textures.')
elif load_texture and texture_type == 'vertex':
textures = []
for line in lines:
if len(line.split()) == 0:
continue
if line.split()[0] == 'v':
textures.append([float(v) for v in line.split()[4:7]])
textures = torch.from_numpy(np.vstack(textures).astype(np.float32)).cuda()
# normalize into a unit cube centered zero
if normalization:
vertices -= vertices.min(0)[0][None, :]
vertices /= torch.abs(vertices).max()
vertices *= 2
vertices -= vertices.max(0)[0][None, :] / 2
if load_texture:
return vertices, faces, textures
else:
return vertices, faces
|
banmo-main
|
third_party/softras/soft_renderer/functional/load_obj.py
|
import torch
def face_vertices(vertices, faces):
"""
:param vertices: [batch size, number of vertices, 3]
:param faces: [batch size, number of faces, 3]
:return: [batch size, number of faces, 3, 3]
"""
assert (vertices.ndimension() == 3)
assert (faces.ndimension() == 3)
assert (vertices.shape[0] == faces.shape[0])
assert (vertices.shape[2] == 3)
assert (faces.shape[2] == 3)
bs, nv = vertices.shape[:2]
bs, nf = faces.shape[:2]
device = vertices.device
faces = faces + (torch.arange(bs, dtype=torch.int32).to(device) * nv)[:, None, None]
vertices = vertices.reshape((bs * nv, 3))
# pytorch only supports long and byte tensors for indexing
return vertices[faces.long()]
|
banmo-main
|
third_party/softras/soft_renderer/functional/face_vertices.py
|
import numpy as np
import torch
import torch.nn.functional as F
def look(vertices, eye, direction=[0, 1, 0], up=None):
"""
"Look" transformation of vertices.
"""
if (vertices.ndimension() != 3):
raise ValueError('vertices Tensor should have 3 dimensions')
device = vertices.device
if isinstance(direction, list) or isinstance(direction, tuple):
direction = torch.tensor(direction, dtype=torch.float32, device=device)
elif isinstance(direction, np.ndarray):
direction = torch.from_numpy(direction).to(device)
elif torch.is_tensor(direction):
        direction = direction.to(device)
if isinstance(eye, list) or isinstance(eye, tuple):
eye = torch.tensor(eye, dtype=torch.float32, device=device)
elif isinstance(eye, np.ndarray):
eye = torch.from_numpy(eye).to(device)
elif torch.is_tensor(eye):
eye = eye.to(device)
    # default / convert the up vector (the original code would crash on the default up=None,
    # which is what the Look module passes)
    if up is None:
        up = torch.tensor([0., 1., 0.], dtype=torch.float32, device=device)
    elif isinstance(up, (list, tuple)):
        up = torch.tensor(up, dtype=torch.float32, device=device)
    if eye.ndimension() == 1:
        eye = eye[None, :]
    if direction.ndimension() == 1:
        direction = direction[None, :]
    if up.ndimension() == 1:
        up = up[None, :]
# create new axes
z_axis = F.normalize(direction, eps=1e-5)
x_axis = F.normalize(torch.cross(up, z_axis), eps=1e-5)
y_axis = F.normalize(torch.cross(z_axis, x_axis), eps=1e-5)
# create rotation matrix: [bs, 3, 3]
r = torch.cat((x_axis[:, None, :], y_axis[:, None, :], z_axis[:, None, :]), dim=1)
# apply
# [bs, nv, 3] -> [bs, nv, 3] -> [bs, nv, 3]
if vertices.shape != eye.shape:
eye = eye[:, None, :]
vertices = vertices - eye
vertices = torch.matmul(vertices, r.transpose(1,2))
return vertices
|
banmo-main
|
third_party/softras/soft_renderer/functional/look.py
|
from .get_points_from_angles import get_points_from_angles
from .ambient_lighting import ambient_lighting
from .directional_lighting import directional_lighting
from .load_obj import load_obj
from .look import look
from .look_at import look_at
from .perspective import perspective
from .orthogonal import orthogonal
from .projection import projection
from .soft_rasterize import soft_rasterize
from .save_obj import (save_obj, save_voxel)
from .face_vertices import face_vertices
from .vertex_normals import vertex_normals
from .voxelization import voxelization
|
banmo-main
|
third_party/softras/soft_renderer/functional/__init__.py
|
import os
import torch
from skimage.io import imsave
import soft_renderer.cuda.create_texture_image as create_texture_image_cuda
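# Packs per-face textures into a single texture-atlas image (one texture_res x texture_res
# tile per face) and returns the image together with the per-face UV coordinates into it.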
def create_texture_image(textures, texture_res=16):
num_faces = textures.shape[0]
tile_width = int((num_faces - 1.) ** 0.5) + 1
tile_height = int((num_faces - 1.) / tile_width) + 1
image = torch.ones(tile_height * texture_res, tile_width * texture_res, 3, dtype=torch.float32)
vertices = torch.zeros((num_faces, 3, 2), dtype=torch.float32) # [:, :, UV]
face_nums = torch.arange(num_faces)
column = face_nums % tile_width
row = face_nums // tile_width
vertices[:, 0, 0] = column * texture_res + texture_res / 2
vertices[:, 0, 1] = row * texture_res + 1
vertices[:, 1, 0] = column * texture_res + 1
vertices[:, 1, 1] = (row + 1) * texture_res - 1 - 1
vertices[:, 2, 0] = (column + 1) * texture_res - 1 - 1
vertices[:, 2, 1] = (row + 1) * texture_res - 1 - 1
image = image.cuda()
vertices = vertices.cuda()
textures = textures.cuda()
image = create_texture_image_cuda.create_texture_image(vertices, textures, image, 1e-5)
vertices[:, :, 0] /= (image.shape[1] - 1)
vertices[:, :, 1] /= (image.shape[0] - 1)
image = image.detach().cpu().numpy()
vertices = vertices.detach().cpu().numpy()
image = image[::-1, ::1]
return image, vertices
def save_obj(filename, vertices, faces, textures=None, texture_res=16, texture_type='surface'):
assert vertices.ndimension() == 2
assert faces.ndimension() == 2
assert texture_type in ['surface', 'vertex']
assert texture_res >= 2
if textures is not None and texture_type == 'surface':
filename_mtl = filename[:-4] + '.mtl'
filename_texture = filename[:-4] + '.png'
material_name = 'material_1'
texture_image, vertices_textures = create_texture_image(textures, texture_res)
texture_image = texture_image.clip(0, 1)
texture_image = (texture_image * 255).astype('uint8')
imsave(filename_texture, texture_image)
faces = faces.detach().cpu().numpy()
with open(filename, 'w') as f:
f.write('# %s\n' % os.path.basename(filename))
f.write('#\n')
f.write('\n')
if textures is not None and texture_type == 'surface':
f.write('mtllib %s\n\n' % os.path.basename(filename_mtl))
if textures is not None and texture_type == 'vertex':
for vertex, color in zip(vertices, textures):
f.write('v %.8f %.8f %.8f %.8f %.8f %.8f\n' % (vertex[0], vertex[1], vertex[2],
color[0], color[1], color[2]))
f.write('\n')
else:
for vertex in vertices:
f.write('v %.8f %.8f %.8f\n' % (vertex[0], vertex[1], vertex[2]))
f.write('\n')
if textures is not None and texture_type == 'surface':
for vertex in vertices_textures.reshape((-1, 2)):
f.write('vt %.8f %.8f\n' % (vertex[0], vertex[1]))
f.write('\n')
f.write('usemtl %s\n' % material_name)
for i, face in enumerate(faces):
f.write('f %d/%d %d/%d %d/%d\n' % (
face[0] + 1, 3 * i + 1, face[1] + 1, 3 * i + 2, face[2] + 1, 3 * i + 3))
f.write('\n')
else:
for face in faces:
f.write('f %d %d %d\n' % (face[0] + 1, face[1] + 1, face[2] + 1))
if textures is not None and texture_type == 'surface':
with open(filename_mtl, 'w') as f:
f.write('newmtl %s\n' % material_name)
f.write('map_Kd %s\n' % os.path.basename(filename_texture))
def save_voxel(filename, voxel):
vertices = []
for i in range(voxel.shape[0]):
for j in range(voxel.shape[1]):
for k in range(voxel.shape[2]):
if voxel[i, j, k] == 1:
vertices.append([i / voxel.shape[0], j / voxel.shape[1], k / voxel.shape[2]])
vertices = torch.autograd.Variable(torch.tensor(vertices))
return save_obj(filename, vertices, torch.autograd.Variable(torch.tensor([])))
|
banmo-main
|
third_party/softras/soft_renderer/functional/save_obj.py
|
import math
import torch
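# Converts spherical camera coordinates (distance, elevation, azimuth) into a Cartesian
# eye position; accepts either Python scalars or batched tensors.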
def get_points_from_angles(distance, elevation, azimuth, degrees=True):
if isinstance(distance, float) or isinstance(distance, int):
if degrees:
elevation = math.radians(elevation)
azimuth = math.radians(azimuth)
return (
distance * math.cos(elevation) * math.sin(azimuth),
distance * math.sin(elevation),
-distance * math.cos(elevation) * math.cos(azimuth))
else:
if degrees:
elevation = math.pi / 180. * elevation
azimuth = math.pi / 180. * azimuth
#
return torch.stack([
distance * torch.cos(elevation) * torch.sin(azimuth),
distance * torch.sin(elevation),
-distance * torch.cos(elevation) * torch.cos(azimuth)
]).transpose(1,0)
|
banmo-main
|
third_party/softras/soft_renderer/functional/get_points_from_angles.py
|
import torch
def orthogonal(vertices, scale):
'''
    Compute an orthogonal projection with a given scale.
    To find the scale equivalent to a perspective projection,
set scale = focal_pixel / object_depth -- to 0~H/W pixel range
= 1 / ( object_depth * tan(half_fov_angle) ) -- to -1~1 pixel range
'''
if (vertices.ndimension() != 3):
raise ValueError('vertices Tensor should have 3 dimensions')
z = vertices[:, :, 2]
x = vertices[:, :, 0] * scale
y = vertices[:, :, 1] * scale
vertices = torch.stack((x,y,z), dim=2)
return vertices
|
banmo-main
|
third_party/softras/soft_renderer/functional/orthogonal.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Function
import numpy as np
import soft_renderer.cuda.soft_rasterize as soft_rasterize_cuda
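# Custom autograd Function wrapping the CUDA soft rasterizer: forward produces RGBA
# `soft_colors` and caches per-face/aggregation buffers, backward propagates image
# gradients to the face vertices and textures.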
class SoftRasterizeFunction(Function):
@staticmethod
def forward(ctx, face_vertices, textures, image_size=256,
background_color=[0, 0, 0], near=1, far=100,
fill_back=True, eps=1e-3,
sigma_val=1e-5, dist_func='euclidean', dist_eps=1e-4,
gamma_val=1e-4, aggr_func_rgb='softmax', aggr_func_alpha='prod',
texture_type='surface'):
# face_vertices: [nb, nf, 9]
# textures: [nb, nf, 9]
func_dist_map = {'hard': 0, 'barycentric': 1, 'euclidean': 2}
func_rgb_map = {'hard': 0, 'softmax': 1}
func_alpha_map = {'hard': 0, 'sum': 1, 'prod': 2}
func_map_sample = {'surface': 0, 'vertex': 1}
ctx.image_size = image_size
ctx.background_color = background_color
ctx.near = near
ctx.far = far
ctx.eps = eps
ctx.sigma_val = sigma_val
ctx.gamma_val = gamma_val
ctx.func_dist_type = func_dist_map[dist_func]
ctx.dist_eps = np.log(1. / dist_eps - 1.)
ctx.func_rgb_type = func_rgb_map[aggr_func_rgb]
ctx.func_alpha_type = func_alpha_map[aggr_func_alpha]
ctx.texture_type = func_map_sample[texture_type]
ctx.fill_back = fill_back
face_vertices = face_vertices.clone()
textures = textures.clone()
ctx.device = face_vertices.device
ctx.batch_size, ctx.num_faces = face_vertices.shape[:2]
faces_info = torch.FloatTensor(ctx.batch_size, ctx.num_faces, 9*3).fill_(0.0).to(device=ctx.device) # [inv*9, sym*9, obt*3, 0*6]
aggrs_info = torch.FloatTensor(ctx.batch_size, 2, ctx.image_size, ctx.image_size).fill_(0.0).to(device=ctx.device)
soft_colors = torch.FloatTensor(ctx.batch_size, 4, ctx.image_size, ctx.image_size).fill_(1.0).to(device=ctx.device)
soft_colors[:, 0, :, :] *= background_color[0]
soft_colors[:, 1, :, :] *= background_color[1]
soft_colors[:, 2, :, :] *= background_color[2]
faces_info, aggrs_info, soft_colors = \
soft_rasterize_cuda.forward_soft_rasterize(face_vertices, textures,
faces_info, aggrs_info,
soft_colors,
image_size, near, far, eps,
sigma_val, ctx.func_dist_type, ctx.dist_eps,
gamma_val, ctx.func_rgb_type, ctx.func_alpha_type,
ctx.texture_type, fill_back)
ctx.save_for_backward(face_vertices, textures, soft_colors, faces_info, aggrs_info)
return soft_colors
@staticmethod
def backward(ctx, grad_soft_colors):
#print(grad_soft_colors.dtype)
face_vertices, textures, soft_colors, faces_info, aggrs_info = ctx.saved_tensors
image_size = ctx.image_size
background_color = ctx.background_color
near = ctx.near
far = ctx.far
eps = ctx.eps
sigma_val = ctx.sigma_val
dist_eps = ctx.dist_eps
gamma_val = ctx.gamma_val
func_dist_type = ctx.func_dist_type
func_rgb_type = ctx.func_rgb_type
func_alpha_type = ctx.func_alpha_type
texture_type = ctx.texture_type
fill_back = ctx.fill_back
# grad_faces = torch.zeros_like(face_vertices, dtype=torch.float32).to(ctx.device).contiguous()
# grad_textures = torch.zeros_like(textures, dtype=torch.float32).to(ctx.device).contiguous()
grad_faces = torch.zeros_like(face_vertices,dtype=torch.float32,device=ctx.device)
grad_textures = torch.zeros_like(textures,dtype=torch.float32,device=ctx.device)
grad_soft_colors = grad_soft_colors.contiguous()
grad_faces, grad_textures = \
soft_rasterize_cuda.backward_soft_rasterize(face_vertices, textures, soft_colors,
faces_info, aggrs_info,
grad_faces, grad_textures, grad_soft_colors,
image_size, near, far, eps,
sigma_val, func_dist_type, dist_eps,
gamma_val, func_rgb_type, func_alpha_type,
texture_type, fill_back)
return grad_faces, grad_textures, None, None, None, None, None, None, None, None, None, None, None, None, None
def soft_rasterize(face_vertices, textures, image_size=256,
background_color=[0, 0, 0], near=1, far=100,
fill_back=True, eps=1e-3,
sigma_val=1e-5, dist_func='euclidean', dist_eps=1e-4,
gamma_val=1e-4, aggr_func_rgb='softmax', aggr_func_alpha='prod',
texture_type='surface'):
if face_vertices.device == "cpu":
raise TypeError('Rasterize module supports only cuda Tensors')
return SoftRasterizeFunction.apply(face_vertices, textures, image_size,
background_color, near, far,
fill_back, eps,
sigma_val, dist_func, dist_eps,
gamma_val, aggr_func_rgb, aggr_func_alpha,
texture_type)
|
banmo-main
|
third_party/softras/soft_renderer/functional/soft_rasterize.py
|
import torch
def projection(vertices, P, dist_coeffs, orig_size):
'''
Calculate projective transformation of vertices given a projection matrix
P: 3x4 projection matrix
dist_coeffs: vector of distortion coefficients
orig_size: original size of image captured by the camera
'''
vertices = torch.cat([vertices, torch.ones_like(vertices[:, :, None, 0])], dim=-1)
vertices = torch.bmm(vertices, P.transpose(2,1))
x, y, z = vertices[:, :, 0], vertices[:, :, 1], vertices[:, :, 2]
x_ = x / (z + 1e-5)
y_ = y / (z + 1e-5)
# Get distortion coefficients from vector
k1 = dist_coeffs[:, None, 0]
k2 = dist_coeffs[:, None, 1]
p1 = dist_coeffs[:, None, 2]
p2 = dist_coeffs[:, None, 3]
k3 = dist_coeffs[:, None, 4]
# we use x_ for x' and x__ for x'' etc.
r = torch.sqrt(x_ ** 2 + y_ ** 2)
x__ = x_*(1 + k1*(r**2) + k2*(r**4) + k3*(r**6)) + 2*p1*x_*y_ + p2*(r**2 + 2*x_**2)
y__ = y_*(1 + k1*(r**2) + k2*(r**4) + k3 *(r**6)) + p1*(r**2 + 2*y_**2) + 2*p2*x_*y_
x__ = 2 * (x__ - orig_size / 2.) / orig_size
y__ = 2 * (y__ - orig_size / 2.) / orig_size
vertices = torch.stack([x__,y__,z], dim=-1)
return vertices
|
banmo-main
|
third_party/softras/soft_renderer/functional/projection.py
|
import torch
import torch.nn.functional as F
def vertex_normals(vertices, faces):
"""
:param vertices: [batch size, number of vertices, 3]
:param faces: [batch size, number of faces, 3]
:return: [batch size, number of vertices, 3]
"""
assert (vertices.ndimension() == 3)
assert (faces.ndimension() == 3)
assert (vertices.shape[0] == faces.shape[0])
assert (vertices.shape[2] == 3)
assert (faces.shape[2] == 3)
bs, nv = vertices.shape[:2]
bs, nf = faces.shape[:2]
device = vertices.device
normals = torch.zeros(bs * nv, 3).to(device)
faces = faces + (torch.arange(bs, dtype=torch.int32).to(device) * nv)[:, None, None] # expanded faces
vertices_faces = vertices.reshape((bs * nv, 3))[faces.long()]
faces = faces.view(-1, 3)
vertices_faces = vertices_faces.view(-1, 3, 3)
normals.index_add_(0, faces[:, 1].long(),
torch.cross(vertices_faces[:, 2] - vertices_faces[:, 1], vertices_faces[:, 0] - vertices_faces[:, 1]))
normals.index_add_(0, faces[:, 2].long(),
torch.cross(vertices_faces[:, 0] - vertices_faces[:, 2], vertices_faces[:, 1] - vertices_faces[:, 2]))
normals.index_add_(0, faces[:, 0].long(),
torch.cross(vertices_faces[:, 1] - vertices_faces[:, 0], vertices_faces[:, 2] - vertices_faces[:, 0]))
normals = F.normalize(normals, eps=1e-6, dim=1)
normals = normals.reshape((bs, nv, 3))
# pytorch only supports long and byte tensors for indexing
return normals
|
banmo-main
|
third_party/softras/soft_renderer/functional/vertex_normals.py
|
from torch import nn
from torch.autograd import Function
import torch
import importlib
import os
chamfer_found = importlib.find_loader("chamfer_3D") is not None
if not chamfer_found:
## Cool trick from https://github.com/chrdiller
print("Jitting Chamfer 3D")
from torch.utils.cpp_extension import load
chamfer_3D = load(name="chamfer_3D",
sources=[
"/".join(os.path.abspath(__file__).split('/')[:-1] + ["chamfer_cuda.cpp"]),
"/".join(os.path.abspath(__file__).split('/')[:-1] + ["chamfer3D.cu"]),
])
print("Loaded JIT 3D CUDA chamfer distance")
else:
import chamfer_3D
print("Loaded compiled 3D CUDA chamfer distance")
# Chamfer's distance module @thibaultgroueix
# GPU tensors only
class chamfer_3DFunction(Function):
@staticmethod
def forward(ctx, xyz1, xyz2):
batchsize, n, _ = xyz1.size()
_, m, _ = xyz2.size()
device = xyz1.device
dist1 = torch.zeros(batchsize, n)
dist2 = torch.zeros(batchsize, m)
idx1 = torch.zeros(batchsize, n).type(torch.IntTensor)
idx2 = torch.zeros(batchsize, m).type(torch.IntTensor)
dist1 = dist1.to(device)
dist2 = dist2.to(device)
idx1 = idx1.to(device)
idx2 = idx2.to(device)
torch.cuda.set_device(device)
chamfer_3D.forward(xyz1, xyz2, dist1, dist2, idx1, idx2)
ctx.save_for_backward(xyz1, xyz2, idx1, idx2)
return dist1, dist2, idx1, idx2
@staticmethod
def backward(ctx, graddist1, graddist2, gradidx1, gradidx2):
xyz1, xyz2, idx1, idx2 = ctx.saved_tensors
graddist1 = graddist1.contiguous()
graddist2 = graddist2.contiguous()
device = graddist1.device
gradxyz1 = torch.zeros(xyz1.size())
gradxyz2 = torch.zeros(xyz2.size())
gradxyz1 = gradxyz1.to(device)
gradxyz2 = gradxyz2.to(device)
chamfer_3D.backward(
xyz1, xyz2, gradxyz1, gradxyz2, graddist1, graddist2, idx1, idx2
)
return gradxyz1, gradxyz2
class chamfer_3DDist(nn.Module):
def __init__(self):
super(chamfer_3DDist, self).__init__()
def forward(self, input1, input2):
input1 = input1.contiguous()
input2 = input2.contiguous()
return chamfer_3DFunction.apply(input1, input2)
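# Minimal usage sketch (assumes two batches of 3D points already on the GPU, since the
# chamfer_3D kernels are CUDA-only; variable names below are illustrative):
#
#   chamfer = chamfer_3DDist()
#   dist1, dist2, idx1, idx2 = chamfer(points_a, points_b)  # points_*: [B, N, 3] / [B, M, 3] float CUDA tensors
#   loss = dist1.mean() + dist2.mean()                      # symmetric Chamfer distance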
|
banmo-main
|
third_party/chamfer3D/dist_chamfer_3D.py
|
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
setup(
name='chamfer_3D',
ext_modules=[
CUDAExtension('chamfer_3D', [
"/".join(__file__.split('/')[:-1] + ['chamfer_cuda.cpp']),
"/".join(__file__.split('/')[:-1] + ['chamfer3D.cu']),
]),
],
cmdclass={
'build_ext': BuildExtension
})
|
banmo-main
|
third_party/chamfer3D/setup.py
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
import glob
import os
import shutil
from os import path
from setuptools import find_packages, setup
from typing import List
import torch
from torch.utils.cpp_extension import CUDA_HOME, CppExtension, CUDAExtension
from torch.utils.hipify import hipify_python
torch_ver = [int(x) for x in torch.__version__.split(".")[:2]]
assert torch_ver >= [1, 6], "Requires PyTorch >= 1.6"
def get_version():
init_py_path = path.join(path.abspath(path.dirname(__file__)), "detectron2", "__init__.py")
init_py = open(init_py_path, "r").readlines()
version_line = [l.strip() for l in init_py if l.startswith("__version__")][0]
version = version_line.split("=")[-1].strip().strip("'\"")
# The following is used to build release packages.
# Users should never use it.
suffix = os.getenv("D2_VERSION_SUFFIX", "")
version = version + suffix
if os.getenv("BUILD_NIGHTLY", "0") == "1":
from datetime import datetime
date_str = datetime.today().strftime("%y%m%d")
version = version + ".dev" + date_str
new_init_py = [l for l in init_py if not l.startswith("__version__")]
new_init_py.append('__version__ = "{}"\n'.format(version))
with open(init_py_path, "w") as f:
f.write("".join(new_init_py))
return version
def get_extensions():
this_dir = path.dirname(path.abspath(__file__))
extensions_dir = path.join(this_dir, "detectron2", "layers", "csrc")
main_source = path.join(extensions_dir, "vision.cpp")
sources = glob.glob(path.join(extensions_dir, "**", "*.cpp"))
from torch.utils.cpp_extension import ROCM_HOME
is_rocm_pytorch = (
True if ((torch.version.hip is not None) and (ROCM_HOME is not None)) else False
)
hipify_ver = (
[int(x) for x in torch.utils.hipify.__version__.split(".")]
if hasattr(torch.utils.hipify, "__version__")
else [0, 0, 0]
)
if is_rocm_pytorch and hipify_ver < [1, 0, 0]: # TODO not needed since pt1.8
# Earlier versions of hipification and extension modules were not
# transparent, i.e. would require an explicit call to hipify, and the
# hipification would introduce "hip" subdirectories, possibly changing
# the relationship between source and header files.
# This path is maintained for backwards compatibility.
hipify_python.hipify(
project_directory=this_dir,
output_directory=this_dir,
includes="/detectron2/layers/csrc/*",
show_detailed=True,
is_pytorch_extension=True,
)
source_cuda = glob.glob(path.join(extensions_dir, "**", "hip", "*.hip")) + glob.glob(
path.join(extensions_dir, "hip", "*.hip")
)
shutil.copy(
"detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_utils.h",
"detectron2/layers/csrc/box_iou_rotated/hip/box_iou_rotated_utils.h",
)
shutil.copy(
"detectron2/layers/csrc/deformable/deform_conv.h",
"detectron2/layers/csrc/deformable/hip/deform_conv.h",
)
sources = [main_source] + sources
sources = [
s
for s in sources
if not is_rocm_pytorch or torch_ver < [1, 7] or not s.endswith("hip/vision.cpp")
]
else:
# common code between cuda and rocm platforms,
# for hipify version [1,0,0] and later.
source_cuda = glob.glob(path.join(extensions_dir, "**", "*.cu")) + glob.glob(
path.join(extensions_dir, "*.cu")
)
sources = [main_source] + sources
extension = CppExtension
extra_compile_args = {"cxx": []}
define_macros = []
if (torch.cuda.is_available() and ((CUDA_HOME is not None) or is_rocm_pytorch)) or os.getenv(
"FORCE_CUDA", "0"
) == "1":
extension = CUDAExtension
sources += source_cuda
if not is_rocm_pytorch:
define_macros += [("WITH_CUDA", None)]
extra_compile_args["nvcc"] = [
"-O3",
"-DCUDA_HAS_FP16=1",
"-D__CUDA_NO_HALF_OPERATORS__",
"-D__CUDA_NO_HALF_CONVERSIONS__",
"-D__CUDA_NO_HALF2_OPERATORS__",
]
else:
define_macros += [("WITH_HIP", None)]
extra_compile_args["nvcc"] = []
if torch_ver < [1, 7]:
# supported by https://github.com/pytorch/pytorch/pull/43931
CC = os.environ.get("CC", None)
if CC is not None:
extra_compile_args["nvcc"].append("-ccbin={}".format(CC))
include_dirs = [extensions_dir]
ext_modules = [
extension(
"detectron2._C",
sources,
include_dirs=include_dirs,
define_macros=define_macros,
extra_compile_args=extra_compile_args,
)
]
return ext_modules
def get_model_zoo_configs() -> List[str]:
"""
Return a list of configs to include in package for model zoo. Copy over these configs inside
detectron2/model_zoo.
"""
# Use absolute paths while symlinking.
source_configs_dir = path.join(path.dirname(path.realpath(__file__)), "configs")
destination = path.join(
path.dirname(path.realpath(__file__)), "detectron2", "model_zoo", "configs"
)
# Symlink the config directory inside package to have a cleaner pip install.
# Remove stale symlink/directory from a previous build.
if path.exists(source_configs_dir):
if path.islink(destination):
os.unlink(destination)
elif path.isdir(destination):
shutil.rmtree(destination)
if not path.exists(destination):
try:
os.symlink(source_configs_dir, destination)
except OSError:
# Fall back to copying if symlink fails: ex. on Windows.
shutil.copytree(source_configs_dir, destination)
config_paths = glob.glob("configs/**/*.yaml", recursive=True) + glob.glob(
"configs/**/*.py", recursive=True
)
return config_paths
# For projects that are relative small and provide features that are very close
# to detectron2's core functionalities, we install them under detectron2.projects
PROJECTS = {
"detectron2.projects.point_rend": "projects/PointRend/point_rend",
"detectron2.projects.deeplab": "projects/DeepLab/deeplab",
"detectron2.projects.panoptic_deeplab": "projects/Panoptic-DeepLab/panoptic_deeplab",
}
setup(
name="detectron2",
version=get_version(),
author="FAIR",
url="https://github.com/facebookresearch/detectron2",
description="Detectron2 is FAIR's next-generation research "
"platform for object detection and segmentation.",
packages=find_packages(exclude=("configs", "tests*")) + list(PROJECTS.keys()),
package_dir=PROJECTS,
package_data={"detectron2.model_zoo": get_model_zoo_configs()},
python_requires=">=3.6",
install_requires=[
# Do not add opencv here. Just like pytorch, user should install
        # opencv themselves, preferably by OS's package manager, or by
# choosing the proper pypi package name at https://github.com/skvark/opencv-python
"termcolor>=1.1",
"Pillow>=7.1", # or use pillow-simd for better performance
"yacs>=0.1.6",
"tabulate",
"cloudpickle",
"matplotlib",
"tqdm>4.29.0",
"tensorboard",
# Lock version of fvcore/iopath because they may have breaking changes
# NOTE: when updating fvcore/iopath version, make sure fvcore depends
# on compatible version of iopath.
"fvcore>=0.1.5,<0.1.6", # required like this to make it pip installable
"iopath>=0.1.7,<0.1.9",
"pycocotools>=2.0.2", # corresponds to https://github.com/ppwwyyxx/cocoapi
"future", # used by caffe2
"pydot", # used to save caffe2 SVGs
"dataclasses; python_version<'3.7'",
"omegaconf>=2.1.0rc1",
"hydra-core>=1.1.0rc1",
"black==21.4b2",
# When adding to the list, may need to update docs/requirements.txt
# or add mock in docs/conf.py
],
extras_require={
"all": [
"shapely",
"pygments>=2.2",
"psutil",
"panopticapi @ https://github.com/cocodataset/panopticapi/archive/master.zip",
],
"dev": [
"flake8==3.8.1",
"isort==4.3.21",
"flake8-bugbear",
"flake8-comprehensions",
],
},
ext_modules=get_extensions(),
cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension},
)
|
banmo-main
|
third_party/detectron2_old/setup.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import atexit
import bisect
import multiprocessing as mp
from collections import deque
import cv2
import torch
from detectron2.data import MetadataCatalog
from detectron2.engine.defaults import DefaultPredictor
from detectron2.utils.video_visualizer import VideoVisualizer
from detectron2.utils.visualizer import ColorMode, Visualizer
class VisualizationDemo(object):
def __init__(self, cfg, instance_mode=ColorMode.IMAGE, parallel=False):
"""
Args:
cfg (CfgNode):
instance_mode (ColorMode):
parallel (bool): whether to run the model in different processes from visualization.
Useful since the visualization logic can be slow.
"""
self.metadata = MetadataCatalog.get(
cfg.DATASETS.TEST[0] if len(cfg.DATASETS.TEST) else "__unused"
)
self.cpu_device = torch.device("cpu")
self.instance_mode = instance_mode
self.parallel = parallel
if parallel:
num_gpu = torch.cuda.device_count()
self.predictor = AsyncPredictor(cfg, num_gpus=num_gpu)
else:
self.predictor = DefaultPredictor(cfg)
def run_on_image(self, image):
"""
Args:
image (np.ndarray): an image of shape (H, W, C) (in BGR order).
This is the format used by OpenCV.
Returns:
predictions (dict): the output of the model.
vis_output (VisImage): the visualized image output.
"""
vis_output = None
predictions = self.predictor(image)
# Convert image from OpenCV BGR format to Matplotlib RGB format.
image = image[:, :, ::-1]
visualizer = Visualizer(image, self.metadata, instance_mode=self.instance_mode)
if "panoptic_seg" in predictions:
panoptic_seg, segments_info = predictions["panoptic_seg"]
vis_output = visualizer.draw_panoptic_seg_predictions(
panoptic_seg.to(self.cpu_device), segments_info
)
else:
if "sem_seg" in predictions:
vis_output = visualizer.draw_sem_seg(
predictions["sem_seg"].argmax(dim=0).to(self.cpu_device)
)
if "instances" in predictions:
instances = predictions["instances"].to(self.cpu_device)
vis_output = visualizer.draw_instance_predictions(predictions=instances)
return predictions, vis_output
def _frame_from_video(self, video):
while video.isOpened():
success, frame = video.read()
if success:
yield frame
else:
break
def run_on_video(self, video):
"""
Visualizes predictions on frames of the input video.
Args:
video (cv2.VideoCapture): a :class:`VideoCapture` object, whose source can be
either a webcam or a video file.
Yields:
ndarray: BGR visualizations of each video frame.
"""
video_visualizer = VideoVisualizer(self.metadata, self.instance_mode)
def process_predictions(frame, predictions):
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
if "panoptic_seg" in predictions:
panoptic_seg, segments_info = predictions["panoptic_seg"]
vis_frame = video_visualizer.draw_panoptic_seg_predictions(
frame, panoptic_seg.to(self.cpu_device), segments_info
)
elif "instances" in predictions:
predictions = predictions["instances"].to(self.cpu_device)
vis_frame = video_visualizer.draw_instance_predictions(frame, predictions)
elif "sem_seg" in predictions:
vis_frame = video_visualizer.draw_sem_seg(
frame, predictions["sem_seg"].argmax(dim=0).to(self.cpu_device)
)
# Converts Matplotlib RGB format to OpenCV BGR format
vis_frame = cv2.cvtColor(vis_frame.get_image(), cv2.COLOR_RGB2BGR)
return vis_frame
frame_gen = self._frame_from_video(video)
if self.parallel:
buffer_size = self.predictor.default_buffer_size
frame_data = deque()
for cnt, frame in enumerate(frame_gen):
frame_data.append(frame)
self.predictor.put(frame)
if cnt >= buffer_size:
frame = frame_data.popleft()
predictions = self.predictor.get()
yield process_predictions(frame, predictions)
while len(frame_data):
frame = frame_data.popleft()
predictions = self.predictor.get()
yield process_predictions(frame, predictions)
else:
for frame in frame_gen:
yield process_predictions(frame, self.predictor(frame))
class AsyncPredictor:
"""
A predictor that runs the model asynchronously, possibly on >1 GPUs.
    Because rendering the visualization takes a considerable amount of time,
this helps improve throughput a little bit when rendering videos.
"""
class _StopToken:
pass
class _PredictWorker(mp.Process):
def __init__(self, cfg, task_queue, result_queue):
self.cfg = cfg
self.task_queue = task_queue
self.result_queue = result_queue
super().__init__()
def run(self):
predictor = DefaultPredictor(self.cfg)
while True:
task = self.task_queue.get()
if isinstance(task, AsyncPredictor._StopToken):
break
idx, data = task
result = predictor(data)
self.result_queue.put((idx, result))
def __init__(self, cfg, num_gpus: int = 1):
"""
Args:
cfg (CfgNode):
num_gpus (int): if 0, will run on CPU
"""
num_workers = max(num_gpus, 1)
self.task_queue = mp.Queue(maxsize=num_workers * 3)
self.result_queue = mp.Queue(maxsize=num_workers * 3)
self.procs = []
for gpuid in range(max(num_gpus, 1)):
cfg = cfg.clone()
cfg.defrost()
cfg.MODEL.DEVICE = "cuda:{}".format(gpuid) if num_gpus > 0 else "cpu"
self.procs.append(
AsyncPredictor._PredictWorker(cfg, self.task_queue, self.result_queue)
)
self.put_idx = 0
self.get_idx = 0
self.result_rank = []
self.result_data = []
for p in self.procs:
p.start()
atexit.register(self.shutdown)
def put(self, image):
self.put_idx += 1
self.task_queue.put((self.put_idx, image))
def get(self):
self.get_idx += 1 # the index needed for this request
if len(self.result_rank) and self.result_rank[0] == self.get_idx:
res = self.result_data[0]
del self.result_data[0], self.result_rank[0]
return res
while True:
# make sure the results are returned in the correct order
idx, res = self.result_queue.get()
if idx == self.get_idx:
return res
insert = bisect.bisect(self.result_rank, idx)
self.result_rank.insert(insert, idx)
self.result_data.insert(insert, res)
def __len__(self):
return self.put_idx - self.get_idx
def __call__(self, image):
self.put(image)
return self.get()
def shutdown(self):
for _ in self.procs:
self.task_queue.put(AsyncPredictor._StopToken())
@property
def default_buffer_size(self):
return len(self.procs) * 5
|
banmo-main
|
third_party/detectron2_old/demo/predictor.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import glob
import multiprocessing as mp
import numpy as np
import os
import tempfile
import time
import warnings
import cv2
import tqdm
from detectron2.config import get_cfg
from detectron2.data.detection_utils import read_image
from detectron2.utils.logger import setup_logger
from predictor import VisualizationDemo
# constants
WINDOW_NAME = "COCO detections"
def setup_cfg(args):
# load config from file and command-line arguments
cfg = get_cfg()
# To use demo for Panoptic-DeepLab, please uncomment the following two lines.
# from detectron2.projects.panoptic_deeplab import add_panoptic_deeplab_config # noqa
# add_panoptic_deeplab_config(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
# Set score_threshold for builtin models
cfg.MODEL.RETINANET.SCORE_THRESH_TEST = args.confidence_threshold
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args.confidence_threshold
cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = args.confidence_threshold
cfg.freeze()
return cfg
def get_parser():
parser = argparse.ArgumentParser(description="Detectron2 demo for builtin configs")
parser.add_argument(
"--config-file",
default="configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml",
metavar="FILE",
help="path to config file",
)
parser.add_argument("--webcam", action="store_true", help="Take inputs from webcam.")
parser.add_argument("--video-input", help="Path to video file.")
parser.add_argument(
"--input",
nargs="+",
help="A list of space separated input images; "
"or a single glob pattern such as 'directory/*.jpg'",
)
parser.add_argument(
"--output",
help="A file or directory to save output visualizations. "
"If not given, will show output in an OpenCV window.",
)
parser.add_argument(
"--confidence-threshold",
type=float,
default=0.5,
help="Minimum score for instance predictions to be shown",
)
parser.add_argument(
"--opts",
help="Modify config options using the command-line 'KEY VALUE' pairs",
default=[],
nargs=argparse.REMAINDER,
)
return parser
def test_opencv_video_format(codec, file_ext):
with tempfile.TemporaryDirectory(prefix="video_format_test") as dir:
filename = os.path.join(dir, "test_file" + file_ext)
writer = cv2.VideoWriter(
filename=filename,
fourcc=cv2.VideoWriter_fourcc(*codec),
fps=float(30),
frameSize=(10, 10),
isColor=True,
)
[writer.write(np.zeros((10, 10, 3), np.uint8)) for _ in range(30)]
writer.release()
if os.path.isfile(filename):
return True
return False
if __name__ == "__main__":
mp.set_start_method("spawn", force=True)
args = get_parser().parse_args()
setup_logger(name="fvcore")
logger = setup_logger()
logger.info("Arguments: " + str(args))
cfg = setup_cfg(args)
demo = VisualizationDemo(cfg)
if args.input:
if len(args.input) == 1:
args.input = glob.glob(os.path.expanduser(args.input[0]))
assert args.input, "The input path(s) was not found"
for path in tqdm.tqdm(args.input, disable=not args.output):
# use PIL, to be consistent with evaluation
img = read_image(path, format="BGR")
start_time = time.time()
predictions, visualized_output = demo.run_on_image(img)
logger.info(
"{}: {} in {:.2f}s".format(
path,
"detected {} instances".format(len(predictions["instances"]))
if "instances" in predictions
else "finished",
time.time() - start_time,
)
)
if args.output:
if os.path.isdir(args.output):
assert os.path.isdir(args.output), args.output
out_filename = os.path.join(args.output, os.path.basename(path))
else:
assert len(args.input) == 1, "Please specify a directory with args.output"
out_filename = args.output
visualized_output.save(out_filename)
else:
cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
cv2.imshow(WINDOW_NAME, visualized_output.get_image()[:, :, ::-1])
if cv2.waitKey(0) == 27:
break # esc to quit
elif args.webcam:
assert args.input is None, "Cannot have both --input and --webcam!"
assert args.output is None, "output not yet supported with --webcam!"
cam = cv2.VideoCapture(0)
for vis in tqdm.tqdm(demo.run_on_video(cam)):
cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
cv2.imshow(WINDOW_NAME, vis)
if cv2.waitKey(1) == 27:
break # esc to quit
cam.release()
cv2.destroyAllWindows()
elif args.video_input:
video = cv2.VideoCapture(args.video_input)
width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
frames_per_second = video.get(cv2.CAP_PROP_FPS)
num_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
basename = os.path.basename(args.video_input)
codec, file_ext = (
("x264", ".mkv") if test_opencv_video_format("x264", ".mkv") else ("mp4v", ".mp4")
)
if codec == ".mp4v":
warnings.warn("x264 codec not available, switching to mp4v")
if args.output:
if os.path.isdir(args.output):
output_fname = os.path.join(args.output, basename)
output_fname = os.path.splitext(output_fname)[0] + file_ext
else:
output_fname = args.output
assert not os.path.isfile(output_fname), output_fname
output_file = cv2.VideoWriter(
filename=output_fname,
# some installation of opencv may not support x264 (due to its license),
# you can try other format (e.g. MPEG)
fourcc=cv2.VideoWriter_fourcc(*codec),
fps=float(frames_per_second),
frameSize=(width, height),
isColor=True,
)
assert os.path.isfile(args.video_input)
for vis_frame in tqdm.tqdm(demo.run_on_video(video), total=num_frames):
if args.output:
output_file.write(vis_frame)
else:
cv2.namedWindow(basename, cv2.WINDOW_NORMAL)
cv2.imshow(basename, vis_frame)
if cv2.waitKey(1) == 27:
break # esc to quit
video.release()
if args.output:
output_file.release()
else:
cv2.destroyAllWindows()
|
banmo-main
|
third_party/detectron2_old/demo/demo.py
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
import pickle as pkl
import sys
import torch
"""
Usage:
# download one of the ResNet{18,34,50,101,152} models from torchvision:
wget https://download.pytorch.org/models/resnet50-19c8e357.pth -O r50.pth
# run the conversion
./convert-torchvision-to-d2.py r50.pth r50.pkl
# Then, use r50.pkl with the following changes in config:
MODEL:
WEIGHTS: "/path/to/r50.pkl"
PIXEL_MEAN: [123.675, 116.280, 103.530]
PIXEL_STD: [58.395, 57.120, 57.375]
RESNETS:
DEPTH: 50
STRIDE_IN_1X1: False
INPUT:
FORMAT: "RGB"
These models typically produce slightly worse results than the
pre-trained ResNets we use in official configs, which are the
original ResNet models released by MSRA.
"""
if __name__ == "__main__":
input = sys.argv[1]
obj = torch.load(input, map_location="cpu")
newmodel = {}
for k in list(obj.keys()):
old_k = k
if "layer" not in k:
k = "stem." + k
for t in [1, 2, 3, 4]:
k = k.replace("layer{}".format(t), "res{}".format(t + 1))
for t in [1, 2, 3]:
k = k.replace("bn{}".format(t), "conv{}.norm".format(t))
k = k.replace("downsample.0", "shortcut")
k = k.replace("downsample.1", "shortcut.norm")
print(old_k, "->", k)
newmodel[k] = obj.pop(old_k).detach().numpy()
res = {"model": newmodel, "__author__": "torchvision", "matching_heuristics": True}
with open(sys.argv[2], "wb") as f:
pkl.dump(res, f)
if obj:
print("Unconverted keys:", obj.keys())
|
banmo-main
|
third_party/detectron2_old/tools/convert-torchvision-to-d2.py
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
"""
A script to benchmark builtin models.
Note: this script has an extra dependency of psutil.
"""
import itertools
import logging
import psutil
import torch
import tqdm
from fvcore.common.timer import Timer
from torch.nn.parallel import DistributedDataParallel
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import (
DatasetFromList,
build_detection_test_loader,
build_detection_train_loader,
)
from detectron2.engine import AMPTrainer, SimpleTrainer, default_argument_parser, hooks, launch
from detectron2.modeling import build_model
from detectron2.solver import build_optimizer
from detectron2.utils import comm
from detectron2.utils.collect_env import collect_env_info
from detectron2.utils.events import CommonMetricPrinter
from detectron2.utils.logger import setup_logger
logger = logging.getLogger("detectron2")
def setup(args):
cfg = get_cfg()
cfg.merge_from_file(args.config_file)
cfg.SOLVER.BASE_LR = 0.001 # Avoid NaNs. Not useful in this script anyway.
cfg.merge_from_list(args.opts)
cfg.freeze()
setup_logger(distributed_rank=comm.get_rank())
return cfg
def RAM_msg():
vram = psutil.virtual_memory()
return "RAM Usage: {:.2f}/{:.2f} GB".format(
(vram.total - vram.available) / 1024 ** 3, vram.total / 1024 ** 3
)
def benchmark_data(args):
cfg = setup(args)
logger.info("After spawning " + RAM_msg())
timer = Timer()
dataloader = build_detection_train_loader(cfg)
logger.info("Initialize loader using {} seconds.".format(timer.seconds()))
timer.reset()
itr = iter(dataloader)
for i in range(10): # warmup
next(itr)
if i == 0:
startup_time = timer.seconds()
logger.info("Startup time: {} seconds".format(startup_time))
timer = Timer()
max_iter = 1000
for _ in tqdm.trange(max_iter):
next(itr)
logger.info(
"{} iters ({} images) in {} seconds.".format(
max_iter, max_iter * cfg.SOLVER.IMS_PER_BATCH, timer.seconds()
)
)
# test for a few more rounds
for k in range(10):
logger.info(f"Iteration {k} " + RAM_msg())
timer = Timer()
max_iter = 1000
for _ in tqdm.trange(max_iter):
next(itr)
logger.info(
"{} iters ({} images) in {} seconds.".format(
max_iter, max_iter * cfg.SOLVER.IMS_PER_BATCH, timer.seconds()
)
)
def benchmark_train(args):
cfg = setup(args)
model = build_model(cfg)
logger.info("Model:\n{}".format(model))
if comm.get_world_size() > 1:
model = DistributedDataParallel(
model, device_ids=[comm.get_local_rank()], broadcast_buffers=False
)
optimizer = build_optimizer(cfg, model)
checkpointer = DetectionCheckpointer(model, optimizer=optimizer)
checkpointer.load(cfg.MODEL.WEIGHTS)
cfg.defrost()
cfg.DATALOADER.NUM_WORKERS = 2
data_loader = build_detection_train_loader(cfg)
dummy_data = list(itertools.islice(data_loader, 100))
def f():
data = DatasetFromList(dummy_data, copy=False, serialize=False)
while True:
yield from data
max_iter = 400
trainer = (AMPTrainer if cfg.SOLVER.AMP.ENABLED else SimpleTrainer)(model, f(), optimizer)
trainer.register_hooks(
[hooks.IterationTimer(), hooks.PeriodicWriter([CommonMetricPrinter(max_iter)])]
)
trainer.train(1, max_iter)
@torch.no_grad()
def benchmark_eval(args):
cfg = setup(args)
model = build_model(cfg)
model.eval()
logger.info("Model:\n{}".format(model))
DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
cfg.defrost()
cfg.DATALOADER.NUM_WORKERS = 0
data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
dummy_data = DatasetFromList(list(itertools.islice(data_loader, 100)), copy=False)
def f():
while True:
yield from dummy_data
for k in range(5): # warmup
model(dummy_data[k])
max_iter = 300
timer = Timer()
with tqdm.tqdm(total=max_iter) as pbar:
for idx, d in enumerate(f()):
if idx == max_iter:
break
model(d)
pbar.update()
logger.info("{} iters in {} seconds.".format(max_iter, timer.seconds()))
if __name__ == "__main__":
parser = default_argument_parser()
parser.add_argument("--task", choices=["train", "eval", "data"], required=True)
args = parser.parse_args()
assert not args.eval_only
logger.info("Environment info:\n" + collect_env_info())
if args.task == "data":
f = benchmark_data
print("Initial " + RAM_msg())
elif args.task == "train":
"""
Note: training speed may not be representative.
        The training cost of an R-CNN model varies with the content of the data
and the quality of the model.
"""
f = benchmark_train
elif args.task == "eval":
f = benchmark_eval
# only benchmark single-GPU inference.
assert args.num_gpus == 1 and args.num_machines == 1
launch(f, args.num_gpus, args.num_machines, args.machine_rank, args.dist_url, args=(args,))
|
banmo-main
|
third_party/detectron2_old/tools/benchmark.py
|
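benchmark_train above decouples model speed from data loading by caching roughly 100 batches in a DatasetFromList and cycling over them forever. A minimal, standard-library-only sketch of that cycling pattern (the cached list below is a stand-in for real batches, not detectron2 data):

import itertools

def make_infinite(samples):
    """Yield the cached samples over and over, so the consumer never waits on new data."""
    while True:
        yield from samples

cached = list(itertools.islice(iter(range(1000)), 100))    # stand-in for ~100 pre-loaded batches
stream = make_infinite(cached)
first_200 = list(itertools.islice(stream, 200))             # 200 draws from only 100 cached items
assert first_200[:100] == first_200[100:]                   # the cache simply repeats
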
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import os
from itertools import chain
import cv2
import tqdm
from detectron2.config import get_cfg
from detectron2.data import DatasetCatalog, MetadataCatalog, build_detection_train_loader
from detectron2.data import detection_utils as utils
from detectron2.data.build import filter_images_with_few_keypoints
from detectron2.utils.logger import setup_logger
from detectron2.utils.visualizer import Visualizer
def setup(args):
cfg = get_cfg()
if args.config_file:
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.DATALOADER.NUM_WORKERS = 0
cfg.freeze()
return cfg
def parse_args(in_args=None):
parser = argparse.ArgumentParser(description="Visualize ground-truth data")
parser.add_argument(
"--source",
choices=["annotation", "dataloader"],
required=True,
help="visualize the annotations or the data loader (with pre-processing)",
)
parser.add_argument("--config-file", metavar="FILE", help="path to config file")
parser.add_argument("--output-dir", default="./", help="path to output directory")
parser.add_argument("--show", action="store_true", help="show output in a window")
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
return parser.parse_args(in_args)
if __name__ == "__main__":
args = parse_args()
logger = setup_logger()
logger.info("Arguments: " + str(args))
cfg = setup(args)
dirname = args.output_dir
os.makedirs(dirname, exist_ok=True)
metadata = MetadataCatalog.get(cfg.DATASETS.TRAIN[0])
def output(vis, fname):
if args.show:
print(fname)
cv2.imshow("window", vis.get_image()[:, :, ::-1])
cv2.waitKey()
else:
filepath = os.path.join(dirname, fname)
print("Saving to {} ...".format(filepath))
vis.save(filepath)
scale = 1.0
if args.source == "dataloader":
train_data_loader = build_detection_train_loader(cfg)
for batch in train_data_loader:
for per_image in batch:
# Pytorch tensor is in (C, H, W) format
img = per_image["image"].permute(1, 2, 0).cpu().detach().numpy()
img = utils.convert_image_to_rgb(img, cfg.INPUT.FORMAT)
visualizer = Visualizer(img, metadata=metadata, scale=scale)
target_fields = per_image["instances"].get_fields()
labels = [metadata.thing_classes[i] for i in target_fields["gt_classes"]]
vis = visualizer.overlay_instances(
labels=labels,
boxes=target_fields.get("gt_boxes", None),
masks=target_fields.get("gt_masks", None),
keypoints=target_fields.get("gt_keypoints", None),
)
output(vis, str(per_image["image_id"]) + ".jpg")
else:
dicts = list(chain.from_iterable([DatasetCatalog.get(k) for k in cfg.DATASETS.TRAIN]))
if cfg.MODEL.KEYPOINT_ON:
dicts = filter_images_with_few_keypoints(dicts, 1)
for dic in tqdm.tqdm(dicts):
img = utils.read_image(dic["file_name"], "RGB")
visualizer = Visualizer(img, metadata=metadata, scale=scale)
vis = visualizer.draw_dataset_dict(dic)
output(vis, os.path.basename(dic["file_name"]))
|
banmo-main
|
third_party/detectron2_old/tools/visualize_data.py
|
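In the dataloader branch above, each image tensor has to be converted from PyTorch's (C, H, W) layout back to (H, W, C) before visualization, and the output helper reverses the channel order for cv2.imshow. A small illustrative sketch of those two conversions on random data (not tied to any dataset):

import numpy as np
import torch

chw = torch.rand(3, 4, 5)                     # fake image tensor in (C, H, W)
hwc = chw.permute(1, 2, 0).cpu().numpy()      # back to (H, W, C) for visualization
assert hwc.shape == (4, 5, 3)

rgb = np.random.randint(0, 256, size=(4, 5, 3), dtype=np.uint8)
bgr = rgb[:, :, ::-1]                         # reverse channel order, e.g. RGB -> BGR for cv2
assert (bgr[..., 0] == rgb[..., 2]).all()
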
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Detectron2 training script with a plain training loop.
This script reads a given config file and runs the training or evaluation.
It is an entry point that is able to train standard models in detectron2.
In order to let one script support training of many models,
this script contains logic that are specific to these built-in models and therefore
may not be suitable for your own project.
For example, your research project perhaps only needs a single "evaluator".
Therefore, we recommend that you use detectron2 as a library and take
this file as an example of how to use the library.
You may want to write your own script with your datasets and other customizations.
Compared to "train_net.py", this script supports fewer default features.
It also includes fewer abstractions and is therefore easier to add custom logic to.
"""
import logging
import os
from collections import OrderedDict
import torch
from torch.nn.parallel import DistributedDataParallel
import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer, PeriodicCheckpointer
from detectron2.config import get_cfg
from detectron2.data import (
MetadataCatalog,
build_detection_test_loader,
build_detection_train_loader,
)
from detectron2.engine import default_argument_parser, default_setup, default_writers, launch
from detectron2.evaluation import (
CityscapesInstanceEvaluator,
CityscapesSemSegEvaluator,
COCOEvaluator,
COCOPanopticEvaluator,
DatasetEvaluators,
LVISEvaluator,
PascalVOCDetectionEvaluator,
SemSegEvaluator,
inference_on_dataset,
print_csv_format,
)
from detectron2.modeling import build_model
from detectron2.solver import build_lr_scheduler, build_optimizer
from detectron2.utils.events import EventStorage
logger = logging.getLogger("detectron2")
def get_evaluator(cfg, dataset_name, output_folder=None):
"""
Create evaluator(s) for a given dataset.
This uses the special metadata "evaluator_type" associated with each builtin dataset.
For your own dataset, you can simply create an evaluator manually in your
script and do not have to worry about the hacky if-else logic here.
"""
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
evaluator_list = []
evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
if evaluator_type in ["sem_seg", "coco_panoptic_seg"]:
evaluator_list.append(
SemSegEvaluator(
dataset_name,
distributed=True,
output_dir=output_folder,
)
)
if evaluator_type in ["coco", "coco_panoptic_seg"]:
evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder))
if evaluator_type == "coco_panoptic_seg":
evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))
if evaluator_type == "cityscapes_instance":
assert (
torch.cuda.device_count() >= comm.get_rank()
), "CityscapesEvaluator currently do not work with multiple machines."
return CityscapesInstanceEvaluator(dataset_name)
if evaluator_type == "cityscapes_sem_seg":
assert (
torch.cuda.device_count() >= comm.get_rank()
), "CityscapesEvaluator currently do not work with multiple machines."
return CityscapesSemSegEvaluator(dataset_name)
if evaluator_type == "pascal_voc":
return PascalVOCDetectionEvaluator(dataset_name)
if evaluator_type == "lvis":
return LVISEvaluator(dataset_name, cfg, True, output_folder)
if len(evaluator_list) == 0:
raise NotImplementedError(
"no Evaluator for the dataset {} with the type {}".format(dataset_name, evaluator_type)
)
if len(evaluator_list) == 1:
return evaluator_list[0]
return DatasetEvaluators(evaluator_list)
def do_test(cfg, model):
results = OrderedDict()
for dataset_name in cfg.DATASETS.TEST:
data_loader = build_detection_test_loader(cfg, dataset_name)
evaluator = get_evaluator(
cfg, dataset_name, os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name)
)
results_i = inference_on_dataset(model, data_loader, evaluator)
results[dataset_name] = results_i
if comm.is_main_process():
logger.info("Evaluation results for {} in csv format:".format(dataset_name))
print_csv_format(results_i)
if len(results) == 1:
results = list(results.values())[0]
return results
def do_train(cfg, model, resume=False):
model.train()
optimizer = build_optimizer(cfg, model)
scheduler = build_lr_scheduler(cfg, optimizer)
checkpointer = DetectionCheckpointer(
model, cfg.OUTPUT_DIR, optimizer=optimizer, scheduler=scheduler
)
start_iter = (
checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=resume).get("iteration", -1) + 1
)
max_iter = cfg.SOLVER.MAX_ITER
periodic_checkpointer = PeriodicCheckpointer(
checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD, max_iter=max_iter
)
writers = default_writers(cfg.OUTPUT_DIR, max_iter) if comm.is_main_process() else []
# compared to "train_net.py", we do not support accurate timing and
# precise BN here, because they are not trivial to implement in a small training loop
data_loader = build_detection_train_loader(cfg)
logger.info("Starting training from iteration {}".format(start_iter))
with EventStorage(start_iter) as storage:
for data, iteration in zip(data_loader, range(start_iter, max_iter)):
storage.iter = iteration
loss_dict = model(data)
losses = sum(loss_dict.values())
assert torch.isfinite(losses).all(), loss_dict
loss_dict_reduced = {k: v.item() for k, v in comm.reduce_dict(loss_dict).items()}
losses_reduced = sum(loss for loss in loss_dict_reduced.values())
if comm.is_main_process():
storage.put_scalars(total_loss=losses_reduced, **loss_dict_reduced)
optimizer.zero_grad()
losses.backward()
optimizer.step()
storage.put_scalar("lr", optimizer.param_groups[0]["lr"], smoothing_hint=False)
scheduler.step()
if (
cfg.TEST.EVAL_PERIOD > 0
and (iteration + 1) % cfg.TEST.EVAL_PERIOD == 0
and iteration != max_iter - 1
):
do_test(cfg, model)
# Compared to "train_net.py", the test results are not dumped to EventStorage
comm.synchronize()
if iteration - start_iter > 5 and (
(iteration + 1) % 20 == 0 or iteration == max_iter - 1
):
for writer in writers:
writer.write()
periodic_checkpointer.step(iteration)
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
default_setup(
cfg, args
) # if you don't like any of the default setup, write your own setup code
return cfg
def main(args):
cfg = setup(args)
model = build_model(cfg)
logger.info("Model:\n{}".format(model))
if args.eval_only:
DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
cfg.MODEL.WEIGHTS, resume=args.resume
)
return do_test(cfg, model)
distributed = comm.get_world_size() > 1
if distributed:
model = DistributedDataParallel(
model, device_ids=[comm.get_local_rank()], broadcast_buffers=False
)
do_train(cfg, model, resume=args.resume)
return do_test(cfg, model)
if __name__ == "__main__":
args = default_argument_parser().parse_args()
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
|
banmo-main
|
third_party/detectron2_old/tools/plain_train_net.py
|
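The loop in do_train is a plain PyTorch training loop: forward, sum the losses, backward, step the optimizer and scheduler, then handle periodic evaluation, logging, and checkpointing. Stripped of all detectron2 machinery, the same skeleton looks roughly like this (toy model and synthetic data, purely illustrative):

import torch
from torch import nn

# Toy model and synthetic data stand in for build_model / build_detection_train_loader.
model = nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=50)

for iteration in range(100):
    x = torch.randn(8, 4)
    loss = ((model(x) - 1.0) ** 2).mean()        # "model(data)" reduced to a single scalar loss
    assert torch.isfinite(loss)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    scheduler.step()
    if (iteration + 1) % 20 == 0:                # slot for periodic writers / checkpointing
        print(iteration, float(loss), optimizer.param_groups[0]["lr"])
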
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import json
import numpy as np
import os
from collections import defaultdict
import cv2
import tqdm
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.structures import Boxes, BoxMode, Instances
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import setup_logger
from detectron2.utils.visualizer import Visualizer
def create_instances(predictions, image_size):
ret = Instances(image_size)
score = np.asarray([x["score"] for x in predictions])
chosen = (score > args.conf_threshold).nonzero()[0]
score = score[chosen]
bbox = np.asarray([predictions[i]["bbox"] for i in chosen]).reshape(-1, 4)
bbox = BoxMode.convert(bbox, BoxMode.XYWH_ABS, BoxMode.XYXY_ABS)
labels = np.asarray([dataset_id_map(predictions[i]["category_id"]) for i in chosen])
ret.scores = score
ret.pred_boxes = Boxes(bbox)
ret.pred_classes = labels
try:
ret.pred_masks = [predictions[i]["segmentation"] for i in chosen]
except KeyError:
pass
return ret
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="A script that visualizes the json predictions from COCO or LVIS dataset."
)
parser.add_argument("--input", required=True, help="JSON file produced by the model")
parser.add_argument("--output", required=True, help="output directory")
parser.add_argument("--dataset", help="name of the dataset", default="coco_2017_val")
parser.add_argument("--conf-threshold", default=0.5, type=float, help="confidence threshold")
args = parser.parse_args()
logger = setup_logger()
with PathManager.open(args.input, "r") as f:
predictions = json.load(f)
pred_by_image = defaultdict(list)
for p in predictions:
pred_by_image[p["image_id"]].append(p)
dicts = list(DatasetCatalog.get(args.dataset))
metadata = MetadataCatalog.get(args.dataset)
if hasattr(metadata, "thing_dataset_id_to_contiguous_id"):
def dataset_id_map(ds_id):
return metadata.thing_dataset_id_to_contiguous_id[ds_id]
elif "lvis" in args.dataset:
# LVIS results are in the same format as COCO results, but have a different
# mapping from dataset category id to contiguous category id in [0, #categories - 1]
def dataset_id_map(ds_id):
return ds_id - 1
else:
raise ValueError("Unsupported dataset: {}".format(args.dataset))
os.makedirs(args.output, exist_ok=True)
for dic in tqdm.tqdm(dicts):
img = cv2.imread(dic["file_name"], cv2.IMREAD_COLOR)[:, :, ::-1]
basename = os.path.basename(dic["file_name"])
predictions = create_instances(pred_by_image[dic["image_id"]], img.shape[:2])
vis = Visualizer(img, metadata)
vis_pred = vis.draw_instance_predictions(predictions).get_image()
vis = Visualizer(img, metadata)
vis_gt = vis.draw_dataset_dict(dic).get_image()
concat = np.concatenate((vis_pred, vis_gt), axis=1)
cv2.imwrite(os.path.join(args.output, basename), concat[:, :, ::-1])
|
banmo-main
|
third_party/detectron2_old/tools/visualize_json_results.py
|
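create_instances above keeps only predictions whose score exceeds --conf-threshold and converts COCO-style XYWH boxes to XYXY before drawing them. The conversion done by BoxMode.convert amounts to the small numpy sketch below (hypothetical inputs):

import numpy as np

predictions = [
    {"score": 0.9, "bbox": [10, 20, 30, 40]},    # COCO-style (x, y, w, h)
    {"score": 0.3, "bbox": [0, 0, 5, 5]},
]
conf_threshold = 0.5

scores = np.asarray([p["score"] for p in predictions])
chosen = (scores > conf_threshold).nonzero()[0]            # indices that pass the threshold
xywh = np.asarray([predictions[i]["bbox"] for i in chosen]).reshape(-1, 4)
xyxy = xywh.copy()
xyxy[:, 2:] = xywh[:, :2] + xywh[:, 2:]                    # (x, y, w, h) -> (x0, y0, x1, y1)
print(chosen, xyxy)                                         # [0] [[10 20 40 60]]
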
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import numpy as np
from collections import Counter
import tqdm
from fvcore.nn import flop_count_table # can also try flop_count_str
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import build_detection_test_loader
from detectron2.engine import default_argument_parser
from detectron2.modeling import build_model
from detectron2.utils.analysis import (
FlopCountAnalysis,
activation_count_operators,
parameter_count_table,
)
from detectron2.utils.logger import setup_logger
logger = logging.getLogger("detectron2")
def setup(args):
cfg = get_cfg()
cfg.merge_from_file(args.config_file)
cfg.DATALOADER.NUM_WORKERS = 0
cfg.merge_from_list(args.opts)
cfg.freeze()
setup_logger(name="fvcore")
setup_logger()
return cfg
def do_flop(cfg):
data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
model = build_model(cfg)
DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
model.eval()
counts = Counter()
total_flops = []
for idx, data in zip(tqdm.trange(args.num_inputs), data_loader): # noqa
flops = FlopCountAnalysis(model, data)
if idx > 0:
flops.unsupported_ops_warnings(False).uncalled_modules_warnings(False)
counts += flops.by_operator()
total_flops.append(flops.total())
logger.info("Flops table computed from only one input sample:\n" + flop_count_table(flops))
logger.info(
"Average GFlops for each type of operators:\n"
+ str([(k, v / (idx + 1) / 1e9) for k, v in counts.items()])
)
logger.info(
"Total GFlops: {:.1f}±{:.1f}".format(np.mean(total_flops) / 1e9, np.std(total_flops) / 1e9)
)
def do_activation(cfg):
data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
model = build_model(cfg)
DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
model.eval()
counts = Counter()
total_activations = []
for idx, data in zip(tqdm.trange(args.num_inputs), data_loader): # noqa
count = activation_count_operators(model, data)
counts += count
total_activations.append(sum(count.values()))
logger.info(
"(Million) Activations for Each Type of Operators:\n"
        + str([(k, v / (idx + 1)) for k, v in counts.items()])
)
logger.info(
"Total (Million) Activations: {}±{}".format(
np.mean(total_activations), np.std(total_activations)
)
)
def do_parameter(cfg):
model = build_model(cfg)
logger.info("Parameter Count:\n" + parameter_count_table(model, max_depth=5))
def do_structure(cfg):
model = build_model(cfg)
logger.info("Model Structure:\n" + str(model))
if __name__ == "__main__":
parser = default_argument_parser(
epilog="""
Examples:
To show parameters of a model:
$ ./analyze_model.py --tasks parameter \\
--config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml
Flops and activations are data-dependent, therefore inputs and model weights
are needed to count them:
$ ./analyze_model.py --num-inputs 100 --tasks flop \\
--config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml \\
MODEL.WEIGHTS /path/to/model.pkl
"""
)
parser.add_argument(
"--tasks",
choices=["flop", "activation", "parameter", "structure"],
required=True,
nargs="+",
)
parser.add_argument(
"-n",
"--num-inputs",
default=100,
type=int,
help="number of inputs used to compute statistics for flops/activations, "
"both are data dependent.",
)
args = parser.parse_args()
assert not args.eval_only
assert args.num_gpus == 1
cfg = setup(args)
for task in args.tasks:
{
"flop": do_flop,
"activation": do_activation,
"parameter": do_parameter,
"structure": do_structure,
}[task](cfg)
|
banmo-main
|
third_party/detectron2_old/tools/analyze_model.py
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Training script using the new "LazyConfig" python config files.
This script reads a given python config file and runs the training or evaluation.
It can be used to train any models or dataset as long as they can be
instantiated by the recursive construction defined in the given config file.
Besides lazy construction of models, dataloader, etc., this script expects a
few common configuration parameters currently defined in "configs/common/train.py".
To add more complicated training logic, you can easily add other configs
in the config file and implement a new train_net.py to handle them.
"""
import logging
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import LazyConfig, instantiate
from detectron2.engine import (
AMPTrainer,
SimpleTrainer,
default_argument_parser,
default_setup,
default_writers,
hooks,
launch,
)
from detectron2.engine.defaults import create_ddp_model
from detectron2.evaluation import inference_on_dataset, print_csv_format
from detectron2.utils import comm
logger = logging.getLogger("detectron2")
def do_test(cfg, model):
if "evaluator" in cfg.dataloader:
ret = inference_on_dataset(
model, instantiate(cfg.dataloader.test), instantiate(cfg.dataloader.evaluator)
)
print_csv_format(ret)
return ret
def do_train(args, cfg):
"""
Args:
cfg: an object with the following attributes:
model: instantiate to a module
dataloader.{train,test}: instantiate to dataloaders
dataloader.evaluator: instantiate to evaluator for test set
            optimizer: instantiate to an optimizer
lr_multiplier: instantiate to a fvcore scheduler
train: other misc config defined in `common_train.py`, including:
output_dir (str)
init_checkpoint (str)
amp.enabled (bool)
max_iter (int)
eval_period, log_period (int)
device (str)
checkpointer (dict)
ddp (dict)
"""
model = instantiate(cfg.model)
logger = logging.getLogger("detectron2")
logger.info("Model:\n{}".format(model))
model.to(cfg.train.device)
cfg.optimizer.params.model = model
optim = instantiate(cfg.optimizer)
train_loader = instantiate(cfg.dataloader.train)
model = create_ddp_model(model, **cfg.train.ddp)
trainer = (AMPTrainer if cfg.train.amp.enabled else SimpleTrainer)(model, train_loader, optim)
checkpointer = DetectionCheckpointer(
model,
cfg.train.output_dir,
optimizer=optim,
trainer=trainer,
)
trainer.register_hooks(
[
hooks.IterationTimer(),
hooks.LRScheduler(scheduler=instantiate(cfg.lr_multiplier)),
hooks.PeriodicCheckpointer(checkpointer, **cfg.train.checkpointer)
if comm.is_main_process()
else None,
hooks.EvalHook(cfg.train.eval_period, lambda: do_test(cfg, model)),
hooks.PeriodicWriter(
default_writers(cfg.train.output_dir, cfg.train.max_iter),
period=cfg.train.log_period,
)
if comm.is_main_process()
else None,
]
)
checkpointer.resume_or_load(cfg.train.init_checkpoint, resume=args.resume)
if args.resume and checkpointer.has_checkpoint():
# The checkpoint stores the training iteration that just finished, thus we start
# at the next iteration
start_iter = trainer.iter + 1
else:
start_iter = 0
trainer.train(start_iter, cfg.train.max_iter)
def main(args):
cfg = LazyConfig.load(args.config_file)
cfg = LazyConfig.apply_overrides(cfg, args.opts)
default_setup(cfg, args)
if args.eval_only:
model = instantiate(cfg.model)
model.to(cfg.train.device)
model = create_ddp_model(model)
DetectionCheckpointer(model).load(cfg.train.init_checkpoint)
print(do_test(cfg, model))
else:
do_train(args, cfg)
if __name__ == "__main__":
args = default_argument_parser().parse_args()
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
|
banmo-main
|
third_party/detectron2_old/tools/lazyconfig_train_net.py
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
"""
A main training script.
This script reads a given config file and runs the training or evaluation.
It is an entry point that is made to train standard models in detectron2.
In order to let one script support training of many models,
this script contains logic that is specific to these built-in models and therefore
may not be suitable for your own project.
For example, your research project perhaps only needs a single "evaluator".
Therefore, we recommend that you use detectron2 as a library and take
this file as an example of how to use the library.
You may want to write your own script with your datasets and other customizations.
"""
import logging
import os
from collections import OrderedDict
import torch
import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog
from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, hooks, launch
from detectron2.evaluation import (
CityscapesInstanceEvaluator,
CityscapesSemSegEvaluator,
COCOEvaluator,
COCOPanopticEvaluator,
DatasetEvaluators,
LVISEvaluator,
PascalVOCDetectionEvaluator,
SemSegEvaluator,
verify_results,
)
from detectron2.modeling import GeneralizedRCNNWithTTA
class Trainer(DefaultTrainer):
"""
We use the "DefaultTrainer" which contains pre-defined default logic for
standard training workflow. They may not work for you, especially if you
are working on a new research project. In that case you can write your
own training loop. You can use "tools/plain_train_net.py" as an example.
"""
@classmethod
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
"""
Create evaluator(s) for a given dataset.
This uses the special metadata "evaluator_type" associated with each builtin dataset.
For your own dataset, you can simply create an evaluator manually in your
script and do not have to worry about the hacky if-else logic here.
"""
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
evaluator_list = []
evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
if evaluator_type in ["sem_seg", "coco_panoptic_seg"]:
evaluator_list.append(
SemSegEvaluator(
dataset_name,
distributed=True,
output_dir=output_folder,
)
)
if evaluator_type in ["coco", "coco_panoptic_seg"]:
evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder))
if evaluator_type == "coco_panoptic_seg":
evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))
if evaluator_type == "cityscapes_instance":
assert (
torch.cuda.device_count() >= comm.get_rank()
), "CityscapesEvaluator currently do not work with multiple machines."
return CityscapesInstanceEvaluator(dataset_name)
if evaluator_type == "cityscapes_sem_seg":
assert (
torch.cuda.device_count() >= comm.get_rank()
), "CityscapesEvaluator currently do not work with multiple machines."
return CityscapesSemSegEvaluator(dataset_name)
elif evaluator_type == "pascal_voc":
return PascalVOCDetectionEvaluator(dataset_name)
elif evaluator_type == "lvis":
return LVISEvaluator(dataset_name, output_dir=output_folder)
if len(evaluator_list) == 0:
raise NotImplementedError(
"no Evaluator for the dataset {} with the type {}".format(
dataset_name, evaluator_type
)
)
elif len(evaluator_list) == 1:
return evaluator_list[0]
return DatasetEvaluators(evaluator_list)
@classmethod
def test_with_TTA(cls, cfg, model):
logger = logging.getLogger("detectron2.trainer")
# In the end of training, run an evaluation with TTA
# Only support some R-CNN models.
logger.info("Running inference with test-time augmentation ...")
model = GeneralizedRCNNWithTTA(cfg, model)
evaluators = [
cls.build_evaluator(
cfg, name, output_folder=os.path.join(cfg.OUTPUT_DIR, "inference_TTA")
)
for name in cfg.DATASETS.TEST
]
res = cls.test(cfg, model, evaluators)
res = OrderedDict({k + "_TTA": v for k, v in res.items()})
return res
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
default_setup(cfg, args)
return cfg
def main(args):
cfg = setup(args)
if args.eval_only:
model = Trainer.build_model(cfg)
DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
cfg.MODEL.WEIGHTS, resume=args.resume
)
res = Trainer.test(cfg, model)
if cfg.TEST.AUG.ENABLED:
res.update(Trainer.test_with_TTA(cfg, model))
if comm.is_main_process():
verify_results(cfg, res)
return res
"""
If you'd like to do anything fancier than the standard training logic,
consider writing your own training loop (see plain_train_net.py) or
subclassing the trainer.
"""
trainer = Trainer(cfg)
trainer.resume_or_load(resume=args.resume)
if cfg.TEST.AUG.ENABLED:
trainer.register_hooks(
[hooks.EvalHook(0, lambda: trainer.test_with_TTA(cfg, trainer.model))]
)
return trainer.train()
if __name__ == "__main__":
args = default_argument_parser().parse_args()
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
|
banmo-main
|
third_party/detectron2_old/tools/train_net.py
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import os
from typing import Dict, List, Tuple
import torch
from torch import Tensor, nn
import detectron2.data.transforms as T
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import build_detection_test_loader, detection_utils
from detectron2.evaluation import COCOEvaluator, inference_on_dataset, print_csv_format
from detectron2.export import (
Caffe2Tracer,
TracingAdapter,
add_export_config,
dump_torchscript_IR,
scripting_with_instances,
)
from detectron2.modeling import GeneralizedRCNN, RetinaNet, build_model
from detectron2.modeling.postprocessing import detector_postprocess
from detectron2.projects.point_rend import add_pointrend_config
from detectron2.structures import Boxes
from detectron2.utils.env import TORCH_VERSION
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import setup_logger
def setup_cfg(args):
cfg = get_cfg()
# cuda context is initialized before creating dataloader, so we don't fork anymore
cfg.DATALOADER.NUM_WORKERS = 0
cfg = add_export_config(cfg)
add_pointrend_config(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
return cfg
def export_caffe2_tracing(cfg, torch_model, inputs):
tracer = Caffe2Tracer(cfg, torch_model, inputs)
if args.format == "caffe2":
caffe2_model = tracer.export_caffe2()
caffe2_model.save_protobuf(args.output)
# draw the caffe2 graph
caffe2_model.save_graph(os.path.join(args.output, "model.svg"), inputs=inputs)
return caffe2_model
elif args.format == "onnx":
import onnx
onnx_model = tracer.export_onnx()
onnx.save(onnx_model, os.path.join(args.output, "model.onnx"))
elif args.format == "torchscript":
ts_model = tracer.export_torchscript()
with PathManager.open(os.path.join(args.output, "model.ts"), "wb") as f:
torch.jit.save(ts_model, f)
dump_torchscript_IR(ts_model, args.output)
# experimental. API not yet final
def export_scripting(torch_model):
assert TORCH_VERSION >= (1, 8)
fields = {
"proposal_boxes": Boxes,
"objectness_logits": Tensor,
"pred_boxes": Boxes,
"scores": Tensor,
"pred_classes": Tensor,
"pred_masks": Tensor,
"pred_keypoints": torch.Tensor,
"pred_keypoint_heatmaps": torch.Tensor,
}
assert args.format == "torchscript", "Scripting only supports torchscript format."
class ScriptableAdapterBase(nn.Module):
# Use this adapter to workaround https://github.com/pytorch/pytorch/issues/46944
        # by not returning instances but dicts. Otherwise the exported model is not deployable
def __init__(self):
super().__init__()
self.model = torch_model
self.eval()
if isinstance(torch_model, GeneralizedRCNN):
class ScriptableAdapter(ScriptableAdapterBase):
def forward(self, inputs: Tuple[Dict[str, torch.Tensor]]) -> List[Dict[str, Tensor]]:
instances = self.model.inference(inputs, do_postprocess=False)
return [i.get_fields() for i in instances]
else:
class ScriptableAdapter(ScriptableAdapterBase):
def forward(self, inputs: Tuple[Dict[str, torch.Tensor]]) -> List[Dict[str, Tensor]]:
instances = self.model(inputs)
return [i.get_fields() for i in instances]
ts_model = scripting_with_instances(ScriptableAdapter(), fields)
with PathManager.open(os.path.join(args.output, "model.ts"), "wb") as f:
torch.jit.save(ts_model, f)
dump_torchscript_IR(ts_model, args.output)
# TODO inference in Python now missing postprocessing glue code
return None
# experimental. API not yet final
def export_tracing(torch_model, inputs):
assert TORCH_VERSION >= (1, 8)
image = inputs[0]["image"]
inputs = [{"image": image}] # remove other unused keys
if isinstance(torch_model, GeneralizedRCNN):
def inference(model, inputs):
# use do_postprocess=False so it returns ROI mask
inst = model.inference(inputs, do_postprocess=False)[0]
return [{"instances": inst}]
else:
inference = None # assume that we just call the model directly
traceable_model = TracingAdapter(torch_model, inputs, inference)
if args.format == "torchscript":
ts_model = torch.jit.trace(traceable_model, (image,))
with PathManager.open(os.path.join(args.output, "model.ts"), "wb") as f:
torch.jit.save(ts_model, f)
dump_torchscript_IR(ts_model, args.output)
elif args.format == "onnx":
# NOTE onnx export currently failing in pytorch
with PathManager.open(os.path.join(args.output, "model.onnx"), "wb") as f:
torch.onnx.export(traceable_model, (image,), f)
logger.info("Inputs schema: " + str(traceable_model.inputs_schema))
logger.info("Outputs schema: " + str(traceable_model.outputs_schema))
if args.format != "torchscript":
return None
if not isinstance(torch_model, (GeneralizedRCNN, RetinaNet)):
return None
def eval_wrapper(inputs):
"""
The exported model does not contain the final resize step, which is typically
unused in deployment but needed for evaluation. We add it manually here.
"""
input = inputs[0]
instances = traceable_model.outputs_schema(ts_model(input["image"]))[0]["instances"]
postprocessed = detector_postprocess(instances, input["height"], input["width"])
return [{"instances": postprocessed}]
return eval_wrapper
def get_sample_inputs(args):
if args.sample_image is None:
# get a first batch from dataset
data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
first_batch = next(iter(data_loader))
return first_batch
else:
# get a sample data
original_image = detection_utils.read_image(args.sample_image, format=cfg.INPUT.FORMAT)
# Do same preprocessing as DefaultPredictor
aug = T.ResizeShortestEdge(
[cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST
)
height, width = original_image.shape[:2]
image = aug.get_transform(original_image).apply_image(original_image)
image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))
inputs = {"image": image, "height": height, "width": width}
# Sample ready
sample_inputs = [inputs]
return sample_inputs
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Export a model for deployment.")
parser.add_argument(
"--format",
choices=["caffe2", "onnx", "torchscript"],
help="output format",
default="caffe2",
)
parser.add_argument(
"--export-method",
choices=["caffe2_tracing", "tracing", "scripting"],
help="Method to export models",
default="caffe2_tracing",
)
parser.add_argument("--config-file", default="", metavar="FILE", help="path to config file")
parser.add_argument("--sample-image", default=None, type=str, help="sample image for input")
parser.add_argument("--run-eval", action="store_true")
parser.add_argument("--output", help="output directory for the converted model")
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
logger = setup_logger()
logger.info("Command line arguments: " + str(args))
PathManager.mkdirs(args.output)
# Disable respecialization on new shapes. Otherwise --run-eval will be slow
torch._C._jit_set_bailout_depth(1)
cfg = setup_cfg(args)
# create a torch model
torch_model = build_model(cfg)
DetectionCheckpointer(torch_model).resume_or_load(cfg.MODEL.WEIGHTS)
torch_model.eval()
# get sample data
sample_inputs = get_sample_inputs(args)
# convert and save model
if args.export_method == "caffe2_tracing":
exported_model = export_caffe2_tracing(cfg, torch_model, sample_inputs)
elif args.export_method == "scripting":
exported_model = export_scripting(torch_model)
elif args.export_method == "tracing":
exported_model = export_tracing(torch_model, sample_inputs)
# run evaluation with the converted model
if args.run_eval:
assert exported_model is not None, (
"Python inference is not yet implemented for "
f"export_method={args.export_method}, format={args.format}."
)
logger.info("Running evaluation ... this takes a long time if you export to CPU.")
dataset = cfg.DATASETS.TEST[0]
data_loader = build_detection_test_loader(cfg, dataset)
# NOTE: hard-coded evaluator. change to the evaluator for your dataset
evaluator = COCOEvaluator(dataset, output_dir=args.output)
metrics = inference_on_dataset(exported_model, data_loader, evaluator)
print_csv_format(metrics)
|
banmo-main
|
third_party/detectron2_old/tools/deploy/export_model.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
"""
TridentNet Training Script.
This script is a simplified version of the training script in detectron2/tools.
"""
import os
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch
from detectron2.evaluation import COCOEvaluator
from tridentnet import add_tridentnet_config
class Trainer(DefaultTrainer):
@classmethod
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
return COCOEvaluator(dataset_name, output_dir=output_folder)
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
add_tridentnet_config(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
default_setup(cfg, args)
return cfg
def main(args):
cfg = setup(args)
if args.eval_only:
model = Trainer.build_model(cfg)
DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
cfg.MODEL.WEIGHTS, resume=args.resume
)
res = Trainer.test(cfg, model)
return res
trainer = Trainer(cfg)
trainer.resume_or_load(resume=args.resume)
return trainer.train()
if __name__ == "__main__":
args = default_argument_parser().parse_args()
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
|
banmo-main
|
third_party/detectron2_old/projects/TridentNet/train_net.py
|