index | package | name | docstring | code | signature |
---|---|---|---|---|---|
720,345 |
ete3.coretype.tree
|
write
|
Returns the newick representation of the current node. Several
arguments control the way in which extra data is shown for
every node:
:argument features: a list of feature names to be exported
using the Extended Newick Format (i.e. features=["name",
"dist"]). Use an empty list to export all available features
in each node (features=[])
:argument outfile: writes the output to a given file
:argument format: defines the newick standard used to encode the
tree. See tutorial for details.
:argument False format_root_node: If True, it allows features
and branch information from root node to be exported as a
part of the newick text string. For newick compatibility
reasons, this is False by default.
:argument is_leaf_fn: See :func:`TreeNode.traverse` for
documentation.
**Example:**
::
t.write(features=["species","name"], format=1)
|
def write(self, features=None, outfile=None, format=0, is_leaf_fn=None,
format_root_node=False, dist_formatter=None, support_formatter=None,
name_formatter=None, quoted_node_names=False):
"""
Returns the newick representation of the current node. Several
arguments control the way in which extra data is shown for
every node:
:argument features: a list of feature names to be exported
using the Extended Newick Format (i.e. features=["name",
"dist"]). Use an empty list to export all available features
in each node (features=[])
:argument outfile: writes the output to a given file
:argument format: defines the newick standard used to encode the
tree. See tutorial for details.
:argument False format_root_node: If True, it allows features
and branch information from root node to be exported as a
part of the newick text string. For newick compatibility
reasons, this is False by default.
:argument is_leaf_fn: See :func:`TreeNode.traverse` for
documentation.
**Example:**
::
t.write(features=["species","name"], format=1)
"""
nw = write_newick(self, features=features, format=format,
is_leaf_fn=is_leaf_fn,
format_root_node=format_root_node,
dist_formatter=dist_formatter,
support_formatter=support_formatter,
name_formatter=name_formatter,
quoted_names=quoted_node_names)
if outfile is not None:
with open(outfile, "w") as OUT:
OUT.write(nw)
else:
return nw
|
(self, features=None, outfile=None, format=0, is_leaf_fn=None, format_root_node=False, dist_formatter=None, support_formatter=None, name_formatter=None, quoted_node_names=False)
|
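A minimal usage sketch of the write method documented above; the newick string, the species feature and the output file name are hypothetical examples, not part of the dataset row:

from ete3 import Tree

# small tree with a custom "species" feature on each leaf (hypothetical data)
t = Tree("((A:1,B:1):0.5,C:2);")
for leaf in t:
    leaf.add_feature("species", "sp_" + leaf.name)

# return the extended-newick string, exporting the species feature
print(t.write(features=["species"], format=1))

# or write it to a file instead of returning the string
t.write(features=["species", "name"], outfile="tree.nw", format=1)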
720,453 |
ete3.evol.evoltree
|
EvolNode
|
Re-implementation of the standard TreeNode instance. It adds
attributes and methods to work with phylogenetic trees.
:argument newick: path to tree in newick format, can also be a string
:argument alignment: path to alignment, can also be a string.
:argument fasta alg_format: alignment format.
:argument sp_naming_function: function to infer species name.
:argument format: type of newick format
:argument binpath: path to binaries, in case codeml or SLR are not in global path.
|
class EvolNode(PhyloNode):
""" Re-implementation of the standart TreeNode instance. It adds
attributes and methods to work with phylogentic trees.
:argument newick: path to tree in newick format, can also be a string
:argument alignment: path to alignment, can also be a string.
:argument fasta alg_format: alignment format.
:argument sp_naming_function: function to infer species name.
:argument format: type of newick format
:argument binpath: path to binaries, in case codeml or SLR are not in global path.
"""
def __init__(self, newick=None, alignment=None, alg_format="fasta",
sp_naming_function=_parse_species, format=0,
binpath='', **kwargs):
'''
freebranch: path to find codeml output of freebranch model.
'''
# _update names?
self.workdir = '/tmp/ete3-tmp/'
if not binpath:
ete3_path = which("ete3")
binpath = os.path.split(ete3_path)[0]
self.execpath = binpath
self._models = {}
self.__gui_mark_mode = False
PhyloNode.__init__(self, newick=newick, format=format,
sp_naming_function=sp_naming_function, **kwargs)
if newick:
self._label_as_paml()
# initialize node marks
self.mark_tree([])
def _set_mark_mode(self, val):
self.__gui_mark_mode = val
def _is_mark_mode(self):
return self.__gui_mark_mode
def _label_internal_nodes(self, nid=None):
"""
nid needs to be a list in order to keep count through the recursion
"""
for node in self.get_children():
if node.is_leaf():
continue
nid[0] += 1
node.add_feature('node_id', nid[0])
node._label_internal_nodes(nid)
def _label_as_paml(self):
'''
labels the tree as PAML expects, essentially a walk over the tree
WARNING: names are sorted in the same order as the sequences
WARNING: depends on the tree topology, so the labelling is not the same after a swap
activates the function get_descendant_by_node_id
'''
nid = 1
# check we do not have duplicated names in tree
if (len(self)) != len(set(self.get_leaf_names())):
duplis = [n for n in self.get_leaf_names(
) if self.get_leaf_names().count(n) > 1]
raise Exception('EvolTree requires unique names for leaves', duplis)
# put ids
for leaf in sorted(self, key=lambda x: x.name):
leaf.add_feature('node_id', nid)
nid += 1
self.add_feature('node_id', nid)
self._label_internal_nodes([nid])
def get_descendant_by_node_id(self, idname):
'''
returns the node corresponding to a given node_id
'''
try:
for n in self.iter_descendants():
if n.node_id == idname:
return n
if self.node_id == idname:
return self
except AttributeError:
warn('Should be first labelled as paml ' +
'(automatically done when alignment is loaded)')
def _write_algn(self, fullpath):
"""
to write algn in paml format
"""
seq_group = SeqGroup()
for n in self:
seq_group.id2seq[n.node_id] = n.nt_sequence
seq_group.id2name[n.node_id] = n.name
seq_group.name2id[n.name] = n.node_id
seq_group.write(outfile=fullpath, format='paml')
def run_model(self, model_name, ctrl_string='', keep=True, **kwargs):
'''
To compute evolutionary models. e.g.: 'b_free_lala.vs.lele' will launch one free-branch model and store
it in the "WORK_DIR/b_free_lala.vs.lele" directory
WARNING: this functionality needs to create a working directory in "rep"
WARNING: you need to have codeml and/or SLR in your path
The models available are:
=========== ============================= ==================
Model name Description Model kind
=========== ============================= ==================\n%s
=========== ============================= ==================\n
**Note that M1 and M2 models are making reference to the new versions
of these models, with continuous omega rates (namely M1a and M2a in the
PAML user guide).**
:argument model_name: a string like "model-name[.some-secondary-name]" (e.g.: "fb.my_first_try", or just "fb")
* model-name is compulsory, is the name of the model (see table above for the full list)
* the second part is accessory, it is to avoid over-writing models with the same name.
:argument ctrl_string: list of parameters that can be used as control file.
:argument True keep: links the model to the tree (equivalent of running `EVOL_TREE.link_to_evol_model(MODEL_NAME)`)
:argument kwargs: extra parameters should be one of: %s.
'''
from subprocess import Popen, PIPE
model_obj = Model(model_name, self, **kwargs)
fullpath = os.path.join(self.workdir, model_obj.name)
os.system("mkdir -p %s" % fullpath)
# write tree file
self._write_algn(fullpath + '/algn')
if model_obj.properties['exec'] == 'Slr':
self.write(outfile=fullpath+'/tree', format=(11))
else:
self.write(outfile=fullpath+'/tree',
format=(10 if model_obj.properties['allow_mark'] else 9))
# write algn file
# MODEL MODEL MDE
if ctrl_string == '':
ctrl_string = model_obj.get_ctrl_string(fullpath+'/tmp.ctl')
else:
open(fullpath+'/tmp.ctl', 'w').write(ctrl_string)
hlddir = os.getcwd()
os.chdir(fullpath)
bin_ = os.path.join(self.execpath, model_obj.properties['exec'])
try:
proc = Popen([bin_, 'tmp.ctl'], stdout=PIPE,
stdin=PIPE, stderr=PIPE)
except OSError:
raise Exception(('ERROR: {} not installed, ' +
'or wrong path to binary\n').format(bin_))
# send \n via stdin in case codeml/slr asks something (note on py3, stdin needs bytes)
run, err = proc.communicate(b'\n')
run = run.decode(sys.stdout.encoding)
os.chdir(hlddir)
if err:
warn("ERROR: inside codeml!!\n" + err.decode())
return 1
if keep:
setattr(model_obj, 'run', run)
self.link_to_evol_model(os.path.join(fullpath, 'out'), model_obj)
sep = '\n'
run_model.__doc__ = run_model.__doc__ % \
(sep.join([' %-8s %-27s %-15s ' %
('%s' % (x), AVAIL[x]['evol'], AVAIL[x]['typ']) for x in sorted(sorted(AVAIL.keys()), key=lambda x:
AVAIL[x]['typ'],
reverse=True)]),
', '.join(list(PARAMS.keys())))
# def test_codon_model(self):
# for c_frq in range(4):
# self.run_model('M0.model_test-'+str(c_frq), CodonFreq=c_frq)
# if self.get_most_likely('M0.model_test-1', 'M0.model_test-0') > 0.05:
#
# self.get_most_likely('M0.model_test-2', 'M0.model_test-0')
# self.get_most_likely('M0.model_test-3', 'M0.model_test-0')
# self.get_most_likely('M0.model_test-2', 'M0.model_test-1')
# self.get_most_likely('M0.model_test-3', 'M0.model_test-1')
# self.get_most_likely('M0.model_test-3', 'M0.model_test-2')
def link_to_alignment(self, alignment, alg_format="paml",
nucleotides=True, **kwargs):
'''
same function as for PhyloTree, but translates sequences if nucleotides=True
the nucleotide sequence is kept under node.nt_sequence
:argument alignment: path to alignment or string
:argument alg_format: one of fasta, phylip or paml
:argument True nucleotides: set to False in case we want to keep it untranslated
'''
super(EvolTree, self).link_to_alignment(alignment,
alg_format=alg_format, **kwargs)
check_len = 0
for leaf in self.iter_leaves():
seq_len = len(str(leaf.sequence))
if check_len and check_len != seq_len:
warn('WARNING: sequences with different lengths found!')
check_len = seq_len
leaf.nt_sequence = str(leaf.sequence)
if nucleotides:
leaf.sequence = translate(leaf.nt_sequence)
def show(self, layout=None, tree_style=None, histfaces=None):
'''
call super show of PhyloTree
histfaces should be a list of models to be displayed as histfaces
:argument layout: a layout function
:argument None tree_style: tree_style object
:argument None histfaces: a histogram face function. This is only to plot selective pressure among sites
'''
if TREEVIEW:
if not tree_style:
ts = TreeStyle()
else:
ts = tree_style
if histfaces:
for hist in histfaces:
try:
mdl = self.get_evol_model(hist)
except AttributeError:
warn('model %s not computed' % (hist))
if not 'histface' in mdl.properties:
if len(histfaces) > 1 and histfaces.index(hist) != 0:
mdl.set_histface(up=False)
else:
mdl.set_histface()
if mdl.properties['histface'].up:
ts.aligned_header.add_face(
mdl.properties['histface'], 1)
else:
ts.aligned_foot.add_face(
mdl.properties['histface'], 1)
super(EvolTree, self).show(layout=layout, tree_style=ts)
else:
raise ValueError("Treeview module is disabled")
def render(self, file_name, layout=None, w=None, h=None,
tree_style=None, header=None, histfaces=None):
'''
call super render adding up and down faces
:argument layout: a layout function
:argument None tree_style: tree_style object
:argument None histfaces: a histogram face function. This is only to plot selective pressure among sites
'''
if TREEVIEW:
if not tree_style:
ts = TreeStyle()
else:
ts = tree_style
if histfaces:
for hist in histfaces:
try:
mdl = self.get_evol_model(hist)
except AttributeError:
warn('model %s not computed' % (hist))
if not 'histface' in mdl.properties:
if len(histfaces) > 1 and histfaces.index(hist) != 0:
mdl.set_histface(up=False)
else:
mdl.set_histface()
if mdl.properties['histface'].up:
ts.aligned_header.add_face(
mdl.properties['histface'], 1)
else:
ts.aligned_foot.add_face(
mdl.properties['histface'], 1)
return super(EvolTree, self).render(file_name, layout=layout,
tree_style=ts,
w=w, h=h)
else:
raise ValueError("Treeview module is disabled")
def mark_tree(self, node_ids, verbose=False, **kargs):
'''
function to mark branches on the tree so that paml can interpret it.
takes a "marks" argument that should be a list like ['#1', '#1', '#2']
e.g.:
::
t.mark_tree([2, 3], marks=["#1", "#2"])
:argument node_ids: list of node ids (have a look to node.node_id)
:argument False verbose: warn if marks do not correspond to codeml standard
:argument kargs: mainly for the marks key-word which needs a list of marks (marks=['#1', '#2'])
'''
from re import match
node_ids = list(map(int, node_ids))
if 'marks' in kargs:
marks = list(kargs['marks'])
else:
marks = ['#1']*len(node_ids)
for node in self.traverse():
if not hasattr(node, 'node_id'):
continue
if node.node_id in node_ids:
if ('.' in marks[node_ids.index(node.node_id)] or
match('#[0-9]+',
marks[node_ids.index(node.node_id)]) is None) and verbose:
warn('WARNING: marks should be "#" sign directly ' +
'followed by integer\n' + self.mark_tree.__doc__)
node.add_feature(
'mark', ' '+marks[node_ids.index(node.node_id)])
elif not 'mark' in node.features:
node.add_feature('mark', '')
def link_to_evol_model(self, path, model):
'''
link EvolTree to evolutionary model
* free-branch model ("fb") will append evol values to tree
* Site models (M0, M1, M2, M7, M8) will give evol values by site
and likelihood
:argument path: path to outfile containing model computation result
:argument model: either the name of a model, or a Model object (usually empty)
'''
if isinstance(model, str):
model = Model(model, self, path)
else:
model._load(path)
# new entry in _models dict
while model.name in self._models:
model.name = model.name.split('__')[0] + str(
(int(model.name.split('__')[1]) + 1)
if '__' in model.name else 0)
self._models[model.name] = model
if not os.path.isfile(path):
warn("ERROR: not a file: " + path)
return 1
if len(self._models) == 1 and model.properties['exec'] == 'codeml':
self.change_dist_to_evol('bL', model, fill=True)
def get_evol_model(self, modelname):
'''
returns one precomputed model
:argument modelname: string of the name of a model object stored
:returns: Model object
'''
try:
return self._models[modelname]
except KeyError:
Exception("ERROR: Model %s not found." % (modelname))
def write(self, features=None, outfile=None, format=10):
"""
Inherits from Tree but adds a tenth format that allows displaying marks for CodeML.
TODO: internal writing format needs to be something like 0
"""
from re import sub
if int(format) == 11:
nwk = ' %s 1\n' % (len(self))
nwk += sub(r'\[&&NHX:mark=([ #0-9.]*)\]', r'\1',
write_newick(self, features=['mark'], format=9))
elif int(format) == 10:
nwk = sub(r'\[&&NHX:mark=([ #0-9.]*)\]', r'\1',
write_newick(self, features=['mark'], format=9))
else:
nwk = write_newick(self, features=features, format=format)
if outfile is not None:
open(outfile, "w").write(nwk)
return nwk
else:
return nwk
write.__doc__ += super(PhyloNode, PhyloNode()).write.__doc__.replace(
'argument format', 'argument 10 format')
def get_most_likely(self, altn, null):
'''
Returns pvalue of LRT between alternative model and null model.
usual comparisons are:
============ ======= ===========================================
Alternative Null Test
============ ======= ===========================================
M2 M1 PS on sites (M2 prone to miss some sites)
(Yang 2000).
M3 M0 test of variability among sites
M8 M7 PS on sites
(Yang 2000)
M8 M8a RX on sites?? think so....
bsA bsA1 PS on sites on specific branch
(Zhang 2005)
bsA M1 RX on sites on specific branch
(Zhang 2005)
bsC M1 different omegas on clades branches sites
ref: Yang Nielsen 2002
bsD M3 different omegas on clades branches sites
(Yang Nielsen 2002, Bielawski 2004)
b_free b_neut foreground branch not neutral (w != 1)
- RX if P<0.05 (means that w on frg=1)
- PS if P>0.05 and wfrg>1
- CN if P>0.05 and wfrg>1
(Yang Nielsen 2002)
b_free M0 different ratio on branches
(Yang Nielsen 2002)
============ ======= ===========================================
**Note that M1 and M2 models are making reference to the new versions
of these models, with continuous omega rates (namely M1a and M2a in the
PAML user guide).**
:argument altn: model with higher number of parameters (np)
:argument null: model with lower number of parameters (np)
'''
altn = self.get_evol_model(altn)
null = self.get_evol_model(null)
if null.np > altn.np:
warn("first model should be the alternative, change the order")
return 1.0
try:
if hasattr(altn, 'lnL') and hasattr(null, 'lnL'):
if null.lnL - altn.lnL < 0:
return chi_high(2 * abs(altn.lnL - null.lnL),
float(altn.np - null.np))
else:
warn("\nWARNING: Likelihood of the alternative model is "
"smaller than null's (%f - %f = %f)" % (
null.lnL, altn.lnL, null.lnL - altn.lnL) +
"\nLarge differences (> 0.1) may indicate mistaken "
"assigantion of null and alternative models")
return 1
except KeyError:
warn("at least one of %s or %s, was not calculated" % (altn.name,
null.name))
exit(self.get_most_likely.__doc__)
def change_dist_to_evol(self, evol, model, fill=False):
'''
change dist/branch length of the tree to a given evolutionary
variable (dN, dS, w or bL), default is bL.
:argument evol: evolutionary variable
:argument model: Model object from which to retrieve evolutionary variables
:argument False fill: if True, not only the dist parameter is changed; each node will also be annotated with all evolutionary variables (node.dN, node.w, ...).
'''
# branch-site outfiles do not give specific branch info
if not model.branches:
return
for node in self.iter_descendants():
if not evol in model.branches[node.node_id]:
continue
node.dist = model.branches[node.node_id][evol]
if fill:
for e in ['dN', 'dS', 'w', 'bL']:
node.add_feature(e, model.branches[node.node_id][e])
|
(newick=None, alignment=None, alg_format='fasta', sp_naming_function=<function _parse_species at 0x7ff39bbe70a0>, format=0, binpath='', **kwargs)
|
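A brief sketch of how the class above is typically instantiated (ete3 exposes it as EvolTree); the newick and the codon alignment are hypothetical, and codeml/SLR binaries are only needed later when models are actually run:

from ete3 import EvolTree

# hypothetical gene tree and matching codon alignment
tree = EvolTree("((Hylobates_lar,(Gorilla_gorilla,Pan_troglodytes)),Papio_cynocephalus);")
alg = (">Hylobates_lar\nATGGCT\n"
       ">Gorilla_gorilla\nATGGCT\n"
       ">Pan_troglodytes\nATGGCA\n"
       ">Papio_cynocephalus\nATGGCG\n")
tree.link_to_alignment(alg, alg_format="fasta")
tree.workdir = "/tmp/ete3-example/"   # models run later will write their output here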
720,454 |
ete3.phylo.phylotree
|
__get_speciation_trees_recursive
|
experimental and testing
|
def __get_speciation_trees_recursive(self):
""" experimental and testing """
t = self.copy()
if autodetect_duplications:
dups = 0
#n2content, n2species = t.get_node2species()
n2content = t.get_cached_content()
n2species = t.get_cached_content(store_attr="species")
#print "Detecting dups"
for node in n2content:
sp_subtotal = sum([len(n2species[_ch]) for _ch in node.children])
if len(n2species[node]) > 1 and len(n2species[node]) != sp_subtotal:
node.add_features(evoltype="D")
dups += 1
elif node.is_leaf():
node._leaf = True
#print dups
else:
for node in t.iter_leaves():
node._leaf = True
subtrees = _get_subtrees_recursive(t)
return len(subtrees), 0, subtrees
|
(self)
|
720,459 |
ete3.evol.evoltree
|
__init__
|
freebranch: path to find codeml output of freebranch model.
|
def __init__(self, newick=None, alignment=None, alg_format="fasta",
sp_naming_function=_parse_species, format=0,
binpath='', **kwargs):
'''
freebranch: path to find codeml output of freebranch model.
'''
# _update names?
self.workdir = '/tmp/ete3-tmp/'
if not binpath:
ete3_path = which("ete3")
binpath = os.path.split(ete3_path)[0]
self.execpath = binpath
self._models = {}
self.__gui_mark_mode = False
PhyloNode.__init__(self, newick=newick, format=format,
sp_naming_function=sp_naming_function, **kwargs)
if newick:
self._label_as_paml()
# initialize node marks
self.mark_tree([])
|
(self, newick=None, alignment=None, alg_format='fasta', sp_naming_function=<function _parse_species at 0x7ff39bbe70a0>, format=0, binpath='', **kwargs)
|
720,463 |
ete3.phylo.phylotree
|
__repr__
| null |
def __repr__(self):
return "PhyloTree node '%s' (%s)" %(self.name, hex(self.__hash__()))
|
(self)
|
720,471 |
ete3.phylo.phylotree
|
_get_species
| null |
def _get_species(self):
if self._speciesFunction:
try:
return self._speciesFunction(self.name)
except:
return self._speciesFunction(self)
else:
return self._species
|
(self)
|
720,475 |
ete3.evol.evoltree
|
_is_mark_mode
| null |
def _is_mark_mode(self):
return self.__gui_mark_mode
|
(self)
|
720,479 |
ete3.evol.evoltree
|
_label_as_paml
|
labels the tree as PAML expects, essentially a walk over the tree
WARNING: names are sorted in the same order as the sequences
WARNING: depends on the tree topology, so the labelling is not the same after a swap
activates the function get_descendant_by_node_id
|
def _label_as_paml(self):
'''
labels the tree as PAML expects, essentially a walk over the tree
WARNING: names are sorted in the same order as the sequences
WARNING: depends on the tree topology, so the labelling is not the same after a swap
activates the function get_descendant_by_node_id
'''
nid = 1
# check we do not have duplicated names in tree
if (len(self)) != len(set(self.get_leaf_names())):
duplis = [n for n in self.get_leaf_names(
) if self.get_leaf_names().count(n) > 1]
raise Exception('EvolTree requires unique names for leaves', duplis)
# put ids
for leaf in sorted(self, key=lambda x: x.name):
leaf.add_feature('node_id', nid)
nid += 1
self.add_feature('node_id', nid)
self._label_internal_nodes([nid])
|
(self)
|
720,480 |
ete3.evol.evoltree
|
_label_internal_nodes
|
nid needs to be a list in order to keep count through the recursion
|
def _label_internal_nodes(self, nid=None):
"""
nid needs to be a list in order to keep count through the recursion
"""
for node in self.get_children():
if node.is_leaf():
continue
nid[0] += 1
node.add_feature('node_id', nid[0])
node._label_internal_nodes(nid)
|
(self, nid=None)
|
720,484 |
ete3.evol.evoltree
|
_set_mark_mode
| null |
def _set_mark_mode(self, val):
self.__gui_mark_mode = val
|
(self, val)
|
720,485 |
ete3.phylo.phylotree
|
_set_species
| null |
def _set_species(self, value):
if self._speciesFunction:
pass
else:
self._species = value
|
(self, value)
|
720,489 |
ete3.evol.evoltree
|
_write_algn
|
to write algn in paml format
|
def _write_algn(self, fullpath):
"""
to write algn in paml format
"""
seq_group = SeqGroup()
for n in self:
seq_group.id2seq[n.node_id] = n.nt_sequence
seq_group.id2name[n.node_id] = n.name
seq_group.name2id[n.name] = n.node_id
seq_group.write(outfile=fullpath, format='paml')
|
(self, fullpath)
|
720,495 |
ete3.phylo.phylotree
|
annotate_ncbi_taxa
|
Add NCBI taxonomy annotation to all descendant nodes. Leaf nodes are
expected to contain a feature (name, by default) encoding a valid taxid
number.
All descendant nodes (including internal nodes) are annotated with the
following new features:
`Node.spname`: scientific species name as encoded in the NCBI taxonomy database
`Node.named_lineage`: the NCBI lineage track using scientific names
`Node.taxid`: NCBI taxid number
`Node.lineage`: same as named_lineage but using taxid codes.
Note that for internal nodes, NCBI information will refer to the first
common lineage of the grouped species.
:param name taxid_attr: the name of the feature that should be used to access the taxid number associated to each node.
:param None tax2name: A dictionary where keys are taxid
numbers and values are their translation into NCBI
scientific name. Its use is optional and allows to avoid
database queries when annotating many trees containing the
same set of taxids.
:param None tax2track: A dictionary where keys are taxid
numbers and values are their translation into NCBI lineage
tracks (taxids). Its use is optional and allows to avoid
database queries when annotating many trees containing the
same set of taxids.
:param None tax2rank: A dictionary where keys are taxid
numbers and values are their translation into NCBI rank
name. Its use is optional and allows to avoid database
queries when annotating many trees containing the same set
of taxids.
:param None dbfile : If provided, the provided file will be
used as a local copy of the NCBI taxonomy database.
:returns: tax2name (a dictionary translating taxid numbers
into scientific name), tax2lineage (a dictionary
translating taxid numbers into their corresponding NCBI
lineage track) and tax2rank (a dictionary translating
taxid numbers into rank names).
|
def annotate_ncbi_taxa(self, taxid_attr='species', tax2name=None, tax2track=None, tax2rank=None, dbfile=None):
"""Add NCBI taxonomy annotation to all descendant nodes. Leaf nodes are
expected to contain a feature (name, by default) encoding a valid taxid
number.
All descendant nodes (including internal nodes) are annotated with the
following new features:
`Node.spname`: scientific species name as encoded in the NCBI taxonomy database
`Node.named_lineage`: the NCBI lineage track using scientific names
`Node.taxid`: NCBI taxid number
`Node.lineage`: same as named_lineage but using taxid codes.
Note that for internal nodes, NCBI information will refer to the first
common lineage of the grouped species.
:param name taxid_attr: the name of the feature that should be used to access the taxid number associated to each node.
:param None tax2name: A dictionary where keys are taxid
numbers and values are their translation into NCBI
scientific name. Its use is optional and allows to avoid
database queries when annotating many trees containing the
same set of taxids.
:param None tax2track: A dictionary where keys are taxid
numbers and values are their translation into NCBI lineage
tracks (taxids). Its use is optional and allows to avoid
database queries when annotating many trees containing the
same set of taxids.
:param None tax2rank: A dictionary where keys are taxid
numbers and values are their translation into NCBI rank
name. Its use is optional and allows to avoid database
queries when annotating many trees containing the same set
of taxids.
:param None dbfile : If provided, the provided file will be
used as a local copy of the NCBI taxonomy database.
:returns: tax2name (a dictionary translating taxid numbers
into scientific name), tax2lineage (a dictionary
translating taxid numbers into their corresponding NCBI
lineage track) and tax2rank (a dictionary translating
taxid numbers into rank names).
"""
ncbi = NCBITaxa(dbfile=dbfile)
return ncbi.annotate_tree(self, taxid_attr=taxid_attr, tax2name=tax2name, tax2track=tax2track, tax2rank=tax2rank)
|
(self, taxid_attr='species', tax2name=None, tax2track=None, tax2rank=None, dbfile=None)
|
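A short sketch of annotate_ncbi_taxa; the leaf names below are NCBI taxid numbers (hypothetical example) and the naming function simply returns the name as the species, so the default taxid_attr='species' works. The first call may download and build the local taxonomy database:

from ete3 import PhyloTree

# leaves labelled with taxids: human (9606), chimp (9598), mouse (10090)
t = PhyloTree("((9606,9598),10090);", sp_naming_function=lambda name: name)
tax2name, tax2lineage, tax2rank = t.annotate_ncbi_taxa(taxid_attr="species")
for leaf in t:
    print(leaf.name, leaf.spname)   # taxid and its scientific name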
720,496 |
ete3.evol.evoltree
|
change_dist_to_evol
|
change dist/branch length of the tree to a given evolutionary
variable (dN, dS, w or bL), default is bL.
:argument evol: evolutionary variable
:argument model: Model object from which to retrieve evolutionary variables
:argument False fill: if True, not only the dist parameter is changed; each node will also be annotated with all evolutionary variables (node.dN, node.w, ...).
|
def change_dist_to_evol(self, evol, model, fill=False):
'''
change dist/branch length of the tree to a given evolutionary
variable (dN, dS, w or bL), default is bL.
:argument evol: evolutionary variable
:argument model: Model object from which to retrieve evolutionary variables
:argument False fill: if True, not only the dist parameter is changed; each node will also be annotated with all evolutionary variables (node.dN, node.w, ...).
'''
# branch-site outfiles do not give specific branch info
if not model.branches:
return
for node in self.iter_descendants():
if not evol in model.branches[node.node_id]:
continue
node.dist = model.branches[node.node_id][evol]
if fill:
for e in ['dN', 'dS', 'w', 'bL']:
node.add_feature(e, model.branches[node.node_id][e])
|
(self, evol, model, fill=False)
|
720,498 |
ete3.phylo.phylotree
|
collapse_lineage_specific_expansions
|
Converts lineage specific expansion nodes into a single
tip node (randomly chosen from tips within the expansion).
:param None species: If supplied, only expansions matching the
species criteria will be pruned. When None, all expansions
within the tree will be processed.
|
def collapse_lineage_specific_expansions(self, species=None, return_copy=True):
""" Converts lineage specific expansion nodes into a single
tip node (randomly chosen from tips within the expansion).
:param None species: If supplied, only expansions matching the
species criteria will be pruned. When None, all expansions
within the tree will be processed.
"""
if species and isinstance(species, (list, tuple)):
species = set(species)
elif species and (not isinstance(species, (set, frozenset))):
raise TypeError("species argument should be a set (preferred), list or tuple")
prunned = self.copy("deepcopy") if return_copy else self
n2sp = prunned.get_cached_content(store_attr="species")
n2leaves = prunned.get_cached_content()
is_expansion = lambda n: (len(n2sp[n])==1 and len(n2leaves[n])>1
and (species is None or species & n2sp[n]))
for n in prunned.get_leaves(is_leaf_fn=is_expansion):
repre = list(n2leaves[n])[0]
repre.detach()
if n is not prunned:
n.up.add_child(repre)
n.detach()
else:
return repre
return prunned
|
(self, species=None, return_copy=True)
|
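A compact sketch of collapsing a lineage-specific expansion; leaf names follow a hypothetical GENE_SPECIES pattern and the species is parsed from the part after the underscore:

from ete3 import PhyloTree

# geneA_Hsa and geneB_Hsa form a single-species (Hsa) expansion
t = PhyloTree("((geneA_Hsa,geneB_Hsa),geneC_Mmu);")
t.set_species_naming_function(lambda name: name.split("_")[1])
collapsed = t.collapse_lineage_specific_expansions()
print(collapsed.write())   # one randomly chosen Hsa tip remains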
720,510 |
ete3.phylo.phylotree
|
get_age
|
Implements the phylostratigraphic method described in:
Huerta-Cepas, J., & Gabaldon, T. (2011). Assigning duplication events to
relative temporal scales in genome-wide studies. Bioinformatics, 27(1),
38-45.
|
def get_age(self, species2age):
"""
Implements the phylostratigraphic method described in:
Huerta-Cepas, J., & Gabaldon, T. (2011). Assigning duplication events to
relative temporal scales in genome-wide studies. Bioinformatics, 27(1),
38-45.
"""
return max([species2age[sp] for sp in self.get_species()])
|
(self, species2age)
|
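A quick sketch of get_age with a hypothetical species2age mapping (larger values meaning older relative ages):

from ete3 import PhyloTree

t = PhyloTree("((gene1_Hsa,gene2_Ptr),gene3_Mmu);")
t.set_species_naming_function(lambda name: name.split("_")[1])
species2age = {"Hsa": 1, "Ptr": 2, "Mmu": 3}   # hypothetical relative ages
print(t.get_age(species2age))                  # whole tree: 3
node = t.search_nodes(name="gene1_Hsa")[0].up
print(node.get_age(species2age))               # Hsa/Ptr ancestor: 2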
720,511 |
ete3.phylo.phylotree
|
get_age_balanced_outgroup
|
.. versionadded:: 2.2
Returns the node that best balances the current tree structure
according to the topological age of the different leaves and
internal node sizes.
:param species2age: A dictionary translating from leaf names
into a topological age.
.. warning: This is currently an experimental method!!
|
def get_age_balanced_outgroup(self, species2age):
"""
.. versionadded:: 2.2
Returns the node that best balances the current tree structure
according to the topological age of the different leaves and
internal node sizes.
:param species2age: A dictionary translating from leaf names
into a topological age.
.. warning: This is currently an experimental method!!
"""
root = self
all_seqs = set(self.get_leaf_names())
outgroup_dist = 0
best_balance = max(species2age.values())
outgroup_node = self
outgroup_size = 0
for leaf in root.iter_descendants():
leaf_seqs = set(leaf.get_leaf_names())
size = len(leaf_seqs)
leaf_species =[self._speciesFunction(s) for s in leaf_seqs]
out_species = [self._speciesFunction(s) for s in all_seqs-leaf_seqs]
leaf_age_min = min([species2age[sp] for sp in leaf_species])
out_age_min = min([species2age[sp] for sp in out_species])
leaf_age_max = max([species2age[sp] for sp in leaf_species])
out_age_max = max([species2age[sp] for sp in out_species])
leaf_age = leaf_age_max - leaf_age_min
out_age = out_age_max - out_age_min
age_inbalance = abs(out_age - leaf_age)
# DEBUG ONLY
# leaf.add_features(age_inbalance = age_inbalance, age=leaf_age)
update = False
if age_inbalance < best_balance:
update = True
elif age_inbalance == best_balance:
if size > outgroup_size:
update = True
elif size == outgroup_size:
dist = self.get_distance(leaf)
outgroup_dist = self.get_distance(outgroup_node)
if dist > outgroup_dist:
update = True
if update:
best_balance = age_inbalance
outgroup_node = leaf
outgroup_size = size
return outgroup_node
|
(self, species2age)
|
720,518 |
ete3.evol.evoltree
|
get_descendant_by_node_id
|
returns the node corresponding to a given node_id
|
def get_descendant_by_node_id(self, idname):
'''
returns the node corresponding to a given node_id
'''
try:
for n in self.iter_descendants():
if n.node_id == idname:
return n
if self.node_id == idname:
return self
except AttributeError:
warn('Should be first labelled as paml ' +
'(automatically done when alignment is loaded)')
|
(self, idname)
|
720,519 |
ete3.phylo.phylotree
|
get_descendant_evol_events
|
Returns a list of **all** duplication and speciation
events detected after this node. Nodes are assumed to be
duplications when a species overlap is found between its child
lineages. Method is described in more detail in:
"The Human Phylome." Huerta-Cepas J, Dopazo H, Dopazo J, Gabaldon
T. Genome Biol. 2007;8(6):R109.
|
def get_descendant_evol_events(self, sos_thr=0.0):
""" Returns a list of **all** duplication and speciation
events detected after this node. Nodes are assumed to be
duplications when a species overlap is found between its child
lineages. Method is described in more detail in:
"The Human Phylome." Huerta-Cepas J, Dopazo H, Dopazo J, Gabaldon
T. Genome Biol. 2007;8(6):R109.
"""
return spoverlap.get_evol_events_from_root(self, sos_thr=sos_thr)
|
(self, sos_thr=0.0)
|
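A small sketch of running the species-overlap scan above on a gene tree; the names are hypothetical, species are parsed from the prefix, and the etype attribute ('D' or 'S') on the returned event objects is assumed from ete3's evolutionary-event objects:

from ete3 import PhyloTree

# Hsa appears on both sides of the root, so the root is called a duplication
t = PhyloTree("((Hsa_001,Ptr_001),(Hsa_002,Mmu_001));")
t.set_species_naming_function(lambda name: name.split("_")[0])
events = t.get_descendant_evol_events()
print("duplications:", sum(1 for ev in events if ev.etype == "D"))
print("speciations:", sum(1 for ev in events if ev.etype == "S"))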
720,523 |
ete3.evol.evoltree
|
get_evol_model
|
returns one precomputed model
:argument modelname: string of the name of a model object stored
:returns: Model object
|
def get_evol_model(self, modelname):
'''
returns one precomputed model
:argument modelname: string of the name of a model object stored
:returns: Model object
'''
try:
return self._models[modelname]
except KeyError:
Exception("ERROR: Model %s not found." % (modelname))
|
(self, modelname)
|
720,526 |
ete3.phylo.phylotree
|
get_farthest_oldest_leaf
|
Returns the farthest oldest leaf to the current
one. It requires a species2age dictionary with the age
estimation for all species.
:argument None is_leaf_fn: A pointer to a function that
receives a node instance as unique argument and returns True
or False. It can be used to dynamically collapse nodes, so
they are seen as leaves.
|
def get_farthest_oldest_leaf(self, species2age, is_leaf_fn=None):
""" Returns the farthest oldest leaf to the current
one. It requires a species2age dictionary with the age
estimation for all species.
:argument None is_leaf_fn: A pointer to a function that
receives a node instance as unique argument and returns True
or False. It can be used to dynamically collapse nodes, so
they are seen as leaves.
"""
root = self.get_tree_root()
outgroup_dist = 0
outgroup_node = self
outgroup_age = 0 # self.get_age(species2age)
for leaf in root.iter_leaves(is_leaf_fn=is_leaf_fn):
if leaf.get_age(species2age) > outgroup_age:
outgroup_dist = leaf.get_distance(self)
outgroup_node = leaf
outgroup_age = species2age[leaf.get_species().pop()]
elif leaf.get_age(species2age) == outgroup_age:
dist = leaf.get_distance(self)
if dist>outgroup_dist:
outgroup_dist = leaf.get_distance(self)
outgroup_node = leaf
outgroup_age = species2age[leaf.get_species().pop()]
return outgroup_node
|
(self, species2age, is_leaf_fn=None)
|
720,527 |
ete3.phylo.phylotree
|
get_farthest_oldest_node
|
.. versionadded:: 2.1
Returns the farthest oldest node (leaf or internal). The
difference with get_farthest_oldest_leaf() is that in this
function internal nodes grouping seqs from the same species
are collapsed.
|
def get_farthest_oldest_node(self, species2age):
"""
.. versionadded:: 2.1
Returns the farthest oldest node (leaf or internal). The
difference with get_farthest_oldest_leaf() is that in this
function internal nodes grouping seqs from the same species
are collapsed.
"""
# I use a custom is_leaf() function to collapse nodes groups
# seqs from the same species
is_leaf = lambda node: len(node.get_species())==1
return self.get_farthest_oldest_leaf(species2age, is_leaf_fn=is_leaf)
|
(self, species2age)
|
720,533 |
ete3.evol.evoltree
|
get_most_likely
|
Returns pvalue of LRT between alternative model and null model.
usual comparisons are:
============ ======= ===========================================
Alternative Null Test
============ ======= ===========================================
M2 M1 PS on sites (M2 prone to miss some sites)
(Yang 2000).
M3 M0 test of variability among sites
M8 M7 PS on sites
(Yang 2000)
M8 M8a RX on sites?? think so....
bsA bsA1 PS on sites on specific branch
(Zhang 2005)
bsA M1 RX on sites on specific branch
(Zhang 2005)
bsC M1 different omegas on clades branches sites
ref: Yang Nielsen 2002
bsD M3 different omegas on clades branches sites
(Yang Nielsen 2002, Bielawski 2004)
b_free b_neut foreground branch not neutral (w != 1)
- RX if P<0.05 (means that w on frg=1)
- PS if P>0.05 and wfrg>1
- CN if P>0.05 and wfrg>1
(Yang Nielsen 2002)
b_free M0 different ratio on branches
(Yang Nielsen 2002)
============ ======= ===========================================
**Note that M1 and M2 models are making reference to the new versions
of these models, with continuous omega rates (namely M1a and M2a in the
PAML user guide).**
:argument altn: model with higher number of parameters (np)
:argument null: model with lower number of parameters (np)
|
def get_most_likely(self, altn, null):
'''
Returns pvalue of LRT between alternative model and null model.
usual comparisons are:
============ ======= ===========================================
Alternative Null Test
============ ======= ===========================================
M2 M1 PS on sites (M2 prone to miss some sites)
(Yang 2000).
M3 M0 test of variability among sites
M8 M7 PS on sites
(Yang 2000)
M8 M8a RX on sites?? think so....
bsA bsA1 PS on sites on specific branch
(Zhang 2005)
bsA M1 RX on sites on specific branch
(Zhang 2005)
bsC M1 different omegas on clades branches sites
ref: Yang Nielsen 2002
bsD M3 different omegas on clades branches sites
(Yang Nielsen 2002, Bielawski 2004)
b_free b_neut foreground branch not neutral (w != 1)
- RX if P<0.05 (means that w on frg=1)
- PS if P>0.05 and wfrg>1
- CN if P>0.05 and wfrg>1
(Yang Nielsen 2002)
b_free M0 different ratio on branches
(Yang Nielsen 2002)
============ ======= ===========================================
**Note that M1 and M2 models are making reference to the new versions
of these models, with continuous omega rates (namely M1a and M2a in the
PAML user guide).**
:argument altn: model with higher number of parameters (np)
:argument null: model with lower number of parameters (np)
'''
altn = self.get_evol_model(altn)
null = self.get_evol_model(null)
if null.np > altn.np:
warn("first model should be the alternative, change the order")
return 1.0
try:
if hasattr(altn, 'lnL') and hasattr(null, 'lnL'):
if null.lnL - altn.lnL < 0:
return chi_high(2 * abs(altn.lnL - null.lnL),
float(altn.np - null.np))
else:
warn("\nWARNING: Likelihood of the alternative model is "
"smaller than null's (%f - %f = %f)" % (
null.lnL, altn.lnL, null.lnL - altn.lnL) +
"\nLarge differences (> 0.1) may indicate mistaken "
"assigantion of null and alternative models")
return 1
except KeyError:
warn("at least one of %s or %s, was not calculated" % (altn.name,
null.name))
exit(self.get_most_likely.__doc__)
|
(self, altn, null)
|
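A sketch of the usual LRT workflow with get_most_likely; it assumes codeml is available and that the two site models have been computed with run_model on a tree whose alignment is already linked (the data below is hypothetical):

from ete3 import EvolTree

tree = EvolTree("((seqA,seqB),(seqC,seqD));")
alg = ">seqA\nATGGCT\n>seqB\nATGGCT\n>seqC\nATGGCA\n>seqD\nATGGCG\n"
tree.link_to_alignment(alg, alg_format="fasta")
tree.run_model("M1")                       # null: nearly-neutral site model
tree.run_model("M2")                       # alternative: positive selection on sites
pval = tree.get_most_likely("M2", "M1")    # alternative model goes first
print("M2 preferred" if pval < 0.05 else "M1 cannot be rejected")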
720,534 |
ete3.phylo.phylotree
|
get_my_evol_events
|
Returns a list of duplication and speciation events in
which the current node has been involved. Scanned nodes are
also labeled internally as dup=True|False. You can access these
labels using the 'node.dup' syntax.
Method: the algorithm scans all nodes from the given leafName to
the root. Nodes are assumed to be duplications when a species
overlap is found between its child lineages. Method is described
in more detail in:
"The Human Phylome." Huerta-Cepas J, Dopazo H, Dopazo J, Gabaldon
T. Genome Biol. 2007;8(6):R109.
|
def get_my_evol_events(self, sos_thr=0.0):
""" Returns a list of duplication and speciation events in
which the current node has been involved. Scanned nodes are
also labeled internally as dup=True|False. You can access these
labels using the 'node.dup' syntax.
Method: the algorithm scans all nodes from the given leafName to
the root. Nodes are assumed to be duplications when a species
overlap is found between its child lineages. Method is described
in more detail in:
"The Human Phylome." Huerta-Cepas J, Dopazo H, Dopazo J, Gabaldon
T. Genome Biol. 2007;8(6):R109.
"""
return spoverlap.get_evol_events_from_leaf(self, sos_thr=sos_thr)
|
(self, sos_thr=0.0)
|
720,536 |
ete3.phylo.phylotree
|
get_speciation_trees
|
.. versionadded: 2.2
Calculates all possible species trees contained within a
duplicated gene family tree as described in `Treeko
<http://treeko.cgenomics.org>`_ (see `Marcet and Gabaldon,
2011 <http://www.ncbi.nlm.nih.gov/pubmed/21335609>`_ ).
:argument True autodetect_duplications: If True, duplication
nodes will be automatically detected using the Species Overlap
algorithm (:func:`PhyloNode.get_descendant_evol_events`). If
False, duplication nodes within the original tree are expected
to contain the feature "evoltype=D".
:argument None features: A list of features that should be
mapped from the original gene family tree to each species
tree subtree.
:returns: (number_of_sptrees, number_of_dups, species_tree_iterator)
|
def get_speciation_trees(self, map_features=None, autodetect_duplications=True,
newick_only=False, target_attr='species'):
"""
.. versionadded: 2.2
Calculates all possible species trees contained within a
duplicated gene family tree as described in `Treeko
<http://treeko.cgenomics.org>`_ (see `Marcet and Gabaldon,
2011 <http://www.ncbi.nlm.nih.gov/pubmed/21335609>`_ ).
:argument True autodetect_duplications: If True, duplication
nodes will be automatically detected using the Species Overlap
algorithm (:func:`PhyloNode.get_descendant_evol_events`). If
False, duplication nodes within the original tree are expected
to contain the feature "evoltype=D".
:argument None features: A list of features that should be
mapped from the original gene family tree to each species
tree subtree.
:returns: (number_of_sptrees, number_of_dups, species_tree_iterator)
"""
t = self
if autodetect_duplications:
#n2content, n2species = t.get_node2species()
n2content = t.get_cached_content()
n2species = t.get_cached_content(store_attr=target_attr)
for node in n2content:
sp_subtotal = sum([len(n2species[_ch]) for _ch in node.children])
if len(n2species[node]) > 1 and len(n2species[node]) != sp_subtotal:
node.add_features(evoltype="D")
sp_trees = get_subtrees(t, features=map_features, newick_only=newick_only)
return sp_trees
|
(self, map_features=None, autodetect_duplications=True, newick_only=False, target_attr='species')
|
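A compact sketch of the TreeKO splitting documented above; the hypothetical leaf names encode the species after the underscore:

from ete3 import PhyloTree

t = PhyloTree("((gene1_Hsa,gene1_Mmu),(gene2_Hsa,gene2_Mmu));")
t.set_species_naming_function(lambda name: name.split("_")[1])
ntrees, ndups, sptrees = t.get_speciation_trees()
print("%d species trees after %d duplication(s)" % (ntrees, ndups))
for sp_tree in sptrees:
    print(sp_tree.write())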
720,537 |
ete3.phylo.phylotree
|
get_species
|
Returns the set of species covered by its partition.
|
def get_species(self):
""" Returns the set of species covered by its partition. """
return set([l.species for l in self.iter_leaves()])
|
(self)
|
720,549 |
ete3.phylo.phylotree
|
iter_species
|
Returns an iterator over the species grouped by this node.
|
def iter_species(self):
""" Returns an iterator over the species grouped by this node. """
spcs = set([])
for l in self.iter_leaves():
if l.species not in spcs:
spcs.add(l.species)
yield l.species
|
(self)
|
720,551 |
ete3.evol.evoltree
|
link_to_alignment
|
same function as for PhyloTree, but translates sequences if nucleotides=True
the nucleotide sequence is kept under node.nt_sequence
:argument alignment: path to alignment or string
:argument alg_format: one of fasta, phylip or paml
:argument True nucleotides: set to False in case we want to keep it untranslated
|
def link_to_alignment(self, alignment, alg_format="paml",
nucleotides=True, **kwargs):
'''
same function as for PhyloTree, but translates sequences if nucleotides=True
the nucleotide sequence is kept under node.nt_sequence
:argument alignment: path to alignment or string
:argument alg_format: one of fasta, phylip or paml
:argument True nucleotides: set to False in case we want to keep it untranslated
'''
super(EvolTree, self).link_to_alignment(alignment,
alg_format=alg_format, **kwargs)
check_len = 0
for leaf in self.iter_leaves():
seq_len = len(str(leaf.sequence))
if check_len and check_len != seq_len:
warn('WARNING: sequences with different lengths found!')
check_len = seq_len
leaf.nt_sequence = str(leaf.sequence)
if nucleotides:
leaf.sequence = translate(leaf.nt_sequence)
|
(self, alignment, alg_format='paml', nucleotides=True, **kwargs)
|
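A minimal sketch of linking a codon alignment to an EvolTree as described above; the sequences are hypothetical, and with nucleotides=True each leaf keeps the DNA under nt_sequence while sequence becomes the protein translation:

from ete3 import EvolTree

tree = EvolTree("((seqA,seqB),seqC);")
alg = ">seqA\nATGGCTAAA\n>seqB\nATGGCTAAG\n>seqC\nATGGCAAAA\n"
tree.link_to_alignment(alg, alg_format="fasta", nucleotides=True)
leaf = tree.get_leaves()[0]
print(leaf.nt_sequence)   # DNA, e.g. ATGGCTAAA
print(leaf.sequence)      # translated protein, e.g. MAK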
720,552 |
ete3.evol.evoltree
|
link_to_evol_model
|
link EvolTree to evolutionary model
* free-branch model ("fb") will append evol values to tree
* Site models (M0, M1, M2, M7, M8) will give evol values by site
and likelihood
:argument path: path to outfile containing model computation result
:argument model: either the name of a model, or a Model object (usually empty)
|
def link_to_evol_model(self, path, model):
'''
link EvolTree to evolutionary model
* free-branch model ("fb") will append evol values to tree
* Site models (M0, M1, M2, M7, M8) will give evol values by site
and likelihood
:argument path: path to outfile containing model computation result
:argument model: either the name of a model, or a Model object (usually empty)
'''
if isinstance(model, str):
model = Model(model, self, path)
else:
model._load(path)
# new entry in _models dict
while model.name in self._models:
model.name = model.name.split('__')[0] + str(
(int(model.name.split('__')[1]) + 1)
if '__' in model.name else 0)
self._models[model.name] = model
if not os.path.isfile(path):
warn("ERROR: not a file: " + path)
return 1
if len(self._models) == 1 and model.properties['exec'] == 'codeml':
self.change_dist_to_evol('bL', model, fill=True)
|
(self, path, model)
|
720,553 |
ete3.evol.evoltree
|
mark_tree
|
function to mark branches on the tree so that paml can interpret it.
takes a "marks" argument that should be a list like ['#1', '#1', '#2']
e.g.:
::
t.mark_tree([2, 3], marks=["#1", "#2"])
:argument node_ids: list of node ids (have a look to node.node_id)
:argument False verbose: warn if marks do not correspond to codeml standard
:argument kargs: mainly for the marks key-word which needs a list of marks (marks=['#1', '#2'])
|
def mark_tree(self, node_ids, verbose=False, **kargs):
'''
function to mark branches on the tree so that paml can interpret it.
takes a "marks" argument that should be a list like ['#1', '#1', '#2']
e.g.:
::
t.mark_tree([2, 3], marks=["#1", "#2"])
:argument node_ids: list of node ids (have a look to node.node_id)
:argument False verbose: warn if marks do not correspond to codeml standard
:argument kargs: mainly for the marks key-word which needs a list of marks (marks=['#1', '#2'])
'''
from re import match
node_ids = list(map(int, node_ids))
if 'marks' in kargs:
marks = list(kargs['marks'])
else:
marks = ['#1']*len(node_ids)
for node in self.traverse():
if not hasattr(node, 'node_id'):
continue
if node.node_id in node_ids:
if ('.' in marks[node_ids.index(node.node_id)] or
match('#[0-9]+',
marks[node_ids.index(node.node_id)]) is None) and verbose:
warn('WARNING: marks should be "#" sign directly ' +
'followed by integer\n' + self.mark_tree.__doc__)
node.add_feature(
'mark', ' '+marks[node_ids.index(node.node_id)])
elif not 'mark' in node.features:
node.add_feature('mark', '')
|
(self, node_ids, verbose=False, **kargs)
|
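A short sketch of marking branches for codeml with mark_tree; node ids are assigned by the PAML labelling done when the tree is created, so the ids used here (2 and 3, two of the leaves) are illustrative:

from ete3 import EvolTree

tree = EvolTree("((seqA,seqB),(seqC,seqD));")
# attach distinct codeml marks to two branches
tree.mark_tree([2, 3], marks=["#1", "#2"])
print(tree.write())   # default format 10 inlines the marks for codeml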
720,554 |
ete3.phylo.phylotree
|
ncbi_compare
| null |
def ncbi_compare(self, autodetect_duplications=True, cached_content=None):
if not cached_content:
cached_content = self.get_cached_content()
cached_species = set([n.species for n in cached_content[self]])
if len(cached_species) != len(cached_content[self]):
print(cached_species)
ntrees, ndups, target_trees = self.get_speciation_trees(autodetect_duplications=autodetect_duplications, map_features=["taxid"])
else:
target_trees = [self]
ncbi = NCBITaxa()
for t in target_trees:
ncbi.get_broken_branches(t, cached_content)
|
(self, autodetect_duplications=True, cached_content=None)
|
720,558 |
ete3.phylo.phylotree
|
reconcile
|
Returns the reconciled topology with the provided species
tree, and a list of evolutionary events inferred from such
reconciliation.
|
def reconcile(self, species_tree):
""" Returns the reconcilied topology with the provided species
tree, and a list of evolutionary events inferred from such
reconciliation. """
return get_reconciled_tree(self, species_tree, [])
|
(self, species_tree)
|
720,561 |
ete3.evol.evoltree
|
render
|
call super render adding up and down faces
:argument layout: a layout function
:argument None tree_style: tree_style object
:argument None histfaces: a histogram face function. This is only to plot selective pressure among sites
|
def render(self, file_name, layout=None, w=None, h=None,
tree_style=None, header=None, histfaces=None):
'''
call super render adding up and down faces
:argument layout: a layout function
:argument None tree_style: tree_style object
:argument None histfaces: a histogram face function. This is only to plot selective pressure among sites
'''
if TREEVIEW:
if not tree_style:
ts = TreeStyle()
else:
ts = tree_style
if histfaces:
for hist in histfaces:
try:
mdl = self.get_evol_model(hist)
except AttributeError:
warn('model %s not computed' % (hist))
if not 'histface' in mdl.properties:
if len(histfaces) > 1 and histfaces.index(hist) != 0:
mdl.set_histface(up=False)
else:
mdl.set_histface()
if mdl.properties['histface'].up:
ts.aligned_header.add_face(
mdl.properties['histface'], 1)
else:
ts.aligned_foot.add_face(
mdl.properties['histface'], 1)
return super(EvolTree, self).render(file_name, layout=layout,
tree_style=ts,
w=w, h=h)
else:
raise ValueError("Treeview module is disabled")
|
(self, file_name, layout=None, w=None, h=None, tree_style=None, header=None, histfaces=None)
|
720,564 |
ete3.evol.evoltree
|
run_model
|
To compute evolutionary models. e.g.: 'b_free_lala.vs.lele' will launch one free-branch model and store
it in the "WORK_DIR/b_free_lala.vs.lele" directory
WARNING: this functionality needs to create a working directory in "rep"
WARNING: you need to have codeml and/or SLR in your path
The models available are:
=========== ============================= ==================
Model name Description Model kind
=========== ============================= ==================
M1 relaxation site
M10 beta and gamma + 1 site
M11 beta and normal > 1 site
M12 0 and 2 normal > 2 site
M13 3 normal > 0 site
M2 positive-selection site
M3 discrete site
M4 frequencies site
M5 gamma site
M6 2 gamma site
M7 relaxation site
M8 positive-selection site
M8a relaxation site
M9 beta and gamma site
SLR positive/negative selection site
M0 negative-selection null
fb_anc free-ratios branch_ancestor
bsA positive-selection branch-site
bsA1 relaxation branch-site
bsB positive-selection branch-site
bsC different-ratios branch-site
bsD different-ratios branch-site
b_free positive-selection branch
b_neut relaxation branch
fb free-ratios branch
XX User defined Unknown
=========== ============================= ==================
**Note that M1 and M2 models are making reference to the new versions
of these models, with continuous omega rates (namely M1a and M2a in the
PAML user guide).**
:argument model_name: a string like "model-name[.some-secondary-name]" (e.g.: "fb.my_first_try", or just "fb")
* model-name is compulsory, is the name of the model (see table above for the full list)
* the second part is accessory, it is to avoid over-writing models with the same name.
:argument ctrl_string: list of parameters that can be used as control file.
:argument True keep: links the model to the tree (equivalent of running `EVOL_TREE.link_to_evol_model(MODEL_NAME)`)
:argument kwargs: extra parameters should be one of: seqfile, treefile, outfile, noisy, verbose, runmode, seqtype, CodonFreq, clock, aaDist, model, NSsites, icode, Mgene, fix_kappa, kappa, ndata, fix_omega, omega, fix_alpha, alpha, Malpha, ncatG, getSE, RateAncestor, fix_blength, Small_Diff, cleandata, method.
|
def run_model(self, model_name, ctrl_string='', keep=True, **kwargs):
'''
To compute evolutionary models. e.g.: 'b_free_lala.vs.lele' will launch one free-branch model and store
it in the "WORK_DIR/b_free_lala.vs.lele" directory
WARNING: this functionality needs to create a working directory in "rep"
WARNING: you need to have codeml and/or SLR in your path
The models available are:
=========== ============================= ==================
Model name Description Model kind
=========== ============================= ==================\n%s
=========== ============================= ==================\n
**Note that M1 and M2 models are making reference to the new versions
of these models, with continuous omega rates (namely M1a and M2a in the
PAML user guide).**
:argument model_name: a string like "model-name[.some-secondary-name]" (e.g.: "fb.my_first_try", or just "fb")
* model-name is compulsory, is the name of the model (see table above for the full list)
* the second part is accessory, it is to avoid over-writing models with the same name.
:argument ctrl_string: list of parameters that can be used as control file.
:argument True keep: links the model to the tree (equivalent of running `EVOL_TREE.link_to_evol_model(MODEL_NAME)`)
:argument kwargs: extra parameters should be one of: %s.
'''
from subprocess import Popen, PIPE
model_obj = Model(model_name, self, **kwargs)
fullpath = os.path.join(self.workdir, model_obj.name)
os.system("mkdir -p %s" % fullpath)
# write tree file
self._write_algn(fullpath + '/algn')
if model_obj.properties['exec'] == 'Slr':
self.write(outfile=fullpath+'/tree', format=(11))
else:
self.write(outfile=fullpath+'/tree',
format=(10 if model_obj.properties['allow_mark'] else 9))
# write algn file
# MODEL MODEL MDE
if ctrl_string == '':
ctrl_string = model_obj.get_ctrl_string(fullpath+'/tmp.ctl')
else:
open(fullpath+'/tmp.ctl', 'w').write(ctrl_string)
hlddir = os.getcwd()
os.chdir(fullpath)
bin_ = os.path.join(self.execpath, model_obj.properties['exec'])
try:
proc = Popen([bin_, 'tmp.ctl'], stdout=PIPE,
stdin=PIPE, stderr=PIPE)
except OSError:
raise Exception(('ERROR: {} not installed, ' +
'or wrong path to binary\n').format(bin_))
# send \n via stdin in case codeml/slr asks something (note on py3, stdin needs bytes)
run, err = proc.communicate(b'\n')
run = run.decode(sys.stdout.encoding)
os.chdir(hlddir)
if err:
warn("ERROR: inside codeml!!\n" + err.decode())
return 1
if keep:
setattr(model_obj, 'run', run)
self.link_to_evol_model(os.path.join(fullpath, 'out'), model_obj)
|
(self, model_name, ctrl_string='', keep=True, **kwargs)
|
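A sketch of running a branch model with run_model as documented above; codeml must be reachable (via binpath or PATH), the data is hypothetical, and results are written under tree.workdir/<model name>:

from ete3 import EvolTree

tree = EvolTree("((seqA,seqB),(seqC,seqD));")
alg = ">seqA\nATGGCT\n>seqB\nATGGCT\n>seqC\nATGGCA\n>seqD\nATGGCG\n"
tree.link_to_alignment(alg, alg_format="fasta")
tree.workdir = "/tmp/ete3-run-example/"
tree.run_model("fb.example_run")     # free-ratios branch model; the suffix only names this run
fb = tree.get_evol_model("fb.example_run")
print(fb.lnL)                        # log-likelihood parsed from the codeml output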
720,567 |
ete3.phylo.phylotree
|
set_species_naming_function
|
Sets the parsing function used to extract species name from a
node's name.
:argument fn: Pointer to a parsing python function that
receives nodename as first argument and returns the species
name.
::
# Example of a parsing function to extract species names for
# all nodes in a given tree.
def parse_sp_name(node_name):
return node_name.split("_")[1]
tree.set_species_naming_function(parse_sp_name)
|
def set_species_naming_function(self, fn):
"""
Sets the parsing function used to extract species name from a
node's name.
:argument fn: Pointer to a parsing python function that
receives nodename as first argument and returns the species
name.
::
# Example of a parsing function to extract species names for
# all nodes in a given tree.
def parse_sp_name(node_name):
return node_name.split("_")[1]
tree.set_species_naming_function(parse_sp_name)
"""
if fn:
for n in self.traverse():
n._speciesFunction = fn
if n.is_leaf():
n.features.add("species")
|
(self, fn)
|
720,569 |
ete3.evol.evoltree
|
show
|
call super show of PhyloTree
histfaces should be a list of models to be displayed as histfaces
:argument layout: a layout function
:argument None tree_style: tree_style object
:argument None histfaces: a histogram face function. This is only to plot selective pressure among sites
|
def show(self, layout=None, tree_style=None, histfaces=None):
'''
call super show of PhyloTree
histfaces should be a list of models to be displayed as histfaces
:argument layout: a layout function
:argument None tree_style: tree_style object
:argument None histfaces: a histogram face function. This is only to plot selective pressure among sites
'''
if TREEVIEW:
if not tree_style:
ts = TreeStyle()
else:
ts = tree_style
if histfaces:
for hist in histfaces:
try:
mdl = self.get_evol_model(hist)
except AttributeError:
warn('model %s not computed' % (hist))
if not 'histface' in mdl.properties:
if len(histfaces) > 1 and histfaces.index(hist) != 0:
mdl.set_histface(up=False)
else:
mdl.set_histface()
if mdl.properties['histface'].up:
ts.aligned_header.add_face(
mdl.properties['histface'], 1)
else:
ts.aligned_foot.add_face(
mdl.properties['histface'], 1)
super(EvolTree, self).show(layout=layout, tree_style=ts)
else:
raise ValueError("Treeview module is disabled")
|
(self, layout=None, tree_style=None, histfaces=None)
|
720,571 |
ete3.phylo.phylotree
|
split_by_dups
|
.. versionadded: 2.2
Returns the list of all subtrees resulting from splitting
current tree by its duplication nodes.
:argument True autodetect_duplications: If True, duplication
nodes will be automatically detected using the Species Overlap
algorithm (:func:`PhyloNode.get_descendant_evol_events`). If
False, duplication nodes within the original tree are expected
to contain the feature "evoltype=D".
:returns: species_trees
|
def split_by_dups(self, autodetect_duplications=True):
"""
.. versionadded: 2.2
Returns the list of all subtrees resulting from splitting
current tree by its duplication nodes.
:argument True autodetect_duplications: If True, duplication
nodes will be automatically detected using the Species Overlap
algorithm (:func:`PhyloNode.get_descendant_evol_events`). If
False, duplication nodes within the original tree are expected
to contain the feature "evoltype=D".
:returns: species_trees
"""
try:
t = self.copy()
except Exception:
t = self.copy("deepcopy")
if autodetect_duplications:
dups = 0
#n2content, n2species = t.get_node2species()
n2content = t.get_cached_content()
n2species = t.get_cached_content(store_attr="species")
#print "Detecting dups"
for node in n2content:
sp_subtotal = sum([len(n2species[_ch]) for _ch in node.children])
if len(n2species[node]) > 1 and len(n2species[node]) != sp_subtotal:
node.add_features(evoltype="D")
dups += 1
elif node.is_leaf():
node._leaf = True
#print dups
else:
for node in t.iter_leaves():
node._leaf = True
sp_trees = get_subparts(t)
return sp_trees
|
(self, autodetect_duplications=True)
|
720,576 |
ete3.evol.evoltree
|
write
|
Inherits from Tree but adds a tenth format, which allows writing marks for CodeML.
TODO: the internal writing format needs to be something like 0
Returns the newick representation of current node. Several
arguments control the way in which extra data is shown for
every node:
:argument features: a list of feature names to be exported
using the Extended Newick Format (i.e. features=["name",
"dist"]). Use an empty list to export all available features
in each node (features=[])
:argument outfile: writes the output to a given file
:argument 10 format: defines the newick standard used to encode the
tree. See tutorial for details.
:argument False format_root_node: If True, it allows features
and branch information from root node to be exported as a
part of the newick text string. For newick compatibility
reasons, this is False by default.
:argument is_leaf_fn: See :func:`TreeNode.traverse` for
documentation.
**Example:**
::
t.write(features=["species","name"], format=1)
|
def write(self, features=None, outfile=None, format=10):
"""
Inherits from Tree but adds a tenth format, which allows writing marks for CodeML.
TODO: the internal writing format needs to be something like 0
"""
from re import sub
if int(format) == 11:
nwk = ' %s 1\n' % (len(self))
nwk += sub(r'\[&&NHX:mark=([ #0-9.]*)\]', r'\1',
write_newick(self, features=['mark'], format=9))
elif int(format) == 10:
nwk = sub(r'\[&&NHX:mark=([ #0-9.]*)\]', r'\1',
write_newick(self, features=['mark'], format=9))
else:
nwk = write_newick(self, features=features, format=format)
if outfile is not None:
with open(outfile, "w") as OUT:
OUT.write(nwk)
return nwk
else:
return nwk
|
(self, features=None, outfile=None, format=10)
|
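A short sketch of the format=10 behaviour documented above; the toy topology, node id and mark value are illustrative::

    from ete3 import EvolTree
    t = EvolTree("((seqA,seqB),seqC);")
    t.mark_tree([2], marks=["#1"])   # flag one branch for a branch model
    print(t.write(format=10))        # newick where NHX mark tags become plain ' #1' marks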
720,701 |
ete3.ncbi_taxonomy.ncbiquery
|
NCBITaxa
|
versionadded: 2.3
Provides a local transparent connector to the NCBI taxonomy database.
|
class NCBITaxa(object):
"""
versionadded: 2.3
Provides a local transparent connector to the NCBI taxonomy database.
"""
def __init__(self, dbfile=None, taxdump_file=None, update=True):
if not dbfile:
self.dbfile = DEFAULT_TAXADB
else:
self.dbfile = dbfile
if taxdump_file:
self.update_taxonomy_database(taxdump_file)
if dbfile != DEFAULT_TAXADB and not os.path.exists(self.dbfile):
print('NCBI database not present yet (first time used?)', file=sys.stderr)
self.update_taxonomy_database(taxdump_file)
if not os.path.exists(self.dbfile):
raise ValueError("Cannot open taxonomy database: %s" % self.dbfile)
self.db = None
self._connect()
if not is_taxadb_up_to_date(self.dbfile) and update:
print('NCBI database format is outdated. Upgrading', file=sys.stderr)
self.update_taxonomy_database(taxdump_file)
def update_taxonomy_database(self, taxdump_file=None):
"""Updates the ncbi taxonomy database by downloading and parsing the latest
taxdump.tar.gz file from the NCBI FTP site (via HTTP).
:param None taxdump_file: an alternative location of the taxdump.tar.gz file.
"""
if not taxdump_file:
update_db(self.dbfile)
else:
update_db(self.dbfile, taxdump_file)
def _connect(self):
self.db = sqlite3.connect(self.dbfile)
def _translate_merged(self, all_taxids):
conv_all_taxids = set((list(map(int, all_taxids))))
cmd = 'select taxid_old, taxid_new FROM merged WHERE taxid_old IN (%s)' %','.join(map(str, all_taxids))
result = self.db.execute(cmd)
conversion = {}
for old, new in result.fetchall():
conv_all_taxids.discard(int(old))
conv_all_taxids.add(int(new))
conversion[int(old)] = int(new)
return conv_all_taxids, conversion
def get_fuzzy_name_translation(self, name, sim=0.9):
'''
Given an inexact species name, returns the best match in the NCBI database of taxa names.
:argument 0.9 sim: Min word similarity to report a match (from 0 to 1).
:return: taxid, species-name-match, match-score
'''
import sqlite3.dbapi2 as dbapi2
_db = dbapi2.connect(self.dbfile)
_db.enable_load_extension(True)
module_path = os.path.split(os.path.realpath(__file__))[0]
_db.execute("select load_extension('%s')" % os.path.join(module_path,
"SQLite-Levenshtein/levenshtein.sqlext"))
print("Trying fuzzy search for %s" % name)
maxdiffs = math.ceil(len(name) * (1-sim))
cmd = 'SELECT taxid, spname, LEVENSHTEIN(spname, "%s") AS sim FROM species WHERE sim<=%s ORDER BY sim LIMIT 1;' % (name, maxdiffs)
taxid, spname, score = None, None, len(name)
result = _db.execute(cmd)
try:
taxid, spname, score = result.fetchone()
except TypeError:
cmd = 'SELECT taxid, spname, LEVENSHTEIN(spname, "%s") AS sim FROM synonym WHERE sim<=%s ORDER BY sim LIMIT 1;' % (name, maxdiffs)
result = _db.execute(cmd)
try:
taxid, spname, score = result.fetchone()
except:
pass
else:
taxid = int(taxid)
else:
taxid = int(taxid)
norm_score = 1 - (float(score)/len(name))
if taxid:
print("FOUND! %s taxid:%s score:%s (%s)" %(spname, taxid, score, norm_score))
return taxid, spname, norm_score
def get_rank(self, taxids):
'return a dictionary converting a list of taxids into their corresponding NCBI taxonomy rank'
all_ids = set(taxids)
all_ids.discard(None)
all_ids.discard("")
query = ','.join(['"%s"' %v for v in all_ids])
cmd = "select taxid, rank FROM species WHERE taxid IN (%s);" %query
result = self.db.execute(cmd)
id2rank = {}
for tax, spname in result.fetchall():
id2rank[tax] = spname
return id2rank
def get_lineage_translator(self, taxids):
"""Given a valid taxid number, return its corresponding lineage track as a
hierarchically sorted list of parent taxids.
"""
all_ids = set(taxids)
all_ids.discard(None)
all_ids.discard("")
query = ','.join(['"%s"' %v for v in all_ids])
result = self.db.execute('SELECT taxid, track FROM species WHERE taxid IN (%s);' %query)
id2lineages = {}
for tax, track in result.fetchall():
id2lineages[tax] = list(map(int, reversed(track.split(","))))
return id2lineages
def get_lineage(self, taxid):
"""Given a valid taxid number, return its corresponding lineage track as a
hierarchically sorted list of parent taxids.
"""
if not taxid:
return None
taxid = int(taxid)
result = self.db.execute('SELECT track FROM species WHERE taxid=%s' %taxid)
raw_track = result.fetchone()
if not raw_track:
#perhaps is an obsolete taxid
_, merged_conversion = self._translate_merged([taxid])
if taxid in merged_conversion:
result = self.db.execute('SELECT track FROM species WHERE taxid=%s' %merged_conversion[taxid])
raw_track = result.fetchone()
# if not raise error
if not raw_track:
#raw_track = ["1"]
raise ValueError("%s taxid not found" %taxid)
else:
warnings.warn("taxid %s was translated into %s" %(taxid, merged_conversion[taxid]))
track = list(map(int, raw_track[0].split(",")))
return list(reversed(track))
def get_common_names(self, taxids):
query = ','.join(['"%s"' %v for v in taxids])
cmd = "select taxid, common FROM species WHERE taxid IN (%s);" %query
result = self.db.execute(cmd)
id2name = {}
for tax, common_name in result.fetchall():
if common_name:
id2name[tax] = common_name
return id2name
def get_taxid_translator(self, taxids, try_synonyms=True):
"""Given a list of taxids, returns a dictionary with their corresponding
scientific names.
"""
all_ids = set(map(int, taxids))
all_ids.discard(None)
all_ids.discard("")
query = ','.join(['"%s"' %v for v in all_ids])
cmd = "select taxid, spname FROM species WHERE taxid IN (%s);" %query
result = self.db.execute(cmd)
id2name = {}
for tax, spname in result.fetchall():
id2name[tax] = spname
# any taxid without translation? let's try the merged table
if len(all_ids) != len(id2name) and try_synonyms:
not_found_taxids = all_ids - set(id2name.keys())
taxids, old2new = self._translate_merged(not_found_taxids)
new2old = {v: k for k,v in six.iteritems(old2new)}
if old2new:
query = ','.join(['"%s"' %v for v in new2old])
cmd = "select taxid, spname FROM species WHERE taxid IN (%s);" %query
result = self.db.execute(cmd)
for tax, spname in result.fetchall():
id2name[new2old[tax]] = spname
return id2name
def get_name_translator(self, names):
"""
Given a list of scientific taxa names, returns a dictionary translating them into their corresponding taxids.
Exact name match is required for translation.
"""
name2id = {}
#name2realname = {}
name2origname = {}
for n in names:
name2origname[n.lower()] = n
names = set(name2origname.keys())
query = ','.join(['"%s"' %n for n in six.iterkeys(name2origname)])
cmd = 'select spname, taxid from species where spname IN (%s)' %query
result = self.db.execute('select spname, taxid from species where spname IN (%s)' %query)
for sp, taxid in result.fetchall():
oname = name2origname[sp.lower()]
name2id.setdefault(oname, []).append(taxid)
#name2realname[oname] = sp
missing = names - set([n.lower() for n in name2id.keys()])
if missing:
query = ','.join(['"%s"' %n for n in missing])
result = self.db.execute('select spname, taxid from synonym where spname IN (%s)' %query)
for sp, taxid in result.fetchall():
oname = name2origname[sp.lower()]
name2id.setdefault(oname, []).append(taxid)
#name2realname[oname] = sp
return name2id
def translate_to_names(self, taxids):
"""
Given a list of taxid numbers, returns another list with their corresponding scientific names.
"""
id2name = self.get_taxid_translator(taxids)
names = []
for sp in taxids:
names.append(id2name.get(sp, sp))
return names
def get_descendant_taxa(self, parent, intermediate_nodes=False, rank_limit=None, collapse_subspecies=False, return_tree=False):
"""
Given a parent taxid or scientific species name, returns a list of all its descendant taxids.
If intermediate_nodes is set to True, internal nodes will also be dumped.
"""
try:
taxid = int(parent)
except ValueError:
try:
taxid = self.get_name_translator([parent])[parent][0]
except KeyError:
raise ValueError('%s not found!' %parent)
# checks if taxid is a deprecated one, and converts into the right one.
_, conversion = self._translate_merged([taxid]) #try to find taxid in synonyms table
if conversion:
taxid = conversion[taxid]
with open(self.dbfile+".traverse.pkl", "rb") as CACHED_TRAVERSE:
prepostorder = pickle.load(CACHED_TRAVERSE)
descendants = {}
found = 0
for tid in prepostorder:
if tid == taxid:
found += 1
elif found == 1:
descendants[tid] = descendants.get(tid, 0) + 1
elif found == 2:
break
if not found:
raise ValueError("taxid not found:%s" %taxid)
elif found == 1:
return [taxid]
if rank_limit or collapse_subspecies or return_tree:
tree = self.get_topology(list(descendants.keys()), intermediate_nodes=intermediate_nodes, collapse_subspecies=collapse_subspecies, rank_limit=rank_limit)
if return_tree:
return tree
elif intermediate_nodes:
return list(map(int, [n.name for n in tree.get_descendants()]))
else:
return list(map(int, [n.name for n in tree]))
elif intermediate_nodes:
return [tid for tid, count in six.iteritems(descendants)]
else:
return [tid for tid, count in six.iteritems(descendants) if count == 1]
def get_topology(self, taxids, intermediate_nodes=False, rank_limit=None, collapse_subspecies=False, annotate=True):
"""Given a list of taxid numbers, return the minimal pruned NCBI taxonomy tree
containing all of them.
:param False intermediate_nodes: If True, single child nodes
representing the complete lineage of leaf nodes are kept.
Otherwise, the tree is pruned to contain the first common
ancestor of each group.
:param None rank_limit: If valid NCBI rank name is provided,
the tree is pruned at that given level. For instance, use
rank="species" to get rid of sub-species or strain leaf
nodes.
:param False collapse_subspecies: If True, any item under the
species rank will be collapsed into the species upper
node.
"""
from .. import PhyloTree
taxids, merged_conversion = self._translate_merged(taxids)
if len(taxids) == 1:
root_taxid = int(list(taxids)[0])
with open(self.dbfile+".traverse.pkl", "rb") as CACHED_TRAVERSE:
prepostorder = pickle.load(CACHED_TRAVERSE)
descendants = {}
found = 0
nodes = {}
hit = 0
visited = set()
start = prepostorder.index(root_taxid)
try:
end = prepostorder.index(root_taxid, start+1)
subtree = prepostorder[start:end+1]
except ValueError:
# If root taxid is not found in postorder, must be a tip node
subtree = [root_taxid]
leaves = set([v for v, count in Counter(subtree).items() if count == 1])
nodes[root_taxid] = PhyloTree(name=str(root_taxid))
current_parent = nodes[root_taxid]
for tid in subtree:
if tid in visited:
current_parent = nodes[tid].up
else:
visited.add(tid)
nodes[tid] = PhyloTree(name=str(tid))
current_parent.add_child(nodes[tid])
if tid not in leaves:
current_parent = nodes[tid]
root = nodes[root_taxid]
else:
taxids = set(map(int, taxids))
sp2track = {}
elem2node = {}
id2lineage = self.get_lineage_translator(taxids)
all_taxids = set()
for lineage in id2lineage.values():
all_taxids.update(lineage)
id2rank = self.get_rank(all_taxids)
for sp in taxids:
track = []
lineage = id2lineage[sp]
for elem in lineage:
if elem not in elem2node:
node = elem2node.setdefault(elem, PhyloTree())
node.name = str(elem)
node.taxid = elem
node.add_feature("rank", str(id2rank.get(int(elem), "no rank")))
else:
node = elem2node[elem]
track.append(node)
sp2track[sp] = track
# generate parent child relationships
for sp, track in six.iteritems(sp2track):
parent = None
for elem in track:
if parent and elem not in parent.children:
parent.add_child(elem)
if rank_limit and elem.rank == rank_limit:
break
parent = elem
root = elem2node[1]
#remove onechild-nodes
if not intermediate_nodes:
for n in root.get_descendants():
if len(n.children) == 1 and int(n.name) not in taxids:
n.delete(prevent_nondicotomic=False)
if len(root.children) == 1:
tree = root.children[0].detach()
else:
tree = root
if collapse_subspecies:
to_detach = []
for node in tree.traverse():
if node.rank == "species":
to_detach.extend(node.children)
for n in to_detach:
n.detach()
if annotate:
self.annotate_tree(tree)
return tree
def annotate_tree(self, t, taxid_attr="name", tax2name=None, tax2track=None, tax2rank=None):
"""Annotate a tree containing taxids as leaf names by adding the 'taxid',
'sci_name', 'lineage', 'named_lineage' and 'rank' additional attributes.
:param t: a Tree (or Tree derived) instance.
:param name taxid_attr: Allows to set a custom node attribute
containing the taxid number associated to each node (i.e.
species in PhyloTree instances).
:param tax2name,tax2track,tax2rank: Use these arguments to
provide pre-calculated dictionaries providing translation
from taxid number and names,track lineages and ranks.
"""
taxids = set()
for n in t.traverse():
try:
tid = int(getattr(n, taxid_attr))
except (ValueError,AttributeError):
pass
else:
taxids.add(tid)
merged_conversion = {}
taxids, merged_conversion = self._translate_merged(taxids)
if not tax2name or taxids - set(map(int, list(tax2name.keys()))):
tax2name = self.get_taxid_translator(taxids)
if not tax2track or taxids - set(map(int, list(tax2track.keys()))):
tax2track = self.get_lineage_translator(taxids)
all_taxid_codes = set([_tax for _lin in list(tax2track.values()) for _tax in _lin])
extra_tax2name = self.get_taxid_translator(list(all_taxid_codes - set(tax2name.keys())))
tax2name.update(extra_tax2name)
tax2common_name = self.get_common_names(tax2name.keys())
if not tax2rank:
tax2rank = self.get_rank(list(tax2name.keys()))
n2leaves = t.get_cached_content()
for n in t.traverse('postorder'):
try:
node_taxid = int(getattr(n, taxid_attr))
except (ValueError, AttributeError):
node_taxid = None
n.add_features(taxid = node_taxid)
if node_taxid:
if node_taxid in merged_conversion:
node_taxid = merged_conversion[node_taxid]
n.add_features(sci_name = tax2name.get(node_taxid, getattr(n, taxid_attr, '')),
common_name = tax2common_name.get(node_taxid, ''),
lineage = tax2track.get(node_taxid, []),
rank = tax2rank.get(node_taxid, 'Unknown'),
named_lineage = [tax2name.get(tax, str(tax)) for tax in tax2track.get(node_taxid, [])])
elif n.is_leaf():
n.add_features(sci_name = getattr(n, taxid_attr, 'NA'),
common_name = '',
lineage = [],
rank = 'Unknown',
named_lineage = [])
else:
lineage = self._common_lineage([lf.lineage for lf in n2leaves[n]])
ancestor = lineage[-1]
n.add_features(sci_name = tax2name.get(ancestor, str(ancestor)),
common_name = tax2common_name.get(ancestor, ''),
taxid = ancestor,
lineage = lineage,
rank = tax2rank.get(ancestor, 'Unknown'),
named_lineage = [tax2name.get(tax, str(tax)) for tax in lineage])
return tax2name, tax2track, tax2rank
def _common_lineage(self, vectors):
occurrence = defaultdict(int)
pos = defaultdict(set)
for v in vectors:
for i, taxid in enumerate(v):
occurrence[taxid] += 1
pos[taxid].add(i)
common = [taxid for taxid, ocu in six.iteritems(occurrence) if ocu == len(vectors)]
if not common:
return [""]
else:
sorted_lineage = sorted(common, key=lambda x: min(pos[x]))
return sorted_lineage
# OLD APPROACH:
# visited = defaultdict(int)
# for index, name in [(ei, e) for v in vectors for ei, e in enumerate(v)]:
# visited[(name, index)] += 1
# def _sort(a, b):
# if a[1] > b[1]:
# return 1
# elif a[1] < b[1]:
# return -1
# else:
# if a[0][1] > b[0][1]:
# return 1
# elif a[0][1] < b[0][1]:
# return -1
# return 0
# matches = sorted(visited.items(), _sort)
# if matches:
# best_match = matches[-1]
# else:
# return "", set()
# if best_match[1] != len(vectors):
# return "", set()
# else:
# return best_match[0][0], [m[0][0] for m in matches if m[1] == len(vectors)]
def get_broken_branches(self, t, taxa_lineages, n2content=None):
"""Returns a list of NCBI lineage names that are not monophyletic in the
provided tree, as well as the list of affected branches and their size.
CURRENTLY EXPERIMENTAL
"""
if not n2content:
n2content = t.get_cached_content()
tax2node = defaultdict(set)
unknown = set()
for leaf in t.iter_leaves():
if leaf.sci_name.lower() != "unknown":
lineage = taxa_lineages[leaf.taxid]
for index, tax in enumerate(lineage):
tax2node[tax].add(leaf)
else:
unknown.add(leaf)
broken_branches = defaultdict(set)
broken_clades = set()
for tax, leaves in six.iteritems(tax2node):
if len(leaves) > 1:
common = t.get_common_ancestor(leaves)
else:
common = list(leaves)[0]
if (leaves ^ set(n2content[common])) - unknown:
broken_branches[common].add(tax)
broken_clades.add(tax)
broken_clade_sizes = [len(tax2node[tax]) for tax in broken_clades]
return broken_branches, broken_clades, broken_clade_sizes
# def annotate_tree_with_taxa(self, t, name2taxa_file, tax2name=None, tax2track=None, attr_name="name"):
# if name2taxa_file:
# names2taxid = dict([map(strip, line.split("\t"))
# for line in open(name2taxa_file)])
# else:
# names2taxid = dict([(n.name, getattr(n, attr_name)) for n in t.iter_leaves()])
# not_found = 0
# for n in t.iter_leaves():
# n.add_features(taxid=names2taxid.get(n.name, 0))
# n.add_features(species=n.taxid)
# if n.taxid == 0:
# not_found += 1
# if not_found:
# print >>sys.stderr, "WARNING: %s nodes where not found within NCBI taxonomy!!" %not_found
# return self.annotate_tree(t, tax2name, tax2track, attr_name="taxid")
|
(dbfile=None, taxdump_file=None, update=True)
|
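A minimal usage sketch of the connector above; the first instantiation may download and build the local sqlite database, and the taxids shown are the standard NCBI identifiers for human and primates::

    from ete3 import NCBITaxa
    ncbi = NCBITaxa()
    print(ncbi.get_taxid_translator([9606, 9443]))  # {9606: 'Homo sapiens', 9443: 'Primates'}
    print(ncbi.get_rank(ncbi.get_lineage(9606)))    # rank of every ancestor of human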
720,702 |
ete3.ncbi_taxonomy.ncbiquery
|
__init__
| null |
def __init__(self, dbfile=None, taxdump_file=None, update=True):
if not dbfile:
self.dbfile = DEFAULT_TAXADB
else:
self.dbfile = dbfile
if taxdump_file:
self.update_taxonomy_database(taxdump_file)
if dbfile != DEFAULT_TAXADB and not os.path.exists(self.dbfile):
print('NCBI database not present yet (first time used?)', file=sys.stderr)
self.update_taxonomy_database(taxdump_file)
if not os.path.exists(self.dbfile):
raise ValueError("Cannot open taxonomy database: %s" % self.dbfile)
self.db = None
self._connect()
if not is_taxadb_up_to_date(self.dbfile) and update:
print('NCBI database format is outdated. Upgrading', file=sys.stderr)
self.update_taxonomy_database(taxdump_file)
|
(self, dbfile=None, taxdump_file=None, update=True)
|
720,703 |
ete3.ncbi_taxonomy.ncbiquery
|
_common_lineage
| null |
def _common_lineage(self, vectors):
occurrence = defaultdict(int)
pos = defaultdict(set)
for v in vectors:
for i, taxid in enumerate(v):
occurrence[taxid] += 1
pos[taxid].add(i)
common = [taxid for taxid, ocu in six.iteritems(occurrence) if ocu == len(vectors)]
if not common:
return [""]
else:
sorted_lineage = sorted(common, key=lambda x: min(pos[x]))
return sorted_lineage
# OLD APPROACH:
# visited = defaultdict(int)
# for index, name in [(ei, e) for v in vectors for ei, e in enumerate(v)]:
# visited[(name, index)] += 1
# def _sort(a, b):
# if a[1] > b[1]:
# return 1
# elif a[1] < b[1]:
# return -1
# else:
# if a[0][1] > b[0][1]:
# return 1
# elif a[0][1] < b[0][1]:
# return -1
# return 0
# matches = sorted(visited.items(), _sort)
# if matches:
# best_match = matches[-1]
# else:
# return "", set()
# if best_match[1] != len(vectors):
# return "", set()
# else:
# return best_match[0][0], [m[0][0] for m in matches if m[1] == len(vectors)]
|
(self, vectors)
|
720,704 |
ete3.ncbi_taxonomy.ncbiquery
|
_connect
| null |
def _connect(self):
self.db = sqlite3.connect(self.dbfile)
|
(self)
|
720,705 |
ete3.ncbi_taxonomy.ncbiquery
|
_translate_merged
| null |
def _translate_merged(self, all_taxids):
conv_all_taxids = set((list(map(int, all_taxids))))
cmd = 'select taxid_old, taxid_new FROM merged WHERE taxid_old IN (%s)' %','.join(map(str, all_taxids))
result = self.db.execute(cmd)
conversion = {}
for old, new in result.fetchall():
conv_all_taxids.discard(int(old))
conv_all_taxids.add(int(new))
conversion[int(old)] = int(new)
return conv_all_taxids, conversion
|
(self, all_taxids)
|
720,706 |
ete3.ncbi_taxonomy.ncbiquery
|
annotate_tree
|
Annotate a tree containing taxids as leaf names by adding the 'taxid',
'sci_name', 'lineage', 'named_lineage' and 'rank' additional attributes.
:param t: a Tree (or Tree derived) instance.
:param name taxid_attr: Allows to set a custom node attribute
containing the taxid number associated to each node (i.e.
species in PhyloTree instances).
:param tax2name,tax2track,tax2rank: Use these arguments to
provide pre-calculated dictionaries providing translation
from taxid number and names,track lineages and ranks.
|
def annotate_tree(self, t, taxid_attr="name", tax2name=None, tax2track=None, tax2rank=None):
"""Annotate a tree containing taxids as leaf names by adding the 'taxid',
'sci_name', 'lineage', 'named_lineage' and 'rank' additional attributes.
:param t: a Tree (or Tree derived) instance.
:param name taxid_attr: Allows to set a custom node attribute
containing the taxid number associated to each node (i.e.
species in PhyloTree instances).
:param tax2name,tax2track,tax2rank: Use these arguments to
provide pre-calculated dictionaries providing translation
from taxid number and names,track lineages and ranks.
"""
taxids = set()
for n in t.traverse():
try:
tid = int(getattr(n, taxid_attr))
except (ValueError,AttributeError):
pass
else:
taxids.add(tid)
merged_conversion = {}
taxids, merged_conversion = self._translate_merged(taxids)
if not tax2name or taxids - set(map(int, list(tax2name.keys()))):
tax2name = self.get_taxid_translator(taxids)
if not tax2track or taxids - set(map(int, list(tax2track.keys()))):
tax2track = self.get_lineage_translator(taxids)
all_taxid_codes = set([_tax for _lin in list(tax2track.values()) for _tax in _lin])
extra_tax2name = self.get_taxid_translator(list(all_taxid_codes - set(tax2name.keys())))
tax2name.update(extra_tax2name)
tax2common_name = self.get_common_names(tax2name.keys())
if not tax2rank:
tax2rank = self.get_rank(list(tax2name.keys()))
n2leaves = t.get_cached_content()
for n in t.traverse('postorder'):
try:
node_taxid = int(getattr(n, taxid_attr))
except (ValueError, AttributeError):
node_taxid = None
n.add_features(taxid = node_taxid)
if node_taxid:
if node_taxid in merged_conversion:
node_taxid = merged_conversion[node_taxid]
n.add_features(sci_name = tax2name.get(node_taxid, getattr(n, taxid_attr, '')),
common_name = tax2common_name.get(node_taxid, ''),
lineage = tax2track.get(node_taxid, []),
rank = tax2rank.get(node_taxid, 'Unknown'),
named_lineage = [tax2name.get(tax, str(tax)) for tax in tax2track.get(node_taxid, [])])
elif n.is_leaf():
n.add_features(sci_name = getattr(n, taxid_attr, 'NA'),
common_name = '',
lineage = [],
rank = 'Unknown',
named_lineage = [])
else:
lineage = self._common_lineage([lf.lineage for lf in n2leaves[n]])
ancestor = lineage[-1]
n.add_features(sci_name = tax2name.get(ancestor, str(ancestor)),
common_name = tax2common_name.get(ancestor, ''),
taxid = ancestor,
lineage = lineage,
rank = tax2rank.get(ancestor, 'Unknown'),
named_lineage = [tax2name.get(tax, str(tax)) for tax in lineage])
return tax2name, tax2track, tax2rank
|
(self, t, taxid_attr='name', tax2name=None, tax2track=None, tax2rank=None)
|
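A hedged sketch of annotate_tree; the toy topology uses NCBI taxids as leaf names, which is what the default taxid_attr="name" expects::

    from ete3 import Tree, NCBITaxa
    ncbi = NCBITaxa()
    tree = Tree("((9606,9598),10090);")
    ncbi.annotate_tree(tree, taxid_attr="name")
    print(tree.search_nodes(name="9606")[0].sci_name)   # expected: Homo sapiens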
720,707 |
ete3.ncbi_taxonomy.ncbiquery
|
get_broken_branches
|
Returns a list of NCBI lineage names that are not monophyletic in the
provided tree, as well as the list of affected branches and their size.
CURRENTLY EXPERIMENTAL
|
def get_broken_branches(self, t, taxa_lineages, n2content=None):
"""Returns a list of NCBI lineage names that are not monophyletic in the
provided tree, as well as the list of affected branches and their size.
CURRENTLY EXPERIMENTAL
"""
if not n2content:
n2content = t.get_cached_content()
tax2node = defaultdict(set)
unknown = set()
for leaf in t.iter_leaves():
if leaf.sci_name.lower() != "unknown":
lineage = taxa_lineages[leaf.taxid]
for index, tax in enumerate(lineage):
tax2node[tax].add(leaf)
else:
unknown.add(leaf)
broken_branches = defaultdict(set)
broken_clades = set()
for tax, leaves in six.iteritems(tax2node):
if len(leaves) > 1:
common = t.get_common_ancestor(leaves)
else:
common = list(leaves)[0]
if (leaves ^ set(n2content[common])) - unknown:
broken_branches[common].add(tax)
broken_clades.add(tax)
broken_clade_sizes = [len(tax2node[tax]) for tax in broken_clades]
return broken_branches, broken_clades, broken_clade_sizes
|
(self, t, taxa_lineages, n2content=None)
|
720,708 |
ete3.ncbi_taxonomy.ncbiquery
|
get_common_names
| null |
def get_common_names(self, taxids):
query = ','.join(['"%s"' %v for v in taxids])
cmd = "select taxid, common FROM species WHERE taxid IN (%s);" %query
result = self.db.execute(cmd)
id2name = {}
for tax, common_name in result.fetchall():
if common_name:
id2name[tax] = common_name
return id2name
|
(self, taxids)
|
720,709 |
ete3.ncbi_taxonomy.ncbiquery
|
get_descendant_taxa
|
Given a parent taxid or scientific species name, returns a list of all its descendant taxids.
If intermediate_nodes is set to True, internal nodes will also be dumped.
|
def get_descendant_taxa(self, parent, intermediate_nodes=False, rank_limit=None, collapse_subspecies=False, return_tree=False):
"""
Given a parent taxid or scientific species name, returns a list of all its descendant taxids.
If intermediate_nodes is set to True, internal nodes will also be dumped.
"""
try:
taxid = int(parent)
except ValueError:
try:
taxid = self.get_name_translator([parent])[parent][0]
except KeyError:
raise ValueError('%s not found!' %parent)
# checks if taxid is a deprecated one, and converts into the right one.
_, conversion = self._translate_merged([taxid]) #try to find taxid in synonyms table
if conversion:
taxid = conversion[taxid]
with open(self.dbfile+".traverse.pkl", "rb") as CACHED_TRAVERSE:
prepostorder = pickle.load(CACHED_TRAVERSE)
descendants = {}
found = 0
for tid in prepostorder:
if tid == taxid:
found += 1
elif found == 1:
descendants[tid] = descendants.get(tid, 0) + 1
elif found == 2:
break
if not found:
raise ValueError("taxid not found:%s" %taxid)
elif found == 1:
return [taxid]
if rank_limit or collapse_subspecies or return_tree:
tree = self.get_topology(list(descendants.keys()), intermediate_nodes=intermediate_nodes, collapse_subspecies=collapse_subspecies, rank_limit=rank_limit)
if return_tree:
return tree
elif intermediate_nodes:
return list(map(int, [n.name for n in tree.get_descendants()]))
else:
return list(map(int, [n.name for n in tree]))
elif intermediate_nodes:
return [tid for tid, count in six.iteritems(descendants)]
else:
return [tid for tid, count in six.iteritems(descendants) if count == 1]
|
(self, parent, intermediate_nodes=False, rank_limit=None, collapse_subspecies=False, return_tree=False)
|
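Usage sketch; the query name is illustrative and the exact results depend on the local database version::

    from ete3 import NCBITaxa
    ncbi = NCBITaxa()
    descendants = ncbi.get_descendant_taxa("Homo", collapse_subspecies=True)
    print(ncbi.translate_to_names(descendants))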
720,710 |
ete3.ncbi_taxonomy.ncbiquery
|
get_fuzzy_name_translation
|
Given an inexact species name, returns the best match in the NCBI database of taxa names.
:argument 0.9 sim: Min word similarity to report a match (from 0 to 1).
:return: taxid, species-name-match, match-score
|
def get_fuzzy_name_translation(self, name, sim=0.9):
'''
Given an inexact species name, returns the best match in the NCBI database of taxa names.
:argument 0.9 sim: Min word similarity to report a match (from 0 to 1).
:return: taxid, species-name-match, match-score
'''
import sqlite3.dbapi2 as dbapi2
_db = dbapi2.connect(self.dbfile)
_db.enable_load_extension(True)
module_path = os.path.split(os.path.realpath(__file__))[0]
_db.execute("select load_extension('%s')" % os.path.join(module_path,
"SQLite-Levenshtein/levenshtein.sqlext"))
print("Trying fuzzy search for %s" % name)
maxdiffs = math.ceil(len(name) * (1-sim))
cmd = 'SELECT taxid, spname, LEVENSHTEIN(spname, "%s") AS sim FROM species WHERE sim<=%s ORDER BY sim LIMIT 1;' % (name, maxdiffs)
taxid, spname, score = None, None, len(name)
result = _db.execute(cmd)
try:
taxid, spname, score = result.fetchone()
except TypeError:
cmd = 'SELECT taxid, spname, LEVENSHTEIN(spname, "%s") AS sim FROM synonym WHERE sim<=%s ORDER BY sim LIMIT 1;' % (name, maxdiffs)
result = _db.execute(cmd)
try:
taxid, spname, score = result.fetchone()
except:
pass
else:
taxid = int(taxid)
else:
taxid = int(taxid)
norm_score = 1 - (float(score)/len(name))
if taxid:
print("FOUND! %s taxid:%s score:%s (%s)" %(spname, taxid, score, norm_score))
return taxid, spname, norm_score
|
(self, name, sim=0.9)
|
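A hedged sketch; note that this call needs the bundled SQLite Levenshtein extension to be loadable on the running platform::

    from ete3 import NCBITaxa
    ncbi = NCBITaxa()
    taxid, matched_name, score = ncbi.get_fuzzy_name_translation("Homo sapien", sim=0.85)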
720,711 |
ete3.ncbi_taxonomy.ncbiquery
|
get_lineage
|
Given a valid taxid number, return its corresponding lineage track as a
hierarchically sorted list of parent taxids.
|
def get_lineage(self, taxid):
"""Given a valid taxid number, return its corresponding lineage track as a
hierarchically sorted list of parent taxids.
"""
if not taxid:
return None
taxid = int(taxid)
result = self.db.execute('SELECT track FROM species WHERE taxid=%s' %taxid)
raw_track = result.fetchone()
if not raw_track:
#perhaps is an obsolete taxid
_, merged_conversion = self._translate_merged([taxid])
if taxid in merged_conversion:
result = self.db.execute('SELECT track FROM species WHERE taxid=%s' %merged_conversion[taxid])
raw_track = result.fetchone()
# if not raise error
if not raw_track:
#raw_track = ["1"]
raise ValueError("%s taxid not found" %taxid)
else:
warnings.warn("taxid %s was translated into %s" %(taxid, merged_conversion[taxid]))
track = list(map(int, raw_track[0].split(",")))
return list(reversed(track))
|
(self, taxid)
|
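Usage sketch (9606 is the NCBI taxid for Homo sapiens)::

    from ete3 import NCBITaxa
    ncbi = NCBITaxa()
    lineage = ncbi.get_lineage(9606)            # root-to-leaf list of ancestor taxids
    print(ncbi.get_taxid_translator(lineage))   # scientific name of every ancestor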
720,712 |
ete3.ncbi_taxonomy.ncbiquery
|
get_lineage_translator
|
Given a list of valid taxid numbers, return a dictionary mapping each taxid
to its lineage track, a hierarchically sorted list of parent taxids.
|
def get_lineage_translator(self, taxids):
"""Given a valid taxid number, return its corresponding lineage track as a
hierarchically sorted list of parent taxids.
"""
all_ids = set(taxids)
all_ids.discard(None)
all_ids.discard("")
query = ','.join(['"%s"' %v for v in all_ids])
result = self.db.execute('SELECT taxid, track FROM species WHERE taxid IN (%s);' %query)
id2lineages = {}
for tax, track in result.fetchall():
id2lineages[tax] = list(map(int, reversed(track.split(","))))
return id2lineages
|
(self, taxids)
|
720,713 |
ete3.ncbi_taxonomy.ncbiquery
|
get_name_translator
|
Given a list of scientific taxa names, returns a dictionary translating them into their corresponding taxids.
Exact name match is required for translation.
|
def get_name_translator(self, names):
"""
Given a list of scientific taxa names, returns a dictionary translating them into their corresponding taxids.
Exact name match is required for translation.
"""
name2id = {}
#name2realname = {}
name2origname = {}
for n in names:
name2origname[n.lower()] = n
names = set(name2origname.keys())
query = ','.join(['"%s"' %n for n in six.iterkeys(name2origname)])
cmd = 'select spname, taxid from species where spname IN (%s)' %query
result = self.db.execute('select spname, taxid from species where spname IN (%s)' %query)
for sp, taxid in result.fetchall():
oname = name2origname[sp.lower()]
name2id.setdefault(oname, []).append(taxid)
#name2realname[oname] = sp
missing = names - set([n.lower() for n in name2id.keys()])
if missing:
query = ','.join(['"%s"' %n for n in missing])
result = self.db.execute('select spname, taxid from synonym where spname IN (%s)' %query)
for sp, taxid in result.fetchall():
oname = name2origname[sp.lower()]
name2id.setdefault(oname, []).append(taxid)
#name2realname[oname] = sp
return name2id
|
(self, names)
|
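Usage sketch; exact spelling is required, and each name maps to a list because homonyms can resolve to several taxids::

    from ete3 import NCBITaxa
    ncbi = NCBITaxa()
    print(ncbi.get_name_translator(["Homo sapiens", "Primates"]))  # e.g. {'Homo sapiens': [9606], 'Primates': [9443]}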
720,714 |
ete3.ncbi_taxonomy.ncbiquery
|
get_rank
|
return a dictionary converting a list of taxids into their corresponding NCBI taxonomy rank
|
def get_rank(self, taxids):
'return a dictionary converting a list of taxids into their corresponding NCBI taxonomy rank'
all_ids = set(taxids)
all_ids.discard(None)
all_ids.discard("")
query = ','.join(['"%s"' %v for v in all_ids])
cmd = "select taxid, rank FROM species WHERE taxid IN (%s);" %query
result = self.db.execute(cmd)
id2rank = {}
for tax, spname in result.fetchall():
id2rank[tax] = spname
return id2rank
|
(self, taxids)
|
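Usage sketch (taxids are illustrative NCBI identifiers)::

    from ete3 import NCBITaxa
    ncbi = NCBITaxa()
    print(ncbi.get_rank([9606, 9443]))   # e.g. {9606: 'species', 9443: 'order'}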
720,715 |
ete3.ncbi_taxonomy.ncbiquery
|
get_taxid_translator
|
Given a list of taxids, returns a dictionary with their corresponding
scientific names.
|
def get_taxid_translator(self, taxids, try_synonyms=True):
"""Given a list of taxids, returns a dictionary with their corresponding
scientific names.
"""
all_ids = set(map(int, taxids))
all_ids.discard(None)
all_ids.discard("")
query = ','.join(['"%s"' %v for v in all_ids])
cmd = "select taxid, spname FROM species WHERE taxid IN (%s);" %query
result = self.db.execute(cmd)
id2name = {}
for tax, spname in result.fetchall():
id2name[tax] = spname
# any taxid without translation? let's try the merged table
if len(all_ids) != len(id2name) and try_synonyms:
not_found_taxids = all_ids - set(id2name.keys())
taxids, old2new = self._translate_merged(not_found_taxids)
new2old = {v: k for k,v in six.iteritems(old2new)}
if old2new:
query = ','.join(['"%s"' %v for v in new2old])
cmd = "select taxid, spname FROM species WHERE taxid IN (%s);" %query
result = self.db.execute(cmd)
for tax, spname in result.fetchall():
id2name[new2old[tax]] = spname
return id2name
|
(self, taxids, try_synonyms=True)
|
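Usage sketch; with try_synonyms=True, taxids that were merged into newer ones are also resolved through the merged table::

    from ete3 import NCBITaxa
    ncbi = NCBITaxa()
    print(ncbi.get_taxid_translator([9606, 9443]))   # {9606: 'Homo sapiens', 9443: 'Primates'}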
720,716 |
ete3.ncbi_taxonomy.ncbiquery
|
get_topology
|
Given a list of taxid numbers, return the minimal pruned NCBI taxonomy tree
containing all of them.
:param False intermediate_nodes: If True, single child nodes
representing the complete lineage of leaf nodes are kept.
Otherwise, the tree is pruned to contain the first common
ancestor of each group.
:param None rank_limit: If valid NCBI rank name is provided,
the tree is pruned at that given level. For instance, use
rank="species" to get rid of sub-species or strain leaf
nodes.
:param False collapse_subspecies: If True, any item under the
species rank will be collapsed into the species upper
node.
|
def get_topology(self, taxids, intermediate_nodes=False, rank_limit=None, collapse_subspecies=False, annotate=True):
"""Given a list of taxid numbers, return the minimal pruned NCBI taxonomy tree
containing all of them.
:param False intermediate_nodes: If True, single child nodes
representing the complete lineage of leaf nodes are kept.
Otherwise, the tree is pruned to contain the first common
ancestor of each group.
:param None rank_limit: If valid NCBI rank name is provided,
the tree is pruned at that given level. For instance, use
rank="species" to get rid of sub-species or strain leaf
nodes.
:param False collapse_subspecies: If True, any item under the
species rank will be collapsed into the species upper
node.
"""
from .. import PhyloTree
taxids, merged_conversion = self._translate_merged(taxids)
if len(taxids) == 1:
root_taxid = int(list(taxids)[0])
with open(self.dbfile+".traverse.pkl", "rb") as CACHED_TRAVERSE:
prepostorder = pickle.load(CACHED_TRAVERSE)
descendants = {}
found = 0
nodes = {}
hit = 0
visited = set()
start = prepostorder.index(root_taxid)
try:
end = prepostorder.index(root_taxid, start+1)
subtree = prepostorder[start:end+1]
except ValueError:
# If root taxid is not found in postorder, must be a tip node
subtree = [root_taxid]
leaves = set([v for v, count in Counter(subtree).items() if count == 1])
nodes[root_taxid] = PhyloTree(name=str(root_taxid))
current_parent = nodes[root_taxid]
for tid in subtree:
if tid in visited:
current_parent = nodes[tid].up
else:
visited.add(tid)
nodes[tid] = PhyloTree(name=str(tid))
current_parent.add_child(nodes[tid])
if tid not in leaves:
current_parent = nodes[tid]
root = nodes[root_taxid]
else:
taxids = set(map(int, taxids))
sp2track = {}
elem2node = {}
id2lineage = self.get_lineage_translator(taxids)
all_taxids = set()
for lineage in id2lineage.values():
all_taxids.update(lineage)
id2rank = self.get_rank(all_taxids)
for sp in taxids:
track = []
lineage = id2lineage[sp]
for elem in lineage:
if elem not in elem2node:
node = elem2node.setdefault(elem, PhyloTree())
node.name = str(elem)
node.taxid = elem
node.add_feature("rank", str(id2rank.get(int(elem), "no rank")))
else:
node = elem2node[elem]
track.append(node)
sp2track[sp] = track
# generate parent child relationships
for sp, track in six.iteritems(sp2track):
parent = None
for elem in track:
if parent and elem not in parent.children:
parent.add_child(elem)
if rank_limit and elem.rank == rank_limit:
break
parent = elem
root = elem2node[1]
#remove onechild-nodes
if not intermediate_nodes:
for n in root.get_descendants():
if len(n.children) == 1 and int(n.name) not in taxids:
n.delete(prevent_nondicotomic=False)
if len(root.children) == 1:
tree = root.children[0].detach()
else:
tree = root
if collapse_subspecies:
to_detach = []
for node in tree.traverse():
if node.rank == "species":
to_detach.extend(node.children)
for n in to_detach:
n.detach()
if annotate:
self.annotate_tree(tree)
return tree
|
(self, taxids, intermediate_nodes=False, rank_limit=None, collapse_subspecies=False, annotate=True)
|
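Usage sketch; the taxids are illustrative vertebrate identifiers and the resulting tree is already annotated (annotate=True by default)::

    from ete3 import NCBITaxa
    ncbi = NCBITaxa()
    tree = ncbi.get_topology([9606, 9598, 10090, 7707, 8782])
    print(tree.get_ascii(attributes=["sci_name", "rank"]))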
720,717 |
ete3.ncbi_taxonomy.ncbiquery
|
translate_to_names
|
Given a list of taxid numbers, returns another list with their corresponding scientific names.
|
def translate_to_names(self, taxids):
"""
Given a list of taxid numbers, returns another list with their corresponding scientific names.
"""
id2name = self.get_taxid_translator(taxids)
names = []
for sp in taxids:
names.append(id2name.get(sp, sp))
return names
|
(self, taxids)
|
720,718 |
ete3.ncbi_taxonomy.ncbiquery
|
update_taxonomy_database
|
Updates the ncbi taxonomy database by downloading and parsing the latest
taxdump.tar.gz file from the NCBI FTP site (via HTTP).
:param None taxdump_file: an alternative location of the taxdump.tar.gz file.
|
def update_taxonomy_database(self, taxdump_file=None):
"""Updates the ncbi taxonomy database by downloading and parsing the latest
taxdump.tar.gz file from the NCBI FTP site (via HTTP).
:param None taxdump_file: an alternative location of the taxdump.tar.gz file.
"""
if not taxdump_file:
update_db(self.dbfile)
else:
update_db(self.dbfile, taxdump_file)
|
(self, taxdump_file=None)
|
720,719 |
ete3.nexml
|
Nexml
|
Creates a new nexml project.
|
class Nexml(_nexml.Nexml):
""" Creates a new nexml project. """
def __repr__(self):
return "NeXML project <%s>" %hex(hash(self))
def __init__(self, *args, **kargs):
_nexml.Nexml.__init__(self, *args, **kargs)
def build_from_file(self, fname, index_otus=True):
""" Populate Nexml project with data in a nexml file. """
doc = _nexml.parsexml_(fname)
rootNode = doc.getroot()
rootTag, rootClass = _nexml.get_root_tag(rootNode)
if rootClass is None:
rootTag = 'Nexml'
rootClass = self.__class__
#rootObj = rootClass.factory()
self.build(rootNode)
# This keeps a pointer from all trees to the parent nexml
# project. This way I can access other parts, such as otus,
# etc...
if index_otus:
id2taxa = {}
for taxa in self.get_otus():
id2taxon = {}
for taxon in taxa.otu:
id2taxon[taxon.id] = taxon
id2taxa[taxa.id] = [taxa, id2taxon]
for trees in self.get_trees():
for t in trees.get_tree():
t.set_nexml_project(self)
if trees.otus in id2taxa:
t.nexml_otus = id2taxa[trees.otus][0]
def export(self, outfile=stdout, level=0):
namespace='xmlns:nex="http://www.nexml.org/2009"'
return super(Nexml, self).export(outfile=outfile, level=level, namespacedef_=namespace)
|
(*args, **kargs)
|
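A hedged sketch of a Nexml project round-trip; the input file name is illustrative::

    from ete3 import Nexml
    project = Nexml()
    project.build_from_file("trees.xml")   # hypothetical NeXML document
    for trees in project.get_trees():
        for t in trees.get_tree():
            print(t)                       # each tree behaves like an ete3 Tree node
    project.export()                       # writes NeXML back to stdout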
720,720 |
ete3.nexml
|
__init__
| null |
def __init__(self, *args, **kargs):
_nexml.Nexml.__init__(self, *args, **kargs)
|
(self, *args, **kargs)
|
720,721 |
ete3.nexml
|
__repr__
| null |
def __repr__(self):
return "NeXML project <%s>" %hex(hash(self))
|
(self)
|
720,722 |
ete3.nexml._nexml
|
add_characters
| null |
def add_characters(self, value): self.characters.append(value)
|
(self, value)
|
720,723 |
ete3.nexml._nexml
|
add_meta
| null |
def add_meta(self, value): self.meta.append(value)
|
(self, value)
|
720,724 |
ete3.nexml._nexml
|
add_otus
| null |
def add_otus(self, value): self.otus.append(value)
|
(self, value)
|
720,725 |
ete3.nexml._nexml
|
add_trees
| null |
def add_trees(self, value): self.trees.append(value)
|
(self, value)
|
720,726 |
ete3.nexml._nexml
|
build
| null |
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
|
(self, node)
|
720,727 |
ete3.nexml._nexml
|
buildAttributes
| null |
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('version', node)
if value is not None and 'version' not in already_processed:
already_processed.append('version')
try:
self.version = float(value)
except ValueError as exp:
raise ValueError('Bad float/double attribute (version): %s' % exp)
self.validate_Nexml1_0(self.version) # validate type Nexml1_0
value = find_attr_value_('generator', node)
if value is not None and 'generator' not in already_processed:
already_processed.append('generator')
self.generator = value
super(Nexml, self).buildAttributes(node, attrs, already_processed)
|
(self, node, attrs, already_processed)
|
720,728 |
ete3.nexml._nexml
|
buildChildren
| null |
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'otus':
obj_ = Taxa.factory()
obj_.build(child_)
self.otus.append(obj_)
elif nodeName_ == 'characters':
type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <characters> element')
self.characters.append(obj_)
elif nodeName_ == 'trees':
obj_ = Trees.factory()
obj_.build(child_)
self.trees.append(obj_)
super(Nexml, self).buildChildren(child_, node, nodeName_, True)
|
(self, child_, node, nodeName_, fromsubclass_=False)
|
720,729 |
ete3.nexml
|
build_from_file
|
Populate Nexml project with data in a nexml file.
|
def build_from_file(self, fname, index_otus=True):
""" Populate Nexml project with data in a nexml file. """
doc = _nexml.parsexml_(fname)
rootNode = doc.getroot()
rootTag, rootClass = _nexml.get_root_tag(rootNode)
if rootClass is None:
rootTag = 'Nexml'
rootClass = self.__class__
#rootObj = rootClass.factory()
self.build(rootNode)
# This keeps a pointer from all trees to the parent nexml
# project. This way I can access other parts, such as otus,
# etc...
if index_otus:
id2taxa = {}
for taxa in self.get_otus():
id2taxon = {}
for taxon in taxa.otu:
id2taxon[taxon.id] = taxon
id2taxa[taxa.id] = [taxa, id2taxon]
for trees in self.get_trees():
for t in trees.get_tree():
t.set_nexml_project(self)
if trees.otus in id2taxa:
t.nexml_otus = id2taxa[trees.otus][0]
|
(self, fname, index_otus=True)
|
720,730 |
ete3.nexml
|
export
| null |
def export(self, outfile=stdout, level=0):
namespace='xmlns:nex="http://www.nexml.org/2009"'
return super(Nexml, self).export(outfile=outfile, level=level, namespacedef_=namespace)
|
(self, outfile=<_io.TextIOWrapper name='<stdout>' mode='w' encoding='utf-8'>, level=0)
|
720,731 |
ete3.nexml._nexml
|
exportAttributes
| null |
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='Nexml'):
super(Nexml, self).exportAttributes(outfile, level, already_processed, namespace_, name_='Nexml')
if self.version is not None and 'version' not in already_processed:
already_processed.append('version')
outfile.write(' version=%s' % (quote_attrib(self.version), ))
if self.generator is not None and 'generator' not in already_processed:
already_processed.append('generator')
outfile.write(' generator=%s' % (self.gds_format_string(quote_attrib(self.generator).encode(ExternalEncoding), input_name='generator'), ))
|
(self, outfile, level, already_processed, namespace_='', name_='Nexml')
|
720,732 |
ete3.nexml._nexml
|
exportChildren
| null |
def exportChildren(self, outfile, level, namespace_='', name_='Nexml', fromsubclass_=False):
super(Nexml, self).exportChildren(outfile, level, namespace_, name_, True)
for otus_ in self.otus:
otus_.export(outfile, level, namespace_, name_='otus')
for characters_ in self.get_characters():
characters_.export(outfile, level, namespace_, name_='characters')
for trees_ in self.trees:
trees_.export(outfile, level, namespace_, name_='trees')
|
(self, outfile, level, namespace_='', name_='Nexml', fromsubclass_=False)
|
720,733 |
ete3.nexml._nexml
|
exportLiteral
| null |
def exportLiteral(self, outfile, level, name_='Nexml'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
|
(self, outfile, level, name_='Nexml')
|
720,734 |
ete3.nexml._nexml
|
exportLiteralAttributes
| null |
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.version is not None and 'version' not in already_processed:
already_processed.append('version')
showIndent(outfile, level)
outfile.write('version = %f,\n' % (self.version,))
if self.generator is not None and 'generator' not in already_processed:
already_processed.append('generator')
showIndent(outfile, level)
outfile.write('generator = "%s",\n' % (self.generator,))
super(Nexml, self).exportLiteralAttributes(outfile, level, already_processed, name_)
|
(self, outfile, level, already_processed, name_)
|
720,735 |
ete3.nexml._nexml
|
exportLiteralChildren
| null |
def exportLiteralChildren(self, outfile, level, name_):
super(Nexml, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('otus=[\n')
level += 1
for otus_ in self.otus:
showIndent(outfile, level)
outfile.write('model_.Taxa(\n')
otus_.exportLiteral(outfile, level, name_='Taxa')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('characters=[\n')
level += 1
for characters_ in self.characters:
showIndent(outfile, level)
outfile.write('model_.AbstractBlock(\n')
characters_.exportLiteral(outfile, level, name_='AbstractBlock')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('trees=[\n')
level += 1
for trees_ in self.trees:
showIndent(outfile, level)
outfile.write('model_.Trees(\n')
trees_.exportLiteral(outfile, level, name_='Trees')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
|
(self, outfile, level, name_)
|
720,736 |
ete3.nexml._nexml
|
factory
| null |
def factory(*args_, **kwargs_):
if Nexml.subclass:
return Nexml.subclass(*args_, **kwargs_)
else:
return Nexml(*args_, **kwargs_)
|
(*args_, **kwargs_)
|
720,737 |
ete3.nexml._nexml
|
gds_format_boolean
| null |
def gds_format_boolean(self, input_data, input_name=''):
return '%s' % input_data
|
(self, input_data, input_name='')
|
720,738 |
ete3.nexml._nexml
|
gds_format_boolean_list
| null |
def gds_format_boolean_list(self, input_data, input_name=''):
return '%s' % input_data
|
(self, input_data, input_name='')
|
720,739 |
ete3.nexml._nexml
|
gds_format_double
| null |
def gds_format_double(self, input_data, input_name=''):
return '%e' % input_data
|
(self, input_data, input_name='')
|
720,740 |
ete3.nexml._nexml
|
gds_format_double_list
| null |
def gds_format_double_list(self, input_data, input_name=''):
return '%s' % input_data
|
(self, input_data, input_name='')
|
720,741 |
ete3.nexml._nexml
|
gds_format_float
| null |
def gds_format_float(self, input_data, input_name=''):
return '%f' % input_data
|
(self, input_data, input_name='')
|
720,742 |
ete3.nexml._nexml
|
gds_format_float_list
| null |
def gds_format_float_list(self, input_data, input_name=''):
return '%s' % input_data
|
(self, input_data, input_name='')
|
720,743 |
ete3.nexml._nexml
|
gds_format_integer
| null |
def gds_format_integer(self, input_data, input_name=''):
return '%d' % input_data
|
(self, input_data, input_name='')
|
720,744 |
ete3.nexml._nexml
|
gds_format_integer_list
| null |
def gds_format_integer_list(self, input_data, input_name=''):
return '%s' % input_data
|
(self, input_data, input_name='')
|
720,745 |
ete3.nexml._nexml
|
gds_format_string
| null |
def gds_format_string(self, input_data, input_name=''):
return input_data
|
(self, input_data, input_name='')
|
720,746 |
ete3.nexml._nexml
|
gds_str_lower
| null |
def gds_str_lower(self, instring):
return instring.lower()
|
(self, instring)
|
720,747 |
ete3.nexml._nexml
|
gds_validate_boolean
| null |
def gds_validate_boolean(self, input_data, node, input_name=''):
return input_data
|
(self, input_data, node, input_name='')
|
720,748 |
ete3.nexml._nexml
|
gds_validate_boolean_list
| null |
def gds_validate_boolean_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
if value not in ('true', '1', 'false', '0', ):
raise_parse_error(node, 'Requires sequence of booleans ("true", "1", "false", "0")')
return input_data
|
(self, input_data, node, input_name='')
|
720,749 |
ete3.nexml._nexml
|
gds_validate_double
| null |
def gds_validate_double(self, input_data, node, input_name=''):
return input_data
|
(self, input_data, node, input_name='')
|
720,750 |
ete3.nexml._nexml
|
gds_validate_double_list
| null |
def gds_validate_double_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError) as exp:
raise_parse_error(node, 'Requires sequence of doubles')
return input_data
|
(self, input_data, node, input_name='')
|
720,751 |
ete3.nexml._nexml
|
gds_validate_float
| null |
def gds_validate_float(self, input_data, node, input_name=''):
return input_data
|
(self, input_data, node, input_name='')
|
720,752 |
ete3.nexml._nexml
|
gds_validate_float_list
| null |
def gds_validate_float_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError) as exp:
raise_parse_error(node, 'Requires sequence of floats')
return input_data
|
(self, input_data, node, input_name='')
|
720,753 |
ete3.nexml._nexml
|
gds_validate_integer
| null |
def gds_validate_integer(self, input_data, node, input_name=''):
return input_data
|
(self, input_data, node, input_name='')
|
720,754 |
ete3.nexml._nexml
|
gds_validate_integer_list
| null |
def gds_validate_integer_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError) as exp:
raise_parse_error(node, 'Requires sequence of integers')
return input_data
|
(self, input_data, node, input_name='')
|
720,755 |
ete3.nexml._nexml
|
gds_validate_string
| null |
def gds_validate_string(self, input_data, node, input_name=''):
return input_data
|
(self, input_data, node, input_name='')
|
720,756 |
ete3.nexml._nexml
|
get_about
| null |
def get_about(self): return self.about
|
(self)
|
720,757 |
ete3.nexml._nexml
|
get_anyAttributes_
| null |
def get_anyAttributes_(self): return self.anyAttributes_
|
(self)
|
720,758 |
ete3.nexml._nexml
|
get_characters
| null |
def get_characters(self): return self.characters
|
(self)
|
720,759 |
ete3.nexml._nexml
|
get_generator
| null |
def get_generator(self): return self.generator
|
(self)
|
720,760 |
ete3.nexml._nexml
|
get_meta
| null |
def get_meta(self): return self.meta
|
(self)
|
720,761 |
ete3.nexml._nexml
|
get_otus
| null |
def get_otus(self): return self.otus
|
(self)
|
720,762 |
ete3.nexml._nexml
|
get_path_
| null |
def get_path_(self, node):
path_list = []
self.get_path_list_(node, path_list)
path_list.reverse()
path = '/'.join(path_list)
return path
|
(self, node)
|