index
int64 0
731k
| package
stringlengths 2
98
⌀ | name
stringlengths 1
76
| docstring
stringlengths 0
281k
⌀ | code
stringlengths 4
1.07M
⌀ | signature
stringlengths 2
42.8k
⌀ |
---|---|---|---|---|---|
720,763 |
ete3.nexml._nexml
|
get_path_list_
| null |
def get_path_list_(self, node, path_list):
if node is None:
return
tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
if tag:
path_list.append(tag)
self.get_path_list_(node.getparent(), path_list)
|
(self, node, path_list)
|
720,764 |
ete3.nexml._nexml
|
get_trees
| null |
def get_trees(self): return self.trees
|
(self)
|
720,765 |
ete3.nexml._nexml
|
get_version
| null |
def get_version(self): return self.version
|
(self)
|
720,766 |
ete3.nexml._nexml
|
hasContent_
| null |
def hasContent_(self):
if (
self.otus or
self.characters or
self.trees or
super(Nexml, self).hasContent_()
):
return True
else:
return False
|
(self)
|
720,767 |
ete3.nexml._nexml
|
insert_characters
| null |
def insert_characters(self, index, value): self.characters[index] = value
|
(self, index, value)
|
720,768 |
ete3.nexml._nexml
|
insert_meta
| null |
def insert_meta(self, index, value): self.meta[index] = value
|
(self, index, value)
|
720,769 |
ete3.nexml._nexml
|
insert_otus
| null |
def insert_otus(self, index, value): self.otus[index] = value
|
(self, index, value)
|
720,770 |
ete3.nexml._nexml
|
insert_trees
| null |
def insert_trees(self, index, value): self.trees[index] = value
|
(self, index, value)
|
720,771 |
ete3.nexml._nexml
|
set_about
| null |
def set_about(self, about): self.about = about
|
(self, about)
|
720,772 |
ete3.nexml._nexml
|
set_anyAttributes_
| null |
def set_anyAttributes_(self, anyAttributes_): self.anyAttributes_ = anyAttributes_
|
(self, anyAttributes_)
|
720,773 |
ete3.nexml._nexml
|
set_characters
| null |
def set_characters(self, characters): self.characters = characters
|
(self, characters)
|
720,774 |
ete3.nexml._nexml
|
set_generator
| null |
def set_generator(self, generator): self.generator = generator
|
(self, generator)
|
720,775 |
ete3.nexml._nexml
|
set_meta
| null |
def set_meta(self, meta): self.meta = meta
|
(self, meta)
|
720,776 |
ete3.nexml._nexml
|
set_otus
| null |
def set_otus(self, otus): self.otus = otus
|
(self, otus)
|
720,777 |
ete3.nexml._nexml
|
set_trees
| null |
def set_trees(self, trees): self.trees = trees
|
(self, trees)
|
720,778 |
ete3.nexml._nexml
|
set_version
| null |
def set_version(self, version): self.version = version
|
(self, version)
|
720,779 |
ete3.nexml._nexml
|
validate_Nexml1_0
| null |
def validate_Nexml1_0(self, value):
# Validate type Nexml1_0, a restriction on xs:decimal.
pass
|
(self, value)
|
720,780 |
ete3.nexml._nexml_tree
|
NexmlTree
|
Special PhyloTree object with nexml support
|
class NexmlTree(PhyloTree):
"""
Special PhyloTree object with nexml support
"""
def __repr__(self):
return "NexML ETE tree <%s>" %hex(hash(self))
def _get_dist(self):
return self.nexml_edge.get_length()
def _set_dist(self, value):
try:
self.nexml_edge.set_length(value)
except ValueError:
raise
def _get_support(self):
return self._nexml_support.content
def _set_support(self, value):
try:
self._nexml_support.content = float(value)
except ValueError:
raise
def _get_name(self):
return self.nexml_node.get_label()
def _set_name(self, value):
try:
self.nexml_node.set_label(value)
except ValueError:
raise
def _get_children(self):
return self._children
def _set_children(self, value):
if isinstance(value, Children) and \
len(set([type(n)==type(self) for n in value]))<2:
self._children = value
else:
raise ValueError("children:wrong type")
dist = property(fget=_get_dist, fset=_set_dist)
support = property(fget=_get_support, fset=_set_support)
children = property(fget=_get_children, fset=_set_children)
name = property(fget=_get_name, fset=_set_name)
def __init__(self, newick=None, alignment=None, alg_format="fasta", \
sp_naming_function=_parse_species, format=0):
self.nexml_tree = FloatTree()
self.nexml_tree.set_anyAttributes_({'xsi:type': 'FloatTree'})
self.nexml_node = TreeNode()
self.nexml_edge = TreeFloatEdge()
self.nexml_node.id = "node_%s" %hash(self)
self.nexml_edge.id = "edge_%s" %hash(self)
self.nexml_project = None
self._nexml_support = LiteralMeta(datatype="float", property="branch_support", content=1.0)
self.nexml_edge.length = 0.0
self.nexml_edge.add_meta(self._nexml_support)
# Initialize empty PhyloTree
super(NexmlTree, self).__init__()
self._children = Children()
self._children.node = self
if alignment:
self.link_to_alignment(alignment, alg_format)
if newick:
read_newick(newick, root_node=self, format=format)
self.set_species_naming_function(sp_naming_function)
def set_nexml_project(self, nexml_obj):
self.nexml_project = nexml_obj
def build(self, node):
self.nexml_tree = FloatTree()
tree = self.nexml_tree
tree.build(node)
# This detects the outgroup of the tree even if the root tag
# is not set in any node
rootid = set([e.source for e in tree.edge]) - set([e.target for e in tree.edge])
nodeid2node = {rootid.pop(): self}
for xmledge in tree.edge:
child = nodeid2node.setdefault(xmledge.target, self.__class__() )
parent = nodeid2node.setdefault(xmledge.source, self.__class__() )
child.name = xmledge.target
child.nexml_node.id = xmledge.target
parent.name = xmledge.source
parent.nexml_node.id = xmledge.source
child.nexml_edge = xmledge
if xmledge.length is not None:
child.dist = float(xmledge.length)
parent.add_child(child)
for xmlnode in tree.node:
# just a warning. I don't know if this can occur
if xmlnode.id not in nodeid2node:
print("Unused node", xmlnode.id, file=sys.stderr)
continue
ete_node = nodeid2node[xmlnode.id]
ete_node.nexml_node = xmlnode
if xmlnode.label:
ete_node.name = xmlnode.label
elif xmlnode.id is not None:
ete_node.name = xmlnode.id
def export(self, outfile=sys.stdout, level=0, namespace_='', name_='FloatTree', namespacedef_=''):
if self.nexml_tree:
info = [(n.nexml_edge, n.nexml_node) for n in self.traverse()]
self.nexml_node.set_root(True)
self.nexml_tree.set_edge([i[0] for i in info])
self.nexml_tree.set_node([i[1] for i in info])
self.nexml_tree.export(outfile=outfile, level=level, name_=name_, namespacedef_=namespacedef_)
def exportChildren(self, outfile, level, namespace_='', name_='FloatTree'):
sorted_nodes = []
sorted_edges = []
for n in self.traverse():
# process node
node_obj = self.mixedclass_(MixedContainer.CategoryComplex,
MixedContainer.TypeNone, 'node', n.nexml_node)
sorted_nodes.append(node_obj)
# process edge
if n.nexml_edge:
edge_obj = self.mixedclass_(MixedContainer.CategoryComplex,
MixedContainer.TypeNone, 'edge', n.nexml_edge)
sorted_edges.append(edge_obj)
# process the nodes and edges
self.tree.content_ = sorted_nodes + sorted_edges
for item_ in self.tree.content_:
item_.export(outfile, level, item_.name, namespace_)
|
(newick=None, alignment=None, alg_format='fasta', sp_naming_function=<function _parse_species at 0x7ff39bbdf760>, format=0)
|
720,786 |
ete3.nexml._nexml_tree
|
__init__
| null |
def __init__(self, newick=None, alignment=None, alg_format="fasta", \
sp_naming_function=_parse_species, format=0):
self.nexml_tree = FloatTree()
self.nexml_tree.set_anyAttributes_({'xsi:type': 'FloatTree'})
self.nexml_node = TreeNode()
self.nexml_edge = TreeFloatEdge()
self.nexml_node.id = "node_%s" %hash(self)
self.nexml_edge.id = "edge_%s" %hash(self)
self.nexml_project = None
self._nexml_support = LiteralMeta(datatype="float", property="branch_support", content=1.0)
self.nexml_edge.length = 0.0
self.nexml_edge.add_meta(self._nexml_support)
# Initialize empty PhyloTree
super(NexmlTree, self).__init__()
self._children = Children()
self._children.node = self
if alignment:
self.link_to_alignment(alignment, alg_format)
if newick:
read_newick(newick, root_node=self, format=format)
self.set_species_naming_function(sp_naming_function)
|
(self, newick=None, alignment=None, alg_format='fasta', sp_naming_function=<function _parse_species at 0x7ff39bbdf760>, format=0)
|
720,790 |
ete3.nexml._nexml_tree
|
__repr__
| null |
def __repr__(self):
return "NexML ETE tree <%s>" %hex(hash(self))
|
(self)
|
720,795 |
ete3.nexml._nexml_tree
|
_get_dist
| null |
def _get_dist(self):
return self.nexml_edge.get_length()
|
(self)
|
720,798 |
ete3.nexml._nexml_tree
|
_get_name
| null |
def _get_name(self):
return self.nexml_node.get_label()
|
(self)
|
720,801 |
ete3.nexml._nexml_tree
|
_get_support
| null |
def _get_support(self):
return self._nexml_support.content
|
(self)
|
720,806 |
ete3.nexml._nexml_tree
|
_set_children
| null |
def _set_children(self, value):
if isinstance(value, Children) and \
len(set([type(n)==type(self) for n in value]))<2:
self._children = value
else:
raise ValueError("children:wrong type")
|
(self, value)
|
720,807 |
ete3.nexml._nexml_tree
|
_set_dist
| null |
def _set_dist(self, value):
try:
self.nexml_edge.set_length(value)
except ValueError:
raise
|
(self, value)
|
720,809 |
ete3.nexml._nexml_tree
|
_set_name
| null |
def _set_name(self, value):
try:
self.nexml_node.set_label(value)
except ValueError:
raise
|
(self, value)
|
720,812 |
ete3.nexml._nexml_tree
|
_set_support
| null |
def _set_support(self, value):
try:
self._nexml_support.content = float(value)
except ValueError:
raise
|
(self, value)
|
720,820 |
ete3.nexml._nexml_tree
|
build
| null |
def build(self, node):
self.nexml_tree = FloatTree()
tree = self.nexml_tree
tree.build(node)
# This detects the outgroup of the tree even if the root tag
# is not set in any node
rootid = set([e.source for e in tree.edge]) - set([e.target for e in tree.edge])
nodeid2node = {rootid.pop(): self}
for xmledge in tree.edge:
child = nodeid2node.setdefault(xmledge.target, self.__class__() )
parent = nodeid2node.setdefault(xmledge.source, self.__class__() )
child.name = xmledge.target
child.nexml_node.id = xmledge.target
parent.name = xmledge.source
parent.nexml_node.id = xmledge.source
child.nexml_edge = xmledge
if xmledge.length is not None:
child.dist = float(xmledge.length)
parent.add_child(child)
for xmlnode in tree.node:
# just a warning. I don't know if this can occur
if xmlnode.id not in nodeid2node:
print("Unused node", xmlnode.id, file=sys.stderr)
continue
ete_node = nodeid2node[xmlnode.id]
ete_node.nexml_node = xmlnode
if xmlnode.label:
ete_node.name = xmlnode.label
elif xmlnode.id is not None:
ete_node.name = xmlnode.id
|
(self, node)
|
720,832 |
ete3.nexml._nexml_tree
|
export
| null |
def export(self, outfile=sys.stdout, level=0, namespace_='', name_='FloatTree', namespacedef_=''):
if self.nexml_tree:
info = [(n.nexml_edge, n.nexml_node) for n in self.traverse()]
self.nexml_node.set_root(True)
self.nexml_tree.set_edge([i[0] for i in info])
self.nexml_tree.set_node([i[1] for i in info])
self.nexml_tree.export(outfile=outfile, level=level, name_=name_, namespacedef_=namespacedef_)
|
(self, outfile=<_io.TextIOWrapper name='<stdout>' mode='w' encoding='utf-8'>, level=0, namespace_='', name_='FloatTree', namespacedef_='')
|
720,833 |
ete3.nexml._nexml_tree
|
exportChildren
| null |
def exportChildren(self, outfile, level, namespace_='', name_='FloatTree'):
sorted_nodes = []
sorted_edges = []
for n in self.traverse():
# process node
node_obj = self.mixedclass_(MixedContainer.CategoryComplex,
MixedContainer.TypeNone, 'node', n.nexml_node)
sorted_nodes.append(node_obj)
# process edge
if n.nexml_edge:
edge_obj = self.mixedclass_(MixedContainer.CategoryComplex,
MixedContainer.TypeNone, 'edge', n.nexml_edge)
sorted_edges.append(edge_obj)
# process the nodes and edges
self.tree.content_ = sorted_nodes + sorted_edges
for item_ in self.tree.content_:
item_.export(outfile, level, item_.name, namespace_)
|
(self, outfile, level, namespace_='', name_='FloatTree')
|
720,874 |
ete3.phylo.phylotree
|
link_to_alignment
| null |
def link_to_alignment(self, alignment, alg_format="fasta", **kwargs):
missing_leaves = []
missing_internal = []
if type(alignment) == SeqGroup:
alg = alignment
else:
alg = SeqGroup(alignment, format=alg_format, **kwargs)
# sets the seq of
for n in self.traverse():
try:
n.add_feature("sequence",alg.get_seq(n.name))
except KeyError:
if n.is_leaf():
missing_leaves.append(n.name)
else:
missing_internal.append(n.name)
if len(missing_leaves)>0:
print("Warnning: [%d] terminal nodes could not be found in the alignment." %\
len(missing_leaves), file=sys.stderr)
# Show warning of not associated internal nodes.
# if len(missing_internal)>0:
# print >>sys.stderr, \
# "Warnning: [%d] internal nodes could not be found in the alignment." %\
# len(missing_leaves)
|
(self, alignment, alg_format='fasta', **kwargs)
|
720,886 |
ete3.nexml._nexml_tree
|
set_nexml_project
| null |
def set_nexml_project(self, nexml_obj):
self.nexml_project = nexml_obj
|
(self, nexml_obj)
|
720,898 |
ete3.phylo.phylotree
|
PhyloNode
|
.. currentmodule:: ete3
Extends the standard :class:`TreeNode` instance. It adds
specific attributes and methods to work with phylogentic trees.
:argument newick: Path to the file containing the tree or, alternatively,
the text string containing the same information.
:argument alignment: file containing a multiple sequence alignment.
:argument alg_format: "fasta", "phylip" or "iphylip" (interleaved)
:argument format: sub-newick format
.. table::
====== ==============================================
FORMAT DESCRIPTION
====== ==============================================
0 flexible with support values
1 flexible with internal node names
2 all branches + leaf names + internal supports
3 all branches + all names
4 leaf branches + leaf names
5 internal and leaf branches + leaf names
6 internal branches + leaf names
7 leaf branches + all names
8 all names
9 leaf names
100 topology only
====== ==============================================
:argument sp_naming_function: Pointer to a parsing python
function that receives nodename as first argument and returns
the species name (see
:func:`PhyloNode.set_species_naming_function`. By default, the
3 first letter of nodes will be used as species identifiers.
:returns: a tree node object which represents the base of the tree.
|
class PhyloNode(TreeNode):
"""
.. currentmodule:: ete3
Extends the standard :class:`TreeNode` instance. It adds
specific attributes and methods to work with phylogentic trees.
:argument newick: Path to the file containing the tree or, alternatively,
the text string containing the same information.
:argument alignment: file containing a multiple sequence alignment.
:argument alg_format: "fasta", "phylip" or "iphylip" (interleaved)
:argument format: sub-newick format
.. table::
====== ==============================================
FORMAT DESCRIPTION
====== ==============================================
0 flexible with support values
1 flexible with internal node names
2 all branches + leaf names + internal supports
3 all branches + all names
4 leaf branches + leaf names
5 internal and leaf branches + leaf names
6 internal branches + leaf names
7 leaf branches + all names
8 all names
9 leaf names
100 topology only
====== ==============================================
:argument sp_naming_function: Pointer to a parsing python
function that receives nodename as first argument and returns
the species name (see
:func:`PhyloNode.set_species_naming_function`. By default, the
3 first letter of nodes will be used as species identifiers.
:returns: a tree node object which represents the base of the tree.
"""
def _get_species(self):
if self._speciesFunction:
try:
return self._speciesFunction(self.name)
except:
return self._speciesFunction(self)
else:
return self._species
def _set_species(self, value):
if self._speciesFunction:
pass
else:
self._species = value
# This tweak overwrites the native 'name' attribute to create a
# property that updates the species code every time name is
# changed
#: .. currentmodule:: ete3
#:
#Species code associated to the node. This property can be
#automatically extracted from the TreeNode.name attribute or
#manually set (see :func:`PhyloNode.set_species_naming_function`).
species = property(fget = _get_species, fset = _set_species)
def __init__(self, newick=None, alignment=None, alg_format="fasta", \
sp_naming_function=_parse_species, format=0, **kargs):
# _update names?
self._name = "NoName"
self._species = "Unknown"
self._speciesFunction = None
# Caution! native __init__ has to be called after setting
# _speciesFunction to None!!
TreeNode.__init__(self, newick=newick, format=format, **kargs)
# This will be only executed after reading the whole tree,
# because the argument 'alignment' is not passed to the
# PhyloNode constructor during parsing
if alignment:
self.link_to_alignment(alignment, alg_format)
if newick:
self.set_species_naming_function(sp_naming_function)
def __repr__(self):
return "PhyloTree node '%s' (%s)" %(self.name, hex(self.__hash__()))
def set_species_naming_function(self, fn):
"""
Sets the parsing function used to extract species name from a
node's name.
:argument fn: Pointer to a parsing python function that
receives nodename as first argument and returns the species
name.
::
# Example of a parsing function to extract species names for
# all nodes in a given tree.
def parse_sp_name(node_name):
return node_name.split("_")[1]
tree.set_species_naming_function(parse_sp_name)
"""
if fn:
for n in self.traverse():
n._speciesFunction = fn
if n.is_leaf():
n.features.add("species")
def link_to_alignment(self, alignment, alg_format="fasta", **kwargs):
missing_leaves = []
missing_internal = []
if type(alignment) == SeqGroup:
alg = alignment
else:
alg = SeqGroup(alignment, format=alg_format, **kwargs)
# sets the seq of
for n in self.traverse():
try:
n.add_feature("sequence",alg.get_seq(n.name))
except KeyError:
if n.is_leaf():
missing_leaves.append(n.name)
else:
missing_internal.append(n.name)
if len(missing_leaves)>0:
print("Warnning: [%d] terminal nodes could not be found in the alignment." %\
len(missing_leaves), file=sys.stderr)
# Show warning of not associated internal nodes.
# if len(missing_internal)>0:
# print >>sys.stderr, \
# "Warnning: [%d] internal nodes could not be found in the alignment." %\
# len(missing_leaves)
def get_species(self):
""" Returns the set of species covered by its partition. """
return set([l.species for l in self.iter_leaves()])
def iter_species(self):
""" Returns an iterator over the species grouped by this node. """
spcs = set([])
for l in self.iter_leaves():
if l.species not in spcs:
spcs.add(l.species)
yield l.species
def get_age(self, species2age):
"""
Implements the phylostratigrafic method described in:
Huerta-Cepas, J., & Gabaldon, T. (2011). Assigning duplication events to
relative temporal scales in genome-wide studies. Bioinformatics, 27(1),
38-45.
"""
return max([species2age[sp] for sp in self.get_species()])
def reconcile(self, species_tree):
""" Returns the reconcilied topology with the provided species
tree, and a list of evolutionary events inferred from such
reconciliation. """
return get_reconciled_tree(self, species_tree, [])
def get_my_evol_events(self, sos_thr=0.0):
""" Returns a list of duplication and speciation events in
which the current node has been involved. Scanned nodes are
also labeled internally as dup=True|False. You can access this
labels using the 'node.dup' sintaxis.
Method: the algorithm scans all nodes from the given leafName to
the root. Nodes are assumed to be duplications when a species
overlap is found between its child linages. Method is described
more detail in:
"The Human Phylome." Huerta-Cepas J, Dopazo H, Dopazo J, Gabaldon
T. Genome Biol. 2007;8(6):R109.
"""
return spoverlap.get_evol_events_from_leaf(self, sos_thr=sos_thr)
def get_descendant_evol_events(self, sos_thr=0.0):
""" Returns a list of **all** duplication and speciation
events detected after this node. Nodes are assumed to be
duplications when a species overlap is found between its child
linages. Method is described more detail in:
"The Human Phylome." Huerta-Cepas J, Dopazo H, Dopazo J, Gabaldon
T. Genome Biol. 2007;8(6):R109.
"""
return spoverlap.get_evol_events_from_root(self, sos_thr=sos_thr)
def get_farthest_oldest_leaf(self, species2age, is_leaf_fn=None):
""" Returns the farthest oldest leaf to the current
one. It requires an species2age dictionary with the age
estimation for all species.
:argument None is_leaf_fn: A pointer to a function that
receives a node instance as unique argument and returns True
or False. It can be used to dynamically collapse nodes, so
they are seen as leaves.
"""
root = self.get_tree_root()
outgroup_dist = 0
outgroup_node = self
outgroup_age = 0 # self.get_age(species2age)
for leaf in root.iter_leaves(is_leaf_fn=is_leaf_fn):
if leaf.get_age(species2age) > outgroup_age:
outgroup_dist = leaf.get_distance(self)
outgroup_node = leaf
outgroup_age = species2age[leaf.get_species().pop()]
elif leaf.get_age(species2age) == outgroup_age:
dist = leaf.get_distance(self)
if dist>outgroup_dist:
outgroup_dist = leaf.get_distance(self)
outgroup_node = leaf
outgroup_age = species2age[leaf.get_species().pop()]
return outgroup_node
def get_farthest_oldest_node(self, species2age):
"""
.. versionadded:: 2.1
Returns the farthest oldest node (leaf or internal). The
difference with get_farthest_oldest_leaf() is that in this
function internal nodes grouping seqs from the same species
are collapsed.
"""
# I use a custom is_leaf() function to collapse nodes groups
# seqs from the same species
is_leaf = lambda node: len(node.get_species())==1
return self.get_farthest_oldest_leaf(species2age, is_leaf_fn=is_leaf)
def get_age_balanced_outgroup(self, species2age):
"""
.. versionadded:: 2.2
Returns the node better balance current tree structure
according to the topological age of the different leaves and
internal node sizes.
:param species2age: A dictionary translating from leaf names
into a topological age.
.. warning: This is currently an experimental method!!
"""
root = self
all_seqs = set(self.get_leaf_names())
outgroup_dist = 0
best_balance = max(species2age.values())
outgroup_node = self
outgroup_size = 0
for leaf in root.iter_descendants():
leaf_seqs = set(leaf.get_leaf_names())
size = len(leaf_seqs)
leaf_species =[self._speciesFunction(s) for s in leaf_seqs]
out_species = [self._speciesFunction(s) for s in all_seqs-leaf_seqs]
leaf_age_min = min([species2age[sp] for sp in leaf_species])
out_age_min = min([species2age[sp] for sp in out_species])
leaf_age_max = max([species2age[sp] for sp in leaf_species])
out_age_max = max([species2age[sp] for sp in out_species])
leaf_age = leaf_age_max - leaf_age_min
out_age = out_age_max - out_age_min
age_inbalance = abs(out_age - leaf_age)
# DEBUG ONLY
# leaf.add_features(age_inbalance = age_inbalance, age=leaf_age)
update = False
if age_inbalance < best_balance:
update = True
elif age_inbalance == best_balance:
if size > outgroup_size:
update = True
elif size == outgroup_size:
dist = self.get_distance(leaf)
outgroup_dist = self.get_distance(outgroup_node)
if dist > outgroup_dist:
update = True
if update:
best_balance = age_inbalance
outgroup_node = leaf
outgroup_size = size
return outgroup_node
def get_speciation_trees(self, map_features=None, autodetect_duplications=True,
newick_only=False, target_attr='species'):
"""
.. versionadded: 2.2
Calculates all possible species trees contained within a
duplicated gene family tree as described in `Treeko
<http://treeko.cgenomics.org>`_ (see `Marcet and Gabaldon,
2011 <http://www.ncbi.nlm.nih.gov/pubmed/21335609>`_ ).
:argument True autodetect_duplications: If True, duplication
nodes will be automatically detected using the Species Overlap
algorithm (:func:`PhyloNode.get_descendants_evol_events`. If
False, duplication nodes within the original tree are expected
to contain the feature "evoltype=D".
:argument None features: A list of features that should be
mapped from the original gene family tree to each species
tree subtree.
:returns: (number_of_sptrees, number_of_dups, species_tree_iterator)
"""
t = self
if autodetect_duplications:
#n2content, n2species = t.get_node2species()
n2content = t.get_cached_content()
n2species = t.get_cached_content(store_attr=target_attr)
for node in n2content:
sp_subtotal = sum([len(n2species[_ch]) for _ch in node.children])
if len(n2species[node]) > 1 and len(n2species[node]) != sp_subtotal:
node.add_features(evoltype="D")
sp_trees = get_subtrees(t, features=map_features, newick_only=newick_only)
return sp_trees
def __get_speciation_trees_recursive(self):
""" experimental and testing """
t = self.copy()
if autodetect_duplications:
dups = 0
#n2content, n2species = t.get_node2species()
n2content = t.get_cached_content()
n2species = t.get_cached_content(store_attr="species")
#print "Detecting dups"
for node in n2content:
sp_subtotal = sum([len(n2species[_ch]) for _ch in node.children])
if len(n2species[node]) > 1 and len(n2species[node]) != sp_subtotal:
node.add_features(evoltype="D")
dups += 1
elif node.is_leaf():
node._leaf = True
#print dups
else:
for node in t.iter_leaves():
node._leaf = True
subtrees = _get_subtrees_recursive(t)
return len(subtrees), 0, subtrees
def split_by_dups(self, autodetect_duplications=True):
"""
.. versionadded: 2.2
Returns the list of all subtrees resulting from splitting
current tree by its duplication nodes.
:argument True autodetect_duplications: If True, duplication
nodes will be automatically detected using the Species Overlap
algorithm (:func:`PhyloNode.get_descendants_evol_events`. If
False, duplication nodes within the original tree are expected
to contain the feature "evoltype=D".
:returns: species_trees
"""
try:
t = self.copy()
except Exception:
t = self.copy("deepcopy")
if autodetect_duplications:
dups = 0
#n2content, n2species = t.get_node2species()
n2content = t.get_cached_content()
n2species = t.get_cached_content(store_attr="species")
#print "Detecting dups"
for node in n2content:
sp_subtotal = sum([len(n2species[_ch]) for _ch in node.children])
if len(n2species[node]) > 1 and len(n2species[node]) != sp_subtotal:
node.add_features(evoltype="D")
dups += 1
elif node.is_leaf():
node._leaf = True
#print dups
else:
for node in t.iter_leaves():
node._leaf = True
sp_trees = get_subparts(t)
return sp_trees
def collapse_lineage_specific_expansions(self, species=None, return_copy=True):
""" Converts lineage specific expansion nodes into a single
tip node (randomly chosen from tips within the expansion).
:param None species: If supplied, only expansions matching the
species criteria will be pruned. When None, all expansions
within the tree will be processed.
"""
if species and isinstance(species, (list, tuple)):
species = set(species)
elif species and (not isinstance(species, (set, frozenset))):
raise TypeError("species argument should be a set (preferred), list or tuple")
prunned = self.copy("deepcopy") if return_copy else self
n2sp = prunned.get_cached_content(store_attr="species")
n2leaves = prunned.get_cached_content()
is_expansion = lambda n: (len(n2sp[n])==1 and len(n2leaves[n])>1
and (species is None or species & n2sp[n]))
for n in prunned.get_leaves(is_leaf_fn=is_expansion):
repre = list(n2leaves[n])[0]
repre.detach()
if n is not prunned:
n.up.add_child(repre)
n.detach()
else:
return repre
return prunned
def annotate_ncbi_taxa(self, taxid_attr='species', tax2name=None, tax2track=None, tax2rank=None, dbfile=None):
"""Add NCBI taxonomy annotation to all descendant nodes. Leaf nodes are
expected to contain a feature (name, by default) encoding a valid taxid
number.
All descendant nodes (including internal nodes) are annotated with the
following new features:
`Node.spname`: scientific spcies name as encoded in the NCBI taxonomy database
`Node.named_lineage`: the NCBI lineage track using scientific names
`Node.taxid`: NCBI taxid number
`Node.lineage`: same as named_lineage but using taxid codes.
Note that for internal nodes, NCBI information will refer to the first
common lineage of the grouped species.
:param name taxid_attr: the name of the feature that should be used to access the taxid number associated to each node.
:param None tax2name: A dictionary where keys are taxid
numbers and values are their translation into NCBI
scientific name. Its use is optional and allows to avoid
database queries when annotating many trees containing the
same set of taxids.
:param None tax2track: A dictionary where keys are taxid
numbers and values are their translation into NCBI lineage
tracks (taxids). Its use is optional and allows to avoid
database queries when annotating many trees containing the
same set of taxids.
:param None tax2rank: A dictionary where keys are taxid
numbers and values are their translation into NCBI rank
name. Its use is optional and allows to avoid database
queries when annotating many trees containing the same set
of taxids.
:param None dbfile : If provided, the provided file will be
used as a local copy of the NCBI taxonomy database.
:returns: tax2name (a dictionary translating taxid numbers
into scientific name), tax2lineage (a dictionary
translating taxid numbers into their corresponding NCBI
lineage track) and tax2rank (a dictionary translating
taxid numbers into rank names).
"""
ncbi = NCBITaxa(dbfile=dbfile)
return ncbi.annotate_tree(self, taxid_attr=taxid_attr, tax2name=tax2name, tax2track=tax2track, tax2rank=tax2rank)
def ncbi_compare(self, autodetect_duplications=True, cached_content=None):
if not cached_content:
cached_content = self.get_cached_content()
cached_species = set([n.species for n in cached_content[self]])
if len(cached_species) != len(cached_content[self]):
print(cached_species)
ntrees, ndups, target_trees = self.get_speciation_trees(autodetect_duplications=autodetect_duplications, map_features=["taxid"])
else:
target_trees = [self]
ncbi = NCBITaxa()
for t in target_trees:
ncbi.get_broken_branches(t, cached_content)
|
(newick=None, alignment=None, alg_format='fasta', sp_naming_function=<function _parse_species at 0x7ff39bbdf760>, format=0, **kargs)
|
720,904 |
ete3.phylo.phylotree
|
__init__
| null |
def __init__(self, newick=None, alignment=None, alg_format="fasta", \
sp_naming_function=_parse_species, format=0, **kargs):
# _update names?
self._name = "NoName"
self._species = "Unknown"
self._speciesFunction = None
# Caution! native __init__ has to be called after setting
# _speciesFunction to None!!
TreeNode.__init__(self, newick=newick, format=format, **kargs)
# This will be only executed after reading the whole tree,
# because the argument 'alignment' is not passed to the
# PhyloNode constructor during parsing
if alignment:
self.link_to_alignment(alignment, alg_format)
if newick:
self.set_species_naming_function(sp_naming_function)
|
(self, newick=None, alignment=None, alg_format='fasta', sp_naming_function=<function _parse_species at 0x7ff39bbdf760>, format=0, **kargs)
|
721,122 |
ete3.phyloxml
|
Phyloxml
| null |
class Phyloxml(_phyloxml.Phyloxml):
def __repr__(self):
return "PhyloXML project <%s>" %hex(hash(self))
def __init__(self, *args, **kargs):
_phyloxml.Phyloxml.__init__(self, *args, **kargs)
def build_from_file(self, fname):
doc = _phyloxml.parsexml_(fname)
rootNode = doc.getroot()
rootTag, rootClass = _phyloxml.get_root_tag(rootNode)
if rootClass is None:
rootTag = 'phyloxml'
rootClass = self.__class__
self.build(rootNode)
def export(self, outfile=stdout, level=0):
namespace = 'xmlns:phy="http://www.phyloxml.org/1.10/phyloxml.xsd"'
return super(Phyloxml, self).export(outfile=outfile, level=level, namespacedef_=namespace)
|
(*args, **kargs)
|
721,123 |
ete3.phyloxml
|
__init__
| null |
def __init__(self, *args, **kargs):
_phyloxml.Phyloxml.__init__(self, *args, **kargs)
|
(self, *args, **kargs)
|
721,124 |
ete3.phyloxml
|
__repr__
| null |
def __repr__(self):
return "PhyloXML project <%s>" %hex(hash(self))
|
(self)
|
721,125 |
ete3.phyloxml._phyloxml
|
add_phylogeny
| null |
def add_phylogeny(self, value): self.phylogeny.append(value)
|
(self, value)
|
721,127 |
ete3.phyloxml._phyloxml
|
buildAttributes
| null |
def buildAttributes(self, node, attrs, already_processed):
pass
|
(self, node, attrs, already_processed)
|
721,128 |
ete3.phyloxml._phyloxml
|
buildChildren
| null |
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
    """Materialize a Phylogeny object for each <phylogeny> child element."""
    if nodeName_ != 'phylogeny':
        return
    phylo = Phylogeny.factory()
    phylo.build(child_)
    self.phylogeny.append(phylo)
|
(self, child_, node, nodeName_, fromsubclass_=False)
|
721,129 |
ete3.phyloxml
|
build_from_file
| null |
def build_from_file(self, fname):
    """Parse the phyloXML document *fname* and build this project from it."""
    doc = _phyloxml.parsexml_(fname)
    rootNode = doc.getroot()
    rootTag, rootClass = _phyloxml.get_root_tag(rootNode)
    if rootClass is None:
        # NOTE(review): rootTag/rootClass are assigned but unused below;
        # build() always receives the raw root node. Verify intent.
        rootTag = 'phyloxml'
        rootClass = self.__class__
    self.build(rootNode)
|
(self, fname)
|
721,130 |
ete3.phyloxml
|
export
| null |
def export(self, outfile=stdout, level=0):
    """Write this project as phyloXML to *outfile*, adding the phy: namespace."""
    namespace = 'xmlns:phy="http://www.phyloxml.org/1.10/phyloxml.xsd"'
    return super(Phyloxml, self).export(outfile=outfile, level=level, namespacedef_=namespace)
|
(self, outfile=<_io.TextIOWrapper name='<stdout>' mode='w' encoding='utf-8'>, level=0)
|
721,131 |
ete3.phyloxml._phyloxml
|
exportAttributes
| null |
def exportAttributes(self, outfile, level, already_processed, namespace_='phy:', name_='Phyloxml'):
    # Intentional no-op: the root element writes no XML attributes itself.
    pass
|
(self, outfile, level, already_processed, namespace_='phy:', name_='Phyloxml')
|
721,132 |
ete3.phyloxml._phyloxml
|
exportChildren
| null |
def exportChildren(self, outfile, level, namespace_='phy:', name_='Phyloxml', fromsubclass_=False):
    """Export every stored phylogeny as a <phylogeny> child element."""
    for item in self.phylogeny:
        item.export(outfile, level, namespace_, name_='phylogeny')
|
(self, outfile, level, namespace_='phy:', name_='Phyloxml', fromsubclass_=False)
|
721,133 |
ete3.phyloxml._phyloxml
|
exportLiteral
| null |
def exportLiteral(self, outfile, level, name_='Phyloxml'):
    """Write a Python-literal rendering of this element's attributes/children."""
    level += 1
    self.exportLiteralAttributes(outfile, level, [], name_)
    if not self.hasContent_():
        return
    self.exportLiteralChildren(outfile, level, name_)
|
(self, outfile, level, name_='Phyloxml')
|
721,134 |
ete3.phyloxml._phyloxml
|
exportLiteralAttributes
| null |
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
    # Intentional no-op: no literal-format attributes for the root element.
    pass
|
(self, outfile, level, already_processed, name_)
|
721,135 |
ete3.phyloxml._phyloxml
|
exportLiteralChildren
| null |
def exportLiteralChildren(self, outfile, level, name_):
    """Emit the phylogeny list as a Python literal block."""
    showIndent(outfile, level)
    outfile.write('phylogeny=[\n')
    inner = level + 1
    for child in self.phylogeny:
        showIndent(outfile, inner)
        outfile.write('model_.Phylogeny(\n')
        child.exportLiteral(outfile, inner, name_='Phylogeny')
        showIndent(outfile, inner)
        outfile.write('),\n')
    showIndent(outfile, level)
    outfile.write('],\n')
|
(self, outfile, level, name_)
|
721,136 |
ete3.phyloxml._phyloxml
|
factory
| null |
def factory(*args_, **kwargs_):
    """Create a Phyloxml instance, honoring any registered subclass."""
    cls = Phyloxml.subclass if Phyloxml.subclass else Phyloxml
    return cls(*args_, **kwargs_)
|
(*args_, **kwargs_)
|
721,158 |
ete3.phyloxml._phyloxml
|
get_phylogeny
| null |
def get_phylogeny(self):
    """Return the internal list of phylogenies."""
    stored = self.phylogeny
    return stored
|
(self)
|
721,159 |
ete3.phyloxml._phyloxml
|
hasContent_
| null |
def hasContent_(self):
    """True when at least one phylogeny is stored."""
    return bool(self.phylogeny)
|
(self)
|
721,160 |
ete3.phyloxml._phyloxml
|
insert_phylogeny
| null |
def insert_phylogeny(self, index, value):
    """Overwrite the phylogeny at *index* (item assignment, not list.insert)."""
    self.phylogeny[index] = value
|
(self, index, value)
|
721,161 |
ete3.phyloxml._phyloxml
|
set_phylogeny
| null |
def set_phylogeny(self, phylogeny):
    """Replace the whole phylogeny list."""
    self.phylogeny = phylogeny
|
(self, phylogeny)
|
721,162 |
ete3.phyloxml._phyloxml_tree
|
PhyloxmlTree
|
PhyloTree object supporting phyloXML format.
|
class PhyloxmlTree(PhyloTree):
    ''' PhyloTree object supporting phyloXML format. '''
    def __repr__(self):
        return "PhyloXML ETE tree <%s>" %hex(hash(self))
    def _get_dist(self):
        # Prefer the branch_length attribute form; fall back to the element
        # form, seeding the stored default when neither is set yet.
        v = self.phyloxml_clade.get_branch_length_attr()
        if v is None:
            v = self.phyloxml_clade.get_branch_length()
        if v is None:
            self._set_dist(self._dist)
            v = self.phyloxml_clade.get_branch_length_attr()
        return float(v)
    def _set_dist(self, value):
        try:
            # Keep both storage forms of branch_length in sync.
            self.phyloxml_clade.set_branch_length(float(value))
            self.phyloxml_clade.set_branch_length_attr(float(value))
        except ValueError:
            raise
    def _get_support(self):
        # Lazily create a default confidence entry on first access.
        if len(self.phyloxml_clade.confidence) == 0:
            _c = Confidence(valueOf_=1.0, type_="branch_support")
            self.phyloxml_clade.add_confidence(_c)
        return float(self.phyloxml_clade.confidence[0].valueOf_)
    def _set_support(self, value):
        self._get_support()
        self.phyloxml_clade.confidence[0].valueOf_ = float(value)
    def _get_name(self):
        return self.phyloxml_clade.get_name()
    def _set_name(self, value):
        try:
            self.phyloxml_clade.set_name(value)
        except ValueError:
            raise
    def _get_children(self):
        return self.phyloxml_clade.clade
    # ETE node attributes are proxied onto the underlying phyloXML objects.
    dist = property(fget=_get_dist, fset=_set_dist)
    support = property(fget=_get_support, fset=_set_support)
    children = property(fget=_get_children)
    name = property(fget=_get_name, fset=_set_name)
    def __init__(self, phyloxml_clade=None, phyloxml_phylogeny=None, **kargs):
        # Wrap the given phyloXML objects, or create fresh defaults.
        if not phyloxml_phylogeny:
            self.phyloxml_phylogeny = Phylogeny()
        else:
            self.phyloxml_phylogeny = phyloxml_phylogeny
        if not phyloxml_clade:
            self.phyloxml_clade = Clade()
            self.phyloxml_clade.set_branch_length(0.0)
            self.phyloxml_clade.set_name("NoName")
            #self.__support = Confidence(valueOf_=1.0, type_="branch_support")
            #self.phyloxml_clade.add_confidence(self.__support)
        else:
            self.phyloxml_clade = phyloxml_clade
        super(PhyloxmlTree, self).__init__(**kargs)
    def build(self, node):
        # Populate this node from an XML element (<phylogeny> or <clade>).
        nodetype = Tag_pattern_.match(node.tag).groups()[-1]
        if nodetype == 'phylogeny':
            self.phyloxml_phylogeny.buildAttributes(node, node.attrib, [])
        elif nodetype == 'clade':
            if "branch_length" in node.attrib:
                # Mirror the XML attribute so both storage forms are filled.
                node.attrib["branch_length_attr"] = node.attrib["branch_length"]
            self.phyloxml_clade.buildAttributes(node, node.attrib, [])
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_, nodetype=nodetype)
    def buildChildren(self, child_, node, nodeName_, fromsubclass=False, nodetype=None):
        # Nested <clade> elements become new tree nodes; everything else is
        # delegated to the generated phyloXML builders.
        if nodetype == 'phylogeny':
            baseclass = self.phyloxml_phylogeny
            if nodeName_ == 'clade':
                self.build(child_)
            else:
                baseclass.buildChildren(child_, node, nodeName_)
        elif nodetype == 'clade':
            baseclass = self.phyloxml_clade
            if nodeName_ == 'clade':
                new_node = self.add_child()
                new_node.build(child_)
            else:
                baseclass.buildChildren(child_, node, nodeName_)
    def export(self, outfile=sys.stdout, level=0, namespace_='phy:', name_='Phylogeny', namespacedef_=''):
        # Root emits the enclosing <phylogeny>; inner nodes emit plain
        # <clade> elements. Children are re-linked before export.
        if not self.up:
            self.phyloxml_phylogeny.clade = self.phyloxml_clade
            self.phyloxml_clade.clade = self.children
            self.phyloxml_phylogeny.export(outfile=outfile, level=level, name_=name_, namespacedef_=namespacedef_)
        else:
            self.phyloxml_clade.clade = self.children
            self.phyloxml_clade.export(outfile=outfile, level=level, name_=name_, namespacedef_=namespacedef_)
|
(phyloxml_clade=None, phyloxml_phylogeny=None, **kargs)
|
721,168 |
ete3.phyloxml._phyloxml_tree
|
__init__
| null |
def __init__(self, phyloxml_clade=None, phyloxml_phylogeny=None, **kargs):
    """Wrap (or create with defaults) the phyloXML phylogeny/clade objects."""
    if not phyloxml_phylogeny:
        self.phyloxml_phylogeny = Phylogeny()
    else:
        self.phyloxml_phylogeny = phyloxml_phylogeny
    if not phyloxml_clade:
        # Fresh clade gets a zero branch length and a placeholder name.
        self.phyloxml_clade = Clade()
        self.phyloxml_clade.set_branch_length(0.0)
        self.phyloxml_clade.set_name("NoName")
        #self.__support = Confidence(valueOf_=1.0, type_="branch_support")
        #self.phyloxml_clade.add_confidence(self.__support)
    else:
        self.phyloxml_clade = phyloxml_clade
    super(PhyloxmlTree, self).__init__(**kargs)
|
(self, phyloxml_clade=None, phyloxml_phylogeny=None, **kargs)
|
721,172 |
ete3.phyloxml._phyloxml_tree
|
__repr__
| null |
def __repr__(self):
    """Short identity string for this phyloXML-backed tree node."""
    return "PhyloXML ETE tree <{}>".format(hex(hash(self)))
|
(self)
|
721,176 |
ete3.phyloxml._phyloxml_tree
|
_get_children
| null |
def _get_children(self):
    """Children are the clade elements nested under this node's clade."""
    nested = self.phyloxml_clade.clade
    return nested
|
(self)
|
721,177 |
ete3.phyloxml._phyloxml_tree
|
_get_dist
| null |
def _get_dist(self):
    """Branch length, preferring the attribute form; seeds a default when unset."""
    length = self.phyloxml_clade.get_branch_length_attr()
    if length is None:
        length = self.phyloxml_clade.get_branch_length()
    if length is None:
        # Neither form set yet: persist the stored default, then re-read.
        self._set_dist(self._dist)
        length = self.phyloxml_clade.get_branch_length_attr()
    return float(length)
|
(self)
|
721,180 |
ete3.phyloxml._phyloxml_tree
|
_get_name
| null |
def _get_name(self):
    """Node name as stored on the phyloXML clade."""
    clade_name = self.phyloxml_clade.get_name()
    return clade_name
|
(self)
|
721,183 |
ete3.phyloxml._phyloxml_tree
|
_get_support
| null |
def _get_support(self):
    """Support value from the first confidence entry; creates one if missing."""
    if not self.phyloxml_clade.confidence:
        self.phyloxml_clade.add_confidence(
            Confidence(valueOf_=1.0, type_="branch_support"))
    return float(self.phyloxml_clade.confidence[0].valueOf_)
|
(self)
|
721,189 |
ete3.phyloxml._phyloxml_tree
|
_set_dist
| null |
def _set_dist(self, value):
    """Store *value* on the clade in both branch-length forms (float-coercible)."""
    length = float(value)  # ValueError propagates for bad input, as before
    self.phyloxml_clade.set_branch_length(length)
    self.phyloxml_clade.set_branch_length_attr(length)
|
(self, value)
|
721,191 |
ete3.phyloxml._phyloxml_tree
|
_set_name
| null |
def _set_name(self, value):
    """Store *value* as the clade name (any ValueError propagates unchanged)."""
    self.phyloxml_clade.set_name(value)
|
(self, value)
|
721,194 |
ete3.phyloxml._phyloxml_tree
|
_set_support
| null |
def _set_support(self, value):
    """Set branch support, creating the confidence entry first if needed."""
    self._get_support()  # guarantees confidence[0] exists
    self.phyloxml_clade.confidence[0].valueOf_ = float(value)
|
(self, value)
|
721,202 |
ete3.phyloxml._phyloxml_tree
|
build
| null |
def build(self, node):
    """Populate this node from an XML element (<phylogeny> or <clade>)."""
    nodetype = Tag_pattern_.match(node.tag).groups()[-1]
    if nodetype == 'phylogeny':
        self.phyloxml_phylogeny.buildAttributes(node, node.attrib, [])
    elif nodetype == 'clade':
        if "branch_length" in node.attrib:
            # Mirror the XML attribute so both storage forms are filled.
            node.attrib["branch_length_attr"] = node.attrib["branch_length"]
        self.phyloxml_clade.buildAttributes(node, node.attrib, [])
    for child in node:
        nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
        self.buildChildren(child, node, nodeName_, nodetype=nodetype)
|
(self, node)
|
721,203 |
ete3.phyloxml._phyloxml_tree
|
buildChildren
| null |
def buildChildren(self, child_, node, nodeName_, fromsubclass=False, nodetype=None):
    """Dispatch a child XML element to the tree or the phyloXML builders.

    Nested <clade> elements become new tree nodes; anything else is
    delegated to the generated base-class builder for the current scope.
    """
    if nodetype == 'phylogeny':
        baseclass = self.phyloxml_phylogeny
        if nodeName_ == 'clade':
            # Root clade: build it onto this same node.
            self.build(child_)
        else:
            baseclass.buildChildren(child_, node, nodeName_)
    elif nodetype == 'clade':
        baseclass = self.phyloxml_clade
        if nodeName_ == 'clade':
            # Nested clade: create a child tree node and recurse.
            new_node = self.add_child()
            new_node.build(child_)
        else:
            baseclass.buildChildren(child_, node, nodeName_)
|
(self, child_, node, nodeName_, fromsubclass=False, nodetype=None)
|
721,215 |
ete3.phyloxml._phyloxml_tree
|
export
| null |
def export(self, outfile=sys.stdout, level=0, namespace_='phy:', name_='Phylogeny', namespacedef_=''):
    """Write this subtree as phyloXML; the root also emits the phylogeny wrapper."""
    if not self.up:
        # Root node: re-link clade/children into the phylogeny before export.
        self.phyloxml_phylogeny.clade = self.phyloxml_clade
        self.phyloxml_clade.clade = self.children
        self.phyloxml_phylogeny.export(outfile=outfile, level=level, name_=name_, namespacedef_=namespacedef_)
    else:
        self.phyloxml_clade.clade = self.children
        self.phyloxml_clade.export(outfile=outfile, level=level, name_=name_, namespacedef_=namespacedef_)
|
(self, outfile=<_io.TextIOWrapper name='<stdout>' mode='w' encoding='utf-8'>, level=0, namespace_='phy:', name_='Phylogeny', namespacedef_='')
|
721,279 |
ete3.coretype.seqgroup
|
SeqGroup
|
SeqGroup class can be used to store a set of sequences (aligned
or not).
:argument sequences: Path to the file containing the sequences or,
alternatively, the text string containing the same
information.
:argument fasta format: the format in which sequences are
encoded. Current supported formats are: ``fasta``, ``phylip``
(phylip sequencial) and ``iphylip`` (phylip
interleaved). Phylip format forces sequence names to a maximum
of 10 chars. To avoid this effect, you can use the relaxed
phylip format: ``phylip_relaxed`` and ``iphylip_relaxed``.
::
msf = ">seq1\nAAAAAAAAAAA\n>seq2\nTTTTTTTTTTTTT\n"
seqs = SeqGroup(msf, format="fasta")
print seqs.get_seq("seq1")
|
class SeqGroup(object):
    """
    SeqGroup class can be used to store a set of sequences (aligned
    or not).
    :argument sequences: Path to the file containing the sequences or,
    alternatively, the text string containing the same
    information.
    :argument fasta format: the format in which sequences are
    encoded. Current supported formats are: ``fasta``, ``phylip``
    (phylip sequencial) and ``iphylip`` (phylip
    interleaved). Phylip format forces sequence names to a maximum
    of 10 chars. To avoid this effect, you can use the relaxed
    phylip format: ``phylip_relaxed`` and ``iphylip_relaxed``.
    ::
        msf = ">seq1\\nAAAAAAAAAAA\\n>seq2\\nTTTTTTTTTTTTT\\n"
        seqs = SeqGroup(msf, format="fasta")
        print seqs.get_seq("seq1")
    """
    def __len__(self):
        # Number of stored sequences.
        return len(self.id2seq)
    def __contains__(self, item):
        # Membership is tested by sequence name.
        return item in self.name2id
    def __str__(self):
        # FASTA is the default text rendering.
        return write_fasta(self)
    def __iter__(self):
        # Yields (name, sequence, comments) tuples.
        return self.iter_entries()
    def __init__(self, sequences=None , format="fasta", fix_duplicates=True, **kwargs):
        # Maps format name -> [reader, writer, default reader/writer kwargs].
        self.parsers = {
            "fasta": [read_fasta, write_fasta, {}],
            "phylip": [read_phylip, write_phylip, {"interleaved":False, "relaxed":False}],
            "iphylip": [read_phylip, write_phylip, {"interleaved":True, "relaxed":False}],
            "phylip_relaxed": [read_phylip, write_phylip, {"interleaved":False, "relaxed":True}],
            "iphylip_relaxed": [read_phylip, write_phylip, {"interleaved":True, "relaxed":True}],
            "paml" : [read_paml , write_paml , kwargs ]
        }
        # Internal numeric ids decouple sequence names from storage.
        self.id2name = {}
        self.name2id = {}
        self.id2comment= {}
        self.id2seq = {}
        if sequences is not None:
            format = format.lower()
            if format in self.parsers:
                read = self.parsers[format][0]
                args = self.parsers[format][2]
                read(sequences, obj=self, fix_duplicates=fix_duplicates, **args)
            else:
                raise ValueError("Unsupported format: [%s]" %format)
    def __repr__(self):
        return "SeqGroup (%s)" %hex(self.__hash__())
    def write(self, format="fasta", outfile=None):
        """ Returns the text representation of the sequences in the
        supplied given format (default=FASTA). If "oufile" argument is
        used, the result is written into the given path."""
        format = format.lower()
        if format in self.parsers:
            write = self.parsers[format][1]
            args = self.parsers[format][2]
            return write(self, outfile, **args)
        else:
            raise ValueError("Unsupported format: [%s]" %format)
    def iter_entries(self):
        """ Returns an iterator over all sequences in the
        collection. Each item is a tuple with the sequence name,
        sequence, and sequence comments """
        for i, seq in six.iteritems(self.id2seq):
            yield self.id2name[i], seq, self.id2comment.get(i, [])
    def get_seq(self, name):
        """ Returns the sequence associated to a given entry name."""
        return self.id2seq[self.name2id[name]]
    def get_entries(self):
        """ Returns the list of entries currently stored."""
        keys = list(self.id2seq.keys())
        seqs = list(self.id2seq.values())
        comments = [self.id2comment.get(x, []) for x in keys]
        names = [self.id2name[x] for x in keys]
        return list(zip(names, seqs, comments))
    def set_seq(self, name, seq, comments = None):
        """Updates or adds a sequence """
        if comments is None:
            comments = []
        name = name.strip()
        # Strip common whitespace characters from the raw sequence string.
        seq = seq.replace(" ", "")
        seq = seq.replace("\t", "")
        seq = seq.replace("\n", "")
        seq = seq.replace("\r", "")
        # Reuse the existing id for this name, or allocate the next one.
        seqid = self.name2id.get(name, max([0]+list(self.name2id.values()))+1)
        self.name2id[name] = seqid
        self.id2name[seqid] = name
        self.id2comment[seqid] = comments
        self.id2seq[seqid] = seq
|
(sequences=None, format='fasta', fix_duplicates=True, **kwargs)
|
721,280 |
ete3.coretype.seqgroup
|
__contains__
| null |
def __contains__(self, item):
    """Membership test by sequence name."""
    known_names = self.name2id
    return item in known_names
|
(self, item)
|
721,281 |
ete3.coretype.seqgroup
|
__init__
| null |
def __init__(self, sequences=None , format="fasta", fix_duplicates=True, **kwargs):
    """Initialize the collection, optionally parsing *sequences* in *format*."""
    # Maps format name -> [reader, writer, default reader/writer kwargs].
    self.parsers = {
        "fasta": [read_fasta, write_fasta, {}],
        "phylip": [read_phylip, write_phylip, {"interleaved":False, "relaxed":False}],
        "iphylip": [read_phylip, write_phylip, {"interleaved":True, "relaxed":False}],
        "phylip_relaxed": [read_phylip, write_phylip, {"interleaved":False, "relaxed":True}],
        "iphylip_relaxed": [read_phylip, write_phylip, {"interleaved":True, "relaxed":True}],
        "paml" : [read_paml , write_paml , kwargs ]
    }
    # Internal numeric ids decouple sequence names from storage.
    self.id2name = {}
    self.name2id = {}
    self.id2comment= {}
    self.id2seq = {}
    if sequences is not None:
        format = format.lower()
        if format in self.parsers:
            read = self.parsers[format][0]
            args = self.parsers[format][2]
            read(sequences, obj=self, fix_duplicates=fix_duplicates, **args)
        else:
            raise ValueError("Unsupported format: [%s]" %format)
|
(self, sequences=None, format='fasta', fix_duplicates=True, **kwargs)
|
721,282 |
ete3.coretype.seqgroup
|
__iter__
| null |
def __iter__(self):
    """Iterate over (name, sequence, comments) tuples."""
    return self.iter_entries()
|
(self)
|
721,283 |
ete3.coretype.seqgroup
|
__len__
| null |
def __len__(self):
    """Number of stored sequences."""
    return len(self.id2seq)
|
(self)
|
721,284 |
ete3.coretype.seqgroup
|
__repr__
| null |
def __repr__(self):
    """Short identity string for this sequence collection."""
    return "SeqGroup ({})".format(hex(self.__hash__()))
|
(self)
|
721,285 |
ete3.coretype.seqgroup
|
__str__
| null |
def __str__(self):
    """Render the whole collection as FASTA text."""
    return write_fasta(self)
|
(self)
|
721,286 |
ete3.coretype.seqgroup
|
get_entries
|
Returns the list of entries currently stored.
|
def get_entries(self):
    """Return [(name, sequence, comments), ...] for all stored sequences."""
    return [(self.id2name[seq_id], seq, self.id2comment.get(seq_id, []))
            for seq_id, seq in self.id2seq.items()]
|
(self)
|
721,287 |
ete3.coretype.seqgroup
|
get_seq
|
Returns the sequence associated to a given entry name.
|
def get_seq(self, name):
    """Return the sequence stored under *name* (KeyError if unknown)."""
    seq_id = self.name2id[name]
    return self.id2seq[seq_id]
|
(self, name)
|
721,288 |
ete3.coretype.seqgroup
|
iter_entries
|
Returns an iterator over all sequences in the
collection. Each item is a tuple with the sequence name,
sequence, and sequence comments
|
def iter_entries(self):
    """Yield (name, sequence, comments) for every stored sequence."""
    for seq_id, seq in self.id2seq.items():
        yield self.id2name[seq_id], seq, self.id2comment.get(seq_id, [])
|
(self)
|
721,289 |
ete3.coretype.seqgroup
|
set_seq
|
Updates or adds a sequence
|
def set_seq(self, name, seq, comments=None):
    """Add or replace the sequence stored under *name*."""
    if comments is None:
        comments = []
    name = name.strip()
    # Remove spaces, tabs and line breaks from the raw sequence string.
    seq = seq.translate({ord(ch): None for ch in " \t\n\r"})
    known = self.name2id
    seqid = known.get(name)
    if seqid is None:
        # Allocate the next numeric id (ids start at 1).
        seqid = max([0] + list(known.values())) + 1
    known[name] = seqid
    self.id2name[seqid] = name
    self.id2comment[seqid] = comments
    self.id2seq[seqid] = seq
|
(self, name, seq, comments=None)
|
721,290 |
ete3.coretype.seqgroup
|
write
|
Returns the text representation of the sequences in the
supplied given format (default=FASTA). If "oufile" argument is
used, the result is written into the given path.
|
def write(self, format="fasta", outfile=None):
    """Serialize the collection in *format* (default FASTA).

    If *outfile* is given, the writer receives it as the output path.
    Raises ValueError for unknown formats.
    """
    fmt = format.lower()
    if fmt not in self.parsers:
        raise ValueError("Unsupported format: [%s]" % fmt)
    _, writer, args = self.parsers[fmt]
    return writer(self, outfile, **args)
|
(self, format='fasta', outfile=None)
|
721,291 |
ete3.coretype.tree
|
TreeNode
|
TreeNode (Tree) class is used to store a tree structure. A tree
consists of a collection of TreeNode instances connected in a
hierarchical way. Trees can be loaded from the New Hampshire Newick
format (newick).
:argument newick: Path to the file containing the tree or, alternatively,
the text string containing the same information.
:argument 0 format: subnewick format
.. table::
====== ==============================================
FORMAT DESCRIPTION
====== ==============================================
0 flexible with support values
1 flexible with internal node names
2 all branches + leaf names + internal supports
3 all branches + all names
4 leaf branches + leaf names
5 internal and leaf branches + leaf names
6 internal branches + leaf names
7 leaf branches + all names
8 all names
9 leaf names
100 topology only
====== ==============================================
:returns: a tree node object which represents the base of the tree.
**Examples:**
::
t1 = Tree() # creates an empty tree
t2 = Tree('(A:1,(B:1,(C:1,D:1):0.5):0.5);')
t3 = Tree('/home/user/myNewickFile.txt')
|
class TreeNode(object):
"""
TreeNode (Tree) class is used to store a tree structure. A tree
consists of a collection of TreeNode instances connected in a
hierarchical way. Trees can be loaded from the New Hampshire Newick
format (newick).
:argument newick: Path to the file containing the tree or, alternatively,
the text string containing the same information.
:argument 0 format: subnewick format
.. table::
====== ==============================================
FORMAT DESCRIPTION
====== ==============================================
0 flexible with support values
1 flexible with internal node names
2 all branches + leaf names + internal supports
3 all branches + all names
4 leaf branches + leaf names
5 internal and leaf branches + leaf names
6 internal branches + leaf names
7 leaf branches + all names
8 all names
9 leaf names
100 topology only
====== ==============================================
:returns: a tree node object which represents the base of the tree.
**Examples:**
::
t1 = Tree() # creates an empty tree
t2 = Tree('(A:1,(B:1,(C:1,D:1):0.5):0.5);')
t3 = Tree('/home/user/myNewickFile.txt')
"""
def _get_dist(self):
return self._dist
def _set_dist(self, value):
try:
self._dist = float(value)
except ValueError:
raise TreeError('node dist must be a float number')
def _get_support(self):
return self._support
def _set_support(self, value):
try:
self._support = float(value)
except ValueError:
raise TreeError('node support must be a float number')
def _get_up(self):
return self._up
def _set_up(self, value):
if type(value) == type(self) or value is None:
self._up = value
else:
raise TreeError("bad node_up type")
def _get_children(self):
return self._children
def _set_children(self, value):
if type(value) == list:
for n in value:
if type(n) != type(self):
raise TreeError("Incorrect child node type")
self._children = value
else:
raise TreeError("Incorrect children type")
def _get_style(self):
if self._img_style is None:
self._set_style(None)
return self._img_style
def _set_style(self, value):
self.set_style(value)
#: Branch length distance to parent node. Default = 0.0
img_style = property(fget=_get_style, fset=_set_style)
#: Branch length distance to parent node. Default = 0.0
dist = property(fget=_get_dist, fset=_set_dist)
#: Branch support for current node
support = property(fget=_get_support, fset=_set_support)
#: Pointer to parent node
up = property(fget=_get_up, fset=_set_up)
#: A list of children nodes
children = property(fget=_get_children, fset=_set_children)
def _set_face_areas(self, value):
if isinstance(value, _FaceAreas):
self._faces = value
else:
raise ValueError("[%s] is not a valid FaceAreas instance" %type(value))
def _get_face_areas(self):
if not hasattr(self, "_faces"):
self._faces = _FaceAreas()
return self._faces
faces = property(fget=_get_face_areas, \
fset=_set_face_areas)
def __init__(self, newick=None, format=0, dist=None, support=None,
name=None, quoted_node_names=False):
self._children = []
self._up = None
self._dist = DEFAULT_DIST
self._support = DEFAULT_SUPPORT
self._img_style = None
self.features = set([])
# Add basic features
self.features.update(["dist", "support", "name"])
if dist is not None:
self.dist = dist
if support is not None:
self.support = support
self.name = name if name is not None else DEFAULT_NAME
# Initialize tree
if newick is not None:
self._dist = 0.0
read_newick(newick, root_node = self, format=format,
quoted_names=quoted_node_names)
def __nonzero__(self):
return True
def __bool__(self):
"""
Python3's equivalent of __nonzero__
If this is not defined bool(class_instance) will call
__len__ in python3
"""
return True
def __repr__(self):
return "Tree node '%s' (%s)" %(self.name, hex(self.__hash__()))
def __and__(self, value):
""" This allows to execute tree&'A' to obtain the descendant node
whose name is A"""
value=str(value)
try:
first_match = next(self.iter_search_nodes(name=value))
return first_match
except StopIteration:
raise TreeError("Node not found")
def __add__(self, value):
""" This allows to sum two trees."""
# Should a make the sum with two copies of the original trees?
if type(value) == self.__class__:
new_root = self.__class__()
new_root.add_child(self)
new_root.add_child(value)
return new_root
else:
raise TreeError("Invalid node type")
def __str__(self):
""" Print tree in newick format. """
return self.get_ascii(compact=DEFAULT_COMPACT, \
show_internal=DEFAULT_SHOWINTERNAL)
def __contains__(self, item):
""" Check if item belongs to this node. The 'item' argument must
be a node instance or its associated name."""
if isinstance(item, self.__class__):
return item in set(self.get_descendants())
elif type(item)==str:
return item in set([n.name for n in self.traverse()])
def __len__(self):
"""Node len returns number of children."""
return len(self.get_leaves())
def __iter__(self):
""" Iterator over leaf nodes"""
return self.iter_leaves()
def add_feature(self, pr_name, pr_value):
"""
Add or update a node's feature.
"""
setattr(self, pr_name, pr_value)
self.features.add(pr_name)
def add_features(self, **features):
"""
Add or update several features. """
for fname, fvalue in six.iteritems(features):
setattr(self, fname, fvalue)
self.features.add(fname)
def del_feature(self, pr_name):
"""
Permanently deletes a node's feature.
"""
if hasattr(self, pr_name):
delattr(self, pr_name)
self.features.remove(pr_name)
# Topology management
def add_child(self, child=None, name=None, dist=None, support=None):
"""
Adds a new child to this node. If child node is not suplied
as an argument, a new node instance will be created.
:argument None child: the node instance to be added as a child.
:argument None name: the name that will be given to the child.
:argument None dist: the distance from the node to the child.
:argument None support: the support value of child partition.
:returns: The child node instance
"""
if child is None:
child = self.__class__()
if name is not None:
child.name = name
if dist is not None:
child.dist = dist
if support is not None:
child.support = support
self.children.append(child)
child.up = self
return child
def remove_child(self, child):
"""
Removes a child from this node (parent and child
nodes still exit but are no longer connected).
"""
try:
self.children.remove(child)
except ValueError as e:
raise TreeError("child not found")
else:
child.up = None
return child
def add_sister(self, sister=None, name=None, dist=None):
"""
Adds a sister to this node. If sister node is not supplied
as an argument, a new TreeNode instance will be created and
returned.
"""
if self.up is None:
raise TreeError("A parent node is required to add a sister")
else:
return self.up.add_child(child=sister, name=name, dist=dist)
def remove_sister(self, sister=None):
"""
Removes a sister node. It has the same effect as
**`TreeNode.up.remove_child(sister)`**
If a sister node is not supplied, the first sister will be deleted
and returned.
:argument sister: A node instance
:return: The node removed
"""
sisters = self.get_sisters()
if len(sisters) > 0:
if sister is None:
sister = sisters.pop(0)
return self.up.remove_child(sister)
def delete(self, prevent_nondicotomic=True, preserve_branch_length=False):
"""
Deletes node from the tree structure. Notice that this method
makes 'disappear' the node from the tree structure. This means
that children from the deleted node are transferred to the
next available parent.
:param True prevent_nondicotomic: When True (default), delete
function will be execute recursively to prevent
single-child nodes.
:param False preserve_branch_length: If True, branch lengths
of the deleted nodes are transferred (summed up) to its
parent's branch, thus keeping original distances among
nodes.
**Example:**
::
/ C
root-|
| / B
\--- H |
\ A
> H.delete() will produce this structure:
/ C
|
root-|--B
|
\ A
"""
parent = self.up
if parent:
if preserve_branch_length:
if len(self.children) == 1:
self.children[0].dist += self.dist
elif len(self.children) > 1:
parent.dist += self.dist
for ch in self.children:
parent.add_child(ch)
parent.remove_child(self)
# Avoids parents with only one child
if prevent_nondicotomic and parent and\
len(parent.children) < 2:
parent.delete(prevent_nondicotomic=False,
preserve_branch_length=preserve_branch_length)
def detach(self):
"""
Detachs this node (and all its descendants) from its parent
and returns the referent to itself.
Detached node conserves all its structure of descendants, and can
be attached to another node through the 'add_child' function. This
mechanism can be seen as a cut and paste.
"""
if self.up:
self.up.children.remove(self)
self.up = None
return self
def prune(self, nodes, preserve_branch_length=False):
"""Prunes the topology of a node to conserve only the selected list of leaf
internal nodes. The minimum number of nodes that conserve the
topological relationships among the requested nodes will be
retained. Root node is always conserved.
:var nodes: a list of node names or node objects that should be retained
:param False preserve_branch_length: If True, branch lengths
of the deleted nodes are transferred (summed up) to its
parent's branch, thus keeping original distances among
nodes.
**Examples:**
::
t1 = Tree('(((((A,B)C)D,E)F,G)H,(I,J)K)root;', format=1)
t1.prune(['A', 'B'])
# /-A
# /D /C|
# /F| \-B
# | |
# /H| \-E
# | | /-A
#-root \-G -root
# | \-B
# | /-I
# \K|
# \-J
t1 = Tree('(((((A,B)C)D,E)F,G)H,(I,J)K)root;', format=1)
t1.prune(['A', 'B', 'C'])
# /-A
# /D /C|
# /F| \-B
# | |
# /H| \-E
# | | /-A
#-root \-G -root- C|
# | \-B
# | /-I
# \K|
# \-J
t1 = Tree('(((((A,B)C)D,E)F,G)H,(I,J)K)root;', format=1)
t1.prune(['A', 'B', 'I'])
# /-A
# /D /C|
# /F| \-B
# | |
# /H| \-E /-I
# | | -root
#-root \-G | /-A
# | \C|
# | /-I \-B
# \K|
# \-J
t1 = Tree('(((((A,B)C)D,E)F,G)H,(I,J)K)root;', format=1)
t1.prune(['A', 'B', 'F', 'H'])
# /-A
# /D /C|
# /F| \-B
# | |
# /H| \-E
# | | /-A
#-root \-G -root-H /F|
# | \-B
# | /-I
# \K|
# \-J
"""
def cmp_nodes(x, y):
# if several nodes are in the same path of two kept nodes,
# only one should be maintained. This prioritize internal
# nodes that are already in the to_keep list and then
# deeper nodes (closer to the leaves).
if n2depth[x] > n2depth[y]:
return -1
elif n2depth[x] < n2depth[y]:
return 1
else:
return 0
to_keep = set(_translate_nodes(self, *nodes))
start, node2path = self.get_common_ancestor(to_keep, get_path=True)
to_keep.add(self)
# Calculate which kept nodes are visiting the same nodes in
# their path to the common ancestor.
n2count = {}
n2depth = {}
for seed, path in six.iteritems(node2path):
for visited_node in path:
if visited_node not in n2depth:
depth = visited_node.get_distance(start, topology_only=True)
n2depth[visited_node] = depth
if visited_node is not seed:
n2count.setdefault(visited_node, set()).add(seed)
# if several internal nodes are in the path of exactly the same kept
# nodes, only one (the deepest) should be maintain.
visitors2nodes = {}
for node, visitors in six.iteritems(n2count):
# keep nodes connection at least two other nodes
if len(visitors)>1:
visitor_key = frozenset(visitors)
visitors2nodes.setdefault(visitor_key, set()).add(node)
for visitors, nodes in six.iteritems(visitors2nodes):
if not (to_keep & nodes):
sorted_nodes = sorted(nodes, key=cmp_to_key(cmp_nodes))
to_keep.add(sorted_nodes[0])
for n in self.get_descendants('postorder'):
if n not in to_keep:
if preserve_branch_length:
if len(n.children) == 1:
n.children[0].dist += n.dist
elif len(n.children) > 1 and n.up:
n.up.dist += n.dist
n.delete(prevent_nondicotomic=False)
def swap_children(self):
"""
Swaps current children order.
"""
if len(self.children)>1:
self.children.reverse()
# #####################
# Tree traversing
# #####################
def get_children(self):
"""
Returns an independent list of node's children.
"""
return [ch for ch in self.children]
def get_sisters(self):
"""
Returns an independent list of sister nodes.
"""
if self.up is not None:
return [ch for ch in self.up.children if ch!=self]
else:
return []
def iter_leaves(self, is_leaf_fn=None):
"""
Returns an iterator over the leaves under this node.
:argument None is_leaf_fn: See :func:`TreeNode.traverse` for
documentation.
"""
for n in self.traverse(strategy="preorder", is_leaf_fn=is_leaf_fn):
if not is_leaf_fn:
if n.is_leaf():
yield n
else:
if is_leaf_fn(n):
yield n
def get_leaves(self, is_leaf_fn=None):
"""
Returns the list of terminal nodes (leaves) under this node.
:argument None is_leaf_fn: See :func:`TreeNode.traverse` for
documentation.
"""
return [n for n in self.iter_leaves(is_leaf_fn=is_leaf_fn)]
def iter_leaf_names(self, is_leaf_fn=None):
"""
Returns an iterator over the leaf names under this node.
:argument None is_leaf_fn: See :func:`TreeNode.traverse` for
documentation.
"""
for n in self.iter_leaves(is_leaf_fn=is_leaf_fn):
yield n.name
def get_leaf_names(self, is_leaf_fn=None):
"""
Returns the list of terminal node names under the current
node.
:argument None is_leaf_fn: See :func:`TreeNode.traverse` for
documentation.
"""
return [name for name in self.iter_leaf_names(is_leaf_fn=is_leaf_fn)]
def iter_descendants(self, strategy="levelorder", is_leaf_fn=None):
"""
Returns an iterator over all descendant nodes.
:argument None is_leaf_fn: See :func:`TreeNode.traverse` for
documentation.
"""
for n in self.traverse(strategy=strategy, is_leaf_fn=is_leaf_fn):
if n is not self:
yield n
def get_descendants(self, strategy="levelorder", is_leaf_fn=None):
"""
Returns a list of all (leaves and internal) descendant nodes.
:argument None is_leaf_fn: See :func:`TreeNode.traverse` for
documentation.
"""
return [n for n in self.iter_descendants(strategy=strategy, \
is_leaf_fn=is_leaf_fn)]
def traverse(self, strategy="levelorder", is_leaf_fn=None):
"""
Returns an iterator to traverse the tree structure under this
node.
:argument "levelorder" strategy: set the way in which tree
will be traversed. Possible values are: "preorder" (first
parent and then children) 'postorder' (first children and
the parent) and "levelorder" (nodes are visited in order
from root to leaves)
:argument None is_leaf_fn: If supplied, ``is_leaf_fn``
function will be used to interrogate nodes about if they
are terminal or internal. ``is_leaf_fn`` function should
receive a node instance as first argument and return True
or False. Use this argument to traverse a tree by
dynamically collapsing internal nodes matching
``is_leaf_fn``.
"""
if strategy=="preorder":
return self._iter_descendants_preorder(is_leaf_fn=is_leaf_fn)
elif strategy=="levelorder":
return self._iter_descendants_levelorder(is_leaf_fn=is_leaf_fn)
elif strategy=="postorder":
return self._iter_descendants_postorder(is_leaf_fn=is_leaf_fn)
    def iter_prepostorder(self, is_leaf_fn=None):
        """
        Iterate over all nodes in a tree yielding every node in both
        pre and post order. Each iteration returns a postorder flag
        (True if node is being visited in postorder) and a node
        instance.

        :argument None is_leaf_fn: optional callable used instead of
          :func:`is_leaf` to decide whether a node is terminal (its
          children are then not descended into).
        """
        # Stack-based traversal. A real node is yielded once in preorder;
        # it is then re-pushed wrapped in a [1, node] marker below its own
        # children, so it is yielded a second time (postorder) after all
        # of them have been processed.
        to_visit = [self]
        if is_leaf_fn is not None:
            _leaf = is_leaf_fn
        else:
            _leaf = self.__class__.is_leaf
        while to_visit:
            node = to_visit.pop(-1)
            try:
                # Markers are [1, node] lists; indexing a plain node object
                # raises TypeError, routing it to the preorder branch.
                node = node[1]
            except TypeError:
                # PREORDER ACTIONS
                yield (False, node)
                if not _leaf(node):
                    # ADD CHILDREN (marker last so it pops after children)
                    to_visit.extend(reversed(node.children + [[1, node]]))
            else:
                #POSTORDER ACTIONS
                yield (True, node)
    def _iter_descendants_postorder(self, is_leaf_fn=None):
        """
        Iterate over this node and all descendants in postorder
        (children before their parent).

        :argument None is_leaf_fn: optional callable replacing
          :func:`is_leaf` as the terminal-node test.
        """
        # Same sentinel trick as iter_prepostorder: internal nodes are
        # re-pushed as [1, node] markers below their children so they are
        # yielded only after the whole subtree has been emitted.
        to_visit = [self]
        if is_leaf_fn is not None:
            _leaf = is_leaf_fn
        else:
            _leaf = self.__class__.is_leaf
        while to_visit:
            node = to_visit.pop(-1)
            try:
                # Plain nodes raise TypeError on indexing; markers do not.
                node = node[1]
            except TypeError:
                # PREORDER ACTIONS
                if not _leaf(node):
                    # ADD CHILDREN
                    to_visit.extend(reversed(node.children + [[1, node]]))
                else:
                    yield node
            else:
                #POSTORDER ACTIONS
                yield node
def _iter_descendants_levelorder(self, is_leaf_fn=None):
"""
Iterate over all desdecendant nodes.
"""
tovisit = deque([self])
while len(tovisit)>0:
node = tovisit.popleft()
yield node
if not is_leaf_fn or not is_leaf_fn(node):
tovisit.extend(node.children)
def _iter_descendants_preorder(self, is_leaf_fn=None):
"""
Iterator over all descendant nodes.
"""
to_visit = deque()
node = self
while node is not None:
yield node
if not is_leaf_fn or not is_leaf_fn(node):
to_visit.extendleft(reversed(node.children))
try:
node = to_visit.popleft()
except:
node = None
def iter_ancestors(self):
'''versionadded: 2.2
Iterates over the list of all ancestor nodes from current node
to the current tree root.
'''
node = self
while node.up is not None:
yield node.up
node = node.up
def get_ancestors(self):
'''versionadded: 2.2
Returns the list of all ancestor nodes from current node to
the current tree root.
'''
return [n for n in self.iter_ancestors()]
    def describe(self):
        """
        Prints general information about this node and its
        connections: leaf count, total node count, rooting state,
        and the most distant leaf with its distance.
        """
        # A root with exactly two children is treated as a rooted tree;
        # more than two means an unrooted (multifurcating) root.
        if len(self.get_tree_root().children)==2:
            rooting = "Yes"
        elif len(self.get_tree_root().children)>2:
            rooting = "No"
        else:
            rooting = "No children"
        max_node, max_dist = self.get_farthest_leaf()
        # cached_content maps each node to the set of leaves below it.
        cached_content = self.get_cached_content()
        print("Number of leaf nodes:\t%d" % len(cached_content[self]))
        print("Total number of nodes:\t%d" % len(cached_content))
        print("Rooted:\t%s" %rooting)
        print("Most distant node:\t%s" %max_node.name)
        print("Max. distance:\t%f" %max_dist)
def write(self, features=None, outfile=None, format=0, is_leaf_fn=None,
format_root_node=False, dist_formatter=None, support_formatter=None,
name_formatter=None, quoted_node_names=False):
"""
Returns the newick representation of current node. Several
arguments control the way in which extra data is shown for
every node:
:argument features: a list of feature names to be exported
using the Extended Newick Format (i.e. features=["name",
"dist"]). Use an empty list to export all available features
in each node (features=[])
:argument outfile: writes the output to a given file
:argument format: defines the newick standard used to encode the
tree. See tutorial for details.
:argument False format_root_node: If True, it allows features
and branch information from root node to be exported as a
part of the newick text string. For newick compatibility
reasons, this is False by default.
:argument is_leaf_fn: See :func:`TreeNode.traverse` for
documentation.
**Example:**
::
t.write(features=["species","name"], format=1)
"""
nw = write_newick(self, features=features, format=format,
is_leaf_fn=is_leaf_fn,
format_root_node=format_root_node,
dist_formatter=dist_formatter,
support_formatter=support_formatter,
name_formatter=name_formatter,
quoted_names=quoted_node_names)
if outfile is not None:
with open(outfile, "w") as OUT:
OUT.write(nw)
else:
return nw
def get_tree_root(self):
"""
Returns the absolute root node of current tree structure.
"""
root = self
while root.up is not None:
root = root.up
return root
    def get_common_ancestor(self, *target_nodes, **kargs):
        """
        Returns the first common ancestor between this node and a given
        list of 'target_nodes'.

        Accepts either several node arguments or a single collection of
        nodes. If the keyword argument ``get_path`` is True, also
        returns a dict mapping each target node to the set of nodes on
        its path to the root.

        **Examples:**

        ::

          t = tree.Tree("(((A:0.1, B:0.01):0.001, C:0.0001):1.0[&&NHX:name=common], (D:0.00001):0.000001):2.0[&&NHX:name=root];")
          A = t.get_descendants_by_name("A")[0]
          C = t.get_descendants_by_name("C")[0]
          common =  A.get_common_ancestor(C)
          print common.name
        """
        get_path = kargs.get("get_path", False)
        # Allow a single collection argument instead of varargs.
        if len(target_nodes) == 1 and type(target_nodes[0]) \
                in set([set, tuple, list, frozenset]):
            target_nodes = target_nodes[0]
        # Convert node names into node instances
        target_nodes = _translate_nodes(self, *target_nodes)
        if type(target_nodes) != list:
            # If only one node is provided and is the same as the seed node,
            # return itself
            if target_nodes is self:
                if get_path:
                    return self, {}
                else:
                    return self
            else:
                #Otherwise find the common ancestor of current seed node and
                #the target_node provided
                target_nodes = [target_nodes, self]
        # n2path: node -> set of nodes on its path to the root.
        # reference: the ordered root-ward path of the first target; the
        # first of its entries contained in every other path is the LCA.
        n2path = {}
        reference = []
        ref_node = None
        for n in target_nodes:
            current = n
            while current:
                n2path.setdefault(n, set()).add(current)
                if not ref_node:
                    reference.append(current)
                current = current.up
            if not ref_node:
                ref_node = n
        common = None
        for n in reference:
            broken = False
            for node, path in six.iteritems(n2path):
                if node is not ref_node and n not in path:
                    broken = True
                    break
            if not broken:
                common = n
                break
        if not common:
            raise TreeError("Nodes are not connected!")
        if get_path:
            return common, n2path
        else:
            return common
def iter_search_nodes(self, **conditions):
"""
Search nodes in an iterative way. Matches are yielded as they
are being found. This avoids needing to scan the full tree
topology before returning the first matches. Useful when
dealing with huge trees.
"""
for n in self.traverse():
conditions_passed = 0
for key, value in six.iteritems(conditions):
if hasattr(n, key) and getattr(n, key) == value:
conditions_passed +=1
if conditions_passed == len(conditions):
yield n
def search_nodes(self, **conditions):
"""
Returns the list of nodes matching a given set of conditions.
**Example:**
::
tree.search_nodes(dist=0.0, name="human")
"""
matching_nodes = []
for n in self.iter_search_nodes(**conditions):
matching_nodes.append(n)
return matching_nodes
def get_leaves_by_name(self, name):
"""
Returns a list of leaf nodes matching a given name.
"""
return self.search_nodes(name=name, children=[])
def is_leaf(self):
"""
Return True if current node is a leaf.
"""
return len(self.children) == 0
def is_root(self):
"""
Returns True if current node has no parent
"""
if self.up is None:
return True
else:
return False
# ###########################
# Distance related functions
# ###########################
    def get_distance(self, target, target2=None, topology_only=False):
        """
        Returns the distance between two nodes. If only one target is
        specified, it returns the distance between the target and the
        current node.

        :argument target: a node within the same tree structure.

        :argument target2: a node within the same tree structure. If
          not specified, current node is used as target2.

        :argument False topology_only: If set to True, distance will
          refer to the number of nodes between target and target2.

        :returns: branch length distance between target and
          target2. If topology_only flag is True, returns the number
          of nodes between target and target2.
        """
        if target2 is None:
            target2 = self
            root = self.get_tree_root()
        else:
            # is target node under current node?
            root = self
        target, target2 = _translate_nodes(root, target, target2)
        ancestor = root.get_common_ancestor(target, target2)
        # Sum the branch lengths from each endpoint up to the common
        # ancestor; the two partial sums together give the path length.
        dist = 0.0
        for n in [target2, target]:
            current = n
            while current != ancestor:
                if topology_only:
                    # In topological mode the edge leaving `target` itself is
                    # not counted, so the result counts intermediate nodes.
                    if current!=target:
                        dist += 1
                else:
                    dist += current.dist
                current = current.up
        return dist
    def get_farthest_node(self, topology_only=False):
        """
        Returns the node's farthest descendant or ancestor node, and the
        distance to it.

        :argument False topology_only: If set to True, distance
          between nodes will be referred to the number of nodes
          between them. In other words, topological distance will be
          used instead of branch length distances.

        :return: A tuple containing the farthest node referred to the
          current node and the distance to it.
        """
        # Init farthest node to current farthest leaf
        farthest_node, farthest_dist = self.get_farthest_leaf(topology_only=topology_only)
        # Then climb towards the root; at each ancestor, check the farthest
        # leaf reachable through each sibling subtree (cdist tracks the
        # distance already climbed from self to that ancestor).
        prev = self
        cdist = 0.0 if topology_only else prev.dist
        current = prev.up
        while current is not None:
            for ch in current.children:
                if ch != prev:
                    if not ch.is_leaf():
                        fnode, fdist = ch.get_farthest_leaf(topology_only=topology_only)
                    else:
                        fnode = ch
                        fdist = 0
                    if topology_only:
                        fdist += 1.0
                    else:
                        fdist += ch.dist
                    if cdist+fdist > farthest_dist:
                        farthest_dist = cdist + fdist
                        farthest_node = fnode
            prev = current
            if topology_only:
                cdist += 1
            else:
                cdist += prev.dist
            current = prev.up
        return farthest_node, farthest_dist
    def _get_farthest_and_closest_leaves(self, topology_only=False, is_leaf_fn=None):
        """
        Returns ``(closest_leaf, min_dist, farthest_leaf, max_dist)``
        among the leaves under this node, using a single pre/post-order
        sweep that maintains the running distance from this node.
        """
        # if called from a leaf node, no necessary to compute
        if (is_leaf_fn and is_leaf_fn(self)) or self.is_leaf():
            return self, 0.0, self, 0.0

        min_dist = None
        min_node = None
        max_dist = None
        max_node = None
        # d accumulates the distance from self down to the node currently
        # being visited: edges are added on the preorder visit and removed
        # again on the postorder visit of each internal node.
        d = 0.0
        for post, n in self.iter_prepostorder(is_leaf_fn=is_leaf_fn):
            if n is self:
                continue
            if post:
                d -= n.dist if not topology_only else 1.0
            else:
                if (is_leaf_fn and is_leaf_fn(n)) or n.is_leaf():
                    # Leaves are never re-visited in postorder, so their own
                    # edge is folded into the total here instead.
                    total_d = d + n.dist if not topology_only else d
                    if min_dist is None or total_d < min_dist:
                        min_dist = total_d
                        min_node = n
                    if max_dist is None or total_d > max_dist:
                        max_dist = total_d
                        max_node = n
                else:
                    d += n.dist if not topology_only else 1.0
        return min_node, min_dist, max_node, max_dist
def get_farthest_leaf(self, topology_only=False, is_leaf_fn=None):
"""
Returns node's farthest descendant node (which is always a leaf), and the
distance to it.
:argument False topology_only: If set to True, distance
between nodes will be referred to the number of nodes
between them. In other words, topological distance will be
used instead of branch length distances.
:return: A tuple containing the farthest leaf referred to the
current node and the distance to it.
"""
min_node, min_dist, max_node, max_dist = self._get_farthest_and_closest_leaves(
topology_only=topology_only, is_leaf_fn=is_leaf_fn)
return max_node, max_dist
def get_closest_leaf(self, topology_only=False, is_leaf_fn=None):
"""Returns node's closest descendant leaf and the distance to
it.
:argument False topology_only: If set to True, distance
between nodes will be referred to the number of nodes
between them. In other words, topological distance will be
used instead of branch length distances.
:return: A tuple containing the closest leaf referred to the
current node and the distance to it.
"""
min_node, min_dist, max_node, max_dist = self._get_farthest_and_closest_leaves(
topology_only=topology_only, is_leaf_fn=is_leaf_fn)
return min_node, min_dist
    def get_midpoint_outgroup(self):
        """
        Returns the node that divides the current tree into two distance-balanced
        partitions.

        The midpoint is located on the longest leaf-to-leaf path (from
        leaf ``nA`` to node ``nB``); the returned node is the one whose
        parent edge contains that midpoint.
        """
        # Gets the farthest node to the current root
        root = self.get_tree_root()
        nA, r2A_dist = root.get_farthest_leaf()
        nB, A2B_dist = nA.get_farthest_node()

        outgroup = nA
        middist = A2B_dist / 2.0
        cdist = 0
        current = nA
        while current is not None:
            cdist += current.dist
            if cdist > (middist): # Stop climbing once past the midpoint
                break
            else:
                current = current.up
        # if we reached the root, the tree is already at midpoint. Return any child as valid outgroup
        if current is None:
            current = self.children[0]
        return current
def populate(self, size, names_library=None, reuse_names=False,
random_branches=False, branch_range=(0,1),
support_range=(0,1)):
"""
Generates a random topology by populating current node.
:argument None names_library: If provided, names library
(list, set, dict, etc.) will be used to name nodes.
:argument False reuse_names: If True, node names will not be
necessarily unique, which makes the process a bit more
efficient.
:argument False random_branches: If True, branch distances and support
values will be randomized.
:argument (0,1) branch_range: If random_branches is True, this
range of values will be used to generate random distances.
:argument (0,1) support_range: If random_branches is True,
this range of values will be used to generate random branch
support values.
"""
NewNode = self.__class__
if len(self.children) > 1:
connector = NewNode()
for ch in self.get_children():
ch.detach()
connector.add_child(child = ch)
root = NewNode()
self.add_child(child = connector)
self.add_child(child = root)
else:
root = self
next_deq = deque([root])
for i in range(size-1):
if random.randint(0, 1):
p = next_deq.pop()
else:
p = next_deq.popleft()
c1 = p.add_child()
c2 = p.add_child()
next_deq.extend([c1, c2])
if random_branches:
c1.dist = random.uniform(*branch_range)
c2.dist = random.uniform(*branch_range)
c1.support = random.uniform(*branch_range)
c2.support = random.uniform(*branch_range)
else:
c1.dist = 1.0
c2.dist = 1.0
c1.support = 1.0
c2.support = 1.0
# next contains leaf nodes
charset = "abcdefghijklmnopqrstuvwxyz"
if names_library:
names_library = deque(names_library)
else:
avail_names = itertools.combinations_with_replacement(charset, 10)
for n in next_deq:
if names_library:
if reuse_names:
tname = random.sample(names_library, 1)[0]
else:
tname = names_library.pop()
else:
tname = ''.join(next(avail_names))
n.name = tname
    def set_outgroup(self, outgroup):
        """
        Sets a descendant node as the outgroup of a tree. This function
        can be used to root a tree or even an internal node.

        :argument outgroup: a node instance within the same tree
          structure that will be used as a basal node.
        """
        outgroup = _translate_nodes(self, outgroup)

        if self == outgroup:
            raise TreeError("Cannot set myself as outgroup")

        parent_outgroup = outgroup.up

        # Detects (sub)tree root
        n = outgroup
        while n.up is not self:
            n = n.up

        # If outgroup is a child from root, but with more than one
        # sister nodes, creates a new node to group them

        self.children.remove(n)
        if len(self.children) != 1:
            down_branch_connector = self.__class__()
            down_branch_connector.dist = 0.0
            down_branch_connector.support = n.support
            for ch in self.get_children():
                down_branch_connector.children.append(ch)
                ch.up = down_branch_connector
                self.children.remove(ch)
        else:
            down_branch_connector = self.children[0]

        # Connects down branch to myself or to outgroup
        # NOTE: Spanish identifiers below: quien_va_ser_padre = "who will
        # become the parent", quien_va_ser_hijo = "who will become the
        # child", quien_fue_padre = "who was the parent".
        quien_va_ser_padre = parent_outgroup
        if quien_va_ser_padre is not self:
            # Parent-child swapping: walk towards the old root reversing
            # each edge, shifting branch lengths/supports one edge down.
            quien_va_ser_hijo = quien_va_ser_padre.up
            quien_fue_padre = None
            buffered_dist = quien_va_ser_padre.dist
            buffered_support = quien_va_ser_padre.support

            while quien_va_ser_hijo is not self:
                quien_va_ser_padre.children.append(quien_va_ser_hijo)
                quien_va_ser_hijo.children.remove(quien_va_ser_padre)

                buffered_dist2 = quien_va_ser_hijo.dist
                buffered_support2 = quien_va_ser_hijo.support
                quien_va_ser_hijo.dist = buffered_dist
                quien_va_ser_hijo.support = buffered_support
                buffered_dist = buffered_dist2
                buffered_support = buffered_support2

                quien_va_ser_padre.up = quien_fue_padre
                quien_fue_padre = quien_va_ser_padre

                quien_va_ser_padre = quien_va_ser_hijo
                quien_va_ser_hijo = quien_va_ser_padre.up

            quien_va_ser_padre.children.append(down_branch_connector)
            down_branch_connector.up = quien_va_ser_padre
            quien_va_ser_padre.up = quien_fue_padre

            down_branch_connector.dist += buffered_dist
            outgroup2 = parent_outgroup
            parent_outgroup.children.remove(outgroup)
            outgroup2.dist = 0
        else:
            outgroup2 = down_branch_connector

        outgroup.up = self
        outgroup2.up = self
        # outgroup is always the first child. Some functions may rely
        # on this fact, so do not change it.
        self.children = [outgroup,outgroup2]
        # Split the basal branch length evenly between the two sides.
        middist = (outgroup2.dist + outgroup.dist)/2
        outgroup.dist = middist
        outgroup2.dist = middist
        outgroup2.support = outgroup.support
    def unroot(self, mode='legacy'):
        """
        Unroots current node. This function is expected to be used on
        the absolute tree root node, but it can be also be applied to
        any other internal node. It will convert a split into a
        multifurcation.

        :argument "legacy" mode: The value can be "legacy" or "keep".
          If value is "keep", then function keeps the distance between
          the leaves by adding the distance associated to the deleted
          edge to the remaining edge. In the other case the distance
          value of the deleted edge is dropped

        :raises ValueError: if ``mode`` is not "legacy" or "keep".
        :raises TreeError: if both children are leaves (only two leaves).
        """
        if not (mode == 'legacy' or mode == 'keep'):
            raise ValueError("The value of the mode parameter must be 'legacy' or 'keep'")
        if len(self.children)==2:
            # Delete whichever child is internal; its children are
            # reattached here, producing a multifurcation.
            if not self.children[0].is_leaf():
                if mode == "keep":
                    self.children[1].dist+=self.children[0].dist
                self.children[0].delete()
            elif not self.children[1].is_leaf():
                if mode == "keep":
                    self.children[0].dist+=self.children[1].dist
                self.children[1].delete()
            else:
                raise TreeError("Cannot unroot a tree with only two leaves")
    def show(self, layout=None, tree_style=None, name="ETE"):
        """
        Starts an interactive session to visualize current node
        structure using provided layout and TreeStyle.

        :argument layout: a layout function (or name) applied per node.
        :argument tree_style: a TreeStyle instance with image properties.
        :argument "ETE" name: title of the visualization window.
        """
        # Imported lazily so the GUI toolkit is only required when used.
        from ..treeview import drawer
        drawer.show_tree(self, layout=layout,
                         tree_style=tree_style, win_name=name)
    def render(self, file_name, layout=None, w=None, h=None, \
                   tree_style=None, units="px", dpi=90):
        """
        Renders the node structure as an image.

        :var file_name: path to the output image file. valid
          extensions are .SVG, .PDF, .PNG

        :var layout: a layout function or a valid layout function name

        :var tree_style: a `TreeStyle` instance containing the image
          properties

        :var px units: "px": pixels, "mm": millimeters, "in": inches
        :var None h: height of the image in :attr:`units`
        :var None w: width of the image in :attr:`units`
        :var 90 dpi: dots per inches.
        """
        # Imported lazily so the GUI toolkit is only required when used.
        from ..treeview import drawer
        # The special "%%return..." prefix asks for in-memory image data
        # instead of writing a file (used by notebook integrations).
        if file_name.startswith('%%return'):
            return drawer.get_img(self, w=w, h=h,
                                  layout=layout, tree_style=tree_style,
                                  units=units, dpi=dpi, return_format=file_name)
        else:
            return drawer.render_tree(self, file_name, w=w, h=h,
                                    layout=layout, tree_style=tree_style,
                                      units=units, dpi=dpi)
def copy(self, method="cpickle"):
""".. versionadded: 2.1
Returns a copy of the current node.
:var cpickle method: Protocol used to copy the node
structure. The following values are accepted:
- "newick": Tree topology, node names, branch lengths and
branch support values will be copied by as represented in
the newick string (copy by newick string serialisation).
- "newick-extended": Tree topology and all node features
will be copied based on the extended newick format
representation. Only node features will be copied, thus
excluding other node attributes. As this method is also
based on newick serialisation, features will be converted
into text strings when making the copy.
- "cpickle": The whole node structure and its content is
cloned based on cPickle object serialisation (slower, but
recommended for full tree copying)
- "deepcopy": The whole node structure and its content is
copied based on the standard "copy" Python functionality
(this is the slowest method but it allows to copy complex
objects even if attributes point to lambda functions,
etc.)
"""
method = method.lower()
if method=="newick":
new_node = self.__class__(self.write(features=["name"], format_root_node=True))
elif method=="newick-extended":
self.write(features=[], format_root_node=True)
new_node = self.__class__(self.write(features=[]))
elif method == "deepcopy":
parent = self.up
self.up = None
new_node = copy.deepcopy(self)
self.up = parent
elif method == "cpickle":
parent = self.up
self.up = None
new_node = six.moves.cPickle.loads(six.moves.cPickle.dumps(self, 2))
self.up = parent
else:
raise TreeError("Invalid copy method")
return new_node
    def _asciiArt(self, char1='-', show_internal=True, compact=False, attributes=None):
        """
        Returns the ASCII representation of the tree.

        Code based on the PyCogent GPL project.

        :returns: a tuple ``(lines, mid)`` where ``lines`` is the list of
          text rows for this subtree and ``mid`` is the index of the row
          where the branch to this node attaches.
        """
        if not attributes:
            attributes = ["name"]
        # Join the requested attribute values into this node's label.
        node_name = ', '.join(map(str, [getattr(self, v) for v in attributes if hasattr(self, v)]))

        LEN = max(3, len(node_name) if not self.children or show_internal else 3)
        PAD = ' ' * LEN
        PA = ' ' * (LEN-1)
        if not self.is_leaf():
            mids = []
            result = []
            # Render each child subtree and record where it attaches.
            for c in self.children:
                if len(self.children) == 1:
                    char2 = '/'
                elif c is self.children[0]:
                    char2 = '/'
                elif c is self.children[-1]:
                    char2 = '\\'
                else:
                    char2 = '-'
                (clines, mid) = c._asciiArt(char2, show_internal, compact, attributes)
                mids.append(mid+len(result))
                result.extend(clines)
                if not compact:
                    result.append('')
            if not compact:
                result.pop()
            # Draw the vertical connector spanning from the first to the
            # last child's attachment row, with this node's stem midway.
            (lo, hi, end) = (mids[0], mids[-1], len(result))
            prefixes = [PAD] * (lo+1) + [PA+'|'] * (hi-lo-1) + [PAD] * (end-hi)
            mid = int((lo + hi) / 2)
            prefixes[mid] = char1 + '-'*(LEN-2) + prefixes[mid][-1]
            result = [p+l for (p,l) in zip(prefixes, result)]
            if show_internal:
                stem = result[mid]
                result[mid] = stem[0] + node_name + stem[len(node_name)+1:]
            return (result, mid)
        else:
            return ([char1 + '-' + node_name], 0)
def get_ascii(self, show_internal=True, compact=False, attributes=None):
"""
Returns a string containing an ascii drawing of the tree.
:argument show_internal: includes internal edge names.
:argument compact: use exactly one line per tip.
:param attributes: A list of node attributes to shown in the
ASCII representation.
"""
(lines, mid) = self._asciiArt(show_internal=show_internal,
compact=compact, attributes=attributes)
return '\n'+'\n'.join(lines)
def ladderize(self, direction=0):
"""
.. versionadded: 2.1
Sort the branches of a given tree (swapping children nodes)
according to the size of each partition.
::
t = Tree("(f,((d, ((a,b),c)),e));")
print t
#
# /-f
# |
# | /-d
# ----| |
# | /---| /-a
# | | | /---|
# | | \---| \-b
# \---| |
# | \-c
# |
# \-e
t.ladderize()
print t
# /-f
# ----|
# | /-e
# \---|
# | /-d
# \---|
# | /-c
# \---|
# | /-a
# \---|
# \-b
"""
if not self.is_leaf():
n2s = {}
for n in self.get_children():
s = n.ladderize(direction=direction)
n2s[n] = s
self.children.sort(key=lambda x: n2s[x])
if direction == 1:
self.children.reverse()
size = sum(n2s.values())
else:
size = 1
return size
def sort_descendants(self, attr="name"):
"""
.. versionadded: 2.1
Sort the branches of a given tree by node names. After the
tree is sorted. Note that if duplicated names are present,
extra criteria should be added to sort nodes.
"""
node2content = self.get_cached_content(store_attr=attr, container_type=list)
for n in self.traverse():
if not n.is_leaf():
n.children.sort(key=lambda x: str(sorted(node2content[x])))
    def get_cached_content(self, store_attr=None, container_type=set, leaves_only=True, _store=None):
        """
        .. versionadded: 2.2

        Returns a dictionary pointing to the preloaded content of each
        internal node under this tree. Such a dictionary is intended
        to work as a cache for operations that require many traversal
        operations.

        :param None store_attr: Specifies the node attribute that
            should be cached (i.e. name, distance, etc.). When none,
            the whole node instance is cached.

        :param container_type: collection type (set or list) used to
            accumulate each node's content.

        :param True leaves_only: when False, internal nodes are also
            included in each node's cached content.

        :param _store: (internal use) shared dict filled in by the
            recursive calls.
        """
        if _store is None:
            _store = {}

        def get_value(_n):
            # Wrap the cached value for node _n in a one-element list so it
            # can feed container_type() / extend / update uniformly.
            if store_attr is None:
                _val = [_n]
            else:
                if not isinstance(store_attr, six.string_types):
                    _val = [tuple(getattr(_n, attr, None) for attr in store_attr)]

                else:
                    _val = [getattr(_n, store_attr, None)]

            return _val

        # Depth-first: children fill _store before this node aggregates it.
        for ch in self.children:
            ch.get_cached_content(store_attr=store_attr,
                                  container_type=container_type,
                                  leaves_only=leaves_only,
                                  _store=_store)

        if self.children:
            if not leaves_only:
                val = container_type(get_value(self))
            else:
                val = container_type()
            for ch in self.children:
                if type(val) == list:
                    val.extend(_store[ch])
                if type(val) == set:
                    val.update(_store[ch])

                if not leaves_only:
                    if type(val) == list:
                        val.extend(get_value(ch))
                    if type(val) == set:
                        val.update(get_value(ch))

            _store[self] = val
        else:
            _store[self] = container_type(get_value(self))

        return _store
    def robinson_foulds(self, t2, attr_t1="name", attr_t2="name",
                        unrooted_trees=False, expand_polytomies=False,
                        polytomy_size_limit=5, skip_large_polytomies=False,
                        correct_by_polytomy_size=False, min_support_t1=0.0,
                        min_support_t2=0.0):
        """
        .. versionadded: 2.2

        Returns the Robinson-Foulds symmetric distance between current
        tree and a different tree instance.

        :param t2: reference tree

        :param name attr_t1: Compare trees using a custom node
                              attribute as a node name.

        :param name attr_t2: Compare trees using a custom node
                              attribute as a node name in target tree.

        :param False unrooted_trees: If True, consider trees as unrooted.

        :param False expand_polytomies: If True, all polytomies in the reference
           and target tree will be expanded into all possible binary
           trees. Robinson-foulds distance will be calculated between all
           tree combinations and the minimum value will be returned.
           See also, :func:`NodeTree.expand_polytomy`.

        :returns: (rf, rf_max, common_attrs, names, edges_t1, edges_t2, discarded_edges_t1, discarded_edges_t2)

        """
        ref_t = self
        target_t = t2
        if not unrooted_trees and (len(ref_t.children) > 2 or len(target_t.children) > 2):
            raise TreeError("Unrooted tree found! You may want to activate the unrooted_trees flag.")

        if expand_polytomies and correct_by_polytomy_size:
            raise TreeError("expand_polytomies and correct_by_polytomy_size are mutually exclusive.")

        if expand_polytomies and unrooted_trees:
            raise TreeError("expand_polytomies and unrooted_trees arguments cannot be enabled at the same time")

        # Only leaves present in BOTH trees take part in the comparison.
        attrs_t1 = set([getattr(n, attr_t1) for n in ref_t.iter_leaves() if hasattr(n, attr_t1)])
        attrs_t2 = set([getattr(n, attr_t2) for n in target_t.iter_leaves() if hasattr(n, attr_t2)])
        common_attrs = attrs_t1 & attrs_t2
        # release mem
        attrs_t1, attrs_t2 = None, None

        # Check for duplicated items (is it necessary? can we optimize? what's the impact in performance?')
        size1 = len([True for n in ref_t.iter_leaves() if getattr(n, attr_t1, None) in common_attrs])
        size2 = len([True for n in target_t.iter_leaves() if getattr(n, attr_t2, None) in common_attrs])
        if size1 > len(common_attrs):
            raise TreeError('Duplicated items found in source tree')
        if size2 > len(common_attrs):
            raise TreeError('Duplicated items found in reference tree')

        if expand_polytomies:
            ref_trees = [Tree(nw) for nw in
                         ref_t.expand_polytomies(map_attr=attr_t1,
                                                 polytomy_size_limit=polytomy_size_limit,
                                                 skip_large_polytomies=skip_large_polytomies)]
            target_trees = [Tree(nw) for nw in
                            target_t.expand_polytomies(map_attr=attr_t2,
                                                       polytomy_size_limit=polytomy_size_limit,
                                                       skip_large_polytomies=skip_large_polytomies)]
            attr_t1, attr_t2 = "name", "name"
        else:
            ref_trees = [ref_t]
            target_trees = [target_t]

        polytomy_correction = 0
        if correct_by_polytomy_size:
            corr1 = sum([0]+[len(n.children) - 2 for n in ref_t.traverse() if len(n.children) > 2])
            corr2 = sum([0]+[len(n.children) - 2 for n in target_t.traverse() if len(n.children) > 2])
            if corr1 and corr2:
                raise TreeError("Both trees contain polytomies! Try expand_polytomies=True instead")
            else:
                polytomy_correction = max([corr1, corr2])

        min_comparison = None
        # Compare every expanded-tree combination and keep the minimum RF.
        for t1 in ref_trees:
            t1_content = t1.get_cached_content()
            t1_leaves = t1_content[t1]
            # Each edge is encoded as the (sorted) tuple of leaf names on one
            # side; for unrooted trees, as the sorted pair of both sides.
            if unrooted_trees:
                edges1 = set([
                        tuple(sorted([tuple(sorted([getattr(n, attr_t1) for n in content if hasattr(n, attr_t1) and getattr(n, attr_t1) in common_attrs])),
                                      tuple(sorted([getattr(n, attr_t1) for n in t1_leaves-content if  hasattr(n, attr_t1) and getattr(n, attr_t1) in common_attrs]))]))
                        for content in six.itervalues(t1_content)])
                edges1.discard(((),()))
            else:
                edges1 = set([
                        tuple(sorted([getattr(n, attr_t1) for n in content if hasattr(n, attr_t1) and getattr(n, attr_t1) in common_attrs]))
                        for content in six.itervalues(t1_content)])
                edges1.discard(())

            if min_support_t1:
                support_t1 = dict([
                        (tuple(sorted([getattr(n, attr_t1) for n in content if hasattr(n, attr_t1) and getattr(n, attr_t1) in common_attrs])), branch.support)
                        for branch, content in six.iteritems(t1_content)])

            for t2 in target_trees:
                t2_content = t2.get_cached_content()
                t2_leaves = t2_content[t2]
                if unrooted_trees:
                    edges2 = set([
                            tuple(sorted([
                                        tuple(sorted([getattr(n, attr_t2) for n in content if hasattr(n, attr_t2) and getattr(n, attr_t2) in common_attrs])),
                                        tuple(sorted([getattr(n, attr_t2) for n in t2_leaves-content if hasattr(n, attr_t2) and getattr(n, attr_t2) in common_attrs]))]))
                            for content in six.itervalues(t2_content)])
                    edges2.discard(((),()))
                else:
                    edges2 = set([
                            tuple(sorted([getattr(n, attr_t2) for n in content if hasattr(n, attr_t2) and getattr(n, attr_t2) in common_attrs]))
                            for content in six.itervalues(t2_content)])
                    edges2.discard(())

                if min_support_t2:
                    support_t2 = dict([
                        (tuple(sorted(([getattr(n, attr_t2) for n in content if hasattr(n, attr_t2) and getattr(n, attr_t2) in common_attrs]))), branch.support)
                        for branch, content in six.iteritems(t2_content)])

                # if a support value is passed as a constraint, discard lowly supported branches from the analysis
                discard_t1, discard_t2 = set(), set()
                if min_support_t1 and unrooted_trees:
                    discard_t1 = set([p for p in edges1 if support_t1.get(p[0], support_t1.get(p[1], 999999999)) < min_support_t1])
                elif min_support_t1:
                    discard_t1 = set([p for p in edges1 if support_t1[p] < min_support_t1])

                if min_support_t2 and unrooted_trees:
                    discard_t2 = set([p for p in edges2 if support_t2.get(p[0], support_t2.get(p[1], 999999999)) < min_support_t2])
                elif min_support_t2:
                    discard_t2 = set([p for p in edges2 if support_t2[p] < min_support_t2])


                #rf = len(edges1 ^ edges2) - (len(discard_t1) + len(discard_t2)) - polytomy_correction # poly_corr is 0 if the flag is not enabled
                #rf = len((edges1-discard_t1) ^ (edges2-discard_t2)) - polytomy_correction

                # the two root edges are never counted here, as they are always
                # present in both trees because of the common attr filters
                rf = len(((edges1 ^ edges2) - discard_t2) - discard_t1) - polytomy_correction

                if unrooted_trees:
                    # thought this may work, but it does not, still I don't see why
                    #max_parts = (len(common_attrs)*2) - 6 - len(discard_t1) - len(discard_t2)
                    max_parts = (len([p for p in edges1 - discard_t1 if len(p[0])>1 and len(p[1])>1]) +
                                 len([p for p in edges2 - discard_t2 if len(p[0])>1 and len(p[1])>1]))
                else:
                    # thought this may work, but it does not, still I don't see why
                    #max_parts = (len(common_attrs)*2) - 4 - len(discard_t1) - len(discard_t2)

                    # Otherwise we need to count the actual number of valid
                    # partitions in each tree -2 is to avoid counting the root
                    # partition of the two trees (only needed in rooted trees)
                    max_parts = (len([p for p in edges1 - discard_t1 if len(p)>1]) +
                                 len([p for p in edges2 - discard_t2 if len(p)>1])) - 2

                    # print max_parts

                if not min_comparison or min_comparison[0] > rf:
                    min_comparison = [rf, max_parts, common_attrs, edges1, edges2, discard_t1, discard_t2]

        return min_comparison
def compare(self, ref_tree, use_collateral=False, min_support_source=0.0, min_support_ref=0.0,
            has_duplications=False, expand_polytomies=False, unrooted=False,
            max_treeko_splits_to_be_artifact=1000, ref_tree_attr='name', source_tree_attr='name'):
    """Compare this tree with ``ref_tree`` using the Robinson-Foulds
    symmetric difference and the number of shared edges. Trees of
    different sizes and with duplicated items are allowed.

    :param ref_tree: reference tree to compare against.
    :param use_collateral: present for API compatibility; not read by the
        current implementation (see commented-out seedid filter below).
    :param min_support_source: discard source-tree branches below this support.
    :param min_support_ref: discard reference-tree branches below this support.
    :param has_duplications: if True, split the source tree into speciation
        subtrees (treeKO) and average the comparison over all of them.
    :param expand_polytomies: forwarded to robinson_foulds.
    :param unrooted: treat both trees as unrooted.
    :param max_treeko_splits_to_be_artifact: skip the treeKO averaging when
        the number of speciation subtrees reaches this threshold.
    :param ref_tree_attr: node attribute used to match reference leaves.
    :param source_tree_attr: node attribute used to match source leaves.
    :returns: a Python dictionary with results (rf, max_rf, norm_rf,
        effective_tree_size, ref_edges_in_source, source_edges_in_ref,
        treeko_dist, source_subtrees, common_edges, source_edges, ref_edges)
    """
    source_tree = self

    def _safe_div(a, b):
        # Guard against division by zero; a == 0 means a zero ratio here.
        if a != 0:
            return a / float(b)
        else: return 0.0

    def _compare(src_tree, ref_tree):
        # calculate partitions and rf distances
        rf, maxrf, common, ref_p, src_p, ref_disc, src_disc = ref_tree.robinson_foulds(src_tree,
                                                                                       expand_polytomies=expand_polytomies,
                                                                                       unrooted_trees=unrooted,
                                                                                       attr_t1=ref_tree_attr,
                                                                                       attr_t2=source_tree_attr,
                                                                                       min_support_t2=min_support_source,
                                                                                       min_support_t1=min_support_ref)

        # if trees share leaves, count their distances
        if len(common) > 0 and src_p and ref_p:
            if unrooted:
                # unrooted partitions are (side1, side2) tuples; keep only
                # non-trivial (non-leaf) splits
                valid_ref_edges = set([p for p in (ref_p - ref_disc) if len(p[0])>1 and len(p[1])>0])
                valid_src_edges = set([p for p in (src_p - src_disc) if len(p[0])>1 and len(p[1])>0])
                common_edges = valid_ref_edges & valid_src_edges
            else:
                valid_ref_edges = set([p for p in (ref_p - ref_disc) if len(p)>1])
                valid_src_edges = set([p for p in (src_p - src_disc) if len(p)>1])
                common_edges = valid_ref_edges & valid_src_edges
        else:
            valid_ref_edges = set()
            valid_src_edges = set()
            common_edges = set()

            # # % of ref edges found in tree
            # ref_found.append(float(len(p2 & p1)) / reftree_edges)

            # # valid edges in target, discard also leaves
            # p2bis = set([p for p in (p2-d2) if len(p[0])>1 and len(p[1])>1])
            # if p2bis:
            #     incompatible_target_branches = float(len((p2-d2) - p1))
            #     target_found.append(1 - (incompatible_target_branches / (len(p2-d2))))

        return rf, maxrf, len(common), valid_ref_edges, valid_src_edges, common_edges

    # NOTE(review): total_valid_ref_edges is computed but never read below.
    total_valid_ref_edges = len([n for n in ref_tree.traverse() if n.children and n.support > min_support_ref])
    result = {}
    if has_duplications:
        orig_target_size = len(source_tree)
        ntrees, ndups, sp_trees = source_tree.get_speciation_trees(
            autodetect_duplications=True, newick_only=True,
            target_attr=source_tree_attr, map_features=[source_tree_attr, "support"])

        if ntrees < max_treeko_splits_to_be_artifact:
            all_rf = []
            ref_found = []
            src_found = []
            tree_sizes = []
            all_max_rf = []
            # NOTE(review): common_names is never updated after this point.
            common_names = 0

            for subtree_nw in sp_trees:
                #if seedid and not use_collateral and (seedid not in subtree_nw):
                #    continue
                subtree = source_tree.__class__(subtree_nw, sp_naming_function = source_tree._speciesFunction)
                if not subtree.children:
                    continue

                # only necessary if rf function is going to filter by support
                # value. It slows downs the analysis, obviously, as it has to
                # find the support for each node in the treeko tree from the
                # original one.
                if min_support_source > 0:
                    subtree_content = subtree.get_cached_content(store_attr='name')
                    for n in subtree.traverse():
                        if n.children:
                            n.support = source_tree.get_common_ancestor(subtree_content[n]).support

                total_rf, max_rf, ncommon, valid_ref_edges, valid_src_edges, common_edges = _compare(subtree, ref_tree)

                all_rf.append(total_rf)
                all_max_rf.append(max_rf)
                tree_sizes.append(ncommon)

                if unrooted:
                    ref_found_in_src = len(common_edges)/float(len(valid_ref_edges)) if valid_ref_edges else None
                    src_found_in_ref = len(common_edges)/float(len(valid_src_edges)) if valid_src_edges else None
                else:
                    # in rooted trees, we want to discount the root edge
                    # from the percentage of congruence. Otherwise we will never see a 0%
                    # congruence for totally different trees
                    ref_found_in_src = (len(common_edges)-1)/float(len(valid_ref_edges)-1) if len(valid_ref_edges)>1 else None
                    src_found_in_ref = (len(common_edges)-1)/float(len(valid_src_edges)-1) if len(valid_src_edges)>1 else None

                if ref_found_in_src is not None:
                    ref_found.append(ref_found_in_src)

                if src_found_in_ref is not None:
                    src_found.append(src_found_in_ref)

            if all_rf:
                # Treeko speciation distance: size-weighted mean of the
                # normalized RF distances over all speciation subtrees.
                alld = [_safe_div(all_rf[i], float(all_max_rf[i])) for i in range(len(all_rf))]
                a = sum([alld[i] * tree_sizes[i] for i in range(len(all_rf))])
                b = float(sum(tree_sizes))
                treeko_d = a/b if a else 0.0
                result["treeko_dist"] = treeko_d
                result["rf"] = utils.mean(all_rf)
                result["max_rf"] = max(all_max_rf)
                result["effective_tree_size"] = utils.mean(tree_sizes)
                result["norm_rf"] = utils.mean([_safe_div(all_rf[i], float(all_max_rf[i])) for i in range(len(all_rf))])
                result["ref_edges_in_source"] = utils.mean(ref_found)
                result["source_edges_in_ref"] = utils.mean(src_found)
                result["source_subtrees"] = len(all_rf)
                # Edge sets are not meaningful when averaged over subtrees.
                result["common_edges"] = set()
                result["source_edges"] = set()
                result["ref_edges"] = set()
    else:
        total_rf, max_rf, ncommon, valid_ref_edges, valid_src_edges, common_edges = _compare(source_tree, ref_tree)
        result["rf"] = float(total_rf) if max_rf else "NA"
        result["max_rf"] = float(max_rf)
        if unrooted:
            result["ref_edges_in_source"] = len(common_edges)/float(len(valid_ref_edges)) if valid_ref_edges else "NA"
            result["source_edges_in_ref"] = len(common_edges)/float(len(valid_src_edges)) if valid_src_edges else "NA"
        else:
            # in rooted trees, we want to discount the root edge from the
            # percentage of congruence. Otherwise we will never see a 0%
            # congruence for totally different trees
            result["ref_edges_in_source"] = (len(common_edges)-1)/float(len(valid_ref_edges)-1) if len(valid_ref_edges)>1 else "NA"
            result["source_edges_in_ref"] = (len(common_edges)-1)/float(len(valid_src_edges)-1) if len(valid_src_edges)>1 else "NA"
        result["effective_tree_size"] = ncommon
        result["norm_rf"] = total_rf/float(max_rf) if max_rf else "NA"
        result["treeko_dist"] = "NA"
        result["source_subtrees"] = 1
        result["common_edges"] = common_edges
        result["source_edges"] = valid_src_edges
        result["ref_edges"] = valid_ref_edges
    return result
def _diff(self, t2, output='topology', attr_t1='name', attr_t2='name', color=True):
    """
    .. versionadded:: 2.3

    Show or return the difference between two tree topologies.

    :param t2: the tree to compare against.
    :param [raw|table|topology|diffs|diffs_tab] output: Output type
    :param attr_t1: node attribute compared in this tree.
    :param attr_t2: node attribute compared in t2.
    :param color: use ANSI colors in the 'topology' output.
    :returns: the raw difftable for unrecognized output types, else None
        (results are printed).
    """
    from ..tools import ete_diff

    difftable = ete_diff.treediff(self, t2, attr1=attr_t1, attr2=attr_t2)

    if output == "topology":
        ete_diff.show_difftable_topo(difftable, attr_t1, attr_t2, usecolor=color)
    elif output == "diffs":
        ete_diff.show_difftable(difftable)
    elif output == "diffs_tab":
        ete_diff.show_difftable_tab(difftable)
    elif output == 'table':
        # BUG FIX: robinson_foulds returns a 7-element result; the previous
        # code sliced [:2] and then unpacked into seven names, which raised
        # ValueError. Unpack only the two values actually used.
        rf, rf_max = self.robinson_foulds(t2, attr_t1=attr_t1, attr_t2=attr_t2)[:2]
        ete_diff.show_difftable_summary(difftable, rf, rf_max)
    else:
        return difftable
def iter_edges(self, cached_content = None):
    '''
    .. versionadded:: 2.3

    Iterate over the edges of this tree. Every edge is yielded as a pair
    of leaf sets: the leaves below the branch and all remaining leaves.
    '''
    if not cached_content:
        cached_content = self.get_cached_content()
    whole_leaf_set = cached_content[self]
    for below in cached_content.values():
        yield (below, whole_leaf_set - below)
def get_edges(self, cached_content = None):
    '''
    .. versionadded:: 2.3

    Return all edges of the tree as a list. Each edge is a tuple of two
    collections: the nodes on either side of the branch.
    '''
    return list(self.iter_edges(cached_content))
def standardize(self, delete_orphan=True, preserve_branch_length=True):
    """
    .. versionadded:: 2.3

    Produce a standardized topology in place: multifurcations are resolved
    into dichotomies and single-child internal nodes are removed.
    """
    self.resolve_polytomy()

    for node in self.get_descendants():
        if len(node.children) != 1:
            continue
        node.delete(prevent_nondicotomic=True,
                    preserve_branch_length=preserve_branch_length)
def get_topology_id(self, attr="name"):
    '''
    .. versionadded:: 2.3

    Return an md5 hexdigest uniquely identifying the topology of the
    current tree. Two trees with the same topology yield the same id.
    For unrooted trees, make sure the root node is not binary or call
    tree.unroot() first. By default the id is derived from terminal node
    names; any other node attribute can be used instead.

    This is useful to count unique topologies over many trees without
    running full distance methods.
    '''
    signature = []
    for half_a, half_b in self.get_edges():
        sides = [sorted(getattr(node, attr) for node in half_a),
                 sorted(getattr(node, attr) for node in half_b)]
        signature.append(sorted(sides))
    return md5(str(sorted(signature)).encode('utf-8')).hexdigest()
# def get_partitions(self):
# """
# .. versionadded: 2.1
# It returns the set of all possible partitions under a
# node. Note that current implementation is quite inefficient
# when used in very large trees.
# t = Tree("((a, b), e);")
# partitions = t.get_partitions()
# # Will return:
# # a,b,e
# # a,e
# # b,e
# # a,b
# # e
# # b
# # a
# """
# all_leaves = frozenset(self.get_leaf_names())
# all_partitions = set([all_leaves])
# for n in self.iter_descendants():
# p1 = frozenset(n.get_leaf_names())
# p2 = frozenset(all_leaves - p1)
# all_partitions.add(p1)
# all_partitions.add(p2)
# return all_partitions
def convert_to_ultrametric(self, tree_length=None, strategy='balanced'):
    """
    .. versionadded: 2.1

    Converts a tree into ultrametric topology (all leaves must have
    the same distance to root). Note that, for visual inspection
    of ultrametric trees, node.img_style["size"] should be set to
    0.

    :param tree_length: total root-to-leaf length of the resulting tree;
        defaults to the distance of the currently farthest leaf.
    :param strategy: 'balanced' spreads each root-to-leaf path evenly over
        the splits remaining below each node; 'fixed' gives every internal
        branch the same step length and stretches the terminal branch.
    """
    # Could something like this replace the old algorithm?
    #most_distant_leaf, tree_length = self.get_farthest_leaf()
    #for leaf in self:
    #    d = leaf.get_distance(self)
    #    leaf.dist += (tree_length - d)
    #return

    # pre-calculate how many splits remain under each node
    node2max_depth = {}
    for node in self.traverse("postorder"):
        if not node.is_leaf():
            max_depth = max([node2max_depth[c] for c in node.children]) + 1
            node2max_depth[node] = max_depth
        else:
            node2max_depth[node] = 1
    # node2dist accumulates the distance (or, for 'fixed', the depth in
    # steps) from the root to each visited node.
    node2dist = {self: 0.0}
    if not tree_length:
        most_distant_leaf, tree_length = self.get_farthest_leaf()
    else:
        tree_length = float(tree_length)

    # branch length of one split level in the 'fixed' strategy
    step = tree_length / node2max_depth[self]
    for node in self.iter_descendants("levelorder"):
        if strategy == "balanced":
            # share the remaining length evenly over the deepest path below
            node.dist = (tree_length - node2dist[node.up]) / node2max_depth[node]
            node2dist[node] = node.dist + node2dist[node.up]
        elif strategy == "fixed":
            if not node.is_leaf():
                node.dist = step
            else:
                # terminal branch absorbs whatever length is left
                node.dist = tree_length - ((node2dist[node.up]) * step)
            node2dist[node] = node2dist[node.up] + 1

        # NOTE(review): no-op self-assignment kept from the original code.
        node.dist = node.dist
def check_monophyly(self, values, target_attr, ignore_missing=False,
                    unrooted=False):
    """
    .. versionadded: 2.2

    Returns True if a given target attribute is monophyletic under
    this node for the provided set of values.

    If not all values are represented in the current tree
    structure, a ValueError exception will be raised to warn that
    strict monophyly could never be reached (this behaviour can be
    avoided by enabling the `ignore_missing` flag).

    :param values: a set of values for which monophyly is
        expected.

    :param target_attr: node attribute being used to check
        monophyly (i.e. species for species trees, names for gene
        family trees, or any custom feature present in the tree).

    :param False ignore_missing: Avoid raising an Exception when
        missing attributes are found.

    .. versionchanged: 2.3

    :param False unrooted: If True, tree will be treated as unrooted, thus
        allowing to find monophyly even when current outgroup is splitting a
        monophyletic group.

    :returns: the following tuple
        IsMonophyletic (boolean),
        clade type ('monophyletic', 'paraphyletic' or 'polyphyletic'),
        leaves breaking the monophyly (set)
    """
    if type(values) != set:
        values = set(values)

    # This is the only time I traverse the tree, then I use cached
    # leaf content
    n2leaves = self.get_cached_content()

    # Raise an error if requested attribute values are not even present
    if ignore_missing:
        found_values = set([getattr(n, target_attr) for n in n2leaves[self]])
        missing_values = values - found_values
        values = values & found_values

    # Locate leaves matching requested attribute values
    targets = set([leaf for leaf in n2leaves[self]
                   if getattr(leaf, target_attr) in values])
    if not ignore_missing:
        if values - set([getattr(leaf, target_attr) for leaf in targets]):
            raise ValueError('The monophyly of the provided values could never be reached, as not all of them exist in the tree.'
                             ' Please check your target attribute and values, or set the ignore_missing flag to True')

    if unrooted:
        # Look for the smallest edge side fully containing all targets;
        # if its size equals the target set, the group is monophyletic
        # in the unrooted sense.
        # NOTE(review): if `targets` is empty, `smallest` stays None and
        # the subtraction below raises TypeError — confirm callers always
        # pass at least one present value.
        smallest = None
        for side1, side2 in self.iter_edges(cached_content=n2leaves):
            if targets.issubset(side1) and (not smallest or len(side1) < len(smallest)):
                smallest = side1
            elif targets.issubset(side2) and (not smallest or len(side2) < len(smallest)):
                smallest = side2
            if smallest is not None and len(smallest) == len(targets):
                break
        foreign_leaves = smallest - targets
    else:
        # Check monophyly with get_common_ancestor. Note that this
        # step does not require traversing the tree again because
        # targets are node instances instead of node names, and
        # get_common_ancestor function is smart enough to detect it
        # and avoid unnecessary traversing.
        common = self.get_common_ancestor(targets)
        observed = n2leaves[common]
        foreign_leaves = set([leaf for leaf in observed
                              if getattr(leaf, target_attr) not in values])

    if not foreign_leaves:
        return True, "monophyletic", foreign_leaves
    else:
        # if the requested attribute is not monophyletic in this
        # node, let's differentiate between poly and paraphyly.
        poly_common = self.get_common_ancestor(foreign_leaves)
        # if the common ancestor of all foreign leaves is self
        # contained, we have a paraphyly. Otherwise, polyphyly.
        polyphyletic = [leaf for leaf in poly_common if
                        getattr(leaf, target_attr) in values]
        if polyphyletic:
            return False, "polyphyletic", foreign_leaves
        else:
            return False, "paraphyletic", foreign_leaves
def get_monophyletic(self, values, target_attr):
    """
    .. versionadded:: 2.2

    Yield the nodes matching the provided monophyly criteria: a node
    matches when the set of `target_attr` values of its leaves is exactly
    `values` (no more, no less).

    :param values: a set of values for which monophyly is expected.
    :param target_attr: node attribute being used to check monophyly
        (i.e. species for species trees, names for gene family trees).
    """
    if type(values) != set:
        values = set(values)

    node2values = self.get_cached_content(store_attr=target_attr)

    def _matches(node):
        return node2values[node] == values

    for candidate in self.iter_leaves(is_leaf_fn=_matches):
        if _matches(candidate):
            yield candidate
def expand_polytomies(self, map_attr="name", polytomy_size_limit=5,
                      skip_large_polytomies=False):
    '''
    .. versionadded:: 2.3

    Given a tree with one or more polytomies, this functions returns the
    list of all trees (in newick format) resulting from the combination of
    all possible solutions of the multifurcated nodes.

    .. warning:

       Please note that the number of of possible binary trees grows
       exponentially with the number and size of polytomies. Using this
       function with large multifurcations is not feasible:

       polytomy size: 3   number of binary trees: 3
       polytomy size: 4   number of binary trees: 15
       polytomy size: 5   number of binary trees: 105
       polytomy size: 6   number of binary trees: 945
       polytomy size: 7   number of binary trees: 10395
       polytomy size: 8   number of binary trees: 135135
       polytomy size: 9   number of binary trees: 2027025

       http://ajmonline.org/2010/darwin.php
    '''
    class TipTuple(tuple):
        # Marker subclass: a TipTuple is a fully resolved subtree and must
        # not be descended into again by add_leaf.
        pass

    def add_leaf(tree, label):
        # Yield every tree obtained by attaching `label` at each possible
        # position of the (nested-tuple) binary tree `tree`.
        yield (label, tree)
        if not isinstance(tree, TipTuple) and isinstance(tree, tuple):
            for left in add_leaf(tree[0], label):
                yield (left, tree[1])
            for right in add_leaf(tree[1], label):
                yield (tree[0], right)

    def enum_unordered(labels):
        # Enumerate all unordered binary trees over `labels` recursively.
        if len(labels) == 1:
            yield labels[0]
        else:
            for tree in enum_unordered(labels[1:]):
                for new_tree in add_leaf(tree, labels[0]):
                    yield new_tree

    n2subtrees = {}
    for n in self.traverse("postorder"):
        if n.is_leaf():
            subtrees = [getattr(n, map_attr)]
        else:
            subtrees = []
            if len(n.children) > polytomy_size_limit:
                if skip_large_polytomies:
                    # keep the polytomy unresolved: combine children as-is
                    for childtrees in itertools.product(*[n2subtrees[ch] for ch in n.children]):
                        subtrees.append(TipTuple(childtrees))
                else:
                    raise TreeError("Found polytomy larger than current limit: %s" %n)
            else:
                # resolve this node: one entry per binary arrangement of
                # every combination of the children's own solutions
                for childtrees in itertools.product(*[n2subtrees[ch] for ch in n.children]):
                    subtrees.extend([TipTuple(subtree) for subtree in enum_unordered(childtrees)])

        n2subtrees[n] = subtrees
    return ["%s;"%str(nw) for nw in n2subtrees[self]] # tuples are in newick format ^_^
def resolve_polytomy(self, default_dist=0.0, default_support=0.0,
                     recursive=True):
    """
    .. versionadded: 2.2

    Resolve all polytomies under current node by creating an
    arbitrary dicotomic structure among the affected nodes. This
    function randomly modifies current tree topology and should
    only be used for compatibility reasons (i.e. programs
    rejecting multifurcated node in the newick representation).

    :param 0.0 default_dist: artificial branch distance of new
        nodes.

    :param 0.0 default_support: artificial branch support of new
        nodes.

    :param True recursive: Resolve any polytomy under this
        node. When False, only current node will be checked and fixed.
    """
    def _resolve(node):
        # Turn an n-ary node into a left-leaning chain (caterpillar) of
        # binary nodes, then re-attach the original children along it.
        if len(node.children) > 2:
            children = list(node.children)
            node.children = []
            next_node = root = node
            # create len(children)-2 intermediate binary nodes
            for i in range(len(children)-2):
                next_node = next_node.add_child()
                next_node.dist = default_dist
                next_node.support = default_support

            next_node = root
            for ch in children:
                next_node.add_child(ch)
                # descend one level after every child except the last two,
                # which share the deepest node.
                # NOTE(review): the stop test compares by value (!=), not
                # identity — confirm children are never equal by value.
                if ch != children[-2]:
                    next_node = next_node.children[0]

    target = [self]
    if recursive:
        target.extend([n for n in self.get_descendants()])
    for n in target:
        _resolve(n)
def cophenetic_matrix(self):
    """
    .. versionadded: 3.1.1

    Compute the cophenetic distance matrix of the tree.

    The `cophenetic matrix <https://en.wikipedia.org/wiki/Cophenetic>` holds,
    for every pair of leaves, the sum of branch lengths along the path that
    connects them. The path is obtained by collecting each leaf's chain of
    ancestors up to (but excluding) the root and XOR-ing the two chains: the
    symmetric difference contains exactly the nodes whose branches lie on
    the connecting path. Since distances are symmetric and the diagonal is
    zero, only itertools.combinations of leaf pairs are computed.

    :return: a two-dimensional list of distances plus a one-dimensional list
        with the leaf names in matrix (row/column header) order, which is
        alphabetical.
    """
    leaves = self.get_leaves()

    # chain of ancestors (excluding the root) for every leaf
    root_paths = {}
    for leaf in leaves:
        chain = set()
        walker = leaf
        while not walker.is_root():
            chain.add(walker)
            walker = walker.up
        root_paths[leaf] = chain

    # pairwise distances, filled symmetrically from each combination
    distances = {leaf.name: {} for leaf in leaves}
    for first, second in itertools.combinations(leaves, 2):
        on_path = root_paths[first] ^ root_paths[second]
        total = sum(node.dist for node in on_path)
        distances[first.name][second.name] = total
        distances[second.name][first.name] = total

    names = sorted(distances.keys())
    matrix = [[0 if col == row else distances[row][col] for col in names]
              for row in names]
    return matrix, names
def add_face(self, face, column, position="branch-right"):
    """
    .. versionadded: 2.1

    Attach a fixed face to this node. Fixed faces are always rendered on
    the node, regardless of the layout function.

    :argument face: a Face or inherited instance
    :argument column: An integer number starting from 0
    :argument "branch-right" position: Posible values are:
        "branch-right", "branch-top", "branch-bottom", "float", "aligned"
    """
    if not hasattr(self, "_faces"):
        self._faces = _FaceAreas()

    if position not in FACE_POSITIONS:
        raise ValueError("face position not in %s" %FACE_POSITIONS)

    if not isinstance(face, Face):
        raise ValueError("not a Face instance")

    getattr(self._faces, position).add_face(face, column=column)
def set_style(self, node_style):
    """
    .. versionadded: 2.1

    Set 'node_style' as the fixed style for the current node.
    """
    if not TREEVIEW:
        raise ValueError("Treeview module is disabled")
    if node_style is None:
        node_style = NodeStyle()
    if type(node_style) is NodeStyle:
        self._img_style = node_style
@staticmethod
def from_parent_child_table(parent_child_table):
    """Converts a parent-child table into an ETE Tree instance.

    :argument parent_child_table: a list of tuples containing parent-child
        relationships. For example: [("A", "B", 0.1), ("A", "C", 0.2), ("C",
        "D", 1), ("C", "E", 1.5)]. Where each tuple represents: [parent,
        child, child-parent-dist]

    :returns: A new Tree instance

    :example:

    >>> tree = Tree.from_parent_child_table([("A", "B", 0.1), ("A", "C", 0.2), ("C", "D", 1), ("C", "E", 1.5)])
    >>> print tree
    """
    nodes_by_name = {}

    def get_node(nodename, dist=None):
        # fetch (or lazily create) the node for this name, refreshing
        # its distance when one is provided
        node = nodes_by_name.get(nodename)
        if node is None:
            node = nodes_by_name[nodename] = Tree(name=nodename, dist=dist)
        if dist is not None:
            node.dist = dist
        node.name = nodename
        return node

    for columns in parent_child_table:
        if len(columns) == 3:
            parent_name, child_name, distance = columns
            dist = float(distance)
        else:
            parent_name, child_name = columns
            dist = None
        parent = get_node(parent_name)
        parent.add_child(get_node(child_name, dist=dist))

    return parent.get_tree_root()
@staticmethod
def from_skbio(skbio_tree, map_attributes=None):
    """Converts a scikit-bio TreeNode object into ETE Tree object.

    :argument skbio_tree: a scikit bio TreeNode instance

    :argument None map_attributes: A list of attribute nanes in the
        scikit-bio tree that should be mapped into the ETE tree
        instance. (name, id and branch length are always mapped)

    :returns: A new Tree instance (None if skbio_tree is not a
        scikit-bio TreeNode)

    :example:

    >>> tree = Tree.from_skibio(skbioTree, map_attributes=["value"])
    """
    from skbio import TreeNode as skbioTreeNode

    def get_ete_node(skbio_node):
        # reuse the node if this skbio node was already converted
        ete_node = all_nodes.get(skbio_node, Tree())
        if skbio_node.length is not None:
            ete_node.dist = float(skbio_node.length)
        ete_node.name = skbio_node.name
        ete_node.add_features(id=skbio_node.id)
        if map_attributes:
            for a in map_attributes:
                ete_node.add_feature(a, getattr(skbio_node, a, None))
        return ete_node

    all_nodes = {}
    if isinstance(skbio_tree, skbioTreeNode):
        for node in skbio_tree.preorder(include_self=True):
            all_nodes[node] = get_ete_node(node)
            ete_node = all_nodes[node]
            for ch in node.children:
                ete_ch = get_ete_node(ch)
                ete_node.add_child(ete_ch)
                all_nodes[ch] = ete_ch
        # BUG FIX: return the root's own converted node. The previous code
        # returned ete_ch.get_tree_root(), which raised NameError for a
        # single-node tree (ete_ch was never bound).
        return all_nodes[skbio_tree].get_tree_root()
def phonehome(self):
    """Report usage statistics back to the ETE project (performs a
    network call via the internal `_ph` module)."""
    from .. import _ph
    _ph.call()
|
(newick=None, format=0, dist=None, support=None, name=None, quoted_node_names=False)
|
721,296 |
ete3.coretype.tree
|
__init__
| null |
def __init__(self, newick=None, format=0, dist=None, support=None,
             name=None, quoted_node_names=False):
    """Create a new tree node, optionally populated from a newick string.

    :param newick: newick string or file path; when given, the full tree
        is read into this node (which becomes the root).
    :param format: newick format number understood by read_newick.
    :param dist: branch length to the parent (defaults to DEFAULT_DIST).
    :param support: branch support value (defaults to DEFAULT_SUPPORT).
    :param name: node name (defaults to DEFAULT_NAME).
    :param quoted_node_names: accept quoted node names in the newick.
    """
    self._children = []
    self._up = None
    self._dist = DEFAULT_DIST
    self._support = DEFAULT_SUPPORT
    self._img_style = None
    self.features = set([])
    # Add basic features
    self.features.update(["dist", "support", "name"])
    if dist is not None:
        self.dist = dist
    if support is not None:
        self.support = support
    self.name = name if name is not None else DEFAULT_NAME

    # Initialize tree
    if newick is not None:
        # root branch length starts at zero when parsing a whole tree
        self._dist = 0.0
        read_newick(newick, root_node = self, format=format,
                    quoted_names=quoted_node_names)
|
(self, newick=None, format=0, dist=None, support=None, name=None, quoted_node_names=False)
|
721,300 |
ete3.coretype.tree
|
__repr__
| null |
def __repr__(self):
    """Return a debug representation: node name plus object hash in hex."""
    return "Tree node '{}' ({})".format(self.name, hex(self.__hash__()))
|
(self)
|
721,477 |
ete3.webplugin.webapp
|
WebTreeApplication
|
Provides a basic WSGI application object which can handle ETE
tree visualization and interactions. Please, see the
webplugin example provided with the ETE installation package
(http://pypi.python.org/pypi/ete3).
|
class WebTreeApplication(object):
""" Provides a basic WSGI application object which can handle ETE
tree visualization and interactions. Please, see the
webplugin example provided with the ETE installation package
(http://pypi.python.org/pypi/ete3)."""
def __init__(self):
    """Initialize the WSGI application state and default configuration."""
    # Redirects normal output msgs to stderr, since stdout in web
    # application is for the browser
    sys.stdout = sys.stderr

    self.TreeConstructor = None
    # action targets accepted by register_action()
    self.NODE_TARGET_ACTIONS = ["node", "face"]
    self.TREE_TARGET_ACTIONS = ["layout", "search"]
    # registered actions: [name, target, handler, checker, html_generator]
    self.actions = []
    self._layout = None
    self._tree_style = None
    self._width = None
    self._height = None
    self._size_units = "px"
    self._custom_tree_renderer = None
    # per-tree state caches, keyed by treeid
    self._treeid2layout = {}
    self._external_app_handler = None
    self._treeid2tree = {}
    self._treeid2cache = {}
    self._treeid2index = {}
    self.queries = {}
    self.CONFIG = {
        "temp_dir":"/var/www/webplugin/",
        "temp_url":"http://localhost/webplugin/tmp",
        "DISPLAY" :":0" # Used by ete to render images
        }
def set_tree_size(self, w, h, units="px"):
    """Fix the dimensions of the rendered tree image."""
    self._width, self._height, self._size_units = w, h, units
def set_external_app_handler(self, handler):
    """Register a custom function that extends the current WSGI
    application."""
    self._external_app_handler = handler
def set_external_tree_renderer(self, handler):
    """Delegate per-draw tree processing to `handler`, called every time
    the tree is about to be drawn."""
    self._custom_tree_renderer = handler
def register_action(self, name, target, handler, checker, html_generator):
    """Add a new web-interactive function associated to tree nodes."""
    entry = [name, target, handler, checker, html_generator]
    self.actions.append(entry)
def set_tree_loader(self, TreeConstructor):
    """Delegate tree constructor. It allows to customize the Tree
    class used to create new tree instances.

    CONSISTENCY FIX: keep both attributes in sync — `__init__` declares
    `self.TreeConstructor` (which nothing ever set), while internal code
    such as `_load_tree` reads `self._tree`.
    """
    self._tree = TreeConstructor
    self.TreeConstructor = TreeConstructor
def set_default_layout_fn(self, layout_fn):
    """Register the layout function applied when rendering trees."""
    self._layout = layout_fn
def set_tree_style(self, handler):
    """Store the :class:`TreeStyle` instance used to render tree images."""
    self._tree_style = handler
def _get_html_map(self, img_map, treeid, mapid, tree):
    """Build the HTML <MAP> element with clickable hotspot areas for every
    node and face rectangle reported by the renderer in `img_map`."""
    # Scans for node-enabled actions.
    nid2actions = {}
    nid2face_actions = {}
    for n in tree.traverse():
        for aindex, (action, target, handler, checker, html_generator) in enumerate(self.actions):
            if target == "node" and (not checker or checker(n)):
                nid2actions.setdefault(int(n._nid), []).append(aindex)
            elif target == "face" and (not checker or checker(n)):
                nid2face_actions.setdefault(int(n._nid), []).append(aindex)

    html_map = '<MAP NAME="%s" class="ete_tree_img">' %(mapid)
    if img_map["nodes"]:
        for x1, y1, x2, y2, nodeid, text in img_map["nodes"]:
            text = "" if not text else text
            area = img_map["node_areas"].get(int(nodeid), [0,0,0,0])
            # NOTE(review): the lookups below use `nodeid` as-is while the
            # dicts above are keyed by int(n._nid) — confirm img_map always
            # provides integer node ids.
            html_map += """ <AREA SHAPE="rect" COORDS="%s,%s,%s,%s" onMouseOut='unhighlight_node();' onMouseOver='highlight_node("#%s", "%s", %s, %s, %s, %s);' onClick='show_context_menu("%s", "%s", "%s");' href="javascript:void('%s');">""" %\
                (int(x1), int(y1), int(x2), int(y2),
                 treeid, text, area[0], area[1], area[2]-area[0], area[3]-area[1],
                 treeid, nodeid, ','.join(map(str, nid2actions.get(nodeid,[]))), str(nodeid) )
    if img_map["faces"]:
        for x1, y1, x2, y2, nodeid, text in img_map["faces"]:
            text = "" if not text else text
            area = img_map["node_areas"].get(int(nodeid), [0,0,0,0])
            # face areas get both node-level and face-level actions
            html_map += """ <AREA SHAPE="rect" COORDS="%s,%s,%s,%s" onMouseOut='unhighlight_node(); hide_face_popup();' onMouseOver='highlight_node("#%s", "%s", %s, %s, %s, %s); show_face_popup("%s", "%s", "%s", "%s");' onClick='show_context_menu("%s", "%s", "%s", "%s");' href="javascript:void('%s');">""" %\
                (int(x1),int(y1),int(x2),int(y2),
                 treeid, text, area[0], area[1], area[2]-area[0], area[3]-area[1],
                 treeid, nodeid, ','.join(map(str, nid2actions.get(nodeid,[])+nid2face_actions.get(nodeid,[]) )), text,
                 treeid, nodeid, ','.join(map(str, nid2actions.get(nodeid,[])+nid2face_actions.get(nodeid,[]) )), text,
                 text,
                 )
    html_map += '</MAP>'
    return html_map
def _load_tree(self, treeid, tree=None, cache_file=None):
    """Ensure the tree identified by `treeid` is in memory and indexed.

    :param tree: optional newick string or tree instance; when given it
        overwrites any previously stored version for `treeid`.
    :param cache_file: optional pickle file name; defaults to
        "<treeid>.pkl" (relative to CONFIG["temp_dir"]).
    :returns: True when both the tree and its node index are loaded.
    """
    # if a tree is given, it overwrites previous versions
    if tree and isinstance(tree, str):
        # newick string: build the tree with the registered constructor
        tree = self._tree(tree)
        self._treeid2tree[treeid] = tree
        self._load_tree_index(treeid)
    elif tree:
        self._treeid2tree[treeid] = tree
        self._load_tree_index(treeid)

    self._treeid2cache[treeid] = cache_file if cache_file else "%s.pkl" %treeid

    # if no tree is given, and not in memory, try to load it
    # from previous sessions
    if treeid not in self._treeid2tree:
        self._load_tree_from_path(self._treeid2cache[treeid])

    # Returns True if tree and indexes are loaded
    return (treeid in self._treeid2tree) and (treeid in self._treeid2index)
def _load_tree_from_path(self, pkl_path):
    """Load a pickled tree saved by a previous session.

    :param pkl_path: pickle file name relative to CONFIG["temp_dir"],
        expected to be "<treeid>.pkl" as written by _dump_tree_to_file.
    :returns: True if the tree was loaded and indexed, False otherwise.
    """
    tree_path = os.path.join(self.CONFIG["temp_dir"], pkl_path)
    if not os.path.exists(tree_path):
        return False
    # BUG FIX: the previous implementation referenced an undefined
    # `treeid` variable (NameError), opened the pickle in text mode and
    # loaded it twice through a leftover debug print(). Recover the tree
    # id from the file name instead (see _dump_tree_to_file).
    treeid = os.path.basename(pkl_path)
    if treeid.endswith(".pkl"):
        treeid = treeid[:-len(".pkl")]
    with open(tree_path, "rb") as handle:
        self._treeid2tree[treeid] = six.moves.cPickle.load(handle)
    self._load_tree_index(treeid)
    return True
def _load_tree_index(self, treeid):
    """Build the node-id (_nid) -> node lookup table for `treeid`.

    :returns: True if a new index was built, False if one already existed.
    """
    if self._treeid2index.get(treeid, {}):
        return False
    index = self._treeid2index[treeid] = {}
    for node in self._treeid2tree[treeid].traverse():
        if hasattr(node, "_nid"):
            index[str(node._nid)] = node
    return True
def _dump_tree_to_file(self, t, treeid):
    """Pickle tree `t` to CONFIG["temp_dir"]/<treeid>.pkl so it can be
    recovered by later sessions (see _load_tree_from_path)."""
    tree_path = os.path.join(self.CONFIG["temp_dir"], treeid+".pkl")
    # RESOURCE FIX: use a context manager so the handle is flushed and
    # closed even if pickling fails (previously the file was never closed).
    with open(tree_path, "wb") as handle:
        six.moves.cPickle.dump(t, handle)
    #open(tree_path, "w").write(t.write(features=[]))
def _get_tree_img(self, treeid, pre_drawing_action=None):
    """Render tree `treeid` to a PNG and return the HTML snippet
    (image map + <img> tag + footer) that embeds it.

    `pre_drawing_action` is an optional [atype, handler, arguments]
    triplet executed before rendering, e.g. a node/face action, a
    search, or a layout switch requested from the web UI.
    """
    # Timestamp query-string defeats browser caching of the PNG.
    img_url = os.path.join(self.CONFIG["temp_url"], treeid+".png?"+str(time.time()))
    img_path = os.path.join(self.CONFIG["temp_dir"], treeid+".png")
    t = self._treeid2tree[treeid]
    tree_index = self._treeid2index[treeid]
    if pre_drawing_action:
        atype, handler, arguments = pre_drawing_action
        if atype in set(["node", "face"]) and len(arguments)==1 and handler:
            # Node/face actions receive the target node (None if the
            # id is unknown).
            nid = arguments[0]
            node = tree_index.get(str(nid), None)
            handler(node)
        elif atype == "tree":
            handler(t, arguments[0])
        elif atype == "search":
            handler(t, arguments[0])
        elif atype == "layout":
            # Layout handlers are remembered per tree for later redraws.
            self._treeid2layout[treeid] = handler
    layout_fn = self._treeid2layout.get(treeid, self._layout)
    # Unique map id so several rendered trees can coexist in one page.
    mapid = "img_map_"+str(time.time())
    img_map = _render_tree(t, img_path, self.CONFIG["DISPLAY"], layout = layout_fn,
                           tree_style = self._tree_style,
                           w=self._width,
                           h=self._height,
                           units=self._size_units)
    html_map = self._get_html_map(img_map, treeid, mapid, t)
    for n in t.traverse():
        # Refresh the node index and drop Qt render items, which are
        # not picklable (the tree is dumped to disk below).
        self._treeid2index[treeid][str(n._nid)]=n
        if hasattr(n, "_QtItem_"):
            n._QtItem_ = None
            delattr(n, "_QtItem_")
    # Collect the indexes of tree-level actions enabled for this tree.
    tree_actions = []
    for aindex, (action, target, handler, checker, html_generator) in enumerate(self.actions):
        if target in self.TREE_TARGET_ACTIONS and (not checker or checker(t)):
            tree_actions.append(aindex)
    try:
        version_tag = __version__
    except NameError:
        # __version__ may be undefined when run outside the package.
        version_tag = "ete3"
    self._dump_tree_to_file(t, treeid)
    ete_publi = '<div style="margin:0px;padding:0px;text-align:left;"><a href="http://etetoolkit.org" style="font-size:7pt;" target="_blank" >%s</a></div>' %\
        (version_tag)
    img_html = """<img id="%s" class="ete_tree_img" src="%s" USEMAP="#%s" onLoad='javascript:bind_popup();' onclick='javascript:show_context_menu("%s", "", "%s");' >""" %\
        (treeid, img_url, mapid, treeid, ','.join(map(str, tree_actions)))
    tree_div_id = "ETE_tree_"+str(treeid)
    return html_map+ '<div id="%s" >'%tree_div_id + img_html + ete_publi + "</div>"
# WSGI web application
def __call__(self, environ, start_response):
    """ This function is executed when the application is called
    by the WSGI apache module. It is, therefore, in charge of
    answering web requests."""
    path = environ['PATH_INFO'].split("/")
    # NOTE(review): '202' means "Accepted"; a plain '200 OK' looks
    # intended for this synchronous response -- confirm before changing.
    start_response('202 OK', [('content-type', 'text/plain')])
    # Parse request parameters from the query string (GET) or body (POST).
    if environ['REQUEST_METHOD'].upper() == 'GET' and environ['QUERY_STRING']:
        self.queries = cgi.parse_qs(environ['QUERY_STRING'])
    elif environ['REQUEST_METHOD'].upper() == 'POST' and environ['wsgi.input']:
        self.queries = cgi.parse_qs(environ['wsgi.input'].read())
    else:
        self.queries = {}
    method = path[1]
    # cgi.parse_qs maps each key to a list; take the first value.
    treeid = self.queries.get("treeid", [None])[0]
    nodeid = self.queries.get("nid", [None])[0]
    textface = self.queries.get("textface", [None])[0]
    actions = self.queries.get("show_actions", [None])[0]
    tree = self.queries.get("tree", [None])[0]
    search_term = self.queries.get("search_term", [None])[0]
    aindex = self.queries.get("aindex", [None])[0]
    if method == "draw":
        # if no treeid is given, generate one
        if not treeid:
            treeid = md5(str(time.time())).hexdigest()
        if not self._load_tree(treeid, tree):
            return "draw: Cannot load the tree: %s" %treeid
        # BUGFIX: `t` was only assigned inside the custom-renderer
        # branch, so the `elif t and treeid` test below raised NameError
        # for the default renderer. Fetch the tree up front.
        t = self._treeid2tree[treeid]
        if self._custom_tree_renderer:
            return self._custom_tree_renderer(t, treeid, self)
        elif t and treeid:
            return self._get_tree_img(treeid=treeid)
        else:
            return "No tree to draw"
    elif method == "get_menu":
        if not self._load_tree(treeid):
            return "get_menu: Cannot load the tree: %s" %treeid
        if nodeid:
            tree_index = self._treeid2index[treeid]
            node = tree_index[nodeid]
        else:
            node = None
        if textface:
            header = str(textface).strip()
        else:
            header = "Menu"
        html = """<div id="ete_popup_header"><span id="ete_popup_header_text">%s</span><div id="ete_close_popup" onClick='hide_popup();'></div></div><ul>""" %\
            (header)
        for i in map(int, actions.split(",")):
            aname, target, handler, checker, html_generator = self.actions[i]
            if html_generator:
                # Custom menu-entry HTML provided by the action itself.
                html += html_generator(i, treeid, nodeid, textface, node)
            else:
                html += """<li><a href='javascript:void(0);' onClick='hide_popup(); run_action("%s", "%s", "%s");'> %s </a></li> """ %\
                    (treeid, nodeid, i, aname)
        html += '</ul>'
        return html
    elif method == "action":
        if not self._load_tree(treeid):
            return "action: Cannot load the tree: %s" %treeid
        if aindex is None:
            # No action index given: just refresh the tree image.
            return self._get_tree_img(treeid=treeid)
        else:
            aname, target, handler, checker, html_generator = self.actions[int(aindex)]
            if target in set(["node", "face", "layout"]):
                return self._get_tree_img(treeid=treeid, pre_drawing_action=[target, handler, [nodeid]])
            elif target in set(["search"]):
                return self._get_tree_img(treeid=treeid, pre_drawing_action=[target, handler, [search_term]])
            elif target in set(["refresh"]):
                return self._get_tree_img(treeid=treeid)
        return "Bad guy"
    elif self._external_app_handler:
        return self._external_app_handler(environ, start_response, self.queries)
    else:
        # Fallback: echo the environment for debugging.
        return '\n'.join(map(str, list(environ.items()))) + str(self.queries) + '\t\n'.join(environ['wsgi.input'])
|
()
|
721,478 |
ete3.webplugin.webapp
|
__call__
|
This function is executed when the application is called
by the WSGI apache module. It is, therefore, in charge of
answering web requests.
|
def __call__(self, environ, start_response):
    """ This function is executed when the application is called
    by the WSGI apache module. It is, therefore, in charge of
    answering web requests."""
    path = environ['PATH_INFO'].split("/")
    # NOTE(review): '202' means "Accepted"; a plain '200 OK' looks
    # intended for this synchronous response -- confirm before changing.
    start_response('202 OK', [('content-type', 'text/plain')])
    # Parse request parameters from the query string (GET) or body (POST).
    if environ['REQUEST_METHOD'].upper() == 'GET' and environ['QUERY_STRING']:
        self.queries = cgi.parse_qs(environ['QUERY_STRING'])
    elif environ['REQUEST_METHOD'].upper() == 'POST' and environ['wsgi.input']:
        self.queries = cgi.parse_qs(environ['wsgi.input'].read())
    else:
        self.queries = {}
    method = path[1]
    # cgi.parse_qs maps each key to a list; take the first value.
    treeid = self.queries.get("treeid", [None])[0]
    nodeid = self.queries.get("nid", [None])[0]
    textface = self.queries.get("textface", [None])[0]
    actions = self.queries.get("show_actions", [None])[0]
    tree = self.queries.get("tree", [None])[0]
    search_term = self.queries.get("search_term", [None])[0]
    aindex = self.queries.get("aindex", [None])[0]
    if method == "draw":
        # if no treeid is given, generate one
        if not treeid:
            treeid = md5(str(time.time())).hexdigest()
        if not self._load_tree(treeid, tree):
            return "draw: Cannot load the tree: %s" %treeid
        # BUGFIX: `t` was only assigned inside the custom-renderer
        # branch, so the `elif t and treeid` test below raised NameError
        # for the default renderer. Fetch the tree up front.
        t = self._treeid2tree[treeid]
        if self._custom_tree_renderer:
            return self._custom_tree_renderer(t, treeid, self)
        elif t and treeid:
            return self._get_tree_img(treeid=treeid)
        else:
            return "No tree to draw"
    elif method == "get_menu":
        if not self._load_tree(treeid):
            return "get_menu: Cannot load the tree: %s" %treeid
        if nodeid:
            tree_index = self._treeid2index[treeid]
            node = tree_index[nodeid]
        else:
            node = None
        if textface:
            header = str(textface).strip()
        else:
            header = "Menu"
        html = """<div id="ete_popup_header"><span id="ete_popup_header_text">%s</span><div id="ete_close_popup" onClick='hide_popup();'></div></div><ul>""" %\
            (header)
        for i in map(int, actions.split(",")):
            aname, target, handler, checker, html_generator = self.actions[i]
            if html_generator:
                # Custom menu-entry HTML provided by the action itself.
                html += html_generator(i, treeid, nodeid, textface, node)
            else:
                html += """<li><a href='javascript:void(0);' onClick='hide_popup(); run_action("%s", "%s", "%s");'> %s </a></li> """ %\
                    (treeid, nodeid, i, aname)
        html += '</ul>'
        return html
    elif method == "action":
        if not self._load_tree(treeid):
            return "action: Cannot load the tree: %s" %treeid
        if aindex is None:
            # No action index given: just refresh the tree image.
            return self._get_tree_img(treeid=treeid)
        else:
            aname, target, handler, checker, html_generator = self.actions[int(aindex)]
            if target in set(["node", "face", "layout"]):
                return self._get_tree_img(treeid=treeid, pre_drawing_action=[target, handler, [nodeid]])
            elif target in set(["search"]):
                return self._get_tree_img(treeid=treeid, pre_drawing_action=[target, handler, [search_term]])
            elif target in set(["refresh"]):
                return self._get_tree_img(treeid=treeid)
        return "Bad guy"
    elif self._external_app_handler:
        return self._external_app_handler(environ, start_response, self.queries)
    else:
        # Fallback: echo the environment for debugging.
        return '\n'.join(map(str, list(environ.items()))) + str(self.queries) + '\t\n'.join(environ['wsgi.input'])
|
(self, environ, start_response)
|
721,479 |
ete3.webplugin.webapp
|
__init__
| null |
def __init__(self):
    """Set up an empty web-plugin application with default configuration."""
    # Redirects normal output msgs to stderr, since stdout in web
    # application is for the browser
    sys.stdout = sys.stderr
    self.TreeConstructor = None
    # Action targets that apply to individual nodes/faces vs. whole trees.
    self.NODE_TARGET_ACTIONS = ["node", "face"]
    self.TREE_TARGET_ACTIONS = ["layout", "search"]
    self.actions = []
    # Rendering defaults: layout function, style, and image size.
    self._layout = None
    self._tree_style = None
    self._width = None
    self._height = None
    self._size_units = "px"
    self._custom_tree_renderer = None
    self._treeid2layout = {}
    self._external_app_handler = None
    # Per-treeid state: live tree objects, pickle cache paths, node indexes.
    self._treeid2tree = {}
    self._treeid2cache = {}
    self._treeid2index = {}
    self.queries = {}
    # Deployment paths; expected to be overridden by the hosting app.
    self.CONFIG = {
        "temp_dir":"/var/www/webplugin/",
        "temp_url":"http://localhost/webplugin/tmp",
        "DISPLAY" :":0" # Used by ete to render images
    }
|
(self)
|
721,480 |
ete3.webplugin.webapp
|
_dump_tree_to_file
| null |
def _dump_tree_to_file(self, t, treeid):
tree_path = os.path.join(self.CONFIG["temp_dir"], treeid+".pkl")
six.moves.cPickle.dump(t, open(tree_path, "wb"))
#open(tree_path, "w").write(t.write(features=[]))
|
(self, t, treeid)
|
721,481 |
ete3.webplugin.webapp
|
_get_html_map
| null |
def _get_html_map(self, img_map, treeid, mapid, tree):
    """Build the HTML <MAP> element with clickable <AREA> regions for
    every node and face reported by the tree renderer in `img_map`."""
    # Scans for node-enabled actions: collect, per node id, the indexes
    # of actions whose checker accepts the node (nodes vs. faces).
    nid2actions = {}
    nid2face_actions = {}
    for n in tree.traverse():
        for aindex, (action, target, handler, checker, html_generator) in enumerate(self.actions):
            if target == "node" and (not checker or checker(n)):
                nid2actions.setdefault(int(n._nid), []).append(aindex)
            elif target == "face" and (not checker or checker(n)):
                nid2face_actions.setdefault(int(n._nid), []).append(aindex)
    html_map = '<MAP NAME="%s" class="ete_tree_img">' %(mapid)
    if img_map["nodes"]:
        for x1, y1, x2, y2, nodeid, text in img_map["nodes"]:
            text = "" if not text else text
            area = img_map["node_areas"].get(int(nodeid), [0,0,0,0])
            # NOTE(review): nid2actions is keyed by int(n._nid) but
            # looked up with `nodeid` as delivered by the renderer; if
            # that is a string the lookup silently misses -- confirm.
            html_map += """ <AREA SHAPE="rect" COORDS="%s,%s,%s,%s" onMouseOut='unhighlight_node();' onMouseOver='highlight_node("#%s", "%s", %s, %s, %s, %s);' onClick='show_context_menu("%s", "%s", "%s");' href="javascript:void('%s');">""" %\
                (int(x1), int(y1), int(x2), int(y2),
                 treeid, text, area[0], area[1], area[2]-area[0], area[3]-area[1],
                 treeid, nodeid, ','.join(map(str, nid2actions.get(nodeid,[]))), str(nodeid) )
    if img_map["faces"]:
        for x1, y1, x2, y2, nodeid, text in img_map["faces"]:
            text = "" if not text else text
            area = img_map["node_areas"].get(int(nodeid), [0,0,0,0])
            # Face areas also trigger the face popup and merge node- and
            # face-level actions into their context menu.
            html_map += """ <AREA SHAPE="rect" COORDS="%s,%s,%s,%s" onMouseOut='unhighlight_node(); hide_face_popup();' onMouseOver='highlight_node("#%s", "%s", %s, %s, %s, %s); show_face_popup("%s", "%s", "%s", "%s");' onClick='show_context_menu("%s", "%s", "%s", "%s");' href="javascript:void('%s');">""" %\
                (int(x1),int(y1),int(x2),int(y2),
                 treeid, text, area[0], area[1], area[2]-area[0], area[3]-area[1],
                 treeid, nodeid, ','.join(map(str, nid2actions.get(nodeid,[])+nid2face_actions.get(nodeid,[]) )), text,
                 treeid, nodeid, ','.join(map(str, nid2actions.get(nodeid,[])+nid2face_actions.get(nodeid,[]) )), text,
                 text,
                 )
    html_map += '</MAP>'
    return html_map
|
(self, img_map, treeid, mapid, tree)
|
721,482 |
ete3.webplugin.webapp
|
_get_tree_img
| null |
def _get_tree_img(self, treeid, pre_drawing_action=None):
    """Render tree `treeid` to a PNG and return the HTML snippet
    (image map + <img> tag + footer) that embeds it.

    `pre_drawing_action` is an optional [atype, handler, arguments]
    triplet executed before rendering, e.g. a node/face action, a
    search, or a layout switch requested from the web UI.
    """
    # Timestamp query-string defeats browser caching of the PNG.
    img_url = os.path.join(self.CONFIG["temp_url"], treeid+".png?"+str(time.time()))
    img_path = os.path.join(self.CONFIG["temp_dir"], treeid+".png")
    t = self._treeid2tree[treeid]
    tree_index = self._treeid2index[treeid]
    if pre_drawing_action:
        atype, handler, arguments = pre_drawing_action
        if atype in set(["node", "face"]) and len(arguments)==1 and handler:
            # Node/face actions receive the target node (None if the
            # id is unknown).
            nid = arguments[0]
            node = tree_index.get(str(nid), None)
            handler(node)
        elif atype == "tree":
            handler(t, arguments[0])
        elif atype == "search":
            handler(t, arguments[0])
        elif atype == "layout":
            # Layout handlers are remembered per tree for later redraws.
            self._treeid2layout[treeid] = handler
    layout_fn = self._treeid2layout.get(treeid, self._layout)
    # Unique map id so several rendered trees can coexist in one page.
    mapid = "img_map_"+str(time.time())
    img_map = _render_tree(t, img_path, self.CONFIG["DISPLAY"], layout = layout_fn,
                           tree_style = self._tree_style,
                           w=self._width,
                           h=self._height,
                           units=self._size_units)
    html_map = self._get_html_map(img_map, treeid, mapid, t)
    for n in t.traverse():
        # Refresh the node index and drop Qt render items, which are
        # not picklable (the tree is dumped to disk below).
        self._treeid2index[treeid][str(n._nid)]=n
        if hasattr(n, "_QtItem_"):
            n._QtItem_ = None
            delattr(n, "_QtItem_")
    # Collect the indexes of tree-level actions enabled for this tree.
    tree_actions = []
    for aindex, (action, target, handler, checker, html_generator) in enumerate(self.actions):
        if target in self.TREE_TARGET_ACTIONS and (not checker or checker(t)):
            tree_actions.append(aindex)
    try:
        version_tag = __version__
    except NameError:
        # __version__ may be undefined when run outside the package.
        version_tag = "ete3"
    self._dump_tree_to_file(t, treeid)
    ete_publi = '<div style="margin:0px;padding:0px;text-align:left;"><a href="http://etetoolkit.org" style="font-size:7pt;" target="_blank" >%s</a></div>' %\
        (version_tag)
    img_html = """<img id="%s" class="ete_tree_img" src="%s" USEMAP="#%s" onLoad='javascript:bind_popup();' onclick='javascript:show_context_menu("%s", "", "%s");' >""" %\
        (treeid, img_url, mapid, treeid, ','.join(map(str, tree_actions)))
    tree_div_id = "ETE_tree_"+str(treeid)
    return html_map+ '<div id="%s" >'%tree_div_id + img_html + ete_publi + "</div>"
|
(self, treeid, pre_drawing_action=None)
|
721,483 |
ete3.webplugin.webapp
|
_load_tree
| null |
def _load_tree(self, treeid, tree=None, cache_file=None):
# if a tree is given, it overwrites previous versions
if tree and isinstance(tree, str):
tree = self._tree(tree)
self._treeid2tree[treeid] = tree
self._load_tree_index(treeid)
elif tree:
self._treeid2tree[treeid] = tree
self._load_tree_index(treeid)
self._treeid2cache[treeid] = cache_file if cache_file else "%s.pkl" %treeid
# if no tree is given, and not in memmory, it tries to loaded
# from previous sessions
if treeid not in self._treeid2tree:
self._load_tree_from_path(self._treeid2cache[treeid])
# Returns True if tree and indexes are loaded
return (treeid in self._treeid2tree) and (treeid in self._treeid2index)
|
(self, treeid, tree=None, cache_file=None)
|
721,484 |
ete3.webplugin.webapp
|
_load_tree_from_path
| null |
def _load_tree_from_path(self, pkl_path):
tree_path = os.path.join(self.CONFIG["temp_dir"], pkl_path)
if os.path.exists(tree_path):
print(six.moves.cPickle.load(open(tree_path)))
t = self._treeid2tree[treeid] = six.moves.cPickle.load(open(tree_path))
self._load_tree_index(treeid)
return True
else:
return False
|
(self, pkl_path)
|
721,485 |
ete3.webplugin.webapp
|
_load_tree_index
| null |
def _load_tree_index(self, treeid):
if not self._treeid2index.get(treeid, {}):
tree_index = self._treeid2index[treeid] = {}
t = self._treeid2tree[treeid]
for n in t.traverse():
if hasattr(n, "_nid"):
tree_index[str(n._nid)] = n
return True
else:
return False
|
(self, treeid)
|
721,486 |
ete3.webplugin.webapp
|
register_action
|
Adds a new web interactive function associated to tree
nodes.
|
def register_action(self, name, target, handler, checker, html_generator):
    """Add a new web-interactive function associated to tree nodes."""
    entry = [name, target, handler, checker, html_generator]
    self.actions.append(entry)
|
(self, name, target, handler, checker, html_generator)
|
721,487 |
ete3.webplugin.webapp
|
set_default_layout_fn
|
Fix the layout function used to render the tree.
|
def set_default_layout_fn(self, layout_fn):
    """Set the layout function used by default to render trees."""
    self._layout = layout_fn
|
(self, layout_fn)
|
721,488 |
ete3.webplugin.webapp
|
set_external_app_handler
|
Sets a custom function that will extend current WSGI
application.
|
def set_external_app_handler(self, handler):
    """Install a custom callable extending the current WSGI application."""
    self._external_app_handler = handler
|
(self, handler)
|
721,489 |
ete3.webplugin.webapp
|
set_external_tree_renderer
|
If the tree needs to be processed every time is going to
be drawn, the task can be delegated.
|
def set_external_tree_renderer(self, handler):
    """Delegate per-request tree processing/rendering to `handler`."""
    self._custom_tree_renderer = handler
|
(self, handler)
|
721,490 |
ete3.webplugin.webapp
|
set_tree_loader
|
Delegate tree constructor. It allows to customize the Tree
class used to create new tree instances.
|
def set_tree_loader(self, TreeConstructor):
    """Set the constructor used to build new tree instances (allows
    customizing the Tree class)."""
    self._tree = TreeConstructor
|
(self, TreeConstructor)
|
721,491 |
ete3.webplugin.webapp
|
set_tree_size
|
Fix the size of tree image
|
def set_tree_size(self, w, h, units="px"):
    """Fix the rendered tree image size (in `units`, default pixels)."""
    self._width, self._height, self._size_units = w, h, units
|
(self, w, h, units='px')
|
721,492 |
ete3.webplugin.webapp
|
set_tree_style
|
Fix a :class:`TreeStyle` instance to render tree images.
|
def set_tree_style(self, handler):
    """Set the :class:`TreeStyle` instance used to render tree images."""
    self._tree_style = handler
|
(self, handler)
|
721,497 |
ete3.ncbi_taxonomy.ncbiquery
|
is_taxadb_up_to_date
|
Check if a valid and up-to-date taxa.sqlite database exists
If dbfile= is not specified, DEFAULT_TAXADB is assumed
|
def is_taxadb_up_to_date(dbfile=DEFAULT_TAXADB):
    """Check if a valid and up-to-date taxa.sqlite database exists.

    If dbfile= is not specified, DEFAULT_TAXADB is assumed.
    Returns True only when the stored version matches DB_VERSION.
    """
    db = sqlite3.connect(dbfile)
    try:
        r = db.execute('SELECT version FROM stats;')
        version = r.fetchone()[0]
    except (sqlite3.OperationalError, ValueError, IndexError, TypeError):
        # Missing table/row or malformed value: treat as outdated.
        version = None
    finally:
        # BUGFIX: close was previously skipped when an unexpected
        # exception escaped the query; always release the connection.
        db.close()
    return version == DB_VERSION
|
(dbfile='/root/.etetoolkit/taxa.sqlite')
|
721,515 |
pydal.base
|
DAL
|
An instance of this class represents a database connection
Args:
uri(str): contains information for connecting to a database.
Defaults to `'sqlite://dummy.db'`
Note:
experimental: you can specify a dictionary as uri
parameter i.e. with::
db = DAL({"uri": "sqlite://storage.sqlite",
"tables": {...}, ...})
for an example of dict input you can check the output
of the scaffolding db model with
db.as_dict()
Note that for compatibility with Python older than
version 2.6.5 you should cast your dict input keys
to str due to a syntax limitation on kwarg names.
for proper DAL dictionary input you can use one of::
obj = serializers.cast_keys(dict, [encoding="utf-8"])
#or else (for parsing json input)
obj = serializers.loads_json(data, unicode_keys=False)
pool_size: How many open connections to make to the database object.
folder: where .table files will be created. Automatically set within
web2py. Use an explicit path when using DAL outside web2py
db_codec: string encoding of the database (default: 'UTF-8')
table_hash: database identifier with .tables. If your connection hash
changes, you can still use the old .tables if they have
table_hash as prefix
check_reserved: list of adapters to check tablenames and column names
against sql/nosql reserved keywords. Defaults to `None`
- 'common' List of sql keywords that are common to all database
types such as "SELECT, INSERT". (recommended)
- 'all' Checks against all known SQL keywords
- '<adaptername>'' Checks against the specific adapters list of
keywords
- '<adaptername>_nonreserved' Checks against the specific adapters
list of nonreserved keywords. (if available)
migrate: sets default migrate behavior for all tables
fake_migrate: sets default fake_migrate behavior for all tables
migrate_enabled: If set to False disables ALL migrations
fake_migrate_all: If set to True fake migrates ALL tables
attempts: Number of times to attempt connecting
auto_import: If set to True, tries import automatically table
definitions from the databases folder (works only for simple models)
bigint_id: If set, turn on bigint instead of int for id and reference
fields
lazy_tables: delays table definition until table access
after_connection: can a callable that will be executed after the
connection
Example:
Use as::
db = DAL('sqlite://test.db')
or::
db = DAL(**{"uri": ..., "tables": [...]...}) # experimental
db.define_table('tablename', Field('fieldname1'),
Field('fieldname2'))
|
class DAL(with_metaclass(MetaDAL, Serializable, BasicStorage)):
"""
An instance of this class represents a database connection
Args:
uri(str): contains information for connecting to a database.
Defaults to `'sqlite://dummy.db'`
Note:
experimental: you can specify a dictionary as uri
parameter i.e. with::
db = DAL({"uri": "sqlite://storage.sqlite",
"tables": {...}, ...})
for an example of dict input you can check the output
of the scaffolding db model with
db.as_dict()
Note that for compatibility with Python older than
version 2.6.5 you should cast your dict input keys
to str due to a syntax limitation on kwarg names.
for proper DAL dictionary input you can use one of::
obj = serializers.cast_keys(dict, [encoding="utf-8"])
#or else (for parsing json input)
obj = serializers.loads_json(data, unicode_keys=False)
pool_size: How many open connections to make to the database object.
folder: where .table files will be created. Automatically set within
web2py. Use an explicit path when using DAL outside web2py
db_codec: string encoding of the database (default: 'UTF-8')
table_hash: database identifier with .tables. If your connection hash
change you can still using old .tables if they have
table_hash as prefix
check_reserved: list of adapters to check tablenames and column names
against sql/nosql reserved keywords. Defaults to `None`
- 'common' List of sql keywords that are common to all database
types such as "SELECT, INSERT". (recommended)
- 'all' Checks against all known SQL keywords
- '<adaptername>'' Checks against the specific adapters list of
keywords
- '<adaptername>_nonreserved' Checks against the specific adapters
list of nonreserved keywords. (if available)
migrate: sets default migrate behavior for all tables
fake_migrate: sets default fake_migrate behavior for all tables
migrate_enabled: If set to False disables ALL migrations
fake_migrate_all: If set to True fake migrates ALL tables
attempts: Number of times to attempt connecting
auto_import: If set to True, tries import automatically table
definitions from the databases folder (works only for simple models)
bigint_id: If set, turn on bigint instead of int for id and reference
fields
lazy_tables: delays table definition until table access
after_connection: can a callable that will be executed after the
connection
Example:
Use as::
db = DAL('sqlite://test.db')
or::
db = DAL(**{"uri": ..., "tables": [...]...}) # experimental
db.define_table('tablename', Field('fieldname1'),
Field('fieldname2'))
"""
serializers = None
validators = None
representers = {}
validators_method = default_validators
uuid = staticmethod(uuidstr)
logger = logging.getLogger("pyDAL")
Field = Field
Table = Table
Rows = Rows
Row = Row
record_operators = {"update_record": RecordUpdater, "delete_record": RecordDeleter}
execution_handlers = [TimingHandler]
def __new__(cls, uri="sqlite://dummy.db", *args, **kwargs):
if not hasattr(THREAD_LOCAL, "_pydal_db_instances_"):
THREAD_LOCAL._pydal_db_instances_ = {}
if not hasattr(THREAD_LOCAL, "_pydal_db_instances_zombie_"):
THREAD_LOCAL._pydal_db_instances_zombie_ = {}
if uri == "<zombie>":
db_uid = kwargs["db_uid"] # a zombie must have a db_uid!
if db_uid in THREAD_LOCAL._pydal_db_instances_:
db_group = THREAD_LOCAL._pydal_db_instances_[db_uid]
db = db_group[-1]
elif db_uid in THREAD_LOCAL._pydal_db_instances_zombie_:
db = THREAD_LOCAL._pydal_db_instances_zombie_[db_uid]
else:
db = super(DAL, cls).__new__(cls)
THREAD_LOCAL._pydal_db_instances_zombie_[db_uid] = db
else:
db_uid = kwargs.get("db_uid", hashlib_md5(repr(uri)).hexdigest())
if db_uid in THREAD_LOCAL._pydal_db_instances_zombie_:
db = THREAD_LOCAL._pydal_db_instances_zombie_[db_uid]
del THREAD_LOCAL._pydal_db_instances_zombie_[db_uid]
else:
db = super(DAL, cls).__new__(cls)
db_group = THREAD_LOCAL._pydal_db_instances_.get(db_uid, [])
db_group.append(db)
THREAD_LOCAL._pydal_db_instances_[db_uid] = db_group
db._db_uid = db_uid
return db
@staticmethod
def set_folder(folder):
# ## this allows gluon to set a folder for this thread
# ## <<<<<<<<< Should go away as new DAL replaces old sql.py
BaseAdapter.set_folder(folder)
@staticmethod
def get_instances():
"""
Returns a dictionary with uri as key with timings and defined tables::
{'sqlite://storage.sqlite': {
'dbstats': [(select auth_user.email from auth_user, 0.02009)],
'dbtables': {
'defined': ['auth_cas', 'auth_event', 'auth_group',
'auth_membership', 'auth_permission', 'auth_user'],
'lazy': '[]'
}
}
}
"""
dbs = getattr(THREAD_LOCAL, "_pydal_db_instances_", {}).items()
infos = {}
for db_uid, db_group in dbs:
for db in db_group:
if not db._uri:
continue
k = hide_password(db._adapter.uri)
infos[k] = dict(
dbstats=[(row[0], row[1]) for row in db._timings],
dbtables={
"defined": sorted(
list(set(db.tables) - set(db._LAZY_TABLES.keys()))
),
"lazy": sorted(db._LAZY_TABLES.keys()),
},
)
return infos
@staticmethod
def distributed_transaction_begin(*instances):
if not instances:
return
thread_key = "%s.%s" % (socket.gethostname(), threading.current_thread())
instances = enumerate(instances)
keys = ["%s.%i" % (thread_key, i) for (i, db) in instances]
for i, db in instances:
if not db._adapter.support_distributed_transaction():
raise SyntaxError(
"distributed transaction not suported by %s" % db._dbname
)
for i, db in instances:
db._adapter.distributed_transaction_begin(keys[i])
@staticmethod
def distributed_transaction_commit(*instances):
if not instances:
return
instances = enumerate(instances)
thread_key = "%s.%s" % (socket.gethostname(), threading.current_thread())
keys = ["%s.%i" % (thread_key, i) for (i, db) in instances]
for i, db in instances:
if not db._adapter.support_distributed_transaction():
raise SyntaxError(
"distributed transaction not suported by %s" % db._dbanme
)
try:
for i, db in instances:
db._adapter.prepare(keys[i])
except:
for i, db in instances:
db._adapter.rollback_prepared(keys[i])
raise RuntimeError("failure to commit distributed transaction")
else:
for i, db in instances:
db._adapter.commit_prepared(keys[i])
return
def __init__(
self,
uri="sqlite://dummy.db",
pool_size=0,
folder=None,
db_codec="UTF-8",
check_reserved=None,
migrate=True,
fake_migrate=False,
migrate_enabled=True,
fake_migrate_all=False,
decode_credentials=False,
driver_args=None,
adapter_args=None,
attempts=5,
auto_import=False,
bigint_id=False,
debug=False,
lazy_tables=False,
db_uid=None,
after_connection=None,
tables=None,
ignore_field_case=True,
entity_quoting=True,
table_hash=None,
):
if uri == "<zombie>" and db_uid is not None:
return
super(DAL, self).__init__()
if not issubclass(self.Rows, Rows):
raise RuntimeError("`Rows` class must be a subclass of pydal.objects.Rows")
if not issubclass(self.Row, Row):
raise RuntimeError("`Row` class must be a subclass of pydal.objects.Row")
from .drivers import DRIVERS, is_jdbc
self._drivers_available = DRIVERS
if not decode_credentials:
credential_decoder = lambda cred: cred
else:
credential_decoder = lambda cred: unquote(cred)
self._folder = folder
if folder:
self.set_folder(folder)
self._uri = uri
self._pool_size = pool_size
self._db_codec = db_codec
self._pending_references = {}
self._request_tenant = "request_tenant"
self._common_fields = []
self._referee_name = "%(table)s"
self._bigint_id = bigint_id
self._debug = debug
self._migrated = []
self._LAZY_TABLES = {}
self._lazy_tables = lazy_tables
self._tables = SQLCallableList()
self._aliased_tables = threading.local()
self._driver_args = driver_args
self._adapter_args = adapter_args
self._check_reserved = check_reserved
self._decode_credentials = decode_credentials
self._attempts = attempts
self._ignore_field_case = ignore_field_case
if not str(attempts).isdigit() or attempts < 0:
attempts = 5
if uri:
uris = isinstance(uri, (list, tuple)) and uri or [uri]
connected = False
for k in range(attempts):
for uri in uris:
try:
from .adapters import adapters
if is_jdbc and not uri.startswith("jdbc:"):
uri = "jdbc:" + uri
self._dbname = REGEX_DBNAME.match(uri).group()
# notice that driver args or {} else driver_args
# defaults to {} global, not correct
kwargs = dict(
db=self,
uri=uri,
pool_size=pool_size,
folder=folder,
db_codec=db_codec,
credential_decoder=credential_decoder,
driver_args=driver_args or {},
adapter_args=adapter_args or {},
after_connection=after_connection,
entity_quoting=entity_quoting,
)
adapter = adapters.get_for(self._dbname)
self._adapter = adapter(**kwargs)
# self._adapter.ignore_field_case = ignore_field_case
if bigint_id:
self._adapter.dialect._force_bigints()
# if there are multiple URIs to try in sequence, do not defer connection
if len(uris) > 1:
self._adapter.connector()
connected = True
break
except SyntaxError:
raise
except Exception:
tb = traceback.format_exc()
self.logger.debug(
"DEBUG: connect attempt %i, connection error:\n%s" % (k, tb)
)
if connected:
break
else:
time.sleep(1)
if not connected:
raise RuntimeError(
"Failure to connect, tried %d times:\n%s" % (attempts, tb)
)
else:
self._adapter = NullAdapter(
db=self,
pool_size=0,
uri="None",
folder=folder,
db_codec=db_codec,
after_connection=after_connection,
entity_quoting=entity_quoting,
)
migrate = fake_migrate = False
self.validators_method = None
self.validators = None
adapter = self._adapter
self._uri_hash = table_hash or hashlib_md5(adapter.uri).hexdigest()
if check_reserved:
from .contrib.reserved_sql_keywords import ADAPTERS as RSK
self.RSK = RSK
self._migrate = migrate
self._fake_migrate = fake_migrate
self._migrate_enabled = migrate_enabled
self._fake_migrate_all = fake_migrate_all
if self.serializers is not None:
for k, v in self.serializers.items():
serializers._custom_[k] = v
if auto_import or tables:
self.import_table_definitions(adapter.folder, tables=tables)
@contextlib.contextmanager
def single_transaction(self):
self._adapter.reconnect()
try:
yield self
except Exception:
self._adapter.rollback()
else:
self._adapter.commit()
finally:
self.close()
@property
def tables(self):
return self._tables
@property
def _timings(self):
return getattr(THREAD_LOCAL, "_pydal_timings_", [])
@property
def _lastsql(self):
return self._timings[-1] if self._timings else None
def import_table_definitions(
self, path, migrate=False, fake_migrate=False, tables=None
):
if tables:
for table in tables:
self.define_table(**table)
else:
pattern = pjoin(path, self._uri_hash + "_*.table")
for filename in glob.glob(pattern):
tfile = self._adapter.migrator.file_open(filename, "r" if PY2 else "rb")
try:
sql_fields = pickle.load(tfile)
name = filename[len(pattern) - 7 : -6]
mf = [
(
value["sortable"],
Field(
key,
type=value["type"],
length=value.get("length", None),
notnull=value.get("notnull", False),
unique=value.get("unique", False),
),
)
for key, value in iteritems(sql_fields)
]
mf.sort(key=lambda a: a[0])
self.define_table(
name,
*[item[1] for item in mf],
**dict(migrate=migrate, fake_migrate=fake_migrate),
)
finally:
self._adapter.migrator.file_close(tfile)
def check_reserved_keyword(self, name):
"""
Validates `name` against SQL keywords
Uses self._check_reserved which is a list of operators to use.
"""
for backend in self._check_reserved:
if name.upper() in self.RSK[backend]:
raise SyntaxError(
'invalid table/column name "%s" is a "%s" reserved SQL/NOSQL keyword'
% (name, backend.upper())
)
def parse_as_rest(self, patterns, args, vars, queries=None, nested_select=True):
return RestParser(self).parse(patterns, args, vars, queries, nested_select)
    def define_table(self, tablename, *fields, **kwargs):
        """Define (or redefine) a table on this database.

        Validates the table name and keyword arguments, then either defers
        construction (when ``lazy_tables`` is enabled) or builds the Table
        immediately via :meth:`lazy_define_table`.

        Args:
            tablename: name of the table; must be a str (py2 unicode is
                coerced), must not start with "_", shadow an existing
                attribute, or be a Python keyword
            *fields: Field instances (may also be given as ``fields=...``)
            **kwargs: table options; must be a subset of ``TABLE_ARGS``
                (e.g. ``migrate``, ``redefine``, ``format``, ...)

        Returns:
            The Table instance, or ``None`` when construction was deferred
            by ``lazy_tables``.

        Raises:
            SyntaxError: invalid name or attributes, or the table is already
                defined and ``redefine`` was not requested
        """
        invalid_kwargs = set(kwargs) - TABLE_ARGS
        if invalid_kwargs:
            raise SyntaxError(
                'invalid table "%s" attributes: %s' % (tablename, invalid_kwargs)
            )
        if not fields and "fields" in kwargs:
            fields = kwargs.get("fields", ())
        if not isinstance(tablename, str):
            if isinstance(tablename, unicode):
                # py2 only: coerce unicode table names to str
                try:
                    tablename = str(tablename)
                except UnicodeEncodeError:
                    raise SyntaxError("invalid unicode table name")
            else:
                raise SyntaxError("missing table name")
        redefine = kwargs.get("redefine", False)
        if tablename in self.tables:
            if redefine:
                # drop the old attribute so the new definition can bind
                try:
                    delattr(self, tablename)
                except:
                    pass
            else:
                raise SyntaxError("table already defined: %s" % tablename)
        elif (
            tablename.startswith("_")
            or hasattr(self, tablename)
            or REGEX_PYTHON_KEYWORDS.match(tablename)
        ):
            raise SyntaxError("invalid table name: %s" % tablename)
        elif self._check_reserved:
            self.check_reserved_keyword(tablename)
        if self._lazy_tables:
            # defer actual construction until first attribute access
            # (see __getattr__)
            if tablename not in self._LAZY_TABLES or redefine:
                self._LAZY_TABLES[tablename] = (tablename, fields, kwargs)
            table = None
        else:
            table = self.lazy_define_table(tablename, *fields, **kwargs)
        if tablename not in self.tables:
            self.tables.append(tablename)
        return table
    def lazy_define_table(self, tablename, *fields, **kwargs):
        """Actually construct a Table (called directly, or deferred from
        :meth:`define_table` when lazy tables are enabled).

        Appends the DAL's common fields, instantiates the table class,
        wires up references and default validators/representers, and runs
        the migration unless migrations are disabled.

        Returns:
            The constructed Table instance.
        """
        kwargs_get = kwargs.get
        common_fields = self._common_fields
        if common_fields:
            fields = list(fields) + [
                f if isinstance(f, Table) else f.clone() for f in common_fields
            ]
        table_class = kwargs_get("table_class", Table)
        table = table_class(self, tablename, *fields, **kwargs)
        table._actual = True
        self[tablename] = table
        # must follow above line to handle self references
        table._create_references()
        for field in table:
            if field.requires is DEFAULT:
                field.requires = auto_validators(field)
            if field.represent is None:
                field.represent = auto_represent(field)
        migrate = self._migrate_enabled and kwargs_get("migrate", self._migrate)
        # NOTE: due to operator precedence this condition reads as
        # (migrate AND uri-is-real) OR (engine is google:datastore)
        if (
            migrate
            and self._uri not in (None, "None")
            or self._adapter.dbengine == "google:datastore"
        ):
            fake_migrate = self._fake_migrate_all or kwargs_get(
                "fake_migrate", self._fake_migrate
            )
            polymodel = kwargs_get("polymodel", None)
            try:
                # table creation/migration is not re-entrant; serialize it
                GLOBAL_LOCKER.acquire()
                self._adapter.create_table(
                    table,
                    migrate=migrate,
                    fake_migrate=fake_migrate,
                    polymodel=polymodel,
                )
            finally:
                GLOBAL_LOCKER.release()
        else:
            table._dbt = None
        on_define = kwargs_get("on_define", None)
        if on_define:
            on_define(table)
        return table
def as_dict(self, flat=False, sanitize=True):
db_uid = uri = None
if not sanitize:
uri, db_uid = (self._uri, self._db_uid)
db_as_dict = dict(
tables=[],
uri=uri,
db_uid=db_uid,
**dict(
[
(k, getattr(self, "_" + k, None))
for k in [
"pool_size",
"folder",
"db_codec",
"check_reserved",
"migrate",
"fake_migrate",
"migrate_enabled",
"fake_migrate_all",
"decode_credentials",
"driver_args",
"adapter_args",
"attempts",
"bigint_id",
"debug",
"lazy_tables",
]
]
),
)
for table in self:
db_as_dict["tables"].append(table.as_dict(flat=flat, sanitize=sanitize))
return db_as_dict
def __contains__(self, tablename):
try:
return tablename in self.tables
except AttributeError:
# The instance has no .tables attribute yet
return False
def __iter__(self):
for tablename in self.tables:
yield self[tablename]
    def __getitem__(self, key):
        """Dictionary-style table access: ``db["mytable"]`` -> Table."""
        return self.__getattr__(str(key))
    def __getattr__(self, key):
        """Attribute-style table access with lazy-table materialization.

        Resolution order: pending lazy table definitions first, then aliased
        tables, finally the regular storage lookup.  Uses
        ``object.__getattribute__`` throughout to avoid re-entering this hook.
        """
        if object.__getattribute__(
            self, "_lazy_tables"
        ) and key in object.__getattribute__(self, "_LAZY_TABLES"):
            # materialize a lazily-declared table on first access; pop so it
            # is only built once
            tablename, fields, kwargs = self._LAZY_TABLES.pop(key)
            return self.lazy_define_table(tablename, *fields, **kwargs)
        aliased_tables = object.__getattribute__(self, "_aliased_tables")
        aliased = getattr(aliased_tables, key, None)
        if aliased:
            return aliased
        return BasicStorage.__getattribute__(self, key)
def __setattr__(self, key, value):
if key[:1] != "_" and key in self:
raise SyntaxError("Object %s exists and cannot be redefined" % key)
return super(DAL, self).__setattr__(key, value)
def __repr__(self):
if hasattr(self, "_uri"):
return '<DAL uri="%s">' % hide_password(self._adapter.uri)
else:
return '<DAL db_uid="%s">' % self._db_uid
    def smart_query(self, fields, text):
        """Build a Set from a free-text query ``text`` parsed against
        ``fields`` (delegates to the module-level ``smart_query`` helper)."""
        return Set(self, smart_query(fields, text))
    def __call__(self, query=None, ignore_common_filters=None):
        """Shorthand for :meth:`where`: ``db(query)`` returns a Set."""
        return self.where(query, ignore_common_filters)
    def where(self, query=None, ignore_common_filters=None):
        """Return a Set for ``query``.

        Accepts a Query, a Table (expanded to the adapter's id-based
        "all records" query), a Field (expanded to ``field != None``), or a
        dict (which may carry its own ``ignore_common_filters`` flag).
        """
        if isinstance(query, Table):
            query = self._adapter.id_query(query)
        elif isinstance(query, Field):
            query = query != None
        elif isinstance(query, dict):
            icf = query.get("ignore_common_filters")
            if icf:
                ignore_common_filters = icf
        return Set(self, query, ignore_common_filters=ignore_common_filters)
def commit(self):
self._adapter.commit()
object.__getattribute__(self, "_aliased_tables").__dict__.clear()
def rollback(self):
self._adapter.rollback()
object.__getattribute__(self, "_aliased_tables").__dict__.clear()
    def close(self):
        """Close the adapter connection and deregister this instance.

        Removes this DAL from the per-thread instance group keyed by
        ``_db_uid``; when the group becomes empty the group itself is
        deleted and the adapter's thread-local state is cleaned up.
        """
        self._adapter.close()
        if self._db_uid in THREAD_LOCAL._pydal_db_instances_:
            db_group = THREAD_LOCAL._pydal_db_instances_[self._db_uid]
            db_group.remove(self)
            if not db_group:
                del THREAD_LOCAL._pydal_db_instances_[self._db_uid]
        self._adapter._clean_tlocals()
    def get_connection_from_pool_or_new(self):
        """Acquire a working connection (reused from the pool or newly
        opened) by asking the adapter to reconnect."""
        self._adapter.reconnect()
    def recycle_connection_in_pool_or_close(self, action="commit"):
        """Release the current connection, passing ``action`` (presumably
        "commit" or "rollback" -- handled by the adapter) to the adapter's
        ``close`` with ``really=True``."""
        self._adapter.close(action, really=True)
    def executesql(
        self,
        query,
        placeholders=None,
        as_dict=False,
        fields=None,
        colnames=None,
        as_ordered_dict=False,
    ):
        """
        Executes an arbitrary query
        Args:
            query (str): the query to submit to the backend
            placeholders: is optional and will always be None.
                If using raw SQL with placeholders, placeholders may be
                a sequence of values to be substituted in
                or, (if supported by the DB driver), a dictionary with keys
                matching named placeholders in your SQL.
            as_dict: will always be None when using DAL.
                If using raw SQL can be set to True and the results cursor
                returned by the DB driver will be converted to a sequence of
                dictionaries keyed with the db field names. Results returned
                with as_dict=True are the same as those returned when applying
                .to_list() to a DAL query. If "as_ordered_dict"=True the
                behaviour is the same as when "as_dict"=True with the keys
                (field names) guaranteed to be in the same order as returned
                by the select name executed on the database.
            fields: list of DAL Fields that match the fields returned from the
                DB. The Field objects should be part of one or more Table
                objects defined on the DAL object. The "fields" list can include
                one or more DAL Table objects in addition to or instead of
                including Field objects, or it can be just a single table
                (not in a list). In that case, the Field objects will be
                extracted from the table(s).
                Note:
                    if either `fields` or `colnames` is provided, the results
                    will be converted to a DAL `Rows` object using the
                    `db._adapter.parse()` method
            colnames: list of field names in tablename.fieldname format
        Note:
            It is also possible to specify both "fields" and the associated
            "colnames". In that case, "fields" can also include DAL Expression
            objects in addition to Field objects. For Field objects in "fields",
            the associated "colnames" must still be in tablename.fieldname
            format. For Expression objects in "fields", the associated
            "colnames" can be any arbitrary labels.
            DAL Table objects referred to by "fields" or "colnames" can be dummy
            tables and do not have to represent any real tables in the database.
            Also, note that the "fields" and "colnames" must be in the
            same order as the fields in the results cursor returned from the DB.
        """
        adapter = self._adapter
        if placeholders:
            adapter.execute(query, placeholders)
        else:
            adapter.execute(query)
        if as_dict or as_ordered_dict:
            if not hasattr(adapter.cursor, "description"):
                raise RuntimeError(
                    "database does not support executesql(...,as_dict=True)"
                )
            # Non-DAL legacy db query, converts cursor results to dict.
            # sequence of 7-item sequences. each sequence tells about a column.
            # first item is always the field name according to Python Database API specs
            columns = adapter.cursor.description
            # reduce the column info down to just the field names
            fields = colnames or [f[0] for f in columns]
            if len(fields) != len(set(fields)):
                raise RuntimeError(
                    "Result set includes duplicate column names. Specify unique column names using the 'colnames' argument"
                )
            #: avoid bytes strings in columns names (py3)
            if columns and not PY2:
                for i in range(0, len(fields)):
                    if isinstance(fields[i], bytes):
                        fields[i] = fields[i].decode("utf8")
            # will hold our finished resultset in a list
            data = adapter.fetchall()
            # convert the list for each row into a dictionary so it's
            # easier to work with. row['field_name'] rather than row[0]
            if as_ordered_dict:
                _dict = OrderedDict
            else:
                _dict = dict
            return [_dict(zip(fields, row)) for row in data]
        try:
            data = adapter.fetchall()
        except:
            # NOTE(review): bare except -- statements without a result set
            # (INSERT/UPDATE/DDL) make fetchall() raise on some drivers, so
            # any failure here is treated as "no data"; confirm this is the
            # intended best-effort behaviour before narrowing the clause
            return None
        if fields or colnames:
            # caller wants the raw rows parsed into a DAL Rows object
            fields = [] if fields is None else fields
            if not isinstance(fields, list):
                fields = [fields]
            extracted_fields = []
            for field in fields:
                if isinstance(field, Table):
                    # a Table stands for all of its fields
                    extracted_fields.extend([f for f in field])
                else:
                    extracted_fields.append(field)
            if not colnames:
                colnames = [f.sqlsafe for f in extracted_fields]
            else:
                #: extracted_fields is empty we should make it from colnames
                # what 'col_fields' is for
                col_fields = []  # [[tablename, fieldname], ....]
                newcolnames = []
                for tf in colnames:
                    if "." in tf:
                        t_f = tf.split(".")
                        # re-quote "table.field" with the dialect's quoting
                        tf = ".".join(adapter.dialect.quote(f) for f in t_f)
                    else:
                        t_f = None
                    if not extracted_fields:
                        col_fields.append(t_f)
                    newcolnames.append(tf)
                colnames = newcolnames
            data = adapter.parse(
                data,
                fields=extracted_fields
                or [tf and self[tf[0]][tf[1]] for tf in col_fields],
                colnames=colnames,
            )
        return data
def _remove_references_to(self, thistable):
for table in self:
table._referenced_by = [
field for field in table._referenced_by if not field.table == thistable
]
def has_representer(self, name):
return callable(self.representers.get(name))
    def represent(self, name, *args, **kwargs):
        """Invoke the representer registered under ``name`` with the given
        arguments; raises ``KeyError`` when none is registered."""
        return self.representers[name](*args, **kwargs)
    def export_to_csv_file(self, ofile, *args, **kwargs):
        """Dump every table of the database to ``ofile`` as CSV.

        Each table is written as a ``TABLE <name>`` header followed by its
        rows, fetched in chunks of ``max_fetch_rows`` (default 500); the
        stream ends with an ``END`` marker.  Extra args/kwargs are forwarded
        to each Rows object's ``export_to_csv_file``.
        """
        step = long(kwargs.get("max_fetch_rows", 500))
        write_colnames = kwargs["write_colnames"] = kwargs.get("write_colnames", True)
        for table in self.tables:
            ofile.write("TABLE %s\r\n" % table)
            query = self._adapter.id_query(self[table])
            nrows = self(query).count()
            kwargs["write_colnames"] = write_colnames
            for k in range(0, nrows, step):
                # paginate the select so huge tables are not loaded at once
                self(query).select(limitby=(k, k + step)).export_to_csv_file(
                    ofile, *args, **kwargs
                )
                # column headers only for the first chunk of each table
                kwargs["write_colnames"] = False
            ofile.write("\r\n\r\n")
        ofile.write("END")
def import_from_csv_file(
self,
ifile,
id_map=None,
null="<NULL>",
unique="uuid",
map_tablenames=None,
ignore_missing_tables=False,
*args,
**kwargs,
):
# if id_map is None: id_map={}
id_offset = {} # only used if id_map is None
map_tablenames = map_tablenames or {}
for line in ifile:
line = line.strip()
if not line:
continue
elif line == "END":
return
elif not line.startswith("TABLE "):
raise SyntaxError("Invalid file format")
elif not line[6:] in self.tables:
raise SyntaxError("Unknown table : %s" % line[6:])
else:
tablename = line[6:]
tablename = map_tablenames.get(tablename, tablename)
if tablename is not None and tablename in self.tables:
self[tablename].import_from_csv_file(
ifile, id_map, null, unique, id_offset, *args, **kwargs
)
elif tablename is None or ignore_missing_tables:
# skip all non-empty lines
for line in ifile:
if not line.strip():
break
else:
raise RuntimeError(
"Unable to import table that does not exist.\nTry db.import_from_csv_file(..., map_tablenames={'table':'othertable'},ignore_missing_tables=True)"
)
    def can_join(self):
        """Whether the backend supports JOINs (delegates to the adapter)."""
        return self._adapter.can_join()
|
(*args, **kwargs)
|
721,517 |
pydal.base
|
__call__
| null |
def __call__(self, query=None, ignore_common_filters=None):
return self.where(query, ignore_common_filters)
|
(self, query=None, ignore_common_filters=None)
|
721,521 |
pydal.base
|
__getitem__
| null |
def __getitem__(self, key):
return self.__getattr__(str(key))
|
(self, key)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.