| Column | Dtype | Range |
|---|---|---|
| id | int64 | 0 – 843k |
| repository_name | string | lengths 7 – 55 |
| file_path | string | lengths 9 – 332 |
| class_name | string | lengths 3 – 290 |
| human_written_code | string | lengths 12 – 4.36M |
| class_skeleton | string | lengths 19 – 2.2M |
| total_program_units | int64 | 1 – 9.57k |
| total_doc_str | int64 | 0 – 4.2k |
| AvgCountLine | float64 | 0 – 7.89k |
| AvgCountLineBlank | float64 | 0 – 300 |
| AvgCountLineCode | float64 | 0 – 7.89k |
| AvgCountLineComment | float64 | 0 – 7.89k |
| AvgCyclomatic | float64 | 0 – 130 |
| CommentToCodeRatio | float64 | 0 – 176 |
| CountClassBase | float64 | 0 – 48 |
| CountClassCoupled | float64 | 0 – 589 |
| CountClassCoupledModified | float64 | 0 – 581 |
| CountClassDerived | float64 | 0 – 5.37k |
| CountDeclInstanceMethod | float64 | 0 – 4.2k |
| CountDeclInstanceVariable | float64 | 0 – 299 |
| CountDeclMethod | float64 | 0 – 4.2k |
| CountDeclMethodAll | float64 | 0 – 4.2k |
| CountLine | float64 | 1 – 115k |
| CountLineBlank | float64 | 0 – 9.01k |
| CountLineCode | float64 | 0 – 94.4k |
| CountLineCodeDecl | float64 | 0 – 46.1k |
| CountLineCodeExe | float64 | 0 – 91.3k |
| CountLineComment | float64 | 0 – 27k |
| CountStmt | float64 | 1 – 93.2k |
| CountStmtDecl | float64 | 0 – 46.1k |
| CountStmtExe | float64 | 0 – 90.2k |
| MaxCyclomatic | float64 | 0 – 759 |
| MaxInheritanceTree | float64 | 0 – 16 |
| MaxNesting | float64 | 0 – 34 |
| SumCyclomatic | float64 | 0 – 6k |
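If this table comes from a Hugging Face dataset, rows like the ones below can be inspected programmatically. A minimal sketch, assuming the table is loadable with the `datasets` library; the dataset path `"user/class-metrics"` is hypothetical and stands in for whatever dataset this dump belongs to:

```python
from datasets import load_dataset

# Hypothetical dataset path - substitute the real one for this table.
ds = load_dataset("user/class-metrics", split="train")

# Pick out small, lightly commented classes using the Understand-style metrics.
small_classes = ds.filter(lambda row: row["CountLine"] <= 30 and row["CommentToCodeRatio"] < 0.2)
print(small_classes[0]["class_name"])
```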
- **id:** 143,648
- **repository_name:** Kortemme-Lab/klab
- **file_path:** `Kortemme-Lab_klab/klab/bio/ligand.py`
- **class_name:** `klab.bio.ligand.SimplePDBLigand`

**human_written_code:**

```python
class SimplePDBLigand(object):
    '''A simple container class for the basic ligand properties described in PDB files. The Ligand and PDBLigand classes
    have more features.'''

    def __init__(self, ligand_code, sequence_id, description = None, chain_id = None, names = [], formula = None, number_of_atoms = None):
        assert(len(sequence_id) == 5)
        self.PDBCode = ligand_code
        self.Chain = chain_id
        self.SequenceID = sequence_id
        self.Description = description
        self.Names = names
        self.Formula = formula
        self.NumberOfAtoms = number_of_atoms

    def get_code(self):
        return self.PDBCode

    def __repr__(self):
        s = ['{0}{1}: {2}'.format(self.Chain or ' ', self.SequenceID, self.get_code())]
        if self.Formula:
            s.append(self.Formula)
        if self.Description:
            s.append(self.Description)
        if self.Names:
            s.append('(' + ', '.join([n for n in self.Names]) + ')')
        return ', '.join(s)
```

**class_skeleton:**

```python
class SimplePDBLigand(object):
    '''A simple container class for the basic ligand properties described in PDB files. The Ligand and PDBLigand classes
    have more features.'''

    def __init__(self, ligand_code, sequence_id, description = None, chain_id = None, names = [], formula = None, number_of_atoms = None):
        pass

    def get_code(self):
        pass

    def __repr__(self):
        pass
```

**metrics:** total_program_units=4, total_doc_str=1, AvgCountLine=7, AvgCountLineBlank=0, AvgCountLineCode=7, AvgCountLineComment=0, AvgCyclomatic=2, CommentToCodeRatio=0.1, CountClassBase=1, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=1, CountDeclInstanceMethod=3, CountDeclInstanceVariable=7, CountDeclMethod=3, CountDeclMethodAll=3, CountLine=28, CountLineBlank=5, CountLineCode=21, CountLineCodeDecl=12, CountLineCodeExe=17, CountLineComment=2, CountStmt=21, CountStmtDecl=12, CountStmtExe=17, MaxCyclomatic=4, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=6
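The class above is self-contained, so a short sketch (ligand values invented for illustration; the import path is taken from the `class_name` field) can exercise the five-character `sequence_id` requirement and the `__repr__` formatting:

```python
from klab.bio.ligand import SimplePDBLigand

# sequence_id must be exactly five characters (residue number plus insertion code columns).
lig = SimplePDBLigand('GTP', ' 123 ', chain_id='A', formula='C10 H16 N5 O14 P3',
                      description="GUANOSINE-5'-TRIPHOSPHATE")
print(lig)  # A 123 : GTP, C10 H16 N5 O14 P3, GUANOSINE-5'-TRIPHOSPHATE
```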
- **id:** 143,649
- **repository_name:** Kortemme-Lab/klab
- **file_path:** `Kortemme-Lab_klab/klab/bio/alignment.py`
- **class_name:** `klab.bio.alignment.MatchedChainList`

**human_written_code:**

```python
class MatchedChainList(object):
    '''A helper class to store a list of chains related to pdb_name:chain_id and their percentage identities.'''

    def __init__(self, pdb_name, chain_id):
        self.pdb_name = pdb_name
        self.chain_id = chain_id
        self.chain_list = []

    def add_chain(self, other_pdb_id, chain_id, percentage_identity):
        self.chain_list.append((percentage_identity, other_pdb_id, chain_id))
        self.chain_list = sorted(self.chain_list)

    def get_related_chains_ids(self, other_pdb_id):
        return [e[2] for e in self.chain_list if e[1] == other_pdb_id]

    def get_related_chains_ids_and_identities(self, other_pdb_id):
        return [(e[2], e[0]) for e in self.chain_list if e[1] == other_pdb_id]

    def __repr__(self):
        s = ['Matched chain list for %s_%s' % (self.pdb_name, self.chain_id)]
        if self.chain_list:
            for mtch in self.chain_list:
                s.append('\t%s_%s at %s%%' % (mtch[1], mtch[2], mtch[0]))
        else:
            s.append('No matches.')
        return '\n'.join(s)
```

**class_skeleton:**

```python
class MatchedChainList(object):
    '''A helper class to store a list of chains related to pdb_name:chain_id and their percentage identities.'''

    def __init__(self, pdb_name, chain_id):
        pass

    def add_chain(self, other_pdb_id, chain_id, percentage_identity):
        pass

    def get_related_chains_ids(self, other_pdb_id):
        pass

    def get_related_chains_ids_and_identities(self, other_pdb_id):
        pass

    def __repr__(self):
        pass
```

**metrics:** total_program_units=6, total_doc_str=1, AvgCountLine=4, AvgCountLineBlank=0, AvgCountLineCode=4, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0.05, CountClassBase=1, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=5, CountDeclInstanceVariable=3, CountDeclMethod=5, CountDeclMethodAll=5, CountLine=26, CountLineBlank=5, CountLineCode=20, CountLineCodeDecl=11, CountLineCodeExe=14, CountLineComment=1, CountStmt=19, CountStmtDecl=11, CountStmtExe=13, MaxCyclomatic=3, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=7
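Note that `add_chain` keeps the list sorted by ascending percentage identity, which determines the order of the query results. A minimal sketch (PDB IDs borrowed from the docstrings elsewhere in this file; values invented):

```python
from klab.bio.alignment import MatchedChainList

mcl = MatchedChainList('3MWO', 'A')
mcl.add_chain('1BN1', 'A', 100.0)
mcl.add_chain('1BN1', 'B', 98.5)
print(mcl.get_related_chains_ids('1BN1'))                 # ['B', 'A'] - ascending identity
print(mcl.get_related_chains_ids_and_identities('1BN1'))  # [('B', 98.5), ('A', 100.0)]
```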
- **id:** 143,650
- **repository_name:** Kortemme-Lab/klab
- **file_path:** `Kortemme-Lab_klab/klab/bio/ligand.py`
- **class_name:** `klab.bio.ligand.PDBLigand`

**human_written_code:**

```python
class PDBLigand(Ligand):
    '''A subclass of Ligand used to represent instances of ligands in a PDB file.
    To save time processing the same ligand code multiple times per PDB, a Ligand object can be created and then instances
    formed using the instantiate_from_ligand function e.g.:
        gtpl = PDBLigand('GTP')
        for seq_id in ('A 123 ', 'A 149A'):
            l = PDBLigand.instantiate_from_ligand(gtpl, seq_id[0], seq_id[1:])
        for seq_id in (('A 152 ', 'GTP'),): # e.g. if the ligand was renamed ' X ' in the PDB file for some reason but was known to be GTP
            l = PDBLigand.instantiate_from_ligand(gtpl, seq_id[0][0], seq_id[0][1:], pdb_ligand_code = seq_id[1])
    '''

    def __init__(self, ligand_code, chain_id = None, sequence_id = None, pdb_ligand_code = None):
        super(PDBLigand, self).__init__(ligand_code)
        self.PDBLigandCode = pdb_ligand_code or ligand_code
        self.Chain = None
        self.SequenceID = None
        self.set_id(chain_id, sequence_id)

    def set_id(self, chain_id, sequence_id):
        if sequence_id:
            # Require all five columns from the PDB file.
            assert(len(sequence_id) == 5 and sequence_id[:4].strip().isdigit())
            self.SequenceID = sequence_id
            assert(len(chain_id) == 1)
            self.Chain = chain_id

    def __repr__(self):
        s = super(PDBLigand, self).__repr__()
        if self.Chain and self.SequenceID:
            s += '\nSequence ID : {0}{1}'.format(self.Chain, self.SequenceID)
        if self.PDBLigandCode and self.PDBLigandCode != self.LigandCode:
            s += '\nPDB code : {0}'.format(self.PDBLigandCode)
        return s

    @classmethod
    def instantiate_from_ligand(cls, ligand, chain_id, sequence_id, pdb_ligand_code = None):
        l = cls(ligand.LigandCode)
        l.__dict__ = copy.deepcopy(ligand.__dict__)
        l.PDBLigandCode = pdb_ligand_code or l.LigandCode
        l.set_id(chain_id, sequence_id)
        return l

    @classmethod
    def retrieve_data_from_rcsb(cls, ligand_code, pdb_id, chain_id, sequence_id, pdb_ligand_code = None, silent = True, cached_dir = None):
        l = super(PDBLigand, cls).retrieve_data_from_rcsb(ligand_code, pdb_id = pdb_id, silent = silent, cached_dir = cached_dir)
        l.pdb_id = pdb_id
        l.PDBLigandCode = pdb_ligand_code or l.LigandCode
        l.set_id(chain_id, sequence_id)
        return l
```

**class_skeleton:**

```python
class PDBLigand(Ligand):
    '''A subclass of Ligand used to represent instances of ligands in a PDB file.
    To save time processing the same ligand code multiple times per PDB, a Ligand object can be created and then instances
    formed using the instantiate_from_ligand function e.g.:
        gtpl = PDBLigand('GTP')
        for seq_id in ('A 123 ', 'A 149A'):
            l = PDBLigand.instantiate_from_ligand(gtpl, seq_id[0], seq_id[1:])
        for seq_id in (('A 152 ', 'GTP'),): # e.g. if the ligand was renamed ' X ' in the PDB file for some reason but was known to be GTP
            l = PDBLigand.instantiate_from_ligand(gtpl, seq_id[0][0], seq_id[0][1:], pdb_ligand_code = seq_id[1])
    '''

    def __init__(self, ligand_code, chain_id = None, sequence_id = None, pdb_ligand_code = None):
        pass

    def set_id(self, chain_id, sequence_id):
        pass

    def __repr__(self):
        pass

    @classmethod
    def instantiate_from_ligand(cls, ligand, chain_id, sequence_id, pdb_ligand_code = None):
        pass

    @classmethod
    def retrieve_data_from_rcsb(cls, ligand_code, pdb_id, chain_id, sequence_id, pdb_ligand_code = None, silent = True, cached_dir = None):
        pass
```

**metrics:** total_program_units=8, total_doc_str=1, AvgCountLine=6, AvgCountLineBlank=0, AvgCountLineCode=6, AvgCountLineComment=0, AvgCyclomatic=2, CommentToCodeRatio=0.29, CountClassBase=1, CountClassCoupled=1, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=3, CountDeclInstanceVariable=3, CountDeclMethod=5, CountDeclMethodAll=15, CountLine=56, CountLineBlank=12, CountLineCode=34, CountLineCodeDecl=14, CountLineCodeExe=26, CountLineComment=10, CountStmt=32, CountStmtDecl=12, CountStmtExe=26, MaxCyclomatic=3, MaxInheritanceTree=2, MaxNesting=1, SumCyclomatic=8
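The docstring above already shows the intended usage; the sketch below simply restates it in runnable form. It assumes the `Ligand` base class in the same module is importable, and whether the `PDBLigand('GTP')` constructor fetches data from the RCSB is determined by that base class, which is not part of this record:

```python
from klab.bio.ligand import PDBLigand  # Ligand, the base class, lives in the same module

# Parse the data for GTP once, then stamp out per-instance copies cheaply.
gtpl = PDBLigand('GTP')
for seq_id in ('A 123 ', 'A 149A'):
    l = PDBLigand.instantiate_from_ligand(gtpl, seq_id[0], seq_id[1:])
    print(repr(l))
```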
- **id:** 143,651
- **repository_name:** Kortemme-Lab/klab
- **file_path:** `Kortemme-Lab_klab/klab/bio/ligand.py`
- **class_name:** `klab.bio.ligand.PDBIon`

**human_written_code:**

```python
class PDBIon(SimplePDBLigand):
    '''A simple container class for the basic ion properties described in PDB files.'''

    def __init__(self, *args, **kwargs):
        super(PDBIon, self).__init__(*args, **kwargs)
        self.Element = ''.join([c for c in self.PDBCode if c.isalpha()]).strip().lower()
        self.Element = self.Element[0].upper() + self.Element[1:] # the elemental symbol
        assert((1 <= len(self.Element) <= 2) or (self.Element.upper() in three_letter_ion_codes) or (self.Element.upper() == 'UNX'))
        assert(self.NumberOfAtoms == None or self.NumberOfAtoms == 1)

    def get_db_records(self, pdb_id, pdb_ion_code = None, file_content_id = None, ion_id = None):
        # Extract the charge of the ion - we do not care about the number of ions
        ion_formula = None
        if self.Formula:
            ion_formula = re.match('\s*\d+[(](.*?)[)]\s*', self.Formula)
            if ion_formula:
                ion_formula = ion_formula.group(1)
            else:
                ion_formula = self.Formula
        iname = None
        if self.Names:
            iname = self.Names[0]
        return dict(
            Ion = dict(
                PDBCode = self.PDBCode,
                Formula = ion_formula,
                Description = self.Description or iname
            ),
            PDBIon = dict(
                PDBFileID = pdb_id,
                Chain = self.Chain,
                SeqID = self.SequenceID,
                PDBIonCode = pdb_ion_code or self.PDBCode, # the code may be changed in non-standard/non-RCSB PDB files
                IonID = ion_id, # set to Ion.ID
                ParamsFileContentID = file_content_id,
                Element = self.Element
            )
        )

    def get_element(self):
        return self.Element

    def __repr__(self):
        return super(PDBIon, self).__repr__() + ', ' + self.Element + ' ion'
```

**class_skeleton:**

```python
class PDBIon(SimplePDBLigand):
    '''A simple container class for the basic ion properties described in PDB files.'''

    def __init__(self, *args, **kwargs):
        pass

    def get_db_records(self, pdb_id, pdb_ion_code = None, file_content_id = None, ion_id = None):
        pass

    def get_element(self):
        pass

    def __repr__(self):
        pass
```

**metrics:** total_program_units=5, total_doc_str=1, AvgCountLine=10, AvgCountLineBlank=1, AvgCountLineCode=9, AvgCountLineComment=1, AvgCyclomatic=2, CommentToCodeRatio=0.13, CountClassBase=1, CountClassCoupled=2, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=4, CountDeclInstanceVariable=4, CountDeclMethod=4, CountDeclMethodAll=7, CountLine=49, CountLineBlank=9, CountLineCode=38, CountLineCodeDecl=11, CountLineCodeExe=33, CountLineComment=5, CountStmt=22, CountStmtDecl=8, CountStmtExe=17, MaxCyclomatic=4, MaxInheritanceTree=2, MaxNesting=2, SumCyclomatic=7
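Since `PDBIon` only adds element parsing on top of `SimplePDBLigand` (the first record above), a small sketch with invented values illustrates `get_element` and the formula parsing in `get_db_records`. It assumes the `klab.bio.ligand` module is importable:

```python
from klab.bio.ligand import PDBIon

ion = PDBIon('MG', ' 204 ', chain_id='A', formula='1(MG 2+)', number_of_atoms=1)
print(ion.get_element())          # Mg
records = ion.get_db_records('3MWO')
print(records['Ion']['Formula'])  # MG 2+  (charge extracted from '1(MG 2+)')
```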
- **id:** 143,652
- **repository_name:** Kortemme-Lab/klab
- **file_path:** `Kortemme-Lab_klab/klab/bio/ligand.py`
- **class_name:** `klab.bio.ligand.LigandMap`

**human_written_code:**

```python
class LigandMap(object):
    '''A simple container class to map between ligands.
    This is useful for keeping track of ligands in modified PDB files where the user has renamed the ligand ID (e.g. to "LIG") or the chain/residue ID (e.g. to chain "X").
    '''

    class _MapPoint(object):
        '''A mapping from a single ligand in one PDB to a single ligand in another.'''

        def __init__(self, from_pdb_code, from_pdb_residue_id, to_pdb_code, to_pdb_residue_id, strict = True):
            '''PDB codes are the contents of columns [17:20] (Python format i.e. zero-indexed) of HETATM lines.
            PDB residue IDs are the contents of columns [21:27] of HETATM lines.'''
            assert((len(from_pdb_residue_id) == 6) and (len(to_pdb_residue_id) == 6))
            assert(from_pdb_residue_id[1:5].strip().isdigit() and to_pdb_residue_id[1:5].strip().isdigit())
            if strict:
                assert((len(from_pdb_code) == 3) and (len(to_pdb_code) == 3))
            else:
                assert((1 <= len(from_pdb_code) <= 3) and (1 <= len(to_pdb_code) <= 3))
                if len(from_pdb_code) < 3:
                    from_pdb_code = from_pdb_code.strip().rjust(3)
                if len(to_pdb_code) < 3:
                    to_pdb_code = to_pdb_code.strip().rjust(3)
            self.from_pdb_code = from_pdb_code
            self.to_pdb_code = to_pdb_code
            self.from_pdb_residue_id = from_pdb_residue_id
            self.to_pdb_residue_id = to_pdb_residue_id

        def __repr__(self):
            return '{0} ({1}) -> {2} ({3})'.format(self.from_pdb_residue_id, self.from_pdb_code, self.to_pdb_residue_id, self.to_pdb_code)

    def __init__(self):
        self.mapping = {}
        self.code_map = {}

    def __repr__(self):
        import pprint
        return pprint.pformat(self.mapping)

    @staticmethod
    def from_tuples_dict(pair_dict):
        '''pair_dict should be a dict mapping tuple (HET code, residue ID) -> (HET code, residue ID) e.g. {('MG ', 'A 204 ') : ('MG ', 'C 221 '), ...}.
        HET codes and residue IDs should respectively correspond to columns 17:20 and 21:27 of the PDB file.
        '''
        lm = LigandMap()
        for k, v in pair_dict.items():
            lm.add(k[0], k[1], v[0], v[1])
        return lm

    @staticmethod
    def from_code_map(ligand_code_map):
        lm = LigandMap()
        for k, v in ligand_code_map.items():
            lm.add_code_mapping(k, v)
        return lm

    def add(self, from_pdb_code, from_pdb_residue_id, to_pdb_code, to_pdb_residue_id, strict = True):
        assert(from_pdb_residue_id not in self.mapping)
        self.mapping[from_pdb_residue_id] = LigandMap._MapPoint(from_pdb_code, from_pdb_residue_id, to_pdb_code, to_pdb_residue_id, strict = strict)
        self.add_code_mapping(from_pdb_code, to_pdb_code)

    def add_code_mapping(self, from_pdb_code, to_pdb_code):
        '''Add a code mapping without a given instance.'''
        # Consistency check - make sure that we always map the same code e.g. 'LIG' to the same code e.g. 'GTP'
        if from_pdb_code in self.code_map:
            assert(self.code_map[from_pdb_code] == to_pdb_code)
        else:
            self.code_map[from_pdb_code] = to_pdb_code

    def map_code(self, from_pdb_code):
        return self.code_map.get(from_pdb_code)

    def is_injective(self):
        '''Returns True if the mapping is injective (1-to-1).'''
        codomain_residues = [v.to_pdb_residue_id for k, v in self.mapping.items()]
        return(len(codomain_residues) == len(set(codomain_residues)))

    def is_complete(self, all_domain_residue_ids):
        '''Check that all ligands (specified via the set or list all_domain_residue_ids containing columns 21:27 of the
        HETATM records) in the source PDB file are considered in the mapping.'''
        mapped_domain_residues = sorted([v.from_pdb_residue_id for k, v in self.mapping.items()])
        assert(len(all_domain_residue_ids) == len(set(all_domain_residue_ids)))
        return mapped_domain_residues == sorted(all_domain_residue_ids)
```

**class_skeleton:**

```python
class LigandMap(object):
    '''A simple container class to map between ligands.
    This is useful for keeping track of ligands in modified PDB files where the user has renamed the ligand ID (e.g. to "LIG") or the chain/residue ID (e.g. to chain "X").
    '''

    class _MapPoint(object):
        '''A mapping from a single ligand in one PDB to a single ligand in another.'''

        def __init__(self, from_pdb_code, from_pdb_residue_id, to_pdb_code, to_pdb_residue_id, strict = True):
            '''PDB codes are the contents of columns [17:20] (Python format i.e. zero-indexed) of HETATM lines.
            PDB residue IDs are the contents of columns [21:27] of HETATM lines.'''
            pass

        def __repr__(self):
            pass

    def __init__(self):
        pass

    def __repr__(self):
        pass

    @staticmethod
    def from_tuples_dict(pair_dict):
        '''pair_dict should be a dict mapping tuple (HET code, residue ID) -> (HET code, residue ID) e.g. {('MG ', 'A 204 ') : ('MG ', 'C 221 '), ...}.
        HET codes and residue IDs should respectively correspond to columns 17:20 and 21:27 of the PDB file.
        '''
        pass

    @staticmethod
    def from_code_map(ligand_code_map):
        pass

    def add(self, from_pdb_code, from_pdb_residue_id, to_pdb_code, to_pdb_residue_id, strict = True):
        pass

    def add_code_mapping(self, from_pdb_code, to_pdb_code):
        '''Add a code mapping without a given instance.'''
        pass

    def map_code(self, from_pdb_code):
        pass

    def is_injective(self):
        '''Returns True if the mapping is injective (1-to-1).'''
        pass

    def is_complete(self, all_domain_residue_ids):
        '''Check that all ligands (specified via the set or list all_domain_residue_ids containing columns 21:27 of the
        HETATM records) in the source PDB file are considered in the mapping.'''
        pass
```

**metrics:** total_program_units=15, total_doc_str=7, AvgCountLine=6, AvgCountLineBlank=0, AvgCountLineCode=5, AvgCountLineComment=1, AvgCyclomatic=2, CommentToCodeRatio=0.25, CountClassBase=1, CountClassCoupled=2, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=7, CountDeclInstanceVariable=2, CountDeclMethod=9, CountDeclMethodAll=9, CountLine=98, CountLineBlank=29, CountLineCode=55, CountLineCodeDecl=28, CountLineCodeExe=39, CountLineComment=14, CountStmt=51, CountStmtDecl=26, CountStmtExe=37, MaxCyclomatic=4, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=17
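The `from_tuples_dict` docstring above already contains a worked example; the sketch below just runs it and checks the mapping properties:

```python
from klab.bio.ligand import LigandMap

lm = LigandMap.from_tuples_dict({('MG ', 'A 204 '): ('MG ', 'C 221 ')})
print(lm.map_code('MG '))          # 'MG '
print(lm.is_injective())           # True - exactly one codomain residue
print(lm.is_complete(['A 204 ']))  # True - every source HETATM residue is mapped
```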
- **id:** 143,653
- **repository_name:** Kortemme-Lab/klab
- **file_path:** `Kortemme-Lab_klab/klab/RosettaProtocols.py`
- **class_name:** `klab.RosettaProtocols.RosettaProtocolGroup`

**human_written_code:**

```python
class RosettaProtocolGroup:

    def __init__(self, name, color, public = True):
        self.name = name
        self.protocols = []
        self.size = 0
        self.color = color
        self.public = public
        self.description = ""

    def __getitem__(self, index):
        return self.protocols[index]

    def getProtocols(self):
        return self.protocols

    def setDescription(self, description):
        self.description = description

    def getDescription(self):
        return self.description

    def add(self, protocol):
        self.protocols.append(protocol)
        protocol.setGroup(self)
        self.size += 1

    def getSize(self):
        return self.size

    def getName(self):
        return self.name
```

**class_skeleton:**

```python
class RosettaProtocolGroup:

    def __init__(self, name, color, public = True):
        pass

    def __getitem__(self, index):
        pass

    def getProtocols(self):
        pass

    def setDescription(self, description):
        pass

    def getDescription(self):
        pass

    def add(self, protocol):
        pass

    def getSize(self):
        pass

    def getName(self):
        pass
```

**metrics:** total_program_units=9, total_doc_str=0, AvgCountLine=3, AvgCountLineBlank=0, AvgCountLineCode=3, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0, CountClassBase=0, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=8, CountDeclInstanceVariable=6, CountDeclMethod=8, CountDeclMethodAll=8, CountLine=31, CountLineBlank=7, CountLineCode=24, CountLineCodeDecl=15, CountLineCodeExe=15, CountLineComment=0, CountStmt=24, CountStmtDecl=15, CountStmtExe=15, MaxCyclomatic=1, MaxInheritanceTree=0, MaxNesting=0, SumCyclomatic=8
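`RosettaProtocolGroup.add` calls `protocol.setGroup(self)`, so any object exposing a `setGroup` method will do. A minimal sketch with an invented stub protocol (the real protocol class is not part of this record):

```python
from klab.RosettaProtocols import RosettaProtocolGroup

class _StubProtocol(object):
    '''Invented stand-in: add() only requires a setGroup method.'''
    def setGroup(self, group):
        self.group = group

group = RosettaProtocolGroup('Docking', '#336699')
group.add(_StubProtocol())
print(group.getName(), group.getSize())  # Docking 1
```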
- **id:** 143,654
- **repository_name:** Kortemme-Lab/klab
- **file_path:** `Kortemme-Lab_klab/klab/bio/alignment.py`
- **class_name:** `klab.bio.alignment.MultipleSequenceAlignmentPrinter`

**human_written_code:**

```python
class MultipleSequenceAlignmentPrinter(object):
    '''A class for generating formatted strings from a multiple sequence alignment. These strings should be the result
    of an MSA i.e. they should all have the same length.
    '''

    def __init__(self, sequence_names, sequences, sequence_tooltips = None):
        if not sequence_tooltips:
            sequence_tooltips = [None] * len(sequences)
        assert(len(sequence_names) == len(sequences) and len(sequences) == len(sequence_tooltips) and len(sequence_names) > 1) # The sequence names must correspond with the number of sequences and we require at least two sequences
        assert(len(set(sequence_names)) == len(sequence_names)) # sequence_names must be a list of unique names
        # Make sure that if a sequence has tooltips then there is an injection between the residues and the tooltips (a tooltip
        # may be None rather than a string)
        for x in range(len(sequences)):
            if sequence_tooltips[x]:
                assert(len(str(sequences[x]).replace('-', '')) == len(sequence_tooltips[x]))
        # Make sure that the sequence lengths are all the same size
        sequence_lengths = list(map(len, sequences))
        assert(len(set(sequence_lengths)) == 1)
        self.sequence_length = sequence_lengths[0]
        self.label_width = max(list(map(len, sequence_names)))
        self.sequence_names = sequence_names
        self.sequences = sequences
        self.sequence_tooltips = sequence_tooltips

    def to_lines(self, width = 80, reversed = False, line_separator = '\n'):
        s = []
        sequences, sequence_names = self.sequences, self.sequence_names
        if reversed:
            sequences, sequence_names = self.sequences[::-1], self.sequence_names[::-1]
        if self.label_width + 2 < width:
            headers = [sequence_name.ljust(self.label_width + 2) for sequence_name in sequence_names]
            num_residues_per_line = width - self.label_width
            sequence_strs = list(map(str, sequences))
            for x in range(0, self.sequence_length, num_residues_per_line):
                for y in range(len(sequence_strs)):
                    s.append('%s %s' % (headers[y], sequence_strs[y][x:x + num_residues_per_line]))
        else:
            raise Exception('The width (%d characters) is not large enough to display the sequence alignment.' % width)
        return line_separator.join(s)

    def to_html(self, width = 80, reversed = False, line_separator = '\n', header_separator = '_', add_tooltips = True, extra_tooltip_class = ''):
        html = []
        html.append('<div class="chain_alignment">')
        sequences, sequence_names, sequence_tooltips = self.sequences, self.sequence_names, self.sequence_tooltips
        num_sequences = len(sequences)
        # Turn off tooltips if requested
        if not(add_tooltips):
            sequence_tooltips = [None] * num_sequences
        residue_counters = [0] * num_sequences
        if reversed:
            sequences, sequence_names, sequence_tooltips = self.sequences[::-1], self.sequence_names[::-1], self.sequence_tooltips[::-1]
        if self.label_width + 2 < width:
            # headers is a list of pairs split by header_separator. If header_separator is not specified then the
            # second element will be an empty string
            if header_separator:
                headers = [sequence_name.split(header_separator) for sequence_name in sequence_names]
            else:
                headers = [[sequence_name, ''] for sequence_name in sequence_names]
            num_residues_per_line = width - self.label_width
            sequence_strs = list(map(str, sequences))
            # x iterates over a chunk of the sequence alignment
            for x in range(0, self.sequence_length, num_residues_per_line):
                html.append('<div class="sequence_block">')
                # Create a list, subsequence_list, where each entry corresponds to the chunk of the sequence alignment for each sequence
                subsequence_list = []
                residue_substrings = []
                for y in range(num_sequences):
                    subsequence_list.append(self.sequences[y][x:x+num_residues_per_line])
                    residue_substrings.append([])
                # Check that the subsequences are the same length
                subsequence_lengths = set(map(len, [rs for rs in subsequence_list]))
                assert(len(subsequence_lengths) == 1)
                subsequence_length = subsequence_lengths.pop()
                # Iterate over all residues in the subsequences, marking up residues that differ
                for z in range(subsequence_length):
                    residues = set([subsequence_list[y][z] for y in range(num_sequences) if subsequence_list[y][z] != '-'])
                    if len(residues) == 1:
                        # All residues are the same
                        for y in range(num_sequences):
                            tooltip = ''
                            tooltips = sequence_tooltips[y]
                            if subsequence_list[y][z] != '-':
                                residue_index = residue_counters[y]
                                if tooltips and tooltips[residue_index] != None:
                                    tooltip = tooltips[residue_index]
                                residue_counters[y] += 1
                            residue_type = subsequence_list[y][z]
                            if tooltip:
                                residue_substrings[y].append('<span class="%s" title="%s %s">%s</span>' % (extra_tooltip_class, residue_type_1to3_map[residue_type], tooltip.strip(), residue_type))
                            elif tooltips:
                                residue_substrings[y].append('<span class="%s missing_ATOMs" title="No ATOM records">%s</span>' % (extra_tooltip_class, residue_type))
                            else:
                                residue_substrings[y].append(residue_type)
                    else:
                        # The residues differ - mark up the differing residues
                        for y in range(num_sequences):
                            tooltip = ''
                            tooltips = sequence_tooltips[y]
                            if subsequence_list[y][z] != '-':
                                residue_index = residue_counters[y]
                                if tooltips and tooltips[residue_index] != None:
                                    tooltip = tooltips[residue_index]
                                residue_counters[y] += 1
                            if tooltip:
                                residue_type = subsequence_list[y][z]
                                residue_substrings[y].append('<span class="%s differing_residue" title="%s %s">%s</span>' % (extra_tooltip_class, residue_type_1to3_map[residue_type], tooltip.strip(), residue_type))
                            elif tooltips:
                                residue_substrings[y].append('<span class="%s differing_residue missing_ATOMs" title="No ATOM records">%s</span>' % (extra_tooltip_class, residue_type))
                            else:
                                residue_substrings[y].append('<span class="differing_residue">%s</span>' % (subsequence_list[y][z]))
                for y in range(num_sequences):
                    html.append('<div class="sequence_alignment_line sequence_alignment_line_%s"><span>%s</span><span>%s</span><span>%s</span></div>' % (headers[y][0], headers[y][0], headers[y][1], ''.join(residue_substrings[y])))
                html.append('</div>') # sequence_block
        else:
            raise Exception('The width (%d characters) is not large enough to display the sequence alignment.' % width)
        html.append('</div>')
        # Sanity check our tooltipping logic - ensure that the number of times we tried to assign a tooltip for a residue in a sequence matches the length of the sequence
        assert(residue_counters == [len([c for c in str(seq).strip() if c != '-' ]) for seq in sequences])
        return '\n'.join(html).replace(' class=""', '')
```

**class_skeleton:**

```python
class MultipleSequenceAlignmentPrinter(object):
    '''A class for generating formatted strings from a multiple sequence alignment. These strings should be the result
    of an MSA i.e. they should all have the same length.
    '''

    def __init__(self, sequence_names, sequences, sequence_tooltips = None):
        pass

    def to_lines(self, width = 80, reversed = False, line_separator = '\n'):
        pass

    def to_html(self, width = 80, reversed = False, line_separator = '\n', header_separator = '_', add_tooltips = True, extra_tooltip_class = ''):
        pass
```

**metrics:** total_program_units=4, total_doc_str=1, AvgCountLine=46, AvgCountLineBlank=8, AvgCountLineCode=33, AvgCountLineComment=5, AvgCyclomatic=10, CommentToCodeRatio=0.19, CountClassBase=1, CountClassCoupled=6, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=3, CountDeclInstanceVariable=5, CountDeclMethod=3, CountDeclMethodAll=3, CountLine=145, CountLineBlank=29, CountLineCode=100, CountLineCodeDecl=37, CountLineCodeExe=96, CountLineComment=19, CountStmt=92, CountStmtDecl=37, CountStmtExe=88, MaxCyclomatic=20, MaxInheritanceTree=1, MaxNesting=7, SumCyclomatic=29
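Because the constructor only needs equal-length aligned strings plus unique names, the printer can be exercised without any alignment machinery. A sketch with two hand-aligned, invented sequences:

```python
from klab.bio.alignment import MultipleSequenceAlignmentPrinter

msa = MultipleSequenceAlignmentPrinter(
    ['Scaffold_A', 'Model_A'],
    ['MKT-AYIAKQR', 'MKTQAYIAKQR'],  # pre-aligned: both strings are the same length
)
print(msa.to_lines(width=40))
# Scaffold_A   MKT-AYIAKQR
# Model_A      MKTQAYIAKQR
```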
143,655 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/alignment.py
|
klab.bio.alignment.PipelinePDBChainMapper
|
class PipelinePDBChainMapper(BasePDBChainMapper):
'''Similar to the removed PDBChainMapper class except this takes a list of PDB files which should be related in some way.
The matching is done pointwise, matching all PDBs in the list to each other.
This class is useful for a list of structures that are the result of a linear pipeline e.g. a scaffold structure (RCSB),
a model structure (Rosetta), and a design structure (experimental result).
The 'chain_mapping' member stores a mapping from a pair (pdb_name1, pdb_name2) to the mapping from chain IDs in pdb_name1 to
a MatchedChainList object. This object can be used to return the list of chain IDs in pdb_name2 related to the
respective chain in pdb_name1 based on sequence alignment. It can also be used to return the percentage identities
for this alignment. The old mapping and mapping_percentage_identity members of this class can be built from this member
e.g.
self.mapping[('Scaffold', 'ExpStructure')] == self.get_chain_mapping('Scaffold', 'ExpStructure')
The 'residue_id_mapping' member stores a mapping from a pair (pdb_name1, pdb_name2) to a mapping
'ATOM' -> chains_of_pdb_name_1 -> ATOM residues of that chain -> list of corresponding ATOM residues in the corresponding chains of pdb_name2
'SEQRES' -> chains_of_pdb_name_1 -> SEQRES residues of that chain -> pairs of (chain_id, corresponding SEQRES residue_id) in the corresponding chains of pdb_name2
For example, using the homodimer 3MW0 for both Scaffold and ExpStructure:
residue_id_mapping[('Scaffold', 'ExpStructure')]['ATOM']['A'] -> {'A 167 ': ['A 167 ', 'B 167 '], ...}
residue_id_mapping[('Scaffold', 'ExpStructure')]['SEQRES']['B'] -> {167 : [('A', 167), ('C', 167)], ...}
Objects of this class have a differing_atom_residue_ids mapping which maps the pair (pdb_name1, pdb_name2) to the list
of ATOM residues *in pdb_name1* that differ from those of pdb_name2. Note: there is some subtlety here in terms of
direction. For example, take this artificial example. We take a homodimer 3MWO as the scaffold and a monomer 1BN1
with identical sequence as the model. We mutate A110 in 1BN1. We then take 3MWO with a mutation on A106 as the design.
chain_mapper = ScaffoldModelDesignChainMapper.from_file_contents(retrieve_pdb('3MWO'), retrieve_pdb('1BN1').replace('ASP A 110', 'ASN A 110'), retrieve_pdb('3MWO').replace('GLU A 106', 'GLN A 106'))
differing_atom_residue_ids then looks like this:
('Model', 'ExpStructure') = ['A 106 ', 'A 110 '] # In Model, A110 is a mutation, reverted in ExpStructure. In ExpStructure, A106 is a mutation.
('Model', 'Scaffold') = ['A 110 '] # In Model, A110 is a mutation.
('ExpStructure', 'Model') = ['A 106 ', 'A 110 ', 'B 110 '] # In ExpStructure, A106 is a mutation. A110 and B110 are revertant mutations from the Model.
('ExpStructure', 'Scaffold') = ['A 106 '] # In ExpStructure, A106 is a mutation.
('Scaffold', 'ExpStructure') = ['A 106 ', 'B 106 '] # Note: In Scaffold, A106 is the wildtype which was mutated in ExpStructure. Since B106 also maps to A106, that is added to the list of differing residues.
('Scaffold', 'Model') = ['A 110 ', 'B 110 '] # In Scaffold, A110 and B110 are the wildtypes which was mutated in Model.
There is a subtlety here - the differing residue ids from Scaffold to ExpStructure are A106 and B106 corresponding to the
mutated A106 in the ExpStructure. However, the differing residue ids from ExpStructure to Scaffold has only one member - A106. This
makes sense as it is the only mutation however this may not be the desired behavior - one may wish instead to close
the list of residues over the relations mapping the residues between the structures i.e. to generate an equivalence
relation from the relation described by the mappings Scaffold->ExpStructure and ExpStructure->Scaffold. If that were done, then
('ExpStructure', 'Scaffold') would be ['A 106 ', 'B 106 '] as ExpStructure:A106 -> {Scaffold:A106, Scaffold:B106} and
Scaffold:B106 -> {ExpStructure:A106, ExpStructure:B106} so ExpStructure:A106 and ExpStructure:B106 are in the same equivalence class.
If use_seqres_sequences_if_possible is set, the alignment will use the SEQRES sequences when available. See match_pdb_chains
for more information.
'''
# Constructors
@staticmethod
def from_file_paths(pdb_paths, pdb_names, cut_off = 60.0, use_seqres_sequences_if_possible = True, strict = True):
assert(len(pdb_paths) == len(pdb_names) and len(pdb_paths) > 1)
pdbs = []
stage = None
try:
for x in range(len(pdb_paths)):
stage = pdb_names[x]
pdb_path = pdb_paths[x]
pdbs.append(PDB.from_filepath(pdb_path), strict = strict)
except (PDBParsingException, NonCanonicalResidueException, PDBValidationException) as e:
raise PDBParsingException("An error occurred while loading the %s structure: '%s'" % (stage, str(e)))
return PipelinePDBChainMapper(pdbs, pdb_names, cut_off = cut_off, use_seqres_sequences_if_possible = use_seqres_sequences_if_possible, strict = strict)
def __init__(self, pdbs, pdb_names, cut_off = 60.0, use_seqres_sequences_if_possible = True, strict = True):
assert(len(pdbs) == len(pdb_names) and len(pdbs) > 1)
assert(len(set(pdb_names)) == len(pdb_names)) # pdb_names must be a list of unique names
self.pdbs = pdbs
self.pdb_names = pdb_names
self.use_seqres_sequences_if_possible = use_seqres_sequences_if_possible
self.strict = strict
self.pdb_name_to_structure_mapping = {}
for x in range(len(pdb_names)):
self.pdb_name_to_structure_mapping[pdb_names[x]] = pdbs[x]
# differing_atom_residue_ids is a mapping from (pdb_name1, pdb_name2) to the list of ATOM residues *in pdb_name1* that differ from those of pdb_name2
self.differing_atom_residue_ids = {}
# chain_mapping is a mapping from (equivalence_class_id_1, equivalence_class_id_2) to the list of ATOM residues *in equivalence_class_id_1* that differ from those of equivalence_class_id_2
self.chain_mapping = {}
# Partition the set of PDBs over the ATOM coordinates i.e. if two PDBs have the same ATOM residues then they are
# part of the same equivalence class.
# equivalence_classes is a list of tuples [x, y] where:
# - x is map from chain ID to a Sequence object (using the ATOM sequence);
# - y is a subset of pdb_names;
# where given two distinct tuples [x_1, y_1] and [x_2, y_2], y_1 is mutually exclusive from y_2
# i.e. we have partitioned the set of PDB structures using an equivalence relation on the chain Sequences.
# todo: to agree with the old logic (which mapped all sequences and did not use an equivalence relation), we should consider structures to be equivalent
# if *both* their SEQRES and ATOM Sequences agree. However, requiring that their ATOM Sequences agree is generally strict enough as these sequences
# are more likely to vary (e.g. same sequence but missing coordinates)
equivalence_classes = []
sorted_objects = [(pdb_name, pdb_object) for pdb_name, pdb_object in sorted(self.pdb_name_to_structure_mapping.items())]
for pp in sorted_objects:
pdb_object = pp[1]
s = pdb_object.atom_sequences
found_class = None
p_sequences = pdb_object.atom_sequences
for tpl in equivalence_classes:
atom_sequence_set = tpl[0]
if atom_sequence_set == p_sequences:
tpl[1].append(pp[0])
found_class = True
if not found_class:
atom_sequence_set = pdb_object.atom_sequences
equivalence_classes.append([atom_sequence_set, [pp[0]]])
# partition_by_sequence is a map pdb_name -> Int where two pdb names in the same equivalence class map to the same integer (i.e. it is a partition)
# representative_pdbs is a map Int -> pdb_object mapping the equivalence classes (represented by an integer) to a representative PDB file
partition_by_sequence = {}
representative_pdbs = {}
for x in range(len(equivalence_classes)):
representative_pdbs[x] = self.pdb_name_to_structure_mapping[equivalence_classes[x][1][0]]
for pdb_name in equivalence_classes[x][1]:
partition_by_sequence[pdb_name] = x
self.partition_by_sequence = partition_by_sequence
self.representative_pdbs = representative_pdbs
# For each pair of equivalence classes, match each chain in the first representative pdb to its best match in the second representative pdb
# This section just creates the chain id->chain id mapping
representative_ids = sorted(representative_pdbs.keys())
for x in range(len(representative_ids) - 1):
for y in range(x + 1, len(representative_ids)):
representative_pdb_id_1 = representative_ids[x]
representative_pdb_id_2 = representative_ids[y]
rpdb_object_1 = representative_pdbs[representative_pdb_id_1]
rpdb_object_2 = representative_pdbs[representative_pdb_id_2]
mapping_key = (representative_pdb_id_1, representative_pdb_id_2)
reverse_mapping_key = (representative_pdb_id_2, representative_pdb_id_1)
self.chain_mapping[mapping_key] = {}
self.chain_mapping[reverse_mapping_key] = {}
# To allow for X cases, we allow the matcher to return multiple matches
# An artificial example X case would be 3MWO -> 1BN1 -> 3MWO where 3MWO_A and 3MWO_B both map to 1BN1_A
# In this case, we would like 1BN1_A to map to both 3MWO_A and 3MWO_B.
rpdb_name_1, rpdb_name_2 = 'EC{0}'.format(representative_pdb_id_1), 'EC{0}'.format(representative_pdb_id_2) # EC = "equivalence class"
chain_matches = match_pdb_chains(rpdb_object_1, rpdb_name_1, rpdb_object_2, rpdb_name_2, cut_off = cut_off, allow_multiple_matches = True, multiple_match_error_margin = 3.0, use_seqres_sequences_if_possible = self.use_seqres_sequences_if_possible)
reverse_mapping = {}
for rpdb1_chain_id, list_of_matches in chain_matches.items():
if list_of_matches:
mcl = MatchedChainList(rpdb_name_1, rpdb1_chain_id)
for l in list_of_matches:
mcl.add_chain(rpdb_name_2, l[0], l[1])
reverse_mapping[l[0]] = reverse_mapping.get(l[0], [])
reverse_mapping[l[0]].append((rpdb1_chain_id, l[1])) # reverse_mapping: chain in pdb2 -> list(tpl(chain in pdb1, %match)
self.chain_mapping[mapping_key][rpdb1_chain_id] = mcl
# Add the reverse mapping. For residues, we would want to realign the sequences in case the second sequence
# had an inserted residue which does not exist in the first sequences i.e. the relation is not symmetric.
# However, we treat the chain mapping as symmetric w.r.t. sequence identity (this saves computation as
# we do not realign the sequences).
for rpdb2_chain_id, list_of_matches in reverse_mapping.items():
mcl = MatchedChainList(rpdb_name_2, rpdb2_chain_id)
for l in list_of_matches:
mcl.add_chain(rpdb_name_1, l[0], l[1])
self.chain_mapping[reverse_mapping_key][rpdb2_chain_id] = mcl
self.residue_id_mapping = {}
# Create the residue ID -> residue ID mapping based on the chain mapping
self._map_residues()
# Private functions
def _map_residues(self):
'''For each pair of equivalence classes, match the residues of a chain in the first class to the residues of appropriate chains in the second class.
Note: we do a lot of repeated work here. Some of the lookups e.g. atom_sequences/seqres_sequences here could be cached.'''
pdbs = self.pdbs
pdb_names = self.pdb_names
partition_by_sequence = self.partition_by_sequence
representative_pdbs = self.representative_pdbs
representative_ids = sorted(representative_pdbs.keys())
# Map the SEQRES sequences to the ATOM sequences
# Note: The correct way to do this for RCSB files would be to use the SIFTS information like the ResidueRelatrix
# does. However, we have to consider the case where users upload PDB files which have not yet been deposited in
# the PDB so we have to resort to automatic sequence alignments. Ideally, we would store these alignments in a
# database and then do a lookup at this point. This would not only speed up the computation here but also allow
# us to manually fix misalignments (which will probably only occur due to gaps rather than mismatches).
seqres_to_atom_maps = {}
atom_to_seqres_maps = {}
for x in range(len(representative_ids)):
representative_id = representative_ids[x]
pdb_object = representative_pdbs[representative_id]
seqres_to_atom_map, atom_to_seqres_map = pdb_object.construct_seqres_to_atom_residue_map()
# todo: I tested the remainder of this class on PDBs with no SEQRES records so any code related to these maps is untested
# when these assertions fail, remove them and fix the code below accordingly
seqres_to_atom_maps[representative_id] = seqres_to_atom_map
atom_to_seqres_maps[representative_id] = atom_to_seqres_map
# Iterate over all pairs of representative PDBs and determine the residue mapping and sets of differing ATOM residues
# self.residue_id_mapping maps tuples of representative ids e.g. (0, 1) to residue_id_mapping where
# residue_id_mapping is a mapping: 'ATOM' -> chain_1_id -> residue_1_id -> tuple(chain_2_id, residue_2_id)
# where chain_x_id and residue_x_id are associated to representative_id_x
# self.differing_atom_residue_ids maps tuples of representative ids e.g. (0, 1) to PDB residues IDs which differ between
# the two representatives
for x in range(len(representative_ids) - 1):
for y in range(x + 1, len(representative_ids)):
representative_pdb_id_1 = representative_ids[x]
representative_pdb_id_2 = representative_ids[y]
rpdb_object_1 = representative_pdbs[representative_pdb_id_1]
rpdb_object_2 = representative_pdbs[representative_pdb_id_2]
mapping_key = (representative_pdb_id_1, representative_pdb_id_2)
reverse_mapping_key = mapping_key[::-1]
residue_id_mapping = {'ATOM' : {}, 'SEQRES' : {}} # todo: add the other types of mapping here e.g. FASTA and Rosetta
pdb1_differing_atom_residue_ids = []
pdb2_differing_atom_residue_ids = []
for pdb1_chain, pdb2_chains in self.get_representative_chain_mapping(mapping_key[0], mapping_key[1]).items():
# e.g. pdb1_chain = 'A', pdb2_chains = ['A', 'E']
residue_id_mapping['ATOM'][pdb1_chain] = {}
residue_id_mapping['SEQRES'][pdb1_chain] = {}
# Use the SEQRES or ATOM sequence appropriately
pdb1_chain_sequence_type, pdb1_chain_sequence = rpdb_object_1.get_annotated_chain_sequence_string(pdb1_chain, self.use_seqres_sequences_if_possible)
for pdb2_chain in pdb2_chains:
# Get the mapping between the sequences
# Note: sequences and mappings are 1-based following the UniProt convention
# The mapping returned from sa.get_residue_mapping is an abstract mapping between *sequences of characters*
# and knows nothing about residue identifiers e.g. ATOM residue IDs or whether the sequences are
# SEQRES or ATOM sequences
sa = SequenceAligner()
pdb2_chain_sequence_type, pdb2_chain_sequence = rpdb_object_2.get_annotated_chain_sequence_string(pdb2_chain, self.use_seqres_sequences_if_possible)
sa.add_sequence('%s_%s' % (representative_pdb_id_1, pdb1_chain), str(pdb1_chain_sequence))
sa.add_sequence('%s_%s' % (representative_pdb_id_2, pdb2_chain), str(pdb2_chain_sequence))
mapping, match_mapping = sa.get_residue_mapping()
# Since the mapping is only between sequences and we wish to use the original residue identifiers of
# the sequence e.g. the PDB/ATOM residue ID, we look this information up in the order mapping of the
# Sequence objects
for pdb1_residue_index, pdb2_residue_index in mapping.items():
pdb1_residue_id = pdb1_chain_sequence.order[pdb1_residue_index - 1] # order is a 0-based list
pdb2_residue_id = pdb2_chain_sequence.order[pdb2_residue_index - 1] # order is a 0-based list
pdb1_atom_residue_id, pdb2_atom_residue_id = None, None
if pdb1_chain_sequence_type == 'SEQRES' and pdb2_chain_sequence_type == 'SEQRES':
residue_id_mapping['SEQRES'][pdb1_chain][pdb1_residue_id] = residue_id_mapping['SEQRES'][pdb1_chain].get(pdb1_residue_id, [])
residue_id_mapping['SEQRES'][pdb1_chain][pdb1_residue_id].append((pdb2_chain, pdb2_residue_id))
pdb1_atom_residue_id = seqres_to_atom_maps.get(representative_pdb_id_1, {}).get(pdb1_chain, {}).get(pdb1_residue_id)
pdb2_atom_residue_id = seqres_to_atom_maps.get(representative_pdb_id_2, {}).get(pdb2_chain, {}).get(pdb2_residue_id)
if pdb1_atom_residue_id != None and pdb2_atom_residue_id != None:
residue_id_mapping['ATOM'][pdb1_chain][pdb1_atom_residue_id] = residue_id_mapping['ATOM'][pdb1_chain].get(pdb1_atom_residue_id, [])
residue_id_mapping['ATOM'][pdb1_chain][pdb1_atom_residue_id].append(pdb2_atom_residue_id)
elif pdb1_chain_sequence_type == 'SEQRES' and pdb2_chain_sequence_type == 'ATOM':
pdb1_atom_residue_id = seqres_to_atom_maps.get(representative_pdb_id_1, {}).get(pdb1_chain, {}).get(pdb1_residue_id)
if pdb1_atom_residue_id != None:
residue_id_mapping['ATOM'][pdb1_chain][pdb1_atom_residue_id] = residue_id_mapping['ATOM'][pdb1_chain].get(pdb1_atom_residue_id, [])
residue_id_mapping['ATOM'][pdb1_chain][pdb1_atom_residue_id].append(pdb2_residue_id)
elif pdb1_chain_sequence_type == 'ATOM' and pdb2_chain_sequence_type == 'SEQRES':
pdb2_atom_residue_id = seqres_to_atom_maps.get(representative_pdb_id_2, {}).get(pdb2_chain, {}).get(pdb2_residue_id)
if pdb2_atom_residue_id != None:
residue_id_mapping['ATOM'][pdb1_chain][pdb1_residue_id] = residue_id_mapping['ATOM'][pdb1_chain].get(pdb1_residue_id, [])
residue_id_mapping['ATOM'][pdb1_chain][pdb1_residue_id].append(pdb2_atom_residue_id)
elif pdb1_chain_sequence_type == 'ATOM' and pdb2_chain_sequence_type == 'ATOM':
residue_id_mapping['ATOM'][pdb1_chain][pdb1_residue_id] = residue_id_mapping['ATOM'][pdb1_chain].get(pdb1_residue_id, [])
residue_id_mapping['ATOM'][pdb1_chain][pdb1_residue_id].append(pdb2_residue_id)
else:
raise Exception('An exception occurred.') # this should not happen
# We store a *list* of corresponding residues i.e. if pdb1_chain matches pdb2_chain_1 and pdb2_chain_2
# then we may map a residue in pdb1_chain to a residue in each of those chains
#residue_id_mapping[pdb1_chain][pdb1_residue_id] = residue_id_mapping[pdb1_chain].get(pdb1_residue_id, [])
#residue_id_mapping[pdb1_chain][pdb1_residue_id].append(pdb2_residue_id)
# Determine which residues of each sequence differ between the sequences
# We ignore leading and trailing residues from both sequences
pdb1_residue_indices = list(mapping.keys())
pdb2_residue_indices = list(mapping.values())
differing_pdb1_indices = []
differing_pdb2_indices = []
for pdb1_residue_index, match_details in match_mapping.items():
if match_details.clustal == 0 or match_details.clustal == -1 or match_details.clustal == -2:
# The residues differed
differing_pdb1_indices.append(pdb1_residue_index)
differing_pdb2_indices.append(mapping[pdb1_residue_index])
# Convert the different sequence indices into PDB ATOM residue IDs. Sometimes there may not be a
# mapping from SEQRES residues to the ATOM residues e.g. missing density
for idx in differing_pdb1_indices:
if pdb1_chain_sequence_type == 'SEQRES':
pdb1_seqres_residue_id = pdb1_chain_sequence.order[idx - 1]
pdb1_atom_residue_id = seqres_to_atom_maps.get(representative_pdb_id_1, {}).get(pdb1_chain, {}).get(pdb1_seqres_residue_id)
if pdb1_atom_residue_id != None:
pdb1_differing_atom_residue_ids.append(pdb1_atom_residue_id)
elif pdb1_chain_sequence_type == 'ATOM':
pdb1_differing_atom_residue_ids.append(pdb1_chain_sequence.order[idx - 1])
for idx in differing_pdb2_indices:
if pdb2_chain_sequence_type == 'SEQRES':
pdb2_seqres_residue_id = pdb2_chain_sequence.order[idx - 1]
pdb2_atom_residue_id = seqres_to_atom_maps.get(representative_pdb_id_2, {}).get(pdb2_chain, {}).get(pdb2_seqres_residue_id)
if pdb2_atom_residue_id != None:
pdb2_differing_atom_residue_ids.append(pdb2_atom_residue_id)
elif pdb2_chain_sequence_type == 'ATOM':
pdb2_differing_atom_residue_ids.append(pdb2_chain_sequence.order[idx - 1])
self.residue_id_mapping[mapping_key] = residue_id_mapping
self.differing_atom_residue_ids[mapping_key] = pdb1_differing_atom_residue_ids
self.differing_atom_residue_ids[reverse_mapping_key] = pdb2_differing_atom_residue_ids
for k, v in sorted(self.differing_atom_residue_ids.items()):
self.differing_atom_residue_ids[k] = sorted(set(v)) # the list of residues may not be unique in general so we make it unique here
self.seqres_to_atom_maps = seqres_to_atom_maps
self.atom_to_seqres_maps = atom_to_seqres_maps
# Public functions
def get_representative_chain_mapping(self, representative_id_1, representative_id_2):
'''This replaces the old mapping member by constructing it from self.chain_mapping. This function returns a mapping from
chain IDs in pdb_name1 to chain IDs in pdb_name2.'''
d = {}
for pdb1_chain_id, matched_chain_list in self.chain_mapping[(representative_id_1, representative_id_2)].items():
d[pdb1_chain_id] = matched_chain_list.get_related_chains_ids('EC{0}'.format(representative_id_2))
return d
def get_chain_mapping(self, pdb_name1, pdb_name2):
'''This replaces the old mapping member by constructing it from self.chain_mapping. This function returns a mapping from
chain IDs in pdb_name1 to chain IDs in pdb_name2.'''
raise Exception('Implement. Map pdb_namex to its equivalence class, call get_representative_chain_mapping, and something something.')
pprint.pprint(self.chain_mapping)
d = {}
for pdb1_chain_id, matched_chain_list in self.chain_mapping[(pdb_name1, pdb_name2)].items():
d[pdb1_chain_id] = matched_chain_list.get_related_chains_ids(pdb_name2)
return d
def get_differing_atom_residue_ids(self, pdb_name, pdb_list = []):
'''Returns a list of residues in pdb_name which differ from the pdbs corresponding to the names in pdb_list.'''
# partition_by_sequence is a map pdb_name -> Int where two pdb names in the same equivalence class map to the same integer (i.e. it is a partition)
# representative_pdbs is a map Int -> pdb_object mapping the equivalence classes (represented by an integer) to a representative PDB file
# self.pdb_name_to_structure_mapping : pdb_name -> pdb_object
# Sanity checks
assert(pdb_name in self.pdb_names)
assert(set(pdb_list).intersection(set(self.pdb_names)) == set(pdb_list)) # the names in pdb_list must be in pdb_names
# 1. Get the representative structure for pdb_name
representative_pdb_id = self.partition_by_sequence[pdb_name]
representative_pdb = self.representative_pdbs[representative_pdb_id]
# 2. Get the other representative structures as dictated by pdb_list
other_representative_pdbs = set()
other_representative_pdb_ids = set()
if not pdb_list:
pdb_list = self.pdb_names
for opdb_name in pdb_list:
orepresentative_pdb_id = self.partition_by_sequence[opdb_name]
other_representative_pdb_ids.add(orepresentative_pdb_id)
other_representative_pdbs.add(self.representative_pdbs[orepresentative_pdb_id])
other_representative_pdbs.discard(representative_pdb)
other_representative_pdb_ids.discard(representative_pdb_id)
# Early out if pdb_list was empty (or all pdbs were in the same equivalence class)
if not other_representative_pdbs:
return []
# 3. Return all residues of pdb_name's representative which differ from all the other representatives
differing_atom_residue_ids = set()
for other_representative_pdb_id in other_representative_pdb_ids:
differing_atom_residue_ids = differing_atom_residue_ids.union(set(self.differing_atom_residue_ids[(representative_pdb_id, other_representative_pdb_id)]))
return sorted(differing_atom_residue_ids)
def get_sequence_alignment_printer_objects(self, pdb_list = [], reversed = True, width = 80, line_separator = '\n'):
'''Takes a list, pdb_list, of pdb names e.g. ['Model', 'Scaffold', ...] with which the object was created.
Using the first element of this list as a base, get the sequence alignments with chains in other members
of the list. For simplicity, if a chain in the first PDB matches multiple chains in another PDB, we only
return the alignment for one of the chains. If pdb_list is empty then the function defaults to the object's
members.
Returns a list of tuples (chain_id, sequence_alignment_printer_object). Each sequence_alignment_printer_object
can be used to generate a printable version of the sequence alignment. '''
raise Exception('Re-implement using the equivalence classes.')
if not pdb_list:
pdb_list = self.pdb_names
assert(len(set(pdb_list)) == len(pdb_list) and (len(pdb_list) > 1))
assert(sorted(set(pdb_list).intersection(set(self.pdb_names))) == sorted(set(pdb_list)))
primary_pdb = self.pdb_name_to_structure_mapping[pdb_list[0]]
primary_pdb_name = pdb_list[0]
primary_pdb_chains = sorted(primary_pdb.chain_atoms.keys())
sequence_alignment_printer_objects = []
for primary_pdb_chain in primary_pdb_chains:
sa = SequenceAligner()
# Add the primary PDB's sequence for the chain
primary_pdb_sequence_type, primary_pdb_sequence = primary_pdb.get_annotated_chain_sequence_string(primary_pdb_chain, self.use_seqres_sequences_if_possible)
sa.add_sequence('%s_%s' % (primary_pdb_name, primary_pdb_chain), str(primary_pdb_sequence))
other_chain_types_and_sequences = {}
for other_pdb_name in pdb_list[1:]:
other_pdb = self.pdb_name_to_structure_mapping[other_pdb_name]
other_chains = self.get_chain_mapping(primary_pdb_name, other_pdb_name).get(primary_pdb_chain)
#other_chain = self.mapping[(primary_pdb_name, other_pdb_name)].get(primary_pdb_chain)
if other_chains:
other_chain = sorted(other_chains)[0]
other_pdb_sequence_type, other_pdb_sequence = other_pdb.get_annotated_chain_sequence_string(other_chain, self.use_seqres_sequences_if_possible)
other_chain_types_and_sequences[other_pdb_name] = (other_pdb_sequence_type, other_pdb_sequence)
sa.add_sequence('%s_%s' % (other_pdb_name, other_chain), str(other_pdb_sequence))
if len(sa.records) > 1:
# If there are no corresponding sequences in any other PDB, do not return the non-alignment
sa.align()
#pdb1_alignment_str = sa._get_alignment_lines()['%s:%s' % (primary_pdb_name, pdb1_chain)]
#pdb2_alignment_str = sa._get_alignment_lines()['%s:%s' % (pdb2_name, pdb2_chain)]
sequence_names, sequences, sequence_tooltips = [], [], []
sequence_names.append('%s_%s' % (primary_pdb_name, primary_pdb_chain))
primary_pdb_alignment_lines = sa._get_alignment_lines()['%s_%s' % (primary_pdb_name, primary_pdb_chain)]
sequences.append(primary_pdb_alignment_lines)
sequence_tooltips.append(self.get_sequence_tooltips(primary_pdb, primary_pdb_sequence, primary_pdb_sequence_type, primary_pdb_name, primary_pdb_chain, primary_pdb_alignment_lines))
for other_pdb_name in pdb_list[1:]:
#other_chain = self.mapping[(primary_pdb_name, other_pdb_name)].get(primary_pdb_chain)
other_pdb = self.pdb_name_to_structure_mapping[other_pdb_name]
other_chains = self.get_chain_mapping(primary_pdb_name, other_pdb_name).get(primary_pdb_chain)
if other_chains:
other_chain = sorted(other_chains)[0]
sequence_names.append('%s_%s' % (other_pdb_name, other_chain))
other_pdb_alignment_lines = sa._get_alignment_lines()['%s_%s' % (other_pdb_name, other_chain)]
sequences.append(other_pdb_alignment_lines)
other_pdb_sequence_type, other_pdb_sequence = other_chain_types_and_sequences[other_pdb_name]
sequence_tooltips.append(self.get_sequence_tooltips(other_pdb, other_pdb_sequence, other_pdb_sequence_type, other_pdb_name, other_chain, other_pdb_alignment_lines))
sap = MultipleSequenceAlignmentPrinter(sequence_names, sequences, sequence_tooltips)
sequence_alignment_printer_objects.append((primary_pdb_chain, sap))
return sequence_alignment_printer_objects
def get_sequence_tooltips(self, pdb_object, pdb_sequence, pdb_sequence_type, pdb_name, pdb_chain, pdb_alignment_lines):
'''pdb_sequence is a Sequence object. pdb_sequence_type is a type returned by PDB.get_annotated_chain_sequence_string,
pdb_name is the name of the PDB used throughout this object e.g. 'Scaffold', pdb_chain is the chain of interest,
pdb_alignment_lines are the lines returned by SequenceAligner._get_alignment_lines.
This function returns a set of tooltips corresponding to the residues in the sequence. The tooltips are the ATOM
residue IDs. These tooltips can be used to generate useful (and/or interactive using JavaScript) sequence alignments
in HTML.
'''
raise Exception('Re-implement using the equivalence classes.')
tooltips = None
atom_sequence = pdb_object.atom_sequences.get(pdb_chain)
try:
if pdb_sequence_type == 'SEQRES':
seqres_to_atom_map = self.seqres_to_atom_maps.get(pdb_name, {}).get(pdb_chain, {})
tooltips = []
if seqres_to_atom_map:
idx = 1
for aligned_residue in pdb_alignment_lines.strip():
if aligned_residue != '-':
atom_residue = seqres_to_atom_map.get(idx)
if atom_residue:
# This is a sanity check to make sure that the tooltips are mapping the correct residues types to
# the correct residues types
assert(aligned_residue == atom_sequence.sequence[atom_residue].ResidueAA)
tooltips.append(atom_residue)
idx += 1
assert(len(tooltips) == len(str(pdb_sequence)))
elif pdb_sequence_type == 'ATOM':
tooltips = []
idx = 0
for aligned_residue in pdb_alignment_lines.strip():
if aligned_residue != '-':
# This is a sanity check to make sure that the tooltips are mapping the correct residues types to
# the correct residues types
assert(aligned_residue == pdb_sequence.sequence[pdb_sequence.order[idx]].ResidueAA)
tooltips.append(pdb_sequence.order[idx])
idx += 1
assert(len(tooltips) == len(str(pdb_sequence)))
except:
raise Exception('An error occurred during HTML tooltip creation for the multiple sequence alignment.')
return tooltips
def get_sequence_alignment_strings(self, pdb_list = [], reversed = True, width = 80, line_separator = '\n'):
'''Takes a list, pdb_list, of pdb names e.g. ['Model', 'Scaffold', ...] with which the object was created.
Using the first element of this list as a base, get the sequence alignments with chains in other members
of the list. For simplicity, if a chain in the first PDB matches multiple chains in another PDB, we only
return the alignment for one of the chains. If pdb_list is empty then the function defaults to the object's
members.
Returns one sequence alignment string for each chain mapping. Each line is a concatenation of lines of the
specified width, separated by the specified line separator.'''
raise Exception('Re-implement using the equivalence classes.')
sequence_alignment_printer_tuples = self.get_sequence_alignment_printer_objects(pdb_list = pdb_list, reversed = reversed, width = width, line_separator = line_separator)
alignment_strings = []
for sequence_alignment_printer_tuple in sequence_alignment_printer_tuples:
primary_pdb_chain = sequence_alignment_printer_tuple[0]
sap = sequence_alignment_printer_tuple[1]
alignment_strings.append(sap.to_lines(reversed = reversed, width = width, line_separator = line_separator))
return alignment_strings
def get_sequence_alignment_strings_as_html(self, pdb_list = [], reversed = False, width = 80, line_separator = '\n', extra_tooltip_class = ''):
'''Takes a list, pdb_list, of pdb names e.g. ['Model', 'Scaffold', ...] with which the object was created.
Using the first element of this list as a base, get the sequence alignments with chains in other members
of the list. For simplicity, if a chain in the first PDB matches multiple chains in another PDB, we only
return the alignment for one of the chains. If pdb_list is empty then the function defaults to the object's
members.
Returns HTML for the sequence alignments and an empty string if no alignments were made.'''
raise Exception('Re-implement using the equivalence classes.')
sequence_alignment_printer_tuples = self.get_sequence_alignment_printer_objects(pdb_list = pdb_list, reversed = reversed, width = width, line_separator = line_separator)
if not sequence_alignment_printer_tuples:
return ''
html = []
for sequence_alignment_printer_tuple in sequence_alignment_printer_tuples:
primary_pdb_chain = sequence_alignment_printer_tuple[0]
sap = sequence_alignment_printer_tuple[1]
html.append(sap.to_html(reversed = reversed, width = width, line_separator = line_separator, extra_tooltip_class = extra_tooltip_class))
return '\n'.join(html)
|
class PipelinePDBChainMapper(BasePDBChainMapper):
    '''Similar to the removed PDBChainMapper class except this takes a list of PDB files which should be related in some way.
    The matching is done pointwise, matching all PDBs in the list to each other.
    This class is useful for a list of structures that are the result of a linear pipeline e.g. a scaffold structure (RCSB),
    a model structure (Rosetta), and a design structure (experimental result).

    The 'chain_mapping' member stores a mapping from a pair (pdb_name1, pdb_name2) to the mapping from chain IDs in pdb_name1 to
    a MatchedChainList object. This object can be used to return the list of chain IDs in pdb_name2 related to the
    respective chain in pdb_name1 based on sequence alignment. It can also be used to return the percentage identities
    for this alignment. The old mapping and mapping_percentage_identity members of this class can be built from this member
    e.g. self.mapping[('Scaffold', 'ExpStructure')] == self.get_chain_mapping('Scaffold', 'ExpStructure')

    The 'residue_id_mapping' member stores a mapping from a pair (pdb_name1, pdb_name2) to a mapping
        'ATOM'   -> chains_of_pdb_name_1 -> ATOM residues of that chain -> list of corresponding ATOM residues in the corresponding chains of pdb_name2
        'SEQRES' -> chains_of_pdb_name_1 -> SEQRES residues of that chain -> pairs of (chain_id, corresponding SEQRES residue_id) in the corresponding chains of pdb_name2
    For example, using the homodimer 3MWO for both Scaffold and ExpStructure:
        residue_id_mapping[('Scaffold', 'ExpStructure')]['ATOM']['A'] -> {'A 167 ': ['A 167 ', 'B 167 '], ...}
        residue_id_mapping[('Scaffold', 'ExpStructure')]['SEQRES']['B'] -> {167 : [('A', 167), ('C', 167)], ...}

    Objects of this class have a differing_atom_residue_ids mapping which maps the pair (pdb_name1, pdb_name2) to the list
    of ATOM residues *in pdb_name1* that differ from those of pdb_name2. Note: there is some subtlety here in terms of
    direction. For example, take this artificial example. We take a homodimer 3MWO as the scaffold and a monomer 1BN1
    with identical sequence as the model. We mutate A110 in 1BN1. We then take 3MWO with a mutation on A106 as the design.
        chain_mapper = ScaffoldModelDesignChainMapper.from_file_contents(retrieve_pdb('3MWO'), retrieve_pdb('1BN1').replace('ASP A 110', 'ASN A 110'), retrieve_pdb('3MWO').replace('GLU A 106', 'GLN A 106'))
    differing_atom_residue_ids then looks like this:
        ('Model', 'ExpStructure')    = ['A 106 ', 'A 110 '] # In Model, A110 is a mutation, reverted in ExpStructure. In ExpStructure, A106 is a mutation.
        ('Model', 'Scaffold')        = ['A 110 '] # In Model, A110 is a mutation.
        ('ExpStructure', 'Model')    = ['A 106 ', 'A 110 ', 'B 110 '] # In ExpStructure, A106 is a mutation. A110 and B110 are revertant mutations from the Model.
        ('ExpStructure', 'Scaffold') = ['A 106 '] # In ExpStructure, A106 is a mutation.
        ('Scaffold', 'ExpStructure') = ['A 106 ', 'B 106 '] # Note: In Scaffold, A106 is the wildtype which was mutated in ExpStructure. Since B106 also maps to A106, that is added to the list of differing residues.
        ('Scaffold', 'Model')        = ['A 110 ', 'B 110 '] # In Scaffold, A110 and B110 are the wildtypes which were mutated in Model.

    There is a subtlety here - the differing residue ids from Scaffold to ExpStructure are A106 and B106, corresponding to the
    mutated A106 in the ExpStructure. However, the differing residue ids from ExpStructure to Scaffold have only one member - A106. This
    makes sense as it is the only mutation; however, this may not be the desired behavior - one may wish instead to close
    the list of residues over the relations mapping the residues between the structures i.e. to generate an equivalence
    relation from the relation described by the mappings Scaffold->ExpStructure and ExpStructure->Scaffold. If that were done, then
    ('ExpStructure', 'Scaffold') would be ['A 106 ', 'B 106 '] as ExpStructure:A106 -> {Scaffold:A106, Scaffold:B106} and
    Scaffold:B106 -> {ExpStructure:A106, ExpStructure:B106}, so ExpStructure:A106 and ExpStructure:B106 are in the same equivalence class.

    If use_seqres_sequences_if_possible is set, the alignment will use the SEQRES sequences when available. See match_pdb_chains
    for more information.
    '''

    @staticmethod
    def from_file_paths(pdb_paths, pdb_names, cut_off = 60.0, use_seqres_sequences_if_possible = True, strict = True):
        pass

    def __init__(self, pdbs, pdb_names, cut_off = 60.0, use_seqres_sequences_if_possible = True, strict = True):
        pass

    def _map_residues(self):
        '''For each pair of equivalence classes, match the residues of a chain in the first class to the residues of appropriate chains in the second class.
        Note: we do a lot of repeated work here. Some of the lookups e.g. atom_sequences/seqres_sequences here could be cached.'''
        pass

    def get_representative_chain_mapping(self, representative_id_1, representative_id_2):
        '''This replaces the old mapping member by constructing it from self.chain_mapping. This function returns a mapping from
        chain IDs in pdb_name1 to chain IDs in pdb_name2.'''
        pass

    def get_chain_mapping(self, pdb_name1, pdb_name2):
        '''This replaces the old mapping member by constructing it from self.chain_mapping. This function returns a mapping from
        chain IDs in pdb_name1 to chain IDs in pdb_name2.'''
        pass

    def get_differing_atom_residue_ids(self, pdb_name, pdb_list = []):
        '''Returns a list of residues in pdb_name which differ from the pdbs corresponding to the names in pdb_list.'''
        pass

    def get_sequence_alignment_printer_objects(self, pdb_list = [], reversed = True, width = 80, line_separator = '\n'):
        '''Takes a list, pdb_list, of pdb names e.g. ['Model', 'Scaffold', ...] with which the object was created.
        Using the first element of this list as a base, get the sequence alignments with chains in other members
        of the list. For simplicity, if a chain in the first PDB matches multiple chains in another PDB, we only
        return the alignment for one of the chains. If pdb_list is empty then the function defaults to the object's
        members.
        Returns a list of tuples (chain_id, sequence_alignment_printer_object). Each sequence_alignment_printer_object
        can be used to generate a printable version of the sequence alignment.'''
        pass

    def get_sequence_tooltips(self, pdb_object, pdb_sequence, pdb_sequence_type, pdb_name, pdb_chain, pdb_alignment_lines):
        '''pdb_sequence is a Sequence object. pdb_sequence_type is a type returned by PDB.get_annotated_chain_sequence_string,
        pdb_name is the name of the PDB used throughout this object e.g. 'Scaffold', pdb_chain is the chain of interest,
        pdb_alignment_lines are the lines returned by SequenceAligner._get_alignment_lines.
        This function returns a set of tooltips corresponding to the residues in the sequence. The tooltips are the ATOM
        residue IDs. These tooltips can be used to generate useful (and/or interactive using JavaScript) sequence alignments
        in HTML.'''
        pass

    def get_sequence_alignment_strings(self, pdb_list = [], reversed = True, width = 80, line_separator = '\n'):
        '''Takes a list, pdb_list, of pdb names e.g. ['Model', 'Scaffold', ...] with which the object was created.
        Using the first element of this list as a base, get the sequence alignments with chains in other members
        of the list. For simplicity, if a chain in the first PDB matches multiple chains in another PDB, we only
        return the alignment for one of the chains. If pdb_list is empty then the function defaults to the object's
        members.
        Returns one sequence alignment string for each chain mapping. Each line is a concatenation of lines of the
        specified width, separated by the specified line separator.'''
        pass

    def get_sequence_alignment_strings_as_html(self, pdb_list = [], reversed = False, width = 80, line_separator = '\n', extra_tooltip_class = ''):
        '''Takes a list, pdb_list, of pdb names e.g. ['Model', 'Scaffold', ...] with which the object was created.
        Using the first element of this list as a base, get the sequence alignments with chains in other members
        of the list. For simplicity, if a chain in the first PDB matches multiple chains in another PDB, we only
        return the alignment for one of the chains. If pdb_list is empty then the function defaults to the object's
        members.
        Returns HTML for the sequence alignments and an empty string if no alignments were made.'''
        pass
| 12 | 9 | 49 | 8 | 30 | 12 | 8 | 0.54 | 1 | 12 | 7 | 3 | 9 | 12 | 10 | 12 | 561 | 114 | 297 | 140 | 285 | 159 | 289 | 138 | 278 | 25 | 2 | 7 | 75 |
143,656 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/alignment.py
|
klab.bio.alignment.PipelinePDBChainMapper_old
|
class PipelinePDBChainMapper_old(BasePDBChainMapper):
    '''Similar to the removed PDBChainMapper class except this takes a list of PDB files which should be related in some way.
    The matching is done pointwise, matching all PDBs in the list to each other.
    This class is useful for a list of structures that are the result of a linear pipeline e.g. a scaffold structure (RCSB),
    a model structure (Rosetta), and a design structure (experimental result).

    The 'chain_mapping' member stores a mapping from a pair (pdb_name1, pdb_name2) to the mapping from chain IDs in pdb_name1 to
    a MatchedChainList object. This object can be used to return the list of chain IDs in pdb_name2 related to the
    respective chain in pdb_name1 based on sequence alignment. It can also be used to return the percentage identities
    for this alignment. The old mapping and mapping_percentage_identity members of this class can be built from this member
    e.g. self.mapping[('Scaffold', 'ExpStructure')] == self.get_chain_mapping('Scaffold', 'ExpStructure')

    The 'residue_id_mapping' member stores a mapping from a pair (pdb_name1, pdb_name2) to a mapping
        'ATOM'   -> chains_of_pdb_name_1 -> ATOM residues of that chain -> list of corresponding ATOM residues in the corresponding chains of pdb_name2
        'SEQRES' -> chains_of_pdb_name_1 -> SEQRES residues of that chain -> pairs of (chain_id, corresponding SEQRES residue_id) in the corresponding chains of pdb_name2
    For example, using the homodimer 3MWO for both Scaffold and ExpStructure:
        residue_id_mapping[('Scaffold', 'ExpStructure')]['ATOM']['A'] -> {'A 167 ': ['A 167 ', 'B 167 '], ...}
        residue_id_mapping[('Scaffold', 'ExpStructure')]['SEQRES']['B'] -> {167 : [('A', 167), ('C', 167)], ...}

    Objects of this class have a differing_atom_residue_ids mapping which maps the pair (pdb_name1, pdb_name2) to the list
    of ATOM residues *in pdb_name1* that differ from those of pdb_name2. Note: there is some subtlety here in terms of
    direction. For example, take this artificial example. We take a homodimer 3MWO as the scaffold and a monomer 1BN1
    with identical sequence as the model. We mutate A110 in 1BN1. We then take 3MWO with a mutation on A106 as the design.
        chain_mapper = ScaffoldModelDesignChainMapper.from_file_contents(retrieve_pdb('3MWO'), retrieve_pdb('1BN1').replace('ASP A 110', 'ASN A 110'), retrieve_pdb('3MWO').replace('GLU A 106', 'GLN A 106'))
    differing_atom_residue_ids then looks like this:
        ('Model', 'ExpStructure')    = ['A 106 ', 'A 110 '] # In Model, A110 is a mutation, reverted in ExpStructure. In ExpStructure, A106 is a mutation.
        ('Model', 'Scaffold')        = ['A 110 '] # In Model, A110 is a mutation.
        ('ExpStructure', 'Model')    = ['A 106 ', 'A 110 ', 'B 110 '] # In ExpStructure, A106 is a mutation. A110 and B110 are revertant mutations from the Model.
        ('ExpStructure', 'Scaffold') = ['A 106 '] # In ExpStructure, A106 is a mutation.
        ('Scaffold', 'ExpStructure') = ['A 106 ', 'B 106 '] # Note: In Scaffold, A106 is the wildtype which was mutated in ExpStructure. Since B106 also maps to A106, that is added to the list of differing residues.
        ('Scaffold', 'Model')        = ['A 110 ', 'B 110 '] # In Scaffold, A110 and B110 are the wildtypes which were mutated in Model.

    There is a subtlety here - the differing residue ids from Scaffold to ExpStructure are A106 and B106, corresponding to the
    mutated A106 in the ExpStructure. However, the differing residue ids from ExpStructure to Scaffold have only one member - A106. This
    makes sense as it is the only mutation; however, this may not be the desired behavior - one may wish instead to close
    the list of residues over the relations mapping the residues between the structures i.e. to generate an equivalence
    relation from the relation described by the mappings Scaffold->ExpStructure and ExpStructure->Scaffold. If that were done, then
    ('ExpStructure', 'Scaffold') would be ['A 106 ', 'B 106 '] as ExpStructure:A106 -> {Scaffold:A106, Scaffold:B106} and
    Scaffold:B106 -> {ExpStructure:A106, ExpStructure:B106}, so ExpStructure:A106 and ExpStructure:B106 are in the same equivalence class.

    If use_seqres_sequences_if_possible is set, the alignment will use the SEQRES sequences when available. See match_pdb_chains
    for more information.
    '''

    # Constructors

    @staticmethod
    def from_file_paths(pdb_paths, pdb_names, cut_off = 60.0, use_seqres_sequences_if_possible = True, strict = True):
        assert(len(pdb_paths) == len(pdb_names) and len(pdb_paths) > 1)
        pdbs = []
        stage = None
        try:
            for x in range(len(pdb_paths)):
                stage = pdb_names[x]
                pdb_path = pdb_paths[x]
                pdbs.append(PDB.from_filepath(pdb_path, strict = strict)) # note: strict is an argument of from_filepath, not of list.append
        except (PDBParsingException, NonCanonicalResidueException, PDBValidationException) as e:
            raise PDBParsingException("An error occurred while loading the %s structure: '%s'" % (stage, str(e)))
        # Note: this factory returns the newer PipelinePDBChainMapper class.
        return PipelinePDBChainMapper(pdbs, pdb_names, cut_off = cut_off, use_seqres_sequences_if_possible = use_seqres_sequences_if_possible, strict = strict)

    def __init__(self, pdbs, pdb_names, cut_off = 60.0, use_seqres_sequences_if_possible = True, strict = True):
        assert(len(pdbs) == len(pdb_names) and len(pdbs) > 1)
        assert(len(set(pdb_names)) == len(pdb_names)) # pdb_names must be a list of unique names

        self.pdbs = pdbs
        self.pdb_names = pdb_names
        self.use_seqres_sequences_if_possible = use_seqres_sequences_if_possible
        self.strict = strict

        self.pdb_name_to_structure_mapping = {}
        for x in range(len(pdb_names)):
            self.pdb_name_to_structure_mapping[pdb_names[x]] = pdbs[x]

        # differing_atom_residue_ids is a mapping from (pdb_name1, pdb_name2) to the list of ATOM residues *in pdb_name1* that differ from those of pdb_name2
        self.differing_atom_residue_ids = {}
        self.chain_mapping = {}

        # For each pair of PDB files in the list, match each chain in the first pdb to its best match in the second pdb
        # This section just creates the chain id->chain id mapping
        for x in range(len(pdbs) - 1):
            for y in range(x + 1, len(pdbs)):
                pdb1, pdb2 = pdbs[x], pdbs[y]
                pdb1_name, pdb2_name = pdb_names[x], pdb_names[y]

                mapping_key = (pdb1_name, pdb2_name)
                self.chain_mapping[mapping_key] = {}
                self.differing_atom_residue_ids[mapping_key] = {}

                # To allow for X cases, we allow the matcher to return multiple matches
                # An artificial example X case would be 3MWO -> 1BN1 -> 3MWO where 3MWO_A and 3MWO_B both map to 1BN1_A
                # In this case, we would like 1BN1_A to map to both 3MWO_A and 3MWO_B.
                chain_matches = match_pdb_chains(pdb1, pdb1_name, pdb2, pdb2_name, cut_off = cut_off, allow_multiple_matches = True, multiple_match_error_margin = 3.0, use_seqres_sequences_if_possible = self.use_seqres_sequences_if_possible)
                for pdb1_chain_id, list_of_matches in chain_matches.items():
                    if list_of_matches:
                        mcl = MatchedChainList(pdb1_name, pdb1_chain_id)
                        for l in list_of_matches:
                            mcl.add_chain(pdb2_name, l[0], l[1])
                        self.chain_mapping[mapping_key][pdb1_chain_id] = mcl

                # todo: We could create the reverse entry from the results above which would be more efficient (match_pdb_chains
                # performs a sequence alignment) but I will just repeat the logic here for now.
                mapping_key = (pdb2_name, pdb1_name)
                self.chain_mapping[mapping_key] = {}
                self.differing_atom_residue_ids[mapping_key] = {}
                chain_matches = match_pdb_chains(pdb2, pdb2_name, pdb1, pdb1_name, cut_off = cut_off, allow_multiple_matches = True, multiple_match_error_margin = 3.0, use_seqres_sequences_if_possible = self.use_seqres_sequences_if_possible)
                for pdb2_chain_id, list_of_matches in chain_matches.items():
                    if list_of_matches:
                        mcl = MatchedChainList(pdb2_name, pdb2_chain_id)
                        for l in list_of_matches:
                            mcl.add_chain(pdb1_name, l[0], l[1])
                        self.chain_mapping[mapping_key][pdb2_chain_id] = mcl

        self.residue_id_mapping = {}

        # Create the residue ID -> residue ID mapping based on the chain mapping
        self._map_residues()

    # Private functions

    def _map_residues(self):
        '''For each pair of PDB files, match the residues of a chain in the first PDB to the residues of appropriate chains in the second PDB.
        Note: we do a lot of repeated work here. Some of the lookups e.g. atom_sequences/seqres_sequences here could be cached.
        If speed is important and the sequences are expected to be similar or have lots of repeats, we could use a list of unique sequences
        as equivalence class representatives and then duplicate the matching for the other equivalent sequences.'''
        pdbs = self.pdbs
        pdb_names = self.pdb_names

        # Map the SEQRES sequences to the ATOM sequences
        # Note: The correct way to do this for RCSB files would be to use the SIFTS information like the ResidueRelatrix
        # does. However, we have to consider the case where users upload PDB files which have not yet been deposited in
        # the PDB so we have to resort to automatic sequence alignments. Ideally, we would store these alignments in a
        # database and then do a lookup at this point. This would not only speed up the computation here but also allow
        # us to manually fix misalignments (which will probably only occur due to gaps rather than mismatches).
        seqres_to_atom_maps = {}
        atom_to_seqres_maps = {}
        for x in range(len(pdbs)):
            pdb_object = pdbs[x]
            pdb_name = pdb_names[x]
            seqres_to_atom_map, atom_to_seqres_map = pdb_object.construct_seqres_to_atom_residue_map()
            seqres_to_atom_maps[pdb_name] = seqres_to_atom_map
            atom_to_seqres_maps[pdb_name] = atom_to_seqres_map

        # Iterate over all pairs of PDBs and determine the residue mapping and sets of differing ATOM residues
        for x in range(len(pdbs) - 1):
            for y in range(x + 1, len(pdbs)):
                pdb1, pdb2 = pdbs[x], pdbs[y]
                pdb1_name, pdb2_name = pdb_names[x], pdb_names[y]
                mapping_key = (pdb1_name, pdb2_name)
                reverse_mapping_key = mapping_key[::-1]

                residue_id_mapping = {'ATOM' : {}, 'SEQRES' : {}} # todo: add the other types of mapping here e.g. FASTA and Rosetta
                pdb1_differing_atom_residue_ids = []
                pdb2_differing_atom_residue_ids = []
                for pdb1_chain, pdb2_chains in self.get_chain_mapping(mapping_key[0], mapping_key[1]).items():
                    #for pdb1_chain, pdb2_chain in self.chain_mapping[mapping_key].iteritems():
                    residue_id_mapping['ATOM'][pdb1_chain] = {}
                    residue_id_mapping['SEQRES'][pdb1_chain] = {}

                    # Use the SEQRES or ATOM sequence appropriately
                    pdb1_chain_sequence_type, pdb1_chain_sequence = pdb1.get_annotated_chain_sequence_string(pdb1_chain, self.use_seqres_sequences_if_possible)

                    for pdb2_chain in pdb2_chains:
                        # Get the mapping between the sequences
                        # Note: sequences and mappings are 1-based following the UniProt convention
                        # The mapping returned from sa.get_residue_mapping is an abstract mapping between *sequences of characters*
                        # and knows nothing about residue identifiers e.g. ATOM residue IDs or whether the sequences are
                        # SEQRES or ATOM sequences
                        sa = SequenceAligner()
                        pdb2_chain_sequence_type, pdb2_chain_sequence = pdb2.get_annotated_chain_sequence_string(pdb2_chain, self.use_seqres_sequences_if_possible)
                        sa.add_sequence('%s_%s' % (pdb1_name, pdb1_chain), str(pdb1_chain_sequence))
                        sa.add_sequence('%s_%s' % (pdb2_name, pdb2_chain), str(pdb2_chain_sequence))
                        mapping, match_mapping = sa.get_residue_mapping()

                        # Since the mapping is only between sequences and we wish to use the original residue identifiers of
                        # the sequence e.g. the PDB/ATOM residue ID, we look this information up in the order mapping of the
                        # Sequence objects
                        for pdb1_residue_index, pdb2_residue_index in mapping.items():
                            pdb1_residue_id = pdb1_chain_sequence.order[pdb1_residue_index - 1] # order is a 0-based list
                            pdb2_residue_id = pdb2_chain_sequence.order[pdb2_residue_index - 1] # order is a 0-based list

                            pdb1_atom_residue_id, pdb2_atom_residue_id = None, None
                            if pdb1_chain_sequence_type == 'SEQRES' and pdb2_chain_sequence_type == 'SEQRES':
                                residue_id_mapping['SEQRES'][pdb1_chain][pdb1_residue_id] = residue_id_mapping['SEQRES'][pdb1_chain].get(pdb1_residue_id, [])
                                residue_id_mapping['SEQRES'][pdb1_chain][pdb1_residue_id].append((pdb2_chain, pdb2_residue_id))
                                pdb1_atom_residue_id = seqres_to_atom_maps.get(pdb1_name, {}).get(pdb1_chain, {}).get(pdb1_residue_id)
                                pdb2_atom_residue_id = seqres_to_atom_maps.get(pdb2_name, {}).get(pdb2_chain, {}).get(pdb2_residue_id)
                                if pdb1_atom_residue_id != None and pdb2_atom_residue_id != None:
                                    residue_id_mapping['ATOM'][pdb1_chain][pdb1_atom_residue_id] = residue_id_mapping['ATOM'][pdb1_chain].get(pdb1_atom_residue_id, [])
                                    residue_id_mapping['ATOM'][pdb1_chain][pdb1_atom_residue_id].append(pdb2_atom_residue_id)
                            elif pdb1_chain_sequence_type == 'SEQRES' and pdb2_chain_sequence_type == 'ATOM':
                                pdb1_atom_residue_id = seqres_to_atom_maps.get(pdb1_name, {}).get(pdb1_chain, {}).get(pdb1_residue_id)
                                if pdb1_atom_residue_id != None:
                                    residue_id_mapping['ATOM'][pdb1_chain][pdb1_atom_residue_id] = residue_id_mapping['ATOM'][pdb1_chain].get(pdb1_atom_residue_id, [])
                                    residue_id_mapping['ATOM'][pdb1_chain][pdb1_atom_residue_id].append(pdb2_residue_id)
                            elif pdb1_chain_sequence_type == 'ATOM' and pdb2_chain_sequence_type == 'SEQRES':
                                pdb2_atom_residue_id = seqres_to_atom_maps.get(pdb2_name, {}).get(pdb2_chain, {}).get(pdb2_residue_id)
                                if pdb2_atom_residue_id != None:
                                    residue_id_mapping['ATOM'][pdb1_chain][pdb1_residue_id] = residue_id_mapping['ATOM'][pdb1_chain].get(pdb1_residue_id, [])
                                    residue_id_mapping['ATOM'][pdb1_chain][pdb1_residue_id].append(pdb2_atom_residue_id)
                            elif pdb1_chain_sequence_type == 'ATOM' and pdb2_chain_sequence_type == 'ATOM':
                                residue_id_mapping['ATOM'][pdb1_chain][pdb1_residue_id] = residue_id_mapping['ATOM'][pdb1_chain].get(pdb1_residue_id, [])
                                residue_id_mapping['ATOM'][pdb1_chain][pdb1_residue_id].append(pdb2_residue_id)
                            else:
                                raise Exception('Unexpected sequence type combination: %s, %s.' % (pdb1_chain_sequence_type, pdb2_chain_sequence_type)) # this should not happen

                            # We store a *list* of corresponding residues i.e. if pdb1_chain matches pdb2_chain_1 and pdb2_chain_2
                            # then we may map a residue in pdb1_chain to a residue in each of those chains
                            #residue_id_mapping[pdb1_chain][pdb1_residue_id] = residue_id_mapping[pdb1_chain].get(pdb1_residue_id, [])
                            #residue_id_mapping[pdb1_chain][pdb1_residue_id].append(pdb2_residue_id)

                        # Determine which residues of each sequence differ between the sequences
                        # We ignore leading and trailing residues from both sequences
                        pdb1_residue_indices = list(mapping.keys())
                        pdb2_residue_indices = list(mapping.values())
                        differing_pdb1_indices = []
                        differing_pdb2_indices = []
                        for pdb1_residue_index, match_details in match_mapping.items():
                            if match_details.clustal == 0 or match_details.clustal == -1 or match_details.clustal == -2:
                                # The residues differed
                                differing_pdb1_indices.append(pdb1_residue_index)
                                differing_pdb2_indices.append(mapping[pdb1_residue_index])

                        # Convert the different sequence indices into PDB ATOM residue IDs. Sometimes there may not be a
                        # mapping from SEQRES residues to the ATOM residues e.g. missing density
                        for idx in differing_pdb1_indices:
                            if pdb1_chain_sequence_type == 'SEQRES':
                                pdb1_seqres_residue_id = pdb1_chain_sequence.order[idx - 1]
                                pdb1_atom_residue_id = seqres_to_atom_maps.get(pdb1_name, {}).get(pdb1_chain, {}).get(pdb1_seqres_residue_id)
                                if pdb1_atom_residue_id != None:
                                    pdb1_differing_atom_residue_ids.append(pdb1_atom_residue_id)
                            elif pdb1_chain_sequence_type == 'ATOM':
                                pdb1_differing_atom_residue_ids.append(pdb1_chain_sequence.order[idx - 1])
                        for idx in differing_pdb2_indices:
                            if pdb2_chain_sequence_type == 'SEQRES':
                                pdb2_seqres_residue_id = pdb2_chain_sequence.order[idx - 1]
                                pdb2_atom_residue_id = seqres_to_atom_maps.get(pdb2_name, {}).get(pdb2_chain, {}).get(pdb2_seqres_residue_id)
                                if pdb2_atom_residue_id != None:
                                    pdb2_differing_atom_residue_ids.append(pdb2_atom_residue_id)
                            elif pdb2_chain_sequence_type == 'ATOM':
                                pdb2_differing_atom_residue_ids.append(pdb2_chain_sequence.order[idx - 1])

                self.residue_id_mapping[mapping_key] = residue_id_mapping
                self.differing_atom_residue_ids[mapping_key] = pdb1_differing_atom_residue_ids
                self.differing_atom_residue_ids[reverse_mapping_key] = pdb2_differing_atom_residue_ids

        for k, v in sorted(self.differing_atom_residue_ids.items()):
            self.differing_atom_residue_ids[k] = sorted(set(v)) # the list of residues may not be unique in general so we make it unique here

        self.seqres_to_atom_maps = seqres_to_atom_maps
        self.atom_to_seqres_maps = atom_to_seqres_maps

    # Public functions

    def get_chain_mapping(self, pdb_name1, pdb_name2):
        '''This replaces the old mapping member by constructing it from self.chain_mapping. This function returns a mapping from
        chain IDs in pdb_name1 to chain IDs in pdb_name2.'''
        d = {}
        for pdb1_chain_id, matched_chain_list in self.chain_mapping[(pdb_name1, pdb_name2)].items():
            d[pdb1_chain_id] = matched_chain_list.get_related_chains_ids(pdb_name2)
        return d

    def get_differing_atom_residue_ids(self, pdb_name, pdb_list):
        '''Returns a list of residues in pdb_name which differ from the pdbs corresponding to the names in pdb_list.'''
        assert(pdb_name in self.pdb_names)
        assert(set(pdb_list).intersection(set(self.pdb_names)) == set(pdb_list)) # the names in pdb_list must be in pdb_names
        differing_atom_residue_ids = set()
        for other_pdb in pdb_list:
            differing_atom_residue_ids = differing_atom_residue_ids.union(set(self.differing_atom_residue_ids[(pdb_name, other_pdb)]))
        return sorted(differing_atom_residue_ids)

    def get_sequence_alignment_printer_objects(self, pdb_list = [], reversed = True, width = 80, line_separator = '\n'):
        '''Takes a list, pdb_list, of pdb names e.g. ['Model', 'Scaffold', ...] with which the object was created.
        Using the first element of this list as a base, get the sequence alignments with chains in other members
        of the list. For simplicity, if a chain in the first PDB matches multiple chains in another PDB, we only
        return the alignment for one of the chains. If pdb_list is empty then the function defaults to the object's
        members.
        Returns a list of tuples (chain_id, sequence_alignment_printer_object). Each sequence_alignment_printer_object
        can be used to generate a printable version of the sequence alignment.'''
        if not pdb_list:
            pdb_list = self.pdb_names
        assert(len(set(pdb_list)) == len(pdb_list) and (len(pdb_list) > 1))
        assert(sorted(set(pdb_list).intersection(set(self.pdb_names))) == sorted(set(pdb_list)))

        primary_pdb = self.pdb_name_to_structure_mapping[pdb_list[0]]
        primary_pdb_name = pdb_list[0]
        primary_pdb_chains = sorted(primary_pdb.chain_atoms.keys())

        sequence_alignment_printer_objects = []
        for primary_pdb_chain in primary_pdb_chains:
            sa = SequenceAligner()

            # Add the primary PDB's sequence for the chain
            primary_pdb_sequence_type, primary_pdb_sequence = primary_pdb.get_annotated_chain_sequence_string(primary_pdb_chain, self.use_seqres_sequences_if_possible)
            sa.add_sequence('%s_%s' % (primary_pdb_name, primary_pdb_chain), str(primary_pdb_sequence))

            other_chain_types_and_sequences = {}
            for other_pdb_name in pdb_list[1:]:
                other_pdb = self.pdb_name_to_structure_mapping[other_pdb_name]
                other_chains = self.get_chain_mapping(primary_pdb_name, other_pdb_name).get(primary_pdb_chain)
                #other_chain = self.mapping[(primary_pdb_name, other_pdb_name)].get(primary_pdb_chain)
                if other_chains:
                    other_chain = sorted(other_chains)[0]
                    other_pdb_sequence_type, other_pdb_sequence = other_pdb.get_annotated_chain_sequence_string(other_chain, self.use_seqres_sequences_if_possible)
                    other_chain_types_and_sequences[other_pdb_name] = (other_pdb_sequence_type, other_pdb_sequence)
                    sa.add_sequence('%s_%s' % (other_pdb_name, other_chain), str(other_pdb_sequence))

            if len(sa.records) > 1:
                # If there are no corresponding sequences in any other PDB, do not return the non-alignment
                sa.align()
                #pdb1_alignment_str = sa._get_alignment_lines()['%s:%s' % (primary_pdb_name, pdb1_chain)]
                #pdb2_alignment_str = sa._get_alignment_lines()['%s:%s' % (pdb2_name, pdb2_chain)]

                sequence_names, sequences, sequence_tooltips = [], [], []
                sequence_names.append('%s_%s' % (primary_pdb_name, primary_pdb_chain))
                primary_pdb_alignment_lines = sa._get_alignment_lines()['%s_%s' % (primary_pdb_name, primary_pdb_chain)]
                sequences.append(primary_pdb_alignment_lines)
                sequence_tooltips.append(self.get_sequence_tooltips(primary_pdb, primary_pdb_sequence, primary_pdb_sequence_type, primary_pdb_name, primary_pdb_chain, primary_pdb_alignment_lines))

                for other_pdb_name in pdb_list[1:]:
                    #other_chain = self.mapping[(primary_pdb_name, other_pdb_name)].get(primary_pdb_chain)
                    other_pdb = self.pdb_name_to_structure_mapping[other_pdb_name]
                    other_chains = self.get_chain_mapping(primary_pdb_name, other_pdb_name).get(primary_pdb_chain)
                    if other_chains:
                        other_chain = sorted(other_chains)[0]
                        sequence_names.append('%s_%s' % (other_pdb_name, other_chain))
                        other_pdb_alignment_lines = sa._get_alignment_lines()['%s_%s' % (other_pdb_name, other_chain)]
                        sequences.append(other_pdb_alignment_lines)
                        other_pdb_sequence_type, other_pdb_sequence = other_chain_types_and_sequences[other_pdb_name]
                        sequence_tooltips.append(self.get_sequence_tooltips(other_pdb, other_pdb_sequence, other_pdb_sequence_type, other_pdb_name, other_chain, other_pdb_alignment_lines))

                sap = MultipleSequenceAlignmentPrinter(sequence_names, sequences, sequence_tooltips)
                sequence_alignment_printer_objects.append((primary_pdb_chain, sap))
        return sequence_alignment_printer_objects

    def get_sequence_tooltips(self, pdb_object, pdb_sequence, pdb_sequence_type, pdb_name, pdb_chain, pdb_alignment_lines):
        '''pdb_sequence is a Sequence object. pdb_sequence_type is a type returned by PDB.get_annotated_chain_sequence_string,
        pdb_name is the name of the PDB used throughout this object e.g. 'Scaffold', pdb_chain is the chain of interest,
        pdb_alignment_lines are the lines returned by SequenceAligner._get_alignment_lines.
        This function returns a set of tooltips corresponding to the residues in the sequence. The tooltips are the ATOM
        residue IDs. These tooltips can be used to generate useful (and/or interactive using JavaScript) sequence alignments
        in HTML.'''
        tooltips = None
        atom_sequence = pdb_object.atom_sequences.get(pdb_chain)
        try:
            if pdb_sequence_type == 'SEQRES':
                seqres_to_atom_map = self.seqres_to_atom_maps.get(pdb_name, {}).get(pdb_chain, {})
                tooltips = []
                if seqres_to_atom_map:
                    idx = 1
                    for aligned_residue in pdb_alignment_lines.strip():
                        if aligned_residue != '-':
                            atom_residue = seqres_to_atom_map.get(idx)
                            if atom_residue:
                                # This is a sanity check to make sure that the tooltips are mapping the correct residue types to
                                # the correct residue types
                                assert(aligned_residue == atom_sequence.sequence[atom_residue].ResidueAA)
                            tooltips.append(atom_residue)
                            idx += 1
                    assert(len(tooltips) == len(str(pdb_sequence)))
            elif pdb_sequence_type == 'ATOM':
                tooltips = []
                idx = 0
                for aligned_residue in pdb_alignment_lines.strip():
                    if aligned_residue != '-':
                        # This is a sanity check to make sure that the tooltips are mapping the correct residue types to
                        # the correct residue types
                        assert(aligned_residue == pdb_sequence.sequence[pdb_sequence.order[idx]].ResidueAA)
                        tooltips.append(pdb_sequence.order[idx])
                        idx += 1
                assert(len(tooltips) == len(str(pdb_sequence)))
        except:
            raise Exception('An error occurred during HTML tooltip creation for the multiple sequence alignment.')
        return tooltips

    def get_sequence_alignment_strings(self, pdb_list = [], reversed = True, width = 80, line_separator = '\n'):
        '''Takes a list, pdb_list, of pdb names e.g. ['Model', 'Scaffold', ...] with which the object was created.
        Using the first element of this list as a base, get the sequence alignments with chains in other members
        of the list. For simplicity, if a chain in the first PDB matches multiple chains in another PDB, we only
        return the alignment for one of the chains. If pdb_list is empty then the function defaults to the object's
        members.
        Returns one sequence alignment string for each chain mapping. Each line is a concatenation of lines of the
        specified width, separated by the specified line separator.'''
        sequence_alignment_printer_tuples = self.get_sequence_alignment_printer_objects(pdb_list = pdb_list, reversed = reversed, width = width, line_separator = line_separator)
        alignment_strings = []
        for sequence_alignment_printer_tuple in sequence_alignment_printer_tuples:
            primary_pdb_chain = sequence_alignment_printer_tuple[0]
            sap = sequence_alignment_printer_tuple[1]
            alignment_strings.append(sap.to_lines(reversed = reversed, width = width, line_separator = line_separator))
        return alignment_strings

    def get_sequence_alignment_strings_as_html(self, pdb_list = [], reversed = False, width = 80, line_separator = '\n', extra_tooltip_class = ''):
        '''Takes a list, pdb_list, of pdb names e.g. ['Model', 'Scaffold', ...] with which the object was created.
        Using the first element of this list as a base, get the sequence alignments with chains in other members
        of the list. For simplicity, if a chain in the first PDB matches multiple chains in another PDB, we only
        return the alignment for one of the chains. If pdb_list is empty then the function defaults to the object's
        members.
        Returns HTML for the sequence alignments and an empty string if no alignments were made.'''
        sequence_alignment_printer_tuples = self.get_sequence_alignment_printer_objects(pdb_list = pdb_list, reversed = reversed, width = width, line_separator = line_separator)
        if not sequence_alignment_printer_tuples:
            return ''
        html = []
        for sequence_alignment_printer_tuple in sequence_alignment_printer_tuples:
            primary_pdb_chain = sequence_alignment_printer_tuple[0]
            sap = sequence_alignment_printer_tuple[1]
            html.append(sap.to_html(reversed = reversed, width = width, line_separator = line_separator, extra_tooltip_class = extra_tooltip_class))
        return '\n'.join(html)
|
class PipelinePDBChainMapper_old(BasePDBChainMapper):
    '''Similar to the removed PDBChainMapper class except this takes a list of PDB files which should be related in some way.
    The matching is done pointwise, matching all PDBs in the list to each other.
    This class is useful for a list of structures that are the result of a linear pipeline e.g. a scaffold structure (RCSB),
    a model structure (Rosetta), and a design structure (experimental result).

    The 'chain_mapping' member stores a mapping from a pair (pdb_name1, pdb_name2) to the mapping from chain IDs in pdb_name1 to
    a MatchedChainList object. This object can be used to return the list of chain IDs in pdb_name2 related to the
    respective chain in pdb_name1 based on sequence alignment. It can also be used to return the percentage identities
    for this alignment. The old mapping and mapping_percentage_identity members of this class can be built from this member
    e.g. self.mapping[('Scaffold', 'ExpStructure')] == self.get_chain_mapping('Scaffold', 'ExpStructure')

    The 'residue_id_mapping' member stores a mapping from a pair (pdb_name1, pdb_name2) to a mapping
        'ATOM'   -> chains_of_pdb_name_1 -> ATOM residues of that chain -> list of corresponding ATOM residues in the corresponding chains of pdb_name2
        'SEQRES' -> chains_of_pdb_name_1 -> SEQRES residues of that chain -> pairs of (chain_id, corresponding SEQRES residue_id) in the corresponding chains of pdb_name2
    For example, using the homodimer 3MWO for both Scaffold and ExpStructure:
        residue_id_mapping[('Scaffold', 'ExpStructure')]['ATOM']['A'] -> {'A 167 ': ['A 167 ', 'B 167 '], ...}
        residue_id_mapping[('Scaffold', 'ExpStructure')]['SEQRES']['B'] -> {167 : [('A', 167), ('C', 167)], ...}

    Objects of this class have a differing_atom_residue_ids mapping which maps the pair (pdb_name1, pdb_name2) to the list
    of ATOM residues *in pdb_name1* that differ from those of pdb_name2. Note: there is some subtlety here in terms of
    direction. For example, take this artificial example. We take a homodimer 3MWO as the scaffold and a monomer 1BN1
    with identical sequence as the model. We mutate A110 in 1BN1. We then take 3MWO with a mutation on A106 as the design.
        chain_mapper = ScaffoldModelDesignChainMapper.from_file_contents(retrieve_pdb('3MWO'), retrieve_pdb('1BN1').replace('ASP A 110', 'ASN A 110'), retrieve_pdb('3MWO').replace('GLU A 106', 'GLN A 106'))
    differing_atom_residue_ids then looks like this:
        ('Model', 'ExpStructure')    = ['A 106 ', 'A 110 '] # In Model, A110 is a mutation, reverted in ExpStructure. In ExpStructure, A106 is a mutation.
        ('Model', 'Scaffold')        = ['A 110 '] # In Model, A110 is a mutation.
        ('ExpStructure', 'Model')    = ['A 106 ', 'A 110 ', 'B 110 '] # In ExpStructure, A106 is a mutation. A110 and B110 are revertant mutations from the Model.
        ('ExpStructure', 'Scaffold') = ['A 106 '] # In ExpStructure, A106 is a mutation.
        ('Scaffold', 'ExpStructure') = ['A 106 ', 'B 106 '] # Note: In Scaffold, A106 is the wildtype which was mutated in ExpStructure. Since B106 also maps to A106, that is added to the list of differing residues.
        ('Scaffold', 'Model')        = ['A 110 ', 'B 110 '] # In Scaffold, A110 and B110 are the wildtypes which were mutated in Model.

    There is a subtlety here - the differing residue ids from Scaffold to ExpStructure are A106 and B106, corresponding to the
    mutated A106 in the ExpStructure. However, the differing residue ids from ExpStructure to Scaffold have only one member - A106. This
    makes sense as it is the only mutation; however, this may not be the desired behavior - one may wish instead to close
    the list of residues over the relations mapping the residues between the structures i.e. to generate an equivalence
    relation from the relation described by the mappings Scaffold->ExpStructure and ExpStructure->Scaffold. If that were done, then
    ('ExpStructure', 'Scaffold') would be ['A 106 ', 'B 106 '] as ExpStructure:A106 -> {Scaffold:A106, Scaffold:B106} and
    Scaffold:B106 -> {ExpStructure:A106, ExpStructure:B106}, so ExpStructure:A106 and ExpStructure:B106 are in the same equivalence class.

    If use_seqres_sequences_if_possible is set, the alignment will use the SEQRES sequences when available. See match_pdb_chains
    for more information.
    '''

    @staticmethod
    def from_file_paths(pdb_paths, pdb_names, cut_off = 60.0, use_seqres_sequences_if_possible = True, strict = True):
        pass

    def __init__(self, pdbs, pdb_names, cut_off = 60.0, use_seqres_sequences_if_possible = True, strict = True):
        pass

    def _map_residues(self):
        '''For each pair of PDB files, match the residues of a chain in the first PDB to the residues of appropriate chains in the second PDB.
        Note: we do a lot of repeated work here. Some of the lookups e.g. atom_sequences/seqres_sequences here could be cached.
        If speed is important and the sequences are expected to be similar or have lots of repeats, we could use a list of unique sequences
        as equivalence class representatives and then duplicate the matching for the other equivalent sequences.'''
        pass

    def get_chain_mapping(self, pdb_name1, pdb_name2):
        '''This replaces the old mapping member by constructing it from self.chain_mapping. This function returns a mapping from
        chain IDs in pdb_name1 to chain IDs in pdb_name2.'''
        pass

    def get_differing_atom_residue_ids(self, pdb_name, pdb_list):
        '''Returns a list of residues in pdb_name which differ from the pdbs corresponding to the names in pdb_list.'''
        pass

    def get_sequence_alignment_printer_objects(self, pdb_list = [], reversed = True, width = 80, line_separator = '\n'):
        '''Takes a list, pdb_list, of pdb names e.g. ['Model', 'Scaffold', ...] with which the object was created.
        Using the first element of this list as a base, get the sequence alignments with chains in other members
        of the list. For simplicity, if a chain in the first PDB matches multiple chains in another PDB, we only
        return the alignment for one of the chains. If pdb_list is empty then the function defaults to the object's
        members.
        Returns a list of tuples (chain_id, sequence_alignment_printer_object). Each sequence_alignment_printer_object
        can be used to generate a printable version of the sequence alignment.'''
        pass

    def get_sequence_tooltips(self, pdb_object, pdb_sequence, pdb_sequence_type, pdb_name, pdb_chain, pdb_alignment_lines):
        '''pdb_sequence is a Sequence object. pdb_sequence_type is a type returned by PDB.get_annotated_chain_sequence_string,
        pdb_name is the name of the PDB used throughout this object e.g. 'Scaffold', pdb_chain is the chain of interest,
        pdb_alignment_lines are the lines returned by SequenceAligner._get_alignment_lines.
        This function returns a set of tooltips corresponding to the residues in the sequence. The tooltips are the ATOM
        residue IDs. These tooltips can be used to generate useful (and/or interactive using JavaScript) sequence alignments
        in HTML.'''
        pass

    def get_sequence_alignment_strings(self, pdb_list = [], reversed = True, width = 80, line_separator = '\n'):
        '''Takes a list, pdb_list, of pdb names e.g. ['Model', 'Scaffold', ...] with which the object was created.
        Using the first element of this list as a base, get the sequence alignments with chains in other members
        of the list. For simplicity, if a chain in the first PDB matches multiple chains in another PDB, we only
        return the alignment for one of the chains. If pdb_list is empty then the function defaults to the object's
        members.
        Returns one sequence alignment string for each chain mapping. Each line is a concatenation of lines of the
        specified width, separated by the specified line separator.'''
        pass

    def get_sequence_alignment_strings_as_html(self, pdb_list = [], reversed = False, width = 80, line_separator = '\n', extra_tooltip_class = ''):
        '''Takes a list, pdb_list, of pdb names e.g. ['Model', 'Scaffold', ...] with which the object was created.
        Using the first element of this list as a base, get the sequence alignments with chains in other members
        of the list. For simplicity, if a chain in the first PDB matches multiple chains in another PDB, we only
        return the alignment for one of the chains. If pdb_list is empty then the function defaults to the object's
        members.
        Returns HTML for the sequence alignments and an empty string if no alignments were made.'''
        pass
| 11 | 8 | 42 | 7 | 27 | 10 | 7 | 0.53 | 1 | 13 | 8 | 0 | 8 | 10 | 9 | 11 | 448 | 87 | 241 | 108 | 230 | 127 | 233 | 106 | 223 | 25 | 2 | 7 | 65 |
143,657 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/alignment.py
|
klab.bio.alignment.ScaffoldModelChainMapper
|
class ScaffoldModelChainMapper(PipelinePDBChainMapper):
    '''A convenience class for the special case where we are mapping between a scaffold structure and a model structure.'''

    def __init__(self, scaffold_pdb, model_pdb, cut_off = 60.0, use_seqres_sequences_if_possible = True, strict = True, structure_1_name = None, structure_2_name = None):
        self.scaffold_pdb = scaffold_pdb
        self.model_pdb = model_pdb
        self.structure_1_name = structure_1_name or 'Scaffold'
        self.structure_2_name = structure_2_name or 'Model'
        super(ScaffoldModelChainMapper, self).__init__([scaffold_pdb, model_pdb], [self.structure_1_name, self.structure_2_name], cut_off = cut_off, use_seqres_sequences_if_possible = use_seqres_sequences_if_possible, strict = strict)

    @staticmethod
    def from_file_paths(scaffold_pdb_path, model_pdb_path, cut_off = 60.0, strict = True, structure_1_name = None, structure_2_name = None):
        try:
            stage = 'scaffold'
            scaffold_pdb = PDB.from_filepath(scaffold_pdb_path, strict = strict)
            stage = 'model'
            model_pdb = PDB.from_filepath(model_pdb_path, strict = strict)
        except (PDBParsingException, NonCanonicalResidueException, PDBValidationException) as e:
            raise PDBParsingException("An error occurred while loading the %s structure: '%s'" % (stage, str(e)))
        return ScaffoldModelChainMapper(scaffold_pdb, model_pdb, cut_off = cut_off, strict = strict, structure_1_name = structure_1_name, structure_2_name = structure_2_name)

    @staticmethod
    def from_file_contents(scaffold_pdb_contents, model_pdb_contents, cut_off = 60.0, strict = True, structure_1_name = None, structure_2_name = None):
        try:
            stage = 'scaffold'
            scaffold_pdb = PDB(scaffold_pdb_contents, strict = strict)
            stage = 'model'
            model_pdb = PDB(model_pdb_contents, strict = strict)
        except (PDBParsingException, NonCanonicalResidueException, PDBValidationException) as e:
            raise PDBParsingException("An error occurred while loading the %s structure: '%s'" % (stage, str(e)))
        return ScaffoldModelChainMapper(scaffold_pdb, model_pdb, cut_off = cut_off, strict = strict, structure_1_name = structure_1_name, structure_2_name = structure_2_name)

    def get_differing_model_residue_ids(self):
        return self.get_differing_atom_residue_ids(self.structure_2_name, [self.structure_1_name])

    def get_differing_scaffold_residue_ids(self):
        return self.get_differing_atom_residue_ids(self.structure_1_name, [self.structure_2_name])

    def generate_pymol_session(self, pymol_executable = 'pymol', settings = {}):
        '''Generates the PyMOL session for the scaffold and model structures.
        Returns this session and the script which generated it.'''
        b = BatchBuilder(pymol_executable = pymol_executable)
        structures_list = [
            (self.structure_1_name, self.scaffold_pdb.pdb_content, self.get_differing_scaffold_residue_ids()),
            (self.structure_2_name, self.model_pdb.pdb_content, self.get_differing_model_residue_ids()),
        ]
        PSE_files = b.run(ScaffoldModelDesignBuilder, [PDBContainer.from_content_triple(structures_list)], settings = settings)
        return PSE_files[0], b.PSE_scripts[0]
|
class ScaffoldModelChainMapper(PipelinePDBChainMapper):
    '''A convenience class for the special case where we are mapping between a scaffold structure and a model structure.'''

    def __init__(self, scaffold_pdb, model_pdb, cut_off = 60.0, use_seqres_sequences_if_possible = True, strict = True, structure_1_name = None, structure_2_name = None):
        pass

    @staticmethod
    def from_file_paths(scaffold_pdb_path, model_pdb_path, cut_off = 60.0, strict = True, structure_1_name = None, structure_2_name = None):
        pass

    @staticmethod
    def from_file_contents(scaffold_pdb_contents, model_pdb_contents, cut_off = 60.0, strict = True, structure_1_name = None, structure_2_name = None):
        pass

    def get_differing_model_residue_ids(self):
        pass

    def get_differing_scaffold_residue_ids(self):
        pass

    def generate_pymol_session(self, pymol_executable = 'pymol', settings = {}):
        '''Generates the PyMOL session for the scaffold and model structures.
        Returns this session and the script which generated it.'''
        pass
| 9 | 2 | 7 | 1 | 6 | 0 | 1 | 0.08 | 1 | 9 | 7 | 0 | 4 | 4 | 6 | 18 | 57 | 15 | 39 | 24 | 30 | 3 | 34 | 20 | 27 | 2 | 3 | 1 | 8 |
143,658 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/debug/profile.py
|
klab.debug.profile.ProfileTimer
|
class ProfileTimer(object):
    '''A dumb profiler. Cheap and cheerful.'''

    def __init__(self):
        self.stages = []
        self.stage_times = {}
        self.current_stage = None
        self.last_start_time = None
        self.stopped = True

    def start(self, stage):
        time_now = time.time()
        self.stopped = False
        if stage not in list(self.stage_times.keys()):
            self.stages.append(stage)
        if self.current_stage:
            self.stage_times[self.current_stage] = self.stage_times.get(self.current_stage, 0)
            self.stage_times[self.current_stage] += time_now - self.last_start_time
            self.last_start_time = time_now
            self.current_stage = stage
        else:
            self.current_stage = stage
            self.last_start_time = time_now
            self.stage_times[stage] = 0

    def stop(self):
        time_now = time.time()
        if self.current_stage:
            self.stage_times[self.current_stage] = self.stage_times.get(self.current_stage, 0)
            self.stage_times[self.current_stage] += time_now - self.last_start_time
            self.last_start_time = None
            self.current_stage = None
        self.stopped = True

    def getTotalTime(self):
        if not self.stopped:
            return None
        t = 0
        for stage in self.stages:
            t += self.stage_times[stage]
        return t

    def _getProfileForTerminal(self):
        if not self.stopped:
            return False
        s = [colortext.make('Total time: %fs' % self.getTotalTime(), color = 'white', effect = colortext.BOLD)]
        stage_times = sorted([self.stage_times[stage] for stage in self.stages])
        if len(stage_times) < 10:
            top_time_cutoff = stage_times[-2]
        else:
            top_time_cutoff = stage_times[-(len(stage_times) // 5)] # integer division: the Python 2 '/' here breaks list indexing under Python 3
        for stage in self.stages:
            if self.stage_times[stage] == stage_times[-1]:
                s.append(colortext.make(" %s: %fs" % (stage, self.stage_times[stage]), 'pink'))
            elif self.stage_times[stage] >= top_time_cutoff:
                s.append(colortext.make(" %s: %fs" % (stage, self.stage_times[stage]), 'red'))
            else:
                s.append(colortext.make(" %s: %fs" % (stage, self.stage_times[stage]), 'silver'))
        return "\n".join(s)

    def _getProfileForWeb(self):
        if not self.stopped:
            return False
        s = ['<b>Total time: %fs</b>' % self.getTotalTime()]
        stage_times = sorted([self.stage_times[stage] for stage in self.stages])
        if len(stage_times) < 10:
            top_time_cutoff = stage_times[-2]
        else:
            top_time_cutoff = stage_times[-(len(stage_times) // 5)] # integer division: the Python 2 '/' here breaks list indexing under Python 3
        for stage in self.stages:
            if self.stage_times[stage] == stage_times[-1]:
                s.append("<b><font color='#550000'>%s: %fs</font></b>" % (stage, self.stage_times[stage]))
            elif self.stage_times[stage] >= top_time_cutoff:
                s.append("<b><font color='red'>%s: %fs</font></b>" % (stage, self.stage_times[stage]))
            else:
                s.append("%s: %fs" % (stage, self.stage_times[stage]))
        return "<br>".join(s)

    def getProfile(self, html = True):
        if html:
            return self._getProfileForWeb()
        else:
            return self._getProfileForTerminal()
|
class ProfileTimer(object):
    '''A dumb profiler. Cheap and cheerful.'''

    def __init__(self):
        pass

    def start(self, stage):
        pass

    def stop(self):
        pass

    def getTotalTime(self):
        pass

    def _getProfileForTerminal(self):
        pass

    def _getProfileForWeb(self):
        pass

    def getProfile(self, html = True):
        pass
| 8 | 1 | 12 | 1 | 11 | 0 | 3 | 0.03 | 1 | 1 | 0 | 0 | 7 | 5 | 7 | 7 | 90 | 14 | 75 | 23 | 67 | 2 | 67 | 23 | 59 | 6 | 1 | 2 | 23 |
143,659 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/alignment.py
|
klab.bio.alignment.ScaffoldModelDesignChainMapper
|
class ScaffoldModelDesignChainMapper(PipelinePDBChainMapper):
    '''A convenience class for the special case where we are mapping specifically from a model structure to a scaffold structure and a design structure.
    The scaffold structure is allowed to be missing.
    '''

    def __init__(self, scaffold_pdb, model_pdb, design_pdb, cut_off = 60.0, use_seqres_sequences_if_possible = True, strict = True):
        self.scaffold_pdb = scaffold_pdb
        self.model_pdb = model_pdb
        self.design_pdb = design_pdb
        if self.scaffold_pdb:
            super(ScaffoldModelDesignChainMapper, self).__init__([scaffold_pdb, model_pdb, design_pdb], ['Scaffold', 'Model', 'ExpStructure'], cut_off = cut_off, use_seqres_sequences_if_possible = use_seqres_sequences_if_possible, strict = strict)
        else:
            super(ScaffoldModelDesignChainMapper, self).__init__([model_pdb, design_pdb], ['Model', 'ExpStructure'], cut_off = cut_off, use_seqres_sequences_if_possible = use_seqres_sequences_if_possible, strict = strict)

    @staticmethod
    def from_file_paths(scaffold_pdb_path, model_pdb_path, design_pdb_path, cut_off = 60.0, strict = True):
        try:
            stage = 'scaffold'
            scaffold_pdb = None
            if scaffold_pdb_path:
                # Allow the scaffold to be null
                scaffold_pdb = PDB.from_filepath(scaffold_pdb_path, strict = strict)
            stage = 'model'
            model_pdb = PDB.from_filepath(model_pdb_path, strict = strict)
            stage = 'design'
            design_pdb = PDB.from_filepath(design_pdb_path, strict = strict)
        except (PDBParsingException, NonCanonicalResidueException, PDBValidationException) as e:
            raise PDBParsingException("An error occurred while loading the %s structure: '%s'" % (stage, str(e)))
        return ScaffoldModelDesignChainMapper(scaffold_pdb, model_pdb, design_pdb, cut_off = cut_off, strict = strict)

    @staticmethod
    def from_file_contents(scaffold_pdb_contents, model_pdb_contents, design_pdb_contents, cut_off = 60.0, strict = True):
        try:
            stage = 'scaffold'
            scaffold_pdb = None
            if scaffold_pdb_contents:
                # Allow the scaffold to be null
                scaffold_pdb = PDB(scaffold_pdb_contents, strict = strict)
            stage = 'model'
            model_pdb = PDB(model_pdb_contents, strict = strict)
            stage = 'design'
            design_pdb = PDB(design_pdb_contents, strict = strict)
        except (PDBParsingException, NonCanonicalResidueException, PDBValidationException) as e:
            #import traceback
            #colortext.warning(traceback.format_exc())
            raise PDBParsingException("An error occurred while loading the %s structure: '%s'" % (stage, str(e)))
        return ScaffoldModelDesignChainMapper(scaffold_pdb, model_pdb, design_pdb, cut_off = cut_off, strict = strict)

    def get_differing_model_residue_ids(self):
        if self.scaffold_pdb:
            return self.get_differing_atom_residue_ids('Model', ['Scaffold', 'ExpStructure'])
        else:
            return self.get_differing_atom_residue_ids('Model', ['ExpStructure'])

    def get_differing_scaffold_residue_ids(self):
        if self.scaffold_pdb:
            return self.get_differing_atom_residue_ids('Scaffold', ['Model', 'ExpStructure'])

    def get_differing_design_residue_ids(self):
        if self.scaffold_pdb:
            return self.get_differing_atom_residue_ids('ExpStructure', ['Scaffold', 'Model'])
        else:
            return self.get_differing_atom_residue_ids('ExpStructure', ['Model'])

    def generate_pymol_session(self, pymol_executable = 'pymol', settings = {}):
        '''Generates the PyMOL session for the scaffold, model, and design structures.
        Returns this session and the script which generated it.'''
        b = BatchBuilder(pymol_executable = pymol_executable)
        if self.scaffold_pdb:
            structures_list = [
                ('Scaffold', self.scaffold_pdb.pdb_content, self.get_differing_scaffold_residue_ids()),
                ('Model', self.model_pdb.pdb_content, self.get_differing_model_residue_ids()),
                ('ExpStructure', self.design_pdb.pdb_content, self.get_differing_design_residue_ids()),
            ]
        else:
            structures_list = [
                ('Model', self.model_pdb.pdb_content, self.get_differing_model_residue_ids()),
                ('ExpStructure', self.design_pdb.pdb_content, self.get_differing_design_residue_ids()),
            ]
        PSE_files = b.run(ScaffoldModelDesignBuilder, [PDBContainer.from_content_triple(structures_list)], settings = settings)
        return PSE_files[0], b.PSE_scripts[0]
|
class ScaffoldModelDesignChainMapper(PipelinePDBChainMapper):
    '''A convenience class for the special case where we are mapping specifically from a model structure to a scaffold structure and a design structure.
    The scaffold structure is allowed to be missing.
    '''

    def __init__(self, scaffold_pdb, model_pdb, design_pdb, cut_off = 60.0, use_seqres_sequences_if_possible = True, strict = True):
        pass

    @staticmethod
    def from_file_paths(scaffold_pdb_path, model_pdb_path, design_pdb_path, cut_off = 60.0, strict = True):
        pass

    @staticmethod
    def from_file_contents(scaffold_pdb_contents, model_pdb_contents, design_pdb_contents, cut_off = 60.0, strict = True):
        pass

    def get_differing_model_residue_ids(self):
        pass

    def get_differing_scaffold_residue_ids(self):
        pass

    def get_differing_design_residue_ids(self):
        pass

    def generate_pymol_session(self, pymol_executable = 'pymol', settings = {}):
        '''Generates the PyMOL session for the scaffold, model, and design structures.
        Returns this session and the script which generated it.'''
        pass
| 10 | 2 | 11 | 1 | 9 | 1 | 2 | 0.14 | 1 | 9 | 7 | 0 | 5 | 3 | 7 | 19 | 86 | 12 | 65 | 26 | 55 | 9 | 52 | 22 | 44 | 3 | 3 | 2 | 16 |
143,660 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/RosettaProtocols.py
|
klab.RosettaProtocols.WebserverProtocols
|
class WebserverProtocols(object):

    protocols = None
    protocolGroups = None

    def __init__(self):
        protocols = []
        protocolGroups = []

        protocolGroups.append(RosettaProtocolGroup("Point Mutation", "#DCE9F4"))
        proto = RosettaProtocol("One Mutation", "point_mutation")
        proto.setBinaries("mini", "classic")
        proto.setNumStructures(2, 10, 50)
        protocolGroups[0].add(proto)
        proto = RosettaProtocol("Multiple Mutations", "multiple_mutation")
        proto.setBinaries("mini", "classic")
        proto.setNumStructures(2, 10, 50)
        protocolGroups[0].add(proto)

        protocolGroups.append(RosettaProtocolGroup("Backrub Ensemble", "#B7FFE0"))
        proto = RosettaProtocol("Backrub Ensemble", "no_mutation")
        proto.setBinaries("classic", "mini")
        proto.setNumStructures(2, 10, 50)
        protocolGroups[1].add(proto)
        proto = RosettaProtocol("Backrub Ensemble Design", "ensemble")
        proto.setBinaries("ensemble")
        proto.setNumStructures(2, 10, 50)
        protocolGroups[1].add(proto)

        protocolGroups.append(RosettaProtocolGroup("Sequence Tolerance", "#FFE2E2"))
        proto = RosettaProtocol("Interface Sequence Tolerance", "sequence_tolerance")
        proto.setBinaries("seqtolHK")
        proto.setNumStructures(2, 10, 50)
        protocolGroups[2].add(proto)
        proto = RosettaProtocol("Generalized Protocol<br>(Fold / Interface)<br>Sequence Tolerance", "sequence_tolerance_SK")
        proto.setBinaries("seqtolJMB", "seqtolP1")
        proto.setNumStructures(20, 50, 150)
        protocolGroups[2].add(proto)

        # Private protocols for the lab go here
        protocolGroups.append(RosettaProtocolGroup("Private Protocols", "#ffe2ba", public = False))
        proto = RosettaProtocol("Multiple Sequence Tolerance", "multi_sequence_tolerance")
        proto.setBinaries("multiseqtol")
        proto.setNumStructures(2, 20, 100)
        protocolGroups[3].add(proto)

        # A flat list of the protocols
        protocols = []
        for i in range(len(protocolGroups)):
            protocols.extend(protocolGroups[i].getProtocols())

        self.protocolGroups = protocolGroups
        self.protocols = protocols

    def getProtocols(self):
        return self.protocolGroups, self.protocols

    def getProtocolDBNames(self):
        dbnames = []
        for p in self.protocols:
            dbnames.append(p.dbname)
        return dbnames
|
class WebserverProtocols(object):

    def __init__(self):
        pass

    def getProtocols(self):
        pass

    def getProtocolDBNames(self):
        pass
| 4 | 0 | 21 | 5 | 16 | 2 | 2 | 0.12 | 1 | 3 | 2 | 0 | 3 | 0 | 3 | 3 | 70 | 18 | 50 | 12 | 46 | 6 | 50 | 12 | 46 | 2 | 1 | 1 | 5 |
143,661 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/fcm/fcm.py
|
klab.fcm.fcm.PlateInfo
|
class PlateInfo:

    def __init__ (self, name, value, new_positions):
        self.name = name
        if value == None:
            self.value = np.nan
        else:
            self.value = value
        self.positions = []
        if isinstance(new_positions, list):
            for new_position_range in new_positions:
                self.add_position_range(new_position_range)
        elif isinstance(new_positions, str):
            self.add_position_range(new_positions)
        else:
            raise Exception('Input new positions must be a list or string')

    def add_position_range(self, pos_range):
        if '-' in pos_range:
            first_pos_str, second_pos_str = pos_range.split('-')
            first_pos = PlatePos(first_pos_str)
            second_pos = PlatePos(second_pos_str)
            first_pos_char_index = rows_in_plate.index(first_pos.row)
            second_pos_char_index = rows_in_plate.index(second_pos.row)
            for char_index in range(first_pos_char_index, second_pos_char_index + 1):
                row = rows_in_plate[char_index]
                for col in range(first_pos.col, second_pos.col + 1):
                    self.add_position( '%s%d' % (row, col) )
        else:
            self.add_position(pos_range)

    def add_position(self, pos_str):
        pos = PlatePos(pos_str)
        if pos not in self.positions:
            self.positions.append(pos)
            self.positions.sort()

    @property
    def position_set(self):
        return_set = set()
        for pos in self.positions:
            return_set.add(pos)
        return return_set

    def __repr__(self):
        return str( self.positions )
|
class PlateInfo:

    def __init__ (self, name, value, new_positions):
        pass

    def add_position_range(self, pos_range):
        pass

    def add_position(self, pos_str):
        pass

    @property
    def position_set(self):
        pass

    def __repr__(self):
        pass
| 7 | 0 | 8 | 1 | 8 | 0 | 3 | 0 | 0 | 6 | 1 | 0 | 5 | 3 | 5 | 5 | 48 | 7 | 41 | 22 | 34 | 0 | 36 | 21 | 30 | 5 | 0 | 3 | 14 |
143,662 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/fcm/fcm.py
|
klab.fcm.fcm.PlatePos
|
class PlatePos:
def __init__ (self, plate_position_str):
self.row = plate_position_str[0]
assert( self.row in rows_in_plate )
self.col = int(plate_position_str[1:])
# Returns the next position on the plate
@property
def next_pos(self):
if self.row_index == len(rows_in_plate)-1:
if self.col == cols_in_plate[-1]:
return None
if self.col == cols_in_plate[-1]:
next_pos_row = rows_in_plate[ self.row_index+1 ]
next_pos_col = 1
else:
next_pos_row = self.row
next_pos_col = self.col + 1
return PlatePos( '%s%d' % (next_pos_row, next_pos_col) )
@property
def row_index(self):
return rows_in_plate.index(self.row)
def __repr__(self):
return '%s%02d' % (self.row, self.col)
def __lt__ (self, other):
if self.row == other.row:
return self.col < other.col
else:
return self.row < other.row
def __hash__(self):
return hash( str(self) )
def __eq__(self, other):
return self.row == other.row and self.col == other.col
def __ne__(self, other):
return not self.__eq__(other)
|
class PlatePos:
def __init__ (self, plate_position_str):
pass
@property
def next_pos(self):
pass
@property
def row_index(self):
pass
def __repr__(self):
pass
def __lt__ (self, other):
pass
def __hash__(self):
pass
def __eq__(self, other):
pass
def __ne__(self, other):
pass
| 11 | 0 | 4 | 0 | 4 | 0 | 2 | 0.03 | 0 | 2 | 0 | 0 | 8 | 2 | 8 | 8 | 43 | 9 | 33 | 15 | 22 | 1 | 29 | 13 | 20 | 4 | 0 | 2 | 12 |
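PlatePos normalizes well coordinates and defines ordering, which is what lets PlateInfo sort and deduplicate its positions. A small sketch (assumes the module-level rows_in_plate/cols_in_plate globals the class references):

p = PlatePos('A1')
print(p)                                 # 'A01' - the repr zero-pads the column
print(p.next_pos)                        # 'A02'; next_pos walks along a row, then wraps to the next row
print(PlatePos('A5') < PlatePos('B1'))   # True - rows compare before columns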
143,663 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/docopt.py
|
klab.docopt.Command
|
class Command(Argument):
def __init__(self, name, value=False):
self.name = name
self.value = value
def single_match(self, left):
for n, p in enumerate(left):
if type(p) is Argument:
if p.value == self.name:
return n, Command(self.name, True)
else:
break
return None, None
|
class Command(Argument):
def __init__(self, name, value=False):
pass
def single_match(self, left):
pass
| 3 | 0 | 6 | 0 | 6 | 0 | 3 | 0 | 1 | 2 | 0 | 0 | 2 | 2 | 2 | 14 | 14 | 2 | 12 | 6 | 9 | 0 | 11 | 6 | 8 | 4 | 4 | 3 | 5 |
143,664 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/basics.py
|
klab.bio.basics.PDBMutationPair
|
class PDBMutationPair(object):
    '''A simple x,y container holding a related pair of mutations:
one SEQRES mutation: these are the actual mutations from wildtype and will inform the structure even if the corresponding coordinates are missing; and
one ATOM mutation: these are the mutations that can be made in computational modeling based purely on the structure.
'''
def __init__(self, seqres_mutation, atom_mutation):
assert(seqres_mutation != None)
assert((atom_mutation == None) or (seqres_mutation.WildTypeAA == atom_mutation.WildTypeAA))
assert((atom_mutation == None) or (seqres_mutation.MutantAA == atom_mutation.MutantAA))
self.seqres_mutation = seqres_mutation
self.atom_mutation = atom_mutation
def __hash__(self):
'''This is used by the set() construction to remove duplicates in sets of RadialStrands.'''
return hash(str(self.seqres_mutation) + '|' + str(self.atom_mutation))
def __cmp__(self, other):
'''This is used by the set() construction to remove duplicates in sets of RadialStrands.'''
return self.seqres_mutation.__cmp__(other.seqres_mutation)
def __eq__(self, other):
        # Equality requires both the SEQRES and ATOM mutations to match.
return (self.seqres_mutation == other.seqres_mutation) and (self.atom_mutation == other.atom_mutation)
def __repr__(self):
        # Returns a readable '[SEQ: ..., ATOM: ...]' summary of the pair.
return '[SEQ: {0}, ATOM: {1}]'.format(str(self.seqres_mutation), str(self.atom_mutation))
|
class PDBMutationPair(object):
    '''A simple x,y container holding a related pair of mutations:
one SEQRES mutation: these are the actual mutations from wildtype and will inform the structure even if the corresponding coordinates are missing; and
one ATOM mutation: these are the mutations that can be made in computational modeling based purely on the structure.
'''
def __init__(self, seqres_mutation, atom_mutation):
pass
def __hash__(self):
'''This is used by the set() construction to remove duplicates in sets of RadialStrands.'''
pass
def __cmp__(self, other):
'''This is used by the set() construction to remove duplicates in sets of RadialStrands.'''
pass
def __eq__(self, other):
pass
def __repr__(self):
pass
| 6 | 3 | 4 | 0 | 3 | 1 | 1 | 0.53 | 1 | 1 | 0 | 0 | 5 | 2 | 5 | 5 | 32 | 9 | 15 | 8 | 9 | 8 | 15 | 8 | 9 | 1 | 1 | 0 | 5 |
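A construction sketch for PDBMutationPair using a hypothetical stand-in for the mutation objects (the real classes live in klab.bio.basics); only the attributes the container's assertions touch are stubbed.

class FakeMutation(object):
    # Hypothetical stand-in exposing just WildTypeAA/ResidueID/MutantAA.
    def __init__(self, wt, resid, mut):
        self.WildTypeAA, self.ResidueID, self.MutantAA = wt, resid, mut
    def __repr__(self):
        return '{0}{1}{2}'.format(self.WildTypeAA, self.ResidueID, self.MutantAA)

seqres_mut = FakeMutation('A', 123, 'G')
atom_mut = FakeMutation('A', 123, 'G')        # may be None when the coordinates are missing
pair = PDBMutationPair(seqres_mut, atom_mut)
print(pair)                                   # [SEQ: A123G, ATOM: A123G]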
143,665 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/loggers/simple.py
|
klab.loggers.simple.ReportingObject
|
class ReportingObject(object):
'''A simple class to allow stdout suppression.'''
def __init__(self, silent = False):
self.silent = silent
def log(self, str, fn = None):
if not self.silent:
if fn:
fn(str)
else:
print(str)
def glog(self, *args, **kwargs):
# A more generic logging function accepting args and kwargs
if not self.silent:
if 'fn' in kwargs:
fn = kwargs['fn']
del kwargs['fn']
fn(*args, **kwargs)
kwargs['fn'] = fn
else:
if args and kwargs: print((args, kwargs))
elif kwargs: print(kwargs)
else: print(args)
|
class ReportingObject(object):
'''A simple class to allow stdout suppression.'''
def __init__(self, silent = False):
pass
def log(self, str, fn = None):
pass
def glog(self, *args, **kwargs):
pass
| 4 | 1 | 7 | 0 | 6 | 0 | 3 | 0.1 | 1 | 0 | 0 | 1 | 3 | 1 | 3 | 3 | 27 | 5 | 20 | 6 | 16 | 2 | 19 | 6 | 15 | 5 | 1 | 3 | 9 |
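Usage is straightforward: the silent flag suppresses all output and the fn argument redirects messages, e.g. to the logging module.

import logging
logging.basicConfig(level = logging.INFO)

reporter = ReportingObject()
reporter.log('plain message')                         # printed via print()
reporter.log('routed message', fn = logging.info)     # routed through logging instead
ReportingObject(silent = True).log('never shown')     # suppressed entirely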
143,666 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/plot/rtools.py
|
klab.plot.rtools.RInterface
|
class RInterface(object):
@staticmethod
def _runRScript(r_script_filename, cwd = '.', remove_output = True):
# Reset to new current working directory
tmp_dir = False
if cwd == None:
tmp_dir = True
cwd = tempfile.mkdtemp( prefix = '%s-%s-%s_' % (time.strftime("%y%m%d"), getpass.getuser(), 'plot-working-dir') )
rscriptname = write_temp_file(cwd, r_script_filename)
p = subprocess.Popen(["R", "CMD", "BATCH", rscriptname], cwd = cwd)
while True:
time.sleep(0.3)
errcode = p.poll()
if errcode != None:
break
rout = "%s.Rout" % rscriptname
os.remove(rscriptname)
rout_contents = None
if os.path.exists(rout):
rout_contents = read_file(rout)
os.remove(rout)
if errcode != 0:
            print(rout_contents)
raise Exception("The R script failed with error code %d." % errcode)
if tmp_dir and remove_output:
shutil.rmtree(cwd)
return rout_contents
@staticmethod
def correlation_coefficient_gplot(inputfname, output_filename, filetype, experiment_field = "Experimental", title = ''):
'''File suffix: pearsons_r_gplot
Description: Pearson's r
Filename: ggplot_pearsons.R
Priority: 1
'''
script_path = os.path.abspath(os.path.dirname(inspect.getsourcefile(sys.modules[__name__])))
r_script_filename = read_file(os.path.join(script_path, "ggplot_pearsons.R")) % vars()
return RInterface._runRScript(r_script_filename)
|
class RInterface(object):
@staticmethod
def _runRScript(r_script_filename, cwd = '.', remove_output = True):
pass
@staticmethod
def correlation_coefficient_gplot(inputfname, output_filename, filetype, experiment_field = "Experimental", title = ''):
'''File suffix: pearsons_r_gplot
Description: Pearson's r
Filename: ggplot_pearsons.R
Priority: 1
'''
pass
| 5 | 1 | 20 | 3 | 14 | 3 | 4 | 0.19 | 1 | 2 | 0 | 0 | 0 | 0 | 2 | 2 | 45 | 8 | 31 | 13 | 26 | 6 | 29 | 11 | 26 | 7 | 1 | 2 | 8 |
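The helper fills the bundled ggplot_pearsons.R template, runs it with R CMD BATCH, and returns the .Rout contents; note that _runRScript's r_script_filename parameter actually receives the script text, which write_temp_file then writes to disk. A hedged usage sketch (assumes R on the PATH and a CSV with the columns the template expects; the file names are hypothetical):

rout = RInterface.correlation_coefficient_gplot(
    inputfname = 'scores.csv',          # hypothetical input CSV
    output_filename = 'pearsons.png',   # hypothetical plot output
    filetype = 'png',
    title = 'Predicted vs experimental')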
143,667 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/process.py
|
klab.process.Daemon
|
class Daemon(object):
"""
A generic daemon class.
Usage: subclass the Daemon class and override the run() method
"""
def __init__(self, pidfile, redirect_output = True, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.pidfile = pidfile
self.debug = False
self.redirect_output = redirect_output
def daemonize(self):
"""
do the UNIX double-fork magic, see Stevens' "Advanced
Programming in the UNIX Environment" for details (ISBN 0201563177)
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
"""
try:
pid = os.fork()
if pid > 0:
# exit first parent
sys.exit(0)
except OSError as e:
sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# decouple from parent environment
os.chdir("/")
os.setsid()
os.umask(0)
# do second fork
try:
pid = os.fork()
if pid > 0:
# exit from second parent
sys.exit(0)
except OSError as e:
sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# Redirect standard file descriptors for the live webserver
# if not self.debug:
# # this discards any output.
if self.redirect_output:
sys.stdout.flush()
sys.stderr.flush()
            si = open(self.stdin, 'r')
            so = open(self.stdout, 'w', 1) # so = open(self.stdout, 'a+', 1)
            se = open(self.stderr, 'w', 1) # se = open(self.stderr, 'a+', 1)
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
# write pidfile
atexit.register(self.delpid)
pid = str(os.getpid())
        open(self.pidfile, 'w+').write("%s\n" % pid)
def writepid(self):
pid = str(os.getpid())
        open(self.pidfile, 'w+').write("%s\n" % pid)
def delpid(self):
os.remove(self.pidfile)
def start(self):
"""
Start the daemon
"""
# Check for a pidfile to see if the daemon already runs
pid = None
if os.path.exists(self.pidfile):
try:
                pf = open(self.pidfile, 'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if pid:
message = "pidfile %s already exist. Daemon already running?\n"
sys.stderr.write(message % self.pidfile)
sys.exit(1)
# Start the daemon
self.daemonize()
self.run()
def stop(self):
"""
Stop the daemon
"""
# Get the pid from the pidfile
try:
            pf = open(self.pidfile, 'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if not pid:
message = "pidfile %s does not exist. Daemon not running?\n"
sys.stderr.write(message % self.pidfile)
return # not an error in a restart
# Try killing the daemon process
try:
while 1:
pgid = os.getpgid(pid)
os.killpg(pgid, SIGTERM) # let's kill the whole process tree so that there are no zombies left
# os.kill(pid, SIGTERM)
time.sleep(0.1)
except OSError as err:
err = str(err)
if err.find("No such process") > 0:
os.remove(self.pidfile)
else:
print(str(err))
sys.exit(1)
def restart(self):
"""
Restart the daemon
"""
self.stop()
self.start()
def run(self):
"""
You should override this method when you subclass Daemon. It will be called after the process has been
daemonized by start() or restart().
"""
|
class Daemon(object):
'''
A generic daemon class.
Usage: subclass the Daemon class and override the run() method
'''
def __init__(self, pidfile, redirect_output = True, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
pass
def daemonize(self):
'''
do the UNIX double-fork magic, see Stevens' "Advanced
Programming in the UNIX Environment" for details (ISBN 0201563177)
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
'''
pass
def writepid(self):
pass
def delpid(self):
pass
def start(self):
'''
Start the daemon
'''
pass
def stop(self):
'''
Stop the daemon
'''
pass
def restart(self):
'''
Restart the daemon
'''
pass
def run(self):
'''
You should override this method when you subclass Daemon. It will be called after the process has been
daemonized by start() or restart().
'''
pass
| 9 | 6 | 15 | 1 | 11 | 5 | 3 | 0.48 | 1 | 3 | 0 | 0 | 8 | 6 | 8 | 8 | 136 | 16 | 85 | 29 | 76 | 41 | 84 | 27 | 75 | 6 | 1 | 2 | 21 |
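Typical use, per the class docstring: subclass, override run(), and drive the daemon from a small control script. A sketch assuming a POSIX system (daemonize() relies on os.fork) and a writable pidfile path:

import sys, time

class MyDaemon(Daemon):
    def run(self):
        while True:
            time.sleep(10)  # real work goes here

if __name__ == '__main__':
    d = MyDaemon('/tmp/mydaemon.pid')  # hypothetical pidfile location
    if sys.argv[1:] == ['start']:
        d.start()
    elif sys.argv[1:] == ['stop']:
        d.stop()
    elif sys.argv[1:] == ['restart']:
        d.restart()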
143,668 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/process.py
|
klab.process.ProcessOutput
|
class ProcessOutput(object):
def __init__(self, stdout, stderr, errorcode):
self.stdout = stdout
self.stderr = stderr
self.errorcode = errorcode
def getError(self):
if self.errorcode != 0:
return("Errorcode: %d\n%s" % (self.errorcode, self.stderr))
return None
|
class ProcessOutput(object):
def __init__(self, stdout, stderr, errorcode):
pass
def getError(self):
pass
| 3 | 0 | 4 | 0 | 4 | 0 | 2 | 0 | 1 | 0 | 0 | 0 | 2 | 3 | 2 | 2 | 11 | 2 | 9 | 6 | 6 | 0 | 9 | 6 | 6 | 2 | 1 | 1 | 3 |
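ProcessOutput is a plain result holder; a typical producer pairs it with subprocess along these (hypothetical) lines:

import subprocess

def run_command(args):
    # Hypothetical helper showing how a ProcessOutput is usually populated.
    p = subprocess.Popen(args, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
    stdout, stderr = p.communicate()
    return ProcessOutput(stdout, stderr, p.returncode)

po = run_command(['echo', 'hello'])
assert po.getError() is None  # non-zero exit codes yield a formatted error string instead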
143,669 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/process3.py
|
klab.process3.Daemon
|
class Daemon(object):
"""
A generic daemon class.
Usage: subclass the Daemon class and override the run() method
"""
def __init__(self, pidfile, redirect_output = True, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.pidfile = pidfile
self.debug = False
self.redirect_output = redirect_output
def daemonize(self):
"""
do the UNIX double-fork magic, see Stevens' "Advanced
Programming in the UNIX Environment" for details (ISBN 0201563177)
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
"""
try:
pid = os.fork()
if pid > 0:
# exit first parent
sys.exit(0)
except OSError as e:
sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# decouple from parent environment
os.chdir("/")
os.setsid()
os.umask(0)
# do second fork
try:
pid = os.fork()
if pid > 0:
# exit from second parent
sys.exit(0)
except OSError as e:
sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# Redirect standard file descriptors for the live webserver
# if not self.debug:
# # this discards any output.
if self.redirect_output:
sys.stdout.flush()
sys.stderr.flush()
            si = open(self.stdin, 'r')
            so = open(self.stdout, 'w', 1) # so = open(self.stdout, 'a+', 1)
            se = open(self.stderr, 'w', 1) # se = open(self.stderr, 'a+', 1)
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
# write pidfile
atexit.register(self.delpid)
pid = str(os.getpid())
        open(self.pidfile, 'w+').write("%s\n" % pid)
def writepid(self):
pid = str(os.getpid())
        open(self.pidfile, 'w+').write("%s\n" % pid)
def delpid(self):
os.remove(self.pidfile)
def start(self):
"""
Start the daemon
"""
# Check for a pidfile to see if the daemon already runs
pid = None
if os.path.exists(self.pidfile):
try:
                pf = open(self.pidfile, 'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if pid:
message = "pidfile %s already exist. Daemon already running?\n"
sys.stderr.write(message % self.pidfile)
sys.exit(1)
# Start the daemon
self.daemonize()
self.run()
def stop(self):
"""
Stop the daemon
"""
# Get the pid from the pidfile
try:
            pf = open(self.pidfile, 'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if not pid:
message = "pidfile %s does not exist. Daemon not running?\n"
sys.stderr.write(message % self.pidfile)
return # not an error in a restart
# Try killing the daemon process
try:
while 1:
pgid = os.getpgid(pid)
os.killpg(pgid, SIGTERM) # let's kill the whole process tree so that there are no zombies left
# os.kill(pid, SIGTERM)
time.sleep(0.1)
except OSError as err:
err = str(err)
if err.find("No such process") > 0:
os.remove(self.pidfile)
else:
                print(str(err))
sys.exit(1)
def restart(self):
"""
Restart the daemon
"""
self.stop()
self.start()
def run(self):
"""
You should override this method when you subclass Daemon. It will be called after the process has been
daemonized by start() or restart().
"""
|
class Daemon(object):
'''
A generic daemon class.
Usage: subclass the Daemon class and override the run() method
'''
def __init__(self, pidfile, redirect_output = True, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
pass
def daemonize(self):
'''
do the UNIX double-fork magic, see Stevens' "Advanced
Programming in the UNIX Environment" for details (ISBN 0201563177)
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
'''
pass
def writepid(self):
pass
def delpid(self):
pass
def start(self):
'''
Start the daemon
'''
pass
def stop(self):
'''
Stop the daemon
'''
pass
def restart(self):
'''
Restart the daemon
'''
pass
def run(self):
'''
You should override this method when you subclass Daemon. It will be called after the process has been
daemonized by start() or restart().
'''
pass
| 9 | 6 | 15 | 1 | 11 | 5 | 3 | 0.48 | 1 | 3 | 0 | 0 | 8 | 6 | 8 | 8 | 136 | 16 | 85 | 29 | 76 | 41 | 84 | 27 | 75 | 6 | 1 | 2 | 21 |
143,670 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/pymath/cartesian/spatialhash.py
|
klab.pymath.cartesian.spatialhash.SpatialHash
|
class SpatialHash:
def __init__(self, size):
self.size = size
self.dimensions = 0
self.quads = {}
def quadkey(self, position):
if len(position) != self.dimensions:
sys.exit()
quadrant = [0.]*self.dimensions
for i in range(self.dimensions):
quadrant[i] = int(math.floor(position[i]/self.size))
return tuple(quadrant)
def insert(self, position, data):
if self.dimensions == 0:
self.dimensions = len(position)
key = self.quadkey(position)
self.quads[key] = self.quads.get(key, []) + [(position, data)]
def nearby(self, position, radius):
minkey = self.quadkey([position[i] - radius for i in range(self.dimensions)])
maxkey = self.quadkey([position[i] + radius for i in range(self.dimensions)])
quadstosearch = [[i] for i in range(minkey[0], maxkey[0]+1)]
for i in range(1, self.dimensions):
newquads = []
for j in range(minkey[i], maxkey[i]+1):
newquads += [oldquad + [j] for oldquad in quadstosearch]
quadstosearch = newquads
radiussquared = radius*radius
results = []
for quad in quadstosearch:
quadrant = self.quads.get(tuple(quad))
if quadrant:
for pos, data in quadrant:
distsquared = 0
for i in range(self.dimensions):
distsquared += (position[i] - pos[i]) ** 2
if distsquared <= radiussquared:
results += [(pos, data)]
return results
|
class SpatialHash:
def __init__(self, size):
pass
def quadkey(self, position):
pass
def insert(self, position, data):
pass
def nearby(self, position, radius):
pass
| 5 | 0 | 12 | 3 | 9 | 0 | 4 | 0 | 0 | 3 | 0 | 0 | 4 | 3 | 4 | 4 | 54 | 16 | 38 | 22 | 33 | 0 | 38 | 22 | 33 | 8 | 0 | 4 | 14 |
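A usage sketch: the dimensionality is fixed by the first insert() and nearby() returns (position, data) pairs within the requested radius.

sh = SpatialHash(5.0)                      # cell size of 5 units
sh.insert((1.0, 2.0, 3.0), 'atom_1')       # the first insert fixes the dimensionality (3 here)
sh.insert((2.0, 2.0, 3.0), 'atom_2')
sh.insert((40.0, 0.0, 0.0), 'atom_3')
print(sh.nearby((0.0, 0.0, 0.0), 10.0))    # atom_1 and atom_2 are returned; atom_3 is too far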
143,671 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/pymath/cartesian/spatialhash.py
|
klab.pymath.cartesian.spatialhash.SpatialHash3D
|
class SpatialHash3D:
# Optimized version of SpatialHash for three dimensional space
# Optimizations work by assuming the following:
    #  The space is three-dimensional;
    #  The volume of a quadrant is r*r*r and r is always used as the radius for searching. This allows us to write a simple loop over the quadrants surrounding the position.
def __init__(self, size):
self.size = size
self.quads = {}
def quadkey(self, position):
msize = self.size
return (int(localfloor(position[0]/msize)), int(localfloor(position[1]/msize)), int(localfloor(position[2]/msize)))
# Avoids the overhead of tuple creation
def quadkeyxyz(self, posx, posy, posz):
msize = self.size
return (int(localfloor(posx/msize)), int(localfloor(posy/msize)), int(localfloor(posz/msize)))
def insert(self, position, data):
key = self.quadkey(position)
mquads = self.quads
mquads[key] = mquads.get(key, []) + [(position, data)]
def nearby(self, position):
radius = self.size
radiussquared = radius * radius
# Search all quadrants surrounding the position (a 3*3*3 cube with position in the center)
minkey = self.quadkeyxyz(position[0] - radius, position[1] - radius, position[2] - radius)
minx, miny, minz = minkey[0], minkey[1], minkey[2]
results = []
mquads = self.quads
for x in range(minx, minx + 3):
for y in range(miny, miny + 3):
for z in range(minz, minz + 3):
quadrant = mquads.get((x, y, z))
if quadrant:
for pos, data in quadrant:
distsquared = ((position[0] - pos[0]) ** 2) + ((position[1] - pos[1]) ** 2) + ((position[2] - pos[2]) ** 2)
if distsquared <= radiussquared:
results += [(pos, data)]
return results
|
class SpatialHash3D:
def __init__(self, size):
pass
def quadkey(self, position):
pass
def quadkeyxyz(self, posx, posy, posz):
pass
def insert(self, position, data):
pass
def nearby(self, position):
pass
| 6 | 0 | 7 | 1 | 6 | 0 | 2 | 0.19 | 0 | 2 | 0 | 0 | 5 | 2 | 5 | 5 | 48 | 11 | 31 | 24 | 25 | 6 | 31 | 24 | 25 | 7 | 0 | 6 | 11 |
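The 3D variant trades generality for speed: the query radius is always the cell size passed to the constructor, so nearby() only ever scans the surrounding 3x3x3 block of cells.

sh = SpatialHash3D(4.0)            # the cell size doubles as the fixed query radius
sh.insert((1.0, 1.0, 1.0), 'a')
sh.insert((3.0, 3.0, 3.0), 'b')
print(sh.nearby((0.0, 0.0, 0.0)))  # [((1.0, 1.0, 1.0), 'a')]; 'b' lies sqrt(27) > 4 away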
143,672 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/rosetta/input_files.py
|
klab.rosetta.input_files.LoopsFile
|
class LoopsFile(object):
    '''A class to manipulate loops files. Note that the indices in these files are 1-indexed, i.e. a start position of 5
refers to the fifth residue of the sequence.'''
@staticmethod
def from_filepath(filepath, ignore_whitespace = True, ignore_errors = False):
return LoopsFile(read_file(filepath), ignore_whitespace = ignore_whitespace, ignore_errors = ignore_errors)
def __init__(self, contents, ignore_whitespace = True, ignore_errors = False):
self.data = []
self.parse_loops_file(contents, ignore_whitespace = ignore_whitespace, ignore_errors = ignore_errors)
def parse_loops_file(self, contents, ignore_whitespace = True, ignore_errors = False):
'''This parser is forgiving and allows leading whitespace.'''
for l in [l for l in contents.strip().split('\n') if l]:
try:
if ignore_whitespace:
l = l.strip()
tokens = l.split()
if len(tokens) < 3:
raise RosettaFileParsingException('Lines in a loops file must have at least three entries.')
if len(tokens) < 4:
tokens.append(None)
self.data.append(self.parse_loop_line(tokens))
except:
if ignore_errors:
continue
else:
raise
def parse_loop_line(self, tokens):
if tokens[0] != 'LOOP':
raise RosettaFileParsingException('Lines in a loops file must start with the keyword "LOOP".')
try:
if tokens[3] == None:
tokens[3] = 0 # add the default cut point residue number
res_numbers = list(map(int, tokens[1:4]))
if min(res_numbers) < 0:
raise RosettaFileParsingException('The cut point and start and end residues indices must be positive integers.')
if not((res_numbers[2] == 0) or res_numbers[0] <= res_numbers[2] <= res_numbers[1]):
raise RosettaFileParsingException('The cut point must lie between the start and end residues.')
except:
raise RosettaFileParsingException('Integers are expected in columns 2-4 of loops files.')
skip_rate = None
if len(tokens) > 4 and tokens[4] != None:
try:
skip_rate = float(tokens[4])
except:
raise RosettaFileParsingException('The skip rate in column 5 is expected to be a floating-point number.')
extend_loop = False
if len(tokens) > 5 and tokens[5] != None:
extend_loop = tokens[5].lower() # allow some typos
if extend_loop not in ['1', '0', 'true', 'false']:
raise RosettaFileParsingException('The extend loop argument in column 6 is expected to be "true", "false", "0", or "1".')
extend_loop = extend_loop == '1' or extend_loop == 'true'
d = dict(
start = res_numbers[0],
end = res_numbers[1],
cut_point = res_numbers[2],
skip_rate = skip_rate,
extend_loop = extend_loop
)
return d
def add(self, start, end, cut_point = None, skip_rate = None, extend_loop = None):
'''Add a new loop definition.'''
self.data.append(self.parse_loop_line(['LOOP', start, end, cut_point, skip_rate, extend_loop]))
assert(start <= end)
def get_distinct_segments(self, left_offset = 0, right_offset = 0, sequence_length = None):
'''Returns a list of segments (pairs of start and end positions) based on the loop definitions. The returned segments
merge overlapping loops e.g. if the loops file contains sections 32-40, 23-30, 28-33, and 43-46 then the returned
segments will be [(23, 40), (43, 46)].
This may not be the fastest way to calculate this (numpy?) but that is probably not an issue.
        The offsets are used to select the residues surrounding the loop regions. For example, if a sequence segment
is 7 residues long at positions 13-19 and we require 9-mers, we must consider the segment from positions 5-27 so
that all possible 9-mers are considered.
'''
# Create a unique, sorted list of all loop terminus positions
positions = set()
for l in self.data:
assert(l['start'] <= l['end'])
if sequence_length:
# If we know the sequence length then we can return valid positions
positions = positions.union(list(range(max(1, l['start'] - left_offset + 1), min(sequence_length + 1, l['end'] + 1 + right_offset - 1)))) # For clarity, I did not simplify the expressions. The left_offset requires a +1 to be added, the right_offset requires a -1 to be added. The right offset also requires a +1 due to the way Python splicing works.
else:
# Otherwise, we may return positions outside the sequence length however Python splicing can handle this gracefully
positions = positions.union(list(range(max(1, l['start'] - left_offset + 1), l['end'] + 1 + right_offset - 1))) # For clarity, I did not simplify the expressions. The left_offset requires a +1 to be added, the right_offset requires a -1 to be added. The right offset also requires a +1 due to the way Python splicing works.
positions = sorted(positions)
# Iterate through the list to define the segments
segments = []
current_start = None
last_position = None
for p in positions:
if current_start == None:
current_start = p
last_position = p
else:
if p == last_position + 1:
last_position = p
else:
segments.append((current_start, last_position))
current_start = p
last_position = p
if current_start and last_position:
segments.append((current_start, last_position))
return segments
|
class LoopsFile(object):
    '''A class to manipulate loops files. Note that the indices in these files are 1-indexed, i.e. a start position of 5
refers to the fifth residue of the sequence.'''
@staticmethod
def from_filepath(filepath, ignore_whitespace = True, ignore_errors = False):
pass
def __init__(self, contents, ignore_whitespace = True, ignore_errors = False):
pass
def parse_loops_file(self, contents, ignore_whitespace = True, ignore_errors = False):
'''This parser is forgiving and allows leading whitespace.'''
pass
def parse_loop_line(self, tokens):
pass
def add(self, start, end, cut_point = None, skip_rate = None, extend_loop = None):
'''Add a new loop definition.'''
pass
def get_distinct_segments(self, left_offset = 0, right_offset = 0, sequence_length = None):
'''Returns a list of segments (pairs of start and end positions) based on the loop definitions. The returned segments
merge overlapping loops e.g. if the loops file contains sections 32-40, 23-30, 28-33, and 43-46 then the returned
segments will be [(23, 40), (43, 46)].
This may not be the fastest way to calculate this (numpy?) but that is probably not an issue.
    The offsets are used to select the residues surrounding the loop regions. For example, if a sequence segment
is 7 residues long at positions 13-19 and we require 9-mers, we must consider the segment from positions 5-27 so
that all possible 9-mers are considered.
'''
pass
| 8 | 4 | 17 | 1 | 14 | 3 | 5 | 0.24 | 1 | 8 | 1 | 0 | 5 | 1 | 6 | 6 | 118 | 17 | 85 | 21 | 77 | 20 | 74 | 20 | 67 | 10 | 1 | 3 | 27 |
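A usage sketch: parse loop definitions from a string and merge the overlapping ones into distinct segments; with offsets of 1 the loop endpoints themselves are included, reproducing the docstring example.

contents = '''
LOOP 23 30
LOOP 28 33 30
LOOP 32 40 35 0.5 true
LOOP 43 46
'''
lf = LoopsFile(contents)
print(lf.get_distinct_segments(left_offset = 1, right_offset = 1))  # [(23, 40), (43, 46)]
lf.add(50, 55, cut_point = 52)  # append a further loop definition programmatically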
143,673 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/rosetta/input_files.py
|
klab.rosetta.input_files.Mutfile
|
class Mutfile (object):
'''Note: This class behaves differently to Resfile. It stores mutation information using the SimpleMutation class.
Rosetta mutfiles are text files split into sections where each section contains a number of mutations i.e. each
section defines one mutagenesis.
Mutfile objects represent the contents of these files by storing the mutations as a list of SimpleMutation lists.
'''
    header_pattern = r'^total\s+(\d+)\s*(?:#.*)?$'
    mutation_group_header_pattern = r'^\s*(\d+)\s*(?:#.*)?$'
    mutation_pattern = r'^\s*([A-Z])\s+(\d+)\s+([A-Z])\s*(?:#.*)?$'
@staticmethod
def from_file(filepath):
return Mutfile(open(filepath).read())
@staticmethod
def from_mutagenesis(mutations):
'''This is a special case (the common case) of from_mutations where there is only one mutagenesis/mutation group.'''
return Mutfile.from_mutageneses([mutations])
@staticmethod
def from_mutageneses(mutation_groups):
'''mutation_groups is expected to be a list containing lists of SimpleMutation objects.'''
mf = Mutfile()
mf.mutation_groups = mutation_groups
return mf
def __repr__(self):
'''Creates a mutfile from the set of mutation groups.'''
s = []
# Header
total_number_of_mutations = sum([len(mg) for mg in self.mutation_groups])
s.append('total %d' % total_number_of_mutations)
# Mutation groups
for mg in self.mutation_groups:
assert(len(mg) > 0)
s.append('%d' % len(mg))
# Mutation list
for m in mg:
s.append('%(WildTypeAA)s %(ResidueID)d %(MutantAA)s' % m.__dict__)
s.append('')
return '\n'.join(s)
def __init__(self, mutfile_content = None):
self.mutation_groups = []
if mutfile_content:
# Parse the file header
mutfile_content = mutfile_content.strip()
data_lines = [l for l in mutfile_content.split('\n') if l.strip()]
try:
num_mutations = int(re.match(Mutfile.header_pattern, data_lines[0]).group(1))
except:
raise RosettaFileParsingException('The mutfile has a bad header (expected "total n" where n is an integer).')
line_counter, mutation_groups = 1, []
while True:
if line_counter >= len(data_lines):
break
mutation_group_number = len(mutation_groups) + 1
# Parse the group header
try:
group_header = data_lines[line_counter]
line_counter += 1
num_mutations_in_group = int(re.match(Mutfile.mutation_group_header_pattern, group_header).group(1))
if num_mutations_in_group < 1:
                        raise RosettaFileParsingException('The mutfile has a bad record in mutation group %d: the number of reported mutations must be an integer greater than zero.' % mutation_group_number)
except:
raise RosettaFileParsingException('The mutfile has a bad header for mutation group %d.' % mutation_group_number)
# Parse the mutations in the group
try:
mutations = []
for mutation_line in data_lines[line_counter: line_counter + num_mutations_in_group]:
mtch = re.match(Mutfile.mutation_pattern, mutation_line)
mutations.append(SimpleMutation(mtch.group(1), int(mtch.group(2)), mtch.group(3)))
mutation_groups.append(mutations)
line_counter += num_mutations_in_group
except:
raise RosettaFileParsingException('An exception occurred while parsing the mutations for mutation group %d.' % mutation_group_number)
if sum([len(mg) for mg in mutation_groups]) != num_mutations:
raise RosettaFileParsingException('A total of %d mutations were expected from the file header but the file contained %d mutations.' % (num_mutations, sum([len(mg) for mg in mutation_groups])))
self.mutation_groups = mutation_groups
def get_total_mutation_count(self):
return sum([len(mg) for mg in self.mutation_groups])
|
class Mutfile (object):
'''Note: This class behaves differently to Resfile. It stores mutation information using the SimpleMutation class.
Rosetta mutfiles are text files split into sections where each section contains a number of mutations i.e. each
section defines one mutagenesis.
Mutfile objects represent the contents of these files by storing the mutations as a list of SimpleMutation lists.
'''
@staticmethod
def from_file(filepath):
pass
@staticmethod
def from_mutagenesis(mutations):
'''This is a special case (the common case) of from_mutations where there is only one mutagenesis/mutation group.'''
pass
@staticmethod
def from_mutageneses(mutation_groups):
'''mutation_groups is expected to be a list containing lists of SimpleMutation objects.'''
pass
def __repr__(self):
'''Creates a mutfile from the set of mutation groups.'''
pass
def __init__(self, mutfile_content = None):
pass
def get_total_mutation_count(self):
pass
| 10 | 4 | 12 | 2 | 9 | 2 | 3 | 0.27 | 1 | 3 | 2 | 0 | 3 | 1 | 6 | 6 | 93 | 17 | 62 | 27 | 52 | 17 | 59 | 24 | 52 | 10 | 1 | 4 | 17 |
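A round-trip sketch: build a mutfile for a single mutagenesis and parse it back. It assumes SimpleMutation(wildtype, residue_id, mutant), the constructor order used by the parser above, importable from klab.bio.basics.

mutations = [SimpleMutation('A', 123, 'G'), SimpleMutation('L', 130, 'P')]
mf = Mutfile.from_mutagenesis(mutations)
print(mf)
# total 2
# 2
# A 123 G
# L 130 P
reparsed = Mutfile(str(mf))
assert reparsed.get_total_mutation_count() == 2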
143,674 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/rosetta/input_files.py
|
klab.rosetta.input_files.Resfile
|
class Resfile (object):
# Contains no support for noncanonical commands
# Contains no support for insertion codes
def __init__(self, input_resfile = None, input_mutageneses = None):
self.allaa = 'ACDEFGHIKLMNPQRSTVWY'
self.allaa_set = set()
for aa in self.allaa:
self.allaa_set.add(aa)
        self.polar = 'DEHKNQRST'
self.polar_set = set()
for aa in self.polar:
self.polar_set.add(aa)
self.apolar = 'ACFGILMPVWY'
self.apolar_set = set()
for aa in self.apolar:
self.apolar_set.add(aa)
self.design = {}
self.repack = {}
        self.wild_type_residues = {} # Maps residue numbers to their wild-type identities, for sanity checking
self.global_commands = []
if input_resfile:
self.__init_from_file(input_resfile)
elif input_mutageneses:
self.__init_from_mutageneses(input_mutageneses)
else:
raise Exception("The Resfile __init__ function needs either an input resfile argument or mutageneses")
def __init_from_mutageneses(self, input_mutageneses):
self.global_commands.append('NATRO')
for simple_mutation in input_mutageneses:
assert( simple_mutation.Chain != None )
chain = simple_mutation.Chain
wt = simple_mutation.WildTypeAA
mut = simple_mutation.MutantAA
resnum = str(simple_mutation.ResidueID).strip()
if chain not in self.design:
self.design[chain] = {}
if resnum not in self.design[chain]:
self.design[chain][resnum] = set()
self.design[chain][resnum].add( mut )
if chain in self.wild_type_residues and resnum in self.wild_type_residues[chain]:
assert( self.wild_type_residues[chain][resnum] == wt )
else:
if chain not in self.wild_type_residues:
self.wild_type_residues[chain] = {}
self.wild_type_residues[chain][resnum] = wt
def __init_from_file(self, filename):
        index_pattern = r'^(\d+[a-zA-Z]*)\s+'
        range_pattern = r'^(\d+[a-zA-Z]*)\s+[-]\s+(\d+[a-zA-Z]*)\s+'
        wildcard_pattern = r'^[*]\s+'
command_pattern = '({}|{}|{})([A-Z])\s+([A-Z]+)\s*([A-Z]*)'.format(
index_pattern, range_pattern, wildcard_pattern)
before_start = True
with open(filename) as file:
for line in file:
if before_start:
if line.lower().startswith('start'):
before_start = False
else:
self.global_commands.append( line.strip() )
else:
index_match = re.match(index_pattern, line)
range_match = re.match(range_pattern, line)
wildcard_match = re.match(wildcard_pattern, line)
command_match = re.match(command_pattern, line)
if not command_match: continue
command = command_match.groups()[5].upper()
chain = command_match.groups()[4].upper()
if command_match.groups()[2]:
range_start = command_match.groups()[2]
else:
range_start = None
if command_match.groups()[3]:
range_end = command_match.groups()[3]
else:
range_end = None
if command_match.groups()[1]:
range_singleton = command_match.groups()[1]
else:
range_singleton = None
# Process chain/residue range
new_residues = []
if range_start and range_end:
range_start_num = int(''.join([x for x in
range_start if x.isdigit()])) + 1
range_end_num = int(''.join([x for x in
range_end if x.isdigit()]))
new_residues.append( range_start )
if range_start_num < range_end_num:
new_residues.extend( [str(x) for x in range(range_start_num, range_end_num+1)] )
new_residues.append( range_end )
elif range_singleton:
new_residues.append( range_singleton )
elif wildcard_match:
new_residues.append( '*' )
else:
raise Exception('No reference to residue number or range found')
if command == 'NATRO':
# Useless do-nothing command
continue
elif command == 'NATAA':
# Repack only command
if chain not in self.repack:
self.repack[chain] = []
self.repack[chain].extend( new_residues )
else:
# Design command
if chain not in self.design:
self.design[chain] = {}
for resnum in new_residues:
if command == 'ALLAA':
self.design[chain][resnum] = self.allaa_set
elif command == 'PIKAA':
allowed_restypes = set()
for restype in command_match.groups()[6].upper():
allowed_restypes.add(restype)
self.design[chain][resnum] = allowed_restypes
elif command == 'NOTAA':
allowed_restypes = set(self.allaa_set)
for restype in command_match.groups()[6].upper():
allowed_restypes.remove(restype)
self.design[chain][resnum] = allowed_restypes
elif command == 'POLAR':
self.design[chain][resnum] = self.polar_set
elif command == 'APOLAR':
self.design[chain][resnum] = self.apolar_set
else:
raise Exception("Error: command %s not recognized" % command)
@property
def designable(self):
        # This method only returns residue numbers; it does not consider chains.
        # Any wildcard chain commands will be ignored by this function.
return_list = []
for chain in self.design:
for residue in self.design[chain]:
if residue != '*':
return_list.append(residue)
return sorted(return_list)
@property
def packable(self):
        # This method only returns residue numbers; it does not consider chains.
        # Any wildcard chain commands will be ignored by this function.
return_list = []
for chain in self.repack:
for residue in self.repack[chain]:
if residue != '*':
return_list.append(residue)
return sorted(return_list + self.designable)
@property
def chains(self):
# Returns a list of all chains
return_set = set()
for chain in self.design:
return_set.add(chain)
for chain in self.repack:
return_set.add(chain)
return sorted(list(return_set))
@property
def residues(self):
# Returns a list of all residues
return_list = []
for chain in self.design:
for resnum in list(self.design[chain].keys()):
return_list.append(resnum)
for chain in self.repack:
for resnum in self.repack[chain]:
return_list.append(resnum)
return sorted(return_list)
@property
def design_positions(self):
return_dict = {}
for chain in self.design:
return_dict[chain] = sorted(self.design[chain].keys())
return return_dict
@property
def repack_positions(self):
return self.repack
@staticmethod
def from_mutagenesis(mutations):
'''This is a special case (the common case) of from_mutations where there is only one mutagenesis/mutation group.'''
return Resfile.from_mutageneses([mutations])
@staticmethod
def from_mutageneses(mutation_groups):
'''mutation_groups is expected to be a list containing lists of SimpleMutation objects.'''
return Resfile(input_mutageneses = mutation_groups)
def __repr__(self):
return_string = ''
for command in self.global_commands:
return_string += command + '\n'
return_string += 'start\n'
residues = self.residues
for chain in self.chains:
for residue in self.residues:
if chain in self.design and residue in self.design[chain]:
all_design_aas = ''
for aa in sorted(list(self.design[chain][residue])):
all_design_aas += aa
return_string += '%s %s PIKAA %s\n' % (str(residue), chain, all_design_aas)
if chain in self.repack and residue in self.repack[chain]:
return_string += '%s %s NATAA\n' % (str(residue), chain)
return return_string
def __eq__(self, other):
return self.__dict__ == other.__dict__
|
class Resfile (object):
def __init__(self, input_resfile = None, input_mutageneses = None):
pass
def __init_from_mutageneses(self, input_mutageneses):
pass
def __init_from_file(self, filename):
pass
@property
def designable(self):
pass
@property
def packable(self):
pass
@property
def chains(self):
pass
@property
def residues(self):
pass
@property
def design_positions(self):
pass
@property
def repack_positions(self):
pass
@staticmethod
def from_mutagenesis(mutations):
'''This is a special case (the common case) of from_mutations where there is only one mutagenesis/mutation group.'''
pass
@staticmethod
def from_mutageneses(mutation_groups):
'''mutation_groups is expected to be a list containing lists of SimpleMutation objects.'''
pass
def __repr__(self):
pass
def __eq__(self, other):
pass
| 22 | 2 | 16 | 2 | 14 | 1 | 5 | 0.08 | 1 | 6 | 0 | 0 | 11 | 10 | 13 | 13 | 235 | 33 | 188 | 80 | 166 | 15 | 160 | 71 | 146 | 24 | 1 | 7 | 65 |
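A usage sketch parsing a small resfile: header commands appear before the 'start' line, followed by per-residue commands (single positions, ranges, or the '*' wildcard). The file name is hypothetical; the constructor reads from a path.

resfile_text = '''NATRO
start
10 A PIKAA AG
12 - 14 A NATAA
* B ALLAA
'''
with open('example.resfile', 'w') as f:
    f.write(resfile_text)
rf = Resfile(input_resfile = 'example.resfile')
print(rf.designable)        # ['10']
print(rf.design_positions)  # {'A': ['10'], 'B': ['*']}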
143,675 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/unmerged/rpache/PDB_files.py
|
klab.unmerged.rpache.PDB_files.Model
|
class Model:
def __init__(self,serial='',chains=[]):
self.serial=serial
self.chains=chains
def out(self):
print(self.serial)
for chain in self.chains:
chain.out()
def write(self):
outstring='MODEL '+' '+self.serial+'\n'
for chain in self.chains:
outstring+=chain.write()
#-
outstring+='ENDMDL\n'
return outstring
def write_plain(self):
outstring=''
for chain in self.chains:
outstring+=chain.write()
#-
return outstring
|
class Model:
def __init__(self,serial='',chains=[]):
pass
def out(self):
pass
def write(self):
pass
def write_plain(self):
pass
| 5 | 0 | 5 | 0 | 5 | 1 | 2 | 0.11 | 0 | 0 | 0 | 0 | 4 | 2 | 4 | 4 | 24 | 3 | 19 | 12 | 14 | 2 | 19 | 12 | 14 | 2 | 0 | 1 | 7 |
143,676 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/unmerged/rpache/PDB_files.py
|
klab.unmerged.rpache.PDB_files.Gunziplines
|
class Gunziplines:
def __init__(self,fname):
self.f = Popen(['gunzip', '-c', fname], stdout=PIPE)
def readlines(self):
for line in self.f.stdout:
yield line
def killGunzip(self):
if self.f.poll() == None:
os.kill(self.f.pid,signal.SIGHUP)
|
class Gunziplines:
def __init__(self,fname):
pass
def readlines(self):
pass
def killGunzip(self):
pass
| 4 | 0 | 3 | 0 | 3 | 0 | 2 | 0 | 0 | 1 | 0 | 0 | 3 | 1 | 3 | 3 | 9 | 0 | 9 | 6 | 5 | 0 | 9 | 6 | 5 | 2 | 0 | 1 | 5 |
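A usage sketch streaming a gzipped PDB file without decompressing it to disk; it requires the gunzip binary, the file name is hypothetical, and under Python 3 the generator yields bytes.

gz = Gunziplines('structure.pdb.gz')  # hypothetical input file
for line in gz.readlines():
    if line.startswith(b'ATOM'):
        pass  # process coordinate records here
gz.killGunzip()                       # terminate gunzip if it is still running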
143,677 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/unmerged/rpache/PDB_files.py
|
klab.unmerged.rpache.PDB_files.Chain
|
class Chain:
def __init__(self,id='',residues=[],protein_id='',expression_system='',molecule=''):
self.id=id
self.residues=residues
self.canonical_residues=[]
for residue in residues:
if residue.is_canonical:
self.canonical_residues.append(residue)
#--
self.protein_id=protein_id
self.expression_system=expression_system
self.molecule=molecule
def out(self):
print(self.id)
for residue in self.residues:
residue.out()
def write(self):
outstring=''
for residue in self.residues:
outstring+=residue.write()
#-
outstring+='TER\n'
return outstring
|
class Chain:
def __init__(self,id='',residues=[],protein_id='',expression_system='',molecule=''):
pass
def out(self):
pass
def write(self):
pass
| 4 | 0 | 7 | 0 | 7 | 1 | 2 | 0.1 | 0 | 0 | 0 | 0 | 3 | 6 | 3 | 3 | 25 | 2 | 21 | 14 | 17 | 2 | 21 | 14 | 17 | 3 | 0 | 2 | 7 |
143,678 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/unmerged/rpache/PDB_files.py
|
klab.unmerged.rpache.PDB_files.Atom
|
class Atom:
def __init__(self,type='ATOM ',serial='',name='',alt_loc='',res_name='',chain_id='',res_seq='',x='',y='',z='',occupancy='',temp_factor='',spacer='',element='',charge=''):
self.type=type
self.serial=serial
self.name=name
self.alt_loc=alt_loc
self.res_name=res_name
self.chain_id=chain_id
self.res_seq=res_seq
self.x=x
self.y=y
self.z=z
self.occupancy=occupancy
self.temp_factor=temp_factor
self.spacer=spacer
self.element=element
self.charge=charge
def out(self):
print(self.type,self.serial,self.name,self.alt_loc,self.res_name,self.chain_id,self.res_seq,self.x,self.y,self.z,self.occupancy,self.temp_factor,self.element,self.charge)
def write(self):
return self.type+self.serial+' '+self.name+self.alt_loc+self.res_name+' '+self.chain_id+self.res_seq+' '+self.x+self.y+self.z+self.occupancy+self.temp_factor+self.spacer+self.element+self.charge+'\n'
|
class Atom:
def __init__(self,type='ATOM ',serial='',name='',alt_loc='',res_name='',chain_id='',res_seq='',x='',y='',z='',occupancy='',temp_factor='',spacer='',element='',charge=''):
pass
def out(self):
pass
def write(self):
pass
| 4 | 0 | 7 | 0 | 7 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 3 | 15 | 3 | 3 | 23 | 2 | 21 | 19 | 17 | 0 | 21 | 19 | 17 | 1 | 0 | 0 | 3 |
143,679 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/stats/dataframe.py
|
klab.stats.dataframe.DatasetDataFrame
|
class DatasetDataFrame(object):
    '''A class to store benchmark data where one series contains the reference or experimental values and the remaining
columns contain predicted results from benchmark runs.'''
# Default name for the set of data for analysis
reference_dataset_name = 'Benchmark dataset'
def iprint(msg): # Note: no args, kwargs
print((str(msg)))
def __init__(self, dataframe,
reference_column_index = 0, restricted_predicted_column_indices = [], restricted_predicted_column_names = [],
reference_dataset_name = None, index_layers = [],
log_level = 10, log_fn = iprint):
'''Expects a dataframe where:
the indices correspond to some set of IDs e.g. dataset case IDs;
there is one reference column/series against which all other series will be pairwise compared.
For example, the reference column may contain experimental data and the remaining columns may contain predicted data
from multiple benchmarking runs.
The class handles multi-indexed data e.g. data grouped by 'Team' and then 'Method'. In that case, the index_layers
list can be specified to name these layers. In the example above, we would use index_layers = ['Team', 'Method'].
:param reference_column: The index of the column which contains the reference data series. Indices are zero-indexed.
:param restricted_predicted_column_indices: Indices of columns which should be considered for analysis.
        :param restricted_predicted_column_names: Names of columns which should be considered for analysis.
:param log_level: All log messages with levels set below log_level will be printed.
:param log_fn: The logging function. This expects exactly one argument which is then passed to the print statement.
By default, all non-reference columns are analyzed. If either restricted_predicted_column_indices or restricted_predicted_column_names
is set then the set of columns is restricted to the *union* of those two lists.
'''
# Setup
reference_dataset_name = reference_dataset_name or DatasetDataFrame.reference_dataset_name
all_series_names = dataframe.columns.values
num_series = len(all_series_names)
assert(num_series == len(set(all_series_names)))
if restricted_predicted_column_names != []:
assert(isinstance(restricted_predicted_column_names, list))
for n in restricted_predicted_column_names:
# Allow either string indexing (typical) or tuple indexing (for multi-indexed columns)
if isinstance(n, tuple):
# Multi-indexing case
try:
t = dataframe[n]
except:
raise Exception('Could not find multi-indexed column {0}.'.format(n))
else:
assert(isinstance(n, str))
assert(n in dataframe.columns.values)
if restricted_predicted_column_indices != []:
assert (isinstance(restricted_predicted_column_indices, list))
for n in restricted_predicted_column_indices:
assert(isinstance(n, int))
assert(0 <= n < num_series)
# Dataframe variables
# If a multi-indexed dataframe is passed then the column names will be tuples rather than simple types and self.multi_indexed will be set
self.log_level = log_level
self.log_fn = log_fn
self.df = dataframe
# Handle single- and multi-indexed dataframes
self.multi_indexed = False
self.index_layers = index_layers
if len(dataframe.columns) > 0 and len(dataframe.columns.values[0]) > 1:
self.multi_indexed = True
if self.index_layers:
assert(len(self.index_layers) == len(dataframe.columns.values[0]))
self.index_layers = tuple(map(str, self.index_layers))
else:
self.index_layers = tuple(['Group {0}'.format(n + 1) for n in range(len(dataframe.columns.values[0]))])
else:
if self.index_layers:
assert(len(self.index_layers) == 1)
else:
self.index_layers = ['Group']
self.index_layers = tuple(self.index_layers)
self.series_index = {}
self.series_names = dict(list(zip(list(range(num_series)), list(dataframe.columns.values))))
self.reference_dataset_name = reference_dataset_name
for k, v in self.series_names.items():
self.series_index[v] = k
assert(len(self.series_index) == len(self.series_names))
# Set up series
# self.reference_series is the name of the reference series' column
# self.data_series is the list of names of the prediction series' columns
if not (0 <= reference_column_index < num_series):
raise Exception('Reference column index {0} is out of bounds (bounds are 0,..,{1}).'.format(reference_column_index, num_series - 1))
elif reference_column_index in restricted_predicted_column_indices:
raise Exception('The reference column index {0} was specified as a prediction series in the restricted_predicted_column_indices list ({1}).'.format(reference_column_index, ', '.join(map(str, restricted_predicted_column_indices))))
elif self.series_names[reference_column_index] in restricted_predicted_column_names:
raise Exception('The reference column {0} was specified as a prediction series in the restricted_predicted_column_names list ({1}).'.format(self.series_names[reference_column_index], ', '.join(restricted_predicted_column_names)))
self.reference_series = self.series_names[reference_column_index]
self.data_series = self.get_series_names(column_indices = restricted_predicted_column_indices, column_names = restricted_predicted_column_names)
self.analysis = {}
# Keep a reference to the full dataframe
self.df_full = dataframe
# During analysis, we consider the subset of cases where all series have data so we use dropna to filter out the
# cases with missing data. To prevent filtering out too many cases, we first restrict the dataset to the series
# of interest
self.df = dataframe.copy()
dropped_series = [n for n in all_series_names if n not in [self.reference_series] + self.data_series]
self.df.drop(dropped_series, axis = 1, inplace = True)
# Add a series with the absolute errors
for dseries in self.data_series:
# It may be cleaner to create new dataframes for the absolute error values but we are already creating a new dataframe for the pruned data
# todo: do we ever use these columns? not for reporting although it could be useful when dumping to text to create them at that point
if self.multi_indexed:
new_series = list(dseries)
new_series[-1] = new_series[-1] + '_abs_error'
new_series = tuple(new_series)
assert(new_series not in dataframe.columns.values)
self.df[new_series] = abs(self.df[self.reference_series] - self.df[dseries])
else:
assert(dseries + '_abs_error' not in dataframe.columns.values)
self.df[dseries + '_abs_error'] = abs(self.df[self.reference_series] - self.df[dseries])
# Now that we have pruned by column, we drop records with missing data
self.df_pruned = self.df.dropna()
num_pruned_cases = len(self.df) - len(self.df_pruned)
if num_pruned_cases:
self.log('{0} cases do not have data for all series. Datasets pruned to the set of common records will be asterisked in the tabular results.'.format(num_pruned_cases), 3)
@staticmethod
def from_csv(id_field_column = 0, reference_field_column = 1, reference_dataset_name = None,
id_col_name = None, reference_col_name = None, comparison_col_name_prefix = 'Predicted',
columns_to_omit = [], columns_to_ignore = [],
headers_included = True, separator = ',', ignore_lines_starting_with = None,
log_level = 10, log_fn = iprint,
):
"""
:param id_field_column: The column index (zero-indexed) whose corresponding column contains the ID (record/case ID) field. Values in this field must be integer values.
:param reference_field_column: The column index (zero-indexed) whose corresponding column contains the reference data (X-axis) e.g. experimental measurements or reference data. Values in this field are assumed to be integers or floating-point values. Otherwise, they will be imported as NaN.
:param reference_dataset_name: The name of the dataset being analyzed.
:param id_col_name: The name of the ID column. This will override any name in the header. If the header does not exist, this defaults to "ID".
        :param reference_col_name: The name of the reference column. This will override any name in the header. If the header does not exist, this defaults to "Experimental".
:param comparison_col_name_prefix: Any remaining unnamed columns will be named (comparison_col_name_prefix + '_' + i) with integer i increasing.
:param columns_to_omit: These columns will be omitted from the dataframe.
:param columns_to_ignore: These columns will be included in the dataframe but not considered in analysis.
:param headers_included: Whether the CSV file has a header line. If this line exists, it must be the first parsable (non-blank and which does not start with the value of ignore_lines_starting_with) line in the file.
:param separator: Column separator. For CSV imports, ',' should be used. For TSV imports, '\t' should be used.
        :param ignore_lines_starting_with: Any lines starting with this string will be ignored during parsing. This allows you to include comments in the CSV file.
:param log_level: See DatasetDataFrame constructor.
:param log_fn: See DatasetDataFrame constructor.
Note: Unlike the DatasetDataFrame constructor, this function does not currently handle multi-indexed tables.
"""
raise Exception('todo: Implement this.')
# ignore all columns_to_ignore
# if headers_included, set id_col_name, reference_col_name, etc.
# if not id_col_name: id_col_name = 'RecordID'
# if not reference_col_name: reference_col_name = 'Experimental'
# if not comparison_col_name_prefix: comparison_col_name_prefix = 'Predicted'
# assert num columns >= 2
# Set up these variables
dataframe = None
# dataset IDs should be used for the index
# reference_column_index should be the first column
# remaining columns should contain prediction data
return DatasetDataFrame(
dataframe,
reference_dataset_name = reference_dataset_name,
log_level = log_level,
log_fn = log_fn)
def log(self, msg, lvl = 0):
'''Log messages according to the logging level (0 is highest priority).'''
if self.log_level >= lvl:
self.log_fn(msg)
def get_series_names(self, column_indices = [], column_names = []):
'''Returns the series' names corresponding to column_indices and column_names.
"names" here are:
- strings for single-indexed dataframes; or
- tuples for multi-indexed dataframes.
If both parameters are empty then all column names are returned.
'''
n = []
if not column_indices and not column_names:
for k, v in sorted(self.series_names.items()):
# Iterate by index to preserve document order
if v != self.reference_series:
n.append(k)
else:
s = set([self.series_names[x] for x in column_indices])
t = set([self.series_index[x] for x in column_names])
n = sorted(s.union(t))
assert(n)
return [self.series_names[x] for x in n]
def _analyze(self):
'''Run-once function to generate analysis over all series, considering both full and partial data.
Initializes the self.analysis dict which maps:
(non-reference) column/series -> 'full' and/or 'partial' -> stats dict returned by get_xy_dataset_statistics
'''
if not self.analysis:
for dseries in self.data_series:
# Count number of non-NaN rows
dseries_count = self.df[dseries].count()
assert(len(self.df_pruned) <= dseries_count <= len(self.df) or dseries_count)
self.analysis[dseries] = dict(
partial = None,
full = None,
)
# Compute the statistics for the common records
stats = get_xy_dataset_statistics_pandas(self.df_pruned, self.reference_series, dseries,
fcorrect_x_cutoff = 1.0, fcorrect_y_cutoff = 1.0,
bootstrap_data = False,
x_fuzzy_range = 0.1,
y_scalar = 1.0, ignore_null_values = True)
if (len(self.df_pruned) == len(self.df)):
# There are no pruned records so these are actually the full stats
self.analysis[dseries]['full'] = dict(data = stats, description = format_stats(stats, floating_point_format = '%0.3f', sci_notation_format = '%.2E', return_string = True))
else:
# Store the results for the partial dataset
self.analysis[dseries]['partial'] = dict(data = stats, description = format_stats(stats, floating_point_format = '%0.3f', sci_notation_format = '%.2E', return_string = True))
if dseries_count > len(self.df_pruned):
# This dataset has records which are not in the pruned dataset
stats = get_xy_dataset_statistics_pandas(self.df, self.reference_series, dseries,
fcorrect_x_cutoff = 1.0, fcorrect_y_cutoff = 1.0,
bootstrap_data = False,
x_fuzzy_range = 0.1,
y_scalar = 1.0, ignore_null_values = True)
self.analysis[dseries]['full'] = dict(data = stats, description = format_stats(stats, floating_point_format = '%0.3f', sci_notation_format = '%.2E', return_string = True))
return self.analysis
def to_csv(self, filepath):
return self.df.to_csv(filepath)
def get_stats(self):
self._analyze()
return self.analysis
def summarize(self, series_ = None, subset_ = None, summary_title_formatter = None, color = True):
self._analyze()
summary = []
for series, subset in sorted(self.analysis.items()):
if series_ == None or series_ == series:
ttl, sub_summary = series, []
for subset_type, v in sorted(subset.items()):
                    if subset_ == None or subset_ == subset_type:
if v:
if color:
sub_summary.append(colortext.make('Subset: ' + subset_type, 'yellow'))
else:
sub_summary.append('Subset: ' + subset_type)
sub_summary.append(self._summarize_case(v['data']))
if sub_summary:
if summary_title_formatter:
summary += [summary_title_formatter(series)] + sub_summary
else:
summary += [series] + sub_summary
if summary:
return '\n'.join(summary)
else:
return None
def _summarize_case(self, data):
assert(data)
s = []
data = copy.deepcopy(data)
# Some of the data is currently returned as tuples
data['significant_beneficient_sensitivity_n'] = data['significant_beneficient_sensitivity'][1]
data['significant_beneficient_sensitivity'] = data['significant_beneficient_sensitivity'][0]
data['significant_beneficient_specificity_n'] = data['significant_beneficient_specificity'][1]
data['significant_beneficient_specificity'] = data['significant_beneficient_specificity'][0]
data['pearsonr'], data['pearsonr_pvalue'] = data['pearsonr']
data['spearmanr'], data['spearmanr_pvalue'] = data['spearmanr']
return '''
Cardinality
\tn : {n:d}
\tNull cases: {num_null_cases:d}
Correlation
\tR-value: {pearsonr:<10.03f} Slope: {pearsonr_slope:<10.03f} pvalue={pearsonr_pvalue:.2E}\t (Pearson's R)
\tR-value: {pearsonr_origin:<10.03f} Slope: {pearsonr_slope_origin:<10.03f} - \t (Pearson's R through origin)
\trho : {spearmanr:<10.03f} - pvalue={spearmanr_pvalue:.2E}\t (Spearman's rho)
Error:
\tMAE : {MAE:<10.03f} Scaled MAE: {scaled_MAE:.03f}
\tFraction correct (FC): {fraction_correct:<10.03f} Fuzzy FC : {fraction_correct_fuzzy_linear:.03f}
Error (normalized, signficance = {std_dev_cutoff:.2f} standard deviations):
\tPercent correct sign (accuracy): {accuracy:.01%}
\tPercent significant predictions with correct sign (specificity): {specificity:.01%}
\tPercent significant experimental hits with correct prediction sign (sensitivity): {sensitivity:.01%}
\tPercent significant predictions are significant experimental hits (significance specificity): {significance_specificity:.01%}
\tPercent significant experimental hits are predicted significant hits (significance sensitivity): {significance_sensitivity:.01%}
\tPercent significant beneficial experimental hits are predicted significant beneficial hits (significant beneficient sensitivity): {significant_beneficient_sensitivity:.01%}
\tPercent significant beneficial predictions are significant beneficial experimental hits (significant beneficient specificity): {significant_beneficient_specificity:.01%}
\n'''.format(**data)
def tabulate(self, restricted_predicted_column_indices = [], restricted_predicted_column_names = [], dataset_name = None):
'''Returns summary analysis from the dataframe as a DataTable object.
DataTables are wrapped pandas dataframes which can be combined if they have the same width. This is useful for combining multiple analyses.
DataTables can be printed to terminal as a tabular string using their representation function (i.e. print(data_table)).
This function (tabulate) looks at specific analysis; this class (DatasetDataFrame) can be subclassed for custom tabulation.'''
self._analyze()
data_series = self.get_series_names(column_indices = restricted_predicted_column_indices, column_names = restricted_predicted_column_names)
# Determine the multi-index headers
group_names = []
for l in self.index_layers:
group_names.append(l)
# Set up the table headers
headers = ['Dataset'] + group_names + ['n', 'R', 'rho', 'MAE', 'Fraction correct ', 'FC sign', 'SB sensitivity', 'SB specificity']
table_rows = []
for dseries in data_series:
if isinstance(dseries, tuple):
dseries_l = list(dseries)
else:
assert(isinstance(dseries, str))
dseries_l = [dseries]
results = []
assert(len(self.index_layers) == len(dseries_l))
if self.analysis.get(dseries, {}).get('partial') and self.analysis.get(dseries, {}).get('full'):
results.append((dseries_l[:-1] + [dseries_l[-1] + '*'], self.analysis[dseries]['partial']))
results.append((dseries_l[:-1] + [dseries_l[-1]], self.analysis[dseries]['full']))
elif (self.analysis.get(dseries, {}).get('partial')):
results.append((dseries_l[:-1] + [dseries_l[-1] + '*'], self.analysis[dseries]['partial']))
elif (self.analysis.get(dseries, {}).get('full')):
results = [(dseries, self.analysis[dseries]['full'])]
for result in results:
n = result[1]['data']['n']
R = result[1]['data']['pearsonr'][0]
rho = result[1]['data']['spearmanr'][0]
mae = result[1]['data']['MAE']
fraction_correct = result[1]['data']['fraction_correct']
accuracy = result[1]['data']['accuracy']
SBSensitivity = '{0:.3f} / {1}'.format(result[1]['data']['significant_beneficient_sensitivity'][0], result[1]['data']['significant_beneficient_sensitivity'][1])
SBSpecificity = '{0:.3f} / {1}'.format(result[1]['data']['significant_beneficient_specificity'][0], result[1]['data']['significant_beneficient_specificity'][1])
method = result[0]
if isinstance(method, tuple):
method = list(method)
table_rows.append([dataset_name or self.reference_dataset_name] + method +
[n, R, rho, mae, fraction_correct, accuracy, SBSensitivity, SBSpecificity])
# Convert the lists into a (wrapped) pandas dataframe to make use of the pandas formatting code to save reinventing the wheel...
return DataTable(pandas.DataFrame(table_rows, columns = headers), self.index_layers)
|
class DatasetDataFrame(object):
'''A class to store benchmark data where one series contains the reference or experimental values and the remaining
columns contain predicted results from benchmark runs.'''
def iprint(msg):
pass
def __init__(self, dataframe,
reference_column_index = 0, restricted_predicted_column_indices = [], restricted_predicted_column_names = [],
reference_dataset_name = None, index_layers = [],
log_level = 10, log_fn = iprint):
'''Expects a dataframe where:
the indices correspond to some set of IDs e.g. dataset case IDs;
there is one reference column/series against which all other series will be pairwise compared.
For example, the reference column may contain experimental data and the remaining columns may contain predicted data
from multiple benchmarking runs.
The class handles multi-indexed data e.g. data grouped by 'Team' and then 'Method'. In that case, the index_layers
list can be specified to name these layers. In the example above, we would use index_layers = ['Team', 'Method'].
:param reference_column: The index of the column which contains the reference data series. Indices are zero-indexed.
:param restricted_predicted_column_indices: Indices of columns which should be considered for analysis.
:param restricted_predicted_column_names: Names of columns which should be considered for analysis.
:param log_level: All log messages with levels set below log_level will be printed.
:param log_fn: The logging function. This expects exactly one argument which is then passed to the print statement.
By default, all non-reference columns are analyzed. If either restricted_predicted_column_indices or restricted_predicted_column_names
is set then the set of columns is restricted to the *union* of those two lists.
'''
pass
@staticmethod
def from_csv(id_field_column = 0, reference_field_column = 1, reference_dataset_name = None,
id_col_name = None, reference_col_name = None, comparison_col_name_prefix = 'Predicted',
columns_to_omit = [], columns_to_ignore = [],
headers_included = True, separator = ',', ignore_lines_starting_with = None,
log_level = 10, log_fn = iprint,
):
'''
:param id_field_column: The column index (zero-indexed) whose corresponding column contains the ID (record/case ID) field. Values in this field must be integer values.
:param reference_field_column: The column index (zero-indexed) whose corresponding column contains the reference data (X-axis) e.g. experimental measurements or reference data. Values in this field are assumed to be integers or floating-point values. Otherwise, they will be imported as NaN.
:param reference_dataset_name: The name of the dataset being analyzed.
:param id_col_name: The name of the ID column. This will override any name in the header. If the header does not exist, this defaults to "ID".
:param reference_col_name: The name of the reference column. This will override any name in the header. If the header does not exist, this defaults to "Experimental".
:param comparison_col_name_prefix: Any remaining unnamed columns will be named (comparison_col_name_prefix + '_' + i) with integer i increasing.
:param columns_to_omit: These columns will be omitted from the dataframe.
:param columns_to_ignore: These columns will be included in the dataframe but not considered in analysis.
:param headers_included: Whether the CSV file has a header line. If this line exists, it must be the first parsable (non-blank and which does not start with the value of ignore_lines_starting_with) line in the file.
:param separator: Column separator. For CSV imports, ',' should be used. For TSV imports, '\t' should be used.
:param ignore_lines_starting_with: Any lines starting with this string will be ignored during parsing. This allows you to include comments in the CSV file.
:param log_level: See DatasetDataFrame constructor.
:param log_fn: See DatasetDataFrame constructor.
Note: Unlike the DatasetDataFrame constructor, this function does not currently handle multi-indexed tables.
'''
pass
def log(self, msg, lvl = 0):
'''Log messages according to the logging level (0 is highest priority).'''
pass
def get_series_names(self, column_indices = [], column_names = []):
'''Returns the series' names corresponding to column_indices and column_names.
"names" here are:
- strings for single-indexed dataframes; or
- tuples for multi-indexed dataframes.
If both parameters are empty then all column names are returned.
'''
pass
def _analyze(self):
'''Run-once function to generate analysis over all series, considering both full and partial data.
Initializes the self.analysis dict which maps:
(non-reference) column/series -> 'full' and/or 'partial' -> stats dict returned by get_xy_dataset_statistics
'''
pass
def to_csv(self, filepath):
pass
def get_stats(self):
pass
def summarize(self, series_ = None, subset_ = None, summary_title_formatter = None, color = True):
pass
def _summarize_case(self, data):
pass
def tabulate(self, restricted_predicted_column_indices = [], restricted_predicted_column_names = [], dataset_name = None):
'''Returns summary analysis from the dataframe as a DataTable object.
DataTables are wrapped pandas dataframes which can be combined if they have the same width. This is useful for combining multiple analyses.
DataTables can be printed to terminal as a tabular string using their representation function (i.e. print(data_table)).
This function (tabulate) looks at specific analysis; this class (DatasetDataFrame) can be subclassed for custom tabulation.'''
pass
| 13 | 7 | 32 | 4 | 21 | 8 | 5 | 0.38 | 1 | 11 | 1 | 0 | 10 | 12 | 11 | 11 | 382 | 66 | 230 | 74 | 209 | 88 | 171 | 65 | 159 | 17 | 1 | 6 | 52 |
143,680 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/stats/dataframe.py
|
klab.stats.dataframe.DataTable
|
class DataTable(object):
def __init__(self, dataframe, index_layers):
self.df = dataframe
self.index_layers = index_layers
self.headers = self.df.columns.values
def __add__(self, other):
if len(self.headers) != len(other.headers):
raise Exception('The two tables have different widths.')
if len(self.index_layers) != len(other.index_layers):
raise Exception('The two tables have different levels of grouping.')
if sorted(self.headers) != sorted(other.headers):
# todo: Think about what to do if the headers disagree (ignore and combine anyway or add additional columns?). Should we at least require the index layers to be identical?
raise Exception('The two tables have different headers and may contain different data.')
return DataTable(pandas.concat([self.df, other.df]), self.index_layers)
def __repr__(self):
'''
Note: This is very sensitive to the width of the table. Move the formatters into the constructor instead.
We reinvent the wheel here to work around a problem justifying text columns - the justify argument to to_string only
justifies the headers, which is not helpful here.'''
text_formatters = []
for i in range(len(self.index_layers)):
h = self.headers[1 + i]
max_str_len = max(len(h), self.df[h].map(str).map(len).max())
if max_str_len >= 7:
max_str_len += 3 # extra space
text_formatters.append('{{:<{}}}'.format(max_str_len).format)
return self.df.to_string(
index = False,
justify = 'left',
col_space = 9,
formatters = [None] + text_formatters +
['{:,d}'.format] + # n
(['{:.3f}'.format] * 5) +
(['{}'.format] * 2)
)
|
class DataTable(object):
def __init__(self, dataframe, index_layers):
pass
def __add__(self, other):
pass
def __repr__(self):
'''
Note: This is very sensitive to the width of the table. Move the formatters into the constructor instead.
We reinvent the wheel here to work around a problem justifying text columns - the justify argument to to_string only
justifies the headers, which is not helpful here.'''
pass
| 4 | 1 | 12 | 1 | 10 | 2 | 3 | 0.23 | 1 | 3 | 0 | 0 | 3 | 3 | 3 | 3 | 41 | 6 | 30 | 11 | 26 | 7 | 22 | 11 | 18 | 4 | 1 | 2 | 8 |
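DataTable.__add__ concatenates the wrapped dataframes after checking widths and headers, so compatible tables can be merged directly. A sketch assuming klab.stats.dataframe is importable; the headers mirror the layout produced by DatasetDataFrame.tabulate() because the formatter list in __repr__ expects exactly that shape, and all row values are hypothetical.
import pandas
from klab.stats.dataframe import DataTable

headers = ['Dataset', 'Method', 'n', 'R', 'rho', 'MAE', 'Fraction correct', 'FC sign', 'SB sensitivity', 'SB specificity']
row_1 = ['ToySet', 'run1', 100, 0.71, 0.69, 1.20, 0.78, 0.81, '0.500 / 10', '0.400 / 10']
row_2 = ['ToySet', 'run2', 100, 0.68, 0.65, 1.35, 0.74, 0.79, '0.450 / 10', '0.350 / 10']
t1 = DataTable(pandas.DataFrame([row_1], columns = headers), ['Method'])
t2 = DataTable(pandas.DataFrame([row_2], columns = headers), ['Method'])
print(t1 + t2)  # one merged table; mismatched headers would raise an Exception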
143,681 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/stats/counters.py
|
klab.stats.counters.FrequencyCounter
|
class FrequencyCounter(object):
def __init__(self):
self.items = {}
def add(self, item):
self.items[item] = self.items.get(item, 0) + 1
|
class FrequencyCounter(object):
def __init__(self):
pass
def add(self, item):
pass
| 3 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 1 | 0 | 0 | 1 | 2 | 1 | 2 | 2 | 8 | 2 | 6 | 4 | 3 | 0 | 6 | 4 | 3 | 1 | 1 | 0 | 2 |
143,682 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/scoretype.py
|
klab.scoretype.ScoreGroup
|
class ScoreGroup(object):
def __init__(self, comment):
self.comment = comment
self.score_terms = []
def add(self, score_term, comment = None):
self.score_terms.append(dict(name = score_term, comment = comment))
def __len__(self):
return len(self.score_terms)
|
class ScoreGroup(object):
def __init__(self, comment):
pass
def add(self, score_term, comment = None):
pass
def __len__(self):
pass
| 4 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 3 | 2 | 3 | 3 | 11 | 3 | 8 | 6 | 4 | 0 | 8 | 6 | 4 | 1 | 1 | 0 | 3 |
143,683 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/rosetta/log_files.py
|
klab.rosetta.log_files.LoopModelingTrajectory
|
class LoopModelingTrajectory:
prefix = 'protocols.loop_modeling.Loop(Builder|Protocol): '
stage_prefix = 'protocols.loop_modeler.LoopModeler: '
shared_pats = {
'index': '[,0-9]+',
'int': '[0-9]+',
'float': '[-.0-9]+',
'bool': '[01]',
}
build_pat = re.compile(stage_prefix + '\s*Build Stage')
centroid_pat = re.compile(stage_prefix + '\s*Centroid Stage')
fullatom_pat = re.compile(stage_prefix + '\s*Fullatom Stage')
ramp_sfxn_pat = re.compile(prefix +
'Ramp score function:\s*'
'score: (?P<score>{float}) REU; '
'chainbreak: (?P<chainbreak>{float}); '
'fa_rep: (?P<fa_rep>{float}); '
'rama: (?P<rama>{float}); '
'rama2b: (?P<rama2b>{float});'
.format(**shared_pats),
)
ramp_temp_pat = re.compile(prefix +
'Ramp temperature:\s*'
'temperature: (?P<temperature>{float});'
.format(**shared_pats),
)
propose_move_pat = re.compile(prefix +
'Propose move:\s*'
'iteration: (?P<iteration>{index}); '
'proposed: (?P<proposed>{bool}); '
'accepted: (?P<accepted>{bool}); '
'score: (?P<score>{float}) REU; '
'(RMSD: (?P<rmsd>{float}) Å; )?'
'time: (?P<time>{float}) s;'
.format(**shared_pats),
)
final_score_pat = re.compile(prefix +
'Final Score:\s*(?P<score>{float}) REU; '
'Chainbreak:\s*(?P<chainbreak>{float}) REU'
.format(**shared_pats),
)
final_rmsd_pat = re.compile(prefix +
'Final RMSD:\s*(?P<rmsd>{float}) Å '
'\(all loops, backbone heavy atom, no superposition\)'
.format(**shared_pats),
)
elapsed_time_pat = re.compile(prefix +
'Elapsed Time:\s*(?P<time>{int}) sec'
.format(**shared_pats),
)
@classmethod
def from_path(cls, path, *args, **kwargs):
with open(path) as file:
stdout = file.read()
return cls(stdout, *args, **kwargs)
@classmethod
def from_stdout(cls, stdout, *args, **kwargs):
return cls(stdout, *args, **kwargs)
def __init__(self, stdout):
self.scores = {}
self.rmsds = {}
self.moves = {}
self.times = {}
lines = stdout.split('\n')
curr_stage = None
curr_sfxn = None
curr_temp = None
for line in lines:
curr_stage = self.parse_stage(line) or curr_stage
curr_sfxn = self.parse_ramp_sfxn(line) or curr_sfxn
curr_temp = self.parse_ramp_temp(line) or curr_temp
if not curr_stage:
continue
move = self.parse_move(line, curr_sfxn, curr_temp)
if move:
self.moves.setdefault(curr_stage, []).append(move)
self.scores[curr_stage] = \
self.parse_final_score(line) or self.scores.get(curr_stage)
self.rmsds[curr_stage] = \
self.parse_final_rmsd(line) or self.rmsds.get(curr_stage)
self.times[curr_stage] = \
self.parse_elapsed_time(line) or self.times.get(curr_stage)
@classmethod
def parse_stage(cls, line):
if cls.build_pat.match(line):
return 'build'
if cls.centroid_pat.match(line):
return 'centroid'
if cls.fullatom_pat.match(line):
return 'fullatom'
@classmethod
def parse_ramp_sfxn(cls, line):
ramp_sfxn = cls.ramp_sfxn_pat.match(line)
if ramp_sfxn:
return {
'chainbreak': float(ramp_sfxn.group('chainbreak')),
'fa_rep': float(ramp_sfxn.group('fa_rep')),
'rama': float(ramp_sfxn.group('rama')),
'rama2b': float(ramp_sfxn.group('rama2b')),
}
@classmethod
def parse_ramp_temp(cls, line):
ramp_temp = cls.ramp_temp_pat.match(line)
if ramp_temp:
return float(ramp_temp.group('temperature'))
@classmethod
def parse_move(cls, line, curr_sfxn, curr_temp):
propose_move = cls.propose_move_pat.match(line)
if propose_move:
return {
'iteration': propose_move.group('iteration'),
'proposed': propose_move.group('proposed') == '1',
'accepted': propose_move.group('accepted') == '1',
'score': float(propose_move.group('score')),
# The RMSD group is optional in the pattern, so guard against None.
'rmsd': float(propose_move.group('rmsd')) if propose_move.group('rmsd') else None,
'time': float(propose_move.group('time')),
'temperature': curr_temp,
'scorefxn': curr_sfxn,
}
@classmethod
def parse_final_score(cls, line):
final_score = cls.final_score_pat.match(line)
if final_score:
return float(final_score.group('score'))
@classmethod
def parse_final_rmsd(cls, line):
final_rmsd = cls.final_rmsd_pat.match(line)
if final_rmsd:
return float(final_rmsd.group('rmsd'))
@classmethod
def parse_elapsed_time(cls, line):
elapsed_time = cls.elapsed_time_pat.match(line)
if elapsed_time:
return float(elapsed_time.group('time'))
|
class LoopModelingTrajectory:
@classmethod
def from_path(cls, path, *args, **kwargs):
pass
@classmethod
def from_stdout(cls, stdout, *args, **kwargs):
pass
def __init__(self, stdout):
pass
@classmethod
def parse_stage(cls, line):
pass
@classmethod
def parse_ramp_sfxn(cls, line):
pass
@classmethod
def parse_ramp_temp(cls, line):
pass
@classmethod
def parse_move(cls, line, curr_sfxn, curr_temp):
pass
@classmethod
def parse_final_score(cls, line):
pass
@classmethod
def parse_final_rmsd(cls, line):
pass
@classmethod
def parse_elapsed_time(cls, line):
pass
| 20 | 0 | 8 | 1 | 8 | 0 | 2 | 0 | 0 | 1 | 0 | 0 | 1 | 4 | 10 | 10 | 152 | 18 | 134 | 50 | 114 | 0 | 71 | 40 | 60 | 4 | 0 | 2 | 22 |
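A minimal sketch of how the parser consumes a log: a stage line switches the current stage and later summary lines are filed under it. The three-line log below is fabricated to match the class patterns; real Rosetta loop modeling logs are far longer.
from klab.rosetta.log_files import LoopModelingTrajectory

log = '\n'.join([
    'protocols.loop_modeler.LoopModeler: Build Stage',
    'protocols.loop_modeling.LoopBuilder: Final Score: -123.4 REU; Chainbreak: 0.1 REU',
    'protocols.loop_modeling.LoopBuilder: Elapsed Time: 42 sec',
])
traj = LoopModelingTrajectory.from_stdout(log)
print(traj.scores['build'])   # -123.4
print(traj.times['build'])    # 42.0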
143,684 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/rosetta/input_files_test.py
|
klab.rosetta.input_files_test.TestResfile
|
class TestResfile(unittest.TestCase):
def setUp(self):
self.resfile_contents = 'NATRO\nSTART\n\n' + \
'3 - 20 A ALLAA\n' + \
'22 - 23 A PIKAA ASDF\n' + \
'25 A PIKAA Y\n' + \
'27 A NOTAA C\n' + \
'30B B NATAA\n' + \
'* C NATAA\n'
self.write_temp_resfile(self.resfile_contents)
def tearDown(self):
os.remove(self.temp_resfile)
def write_temp_resfile(self, resfile_contents):
self.temp_resfile = None
with tempfile.NamedTemporaryFile(mode = 'w', delete = False) as f:
self.temp_resfile = f.name
f.write(resfile_contents)
self.rf = input_files.Resfile(self.temp_resfile)
def test_init_from_resfile(self):
designable_range = list(range(3, 21)) + [22, 23, 25, 27]
packable_range = designable_range + ['30B']
designable_range_str = sorted([str(x) for x in designable_range])
packable_range_str = sorted([str(x) for x in packable_range])
self.assertListEqual(self.rf.designable, designable_range_str)
self.assertListEqual(self.rf.packable, packable_range_str)
self.assertListEqual(self.rf.global_commands, ['NATRO'])
design = { 'A' : designable_range_str }
repack = { 'B' : ['30B'], 'C' : ['*'] }
self.assertDictEqual(self.rf.design_positions, design)
self.assertDictEqual(self.rf.repack_positions, repack)
for chain in self.rf.design:
for resnum in self.rf.design[chain]:
resnum_int = int(resnum)
if resnum_int >= 3 and resnum_int <= 20:
self.assertEqual(self.rf.design[chain][resnum], self.rf.allaa_set)
elif resnum_int == 22 or resnum_int == 23:
self.assertEqual(self.rf.design[chain][resnum], set(['A', 'S', 'D', 'F']))
elif resnum_int == 25:
self.assertEqual(self.rf.design[chain][resnum], set(['Y']))
elif resnum_int == 27:
check_set = set(self.rf.allaa_set)
check_set.remove('C')
self.assertEqual(self.rf.design[chain][resnum], check_set)
def test_init_from_simple_resfile(self):
# This mostly tests what the Resfile class in input_files did
# before the commits of July 21 2015
os.remove(self.temp_resfile) # Remove default resfile created by setUp
resfile_contents = 'NATRO\nSTART\n\n3 A NATAA\n'
self.write_temp_resfile(resfile_contents)
self.assertListEqual(self.rf.global_commands, ['NATRO'])
self.assertListEqual(self.rf.designable, [])
self.assertListEqual(self.rf.packable, ['3'])
def test_init_from_mutageneses(self):
mutations = [
SimpleMutation('A', '1', 'S', 'Z'),
SimpleMutation('P', '3', 'W', 'Z')
]
rf = input_files.Resfile.from_mutageneses(mutations)
designable_range = ['1', '3']
packable_range = ['1', '3']
self.assertListEqual(rf.designable, designable_range)
self.assertListEqual(rf.packable, packable_range)
self.assertListEqual(rf.global_commands, ['NATRO'])
design = { 'Z' : designable_range }
repack = {}
self.assertDictEqual(rf.design_positions, design)
self.assertDictEqual(rf.repack_positions, repack)
self.assertDictEqual(rf.design, {'Z': {'1': set(['S']), '3': set(['W'])}} )
self.assertDictEqual(rf.repack, {})
def test_repr(self):
original_rf_repr = str(self.rf)
original_rf = self.rf
os.remove(self.temp_resfile) # Remove default resfile created by setUp
self.write_temp_resfile(original_rf_repr)
self.assertMultiLineEqual(original_rf_repr, str(self.rf))
self.assertEqual(original_rf, self.rf)
def test_chains(self):
self.assertListEqual(self.rf.chains, ['A', 'B', 'C'])
def test_residues(self):
self.assertListEqual(self.rf.residues,
sorted(['3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15',
'16', '17', '18', '19', '20', '22', '23', '25', '27', '30B', '*'])
)
|
class TestResfile(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def write_temp_resfile(self, resfile_contents):
pass
def test_init_from_resfile(self):
pass
def test_init_from_simple_resfile(self):
pass
def test_init_from_mutageneses(self):
pass
def test_repr(self):
pass
def test_chains(self):
pass
def test_residues(self):
pass
| 10 | 0 | 11 | 2 | 9 | 0 | 2 | 0.05 | 1 | 7 | 2 | 0 | 9 | 3 | 9 | 81 | 106 | 23 | 81 | 33 | 71 | 4 | 66 | 32 | 56 | 7 | 2 | 3 | 15 |
143,685 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/rosetta/input_files.py
|
klab.rosetta.input_files.SecondaryStructureDefinition
|
class SecondaryStructureDefinition(object):
'''A class to manipulate secondary structure assignment files. These files are not standard Rosetta files; we use them
for our fragment generation. For that reason, they may change over time until we fix on a flexible format. The
indices in these files are 1-indexed and use Rosetta numbering i.e. if the first chain has 90 residues and the second
has 40 residues, a position of 96 refers to the sixth residue of the second chain. The file format is whitespace-separated
columns. The first column is a residue ID or a residue range. The second column is a string consisting of characters
'H', 'E', and 'L', representing helix, sheet, and loop structure respectively.
Comments are allowed. Lines with comments must start with a '#' symbol.
Example file:
1339 HEL
# An expected helix
1354-1359 H
# Helix or sheet structure
1360,1370-1380 HE
'''
@staticmethod
def from_filepath(filepath, ignore_whitespace = True, ignore_errors = False):
return SecondaryStructureDefinition(read_file(filepath))
def __init__(self, contents):
self.data = {}
self.parse_ss_def_file(contents)
def parse_ss_def_file(self, contents):
'''This parser is forgiving and allows leading whitespace.'''
mapping = {}
for l in [l.strip() for l in contents.split('\n') if l.strip() and not(l.strip().startswith('#'))]:
tokens = l.split()
if len(tokens) != 2:
raise RosettaFileParsingException('Lines in a secondary structure definition file must have exactly two entries.')
positions = parse_range(tokens[0])
ss = sorted(set(tokens[1].upper()))
for p in positions:
if mapping.get(p) and mapping[p] != ss:
raise RosettaFileParsingException('There are conflicting definitions for residue %d (%s and %s).' % (p, ''.join(mapping[p]), ''.join(ss)))
mapping[p] = ss
self.data = mapping
|
class SecondaryStructureDefinition(object):
'''A class to manipulate secondary structure assignment files. These files are not standard Rosetta files; we use them
for our fragment generation. For that reason, they may change over time until we fix on a flexible format. The
indices in these files are 1-indexed and use Rosetta numbering i.e. if the first chain has 90 residues and the second
has 40 residues, a position of 96 refers to the sixth residue of the second chain. The file format is whitespace-separated
columns. The first column is a residue ID or a residue range. The second column is a string consisting of characters
'H', 'E', and 'L', representing helix, sheet, and loop structure respectively.
Comments are allowed. Lines with comments must start with a '#' symbol.
Example file:
1339 HEL
# An expected helix
1354-1359 H
# Helix or sheet structure
1360,1370-1380 HE
'''
@staticmethod
def from_filepath(filepath, ignore_whitespace = True, ignore_errors = False):
pass
def __init__(self, contents):
pass
def parse_ss_def_file(self, contents):
'''This parser is forgiving and allows leading whitespace.'''
pass
| 5 | 2 | 7 | 0 | 6 | 1 | 2 | 0.8 | 1 | 2 | 1 | 0 | 2 | 1 | 3 | 3 | 41 | 6 | 20 | 12 | 15 | 16 | 19 | 11 | 15 | 5 | 1 | 3 | 7 |
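A short sketch parsing the example from the class docstring, assuming klab.rosetta.input_files is importable and that the module's parse_range helper accepts the comma/range syntax shown there. self.data maps Rosetta residue numbers to sorted lists of allowed secondary structure characters.
from klab.rosetta.input_files import SecondaryStructureDefinition

contents = '1339 HEL\n# An expected helix\n1354-1359 H\n1360,1370-1380 HE\n'
ss = SecondaryStructureDefinition(contents)
print(ss.data[1339])   # ['E', 'H', 'L']
print(ss.data[1355])   # ['H']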
143,686 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/fs/stats.py
|
klab.fs.stats.FileCounter
|
class FileCounter(object):
'''Counts the number of files in all directories below the specified root directory. We keep a count per directory as well as a count under a directory i.e. the sum of the number of files in the directory plus the number of all files under that directory.'''
def __init__(self, top_dir):
if top_dir.endswith('/'):
top_dir = top_dir[:-1]
self.root = top_dir
self.counts = {}
self.cumulative_counts = {}
self.depth_list = []
self.count_files(top_dir)
self.depth_list = sorted(self.depth_list, reverse = True)
self.create_cumulative_counts()
def count_files(self, top_dir):
for dirpath, dirnames, filenames in os.walk(top_dir, topdown=True, onerror=None, followlinks=False):
if dirpath.endswith('/'):
dirpath = dirpath[:-1]
depth = len([t for t in dirpath.split('/') if t])
self.depth_list.append((depth, dirpath))
assert(self.counts.get(dirpath) == None)
self.counts[dirpath] = len(filenames)
def create_cumulative_counts(self):
for k, v in self.counts.items():
self.cumulative_counts[k] = v
for tpl in self.depth_list:
child_dir = tpl[1]
prnt_dir = os.path.split(child_dir)[0]
if child_dir != self.root:
self.cumulative_counts[prnt_dir] += self.cumulative_counts[child_dir]
def send_email(self, email_address, cut_off = None):
s = ['Cumulative file counts for directories under %s.\n' % self.root]
for k, v in sorted(self.cumulative_counts.items(), key = lambda x: -x[1]):
if v:
if not(cut_off) or v >= cut_off:
s.append('%s: %d' % (k, v))
msg = '\n'.join(s)
write_file('/tmp/filecount_output.txt', msg)
ms = MailServer()
ms.sendgmail('Directory file count statistics', [email_address], msg, pw_filepath = '/admin/pw/google')
|
class FileCounter(object):
'''Counts the number of files in all directories below the specified root directory. We keep a count per directory as well as a count under a directory i.e. the sum of the number of files in the directory plus the number of all files under that directory.'''
def __init__(self, top_dir):
pass
def count_files(self, top_dir):
pass
def create_cumulative_counts(self):
pass
def send_email(self, email_address, cut_off = None):
pass
| 5 | 1 | 10 | 1 | 9 | 0 | 3 | 0.03 | 1 | 1 | 1 | 0 | 4 | 4 | 4 | 4 | 44 | 6 | 37 | 19 | 32 | 1 | 37 | 19 | 32 | 4 | 1 | 3 | 13 |
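A minimal usage sketch, assuming klab.fs.stats is importable; any readable directory works in place of /tmp, and send_email is skipped because it needs a configured MailServer and password file.
from klab.fs.stats import FileCounter

fc = FileCounter('/tmp')
# cumulative_counts[d] = files in d plus files in every directory below d.
for path, count in sorted(fc.cumulative_counts.items(), key = lambda x: -x[1])[:5]:
    print('%s: %d' % (path, count))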
143,687 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/rosetta/input_files.py
|
klab.rosetta.input_files.RosettaFileParsingException
|
class RosettaFileParsingException(Exception): pass
|
class RosettaFileParsingException(Exception):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
143,688 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/latex/latex_report.py
|
klab.latex.latex_report.LatexText
|
class LatexText(LatexPage):
# Each "text" object will be turned into a paragraph
def __init__ (self, text, color = None):
self.text = []
self.color = color
if text:
self.add_text(text)
def add_text(self, text):
self.text.append( make_latex_safe(text.strip()) )
def generate_latex(self):
if self.color:
return_str = '{\\color{%s} ' % self.color
else:
return_str = ''
return_str += self.generate_plaintext()
if self.color:
return_str += '}'
return return_str
def generate_plaintext(self):
return_str = ''
if len(self.text) > 1:
for text in self.text[:-1]:
return_str += text + '\n\n'
return_str += self.text[-1] + '\n'
return return_str
|
class LatexText(LatexPage):
def __init__ (self, text, color = None):
pass
def add_text(self, text):
pass
def generate_latex(self):
pass
def generate_plaintext(self):
pass
| 5 | 0 | 6 | 1 | 6 | 0 | 2 | 0.04 | 1 | 0 | 0 | 0 | 4 | 2 | 4 | 5 | 30 | 5 | 24 | 10 | 19 | 1 | 23 | 10 | 18 | 3 | 2 | 2 | 9 |
143,689 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/latex/latex_report.py
|
klab.latex.latex_report.LatexSubSection
|
class LatexSubSection(LatexPageSection):
def __init__(self, title, subtext = None, clearpage = False):
super(LatexSubSection, self).__init__(title, subtext, clearpage)
self.section_latex_func = 'subsection'
|
class LatexSubSection(LatexPageSection):
def __init__(self, title, subtext = None, clearpage = False):
pass
| 2 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 1 | 1 | 1 | 5 | 4 | 0 | 4 | 3 | 2 | 0 | 4 | 3 | 2 | 1 | 3 | 0 | 1 |
143,690 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/latex/latex_report.py
|
klab.latex.latex_report.LatexTable
|
class LatexTable(LatexPage):
def __init__ (self, header_row, data_rows, header_text = None, column_format = None):
self.num_columns = len(header_row)
for data_row in data_rows:
if self.num_columns != len(data_row):
print('Header row:', header_row)
print('Data row:', data_row)
raise Exception('This data row has a different number of columns than the header row')
self.set_column_format(column_format)
self.header_row = [make_latex_safe( x.strip() ) for x in header_row]
self.data_rows = [[make_latex_safe( x.strip() ) for x in data_row] for data_row in data_rows]
if header_text:
self.header_text = make_latex_safe( header_text.strip() )
else:
self.header_text = None
def set_column_format(self, column_format):
self.column_format = column_format
if column_format:
assert( len(column_format) == self.num_columns )
def generate_latex(self):
if self.column_format:
column_format = ' '.join( self.column_format )
else:
column_format = ( 'c ' * self.num_columns )
return_str = '\n\n'
return_str += '\\begin{table}[H]\\begin{center}\n'
return_str += '\\begin{tabular}{ %s}\n' % column_format
return_str += self.row_to_latex_row(self.header_row)
return_str += '\\hline\n'
for row in self.data_rows:
return_str += self.row_to_latex_row(row)
return_str += '\\end{tabular}\n'
if self.header_text:
return_str += '\\caption{%s}\n' % self.header_text
return_str += '\\end{center}\\end{table}\n\n\n'
return return_str
def generate_plaintext(self):
l = copy.deepcopy(self.data_rows)
l.insert(0, self.header_row)
return format_list_table(l)
def row_to_latex_row(self, row):
return_str = ''
if len(row) > 1:
for x in row[:-1]:
return_str += '%s & ' % str(x)
if len(row) > 0:
return_str += '%s' % str(row[-1])
return_str += '\\\\\n'
return return_str
|
class LatexTable(LatexPage):
def __init__ (self, header_row, data_rows, header_text = None, column_format = None):
pass
def set_column_format(self, column_format):
pass
def generate_latex(self):
pass
def generate_plaintext(self):
pass
def row_to_latex_row(self, row):
pass
| 6 | 0 | 10 | 1 | 10 | 0 | 3 | 0 | 1 | 2 | 0 | 1 | 5 | 5 | 5 | 6 | 56 | 7 | 49 | 18 | 43 | 0 | 47 | 18 | 41 | 4 | 2 | 2 | 15 |
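A short sketch of LatexTable, assuming klab.latex.latex_report is importable; the cell values are hypothetical strings since the constructor passes every cell through make_latex_safe.
from klab.latex.latex_report import LatexTable

table = LatexTable(['Mutation', 'DDG'],
                   [['A123G', '1.2'], ['L45P', '-0.3']],
                   header_text = 'Example predictions',
                   column_format = ['l', 'r'])
print(table.generate_latex())      # table/center/tabular LaTeX environments
print(table.generate_plaintext())  # the same rows rendered as fixed-width text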
143,691 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/latex/latex_report.py
|
klab.latex.latex_report.LatexPandasTable
|
class LatexPandasTable(LatexTable):
def __init__ (self, df, caption_text = None, header = True, float_format = None, sparsify = True):
self.df = df
if caption_text:
self.caption_text = make_latex_safe( caption_text )
else:
self.caption_text = caption_text
self.header = header
self.float_format = float_format
self.sparsify = sparsify
def generate_latex(self):
latex = '\\begin{table}[H]\n'
latex += self.df.to_latex( header = self.header, float_format = self.float_format, sparsify = self.sparsify )
if self.caption_text:
latex += '\\caption{%s}\n' % str(self.caption_text)
latex += '\\end{table}\n'
return latex
|
class LatexPandasTable(LatexTable):
def __init__ (self, df, caption_text = None, header = True, float_format = None, sparsify = True):
pass
def generate_latex(self):
pass
| 3 | 0 | 8 | 0 | 8 | 0 | 2 | 0 | 1 | 1 | 0 | 0 | 2 | 5 | 2 | 8 | 18 | 1 | 17 | 9 | 14 | 0 | 16 | 9 | 13 | 2 | 3 | 1 | 4 |
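LatexPandasTable delegates the tabular body to pandas. A minimal sketch under the same import assumption; float_format is passed straight through to DataFrame.to_latex, so a one-argument callable works.
import pandas
from klab.latex.latex_report import LatexPandasTable

df = pandas.DataFrame({'n': [10, 20], 'R': [0.712, 0.684]}, index = ['run1', 'run2'])
ptable = LatexPandasTable(df, caption_text = 'Example results', float_format = '{:.2f}'.format)
print(ptable.generate_latex())  # \begin{table}[H] ... \caption{Example results} ... \end{table}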
143,692 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/fs/zip_util.py
|
klab.fs.zip_util.CachedLineReader
|
class CachedLineReader(object):
def __init__ (self, fname):
self.line_reader = LineReader(fname)
self.cached_lines = [line for line in self.line_reader.readlines()]
self.line_reader.close()
def readlines(self):
return self.cached_lines
|
class CachedLineReader(object):
def __init__ (self, fname):
pass
def readlines(self):
pass
| 3 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 2 | 2 | 2 | 8 | 1 | 7 | 5 | 4 | 0 | 7 | 5 | 4 | 1 | 1 | 0 | 2 |
143,693 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/basics.py
|
klab.bio.basics.PDBResidue
|
class PDBResidue(Residue):
def __init__(self, Chain, ResidueID, ResidueAA, residue_type, Residue3AA = None):
'''Residue3AA has to be used when matching non-canonical residues/HETATMs to the SEQRES record e.g. 34H in 1A2C.'''
assert(len(Chain) == 1)
assert(len(ResidueID) == 5)
super(PDBResidue, self).__init__(Chain, ResidueID, ResidueAA, residue_type)
self.Residue3AA = Residue3AA
def add_position(self, x, y, z):
self.x, self.y, self.z = x, y, z
def __repr__(self):
return "%s%s" % (self.Chain, self.ResidueID)
def get_residue_id(self):
return "%s%s" % (self.Chain, self.ResidueID)
|
class PDBResidue(Residue):
def __init__(self, Chain, ResidueID, ResidueAA, residue_type, Residue3AA = None):
'''Residue3AA has to be used when matching non-canonical residues/HETATMs to the SEQRES record e.g. 34H in 1A2C.'''
pass
def add_position(self, x, y, z):
pass
def __repr__(self):
pass
def get_residue_id(self):
pass
| 5 | 1 | 3 | 0 | 3 | 0 | 1 | 0.08 | 1 | 1 | 0 | 1 | 4 | 4 | 4 | 9 | 17 | 4 | 12 | 7 | 7 | 1 | 12 | 7 | 7 | 1 | 2 | 0 | 4 |
143,694 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/fs/zip_util.py
|
klab.fs.zip_util.LineReader
|
class LineReader(object):
def __init__(self,fname):
if fname.endswith('.gz'):
if not os.path.isfile(fname):
raise IOError(fname)
self.f = Popen(['gunzip', '-c', fname], stdout=PIPE, stderr=PIPE)
self.zipped=True
else:
self.f = open(fname,'r')
self.zipped=False
def readlines(self):
if self.zipped:
for line in self.f.stdout:
yield line
else:
for line in self.f.readlines():
yield line
def close(self):
if self.zipped:
if self.f.poll() is None:
os.kill(self.f.pid, signal.SIGHUP)
else:
self.f.close()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def __iter__(self):
return self.readlines()
|
class LineReader(object):
def __init__(self,fname):
pass
def readlines(self):
pass
def close(self):
pass
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
pass
def __iter__(self):
pass
| 7 | 0 | 5 | 0 | 5 | 0 | 2 | 0 | 1 | 1 | 0 | 0 | 6 | 2 | 6 | 6 | 29 | 0 | 29 | 10 | 22 | 0 | 26 | 10 | 19 | 4 | 1 | 2 | 13 |
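A minimal sketch of LineReader, assuming klab.fs.zip_util is importable; 'example.txt' is a hypothetical path, and a '.gz' suffix would transparently switch to the gunzip pipe.
from klab.fs.zip_util import LineReader

with LineReader('example.txt') as reader:  # also accepts 'example.txt.gz'
    for line in reader:                    # __iter__ delegates to readlines()
        print(line.rstrip())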
143,695 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/general/structures.py
|
klab.general.structures.Bunch
|
class Bunch(object):
'''Common Python idiom.'''
def __init__(self, **kwds):
self.__dict__.update(kwds)
def keys(self):
return list(self.__dict__.keys())
def __getitem__(self, key):
return self.__dict__.__getitem__(key)
def get(self, item):
return self.__dict__.get(item, None)
def pprint(self):
# todo: only works for one level at present
import pprint
return pprint.pformat(self.__dict__)
|
class Bunch(object):
'''Common Python idiom.'''
def __init__(self, **kwds):
pass
def keys(self):
pass
def __getitem__(self, key):
pass
def get(self, item):
pass
def pprint(self):
pass
| 6 | 1 | 2 | 0 | 2 | 0 | 1 | 0.17 | 1 | 1 | 0 | 1 | 5 | 0 | 5 | 5 | 18 | 4 | 12 | 7 | 5 | 2 | 12 | 7 | 5 | 1 | 1 | 0 | 5 |
143,696 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/general/structures.py
|
klab.general.structures.DeepNonStrictNestedBunch
|
class DeepNonStrictNestedBunch(NonStrictNestedBunch):
'''Similar to a NonStrictNestedBunch but we allow deep lookups for elements which do not exist.'''
def __getattr__(self, key):
return self.__dict__.get(key, NonStrictNestedBunch({}))
|
class DeepNonStrictNestedBunch(NonStrictNestedBunch):
'''Similar to a NonStrictNestedBunch but we allow deep lookups for elements which do not exist.'''
def __getattr__(self, key):
pass
| 2 | 1 | 2 | 0 | 2 | 0 | 1 | 0.33 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 11 | 5 | 1 | 3 | 2 | 1 | 1 | 3 | 2 | 1 | 1 | 4 | 0 | 1 |
143,697 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/general/structures.py
|
klab.general.structures.NestedBunch
|
class NestedBunch(Bunch):
'''A class to turn a dict into an object with dot accessors e.g.
nb = NestedBunch({'k1' : {'k2' : v}})
nb.k1.k2 # returns v
Handles simple data types and subclasses of dict which behave appropriately e.g. JSON.
'''
def __init__(self, d):
for k, v in d.items():
if isinstance(v, dict): self.__dict__[k] = self.__class__(v)
else: self.__dict__[k] = v
@classmethod
def from_JSON(cls, json_string):
import json
return cls(json.loads(json_string))
def __repr__(self):
return str(self.__dict__)
|
class NestedBunch(Bunch):
'''A class to turn a dict into an object with dot accessors e.g.
nb = NestedBunch({'k1' : {'k2' : v}})
nb.k1.k2 # returns v
Handles simple data types and subclasses of dict which behave appropriately e.g. JSON.
'''
def __init__(self, d):
pass
@classmethod
def from_JSON(cls, json_string):
pass
def __repr__(self):
pass
| 5 | 1 | 3 | 0 | 3 | 0 | 2 | 0.45 | 1 | 2 | 0 | 2 | 2 | 0 | 3 | 8 | 18 | 2 | 11 | 7 | 5 | 5 | 11 | 6 | 6 | 3 | 2 | 2 | 5 |
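A short sketch of the dot-access behaviour, assuming klab.general.structures is importable; the keys and values are hypothetical.
from klab.general.structures import NestedBunch

nb = NestedBunch({'server': {'host': 'localhost', 'port': 8080}})
print(nb.server.host)   # 'localhost' - nested dicts become nested bunches
nb2 = NestedBunch.from_JSON('{"a": {"b": 1}}')
print(nb2.a.b)          # 1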
143,698 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/general/structures.py
|
klab.general.structures.NonStrictNestedBunch
|
class NonStrictNestedBunch(NestedBunch):
'''Similar to a NestedBunch but we allow shallow lookups for elements which do not exist.'''
def __bool__(self):
return len(self.__dict__) != 0
def __getattr__(self, key):
return self.__dict__.get(key)
|
class NonStrictNestedBunch(NestedBunch):
'''Similar to a NestedBunch but we allow shallow lookups for elements which do not exist.'''
def __bool__(self):
pass
def __getattr__(self, key):
pass
| 3 | 1 | 2 | 0 | 2 | 0 | 1 | 0.2 | 1 | 0 | 0 | 1 | 2 | 0 | 2 | 10 | 8 | 2 | 5 | 3 | 2 | 1 | 5 | 3 | 2 | 1 | 3 | 0 | 2 |
143,699 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/general/structures.py
|
klab.general.structures.nested_dict
|
class nested_dict(dict):
@staticmethod
def from_dict(d):
n = nested_dict()
if isinstance(d, collections.abc.Mapping):
for k, v in d.items():
n[k] = v
else:
n[d] = None
return n
def update(self, u):
''' Works like dict.update(dict) but handles nested dicts.
From http://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth.
'''
for k, v in u.items():
if isinstance(v, collections.abc.Mapping):
r = nested_dict.from_dict(self.get(k, {}))
r.update(v)
self[k] = r
elif isinstance(self, collections.abc.Mapping):
self[k] = u[k]
else:
self.__dict__ = dict(k = u[k])
|
class nested_dict(dict):
@staticmethod
def from_dict(d):
pass
def update(self, u):
''' Works like dict.update(dict) but handles nested dicts.
From http://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth.
'''
pass
| 4 | 1 | 11 | 0 | 9 | 2 | 4 | 0.15 | 1 | 0 | 0 | 0 | 1 | 1 | 2 | 29 | 25 | 2 | 20 | 9 | 16 | 3 | 16 | 8 | 13 | 4 | 2 | 2 | 7 |
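A sketch contrasting nested_dict.update with plain dict.update, under the same import assumption; the keys and values are hypothetical.
from klab.general.structures import nested_dict

d = nested_dict.from_dict({'a': {'x': 1}})
d.update({'a': {'y': 2}, 'b': 3})  # deep merge: 'a' keeps 'x' and gains 'y'
print(d)                           # {'a': {'x': 1, 'y': 2}, 'b': 3}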
143,700 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/general/strutil.py
|
klab.general.strutil.BadException
|
class BadException(Exception): pass
|
class BadException(Exception):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
143,701 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/latex/latex_report.py
|
klab.latex.latex_report.LatexReport
|
class LatexReport:
def __init__(self, latex_template_file = None, table_of_contents = True, number_compilations = 3):
self.latex_template_file = latex_template_file
self.number_compilations = int( number_compilations )
self.title_page_title = None
self.title_page_subtitle = None
self.abstract_text = []
self.latex = None
self.content = []
self.chapters = []
self.table_of_contents = table_of_contents
def set_title_page(self, title = '', subtitle = ''):
if title != '':
self.title_page_title = make_latex_safe(title)
if subtitle != '':
self.title_page_subtitle = make_latex_safe(subtitle)
def add_to_abstract(self, abstract_text):
self.abstract_text.append( make_latex_safe(abstract_text) )
def extend_abstract(self, abstract_lines):
self.abstract_text.extend( abstract_lines )
def add_section_page(self, title = '', subtext = None, clearpage = True):
self.content.append(
LatexPageSection(title, subtext, clearpage)
)
def add_plot(self, plot_filename, plot_title = None):
self.content.append(
LatexPagePlot(plot_filename, plot_title)
)
def add_chapter(self, chapter):
self.chapters.append(chapter)
def set_latex_from_strings(self, latex_strings):
self.latex = ''
for s in latex_strings:
if s.endswith('\n'):
self.latex += s
else:
self.latex += s + '\n'
def generate_latex_chapter(self):
latex_strings = []
latex_strings.append( '\\chapter{%s}\n\n\\clearpage\n\n' % self.title_page_title )
if self.title_page_subtitle != '' and self.title_page_subtitle != None:
latex_strings.append( '\\textbf{%s}\n\n' % self.title_page_subtitle)
# if self.table_of_contents:
# latex_strings.append( '\\minitoc\n\n' )
if len( self.abstract_text ) > 0:
latex_strings.extend( self.generate_abstract_lines() )
for content_obj in self.content:
latex_strings.append( content_obj.generate_latex() )
self.set_latex_from_strings( latex_strings )
return self.latex
def generate_latex(self, output_type='pdf'):
if output_type == 'pdf':
latex_strings = [document_header]
elif output_type == 'html':
latex_strings = [html_document_header]
make_title_page = False
if self.title_page_title != None and self.title_page_title != '':
latex_strings.append( '\\title{%s}' % self.title_page_title )
make_title_page = True
if self.title_page_subtitle != None and self.title_page_subtitle != '':
latex_strings.append( '\\subtitle{%s}' % self.title_page_subtitle )
make_title_page = True
if make_title_page:
latex_strings.append('\\date{\\today}')
latex_strings.append('\\begin{document}\n\\maketitle')
else:
latex_strings.append('\\begin{document}\n')
if self.table_of_contents:
latex_strings.append('\\tableofcontents\n\n\\clearpage\n\n')
if len( self.abstract_text ) > 0:
latex_strings.append('\\begin{abstract}\n')
latex_strings.extend( self.generate_abstract_lines() )
latex_strings.append('\\end{abstract}\n\n')
for content_obj in self.content:
latex_strings.append( content_obj.generate_latex() )
for chapter_obj in self.chapters:
latex_strings.append( chapter_obj.generate_latex_chapter() )
latex_strings.append( '\\end{document}' )
self.set_latex_from_strings( latex_strings )
return self.latex
def generate_abstract_lines(self):
latex_strings = []
if len( self.abstract_text ) > 0:
for abstract_text_paragraph in self.abstract_text:
latex_strings.append( abstract_text_paragraph + '\n\n' )
return latex_strings
def generate_pdf_report(self, report_filepath, copy_tex_file_dir = True, verbose = True, compile_pdf = True):
self.generate_latex( output_type = 'pdf' )
out_dir = tempfile.mkdtemp( prefix = '%s-%s-tmp-latex-pdf_' % (time.strftime("%y%m%d"), getpass.getuser()) )
if verbose:
print('Outputting latex files to temporary directory:', out_dir)
tmp_latex_file = os.path.join(out_dir, 'report.tex')
with open(tmp_latex_file, 'w') as f:
f.write(self.latex)
if compile_pdf:
for x in range(self.number_compilations):
latex_output = subprocess.check_output( ['pdflatex', 'report.tex'], cwd = out_dir )
tmp_latex_pdf = os.path.join(out_dir, 'report.pdf')
assert( os.path.isfile(tmp_latex_pdf) )
shutil.copy( tmp_latex_pdf, report_filepath )
if copy_tex_file_dir:
shutil.copytree(
out_dir,
os.path.join(os.path.dirname(report_filepath), 'latex_files')
)
shutil.rmtree(out_dir)
def generate_html_report(self, report_filepath):
self.generate_latex( output_type = 'html' )
out_dir = tempfile.mkdtemp( prefix = '%s-%s-tmp-latex-html_' % (time.strftime("%y%m%d"), getpass.getuser()) )
tmp_latex_file = os.path.join(out_dir, 'report.tex')
with open(tmp_latex_file, 'w') as f:
f.write(self.latex)
for x in range(self.number_compilations):
latex_output = subprocess.check_output( ['htlatex', 'report.tex'], cwd = out_dir )
# Note: the temporary directory is intentionally left in place until copying of the htlatex output is implemented.
raise Exception("Output files not yet copied from: " + out_dir)
def generate_plaintext(self):
# Returns saved information as plaintext string
return_strings = []
if len( self.abstract_text ) > 0:
return_strings.append('Abstract:\n')
for abstract_text_paragraph in self.abstract_text:
return_strings.append( abstract_text_paragraph + '\n\n' )
for content_obj in self.content:
return_strings.append( content_obj.generate_plaintext() )
return_str = ''
for return_string in return_strings:
if return_string.endswith('\n'):
return_str += return_string
else:
return_str += return_string + '\n'
return return_str
|
class LatexReport:
def __init__(self, latex_template_file = None, table_of_contents = True, number_compilations = 3):
pass
def set_title_page(self, title = '', subtitle = ''):
pass
def add_to_abstract(self, abstract_text):
pass
def extend_abstract(self, abstract_lines):
pass
def add_section_page(self, title = '', subtext = None, clearpage = True):
pass
def add_plot(self, plot_filename, plot_title = None):
pass
def add_chapter(self, chapter):
pass
def set_latex_from_strings(self, latex_strings):
pass
def generate_latex_chapter(self):
pass
def generate_latex(self, output_type='pdf'):
pass
def generate_abstract_lines(self):
pass
def generate_pdf_report(self, report_filepath, copy_tex_file_dir = True, verbose = True, compile_pdf = True):
pass
def generate_html_report(self, report_filepath):
pass
def generate_plaintext(self):
pass
| 15 | 0 | 11 | 2 | 9 | 0 | 3 | 0.02 | 0 | 5 | 2 | 0 | 14 | 9 | 14 | 14 | 166 | 34 | 129 | 49 | 114 | 3 | 118 | 47 | 103 | 10 | 0 | 2 | 42 |
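A minimal report sketch, assuming klab.latex.latex_report is importable; compiling the PDF additionally requires pdflatex on the PATH, so only the generated .tex source is printed here.
from klab.latex.latex_report import LatexReport

report = LatexReport(table_of_contents = False)
report.set_title_page(title = 'Benchmark results', subtitle = 'Example run')
report.add_to_abstract('Summary of a hypothetical analysis run.')
report.add_section_page(title = 'Results')
print(report.generate_latex())              # the assembled LaTeX source
# report.generate_pdf_report('report.pdf')  # would compile via pdflatex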
143,702 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/google/gcalendar.py
|
klab.google.gcalendar.BasicEvent
|
class BasicEvent(object):
def __init__(self, calendar_object, start_dt, end_dt, location = None, summary = None, description = None, visibility = 'default', email_map = {}, username_map = {}):
'''start_dt should be a datetime.date object for all-day events or a datetime.datetime object for ranged events. Similarly for end_dt.'''
e = {}
self.timezone_string = calendar_object.timezone_string
assert(visibility == 'default' or visibility == 'public' or visibility == 'private' or visibility == 'confidential')
# Check datetime before date: datetime is a subclass of date, so testing date first would misclassify ranged events as all-day events.
if isinstance(start_dt, datetime):
e['start'] = {'dateTime' : start_dt.isoformat(), 'timeZone' : self.timezone_string}
else:
assert(isinstance(start_dt, date))
e['start'] = {'date' : start_dt.isoformat(), 'timeZone' : self.timezone_string}
if isinstance(end_dt, datetime):
e['end'] = {'dateTime' : end_dt.isoformat(), 'timeZone' : self.timezone_string}
else:
assert(isinstance(end_dt, date))
e['end'] = {'date' : end_dt.isoformat(), 'timeZone' : self.timezone_string}
e['summary'] = summary
e['description'] = description or summary
e['location'] = location
e['status'] = 'confirmed'
self.email_map = email_map
self.username_map = username_map
self.event = e
def initialize_tagged_copy(self):
e = copy.deepcopy(self.event)
e['extendedProperties'] = e.get('extendedProperties', {})
e['extendedProperties']['shared'] = e['extendedProperties'].get('shared', {})
assert(not(e['extendedProperties']['shared'].get('event_type')))
return e
# Main calendar
def create_lab_meeting(self, event_type, presenters, foodie = None, locked = False):
'Presenters can be a comma-separated list of presenters.'
e = self.initialize_tagged_copy()
summary_texts = {
'Lab meeting' : 'Kortemme Lab meeting',
'Kortemme/DeGrado joint meeting' : 'DeGrado/Kortemme labs joint meeting'
}
assert(summary_texts.get(event_type))
e['extendedProperties']['shared']['event_type'] = event_type
e['extendedProperties']['shared']['Presenters'] = presenters
e['extendedProperties']['shared']['Food'] = foodie
e['extendedProperties']['shared']['Locked meeting'] = locked
participants = [p.strip() for p in ([p for p in presenters.split(',')] + [foodie]) if p and p.strip()]
participants = [p for p in [self.email_map.get(p) for p in participants] if p]
participant_names = [self.username_map.get(p.strip(), p.strip()) for p in presenters.split(',') if p.strip()]
if participants:
e['extendedProperties']['shared']['ParticipantList'] = ','.join(participants)
if not e['summary']:
e['summary'] = '%s: %s' % (summary_texts[event_type], ', '.join(participant_names))
e['description'] = e['description'] or e['summary']
return e
def create_journal_club_meeting(self, presenters, food_vendor, paper = None):
'Presenters can be a comma-separated list of presenters.'
e = self.initialize_tagged_copy()
e['extendedProperties']['shared']['event_type'] = 'Journal club'
e['extendedProperties']['shared']['Presenters'] = presenters
e['extendedProperties']['shared']['Food vendor'] = food_vendor
e['extendedProperties']['shared']['Paper'] = paper
participants = [p.strip() for p in [p for p in presenters.split(',')] if p and p.strip()]
participants = [p for p in [self.email_map.get(p) for p in participants] if p]
participant_names = [self.username_map.get(p.strip(), p.strip()) for p in presenters.split(',') if p.strip()]
if participants:
e['extendedProperties']['shared']['ParticipantList'] = ','.join(participants)
if not e['summary']:
e['summary'] = 'Journal club: %s' % (', '.join(participant_names))
e['description'] = e['description'] or e['summary']
return e
# Notices calendar
def create_birthday(self, celebrant, caker):
e = self.initialize_tagged_copy()
# The summary and description are overwritten below.
e['extendedProperties']['shared']['event_type'] = 'Birthday'
e['extendedProperties']['shared']['Celebrant'] = celebrant
e['extendedProperties']['shared']['Bringer Of CAKE!'] = caker
participants = [p for p in [self.email_map.get(celebrant), self.email_map.get(caker)] if p]
if participants:
e['extendedProperties']['shared']['ParticipantList'] = ','.join(participants)
e['summary'] = "%s's birthday" % self.username_map.get(celebrant, celebrant)
e['description'] = e['summary']
e['gadget'] = {
'display' : 'icon',
'iconLink' : 'https://guybrush.ucsf.edu/images/cake.png',
'title' : e['summary'],
}
return e
|
class BasicEvent(object):
def __init__(self, calendar_object, start_dt, end_dt, location = None, summary = None, description = None, visibility = 'default', email_map = {}, username_map = {}):
'''start_dt should be a datetime.date object for all-day events or a datetime.datetime object for ranged events. Similarly for end_dt.'''
pass
def initialize_tagged_copy(self):
pass
def create_lab_meeting(self, event_type, presenters, foodie = None, locked = False):
'''Presenters can be a comma-separated list of presenters.'''
pass
def create_journal_club_meeting(self, presenters, food_vendor, paper = None):
'''Presenters can be a comma-separated list of presenters.'''
pass
def create_birthday(self, celebrant, caker):
pass
| 6 | 3 | 17 | 0 | 16 | 1 | 2 | 0.09 | 1 | 2 | 0 | 0 | 5 | 4 | 5 | 5 | 100 | 12 | 82 | 21 | 76 | 7 | 73 | 21 | 67 | 3 | 1 | 1 | 12 |
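BasicEvent only reads timezone_string from the calendar object, so a stub suffices to build the event dict offline. A sketch assuming klab.google.gcalendar is importable; the stub class, times and location are hypothetical.
from datetime import datetime
from klab.google.gcalendar import BasicEvent

class StubCalendar(object):
    timezone_string = 'America/Los_Angeles'

event = BasicEvent(StubCalendar(), datetime(2016, 6, 1, 10, 0), datetime(2016, 6, 1, 11, 0),
                   location = 'BH 308', summary = 'Lab meeting')
print(event.event['start'])  # {'dateTime': '2016-06-01T10:00:00', 'timeZone': 'America/Los_Angeles'}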
143,703 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/google/gauth.py
|
klab.google.gauth.OAuthCredentials
|
class OAuthCredentials(NestedBunch):
@staticmethod
def from_JSON(oauth_json, type = "service"):
'''At the time of writing, keys include:
client_secret, client_email, redirect_uris (list), client_x509_cert_url, client_id, javascript_origins (list)
auth_provider_x509_cert_url, auth_uri, token_uri.'''
assert(type == "service" or type == "web")
return NestedBunch(json.loads(oauth_json)[type])
|
class OAuthCredentials(NestedBunch):
@staticmethod
def from_JSON(oauth_json, type = "service"):
'''At the time of writing, keys include:
client_secret, client_email, redirect_uris (list), client_x509_cert_url, client_id, javascript_origins (list)
auth_provider_x509_cert_url, auth_uri, token_uri.'''
pass
| 3 | 1 | 6 | 0 | 3 | 3 | 1 | 0.6 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 9 | 9 | 1 | 5 | 3 | 2 | 3 | 4 | 2 | 2 | 1 | 3 | 0 | 1 |
143,704 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/biblio/doi.py
|
klab.biblio.doi.CrossRefException
|
class CrossRefException(Exception): pass
|
class CrossRefException(Exception):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
143,705 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/benchmarking/analysis/ddg_monomeric_stability_analysis.py
|
klab.benchmarking.analysis.ddg_monomeric_stability_analysis.DBBenchmarkRun
|
class DBBenchmarkRun(BenchmarkRun):
'''Our database storage has a different (more detailed) data structure than the JSON dump so we need to override some methods.'''
csv_headers = [
'DatasetID', 'PDBFileID', 'Mutations', 'NumberOfMutations', 'Experimental', 'Predicted', 'AbsoluteError', 'StabilityClassification',
'ResidueCharges', 'VolumeChange',
'WildTypeDSSPType', 'WildTypeDSSPSimpleSSType', 'WildTypeDSSPExposure',
'WildTypeSCOPClass', 'WildTypeSCOPFold', 'WildTypeSCOPClassification',
'WildTypeExposure', 'WildTypeAA', 'MutantAA', 'HasGPMutation',
'PDBResolution', 'PDBResolutionBin', 'NumberOfResidues', 'NumberOfDerivativeErrors',
]
def __init__(self, *args, **kwargs):
super(DBBenchmarkRun, self).__init__(*args, **kwargs)
self.analysis_sets = []
def get_analysis_sets(self, record):
if not self.analysis_sets:
if record['DDG'] != None:
self.analysis_sets = sorted(record['DDG'].keys())
return self.analysis_sets
def is_this_record_a_derived_mutation(self, record):
'''Returns True if a record is marked as a derived record i.e. the DDG value is calculated from one source ("reverse"
mutation) or two sources (a "mutation triangle") without a separate experiment having taken place. This property
is marked in the Kortemme lab database when we have determined that this is indeed the case. Otherwise, return
False.
For purely computational dataframes, we should always return False.'''
if self.contains_experimental_data:
for analysis_set in self.get_analysis_sets(record):
ddg_details = record['DDG'][analysis_set]
if ddg_details and ddg_details['IsDerivedValue']:
return True
return False
else:
# Computational dataframe case
return False
def get_record_mutations(self, record):
return record['PDBMutations']
def reset_csv_headers(self):
analysis_sets = None
for record in list(self.dataset_cases.values()):
analysis_sets = self.get_analysis_sets(record)
break
if analysis_sets:
self.csv_headers.remove('Experimental')
self.csv_headers.remove('AbsoluteError')
self.csv_headers.remove('StabilityClassification')
for analysis_set in analysis_sets:
self.csv_headers.append(BenchmarkRun.get_analysis_set_fieldname('Experimental', analysis_set))
self.csv_headers.append(BenchmarkRun.get_analysis_set_fieldname('AbsoluteError', analysis_set))
self.csv_headers.append(BenchmarkRun.get_analysis_set_fieldname('StabilityClassification', analysis_set))
def get_experimental_ddg_values(self, record, dataframe_record):
'''Adds the mean experimental value associated with each analysis set to the dataframe row.'''
new_idxs = []
for analysis_set in self.get_analysis_sets(record):
ddg_details = record['DDG'][analysis_set]
exp_ddg_fieldname = BenchmarkRun.get_analysis_set_fieldname('Experimental', analysis_set)
new_idxs.append(exp_ddg_fieldname)
dataframe_record[exp_ddg_fieldname] = None
if ddg_details:
dataframe_record[exp_ddg_fieldname] = ddg_details['MeanDDG']
# Update the CSV headers
try:
idx = self.csv_headers.index('Experimental')
self.csv_headers = self.csv_headers[:idx] + new_idxs + self.csv_headers[idx + 1:]
except ValueError as e: pass
def compute_stability_classification(self, predicted_data, record, dataframe_record):
'''Calculate the stability classification for the analysis cases. Must be called after get_experimental_ddg_values.'''
new_idxs = []
stability_classication_x_cutoff, stability_classication_y_cutoff = self.stability_classication_x_cutoff, self.stability_classication_y_cutoff
for analysis_set in self.get_analysis_sets(record):
ddg_details = record['DDG'][analysis_set]
exp_ddg_fieldname = BenchmarkRun.get_analysis_set_fieldname('Experimental', analysis_set)
stability_classification_fieldname = BenchmarkRun.get_analysis_set_fieldname('StabilityClassification', analysis_set)
new_idxs.append(stability_classification_fieldname)
dataframe_record[stability_classification_fieldname] = None
if ddg_details:
stability_classification = None
if dataframe_record[exp_ddg_fieldname] != None:
stability_classification = fraction_correct([dataframe_record[exp_ddg_fieldname]], [predicted_data[self.ddg_analysis_type]], x_cutoff = stability_classication_x_cutoff, y_cutoff = stability_classication_y_cutoff)
stability_classification = int(stability_classification)
assert(stability_classification == 0 or stability_classification == 1)
dataframe_record[stability_classification_fieldname] = stability_classification
# Update the CSV headers
try:
idx = self.csv_headers.index('StabilityClassification')
self.csv_headers = self.csv_headers[:idx] + new_idxs + self.csv_headers[idx + 1:]
except ValueError as e: pass
def compute_absolute_error(self, predicted_data, record, dataframe_record):
'''Calculate the absolute error for the analysis cases. Must be called after get_experimental_ddg_values.'''
new_idxs = []
for analysis_set in self.get_analysis_sets(record):
ddg_details = record['DDG'][analysis_set]
exp_ddg_fieldname = BenchmarkRun.get_analysis_set_fieldname('Experimental', analysis_set)
absolute_error_fieldname = BenchmarkRun.get_analysis_set_fieldname('AbsoluteError', analysis_set)
new_idxs.append(absolute_error_fieldname)
dataframe_record[absolute_error_fieldname] = None
if ddg_details and predicted_data[self.ddg_analysis_type] != None:
absolute_error = abs(dataframe_record[exp_ddg_fieldname] - predicted_data[self.ddg_analysis_type])
dataframe_record[absolute_error_fieldname] = absolute_error
# Update the CSV headers
try:
idx = self.csv_headers.index('AbsoluteError')
self.csv_headers = self.csv_headers[:idx] + new_idxs + self.csv_headers[idx + 1:]
except ValueError: pass
def get_record_pdb_file_id(self, record):
return record['Structure']['PDBFileID']
def count_residues(self, record, pdb_record):
NumberOfResidues = 0
try:
pdb_chains = set(record['Structure']['Partners']['L'] + record['Structure']['Partners']['R'])
assert(len(pdb_chains) > 1) # we expect non-monomeric cases
for pdb_chain in pdb_chains:
NumberOfResidues += len(pdb_record.get('Chains', {}).get(pdb_chain, {}).get('Sequence', ''))
except: pass
return NumberOfResidues
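# Illustrative sketch (hypothetical PDB file ID and chain IDs): count_residues expects
# records shaped roughly like
#   record['Structure'] = {'PDBFileID': '1ABC', 'Partners': {'L': ['A'], 'R': ['B']}}
# and sums len(pdb_record['Chains'][c]['Sequence']) over the left and right partner chains.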
|
class DBBenchmarkRun(BenchmarkRun):
'''Our database storage has a different (more detailed) data structure than the JSON dump so we need to override some methods.'''
def __init__(self, *args, **kwargs):
pass
def get_analysis_sets(self, record):
pass
def is_this_record_a_derived_mutation(self, record):
'''Returns True if a record is marked as a derived record i.e. the DDG value is calculated from one source ("reverse"
mutation) or two sources (a "mutation triangle") without a separate experiment having taken place. This property
is marked in the Kortemme lab database when we have determined that this is indeed the case. Otherwise, return
False.
For purely computational dataframes, we should always return False.'''
pass
def get_record_mutations(self, record):
pass
def reset_csv_headers(self):
pass
def get_experimental_ddg_values(self, record, dataframe_record):
'''Adds the mean experimental value associated with each analysis set to the dataframe row.'''
pass
def compute_stability_classification(self, predicted_data, record, dataframe_record):
'''Calculate the stability classification for the analysis cases. Must be called after get_experimental_ddg_values.'''
pass
def compute_absolute_error(self, predicted_data, record, dataframe_record):
'''Calculate the absolute error for the analysis cases. Must be called after get_experimental_ddg_values.'''
pass
def get_record_pdb_file_id(self, record):
pass
def count_residues(self, record, pdb_record):
pass
| 11 | 5 | 11 | 1 | 9 | 1 | 3 | 0.14 | 1 | 5 | 0 | 1 | 10 | 1 | 10 | 72 | 141 | 28 | 100 | 44 | 89 | 14 | 96 | 41 | 85 | 5 | 3 | 3 | 30 |
143,706 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/benchmarking/analysis/ddg_monomeric_stability_analysis.py
|
klab.benchmarking.analysis.ddg_monomeric_stability_analysis.BenchmarkRun
|
class BenchmarkRun(ReportingObject):
'''A object to contain benchmark run data which can be used to analyze that run or else to cross-analyze the run with another run.'''
# Class variables
amino_acid_details = {}
CAA, PAA, HAA = set(), set(), set()
# Human-readable descriptions for the volume breakdown
by_volume_descriptions = dict(
SL = 'small-to-large mutations',
LS = 'large-to-small mutations',
XX = 'no change in volume',
)
csv_headers = [
'DatasetID', 'PDBFileID', 'Mutations', 'NumberOfMutations', 'Experimental', 'Predicted', 'AbsoluteError', 'StabilityClassification',
'ResidueCharges', 'VolumeChange',
'WildTypeDSSPType', 'WildTypeDSSPSimpleSSType', 'WildTypeDSSPExposure',
'WildTypeSCOPClass', 'WildTypeSCOPFold', 'WildTypeSCOPClassification',
'WildTypeExposure', 'WildTypeAA', 'MutantAA', 'HasGPMutation',
'PDBResolution', 'PDBResolutionBin', 'NumberOfResidues', 'NumberOfDerivativeErrors',
]
def __init__(self, benchmark_run_name, dataset_cases, analysis_data, contains_experimental_data = True, benchmark_run_directory = None, use_single_reported_value = False,
ddg_analysis_type = None,
calculate_scalar_adjustments = True,
description = None, dataset_description = None, credit = None, generate_plots = True, report_analysis = True, include_derived_mutations = False, recreate_graphs = False, silent = False, burial_cutoff = 0.25,
additional_join_parameters = {},
stability_classication_x_cutoff = 1.0, stability_classication_y_cutoff = 1.0, use_existing_benchmark_data = False, store_data_on_disk = True, misc_dataframe_attributes = {},
terminal_width = 200, restrict_to = set(), remove_cases = set()):
self.contains_experimental_data = contains_experimental_data
self.analysis_sets = [''] # some subclasses store values for multiple analysis sets
self.calculate_scalar_adjustments = calculate_scalar_adjustments
self.csv_headers = copy.deepcopy(self.__class__.csv_headers)
self.additional_join_parameters = additional_join_parameters
if 'ddg_analysis_type' in additional_join_parameters:
if ddg_analysis_type != None:
assert( ddg_analysis_type == additional_join_parameters['ddg_analysis_type']['long_name'] )
self.ddg_analysis_type = additional_join_parameters['ddg_analysis_type']['long_name']
else:
assert( ddg_analysis_type != None )
self.ddg_analysis_type = ddg_analysis_type
if not self.contains_experimental_data:
self.csv_headers.remove('Experimental')
self.csv_headers.remove('AbsoluteError')
self.csv_headers.remove('StabilityClassification')
self.terminal_width = terminal_width # Used for printing the dataframe to a terminal. Set this to be less than the width of your terminal in columns.
self.amino_acid_details, self.CAA, self.PAA, self.HAA = BenchmarkRun.get_amino_acid_details()
self.benchmark_run_name = benchmark_run_name
self.benchmark_run_directory = benchmark_run_directory
self.dataset_cases = copy.deepcopy(dataset_cases)
self.analysis_data = copy.deepcopy(analysis_data)
self.analysis_directory = None
self.subplot_directory = None
self.restrict_to = restrict_to
self.remove_cases = remove_cases
self.use_single_reported_value = use_single_reported_value
self.description = description
self.dataset_description = dataset_description
self.credit = credit
self.generate_plots = generate_plots
self.report_analysis = report_analysis
self.silent = silent
self.include_derived_mutations = include_derived_mutations
self.burial_cutoff = burial_cutoff
self.recreate_graphs = recreate_graphs
self.stability_classication_x_cutoff = stability_classication_x_cutoff
self.stability_classication_y_cutoff = stability_classication_y_cutoff
self.scalar_adjustments = {}
self.store_data_on_disk = store_data_on_disk
self.misc_dataframe_attributes = misc_dataframe_attributes
assert(credit not in self.misc_dataframe_attributes)
self.misc_dataframe_attributes['Credit'] = credit
self.metric_latex_objects = []
self.stored_metrics_df = pandas.DataFrame()
if self.store_data_on_disk:
# This may be False in some cases e.g. when interfacing with a database
self.analysis_csv_input_filepath = os.path.join(self.benchmark_run_directory, 'analysis_input.csv')
self.analysis_json_input_filepath = os.path.join(self.benchmark_run_directory, 'analysis_input.json')
self.analysis_raw_data_input_filepath = os.path.join(self.benchmark_run_directory, 'benchmark_data.json')
self.analysis_pandas_input_filepath = os.path.join(self.benchmark_run_directory, 'analysis_input.pandas')
assert(os.path.exists(self.analysis_csv_input_filepath))
assert(os.path.exists(self.analysis_json_input_filepath))
assert(os.path.exists(self.analysis_raw_data_input_filepath))
else:
self.analysis_csv_input_filepath, self.analysis_json_input_filepath, self.analysis_raw_data_input_filepath, self.analysis_pandas_input_filepath = None, None, None, None
self.use_existing_benchmark_data = use_existing_benchmark_data
self.ddg_analysis_type_description = None
self.filter_data()
def add_stored_metric_to_df(self, case_description, case_length, case_stats):
# Reformat statistics to put a column for each stat type
stats = {}
for case_stat in case_stats:
stats[ case_stat[0] ] = [case_stat[1]]
stats[ case_stat[0] + '-p-val' ] = [case_stat[2]]
df = pandas.DataFrame.from_dict(stats)
num_rows = len(df.index)
df.loc[:,'case_description'] = pandas.Series([case_description for x in range(num_rows)], index=df.index)
df.loc[:,'benchmark_run_name'] = pandas.Series([self.benchmark_run_name for x in range(num_rows)], index=df.index)
df.loc[:,'n'] = pandas.Series([case_length for x in range(num_rows)], index=df.index)
self.stored_metrics_df = pandas.concat([self.stored_metrics_df, df])
def filter_data(self):
'''A very rough filtering step to remove certain data.
todo: It is probably best to do this to the actual dataframe rather than at this point.
todo: We currently only handle one filtering criterion.
'''
if not self.dataset_cases or not self.analysis_data:
# colortext.error('No dataset cases or analysis (DDG) data were passed. Cannot filter the data. If you are using an existing dataframe, this may explain why no data was passed.')
return
if self.restrict_to or self.remove_cases:
# Remove any cases with missing data
available_cases = set(self.analysis_data.keys())
missing_dataset_cases = [k for k in list(self.dataset_cases.keys()) if k not in available_cases]
for k in missing_dataset_cases:
del self.dataset_cases[k]
cases_to_remove = set()
if self.restrict_to:
# Remove cases which do not meet the restriction criteria
if 'Exposed' in self.restrict_to:
for k, v in self.dataset_cases.items():
for m in v['PDBMutations']:
if (m.get('ComplexExposure') or m.get('MonomericExposure')) <= self.burial_cutoff:
cases_to_remove.add(k)
break
if self.remove_cases:
# Remove cases which meet the removal criteria
if 'Exposed' in self.remove_cases:
for k, v in self.dataset_cases.items():
for m in v['PDBMutations']:
if (m.get('ComplexExposure') or m.get('MonomericExposure')) > self.burial_cutoff:
cases_to_remove.add(k)
break
if cases_to_remove:
colortext.warning('Filtering out {0} records.'.format(len(cases_to_remove)))
for k in cases_to_remove:
del self.dataset_cases[k]
del self.analysis_data[k]
def __repr__(self):
'''Simple printer - we print the dataframe.'''
with pandas.option_context('display.max_rows', None, 'display.max_columns', None, 'display.width', self.terminal_width):
return '{0}'.format(self.dataframe)
@staticmethod
def get_analysis_set_fieldname(prefix, analysis_set):
if analysis_set:
return '{0}_{1}'.format(prefix, analysis_set)
else:
return prefix
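# Usage sketch ('SetA' is a hypothetical analysis set name):
#   BenchmarkRun.get_analysis_set_fieldname('Experimental', 'SetA')  # -> 'Experimental_SetA'
#   BenchmarkRun.get_analysis_set_fieldname('Experimental', '')      # -> 'Experimental'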
@staticmethod
def get_amino_acid_details():
if not BenchmarkRun.amino_acid_details:
# Amino acid properties
polarity_map = {'polar' : 'P', 'charged' : 'C', 'hydrophobic' : 'H'}
aromaticity_map = {'aliphatic' : 'L', 'aromatic' : 'R', 'neither' : '-'}
amino_acid_detail_headers = 'Code,Long code,Name,Polarity,Aromaticity,Hydrophobicity pH7,Sidechain acidity,pKa,Average mass,van der Waals volume,Size,Is tiny?'
amino_acid_details_ = [
'A,ALA,Alanine,non-polar,aliphatic,hydrophobic,neutral,NULL,71.0788,67,small,1',
'C,CYS,Cysteine,polar,neither,hydrophilic,neutral,8.7,103.1388,86,small,1',
'D,ASP,Aspartic acid,charged,neither,hydrophilic,acidic,3.9,115.0886,91,small,0',
'E,GLU,Glutamic acid,charged,neither,hydrophilic,acidic,4.5,129.1155,109,large,0',
'F,PHE,Phenylalanine,non-polar,aromatic,hydrophobic,neutral,NULL,147.1766,135,large,0',
'G,GLY,Glycine,polar,neither,hydrophilic,neutral,NULL,57.0519,48,small,1',
'H,HIS,Histidine,charged,neither,hydrophilic,basic,6.04,137.1411,118,large,0',
'I,ILE,Isoleucine,non-polar,aliphatic,hydrophobic,neutral,NULL,113.1594,124,large,0',
'K,LYS,Lysine,charged,neither,hydrophilic,basic,10.54,128.1741,135,large,0',
'L,LEU,Leucine,non-polar,aliphatic,hydrophobic,neutral,NULL,113.1594,124,large,0',
'M,MET,Methionine,non-polar,aliphatic,hydrophobic,neutral,NULL,131.1986,124,large,0',
'N,ASN,Asparagine,polar,neither,hydrophilic,neutral,NULL,114.1039,96,small,0',
'P,PRO,Proline,non-polar,neither,hydrophobic,neutral,NULL,97.1167,90,small,0',
'Q,GLN,Glutamine,polar,neither,hydrophilic,neutral,NULL,128.1307,114,large,0',
'R,ARG,Arginine,charged,neither,hydrophilic,basic,12.48,156.1875,148,large,0',
'S,SER,Serine,polar,neither,hydrophilic,neutral,NULL,87.0782,73,small,1',
'T,THR,Threonine,polar,neither,hydrophilic,neutral,NULL,101.1051,93,small,0',
'V,VAL,Valine,non-polar,aliphatic,hydrophobic,neutral,NULL,99.1326,105,small,0',
'W,TRP,Tryptophan,non-polar,aromatic,hydrophobic,neutral,NULL,186.2132,163,large,0',
'Y,TYR,Tyrosine,polar,aromatic,hydrophobic,neutral,10.46,163.176,141,large,0' # Note: we treat tyrosine as hydrophobic in the polar/charged vs hydrophobic/Non-polar plot
]
amino_acid_detail_headers = [t.strip() for t in amino_acid_detail_headers.split(',') if t.strip()]
for aad in amino_acid_details_:
tokens = aad.split(',')
assert(len(tokens) == len(amino_acid_detail_headers))
d = {}
for x in range(len(amino_acid_detail_headers)):
d[amino_acid_detail_headers[x]] = tokens[x]
aa_code = d['Code']
BenchmarkRun.amino_acid_details[aa_code] = d
del d['Code']
d['Polarity'] = polarity_map.get(d['Polarity'], 'H')
d['Aromaticity'] = aromaticity_map[d['Aromaticity']]
d['Average mass'] = float(d['Average mass'])
d['Is tiny?'] = d['Is tiny?'] == '1' # the tokens are strings so compare against '1', not the integer 1
d['van der Waals volume'] = float(d['van der Waals volume'])
try: d['pKa'] = float(d['pKa'])
except ValueError: d['pKa'] = None
if aa_code == 'Y':
BenchmarkRun.HAA.add(aa_code) # Note: Treating tyrosine as hydrophobic
elif d['Polarity'] == 'C':
BenchmarkRun.CAA.add(aa_code)
elif d['Polarity'] == 'P':
BenchmarkRun.PAA.add(aa_code)
elif d['Polarity'] == 'H':
BenchmarkRun.HAA.add(aa_code)
assert(len(BenchmarkRun.CAA.intersection(BenchmarkRun.PAA)) == 0 and len(BenchmarkRun.PAA.intersection(BenchmarkRun.HAA)) == 0 and len(BenchmarkRun.HAA.intersection(BenchmarkRun.CAA)) == 0)
return BenchmarkRun.amino_acid_details, BenchmarkRun.CAA, BenchmarkRun.PAA, BenchmarkRun.HAA
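# Usage sketch: the four return values are shared class-level caches, so repeated calls are cheap.
#   amino_acid_details, CAA, PAA, HAA = BenchmarkRun.get_amino_acid_details()
#   amino_acid_details['A']['van der Waals volume']  # -> 67.0
#   ('K' in CAA, 'S' in PAA, 'L' in HAA)             # -> (True, True, True)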
def report(self, str, fn = None):
if (not self.silent) and (self.report_analysis):
if fn:
fn(str)
else:
print(str)
def create_analysis_directory(self, analysis_directory = None):
if self.analysis_directory:
return
if analysis_directory:
if not(os.path.isdir(analysis_directory)):
try:
os.makedirs(analysis_directory)
assert(os.path.isdir(analysis_directory))
self.analysis_directory = analysis_directory
except Exception as e:
raise colortext.Exception('An exception occurred creating the analysis directory %s.' % analysis_directory)
else:
self.analysis_directory = tempfile.mkdtemp( prefix = '%s-%s-%s_' % (time.strftime("%y%m%d"), getpass.getuser(), self.benchmark_run_name) )
def create_subplot_directory(self, analysis_directory = None):
if self.subplot_directory:
return
self.create_analysis_directory(analysis_directory = analysis_directory)
self.subplot_directory = os.path.join(self.analysis_directory, self.benchmark_run_name + '_subplots')
if not os.path.isdir(self.subplot_directory):
os.makedirs(self.subplot_directory)
def read_dataframe_from_content(self, hdfstore_blob):
fname = write_temp_file('/tmp', hdfstore_blob, ftype = 'wb')
try:
self.read_dataframe(fname)
os.remove(fname)
except:
os.remove(fname)
raise
def read_dataframe(self, analysis_pandas_input_filepath, read_scalar_adjustments = True, fail_on_missing_scalar_adjustments = False):
remove_file = False
if os.path.splitext(analysis_pandas_input_filepath)[1] == '.gz': # os.path.splitext always returns a 2-tuple, so no length check is needed
content = read_file(analysis_pandas_input_filepath)
analysis_pandas_input_filepath = write_temp_file('/tmp', content, ftype = 'wb')
remove_file = True
# We do not use "self.dataframe = store['dataframe']" as we used append in write_dataframe
self.dataframe = pandas.read_hdf(analysis_pandas_input_filepath, 'dataframe')
store = pandas.HDFStore(analysis_pandas_input_filepath)
self.scalar_adjustments = store['scalar_adjustments'].to_dict()
self.ddg_analysis_type = store['ddg_analysis_type'].to_dict()['ddg_analysis_type']
if read_scalar_adjustments:
try:
self.calculate_scalar_adjustments = store['calculate_scalar_adjustments'].to_dict()['calculate_scalar_adjustments']
except:
if not fail_on_missing_scalar_adjustments:
colortext.warning('The calculate_scalar_adjustments scalar was expected to be found in the pandas dataframe but is missing.')
self.calculate_scalar_adjustments = None
else:
raise
else:
self.calculate_scalar_adjustments = None
self.ddg_analysis_type_description = store['ddg_analysis_type_description'].to_dict()['ddg_analysis_type_description']
# Handle our old dataframe format
try:
self.misc_dataframe_attributes = store['misc_dataframe_attributes'].to_dict()
except: pass
# Handle our new dataframe format
try:
misc_dataframe_attribute_names = list(store['misc_dataframe_attribute_names'].to_dict().keys())
for k in misc_dataframe_attribute_names:
assert(k not in self.misc_dataframe_attributes)
self.misc_dataframe_attributes[k] = store[k].to_dict()[k]
except: pass
if 'Credit' in self.misc_dataframe_attributes:
self.credit = self.misc_dataframe_attributes['Credit']
store.close()
if remove_file:
os.remove(analysis_pandas_input_filepath)
def set_dataframe(self, dataframe, verbose = True):
self.dataframe = dataframe
# Report the SCOPe classification counts
SCOP_classifications = set(dataframe['WildTypeSCOPClassification'].values.tolist())
SCOP_folds = set(dataframe['WildTypeSCOPFold'].values.tolist())
SCOP_classes = set(dataframe['WildTypeSCOPClass'].values.tolist())
self.log('The mutated residues span {0} unique SCOP(e) classifications in {1} unique SCOP(e) folds and {2} unique SCOP(e) classes.'.format(len(SCOP_classifications), len(SCOP_folds), len(SCOP_classes)), colortext.message)
# Plot the optimum y-cutoff over a range of x-cutoffs for the fraction correct metric (when experimental data is available).
# Include the user's cutoff in the range.
if self.contains_experimental_data and self.calculate_scalar_adjustments:
if len(self.analysis_sets) == 0 and len(self.scalar_adjustments):
self.analysis_sets = list(self.scalar_adjustments.keys())
self.log('Determining scalar adjustments with which to scale the predicted values to improve the fraction correct measurement.', colortext.warning)
for analysis_set in self.analysis_sets:
self.scalar_adjustments[analysis_set], plot_filename = self.plot_optimum_prediction_fraction_correct_cutoffs_over_range(analysis_set, min(self.stability_classication_x_cutoff, 0.5), max(self.stability_classication_x_cutoff, 3.0), suppress_plot = True, verbose = verbose)
# Add new columns derived from the adjusted values
for analysis_set in self.analysis_sets:
dataframe[BenchmarkRun.get_analysis_set_fieldname('Predicted_adj', analysis_set)] = dataframe['Predicted'] / self.scalar_adjustments[analysis_set]
dataframe[BenchmarkRun.get_analysis_set_fieldname('AbsoluteError_adj', analysis_set)] = (dataframe[BenchmarkRun.get_analysis_set_fieldname('Experimental', analysis_set)] - dataframe[BenchmarkRun.get_analysis_set_fieldname('Predicted_adj', analysis_set)]).abs()
add_fraction_correct_values_to_dataframe(dataframe, BenchmarkRun.get_analysis_set_fieldname('Experimental', analysis_set), BenchmarkRun.get_analysis_set_fieldname('Predicted_adj', analysis_set), BenchmarkRun.get_analysis_set_fieldname('StabilityClassification_adj', analysis_set), x_cutoff = self.stability_classication_x_cutoff, y_cutoff = self.stability_classication_y_cutoff, ignore_null_values = True)
# Write the dataframe out to CSV
if self.store_data_on_disk:
self.write_dataframe_to_csv(self.analysis_csv_input_filepath)
# Write the dataframe out to JSON
# Note: I rolled my own as dataframe.to_dict(orient = 'records') gives us the correct format but discards the DatasetID (index) field
json_records = {}
indices = dataframe.index.values.tolist()
for i in indices:
json_records[i] = {}
for k, column_values in dataframe.to_dict().items():
for i, v in column_values.items():
assert(k not in json_records[i])
json_records[i][k] = v
if self.analysis_json_input_filepath and self.store_data_on_disk:
write_file(self.analysis_json_input_filepath, json.dumps(json_records, indent = 4, sort_keys=True))
# Write the values computed in this function out to disk
analysis_pandas_input_filepath = self.analysis_pandas_input_filepath
if self.store_data_on_disk:
if os.path.exists(analysis_pandas_input_filepath):
os.remove(analysis_pandas_input_filepath)
else:
analysis_pandas_input_filepath = write_temp_file('/tmp', '', ftype = 'wb')
try:
analysis_pandas_input_filepath = self.write_dataframe(analysis_pandas_input_filepath)
dataframe_blob = read_file(analysis_pandas_input_filepath, binary = True)
if not self.store_data_on_disk:
os.remove(analysis_pandas_input_filepath)
except Exception as e:
if not self.store_data_on_disk:
os.remove(analysis_pandas_input_filepath)
raise
return dataframe_blob
def write_dataframe(self, analysis_pandas_input_filepath):
store = pandas.HDFStore(analysis_pandas_input_filepath)
# Using "store['dataframe'] = self.dataframe" throws a warning since some String columns contain null values i.e. mixed content
# To get around this, we use the append function (see https://github.com/pydata/pandas/issues/4415)
store.append('dataframe', self.dataframe)
store['scalar_adjustments'] = pandas.Series(self.scalar_adjustments)
store['ddg_analysis_type'] = pandas.Series(dict(ddg_analysis_type = self.ddg_analysis_type))
store['calculate_scalar_adjustments'] = pandas.Series(dict(calculate_scalar_adjustments = self.calculate_scalar_adjustments))
store['ddg_analysis_type_description'] = pandas.Series(dict(ddg_analysis_type_description = self.ddg_analysis_type_description))
store['misc_dataframe_attribute_names'] = pandas.Series(dict.fromkeys(self.misc_dataframe_attributes, True))
for k, v in self.misc_dataframe_attributes.items():
# misc_dataframe_attributes may have mixed content so we add the contents individually
assert((k not in list(store.keys())) and ('/' + k not in list(store.keys())))
store[k] = pandas.Series({k : v})
store.close()
with gzip.open(analysis_pandas_input_filepath + '.gz', 'wb') as f:
f.write(read_file(analysis_pandas_input_filepath, binary = True))
os.remove(analysis_pandas_input_filepath)
return analysis_pandas_input_filepath + '.gz'
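# Round-trip sketch (hypothetical filepath): write_dataframe gzips the HDF5 store and returns
# the new '.gz' path; read_dataframe accepts that path directly (this assumes klab's read_file
# transparently gunzips '.gz' files, which the '.gz' branch in read_dataframe relies on):
#   gz_path = br.write_dataframe('/tmp/analysis_input.pandas')
#   br.read_dataframe(gz_path)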
def create_dataframe(self, pdb_data = {}, verbose = True):
'''This function creates a dataframe (a matrix with one row per dataset record and one column for fields of interest)
from the benchmark run and the dataset data.
For rows with multiple mutations, there may be multiple values for some fields e.g. wildtype residue exposure.
We take the approach of marking these records as None (to be read as: N/A).
Another approach is to take averages of continuous and binary values.
This function also determines scalar_adjustments used to scale the predictions to try to improve the fraction
correct score and the MAE.
'''
if self.use_existing_benchmark_data and self.store_data_on_disk and os.path.exists(self.analysis_pandas_input_filepath):
self.read_dataframe(self.analysis_pandas_input_filepath)
return
analysis_data = self.analysis_data
dataset_cases = self.dataset_cases
# Create XY data
if self.store_data_on_disk:
self.log('Creating the analysis input file %s and human-readable CSV and JSON versions %s and %s.' % (self.analysis_pandas_input_filepath, self.analysis_csv_input_filepath, self.analysis_json_input_filepath))
if len(analysis_data) > len(dataset_cases):
raise colortext.Exception('ERROR: There seems to be an error - there are more predictions than cases in the dataset. Exiting.')
elif len(analysis_data) < len(dataset_cases):
self.log('\nWARNING: %d cases missing for analysis; there are %d predictions in the output directory but %d cases in the dataset. The analysis below does not cover the complete dataset.\n' % (len(dataset_cases) - len(analysis_data), len(analysis_data), len(dataset_cases)), colortext.error)
# ddg_analysis_type can be set to 'DDG', 'DDG_Top[x]' (e.g. 'DDG_Top3'), etc.
# 'DDG' uses the value reported by the application. For the Rosetta application ddg_monomer by Kellogg et al., this is the value output at the end of a run (which is not the recommended value - the publication uses take_lowest := 3).
# 'DDG_Top3' (generated by default) uses the metric from Kellogg et al. based on the three lowest scoring mutant structures and the three lowest scoring wildtype structures
if self.use_single_reported_value or self.ddg_analysis_type == 'DDG':
assert( self.ddg_analysis_type == 'DDG' )
self.ddg_analysis_type_description = '\nThe predicted DDG value per case is the single DDG value reported by the application.'
elif self.ddg_analysis_type[4:].startswith('Top') and int(self.ddg_analysis_type[7:]) == 3:
take_lowest = int(self.ddg_analysis_type[7:])
self.ddg_analysis_type_description = '\nThe predicted DDG value per case is computed using the {0} lowest-scoring mutant structures and the {0} lowest-scoring wildtype structures as in the paper by Kellogg et al.'.format(take_lowest)
elif self.ddg_analysis_type[4:].startswith('Top'):
take_lowest = int(self.ddg_analysis_type[7:])
self.ddg_analysis_type_description = '\nThe predicted DDG value per case is computed using the {0} lowest-scoring mutant structures and the {0} lowest-scoring wildtype structures.'.format(take_lowest)
elif self.ddg_analysis_type[4:].startswith('Random'):
ddg_analysis_type = self.ddg_analysis_type[4:]
if len( ddg_analysis_type ) > len('Random'):
self.ddg_analysis_type_description = '\nThe predicted DDG value per case is computed by pairing {0} random mutant structures with {0} random wildtype structures.'.format( int(ddg_analysis_type[len('Random'):]) )
else:
self.ddg_analysis_type_description = '\nThe predicted DDG value per case is computed by pairing random mutant structures with random wildtype structures.'
elif self.ddg_analysis_type[4:] == 'AvgAllPairs':
self.ddg_analysis_type_description = '\nThe predicted DDG value per case is computed by constructing all pairs of all mutant structures with all wildtype structures.'
elif self.ddg_analysis_type[4:] == 'MatchPairs':
self.ddg_analysis_type_description = '\nThe predicted DDG value per case is computed by matching each wildtype structure with its corresponding (round number) mutant structure.'
elif self.ddg_analysis_type[4:].startswith( 'CplxBoltzWT' ):
assert( len(self.ddg_analysis_type[4:]) > len( 'CplxBoltzWT' ) )
self.ddg_analysis_type_description = '\nThe predicted DDG value per case is computed by boltzmann weighting matching DDG scores on the wildtype complex score (temperature %.2f).' % float(self.ddg_analysis_type[4+len('CplxBoltzWT'):])
elif self.ddg_analysis_type[4:].startswith( 'CplxBoltzMut' ):
assert( len(self.ddg_analysis_type[4:]) > len( 'CplxBoltzMut' ) )
self.ddg_analysis_type_description = '\nThe predicted DDG value per case is computed by boltzmann weighting matching DDG scores on the mutant complex score (temperature %.2f).' % float(self.ddg_analysis_type[4+len('CplxBoltzMut'):])
elif self.ddg_analysis_type[4:].startswith( 'CplxBoltzBoth' ):
assert( len(self.ddg_analysis_type[4:]) > len( 'CplxBoltzBoth' ) )
self.ddg_analysis_type_description = '\nThe predicted DDG value per case is computed by boltzmann weighting matching DDG scores on both the mutant complex score and the wildtype complex score, added together (temperature %.2f).' % float(self.ddg_analysis_type[4+len('CplxBoltzBoth'):])
else:
raise Exception("Couldn't parse ddg_analysis_type: " + str(ddg_analysis_type))
self.log(self.ddg_analysis_type_description)
# Set the PDB input path
if not pdb_data:
try:
pdb_data_ = json.loads(read_file('../../input/json/pdbs.json'))
for k, v in pdb_data_.items():
pdb_data[k.upper()] = v
except Exception as e:
self.log('input/json/pdbs.json could not be found - PDB-specific analysis cannot be performed.', colortext.error)
else:
# Normalize to upper case to avoid matching problems later
new_pdb_data = {}
for k, v in pdb_data.items():
assert(k.upper() not in new_pdb_data)
new_pdb_data[k.upper()] = v
pdb_data = new_pdb_data
# Determine columns specific to the prediction data to be added
additional_prediction_data_columns = set()
for adv in list(analysis_data.values()):
additional_prediction_data_columns = additional_prediction_data_columns.union(set(adv.keys()))
assert(len(additional_prediction_data_columns.intersection(set(self.csv_headers))) == 0)
assert(self.ddg_analysis_type in additional_prediction_data_columns)
additional_prediction_data_columns.remove(self.ddg_analysis_type)
additional_prediction_data_columns = sorted(additional_prediction_data_columns)
# Initialize the dataframe
self.reset_csv_headers() # this is necessary for the DBBenchmarkRun class which is missing the Experimental, AbsoluteError, and StabilityClassification columns since it adds new columns per analysis set.
res = pandas.DataFrame(columns=(self.csv_headers + additional_prediction_data_columns))
dataframe_columns = self.csv_headers + additional_prediction_data_columns
additional_prediction_data_columns = tuple(additional_prediction_data_columns)
# Create the dataframe
dataframe_table = {}
indices = []
for record_id, predicted_data in sorted(analysis_data.items()):
dataframe_record = self.get_dataframe_row(dataset_cases, predicted_data, pdb_data, record_id, additional_prediction_data_columns)
if dataframe_record:
indices.append(dataframe_record['DatasetID'])
for h in dataframe_columns:
dataframe_table[h] = dataframe_table.get(h, [])
dataframe_table[h].append(dataframe_record[h])
assert(sorted(dataframe_columns) == sorted(dataframe_record.keys()))
dataframe = pandas.DataFrame(dataframe_table, index = indices)
return self.set_dataframe(dataframe, verbose = verbose)
def write_dataframe_to_csv(self, output_path):
# Write the dataframe out to CSV
self.dataframe.to_csv(output_path, sep = ',', header = True)
def reset_csv_headers(self):
pass
def is_this_record_a_derived_mutation(self, record):
'''Different callers to this class store this information differently so we make it class-dependent and subclass.'''
if record['DerivedMutation']:
return True
return False
def get_record_mutations(self, record):
'''Different callers should use the same name here but they currently do not.'''
return record['Mutations']
def get_experimental_ddg_values(self, record, dataframe_record):
dataframe_record['Experimental'] = record['DDG']
def compute_stability_classification(self, predicted_data, record, dataframe_record):
'''Calculate the stability classification for this case.'''
stability_classification, stability_classication_x_cutoff, stability_classication_y_cutoff = None, self.stability_classication_x_cutoff, self.stability_classication_y_cutoff
if record['DDG'] != None:
stability_classification = fraction_correct([record['DDG']], [predicted_data[self.ddg_analysis_type]], x_cutoff = stability_classication_x_cutoff, y_cutoff = stability_classication_y_cutoff)
stability_classification = int(stability_classification)
assert(stability_classification == 0 or stability_classification == 1)
dataframe_record['StabilityClassification'] = stability_classification
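# Sketch of the assumed fraction_correct semantics for a single (experimental, predicted) pair:
# each value is binned as destabilizing (>= cutoff), stabilizing (<= -cutoff), or neutral, and
# the classification is 1.0 when the bins agree and 0.0 otherwise, e.g. with x_cutoff = y_cutoff = 1.0:
#   fraction_correct([2.5], [1.7])   # -> 1.0 (both destabilizing)
#   fraction_correct([2.5], [-0.2])  # -> 0.0 (destabilizing vs neutral)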
def compute_absolute_error(self, predicted_data, record, dataframe_record):
'''Calculate the absolute error for this case.'''
absolute_error = None
if record['DDG'] != None and predicted_data[self.ddg_analysis_type] != None: # guard against missing values, as compute_stability_classification does
absolute_error = abs(record['DDG'] - predicted_data[self.ddg_analysis_type])
dataframe_record['AbsoluteError'] = absolute_error
def get_record_pdb_file_id(self, record):
return record['PDBFileID']
def count_residues(self, record, pdb_record):
'''Count the number of residues in the chains for the case.'''
mutations = self.get_record_mutations(record)
pdb_chains = set([m['Chain'] for m in mutations])
assert(len(pdb_chains) == 1) # we expect monomeric cases
pdb_chain = pdb_chains.pop()
return len(pdb_record.get('Chains', {}).get(pdb_chain, {}).get('Sequence', ''))
def get_dataframe_row(self, dataset_cases, predicted_data, pdb_data, record_id, additional_prediction_data_columns):
'''Create a dataframe row for a prediction.'''
# Ignore derived mutations if appropriate
record = dataset_cases[record_id]
if self.is_this_record_a_derived_mutation(record) and not self.include_derived_mutations:
return None
amino_acid_details, CAA, PAA, HAA = self.amino_acid_details, self.CAA, self.PAA, self.HAA
burial_cutoff = self.burial_cutoff
# Initialize variables. For ambiguous cases where the set of distinct values has multiple values, we default to None
residue_charge, residue_charges = None, set()
exposure, exposures = None, set()
volume_change, volume_changes = None, set()
record_wtaa, wtaas = None, set()
record_mutaa, mutaas = None, set()
DSSPSimpleSSType, DSSPSimpleSSTypes = None, set()
DSSPType, DSSPTypes = None, set()
DSSPExposure, DSSPExposures = None, set()
scops = set()
mutation_string = []
num_derivative_errors = predicted_data.get('Errors', {}).get('Derivative error count', 0)
run_time = predicted_data.get('RunTime', None)
max_memory = predicted_data.get('MaxMemory', None)
mutations = self.get_record_mutations(record)
for m in mutations:
wtaa = m['WildTypeAA']
mutaa = m['MutantAA']
mutation_string.append('{0} {1}{2}{3}'.format(m['Chain'], m['WildTypeAA'], m['ResidueID'], m['MutantAA']))
# Residue types and chain
wtaas.add(wtaa)
mutaas.add(mutaa)
if m.get('SCOP class'):
scops.add(m['SCOP class'])
DSSPSimpleSSTypes.add(m['DSSPSimpleSSType'])
DSSPTypes.add(m['DSSPType'])
DSSPExposures.add(m['DSSPExposure'])
# Burial
if m['DSSPExposure'] != None:
if m['DSSPExposure'] > burial_cutoff:
exposures.add('E')
else:
exposures.add('B')
else:
exposures.add(None)
# Volume
if amino_acid_details[wtaa]['van der Waals volume'] < amino_acid_details[mutaa]['van der Waals volume']:
volume_changes.add('SL')
elif amino_acid_details[wtaa]['van der Waals volume'] > amino_acid_details[mutaa]['van der Waals volume']:
volume_changes.add('LS')
elif amino_acid_details[wtaa]['van der Waals volume'] == amino_acid_details[mutaa]['van der Waals volume']:
volume_changes.add('XX')
# Charge
if ((wtaa in CAA or wtaa in PAA) and (mutaa in HAA)) or ((mutaa in CAA or mutaa in PAA) and (wtaa in HAA)):
residue_charges.add('Change')
elif (wtaa in CAA or wtaa in PAA) and (mutaa in CAA or mutaa in PAA):
residue_charges.add('Polar/Charged')
elif (wtaa in HAA) and (mutaa in HAA):
residue_charges.add('Hydrophobic/Non-polar')
else:
raise colortext.Exception('Should not reach here.')
# Create a string representing the mutations (useful for labeling rather than analysis)
mutation_string = '; '.join(mutation_string)
# Taking unique values, determine the residue charges of the wildtype and mutant residues, the wildtype residue exposure, and the relative change in van der Waals volume
if len(residue_charges) == 1: residue_charge = residue_charges.pop()
if len(exposures) == 1: exposure = exposures.pop()
if len(volume_changes) == 1: volume_change = volume_changes.pop()
# Taking unique values, determine the wildtype and mutant residue types
all_residues = wtaas.union(mutaas)
if len(wtaas) == 1: record_wtaa = wtaas.pop()
if len(mutaas) == 1: record_mutaa = mutaas.pop()
# Taking unique values, determine the secondary structure and residue exposures from the DSSP data in the dataset
if len(DSSPSimpleSSTypes) == 1: DSSPSimpleSSType = DSSPSimpleSSTypes.pop()
if len(DSSPTypes) == 1: DSSPType = DSSPTypes.pop()
if len(DSSPExposures) == 1: DSSPExposure = DSSPExposures.pop()
# Determine the SCOP classification from the SCOPe data in the dataset
full_scop_classification, scop_class, scop_fold = None, None, None
if len(scops) > 1:
self.log('Warning: There is more than one SCOPe class for record {0}.'.format(record_id), colortext.warning)
elif len(scops) == 1:
full_scop_classification = scops.pop()
scop_tokens = full_scop_classification.split('.')
scop_class = scop_tokens[0]
if len(scop_tokens) > 1:
scop_fold = '.'.join(scop_tokens[0:2])
# Partition the data by PDB resolution with bins: N/A, <1.5, 1.5-<2.0, 2.0-<2.5, >=2.5
pdb_record = pdb_data.get(self.get_record_pdb_file_id(record).upper()) or {} # guard against missing PDB data so the .get calls below do not fail
pdb_resolution_bin = None
pdb_resolution = pdb_record.get('Resolution')
if pdb_resolution != None:
if pdb_resolution < 1.5:
pdb_resolution_bin = '<1.5'
elif pdb_resolution < 2.0:
pdb_resolution_bin = '1.5-2.0'
elif pdb_resolution < 2.5:
pdb_resolution_bin = '2.0-2.5'
else:
pdb_resolution_bin = '>=2.5'
pdb_resolution_bin = pdb_resolution_bin or 'N/A'
# Mark mutations involving glycine or proline
has_gp_mutation = 'G' in all_residues or 'P' in all_residues
# Create the data matrix
dataframe_record = dict(
DatasetID = record_id,
PDBFileID = self.get_record_pdb_file_id(record),
Mutations = mutation_string,
NumberOfMutations = len(mutations),
Predicted = predicted_data[self.ddg_analysis_type],
ResidueCharges = residue_charge,
VolumeChange = volume_change,
HasGPMutation = int(has_gp_mutation),
WildTypeDSSPType = DSSPType,
WildTypeDSSPSimpleSSType = DSSPSimpleSSType,
WildTypeDSSPExposure = DSSPExposure,
WildTypeSCOPClass = scop_class,
WildTypeSCOPFold = scop_fold,
WildTypeSCOPClassification = full_scop_classification,
WildTypeExposure = exposure,
WildTypeAA = record_wtaa,
MutantAA = record_mutaa,
PDBResolution = pdb_record.get('Resolution'),
PDBResolutionBin = pdb_resolution_bin,
NumberOfResidues = self.count_residues(record, pdb_record) or None,
NumberOfDerivativeErrors = num_derivative_errors,
RunTime = run_time,
MaxMemory = max_memory,
)
for c in additional_prediction_data_columns:
dataframe_record[c] = predicted_data.get(c)
if self.contains_experimental_data:
# These fields are particular to dataframes containing experimental values e.g. for benchmarking runs or for
# datasets where we have associated experimental values
self.get_experimental_ddg_values(record, dataframe_record)
self.compute_stability_classification(predicted_data, record, dataframe_record)
self.compute_absolute_error(predicted_data, record, dataframe_record)
return dataframe_record
def analyze_all(self, analysis_directory = None):
'''This function runs the analysis and creates the plots and summary file.'''
for analysis_set in self.analysis_sets:
self.analyze(analysis_set, analysis_directory = analysis_directory)
def analyze(self, analysis_set = '', analysis_directory = None):
'''This function runs the analysis and creates the plots and summary file.'''
self.calculate_metrics(analysis_set, analysis_directory = analysis_directory)
if self.generate_plots:
self.plot(analysis_set, analysis_directory = analysis_directory)
def full_analysis(self, analysis_set, output_directory, verbose = True, compile_pdf = True, quick_plots = False):
'''Combines calculate_metrics, write_dataframe_to_csv, and plot'''
if not os.path.isdir(output_directory):
os.makedirs(output_directory)
self.analysis_directory = output_directory
self.calculate_metrics(analysis_set = analysis_set, analysis_directory = output_directory, verbose = verbose)
self.write_dataframe_to_csv( os.path.join(output_directory, 'data.csv') )
# Return latex_report
return self.plot(analysis_set = analysis_set, analysis_directory = output_directory, matplotlib_plots = True, verbose = verbose, compile_pdf = compile_pdf, quick_plots = quick_plots)
def get_definitive_name(self, unique_ajps, join_character = '-', prepend_label = True):
"""
Generates a definitive name for this benchmark run object, based on
unique additional join parameters (as passed)
"""
name = ''
for ajp in unique_ajps:
if len(name) > 0:
name += join_character
if prepend_label:
name += str(ajp) + '_'
name += str(self.additional_join_parameters[ajp]['short_name'])
if name == '':
name = 'ddg-benchmark'
return name
@staticmethod
def get_unique_ajps( benchmark_runs ):
"""
Determines which join parameters are unique
"""
br_ajps = {}
for br in benchmark_runs:
for ajp in br.additional_join_parameters:
if ajp not in br_ajps:
br_ajps[ajp] = set()
br_ajps[ajp].add( br.additional_join_parameters[ajp]['short_name'] )
unique_ajps = []
for ajp in br_ajps:
if len( br_ajps[ajp] ) > 1:
unique_ajps.append( ajp )
return unique_ajps
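# Usage sketch (hypothetical join parameter names): if two runs differ only in
#   additional_join_parameters = {'score_method': {'short_name': 'talaris'}}  # vs 'ref'
# then get_unique_ajps returns ['score_method'] and get_definitive_name yields
# 'score_method_talaris' / 'score_method_ref' (or 'talaris' / 'ref' with prepend_label = False).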
@staticmethod
def get_common_subset(
benchmark_runs,
verbose = False,
):
# Note: DataFrame.as_matrix() was removed in pandas 1.0; .values is the equivalent here
common_ids = set( benchmark_runs[0].dataframe.dropna(subset=['Predicted'])[['DatasetID']].values.flatten() )
for br in benchmark_runs[1:]:
common_ids = common_ids.intersection( set(br.dataframe.dropna(subset=['Predicted'])[['DatasetID']].values.flatten()) )
if verbose:
print('Common dataset size will be:', len(common_ids))
if len(common_ids) == 0:
unique_ajps = BenchmarkRun.get_unique_ajps( benchmark_runs ) # this name was previously undefined in this scope
for br in benchmark_runs:
br_ids = sorted( list( br.dataframe.dropna(subset=['Predicted'])[['DatasetID']].values.flatten() ) )
print(br.get_definitive_name(unique_ajps, join_character = '-'), br_ids[:10])
raise Exception('No data to make report on!')
for br in benchmark_runs:
br.set_dataframe( br.dataframe.loc[br.dataframe['DatasetID'].isin(common_ids)], verbose = verbose )
return common_ids
@staticmethod
def analyze_multiple(
benchmark_runs,
analysis_sets = [],
# Singleton arguments
analysis_directory = None,
remove_existing_analysis_directory = True,
quick_plots = False,
use_multiprocessing = True,
verbose = True,
compile_pdf = True,
limit_to_complete_presence = True,
all_by_all_comparisons = False,
):
'''This function runs the analysis for multiple input settings'''
if remove_existing_analysis_directory and os.path.isdir(analysis_directory):
shutil.rmtree(analysis_directory)
unique_ajps = BenchmarkRun.get_unique_ajps( benchmark_runs )
if limit_to_complete_presence:
BenchmarkRun.get_common_subset( benchmark_runs, verbose = not use_multiprocessing )
unique_ajps = BenchmarkRun.get_unique_ajps( benchmark_runs )
### Process each benchmark run object individually
if use_multiprocessing:
pool = mp.Pool()
singleton_chapters = []
calculated_brs = []
def save_latex_report(t):
br, unique_name, latex_report = t
latex_report.set_title_page( title = unique_name )
singleton_chapters.append( latex_report )
calculated_brs.append( br )
for br in benchmark_runs:
for analysis_set in analysis_sets:
unique_name = br.get_definitive_name(unique_ajps, join_character = '\n')
filepath_unique_name = br.get_definitive_name(unique_ajps, join_character = '-')
subdir = os.path.join(analysis_directory, os.path.join('analysis_sets', os.path.join(analysis_set, filepath_unique_name) ) )
if use_multiprocessing:
pool.apply_async( _full_analysis_mp_alias, ( br, analysis_set, subdir, unique_name, False, quick_plots ), callback = save_latex_report )
else:
print('Individual report saving in:', subdir)
save_latex_report( _full_analysis_mp_alias( br, analysis_set, subdir, unique_name, True, quick_plots ) )
if use_multiprocessing:
pool.close()
pool.join()
benchmark_runs = calculated_brs
### Pointwise all-by-all comparison
comparison_chapters = []
if all_by_all_comparisons:
if use_multiprocessing:
pool = mp.Pool()
def save_latex_report(t):
latex_report = t
comparison_chapters.append( latex_report )
comparisons_subdir = os.path.join(analysis_directory, 'comparison_analysis_sets')
for analysis_set in analysis_sets:
analysis_set_subdir = os.path.join(comparisons_subdir, analysis_set)
for i, br_i in enumerate(benchmark_runs):
for j, br_j in enumerate(benchmark_runs):
if i > j:
if use_multiprocessing:
br_i_copy = copy.deepcopy( br_i )
br_j_copy = copy.deepcopy( br_j )
pool.apply_async( _compare_mp_alias, (br_i_copy, br_j_copy, analysis_set, analysis_set_subdir, unique_ajps, False), callback = save_latex_report )
else:
save_latex_report( _compare_mp_alias(br_i, br_j, analysis_set, analysis_set_subdir, unique_ajps, True) )
if use_multiprocessing:
pool.close()
pool.join()
intro_report = lr.LatexReport()
intro_report.set_title_page('All data comparison')
# All data series comparison
# Get joined stats comparison dataframe
stats_df = BenchmarkRun.get_stats_comparison_dataframe(
benchmark_runs, unique_ajps,
output_csv = os.path.join(analysis_directory, 'analysis_metrics.csv'),
)
intro_report.add_section_page( title = 'Case comparison tables' )
intro_report.content.extend( BenchmarkRun.make_case_description_tables( stats_df ) )
intro_report.add_section_page('All data plots')
subplot_directory = os.path.join(analysis_directory, 'subplots')
if not os.path.isdir( subplot_directory ):
os.makedirs(subplot_directory)
runtime_df = benchmark_runs[0]._get_dataframe_columns( ['RunTime'] )
runtime_df.columns = [ benchmark_runs[0].get_definitive_name(unique_ajps, join_character = '\n', prepend_label = False) ]
for br in benchmark_runs[1:]:
inner_runtime_df = br._get_dataframe_columns( ['RunTime'] )
inner_runtime_df.columns = [ br.get_definitive_name(unique_ajps, join_character = '\n', prepend_label = False) ]
runtime_df = runtime_df.merge(
inner_runtime_df,
left_index = True,
right_index = True,
)
intro_report.add_plot(
general_matplotlib.plot_box(
runtime_df,
output_directory = subplot_directory,
plot_title = 'Prediction Run Times',
output_name = 'runtimes',
fig_height = 6.7,
fig_width = 10,
ylabel = 'Run time (minutes)',
xlabel = 'Prediction Set',
verbose = verbose,
xtick_fontsize = 4,
log_y = True,
label_n = False,
rotation_angle = 45,
),
plot_title = 'Run times'
)
# Report concatenation
main_latex_report = lr.LatexReport()
main_latex_report.set_title_page(r'$\Delta\Delta G$ Report') # raw string avoids the invalid '\D' escape sequence
main_latex_report.add_chapter(intro_report)
for chapter in comparison_chapters:
main_latex_report.add_chapter(chapter)
for chapter in singleton_chapters:
main_latex_report.add_chapter(chapter)
main_latex_report.generate_pdf_report(
os.path.join( analysis_directory, 'report.pdf' ),
verbose = verbose,
compile_pdf = compile_pdf,
)
print(os.path.join( analysis_directory, 'report.pdf' ))
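# Note on the multiprocessing pattern in analyze_multiple: pool.apply_async returns immediately
# and the callback runs in the parent process with each worker's return value, so the report
# chapters are accumulated safely in completion order. A minimal standalone sketch:
#   import multiprocessing as mp
#   results = []
#   pool = mp.Pool()
#   for x in range(4):
#       pool.apply_async(pow, (x, 2), callback = results.append)
#   pool.close(); pool.join()
#   # results now contains 0, 1, 4, 9 (order depends on worker completion)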
def compare(self, other, analysis_set, output_directory, unique_ajps, verbose = True, compile_pdf = True):
"""
Generate comparison latex report in specified output directory
Returns LatexReport object
"""
self_unique_name = self.get_definitive_name(unique_ajps)
other_unique_name = other.get_definitive_name(unique_ajps)
output_directory = os.path.join(output_directory, os.path.join(self_unique_name, other_unique_name) )
assert( not os.path.isdir( output_directory ) )
subplot_directory = os.path.join(output_directory, 'plots')
if not os.path.isdir(subplot_directory):
os.makedirs(subplot_directory)
report = lr.LatexReport( table_of_contents = False )
# Construct dataframe comparing our predictions to other's predictions
both_predictions = pandas.concat(
[
self.add_identifying_columns_to_df(self.dataframe[['Predicted']], unique_ajps),
other.add_identifying_columns_to_df(other.dataframe[['Predicted']], unique_ajps),
],
join = 'outer',
).sort_index()
both_predictions_subtracted = subtract_row_pairs_for_display(
both_predictions,
output_csv = os.path.join(output_directory, 'predictions_v_predictions.csv'),
merge_df = self.dataframe,
verbose = verbose,
)
# Construct dataframe comparing our diff with experimental values to other's
self_diff = self.get_pred_minus_exp_dataframe(analysis_set, unique_ajps)
other_diff = other.get_pred_minus_exp_dataframe(analysis_set, unique_ajps)
diffs_df = pandas.concat(
[self_diff, other_diff],
join = 'outer',
)
diffs_df = subtract_row_pairs_for_display(
diffs_df,
output_csv = os.path.join(output_directory, 'diffs_v_diffs.csv'),
merge_df = self.dataframe,
verbose = verbose,
)
report.set_title_page(
'%s vs %s' % (
self.get_definitive_name(unique_ajps, join_character = '\n'),
other.get_definitive_name(unique_ajps, join_character = '\n')
)
)
predictions_v_predictions_df = self.dataframe[['Predicted']].merge(
other.dataframe[['Predicted']],
left_index = True,
right_index = True,
)
predictions_v_predictions_df.columns = [self_unique_name, other_unique_name]
report.add_plot( general_matplotlib.make_corr_plot(predictions_v_predictions_df, predictions_v_predictions_df.columns.values[0], predictions_v_predictions_df.columns.values[1], output_directory = subplot_directory, plot_title = 'Prediction comparison', axis_label_size = 8.0, output_name = 'vs_scatter', fig_height = 7, fig_width = 8, verbose = verbose, plot_11_line = True ), plot_title = 'Experimental vs. Predicted scatterplot (with density binning)' )
diff_v_diff_dataframe = self.get_pred_minus_exp_dataframe(analysis_set).merge(
other.get_pred_minus_exp_dataframe(analysis_set),
left_index = True,
right_index = True,
)
report.add_section_page( title = 'Plots' )
diff_v_diff_dataframe.columns = [self_unique_name, other_unique_name]
report.add_plot( general_matplotlib.make_corr_plot(diff_v_diff_dataframe, diff_v_diff_dataframe.columns.values[0], diff_v_diff_dataframe.columns.values[1], output_directory = subplot_directory, plot_title = 'Error v. Error', axis_label_size = 7.0, output_name = 'diff_vs_scatter', fig_height = 7, fig_width = 8, verbose = verbose, plot_11_line = True ), plot_title = 'Outliers --- Error (Predicted - Experimental) v. error. \\ x-axis=%s \\ y-axis=%s' % (diff_v_diff_dataframe.columns.values[0], diff_v_diff_dataframe.columns.values[1]) )
report.add_section_page( title = 'Tables' )
report.content.append( lr.LatexPandasTable(
diffs_df, float_format = float_format_2sigfig,
caption_text = 'Outliers --- Comparison of error (Predicted - Experimental) for first prediction set (%s) vs second set of predictions (%s). Values sorted by descending absolute delta.' % (self_unique_name, other_unique_name),
) )
report.content.append( lr.LatexPandasTable(
both_predictions_subtracted, float_format = float_format_2sigfig,
caption_text = 'Direct comparison of predicted values. Values sorted by descending absolute delta.',
) )
# Get joined stats comparison dataframe
for case_table in BenchmarkRun.make_case_description_tables(BenchmarkRun.get_stats_comparison_dataframe(
[self, other], unique_ajps,
output_csv = os.path.join(output_directory, 'comparison_metrics.csv'),
)):
report.content.append( case_table )
report.generate_pdf_report(
os.path.join(output_directory, 'comparison.pdf'),
verbose = verbose,
compile_pdf = compile_pdf,
)
if verbose:
print('Comparison report saved to:', os.path.join(output_directory, 'comparison.pdf'))
return report
@staticmethod
def make_case_description_tables(stats_df, sort_by = "Pearson's R"):
stats_columns = ['n', 'Fraction correct', "Pearson's R", 'MAE']
stats_columns_names = ['n', 'FC', "R", 'MAE']
select_columns = list( stats_columns )
select_columns.append( 'case_description' )
stats_df = stats_df[ select_columns ]
# Put tables for complete datasets first
first_cases_to_process = set( ['complete dataset', 'complete dataset (scaled)'] )
other_cases = set( stats_df['case_description'].unique() )
first_cases_to_process.intersection_update( other_cases )
other_cases.difference_update( first_cases_to_process )
other_cases = sorted( list( other_cases ) )
cases = sorted( list( first_cases_to_process ) )
cases.extend( other_cases)
# Make subcase tables
report_content = []
for case in cases:
if case == 'complete dataset (scaled)':
inner_sort_by = 'MAE'
sort_ascending = True
else:
inner_sort_by = sort_by
sort_ascending = False
inner_df = stats_df[ stats_df['case_description'] == case ]
inner_df = inner_df.sort_values(by = inner_sort_by, ascending = sort_ascending)
inner_df = inner_df[ stats_columns ]
inner_df.columns = stats_columns_names
report_content.append( lr.LatexPandasTable(
inner_df,
float_format = float_format_3sigfig,
caption_text = case + ". Abbreviations: FC = fraction correct, R = Pearson's R" ,
) )
return report_content
@staticmethod
def make_specific_case_table(stats_df, case, sort_by = "Pearson's R"):
stats_columns = ['n', 'Fraction correct', "Pearson's R", 'MAE']
stats_columns_names = ['n', 'FC', "R", 'MAE']
select_columns = list( stats_columns )
select_columns.append('case_description')
stats_df = stats_df[ select_columns ]
inner_df = stats_df[ stats_df['case_description'] == case ]
inner_df = inner_df.sort_values(by = sort_by, ascending = False)
inner_df = inner_df[ stats_columns ]
inner_df.columns = stats_columns_names
return lr.LatexPandasTable(
inner_df,
float_format = float_format_3sigfig,
caption_text = case + ". Abbreviations: FC = fraction correct, R = Pearson's R" ,
)
@staticmethod
def get_stats_comparison_dataframe(benchmark_runs, unique_ajps, output_csv = None):
annotated_stats_dfs = [
br.add_identifying_columns_to_df(
br.stored_metrics_df,
unique_ajps,
reset_index = True,
)
for br in benchmark_runs
]
stats_df = pandas.concat(annotated_stats_dfs)
stats_df = stats_df.sort_index()
if output_csv:
stats_df.to_csv( output_csv )
return stats_df
def get_pred_minus_exp_dataframe(self, analysis_set, unique_ajps = None):
exp_name = BenchmarkRun.get_analysis_set_fieldname('Experimental', analysis_set)
return_df = self.dataframe['Predicted'].subtract( self.dataframe[exp_name] ).to_frame()
return_df.columns = ['delta' + '-' + exp_name]
return_df.index.name = 'ID'
if unique_ajps:
return self.add_identifying_columns_to_df(return_df, unique_ajps)
else:
return return_df
def add_identifying_columns_to_df(self, df, unique_ajps, reset_index = False):
if not reset_index:
df.index.name = 'ID'
for ajp in unique_ajps:
df[ajp] = self.additional_join_parameters[ajp]['short_name']
df.set_index(ajp, append = True, inplace = True)
if reset_index:
df = df.reset_index( level = 0 ).drop('level_0', axis = 1)
return df
def calculate_metrics(self, analysis_set = '', analysis_directory = None, drop_missing = True, case_n_cutoff = 5, verbose = True):
'''Calculates the main metrics for the benchmark run and writes them to file and LaTeX object.'''
dataframe = self.dataframe
if drop_missing:
dataframe = dataframe.dropna(subset=['Predicted'])
if self.calculate_scalar_adjustments:
scalar_adjustment = self.scalar_adjustments[analysis_set]
experimental_field = BenchmarkRun.get_analysis_set_fieldname('Experimental', analysis_set)
self.metric_latex_objects.append( lr.LatexPageSection('Data tables', None, True) )
intro_text = lr.LatexText( text = self.ddg_analysis_type_description )
header_row = ['Statistic name', '{Value}', 'p-value']
stats_column_format = ['l', 'S[table-format=3.2]', 'l']
if self.include_derived_mutations:
running_analysis_str = '\nDerived mutations are included in the analysis:'
else:
running_analysis_str = '\nDerived mutations are omitted from the analysis:'
intro_text.add_text(running_analysis_str)
if verbose:
self.report(running_analysis_str, fn = colortext.message)
classification_cutoffs_str = 'The stability classification cutoffs are: Experimental=%0.2f kcal/mol, Predicted=%0.2f energy units.' % (self.stability_classication_x_cutoff, self.stability_classication_y_cutoff)
intro_text.add_text( classification_cutoffs_str )
if verbose:
self.report(classification_cutoffs_str, fn = colortext.warning)
self.metric_latex_objects.append( intro_text )
amino_acid_details, CAA, PAA, HAA = self.amino_acid_details, self.CAA, self.PAA, self.HAA
# This dict is used for the print-statement below
volume_groups = {}
for aa_code, aa_details in amino_acid_details.items():
v = int(aa_details['van der Waals volume']) # Note: I only convert to int here to match the old script behavior and because all volumes are integer values so it does not do any harm
volume_groups[v] = volume_groups.get(v, [])
volume_groups[v].append(aa_code)
section_latex_objs = []
section_latex_objs.append( lr.LatexSubSection(
'Breakdown by volume',
'A case is considered a small-to-large (resp. large-to-small) mutation if all of the wildtype residues have a smaller (resp. larger) van der Waals volume than the corresponding mutant residue. The order is defined as %s so some cases are considered to have no change in volume e.g. MET -> LEU.' % (' < '.join([''.join(sorted(v)) for k, v in sorted(volume_groups.items())]))
) )
for subcase in ('XX', 'SL', 'LS'):
subcase_dataframe = dataframe[dataframe['VolumeChange'] == subcase]
table_header = 'Statistics - %s (%d cases)' % (BenchmarkRun.by_volume_descriptions[subcase], len(subcase_dataframe))
if len(subcase_dataframe) >= 8:
list_stats = format_stats(get_xy_dataset_statistics_pandas(subcase_dataframe, experimental_field, 'Predicted', fcorrect_x_cutoff = self.stability_classication_x_cutoff, fcorrect_y_cutoff = self.stability_classication_y_cutoff, ignore_null_values = True, run_standardized_analysis = False), return_string = False)
section_latex_objs.append( lr.LatexTable(
header_row,
list_stats,
column_format = stats_column_format,
header_text = table_header
))
self.add_stored_metric_to_df(BenchmarkRun.by_volume_descriptions[subcase], len(subcase_dataframe), list_stats)
else:
section_latex_objs.append( lr.LatexText(
"Not enough data for analysis of mutations '%s' (at least 8 cases are required)." % BenchmarkRun.by_volume_descriptions[subcase]
))
if verbose:
self.report('\n'.join([x.generate_plaintext() for x in section_latex_objs]), fn = colortext.sprint)
self.metric_latex_objects.extend( section_latex_objs )
section_latex_objs = []
section_latex_objs.append( lr.LatexSubSection(
'Mutations to alanine',
'And mutations not to alanine'
))
subcase_dataframe = dataframe[dataframe['MutantAA'] == 'A']
if len(subcase_dataframe) > 0:
table_header = 'Statistics - all mutations to alanine (including multiple mutations, if they are all to alanine) (%d cases)' % len(subcase_dataframe)
list_stats = format_stats(get_xy_dataset_statistics_pandas(subcase_dataframe, experimental_field, 'Predicted', fcorrect_x_cutoff = self.stability_classication_x_cutoff, fcorrect_y_cutoff = self.stability_classication_y_cutoff, ignore_null_values = True, run_standardized_analysis = False), return_string = False)
section_latex_objs.append( lr.LatexTable(
header_row,
list_stats,
column_format = stats_column_format,
header_text = table_header
))
self.add_stored_metric_to_df('all mutations to alanine', len(subcase_dataframe), list_stats)
subcase_dataframe = dataframe[(dataframe['MutantAA'] == 'A') & (dataframe['NumberOfMutations'] == 1)]
if len(subcase_dataframe) > 0:
table_header = 'Statistics - single mutations to alanine (%d cases)' % len(subcase_dataframe)
list_stats = format_stats(get_xy_dataset_statistics_pandas(subcase_dataframe, experimental_field, 'Predicted', fcorrect_x_cutoff = self.stability_classication_x_cutoff, fcorrect_y_cutoff = self.stability_classication_y_cutoff, ignore_null_values = True, run_standardized_analysis = False), return_string = False)
section_latex_objs.append( lr.LatexTable(
header_row,
list_stats,
column_format = stats_column_format,
header_text = table_header
))
self.add_stored_metric_to_df('single mutations to alanine', len(subcase_dataframe), list_stats)
subcase_dataframe = dataframe[(dataframe['MutantAA'] == 'A') & (dataframe['NumberOfMutations'] != 1)]
if len(subcase_dataframe) > 0:
table_header = 'Statistics - multiple mutations to alanine (%d cases)' % len(subcase_dataframe)
list_stats = format_stats(get_xy_dataset_statistics_pandas(subcase_dataframe, experimental_field, 'Predicted', fcorrect_x_cutoff = self.stability_classication_x_cutoff, fcorrect_y_cutoff = self.stability_classication_y_cutoff, ignore_null_values = True, run_standardized_analysis = False), return_string = False)
section_latex_objs.append( lr.LatexTable(
header_row,
list_stats,
column_format = stats_column_format,
header_text = table_header
))
self.add_stored_metric_to_df('multiple mutations to alanine', len(subcase_dataframe), list_stats)
subcase_dataframe = dataframe[dataframe['MutantAA'] != 'A']
if len(subcase_dataframe) > 0:
table_header = 'Statistics - mutations to anything other than alanine (including multiple mutations that include a non-alanine mutation) (%d cases)' % len(subcase_dataframe)
list_stats = format_stats(get_xy_dataset_statistics_pandas(subcase_dataframe, experimental_field, 'Predicted', fcorrect_x_cutoff = self.stability_classication_x_cutoff, fcorrect_y_cutoff = self.stability_classication_y_cutoff, ignore_null_values = True, run_standardized_analysis = False), return_string = False)
section_latex_objs.append( lr.LatexTable(
header_row,
list_stats,
column_format = stats_column_format,
header_text = table_header
))
self.add_stored_metric_to_df('mutations not to alanine', len(subcase_dataframe), list_stats)
if verbose and len(section_latex_objs) > 0:
self.report('\n'.join([x.generate_plaintext() for x in section_latex_objs]), fn = colortext.sprint)
self.metric_latex_objects.extend( section_latex_objs )
section_latex_objs = []
subcase_dataframe = dataframe[dataframe['HasGPMutation'] == 1]
if len(subcase_dataframe) > 0:
table_header = 'Statistics - cases with G or P (%d cases)' % len(subcase_dataframe)
list_stats = format_stats(get_xy_dataset_statistics_pandas(subcase_dataframe, experimental_field, 'Predicted', fcorrect_x_cutoff = self.stability_classication_x_cutoff, fcorrect_y_cutoff = self.stability_classication_y_cutoff, ignore_null_values = True, run_standardized_analysis = False), return_string = False)
section_latex_objs.append( lr.LatexTable(
header_row,
list_stats,
column_format = stats_column_format,
header_text = table_header
))
self.add_stored_metric_to_df('cases with G or P', len(subcase_dataframe), list_stats)
subcase_dataframe = dataframe[dataframe['HasGPMutation'] == 0]
if len(subcase_dataframe) > 0:
table_header = 'Statistics - cases without G or P (%d cases)' % len(subcase_dataframe)
list_stats = format_stats(get_xy_dataset_statistics_pandas(subcase_dataframe, experimental_field, 'Predicted', fcorrect_x_cutoff = self.stability_classication_x_cutoff, fcorrect_y_cutoff = self.stability_classication_y_cutoff, ignore_null_values = True, run_standardized_analysis = False), return_string = False)
section_latex_objs.append( lr.LatexTable(
header_row,
list_stats,
column_format = stats_column_format,
header_text = table_header
))
self.add_stored_metric_to_df('cases without G or P', len(subcase_dataframe), list_stats)
if len(section_latex_objs) > 0:
section_latex_objs.insert( 0, lr.LatexSubSection(
'Separating out mutations involving glycine or proline.',
'These cases may involve changes to secondary structure so we separate them out here.'
))
if verbose and len(section_latex_objs) > 0:
self.report('\n'.join([x.generate_plaintext() for x in section_latex_objs]), fn = colortext.sprint)
self.metric_latex_objects.extend( section_latex_objs )
#### Number of mutations
section_latex_objs = []
section_latex_objs.append( lr.LatexSubSection(
'Number of mutations',
))
subcase_dataframe = dataframe[dataframe['NumberOfMutations'] == 1]
if len(subcase_dataframe) >= case_n_cutoff:
table_header = 'Statistics - single mutations (%d cases)' % len(subcase_dataframe)
list_stats = format_stats(get_xy_dataset_statistics_pandas(subcase_dataframe, experimental_field, 'Predicted', fcorrect_x_cutoff = self.stability_classication_x_cutoff, fcorrect_y_cutoff = self.stability_classication_x_cutoff, ignore_null_values = True, run_standardized_analysis = False), return_string = False)
section_latex_objs.append( lr.LatexTable(
header_row,
list_stats,
column_format = stats_column_format,
header_text = table_header
))
self.add_stored_metric_to_df('single mutations', len(subcase_dataframe), list_stats)
subcase_dataframe = dataframe[dataframe['NumberOfMutations'] > 1]
if len(subcase_dataframe) >= case_n_cutoff:
table_header = 'Statistics - multiple mutations (%d cases)' % len(subcase_dataframe)
list_stats = format_stats(get_xy_dataset_statistics_pandas(subcase_dataframe, experimental_field, 'Predicted', fcorrect_x_cutoff = self.stability_classication_x_cutoff, fcorrect_y_cutoff = self.stability_classication_x_cutoff, ignore_null_values = True, run_standardized_analysis = False), return_string = False)
section_latex_objs.append( lr.LatexTable(
header_row,
list_stats,
column_format = stats_column_format,
header_text = table_header
))
self.add_stored_metric_to_df('multiple mutations', len(subcase_dataframe), list_stats)
# subcase_dataframe = dataframe[(dataframe.NumberOfMutations >= 2) & (dataframe.NumberOfMutations <= 5)]
# if len(subcase_dataframe) >= case_n_cutoff:
# table_header = 'Statistics - 2-4 mutations (%d cases)' % len(subcase_dataframe)
# list_stats = format_stats(get_xy_dataset_statistics_pandas(subcase_dataframe, experimental_field, 'Predicted', fcorrect_x_cutoff = self.stability_classication_x_cutoff, fcorrect_y_cutoff = self.stability_classication_x_cutoff, ignore_null_values = True, run_standardized_analysis = False), return_string = False)
# section_latex_objs.append( LatexTable(
# header_row,
# list_stats,
# column_format = stats_column_format,
# header_text = table_header
# ))
# self.add_stored_metric_to_df('2-4 mutations', len(subcase_dataframe), list_stats)
# mutation_cutoffs = [5, 10, 20, 50, 100, 200]
# for i, mutation_cutoff in enumerate(mutation_cutoffs):
# if len(mutation_cutoffs) - 1 == i:
# break
# next_cutoff = mutation_cutoffs[i+1]
# subcase_dataframe = dataframe[(dataframe.NumberOfMutations >= mutation_cutoff) & (dataframe.NumberOfMutations <= next_cutoff)]
# if len(subcase_dataframe) >= case_n_cutoff:
# table_header = 'Statistics - %d $<=$ number of mutations $<=$ %d (%d cases)' % (mutation_cutoff, next_cutoff, len(subcase_dataframe))
# list_stats = format_stats(get_xy_dataset_statistics_pandas(subcase_dataframe, experimental_field, 'Predicted', fcorrect_x_cutoff = self.stability_classication_x_cutoff, fcorrect_y_cutoff = self.stability_classication_x_cutoff, ignore_null_values = True, run_standardized_analysis = False), return_string = False)
# section_latex_objs.append( LatexTable(
# header_row,
# list_stats,
# column_format = stats_column_format,
# header_text = table_header
# ))
# self.add_stored_metric_to_df('%d <= mutations<= %d' % (mutation_cutoff, next_cutoff), len(subcase_dataframe), list_stats)
if verbose:
self.report('\n'.join([x.generate_plaintext() for x in section_latex_objs]), fn = colortext.sprint)
self.metric_latex_objects.extend( section_latex_objs )
####
#### Complete dataset (scaled)
if self.calculate_scalar_adjustments:
section_latex_objs = []
section_latex_objs.append( lr.LatexSubSection(
'Entire dataset using a scaling factor of 1/%.03f to improve the fraction correct metric.' % scalar_adjustment,
'Warning: Results in this section use an averaged scaling factor to improve the value for the fraction correct metric. This scalar will vary over benchmark runs so these results should not be interpreted as performance results; they should be considered as what could be obtained if the predicted values were scaled by a "magic" value.'
))
table_header = 'Statistics - complete dataset (scaled) (%d cases)' % len(dataframe)
# For these statistics, we assume that we have reduced any scaling issues and use the same cutoff for the Y-axis as the user specified for the X-axis
list_stats = format_stats(get_xy_dataset_statistics_pandas(dataframe, experimental_field, BenchmarkRun.get_analysis_set_fieldname('Predicted_adj', analysis_set), fcorrect_x_cutoff = self.stability_classication_x_cutoff, fcorrect_y_cutoff = self.stability_classication_x_cutoff, ignore_null_values = True, run_standardized_analysis = False), return_string = False)
section_latex_objs.append( lr.LatexTable(
header_row,
list_stats,
column_format = stats_column_format,
header_text = table_header
))
self.add_stored_metric_to_df('complete dataset (scaled)', len(dataframe), list_stats)
if verbose:
self.report('\n'.join([x.generate_plaintext() for x in section_latex_objs]), fn = colortext.sprint)
self.metric_latex_objects.extend( section_latex_objs )
####
section_latex_objs = []
section_latex_objs.append( lr.LatexSubSection(
'Entire dataset',
'Overall statistics'
))
table_header = 'Statistics - complete dataset (%d cases)' % len(dataframe)
# For these statistics, we assume that we have reduced any scaling issues and use the same cutoff for the Y-axis as the user specified for the X-axis
list_stats = format_stats(get_xy_dataset_statistics_pandas(dataframe, experimental_field, 'Predicted', fcorrect_x_cutoff = self.stability_classication_x_cutoff, fcorrect_y_cutoff = self.stability_classication_x_cutoff, ignore_null_values = True, run_standardized_analysis = False), return_string = False)
section_latex_objs.append( lr.LatexTable(
header_row,
list_stats,
column_format = stats_column_format,
header_text = table_header
))
self.add_stored_metric_to_df('complete dataset', len(dataframe), list_stats)
if verbose:
self.report('\n'.join([x.generate_plaintext() for x in section_latex_objs]), fn = colortext.sprint)
self.metric_latex_objects.extend( section_latex_objs )
# There is probably a better way of writing the pandas code here
record_with_most_errors = (dataframe[['PDBFileID', 'NumberOfDerivativeErrors', 'Mutations']].sort_values(by = 'NumberOfDerivativeErrors')).tail(1)
record_index = record_with_most_errors.index.tolist()[0]
pdb_id, num_errors, mutation_str = dataframe.loc[record_index, 'PDBFileID'], dataframe.loc[record_index, 'NumberOfDerivativeErrors'], dataframe.loc[record_index, 'Mutations']
if num_errors > 0:
error_detection_text = '\n\nDerivative errors were found in the run. Record #{0} - {1}, {2} - has the largest number ({3}) of derivative errors.'.format(record_index, pdb_id, mutation_str, num_errors)
self.metric_latex_objects.append( lr.LatexText(error_detection_text, color = 'red') )
if verbose:
self.report(error_detection_text, fn = colortext.warning)
# Write the analysis to file
self.create_analysis_directory(analysis_directory)
self.metrics_filepath = os.path.join(self.analysis_directory, '{0}_metrics.txt'.format(self.benchmark_run_name))
write_file(self.metrics_filepath, '\n'.join([x.generate_plaintext() for x in self.metric_latex_objects]))
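# A minimal usage sketch (names hypothetical): full_analysis() combines these steps, but
# they can also be invoked individually, e.g.:
#   br = BenchmarkRun('my_run', dataset_cases, analysis_data)
#   br.create_dataframe()
#   br.calculate_metrics(analysis_set = analysis_set, analysis_directory = out_dir)
#   br.plot(analysis_set = analysis_set, analysis_directory = out_dir)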
def plot(self, analysis_set = '', analysis_directory = None, matplotlib_plots = True, verbose = True, compile_pdf = True, quick_plots = False):
if matplotlib_plots:
from klab.plot import general_matplotlib
old_generate_plots = self.generate_plots # todo: hacky - replace with option to return graphs in memory
self.generate_plots = True
self.create_subplot_directory(analysis_directory) # Create a directory for plots
# Save metric data (if it exists)
if len(self.stored_metrics_df.index) > 0:
self.stored_metrics_df.to_csv( os.path.join(analysis_directory, 'metrics.csv') )
analysis_set_prefix = ''
if analysis_set:
analysis_set_prefix = '_{0}'.format(analysis_set)
analysis_file_prefix = os.path.abspath( os.path.join( self.subplot_directory, self.benchmark_run_name + analysis_set_prefix + '_' ) )
dataframe = self.dataframe
latex_report = lr.LatexReport()
latex_report.content.extend( self.metric_latex_objects ) # Add on table sections generated in calculate_metrics
# Create a subtitle for the first page
subtitle = self.benchmark_run_name
if self.description:
subtitle += ' ' + self.description
if self.dataset_description and self.dataset_description != self.description:
subtitle += ' ' + self.dataset_description
if analysis_set and analysis_set != self.dataset_description:
subtitle += ' ({0})'.format(analysis_set)
# Plot which y-cutoff yields the best value for the fraction correct metric
scalar_adjustment, scalar_adjustment_calculation_plot = self.plot_optimum_prediction_fraction_correct_cutoffs_over_range(analysis_set, min(self.stability_classication_x_cutoff, 0.5), max(self.stability_classication_x_cutoff, 3.0), suppress_plot = False, analysis_file_prefix = analysis_file_prefix, verbose = verbose)
if self.calculate_scalar_adjustments:
assert(self.scalar_adjustments[analysis_set] == scalar_adjustment)
# Plot the optimum y-cutoff given the specified or default x-cutoff
optimal_predictive_cutoff_plot = self.plot_optimum_prediction_fraction_correct_cutoffs(analysis_set, analysis_file_prefix, self.stability_classication_x_cutoff, verbose = verbose)
# Identify the column with the experimental values for the analysis_set
experimental_series = BenchmarkRun.get_analysis_set_fieldname('Experimental', analysis_set)
latex_report.set_title_page( title = '$\Delta\Delta G$ Report --- %s' % self.benchmark_run_name, subtitle = subtitle )
if self.credit:
latex_report.add_to_abstract('Prediction set scoring credit: ' + self.credit)
latex_report.add_section_page( title = 'Main plots' )
if matplotlib_plots:
latex_report.add_plot( general_matplotlib.plot_scatter(self.dataframe, experimental_series, 'Predicted', output_directory = self.subplot_directory, density_plot = True, plot_title = 'Experimental vs. Prediction', output_name = 'experimental_prediction_scatter', fig_height = 7, fig_width = 9, verbose = verbose ), plot_title = 'Experimental vs. Predicted scatterplot (with density binning)' )
latex_report.add_plot( general_matplotlib.make_corr_plot(self.dataframe, experimental_series, 'Predicted', output_directory = self.subplot_directory, plot_title = 'Experimental vs. Prediction', fig_height = 7, fig_width = 9, verbose = verbose ), plot_title = 'Experimental vs. Predicted scatterplot, with histograms and linear fit statistics. The p-value here (if present) indicates the likelihood that a random set of this many points would produce a correlation at least as strong as the observed correlation.' )
if not quick_plots:
if matplotlib_plots:
single_mutations_dataframe = dataframe[dataframe['NumberOfMutations'] == 1]
if len(single_mutations_dataframe) > 0:
latex_report.add_plot( general_matplotlib.make_corr_plot(single_mutations_dataframe, experimental_series, 'Predicted', output_name = 'single_mutations_histogram_fit_scatter', output_directory = self.subplot_directory, plot_title = 'Experimental vs. Prediction', fig_height = 6, fig_width = 7, verbose = verbose), plot_title = 'Single mutations data subset' )
multiple_mutations_dataframe = dataframe[dataframe['NumberOfMutations'] > 1]
if len(multiple_mutations_dataframe) > 0:
latex_report.add_plot( general_matplotlib.make_corr_plot(multiple_mutations_dataframe, experimental_series, 'Predicted', output_name = 'multiple_mutations_histogram_fit_scatter', output_directory = self.subplot_directory, plot_title = 'Experimental vs. Prediction', fig_height = 6, fig_width = 7, verbose = verbose), plot_title = 'Multiple mutations data subset' )
subcase_dataframe = dataframe[dataframe['MutantAA'] == 'A']
output_name = 'all_alanine_mutations_fit_scatter'
plot_title = 'Experimental vs. Prediction'
fig_title = 'All mutations to alanine data subset'
if len(subcase_dataframe) > 0:
latex_report.add_plot( general_matplotlib.make_corr_plot(subcase_dataframe, experimental_series, 'Predicted', output_name = output_name, output_directory = self.subplot_directory, plot_title = plot_title, fig_height = 6, fig_width = 7, verbose = verbose), plot_title = fig_title )
subcase_dataframe = dataframe[(dataframe['MutantAA'] == 'A') & (dataframe['NumberOfMutations'] == 1)]
output_name = 'single_alanine_mutations_fit_scatter'
plot_title = 'Experimental vs. Prediction'
fig_title = 'All single mutations to alanine data subset'
if len(subcase_dataframe) > 0:
latex_report.add_plot( general_matplotlib.make_corr_plot(subcase_dataframe, experimental_series, 'Predicted', output_name = output_name, output_directory = self.subplot_directory, plot_title = plot_title, fig_height = 6, fig_width = 7, verbose = verbose), plot_title = fig_title )
subcase_dataframe = dataframe[(dataframe['MutantAA'] == 'A') & (dataframe['NumberOfMutations'] != 1)]
output_name = 'multiple_alanine_mutations_fit_scatter'
plot_title = 'Experimental vs. Prediction'
fig_title = 'All multiple mutations to alanine data subset'
if len(subcase_dataframe) > 0:
latex_report.add_plot( general_matplotlib.make_corr_plot(subcase_dataframe, experimental_series, 'Predicted', output_name = output_name, output_directory = self.subplot_directory, plot_title = plot_title, fig_height = 6, fig_width = 7, verbose = verbose), plot_title = fig_title )
subcase_dataframe = dataframe[dataframe['MutantAA'] != 'A']
output_name = 'all_non_alanine_mutations_fit_scatter'
plot_title = 'Experimental vs. Prediction'
fig_title = 'All mutations to anything but alanine data subset'
if len(subcase_dataframe) > 0:
latex_report.add_plot( general_matplotlib.make_corr_plot(subcase_dataframe, experimental_series, 'Predicted', output_name = output_name, output_directory = self.subplot_directory, plot_title = plot_title, fig_height = 6, fig_width = 7, verbose = verbose), plot_title = fig_title )
latex_report.add_plot(
general_matplotlib.plot_box(
self._get_dataframe_columns( ['RunTime'] ),
output_directory = self.subplot_directory,
plot_title = 'Prediction Run Time',
output_name = 'runtime',
fig_height = 6,
fig_width = 7,
ylabel = 'Run time (minutes)',
xlabel = 'Prediction Set',
verbose = verbose,
),
plot_title = 'Run time'
)
# Plot a histogram of the absolute errors
absolute_error_series = BenchmarkRun.get_analysis_set_fieldname('AbsoluteError', analysis_set)
latex_report.add_plot(self.plot_absolute_error_histogram('{0}absolute_errors'.format(analysis_file_prefix), absolute_error_series, analysis_set = analysis_set, verbose = verbose), plot_title = 'Absolute error histogram')
latex_report.add_section_page( title = 'Adjustments', subtext = 'Optimization of the cutoffs\nfor the fraction correct metric' )
latex_report.add_plot(scalar_adjustment_calculation_plot, plot_title = 'Scalar adjustment calculation plot')
latex_report.add_plot(optimal_predictive_cutoff_plot, plot_title = 'Optimal predictive cutoff plot')
# Create a scatterplot and histogram for the adjusted results
if self.calculate_scalar_adjustments:
adjusted_predicted_value_series = BenchmarkRun.get_analysis_set_fieldname('Predicted_adj', analysis_set)
adjusted_absolute_error_series = BenchmarkRun.get_analysis_set_fieldname('AbsoluteError_adj', analysis_set)
main_adj_scatterplot = '{0}main_adjusted_with_scalar_scatterplot.png'.format(analysis_file_prefix)
if not(os.path.exists(main_adj_scatterplot) and not(self.recreate_graphs)):
if verbose:
self.log('Saving scatterplot to %s.' % main_adj_scatterplot)
plot_pandas(dataframe, experimental_series, adjusted_predicted_value_series, main_adj_scatterplot, RInterface.correlation_coefficient_gplot, title = 'Experimental vs. Prediction: adjusted scale')
latex_report.add_plot(main_adj_scatterplot, plot_title = 'Main adj. scatterplot')
latex_report.add_plot(self.plot_absolute_error_histogram('{0}absolute_errors_adjusted_with_scalar'.format(analysis_file_prefix), adjusted_absolute_error_series, analysis_set = analysis_set, verbose = verbose), plot_title = 'Absolute errors adjusted with scalar')
# Scatterplots colored by residue context / change on mutation
latex_report.add_section_page( title = 'Residue context' )
latex_report.add_plot(self.scatterplot_generic('Experimental vs. Prediction - Residue charges', self.scatterplot_charges, '{0}scatterplot_charges.png'.format(analysis_file_prefix), analysis_set = analysis_set, verbose = verbose), plot_title = 'Experimental vs. Prediction - Residue charges')
latex_report.add_plot(self.scatterplot_generic('Experimental vs. Prediction - Exposure (cutoff = %0.2f)' % self.burial_cutoff, self.scatterplot_exposure, '{0}scatterplot_exposure.png'.format(analysis_file_prefix), analysis_set = analysis_set, verbose = verbose), plot_title = 'Experimental vs. Prediction - Exposure (cutoff = %0.2f)' % self.burial_cutoff)
latex_report.add_plot(self.scatterplot_generic('Experimental vs. Prediction - Change in volume', self.scatterplot_volume, '{0}scatterplot_volume.png'.format(analysis_file_prefix), analysis_set = analysis_set, verbose = verbose), plot_title = 'Experimental vs. Prediction - Change in volume')
latex_report.add_plot(self.scatterplot_generic('Experimental vs. Prediction - Wildtype residue s.s.', self.scatterplot_ss, '{0}scatterplot_ss.png'.format(analysis_file_prefix), analysis_set = analysis_set, verbose = verbose), plot_title = 'Experimental vs. Prediction - Wildtype residue s.s.')
# Scatterplots colored by SCOPe classification
SCOP_classifications = set(dataframe['WildTypeSCOPClassification'].values.tolist())
SCOP_folds = set(dataframe['WildTypeSCOPFold'].values.tolist())
SCOP_classes = set(dataframe['WildTypeSCOPClass'].values.tolist())
scop_section_page_generated = False
if len(SCOP_classes) <= 25:
if len(SCOP_classes) == 1 and ((None in SCOP_classes) or (numpy.isnan(sorted(SCOP_classes)[0]) or not(sorted(SCOP_classes)[0]))):
if verbose:
print('There are no defined SCOP classes. Skipping the SCOP class plot.')
else:
if not scop_section_page_generated:
latex_report.add_section_page( title = 'SCOPe classes' )
scop_section_page_generated = True
latex_report.add_plot(self.scatterplot_generic('Experimental vs. Prediction - WT residue SCOP class', self.scatterplot_scop_class, '{0}scatterplot_scop_class.png'.format(analysis_file_prefix), analysis_set = analysis_set, verbose = verbose), plot_title = 'Experimental vs. Prediction - WT residue SCOP class')
if len(SCOP_folds) <= 25:
if len(SCOP_folds) == 1 and ((None in SCOP_folds) or (numpy.isnan(sorted(SCOP_folds)[0]) or not(sorted(SCOP_folds)[0]))):
if verbose:
print('There are no defined SCOP folds. Skipping the SCOP fold plot.')
else:
if not scop_section_page_generated:
latex_report.add_section_page( title = 'SCOPe classes' )
scop_section_page_generated = True
latex_report.add_plot(self.scatterplot_generic('Experimental vs. Prediction - WT residue SCOP fold', self.scatterplot_scop_fold, '{0}scatterplot_scop_fold.png'.format(analysis_file_prefix), analysis_set = analysis_set, verbose = verbose), plot_title = 'Experimental vs. Prediction - WT residue SCOP fold')
if len(SCOP_classifications) <= 25:
if len(SCOP_classifications) == 1 and ((None in SCOP_classifications) or (numpy.isnan(sorted(SCOP_classifications)[0]) or not(sorted(SCOP_classifications)[0]))):
if verbose:
print('There are no defined SCOP classifications. Skipping the SCOP classification plot.')
else:
if not scop_section_page_generated:
latex_report.add_section_page( title = 'SCOPe classes' )
scop_section_page_generated = True
latex_report.add_plot(self.scatterplot_generic('Experimental vs. Prediction - WT residue SCOP classification', self.scatterplot_scop_classification, '{0}scatterplot_scop_classification.png'.format(analysis_file_prefix), analysis_set = analysis_set, verbose = verbose), plot_title = 'Experimental vs. Prediction - WT residue SCOP classification')
# Scatterplots colored by residue types
latex_report.add_section_page( title = 'Residue types' )
latex_report.add_plot(self.scatterplot_generic('Experimental vs. Prediction - Wildtype', self.scatterplot_wildtype_aa, '{0}scatterplot_wildtype_aa.png'.format(analysis_file_prefix), analysis_set = analysis_set, verbose = verbose), plot_title = 'Experimental vs. Prediction - Wildtype')
latex_report.add_plot(self.scatterplot_generic('Experimental vs. Prediction - Mutant', self.scatterplot_mutant_aa, '{0}scatterplot_mutant_aa.png'.format(analysis_file_prefix), analysis_set = analysis_set, verbose = verbose), plot_title = 'Experimental vs. Prediction - Mutant')
latex_report.add_plot(self.scatterplot_generic('Experimental vs. Prediction - Glycine/Proline', self.scatterplot_GP, '{0}scatterplot_gp.png'.format(analysis_file_prefix), analysis_set = analysis_set, verbose = verbose), plot_title = 'Experimental vs. Prediction - Glycine/Proline')
# Scatterplots colored PDB resolution and chain length
latex_report.add_section_page( title = 'Chain properties' )
latex_report.add_plot(self.scatterplot_generic('Experimental vs. Prediction - PDB resolution', self.scatterplot_pdb_res_binned, '{0}scatterplot_pdb_res_binned.png'.format(analysis_file_prefix), analysis_set = analysis_set, verbose = verbose), plot_title = 'Experimental vs. Prediction - PDB resolution')
latex_report.add_plot(self.scatterplot_generic('Experimental vs. Prediction - Chain length', self.scatterplot_chain_length, '{0}scatterplot_chain_length.png'.format(analysis_file_prefix), analysis_set = analysis_set, verbose = verbose), plot_title = 'Experimental vs. Prediction - Chain length')
# Errors / debugging
latex_report.add_section_page( title = 'Errors / debugging' )
latex_report.add_plot(self.plot_derivative_error_barchart(analysis_file_prefix, verbose = verbose), plot_title = 'Derivative error barchart')
####### End extended plots section #########
# Copy the analysis input files into the analysis directory - these files are duplicated but it makes it easier to share data
if self.analysis_csv_input_filepath:
shutil.copy(self.analysis_csv_input_filepath, self.analysis_directory)
if self.analysis_json_input_filepath:
shutil.copy(self.analysis_json_input_filepath, self.analysis_directory)
if self.analysis_raw_data_input_filepath:
shutil.copy(self.analysis_raw_data_input_filepath, self.analysis_directory)
# Combine the plots into a PDF file
latex_report.generate_pdf_report(
os.path.join( self.analysis_directory, '{0}_benchmark_plots.pdf'.format(self.benchmark_run_name) ),
verbose = verbose,
compile_pdf = compile_pdf,
)
if verbose:
self.log('Report written to: ' + os.path.join( self.analysis_directory, '{0}_benchmark_plots.pdf'.format(self.benchmark_run_name) ) )
self.generate_plots = old_generate_plots
return latex_report
def determine_optimum_fraction_correct_cutoffs(self, analysis_set, dataframe, stability_classication_x_cutoff):
'''Determines the value of stability_classication_y_cutoff which approximately maximizes the fraction correct
measurement w.r.t. a fixed stability_classication_x_cutoff. This function uses discrete sampling and so it
may miss the actual maximum. We use two rounds of sampling: i) a coarse-grained sampling (0.1 energy unit
intervals); and ii) finer sampling (0.01 unit intervals).
In both rounds, we choose the one corresponding to a lower value for the cutoff in cases of multiple maxima.'''
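# Worked example of the two-round sampling (hypothetical numbers): with x_cutoff = 1.0,
# round 1 scans y-cutoffs 0.5, 0.6, ..., 7.9; if the best fraction correct occurs at
# y = 1.3, round 2 rescans 1.20, 1.21, ..., 1.39 to refine the estimate.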
# Determine the value for the fraction correct y-value (predicted) cutoff which will approximately yield the
# maximum fraction-correct value
fraction_correct_range = []
experimental_field = BenchmarkRun.get_analysis_set_fieldname('Experimental', analysis_set)
# Round 1 : Coarse sampling. Test 0.5 -> 7.9 in 0.1 increments
for z in range(5, 80):
w = float(z) / 10.0
fraction_correct_range.append((w, fraction_correct_pandas(dataframe, experimental_field, 'Predicted', x_cutoff = stability_classication_x_cutoff, y_cutoff = w, ignore_null_values = True)))
max_value_cutoff, max_value = fraction_correct_range[0][0], fraction_correct_range[0][1]
for p in fraction_correct_range:
if p[1] > max_value:
max_value_cutoff, max_value = p[0], p[1]
# Round 2 : Finer sampling. Test max_value_cutoff - 0.1 -> max_value_cutoff + 0.1 in 0.01 increments
for z in range(int((max_value_cutoff - 0.1) * 100), int((max_value_cutoff + 0.1) * 100)):
w = float(z) / 100.0
fraction_correct_range.append((w, fraction_correct_pandas(dataframe, experimental_field, 'Predicted', x_cutoff = stability_classication_x_cutoff, y_cutoff = w, ignore_null_values = True)))
fraction_correct_range = sorted(set(fraction_correct_range)) # sort so that we find the lowest cutoff value in case of duplicate fraction correct values
max_value_cutoff, max_value = fraction_correct_range[0][0], fraction_correct_range[0][1]
for p in fraction_correct_range:
if p[1] > max_value:
max_value_cutoff, max_value = p[0], p[1]
return max_value_cutoff, max_value, fraction_correct_range
def plot_optimum_prediction_fraction_correct_cutoffs(self, analysis_set, analysis_file_prefix, stability_classication_x_cutoff, verbose = True):
# Determine the optimal values
max_value_cutoff, max_value, fraction_correct_range = self.determine_optimum_fraction_correct_cutoffs(analysis_set, self.dataframe, stability_classication_x_cutoff)
# Filenames
output_filename_prefix = '{0}optimum_fraction_correct_at_{1}_kcal_mol'.format(analysis_file_prefix, '%.2f' % stability_classication_x_cutoff)
plot_filename = output_filename_prefix + '.png'
csv_filename = output_filename_prefix + '.txt'
R_filename = output_filename_prefix + '.R'
if os.path.exists(plot_filename) and not(self.recreate_graphs):
return plot_filename
# Create CSV input
lines = ['NeutralityCutoff,FractionCorrect,C']
for p in fraction_correct_range:
if p[1] == max_value:
lines.append(','.join(map(str, (p[0], p[1], 'best'))))
else:
lines.append(','.join(map(str, (p[0], p[1], 'other'))))
if verbose:
print(csv_filename)
write_file(csv_filename, '\n'.join(lines))
# Create plot
if self.generate_plots:
title = 'Optimum cutoff for fraction correct metric at %0.2f kcal/mol' % stability_classication_x_cutoff
r_script = '''library(ggplot2)
library(gridExtra)
library(scales)
library(qualV)
png('%(plot_filename)s', height=4096, width=4096, bg="white", res=600)
plot_data <- read.csv('%(csv_filename)s', header=T)
plot_scale <- scale_color_manual(
values = c( "best" = '#00dd00', "other" = '#666666'),
labels = c( "best" = "Best", "other" = "Other"),
guide = "none") # do not show the legend
best_y = max(plot_data$FractionCorrect)
p <- ggplot(data = plot_data, aes(x = NeutralityCutoff, y = FractionCorrect)) +
plot_scale +
xlab("Neutrality cutoff (energy units)") +
ylab("Fraction correct") +
ggtitle("%(title)s") +
geom_point(aes(color = C)) +
geom_line() +
geom_smooth() +
geom_text(hjust=0, size=4, color="black", aes(6.5, best_y, fontface="plain", family = "sans", label=sprintf("Max = %(max_value)0.2f\\nCutoff = %(max_value_cutoff)0.2f")))
p
dev.off()'''
if verbose:
self.log('Saving plot of approximate optimal fraction correct cutoffs to %s.' % plot_filename)
RInterface._runRScript(r_script % locals())
return plot_filename
def plot_optimum_prediction_fraction_correct_cutoffs_over_range(self, analysis_set, min_stability_classication_x_cutoff, max_stability_classication_x_cutoff, suppress_plot = False, analysis_file_prefix = None, verbose = True):
'''Plots the optimum cutoff for the predictions to maximize the fraction correct metric over a range of experimental cutoffs.
Returns the average scalar corresponding to the best value of fraction correct over a range of cutoff values for the experimental cutoffs.'''
# Filenames
analysis_set_prefix = ''
#if analysis_set:
# analysis_set_prefix = '_{0}'.format(analysis_set)
plot_filename = None
if not suppress_plot:
output_filename_prefix = '{0}{1}optimum_fraction_correct_at_varying_kcal_mol'.format(analysis_file_prefix, analysis_set_prefix)
plot_filename = output_filename_prefix + '.png'
csv_filename = output_filename_prefix + '.txt'
# Create CSV input
lines = ['ExperimentalCutoff,BestPredictionCutoff']
x_cutoff = min_stability_classication_x_cutoff
x_values = []
y_values = []
avg_scale = 0
plot_graph = self.generate_plots and not(suppress_plot)
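# Sample experimental cutoffs from the minimum to the maximum cutoff (inclusive) in 0.1 kcal/mol steps.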
while x_cutoff < max_stability_classication_x_cutoff + 0.1:
max_value_cutoff, max_value, fraction_correct_range = self.determine_optimum_fraction_correct_cutoffs(analysis_set, self.dataframe, x_cutoff)
if plot_graph:
lines.append(','.join(map(str, (x_cutoff, max_value_cutoff))))
x_values.append(x_cutoff)
y_values.append(max_value_cutoff)
avg_scale += max_value_cutoff / x_cutoff
x_cutoff += 0.1
if plot_graph:
write_file(csv_filename, '\n'.join(lines))
# Determine the average scalar needed to fit the plot
avg_scale = avg_scale / len(x_values)
x_values = numpy.array(x_values)
y_values = numpy.array(y_values)
scalars = y_values / x_values
average_scalar = numpy.mean(scalars)
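# Illustration (hypothetical numbers): if the optimal prediction cutoff is consistently about
# twice the experimental cutoff, average_scalar ~ 2.0, and dividing predictions by this scalar
# (the 1/scalar factor quoted in the 'scaled' dataset section) brings the two scales into line.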
plot_label_1 = 'Scalar == %0.2f' % average_scalar
plot_label_2 = 'sigma == %0.2f' % numpy.std(scalars)
# Create plot
if plot_graph:
if not(os.path.exists(plot_filename) and not(self.recreate_graphs)):
if verbose:
self.log('Saving plot of approximate optimal fraction correct cutoffs over varying experimental cutoffs to %s.' % plot_filename)
title = 'Optimum cutoff for fraction correct metric at varying experimental cutoffs'
if analysis_set:
title += ' for {0}'.format(analysis_set)
r_script = '''library(ggplot2)
library(gridExtra)
library(scales)
library(qualV)
png('%(plot_filename)s', height=4096, width=4096, bg="white", res=600)
plot_data <- read.csv('%(csv_filename)s', header=T)
max_y = max(plot_data$BestPredictionCutoff)
p <- ggplot(data = plot_data, aes(x = ExperimentalCutoff, y = BestPredictionCutoff)) +
xlab("Experimental cutoff (kcal/mol)") +
ylab("Optimal prediction cutoff (energy units)") +
ggtitle("%(title)s") +
geom_point() +
geom_line() +
geom_smooth() +
geom_text(hjust=0, size=4, color="black", aes(0.5, max_y, fontface="plain", family = "sans", label="%(plot_label_1)s"), parse = T) +
geom_text(hjust=0, size=4, color="black", aes(0.5, max_y - 0.5, fontface="plain", family = "sans", label="%(plot_label_2)s"), parse = T)
p
dev.off()'''
RInterface._runRScript(r_script % locals())
return average_scalar, plot_filename
def _get_dataframe_columns(self, column_names):
new_dataframe = self.dataframe.copy()
new_dataframe = new_dataframe[column_names]
new_dataframe.columns = [name + '_' + self.benchmark_run_name for name in new_dataframe.columns]
return new_dataframe
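# e.g. self._get_dataframe_columns(['RunTime']) returns a copy whose column is renamed to
# 'RunTime_<benchmark_run_name>', as used for the run time box plot in plot() above.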
def plot_derivative_error_barchart(self, analysis_file_prefix, verbose = True):
# Filenames
output_filename_prefix = '{0}errors_by_pdb_id'.format(analysis_file_prefix)
plot_filename = output_filename_prefix + '.png'
csv_filename = output_filename_prefix + '.txt'
R_filename = output_filename_prefix + '.R'
new_dataframe = self.dataframe.copy()
new_dataframe = new_dataframe[['PDBFileID', 'NumberOfDerivativeErrors']]
new_dataframe.columns = ['PDB', 'AverageDerivativeErrorCount']
new_dataframe = new_dataframe.groupby(['PDB'])['AverageDerivativeErrorCount'].mean()
new_dataframe.to_csv(csv_filename, sep = ',', header = True)
if os.path.exists(plot_filename) and not(self.recreate_graphs):
return plot_filename
# Create plot
firebrick = plot_colors['firebrick']
brown = plot_colors['brown']
if verbose:
self.log('Saving barchart to %s.' % plot_filename)
title = 'Average count of Inaccurate G! errors by PDB ID'
r_script = '''library(ggplot2)
library(gridExtra)
library(scales)
library(qualV)
png('%(plot_filename)s', height=4096, width=4096, bg="white", res=600)
plot_data <- read.csv('%(csv_filename)s', header=T)
b <- ggplot(plot_data, aes(x=PDB, y=AverageDerivativeErrorCount)) +
geom_bar(stat='identity', colour = "%(brown)s", fill = "%(firebrick)s") +
ggtitle("%(title)s") +
xlab("PDB ID") +
ylab("Derivative errors (average)") +
coord_flip()
b
#m <- ggplot(plot_data, aes(x=AbsoluteError)) +
# geom_histogram(colour = "%(brown)s", fill = "%(firebrick)s", binwidth = 0.5) +
# ggtitle("%(title)s") +
# xlab("Absolute error (kcal/mol - energy units)") +
# ylab("Number of cases")
#m
dev.off()'''
RInterface._runRScript(r_script % locals())
return plot_filename
def plot_absolute_error_histogram(self, output_filename_prefix, data_series, analysis_set = '', verbose = True):
# Filenames
plot_filename = output_filename_prefix + '.png'
csv_filename = output_filename_prefix + '.txt'
R_filename = output_filename_prefix + '.R'
if os.path.exists(plot_filename) and not(self.recreate_graphs):
return plot_filename
# Create CSV input
new_dataframe = self.dataframe[[data_series]]
new_dataframe.to_csv(csv_filename, sep = ',', header = True)
if not self.generate_plots:
return
# Create plot
if verbose:
self.log('Saving histogram to %s.' % plot_filename)
title = 'Distribution of absolute errors (prediction - observed)'
if analysis_set:
title += ' for {0}'.format(analysis_set)
r_script = '''library(ggplot2)
library(gridExtra)
library(scales)
library(qualV)
png('%(plot_filename)s', height=4096, width=4096, bg="white", res=600)
plot_data <- read.csv('%(csv_filename)s', header=T)
m <- ggplot(plot_data, aes(x=%(data_series)s)) +
geom_histogram(colour = "darkgreen", fill = "green", binwidth = 0.5) +
ggtitle("%(title)s") +
xlab("Absolute error (kcal/mol - energy units)") +
ylab("Number of cases")
m
dev.off()'''
RInterface._runRScript(r_script % locals())
return plot_filename
def scatterplot_generic(self, title, plotfn, plot_filename, analysis_set = '', verbose = True):
if os.path.exists(plot_filename) and not(self.recreate_graphs):
return plot_filename
csv_filename = os.path.splitext(plot_filename)[0] + '.txt'
plot_commands = plotfn(title, csv_filename, analysis_set = analysis_set)
r_script = '''library(ggplot2)
library(gridExtra)
library(scales)
library(qualV)
png('%(plot_filename)s', height=4096, width=4096, bg="white", res=600)
plot_data <- read.csv('%(csv_filename)s', header=T)
%(plot_commands)s
dev.off()''' % locals()
if self.generate_plots:
if verbose:
self.log('Saving scatterplot to %s.' % plot_filename)
RInterface._runRScript(r_script)
return plot_filename
def scatterplot_color_by_series(self, colorseries, xseries = "Experimental", yseries = "Predicted", title = '', plot_scale = '', point_opacity = 0.4, extra_commands = '', analysis_set = '', verbose = True):
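# Returns a block of R/ggplot2 commands that scatter yseries against xseries, colored by
# colorseries. Two regression lines are drawn: a black ordinary least-squares fit (intercept
# and slope) and a blue through-origin fit, annotated with the correlation coefficient R and,
# when available, the mean absolute error (MAE).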
experimental_field = BenchmarkRun.get_analysis_set_fieldname('Experimental', analysis_set)
# Compute MAE
mae_str = ''
if xseries == experimental_field:
if yseries == 'Predicted':
mae_str = self.dataframe[BenchmarkRun.get_analysis_set_fieldname('AbsoluteError', analysis_set)].mean()
elif yseries == 'Predicted_adj':
mae_str = self.dataframe[BenchmarkRun.get_analysis_set_fieldname('AbsoluteError_adj', analysis_set)].mean()
if mae_str:
mae_str = 'MAE = {0:.3f}'.format(mae_str)
plot_scale_line = ''
plot_scale_argument = ''
if plot_scale:
plot_scale_line = plot_scale.strip()
plot_scale_argument = '\n plot_scale +'
return '''
opacity <- %(point_opacity)s
coefs <- coef(lm(%(yseries)s~%(xseries)s, data = plot_data))
coefs
fitcoefs = coef(lm(%(yseries)s~0 + %(xseries)s, data = plot_data))
fitlmv_Predicted <- as.numeric(fitcoefs[1])
lmv_intercept <- as.numeric(coefs[1])
lmv_Predicted <- as.numeric(coefs[2])
lm(plot_data$%(yseries)s~plot_data$%(xseries)s)
fitcoefs
xlabel <- expression(paste(plain("%(xseries)s ")*Delta*Delta*plain("G (kcal/mol)")))
ylabel <- expression(paste(plain("%(yseries)s ")*Delta*Delta*plain(G)))
rvalue <- cor(plot_data$%(yseries)s, plot_data$%(xseries)s)
minx <- min(plot_data$%(xseries)s)
maxx <- max(plot_data$%(xseries)s)
miny <- min(plot_data$%(yseries)s)
maxy <- max(plot_data$%(yseries)s)
xpos <- minx + ((maxx - minx) * 0.05)
ypos_cor <- maxy - ((maxy - miny) * 0.015)
ypos_mae <- maxy - ((maxy - miny) * 0.055)
%(plot_scale_line)s
p <- ggplot(data = plot_data, aes(x = %(xseries)s, y = %(yseries)s)) +%(plot_scale_argument)s %(extra_commands)s
xlab("Experimental (kcal/mol)") +
ylab("Predictions (energy units)") +
ggtitle("%(title)s") +
geom_point(aes(color = %(colorseries)s), alpha = I(opacity), shape = I(19)) +
geom_abline(size = 0.25, intercept = lmv_intercept, slope = lmv_Predicted) +
geom_abline(color="blue",size = 0.25, intercept = 0, slope = fitlmv_Predicted) +
geom_text(hjust=0, size=4, aes(xpos, ypos_cor, fontface="plain", family = "sans", label=sprintf("R = %%0.3f", round(rvalue, digits = 4)))) +
geom_text(hjust=0, size=4, aes(xpos, ypos_mae, fontface="plain", family = "sans", label="%(mae_str)s"))
p
''' % locals()
def scatterplot_charges(self, title, csv_filename, analysis_set = ''):
'''Scatterplot by residue charge.'''
# Create CSV input
new_dataframe = self.dataframe.copy()
experimental_field = BenchmarkRun.get_analysis_set_fieldname('Experimental', analysis_set)
new_dataframe = new_dataframe[[experimental_field, 'Predicted', 'ResidueCharges']]
new_dataframe['Opacity'] = 0.4
new_dataframe = new_dataframe.dropna(subset = [experimental_field, 'Predicted'])
new_dataframe.to_csv(csv_filename, sep = ',', header = True)
plot_scale = '''
plot_scale <- scale_color_manual(
values = c( "None" = '#777777', "Change" = '%(cornflower_blue)s', "Polar/Charged" = 'magenta', "Hydrophobic/Non-polar" = 'green'),
labels = c( "None" = "N/A", "Change" = "Change", "Polar/Charged" = "Polar/Charged", "Hydrophobic/Non-polar" = "Hydrophobic/Non-polar"))''' % plot_colors
return self.scatterplot_color_by_series(xseries = experimental_field, colorseries = "ResidueCharges", title = title, plot_scale = plot_scale, point_opacity = 0.6, analysis_set = analysis_set)
def scatterplot_exposure(self, title, csv_filename, analysis_set = ''):
'''Scatterplot by exposure class.'''
# Create CSV input
new_dataframe = self.dataframe.copy()
experimental_field = BenchmarkRun.get_analysis_set_fieldname('Experimental', analysis_set)
new_dataframe = new_dataframe[[experimental_field, 'Predicted', 'WildTypeExposure']]
new_dataframe['Opacity'] = 0.4
new_dataframe = new_dataframe.dropna(subset = [experimental_field, 'Predicted'])
new_dataframe.columns = [experimental_field, 'Predicted', 'Exposure', 'Opacity'] # rename the exposure column
new_dataframe.to_csv(csv_filename, sep = ',', header = True)
plot_scale = '''
plot_scale <- scale_color_manual(
values = c( "None" = '#777777', "B" = '%(brown)s', "E" = '%(purple)s'),
labels = c( "None" = "N/A", "B" = "Buried", "E" = "Exposed"))''' % plot_colors
return self.scatterplot_color_by_series(xseries = experimental_field, colorseries = "Exposure", title = title, plot_scale = plot_scale, analysis_set = analysis_set)
def scatterplot_volume(self, title, csv_filename, analysis_set = '', verbose = True):
'''Scatterplot by change in volume upon mutation.'''
# Create CSV input
new_dataframe = self.dataframe.copy()
experimental_field = BenchmarkRun.get_analysis_set_fieldname('Experimental', analysis_set)
new_dataframe = new_dataframe[[experimental_field, 'Predicted', 'VolumeChange']]
new_dataframe['Opacity'] = 0.4
new_dataframe = new_dataframe.dropna(subset = [experimental_field, 'Predicted'])
new_dataframe.to_csv(csv_filename, sep = ',', header = True)
plot_scale = '''
plot_scale <- scale_color_manual(
values = c( "None" = '#777777', "SL" = '%(brown)s', "LS" = '%(purple)s', 'XX' = "%(cornflower_blue)s"),
labels = c( "None" = "N/A", "SL" = "Increase", "LS" = "Decrease", "XX" = "No change"))''' % plot_colors
return self.scatterplot_color_by_series(xseries = experimental_field, colorseries = "VolumeChange", title = title, plot_scale = plot_scale, analysis_set = analysis_set, verbose = verbose)
def scatterplot_ss(self, title, csv_filename, analysis_set = '', verbose = True):
'''Scatterplot by secondary structure.'''
# Create CSV input
new_dataframe = self.dataframe.copy()
experimental_field = BenchmarkRun.get_analysis_set_fieldname('Experimental', analysis_set)
new_dataframe = new_dataframe[[experimental_field, 'Predicted', 'WildTypeDSSPSimpleSSType']]
new_dataframe['Opacity'] = 0.4
new_dataframe = new_dataframe.dropna(subset = [experimental_field, 'Predicted'])
new_dataframe.columns = [experimental_field, 'Predicted', 'WTSecondaryStructure', 'Opacity'] # rename the s.s. column
new_dataframe.to_csv(csv_filename, sep = ',', header = True)
plot_scale = '''
plot_scale <- scale_color_manual(
name="Secondary structure",
values = c( "None" = '#777777', "H" = 'magenta', "S" = 'orange', "O" = '%(cornflower_blue)s'),
labels = c( "None" = "N/A", "H" = "Helix", "S" = "Sheet", "O" = "Other"))''' % plot_colors
return self.scatterplot_color_by_series(xseries = experimental_field, colorseries = "WTSecondaryStructure", title = title, plot_scale = plot_scale, point_opacity = 0.6, analysis_set = analysis_set, verbose = verbose)
def scatterplot_scop_class(self, title, csv_filename, analysis_set = '', verbose = True):
'''Scatterplot by SCOPe class.'''
# Create CSV input
new_dataframe = self.dataframe.copy()
experimental_field = BenchmarkRun.get_analysis_set_fieldname('Experimental', analysis_set)
new_dataframe = new_dataframe[[experimental_field, 'Predicted', 'WildTypeSCOPClass']]
new_dataframe['Opacity'] = 0.4
new_dataframe = new_dataframe.dropna(subset = [experimental_field, 'Predicted'])
new_dataframe.to_csv(csv_filename, sep = ',', header = True)
return self.scatterplot_color_by_series(xseries = experimental_field, colorseries = "WildTypeSCOPClass", title = title, point_opacity = 0.6, analysis_set = analysis_set, verbose = verbose)
def scatterplot_scop_fold(self, title, csv_filename, analysis_set = '', verbose = True):
'''Scatterplot by SCOPe fold.'''
# Create CSV input
new_dataframe = self.dataframe.copy()
experimental_field = BenchmarkRun.get_analysis_set_fieldname('Experimental', analysis_set)
new_dataframe = new_dataframe[[experimental_field, 'Predicted', 'WildTypeSCOPFold']]
new_dataframe['Opacity'] = 0.4
new_dataframe = new_dataframe.dropna(subset = [experimental_field, 'Predicted'])
new_dataframe.to_csv(csv_filename, sep = ',', header = True)
return self.scatterplot_color_by_series(xseries = experimental_field, colorseries = "WildTypeSCOPFold", title = title, point_opacity = 0.6, analysis_set = analysis_set, verbose = verbose)
def scatterplot_scop_classification(self, title, csv_filename, analysis_set = '', verbose = True):
'''Scatterplot by SCOPe classification.'''
# Create CSV input
new_dataframe = self.dataframe.copy()
experimental_field = BenchmarkRun.get_analysis_set_fieldname('Experimental', analysis_set)
new_dataframe = new_dataframe[[experimental_field, 'Predicted', 'WildTypeSCOPClassification']]
new_dataframe['Opacity'] = 0.4
new_dataframe = new_dataframe.dropna(subset = [experimental_field, 'Predicted'])
new_dataframe.to_csv(csv_filename, sep = ',', header = True)
return self.scatterplot_color_by_series(xseries = experimental_field, colorseries = "WildTypeSCOPClassification", title = title, point_opacity = 0.6, analysis_set = analysis_set, verbose = verbose)
def scatterplot_wildtype_aa(self, title, csv_filename, analysis_set = '', verbose = True):
'''Scatterplot by wildtype residue.'''
# Create CSV input
new_dataframe = self.dataframe.copy()
experimental_field = BenchmarkRun.get_analysis_set_fieldname('Experimental', analysis_set)
new_dataframe = new_dataframe[[experimental_field, 'Predicted', 'WildTypeAA']]
new_dataframe['Opacity'] = 0.4
new_dataframe = new_dataframe.dropna(subset = [experimental_field, 'Predicted'])
new_dataframe.to_csv(csv_filename, sep = ',', header = True)
plot_scale = '''
plot_scale <- scale_color_manual(
name="Residue",
values = c( "None" = '#808080', "A" = '#FF0000', "C" = '#BFBF00', "D" = '#008000', "E" = "#80FFFF", "F" = "#8080FF", "G" = "#BF40BF", "H" = "#A0A424", "I" = "#411BEA", "K" = "#1EAC41", "L" = "#F0C80E", "M" = "#B430E5", "N" = "#ED7651", "P" = "#19CB97", "Q" = "#362698", "R" = "#7E7EB8", "S" = "#603000", "T" = "#A71818", "V" = "#DF8020", "W" = "#E75858", "Y" = "#082008"),
labels = c( "None" = "N/A", "A" = "A", "C" = "C", "D" = "D", "E" = "E", "F" = "F", "G" = "G", "H" = "H", "I" = "I", "K" = "K", "L" = "L", "M" = "M", "N" = "N", "P" = "P", "Q" = "Q", "R" = "R", "S" = "S", "T" = "T", "V" = "V", "W" = "W", "Y" = "Y"))
''' % plot_colors
return self.scatterplot_color_by_series(xseries = experimental_field, colorseries = "WildTypeAA", title = title, plot_scale = plot_scale, point_opacity = 0.6, analysis_set = analysis_set, verbose = verbose)
def scatterplot_mutant_aa(self, title, csv_filename, analysis_set = '', verbose = True):
'''Scatterplot by mutant residue.'''
# Create CSV input
new_dataframe = self.dataframe.copy()
experimental_field = BenchmarkRun.get_analysis_set_fieldname('Experimental', analysis_set)
new_dataframe = new_dataframe[[experimental_field, 'Predicted', 'MutantAA']]
new_dataframe['Opacity'] = 0.4
new_dataframe = new_dataframe.dropna(subset = [experimental_field, 'Predicted'])
new_dataframe.to_csv(csv_filename, sep = ',', header = True)
plot_scale = '''
plot_scale <- scale_color_manual(
name="Residue",
values = c( "None" = '#808080', "A" = '#FF0000', "C" = '#BFBF00', "D" = '#008000', "E" = "#80FFFF", "F" = "#8080FF", "G" = "#BF40BF", "H" = "#A0A424", "I" = "#411BEA", "K" = "#1EAC41", "L" = "#F0C80E", "M" = "#B430E5", "N" = "#ED7651", "P" = "#19CB97", "Q" = "#362698", "R" = "#7E7EB8", "S" = "#603000", "T" = "#A71818", "V" = "#DF8020", "W" = "#E75858", "Y" = "#082008"),
labels = c( "None" = "N/A", "A" = "A", "C" = "C", "D" = "D", "E" = "E", "F" = "F", "G" = "G", "H" = "H", "I" = "I", "K" = "K", "L" = "L", "M" = "M", "N" = "N", "P" = "P", "Q" = "Q", "R" = "R", "S" = "S", "T" = "T", "V" = "V", "W" = "W", "Y" = "Y"))
''' % plot_colors
return self.scatterplot_color_by_series(xseries = experimental_field, colorseries = "MutantAA", title = title, plot_scale = plot_scale, point_opacity = 0.6, analysis_set = analysis_set, verbose = verbose)
def scatterplot_GP(self, title, csv_filename, analysis_set = '', verbose = True):
# Create CSV input
new_dataframe = self.dataframe.copy()
experimental_field = BenchmarkRun.get_analysis_set_fieldname('Experimental', analysis_set)
new_dataframe = new_dataframe[[experimental_field, 'Predicted', 'HasGPMutation']]
new_dataframe['GP'] = numpy.where(new_dataframe['HasGPMutation'] == 1, 'GP', 'Other')
new_dataframe['Opacity'] = numpy.where(new_dataframe['HasGPMutation'] == 1, 0.9, 0.5)
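# numpy.where labels each record 'GP' or 'Other' and gives glycine/proline cases a higher
# opacity so that they stand out against the remaining points in the scatterplot.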
new_dataframe = new_dataframe[[experimental_field, 'Predicted', 'GP', 'Opacity']]
new_dataframe = new_dataframe.dropna(subset = [experimental_field, 'Predicted'])
new_dataframe.to_csv(csv_filename, sep = ',', header = True)
plot_scale = '''plot_scale <- scale_color_manual(
name="Glycine/Proline",
values = c( "None" = '#777777', "GP" = '%(neon_green)s', "Other" = '#440077'),
labels = c( "None" = "N/A", "GP" = "GP", "Other" = "Other"))''' % plot_colors
return self.scatterplot_color_by_series(xseries = experimental_field, colorseries = "GP", title = title, plot_scale = plot_scale, point_opacity = 0.75, analysis_set = analysis_set, verbose = verbose)
def scatterplot_pdb_res_binned(self, title, csv_filename, analysis_set = '', verbose = True):
'''Scatterplot by binned PDB resolution.'''
# Create CSV input
new_dataframe = self.dataframe.copy()
experimental_field = BenchmarkRun.get_analysis_set_fieldname('Experimental', analysis_set)
new_dataframe = new_dataframe[[experimental_field, 'Predicted', 'PDBResolutionBin']]
new_dataframe['Opacity'] = 0.4
new_dataframe = new_dataframe.dropna(subset = [experimental_field, 'Predicted'])
new_dataframe.to_csv(csv_filename, sep = ',', header = True)
plot_scale = '''
plot_scale <- scale_color_manual(
name = "Resolution",
values = c( "N/A" = '#777777', "<1.5" = '#0052aE', "1.5-2.0" = '#554C54', '2.0-2.5' = "#FFA17F", '>=2.5' = "#ce4200")
)''' % plot_colors
return self.scatterplot_color_by_series(xseries = experimental_field, colorseries = "PDBResolutionBin", title = title, plot_scale = plot_scale, point_opacity = 0.75, analysis_set = analysis_set, verbose = verbose)
def scatterplot_chain_length(self, title, csv_filename, analysis_set = '', verbose = True):
'''Scatterplot by chain length.'''
# Create CSV input
new_dataframe = self.dataframe.copy()
experimental_field = BenchmarkRun.get_analysis_set_fieldname('Experimental', analysis_set)
new_dataframe = new_dataframe[[experimental_field, 'Predicted', 'NumberOfResidues']]
new_dataframe['Opacity'] = 0.4
new_dataframe.columns = [experimental_field, 'Predicted', 'Residues', 'Opacity'] # rename the monomer length column
new_dataframe = new_dataframe.dropna(subset = [experimental_field, 'Predicted'])
new_dataframe.to_csv(csv_filename, sep = ',', header = True)
# Note: unlike the resolution plot above, this plot uses a continuous colour gradient rather than a manual colour scale.
extra_commands ='\n    scale_colour_gradient(low="yellow", high="#880000") +'
return self.scatterplot_color_by_series(xseries = experimental_field, colorseries = "Residues", title = title, plot_scale = '', point_opacity = 0.75, extra_commands = extra_commands, analysis_set = analysis_set, verbose = verbose)
class BenchmarkRun(ReportingObject):
'''A object to contain benchmark run data which can be used to analyze that run or else to cross-analyze the run with another run.'''
def __init__(self, benchmark_run_name, dataset_cases, analysis_data, contains_experimental_data = True, benchmark_run_directory = None, use_single_reported_value = False,
ddg_analysis_type = None,
calculate_scalar_adjustments = True,
description = None, dataset_description = None, credit = None, generate_plots = True, report_analysis = True, include_derived_mutations = False, recreate_graphs = False, silent = False, burial_cutoff = 0.25,
additional_join_parameters = {},
stability_classication_x_cutoff = 1.0, stability_classication_y_cutoff = 1.0, use_existing_benchmark_data = False, store_data_on_disk = True, misc_dataframe_attributes = {},
terminal_width = 200, restrict_to = set(), remove_cases = set()):
pass
def add_stored_metric_to_df(self, case_description, case_length, case_stats):
pass
def filter_data(self):
'''A very rough filtering step to remove certain data.
todo: It is probably best to do this to the actual dataframe rather than at this point.
todo: We currently only handle one filtering criterion.
'''
pass
def __repr__(self):
'''Simple printer - we print the dataframe.'''
pass
@staticmethod
def get_analysis_set_fieldname(prefix, analysis_set):
pass
@staticmethod
def get_amino_acid_details():
pass
def report(self, str, fn = None):
pass
def create_analysis_directory(self, analysis_directory = None):
pass
def create_subplot_directory(self, analysis_directory = None):
pass
def read_dataframe_from_content(self, hdfstore_blob):
pass
def set_dataframe(self, dataframe, verbose = True):
pass
def write_dataframe(self, analysis_pandas_input_filepath):
pass
def create_dataframe(self, pdb_data = {}, verbose = True):
'''This function creates a dataframe (a matrix with one row per dataset record and one column for fields of interest)
from the benchmark run and the dataset data.
For rows with multiple mutations, there may be multiple values for some fields e.g. wildtype residue exposure.
We take the approach of marking these records as None (to be read as: N/A).
Another approach is to take averages of continuous and binary values.
This function also determines scalar_adjustments used to scale the predictions to try to improve the fraction
correct score and the MAE.
'''
pass
def write_dataframe_to_csv(self, output_path):
pass
def reset_csv_headers(self):
pass
def is_this_record_a_derived_mutation(self, record):
'''Different callers to this class store this information differently so we make it class-dependent and subclass.'''
pass
def get_record_mutations(self, record):
'''Different callers should use the same name here but they currently do not.'''
pass
def get_experimental_ddg_values(self, record, dataframe_record):
pass
def compute_stability_classification(self, predicted_data, record, dataframe_record):
'''Calculate the stability classification for this case.'''
pass
def compute_absolute_error(self, predicted_data, record, dataframe_record):
'''Calculate the absolute error for this case.'''
pass
def get_record_pdb_file_id(self, record):
pass
def count_residues(self, record, pdb_record):
'''Count the number of residues in the chains for the case.'''
pass
def get_dataframe_row(self, dataset_cases, predicted_data, pdb_data, record_id, additional_prediction_data_columns):
'''Create a dataframe row for a prediction.'''
pass
def analyze_all(self, analysis_directory = None):
'''This function runs the analysis and creates the plots and summary file.'''
pass
def full_analysis(self, analysis_set, output_directory, verbose = True, compile_pdf = True, quick_plots = False):
'''Combines calculate_metrics, write_dataframe_to_csv, and plot'''
pass
def get_definitive_name(self, unique_ajps, join_character = '-', prepend_label = True):
'''
Generates a definitive name for this benchmark run object, based on
unique additional join parameters (as passed)
'''
pass
@staticmethod
def get_unique_ajps( benchmark_runs ):
'''
Determines which join parameters are unique
'''
pass
@staticmethod
def get_common_subset(
benchmark_runs,
verbose = False,
):
pass
@staticmethod
def analyze_multiple(
benchmark_runs,
analysis_sets = [],
# Singleton arguments
analysis_directory = None,
remove_existing_analysis_directory = True,
quick_plots = False,
use_multiprocessing = True,
verbose = True,
compile_pdf = True,
limit_to_complete_presence = True,
all_by_all_comparisons = False,
):
'''This function runs the analysis for multiple input settings'''
pass
def save_latex_report(t):
pass
def compare(self, other, analysis_set, output_directory, unique_ajps, verbose = True, compile_pdf = True):
'''
Generate comparison latex report in specified output directory
Returns LatexReport object
'''
pass
@staticmethod
def make_case_description_tables(stats_df, sort_by = "Pearson's R"):
pass
@staticmethod
def make_specific_case_table(stats_df, case, sort_by = "Pearson's R"):
pass
@staticmethod
def get_stats_comparison_dataframe(benchmark_runs, unique_ajps, output_csv = None):
pass
def get_pred_minus_exp_dataframe(self, analysis_set, unique_ajps = None):
pass
def add_identifying_columns_to_df(self, df, unique_ajps, reset_index = False):
pass
def calculate_metrics(self, analysis_set = '', analysis_directory = None, drop_missing = True, case_n_cutoff = 5, verbose = True):
'''Calculates the main metrics for the benchmark run and writes them to file and LaTeX object.'''
pass
def plot(self, analysis_set = '', analysis_directory = None, matplotlib_plots = True, verbose = True, compile_pdf = True, quick_plots = False):
pass
def determine_optimum_fraction_correct_cutoffs(self, analysis_set, dataframe, stability_classication_x_cutoff):
'''Determines the value of stability_classication_y_cutoff which approximately maximizes the fraction correct
measurement w.r.t. a fixed stability_classication_x_cutoff. This function uses discrete sampling and so it
may miss the actual maximum. We use two rounds of sampling: i) coarse-grained sampling (0.1 energy unit
intervals); and ii) finer sampling (0.01 unit intervals).
In both rounds, if there are multiple maxima we choose the one corresponding to the lower cutoff value. A minimal sketch of this search follows this record.'''
pass
def plot_optimum_prediction_fraction_correct_cutoffs(self, analysis_set, analysis_file_prefix, stability_classication_x_cutoff, verbose = True):
pass
def plot_optimum_prediction_fraction_correct_cutoffs_over_range(self, analysis_set, min_stability_classication_x_cutoff, max_stability_classication_x_cutoff, suppress_plot = False, analysis_file_prefix = None, verbose = True):
'''Plots the optimum cutoff for the predictions to maximize the fraction correct metric over a range of experimental cutoffs.
Returns the average scalar corresponding to the best value of fraction correct over a range of cutoff values for the experimental cutoffs.'''
pass
def _get_dataframe_columns(self, column_names):
pass
def plot_derivative_error_barchart(self, analysis_file_prefix, verbose = True):
pass
def plot_absolute_error_histogram(self, output_filename_prefix, data_series, analysis_set = '', verbose = True):
pass
def scatterplot_generic(self, title, plotfn, plot_filename, analysis_set = '', verbose = True):
pass
def scatterplot_color_by_series(self, colorseries, xseries = "Experimental", yseries = "Predicted", title = '', plot_scale = '', point_opacity = 0.4, extra_commands = '', analysis_set = '', verbose = True):
pass
def scatterplot_charges(self, title, csv_filename, analysis_set = ''):
'''Scatterplot by residue charge.'''
pass
def scatterplot_exposure(self, title, csv_filename, analysis_set = ''):
'''Scatterplot by exposure class.'''
pass
def scatterplot_volume(self, title, csv_filename, analysis_set = '', verbose = True):
'''Scatterplot by change in volume upon mutation.'''
pass
def scatterplot_ss(self, title, csv_filename, analysis_set = '', verbose = True):
'''Scatterplot by secondary structure.'''
pass
def scatterplot_scop_class(self, title, csv_filename, analysis_set = '', verbose = True):
'''Scatterplot by SCOPe class.'''
pass
def scatterplot_scop_fold(self, title, csv_filename, analysis_set = '', verbose = True):
'''Scatterplot by SCOPe fold.'''
pass
def scatterplot_scop_classification(self, title, csv_filename, analysis_set = ''):
'''Scatterplot by SCOPe classification.'''
pass
def scatterplot_wildtype_aa(self, title, csv_filename, analysis_set = '', verbose = True):
'''Scatterplot by wildtype residue.'''
pass
def scatterplot_mutant_aa(self, title, csv_filename, analysis_set = '', verbose = True):
'''Scatterplot by mutant residue.'''
pass
def scatterplot_GP(self, title, csv_filename, analysis_set = '', verbose = True):
pass
def scatterplot_pdb_res_binned(self, title, csv_filename, analysis_set = '', verbose = True):
'''Scatterplot by binned PDB resolution.'''
pass
def scatterplot_chain_length(self, title, csv_filename, analysis_set = '', verbose = True):
'''Scatterplot by chain length.'''
pass
| 70 | 31 | 33 | 4 | 27 | 4 | 5 | 0.14 | 1 | 22 | 8 | 1 | 51 | 39 | 59 | 62 | 2,169 | 324 | 1,636 | 427 | 1,545 | 237 | 1,205 | 393 | 1,142 | 37 | 2 | 6 | 317 |
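The two-round sampling strategy described in determine_optimum_fraction_correct_cutoffs above can be illustrated with a short sketch. This is a minimal illustration under stated assumptions, not the class's implementation: the fraction_correct helper, the sampling ranges, and the sign convention for the stability classes are hypothetical stand-ins.

def fraction_correct(experimental, predicted, x_cutoff, y_cutoff):
    # A case counts as correct when both values fall in the same class:
    # destabilizing (>= cutoff), stabilizing (<= -cutoff), or neutral.
    def classify(value, cutoff):
        if value >= cutoff: return 1
        if value <= -cutoff: return -1
        return 0
    matches = [classify(e, x_cutoff) == classify(p, y_cutoff) for e, p in zip(experimental, predicted)]
    return sum(matches) / float(len(matches))

def optimum_y_cutoff(experimental, predicted, x_cutoff):
    # Round 1: coarse sampling in 0.1 energy-unit steps. The strict '>' keeps
    # the first (lowest) cutoff in cases of multiple maxima.
    best = None
    for i in range(1, 51):
        y = i * 0.1
        fc = fraction_correct(experimental, predicted, x_cutoff, y)
        if best is None or fc > best[0]:
            best = (fc, y)
    # Round 2: finer sampling in 0.01 unit steps around the coarse optimum.
    coarse_y = best[1]
    for i in range(-10, 11):
        y = coarse_y + i * 0.01
        if y > 0:
            fc = fraction_correct(experimental, predicted, x_cutoff, y)
            if fc > best[0]:
                best = (fc, y)
    return best[1]

print(optimum_y_cutoff([1.2, -0.4, 3.1, 0.2, -2.0], [2.0, -0.1, 4.5, 0.9, -1.1], x_cutoff = 1.0))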
143,707 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/benchmarking/analysis/ddg_binding_affinity_analysis.py
|
klab.benchmarking.analysis.ddg_binding_affinity_analysis.DBBenchmarkRun
|
class DBBenchmarkRun(GenericDBBenchmarkRun):
def get_dataframe_row(self, dataset_cases, predicted_data, pdb_data, record_id, additional_prediction_data_columns):
'''Create a dataframe row for a prediction.'''
record = dataset_cases[record_id]
# Collapse the complex/monomeric DSSP annotations into single fields, preferring the complex values when present.
for m in record['PDBMutations']:
assert('DSSPSimpleSSType' not in m)
m['DSSPSimpleSSType'] = dssp_elision.get(m['ComplexDSSP']) or dssp_elision.get(m['MonomericDSSP'])
m['DSSPType'] = m.get('ComplexDSSP') or m.get('MonomericDSSP')
m['DSSPExposure'] = m.get('ComplexExposure') or m.get('MonomericExposure')
dataframe_record = super(DBBenchmarkRun, self).get_dataframe_row(dataset_cases, predicted_data, pdb_data, record_id, additional_prediction_data_columns)
# additional columns derived from the record could be appended to dataframe_record here
return dataframe_record
|
class DBBenchmarkRun(GenericDBBenchmarkRun):
def get_dataframe_row(self, dataset_cases, predicted_data, pdb_data, record_id, additional_prediction_data_columns):
'''Create a dataframe row for a prediction.'''
pass
| 2 | 1 | 12 | 1 | 9 | 2 | 2 | 0.2 | 1 | 1 | 0 | 0 | 1 | 0 | 1 | 73 | 14 | 2 | 10 | 5 | 8 | 2 | 10 | 5 | 8 | 2 | 4 | 1 | 2 |
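The override above coalesces the complex/monomeric DSSP annotations before delegating to the parent class. A minimal sketch of that coalescing pattern follows; the dssp_elision map shown here is a hypothetical stand-in for the module-level mapping (which collapses detailed DSSP codes into simple secondary-structure classes), and the mutation record is fabricated for illustration.

# Hypothetical stand-in for the module-level dssp_elision map.
dssp_elision = {'H': 'H', 'G': 'H', 'I': 'H', 'E': 'E', 'B': 'E', 'T': 'L', 'S': 'L', ' ': 'L'}

m = {'ComplexDSSP': None, 'MonomericDSSP': 'G', 'ComplexExposure': None, 'MonomericExposure': 0.12}
# Prefer the complex annotation and fall back to the monomeric one.
m['DSSPSimpleSSType'] = dssp_elision.get(m['ComplexDSSP']) or dssp_elision.get(m['MonomericDSSP'])
m['DSSPType'] = m.get('ComplexDSSP') or m.get('MonomericDSSP')
m['DSSPExposure'] = m.get('ComplexExposure') or m.get('MonomericExposure')
assert m['DSSPSimpleSSType'] == 'H' and m['DSSPType'] == 'G' and m['DSSPExposure'] == 0.12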
143,708 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/view_commit_log.py
|
klab.view_commit_log.Reporter
|
class Reporter:
def __init__(self,task,report_interval=1):
self.report_interval=report_interval # (seconds)
self.start=time.time()
self.lastreport=self.start
self.task=task
print('Starting '+task)
def report(self,n):
t=time.time()
if self.lastreport<(t-self.report_interval):
self.lastreport=t
sys.stdout.write(" Processed: "+str(n)+" \r" )
sys.stdout.flush()
def done(self):
print('Done %s, took %.3f seconds\n' % (self.task,time.time()-self.start))
|
class Reporter:
def __init__(self,task,report_interval=1):
pass
def report(self,n):
pass
def done(self):
pass
| 4 | 0 | 5 | 0 | 5 | 0 | 1 | 0.07 | 0 | 1 | 0 | 0 | 3 | 4 | 3 | 3 | 15 | 0 | 15 | 9 | 11 | 1 | 15 | 9 | 11 | 2 | 0 | 1 | 4 |
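A brief usage sketch for the Reporter class above (the task name and loop bound are illustrative; the class assumes its module imports time and sys):

r = Reporter('processing commits', report_interval = 2)
for n in range(100000):
    r.report(n)   # writes an in-place progress line at most once every report_interval seconds
r.done()          # prints the task name and the total elapsed time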
143,709 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/unmerged/rpache/PDB_files.py
|
klab.unmerged.rpache.PDB_files.PDB
|
class PDB:
def __init__(self,id='',filename='',title='',resolution=None,models=None):
self.id=id
self.filename=filename
self.title=title
self.resolution=resolution
self.models=models if models is not None else [] # avoid a shared mutable default argument
def out(self):
print(self.id)
for model in self.models:
model.out()
def write(self):
outstring=''
if len(self.models)>1:
for model in self.models:
outstring+=model.write()
else:
outstring+=self.models[0].write_plain()
#-
outstring+='END\n'
return outstring
|
class PDB:
def __init__(self,id='',filename='',title='',resolution=None,models=None):
pass
def out(self):
pass
def write(self):
pass
| 4 | 0 | 7 | 0 | 6 | 0 | 2 | 0.05 | 0 | 0 | 0 | 0 | 3 | 5 | 3 | 3 | 23 | 2 | 20 | 12 | 16 | 1 | 19 | 12 | 15 | 3 | 0 | 2 | 6 |
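A short usage sketch for the PDB container above. StubModel is a hypothetical stand-in for the Model class that the real PDB_files module provides; only the write/write_plain interface that PDB.write relies on is mimicked.

class StubModel(object):   # hypothetical stand-in for PDB_files.Model
    def write(self):
        return 'MODEL     1\n...\nENDMDL\n'
    def write_plain(self):
        return 'ATOM  ...\n'

p = PDB(id = '1ABC', filename = '1ABC.pdb', title = 'example structure', models = [StubModel()])
print(p.write())   # with a single model, the plain records are written, terminated by END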
143,710 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/biblio/doi.py
|
klab.biblio.doi.DOI
|
class DOI(PublicationInterface):
''' A class to retrieve information about a journal publication (only journals at present until I get more test cases) from a DOI string.
Instantiate an object with a DOI string e.g. "a = DOI('10.1371/journal.pone.0063906')". The information is stored in
the issue and article members of the object in a hierarchical format similar to what is returned by the CrossRef website e.g.
issue:
issue_date:
online: 2013-05-22 00:00:00
issn: 1932-6203
volume: 8
full_title: PLoS ONE
abbrev_title: PLoS ONE
issue: 5
article:
authors:
{'surname': u'Lyskov', 'given_name': u'Sergey'}
{'surname': u'Chou', 'given_name': u'Fang-Chieh'}
{'surname': u'Conchúir', 'given_name': u'Shane Ó.'}
{'surname': u'Der', 'given_name': u'Bryan S.'}
{'surname': u'Drew', 'given_name': u'Kevin'}
{'surname': u'Kuroda', 'given_name': u'Daisuke'}
{'surname': u'Xu', 'given_name': u'Jianqing'}
{'surname': u'Weitzner', 'given_name': u'Brian D.'}
{'surname': u'Renfrew', 'given_name': u'P. Douglas'}
{'surname': u'Sripakdeevong', 'given_name': u'Parin'}
{'surname': u'Borgo', 'given_name': u'Benjamin'}
{'surname': u'Havranek', 'given_name': u'James J.'}
{'surname': u'Kuhlman', 'given_name': u'Brian'}
{'surname': u'Kortemme', 'given_name': u'Tanja'}
{'surname': u'Bonneau', 'given_name': u'Richard'}
{'surname': u'Gray', 'given_name': u'Jeffrey J.'}
{'surname': u'Das', 'given_name': u'Rhiju'}
issue_date:
online: 2013-05-22 00:00:00
title: Serverification of Molecular Modeling Applications: The Rosetta Online Server That Includes Everyone (ROSIE)
'''
record_types = {
'journal' : 'Journal',
'book' : 'Book',
'conference' : 'Conference',
'dissertation' : 'Dissertation',
'report-paper' : 'Report',
'standard' : 'Standard',
'database' : 'Database',
#'sa_component', ?
}
def __init__(self, doi):
# Allow for 'doi:10.1038/nature12443' or '10.1038/nature12443'
doi = doi.strip()
if doi.lower().startswith('doi:'):
doi = doi[4:].strip()
self.issue = {}
self.article = {}
self.published_dates = []
self.doi = doi
self.data = self.get_info()
self.parse()
# Record retrieval
def get_info(self):
'Retrieve the data from CrossRef.'
escaped_doi = urllib.parse.quote(self.doi, '')
html = get_resource("www.crossref.org", '/guestquery?queryType=doi&restype=unixref&doi=%s&doi_search=Search' % escaped_doi)
xml_matches = []
for m in re.finditer('(<doi_records>.*?</doi_records>)', html, re.DOTALL):
xml_matches.append(m.group(0))
if len(xml_matches) == 0:
raise DOIRetrievalException('No matches found for the DOI "%s".' % self.doi)
elif len(xml_matches) == 1:
return xml_matches[0]
else:
raise DOIRetrievalException('Multiple (%d) matches found for the DOI "%s".' % (len(xml_matches), self.doi))
# Helper functions
def extract_node_data(self, tag, fieldnames):
#print(colortext.make(str(fieldnames), 'cyan'))
if tag and len(tag) == 1:
return self.extract_node_data_2(tag[0], fieldnames)
def extract_node_data_2(self, tag, fieldnames):
d = {}
for f in fieldnames:
t = tag.getElementsByTagName(f)
if len(t) > 0:
d[f] = t[0].childNodes[0].nodeValue
return d
# Main parsing function
def parse(self):
data = self.data
try:
self._dom = parseString(data)
except Exception as e:
raise DOIRetrievalException("An error occurred while parsing the XML for the DOI record.\n%s" % str(e))
try:
main_tag = self._dom.getElementsByTagName("doi_records")
assert(len(main_tag) == 1)
record_tag = main_tag[0].getElementsByTagName("doi_record")
assert(len(record_tag) == 1)
crossref_tag = record_tag[0].getElementsByTagName("crossref")
assert(len(crossref_tag) == 1)
except Exception as e:
raise DOIRetrievalException("The XML format does not fit the expected format.\n%s" % str(e))
crossref_tag = crossref_tag[0]
child_nodes = []
for c in range(len(crossref_tag.childNodes)):
if crossref_tag.childNodes[c].toxml().strip():
child_nodes.append(crossref_tag.childNodes[c].nodeName)
if len(child_nodes) == 0:
raise RecordTypeParsingNotImplementedException
if len(child_nodes) > 1:
raise RecordTypeParsingNotImplementedException
else:
tag_type = child_nodes[0]
self.record_type = tag_type
if tag_type == 'journal':
journal_tag = crossref_tag.getElementsByTagName("journal")
if len(journal_tag) == 1:
return self.parse_journal_data_xml(journal_tag[0])
elif tag_type in ['book', 'conference', 'sa_component', 'dissertation', 'report-paper', 'standard', 'database']:
print((self.data))
raise RecordTypeParsingNotImplementedException
elif tag_type == 'error':
error_tag = crossref_tag.getElementsByTagName("error")
if len(error_tag) == 1:
error_msg = None
try: error_msg = error_tag[0].childNodes[0].nodeValue
except: pass
if error_msg:
raise CrossRefException(error_msg)
else:
raise CrossRefException
else:
raise CrossRefException
else:
raise UnexpectedRecordTypeException
# Parsing functions for specific record types
# todo: Only the journal type is added now. When I add more record types, figure out the overlap with other types
# (using the XSD - http://doi.crossref.org/schemas/crossref4.3.0.xsd) and separate out the blocks below into common
# functions e.g. author name parsing from the contributors tag.
#
# contributor tags are common to the journal, conference, book, series, report-paper, standard, and database types.
# person_name is used by the contributor type and used in dissertation (rather than an enclosing contributor tag).
#
# It makes sense to separate these out into a contributor tag parsing function and an person_name parsing function.
#
# Look here also: http://doi.crossref.org/schemas/common4.3.0.xsd as that spec defines the subtypes like contributors.
def parse_journal_data_xml(self, journal_tag):
self.issue['meta_data'] = self.extract_node_data(journal_tag.getElementsByTagName("journal_metadata"), ['full_title', 'abbrev_title', 'issn'])
journal_issue_tag = journal_tag.getElementsByTagName("journal_issue")
assert(len(journal_issue_tag) <= 1)
if len(journal_issue_tag) == 1:
journal_issue_tag = journal_issue_tag[0]
publication_dates = journal_issue_tag.getElementsByTagName("publication_date")
for publication_date in publication_dates:
media_type = publication_date.getAttribute('media_type').strip() or 'unknown_media'
if media_type:
self.issue['__issue_date'] = self.issue.get('__issue_date', {})
self.issue['__issue_date'][media_type] = self.extract_node_data_2(publication_date, ['year', 'month', 'day'])
volume_tag = journal_issue_tag.getElementsByTagName("journal_volume")
if len(volume_tag) == 1:
self.issue['volume'] = int(volume_tag[0].getElementsByTagName("volume")[0].childNodes[0].nodeValue) # this type-cast may be too strong e.g. for electronic editions
issue_tag = journal_issue_tag.getElementsByTagName("issue")
if len(issue_tag) == 1:
self.issue['issue'] = issue_tag[0].childNodes[0].nodeValue # not necessarily an int e.g. pmid:23193287 / doi:10.1093/nar/gks1195
# Parse Journal Article information
article_tag = journal_tag.getElementsByTagName("journal_article")
if len(article_tag) == 1:
article_tag = article_tag[0]
# Titles
# A hack to deal with titles with embedded HTML
tag = article_tag.getElementsByTagName("titles")
if tag and len(tag) == 1:
inner_tag = tag[0].getElementsByTagName('title')
if inner_tag and len(inner_tag) == 1:
inner_tag_xml = inner_tag[0].toxml()
article_title = ' '.join(inner_tag_xml.replace('<title>', '').replace('</title>', '').split())
idx = article_title.find('<![CDATA[')
if idx != -1:
right_idx = article_title.find(']]>', idx + 9) # absolute position of the closing ]]> marker, or -1 if missing
if right_idx != -1:
article_title = ('%s %s' % (article_title[idx+9:right_idx], article_title[right_idx+3:])).strip()
self.article['title'] = article_title
inner_tag = tag[0].getElementsByTagName('subtitle')
if inner_tag and len(inner_tag) == 1:
inner_tag_xml = inner_tag[0].toxml()
article_subtitle = ' '.join(inner_tag_xml.replace('<subtitle>', '').replace('</subtitle>', '').split())
idx = article_subtitle.find('<![CDATA[')
if idx != -1:
right_idx = article_subtitle.find(']]>', idx + 9) # absolute position of the closing ]]> marker, or -1 if missing
if right_idx != -1:
article_subtitle = ('%s %s' % (article_subtitle[idx+9:right_idx], article_subtitle[right_idx+3:])).strip()
self.article['subtitle'] = article_subtitle
#title_data = self.extract_node_data(article_tag.getElementsByTagName("titles"), ['title', 'subtitle'])
#if title_data.get('title'):
# self.article['title'] = title_data['title']
# self.article['subtitle'] = title_data['subtitle']
publication_dates = article_tag.getElementsByTagName("publication_date") or []
for publication_date in publication_dates:
media_type = publication_date.getAttribute('media_type').strip() or 'unknown_media'
if media_type:
self.article['__issue_date'] = self.article.get('__issue_date', {})
self.article['__issue_date'][media_type] = self.extract_node_data_2(publication_date, ['year', 'month', 'day'])
self.article['authors'] = []
if article_tag.getElementsByTagName("contributors"):
for contributor in article_tag.getElementsByTagName("contributors")[0].getElementsByTagName("person_name"):
if contributor.getAttribute('contributor_role') == "author":
fields = self.extract_node_data_2(contributor, ['given_name', 'surname'])
# A hack to fix bad records e.g. 10.1016/j.neuro.2006.03.023 where the authors' names are all in uppercase.
# Note that in this case, it does not fix the missing apostrophe in "O'Gara" or the missing hyphen/capitalization in "Leigh-Logan".
for k, v in fields.items():
if v.isupper():
fields[k] = v.title()
self.article['authors'].append(fields)
if not self.article['authors']:
raise NoAuthorsFoundException('Could not find any authors in the CrossRef record.')
article_pages = self.extract_node_data(article_tag.getElementsByTagName("pages"), ['first_page', 'last_page']) or {}
for k, v in article_pages.items():
self.article[k] = v
# Convert dates
for media_type, date_fields in self.issue.get('__issue_date', {}).items():
for t_, v_ in date_fields.items():
date_fields[t_] = int(v_)
self.issue['issue_date'] = self.issue.get('issue_date', {})
if date_fields.get('year') and date_fields.get('month') and date_fields.get('day'):
dt = datetime.date(date_fields['year'], date_fields['month'], date_fields['day'])
self.issue['issue_date'][media_type] = dt
self.published_dates.append(dt)
for media_type, date_fields in self.article.get('__issue_date', {}).items():
for t_, v_ in date_fields.items():
date_fields[t_] = int(v_)
self.article['issue_date'] = self.article.get('issue_date', {})
if date_fields.get('year') and date_fields.get('month') and date_fields.get('day'):
dt = datetime.date(date_fields['year'], date_fields['month'], date_fields['day'])
self.article['issue_date'][media_type] = dt
self.published_dates.append(dt)
# Move the issue meta_data to the top issue level
for k, v in (self.issue['meta_data'] or {}).items():
assert(k not in self.issue)
self.issue[k] = v
del self.issue['meta_data']
# String printing / data retrieval functions
def get_pubmed_id(self):
return None
def get_url(self):
return 'http://dx.doi.org/%s' % self.doi
def convert_to_ris(self):
d = self.to_dict()
RIS = []
doi_to_ris_record_type_mapping = {
'Journal' : 'JOUR'
}
if doi_to_ris_record_type_mapping.get(d['RecordType']):
RIS.append('TY - %s' % doi_to_ris_record_type_mapping[d['RecordType']])
else:
raise Exception("The logic needed to parse records of type '%s' has not been implemented yet." % d['RecordType'])
if d.get('Title'):
RIS.append('T1 - %(Title)s' % d)
for author in d['authors']:
if author['MiddleNames']:
first_names = ' '.join([author.get('FirstName')] + author['MiddleNames'].split())
else:
first_names = author.get('FirstName')
if author['Surname']:
RIS.append('A1 - %s, %s' % (author['Surname'], first_names))
else:
RIS.append('A1 - %s' % first_names)
if d.get('PublicationName'):
RIS.append('JF - %(PublicationName)s' % d)
if publication_abbreviations.get(d['PublicationName']):
RIS.append('JA - %s' % publication_abbreviations[d['PublicationName']])
if d.get('Volume'):
RIS.append('VL - %(Volume)s' % d)
if d.get('Issue'):
RIS.append('IS - %(Issue)s' % d)
if d.get('StartPage'):
RIS.append('SP - %(StartPage)s' % d)
if d.get('EndPage'):
RIS.append('EP - %(EndPage)s' % d)
if d.get('DOI'):
RIS.append('M3 - %(DOI)s' % d)
RIS.append('UR - http://dx.doi.org/%(DOI)s' % d)
if d.get('PublicationDate'):
RIS.append('Y1 - %(PublicationDate)s' % d)
elif d.get('PublicationYear'):
RIS.append('Y1 - %(PublicationYear)s' % d)
RIS.append('ER - ')
return '\n'.join(RIS)
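# For the ROSIE record used in the class docstring above, convert_to_ris()
# produces output along these lines (author lines abbreviated; the JA line
# only appears if an abbreviation is configured for the journal):
#
#   TY - JOUR
#   T1 - Serverification of Molecular Modeling Applications: The Rosetta Online Server That Includes Everyone (ROSIE)
#   A1 - Lyskov, Sergey
#   ...
#   A1 - Das, Rhiju
#   JF - PLoS ONE
#   VL - 8
#   IS - 5
#   M3 - 10.1371/journal.pone.0063906
#   UR - http://dx.doi.org/10.1371/journal.pone.0063906
#   Y1 - 2013/05/22
#   ER -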
def get_earliest_date(self):
'''Returns the earliest date as a string.'''
if self.published_dates:
return str(sorted(self.published_dates)[0]).replace('-', '/')
return None
def get_year(self):
article_date = self.issue.get('__issue_date') or self.article.get('__issue_date')
if article_date:
for media_type, fields in article_date.items():
if fields.get('year'):
# break on the first found year
return str(fields.get('year'))
return None
def to_dict(self):
'''A representation of the publication data that matches the schema we use in our databases.'''
if not self.record_type == 'journal':
# todo: it may be worthwhile creating subclasses for each entry type (journal, conference, etc.) with a common
# API e.g. to_json which creates output appropriately
raise Exception('This function has only been tested on journal entries at present.')
author_list = []
authors = self.article.get('authors', [])
for x in range(len(authors)):
author = authors[x]
first_name = None
middle_names = None
if author.get('given_name'):
names = author['given_name'].split()
first_name = names[0]
middle_names = (' '.join(names[1:])) or None
author_list.append(
dict(
AuthorOrder = x + 1,
FirstName = first_name,
MiddleNames = middle_names,
Surname = author.get('surname')
)
)
return dict(
Title = self.article.get('title'),
PublicationName = self.issue.get('full_title'),
Volume = self.issue.get('volume'),
Issue = self.issue.get('issue'),
StartPage = self.article.get('first_page'),
EndPage = self.article.get('last_page'),
PublicationYear = self.get_year(),
PublicationDate = self.get_earliest_date(),
RIS = None,
DOI = self.doi,
PubMedID = self.get_pubmed_id(),
URL = 'http://dx.doi.org/%s' % self.doi,
ISSN = None, # eight-digit number
authors = author_list,
#
RecordType = DOI.record_types.get(self.record_type)
)
def to_string2(self, html = False, add_url = False):
if not self.record_type == 'journal':
raise Exception('This function has only been tested on journal entries at present.')
author_str = []
for author in self.article.get('authors', []):
author_str.append(('%s %s' % (author.get('given_name'), author.get('surname'))).strip())
author_str = (', '.join(author_str))
if html and author_str:
author_str = '<span class="publication_authors">%s.</span>' % author_str
title_str = self.article.get('title', '')
if title_str:
if add_url:
title_str = '<a href="%s" target="_blank">%s</a>' % (self.get_url(), title_str)
if html and title_str:
title_str = '<span class="publication_title">%s.</span>' % title_str
issue_str = ''
if self.issue.get('full_title'):
issue_str += self.issue['full_title']
if self.issue.get('volume'):
if self.issue.get('issue'):
issue_str += ' %s(%s)' % (self.issue['volume'], self.issue['issue'])
else:
issue_str += ' %s' % self.issue['volume']
if self.article.get('first_page'):
issue_str += ':%s' % self.article['first_page']
if self.article.get('last_page'):
issue_str += '-%s' % self.article['last_page']
if html and issue_str:
issue_str = '<span class="publication_issue">%s.</span>' % issue_str
earliest_date = self.get_earliest_date()
if earliest_date:
article_date = earliest_date
else:
article_date = self.get_year()
if html and article_date:
article_date = '<span class="publication_date">%s.</span>' % article_date
s = None
if html:
s = ' '.join([c for c in [author_str, title_str, issue_str, article_date] if c])
else:
s = '. '.join([c for c in [author_str, title_str, issue_str, article_date] if c])
if s:
s = s + '.'
return s
def __repr__(self):
s = ['issue']
for k, v in self.issue.items():
if not k.startswith('__'):
if type(v) == type(self.issue):
s.append(' %s:' % k)
for k_, v_ in v.items():
s.append(' %s: %s' % (k_, str(v_)))
else:
s.append(' %s: %s' % (k, str(v)))
s.append('article')
for k, v in self.article.items():
if not k.startswith('__'):
if type(v) == type(self.issue):
s.append(' %s:' % k)
for k_, v_ in v.items():
s.append(' %s: %s' % (k_, str(v_)))
elif type(v) == type(s):
s.append(' %s:' % k)
for v_ in v:
s.append(' %s' % str(v_))
else:
s.append(' %s: %s' % (k, str(v)))
return "\n".join(s)
|
class DOI(PublicationInterface):
''' A class to retrieve information about a journal publication (only journals at present until I get more test cases) from a DOI string.
Instantiate an object with a DOI string e.g. "a = DOI('10.1371/journal.pone.0063906')". The information is stored in
the issue and article members of the object in a hierarchical format similar to what is returned by the CrossRef website e.g.
issue:
issue_date:
online: 2013-05-22 00:00:00
issn: 1932-6203
volume: 8
full_title: PLoS ONE
abbrev_title: PLoS ONE
issue: 5
article:
authors:
{'surname': u'Lyskov', 'given_name': u'Sergey'}
{'surname': u'Chou', 'given_name': u'Fang-Chieh'}
{'surname': u'Conchúir', 'given_name': u'Shane Ó.'}
{'surname': u'Der', 'given_name': u'Bryan S.'}
{'surname': u'Drew', 'given_name': u'Kevin'}
{'surname': u'Kuroda', 'given_name': u'Daisuke'}
{'surname': u'Xu', 'given_name': u'Jianqing'}
{'surname': u'Weitzner', 'given_name': u'Brian D.'}
{'surname': u'Renfrew', 'given_name': u'P. Douglas'}
{'surname': u'Sripakdeevong', 'given_name': u'Parin'}
{'surname': u'Borgo', 'given_name': u'Benjamin'}
{'surname': u'Havranek', 'given_name': u'James J.'}
{'surname': u'Kuhlman', 'given_name': u'Brian'}
{'surname': u'Kortemme', 'given_name': u'Tanja'}
{'surname': u'Bonneau', 'given_name': u'Richard'}
{'surname': u'Gray', 'given_name': u'Jeffrey J.'}
{'surname': u'Das', 'given_name': u'Rhiju'}
issue_date:
online: 2013-05-22 00:00:00
title: Serverification of Molecular Modeling Applications: The Rosetta Online Server That Includes Everyone (ROSIE)
'''
def __init__(self, doi):
pass
def get_info(self):
'''Retrieve the data from CrossRef.'''
pass
def extract_node_data(self, tag, fieldnames):
pass
def extract_node_data_2(self, tag, fieldnames):
pass
def parse(self):
pass
def parse_journal_data_xml(self, journal_tag):
pass
def get_pubmed_id(self):
pass
def get_url(self):
pass
def convert_to_ris(self):
pass
def get_earliest_date(self):
'''Returns the earliest date as a string.'''
pass
def get_year(self):
pass
def to_dict(self):
'''A representation of the publication data that matches the schema we use in our databases.'''
pass
def to_string2(self, html = False, add_url = False):
pass
def __repr__(self):
pass
| 15 | 4 | 28 | 3 | 23 | 2 | 8 | 0.22 | 1 | 12 | 5 | 1 | 14 | 7 | 14 | 24 | 484 | 78 | 336 | 86 | 321 | 73 | 287 | 85 | 272 | 30 | 2 | 6 | 110 |
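A short usage sketch for the DOI class above; note that constructing the object performs a live query against www.crossref.org, so network access is required.

d = DOI('10.1371/journal.pone.0063906')   # the example DOI from the class docstring
print(d.get_url())                        # http://dx.doi.org/10.1371/journal.pone.0063906
print(d.to_string2())                     # a one-line citation string
record = d.to_dict()                      # the database-schema representation
ris = d.convert_to_ris()                  # RIS export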
143,711 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/google/gcalendar.py
|
klab.google.gcalendar.GoogleCalendar
|
class GoogleCalendar(object):
''' A class to interact with a set of Google calendars. This is used by our local lab website and by the meetings script.
The class methods are split up following the API here:
https://developers.google.com/resources/api-libraries/documentation/calendar/v3/python/latest/ '''
@staticmethod
def from_file(oauth_json_filepath, calendar_ids):
return GoogleCalendar(read_file(oauth_json_filepath), calendar_ids)
def __init__(self, oauth_json, calendar_ids):
'''oauth_json is a JSON string which should contain login credentials for OAuth 2.0.
calendar_ids is a list of calendar aliases to connect to and should be defined in oauth_json["calendars"].
We use calendar aliases e.g. "main" or "biosensor meetings" for convenience.
'''
oc = OAuthCredentials.from_JSON(oauth_json)
configured_calendar_ids = NestedBunch.from_JSON(oauth_json).calendars
for calendar_id in calendar_ids:
assert(calendar_id in list(configured_calendar_ids.keys()))
self.calendar_ids = calendar_ids
# Request both read/write (calendar) and read-only access (calendar.readonly)
credentials = SignedJwtAssertionCredentials(oc.client_email, oc.private_key, scope=['https://www.googleapis.com/auth/calendar', 'https://www.googleapis.com/auth/calendar.readonly'])
http_auth = credentials.authorize(httplib2.Http())
# Create a service object for the Google Calendar v3 API
self.service = build('calendar', 'v3', http = http_auth)
self.timezone_string = 'America/Los_Angeles'
self.timezone = pytz.timezone(self.timezone_string)
self.configured_calendar_ids = configured_calendar_ids
# Access control lists (acl)
def get_acl_list(self, calendar_id):
return self.service.acl().list(calendarId = self.configured_calendar_ids[calendar_id]).execute() # note: not using pagination here yet
def get_calendar_users(self, calendar_id):
users = {}
acl_list = self.get_acl_list(calendar_id)
if acl_list:
for item in acl_list['items']:
nb = DeepNonStrictNestedBunch(item)
users[nb.role]= users.get(nb.role, [])
if nb.scope.type == 'user':
if nb.scope.value.find('@group.calendar.google.com') == -1 and nb.scope.value.find('@developer.gserviceaccount.com') == -1:
users[nb.role].append(nb.scope.value)
users[nb.role] = sorted(users[nb.role])
return DeepNonStrictNestedBunch(users)
# Calendar list (calendarList)
def get_calendars(self):
calendars = []
cl = self.service.calendarList().list().execute()
for c in cl.get('items', []):
nb = DeepNonStrictNestedBunch(c)
calendars.append(nb)
return calendars
def get_calendar(self, calendar_id):
return DeepNonStrictNestedBunch(self.service.calendarList().get(calendarId = self.configured_calendar_ids[calendar_id]).execute())
# Calendar and event colors (colors)
def get_colors(self):
import pprint
clrs = self.service.colors().get().execute()
pprint.pprint(clrs)
# Calendar events (events)
def get_events_within_a_given_month(self, year, month, day = 1, hour = 0, minute = 0, second = 0):
now = datetime.now(tz=self.timezone) # timezone?
start_time = datetime(year=year, month=month, day=day, hour=hour, minute=minute, second=second, tzinfo=self.timezone)
if start_time.month == 12:
end_time = datetime(year = start_time.year, month = 12, day = 31, hour=23, minute=59, second=59, tzinfo=self.timezone)
else:
end_time = datetime(year = start_time.year, month = start_time.month + 1, day = 1, hour=0, minute=0, second=0, tzinfo=self.timezone)
end_time = end_time - timedelta(seconds = 1)
start_time = start_time.isoformat()
end_time = end_time.isoformat()
return self.get_events(start_time, end_time)
def get_upcoming_events_within_the_current_month(self):
now = datetime.now(tz=self.timezone) # timezone?
return self.get_events_within_a_given_month(now.year, now.month, day = now.day, hour = now.hour, minute = now.minute, second = now.second)
def get_upcoming_event_lists_for_the_remainder_of_the_month(self, year = None, month = None):
'''Return the set of events as triple of (today's events, events for the remainder of the week, events for the remainder of the month).'''
events = []
if year == None and month == None:
now = datetime.now(tz=self.timezone) # timezone?
else:
now = datetime(year=year, month=month, day=1, hour=0, minute=0, second=0, tzinfo=self.timezone)
# Get today's events, including past events
start_time = datetime(year=now.year, month=now.month, day=now.day, hour=0, minute=0, second=0, tzinfo=self.timezone)
end_time = datetime(year = start_time.year, month = start_time.month, day = start_time.day, hour=23, minute=59, second=59, tzinfo=self.timezone)
events.append(self.get_events(start_time.isoformat(), end_time.isoformat()))
# Get this week's events
if now.weekday() < 6:
start_time = datetime(year=now.year, month=now.month, day=now.day, hour=0, minute=0, second=0, tzinfo=self.timezone) + timedelta(days = 1) # adding a timedelta avoids a ValueError on the last day of the month
end_time = start_time + timedelta(days = 6 - now.weekday())
# We do still want to return events in the next month if they fall within this week. Otherwise
#if end_time.month != now.month:
# end_time = end_time - timedelta(days = end_time.day)
# end_time = datetime(year = end_time.year, month = end_time.month, day = end_time.day, hour=23, minute=59, second=59, tzinfo=self.timezone)
#else:
end_time = end_time + timedelta(seconds = -1)
#end_time = datetime(year = end_time.year, month = end_time.month, day = end_time.day - 1, hour=23, minute=59, second=59, tzinfo=self.timezone)
events.append(self.get_events(start_time.isoformat(), end_time.isoformat()))
else:
events.append([])
# Get this remaining events in the month
start_time = end_time + timedelta(seconds = 1)
if start_time.month == now.month:
if now.month == 12:
end_time = datetime(year = start_time.year, month = 12, day = 31, hour=23, minute=59, second=59, tzinfo=self.timezone)
else:
end_time = datetime(year = start_time.year, month = start_time.month + 1, day = 1, hour=0, minute=0, second=0, tzinfo=self.timezone)
end_time = end_time - timedelta(seconds = 1)
events.append(self.get_events(start_time.isoformat(), end_time.isoformat()))
else:
events.append([])
return events
def get_upcoming_events_within_the_current_week(self):
'''Returns the events from the calendar for the next days_to_look_ahead days.'''
now = datetime.now(tz=self.timezone) # timezone?
start_time = datetime(year=now.year, month=now.month, day=now.day, hour=now.hour, minute=now.minute, second=now.second, tzinfo=self.timezone)
end_time = start_time + timedelta(days = 6 - now.weekday())
end_time = datetime(year = end_time.year, month = end_time.month, day = end_time.day, hour=23, minute=59, second=59, tzinfo=self.timezone)
assert(end_time.weekday() == 6)
start_time = start_time.isoformat()
end_time = end_time.isoformat()
return self.get_events(start_time, end_time)
def get_upcoming_events_for_today(self):
return self.get_upcoming_events(1)
def get_upcoming_events(self, days_to_look_ahead):
'''Returns the events from the calendar for the next days_to_look_ahead days.'''
now = datetime.now(tz=self.timezone) # timezone?
start_time = datetime(year=now.year, month=now.month, day=now.day, hour=now.hour, minute=now.minute, second=now.second, tzinfo=self.timezone)
end_time = start_time + timedelta(days = days_to_look_ahead)
start_time = start_time.isoformat()
end_time = end_time.isoformat()
return self.get_events(start_time, end_time)
def get_event(self, calendar_id, event_id):
event = self.service.events().get(calendarId = self.configured_calendar_ids[calendar_id], eventId=event_id).execute()
nb = DeepNonStrictNestedBunch(event)
dt = None
if nb.start.dateTime:
dt = dateutil.parser.parse(nb.start.dateTime)
elif nb.start.date:
dt = dateutil.parser.parse(nb.start.date)
dt = datetime(year = dt.year, month = dt.month, day = dt.day, hour=0, minute=0, second=0, tzinfo=self.timezone)
if dt:
nb.datetime_o = dt
nb.calendar_id = calendar_id
return nb
def get_events(self, start_time, end_time, ignore_cancelled = True, get_recurring_events_as_instances = True, restrict_to_calendars = []):
'''A wrapper for events().list. Returns the events from the calendar within the specified times. Some of the interesting fields are:
description, end, htmlLink, location, organizer, start, summary
Note: "Cancelled instances of recurring events (but not the underlying recurring event) will still be included if showDeleted and singleEvents are both False."
'''
es = []
calendar_ids = restrict_to_calendars or self.calendar_ids
for calendar_id in calendar_ids:
now = datetime.now(tz = self.timezone)
events = []
page_token = None
while True:
events = self.service.events().list(pageToken=page_token, maxResults = 250, calendarId = self.configured_calendar_ids[calendar_id], timeMin = start_time, timeMax = end_time, showDeleted = False).execute()
for event in events['items']:
dt = None
nb = DeepNonStrictNestedBunch(event)
assert(not(nb._event))
nb._event = event # keep the original event as returned in case we want to reuse it e.g. insert it into another calendar
if (not ignore_cancelled) or (nb.status != 'cancelled'):
# Ignore cancelled events
if nb.recurrence:
if get_recurring_events_as_instances:
# Retrieve all occurrences of the recurring event within the timeframe
es += self.get_recurring_events(calendar_id, nb.id, start_time, end_time)
else:
es.append(nb)
elif nb.start.dateTime:
dt = dateutil.parser.parse(nb.start.dateTime)
elif nb.start.date:
dt = dateutil.parser.parse(nb.start.date)
dt = datetime(year = dt.year, month = dt.month, day = dt.day, hour=0, minute=0, second=0, tzinfo=self.timezone)
if dt:
nb.datetime_o = dt
nb.calendar_id = calendar_id
es.append(nb)
page_token = events.get('nextPageToken')
if not page_token:
break
es.sort(key=lambda x: x.datetime_o)
return es
def get_recurring_events(self, calendar_id, event_id, start_time, end_time, maxResults = None):
'''A wrapper for events().instances. Returns the list of recurring events for the given calendar alias within the specified timeframe.'''
es = []
page_token = None
while True:
events = self.service.events().instances(calendarId = self.configured_calendar_ids[calendar_id], eventId = event_id, pageToken=page_token, timeMin = start_time, timeMax = end_time, maxResults = maxResults, showDeleted = False).execute()
for event in events['items']:
dt = None
nb = DeepNonStrictNestedBunch(event)
assert(not(nb._event))
nb._event = event # keep the original event as returned in case we want to reuse it e.g. insert it into another calendar
if nb.start.date:
dt = dateutil.parser.parse(nb.start.date + 'T00:00:00-08:00')
elif nb.start.dateTime:
dt = dateutil.parser.parse(nb.start.dateTime)
nb.datetime_o = dt
nb.calendar_id = calendar_id
es.append(nb)
page_token = events.get('nextPageToken')
if not page_token:
break
return es
# Administration
#### Quarters and holiday creation: main calendar
def add_company_quarter(self, company_name, quarter_name, dt, calendar_id = 'notices'):
'''Adds a company_name quarter event to the calendar. dt should be a date object. Returns True if the event was added.'''
assert(calendar_id in list(self.configured_calendar_ids.keys()))
calendarId = self.configured_calendar_ids[calendar_id]
quarter_name = quarter_name.title()
quarter_numbers = {
'Spring' : 1,
'Summer' : 2,
'Fall' : 3,
'Winter' : 4
}
assert(quarter_name in list(quarter_numbers.keys()))
start_time = datetime(year=dt.year, month=dt.month, day=dt.day, hour=0, minute=0, second=0, tzinfo=self.timezone) + timedelta(days = -1)
end_time = start_time + timedelta(days = 3, seconds = -1)
summary = '%s %s Quarter begins' % (company_name, quarter_name)
# Do not add the quarter multiple times
events = self.get_events(start_time.isoformat(), end_time.isoformat(), ignore_cancelled = True)
for event in events:
if event.summary.find(summary) != -1:
return False
event_body = {
'summary' : summary,
'description' : summary,
'start' : {'date' : dt.isoformat(), 'timeZone' : self.timezone_string},
'end' : {'date' : dt.isoformat(), 'timeZone' : self.timezone_string},
'status' : 'confirmed',
'gadget' : {
'display' : 'icon',
'iconLink' : 'https://guybrush.ucsf.edu/images/Q%d_32.png' % quarter_numbers[quarter_name],
'title' : summary,
},
'extendedProperties' : {
'shared' : {
'event_type' : '%s quarter' % company_name,
'quarter_name' : quarter_name
}
}
}
colortext.warning('\n%s\n' % pprint.pformat(event_body))
created_event = self.service.events().insert(calendarId = self.configured_calendar_ids[calendar_id], body = event_body).execute()
return True
def add_holiday(self, start_dt, holiday_name, end_dt = None, calendar_id = 'notices'):
'''Adds a holiday event to the calendar. start_dt and end_dt (if supplied) should be date objects. Returns True if the event was added.'''
assert(calendar_id in list(self.configured_calendar_ids.keys()))
calendarId = self.configured_calendar_ids[calendar_id]
# Note: end_date is one day ahead e.g. for the New Years' holiday Dec 31-Jan 1st, we specify the end_date as Jan 2nd. This is what the calendar expects.
if not end_dt:
end_dt = start_dt
start_date = date(year=start_dt.year, month=start_dt.month, day=start_dt.day)#, tzinfo=self.timezone)
end_date = date(year=end_dt.year, month=end_dt.month, day=end_dt.day) + timedelta(days = 1) #, tzinfo=self.timezone)
start_time = datetime(year=start_dt.year, month=start_dt.month, day=start_dt.day, hour=0, minute=0, second=0, tzinfo=self.timezone) + timedelta(days = -1)
end_time = datetime(year=end_dt.year, month=end_dt.month, day=end_dt.day, hour=23, minute=59, second=59, tzinfo=self.timezone) + timedelta(days = 2)
# Do not add the quarter multiple times
events = self.get_events((start_time + timedelta(days = -1)).isoformat(), (end_time + timedelta(days = 1)).isoformat(), ignore_cancelled = True)
for event in events:
if event.summary.find(holiday_name) != -1:
return False
event_body = {
'summary' : holiday_name,
'description' : holiday_name,
'start' : {'date' : start_date.isoformat(), 'timeZone' : self.timezone_string},
'end' : {'date' : end_date.isoformat(), 'timeZone' : self.timezone_string},
'status' : 'confirmed',
'extendedProperties' : {
'shared' : {
'event_type' : 'Holiday'
}
}
}
if abs((end_date - start_date).days) > 7:
raise Exception('The range of dates from {0} to {1} is greater than expected. Please check to make sure that the dates are correct.'.format(start_date, end_date))
elif end_date < start_date:
raise Exception('Error: The end date {1} occurs before the start date ({0}).'.format(start_date, end_date))
created_event = self.service.events().insert(calendarId = self.configured_calendar_ids[calendar_id], body = event_body).execute()
return True
def remove_all_events(self, calendar_id):
'''Removes all events from a calendar. WARNING: Be very careful using this.'''
# todo: incomplete
now = datetime.now(tz=self.timezone) # timezone?
start_time = datetime(year=now.year - 1, month=now.month, day=now.day, hour=now.hour, minute=now.minute, second=now.second, tzinfo=self.timezone)
end_time = datetime(year=now.year + 1, month=now.month, day=now.day, hour=now.hour, minute=now.minute, second=now.second, tzinfo=self.timezone)
start_time = start_time.isoformat()
end_time = end_time.isoformat()
#events = self.service.events().list(calendarId = self.configured_calendar_ids[calendar_id], showDeleted = False).execute()
events = self.service.events().list(calendarId = self.configured_calendar_ids[calendar_id], timeMin = start_time, timeMax = end_time, showDeleted = False).execute()
print(len(events['items']))
for event in events['items']:
dt = None
nb = DeepNonStrictNestedBunch(event)
#print(event)
if (nb.summary or nb.description or '').find('presentation') != -1:
print(nb.id)
print(nb.summary or nb.description)
print(nb.start)
#### Meetings creation: main calendar
# Tag events. This is all that is needed for the Rosetta development and regular meetings
def tag_event(self, calendar_id, event_id, extendedProperties):
'''Add extendedProperties to a meeting. Warning: extendedProperties must contain only shared and private dicts and
their contents will overwrite anything in the event's extendedProperties i.e. we do *not* deep-merge the dicts.
'''
event_body = self.service.events().get(calendarId = self.configured_calendar_ids[calendar_id], eventId=event_id).execute()
event_body['extendedProperties'] = event_body.get('extendedProperties', {})
event_body['extendedProperties']['shared'] = event_body['extendedProperties'].get('shared', {})
event_body['extendedProperties']['private'] = event_body['extendedProperties'].get('private', {})
assert(sorted(set(extendedProperties.keys()).union(set(['shared', 'private']))) == ['private', 'shared'])
for k, v in extendedProperties['shared'].items():
event_body['extendedProperties']['shared'][k] = v
for k, v in extendedProperties['private'].items():
event_body['extendedProperties']['private'][k] = v
raise Exception('not tested yet')
updated_event = self.service.events().update(calendarId = self.configured_calendar_ids[calendar_id], eventId = event_id, body = event_body).execute()
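# Example extendedProperties argument for tag_event (the assert above requires
# exactly the 'shared' and 'private' keys; the event_type value is illustrative):
#
#   {'shared' : {'event_type' : 'Lab meeting'}, 'private' : {}}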
# Lab meetings
def add_lab_meeting(self, calendar_id, start_dt, end_dt, location, presenters, foodie, summary = None, description = None, visibility = 'default', username_map = {}, email_map = {}):
e = BasicEvent(self, start_dt, end_dt, location = location, summary = summary, description = description, visibility = visibility, username_map = username_map, email_map = email_map)
event = e.create_lab_meeting('Lab meeting', presenters, foodie)
colortext.warning(pprint.pformat(event))
# Journal club meetings
def add_journal_club_meeting(self, calendar_id, start_dt, end_dt, location, presenters, food_vendor, paper = None, summary = None, description = None, visibility = 'default', username_map = {}, email_map = {}):
e = BasicEvent(self, start_dt, end_dt, location = location, summary = summary, description = description, visibility = visibility, username_map = username_map, email_map = email_map)
event = e.create_journal_club_meeting(presenters, food_vendor, paper = paper)
colortext.warning(pprint.pformat(event))
# Kortemme/DeGrado labs joint meetings
def add_kortemme_degrado_joint_meeting(self, calendar_id, start_dt, end_dt, location, presenters, summary = None, description = None, visibility = 'default', username_map = {}, email_map = {}):
e = BasicEvent(self, start_dt, end_dt, location = location, summary = summary, description = description, visibility = visibility, username_map = username_map, email_map = email_map)
event = e.create_lab_meeting('Kortemme/DeGrado joint meeting', presenters, locked = True)
colortext.warning(pprint.pformat(event))
#### Meetings creation: notices calendar
def add_birthday(self, start_dt, end_dt, location, celebrant, caker, summary = None, description = None, visibility = 'default', username_map = {}, email_map = {}, calendar_id = 'notices'):
e = BasicEvent(self, start_dt, end_dt, location = location, summary = summary, description = description, visibility = visibility, username_map = username_map, email_map = email_map)
event_body = e.create_birthday(celebrant, caker)
created_event = self.service.events().insert(calendarId = self.configured_calendar_ids[calendar_id], body = event_body).execute()
return created_event
# Deprecated - remove these when we switch over to the new system
# Getters, deleters
def getAllEvents(self, calendar_id, year = None, month = None):
# See note above for query parameters
#query = gdata.calendar.client.CalendarEventQuery()
#query.max_results = 2**31-1
#query.singleevents = "true"
start_time = None
end_time = None
if year:
if month and (type(month) == type(1)) and month >= 1 and month <=12:
start_time = "%d-%d-01T00:00:00-08:00" % (year, month)
end_time = "%d-%d-31T23:59:00-08:00" % (year, month)
else:
start_time = "%d-01-01T00:00:00-08:00" % year
end_time = "%d-12-31T23:59:00-08:00" % year
events = self.service.events().list(
calendarId = self.configured_calendar_ids[calendar_id],
timeMin = start_time,
timeMax = end_time,
singleEvents = True,
maxResults = 2**31-1,
showDeleted = False).execute()
#print(query, self.URI)
#feed = self.client.GetCalendarEventFeed(q=query, uri = self.URI)
#events = []
#for event in events:
# events.append(event)
# eventIDText = event.id.text
# eventEditURL = event.GetEditLink().href
# eventHTMLURL = event.GetHtmlLink().href
return events.get('items')
def getEventsTable(self, calendar_id, year = None, month = None):
eventstbl = {}
events = self.getAllEvents(calendar_id, year, month)
for event in events:
event = DeepNonStrictNestedBunch (event)
if event.start and event.location and event.status != 'cancelled':
EventTitle = event.summary
if event.start.get('dateTime'):
startdate = event.start['dateTime']
startdate = time.strptime(startdate[0:19], '%Y-%m-%dT%H:%M:%S')
startdate = datetime.fromtimestamp(time.mktime(startdate))
elif event.start.get('date'):
startdate = event.start['date']
startdate = time.strptime(startdate, '%Y-%m-%d')
startdate = datetime.fromtimestamp(time.mktime(startdate))
else:
raise Exception('Cannot determine start date.')
if event.end.get('dateTime'):
enddate = event.end['dateTime']
enddate = time.strptime(enddate[0:19], '%Y-%m-%dT%H:%M:%S')
enddate = datetime.fromtimestamp(time.mktime(enddate))
elif event.end.get('date'):
enddate = event.end['date']
enddate = time.strptime(enddate, '%Y-%m-%d')
enddate = datetime.fromtimestamp(time.mktime(enddate))
else:
raise Exception('Cannot determine end date.')
isBirthday = EventTitle.find("birthday") != -1
location = event.get('location')
eventstbl[(startdate, EventTitle)] = {"event": event, "enddate" : enddate, "location" : location, "title" : EventTitle}
#for k in sorted(eventstbl.keys()):
# print(k, eventstbl[k]["title"])
return eventstbl
def updateEvents(self, calendar_id, newEvents):
currentEvents = self.getEventsTable(calendar_id)
#colortext.message(newEvents)
#colortext.warning(currentEvents)
# Events to remove
toRemove = []
for startdateTitle, event in sorted(currentEvents.items()):
if event["title"].find("birthday") != -1:
# Don't remove birthdays
continue
if newEvents.get(startdateTitle):
newEvent = newEvents[startdateTitle]
if newEvent["enddate"] == event["enddate"]:
if event["location"].startswith(newEvent["location"]):
if str(newEvent["title"]) == str(event["title"]):
# Don't remove events which are in both newEvents and the calendar
continue
# Remove events which are on the calendar but not in newEvents
toRemove.append(startdateTitle)
# Events to add
toAdd = []
for startdateTitle, event in sorted(newEvents.items()):
if currentEvents.get(startdateTitle):
currentEvent = currentEvents[startdateTitle]
if currentEvent["enddate"] == event["enddate"]:
if currentEvent["location"].startswith(event["location"]):
if str(currentEvent["title"]) == str(event["title"]):
# Don't add events which are in both newEvents and the calendar
continue
# Add events which are in newEvents but not on the calendar
toAdd.append(startdateTitle)
if toRemove:
colortext.error("Removing these %d events:" % len(toRemove))
for dtTitle in toRemove:
colortext.warning(dtTitle)
self.removeEvent(calendar_id, currentEvents[dtTitle]["event"].id)
if toAdd:
colortext.message("Adding these %d events:" % len(toAdd))
for dtTitle in toAdd:
newEvent = newEvents[dtTitle]
#print(dtTitle, newEvent)
self.addNewEvent(calendar_id, dtTitle[0], newEvent["enddate"], newEvent["location"], newEvent["title"])
def removeEvent(self, calendar_id, event_id):
for i in range(3):
try:
assert(self.service.events().get(calendarId = self.configured_calendar_ids[calendar_id], eventId = event_id).execute())
self.service.events().delete(calendarId = self.configured_calendar_ids[calendar_id], eventId = event_id).execute()
break
except Exception as e:
colortext.error("An error occurred:")
colortext.error(e)
colortext.error("Trying again.")
time.sleep(2)
def addNewEvent(self, calendar_id, startdate, enddate, location, title):
colortext.message("\nAdding %s on %s at %s" % (title, startdate, location))
#start_time = startdate.strftime('%Y-%m-%dT%H:%M:%S').isoformat()
#end_time = enddate.strftime('%Y-%m-%dT%H:%M:%S').isoformat()
start_time = startdate.isoformat()
end_time = enddate.isoformat()
loc = location
if loc.startswith("Tahoe"):
loc = "%s, 10 minutes outside Truckee, CA @ 39.328455,-120.184078" % loc
else:
if location.startswith("BH "):
loc = "%s, Byers Hall" % loc
loc = "%s, removeEvent/Mission Bay, San Francisco, CA @ 37.767952,-122.392214" % loc
for i in range(3):
try:
self.service.events().insert(
calendarId = self.configured_calendar_ids[calendar_id],
body = {
"start" : {
"timeZone" : self.timezone_string,
"dateTime" : start_time,
},
"end" : {
"timeZone" : self.timezone_string,
"dateTime" : end_time,
},
"location" : loc,
"summary" : title,
"description" : title
}).execute()
break
except Exception as e:
colortext.error("An error occurred:")
colortext.error(traceback.format_exc())
colortext.error(e)
colortext.error("Trying again.")
time.sleep(2)
### Birthdays - rewrite these functions
def add_bidet(self):
raise Exception('update')
# Note: everything below this raise is unreachable scratch code kept for reference.
main_calendar = GoogleCalendar.from_file('/admin/calendars.json', ['main'])
notices_calendar = GoogleCalendar.from_file('/admin/calendars.json', ['notices'])
timezone = main_calendar.timezone
event_ids = set()
seen_notices = set()
for year in range(2014, 2017):
#for year in range(2014, 2015):
colortext.message('\n\nTagging events in %d:\n' % year)
extra_days = 0
if year % 4 == 0:
extra_days = 1
start_time = datetime(year=year, month=1, day=1, hour=0, minute=0, second=0, tzinfo=timezone)
end_time = start_time + timedelta(days = 730 + extra_days, seconds = -1)
start_time, end_time = start_time.isoformat(), end_time.isoformat()
#main_meetings = main_calendar.get_events(start_time, end_time, ignore_cancelled = True, get_recurring_events_as_instances = False)
#for m in main_meetings:
# if m.extendedProperties.shared:
# event_type = m.extendedProperties.shared['event_type']
# if event_type == 'Birthday'
notices = notices_calendar.get_events(start_time, end_time, ignore_cancelled = True, get_recurring_events_as_instances = False)
for n in notices:
if n.id in seen_notices:
continue
seen_notices.add(n.id)
if n.extendedProperties.shared and n.extendedProperties.shared.event_type:
event_type = n.extendedProperties.shared['event_type']
if event_type == 'Birthday':
print(n.summary, n.id)
print(n.start)
event_body = main_calendar.service.events().get(calendarId = main_calendar.configured_calendar_ids["notices"], eventId=n.id).execute()
event_body['gadget'] = {
'display' : 'icon',
'iconLink' : 'https://guybrush.ucsf.edu/images/cake.png',
'title' : n.summary,
#'type' : 'application/x-google-gadgets+xml',
}
created_event = main_calendar.service.events().update(calendarId = main_calendar.configured_calendar_ids["notices"], eventId = n.id, body = event_body).execute()
def updateBirthdays(self, bdays):
raise Exception('update')
eventstbl = self.getEventsTable("main")
for dt, details in sorted(bdays.items()):
bdaykey = datetime(dt.year, dt.month, dt.day)
if eventstbl.get((bdaykey, details["title"])):
if str(eventstbl[(bdaykey, details["title"])]["title"]) == str(details["title"]):
continue
colortext.message("adding " + details["title"])
self.addBirthday(dt, details["title"], details["location"])
def addBirthday(self, dt, title, location):
raise Exception('update')
#if recurrence_data is None:
# recurrence_data = ('DTSTART;VALUE=DATE:20070501\r\n'
# + 'DTEND;VALUE=DATE:20070502\r\n'
# + 'RRULE:FREQ=WEEKLY;BYDAY=Tu;UNTIL=20070904\r\n')
raise Exception('add this functionality')
dtstart ="DATE:%d%0.2d%0.2dT070000" % (dt.year, dt.month, dt.day)
dtend ="DATE:%d%0.2d%0.2dT235900" % (dt.year, dt.month, dt.day)
untildt ="%d%0.2d%0.2d" % (dt.year + 10, dt.month, dt.day)
recurrence_data = ('DTSTART;VALUE=%s\r\n' % dtstart) + ('DTEND;VALUE=%s\r\n' % dtend) + ('RRULE:FREQ=YEARLY;UNTIL=%s\r\n' % untildt)
event = gdata.calendar.data.CalendarEventEntry()
event.title = atom.data.Title(text=title)
event.content = atom.data.Content(text=title)
event.where.append(gdata.calendar.data.CalendarWhere(value=location))
# Set a recurring event
event.recurrence = gdata.data.Recurrence(text=recurrence_data)
self.addEvent(event)
# Utility functions
def printAllEvents(self, calendar_id, year = None):
colortext.message('Events on Calendar: %s' % (self.get_calendar(calendar_id).summary))
eventstbl = self.getEventsTable(calendar_id, year)
for startdateTitle, details in sorted(eventstbl.items()):
startdate = startdateTitle[0]
print((("%s -> %s at %s: %s" % (startdate, details["enddate"], details["location"][0:details["location"].find("@")], details["title"])).encode('ascii', 'ignore')))
def remove_all_cancelled_events(self, calendar_ids = []):
for calendar_id in calendar_ids or self.calendar_ids:
colortext.message('Removing cancelled events in %s' % calendar_id)
events = self.service.events().list(calendarId = self.configured_calendar_ids[calendar_id]).execute()
print(len(events['items']))
for event in events['items']:
dt = None
nb = DeepNonStrictNestedBunch(event)
if nb.status == 'cancelled':
if nb.recurringEventId:
colortext.warning(nb.recurringEventId)
# Retrieve all occurrences of the recurring event within the timeframe
start_time = datetime(year=2010, month=1, day=1, tzinfo=self.timezone).isoformat()
end_time = datetime(year=2015, month=1, day=1, tzinfo=self.timezone).isoformat()
for e in self.get_recurring_events(calendar_id, nb.id, start_time, end_time, maxResults = 10):
print(e)
else:
colortext.warning(nb)
|
class GoogleCalendar(object):
''' A class to interact with a set of Google calendars. This is used by our local lab website and by the meetings script.
The class methods are split up following the API here:
https://developers.google.com/resources/api-libraries/documentation/calendar/v3/python/latest/ '''
@staticmethod
def from_file(oauth_json_filepath, calendar_ids):
pass
def __init__(self, oauth_json, calendar_ids):
'''oauth_json is a JSON string which should contain login credentials for OAuth 2.0.
calendar_ids is a list of calendar aliases to connect to and should be defined in oauth_json["calendars"].
We use calendar aliases e.g. "main" or "biosensor meetings" for convenience.
'''
pass
def get_acl_list(self, calendar_id):
pass
def get_calendar_users(self, calendar_id):
pass
def get_calendars(self):
pass
def get_calendar_users(self, calendar_id):
pass
def get_colors(self):
pass
def get_events_within_a_given_month(self, year, month, day = 1, hour = 0, minute = 0, second = 0):
pass
def get_upcoming_events_within_the_current_month(self):
pass
def get_upcoming_event_lists_for_the_remainder_of_the_month(self, year = None, month = None):
'''Return the set of events as triple of (today's events, events for the remainder of the week, events for the remainder of the month).'''
pass
def get_upcoming_events_within_the_current_week(self):
'''Returns the events from the calendar for the next days_to_look_ahead days.'''
pass
def get_upcoming_events_for_today(self):
pass
def get_upcoming_events_within_the_current_month(self):
'''Returns the events from the calendar for the next days_to_look_ahead days.'''
pass
def get_events_within_a_given_month(self, year, month, day = 1, hour = 0, minute = 0, second = 0):
pass
def get_events_within_a_given_month(self, year, month, day = 1, hour = 0, minute = 0, second = 0):
'''A wrapper for events().list. Returns the events from the calendar within the specified times. Some of the interesting fields are:
description, end, htmlLink, location, organizer, start, summary
Note: "Cancelled instances of recurring events (but not the underlying recurring event) will still be included if showDeleted and singleEvents are both False."
'''
pass
def get_recurring_events(self, calendar_id, event_id, start_time, end_time, maxResults = None):
'''A wrapper for events().instances. Returns the list of recurring events for the given calendar alias within the specified timeframe.'''
pass
def add_company_quarter(self, company_name, quarter_name, dt, calendar_id = 'notices'):
'''Adds a company_name quarter event to the calendar. dt should be a date object. Returns True if the event was added.'''
pass
def add_holiday(self, start_dt, holiday_name, end_dt = None, calendar_id = 'notices'):
'''Adds a holiday event to the calendar. start_dt and end_dt (if supplied) should be date objects. Returns True if the event was added.'''
pass
def remove_all_events(self, calendar_id):
'''Removes all events from a calendar. WARNING: Be very careful using this.'''
pass
def tag_event(self, calendar_id, event_id, extendedProperties):
'''Add extendedProperties to a meeting. Warning: extendedProperties must contain only shared and private dicts and
their contents will overwrite anything in the event's extendedProperties i.e. we do *not* deep-merge the dicts.
'''
pass
def add_lab_meeting(self, calendar_id, start_dt, end_dt, location, presenters, foodie, summary = None, description = None, visibility = 'default', username_map = {}, email_map = {}):
pass
def add_journal_club_meeting(self, calendar_id, start_dt, end_dt, location, presenters, food_vendor, paper = None, summary = None, description = None, visibility = 'default', username_map = {}, email_map = {}):
pass
def add_kortemme_degrado_joint_meeting(self, calendar_id, start_dt, end_dt, location, presenters, summary = None, description = None, visibility = 'default', username_map = {}, email_map = {}):
pass
def add_birthday(self, start_dt, end_dt, location, celebrant, caker, summary = None, description = None, visibility = 'default', username_map = {}, email_map = {}, calendar_id = 'notices'):
pass
def getAllEvents(self, calendar_id, year = None, month = None):
pass
def getEventsTable(self, calendar_id, year = None, month = None):
pass
def updateEvents(self, calendar_id, newEvents):
pass
def removeEvent(self, calendar_id, event_id):
pass
def addNewEvent(self, calendar_id, startdate, enddate, location, title):
pass
def add_bidet(self):
pass
def updateBirthdays(self, bdays):
pass
def addBirthday(self, dt, title, location):
pass
def printAllEvents(self, calendar_id, year = None):
pass
def remove_all_cancelled_events(self, calendar_ids = []):
pass
| 36 | 11 | 18 | 1 | 14 | 3 | 3 | 0.22 | 1 | 13 | 4 | 0 | 33 | 5 | 34 | 34 | 711 | 127 | 489 | 183 | 452 | 106 | 413 | 180 | 377 | 16 | 1 | 6 | 118 |
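The birthday-tagging loop above follows the Calendar v3 get-modify-update pattern. A minimal sketch of that cycle, assuming an already-authorized googleapiclient ``service`` handle; the calendar and event IDs are hypothetical placeholders.
# Get-modify-update sketch; `creds` acquisition (OAuth) is omitted here.
from googleapiclient.discovery import build
# service = build('calendar', 'v3', credentials=creds)
calendar_id = 'primary'        # hypothetical calendar
event_id = 'abc123def456'      # hypothetical event
event = service.events().get(calendarId=calendar_id, eventId=event_id).execute()
event.setdefault('extendedProperties', {}).setdefault('shared', {})['event_type'] = 'Birthday'
service.events().update(calendarId=calendar_id, eventId=event_id, body=event).execute()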
143,712 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/latex/latex_report.py
|
klab.latex.latex_report.LatexPage
|
class LatexPage(object):
def generate_plaintext(self):
return ''
|
class LatexPage(object):
def generate_plaintext(self):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 4 | 1 | 0 | 1 | 1 | 3 | 0 | 3 | 2 | 1 | 0 | 3 | 2 | 1 | 1 | 1 | 0 | 1 |
143,713 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/latex/latex_report.py
|
klab.latex.latex_report.LatexPagePlot
|
class LatexPagePlot(LatexPage):
def __init__(self, plot_filename, plot_title):
plot_filename = os.path.abspath( plot_filename )
if not os.path.isfile( plot_filename ):
print()
print(plot_filename)
raise Exception('Above plot filename is not a file!')
self.plot_filename = plot_filename
if plot_title:
self.plot_title = make_latex_safe(plot_title)
else:
self.plot_title = ''
def generate_latex(self):
return_str = '\\begin{figure}[H]\n'
return_str += ' \\includegraphics[width=\\textwidth]{{%s}%s}\n' % (os.path.splitext(self.plot_filename)[0], os.path.splitext(self.plot_filename)[1])
if self.plot_title != '':
return_str += ' \\caption{%s}\n' % self.plot_title
return_str += '\\end{figure}\n'
return return_str
|
class LatexPagePlot(LatexPage):
def __init__(self, plot_filename, plot_title):
pass
def generate_latex(self):
pass
| 3 | 0 | 9 | 0 | 9 | 0 | 3 | 0 | 1 | 1 | 0 | 0 | 2 | 2 | 2 | 3 | 20 | 1 | 19 | 6 | 16 | 0 | 18 | 6 | 15 | 3 | 2 | 1 | 5 |
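A usage sketch for the class above; the plot path is a hypothetical file that must already exist, since __init__ validates it.
# Usage sketch (hypothetical path; __init__ raises unless the file exists).
page = LatexPagePlot('plots/scatter.png', 'Score vs. RMSD')
print(page.generate_latex())
# \begin{figure}[H]
#   \includegraphics[width=\textwidth]{{/abs/path/plots/scatter}.png}
#   \caption{Score vs. RMSD}
# \end{figure}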
143,714 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/latex/latex_report.py
|
klab.latex.latex_report.LatexPageSection
|
class LatexPageSection(LatexPage):
def __init__(self, title, subtext = None, clearpage = True):
self.title = make_latex_safe(title)
self.clearpage = clearpage
if subtext:
self.subtext = make_latex_safe(subtext)
else:
self.subtext = None
self.section_latex_func = 'section'
def generate_latex(self):
return_str = ''
if self.clearpage:
return_str += '\n\\clearpage\n\n'
return_str += '\\%s{%s}\n' % (self.section_latex_func, self.title)
if self.subtext:
return_str += '\\textit{%s}\n' % self.subtext
return_str += '\n'
return return_str
def generate_plaintext(self):
return_str = ''
if self.clearpage:
return_str += '\n\n'
return_str += '\n\n%s %s\n\n' % (self.section_latex_func.upper(), self.title)
if self.subtext:
return_str = return_str[:-1] + '%s' % self.subtext
return_str += '\n\n'
return return_str
|
class LatexPageSection(LatexPage):
def __init__(self, title, subtext = None, clearpage = True):
pass
def generate_latex(self):
pass
def generate_plaintext(self):
pass
| 4 | 0 | 9 | 0 | 9 | 0 | 3 | 0 | 1 | 0 | 0 | 1 | 3 | 4 | 3 | 4 | 29 | 2 | 27 | 10 | 23 | 0 | 26 | 10 | 22 | 3 | 2 | 1 | 8 |
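A sketch contrasting the two render paths; note that the title is LaTeX-escaped once, at construction, so the escape also shows up in the plaintext output.
# Usage sketch: the same section rendered as LaTeX and as plaintext.
section = LatexPageSection('Results & Discussion', subtext='Preliminary numbers')
print(section.generate_latex())      # \clearpage, \section{Results \& Discussion}, \textit{...}
print(section.generate_plaintext())  # SECTION Results \& Discussion ... (the escape survives)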
143,715 |
Kortemme-Lab/pull_into_place
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Kortemme-Lab_pull_into_place/pull_into_place/commands/plot_funnels.py
|
pull_into_place.commands.plot_funnels.main.PipDesign
|
class PipDesign (smd.Design):
def _load_models(self, use_cache):
self._models, self._metrics = structures.load(
self.directory,
use_cache=use_cache,
require_io_dir=False,
)
|
class PipDesign (smd.Design):
def _load_models(self, use_cache):
pass
| 2 | 0 | 6 | 0 | 6 | 1 | 1 | 0.29 | 1 | 0 | 0 | 0 | 1 | 3 | 1 | 1 | 8 | 1 | 7 | 4 | 5 | 2 | 3 | 3 | 1 | 1 | 1 | 0 | 1 |
143,716 |
Kozea/Flask-WeasyPrint
|
tests/__init__.py
|
tests.Config
|
class Config:
GRAPH_COLORS = ['#0C3795', '#752641', '#E47F00']
|
class Config:
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0.5 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 2 | 2 | 1 | 1 | 2 | 2 | 1 | 0 | 0 | 0 | 0 |
143,717 |
Kozea/cairocffi
|
cairocffi/surfaces.py
|
cairocffi.surfaces.Win32Surface
|
class Win32Surface(Surface): # pragma: no cover
"""Creates a cairo surface that targets the given DC.
The DC will be queried for its initial clip extents, and this
will be used as the size of the cairo surface. The resulting
surface will always be of format CAIRO_FORMAT_RGB24; should
you need another surface format, you will need to create one
through cairo_win32_surface_create_with_dib().
:param hdc :
The DC to create a surface for,
as obtained from ``win32gui.CreateDC``.
**Note**: this unsafely interprets an integer as a pointer.
Make sure it actually points to a valid DC!
:type hdc: int
*New in cairocffi 0.8*
"""
def __init__(self, hdc):
pointer = cairo.cairo_win32_surface_create(ffi.cast('void*', hdc))
Surface.__init__(self, pointer)
|
class Win32Surface(Surface):
'''Creates a cairo surface that targets the given DC.
The DC will be queried for its initial clip extents, and this
will be used as the size of the cairo surface. The resulting
surface will always be of format CAIRO_FORMAT_RGB24; should
you need another surface format, you will need to create one
through cairo_win32_surface_create_with_dib().
:param hdc :
The DC to create a surface for,
as obtained from ``win32gui.CreateDC``.
**Note**: this unsafely inteprets an integer as a pointer.
Make sure it actually points to a valid DC!
:type hdc: int
*New in cairocffi 0.8*
'''
def __init__(self, hdc):
pass
| 2 | 1 | 3 | 0 | 3 | 0 | 1 | 3.75 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 26 | 22 | 4 | 4 | 3 | 2 | 15 | 4 | 3 | 2 | 1 | 2 | 0 | 1 |
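A minimal sketch under the docstring's own caveat, assuming Windows with pywin32 installed; GetDC(0) returns the screen DC as an int, which the class casts to a pointer unchecked.
# Windows-only sketch; requires pywin32. GetDC(0) yields the screen DC.
import win32gui
import cairocffi
hdc = win32gui.GetDC(0)
try:
    surface = cairocffi.Win32Surface(hdc)
    # ... draw via cairocffi.Context(surface) ...
finally:
    win32gui.ReleaseDC(0, hdc)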
143,718 |
Kozea/cairocffi
|
utils/mkconstants.py
|
mkconstants.PrintEnumsVisitor
|
class PrintEnumsVisitor(pycparser.c_ast.NodeVisitor):
def visit_Decl(self, node): # noqa: N802
if node.name and node.name.startswith('CAIRO_'): # len('CAIRO_') == 6
if node.init.type == 'string':
print('%s = b%s' % (node.name[6:], node.init.value))
else:
print('%s = %s' % (node.name[6:], node.init.value))
print('')
def visit_Enum(self, node): # noqa: N802
value = 0
for enumerator in node.values.enumerators:
if enumerator.value is not None:
value_string = parse_constant(enumerator.value)
value = int(value_string, 0)
else:
value_string = str(value)
assert enumerator.name.startswith('CAIRO_') # len('CAIRO_') == 6
print('%s = %s' % (enumerator.name[6:], value_string))
value += 1
print('')
|
class PrintEnumsVisitor(pycparser.c_ast.NodeVisitor):
def visit_Decl(self, node):
pass
def visit_Enum(self, node):
pass
| 3 | 0 | 10 | 0 | 10 | 2 | 3 | 0.2 | 1 | 2 | 0 | 0 | 2 | 0 | 2 | 2 | 21 | 1 | 20 | 6 | 17 | 4 | 18 | 6 | 15 | 3 | 1 | 2 | 6 |
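A self-contained driver sketch for the visitor, assuming the class above is in scope; the inline C snippet stands in for the preprocessed cairo headers that mkconstants actually feeds it.
# Driver sketch: parse a toy enum and let the visitor print its constants.
from pycparser import CParser
ast = CParser().parse('typedef enum { CAIRO_FOO, CAIRO_BAR } cairo_foo_t;')
PrintEnumsVisitor().visit(ast)
# prints:
# FOO = 0
# BAR = 1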
143,719 |
Kozea/cairocffi
|
cairocffi/xcb.py
|
cairocffi.xcb.XCBSurface
|
class XCBSurface(Surface):
"""The XCB surface is used to render cairo graphics to X Window System
windows and pixmaps using the XCB library.
Creates a cairo surface that targets the given drawable (pixmap or window).
.. note::
This class works using objects and libraries in ``xcffib``.
:param conn: The ``xcffib.Connection`` for an open XCB connection
:param drawable:
An XID corresponding to an XCB drawable (a pixmap or a window)
:param visual: An ``xcffib.xproto.VISUALTYPE`` object.
:param width: integer
:param height: integer
"""
def __init__(self, conn, drawable, visual, width, height):
c_visual = visualtype_to_c_struct(visual)
p = cairo.cairo_xcb_surface_create(
conn._conn, drawable, c_visual, width, height)
Surface.__init__(self, p)
def set_size(self, width, height):
"""
Informs cairo of the new size of the X Drawable underlying the surface.
For a surface created for a Window (rather than a Pixmap), this
function must be called each time the size of the window changes (for
a subwindow, you are normally resizing the window yourself, but for a
toplevel window, it is necessary to listen for
:class:`xcffib.xproto.ConfigureNotifyEvent`'s).
A Pixmap can never change size, so it is never necessary to call this
function on a surface created for a Pixmap.
:param width: integer
:param height: integer
"""
cairo.cairo_xcb_surface_set_size(self._pointer, width, height)
self._check_status()
|
class XCBSurface(Surface):
'''The XCB surface is used to render cairo graphics to X Window System
windows and pixmaps using the XCB library.
Creates a cairo surface that targets the given drawable (pixmap or window).
.. note::
This class works using objects and libraries in ``xcffib``.
:param conn: The ``xcffib.Connection`` for an open XCB connection
:param drawable:
An XID corresponding to an XCB drawable (a pixmap or a window)
:param visual: An ``xcffib.xproto.VISUALTYPE`` object.
:param width: integer
:param height: integer
'''
def __init__(self, conn, drawable, visual, width, height):
pass
def set_size(self, width, height):
'''
Informs cairo of the new size of the X Drawable underlying the surface.
For a surface created for a Window (rather than a Pixmap), this
function must be called each time the size of the window changes (for
a subwindow, you are normally resizing the window yourself, but for a
toplevel window, it is necessary to listen for
:class:`xcffib.xproto.ConfigureNotifyEvent`'s).
A Pixmap can never change size, so it is never necessary to call this
function on a surface created for a Pixmap.
:param width: integer
:param height: integer
'''
pass
| 3 | 2 | 12 | 2 | 4 | 6 | 1 | 2.67 | 1 | 0 | 0 | 0 | 2 | 0 | 2 | 27 | 41 | 8 | 9 | 5 | 6 | 24 | 8 | 5 | 5 | 1 | 2 | 0 | 2 |
143,720 |
Kozea/cairocffi
|
cairocffi/surfaces.py
|
cairocffi.surfaces.Win32PrintingSurface
|
class Win32PrintingSurface(Surface): # pragma: no cover
"""Creates a cairo surface that targets the given DC.
The DC will be queried for its initial clip extents,
and this will be used as the size of the cairo surface.
The DC should be a printing DC; antialiasing will be ignored,
and GDI will be used as much as possible to draw to the surface.
The returned surface will be wrapped using the paginated surface
to provide correct complex rendering behaviour;
cairo_surface_show_page() and associated methods must be used
for correct output.
:param hdc:
The DC to create a surface for,
as obtained from ``win32gui.CreateDC``.
**Note**: this unsafely interprets an integer as a pointer.
Make sure it actually points to a valid DC!
:type hdc: int
*New in cairocffi 0.6*
"""
def __init__(self, hdc):
pointer = cairo.cairo_win32_printing_surface_create(
ffi.cast('void*', hdc))
Surface.__init__(self, pointer)
|
class Win32PrintingSurface(Surface):
'''Creates a cairo surface that targets the given DC.
The DC will be queried for its initial clip extents,
and this will be used as the size of the cairo surface.
The DC should be a printing DC; antialiasing will be ignored,
and GDI will be used as much as possible to draw to the surface.
The returned surface will be wrapped using the paginated surface
to provide correct complex rendering behaviour;
cairo_surface_show_page() and associated methods must be used
for correct output.
:param hdc:
The DC to create a surface for,
as obtained from ``win32gui.CreateDC``.
**Note**: this unsafely interprets an integer as a pointer.
Make sure it actually points to a valid DC!
:type hdc: int
*New in cairocffi 0.6*
'''
def __init__(self, hdc):
pass
| 2 | 1 | 4 | 0 | 4 | 0 | 1 | 3.6 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 26 | 27 | 5 | 5 | 3 | 3 | 18 | 4 | 3 | 2 | 1 | 2 | 0 | 1 |
143,721 |
Kozea/cairocffi
|
cairocffi/surfaces.py
|
cairocffi.surfaces.PSSurface
|
class PSSurface(Surface):
"""Creates a PostScript surface of the specified size in PostScript points
to be written to ``target``.
Note that the size of individual pages of the PostScript output can vary.
See :meth:`set_size`.
``target`` can be :obj:`None` to specify no output.
This will generate a surface that may be queried and used as a source,
without generating a temporary file.
The PostScript surface backend recognizes the ``image/jpeg`` MIME type
for the data attached to a surface (see :meth:`~Surface.set_mime_data`)
when it is used as a source pattern for drawing on this surface.
If it is specified, the PostScript backend emits an image
with the content of MIME data (with the ``/DCTDecode`` filter)
instead of a surface snapshot (with the ``/FlateDecode`` filter),
which typically produces PostScript with a smaller file size.
:param target:
A filename,
a binary mode :term:`file object` with a ``write`` method,
or :obj:`None`.
:param width_in_points:
Width of the surface, in points (1 point == 1/72.0 inch)
:param height_in_points:
Height of the surface, in points (1 point == 1/72.0 inch)
:type width_in_points: float
:type height_in_points: float
"""
def __init__(self, target, width_in_points, height_in_points):
if hasattr(target, 'write') or target is None:
write_func = _make_write_func(target)
pointer = cairo.cairo_ps_surface_create_for_stream(
write_func, ffi.NULL, width_in_points, height_in_points)
else:
write_func = None
pointer = cairo.cairo_ps_surface_create(
_encode_filename(target), width_in_points, height_in_points)
Surface.__init__(self, pointer, target_keep_alive=write_func)
def dsc_comment(self, comment):
""" Emit a comment into the PostScript output for the given surface.
The comment is expected to conform to
the PostScript Language Document Structuring Conventions (DSC).
Please see that manual for details on the available comments
and their meanings.
In particular, the ``%%IncludeFeature`` comment allows
a device-independent means of controlling printer device features.
So the PostScript Printer Description Files Specification
will also be a useful reference.
The comment string must begin with a percent character (%)
and the total length of the string
(including any initial percent characters)
must not exceed 255 bytes.
Violating either of these conditions will
place surface into an error state.
But beyond these two conditions,
this method will not enforce conformance of the comment
with any particular specification.
The comment string should not have a trailing newline.
The DSC specifies different sections
in which particular comments can appear.
This method provides for comments to be emitted
within three sections:
the header, the Setup section, and the PageSetup section.
Comments appearing in the first two sections
apply to the entire document
while comments in the BeginPageSetup section
apply only to a single page.
For comments to appear in the header section,
this method should be called after the surface is created,
but before a call to :meth:`dsc_begin_setup`.
For comments to appear in the Setup section,
this method should be called after a call to :meth:`dsc_begin_setup`
but before a call to :meth:`dsc_begin_page_setup`.
For comments to appear in the PageSetup section,
this method should be called after a call to
:meth:`dsc_begin_page_setup`.
Note that it is only necessary to call :meth:`dsc_begin_page_setup`
for the first page of any surface.
After a call to :meth:`~Surface.show_page`
or :meth:`~Surface.copy_page`
comments are unambiguously directed
to the PageSetup section of the current page.
But it doesn't hurt to call this method
at the beginning of every page
as that consistency may make the calling code simpler.
As a final note,
cairo automatically generates several comments on its own.
As such, applications must not manually generate
any of the following comments:
Header section: ``%!PS-Adobe-3.0``, ``%%Creator``, ``%%CreationDate``,
``%%Pages``, ``%%BoundingBox``, ``%%DocumentData``,
``%%LanguageLevel``, ``%%EndComments``.
Setup section: ``%%BeginSetup``, ``%%EndSetup``.
PageSetup section: ``%%BeginPageSetup``, ``%%PageBoundingBox``,
``%%EndPageSetup``.
Other sections: ``%%BeginProlog``, ``%%EndProlog``, ``%%Page``,
``%%Trailer``, ``%%EOF``.
"""
cairo.cairo_ps_surface_dsc_comment(
self._pointer, _encode_string(comment))
self._check_status()
def dsc_begin_setup(self):
"""Indicate that subsequent calls to :meth:`dsc_comment` should
direct comments to the Setup section of the PostScript output.
This method should be called at most once per surface,
and must be called before any call to :meth:`dsc_begin_page_setup`
and before any drawing is performed to the surface.
See :meth:`dsc_comment` for more details.
"""
cairo.cairo_ps_surface_dsc_begin_setup(self._pointer)
self._check_status()
def dsc_begin_page_setup(self):
"""Indicate that subsequent calls to :meth:`dsc_comment` should
direct comments to the PageSetup section of the PostScript output.
This method is only needed for the first page of a surface.
It must be called after any call to :meth:`dsc_begin_setup`
and before any drawing is performed to the surface.
See :meth:`dsc_comment` for more details.
"""
cairo.cairo_ps_surface_dsc_begin_page_setup(self._pointer)
self._check_status()
def set_eps(self, eps):
"""
If ``eps`` is True,
the PostScript surface will output Encapsulated PostScript.
This method should only be called
before any drawing operations have been performed on the current page.
The simplest way to do this is to call this method
immediately after creating the surface.
An Encapsulated PostScript file should never contain
more than one page.
"""
cairo.cairo_ps_surface_set_eps(self._pointer, bool(eps))
self._check_status()
def get_eps(self):
"""Check whether the PostScript surface will output
Encapsulated PostScript.
"""
return bool(cairo.cairo_ps_surface_get_eps(self._pointer))
def set_size(self, width_in_points, height_in_points):
"""Changes the size of a PostScript surface
for the current (and subsequent) pages.
This method should only be called
before any drawing operations have been performed on the current page.
The simplest way to do this is to call this method
immediately after creating the surface
or immediately after completing a page with either
:meth:`~Surface.show_page` or :meth:`~Surface.copy_page`.
:param width_in_points:
New width of the page, in points (1 point == 1/72.0 inch)
:param height_in_points:
New height of the page, in points (1 point == 1/72.0 inch)
:type width_in_points: float
:type height_in_points: float
"""
cairo.cairo_ps_surface_set_size(
self._pointer, width_in_points, height_in_points)
self._check_status()
def restrict_to_level(self, level):
"""Restricts the generated PostScript file to ``level``.
See :meth:`get_levels` for a list of available level values
that can be used here.
This method should only be called
before any drawing operations have been performed on the given surface.
The simplest way to do this is to call this method
immediately after creating the surface.
:param level: A :ref:`PS_LEVEL` string.
"""
cairo.cairo_ps_surface_restrict_to_level(self._pointer, level)
self._check_status()
@staticmethod
def get_levels():
"""Return the list of supported PostScript levels.
See :meth:`restrict_to_level`.
:return: A list of :ref:`PS_LEVEL` strings.
"""
levels = ffi.new('cairo_ps_level_t const **')
num_levels = ffi.new('int *')
cairo.cairo_ps_get_levels(levels, num_levels)
levels = levels[0]
return [levels[i] for i in range(num_levels[0])]
@staticmethod
def ps_level_to_string(level):
"""Return the string representation of the given :ref:`PS_LEVEL`.
See :meth:`get_levels` for a way to get
the list of valid level ids.
"""
c_string = cairo.cairo_ps_level_to_string(level)
if c_string == ffi.NULL:
raise ValueError(level)
return ffi.string(c_string).decode('ascii')
|
class PSSurface(Surface):
'''Creates a PostScript surface of the specified size in PostScript points
to be written to ``target``.
Note that the size of individual pages of the PostScript output can vary.
See :meth:`set_size`.
``target`` can be :obj:`None` to specify no output.
This will generate a surface that may be queried and used as a source,
without generating a temporary file.
The PostScript surface backend recognizes the ``image/jpeg`` MIME type
for the data attached to a surface (see :meth:`~Surface.set_mime_data`)
when it is used as a source pattern for drawing on this surface.
If it is specified, the PostScript backend emits an image
with the content of MIME data (with the ``/DCTDecode`` filter)
instead of a surface snapshot (with the ``/FlateDecode`` filter),
which typically produces PostScript with a smaller file size.
:param target:
A filename,
a binary mode :term:`file object` with a ``write`` method,
or :obj:`None`.
:param width_in_points:
Width of the surface, in points (1 point == 1/72.0 inch)
:param height_in_points:
Height of the surface, in points (1 point == 1/72.0 inch)
:type width_in_points: float
:type height_in_points: float
'''
def __init__(self, target, width_in_points, height_in_points):
pass
def dsc_comment(self, comment):
''' Emit a comment into the PostScript output for the given surface.
The comment is expected to conform to
the PostScript Language Document Structuring Conventions (DSC).
Please see that manual for details on the available comments
and their meanings.
In particular, the ``%%IncludeFeature`` comment allows
a device-independent means of controlling printer device features.
So the PostScript Printer Description Files Specification
will also be a useful reference.
The comment string must begin with a percent character (%)
and the total length of the string
(including any initial percent characters)
must not exceed 255 bytes.
Violating either of these conditions will
place surface into an error state.
But beyond these two conditions,
this method will not enforce conformance of the comment
with any particular specification.
The comment string should not have a trailing newline.
The DSC specifies different sections
in which particular comments can appear.
This method provides for comments to be emitted
within three sections:
the header, the Setup section, and the PageSetup section.
Comments appearing in the first two sections
apply to the entire document
while comments in the BeginPageSetup section
apply only to a single page.
For comments to appear in the header section,
this method should be called after the surface is created,
but before a call to :meth:`dsc_begin_setup`.
For comments to appear in the Setup section,
this method should be called after a call to :meth:`dsc_begin_setup`
but before a call to :meth:`dsc_begin_page_setup`.
For comments to appear in the PageSetup section,
this method should be called after a call to
:meth:`dsc_begin_page_setup`.
Note that it is only necessary to call :meth:`dsc_begin_page_setup`
for the first page of any surface.
After a call to :meth:`~Surface.show_page`
or :meth:`~Surface.copy_page`
comments are unambiguously directed
to the PageSetup section of the current page.
But it doesn't hurt to call this method
at the beginning of every page
as that consistency may make the calling code simpler.
As a final note,
cairo automatically generates several comments on its own.
As such, applications must not manually generate
any of the following comments:
Header section: ``%!PS-Adobe-3.0``, ``%%Creator``, ``%%CreationDate``,
``%%Pages``, ``%%BoundingBox``, ``%%DocumentData``,
``%%LanguageLevel``, ``%%EndComments``.
Setup section: ``%%BeginSetup``, ``%%EndSetup``.
PageSetup section: ``%%BeginPageSetup``, ``%%PageBoundingBox``,
``%%EndPageSetup``.
Other sections: ``%%BeginProlog``, ``%%EndProlog``, ``%%Page``,
``%%Trailer``, ``%%EOF``.
'''
pass
def dsc_begin_setup(self):
'''Indicate that subsequent calls to :meth:`dsc_comment` should
direct comments to the Setup section of the PostScript output.
This method should be called at most once per surface,
and must be called before any call to :meth:`dsc_begin_page_setup`
and before any drawing is performed to the surface.
See :meth:`dsc_comment` for more details.
'''
pass
def dsc_begin_page_setup(self):
'''Indicate that subsequent calls to :meth:`dsc_comment` should
direct comments to the PageSetup section of the PostScript output.
This method is only needed for the first page of a surface.
It must be called after any call to :meth:`dsc_begin_setup`
and before any drawing is performed to the surface.
See :meth:`dsc_comment` for more details.
'''
pass
def set_eps(self, eps):
'''
If ``eps`` is True,
the PostScript surface will output Encapsulated PostScript.
This method should only be called
before any drawing operations have been performed on the current page.
The simplest way to do this is to call this method
immediately after creating the surface.
An Encapsulated PostScript file should never contain
more than one page.
'''
pass
def get_eps(self):
'''Check whether the PostScript surface will output
Encapsulated PostScript.
'''
pass
def set_size(self, width_in_points, height_in_points):
'''Changes the size of a PostScript surface
for the current (and subsequent) pages.
This method should only be called
before any drawing operations have been performed on the current page.
The simplest way to do this is to call this method
immediately after creating the surface
or immediately after completing a page with either
:meth:`~Surface.show_page` or :meth:`~Surface.copy_page`.
:param width_in_points:
New width of the page, in points (1 point == 1/72.0 inch)
:param height_in_points:
New height of the page, in points (1 point == 1/72.0 inch)
:type width_in_points: float
:type height_in_points: float
'''
pass
def restrict_to_level(self, level):
'''Restricts the generated PostScript file to ``level``.
See :meth:`get_levels` for a list of available level values
that can be used here.
This method should only be called
before any drawing operations have been performed on the given surface.
The simplest way to do this is to call this method
immediately after creating the surface.
:param level: A :ref:`PS_LEVEL` string.
'''
pass
@staticmethod
def get_levels():
'''Return the list of supported PostScript levels.
See :meth:`restrict_to_level`.
:return: A list of :ref:`PS_LEVEL` strings.
'''
pass
@staticmethod
def ps_level_to_string(level):
'''Return the string representation of the given :ref:`PS_LEVEL`.
See :meth:`get_levels` for a way to get
the list of valid level ids.
'''
pass
| 13 | 10 | 19 | 3 | 4 | 12 | 1 | 3.11 | 1 | 3 | 0 | 0 | 8 | 0 | 10 | 35 | 236 | 47 | 46 | 18 | 33 | 143 | 39 | 16 | 28 | 2 | 2 | 1 | 12 |
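A minimal end-to-end sketch for the class above: one US-Letter page written to a hypothetical output path, with a DSC comment emitted while the header section is still open.
# PSSurface sketch: header comment, one filled rectangle, one page.
import cairocffi
surface = cairocffi.PSSurface('out.ps', 612, 792)   # 612 x 792 pt = 8.5 x 11 in
surface.dsc_comment('%%Title: cairocffi sketch')    # header section: before dsc_begin_setup
context = cairocffi.Context(surface)
context.rectangle(72, 72, 144, 144)
context.fill()
surface.show_page()
surface.finish()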
143,722 |
Kozea/cairocffi
|
cairocffi/surfaces.py
|
cairocffi.surfaces.SVGSurface
|
class SVGSurface(Surface):
"""Creates a SVG surface of the specified size in points
to be written to ``target``.
``target`` can be :obj:`None` to specify no output.
This will generate a surface that may be queried and used as a source,
without generating a temporary file.
The SVG surface backend recognizes the following MIME types
for the data attached to a surface (see :meth:`~Surface.set_mime_data`)
when it is used as a source pattern for drawing on this surface:
``image/png``,
``image/jpeg`` and
``text/x-uri``.
If any of them is specified, the SVG backend emits a href
with the content of MIME data instead of a surface snapshot
(PNG, Base64-encoded) in the corresponding image tag.
The unofficial MIME type ``text/x-uri`` is examined first.
If present, the URL is emitted as is:
assuring the correctness of URL is left to the client code.
If ``text/x-uri`` is not present,
but ``image/jpeg`` or ``image/png`` is specified,
the corresponding data is Base64-encoded and emitted.
:param target:
A filename,
a binary mode :term:`file object` with a ``write`` method,
or :obj:`None`.
:param width_in_points:
Width of the surface, in points (1 point == 1/72.0 inch)
:param height_in_points:
Height of the surface, in points (1 point == 1/72.0 inch)
:type width_in_points: float
:type height_in_points: float
"""
def __init__(self, target, width_in_points, height_in_points):
if hasattr(target, 'write') or target is None:
write_func = _make_write_func(target)
pointer = cairo.cairo_svg_surface_create_for_stream(
write_func, ffi.NULL, width_in_points, height_in_points)
else:
write_func = None
pointer = cairo.cairo_svg_surface_create(
_encode_filename(target), width_in_points, height_in_points)
Surface.__init__(self, pointer, target_keep_alive=write_func)
def restrict_to_version(self, version):
"""Restricts the generated SVG file to ``version``.
See :meth:`get_versions` for a list of available version values
that can be used here.
This method should only be called
before any drawing operations have been performed on the given surface.
The simplest way to do this is to call this method
immediately after creating the surface.
:param version: A :ref:`SVG_VERSION` string.
"""
cairo.cairo_svg_surface_restrict_to_version(self._pointer, version)
self._check_status()
def set_document_unit(self, unit):
"""Use specified unit for width and height of generated SVG file.
See ``SVG_UNIT_*`` enumerated values for a list of available unit
values that can be used here.
This function can be called at any time before generating the SVG file.
However to minimize the risk of ambiguities it's recommended to call it
before any drawing operations have been performed on the given surface,
to make it clearer what the unit used in the drawing operations is.
The simplest way to do this is to call this function immediately after
creating the SVG surface.
Note if this function is never called, the default unit for SVG
documents generated by cairo will be "pt". This is for historical
reasons.
:param unit: SVG unit.
*New in cairo 1.16.*
*New in cairocffi 0.9.*
"""
cairo.cairo_svg_surface_set_document_unit(self._pointer, unit)
self._check_status()
def get_document_unit(self):
"""Get the unit of the SVG surface.
If the surface passed as an argument is not a SVG surface, the function
sets the error status to ``STATUS_SURFACE_TYPE_MISMATCH`` and
returns :data:`SVG_UNIT_USER`.
:return: The SVG unit of the SVG surface.
*New in cairo 1.16.*
*New in cairocffi 0.9.*
"""
unit = cairo.cairo_svg_surface_get_document_unit(self._pointer)
self._check_status()
return unit
@staticmethod
def get_versions():
"""Return the list of supported SVG versions.
See :meth:`restrict_to_version`.
:return: A list of :ref:`SVG_VERSION` strings.
"""
versions = ffi.new('cairo_svg_version_t const **')
num_versions = ffi.new('int *')
cairo.cairo_svg_get_versions(versions, num_versions)
versions = versions[0]
return [versions[i] for i in range(num_versions[0])]
@staticmethod
def version_to_string(version):
"""Return the string representation of the given :ref:`SVG_VERSION`.
See :meth:`get_versions` for a way to get
the list of valid version ids.
"""
c_string = cairo.cairo_svg_version_to_string(version)
if c_string == ffi.NULL:
raise ValueError(version)
return ffi.string(c_string).decode('ascii')
|
class SVGSurface(Surface):
'''Creates an SVG surface of the specified size in points
to be written to ``target``.
``target`` can be :obj:`None` to specify no output.
This will generate a surface that may be queried and used as a source,
without generating a temporary file.
The SVG surface backend recognizes the following MIME types
for the data attached to a surface (see :meth:`~Surface.set_mime_data`)
when it is used as a source pattern for drawing on this surface:
``image/png``,
``image/jpeg`` and
``text/x-uri``.
If any of them is specified, the SVG backend emits a href
with the content of MIME data instead of a surface snapshot
(PNG, Base64-encoded) in the corresponding image tag.
The unofficial MIME type ``text/x-uri`` is examined first.
If present, the URL is emitted as is:
assuring the correctness of URL is left to the client code.
If ``text/x-uri`` is not present,
but ``image/jpeg`` or ``image/png`` is specified,
the corresponding data is Base64-encoded and emitted.
:param target:
A filename,
a binary mode :term:`file object` with a ``write`` method,
or :obj:`None`.
:param width_in_points:
Width of the surface, in points (1 point == 1/72.0 inch)
:param height_in_points:
Height of the surface, in points (1 point == 1/72.0 inch)
:type width_in_points: float
:type height_in_points: float
'''
def __init__(self, target, width_in_points, height_in_points):
pass
def restrict_to_version(self, version):
'''Restricts the generated SVG file to ``version``.
See :meth:`get_versions` for a list of available version values
that can be used here.
This method should only be called
before any drawing operations have been performed on the given surface.
The simplest way to do this is to call this method
immediately after creating the surface.
:param version: A :ref:`SVG_VERSION` string.
'''
pass
def set_document_unit(self, unit):
'''Use the specified unit for the width and height of the generated SVG file.
See ``SVG_UNIT_*`` enumerated values for a list of available unit
values that can be used here.
This function can be called at any time before generating the SVG file.
However to minimize the risk of ambiguities it's recommended to call it
before any drawing operations have been performed on the given surface,
to make it clearer what the unit used in the drawing operations is.
The simplest way to do this is to call this function immediately after
creating the SVG surface.
Note if this function is never called, the default unit for SVG
documents generated by cairo will be "pt". This is for historical
reasons.
:param unit: SVG unit.
*New in cairo 1.16.*
*New in cairocffi 0.9.*
'''
pass
def get_document_unit(self):
'''Get the unit of the SVG surface.
If the surface passed as an argument is not a SVG surface, the function
sets the error status to ``STATUS_SURFACE_TYPE_MISMATCH`` and
returns :data:`SVG_UNIT_USER`.
:return: The SVG unit of the SVG surface.
*New in cairo 1.16.*
*New in cairocffi 0.9.*
'''
pass
@staticmethod
def get_versions():
'''Return the list of supported SVG versions.
See :meth:`restrict_to_version`.
:return: A list of :ref:`SVG_VERSION` strings.
'''
pass
@staticmethod
def version_to_string(version):
'''Return the string representation of the given :ref:`SVG_VERSION`.
See :meth:`get_versions` for a way to get
the list of valid version ids.
'''
pass
| 9 | 6 | 16 | 4 | 5 | 7 | 1 | 2.12 | 1 | 2 | 0 | 0 | 4 | 0 | 6 | 31 | 138 | 32 | 34 | 15 | 25 | 72 | 29 | 13 | 22 | 2 | 2 | 1 | 8 |
143,723 |
Kozea/cairocffi
|
cairocffi/context.py
|
cairocffi.context.Context
|
class Context(object):
"""A :class:`Context` contains the current state of the rendering device,
including coordinates of yet to be drawn shapes.
Cairo contexts are central to cairo
and all drawing with cairo is always done to a :class:`Context` object.
:param target: The target :class:`Surface` object.
Cairo contexts can be used as Python :ref:`context managers <with>`.
See :meth:`save`.
"""
def __init__(self, target):
self._init_pointer(cairo.cairo_create(target._pointer))
def _init_pointer(self, pointer):
self._pointer = ffi.gc(pointer, _keepref(cairo, cairo.cairo_destroy))
self._check_status()
def _check_status(self):
_check_status(cairo.cairo_status(self._pointer))
@classmethod
def _from_pointer(cls, pointer, incref):
"""Wrap an existing ``cairo_t *`` cdata pointer.
:type incref: bool
:param incref:
Whether increase the :ref:`reference count <refcounting>` now.
:return:
A new :class:`Context` instance.
"""
if pointer == ffi.NULL:
raise ValueError('Null pointer')
if incref:
cairo.cairo_reference(pointer)
self = object.__new__(cls)
cls._init_pointer(self, pointer)
return self
def get_target(self):
"""Return this context’s target surface.
:returns:
An instance of :class:`Surface` or one of its sub-classes,
a new Python object referencing the existing cairo surface.
"""
return Surface._from_pointer(
cairo.cairo_get_target(self._pointer), incref=True)
#
# Save / restore
#
def save(self):
"""Makes a copy of the current state of this context
and saves it on an internal stack of saved states.
When :meth:`restore` is called,
the context will be restored to the saved state.
Multiple calls to :meth:`save` and :meth:`restore` can be nested;
each call to :meth:`restore` restores the state
from the matching paired :meth:`save`.
Instead of using :meth:`save` and :meth:`restore` directly,
it is recommended to use a :ref:`with statement <with>`::
with context:
do_something(context)
… which is equivalent to::
context.save()
try:
do_something(context)
finally:
context.restore()
"""
cairo.cairo_save(self._pointer)
self._check_status()
def restore(self):
"""Restores the context to the state saved
by a preceding call to :meth:`save`
and removes that state from the stack of saved states.
"""
cairo.cairo_restore(self._pointer)
self._check_status()
def __enter__(self):
self.save()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.restore()
#
# Groups
#
def push_group(self):
"""Temporarily redirects drawing to an intermediate surface
known as a group.
The redirection lasts until the group is completed
by a call to :meth:`pop_group` or :meth:`pop_group_to_source`.
These calls provide the result of any drawing
to the group as a pattern,
(either as an explicit object, or set as the source pattern).
This group functionality can be convenient
for performing intermediate compositing.
One common use of a group is to render objects
as opaque within the group, (so that they occlude each other),
and then blend the result with translucence onto the destination.
Groups can be nested arbitrarily deep
by making balanced calls to :meth:`push_group` / :meth:`pop_group`.
Each call pushes / pops the new target group onto / from a stack.
The :meth:`push_group` method calls :meth:`save`
so that any changes to the graphics state
will not be visible outside the group,
(the pop_group methods call :meth:`restore`).
By default the intermediate group will have
a content type of :obj:`COLOR_ALPHA <CONTENT_COLOR_ALPHA>`.
Other content types can be chosen for the group
by using :meth:`push_group_with_content` instead.
As an example,
here is how one might fill and stroke a path with translucence,
but without any portion of the fill being visible under the stroke::
context.push_group()
context.set_source(fill_pattern)
context.fill_preserve()
context.set_source(stroke_pattern)
context.stroke()
context.pop_group_to_source()
context.paint_with_alpha(alpha)
"""
cairo.cairo_push_group(self._pointer)
self._check_status()
def push_group_with_content(self, content):
"""Temporarily redirects drawing to an intermediate surface
known as a group.
The redirection lasts until the group is completed
by a call to :meth:`pop_group` or :meth:`pop_group_to_source`.
These calls provide the result of any drawing
to the group as a pattern,
(either as an explicit object, or set as the source pattern).
The group will have a content type of ``content``.
The ability to control this content type
is the only distinction between this method and :meth:`push_group`
which you should see for a more detailed description
of group rendering.
:param content: A :ref:`CONTENT` string.
"""
cairo.cairo_push_group_with_content(self._pointer, content)
self._check_status()
def pop_group(self):
"""Terminates the redirection begun by a call to :meth:`push_group`
or :meth:`push_group_with_content`
and returns a new pattern containing the results
of all drawing operations performed to the group.
The :meth:`pop_group` method calls :meth:`restore`,
(balancing a call to :meth:`save` by the push_group method),
so that any changes to the graphics state
will not be visible outside the group.
:returns:
A newly created :class:`SurfacePattern`
containing the results of all drawing operations
performed to the group.
"""
return Pattern._from_pointer(
cairo.cairo_pop_group(self._pointer), incref=False)
def pop_group_to_source(self):
"""Terminates the redirection begun by a call to :meth:`push_group`
or :meth:`push_group_with_content`
and installs the resulting pattern
as the source pattern in the given cairo context.
The behavior of this method is equivalent to::
context.set_source(context.pop_group())
"""
cairo.cairo_pop_group_to_source(self._pointer)
self._check_status()
def get_group_target(self):
"""Returns the current destination surface for the context.
This is either the original target surface
as passed to :class:`Context`
or the target surface for the current group as started
by the most recent call to :meth:`push_group`
or :meth:`push_group_with_content`.
"""
return Surface._from_pointer(
cairo.cairo_get_group_target(self._pointer), incref=True)
#
# Sources
#
def set_source_rgba(self, red, green, blue, alpha=1):
"""Sets the source pattern within this context to a solid color.
This color will then be used for any subsequent drawing operation
until a new source pattern is set.
The color and alpha components are
floating point numbers in the range 0 to 1.
If the values passed in are outside that range, they will be clamped.
The default source pattern is opaque black,
(that is, it is equivalent to ``context.set_source_rgba(0, 0, 0)``).
:param red: Red component of the color.
:param green: Green component of the color.
:param blue: Blue component of the color.
:param alpha:
Alpha component of the color.
1 (the default) is opaque, 0 fully transparent.
:type red: float
:type green: float
:type blue: float
:type alpha: float
"""
cairo.cairo_set_source_rgba(self._pointer, red, green, blue, alpha)
self._check_status()
def set_source_rgb(self, red, green, blue):
"""Same as :meth:`set_source_rgba` with alpha always 1.
Exists for compatibility with pycairo.
"""
cairo.cairo_set_source_rgb(self._pointer, red, green, blue)
self._check_status()
def set_source_surface(self, surface, x=0, y=0):
"""This is a convenience method for creating a pattern from surface
and setting it as the source in this context with :meth:`set_source`.
The ``x`` and ``y`` parameters give the user-space coordinate
at which the surface origin should appear.
(The surface origin is its upper-left corner
before any transformation has been applied.)
The ``x`` and ``y`` parameters are negated
and then set as translation values in the pattern matrix.
Other than the initial translation pattern matrix, as described above,
all other pattern attributes, (such as its extend mode),
are set to the default values as in :class:`SurfacePattern`.
The resulting pattern can be queried with :meth:`get_source`
so that these attributes can be modified if desired,
(eg. to create a repeating pattern with :meth:`Pattern.set_extend`).
:param surface:
A :class:`Surface` to be used to set the source pattern.
:param x: User-space X coordinate for surface origin.
:param y: User-space Y coordinate for surface origin.
:type x: float
:type y: float
"""
cairo.cairo_set_source_surface(self._pointer, surface._pointer, x, y)
self._check_status()
def set_source(self, source):
"""Sets the source pattern within this context to ``source``.
This pattern will then be used for any subsequent drawing operation
until a new source pattern is set.
.. note::
The pattern's transformation matrix will be locked
to the user space in effect at the time of :meth:`set_source`.
This means that further modifications
of the current transformation matrix
will not affect the source pattern.
See :meth:`Pattern.set_matrix`.
The default source pattern is opaque black,
(that is, it is equivalent to ``context.set_source_rgba(0, 0, 0)``).
:param source:
A :class:`Pattern` to be used
as the source for subsequent drawing operations.
"""
cairo.cairo_set_source(self._pointer, source._pointer)
self._check_status()
def get_source(self):
"""Return this context’s source.
:returns:
An instance of :class:`Pattern` or one of its sub-classes,
a new Python object referencing the existing cairo pattern.
"""
return Pattern._from_pointer(
cairo.cairo_get_source(self._pointer), incref=True)
#
# Context parameters
#
def set_antialias(self, antialias):
"""Set the :ref:`ANTIALIAS` of the rasterizer used for drawing shapes.
This value is a hint,
and a particular backend may or may not support a particular value.
At the current time,
no backend supports :obj:`SUBPIXEL <ANTIALIAS_SUBPIXEL>`
when drawing shapes.
Note that this option does not affect text rendering,
instead see :meth:`FontOptions.set_antialias`.
:param antialias: An :ref:`ANTIALIAS` string.
"""
cairo.cairo_set_antialias(self._pointer, antialias)
self._check_status()
def get_antialias(self):
"""Return the :ref:`ANTIALIAS` string."""
return cairo.cairo_get_antialias(self._pointer)
def set_dash(self, dashes, offset=0):
"""Sets the dash pattern to be used by :meth:`stroke`.
A dash pattern is specified by dashes, a list of positive values.
Each value provides the length of alternate "on" and "off"
portions of the stroke.
``offset`` specifies an offset into the pattern
at which the stroke begins.
Each "on" segment will have caps applied
as if the segment were a separate sub-path.
In particular, it is valid to use an "on" length of 0
with :obj:`LINE_CAP_ROUND` or :obj:`LINE_CAP_SQUARE`
in order to distribute dots or squares along a path.
Note: The length values are in user-space units
as evaluated at the time of stroking.
This is not necessarily the same as the user space
at the time of :meth:`set_dash`.
If ``dashes`` is empty dashing is disabled.
If it is of length 1 a symmetric pattern is assumed
with alternating on and off portions of the size specified
by the single value.
:param dashes:
A list of floats specifying alternate lengths
of on and off stroke portions.
:type offset: float
:param offset:
An offset into the dash pattern at which the stroke should start.
:raises:
:exc:`CairoError`
if any value in dashes is negative,
or if all values are 0.
The context will be put into an error state.
"""
cairo.cairo_set_dash(
self._pointer, ffi.new('double[]', dashes), len(dashes), offset)
self._check_status()
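# Example (sketch): set_dash([4.0, 1.0]) strokes 4-unit dashes separated by
# 1-unit gaps; set_dash([2.0]) alternates 2 on / 2 off; set_dash([]) disables.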
def get_dash(self):
"""Return the current dash pattern.
:returns:
A ``(dashes, offset)`` tuple of a list and a float.
``dashes`` is a list of floats,
empty if no dashing is in effect.
"""
dashes = ffi.new('double[]', cairo.cairo_get_dash_count(self._pointer))
offset = ffi.new('double *')
cairo.cairo_get_dash(self._pointer, dashes, offset)
self._check_status()
return list(dashes), offset[0]
def get_dash_count(self):
"""Same as ``len(context.get_dash()[0])``."""
# Not really useful with get_dash() returning a list,
# but retained for compatibility with pycairo.
return cairo.cairo_get_dash_count(self._pointer)
def set_fill_rule(self, fill_rule):
"""Set the current :ref:`FILL_RULE` within the cairo context.
The fill rule is used to determine which regions are inside
or outside a complex (potentially self-intersecting) path.
The current fill rule affects both :meth:`fill` and :meth:`clip`.
The default fill rule is :obj:`WINDING <FILL_RULE_WINDING>`.
:param fill_rule: A :ref:`FILL_RULE` string.
"""
cairo.cairo_set_fill_rule(self._pointer, fill_rule)
self._check_status()
def get_fill_rule(self):
"""Return the current :ref:`FILL_RULE` string."""
return cairo.cairo_get_fill_rule(self._pointer)
def set_line_cap(self, line_cap):
"""Set the current :ref:`LINE_CAP` within the cairo context.
As with the other stroke parameters,
the current line cap style is examined by
:meth:`stroke` and :meth:`stroke_extents`,
but does not have any effect during path construction.
The default line cap is :obj:`BUTT <LINE_CAP_BUTT>`.
:param line_cap: A :ref:`LINE_CAP` string.
"""
cairo.cairo_set_line_cap(self._pointer, line_cap)
self._check_status()
def get_line_cap(self):
"""Return the current :ref:`LINE_CAP` string."""
return cairo.cairo_get_line_cap(self._pointer)
def set_line_join(self, line_join):
"""Set the current :ref:`LINE_JOIN` within the cairo context.
As with the other stroke parameters,
the current line join style is examined by
:meth:`stroke` and :meth:`stroke_extents`,
but does not have any effect during path construction.
The default line join is :obj:`MITER <LINE_JOIN_MITER>`.
:param line_join: A :ref:`LINE_JOIN` string.
"""
cairo.cairo_set_line_join(self._pointer, line_join)
self._check_status()
def get_line_join(self):
"""Return the current :ref:`LINE_JOIN` string."""
return cairo.cairo_get_line_join(self._pointer)
def set_line_width(self, width):
"""Sets the current line width within the cairo context.
The line width value specifies the diameter of a pen
that is circular in user space,
(though device-space pen may be an ellipse in general
due to scaling / shear / rotation of the CTM).
.. note::
When the description above refers to user space and CTM
it refers to the user space and CTM in effect
at the time of the stroking operation,
not the user space and CTM in effect
at the time of the call to :meth:`set_line_width`.
The simplest usage makes both of these spaces identical.
That is, if there is no change to the CTM
between a call to :meth:`set_line_width`
and the stroking operation,
then one can just pass user-space values to :meth:`set_line_width`
and ignore this note.
As with the other stroke parameters,
the current line width is examined by
:meth:`stroke` and :meth:`stroke_extents`,
but does not have any effect during path construction.
The default line width value is 2.0.
:type width: float
:param width: The new line width.
"""
cairo.cairo_set_line_width(self._pointer, width)
self._check_status()
def get_line_width(self):
"""Return the current line width as a float."""
return cairo.cairo_get_line_width(self._pointer)
def set_miter_limit(self, limit):
"""Sets the current miter limit within the cairo context.
If the current line join style is set to :obj:`MITER <LINE_JOIN_MITER>`
(see :meth:`set_line_join`),
the miter limit is used to determine
whether the lines should be joined with a bevel instead of a miter.
Cairo divides the length of the miter by the line width.
If the result is greater than the miter limit,
the style is converted to a bevel.
As with the other stroke parameters,
the current miter limit is examined by
:meth:`stroke` and :meth:`stroke_extents`,
but does not have any effect during path construction.
The default miter limit value is 10.0,
which will convert joins with interior angles less than 11 degrees
to bevels instead of miters.
For reference,
a miter limit of 2.0 makes the miter cutoff at 60 degrees,
and a miter limit of 1.414 makes the cutoff at 90 degrees.
A miter limit for a desired angle can be computed as:
``miter_limit = 1. / sin(angle / 2.)``
:param limit: The miter limit to set.
:type limit: float
"""
cairo.cairo_set_miter_limit(self._pointer, limit)
self._check_status()
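# Worked check of the formula above: for a desired angle of 60 degrees,
# miter_limit = 1 / sin(60 deg / 2) = 1 / 0.5 = 2.0, matching the
# "miter limit of 2.0 makes the miter cutoff at 60 degrees" note.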
def get_miter_limit(self):
"""Return the current miter limit as a float."""
return cairo.cairo_get_miter_limit(self._pointer)
def set_operator(self, operator):
"""Set the current :ref:`OPERATOR`
to be used for all drawing operations.
The default operator is :obj:`OVER <OPERATOR_OVER>`.
:param operator: A :ref:`OPERATOR` string.
"""
cairo.cairo_set_operator(self._pointer, operator)
self._check_status()
def get_operator(self):
"""Return the current :ref:`OPERATOR` string."""
return cairo.cairo_get_operator(self._pointer)
def set_tolerance(self, tolerance):
"""Sets the tolerance used when converting paths into trapezoids.
Curved segments of the path will be subdivided
until the maximum deviation between the original path
and the polygonal approximation is less than tolerance.
The default value is 0.1.
A larger value will give better performance,
a smaller value, better appearance.
(Reducing the value from the default value of 0.1
is unlikely to improve appearance significantly.)
The accuracy of paths within Cairo is limited
by the precision of its internal arithmetic,
and the prescribed tolerance is restricted
to the smallest representable internal value.
:type tolerance: float
:param tolerance: The tolerance, in device units (typically pixels)
"""
cairo.cairo_set_tolerance(self._pointer, tolerance)
self._check_status()
def get_tolerance(self):
"""Return the current tolerance as a float."""
return cairo.cairo_get_tolerance(self._pointer)
#
# CTM: Current transformation matrix
#
def translate(self, tx, ty):
"""Modifies the current transformation matrix (CTM)
by translating the user-space origin by ``(tx, ty)``.
This offset is interpreted as a user-space coordinate
according to the CTM in place before the new call to :meth:`translate`.
In other words, the translation of the user-space origin takes place
after any existing transformation.
:param tx: Amount to translate in the X direction
:param ty: Amount to translate in the Y direction
:type tx: float
:type ty: float
"""
cairo.cairo_translate(self._pointer, tx, ty)
self._check_status()
def scale(self, sx, sy=None):
"""Modifies the current transformation matrix (CTM)
by scaling the X and Y user-space axes
by ``sx`` and ``sy`` respectively.
The scaling of the axes takes place after
any existing transformation of user space.
If ``sy`` is omitted, it is the same as ``sx``
so that scaling preserves aspect ratios.
:param sx: Scale factor in the X direction.
:param sy: Scale factor in the Y direction.
:type sx: float
:type sy: float
"""
if sy is None:
sy = sx
cairo.cairo_scale(self._pointer, sx, sy)
self._check_status()
def rotate(self, radians):
"""Modifies the current transformation matrix (CTM)
by rotating the user-space axes by angle ``radians``.
The rotation of the axes takes places
after any existing transformation of user space.
:type radians: float
:param radians:
Angle of rotation, in radians.
The direction of rotation is defined such that positive angles
rotate in the direction from the positive X axis
toward the positive Y axis.
With the default axis orientation of cairo,
positive angles rotate in a clockwise direction.
"""
cairo.cairo_rotate(self._pointer, radians)
self._check_status()
def transform(self, matrix):
"""Modifies the current transformation matrix (CTM)
by applying ``matrix`` as an additional transformation.
The new transformation of user space takes place
after any existing transformation.
:param matrix:
A transformation :class:`Matrix`
to be applied to the user-space axes.
"""
cairo.cairo_transform(self._pointer, matrix._pointer)
self._check_status()
def set_matrix(self, matrix):
"""Modifies the current transformation matrix (CTM)
by setting it equal to ``matrix``.
:param matrix:
A transformation :class:`Matrix` from user space to device space.
"""
cairo.cairo_set_matrix(self._pointer, matrix._pointer)
self._check_status()
def get_matrix(self):
"""Return a copy of the current transformation matrix (CTM)."""
matrix = Matrix()
cairo.cairo_get_matrix(self._pointer, matrix._pointer)
self._check_status()
return matrix
def identity_matrix(self):
"""Resets the current transformation matrix (CTM)
by setting it equal to the identity matrix.
That is, the user-space and device-space axes will be aligned
and one user-space unit will transform to one device-space unit.
"""
cairo.cairo_identity_matrix(self._pointer)
self._check_status()
def user_to_device(self, x, y):
"""Transform a coordinate from user space to device space
by multiplying the given point
by the current transformation matrix (CTM).
:param x: X position.
:param y: Y position.
:type x: float
:type y: float
:returns: A ``(device_x, device_y)`` tuple of floats.
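For example (a minimal sketch, assuming an existing ``context``)::
    context.identity_matrix()
    context.translate(10, 20)
    assert context.user_to_device(0, 0) == (10.0, 20.0)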
"""
xy = ffi.new('double[2]', [x, y])
cairo.cairo_user_to_device(self._pointer, xy + 0, xy + 1)
self._check_status()
return tuple(xy)
def user_to_device_distance(self, dx, dy):
"""Transform a distance vector from user space to device space.
This method is similar to :meth:`Context.user_to_device`
except that the translation components of the CTM
will be ignored when transforming ``(dx, dy)``.
:param dx: X component of a distance vector.
:param dy: Y component of a distance vector.
:type x: float
:type y: float
:returns: A ``(device_dx, device_dy)`` tuple of floats.
"""
xy = ffi.new('double[2]', [dx, dy])
cairo.cairo_user_to_device_distance(self._pointer, xy + 0, xy + 1)
self._check_status()
return tuple(xy)
def device_to_user(self, x, y):
"""Transform a coordinate from device space to user space
by multiplying the given point
by the inverse of the current transformation matrix (CTM).
:param x: X position.
:param y: Y position.
:type x: float
:type y: float
:returns: A ``(user_x, user_y)`` tuple of floats.
"""
xy = ffi.new('double[2]', [x, y])
cairo.cairo_device_to_user(self._pointer, xy + 0, xy + 1)
self._check_status()
return tuple(xy)
def device_to_user_distance(self, dx, dy):
"""Transform a distance vector from device space to user space.
This method is similar to :meth:`Context.device_to_user`
except that the translation components of the inverse CTM
will be ignored when transforming ``(dx, dy)``.
:param dx: X component of a distance vector.
:param dy: Y component of a distance vector.
:type x: float
:type y: float
:returns: A ``(user_dx, user_dy)`` tuple of floats.
"""
xy = ffi.new('double[2]', [dx, dy])
cairo.cairo_device_to_user_distance(self._pointer, xy + 0, xy + 1)
self._check_status()
return tuple(xy)
#
# Path
#
def has_current_point(self):
"""Returns whether a current point is defined on the current path.
See :meth:`get_current_point`.
"""
return bool(cairo.cairo_has_current_point(self._pointer))
def get_current_point(self):
"""Return the current point of the current path,
which is conceptually the final point reached by the path so far.
The current point is returned in the user-space coordinate system.
If there is no defined current point
or if the context is in an error status,
``(0, 0)`` is returned.
It is possible to check this in advance with :meth:`has_current_point`.
Most path construction methods alter the current point.
See the following for details on how they affect the current point:
:meth:`new_path`,
:meth:`new_sub_path`,
:meth:`append_path`,
:meth:`close_path`,
:meth:`move_to`,
:meth:`line_to`,
:meth:`curve_to`,
:meth:`rel_move_to`,
:meth:`rel_line_to`,
:meth:`rel_curve_to`,
:meth:`arc`,
:meth:`arc_negative`,
:meth:`rectangle`,
:meth:`text_path`,
:meth:`glyph_path`.
Some methods use and alter the current point
but do not otherwise change current path:
:meth:`show_text`,
:meth:`show_glyphs`,
:meth:`show_text_glyphs`.
Some methods unset the current path, and as a result, the current point:
:meth:`fill`,
:meth:`stroke`.
:returns:
A ``(x, y)`` tuple of floats, the coordinates of the current point.
"""
# I’d prefer returning None if self.has_current_point() is False
# But keep (0, 0) for compat with pycairo.
xy = ffi.new('double[2]')
cairo.cairo_get_current_point(self._pointer, xy + 0, xy + 1)
self._check_status()
return tuple(xy)
def new_path(self):
""" Clears the current path.
After this call there will be no path and no current point.
"""
cairo.cairo_new_path(self._pointer)
self._check_status()
def new_sub_path(self):
"""Begin a new sub-path.
Note that the existing path is not affected.
After this call there will be no current point.
In many cases, this call is not needed
since new sub-paths are frequently started with :meth:`move_to`.
A call to :meth:`new_sub_path` is particularly useful
when beginning a new sub-path with one of the :meth:`arc` calls.
This makes things easier as it is no longer necessary
to manually compute the arc's initial coordinates
for a call to :meth:`move_to`.
"""
cairo.cairo_new_sub_path(self._pointer)
self._check_status()
def move_to(self, x, y):
"""Begin a new sub-path.
After this call the current point will be ``(x, y)``.
:param x: X position of the new point.
:param y: Y position of the new point.
:type x: float
:type y: float
"""
cairo.cairo_move_to(self._pointer, x, y)
self._check_status()
def rel_move_to(self, dx, dy):
"""Begin a new sub-path.
After this call the current point will be offset by ``(dx, dy)``.
Given a current point of ``(x, y)``,
``context.rel_move_to(dx, dy)`` is logically equivalent to
``context.move_to(x + dx, y + dy)``.
:param dx: The X offset.
:param dy: The Y offset.
:type dx: float
:type dy: float
:raises:
:exc:`CairoError` if there is no current point.
Doing so will leave the context in an error state.
"""
cairo.cairo_rel_move_to(self._pointer, dx, dy)
self._check_status()
def line_to(self, x, y):
"""Adds a line to the path from the current point
to position ``(x, y)`` in user-space coordinates.
After this call the current point will be ``(x, y)``.
If there is no current point before the call to :meth:`line_to`
this method will behave as ``context.move_to(x, y)``.
:param x: X coordinate of the end of the new line.
:param y: Y coordinate of the end of the new line.
:type x: float
:type y: float
"""
cairo.cairo_line_to(self._pointer, x, y)
self._check_status()
def rel_line_to(self, dx, dy):
""" Relative-coordinate version of :meth:`line_to`.
Adds a line to the path from the current point
to a point that is offset from the current point
by ``(dx, dy)`` in user space.
After this call the current point will be offset by ``(dx, dy)``.
Given a current point of ``(x, y)``,
``context.rel_line_to(dx, dy)`` is logically equivalent to
``context.line_to(x + dx, y + dy)``.
:param dx: The X offset to the end of the new line.
:param dy: The Y offset to the end of the new line.
:type dx: float
:type dy: float
:raises:
:exc:`CairoError` if there is no current point.
Doing so will leave the context in an error state.
"""
cairo.cairo_rel_line_to(self._pointer, dx, dy)
self._check_status()
def rectangle(self, x, y, width, height):
"""Adds a closed sub-path rectangle
of the given size to the current path
at position ``(x, y)`` in user-space coordinates.
This method is logically equivalent to::
context.move_to(x, y)
context.rel_line_to(width, 0)
context.rel_line_to(0, height)
context.rel_line_to(-width, 0)
context.close_path()
:param x: The X coordinate of the top left corner of the rectangle.
:param y: The Y coordinate of the top left corner of the rectangle.
:param width: Width of the rectangle.
:param height: Height of the rectangle.
:type x: float
:type y: float
:type width: float
:type height: float
"""
cairo.cairo_rectangle(self._pointer, x, y, width, height)
self._check_status()
def arc(self, xc, yc, radius, angle1, angle2):
"""Adds a circular arc of the given radius to the current path.
The arc is centered at ``(xc, yc)``,
begins at ``angle1``
and proceeds in the direction of increasing angles
to end at ``angle2``.
If ``angle2`` is less than ``angle1``
it will be progressively increased by ``2 * pi``
until it is greater than ``angle1``.
If there is a current point,
an initial line segment will be added to the path
to connect the current point to the beginning of the arc.
If this initial line is undesired,
it can be avoided by calling :meth:`new_sub_path`
before calling :meth:`arc`.
Angles are measured in radians.
An angle of 0 is in the direction of the positive X axis
(in user space).
An angle of ``pi / 2`` radians (90 degrees)
is in the direction of the positive Y axis (in user space).
Angles increase in the direction from the positive X axis
toward the positive Y axis.
So with the default transformation matrix,
angles increase in a clockwise direction.
(To convert from degrees to radians, use ``degrees * pi / 180``.)
This method gives the arc in the direction of increasing angles;
see :meth:`arc_negative` to get the arc
in the direction of decreasing angles.
The arc is circular in user space.
To achieve an elliptical arc,
you can scale the current transformation matrix
by different amounts in the X and Y directions.
For example, to draw an ellipse in the box
given by x, y, width, height::
from math import pi
with context:
context.translate(x + width / 2., y + height / 2.)
context.scale(width / 2., height / 2.)
context.arc(0, 0, 1, 0, 2 * pi)
:param xc: X position of the center of the arc.
:param yc: Y position of the center of the arc.
:param radius: The radius of the arc.
:param angle1: The start angle, in radians.
:param angle2: The end angle, in radians.
:type xc: float
:type yc: float
:type radius: float
:type angle1: float
:type angle2: float
"""
cairo.cairo_arc(self._pointer, xc, yc, radius, angle1, angle2)
self._check_status()
def arc_negative(self, xc, yc, radius, angle1, angle2):
"""Adds a circular arc of the given radius to the current path.
The arc is centered at ``(xc, yc)``,
begins at ``angle1``
and proceeds in the direction of decreasing angles
to end at ``angle2``.
If ``angle2`` is greater than ``angle1``
it will be progressively decreased by ``2 * pi``
until it is less than ``angle1``.
See :meth:`arc` for more details.
This method differs only in
the direction of the arc between the two angles.
:param xc: X position of the center of the arc.
:param yc: Y position of the center of the arc.
:param radius: The radius of the arc.
:param angle1: The start angle, in radians.
:param angle2: The end angle, in radians.
:type xc: float
:type yc: float
:type radius: float
:type angle1: float
:type angle2: float
"""
cairo.cairo_arc_negative(self._pointer, xc, yc, radius, angle1, angle2)
self._check_status()
def curve_to(self, x1, y1, x2, y2, x3, y3):
"""Adds a cubic Bézier spline to the path
from the current point
to position ``(x3, y3)`` in user-space coordinates,
using ``(x1, y1)`` and ``(x2, y2)`` as the control points.
After this call the current point will be ``(x3, y3)``.
If there is no current point before the call to :meth:`curve_to`
this method will behave as if preceded by
a call to ``context.move_to(x1, y1)``.
:param x1: The X coordinate of the first control point.
:param y1: The Y coordinate of the first control point.
:param x2: The X coordinate of the second control point.
:param y2: The Y coordinate of the second control point.
:param x3: The X coordinate of the end of the curve.
:param y3: The Y coordinate of the end of the curve.
:type x1: float
:type y1: float
:type x2: float
:type y2: float
:type x3: float
:type y3: float
"""
cairo.cairo_curve_to(self._pointer, x1, y1, x2, y2, x3, y3)
self._check_status()
def rel_curve_to(self, dx1, dy1, dx2, dy2, dx3, dy3):
""" Relative-coordinate version of :meth:`curve_to`.
All offsets are relative to the current point.
Adds a cubic Bézier spline to the path from the current point
to a point offset from the current point by ``(dx3, dy3)``,
using points offset by ``(dx1, dy1)`` and ``(dx2, dy2)``
as the control points.
After this call the current point will be offset by ``(dx3, dy3)``.
Given a current point of ``(x, y)``,
``context.rel_curve_to(dx1, dy1, dx2, dy2, dx3, dy3)``
is logically equivalent to
``context.curve_to(x+dx1, y+dy1, x+dx2, y+dy2, x+dx3, y+dy3)``.
:param dx1: The X offset to the first control point.
:param dy1: The Y offset to the first control point.
:param dx2: The X offset to the second control point.
:param dy2: The Y offset to the second control point.
:param dx3: The X offset to the end of the curve.
:param dy3: The Y offset to the end of the curve.
:type dx1: float
:type dy1: float
:type dx2: float
:type dy2: float
:type dx3: float
:type dy3: float
:raises:
:exc:`CairoError` if there is no current point.
Doing so will leave the context in an error state.
"""
cairo.cairo_rel_curve_to(self._pointer, dx1, dy1, dx2, dy2, dx3, dy3)
self._check_status()
def text_path(self, text):
"""Adds closed paths for text to the current path.
The generated path, if filled,
achieves an effect similar to that of :meth:`show_text`.
Text conversion and positioning is done similarly to :meth:`show_text`.
Like :meth:`show_text`,
after this call the current point is moved to the origin of where
the next glyph would be placed in this same progression.
That is, the current point will be at the origin of the final glyph
offset by its advance values.
This allows for chaining multiple calls to :meth:`text_path`
without having to set the current point in between.
:param text: The text to show, as an Unicode or UTF-8 string.
.. note::
The :meth:`text_path` method is part of
what the cairo designers call the "toy" text API.
It is convenient for short demos and simple programs,
but it is not expected to be adequate
for serious text-using applications.
See :ref:`fonts` for details,
and :meth:`glyph_path` for the "real" text path API in cairo.
"""
cairo.cairo_text_path(self._pointer, _encode_string(text))
self._check_status()
def glyph_path(self, glyphs):
"""Adds closed paths for the glyphs to the current path.
The generated path, if filled,
achieves an effect similar to that of :meth:`show_glyphs`.
:param glyphs:
The glyphs to show.
See :meth:`show_text_glyphs` for the data structure.
"""
glyphs = ffi.new('cairo_glyph_t[]', glyphs)
cairo.cairo_glyph_path(self._pointer, glyphs, len(glyphs))
self._check_status()
def close_path(self):
"""Adds a line segment to the path
from the current point
to the beginning of the current sub-path,
(the most recent point passed to :meth:`move_to`),
and closes this sub-path.
After this call the current point will be
at the joined endpoint of the sub-path.
The behavior of :meth:`close_path` is distinct
from simply calling :meth:`line_to` with the equivalent coordinate
in the case of stroking.
When a closed sub-path is stroked,
there are no caps on the ends of the sub-path.
Instead, there is a line join
connecting the final and initial segments of the sub-path.
If there is no current point before the call to :meth:`close_path`,
this method will have no effect.
"""
cairo.cairo_close_path(self._pointer)
self._check_status()
def copy_path(self):
"""Return a copy of the current path.
:returns:
A list of ``(path_operation, coordinates)`` tuples
of a :ref:`PATH_OPERATION` string
and a tuple of floats coordinates
whose content depends on the operation type:
* :obj:`MOVE_TO <PATH_MOVE_TO>`: 1 point ``(x, y)``
* :obj:`LINE_TO <PATH_LINE_TO>`: 1 point ``(x, y)``
* :obj:`CURVE_TO <PATH_CURVE_TO>`: 3 points
``(x1, y1, x2, y2, x3, y3)``
* :obj:`CLOSE_PATH <PATH_CLOSE_PATH>` 0 points ``()`` (empty tuple)
"""
path = cairo.cairo_copy_path(self._pointer)
result = list(_iter_path(path))
cairo.cairo_path_destroy(path)
return result
def copy_path_flat(self):
"""Return a flattened copy of the current path
This method is like :meth:`copy_path`
except that any curves in the path will be approximated
with piecewise-linear approximations,
(accurate to within the current tolerance value,
see :meth:`set_tolerance`).
That is,
the result is guaranteed to not have any elements
of type :obj:`CURVE_TO <PATH_CURVE_TO>`
which will instead be replaced by
a series of :obj:`LINE_TO <PATH_LINE_TO>` elements.
:returns:
A list of ``(path_operation, coordinates)`` tuples.
See :meth:`copy_path` for the data structure.
"""
path = cairo.cairo_copy_path_flat(self._pointer)
result = list(_iter_path(path))
cairo.cairo_path_destroy(path)
return result
def append_path(self, path):
"""Append ``path`` onto the current path.
The path may be either the return value from one of :meth:`copy_path`
or :meth:`copy_path_flat` or it may be constructed manually.
:param path:
An iterable of tuples
in the same format as returned by :meth:`copy_path`.
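For example, copying the current path from one context to another
(a minimal sketch; ``source_context`` and ``destination_context``
are assumed to exist)::
    path = source_context.copy_path()
    destination_context.append_path(path)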
"""
# Both objects need to stay alive
# until after cairo.cairo_append_path() is finished, but not after.
path, _ = _encode_path(path)
cairo.cairo_append_path(self._pointer, path)
self._check_status()
def path_extents(self):
"""Computes a bounding box in user-space coordinates
covering the points on the current path.
If the current path is empty,
returns an empty rectangle ``(0, 0, 0, 0)``.
Stroke parameters, fill rule, surface dimensions and clipping
are not taken into account.
Contrast with :meth:`fill_extents` and :meth:`stroke_extents`
which return the extents of only the area that would be "inked"
by the corresponding drawing operations.
The result of :meth:`path_extents`
is defined as equivalent to the limit of :meth:`stroke_extents`
with :obj:`LINE_CAP_ROUND` as the line width approaches 0,
(but never reaching the empty-rectangle
returned by :meth:`stroke_extents` for a line width of 0).
Specifically, this means that zero-area sub-paths
such as :meth:`move_to`; :meth:`line_to` segments,
(even degenerate cases
where the coordinates to both calls are identical),
will be considered as contributing to the extents.
However, a lone :meth:`move_to` will not contribute
to the results of :meth:`path_extents`.
:return:
A ``(x1, y1, x2, y2)`` tuple of floats:
the left, top, right and bottom of the resulting extents,
respectively.
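For example (a minimal sketch, assuming an existing ``context``)::
    context.new_path()
    context.rectangle(10, 20, 100, 200)
    assert context.path_extents() == (10.0, 20.0, 110.0, 220.0)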
"""
extents = ffi.new('double[4]')
cairo.cairo_path_extents(
self._pointer, extents + 0, extents + 1, extents + 2, extents + 3)
self._check_status()
return tuple(extents)
#
# Drawing operators
#
def paint(self):
"""A drawing operator that paints the current source everywhere
within the current clip region.
"""
cairo.cairo_paint(self._pointer)
self._check_status()
def paint_with_alpha(self, alpha):
"""A drawing operator that paints the current source everywhere
within the current clip region
using a mask of constant alpha value alpha.
The effect is similar to :meth:`paint`,
but the drawing is faded out using the ``alpha`` value.
:type alpha: float
:param alpha: Alpha value, between 0 (transparent) and 1 (opaque).
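For example, a translucent red wash over the whole clip region
(a minimal sketch, assuming an existing ``context``)::
    context.set_source_rgb(1, 0, 0)
    context.paint_with_alpha(0.5)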
"""
cairo.cairo_paint_with_alpha(self._pointer, alpha)
self._check_status()
def mask(self, pattern):
"""A drawing operator that paints the current source
using the alpha channel of ``pattern`` as a mask.
(Opaque areas of ``pattern`` are painted with the source,
transparent areas are not painted.)
:param pattern: A :class:`Pattern` object.
"""
cairo.cairo_mask(self._pointer, pattern._pointer)
self._check_status()
def mask_surface(self, surface, surface_x=0, surface_y=0):
"""A drawing operator that paints the current source
using the alpha channel of ``surface`` as a mask.
(Opaque areas of ``surface`` are painted with the source,
transparent areas are not painted.)
:param pattern: A :class:`Surface` object.
:param surface_x: X coordinate at which to place the origin of surface.
:param surface_y: Y coordinate at which to place the origin of surface.
:type surface_x: float
:type surface_y: float
"""
cairo.cairo_mask_surface(
self._pointer, surface._pointer, surface_x, surface_y)
self._check_status()
def fill(self):
"""A drawing operator that fills the current path
according to the current fill rule,
(each sub-path is implicitly closed before being filled).
After :meth:`fill`,
the current path will be cleared from the cairo context.
See :meth:`set_fill_rule` and :meth:`fill_preserve`.
"""
cairo.cairo_fill(self._pointer)
self._check_status()
def fill_preserve(self):
"""A drawing operator that fills the current path
according to the current fill rule,
(each sub-path is implicitly closed before being filled).
Unlike :meth:`fill`,
:meth:`fill_preserve` preserves the path within the cairo context.
See :meth:`set_fill_rule` and :meth:`fill`.
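For example, filling and then stroking the same shape
(a minimal sketch, assuming an existing ``context``)::
    context.rectangle(10, 10, 50, 50)
    context.set_source_rgb(1, 1, 1)
    context.fill_preserve()  # fill, but keep the path
    context.set_source_rgb(0, 0, 0)
    context.stroke()         # outline the same rectangle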
"""
cairo.cairo_fill_preserve(self._pointer)
self._check_status()
def fill_extents(self):
"""Computes a bounding box in user-space coordinates
covering the area that would be affected, (the "inked" area),
by a :meth:`fill` operation given the current path and fill parameters.
If the current path is empty,
returns an empty rectangle ``(0, 0, 0, 0)``.
Surface dimensions and clipping are not taken into account.
Contrast with :meth:`path_extents` which is similar,
but returns non-zero extents for some paths with no inked area,
(such as a simple line segment).
Note that :meth:`fill_extents` must necessarily do more work
to compute the precise inked areas in light of the fill rule,
so :meth:`path_extents` may be more desirable for sake of performance
if the non-inked path extents are desired.
See :meth:`fill`, :meth:`set_fill_rule` and :meth:`fill_preserve`.
:return:
A ``(x1, y1, x2, y2)`` tuple of floats:
the left, top, right and bottom of the resulting extents,
respectively.
"""
extents = ffi.new('double[4]')
cairo.cairo_fill_extents(
self._pointer, extents + 0, extents + 1, extents + 2, extents + 3)
self._check_status()
return tuple(extents)
def in_fill(self, x, y):
"""Tests whether the given point is inside the area
that would be affected by a :meth:`fill` operation
given the current path and filling parameters.
Surface dimensions and clipping are not taken into account.
See :meth:`fill`, :meth:`set_fill_rule` and :meth:`fill_preserve`.
:param x: X coordinate of the point to test
:param y: Y coordinate of the point to test
:type x: float
:type y: float
:returns: A boolean.
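For example (a minimal sketch, assuming an existing ``context``)::
    context.rectangle(0, 0, 10, 10)
    assert context.in_fill(5, 5)
    assert not context.in_fill(20, 20)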
"""
return bool(cairo.cairo_in_fill(self._pointer, x, y))
def stroke(self):
"""A drawing operator that strokes the current path
according to the current line width, line join, line cap,
and dash settings.
After :meth:`stroke`,
the current path will be cleared from the cairo context.
See :meth:`set_line_width`, :meth:`set_line_join`,
:meth:`set_line_cap`, :meth:`set_dash`, and :meth:`stroke_preserve`.
Note: Degenerate segments and sub-paths are treated specially
and provide a useful result.
These can result in two different situations:
1. Zero-length "on" segments set in :meth:`set_dash`.
If the cap style is :obj:`ROUND <LINE_CAP_ROUND>`
or :obj:`SQUARE <LINE_CAP_SQUARE>`
then these segments will be drawn
as circular dots or squares respectively.
In the case of :obj:`SQUARE <LINE_CAP_SQUARE>`,
the orientation of the squares is determined
by the direction of the underlying path.
2. A sub-path created by :meth:`move_to` followed
by either a :meth:`close_path`
or one or more calls to :meth:`line_to`
to the same coordinate as the :meth:`move_to`.
If the cap style is :obj:`ROUND <LINE_CAP_ROUND>`
then these sub-paths will be drawn as circular dots.
Note that in the case of :obj:`SQUARE <LINE_CAP_SQUARE>`
a degenerate sub-path will not be drawn at all,
(since the correct orientation is indeterminate).
In no case will a cap style of :obj:`BUTT <LINE_CAP_BUTT>`
cause anything to be drawn
in the case of either degenerate segments or sub-paths.
"""
cairo.cairo_stroke(self._pointer)
self._check_status()
def stroke_preserve(self):
"""A drawing operator that strokes the current path
according to the current line width, line join, line cap,
and dash settings.
Unlike :meth:`stroke`,
:meth:`stroke_preserve` preserves the path within the cairo context.
See :meth:`set_line_width`, :meth:`set_line_join`,
:meth:`set_line_cap`, :meth:`set_dash`, and :meth:`stroke`.
"""
cairo.cairo_stroke_preserve(self._pointer)
self._check_status()
def stroke_extents(self):
"""Computes a bounding box in user-space coordinates
covering the area that would be affected, (the "inked" area),
by a :meth:`stroke` operation given the current path
and stroke parameters.
If the current path is empty,
returns an empty rectangle ``(0, 0, 0, 0)``.
Surface dimensions and clipping are not taken into account.
Note that if the line width is set to exactly zero,
then :meth:`stroke_extents` will return an empty rectangle.
Contrast with :meth:`path_extents`
which can be used to compute the non-empty bounds
as the line width approaches zero.
Note that :meth:`stroke_extents` must necessarily do more work
to compute the precise inked areas in light of the stroke parameters,
so :meth:`path_extents` may be more desirable for sake of performance
if the non-inked path extents are desired.
See :meth:`stroke`, :meth:`set_line_width`, :meth:`set_line_join`,
:meth:`set_line_cap`, :meth:`set_dash`, and :meth:`stroke_preserve`.
:return:
A ``(x1, y1, x2, y2)`` tuple of floats:
the left, top, right and bottom of the resulting extents,
respectively.
"""
extents = ffi.new('double[4]')
cairo.cairo_stroke_extents(
self._pointer, extents + 0, extents + 1, extents + 2, extents + 3)
self._check_status()
return tuple(extents)
def in_stroke(self, x, y):
"""Tests whether the given point is inside the area
that would be affected by a :meth:`stroke` operation
given the current path and stroking parameters.
Surface dimensions and clipping are not taken into account.
See :meth:`stroke`, :meth:`set_line_width`, :meth:`set_line_join`,
:meth:`set_line_cap`, :meth:`set_dash`, and :meth:`stroke_preserve`.
:param x: X coordinate of the point to test
:param y: Y coordinate of the point to test
:type x: float
:type y: float
:returns: A boolean.
"""
return bool(cairo.cairo_in_stroke(self._pointer, x, y))
def clip(self):
"""Establishes a new clip region
by intersecting the current clip region
with the current path as it would be filled by :meth:`fill`
and according to the current fill rule (see :meth:`set_fill_rule`).
After :meth:`clip`,
the current path will be cleared from the cairo context.
The current clip region affects all drawing operations
by effectively masking out any changes to the surface
that are outside the current clip region.
Calling :meth:`clip` can only make the clip region smaller,
never larger.
But the current clip is part of the graphics state,
so a temporary restriction of the clip region can be achieved
by calling :meth:`clip` within a :meth:`save` / :meth:`restore` pair.
The only other means of increasing the size of the clip region
is :meth:`reset_clip`.
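For example, temporarily restricting painting to a rectangle
(a minimal sketch, assuming an existing ``context``)::
    with context:  # save() / restore() pair
        context.rectangle(0, 0, 100, 100)
        context.clip()
        context.paint()  # only the clipped area is painted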
"""
cairo.cairo_clip(self._pointer)
self._check_status()
def clip_preserve(self):
"""Establishes a new clip region
by intersecting the current clip region
with the current path as it would be filled by :meth:`fill`
and according to the current fill rule (see :meth:`set_fill_rule`).
Unlike :meth:`clip`,
:meth:`clip_preserve` preserves the path within the cairo context.
The current clip region affects all drawing operations
by effectively masking out any changes to the surface
that are outside the current clip region.
Calling :meth:`clip_preserve` can only make the clip region smaller,
never larger.
But the current clip is part of the graphics state,
so a temporary restriction of the clip region can be achieved
by calling :meth:`clip_preserve`
within a :meth:`save` / :meth:`restore` pair.
The only other means of increasing the size of the clip region
is :meth:`reset_clip`.
"""
cairo.cairo_clip_preserve(self._pointer)
self._check_status()
def clip_extents(self):
"""Computes a bounding box in user coordinates
covering the area inside the current clip.
:return:
A ``(x1, y1, x2, y2)`` tuple of floats:
the left, top, right and bottom of the resulting extents,
respectively.
"""
extents = ffi.new('double[4]')
cairo.cairo_clip_extents(
self._pointer, extents + 0, extents + 1, extents + 2, extents + 3)
self._check_status()
return tuple(extents)
def copy_clip_rectangle_list(self):
"""Return the current clip region as a list of rectangles
in user coordinates.
:return:
A list of rectangles,
as ``(x, y, width, height)`` tuples of floats.
:raises:
:exc:`CairoError`
if the clip region cannot be represented as a list
of user-space rectangles.
"""
rectangle_list = cairo.cairo_copy_clip_rectangle_list(self._pointer)
_check_status(rectangle_list.status)
rectangles = rectangle_list.rectangles
result = []
for i in range(rectangle_list.num_rectangles):
rect = rectangles[i]
result.append((rect.x, rect.y, rect.width, rect.height))
cairo.cairo_rectangle_list_destroy(rectangle_list)
return result
def in_clip(self, x, y):
"""Tests whether the given point is inside the area
that would be visible through the current clip,
i.e. the area that would be filled by a :meth:`paint` operation.
See :meth:`clip`, and :meth:`clip_preserve`.
:param x: X coordinate of the point to test
:param y: Y coordinate of the point to test
:type x: float
:type y: float
:returns: A boolean.
*New in cairo 1.10.*
"""
return bool(cairo.cairo_in_clip(self._pointer, x, y))
def reset_clip(self):
"""Reset the current clip region to its original, unrestricted state.
That is, set the clip region to an infinitely large shape
containing the target surface.
Equivalently, if infinity is too hard to grasp,
one can imagine the clip region being reset
to the exact bounds of the target surface.
Note that code meant to be reusable
should not call :meth:`reset_clip`
as it will cause results unexpected by higher-level code
which calls :meth:`clip`.
Consider using :meth:`save` and :meth:`restore` around :meth:`clip`
as a more robust means of temporarily restricting the clip region.
"""
cairo.cairo_reset_clip(self._pointer)
self._check_status()
#
# Fonts
#
def select_font_face(self, family='', slant=constants.FONT_SLANT_NORMAL,
weight=constants.FONT_WEIGHT_NORMAL):
"""Selects a family and style of font from a simplified description
as a family name, slant and weight.
.. note::
The :meth:`select_font_face` method is part of
what the cairo designers call the "toy" text API.
It is convenient for short demos and simple programs,
but it is not expected to be adequate
for serious text-using applications.
See :ref:`fonts` for details.
Cairo provides no operation to list available family names
on the system (this is a "toy", remember),
but the standard CSS2 generic family names,
(``"serif"``, ``"sans-serif"``, ``"cursive"``, ``"fantasy"``,
``"monospace"``),
are likely to work as expected.
If family starts with the string ``"cairo:"``,
or if no native font backends are compiled in,
cairo will use an internal font family.
The internal font family recognizes many modifiers
in the family string,
most notably, it recognizes the string ``"monospace"``.
That is, the family name ``"cairo:monospace"``
will use the monospace version of the internal font family.
If text is drawn without a call to :meth:`select_font_face`,
(nor :meth:`set_font_face` nor :meth:`set_scaled_font`),
the default family is platform-specific,
but is essentially ``"sans-serif"``.
Default slant is :obj:`NORMAL <FONT_SLANT_NORMAL>`,
and default weight is :obj:`NORMAL <FONT_WEIGHT_NORMAL>`.
This method is equivalent to a call to :class:`ToyFontFace`
followed by :meth:`set_font_face`.
"""
cairo.cairo_select_font_face(
self._pointer, _encode_string(family), slant, weight)
self._check_status()
def set_font_face(self, font_face):
"""Replaces the current font face with ``font_face``.
:param font_face:
A :class:`FontFace` object,
or :obj:`None` to restore the default font.
"""
font_face = font_face._pointer if font_face is not None else ffi.NULL
cairo.cairo_set_font_face(self._pointer, font_face)
self._check_status()
def get_font_face(self):
"""Return the current font face.
:returns:
A new :class:`FontFace` object
wrapping an existing cairo object.
"""
return FontFace._from_pointer(
cairo.cairo_get_font_face(self._pointer), incref=True)
def set_font_size(self, size):
"""Sets the current font matrix to a scale by a factor of ``size``,
replacing any font matrix previously set with :meth:`set_font_size`
or :meth:`set_font_matrix`.
This results in a font size of size user space units.
(More precisely, this matrix will result in the font's
em-square being a size by size square in user space.)
If text is drawn without a call to :meth:`set_font_size`,
(nor :meth:`set_font_matrix` nor :meth:`set_scaled_font`),
the default font size is 10.0.
:param size: The new font size, in user space units
:type size: float
"""
cairo.cairo_set_font_size(self._pointer, size)
self._check_status()
def set_font_matrix(self, matrix):
"""Sets the current font matrix to ``matrix``.
The font matrix gives a transformation
from the design space of the font
(in this space, the em-square is 1 unit by 1 unit)
to user space.
Normally, a simple scale is used (see :meth:`set_font_size`),
but a more complex font matrix can be used
to shear the font or stretch it unequally along the two axes
:param matrix:
A :class:`Matrix`
describing a transform to be applied to the current font.
"""
cairo.cairo_set_font_matrix(self._pointer, matrix._pointer)
self._check_status()
def get_font_matrix(self):
"""Copies the current font matrix. See :meth:`set_font_matrix`.
:returns: A new :class:`Matrix`.
"""
matrix = Matrix()
cairo.cairo_get_font_matrix(self._pointer, matrix._pointer)
self._check_status()
return matrix
def set_font_options(self, font_options):
"""Sets a set of custom font rendering options.
Rendering options are derived by merging these options
with the options derived from underlying surface;
if the value in options has a default value
(like :obj:`ANTIALIAS_DEFAULT`),
then the value from the surface is used.
:param font_options: A :class:`FontOptions` object.
"""
cairo.cairo_set_font_options(self._pointer, font_options._pointer)
self._check_status()
def get_font_options(self):
"""Retrieves font rendering options set via :meth:`set_font_options`.
Note that the returned options do not include any options
derived from the underlying surface;
they are literally the options passed to :meth:`set_font_options`.
:return: A new :class:`FontOptions` object.
"""
font_options = FontOptions()
cairo.cairo_get_font_options(self._pointer, font_options._pointer)
return font_options
def set_scaled_font(self, scaled_font):
"""Replaces the current font face, font matrix, and font options
with those of ``scaled_font``.
Except for some translation, the current CTM of the context
should be the same as that of the ``scaled_font``,
which can be accessed using :meth:`ScaledFont.get_ctm`.
:param scaled_font: A :class:`ScaledFont` object.
"""
cairo.cairo_set_scaled_font(self._pointer, scaled_font._pointer)
self._check_status()
def get_scaled_font(self):
"""Return the current scaled font.
:return:
A new :class:`ScaledFont` object,
wrapping an existing cairo object.
"""
return ScaledFont._from_pointer(
cairo.cairo_get_scaled_font(self._pointer), incref=True)
def font_extents(self):
"""Return the extents of the currently selected font.
Values are given in the current user-space coordinate system.
Because font metrics are in user-space coordinates, they are mostly,
but not entirely, independent of the current transformation matrix.
If you call :meth:`context.scale(2) <scale>`,
text will be drawn twice as big,
but the reported text extents will not be doubled.
They will change slightly due to hinting
(so you can't assume that metrics are independent
of the transformation matrix),
but otherwise will remain unchanged.
:returns:
A ``(ascent, descent, height, max_x_advance, max_y_advance)``
tuple of floats.
``ascent``
The distance that the font extends above the baseline.
Note that this is not always exactly equal to
the maximum of the extents of all the glyphs in the font,
but rather is picked to express the font designer's intent
as to how the font should align with elements above it.
``descent``
The distance that the font extends below the baseline.
This value is positive for typical fonts
that include portions below the baseline.
Note that this is not always exactly equal
to the maximum of the extents of all the glyphs in the font,
but rather is picked to express the font designer's intent
as to how the font should align with elements below it.
``height``
The recommended vertical distance between baselines
when setting consecutive lines of text with the font.
This is greater than ``ascent + descent``
by a quantity known as the line spacing or external leading.
When space is at a premium, most fonts can be set
with only a distance of ``ascent + descent`` between lines.
``max_x_advance``
The maximum distance in the X direction
that the origin is advanced for any glyph in the font.
``max_y_advance``
The maximum distance in the Y direction
that the origin is advanced for any glyph in the font.
This will be zero for normal fonts used for horizontal writing.
(The scripts of East Asia are sometimes written vertically.)
"""
extents = ffi.new('cairo_font_extents_t *')
cairo.cairo_font_extents(self._pointer, extents)
self._check_status()
# returning extents as is would be a nice API,
# but return a tuple for compat with pycairo.
return (
extents.ascent, extents.descent, extents.height,
extents.max_x_advance, extents.max_y_advance)
#
# Text
#
def text_extents(self, text):
"""Returns the extents for a string of text.
The extents describe a user-space rectangle
that encloses the "inked" portion of the text,
(as it would be drawn by :meth:`show_text`).
Additionally, the ``x_advance`` and ``y_advance`` values
indicate the amount by which the current point would be advanced
by :meth:`show_text`.
Note that whitespace characters do not directly contribute
to the size of the rectangle (``width`` and ``height``).
They do contribute indirectly by changing the position
of non-whitespace characters.
In particular, trailing whitespace characters are likely
to not affect the size of the rectangle,
though they will affect the x_advance and y_advance values.
Because text extents are in user-space coordinates,
they are mostly, but not entirely,
independent of the current transformation matrix.
If you call :meth:`context.scale(2) <scale>`,
text will be drawn twice as big,
but the reported text extents will not be doubled.
They will change slightly due to hinting
(so you can't assume that metrics are independent
of the transformation matrix),
but otherwise will remain unchanged.
:param text: The text to measure, as an Unicode or UTF-8 string.
:returns:
A ``(x_bearing, y_bearing, width, height, x_advance, y_advance)``
tuple of floats.
``x_bearing``
The horizontal distance
from the origin to the leftmost part of the glyphs as drawn.
Positive if the glyphs lie entirely to the right of the origin.
``y_bearing``
The vertical distance
from the origin to the topmost part of the glyphs as drawn.
Positive only if the glyphs lie completely below the origin;
will usually be negative.
``width``
Width of the glyphs as drawn.
``height``
Height of the glyphs as drawn.
``x_advance``
Distance to advance in the X direction
after drawing these glyphs.
``y_advance``
Distance to advance in the Y direction
after drawing these glyphs.
Will typically be zero except for vertical text layout
as found in East-Asian languages.
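For example, horizontally centering text around ``x = 100``
(a minimal sketch, assuming an existing ``context``)::
    x_bearing, _, width, _, _, _ = context.text_extents('Hi')
    context.move_to(100 - width / 2 - x_bearing, 50)
    context.show_text('Hi')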
"""
extents = ffi.new('cairo_text_extents_t *')
cairo.cairo_text_extents(self._pointer, _encode_string(text), extents)
self._check_status()
# returning extents as is would be a nice API,
# but return a tuple for compat with pycairo.
return (
extents.x_bearing, extents.y_bearing,
extents.width, extents.height,
extents.x_advance, extents.y_advance)
def glyph_extents(self, glyphs):
"""Returns the extents for a list of glyphs.
The extents describe a user-space rectangle
that encloses the "inked" portion of the glyphs,
(as it would be drawn by :meth:`show_glyphs`).
Additionally, the ``x_advance`` and ``y_advance`` values
indicate the amount by which the current point would be advanced
by :meth:`show_glyphs`.
:param glyphs:
A list of glyphs.
See :meth:`show_text_glyphs` for the data structure.
:returns:
A ``(x_bearing, y_bearing, width, height, x_advance, y_advance)``
tuple of floats.
See :meth:`text_extents` for details.
"""
glyphs = ffi.new('cairo_glyph_t[]', glyphs)
extents = ffi.new('cairo_text_extents_t *')
cairo.cairo_glyph_extents(
self._pointer, glyphs, len(glyphs), extents)
self._check_status()
return (
extents.x_bearing, extents.y_bearing,
extents.width, extents.height,
extents.x_advance, extents.y_advance)
def show_text(self, text):
"""A drawing operator that generates the shape from a string text,
rendered according to the current
font :meth:`face <set_font_face>`,
font :meth:`size <set_font_size>`
(font :meth:`matrix <set_font_matrix>`),
and font :meth:`options <set_font_options>`.
This method first computes a set of glyphs for the string of text.
The first glyph is placed so that its origin is at the current point.
The origin of each subsequent glyph
is offset from that of the previous glyph
by the advance values of the previous glyph.
After this call the current point is moved
to the origin of where the next glyph would be placed
in this same progression.
That is, the current point will be at
the origin of the final glyph offset by its advance values.
This allows for easy display of a single logical string
with multiple calls to :meth:`show_text`.
:param text: The text to show, as an Unicode or UTF-8 string.
.. note::
This method is part of
what the cairo designers call the "toy" text API.
It is convenient for short demos and simple programs,
but it is not expected to be adequate
for serious text-using applications.
See :ref:`fonts` for details
and :meth:`show_glyphs` for the "real" text display API in cairo.
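For example (a minimal sketch, assuming an existing ``context``)::
    context.select_font_face('sans-serif')
    context.set_font_size(12)
    context.move_to(10, 20)  # origin of the first glyph
    context.show_text('Hello cairo')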
"""
cairo.cairo_show_text(self._pointer, _encode_string(text))
self._check_status()
def show_glyphs(self, glyphs):
"""A drawing operator that generates the shape from a list of glyphs,
rendered according to the current
font :meth:`face <set_font_face>`,
font :meth:`size <set_font_size>`
(font :meth:`matrix <set_font_matrix>`),
and font :meth:`options <set_font_options>`.
:param glyphs:
The glyphs to show.
See :meth:`show_text_glyphs` for the data structure.
"""
glyphs = ffi.new('cairo_glyph_t[]', glyphs)
cairo.cairo_show_glyphs(self._pointer, glyphs, len(glyphs))
self._check_status()
def show_text_glyphs(self, text, glyphs, clusters, cluster_flags=0):
"""This operation has rendering effects similar to :meth:`show_glyphs`
but, if the target surface supports it
(see :meth:`Surface.has_show_text_glyphs`),
uses the provided text and cluster mapping
to embed the text for the glyphs shown in the output.
If the target does not support the extended attributes,
this method acts like the basic :meth:`show_glyphs`
as if it had been passed ``glyphs``.
The mapping between ``text`` and ``glyphs``
is provided by an list of clusters.
Each cluster covers a number of UTF-8 text bytes and glyphs,
and neighboring clusters cover neighboring areas
of ``text`` and ``glyphs``.
The clusters should collectively cover ``text`` and ``glyphs``
in entirety.
:param text:
The text to show, as an Unicode or UTF-8 string.
Because of how ``clusters`` work,
using UTF-8 bytes might be more convenient.
:param glyphs:
A list of glyphs.
Each glyph is a ``(glyph_id, x, y)`` tuple.
``glyph_id`` is an opaque integer.
Its exact interpretation depends on the font technology being used.
``x`` and ``y`` are the float offsets
in the X and Y direction
between the origin used for drawing or measuring the string
and the origin of this glyph.
Note that the offsets are not cumulative.
When drawing or measuring text,
each glyph is individually positioned
with respect to the overall origin.
:param clusters:
A list of clusters.
A text cluster is a minimal mapping of some glyphs
corresponding to some UTF-8 text,
represented as a ``(num_bytes, num_glyphs)`` tuple of integers,
the number of UTF-8 bytes and glyphs covered by the cluster.
For a cluster to be valid,
both ``num_bytes`` and ``num_glyphs`` should be non-negative,
and at least one should be non-zero.
Note that clusters with zero glyphs
are not as well supported as normal clusters.
For example, PDF rendering applications
typically ignore those clusters when PDF text is being selected.
:type cluster_flags: int
:param cluster_flags:
Flags (as a bit field) for the cluster mapping.
The first cluster always covers bytes
from the beginning of ``text``.
If ``cluster_flags`` does not have
the :obj:`TEXT_CLUSTER_FLAG_BACKWARD` flag set,
the first cluster also covers the beginning of ``glyphs``,
otherwise it covers the end of the ``glyphs`` list
and following clusters move backward.
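For example (a minimal sketch; the glyph ids below are made up,
real values depend on the font in use)::
    glyphs = [(52, 10, 30), (25, 20, 30)]  # (glyph_id, x, y)
    clusters = [(1, 1), (1, 1)]  # each cluster: one byte, one glyph
    context.show_text_glyphs('hi', glyphs, clusters)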
"""
glyphs = ffi.new('cairo_glyph_t[]', glyphs)
clusters = ffi.new('cairo_text_cluster_t[]', clusters)
cairo.cairo_show_text_glyphs(
self._pointer, _encode_string(text), -1,
glyphs, len(glyphs), clusters, len(clusters), cluster_flags)
self._check_status()
#
# Pages
#
def show_page(self):
"""Emits and clears the current page
for backends that support multiple pages.
Use :meth:`copy_page` if you don't want to clear the page.
This is a convenience method
that simply calls :meth:`Surface.show_page`
on the context’s target.
"""
cairo.cairo_show_page(self._pointer)
self._check_status()
def copy_page(self):
"""Emits the current page for backends that support multiple pages,
but doesn't clear it,
so the contents of the current page will be retained
for the next page too.
Use :meth:`show_page` if you want to clear the page.
This is a convenience method
that simply calls :meth:`Surface.copy_page`
on the context’s target.
"""
cairo.cairo_copy_page(self._pointer)
self._check_status()
#
# Tags
#
def tag_begin(self, tag_name, attributes=None):
"""Marks the beginning of the ``tag_name`` structure.
Call :meth:`tag_end` with the same ``tag_name`` to mark the end of the
structure.
The attributes string is of the form "key1=value2 key2=value2 ...".
Values may be boolean (true/false or 1/0), integer, float, string, or
an array.
String values are enclosed in single quotes ('). Single quotes and
backslashes inside the string should be escaped with a backslash.
Boolean values may be set to true by only specifying the key,
e.g. the attribute string "key" is equivalent to "key=true".
Arrays are enclosed in '[]', e.g. "rect=[1.2 4.3 2.0 3.0]".
If no attributes are required, ``attributes`` can be omitted,
or be an empty string or None.
See cairo's Tags and Links Description for the list of tags and
attributes.
Invalid nesting of tags or invalid attributes will cause the context
to shut down with a status of ``CAIRO_STATUS_TAG_ERROR``.
See :meth:`tag_end`.
:param tag_name: tag name
:param attributes: tag attributes
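For example, wrapping text in a hyperlink on a PDF surface
(a minimal sketch; the attribute string follows cairo's
Tags and Links syntax)::
    context.tag_begin('Link', "uri='https://example.org'")
    context.show_text('example.org')
    context.tag_end('Link')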
*New in cairo 1.16.*
*New in cairocffi 0.9.*
"""
if attributes is None:
attributes = ''
cairo.cairo_tag_begin(
self._pointer, _encode_string(tag_name),
_encode_string(attributes))
self._check_status()
def tag_end(self, tag_name):
"""Marks the end of the ``tag_name`` structure.
Invalid nesting of tags will cause the context to shut down
with a status of ``CAIRO_STATUS_TAG_ERROR``.
See :meth:`tag_begin`.
:param tag_name: tag name
*New in cairo 1.16.*
*New in cairocffi 0.9.*
"""
cairo.cairo_tag_end(self._pointer, _encode_string(tag_name))
self._check_status()
def set_hairline(self, enabled):
"""Sets lines within the cairo context to be hairlines.
Hairlines are logically zero-width lines that are drawn at the thinnest
renderable width possible in the current context.
:type enabled: bool
:param enabled: whether or not to set hairline mode
*New in cairo 1.18.*
"""
cairo.cairo_set_hairline(self._pointer, int(enabled))
self._check_status()
def get_hairline(self):
"""Returns whether or not hairline mode is set, as set by cairo_set_hairline().
:returns: whether hairline mode is set
*New in cairo 1.18.*
"""
return bool(cairo.cairo_get_hairline(self._pointer))
class Context(object):
'''A :class:`Context` contains the current state of the rendering device,
including coordinates of yet to be drawn shapes.
Cairo contexts are central to cairo
and all drawing with cairo is always done to a :class:`Context` object.
:param target: The target :class:`Surface` object.
Cairo contexts can be used as Python :ref:`context managers <with>`.
See :meth:`save`.
'''
def __init__(self, target):
pass
def _init_pointer(self, pointer):
pass
def _check_status(self):
pass
@classmethod
def _from_pointer(cls, pointer, incref):
'''Wrap an existing ``cairo_t *`` cdata pointer.
:type incref: bool
:param incref:
Whether to increase the :ref:`reference count <refcounting>` now.
:return:
A new :class:`Context` instance.
'''
pass
def get_target(self):
'''Return this context’s target surface.
:returns:
An instance of :class:`Surface` or one of its sub-classes,
a new Python object referencing the existing cairo surface.
'''
pass
def save(self):
'''Makes a copy of the current state of this context
and saves it on an internal stack of saved states.
When :meth:`restore` is called,
the context will be restored to the saved state.
Multiple calls to :meth:`save` and :meth:`restore` can be nested;
each call to :meth:`restore` restores the state
from the matching paired :meth:`save`.
Instead of using :meth:`save` and :meth:`restore` directly,
it is recommended to use a :ref:`with statement <with>`::
with context:
do_something(context)
… which is equivalent to::
context.save()
try:
do_something(context)
finally:
context.restore()
'''
pass
def restore(self):
'''Restores the context to the state saved
by a preceding call to :meth:`save`
and removes that state from the stack of saved states.
'''
pass
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def push_group(self):
'''Temporarily redirects drawing to an intermediate surface
known as a group.
The redirection lasts until the group is completed
by a call to :meth:`pop_group` or :meth:`pop_group_to_source`.
These calls provide the result of any drawing
to the group as a pattern,
(either as an explicit object, or set as the source pattern).
This group functionality can be convenient
for performing intermediate compositing.
One common use of a group is to render objects
as opaque within the group, (so that they occlude each other),
and then blend the result with translucence onto the destination.
Groups can be nested arbitrarily deep
by making balanced calls to :meth:`push_group` / :meth:`pop_group`.
Each call pushes / pops the new target group onto / from a stack.
The :meth:`push_group` method calls :meth:`save`
so that any changes to the graphics state
will not be visible outside the group,
(the pop_group methods call :meth:`restore`).
By default the intermediate group will have
a content type of :obj:`COLOR_ALPHA <CONTENT_COLOR_ALPHA>`.
Other content types can be chosen for the group
by using :meth:`push_group_with_content` instead.
As an example,
here is how one might fill and stroke a path with translucence,
but without any portion of the fill being visible under the stroke::
context.push_group()
context.set_source(fill_pattern)
context.fill_preserve()
context.set_source(stroke_pattern)
context.stroke()
context.pop_group_to_source()
context.paint_with_alpha(alpha)
'''
pass
def push_group_with_content(self, content):
'''Temporarily redirects drawing to an intermediate surface
known as a group.
The redirection lasts until the group is completed
by a call to :meth:`pop_group` or :meth:`pop_group_to_source`.
These calls provide the result of any drawing
to the group as a pattern,
(either as an explicit object, or set as the source pattern).
The group will have a content type of ``content``.
The ability to control this content type
is the only distinction between this method and :meth:`push_group`
which you should see for a more detailed description
of group rendering.
:param content: A :ref:`CONTENT` string.
'''
pass
def pop_group(self):
'''Terminates the redirection begun by a call to :meth:`push_group`
or :meth:`push_group_with_content`
and returns a new pattern containing the results
of all drawing operations performed to the group.
The :meth:`pop_group` method calls :meth:`restore`,
(balancing a call to :meth:`save` by the push_group method),
so that any changes to the graphics state
will not be visible outside the group.
:returns:
A newly created :class:`SurfacePattern`
containing the results of all drawing operations
performed to the group.
'''
pass
def pop_group_to_source(self):
'''Terminates the redirection begun by a call to :meth:`push_group`
or :meth:`push_group_with_content`
and installs the resulting pattern
as the source pattern in the given cairo context.
The behavior of this method is equivalent to::
context.set_source(context.pop_group())
'''
pass
def get_group_target(self):
'''Returns the current destination surface for the context.
This is either the original target surface
as passed to :class:`Context`
or the target surface for the current group as started
by the most recent call to :meth:`push_group`
or :meth:`push_group_with_content`.
'''
pass
def set_source_rgba(self, red, green, blue, alpha=1):
'''Sets the source pattern within this context to a solid color.
This color will then be used for any subsequent drawing operation
until a new source pattern is set.
The color and alpha components are
floating point numbers in the range 0 to 1.
If the values passed in are outside that range, they will be clamped.
The default source pattern is opaque black,
(that is, it is equivalent to ``context.set_source_rgba(0, 0, 0)``).
:param red: Red component of the color.
:param green: Green component of the color.
:param blue: Blue component of the color.
:param alpha:
Alpha component of the color.
1 (the default) is opaque, 0 fully transparent.
:type red: float
:type green: float
:type blue: float
:type alpha: float
'''
pass
def set_source_rgb(self, red, green, blue):
'''Same as :meth:`set_source_rgba` with alpha always 1.
Exists for compatibility with pycairo.
'''
pass
def set_source_surface(self, surface, x=0, y=0):
'''This is a convenience method for creating a pattern from surface
and setting it as the source in this context with :meth:`set_source`.
The ``x`` and ``y`` parameters give the user-space coordinate
at which the surface origin should appear.
(The surface origin is its upper-left corner
before any transformation has been applied.)
The ``x`` and ``y`` parameters are negated
and then set as translation values in the pattern matrix.
Other than the initial translation pattern matrix, as described above,
all other pattern attributes, (such as its extend mode),
are set to the default values as in :class:`SurfacePattern`.
The resulting pattern can be queried with :meth:`get_source`
so that these attributes can be modified if desired,
(eg. to create a repeating pattern with :meth:`Pattern.set_extend`).
:param surface:
A :class:`Surface` to be used to set the source pattern.
:param x: User-space X coordinate for surface origin.
:param y: User-space Y coordinate for surface origin.
:type x: float
:type y: float
'''
pass
def set_source(self, source):
'''Sets the source pattern within this context to ``source``.
This pattern will then be used for any subsequent drawing operation
until a new source pattern is set.
.. note::
The pattern's transformation matrix will be locked
to the user space in effect at the time of :meth:`set_source`.
This means that further modifications
of the current transformation matrix
will not affect the source pattern.
See :meth:`Pattern.set_matrix`.
The default source pattern is opaque black,
(that is, it is equivalent to ``context.set_source_rgba(0, 0, 0)``).
:param source:
A :class:`Pattern` to be used
as the source for subsequent drawing operations.
'''
pass
def get_source(self):
'''Return this context’s source.
:returns:
An instance of :class:`Pattern` or one of its sub-classes,
a new Python object referencing the existing cairo pattern.
'''
pass
def set_antialias(self, antialias):
'''Set the :ref:`ANTIALIAS` of the rasterizer used for drawing shapes.
This value is a hint,
and a particular backend may or may not support a particular value.
At the current time,
no backend supports :obj:`SUBPIXEL <ANTIALIAS_SUBPIXEL>`
when drawing shapes.
Note that this option does not affect text rendering,
instead see :meth:`FontOptions.set_antialias`.
:param antialias: An :ref:`ANTIALIAS` string.
'''
pass
def get_antialias(self):
'''Return the :ref:`ANTIALIAS` string.'''
pass
def set_dash(self, dashes, offset=0):
'''Sets the dash pattern to be used by :meth:`stroke`.
A dash pattern is specified by dashes, a list of positive values.
Each value provides the length of alternate "on" and "off"
portions of the stroke.
``offset`` specifies an offset into the pattern
at which the stroke begins.
Each "on" segment will have caps applied
as if the segment were a separate sub-path.
In particular, it is valid to use an "on" length of 0
with :obj:`LINE_CAP_ROUND` or :obj:`LINE_CAP_SQUARE`
in order to distribute dots or squares along a path.
Note: The length values are in user-space units
as evaluated at the time of stroking.
This is not necessarily the same as the user space
at the time of :meth:`set_dash`.
If ``dashes`` is empty dashing is disabled.
If it is of length 1 a symmetric pattern is assumed
with alternating on and off portions of the size specified
by the single value.
:param dashes:
A list of floats specifying alternate lengths
of on and off stroke portions.
:type offset: float
:param offset:
An offset into the dash pattern at which the stroke should start.
:raises:
:exc:`CairoError`
if any value in dashes is negative,
or if all values are 0.
The context will be put into an error state.
'''
pass
def get_dash(self):
'''Return the current dash pattern.
:returns:
A ``(dashes, offset)`` tuple of a list and a float.
``dashes`` is a list of floats,
empty if no dashing is in effect.
'''
pass
def get_dash_count(self):
'''Same as ``len(context.get_dash()[0])``.'''
pass
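# --- Added usage sketch; not part of the cairocffi source. ---
# A 10-on/5-off dash pattern starting 2.5 units into the pattern
# (`context` as in the earlier sketch).
context.set_dash([10, 5], offset=2.5)
assert context.get_dash() == ([10, 5], 2.5)
assert context.get_dash_count() == 2
context.set_dash([])                 # an empty list disables dashing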
def set_fill_rule(self, fill_rule):
'''Set the current :ref:`FILL_RULE` within the cairo context.
The fill rule is used to determine which regions are inside
or outside a complex (potentially self-intersecting) path.
The current fill rule affects both :meth:`fill` and :meth:`clip`.
The default fill rule is :obj:`WINDING <FILL_RULE_WINDING>`.
:param fill_rule: A :ref:`FILL_RULE` string.
'''
pass
def get_fill_rule(self):
'''Return the current :ref:`FILL_RULE` string.'''
pass
def set_line_cap(self, line_cap):
'''Set the current :ref:`LINE_CAP` within the cairo context.
As with the other stroke parameters,
the current line cap style is examined by
:meth:`stroke` and :meth:`stroke_extents`,
but does not have any effect during path construction.
The default line cap is :obj:`BUTT <LINE_CAP_BUTT>`.
:param line_cap: A :ref:`LINE_CAP` string.
'''
pass
def get_line_cap(self):
'''Return the current :ref:`LINE_CAP` string.'''
pass
def set_line_join(self, line_join):
'''Set the current :ref:`LINE_JOIN` within the cairo context.
As with the other stroke parameters,
the current line cap style is examined by
:meth:`stroke` and :meth:`stroke_extents`,
but does not have any effect during path construction.
The default line cap is :obj:`MITER <LINE_JOIN_MITER>`.
:param line_join: A :ref:`LINE_JOIN` string.
'''
pass
def get_line_join(self):
'''Return the current :ref:`LINE_JOIN` string.'''
pass
def set_line_width(self, width):
'''Sets the current line width within the cairo context.
The line width value specifies the diameter of a pen
that is circular in user space,
(though device-space pen may be an ellipse in general
due to scaling / shear / rotation of the CTM).
.. note::
When the description above refers to user space and CTM
it refers to the user space and CTM in effect
at the time of the stroking operation,
not the user space and CTM in effect
at the time of the call to :meth:`set_line_width`.
The simplest usage makes both of these spaces identical.
That is, if there is no change to the CTM
between a call to :meth:`set_line_width`
and the stroking operation,
then one can just pass user-space values to :meth:`set_line_width`
and ignore this note.
As with the other stroke parameters,
the current line cap style is examined by
:meth:`stroke` and :meth:`stroke_extents`,
but does not have any effect during path construction.
The default line width value is 2.0.
:type width: float
:param width: The new line width.
'''
pass
def get_line_width(self):
'''Return the current line width as a float.'''
pass
def set_miter_limit(self, limit):
'''Sets the current miter limit within the cairo context.
If the current line join style is set to :obj:`MITER <LINE_JOIN_MITER>`
(see :meth:`set_line_join`),
the miter limit is used to determine
whether the lines should be joined with a bevel instead of a miter.
Cairo divides the length of the miter by the line width.
If the result is greater than the miter limit,
the style is converted to a bevel.
As with the other stroke parameters,
the current line cap style is examined by
:meth:`stroke` and :meth:`stroke_extents`,
but does not have any effect during path construction.
The default miter limit value is 10.0,
which will convert joins with interior angles less than 11 degrees
to bevels instead of miters.
For reference,
a miter limit of 2.0 makes the miter cutoff at 60 degrees,
and a miter limit of 1.414 makes the cutoff at 90 degrees.
A miter limit for a desired angle can be computed as:
``miter_limit = 1. / sin(angle / 2.)``
:param limit: The miter limit to set.
:type limit: float
'''
pass
def get_miter_limit(self):
'''Return the current miter limit as a float.'''
pass
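# --- Added worked example; not part of the cairocffi source. ---
# Choosing a miter limit from a desired cutoff angle with the formula
# quoted above: joins sharper than 60 degrees become bevels.
from math import pi, sin
context.set_line_join(cairocffi.LINE_JOIN_MITER)
context.set_miter_limit(1 / sin((60 * pi / 180) / 2))   # == 2.0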
def set_operator(self, operator):
'''Set the current :ref:`OPERATOR`
to be used for all drawing operations.
The default operator is :obj:`OVER <OPERATOR_OVER>`.
:param operator: A :ref:`OPERATOR` string.
'''
pass
def get_operator(self):
'''Return the current :ref:`OPERATOR` string.'''
pass
def set_tolerance(self, tolerance):
'''Sets the tolerance used when converting paths into trapezoids.
Curved segments of the path will be subdivided
until the maximum deviation between the original path
and the polygonal approximation is less than tolerance.
The default value is 0.1.
A larger value will give better performance,
a smaller value, better appearance.
(Reducing the value from the default value of 0.1
is unlikely to improve appearance significantly.)
The accuracy of paths within Cairo is limited
by the precision of its internal arithmetic,
and the prescribed tolerance is restricted
to the smallest representable internal value.
:type tolerance: float
:param tolerance: The tolerance, in device units (typically pixels)
'''
pass
def get_tolerance(self):
'''Return the current tolerance as a float.'''
pass
def translate(self, tx, ty):
'''Modifies the current transformation matrix (CTM)
by translating the user-space origin by ``(tx, ty)``.
This offset is interpreted as a user-space coordinate
according to the CTM in place before the new call to :meth:`translate`.
In other words, the translation of the user-space origin takes place
after any existing transformation.
:param tx: Amount to translate in the X direction
:param ty: Amount to translate in the Y direction
:type tx: float
:type ty: float
'''
pass
def scale(self, sx, sy=None):
'''Modifies the current transformation matrix (CTM)
by scaling the X and Y user-space axes
by ``sx`` and ``sy`` respectively.
The scaling of the axes takes place after
any existing transformation of user space.
If ``sy`` is omitted, it is the same as ``sx``
so that scaling preserves aspect ratios.
:param sx: Scale factor in the X direction.
:param sy: Scale factor in the Y direction.
:type sx: float
:type sy: float
'''
pass
def rotate(self, radians):
'''Modifies the current transformation matrix (CTM)
by rotating the user-space axes by angle ``radians``.
The rotation of the axes takes places
after any existing transformation of user space.
:type radians: float
:param radians:
Angle of rotation, in radians.
The direction of rotation is defined such that positive angles
rotate in the direction from the positive X axis
toward the positive Y axis.
With the default axis orientation of cairo,
positive angles rotate in a clockwise direction.
'''
pass
def transform(self, matrix):
'''Modifies the current transformation matrix (CTM)
by applying ``matrix`` as an additional transformation.
The new transformation of user space takes place
after any existing transformation.
:param matrix:
A transformation :class:`Matrix`
to be applied to the user-space axes.
'''
pass
def set_matrix(self, matrix):
'''Modifies the current transformation matrix (CTM)
by setting it equal to ``matrix``.
:param matrix:
A transformation :class:`Matrix` from user space to device space.
'''
pass
def get_matrix(self):
'''Return a copy of the current transformation matrix (CTM).'''
pass
def identity_matrix(self):
'''Resets the current transformation matrix (CTM)
by setting it equal to the identity matrix.
That is, the user-space and device-space axes will be aligned
and one user-space unit will transform to one device-space unit.
'''
pass
def user_to_device(self, x, y):
'''Transform a coordinate from user space to device space
by multiplying the given point
by the current transformation matrix (CTM).
:param x: X position.
:param y: Y position.
:type x: float
:type y: float
:returns: A ``(device_x, device_y)`` tuple of floats.
'''
pass
def user_to_device_distance(self, dx, dy):
'''Transform a distance vector from user space to device space.
This method is similar to :meth:`Context.user_to_device`
except that the translation components of the CTM
will be ignored when transforming ``(dx, dy)``.
:param dx: X component of a distance vector.
:param dy: Y component of a distance vector.
:type dx: float
:type dy: float
:returns: A ``(device_dx, device_dy)`` tuple of floats.
'''
pass
def device_to_user(self, x, y):
'''Transform a coordinate from device space to user space
by multiplying the given point
by the inverse of the current transformation matrix (CTM).
:param x: X position.
:param y: Y position.
:type x: float
:type y: float
:returns: A ``(user_x, user_y)`` tuple of floats.
'''
pass
def device_to_user_distance(self, dx, dy):
'''Transform a distance vector from device space to user space.
This method is similar to :meth:`Context.device_to_user`
except that the translation components of the inverse CTM
will be ignored when transforming ``(dx, dy)``.
:param dx: X component of a distance vector.
:param dy: Y component of a distance vector.
:type dx: float
:type dy: float
:returns: A ``(user_dx, user_dy)`` tuple of floats.
'''
pass
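# --- Added usage sketch; not part of the cairocffi source. ---
# Round-tripping a point and a distance through the CTM
# (`context` as in the earlier sketch).
context.translate(10, 20)
context.scale(2)
dx, dy = context.user_to_device(5, 5)             # -> (20.0, 30.0)
assert context.device_to_user(dx, dy) == (5.0, 5.0)
# Distance variants ignore the translation component:
assert context.user_to_device_distance(5, 5) == (10.0, 10.0)
context.identity_matrix()                         # reset the CTM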
def has_current_point(self):
'''Returns whether a current point is defined on the current path.
See :meth:`get_current_point`.
'''
pass
def get_current_point(self):
'''Return the current point of the current path,
which is conceptually the final point reached by the path so far.
The current point is returned in the user-space coordinate system.
If there is no defined current point
or if the context is in an error status,
``(0, 0)`` is returned.
It is possible to check this in advance with :meth:`has_current_point`.
Most path construction methods alter the current point.
See the following for details on how they affect the current point:
:meth:`new_path`,
:meth:`new_sub_path`,
:meth:`append_path`,
:meth:`close_path`,
:meth:`move_to`,
:meth:`line_to`,
:meth:`curve_to`,
:meth:`rel_move_to`,
:meth:`rel_line_to`,
:meth:`rel_curve_to`,
:meth:`arc`,
:meth:`arc_negative`,
:meth:`rectangle`,
:meth:`text_path`,
:meth:`glyph_path`.
Some methods use and alter the current point
but do not otherwise change current path:
:meth:`show_text`,
:meth:`show_glyphs`,
:meth:`show_text_glyphs`.
Some methods unset the current path and as a result, current point:
:meth:`fill`,
:meth:`stroke`.
:returns:
A ``(x, y)`` tuple of floats, the coordinates of the current point.
'''
pass
def new_path(self):
''' Clears the current path.
After this call there will be no path and no current point.
'''
pass
def new_sub_path(self):
'''Begin a new sub-path.
Note that the existing path is not affected.
After this call there will be no current point.
In many cases, this call is not needed
since new sub-paths are frequently started with :meth:`move_to`.
A call to :meth:`new_sub_path` is particularly useful
when beginning a new sub-path with one of the :meth:`arc` calls.
This makes things easier as it is no longer necessary
to manually compute the arc's initial coordinates
for a call to :meth:`move_to`.
'''
pass
def move_to(self, x, y):
'''Begin a new sub-path.
After this call the current point will be ``(x, y)``.
:param x: X position of the new point.
:param y: Y position of the new point.
:type x: float
:type y: float
'''
pass
def rel_move_to(self, dx, dy):
'''Begin a new sub-path.
After this call the current point will be offset by ``(dx, dy)``.
Given a current point of ``(x, y)``,
``context.rel_move_to(dx, dy)`` is logically equivalent to
``context.move_to(x + dx, y + dy)``.
:param dx: The X offset.
:param dy: The Y offset.
:type dx: float
:type dy: float
:raises:
:exc:`CairoError` if there is no current point.
Doing so will leave the context in an error state.
'''
pass
def line_to(self, x, y):
'''Adds a line to the path from the current point
to position ``(x, y)`` in user-space coordinates.
After this call the current point will be ``(x, y)``.
If there is no current point before the call to :meth:`line_to`
this method will behave as ``context.move_to(x, y)``.
:param x: X coordinate of the end of the new line.
:param y: Y coordinate of the end of the new line.
:type x: float
:type y: float
'''
pass
def rel_line_to(self, dx, dy):
''' Relative-coordinate version of :meth:`line_to`.
Adds a line to the path from the current point
to a point that is offset from the current point
by ``(dx, dy)`` in user space.
After this call the current point will be offset by ``(dx, dy)``.
Given a current point of ``(x, y)``,
``context.rel_line_to(dx, dy)`` is logically equivalent to
``context.line_to(x + dx, y + dy)``.
:param dx: The X offset to the end of the new line.
:param dy: The Y offset to the end of the new line.
:type dx: float
:type dy: float
:raises:
:exc:`CairoError` if there is no current point.
Doing so will leave the context in an error state.
'''
pass
def rectangle(self, x, y, width, height):
'''Adds a closed sub-path rectangle
of the given size to the current path
at position ``(x, y)`` in user-space coordinates.
This method is logically equivalent to::
context.move_to(x, y)
context.rel_line_to(width, 0)
context.rel_line_to(0, height)
context.rel_line_to(-width, 0)
context.close_path()
:param x: The X coordinate of the top left corner of the rectangle.
:param y: The Y coordinate of the top left corner of the rectangle.
:param width: Width of the rectangle.
:param height: Height of the rectangle.
:type x: float
:type y: float
:type width: float
:type height: float
'''
pass
def arc(self, xc, yc, radius, angle1, angle2):
'''Adds a circular arc of the given radius to the current path.
The arc is centered at ``(xc, yc)``,
begins at ``angle1``
and proceeds in the direction of increasing angles
to end at ``angle2``.
If ``angle2`` is less than ``angle1``
it will be progressively increased by ``2 * pi``
until it is greater than ``angle1``.
If there is a current point,
an initial line segment will be added to the path
to connect the current point to the beginning of the arc.
If this initial line is undesired,
it can be avoided by calling :meth:`new_sub_path`
before calling :meth:`arc`.
Angles are measured in radians.
An angle of 0 is in the direction of the positive X axis
(in user space).
An angle of ``pi / 2`` radians (90 degrees)
is in the direction of the positive Y axis (in user space).
Angles increase in the direction from the positive X axis
toward the positive Y axis.
So with the default transformation matrix,
angles increase in a clockwise direction.
(To convert from degrees to radians, use ``degrees * pi / 180``.)
This method gives the arc in the direction of increasing angles;
see :meth:`arc_negative` to get the arc
in the direction of decreasing angles.
The arc is circular in user space.
To achieve an elliptical arc,
you can scale the current transformation matrix
by different amounts in the X and Y directions.
For example, to draw an ellipse in the box
given by x, y, width, height::
from math import pi
with context:
context.translate(x + width / 2., y + height / 2.)
context.scale(width / 2., height / 2.)
context.arc(0, 0, 1, 0, 2 * pi)
:param xc: X position of the center of the arc.
:param yc: Y position of the center of the arc.
:param radius: The radius of the arc.
:param angle1: The start angle, in radians.
:param angle2: The end angle, in radians.
:type xc: float
:type yc: float
:type radius: float
:type angle1: float
:type angle2: float
'''
pass
def arc_negative(self, xc, yc, radius, angle1, angle2):
'''Adds a circular arc of the given radius to the current path.
The arc is centered at ``(xc, yc)``,
begins at ``angle1``
and proceeds in the direction of decreasing angles
to end at ``angle2``.
If ``angle2`` is greater than ``angle1``
it will be progressively decreased by ``2 * pi``
until it is less than ``angle1``.
See :meth:`arc` for more details.
This method differs only in
the direction of the arc between the two angles.
:param xc: X position of the center of the arc.
:param yc: Y position of the center of the arc.
:param radius: The radius of the arc.
:param angle1: The start angle, in radians.
:param angle2: The end angle, in radians.
:type xc: float
:type yc: float
:type radius: float
:type angle1: float
:type angle2: float
'''
pass
def curve_to(self, x1, y1, x2, y2, x3, y3):
'''Adds a cubic Bézier spline to the path
from the current point
to position ``(x3, y3)`` in user-space coordinates,
using ``(x1, y1)`` and ``(x2, y2)`` as the control points.
After this call the current point will be ``(x3, y3)``.
If there is no current point before the call to :meth:`curve_to`
this method will behave as if preceded by
a call to ``context.move_to(x1, y1)``.
:param x1: The X coordinate of the first control point.
:param y1: The Y coordinate of the first control point.
:param x2: The X coordinate of the second control point.
:param y2: The Y coordinate of the second control point.
:param x3: The X coordinate of the end of the curve.
:param y3: The Y coordinate of the end of the curve.
:type x1: float
:type y1: float
:type x2: float
:type y2: float
:type x3: float
:type y3: float
'''
pass
def rel_curve_to(self, dx1, dy1, dx2, dy2, dx3, dy3):
''' Relative-coordinate version of :meth:`curve_to`.
All offsets are relative to the current point.
Adds a cubic Bézier spline to the path from the current point
to a point offset from the current point by ``(dx3, dy3)``,
using points offset by ``(dx1, dy1)`` and ``(dx2, dy2)``
as the control points.
After this call the current point will be offset by ``(dx3, dy3)``.
Given a current point of ``(x, y)``,
``context.rel_curve_to(dx1, dy1, dx2, dy2, dx3, dy3)``
is logically equivalent to
``context.curve_to(x+dx1, y+dy1, x+dx2, y+dy2, x+dx3, y+dy3)``.
:param dx1: The X offset to the first control point.
:param dy1: The Y offset to the first control point.
:param dx2: The X offset to the second control point.
:param dy2: The Y offset to the second control point.
:param dx3: The X offset to the end of the curve.
:param dy3: The Y offset to the end of the curve.
:type dx1: float
:type dy1: float
:type dx2: float
:type dy2: float
:type dx3: float
:type dy3: float
:raises:
:exc:`CairoError` if there is no current point.
Doing so will cause leave the context in an error state.
'''
pass
def text_path(self, text):
'''Adds closed paths for text to the current path.
The generated path, if filled,
achieves an effect similar to that of :meth:`show_text`.
Text conversion and positioning is done similar to :meth:`show_text`.
Like :meth:`show_text`,
after this call the current point is moved to the origin of where
the next glyph would be placed in this same progression.
That is, the current point will be at the origin of the final glyph
offset by its advance values.
This allows for chaining multiple calls to :meth:`text_path`
without having to set current point in between.
:param text: The text to show, as an Unicode or UTF-8 string.
.. note::
The :meth:`text_path` method is part of
what the cairo designers call the "toy" text API.
It is convenient for short demos and simple programs,
but it is not expected to be adequate
for serious text-using applications.
See :ref:`fonts` for details,
and :meth:`glyph_path` for the "real" text path API in cairo.
'''
pass
def glyph_path(self, glyphs):
'''Adds closed paths for the glyphs to the current path.
The generated path, if filled,
achieves an effect similar to that of :meth:`show_glyphs`.
:param glyphs:
The glyphs to show.
See :meth:`show_text_glyphs` for the data structure.
'''
pass
def close_path(self):
'''Adds a line segment to the path
from the current point
to the beginning of the current sub-path,
(the most recent point passed to :meth:`move_to`),
and closes this sub-path.
After this call the current point will be
at the joined endpoint of the sub-path.
The behavior of :meth:`close_path` is distinct
from simply calling :meth:`line_to` with the equivalent coordinate
in the case of stroking.
When a closed sub-path is stroked,
there are no caps on the ends of the sub-path.
Instead, there is a line join
connecting the final and initial segments of the sub-path.
If there is no current point before the call to :meth:`close_path`,
this method will have no effect.
'''
pass
def copy_path(self):
'''Return a copy of the current path.
:returns:
A list of ``(path_operation, coordinates)`` tuples
of a :ref:`PATH_OPERATION` string
and a tuple of floats coordinates
whose content depends on the operation type:
* :obj:`MOVE_TO <PATH_MOVE_TO>`: 1 point ``(x, y)``
* :obj:`LINE_TO <PATH_LINE_TO>`: 1 point ``(x, y)``
* :obj:`CURVE_TO <PATH_CURVE_TO>`: 3 points
``(x1, y1, x2, y2, x3, y3)``
* :obj:`CLOSE_PATH <PATH_CLOSE_PATH>` 0 points ``()`` (empty tuple)
'''
pass
def copy_path_flat(self):
'''Return a flattened copy of the current path
This method is like :meth:`copy_path`
except that any curves in the path will be approximated
with piecewise-linear approximations,
(accurate to within the current tolerance value,
see :meth:`set_tolerance`).
That is,
the result is guaranteed to not have any elements
of type :obj:`CURVE_TO <PATH_CURVE_TO>`
which will instead be replaced by
a series of :obj:`LINE_TO <PATH_LINE_TO>` elements.
:returns:
A list of ``(path_operation, coordinates)`` tuples.
See :meth:`copy_path` for the data structure.
'''
pass
def append_path(self, path):
'''Append ``path`` onto the current path.
The path may be either the return value from one of :meth:`copy_path`
or :meth:`copy_path_flat` or it may be constructed manually.
:param path:
An iterable of tuples
in the same format as returned by :meth:`copy_path`.
'''
pass
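# --- Added usage sketch; not part of the cairocffi source. ---
# Building path data by hand in the copy_path() format and appending it
# (`context` as in the earlier sketch).
triangle = [
    (cairocffi.PATH_MOVE_TO, (10, 10)),
    (cairocffi.PATH_LINE_TO, (90, 10)),
    (cairocffi.PATH_LINE_TO, (50, 80)),
    (cairocffi.PATH_CLOSE_PATH, ()),
]
context.append_path(triangle)
context.fill()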
def path_extents(self):
'''Computes a bounding box in user-space coordinates
covering the points on the current path.
If the current path is empty,
returns an empty rectangle ``(0, 0, 0, 0)``.
Stroke parameters, fill rule, surface dimensions and clipping
are not taken into account.
Contrast with :meth:`fill_extents` and :meth:`stroke_extents`
which return the extents of only the area that would be "inked"
by the corresponding drawing operations.
The result of :meth:`path_extents`
is defined as equivalent to the limit of :meth:`stroke_extents`
with :obj:`LINE_CAP_ROUND` as the line width approaches 0,
(but never reaching the empty-rectangle
returned by :meth:`stroke_extents` for a line width of 0).
Specifically, this means that zero-area sub-paths
such as :meth:`move_to`; :meth:`line_to` segments,
(even degenerate cases
where the coordinates to both calls are identical),
will be considered as contributing to the extents.
However, a lone :meth:`move_to` will not contribute
to the results of :meth:`path_extents`.
:return:
A ``(x1, y1, x2, y2)`` tuple of floats:
the left, top, right and bottom of the resulting extents,
respectively.
'''
pass
def paint(self):
'''A drawing operator that paints the current source everywhere
within the current clip region.
'''
pass
def paint_with_alpha(self, alpha):
'''A drawing operator that paints the current source everywhere
within the current clip region
using a mask of constant alpha value alpha.
The effect is similar to :meth:`paint`,
but the drawing is faded out using the ``alpha`` value.
:type alpha: float
:param alpha: Alpha value, between 0 (transparent) and 1 (opaque).
'''
pass
def mask(self, pattern):
'''A drawing operator that paints the current source
using the alpha channel of ``pattern`` as a mask.
(Opaque areas of ``pattern`` are painted with the source,
transparent areas are not painted.)
:param pattern: A :class:`Pattern` object.
'''
pass
def mask_surface(self, surface, surface_x=0, surface_y=0):
'''A drawing operator that paints the current source
using the alpha channel of ``surface`` as a mask.
(Opaque areas of ``surface`` are painted with the source,
transparent areas are not painted.)
:param surface: A :class:`Surface` object.
:param surface_x: X coordinate at which to place the origin of surface.
:param surface_y: Y coordinate at which to place the origin of surface.
:type surface_x: float
:type surface_y: float
'''
pass
def fill(self):
'''A drawing operator that fills the current path
according to the current fill rule,
(each sub-path is implicitly closed before being filled).
After :meth:`fill`,
the current path will be cleared from the cairo context.
See :meth:`set_fill_rule` and :meth:`fill_preserve`.
'''
pass
def fill_preserve(self):
'''A drawing operator that fills the current path
according to the current fill rule,
(each sub-path is implicitly closed before being filled).
Unlike :meth:`fill`,
:meth:`fill_preserve` preserves the path within the cairo context.
See :meth:`set_fill_rule` and :meth:`fill`.
'''
pass
def fill_extents(self):
'''Computes a bounding box in user-space coordinates
covering the area that would be affected, (the "inked" area),
by a :meth:`fill` operation given the current path and fill parameters.
If the current path is empty,
returns an empty rectangle ``(0, 0, 0, 0)``.
Surface dimensions and clipping are not taken into account.
Contrast with :meth:`path_extents` which is similar,
but returns non-zero extents for some paths with no inked area,
(such as a simple line segment).
Note that :meth:`fill_extents` must necessarily do more work
to compute the precise inked areas in light of the fill rule,
so :meth:`path_extents` may be more desirable for sake of performance
if the non-inked path extents are desired.
See :meth:`fill`, :meth:`set_fill_rule` and :meth:`fill_preserve`.
:return:
A ``(x1, y1, x2, y2)`` tuple of floats:
the left, top, right and bottom of the resulting extents,
respectively.
'''
pass
def in_fill(self, x, y):
'''Tests whether the given point is inside the area
that would be affected by a :meth:`fill` operation
given the current path and filling parameters.
Surface dimensions and clipping are not taken into account.
See :meth:`fill`, :meth:`set_fill_rule` and :meth:`fill_preserve`.
:param x: X coordinate of the point to test
:param y: Y coordinate of the point to test
:type x: float
:type y: float
:returns: A boolean.
'''
pass
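# --- Added usage sketch; not part of the cairocffi source. ---
# Hit-testing a point against the current path before filling
# (`context` as in the earlier sketch).
context.new_path()
context.rectangle(10, 10, 50, 50)
assert context.in_fill(30, 30)
assert not context.in_fill(80, 80)
x1, y1, x2, y2 = context.fill_extents()   # -> (10.0, 10.0, 60.0, 60.0)
context.fill()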
def stroke(self):
'''A drawing operator that strokes the current path
according to the current line width, line join, line cap,
and dash settings.
After :meth:`stroke`,
the current path will be cleared from the cairo context.
See :meth:`set_line_width`, :meth:`set_line_join`,
:meth:`set_line_cap`, :meth:`set_dash`, and :meth:`stroke_preserve`.
Note: Degenerate segments and sub-paths are treated specially
and provide a useful result.
These can result in two different situations:
1. Zero-length "on" segments set in :meth:`set_dash`.
If the cap style is :obj:`ROUND <LINE_CAP_ROUND>`
or :obj:`SQUARE <LINE_CAP_SQUARE>`
then these segments will be drawn
as circular dots or squares respectively.
In the case of :obj:`SQUARE <LINE_CAP_SQUARE>`,
the orientation of the squares is determined
by the direction of the underlying path.
2. A sub-path created by :meth:`move_to` followed
by either a :meth:`close_path`
or one or more calls to :meth:`line_to`
to the same coordinate as the :meth:`move_to`.
If the cap style is :obj:`ROUND <LINE_CAP_ROUND>`
then these sub-paths will be drawn as circular dots.
Note that in the case of :obj:`SQUARE <LINE_CAP_SQUARE>`
a degenerate sub-path will not be drawn at all,
(since the correct orientation is indeterminate).
In no case will a cap style of :obj:`BUTT <LINE_CAP_BUTT>`
cause anything to be drawn
in the case of either degenerate segments or sub-paths.
'''
pass
def stroke_preserve(self):
'''A drawing operator that strokes the current path
according to the current line width, line join, line cap,
and dash settings.
Unlike :meth:`stroke`,
:meth:`stroke_preserve` preserves the path within the cairo context.
See :meth:`set_line_width`, :meth:`set_line_join`,
:meth:`set_line_cap`, :meth:`set_dash`, and :meth:`stroke`.
'''
pass
def stroke_extents(self):
'''Computes a bounding box in user-space coordinates
covering the area that would be affected, (the "inked" area),
by a :meth:`stroke` operation given the current path
and stroke parameters.
If the current path is empty,
returns an empty rectangle ``(0, 0, 0, 0)``.
Surface dimensions and clipping are not taken into account.
Note that if the line width is set to exactly zero,
then :meth:`stroke_extents` will return an empty rectangle.
Contrast with :meth:`path_extents`
which can be used to compute the non-empty bounds
as the line width approaches zero.
Note that :meth:`stroke_extents` must necessarily do more work
to compute the precise inked areas in light of the stroke parameters,
so :meth:`path_extents` may be more desirable for sake of performance
if the non-inked path extents are desired.
See :meth:`stroke`, :meth:`set_line_width`, :meth:`set_line_join`,
:meth:`set_line_cap`, :meth:`set_dash`, and :meth:`stroke_preserve`.
:return:
A ``(x1, y1, x2, y2)`` tuple of floats:
the left, top, right and bottom of the resulting extents,
respectively.
'''
pass
def in_stroke(self, x, y):
'''Tests whether the given point is inside the area
that would be affected by a :meth:`stroke` operation
given the current path and stroking parameters.
Surface dimensions and clipping are not taken into account.
See :meth:`stroke`, :meth:`set_line_width`, :meth:`set_line_join`,
:meth:`set_line_cap`, :meth:`set_dash`, and :meth:`stroke_preserve`.
:param x: X coordinate of the point to test
:param y: Y coordinate of the point to test
:type x: float
:type y: float
:returns: A boolean.
'''
pass
def clip(self):
'''Establishes a new clip region
by intersecting the current clip region
with the current path as it would be filled by :meth:`fill`
and according to the current fill rule (see :meth:`set_fill_rule`).
After :meth:`clip`,
the current path will be cleared from the cairo context.
The current clip region affects all drawing operations
by effectively masking out any changes to the surface
that are outside the current clip region.
Calling :meth:`clip` can only make the clip region smaller,
never larger.
But the current clip is part of the graphics state,
so a temporary restriction of the clip region can be achieved
by calling :meth:`clip` within a :meth:`save` / :meth:`restore` pair.
The only other means of increasing the size of the clip region
is :meth:`reset_clip`.
'''
pass
def clip_preserve(self):
'''Establishes a new clip region
by intersecting the current clip region
with the current path as it would be filled by :meth:`fill`
and according to the current fill rule (see :meth:`set_fill_rule`).
Unlike :meth:`clip`,
:meth:`clip_preserve` preserves the path within the cairo context.
The current clip region affects all drawing operations
by effectively masking out any changes to the surface
that are outside the current clip region.
Calling :meth:`clip_preserve` can only make the clip region smaller,
never larger.
But the current clip is part of the graphics state,
so a temporary restriction of the clip region can be achieved
by calling :meth:`clip_preserve`
within a :meth:`save` / :meth:`restore` pair.
The only other means of increasing the size of the clip region
is :meth:`reset_clip`.
'''
pass
def clip_extents(self):
'''Computes a bounding box in user coordinates
covering the area inside the current clip.
:return:
A ``(x1, y1, x2, y2)`` tuple of floats:
the left, top, right and bottom of the resulting extents,
respectively.
'''
pass
def copy_clip_rectangle_list(self):
'''Return the current clip region as a list of rectangles
in user coordinates.
:return:
A list of rectangles,
as ``(x, y, width, height)`` tuples of floats.
:raises:
:exc:`CairoError`
if the clip region cannot be represented as a list
of user-space rectangles.
'''
pass
def in_clip(self, x, y):
'''Tests whether the given point is inside the area
that would be visible through the current clip,
i.e. the area that would be filled by a :meth:`paint` operation.
See :meth:`clip`, and :meth:`clip_preserve`.
:param x: X coordinate of the point to test
:param y: Y coordinate of the point to test
:type x: float
:type y: float
:returns: A boolean.
*New in cairo 1.10.*
'''
pass
def reset_clip(self):
'''Reset the current clip region to its original, unrestricted state.
That is, set the clip region to an infinitely large shape
containing the target surface.
Equivalently, if infinity is too hard to grasp,
one can imagine the clip region being reset
to the exact bounds of the target surface.
Note that code meant to be reusable
should not call :meth:`reset_clip`
as it will cause results unexpected by higher-level code
which calls :meth:`clip`.
Consider using :meth:`save` and :meth:`restore` around :meth:`clip`
as a more robust means of temporarily restricting the clip region.
'''
pass
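# --- Added usage sketch; not part of the cairocffi source. ---
# A temporary clip via the save()/restore() pair recommended above;
# cairocffi Contexts are context managers that do exactly that.
with context:                # save() ... restore()
    context.rectangle(0, 0, 50, 50)
    context.clip()           # clears the path and shrinks the clip
    context.paint()          # only the 50x50 square is painted
# here the previous clip region is back in effect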
def select_font_face(self, family='', slant=constants.FONT_SLANT_NORMAL,
weight=constants.FONT_WEIGHT_NORMAL):
'''Selects a family and style of font from a simplified description
as a family name, slant and weight.
.. note::
The :meth:`select_font_face` method is part of
what the cairo designers call the "toy" text API.
It is convenient for short demos and simple programs,
but it is not expected to be adequate
for serious text-using applications.
See :ref:`fonts` for details.
Cairo provides no operation to list available family names
on the system (this is a "toy", remember),
but the standard CSS2 generic family names,
(``"serif"``, ``"sans-serif"``, ``"cursive"``, ``"fantasy"``,
``"monospace"``),
are likely to work as expected.
If family starts with the string ``"cairo:"``,
or if no native font backends are compiled in,
cairo will use an internal font family.
The internal font family recognizes many modifiers
in the family string,
most notably, it recognizes the string ``"monospace"``.
That is, the family name ``"cairo:monospace"``
will use the monospace version of the internal font family.
If text is drawn without a call to :meth:`select_font_face`,
(nor :meth:`set_font_face` nor :meth:`set_scaled_font`),
the default family is platform-specific,
but is essentially ``"sans-serif"``.
Default slant is :obj:`NORMAL <FONT_SLANT_NORMAL>`,
and default weight is :obj:`NORMAL <FONT_WEIGHT_NORMAL>`.
This method is equivalent to a call to :class:`ToyFontFace`
followed by :meth:`set_font_face`.
'''
pass
def set_font_face(self, font_face):
'''Replaces the current font face with ``font_face``.
:param font_face:
A :class:`FontFace` object,
or :obj:`None` to restore the default font.
'''
pass
def get_font_face(self):
'''Return the current font face.
:returns:
A new :class:`FontFace` object
wrapping an existing cairo object.
'''
pass
def set_font_size(self, size):
'''Sets the current font matrix to a scale by a factor of ``size``,
replacing any font matrix previously set with :meth:`set_font_size`
or :meth:`set_font_matrix`.
This results in a font size of ``size`` user space units.
(More precisely, this matrix will result in the font's
em-square being a ``size`` by ``size`` square in user space.)
If text is drawn without a call to :meth:`set_font_size`,
(nor :meth:`set_font_matrix` nor :meth:`set_scaled_font`),
the default font size is 10.0.
:param size: The new font size, in user space units
:type size: float
'''
pass
def set_font_matrix(self, matrix):
'''Sets the current font matrix to ``matrix``.
The font matrix gives a transformation
from the design space of the font
(in this space, the em-square is 1 unit by 1 unit)
to user space.
Normally, a simple scale is used (see :meth:`set_font_size`),
but a more complex font matrix can be used
to shear the font or stretch it unequally along the two axes.
:param matrix:
A :class:`Matrix`
describing a transform to be applied to the current font.
'''
pass
def get_font_matrix(self):
'''Copies the current font matrix. See :meth:`set_font_matrix`.
:returns: A new :class:`Matrix`.
'''
pass
def set_font_options(self, font_options):
'''Sets a set of custom font rendering options.
Rendering options are derived by merging these options
with the options derived from underlying surface;
if the value in options has a default value
(like :obj:`ANTIALIAS_DEFAULT`),
then the value from the surface is used.
:param font_options: A :class:`FontOptions` object.
'''
pass
def get_font_options(self):
'''Retrieves font rendering options set via :meth:`set_font_options`.
Note that the returned options do not include any options
derived from the underlying surface;
they are literally the options passed to :meth:`set_font_options`.
:return: A new :class:`FontOptions` object.
'''
pass
def set_scaled_font(self, scaled_font):
'''Replaces the current font face, font matrix, and font options
with those of ``scaled_font``.
Except for some translation, the current CTM of the context
should be the same as that of the ``scaled_font``,
which can be accessed using :meth:`ScaledFont.get_ctm`.
:param scaled_font: A :class:`ScaledFont` object.
'''
pass
def get_scaled_font(self):
'''Return the current scaled font.
:return:
A new :class:`ScaledFont` object,
wrapping an existing cairo object.
'''
pass
def font_extents(self):
'''Return the extents of the currently selected font.
Values are given in the current user-space coordinate system.
Because font metrics are in user-space coordinates, they are mostly,
but not entirely, independent of the current transformation matrix.
If you call :meth:`context.scale(2) <scale>`,
text will be drawn twice as big,
but the reported text extents will not be doubled.
They will change slightly due to hinting
(so you can't assume that metrics are independent
of the transformation matrix),
but otherwise will remain unchanged.
:returns:
A ``(ascent, descent, height, max_x_advance, max_y_advance)``
tuple of floats.
``ascent``
The distance that the font extends above the baseline.
Note that this is not always exactly equal to
the maximum of the extents of all the glyphs in the font,
but rather is picked to express the font designer's intent
as to how the font should align with elements above it.
``descent``
The distance that the font extends below the baseline.
This value is positive for typical fonts
that include portions below the baseline.
Note that this is not always exactly equal
to the maximum of the extents of all the glyphs in the font,
but rather is picked to express the font designer's intent
as to how the font should align with elements below it.
``height``
The recommended vertical distance between baselines
when setting consecutive lines of text with the font.
This is greater than ``ascent + descent``
by a quantity known as the line spacing or external leading.
When space is at a premium, most fonts can be set
with only a distance of ``ascent + descent`` between lines.
``max_x_advance``
The maximum distance in the X direction
that the origin is advanced for any glyph in the font.
``max_y_advance``
The maximum distance in the Y direction
that the origin is advanced for any glyph in the font.
This will be zero for normal fonts used for horizontal writing.
(The scripts of East Asia are sometimes written vertically.)
'''
pass
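# --- Added usage sketch; not part of the cairocffi source. ---
# Using the font extents to lay out consecutive baselines
# (`context` as in the earlier sketch).
ascent, descent, height, _, _ = context.font_extents()
for i, line in enumerate(['first line', 'second line']):
    context.move_to(10, 10 + ascent + i * height)
    context.show_text(line)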
def text_extents(self, text):
'''Returns the extents for a string of text.
The extents describe a user-space rectangle
that encloses the "inked" portion of the text,
(as it would be drawn by :meth:`show_text`).
Additionally, the ``x_advance`` and ``y_advance`` values
indicate the amount by which the current point would be advanced
by :meth:`show_text`.
Note that whitespace characters do not directly contribute
to the size of the rectangle (``width`` and ``height``).
They do contribute indirectly by changing the position
of non-whitespace characters.
In particular, trailing whitespace characters are likely
to not affect the size of the rectangle,
though they will affect the x_advance and y_advance values.
Because text extents are in user-space coordinates,
they are mostly, but not entirely,
independent of the current transformation matrix.
If you call :meth:`context.scale(2) <scale>`,
text will be drawn twice as big,
but the reported text extents will not be doubled.
They will change slightly due to hinting
(so you can't assume that metrics are independent
of the transformation matrix),
but otherwise will remain unchanged.
:param text: The text to measure, as an Unicode or UTF-8 string.
:returns:
A ``(x_bearing, y_bearing, width, height, x_advance, y_advance)``
tuple of floats.
``x_bearing``
The horizontal distance
from the origin to the leftmost part of the glyphs as drawn.
Positive if the glyphs lie entirely to the right of the origin.
``y_bearing``
The vertical distance
from the origin to the topmost part of the glyphs as drawn.
Positive only if the glyphs lie completely below the origin;
will usually be negative.
``width``
Width of the glyphs as drawn.
``height``
Height of the glyphs as drawn.
``x_advance``
Distance to advance in the X direction
after drawing these glyphs.
``y_advance``
Distance to advance in the Y direction
after drawing these glyphs.
Will typically be zero except for vertical text layout
as found in East-Asian languages.
'''
pass
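# --- Added usage sketch; not part of the cairocffi source. ---
# Centring a string in a 100x100 box using its inked extents
# (`context` as in the earlier sketch).
xb, yb, w, h, xa, ya = context.text_extents('Hi')
context.move_to((100 - w) / 2 - xb, (100 - h) / 2 - yb)
context.show_text('Hi')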
def glyph_extents(self, glyphs):
'''Returns the extents for a list of glyphs.
The extents describe a user-space rectangle
that encloses the "inked" portion of the glyphs,
(as it would be drawn by :meth:`show_glyphs`).
Additionally, the ``x_advance`` and ``y_advance`` values
indicate the amount by which the current point would be advanced
by :meth:`show_glyphs`.
:param glyphs:
A list of glyphs.
See :meth:`show_text_glyphs` for the data structure.
:returns:
A ``(x_bearing, y_bearing, width, height, x_advance, y_advance)``
tuple of floats.
See :meth:`text_extents` for details.
'''
pass
def show_text(self, text):
'''A drawing operator that generates the shape from a string text,
rendered according to the current
font :meth:`face <set_font_face>`,
font :meth:`size <set_font_size>`
(font :meth:`matrix <set_font_matrix>`),
and font :meth:`options <set_font_options>`.
This method first computes a set of glyphs for the string of text.
The first glyph is placed so that its origin is at the current point.
The origin of each subsequent glyph
is offset from that of the previous glyph
by the advance values of the previous glyph.
After this call the current point is moved
to the origin of where the next glyph would be placed
in this same progression.
That is, the current point will be at
the origin of the final glyph offset by its advance values.
This allows for easy display of a single logical string
with multiple calls to :meth:`show_text`.
:param text: The text to show, as an Unicode or UTF-8 string.
.. note::
This method is part of
what the cairo designers call the "toy" text API.
It is convenient for short demos and simple programs,
but it is not expected to be adequate
for serious text-using applications.
See :ref:`fonts` for details
and :meth:`show_glyphs` for the "real" text display API in cairo.
'''
pass
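# --- Added usage sketch; not part of the cairocffi source. ---
# The "toy" text API end to end (`context` as in the earlier sketch).
context.select_font_face('monospace')
context.set_font_size(12)
context.move_to(10, 40)
context.show_text('Hello, ')
context.show_text('world')   # continues from the advanced current point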
def show_glyphs(self, glyphs):
'''A drawing operator that generates the shape from a list of glyphs,
rendered according to the current
font :meth:`face <set_font_face>`,
font :meth:`size <set_font_size>`
(font :meth:`matrix <set_font_matrix>`),
and font :meth:`options <set_font_options>`.
:param glyphs:
The glyphs to show.
See :meth:`show_text_glyphs` for the data structure.
'''
pass
def show_text_glyphs(self, text, glyphs, clusters, cluster_flags=0):
'''This operation has rendering effects similar to :meth:`show_glyphs`
but, if the target surface supports it
(see :meth:`Surface.has_show_text_glyphs`),
uses the provided text and cluster mapping
to embed the text for the glyphs shown in the output.
If the target does not support the extended attributes,
this method acts like the basic :meth:`show_glyphs`
as if it had been passed ``glyphs``.
The mapping between ``text`` and ``glyphs``
is provided by an list of clusters.
Each cluster covers a number of UTF-8 text bytes and glyphs,
and neighboring clusters cover neighboring areas
of ``text`` and ``glyphs``.
The clusters should collectively cover ``text`` and ``glyphs``
in entirety.
:param text:
The text to show, as an Unicode or UTF-8 string.
Because of how ``clusters`` work,
using UTF-8 bytes might be more convenient.
:param glyphs:
A list of glyphs.
Each glyph is a ``(glyph_id, x, y)`` tuple.
``glyph_id`` is an opaque integer.
Its exact interpretation depends on the font technology being used.
``x`` and ``y`` are the float offsets
in the X and Y direction
between the origin used for drawing or measuring the string
and the origin of this glyph.
Note that the offsets are not cumulative.
When drawing or measuring text,
each glyph is individually positioned
with respect to the overall origin.
:param clusters:
A list of clusters.
A text cluster is a minimal mapping of some glyphs
corresponding to some UTF-8 text,
represented as a ``(num_bytes, num_glyphs)`` tuple of integers,
the number of UTF-8 bytes and glyphs covered by the cluster.
For a cluster to be valid,
both ``num_bytes`` and ``num_glyphs`` should be non-negative,
and at least one should be non-zero.
Note that clusters with zero glyphs
are not as well supported as normal clusters.
For example, PDF rendering applications
typically ignore those clusters when PDF text is being selected.
:type cluster_flags: int
:param cluster_flags:
Flags (as a bit field) for the cluster mapping.
The first cluster always covers bytes
from the beginning of ``text``.
If ``cluster_flags`` does not have
the :obj:`TEXT_CLUSTER_FLAG_BACKWARD` flag set,
the first cluster also covers the beginning of ``glyphs``,
otherwise it covers the end of the ``glyphs`` list
and following clusters move backward.
'''
pass
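# --- Added data-structure sketch; not part of the cairocffi source. ---
# The glyph ids below are made up for illustration; real values come
# from a shaping engine or from ScaledFont.text_to_glyphs.
glyphs = [(36, 10, 40), (75, 18, 40)]   # (glyph_id, x, y) per glyph
clusters = [(1, 1), (1, 1)]             # (num_utf8_bytes, num_glyphs)
context.show_text_glyphs('Hi', glyphs, clusters)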
def show_page(self):
'''Emits and clears the current page
for backends that support multiple pages.
Use :meth:`copy_page` if you don't want to clear the page.
This is a convenience method
that simply calls :meth:`Surface.show_page`
on the context’s target.
'''
pass
def copy_page(self):
'''Emits the current page for backends that support multiple pages,
but doesn't clear it,
so the contents of the current page will be retained
for the next page too.
Use :meth:`show_page` if you want to clear the page.
This is a convenience method
that simply calls :meth:`Surface.copy_page`
on the context’s target.
'''
pass
def tag_begin(self, tag_name, attributes=None):
'''Marks the beginning of the ``tag_name`` structure.
Call :meth:`tag_end` with the same ``tag_name`` to mark the end of the
structure.
The attributes string is of the form "key1=value1 key2=value2 ...".
Values may be boolean (true/false or 1/0), integer, float, string, or
an array.
String values are enclosed in single quotes ('). Single quotes and
backslashes inside the string should be escaped with a backslash.
Boolean values may be set to true by only specifying the key, e.g. the
attribute string "key" is equivalent to "key=true".
Arrays are enclosed in '[]', e.g. "rect=[1.2 4.3 2.0 3.0]".
If no attributes are required, ``attributes`` can be omitted, an empty
string or None.
See cairo's Tags and Links Description for the list of tags and
attributes.
Invalid nesting of tags or invalid attributes will cause the context to
shut down with a status of ``CAIRO_STATUS_TAG_ERROR``.
See :meth:`tag_end`.
:param tag_name: tag name
:param attributes: tag attributes
*New in cairo 1.16.*
*New in cairocffi 0.9.*
'''
pass
def tag_end(self, tag_name):
'''Marks the end of the ``tag_name`` structure.
Invalid nesting of tags will cause the context to shut down with a
status of ``CAIRO_STATUS_TAG_ERROR``.
See :meth:`tag_begin`.
:param tag_name: tag name
*New in cairo 1.16.*
*New in cairocffi 0.9.*
'''
pass
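# --- Added usage sketch; not part of the cairocffi source. ---
# Wrapping drawing in a hyperlink tag with the attribute syntax
# described above. Needs cairo >= 1.16 and a surface that supports
# tags (e.g. a PDF surface); the URI is a placeholder.
context.tag_begin('Link', "uri='https://example.com'")
context.show_text('click me')
context.tag_end('Link')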
def set_hairline(self, enabled):
'''Sets lines within the cairo context to be hairlines.
Hairlines are logically zero-width lines that are drawn at the thinnest
renderable width possible in the current context.
:type enabled: bool
:param enabled: whether or not to set hairline mode
*New in cairo 1.18.*
'''
pass
def get_hairline(self):
'''Returns whether or not hairline mode is set, as set by :meth:`set_hairline`.
:returns: whether hairline mode is set
*New in cairo 1.18.*
'''
pass
| 111 | 105 | 19 | 3 | 4 | 12 | 1 | 3.53 | 1 | 13 | 7 | 0 | 108 | 1 | 109 | 109 | 2,195 | 421 | 392 | 141 | 280 | 1,382 | 364 | 139 | 254 | 3 | 1 | 1 | 115 |
143,724 |
Kozea/cairocffi
|
cairocffi/__init__.py
|
cairocffi._keepref
|
class _keepref(object): # noqa: N801
"""Function wrapper that keeps a reference to another object."""
def __init__(self, ref, func):
self.ref = ref
self.func = func
def __call__(self, *args, **kwargs):
self.func(*args, **kwargs)
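# (Added note, not part of the source.) ``ffi.gc`` destructors can run
# during interpreter shutdown, when module globals such as ``cairo`` may
# already have been cleared; wrapping the destructor in ``_keepref``
# keeps both the module and the function alive until then, e.g.:
#     ffi.gc(pointer, _keepref(cairo, cairo.cairo_surface_destroy))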
|
class _keepref(object):
'''Function wrapper that keeps a reference to another object.'''
def __init__(self, ref, func):
pass
def __call__(self, *args, **kwargs):
pass
| 3 | 1 | 3 | 0 | 3 | 0 | 1 | 0.33 | 1 | 0 | 0 | 0 | 2 | 2 | 2 | 2 | 8 | 1 | 6 | 5 | 3 | 2 | 6 | 5 | 3 | 1 | 1 | 0 | 2 |
143,725 |
Kozea/cairocffi
|
cairocffi/fonts.py
|
cairocffi.fonts.ScaledFont
|
class ScaledFont(object):
"""Creates a :class:`ScaledFont` object from a font face and matrices
that describe the size of the font
and the environment in which it will be used.
:param font_face: A :class:`FontFace` object.
:type font_matrix: Matrix
:param font_matrix:
Font space to user space transformation matrix for the font.
In the simplest case of a N point font,
this matrix is just a scale by N,
but it can also be used to shear the font
or stretch it unequally along the two axes.
If omitted, a scale by 10 matrix is assumed (ie. a 10 point font size).
See :class:`Context.set_font_matrix`.
:type ctm: Matrix
:param ctm:
User to device transformation matrix with which the font will be used.
If omitted, an identity matrix is assumed.
:param options:
The :class:`FontOptions` object to use
when getting metrics for the font and rendering with it.
If omitted, the default options are assumed.
"""
def __init__(self, font_face, font_matrix=None, ctm=None, options=None):
if font_matrix is None:
font_matrix = Matrix()
font_matrix.scale(10) # Default font size
if ctm is None:
ctm = Matrix()
if options is None:
options = FontOptions()
self._init_pointer(cairo.cairo_scaled_font_create(
font_face._pointer, font_matrix._pointer,
ctm._pointer, options._pointer))
def _init_pointer(self, pointer):
self._pointer = ffi.gc(
pointer, _keepref(cairo, cairo.cairo_scaled_font_destroy))
self._check_status()
def _check_status(self):
_check_status(cairo.cairo_scaled_font_status(self._pointer))
@staticmethod
def _from_pointer(pointer, incref):
"""Wrap an existing ``cairo_scaled_font_t *`` cdata pointer.
:type incref: bool
:param incref:
Whether to increase the :ref:`reference count <refcounting>` now.
:return: A new :class:`ScaledFont` instance.
"""
if pointer == ffi.NULL:
raise ValueError('Null pointer')
if incref:
cairo.cairo_scaled_font_reference(pointer)
self = object.__new__(ScaledFont)
ScaledFont._init_pointer(self, pointer)
return self
def get_font_face(self):
"""Return the font face that this scaled font uses.
:returns:
A new instance of :class:`FontFace` (or one of its sub-classes).
Might be the same font face passed to :class:`ScaledFont`,
but this does not hold true for all possible cases.
"""
return FontFace._from_pointer(
cairo.cairo_scaled_font_get_font_face(self._pointer), incref=True)
def get_font_options(self):
"""Copies the scaled font’s options.
:returns: A new :class:`FontOptions` object.
"""
font_options = FontOptions()
cairo.cairo_scaled_font_get_font_options(
self._pointer, font_options._pointer)
return font_options
def get_font_matrix(self):
"""Copies the scaled font’s font matrix.
:returns: A new :class:`Matrix` object.
"""
matrix = Matrix()
cairo.cairo_scaled_font_get_font_matrix(self._pointer, matrix._pointer)
self._check_status()
return matrix
def get_ctm(self):
"""Copies the scaled font’s font current transform matrix.
Note that the translation offsets ``(x0, y0)`` of the CTM
are ignored by :class:`ScaledFont`.
So, the matrix this method returns always has 0 as ``x0`` and ``y0``.
:returns: A new :class:`Matrix` object.
"""
matrix = Matrix()
cairo.cairo_scaled_font_get_ctm(self._pointer, matrix._pointer)
self._check_status()
return matrix
def get_scale_matrix(self):
"""Copies the scaled font’s scaled matrix.
The scale matrix is product of the font matrix
and the ctm associated with the scaled font,
and hence is the matrix mapping from font space to device space.
:returns: A new :class:`Matrix` object.
"""
matrix = Matrix()
cairo.cairo_scaled_font_get_scale_matrix(
self._pointer, matrix._pointer)
self._check_status()
return matrix
def extents(self):
"""Return the scaled font’s extents.
See :meth:`Context.font_extents`.
:returns:
A ``(ascent, descent, height, max_x_advance, max_y_advance)``
tuple of floats.
"""
extents = ffi.new('cairo_font_extents_t *')
cairo.cairo_scaled_font_extents(self._pointer, extents)
self._check_status()
return (
extents.ascent, extents.descent, extents.height,
extents.max_x_advance, extents.max_y_advance)
def text_extents(self, text):
"""Returns the extents for a string of text.
The extents describe a user-space rectangle
that encloses the "inked" portion of the text,
(as it would be drawn by :meth:`Context.show_text`).
Additionally, the ``x_advance`` and ``y_advance`` values
indicate the amount by which the current point would be advanced
by :meth:`Context.show_text`.
:param text: The text to measure, as an Unicode or UTF-8 string.
:returns:
A ``(x_bearing, y_bearing, width, height, x_advance, y_advance)``
tuple of floats.
See :meth:`Context.text_extents` for details.
"""
extents = ffi.new('cairo_text_extents_t *')
cairo.cairo_scaled_font_text_extents(
self._pointer, _encode_string(text), extents)
self._check_status()
return (
extents.x_bearing, extents.y_bearing,
extents.width, extents.height,
extents.x_advance, extents.y_advance)
def glyph_extents(self, glyphs):
"""Returns the extents for a list of glyphs.
The extents describe a user-space rectangle
that encloses the "inked" portion of the glyphs,
(as it would be drawn by :meth:`Context.show_glyphs`).
Additionally, the ``x_advance`` and ``y_advance`` values
indicate the amount by which the current point would be advanced
by :meth:`Context.show_glyphs`.
:param glyphs:
A list of glyphs, as returned by :meth:`text_to_glyphs`.
Each glyph is a ``(glyph_id, x, y)`` tuple
of an integer and two floats.
:returns:
A ``(x_bearing, y_bearing, width, height, x_advance, y_advance)``
tuple of floats.
See :meth:`Context.text_extents` for details.
"""
glyphs = ffi.new('cairo_glyph_t[]', glyphs)
extents = ffi.new('cairo_text_extents_t *')
cairo.cairo_scaled_font_glyph_extents(
self._pointer, glyphs, len(glyphs), extents)
self._check_status()
return (
extents.x_bearing, extents.y_bearing,
extents.width, extents.height,
extents.x_advance, extents.y_advance)
def text_to_glyphs(self, x, y, text, with_clusters):
"""Converts a string of text to a list of glyphs,
optionally with cluster mapping,
that can be used to render later using this scaled font.
The output values can be readily passed to
:meth:`Context.show_text_glyphs`, :meth:`Context.show_glyphs`
or related methods,
assuming that the exact same :class:`ScaledFont`
is used for the operation.
:type x: float
:type y: float
:type with_clusters: bool
:param x: X position to place first glyph.
:param y: Y position to place first glyph.
:param text: The text to convert, as an Unicode or UTF-8 string.
:param with_clusters: Whether to compute the cluster mapping.
:returns:
A ``(glyphs, clusters, clusters_flags)`` tuple
if ``with_clusters`` is true, otherwise just ``glyphs``.
See :meth:`Context.show_text_glyphs` for the data structure.
.. note::
This method is part of
what the cairo designers call the "toy" text API.
It is convenient for short demos and simple programs,
but it is not expected to be adequate
for serious text-using applications.
See :ref:`fonts` for details
and :meth:`Context.show_glyphs`
for the "real" text display API in cairo.
"""
glyphs = ffi.new('cairo_glyph_t **', ffi.NULL)
num_glyphs = ffi.new('int *')
if with_clusters:
clusters = ffi.new('cairo_text_cluster_t **', ffi.NULL)
num_clusters = ffi.new('int *')
cluster_flags = ffi.new('cairo_text_cluster_flags_t *')
else:
clusters = ffi.NULL
num_clusters = ffi.NULL
cluster_flags = ffi.NULL
# TODO: Pass len_utf8 explicitly to support NULL bytes?
status = cairo.cairo_scaled_font_text_to_glyphs(
self._pointer, x, y, _encode_string(text), -1,
glyphs, num_glyphs, clusters, num_clusters, cluster_flags)
glyphs = ffi.gc(glyphs[0], _keepref(cairo, cairo.cairo_glyph_free))
if with_clusters:
clusters = ffi.gc(
clusters[0], _keepref(cairo, cairo.cairo_text_cluster_free))
_check_status(status)
glyphs = [
(glyph.index, glyph.x, glyph.y)
for i in range(num_glyphs[0])
for glyph in [glyphs[i]]]
if with_clusters:
clusters = [
(cluster.num_bytes, cluster.num_glyphs)
for i in range(num_clusters[0])
for cluster in [clusters[i]]]
return glyphs, clusters, cluster_flags[0]
else:
return glyphs
|
class ScaledFont(object):
'''Creates a :class:`ScaledFont` object from a font face and matrices
that describe the size of the font
and the environment in which it will be used.
:param font_face: A :class:`FontFace` object.
:type font_matrix: Matrix
:param font_matrix:
Font space to user space transformation matrix for the font.
In the simplest case of a N point font,
this matrix is just a scale by N,
but it can also be used to shear the font
or stretch it unequally along the two axes.
If omitted, a scale by 10 matrix is assumed (i.e. a 10-point font size).
See :class:`Context.set_font_matrix`.
:type ctm: Matrix
:param ctm:
User to device transformation matrix with which the font will be used.
If omitted, an identity matrix is assumed.
:param options:
The :class:`FontOptions` object to use
when getting metrics for the font and rendering with it.
If omitted, the default options are assumed.
'''
def __init__(self, font_face, font_matrix=None, ctm=None, options=None):
pass
def _init_pointer(self, pointer):
pass
def _check_status(self):
pass
@staticmethod
def _from_pointer(pointer, incref):
'''Wrap an existing ``cairo_scaled_font_t *`` cdata pointer.
:type incref: bool
:param incref:
Whether to increase the :ref:`reference count <refcounting>` now.
:return: A new :class:`ScaledFont` instance.
'''
pass
def get_font_face(self):
'''Return the font face that this scaled font uses.
:returns:
A new instance of :class:`FontFace` (or one of its sub-classes).
Might wrap the same font face passed to :class:`ScaledFont`,
but this does not hold true for all possible cases.
'''
pass
def get_font_options(self):
'''Copies the scaled font’s options.
:returns: A new :class:`FontOptions` object.
'''
pass
def get_font_matrix(self):
'''Copies the scaled font’s font matrix.
:returns: A new :class:`Matrix` object.
'''
pass
def get_ctm(self):
'''Copies the scaled font’s current transform matrix (CTM).
Note that the translation offsets ``(x0, y0)`` of the CTM
are ignored by :class:`ScaledFont`.
So, the matrix this method returns always has 0 as ``x0`` and ``y0``.
:returns: A new :class:`Matrix` object.
'''
pass
def get_scale_matrix(self):
'''Copies the scaled font’s scaled matrix.
The scale matrix is the product of the font matrix
and the ctm associated with the scaled font,
and hence is the matrix mapping from font space to device space.
:returns: A new :class:`Matrix` object.
'''
pass
def extents(self):
'''Return the scaled font’s extents.
See :meth:`Context.font_extents`.
:returns:
A ``(ascent, descent, height, max_x_advance, max_y_advance)``
tuple of floats.
'''
pass
def text_extents(self, text):
'''Returns the extents for a string of text.
The extents describe a user-space rectangle
that encloses the "inked" portion of the text,
(as it would be drawn by :meth:`Context.show_text`).
Additionally, the ``x_advance`` and ``y_advance`` values
indicate the amount by which the current point would be advanced
by :meth:`Context.show_text`.
:param text: The text to measure, as a Unicode or UTF-8 string.
:returns:
A ``(x_bearing, y_bearing, width, height, x_advance, y_advance)``
tuple of floats.
See :meth:`Context.text_extents` for details.
'''
pass
def glyph_extents(self, glyphs):
'''Returns the extents for a list of glyphs.
The extents describe a user-space rectangle
that encloses the "inked" portion of the glyphs,
(as it would be drawn by :meth:`Context.show_glyphs`).
Additionally, the ``x_advance`` and ``y_advance`` values
indicate the amount by which the current point would be advanced
by :meth:`Context.show_glyphs`.
:param glyphs:
A list of glyphs, as returned by :meth:`text_to_glyphs`.
Each glyph is a ``(glyph_id, x, y)`` tuple
of an integer and two floats.
:returns:
A ``(x_bearing, y_bearing, width, height, x_advance, y_advance)``
tuple of floats.
See :meth:`Context.text_extents` for details.
'''
pass
def text_to_glyphs(self, x, y, text, with_clusters):
'''Converts a string of text to a list of glyphs,
optionally with cluster mapping,
that can be used to render later using this scaled font.
The output values can be readily passed to
:meth:`Context.show_text_glyphs`, :meth:`Context.show_glyphs`
or related methods,
assuming that the exact same :class:`ScaledFont`
is used for the operation.
:type x: float
:type y: float
:type with_clusters: bool
:param x: X position to place first glyph.
:param y: Y position to place first glyph.
:param text: The text to convert, as a Unicode or UTF-8 string.
:param with_clusters: Whether to compute the cluster mapping.
:returns:
A ``(glyphs, clusters, clusters_flags)`` tuple
if ``with_clusters`` is true, otherwise just ``glyphs``.
See :meth:`Context.show_text_glyphs` for the data structure.
.. note::
This method is part of
what the cairo designers call the "toy" text API.
It is convenient for short demos and simple programs,
but it is not expected to be adequate
for serious text-using applications.
See :ref:`fonts` for details
and :meth:`Context.show_glyphs`
for the "real" text display API in cairo.
'''
pass
| 15 | 11 | 18 | 2 | 8 | 7 | 2 | 1.09 | 1 | 6 | 4 | 0 | 12 | 1 | 13 | 13 | 266 | 41 | 108 | 31 | 93 | 118 | 80 | 29 | 66 | 4 | 1 | 1 | 21 |
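As a quick check on the ScaledFont API shown above, a minimal usage sketch (a hypothetical snippet, assuming cairocffi is importable; 'sans' is an arbitrary family name)::

    import cairocffi

    face = cairocffi.ToyFontFace('sans')
    # font_matrix maps font space to user space; xx=yy=12 gives a 12-point font.
    font = cairocffi.ScaledFont(face, cairocffi.Matrix(xx=12, yy=12))
    # (x_bearing, y_bearing, width, height, x_advance, y_advance):
    print(font.text_extents('Hello'))
    # Positioned glyphs plus the byte/glyph cluster mapping:
    glyphs, clusters, flags = font.text_to_glyphs(0, 0, 'Hello', with_clusters=True)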
143,726 | Kozea/cairocffi | cairocffi/surfaces.py | cairocffi.surfaces.Surface |
class Surface(object):
"""The base class for all surface types.
Should not be instantiated directly, but see :doc:`cffi_api`.
An instance may be returned for cairo surface types
that are not (yet) defined in cairocffi.
A :class:`Surface` represents an image,
either as the destination of a drawing operation
or as source when drawing onto another surface.
To draw to a :class:`Surface`,
create a cairo :class:`Context` with the surface as the target.
There are different sub-classes of :class:`Surface`
for different drawing backends;
for example, :class:`ImageSurface` is a bitmap image in memory.
The initial contents of a surface after creation
depend upon the manner of its creation.
If cairo creates the surface and backing storage for the user,
it will be initially cleared;
for example, :class:`ImageSurface` and :meth:`create_similar`.
Alternatively, if the user passes in a reference
to some backing storage and asks cairo to wrap that in a :class:`Surface`,
then the contents are not modified;
for example, :class:`ImageSurface` with a ``data`` argument.
"""
def __init__(self, pointer, target_keep_alive=None):
self._pointer = ffi.gc(
pointer, _keepref(cairo, cairo.cairo_surface_destroy))
self._check_status()
if hasattr(target_keep_alive, '__array_interface__'):
is_empty = target_keep_alive.size == 0
else:
is_empty = target_keep_alive in (None, ffi.NULL)
if not is_empty:
keep_alive = KeepAlive(target_keep_alive)
_check_status(cairo.cairo_surface_set_user_data(
self._pointer, SURFACE_TARGET_KEY, *keep_alive.closure))
keep_alive.save()
def _check_status(self):
_check_status(cairo.cairo_surface_status(self._pointer))
@staticmethod
def _from_pointer(pointer, incref):
"""Wrap an existing ``cairo_surface_t *`` cdata pointer.
:type incref: bool
:param incref:
Whether to increase the :ref:`reference count <refcounting>` now.
:return:
A new instance of :class:`Surface` or one of its sub-classes,
depending on the surface’s type.
"""
if pointer == ffi.NULL:
raise ValueError('Null pointer')
if incref:
cairo.cairo_surface_reference(pointer)
self = object.__new__(SURFACE_TYPE_TO_CLASS.get(
cairo.cairo_surface_get_type(pointer), Surface))
Surface.__init__(self, pointer) # Skip the subclass’s __init__
return self
def create_similar(self, content, width, height):
"""Create a new surface that is as compatible as possible
for uploading to and the use in conjunction with this surface.
For example the new surface will have the same fallback resolution
and :class:`FontOptions`.
Generally, the new surface will also use the same backend as this surface,
unless that is not possible for some reason.
Initially the surface contents are all 0
(transparent if contents have transparency, black otherwise.)
Use :meth:`create_similar_image` if you need an image surface
which can be painted quickly to the target surface.
:param content: the :ref:`CONTENT` string for the new surface.
:param width: width of the new surface (in device-space units)
:param height: height of the new surface (in device-space units)
:type content: str
:type width: int
:type height: int
:returns: A new instance of :class:`Surface` or one of its subclasses.
"""
return Surface._from_pointer(
cairo.cairo_surface_create_similar(
self._pointer, content, width, height),
incref=False)
def create_similar_image(self, content, width, height):
"""
Create a new image surface that is as compatible as possible
for uploading to and the use in conjunction with this surface.
However, this surface can still be used like any normal image surface.
Initially the surface contents are all 0
(transparent if contents have transparency, black otherwise.)
Use :meth:`create_similar` if you don't need an image surface.
:param content: the :ref:`FORMAT` string for the new surface
:param width: width of the new surface (in device-space units)
:param height: height of the new surface (in device-space units)
:type content: str
:type width: int
:type height: int
:returns: A new :class:`ImageSurface` instance.
"""
return Surface._from_pointer(
cairo.cairo_surface_create_similar_image(
self._pointer, content, width, height),
incref=False)
def create_for_rectangle(self, x, y, width, height):
"""
Create a new surface that is a rectangle within this surface.
All operations drawn to this surface are then clipped and translated
onto the target surface.
Nothing drawn via this sub-surface outside of its bounds
is drawn onto the target surface,
making this a useful method for passing constrained child surfaces
to library routines that draw directly onto the parent surface,
i.e. with no further backend allocations,
double buffering or copies.
.. note::
As of cairo 1.12,
the semantics of subsurfaces have not been finalized yet
unless the rectangle is in full device units,
is contained within the extents of the target surface,
and the target or subsurface's device transforms are not changed.
:param x:
The x-origin of the sub-surface
from the top-left of the target surface (in device-space units)
:param y:
The y-origin of the sub-surface
from the top-left of the target surface (in device-space units)
:param width:
Width of the sub-surface (in device-space units)
:param height:
Height of the sub-surface (in device-space units)
:type x: float
:type y: float
:type width: float
:type height: float
:returns:
A new :class:`Surface` object.
*New in cairo 1.10.*
"""
return Surface._from_pointer(
cairo.cairo_surface_create_for_rectangle(
self._pointer, x, y, width, height),
incref=False)
def get_content(self):
"""Returns the :ref:`CONTENT` string of this surface,
which indicates whether the surface contains color
and/or alpha information.
"""
return cairo.cairo_surface_get_content(self._pointer)
def has_show_text_glyphs(self):
"""Returns whether the surface supports sophisticated
:meth:`Context.show_text_glyphs` operations.
That is, whether it actually uses the text and cluster data
provided to a :meth:`Context.show_text_glyphs` call.
.. note::
Even if this method returns :obj:`False`,
a :meth:`Context.show_text_glyphs` operation targeted at this surface
will still succeed.
It just will act like a :meth:`Context.show_glyphs` operation.
Users can use this method to avoid computing UTF-8 text
and cluster mapping if the target surface does not use it.
"""
return bool(cairo.cairo_surface_has_show_text_glyphs(self._pointer))
def set_device_offset(self, x_offset, y_offset):
""" Sets an offset that is added to the device coordinates
determined by the CTM when drawing to surface.
One use case for this method is
when we want to create a :class:`Surface` that redirects drawing
for a portion of an onscreen surface
to an offscreen surface in a way that is
completely invisible to the user of the cairo API.
Setting a transformation via :meth:`Context.translate`
isn't sufficient to do this,
since methods like :meth:`Context.device_to_user`
will expose the hidden offset.
Note that the offset affects drawing to the surface
as well as using the surface in a source pattern.
:param x_offset:
The offset in the X direction, in device units
:param y_offset:
The offset in the Y direction, in device units
"""
cairo.cairo_surface_set_device_offset(
self._pointer, x_offset, y_offset)
self._check_status()
def get_device_offset(self):
"""Returns the previous device offset set by :meth:`set_device_offset`.
:returns: ``(x_offset, y_offset)``
"""
offsets = ffi.new('double[2]')
cairo.cairo_surface_get_device_offset(
self._pointer, offsets + 0, offsets + 1)
return tuple(offsets)
def set_fallback_resolution(self, x_pixels_per_inch, y_pixels_per_inch):
"""
Set the horizontal and vertical resolution for image fallbacks.
When certain operations aren't supported natively by a backend,
cairo will fallback by rendering operations to an image
and then overlaying that image onto the output.
For backends that are natively vector-oriented,
this method can be used to set the resolution
used for these image fallbacks,
(larger values will result in more detailed images,
but also larger file sizes).
Some examples of natively vector-oriented backends are
the ps, pdf, and svg backends.
For backends that are natively raster-oriented,
image fallbacks are still possible,
but they are always performed at the native device resolution.
So this method has no effect on those backends.
.. note::
The fallback resolution only takes effect
at the time of completing a page
(with :meth:`show_page` or :meth:`copy_page`)
so there is currently no way to have
more than one fallback resolution in effect on a single page.
The default fallback resolution is
300 pixels per inch in both dimensions.
:param x_pixels_per_inch: horizontal resolution in pixels per inch
:type x_pixels_per_inch: float
:param y_pixels_per_inch: vertical resolution in pixels per inch
:type y_pixels_per_inch: float
"""
cairo.cairo_surface_set_fallback_resolution(
self._pointer, x_pixels_per_inch, y_pixels_per_inch)
self._check_status()
def get_fallback_resolution(self):
"""Returns the previous fallback resolution
set by :meth:`set_fallback_resolution`,
or default fallback resolution if never set.
:returns: ``(x_pixels_per_inch, y_pixels_per_inch)``
"""
ppi = ffi.new('double[2]')
cairo.cairo_surface_get_fallback_resolution(
self._pointer, ppi + 0, ppi + 1)
return tuple(ppi)
def get_font_options(self):
"""Retrieves the default font rendering options for the surface.
This allows display surfaces to report the correct subpixel order
for rendering on them,
print surfaces to disable hinting of metrics and so forth.
The result can then be used with :class:`ScaledFont`.
:returns: A new :class:`FontOptions` object.
"""
font_options = FontOptions()
cairo.cairo_surface_get_font_options(
self._pointer, font_options._pointer)
return font_options
def set_device_scale(self, x_scale, y_scale):
"""Sets a scale that is multiplied to the device coordinates determined
by the CTM when drawing to surface.
One common use for this is to render to very high resolution display
devices at a scale factor, so that code that assumes 1 pixel will be a
certain size will still work. Setting a transformation via
cairo_translate() isn't sufficient to do this, since functions like
cairo_device_to_user() will expose the hidden scale.
Note that the scale affects drawing to the surface as well as using the
surface in a source pattern.
:param x_scale: the scale in the X direction, in device units.
:param y_scale: the scale in the Y direction, in device units.
*New in cairo 1.14.*
*New in cairocffi 0.9.*
"""
cairo.cairo_surface_set_device_scale(self._pointer, x_scale, y_scale)
self._check_status()
def get_device_scale(self):
"""Returns the previous device offset set by :meth:`set_device_scale`.
*New in cairo 1.14.*
*New in cairocffi 0.9.*
"""
size = ffi.new('double[2]')
cairo.cairo_surface_get_device_scale(self._pointer, size + 0, size + 1)
return tuple(size)
def set_mime_data(self, mime_type, data):
"""
Attach an image in the format ``mime_type`` to this surface.
To remove the data from a surface,
call this method with same mime type and :obj:`None` for data.
The attached image (or filename) data can later
be used by backends which support it
(currently: PDF, PS, SVG and Win32 Printing surfaces)
to emit this data instead of making a snapshot of the surface.
This approach tends to be faster
and requires less memory and disk space.
The recognized MIME types are the following:
``"image/png"``
The Portable Network Graphics image file format (ISO/IEC 15948).
``"image/jpeg"``
The Joint Photographic Experts Group (JPEG)
image coding standard (ISO/IEC 10918-1).
``"image/jp2"``
The Joint Photographic Experts Group (JPEG) 2000
image coding standard (ISO/IEC 15444-1).
``"text/x-uri"``
URL for an image file (unofficial MIME type).
See corresponding backend surface docs
for details about which MIME types it can handle.
Caution: the associated MIME data will be discarded
if you draw on the surface afterwards.
Use this method with care.
:param str mime_type: The MIME type of the image data.
:param bytes data: The image data to attach to the surface.
*New in cairo 1.10.*
"""
mime_type = ffi.new('char[]', mime_type.encode('utf8'))
if data is None:
_check_status(cairo.cairo_surface_set_mime_data(
self._pointer, mime_type, ffi.NULL, 0, ffi.NULL, ffi.NULL))
else:
# TODO: avoid making a copy here if possible.
length = len(data)
data = ffi.new('unsigned char[]', data)
keep_alive = KeepAlive(data, mime_type)
_check_status(cairo.cairo_surface_set_mime_data(
self._pointer, mime_type, data, length,
*keep_alive.closure))
keep_alive.save() # Only on success
def get_mime_data(self, mime_type):
"""Return mime data previously attached to surface
using the specified mime type.
:param str mime_type: The MIME type of the image data.
:returns:
A CFFI buffer object, or :obj:`None`
if no data has been attached with the given mime type.
*New in cairo 1.10.*
"""
buffer_address = ffi.new('unsigned char **')
buffer_length = ffi.new('unsigned long *')
mime_type = ffi.new('char[]', mime_type.encode('utf8'))
cairo.cairo_surface_get_mime_data(
self._pointer, mime_type, buffer_address, buffer_length)
return (ffi.buffer(buffer_address[0], buffer_length[0])
if buffer_address[0] != ffi.NULL else None)
def supports_mime_type(self, mime_type):
"""Return whether surface supports ``mime_type``.
:param str mime_type: The MIME type of the image data.
*New in cairo 1.12.*
"""
mime_type = ffi.new('char[]', mime_type.encode('utf8'))
return bool(cairo.cairo_surface_supports_mime_type(
self._pointer, mime_type))
def mark_dirty(self):
"""Tells cairo that drawing has been done to surface
using means other than cairo,
and that cairo should reread any cached areas.
Note that you must call :meth:`flush` before doing such drawing.
"""
cairo.cairo_surface_mark_dirty(self._pointer)
self._check_status()
def mark_dirty_rectangle(self, x, y, width, height):
"""
Like :meth:`mark_dirty`,
but drawing has been done only to the specified rectangle,
so that cairo can retain cached contents
for other parts of the surface.
Any cached clip set on the surface will be reset by this method,
to make sure that future cairo calls have the clip set
that they expect.
:param x: X coordinate of dirty rectangle.
:param y: Y coordinate of dirty rectangle.
:param width: Width of dirty rectangle.
:param height: Height of dirty rectangle.
:type x: float
:type y: float
:type width: float
:type height: float
"""
cairo.cairo_surface_mark_dirty_rectangle(
self._pointer, x, y, width, height)
self._check_status()
def show_page(self):
"""Emits and clears the current page
for backends that support multiple pages.
Use :meth:`copy_page` if you don't want to clear the page.
:meth:`Context.show_page` is a convenience method for this.
"""
cairo.cairo_surface_show_page(self._pointer)
self._check_status()
def copy_page(self):
"""Emits the current page for backends that support multiple pages,
but doesn't clear it,
so that the contents of the current page will be retained
for the next page.
Use :meth:`show_page` if you want to get an empty page
after the emission.
"""
cairo.cairo_surface_copy_page(self._pointer)
self._check_status()
def flush(self):
"""Do any pending drawing for the surface
and also restore any temporary modifications
cairo has made to the surface's state.
This method must be called before switching
from drawing on the surface with cairo
to drawing on it directly with native APIs.
If the surface doesn't support direct access,
then this method does nothing.
"""
cairo.cairo_surface_flush(self._pointer)
self._check_status()
def finish(self):
"""This method finishes the surface
and drops all references to external resources.
For example, for the Xlib backend it means that
cairo will no longer access the drawable, which can be freed.
After calling :meth:`finish` the only valid operations on a surface
are getting and setting user data, flushing and finishing it.
Further drawing to the surface will not affect the surface
but will instead trigger a :class:`CairoError`
with a ``SURFACE_FINISHED`` status.
When the surface is garbage-collected, cairo will call :meth:`finish()`
if it hasn't been called already,
before freeing the resources associated with the surface.
"""
cairo.cairo_surface_finish(self._pointer)
self._check_status()
def write_to_png(self, target=None):
"""Writes the contents of surface as a PNG image.
:param target:
A filename,
a binary mode :term:`file object` with a `write` method,
or :obj:`None`.
:returns:
If ``target`` is :obj:`None`,
return the PNG contents as a byte string.
"""
return_bytes = target is None
if return_bytes:
target = io.BytesIO()
if hasattr(target, 'write'):
try:
write_func = _make_write_func(target)
_check_status(cairo.cairo_surface_write_to_png_stream(
self._pointer, write_func, ffi.NULL))
except (SystemError, MemoryError): # pragma: no cover
# Callback creation has failed
if hasattr(target, 'name'):
# File-like object has a name, write here
_check_status(cairo.cairo_surface_write_to_png(
self._pointer, _encode_filename(target.name)))
else:
# Use a fallback temporary file
with NamedTemporaryFile('wb', delete=False) as fd:
filename = fd.name
_check_status(cairo.cairo_surface_write_to_png(
self._pointer, _encode_filename(filename)))
png_file = Path(filename)
target.write(png_file.read_bytes())
png_file.unlink()
else:
_check_status(cairo.cairo_surface_write_to_png(
self._pointer, _encode_filename(target)))
if return_bytes:
return target.getvalue()
|
class Surface(object):
'''The base class for all surface types.
Should not be instantiated directly, but see :doc:`cffi_api`.
An instance may be returned for cairo surface types
that are not (yet) defined in cairocffi.
A :class:`Surface` represents an image,
either as the destination of a drawing operation
or as source when drawing onto another surface.
To draw to a :class:`Surface`,
create a cairo :class:`Context` with the surface as the target.
There are different sub-classes of :class:`Surface`
for different drawing backends;
for example, :class:`ImageSurface` is a bitmap image in memory.
The initial contents of a surface after creation
depend upon the manner of its creation.
If cairo creates the surface and backing storage for the user,
it will be initially cleared;
for example, :class:`ImageSurface` and :meth:`create_similar`.
Alternatively, if the user passes in a reference
to some backing storage and asks cairo to wrap that in a :class:`Surface`,
then the contents are not modified;
for example, :class:`ImageSurface` with a ``data`` argument.
'''
def __init__(self, pointer, target_keep_alive=None):
pass
def _check_status(self):
pass
@staticmethod
def _from_pointer(pointer, incref):
'''Wrap an existing ``cairo_surface_t *`` cdata pointer.
:type incref: bool
:param incref:
Whether to increase the :ref:`reference count <refcounting>` now.
:return:
A new instance of :class:`Surface` or one of its sub-classes,
depending on the surface’s type.
'''
pass
def create_similar(self, content, width, height):
'''Create a new surface that is as compatible as possible
for uploading to and the use in conjunction with this surface.
For example the new surface will have the same fallback resolution
and :class:`FontOptions`.
Generally, the new surface will also use the same backend as this surface,
unless that is not possible for some reason.
Initially the surface contents are all 0
(transparent if contents have transparency, black otherwise.)
Use :meth:`create_similar_image` if you need an image surface
which can be painted quickly to the target surface.
:param content: the :ref:`CONTENT` string for the new surface.
:param width: width of the new surface (in device-space units)
:param height: height of the new surface (in device-space units)
:type content: str
:type width: int
:type height: int
:returns: A new instance of :class:`Surface` or one of its subclasses.
'''
pass
def create_similar_image(self, content, width, height):
'''
Create a new image surface that is as compatible as possible
for uploading to and the use in conjunction with this surface.
However, this surface can still be used like any normal image surface.
Initially the surface contents are all 0
(transparent if contents have transparency, black otherwise.)
Use :meth:`create_similar` if you don't need an image surface.
:param content: the :ref:`FORMAT` string for the new surface
:param width: width of the new surface (in device-space units)
:param height: height of the new surface (in device-space units)
:type content: str
:type width: int
:type height: int
:returns: A new :class:`ImageSurface` instance.
'''
pass
def create_for_rectangle(self, x, y, width, height):
'''
Create a new surface that is a rectangle within this surface.
All operations drawn to this surface are then clipped and translated
onto the target surface.
Nothing drawn via this sub-surface outside of its bounds
is drawn onto the target surface,
making this a useful method for passing constrained child surfaces
to library routines that draw directly onto the parent surface,
i.e. with no further backend allocations,
double buffering or copies.
.. note::
As of cairo 1.12,
the semantics of subsurfaces have not been finalized yet
unless the rectangle is in full device units,
is contained within the extents of the target surface,
and the target or subsurface's device transforms are not changed.
:param x:
The x-origin of the sub-surface
from the top-left of the target surface (in device-space units)
:param y:
The y-origin of the sub-surface
from the top-left of the target surface (in device-space units)
:param width:
Width of the sub-surface (in device-space units)
:param height:
Height of the sub-surface (in device-space units)
:type x: float
:type y: float
:type width: float
:type height: float
:returns:
A new :class:`Surface` object.
*New in cairo 1.10.*
'''
pass
def get_content(self):
'''Returns the :ref:`CONTENT` string of this surface,
which indicates whether the surface contains color
and/or alpha information.
'''
pass
def has_show_text_glyphs(self):
'''Returns whether the surface supports sophisticated
:meth:`Context.show_text_glyphs` operations.
That is, whether it actually uses the text and cluster data
provided to a :meth:`Context.show_text_glyphs` call.
.. note::
Even if this method returns :obj:`False`,
a :meth:`Context.show_text_glyphs` operation targeted at this surface
will still succeed.
It just will act like a :meth:`Context.show_glyphs` operation.
Users can use this method to avoid computing UTF-8 text
and cluster mapping if the target surface does not use it.
'''
pass
def set_device_offset(self, x_offset, y_offset):
''' Sets an offset that is added to the device coordinates
determined by the CTM when drawing to surface.
One use case for this method is
when we want to create a :class:`Surface` that redirects drawing
for a portion of an onscreen surface
to an offscreen surface in a way that is
completely invisible to the user of the cairo API.
Setting a transformation via :meth:`Context.translate`
isn't sufficient to do this,
since methods like :meth:`Context.device_to_user`
will expose the hidden offset.
Note that the offset affects drawing to the surface
as well as using the surface in a source pattern.
:param x_offset:
The offset in the X direction, in device units
:param y_offset:
The offset in the Y direction, in device units
'''
pass
def get_device_offset(self):
'''Returns the previous device offset set by :meth:`set_device_offset`.
:returns: ``(x_offset, y_offset)``
'''
pass
def set_fallback_resolution(self, x_pixels_per_inch, y_pixels_per_inch):
'''
Set the horizontal and vertical resolution for image fallbacks.
When certain operations aren't supported natively by a backend,
cairo will fallback by rendering operations to an image
and then overlaying that image onto the output.
For backends that are natively vector-oriented,
this method can be used to set the resolution
used for these image fallbacks,
(larger values will result in more detailed images,
but also larger file sizes).
Some examples of natively vector-oriented backends are
the ps, pdf, and svg backends.
For backends that are natively raster-oriented,
image fallbacks are still possible,
but they are always performed at the native device resolution.
So this method has no effect on those backends.
.. note::
The fallback resolution only takes effect
at the time of completing a page
(with :meth:`show_page` or :meth:`copy_page`)
so there is currently no way to have
more than one fallback resolution in effect on a single page.
The default fallback resolution is
300 pixels per inch in both dimensions.
:param x_pixels_per_inch: horizontal resolution in pixels per inch
:type x_pixels_per_inch: float
:param y_pixels_per_inch: vertical resolution in pixels per inch
:type y_pixels_per_inch: float
'''
pass
def get_fallback_resolution(self):
'''Returns the previous fallback resolution
set by :meth:`set_fallback_resolution`,
or default fallback resolution if never set.
:returns: ``(x_pixels_per_inch, y_pixels_per_inch)``
'''
pass
def get_font_options(self):
'''Retrieves the default font rendering options for the surface.
This allows display surfaces to report the correct subpixel order
for rendering on them,
print surfaces to disable hinting of metrics and so forth.
The result can then be used with :class:`ScaledFont`.
:returns: A new :class:`FontOptions` object.
'''
pass
def set_device_scale(self, x_scale, y_scale):
'''Sets a scale that is multiplied to the device coordinates determined
by the CTM when drawing to surface.
One common use for this is to render to very high resolution display
devices at a scale factor, so that code that assumes 1 pixel will be a
certain size will still work. Setting a transformation via
cairo_translate() isn't sufficient to do this, since functions like
cairo_device_to_user() will expose the hidden scale.
Note that the scale affects drawing to the surface as well as using the
surface in a source pattern.
:param x_scale: the scale in the X direction, in device units.
:param y_scale: the scale in the Y direction, in device units.
*New in cairo 1.14.*
*New in cairocffi 0.9.*
'''
pass
def get_device_scale(self):
'''Returns the previous device scale set by :meth:`set_device_scale`.
*New in cairo 1.14.*
*New in cairocffi 0.9.*
'''
pass
def set_mime_data(self, mime_type, data):
'''
Attach an image in the format ``mime_type`` to this surface.
To remove the data from a surface,
call this method with same mime type and :obj:`None` for data.
The attached image (or filename) data can later
be used by backends which support it
(currently: PDF, PS, SVG and Win32 Printing surfaces)
to emit this data instead of making a snapshot of the surface.
This approach tends to be faster
and requires less memory and disk space.
The recognized MIME types are the following:
``"image/png"``
The Portable Network Graphics image file format (ISO/IEC 15948).
``"image/jpeg"``
The Joint Photographic Experts Group (JPEG)
image coding standard (ISO/IEC 10918-1).
``"image/jp2"``
The Joint Photographic Experts Group (JPEG) 2000
image coding standard (ISO/IEC 15444-1).
``"text/x-uri"``
URL for an image file (unofficial MIME type).
See corresponding backend surface docs
for details about which MIME types it can handle.
Caution: the associated MIME data will be discarded
if you draw on the surface afterwards.
Use this method with care.
:param str mime_type: The MIME type of the image data.
:param bytes data: The image data to attach to the surface.
*New in cairo 1.10.*
'''
pass
def get_mime_data(self, mime_type):
'''Return mime data previously attached to surface
using the specified mime type.
:param str mime_type: The MIME type of the image data.
:returns:
A CFFI buffer object, or :obj:`None`
if no data has been attached with the given mime type.
*New in cairo 1.10.*
'''
pass
def supports_mime_type(self, mime_type):
'''Return whether surface supports ``mime_type``.
:param str mime_type: The MIME type of the image data.
*New in cairo 1.12.*
'''
pass
def mark_dirty(self):
'''Tells cairo that drawing has been done to surface
using means other than cairo,
and that cairo should reread any cached areas.
Note that you must call :meth:`flush` before doing such drawing.
'''
pass
def mark_dirty_rectangle(self, x, y, width, height):
'''
Like :meth:`mark_dirty`,
but drawing has been done only to the specified rectangle,
so that cairo can retain cached contents
for other parts of the surface.
Any cached clip set on the surface will be reset by this method,
to make sure that future cairo calls have the clip set
that they expect.
:param x: X coordinate of dirty rectangle.
:param y: Y coordinate of dirty rectangle.
:param width: Width of dirty rectangle.
:param height: Height of dirty rectangle.
:type x: float
:type y: float
:type width: float
:type height: float
'''
pass
def show_page(self):
'''Emits and clears the current page
for backends that support multiple pages.
Use :meth:`copy_page` if you don't want to clear the page.
:meth:`Context.show_page` is a convenience method for this.
'''
pass
def copy_page(self):
'''Emits the current page for backends that support multiple pages,
but doesn't clear it,
so that the contents of the current page will be retained
for the next page.
Use :meth:`show_page` if you want to get an empty page
after the emission.
'''
pass
def flush(self):
'''Do any pending drawing for the surface
and also restore any temporary modifications
cairo has made to the surface's state.
This method must be called before switching
from drawing on the surface with cairo
to drawing on it directly with native APIs.
If the surface doesn't support direct access,
then this method does nothing.
'''
pass
def finish(self):
'''This method finishes the surface
and drops all references to external resources.
For example, for the Xlib backend it means that
cairo will no longer access the drawable, which can be freed.
After calling :meth:`finish` the only valid operations on a surface
are getting and setting user data, flushing and finishing it.
Further drawing to the surface will not affect the surface
but will instead trigger a :class:`CairoError`
with a ``SURFACE_FINISHED`` status.
When the surface is garbage-collected, cairo will call :meth:`finish()`
if it hasn't been called already,
before freeing the resources associated with the surface.
'''
pass
def write_to_png(self, target=None):
'''Writes the contents of surface as a PNG image.
:param target:
A filename,
a binary mode :term:`file object` with a `write` method,
or :obj:`None`.
:returns:
If ``target`` is :obj:`None`,
return the PNG contents as a byte string.
'''
pass
| 27 | 24 | 20 | 3 | 6 | 11 | 1 | 2.12 | 1 | 9 | 3 | 8 | 24 | 1 | 25 | 25 | 551 | 102 | 145 | 44 | 118 | 307 | 112 | 42 | 86 | 6 | 1 | 4 | 36 |
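A short sketch of the Surface methods above, driven through the ImageSurface subclass (a hedged example; the 100×100 ARGB32 surface is arbitrary)::

    import cairocffi

    surface = cairocffi.ImageSurface(cairocffi.FORMAT_ARGB32, 100, 100)
    surface.set_device_offset(10, 10)
    assert surface.get_device_offset() == (10.0, 10.0)
    surface.flush()                     # settle any pending drawing first
    png_bytes = surface.write_to_png()  # with target=None the PNG comes back as bytes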
143,727 | Kozea/cairocffi | cairocffi/__init__.py | cairocffi.CairoError |
class CairoError(Exception):
"""Raised when cairo returns an error status."""
def __init__(self, message, status):
super(CairoError, self).__init__(message)
self.status = status
|
class CairoError(Exception):
'''Raised when cairo returns an error status.'''
def __init__(self, message, status):
pass
| 2 | 1 | 3 | 0 | 3 | 0 | 1 | 0.25 | 1 | 1 | 0 | 0 | 1 | 1 | 1 | 11 | 5 | 0 | 4 | 3 | 2 | 1 | 4 | 3 | 2 | 1 | 3 | 0 | 1 |
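A small sketch of how CairoError is raised in practice, using the degenerate-matrix case from Matrix.invert() shown later in this file (a hedged example)::

    import cairocffi

    singular = cairocffi.Matrix(0, 0, 0, 0, 0, 0)  # collapses every point to (0, 0)
    try:
        singular.invert()
    except cairocffi.CairoError as exc:
        print(exc.status)  # the cairo status stored by __init__ above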
143,728 | Kozea/cairocffi | cairocffi/fonts.py | cairocffi.fonts.FontFace |
class FontFace(object):
"""The base class for all font face types.
Should not be instantiated directly, but see :doc:`cffi_api`.
An instance may be returned for cairo font face types
that are not (yet) defined in cairocffi.
"""
def __init__(self, pointer):
self._pointer = ffi.gc(
pointer, _keepref(cairo, cairo.cairo_font_face_destroy))
self._check_status()
def _check_status(self):
_check_status(cairo.cairo_font_face_status(self._pointer))
@staticmethod
def _from_pointer(pointer, incref):
"""Wrap an existing ``cairo_font_face_t *`` cdata pointer.
:type incref: bool
:param incref:
Whether to increase the :ref:`reference count <refcounting>` now.
:return:
A new instance of :class:`FontFace` or one of its sub-classes,
depending on the face’s type.
"""
if pointer == ffi.NULL:
raise ValueError('Null pointer')
if incref:
cairo.cairo_font_face_reference(pointer)
self = object.__new__(FONT_TYPE_TO_CLASS.get(
cairo.cairo_font_face_get_type(pointer), FontFace))
FontFace.__init__(self, pointer) # Skip the subclass’s __init__
return self
|
class FontFace(object):
'''The base class for all font face types.
Should not be instantiated directly, but see :doc:`cffi_api`.
An instance may be returned for cairo font face types
that are not (yet) defined in cairocffi.
'''
def __init__(self, pointer):
pass
def _check_status(self):
pass
@staticmethod
def _from_pointer(pointer, incref):
'''Wrap an existing ``cairo_font_face_t *`` cdata pointer.
:type incref: bool
:param incref:
Whether to increase the :ref:`reference count <refcounting>` now.
:return:
A new instance of :class:`FontFace` or one of its sub-classes,
depending on the face’s type.
'''
pass
| 5 | 2 | 8 | 1 | 5 | 3 | 2 | 0.82 | 1 | 2 | 1 | 1 | 2 | 1 | 3 | 3 | 36 | 6 | 17 | 7 | 12 | 14 | 14 | 6 | 10 | 3 | 1 | 1 | 5 |
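The _from_pointer() dispatch above is what makes higher-level getters return the right subclass; a minimal sketch of the observable behaviour (hedged; assumes cairocffi is importable)::

    import cairocffi

    scaled = cairocffi.ScaledFont(cairocffi.ToyFontFace('monospace'))
    # get_font_face() goes through FontFace._from_pointer(), which looks up the
    # pointer's type in FONT_TYPE_TO_CLASS and instantiates the matching subclass.
    print(type(scaled.get_font_face()))  # expected: ToyFontFace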
143,729 | Kozea/cairocffi | utils/cairo_coverage.py | cairo_coverage.Visitor |
class Visitor(pycparser.c_ast.NodeVisitor):
def visit_Decl(self, node): # noqa: N802
for _, child in node.children():
if isinstance(child, pycparser.c_ast.FuncDecl):
if ('cairo.' + node.name) not in ALL_THE_CODE and not (
node.name.endswith('user_data')):
print(node.name)
break
|
class Visitor(pycparser.c_ast.NodeVisitor):
def visit_Decl(self, node):
pass
| 2 | 0 | 7 | 0 | 7 | 1 | 4 | 0.13 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 8 | 0 | 8 | 3 | 6 | 1 | 7 | 3 | 5 | 4 | 1 | 3 | 4 |
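The visitor above relies on pycparser's NodeVisitor dispatch: visit() walks the AST and routes every Decl node to visit_Decl(). A hypothetical driver sketch (the import name cairo_coverage and the C snippet are assumptions; ALL_THE_CODE is module-level state built elsewhere in the real script)::

    import pycparser

    import cairo_coverage  # hypothetical import of the module shown above

    cairo_coverage.ALL_THE_CODE = ''  # stand-in; the real script loads the cairocffi sources
    ast = pycparser.CParser().parse('void cairo_surface_flush(void *surface);')
    cairo_coverage.Visitor().visit(ast)  # prints cairo functions missing from ALL_THE_CODE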
143,730 | Kozea/cairocffi | cairocffi/fonts.py | cairocffi.fonts.ToyFontFace |
class ToyFontFace(FontFace):
"""Creates a font face from a triplet of family, slant, and weight.
These font faces are used in the implementation of cairo’s "toy" font API.
If family is the zero-length string ``""``,
the platform-specific default family is assumed.
The default family then can be queried using :meth:`get_family`.
The :meth:`Context.select_font_face` method uses this to create font faces.
See that method for limitations and other details of toy font faces.
:param family: a font family name, as a Unicode or UTF-8 string.
:param slant: The :ref:`FONT_SLANT` string for the font face.
:param weight: The :ref:`FONT_WEIGHT` string for the font face.
"""
def __init__(self, family='', slant=constants.FONT_SLANT_NORMAL,
weight=constants.FONT_WEIGHT_NORMAL):
FontFace.__init__(self, cairo.cairo_toy_font_face_create(
_encode_string(family), slant, weight))
def get_family(self):
"""Return this font face’s family name."""
return ffi.string(cairo.cairo_toy_font_face_get_family(
self._pointer)).decode('utf8', 'replace')
def get_slant(self):
"""Return this font face’s :ref:`FONT_SLANT` string."""
return cairo.cairo_toy_font_face_get_slant(self._pointer)
def get_weight(self):
"""Return this font face’s :ref:`FONT_WEIGHT` string."""
return cairo.cairo_toy_font_face_get_weight(self._pointer)
|
class ToyFontFace(FontFace):
'''Creates a font face from a triplet of family, slant, and weight.
These font faces are used in the implementation of cairo’s "toy" font API.
If family is the zero-length string ``""``,
the platform-specific default family is assumed.
The default family then can be queried using :meth:`get_family`.
The :meth:`Context.select_font_face` method uses this to create font faces.
See that method for limitations and other details of toy font faces.
:param family: a font family name, as a Unicode or UTF-8 string.
:param slant: The :ref:`FONT_SLANT` string for the font face.
:param weight: The :ref:`FONT_WEIGHT` string for the font face.
'''
def __init__(self, family='', slant=constants.FONT_SLANT_NORMAL,
weight=constants.FONT_WEIGHT_NORMAL):
pass
def get_family(self):
'''Return this font face’s family name.'''
pass
def get_slant(self):
'''Return this font face’s :ref:`FONT_SLANT` string.'''
pass
def get_weight(self):
'''Return this font face’s :ref:`FONT_WEIGHT` string.'''
pass
| 5 | 4 | 4 | 0 | 3 | 1 | 1 | 1.17 | 1 | 0 | 0 | 0 | 4 | 0 | 4 | 7 | 33 | 7 | 12 | 6 | 6 | 14 | 9 | 5 | 4 | 1 | 2 | 0 | 4 |
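A minimal sketch of the accessors above (hedged; 'serif' is an arbitrary family, and the constants are the standard cairocffi FONT_SLANT/FONT_WEIGHT values)::

    import cairocffi

    face = cairocffi.ToyFontFace(
        'serif', cairocffi.FONT_SLANT_ITALIC, cairocffi.FONT_WEIGHT_BOLD)
    assert face.get_family() == 'serif'
    print(face.get_slant())   # the FONT_SLANT_ITALIC value
    print(face.get_weight())  # the FONT_WEIGHT_BOLD value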
143,731 | Kozea/cairocffi | cairocffi/matrix.py | cairocffi.matrix.Matrix |
class Matrix(object):
"""A 2D transformation matrix.
Matrices are used throughout cairo to convert between
different coordinate spaces.
A :class:`Matrix` holds an affine transformation,
such as a scale, rotation, shear, or a combination of these.
The transformation of a point (x,y) is given by::
x_new = xx * x + xy * y + x0
y_new = yx * x + yy * y + y0
The current transformation matrix of a :class:`Context`,
represented as a :class:`Matrix`,
defines the transformation from user-space coordinates
to device-space coordinates.
See :meth:`Context.get_matrix` and :meth:`Context.set_matrix`.
The default values produce an identity matrix.
Matrices can be compared with ``m1 == m2`` and ``m1 != m2``
as well as multiplied with ``m3 = m1 * m2``.
"""
def __init__(self, xx=1, yx=0, xy=0, yy=1, x0=0, y0=0):
self._pointer = ffi.new('cairo_matrix_t *')
cairo.cairo_matrix_init(self._pointer, xx, yx, xy, yy, x0, y0)
@classmethod
def init_rotate(cls, radians):
"""Return a new :class:`Matrix` for a transformation
that rotates by ``radians``.
:type radians: float
:param radians:
Angle of rotation, in radians.
The direction of rotation is defined such that
positive angles rotate in the direction
from the positive X axis toward the positive Y axis.
With the default axis orientation of cairo,
positive angles rotate in a clockwise direction.
"""
result = cls()
cairo.cairo_matrix_init_rotate(result._pointer, radians)
return result
def as_tuple(self):
"""Return all of the matrix’s components.
:returns: A ``(xx, yx, xy, yy, x0, y0)`` tuple of floats.
"""
ptr = self._pointer
return (ptr.xx, ptr.yx, ptr.xy, ptr.yy, ptr.x0, ptr.y0)
def copy(self):
"""Return a new copy of this matrix."""
return type(self)(*self.as_tuple())
def __getitem__(self, index):
return getattr(
self._pointer, ('xx', 'yx', 'xy', 'yy', 'x0', 'y0')[index])
def __iter__(self):
return iter(self.as_tuple())
def __eq__(self, other):
return self.as_tuple() == other.as_tuple()
def __ne__(self, other):
return self.as_tuple() != other.as_tuple()
def __repr__(self):
class_ = type(self)
return '%s(%g, %g, %g, %g, %g, %g)' % (
(class_.__name__, *self.as_tuple()))
def multiply(self, other):
"""Multiply with another matrix
and return the result as a new :class:`Matrix` object.
Same as ``self * other``.
"""
res = Matrix()
cairo.cairo_matrix_multiply(
res._pointer, self._pointer, other._pointer)
return res
__mul__ = multiply
def translate(self, tx, ty):
"""Applies a translation by ``tx``, ``ty``
to the transformation in this matrix.
The effect of the new transformation is to
first translate the coordinates by ``tx`` and ``ty``,
then apply the original transformation to the coordinates.
.. note::
This changes the matrix in-place.
:param tx: Amount to translate in the X direction.
:param ty: Amount to translate in the Y direction.
:type tx: float
:type ty: float
"""
cairo.cairo_matrix_translate(self._pointer, tx, ty)
def scale(self, sx, sy=None):
"""Applies scaling by ``sx``, ``sy``
to the transformation in this matrix.
The effect of the new transformation is to
first scale the coordinates by ``sx`` and ``sy``,
then apply the original transformation to the coordinates.
If ``sy`` is omitted, it is the same as ``sx``
so that scaling preserves aspect ratios.
.. note::
This changes the matrix in-place.
:param sx: Scale factor in the X direction.
:param sy: Scale factor in the Y direction.
:type sx: float
:type sy: float
"""
if sy is None:
sy = sx
cairo.cairo_matrix_scale(self._pointer, sx, sy)
def rotate(self, radians):
"""Applies a rotation by ``radians``
to the transformation in this matrix.
The effect of the new transformation is to
first rotate the coordinates by ``radians``,
then apply the original transformation to the coordinates.
.. note::
This changes the matrix in-place.
:type radians: float
:param radians:
Angle of rotation, in radians.
The direction of rotation is defined such that positive angles
rotate in the direction from the positive X axis
toward the positive Y axis.
With the default axis orientation of cairo,
positive angles rotate in a clockwise direction.
"""
cairo.cairo_matrix_rotate(self._pointer, radians)
def invert(self):
"""Changes matrix to be the inverse of its original value.
Not all transformation matrices have inverses;
if the matrix collapses points together (it is degenerate),
then it has no inverse and this function will fail.
.. note::
This changes the matrix in-place.
:raises: :exc:`CairoError` on degenerate matrices.
"""
_check_status(cairo.cairo_matrix_invert(self._pointer))
def inverted(self):
"""Return the inverse of this matrix. See :meth:`invert`.
:raises: :exc:`CairoError` on degenerate matrices.
:returns: A new :class:`Matrix` object.
"""
matrix = self.copy()
matrix.invert()
return matrix
def transform_point(self, x, y):
"""Transforms the point ``(x, y)`` by this matrix.
:param x: X position.
:param y: Y position.
:type x: float
:type y: float
:returns: A ``(new_x, new_y)`` tuple of floats.
"""
xy = ffi.new('double[2]', [x, y])
cairo.cairo_matrix_transform_point(self._pointer, xy + 0, xy + 1)
return tuple(xy)
def transform_distance(self, dx, dy):
"""Transforms the distance vector ``(dx, dy)`` by this matrix.
This is similar to :meth:`transform_point`
except that the translation components of the transformation
are ignored.
The calculation of the returned vector is as follows::
dx2 = dx1 * xx + dy1 * xy
dy2 = dx1 * yx + dy1 * yy
Affine transformations are position invariant,
so the same vector always transforms to the same vector.
If ``(x1, y1)`` transforms to ``(x2, y2)``
then ``(x1 + dx1, y1 + dy1)`` will transform
to ``(x1 + dx2, y1 + dy2)`` for all values of ``x1`` and ``x2``.
:param dx: X component of a distance vector.
:param dy: Y component of a distance vector.
:type dx: float
:type dy: float
:returns: A ``(new_dx, new_dy)`` tuple of floats.
"""
xy = ffi.new('double[2]', [dx, dy])
cairo.cairo_matrix_transform_distance(self._pointer, xy + 0, xy + 1)
return tuple(xy)
def _component_property(name): # noqa: N805
return property(
lambda self: getattr(self._pointer, name),
lambda self, value: setattr(self._pointer, name, value),
doc='Read-write attribute access to a single float component.')
xx = _component_property('xx')
yx = _component_property('yx')
xy = _component_property('xy')
yy = _component_property('yy')
x0 = _component_property('x0')
y0 = _component_property('y0')
del _component_property
|
class Matrix(object):
'''A 2D transformation matrix.
Matrices are used throughout cairo to convert between
different coordinate spaces.
A :class:`Matrix` holds an affine transformation,
such as a scale, rotation, shear, or a combination of these.
The transformation of a point (x,y) is given by::
x_new = xx * x + xy * y + x0
y_new = yx * x + yy * y + y0
The current transformation matrix of a :class:`Context`,
represented as a :class:`Matrix`,
defines the transformation from user-space coordinates
to device-space coordinates.
See :meth:`Context.get_matrix` and :meth:`Context.set_matrix`.
The default values produce an identity matrix.
Matrices can be compared with ``m1 == m2`` and ``m1 != m2``
as well as multiplied with ``m3 = m1 * m2``.
'''
def __init__(self, xx=1, yx=0, xy=0, yy=1, x0=0, y0=0):
pass
@classmethod
def init_rotate(cls, radians):
'''Return a new :class:`Matrix` for a transformation
that rotates by ``radians``.
:type radians: float
:param radians:
Angle of rotation, in radians.
The direction of rotation is defined such that
positive angles rotate in the direction
from the positive X axis toward the positive Y axis.
With the default axis orientation of cairo,
positive angles rotate in a clockwise direction.
'''
pass
def as_tuple(self):
'''Return all of the matrix’s components.
:returns: A ``(xx, yx, xy, yy, x0, y0)`` tuple of floats.
'''
pass
def copy(self):
'''Return a new copy of this matrix.'''
pass
def __getitem__(self, index):
pass
def __iter__(self):
pass
def __eq__(self, other):
pass
def __ne__(self, other):
pass
def __repr__(self):
pass
def multiply(self, other):
'''Multiply with another matrix
and return the result as a new :class:`Matrix` object.
Same as ``self * other``.
'''
pass
def translate(self, tx, ty):
'''Applies a translation by ``tx``, ``ty``
to the transformation in this matrix.
The effect of the new transformation is to
first translate the coordinates by ``tx`` and ``ty``,
then apply the original transformation to the coordinates.
.. note::
This changes the matrix in-place.
:param tx: Amount to translate in the X direction.
:param ty: Amount to translate in the Y direction.
:type tx: float
:type ty: float
'''
pass
def scale(self, sx, sy=None):
'''Applies scaling by ``sx``, ``sy``
to the transformation in this matrix.
The effect of the new transformation is to
first scale the coordinates by ``sx`` and ``sy``,
then apply the original transformation to the coordinates.
If ``sy`` is omitted, it is the same as ``sx``
so that scaling preserves aspect ratios.
.. note::
This changes the matrix in-place.
:param sx: Scale factor in the X direction.
:param sy: Scale factor in the Y direction.
:type sx: float
:type sy: float
'''
pass
def rotate(self, radians):
'''Applies a rotation by ``radians``
to the transformation in this matrix.
The effect of the new transformation is to
first rotate the coordinates by ``radians``,
then apply the original transformation to the coordinates.
.. note::
This changes the matrix in-place.
:type radians: float
:param radians:
Angle of rotation, in radians.
The direction of rotation is defined such that positive angles
rotate in the direction from the positive X axis
toward the positive Y axis.
With the default axis orientation of cairo,
positive angles rotate in a clockwise direction.
'''
pass
def invert(self):
'''Changes matrix to be the inverse of its original value.
Not all transformation matrices have inverses;
if the matrix collapses points together (it is degenerate),
then it has no inverse and this function will fail.
.. note::
This changes the matrix in-place.
:raises: :exc:`CairoError` on degenerate matrices.
'''
pass
def inverted(self):
'''Return the inverse of this matrix. See :meth:`invert`.
:raises: :exc:`CairoError` on degenerate matrices.
:returns: A new :class:`Matrix` object.
'''
pass
def transform_point(self, x, y):
'''Transforms the point ``(x, y)`` by this matrix.
:param x: X position.
:param y: Y position.
:type x: float
:type y: float
:returns: A ``(new_x, new_y)`` tuple of floats.
'''
pass
def transform_distance(self, dx, dy):
'''Transforms the distance vector ``(dx, dy)`` by this matrix.
This is similar to :meth:`transform_point`
except that the translation components of the transformation
are ignored.
The calculation of the returned vector is as follows::
dx2 = dx1 * xx + dy1 * xy
dy2 = dx1 * yx + dy1 * yy
Affine transformations are position invariant,
so the same vector always transforms to the same vector.
If ``(x1, y1)`` transforms to ``(x2, y2)``
then ``(x1 + dx1, y1 + dy1)`` will transform
to ``(x1 + dx2, y1 + dy2)`` for all values of ``x1`` and ``x2``.
:param dx: X component of a distance vector.
:param dy: Y component of a distance vector.
:type dx: float
:type dy: float
:returns: A ``(new_dx, new_dy)`` tuple of floats.
'''
pass
def _component_property(name):
pass
| 20 | 12 | 10 | 2 | 3 | 6 | 1 | 1.73 | 1 | 3 | 0 | 0 | 17 | 1 | 18 | 18 | 236 | 54 | 67 | 35 | 47 | 116 | 60 | 34 | 41 | 2 | 1 | 1 | 19 |
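A worked sketch of the in-place semantics documented above: each call is applied to coordinates *before* the transformation that was already in the matrix::

    import math

    import cairocffi

    m = cairocffi.Matrix()  # identity
    m.translate(10, 0)      # now maps (x, y) -> (x + 10, y)
    m.scale(2)              # scale runs first: (x, y) -> (2x + 10, 2y)
    assert m.transform_point(1, 1) == (12.0, 2.0)

    r = cairocffi.Matrix.init_rotate(math.pi / 2)
    print(r.transform_distance(1, 0))  # translation ignored; roughly (0.0, 1.0)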
143,732 | Kozea/cairocffi | cairocffi/patterns.py | cairocffi.patterns.Gradient |
class Gradient(Pattern):
"""
The common parent of :class:`LinearGradient` and :class:`RadialGradient`.
Should not be instantiated directly.
"""
def add_color_stop_rgba(self, offset, red, green, blue, alpha=1):
"""Adds a translucent color stop to a gradient pattern.
The offset specifies the location along the gradient's control vector.
For example,
a linear gradient's control vector is from (x0,y0) to (x1,y1)
while a radial gradient's control vector is
from any point on the start circle
to the corresponding point on the end circle.
If two (or more) stops are specified with identical offset values,
they will be sorted
according to the order in which the stops are added
(stops added earlier before stops added later).
This can be useful for reliably making sharp color transitions
instead of the typical blend.
The color components and offset are in the range 0 to 1.
If the values passed in are outside that range, they will be clamped.
:param offset: Location along the gradient's control vector
:param red: Red component of the color.
:param green: Green component of the color.
:param blue: Blue component of the color.
:param alpha:
Alpha component of the color.
1 (the default) is opaque, 0 fully transparent.
:type offset: float
:type red: float
:type green: float
:type blue: float
:type alpha: float
"""
cairo.cairo_pattern_add_color_stop_rgba(
self._pointer, offset, red, green, blue, alpha)
self._check_status()
def add_color_stop_rgb(self, offset, red, green, blue):
"""Same as :meth:`add_color_stop_rgba` with ``alpha=1``.
Kept for compatibility with pycairo.
"""
cairo.cairo_pattern_add_color_stop_rgb(
self._pointer, offset, red, green, blue)
self._check_status()
def get_color_stops(self):
"""Return this gradient’s color stops so far.
:returns:
A list of ``(offset, red, green, blue, alpha)`` tuples of floats.
"""
count = ffi.new('int *')
_check_status(cairo.cairo_pattern_get_color_stop_count(
self._pointer, count))
stops = []
stop = ffi.new('double[5]')
for i in range(count[0]):
_check_status(cairo.cairo_pattern_get_color_stop_rgba(
self._pointer, i,
stop + 0, stop + 1, stop + 2, stop + 3, stop + 4))
stops.append(tuple(stop))
return stops
|
class Gradient(Pattern):
'''
The common parent of :class:`LinearGradient` and :class:`RadialGradient`.
Should not be instantiated directly.
'''
def add_color_stop_rgba(self, offset, red, green, blue, alpha=1):
'''Adds a translucent color stop to a gradient pattern.
The offset specifies the location along the gradient's control vector.
For example,
a linear gradient's control vector is from (x0,y0) to (x1,y1)
while a radial gradient's control vector is
from any point on the start circle
to the corresponding point on the end circle.
If two (or more) stops are specified with identical offset values,
they will be sorted
according to the order in which the stops are added
(stops added earlier before stops added later).
This can be useful for reliably making sharp color transitions
instead of the typical blend.
The color components and offset are in the range 0 to 1.
If the values passed in are outside that range, they will be clamped.
:param offset: Location along the gradient's control vector
:param red: Red component of the color.
:param green: Green component of the color.
:param blue: Blue component of the color.
:param alpha:
Alpha component of the color.
1 (the default) is opaque, 0 fully transparent.
:type offset: float
:type red: float
:type green: float
:type blue: float
:type alpha: float
'''
pass
def add_color_stop_rgb(self, offset, red, green, blue):
'''Same as :meth:`add_color_stop_rgba` with ``alpha=1``.
Kept for compatibility with pycairo.
'''
pass
def get_color_stops(self):
'''Return this gradient’s color stops so far.
:returns:
A list of ``(offset, red, green, blue, alpha)`` tuples of floats.
'''
pass
| 4 | 4 | 21 | 3 | 7 | 12 | 1 | 1.86 | 1 | 2 | 0 | 2 | 3 | 0 | 3 | 12 | 71 | 11 | 21 | 8 | 17 | 39 | 16 | 8 | 12 | 2 | 2 | 1 | 4 |
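A minimal sketch of the color-stop API above, assuming cairocffi is importable; the linear geometry is only there because Gradient itself is abstract:

import cairocffi

gradient = cairocffi.LinearGradient(0, 0, 0, 100)
gradient.add_color_stop_rgb(0, 1, 0, 0)        # opaque red at the start
gradient.add_color_stop_rgba(1, 0, 0, 1, 0.5)  # half-transparent blue at the end

# Stops are reported back as (offset, red, green, blue, alpha) tuples.
assert gradient.get_color_stops() == [(0, 1, 0, 0, 1), (1, 0, 0, 1, 0.5)]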
143,733 |
Kozea/cairocffi
|
cairocffi/fonts.py
|
cairocffi.fonts.FontOptions
|
class FontOptions(object):
"""An opaque object holding all options that are used when rendering fonts.
Individual features of a :class:`FontOptions`
can be set or accessed using method
named ``set_FEATURE_NAME`` and ``get_FEATURE_NAME``,
like :meth:`set_antialias` and :meth:`get_antialias`.
New features may be added to :class:`FontOptions` in the future.
For this reason, ``==``, :meth:`copy`, :meth:`merge`, and :func:`hash`
should be used to check for equality, copy, merge,
or compute a hash value of :class:`FontOptions` objects.
:param values:
Call the corresponding ``set_FEATURE_NAME`` methods
after creating a new :class:`FontOptions`::
options = FontOptions()
options.set_antialias(cairocffi.ANTIALIAS_BEST)
assert FontOptions(antialias=cairocffi.ANTIALIAS_BEST) == options
"""
def __init__(self, **values):
self._init_pointer(cairo.cairo_font_options_create())
for name, value in values.items():
getattr(self, 'set_' + name)(value)
def _init_pointer(self, pointer):
self._pointer = ffi.gc(
pointer, _keepref(cairo, cairo.cairo_font_options_destroy))
self._check_status()
def _check_status(self):
_check_status(cairo.cairo_font_options_status(self._pointer))
def copy(self):
"""Return a new :class:`FontOptions` with the same values."""
cls = type(self)
other = object.__new__(cls)
cls._init_pointer(other, cairo.cairo_font_options_copy(self._pointer))
return other
def merge(self, other):
"""Merges non-default options from ``other``,
replacing existing values.
This operation can be thought of as somewhat similar
to compositing other onto options
with the operation of :obj:`OVER <OPERATOR_OVER>`.
"""
cairo.cairo_font_options_merge(self._pointer, other._pointer)
_check_status(cairo.cairo_font_options_status(self._pointer))
def __hash__(self):
return cairo.cairo_font_options_hash(self._pointer)
def __eq__(self, other):
return cairo.cairo_font_options_equal(self._pointer, other._pointer)
def __ne__(self, other):
return not self == other
equal = __eq__
hash = __hash__
def set_antialias(self, antialias):
"""Changes the :ref:`ANTIALIAS` for the font options object.
This specifies the type of antialiasing to do when rendering text.
"""
cairo.cairo_font_options_set_antialias(self._pointer, antialias)
self._check_status()
def get_antialias(self):
"""Return the :ref:`ANTIALIAS` string for the font options object."""
return cairo.cairo_font_options_get_antialias(self._pointer)
def set_subpixel_order(self, subpixel_order):
"""Changes the :ref:`SUBPIXEL_ORDER` for the font options object.
The subpixel order specifies the order of color elements
within each pixel on the display device
when rendering with an antialiasing mode of
:obj:`SUBPIXEL <ANTIALIAS_SUBPIXEL>`.
"""
cairo.cairo_font_options_set_subpixel_order(
self._pointer, subpixel_order)
self._check_status()
def get_subpixel_order(self):
"""Return the :ref:`SUBPIXEL_ORDER` string
for the font options object.
"""
return cairo.cairo_font_options_get_subpixel_order(self._pointer)
def set_hint_style(self, hint_style):
"""Changes the :ref:`HINT_STYLE` for the font options object.
This controls whether to fit font outlines to the pixel grid,
and if so, whether to optimize for fidelity or contrast.
"""
cairo.cairo_font_options_set_hint_style(self._pointer, hint_style)
self._check_status()
def get_hint_style(self):
"""Return the :ref:`HINT_STYLE` string for the font options object."""
return cairo.cairo_font_options_get_hint_style(self._pointer)
def set_hint_metrics(self, hint_metrics):
"""Changes the :ref:`HINT_METRICS` for the font options object.
This controls whether metrics are quantized
to integer values in device units.
"""
cairo.cairo_font_options_set_hint_metrics(self._pointer, hint_metrics)
self._check_status()
def get_hint_metrics(self):
"""Return the :ref:`HINT_METRICS` string
for the font options object.
"""
return cairo.cairo_font_options_get_hint_metrics(self._pointer)
def set_variations(self, variations):
"""Sets the OpenType font variations for the font options object.
Font variations are specified as a string with a format that is similar
to the CSS font-variation-settings. The string contains a
comma-separated list of axis assignments, which each assignment
consists of a 4-character axis name and a value, separated by
whitespace and optional equals sign.
:param variations: the new font variations, or ``None``.
*New in cairo 1.16.*
*New in cairocffi 0.9.*
"""
if variations is None:
variations = ffi.NULL
else:
variations = _encode_string(variations)
cairo.cairo_font_options_set_variations(self._pointer, variations)
self._check_status()
def get_variations(self):
"""Gets the OpenType font variations for the font options object.
See :meth:`set_variations` for details about the
string format.
:return: the font variations for the font options object. The
returned string belongs to the ``options`` and must not be modified.
It is valid until either the font options object is destroyed or the
font variations in this object is modified with
:meth:`set_variations`.
*New in cairo 1.16.*
*New in cairocffi 0.9.*
"""
variations = cairo.cairo_font_options_get_variations(self._pointer)
if variations != ffi.NULL:
return ffi.string(variations).decode('utf8', 'replace')
|
class FontOptions(object):
'''An opaque object holding all options that are used when rendering fonts.
Individual features of a :class:`FontOptions`
can be set or accessed using method
named ``set_FEATURE_NAME`` and ``get_FEATURE_NAME``,
like :meth:`set_antialias` and :meth:`get_antialias`.
New features may be added to :class:`FontOptions` in the future.
For this reason, ``==``, :meth:`copy`, :meth:`merge`, and :func:`hash`
should be used to check for equality, copy, merge,
or compute a hash value of :class:`FontOptions` objects.
:param values:
Call the corresponding ``set_FEATURE_NAME`` methods
after creating a new :class:`FontOptions`::
options = FontOptions()
options.set_antialias(cairocffi.ANTIALIAS_BEST)
assert FontOptions(antialias=cairocffi.ANTIALIAS_BEST) == options
'''
def __init__(self, **values):
pass
def _init_pointer(self, pointer):
pass
def _check_status(self):
pass
def copy(self):
'''Return a new :class:`FontOptions` with the same values.'''
pass
def merge(self, other):
'''Merges non-default options from ``other``,
replacing existing values.
This operation can be thought of as somewhat similar
to compositing other onto options
with the operation of :obj:`OVER <OPERATOR_OVER>`.
'''
pass
def __hash__(self):
pass
def __eq__(self, other):
pass
def __ne__(self, other):
pass
def set_antialias(self, antialias):
'''Changes the :ref:`ANTIALIAS` for the font options object.
This specifies the type of antialiasing to do when rendering text.
'''
pass
def get_antialias(self):
'''Return the :ref:`ANTIALIAS` string for the font options object.'''
pass
def set_subpixel_order(self, subpixel_order):
'''Changes the :ref:`SUBPIXEL_ORDER` for the font options object.
The subpixel order specifies the order of color elements
within each pixel on the display device
when rendering with an antialiasing mode of
:obj:`SUBPIXEL <ANTIALIAS_SUBPIXEL>`.
'''
pass
def get_subpixel_order(self):
'''Return the :ref:`SUBPIXEL_ORDER` string
for the font options object.
'''
pass
def set_hint_style(self, hint_style):
'''Changes the :ref:`HINT_STYLE` for the font options object.
This controls whether to fit font outlines to the pixel grid,
and if so, whether to optimize for fidelity or contrast.
'''
pass
def get_hint_style(self):
'''Return the :ref:`HINT_STYLE` string for the font options object.'''
pass
def set_hint_metrics(self, hint_metrics):
'''Changes the :ref:`HINT_METRICS` for the font options object.
This controls whether metrics are quantized
to integer values in device units.
'''
pass
def get_hint_metrics(self):
'''Return the :ref:`HINT_METRICS` string
for the font options object.
'''
pass
def set_variations(self, variations):
'''Sets the OpenType font variations for the font options object.
Font variations are specified as a string with a format that is similar
to the CSS font-variation-settings. The string contains a
comma-separated list of axis assignments, which each assignment
consists of a 4-character axis name and a value, separated by
whitespace and optional equals sign.
:param variations: the new font variations, or ``None``.
*New in cairo 1.16.*
*New in cairocffi 0.9.*
'''
pass
def get_variations(self):
'''Gets the OpenType font variations for the font options object.
See :meth:`set_variations` for details about the
string format.
:return: the font variations for the font options object. The
returned string belongs to the ``options`` and must not be modified.
It is valid until either the font options object is destroyed or the
font variations in this object is modified with
:meth:`set_variations`.
*New in cairo 1.16.*
*New in cairocffi 0.9.*
'''
pass
| 19 | 13 | 7 | 1 | 3 | 3 | 1 | 1.17 | 1 | 2 | 1 | 0 | 18 | 1 | 18 | 18 | 168 | 40 | 59 | 25 | 40 | 69 | 56 | 25 | 37 | 2 | 1 | 1 | 21 |
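A short sketch of the equality/copy semantics described in the FontOptions docstring, assuming cairocffi:

import cairocffi
from cairocffi import FontOptions

options = FontOptions(antialias=cairocffi.ANTIALIAS_BEST)
options.set_hint_style(cairocffi.HINT_STYLE_FULL)

# copy() yields an independent object that still compares equal...
other = options.copy()
assert other == options and hash(other) == hash(options)

# ...until one of them diverges.
other.set_antialias(cairocffi.ANTIALIAS_NONE)
assert other != options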
143,734 |
Kozea/cairocffi
|
cairocffi/patterns.py
|
cairocffi.patterns.Pattern
|
class Pattern(object):
"""The base class for all pattern types.
Should not be instantiated directly, but see :doc:`cffi_api`.
An instance may be returned for cairo pattern types
that are not (yet) defined in cairocffi.
A :class:`Pattern` represents a source when drawing onto a surface.
There are different sub-classes of :class:`Pattern`,
for different types of sources;
for example, :class:`SolidPattern` is a pattern for a solid color.
Other than instantiating the various :class:`Pattern` sub-classes,
some of the pattern types can be implicitly created
using various :class:`Context`; for example :meth:`Context.set_source_rgb`.
"""
def __init__(self, pointer):
self._pointer = ffi.gc(
pointer, _keepref(cairo, cairo.cairo_pattern_destroy))
self._check_status()
def _check_status(self):
_check_status(cairo.cairo_pattern_status(self._pointer))
@staticmethod
def _from_pointer(pointer, incref):
"""Wrap an existing ``cairo_pattern_t *`` cdata pointer.
:type incref: bool
:param incref:
Whether to increase the :ref:`reference count <refcounting>` now.
:return:
A new instance of :class:`Pattern` or one of its sub-classes,
depending on the pattern’s type.
"""
if pointer == ffi.NULL:
raise ValueError('Null pointer')
if incref:
cairo.cairo_pattern_reference(pointer)
self = object.__new__(PATTERN_TYPE_TO_CLASS.get(
cairo.cairo_pattern_get_type(pointer), Pattern))
Pattern.__init__(self, pointer) # Skip the subclass’s __init__
return self
def set_extend(self, extend):
"""
Sets the mode to be used for drawing outside the area of this pattern.
See :ref:`EXTEND` for details on the semantics of each extend strategy.
The default extend mode is
:obj:`NONE <EXTEND_NONE>` for :class:`SurfacePattern`
and :obj:`PAD <EXTEND_PAD>` for :class:`Gradient` patterns.
"""
cairo.cairo_pattern_set_extend(self._pointer, extend)
self._check_status()
def get_extend(self):
"""Gets the current extend mode for this pattern.
:returns: A :ref:`EXTEND` string.
"""
return cairo.cairo_pattern_get_extend(self._pointer)
# pycairo only has filters on SurfacePattern,
# but cairo seems to accept it on any pattern.
def set_filter(self, filter):
"""Sets the filter to be used for resizing when using this pattern.
See :ref:`FILTER` for details on each filter.
Note that you might want to control filtering
even when you do not have an explicit :class:`Pattern`,
(for example when using :meth:`Context.set_source_surface`).
In these cases, it is convenient to use :meth:`Context.get_source`
to get access to the pattern that cairo creates implicitly.
For example::
context.get_source().set_filter(cairocffi.FILTER_NEAREST)
"""
cairo.cairo_pattern_set_filter(self._pointer, filter)
self._check_status()
def get_filter(self):
"""Return the current filter string for this pattern.
See :ref:`FILTER` for details on each filter.
"""
return cairo.cairo_pattern_get_filter(self._pointer)
def set_matrix(self, matrix):
"""Sets the pattern’s transformation matrix to ``matrix``.
This matrix is a transformation from user space to pattern space.
When a pattern is first created
it always has the identity matrix for its transformation matrix,
which means that pattern space is initially identical to user space.
**Important:**
Please note that the direction of this transformation matrix
is from user space to pattern space.
This means that if you imagine the flow
from a pattern to user space (and on to device space),
then coordinates in that flow will be transformed
by the inverse of the pattern matrix.
For example, if you want to make a pattern appear twice as large
as it does by default the correct code to use is::
pattern.set_matrix(Matrix(xx=0.5, yy=0.5))
Meanwhile, using values of 2 rather than 0.5 in the code above
would cause the pattern to appear at half of its default size.
Also, please note the discussion of the user-space locking semantics
of :meth:`Context.set_source`.
:param matrix: A :class:`Matrix` to be copied into the pattern.
"""
cairo.cairo_pattern_set_matrix(self._pointer, matrix._pointer)
self._check_status()
def get_matrix(self):
"""Copies the pattern’s transformation matrix.
:returns: A new :class:`Matrix` object.
"""
matrix = Matrix()
cairo.cairo_pattern_get_matrix(self._pointer, matrix._pointer)
self._check_status()
return matrix
|
class Pattern(object):
'''The base class for all pattern types.
Should not be instantiated directly, but see :doc:`cffi_api`.
An instance may be returned for cairo pattern types
that are not (yet) defined in cairocffi.
A :class:`Pattern` represents a source when drawing onto a surface.
There are different sub-classes of :class:`Pattern`,
for different types of sources;
for example, :class:`SolidPattern` is a pattern for a solid color.
Other than instantiating the various :class:`Pattern` sub-classes,
some of the pattern types can be implicitly created
using various :class:`Context`; for example :meth:`Context.set_source_rgb`.
'''
def __init__(self, pointer):
pass
def _check_status(self):
pass
@staticmethod
def _from_pointer(pointer, incref):
'''Wrap an existing ``cairo_pattern_t *`` cdata pointer.
:type incref: bool
:param incref:
Whether to increase the :ref:`reference count <refcounting>` now.
:return:
A new instance of :class:`Pattern` or one of its sub-classes,
depending on the pattern’s type.
'''
pass
def set_extend(self, extend):
'''
Sets the mode to be used for drawing outside the area of this pattern.
See :ref:`EXTEND` for details on the semantics of each extend strategy.
The default extend mode is
:obj:`NONE <EXTEND_NONE>` for :class:`SurfacePattern`
and :obj:`PAD <EXTEND_PAD>` for :class:`Gradient` patterns.
'''
pass
def get_extend(self):
'''Gets the current extend mode for this pattern.
:returns: A :ref:`EXTEND` string.
'''
pass
def set_filter(self, filter):
'''Sets the filter to be used for resizing when using this pattern.
See :ref:`FILTER` for details on each filter.
Note that you might want to control filtering
even when you do not have an explicit :class:`Pattern`,
(for example when using :meth:`Context.set_source_surface`).
In these cases, it is convenient to use :meth:`Context.get_source`
to get access to the pattern that cairo creates implicitly.
For example::
context.get_source().set_filter(cairocffi.FILTER_NEAREST)
'''
pass
def get_filter(self):
'''Return the current filter string for this pattern.
See :ref:`FILTER` for details on each filter.
'''
pass
def set_matrix(self, matrix):
'''Sets the pattern’s transformation matrix to ``matrix``.
This matrix is a transformation from user space to pattern space.
When a pattern is first created
it always has the identity matrix for its transformation matrix,
which means that pattern space is initially identical to user space.
**Important:**
Please note that the direction of this transformation matrix
is from user space to pattern space.
This means that if you imagine the flow
from a pattern to user space (and on to device space),
then coordinates in that flow will be transformed
by the inverse of the pattern matrix.
For example, if you want to make a pattern appear twice as large
as it does by default the correct code to use is::
pattern.set_matrix(Matrix(xx=0.5, yy=0.5))
Meanwhile, using values of 2 rather than 0.5 in the code above
would cause the pattern to appear at half of its default size.
Also, please note the discussion of the user-space locking semantics
of :meth:`Context.set_source`.
:param matrix: A :class:`Matrix` to be copied into the pattern.
'''
pass
def get_matrix(self):
'''Copies the pattern’s transformation matrix.
:returns: A new :class:`Matrix` object.
'''
pass
| 11 | 8 | 12 | 2 | 4 | 6 | 1 | 2 | 1 | 3 | 2 | 3 | 8 | 1 | 9 | 9 | 137 | 33 | 35 | 14 | 24 | 70 | 32 | 13 | 22 | 3 | 1 | 1 | 11 |
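A sketch of the user-space-to-pattern-space direction that ``set_matrix`` documents above; assumes cairocffi, with a throwaway gradient standing in for any concrete Pattern:

import cairocffi
from cairocffi import Matrix

pattern = cairocffi.LinearGradient(0, 0, 100, 0)
pattern.add_color_stop_rgb(0, 0, 0, 0)
pattern.add_color_stop_rgb(1, 1, 1, 1)

# To make the pattern appear twice as large, scale by 0.5: the matrix
# maps user space to pattern space, so it is the inverse of the zoom.
pattern.set_matrix(Matrix(xx=0.5, yy=0.5))

pattern.set_extend(cairocffi.EXTEND_REPEAT)
assert pattern.get_extend() == cairocffi.EXTEND_REPEAT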
143,735 |
Kozea/cairocffi
|
cairocffi/patterns.py
|
cairocffi.patterns.RadialGradient
|
class RadialGradient(Gradient):
"""Creates a new radial gradient pattern between the two circles
defined by (cx0, cy0, radius0) and (cx1, cy1, radius1).
Before using the gradient pattern, a number of color stops
should be defined using :meth:`~Gradient.add_color_stop_rgba`.
Note: The coordinates here are in pattern space.
For a new pattern, pattern space is identical to user space,
but the relationship between the spaces can be changed
with :meth:`~Pattern.set_matrix`.
:param cx0: X coordinate of the start circle.
:param cy0: Y coordinate of the start circle.
:param radius0: Radius of the start circle.
:param cx1: X coordinate of the end circle.
:param cy1: Y coordinate of the end circle.
:param radius1: Radius of the end circle.
:type cx0: float
:type cy0: float
:type radius0: float
:type cx1: float
:type cy1: float
:type radius1: float
"""
def __init__(self, cx0, cy0, radius0, cx1, cy1, radius1):
Pattern.__init__(self, cairo.cairo_pattern_create_radial(
cx0, cy0, radius0, cx1, cy1, radius1))
def get_radial_circles(self):
"""Return this radial gradient’s endpoint circles,
each specified as a center coordinate and a radius.
:returns: A ``(cx0, cy0, radius0, cx1, cy1, radius1)`` tuple of floats.
"""
circles = ffi.new('double[6]')
_check_status(cairo.cairo_pattern_get_radial_circles(
self._pointer, circles + 0, circles + 1, circles + 2,
circles + 3, circles + 4, circles + 5))
return tuple(circles)
|
class RadialGradient(Gradient):
'''Creates a new radial gradient pattern between the two circles
defined by (cx0, cy0, radius0) and (cx1, cy1, radius1).
Before using the gradient pattern, a number of color stops
should be defined using :meth:`~Gradient.add_color_stop_rgba`.
Note: The coordinates here are in pattern space.
For a new pattern, pattern space is identical to user space,
but the relationship between the spaces can be changed
with :meth:`~Pattern.set_matrix`.
:param cx0: X coordinate of the start circle.
:param cy0: Y coordinate of the start circle.
:param radius0: Radius of the start circle.
:param cx1: X coordinate of the end circle.
:param cy1: Y coordinate of the end circle.
:param radius1: Radius of the end circle.
:type cx0: float
:type cy0: float
:type radius0: float
:type cx1: float
:type cy1: float
:type radius1: float
'''
def __init__(self, cx0, cy0, radius0, cx1, cy1, radius1):
pass
def get_radial_circles(self):
'''Return this radial gradient’s endpoint circles,
each specified as a center coordinate and a radius.
:returns: A ``(cx0, cy0, radius0, cx1, cy1, radius1)`` tuple of floats.
'''
pass
| 3 | 2 | 8 | 1 | 5 | 2 | 1 | 2.5 | 1 | 1 | 0 | 0 | 2 | 0 | 2 | 14 | 41 | 6 | 10 | 4 | 7 | 25 | 7 | 4 | 4 | 1 | 3 | 0 | 2 |
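A minimal sketch pairing the radial constructor with its accessor; the coordinates are illustrative:

import cairocffi

# Start circle: center (50, 50), radius 10; end circle: same center, radius 40.
radial = cairocffi.RadialGradient(50, 50, 10, 50, 50, 40)
radial.add_color_stop_rgb(0, 1, 1, 1)
radial.add_color_stop_rgb(1, 0, 0, 0)

assert radial.get_radial_circles() == (50, 50, 10, 50, 50, 40)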
143,736 |
Kozea/cairocffi
|
cairocffi/patterns.py
|
cairocffi.patterns.SolidPattern
|
class SolidPattern(Pattern):
"""Creates a new pattern corresponding to a solid color.
The color and alpha components are in the range 0 to 1.
If the values passed in are outside that range, they will be clamped.
:param red: Red component of the color.
:param green: Green component of the color.
:param blue: Blue component of the color.
:param alpha:
Alpha component of the color.
1 (the default) is opaque, 0 fully transparent.
:type red: float
:type green: float
:type blue: float
:type alpha: float
"""
def __init__(self, red, green, blue, alpha=1):
Pattern.__init__(
self, cairo.cairo_pattern_create_rgba(red, green, blue, alpha))
def get_rgba(self):
"""Returns the solid pattern’s color.
:returns: a ``(red, green, blue, alpha)`` tuple of floats.
"""
rgba = ffi.new('double[4]')
_check_status(cairo.cairo_pattern_get_rgba(
self._pointer, rgba + 0, rgba + 1, rgba + 2, rgba + 3))
return tuple(rgba)
|
class SolidPattern(Pattern):
'''Creates a new pattern corresponding to a solid color.
The color and alpha components are in the range 0 to 1.
If the values passed in are outside that range, they will be clamped.
:param red: Red component of the color.
:param green: Green component of the color.
:param blue: Blue component of the color.
:param alpha:
Alpha component of the color.
1 (the default) is opaque, 0 fully transparent.
:type red: float
:type green: float
:type blue: float
:type alpha: float
'''
def __init__(self, red, green, blue, alpha=1):
pass
def get_rgba(self):
'''Returns the solid pattern’s color.
:returns: a ``(red, green, blue, alpha)`` tuple of floats.
'''
pass
| 3 | 2 | 7 | 1 | 4 | 2 | 1 | 1.89 | 1 | 1 | 0 | 0 | 2 | 0 | 2 | 11 | 31 | 5 | 9 | 4 | 6 | 17 | 7 | 4 | 4 | 1 | 2 | 0 | 2 |
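A tiny sketch of the accessor and of the clamping the docstring promises; the exact clamped values are an assumption based on that promise:

import cairocffi

solid = cairocffi.SolidPattern(1, 0.5, 0)      # alpha defaults to 1
assert solid.get_rgba() == (1, 0.5, 0, 1)

# Out-of-range components are clamped to [0, 1] (per the docstring above).
clamped = cairocffi.SolidPattern(2, -1, 0.5, alpha=1.5)
assert clamped.get_rgba() == (1, 0, 0.5, 1)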
143,737 |
Kozea/cairocffi
|
cairocffi/patterns.py
|
cairocffi.patterns.SurfacePattern
|
class SurfacePattern(Pattern):
"""Create a new pattern for the given surface.
:param surface: A :class:`Surface` object.
"""
def __init__(self, surface):
Pattern.__init__(
self, cairo.cairo_pattern_create_for_surface(surface._pointer))
def get_surface(self):
"""Return this :class:`SurfacePattern`’s surface.
:returns:
An instance of :class:`Surface` or one of its sub-classes,
a new Python object referencing the existing cairo surface.
"""
surface_p = ffi.new('cairo_surface_t **')
_check_status(cairo.cairo_pattern_get_surface(
self._pointer, surface_p))
return Surface._from_pointer(surface_p[0], incref=True)
|
class SurfacePattern(Pattern):
'''Create a new pattern for the given surface.
:param surface: A :class:`Surface` object.
'''
def __init__(self, surface):
pass
def get_surface(self):
'''Return this :class:`SurfacePattern`’s surface.
:returns:
An instance of :class:`Surface` or one of its sub-classes,
a new Python object referencing the existing cairo surface.
'''
pass
| 3 | 2 | 8 | 1 | 4 | 3 | 1 | 0.89 | 1 | 1 | 1 | 0 | 2 | 0 | 2 | 11 | 22 | 5 | 9 | 4 | 6 | 8 | 7 | 4 | 4 | 1 | 2 | 0 | 2 |
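A sketch of tiling one surface across another via SurfacePattern, assuming cairocffi; the 10x10 tile content is illustrative:

import cairocffi

tile = cairocffi.ImageSurface(cairocffi.FORMAT_ARGB32, 10, 10)
pattern = cairocffi.SurfacePattern(tile)
pattern.set_extend(cairocffi.EXTEND_REPEAT)    # repeat beyond the 10x10 source

target = cairocffi.ImageSurface(cairocffi.FORMAT_ARGB32, 100, 100)
context = cairocffi.Context(target)
context.set_source(pattern)
context.paint()

# get_surface() wraps the same cairo surface in a new Python object.
assert isinstance(pattern.get_surface(), cairocffi.ImageSurface)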
143,738 |
Kozea/cairocffi
|
cairocffi/pixbuf.py
|
cairocffi.pixbuf.ImageLoadingError
|
class ImageLoadingError(ValueError):
"""PixBuf returned an error when loading an image.
The image data is probably corrupted.
"""
|
class ImageLoadingError(ValueError):
'''PixBuf returned an error when loading an image.
The image data is probably corrupted.
'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 6 | 2 | 1 | 1 | 0 | 3 | 1 | 1 | 0 | 0 | 4 | 0 | 0 |
143,739 |
Kozea/cairocffi
|
cairocffi/pixbuf.py
|
cairocffi.pixbuf.Pixbuf
|
class Pixbuf(object):
"""Wrap a ``GdkPixbuf`` pointer and simulate methods."""
def __init__(self, pointer):
gobject.g_object_ref(pointer)
self._pointer = ffi.gc(pointer, gobject.g_object_unref)
def __getattr__(self, name):
function = getattr(gdk_pixbuf, 'gdk_pixbuf_' + name)
return partial(function, self._pointer)
|
class Pixbuf(object):
'''Wrap a ``GdkPixbuf`` pointer and simulate methods.'''
def __init__(self, pointer):
pass
def __getattr__(self, name):
pass
| 3 | 1 | 3 | 0 | 3 | 0 | 1 | 0.14 | 1 | 1 | 0 | 0 | 2 | 1 | 2 | 2 | 9 | 1 | 7 | 5 | 4 | 1 | 7 | 5 | 4 | 1 | 1 | 0 | 2 |
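The ``__getattr__`` plus ``functools.partial`` trick above turns flat C-style functions into bound methods; a library-free sketch of the same pattern, with a plain Python namespace standing in for the GDK library:

from functools import partial

class _FakeGdkPixbuf:
    @staticmethod
    def gdk_pixbuf_get_width(pointer):
        return pointer['width']

class PixbufLike:
    def __init__(self, pointer):
        self._pointer = pointer

    def __getattr__(self, name):
        # 'get_width' resolves to gdk_pixbuf_get_width with the pointer bound.
        function = getattr(_FakeGdkPixbuf, 'gdk_pixbuf_' + name)
        return partial(function, self._pointer)

p = PixbufLike({'width': 640})
assert p.get_width() == 640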
143,740 |
Kozea/cairocffi
|
cairocffi/surfaces.py
|
cairocffi.surfaces.RecordingSurface
|
class RecordingSurface(Surface):
"""A recording surface is a surface that records all drawing operations
at the highest level of the surface backend interface,
(that is, the level of paint, mask, stroke, fill, and show_text_glyphs).
The recording surface can then be "replayed" against any target surface
by using it as a source surface.
If you want to replay a surface so that the results in ``target``
will be identical to the results that would have been obtained
if the original operations applied to the recording surface
had instead been applied to the target surface,
you can use code like this::
context = Context(target)
context.set_source_surface(recording_surface, 0, 0)
context.paint()
A recording surface is logically unbounded,
i.e. it has no implicit constraint on the size of the drawing surface.
However, in practice this is rarely useful as you wish to replay
against a particular target surface with known bounds.
For this case, it is more efficient to specify the target extents
to the recording surface upon creation.
The recording phase of the recording surface is careful
to snapshot all necessary objects (paths, patterns, etc.),
in order to achieve accurate replay.
:param content: The :ref:`CONTENT` string of the recording surface
:param extents:
The extents to record
as a ``(x, y, width, height)`` tuple of floats in device units,
or :obj:`None` to record unbounded operations.
``(x, y)`` are the coordinates of the top-left corner of the rectangle,
``(width, height)`` its dimensions.
*New in cairo 1.10*
*New in cairocffi 0.2*
"""
def __init__(self, content, extents):
extents = (ffi.new('cairo_rectangle_t *', extents)
if extents is not None else ffi.NULL)
Surface.__init__(
self, cairo.cairo_recording_surface_create(content, extents))
def get_extents(self):
"""Return the extents of the recording-surface.
:returns:
A ``(x, y, width, height)`` tuple of floats,
or :obj:`None` if the surface is unbounded.
*New in cairo 1.12*
"""
extents = ffi.new('cairo_rectangle_t *')
if cairo.cairo_recording_surface_get_extents(self._pointer, extents):
return (extents.x, extents.y, extents.width, extents.height)
def ink_extents(self):
"""Measures the extents of the operations
stored within the recording-surface.
This is useful to compute the required size of an image surface
(or equivalent) into which to replay the full sequence
of drawing operations.
:return: A ``(x, y, width, height)`` tuple of floats.
"""
extents = ffi.new('double[4]')
cairo.cairo_recording_surface_ink_extents(
self._pointer, extents + 0, extents + 1, extents + 2, extents + 3)
self._check_status()
return tuple(extents)
|
class RecordingSurface(Surface):
'''A recording surface is a surface that records all drawing operations
at the highest level of the surface backend interface,
(that is, the level of paint, mask, stroke, fill, and show_text_glyphs).
The recording surface can then be "replayed" against any target surface
by using it as a source surface.
If you want to replay a surface so that the results in ``target``
will be identical to the results that would have been obtained
if the original operations applied to the recording surface
had instead been applied to the target surface,
you can use code like this::
context = Context(target)
context.set_source_surface(recording_surface, 0, 0)
context.paint()
A recording surface is logically unbounded,
i.e. it has no implicit constraint on the size of the drawing surface.
However, in practice this is rarely useful as you wish to replay
against a particular target surface with known bounds.
For this case, it is more efficient to specify the target extents
to the recording surface upon creation.
The recording phase of the recording surface is careful
to snapshot all necessary objects (paths, patterns, etc.),
in order to achieve accurate replay.
:param content: The :ref:`CONTENT` string of the recording surface
:param extents:
The extents to record
as a ``(x, y, width, height)`` tuple of floats in device units,
or :obj:`None` to record unbounded operations.
``(x, y)`` are the coordinates of the top-left corner of the rectangle,
``(width, height)`` its dimensions.
*New in cairo 1.10*
*New in cairocffi 0.2*
'''
def __init__(self, content, extents):
pass
def get_extents(self):
'''Return the extents of the recording-surface.
:returns:
A ``(x, y, width, height)`` tuple of floats,
or :obj:`None` if the surface is unbounded.
*New in cairo 1.12*
'''
pass
def ink_extents(self):
'''Measures the extents of the operations
stored within the recording-surface.
This is useful to compute the required size of an image surface
(or equivalent) into which to replay the full sequence
of drawing operations.
:return: A ``(x, y, width, height)`` tuple of floats.
'''
pass
| 4 | 3 | 11 | 2 | 5 | 4 | 2 | 2.81 | 1 | 1 | 0 | 0 | 3 | 0 | 3 | 28 | 76 | 15 | 16 | 6 | 12 | 45 | 13 | 6 | 9 | 2 | 2 | 1 | 5 |
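A sketch of the record-then-replay flow from the docstring above, assuming cairocffi:

import cairocffi

# Record unbounded operations (extents=None).
recording = cairocffi.RecordingSurface(cairocffi.CONTENT_COLOR_ALPHA, None)
context = cairocffi.Context(recording)
context.rectangle(10, 10, 30, 20)
context.fill()

# ink_extents() measures what was actually drawn.
x, y, width, height = recording.ink_extents()

# Replay against a concrete target, exactly as in the docstring.
target = cairocffi.ImageSurface(cairocffi.FORMAT_ARGB32, 50, 50)
replay = cairocffi.Context(target)
replay.set_source_surface(recording, 0, 0)
replay.paint()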
143,741 |
Kozea/cairocffi
|
cairocffi/surfaces.py
|
cairocffi.surfaces.PDFSurface
|
class PDFSurface(Surface):
"""Creates a PDF surface of the specified size in PostScript points
to be written to ``target``.
Note that the size of individual pages of the PDF output can vary.
See :meth:`set_size`.
The PDF surface backend recognizes the following MIME types
for the data attached to a surface (see :meth:`~Surface.set_mime_data`)
when it is used as a source pattern for drawing on this surface:
``image/jpeg`` and
``image/jp2``.
If any of them is specified, the PDF backend emits an image
with the content of MIME data
(with the ``/DCTDecode`` or ``/JPXDecode`` filter, respectively)
instead of a surface snapshot
(with the ``/FlateDecode`` filter),
which typically produces PDF with a smaller file size.
``target`` can be :obj:`None` to specify no output.
This will generate a surface that may be queried and used as a source,
without generating a temporary file.
:param target:
A filename,
a binary mode :term:`file object` with a ``write`` method,
or :obj:`None`.
:param width_in_points:
Width of the surface, in points (1 point == 1/72.0 inch)
:param height_in_points:
Height of the surface, in points (1 point == 1/72.0 inch)
:type width_in_points: float
:type height_in_points: float
"""
def __init__(self, target, width_in_points, height_in_points):
if hasattr(target, 'write') or target is None:
write_func = _make_write_func(target)
pointer = cairo.cairo_pdf_surface_create_for_stream(
write_func, ffi.NULL, width_in_points, height_in_points)
else:
write_func = None
pointer = cairo.cairo_pdf_surface_create(
_encode_filename(target), width_in_points, height_in_points)
Surface.__init__(self, pointer, target_keep_alive=write_func)
def set_size(self, width_in_points, height_in_points):
"""Changes the size of a PDF surface
for the current (and subsequent) pages.
This method should only be called
before any drawing operations have been performed on the current page.
The simplest way to do this is to call this method
immediately after creating the surface
or immediately after completing a page with either
:meth:`~Surface.show_page` or :meth:`~Surface.copy_page`.
:param width_in_points:
New width of the page, in points (1 point = 1/72.0 inch)
:param height_in_points:
New height of the page, in points (1 point = 1/72.0 inch)
:type width_in_points: float
:type height_in_points: float
"""
cairo.cairo_pdf_surface_set_size(
self._pointer, width_in_points, height_in_points)
self._check_status()
def add_outline(self, parent_id, utf8, link_attribs, flags=None):
"""Add an item to the document outline hierarchy.
The outline has the ``utf8`` name and links to the location specified
by ``link_attribs``. Link attributes have the same keys and values as
the Link Tag, excluding the ``rect`` attribute. The item will be a
child of the item with id ``parent_id``. Use ``PDF_OUTLINE_ROOT`` as
the parent id of top level items.
:param parent_id:
the id of the parent item or ``PDF_OUTLINE_ROOT`` if this is a
top level item.
:param utf8: the name of the outline.
:param link_attribs:
the link attributes specifying where this outline links to.
:param flags: outline item flags.
:return: the id for the added item.
*New in cairo 1.16.*
*New in cairocffi 0.9.*
"""
if flags is None:
flags = 0
value = cairo.cairo_pdf_surface_add_outline(
self._pointer, parent_id, _encode_string(utf8),
_encode_string(link_attribs), flags)
self._check_status()
return value
def set_metadata(self, metadata, utf8):
"""Sets document metadata.
The ``PDF_METADATA_CREATE_DATE`` and ``PDF_METADATA_MOD_DATE``
values must be in ISO-8601 format: YYYY-MM-DDThh:mm:ss. An optional
timezone of the form "[+/-]hh:mm" or "Z" for UTC time can be appended.
All other metadata values can be any UTF-8 string.
:param metadata: the metadata item to set.
:param utf8: metadata value.
*New in cairo 1.16.*
*New in cairocffi 0.9.*
"""
cairo.cairo_pdf_surface_set_metadata(
self._pointer, metadata, _encode_string(utf8))
self._check_status()
def set_page_label(self, utf8):
"""Set page label for the current page.
:param utf8: the page label.
*New in cairo 1.16.*
*New in cairocffi 0.9.*
"""
cairo.cairo_pdf_surface_set_page_label(
self._pointer, _encode_string(utf8))
def set_thumbnail_size(self, width, height):
"""Set thumbnail image size for the current and all subsequent pages.
Setting a width or height of 0 disables thumbnails for the current and
subsequent pages.
:param width: thumbnail width.
:param height: thumbnail height.
*New in cairo 1.16.*
*New in cairocffi 0.9.*
"""
cairo.cairo_pdf_surface_set_thumbnail_size(
self._pointer, width, height)
def restrict_to_version(self, version):
"""Restricts the generated PDF file to ``version``.
See :meth:`get_versions` for a list of available version values
that can be used here.
This method should only be called
before any drawing operations have been performed on the given surface.
The simplest way to do this is to call this method
immediately after creating the surface.
:param version: A :ref:`PDF_VERSION` string.
*New in cairo 1.10.*
"""
cairo.cairo_pdf_surface_restrict_to_version(self._pointer, version)
self._check_status()
@staticmethod
def get_versions():
"""Return the list of supported PDF versions.
See :meth:`restrict_to_version`.
:return: A list of :ref:`PDF_VERSION` strings.
*New in cairo 1.10.*
"""
versions = ffi.new('cairo_pdf_version_t const **')
num_versions = ffi.new('int *')
cairo.cairo_pdf_get_versions(versions, num_versions)
versions = versions[0]
return [versions[i] for i in range(num_versions[0])]
@staticmethod
def version_to_string(version):
"""Return the string representation of the given :ref:`PDF_VERSION`.
See :meth:`get_versions` for a way to get
the list of valid version ids.
*New in cairo 1.10.*
"""
c_string = cairo.cairo_pdf_version_to_string(version)
if c_string == ffi.NULL:
raise ValueError(version)
return ffi.string(c_string).decode('ascii')
|
class PDFSurface(Surface):
'''Creates a PDF surface of the specified size in PostScript points
to be written to ``target``.
Note that the size of individual pages of the PDF output can vary.
See :meth:`set_size`.
The PDF surface backend recognizes the following MIME types
for the data attached to a surface (see :meth:`~Surface.set_mime_data`)
when it is used as a source pattern for drawing on this surface:
``image/jpeg`` and
``image/jp2``.
If any of them is specified, the PDF backend emits an image
with the content of MIME data
(with the ``/DCTDecode`` or ``/JPXDecode`` filter, respectively)
instead of a surface snapshot
(with the ``/FlateDecode`` filter),
which typically produces PDF with a smaller file size.
``target`` can be :obj:`None` to specify no output.
This will generate a surface that may be queried and used as a source,
without generating a temporary file.
:param target:
A filename,
a binary mode :term:`file object` with a ``write`` method,
or :obj:`None`.
:param width_in_points:
Width of the surface, in points (1 point == 1/72.0 inch)
:param height_in_points:
Height of the surface, in points (1 point == 1/72.0 inch)
:type width_in_points: float
:type height_in_points: float
'''
def __init__(self, target, width_in_points, height_in_points):
pass
def set_size(self, width_in_points, height_in_points):
'''Changes the size of a PDF surface
for the current (and subsequent) pages.
This method should only be called
before any drawing operations have been performed on the current page.
The simplest way to do this is to call this method
immediately after creating the surface
or immediately after completing a page with either
:meth:`~Surface.show_page` or :meth:`~Surface.copy_page`.
:param width_in_points:
New width of the page, in points (1 point = 1/72.0 inch)
:param height_in_points:
New height of the page, in points (1 point = 1/72.0 inch)
:type width_in_points: float
:type height_in_points: float
'''
pass
def add_outline(self, parent_id, utf8, link_attribs, flags=None):
'''Add an item to the document outline hierarchy.
The outline has the ``utf8`` name and links to the location specified
by ``link_attribs``. Link attributes have the same keys and values as
the Link Tag, excluding the ``rect`` attribute. The item will be a
child of the item with id ``parent_id``. Use ``PDF_OUTLINE_ROOT`` as
the parent id of top level items.
:param parent_id:
the id of the parent item or ``PDF_OUTLINE_ROOT`` if this is a
top level item.
:param utf8: the name of the outline.
:param link_attribs:
the link attributes specifying where this outline links to.
:param flags: outline item flags.
:return: the id for the added item.
*New in cairo 1.16.*
*New in cairocffi 0.9.*
'''
pass
def set_metadata(self, metadata, utf8):
'''Sets document metadata.
The ``PDF_METADATA_CREATE_DATE`` and ``PDF_METADATA_MOD_DATE``
values must be in ISO-8601 format: YYYY-MM-DDThh:mm:ss. An optional
timezone of the form "[+/-]hh:mm" or "Z" for UTC time can be appended.
All other metadata values can be any UTF-8 string.
:param metadata: the metadata item to set.
:param utf8: metadata value.
*New in cairo 1.16.*
*New in cairocffi 0.9.*
'''
pass
def set_page_label(self, utf8):
'''Set page label for the current page.
:param utf8: the page label.
*New in cairo 1.16.*
*New in cairocffi 0.9.*
'''
pass
def set_thumbnail_size(self, width, height):
'''Set thumbnail image size for the current and all subsequent pages.
Setting a width or height of 0 disables thumbnails for the current and
subsequent pages.
:param width: thumbnail width.
:param height: thumbnail height.
*New in cairo 1.16.*
*New in cairocffi 0.9.*
'''
pass
def restrict_to_version(self, version):
'''Restricts the generated PDF file to ``version``.
See :meth:`get_versions` for a list of available version values
that can be used here.
This method should only be called
before any drawing operations have been performed on the given surface.
The simplest way to do this is to call this method
immediately after creating the surface.
:param version: A :ref:`PDF_VERSION` string.
*New in cairo 1.10.*
'''
pass
@staticmethod
def get_versions():
'''Return the list of supported PDF versions.
See :meth:`restrict_to_version`.
:return: A list of :ref:`PDF_VERSION` strings.
*New in cairo 1.10.*
'''
pass
@staticmethod
def version_to_string(version):
'''Return the string representation of the given :ref:`PDF_VERSION`.
See :meth:`get_versions` for a way to get
the list of valid version ids.
*New in cairo 1.10.*
'''
pass
| 12 | 9 | 17 | 4 | 5 | 8 | 1 | 2.12 | 1 | 2 | 0 | 0 | 7 | 0 | 9 | 34 | 199 | 46 | 49 | 18 | 37 | 104 | 38 | 16 | 28 | 2 | 2 | 1 | 12 |
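A minimal sketch producing a one-page, roughly A4 PDF with the API above; the filename is illustrative:

import cairocffi

surface = cairocffi.PDFSurface('example.pdf', 595, 842)  # 595x842pt ~ A4
context = cairocffi.Context(surface)
context.rectangle(100, 100, 200, 100)
context.fill()
surface.show_page()   # end the current page
surface.finish()      # flush and close the file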
143,742 |
Kozea/cairocffi
|
cairocffi/surfaces.py
|
cairocffi.surfaces.KeepAlive
|
class KeepAlive(object):
"""
Keep some objects alive until a callback is called.
:attr:`closure` is a tuple of cairo_destroy_func_t and void* cdata objects,
as expected by cairo_surface_set_mime_data().
Either :meth:`save` must be called before the callback,
or none of them must be called.
"""
instances = set() # noqa: RUF012
def __init__(self, *objects):
self.objects = objects
weakself = weakref.ref(self)
def closure(_):
value = weakself()
if value is not None:
value.instances.remove(value)
callback = ffi.callback(
'cairo_destroy_func_t', closure)
# cairo wants a non-NULL closure pointer.
self.closure = (callback, callback)
def save(self):
"""Start keeping a reference to the passed objects."""
self.instances.add(self)
|
class KeepAlive(object):
'''
Keep some objects alive until a callback is called.
:attr:`closure` is a tuple of cairo_destroy_func_t and void* cdata objects,
as expected by cairo_surface_set_mime_data().
Either :meth:`save` must be called before the callback,
or none of them must be called.
'''
def __init__(self, *objects):
pass
def closure(_):
pass
def save(self):
'''Start keeping a reference to the passed objects.'''
pass
| 4 | 2 | 7 | 1 | 5 | 1 | 1 | 0.71 | 1 | 0 | 0 | 0 | 2 | 2 | 2 | 2 | 29 | 6 | 14 | 10 | 10 | 10 | 13 | 10 | 9 | 2 | 1 | 1 | 4 |
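The class above pins Python objects until a C-side destroy callback runs; a library-free sketch of the same weakref idea (the hand-rolled callback invocation is illustrative):

import weakref

class KeepAliveLike:
    instances = set()

    def __init__(self, *objects):
        self.objects = objects
        weakself = weakref.ref(self)   # avoid a self-referencing closure

        def closure(_):
            value = weakself()
            if value is not None:
                value.instances.remove(value)

        self.closure = closure

keeper = KeepAliveLike(b'mime data')
keeper.instances.add(keeper)       # the 'save()' step: now strongly reachable
keeper.closure(None)               # simulate cairo calling the destroy func
assert keeper not in KeepAliveLike.instances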
143,743 |
Kozea/cairocffi
|
cairocffi/patterns.py
|
cairocffi.patterns.LinearGradient
|
class LinearGradient(Gradient):
"""Create a new linear gradient
along the line defined by (x0, y0) and (x1, y1).
Before using the gradient pattern, a number of color stops
should be defined using :meth:`~Gradient.add_color_stop_rgba`.
Note: The coordinates here are in pattern space.
For a new pattern, pattern space is identical to user space,
but the relationship between the spaces can be changed
with :meth:`~Pattern.set_matrix`.
:param x0: X coordinate of the start point.
:param y0: Y coordinate of the start point.
:param x1: X coordinate of the end point.
:param y1: Y coordinate of the end point.
:type x0: float
:type y0: float
:type x1: float
:type y1: float
"""
def __init__(self, x0, y0, x1, y1):
Pattern.__init__(
self, cairo.cairo_pattern_create_linear(x0, y0, x1, y1))
def get_linear_points(self):
"""Return this linear gradient’s endpoints.
:returns: A ``(x0, y0, x1, y1)`` tuple of floats.
"""
points = ffi.new('double[4]')
_check_status(cairo.cairo_pattern_get_linear_points(
self._pointer, points + 0, points + 1, points + 2, points + 3))
return tuple(points)
|
class LinearGradient(Gradient):
'''Create a new linear gradient
along the line defined by (x0, y0) and (x1, y1).
Before using the gradient pattern, a number of color stops
should be defined using :meth:`~Gradient.add_color_stop_rgba`.
Note: The coordinates here are in pattern space.
For a new pattern, pattern space is identical to user space,
but the relationship between the spaces can be changed
with :meth:`~Pattern.set_matrix`.
:param x0: X coordinate of the start point.
:param y0: Y coordinate of the start point.
:param x1: X coordinate of the end point.
:param y1: Y coordinate of the end point.
:type x0: float
:type y0: float
:type x1: float
:type y1: float
'''
def __init__(self, x0, y0, x1, y1):
pass
def get_linear_points(self):
'''Return this linear gradient’s endpoints.
:returns: A ``(x0, y0, x1, y1)`` tuple of floats.
'''
pass
| 3 | 2 | 7 | 1 | 4 | 2 | 1 | 2.22 | 1 | 1 | 0 | 0 | 2 | 0 | 2 | 14 | 35 | 6 | 9 | 4 | 6 | 20 | 7 | 4 | 4 | 1 | 3 | 0 | 2 |
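The endpoint accessor simply echoes the constructor arguments; a one-line check under the same assumptions as the earlier gradient sketches:

import cairocffi

linear = cairocffi.LinearGradient(0, 0, 0, 100)
assert linear.get_linear_points() == (0, 0, 0, 100)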
143,744 |
Kozea/cairocffi
|
cairocffi/surfaces.py
|
cairocffi.surfaces.ImageSurface
|
class ImageSurface(Surface):
"""Creates an image surface of the specified format and dimensions.
If ``data`` is not :obj:`None`
its initial contents will be used as the initial image contents;
you must explicitly clear the buffer,
using, for example, :meth:`Context.rectangle` and :meth:`Context.fill`
if you want it cleared.
.. note::
Currently only :class:`array.array` buffers are supported on PyPy.
Otherwise, the surface contents are all initially 0.
(Specifically, within each pixel, each color or alpha channel
belonging to format will be 0.
The contents of bits within a pixel,
but not belonging to the given format are undefined).
:param format: :ref:`FORMAT` string for the surface to create.
:param width: Width of the surface, in pixels.
:param height: Height of the surface, in pixels.
:param data:
Buffer supplied in which to write contents,
or :obj:`None` to create a new buffer.
:param stride:
The number of bytes between the start of rows
in the buffer as allocated.
This value should always be computed by :meth:`format_stride_for_width`
before allocating the data buffer.
If omitted but ``data`` is given,
:meth:`format_stride_for_width` is used.
:type format: str
:type width: int
:type height: int
:type stride: int
"""
def __init__(self, format, width, height, data=None, stride=None):
if data is None:
pointer = cairo.cairo_image_surface_create(format, width, height)
else:
if stride is None:
stride = self.format_stride_for_width(format, width)
address, length = from_buffer(data)
if length < stride * height:
raise ValueError('Got a %d bytes buffer, needs at least %d.'
% (length, stride * height))
pointer = cairo.cairo_image_surface_create_for_data(
ffi.cast('unsigned char*', address), format, width, height,
stride)
Surface.__init__(self, pointer, target_keep_alive=data)
@classmethod
def create_for_data(cls, data, format, width, height, stride=None):
"""Same as ``ImageSurface(format, width, height, data, stride)``.
Exists for compatibility with pycairo.
"""
return cls(format, width, height, data, stride)
@staticmethod
def format_stride_for_width(format, width):
"""
This method provides a stride value (byte offset between rows)
that will respect all alignment requirements
of the accelerated image-rendering code within cairo.
Typical usage will be of the form::
from cairocffi import ImageSurface
stride = ImageSurface.format_stride_for_width(format, width)
data = bytearray(stride * height)
surface = ImageSurface(format, width, height, data, stride)
:param format: A :ref:`FORMAT` string.
:param width: The desired width of the surface, in pixels.
:type format: str
:type width: int
:returns:
The appropriate stride to use given the desired format and width,
or -1 if either the format is invalid or the width too large.
"""
return cairo.cairo_format_stride_for_width(format, width)
@classmethod
def create_from_png(cls, source):
"""Decode a PNG file into a new image surface.
:param source:
A filename or
a binary mode :term:`file object` with a ``read`` method.
If you already have a byte string in memory,
use :class:`io.BytesIO`.
:returns: A new :class:`ImageSurface` instance.
"""
if hasattr(source, 'read'):
read_func = _make_read_func(source)
pointer = cairo.cairo_image_surface_create_from_png_stream(
read_func, ffi.NULL)
else:
pointer = cairo.cairo_image_surface_create_from_png(
_encode_filename(source))
self = object.__new__(cls)
Surface.__init__(self, pointer) # Skip ImageSurface.__init__
return self
def get_data(self):
"""Return the buffer pointing to the image’s pixel data,
encoded according to the surface’s :ref:`FORMAT` string.
A call to :meth:`~Surface.flush` is required before accessing the pixel
data to ensure that all pending drawing operations are finished.
A call to :meth:`~Surface.mark_dirty` is required
after the data is modified.
:returns: A read-write CFFI buffer object.
"""
return ffi.buffer(
cairo.cairo_image_surface_get_data(self._pointer),
self.get_stride() * self.get_height())
def get_format(self):
"""Return the :ref:`FORMAT` string of the surface."""
return cairo.cairo_image_surface_get_format(self._pointer)
def get_width(self):
"""Return the width of the surface, in pixels."""
return cairo.cairo_image_surface_get_width(self._pointer)
def get_height(self):
"""Return the width of the surface, in pixels."""
return cairo.cairo_image_surface_get_height(self._pointer)
def get_stride(self):
"""Return the stride of the image surface in bytes
(or 0 if surface is not an image surface).
The stride is the distance in bytes
from the beginning of one row of the image data
to the beginning of the next row.
"""
return cairo.cairo_image_surface_get_stride(self._pointer)
|
class ImageSurface(Surface):
'''Creates an image surface of the specified format and dimensions.
If ``data`` is not :obj:`None`
its initial contents will be used as the initial image contents;
you must explicitly clear the buffer,
using, for example, :meth:`Context.rectangle` and :meth:`Context.fill`
if you want it cleared.
.. note::
Currently only :class:`array.array` buffers are supported on PyPy.
Otherwise, the surface contents are all initially 0.
(Specifically, within each pixel, each color or alpha channel
belonging to format will be 0.
The contents of bits within a pixel,
but not belonging to the given format are undefined).
:param format: :ref:`FORMAT` string for the surface to create.
:param width: Width of the surface, in pixels.
:param height: Height of the surface, in pixels.
:param data:
Buffer supplied in which to write contents,
or :obj:`None` to create a new buffer.
:param stride:
The number of bytes between the start of rows
in the buffer as allocated.
This value should always be computed by :meth:`format_stride_for_width`
before allocating the data buffer.
If omitted but ``data`` is given,
:meth:`format_stride_for_width` is used.
:type format: str
:type width: int
:type height: int
:type stride: int
'''
def __init__(self, format, width, height, data=None, stride=None):
pass
@classmethod
def create_for_data(cls, data, format, width, height, stride=None):
'''Same as ``ImageSurface(format, width, height, data, stride)``.
Exists for compatibility with pycairo.
'''
pass
@staticmethod
def format_stride_for_width(format, width):
'''
This method provides a stride value (byte offset between rows)
that will respect all alignment requirements
of the accelerated image-rendering code within cairo.
Typical usage will be of the form::
from cairocffi import ImageSurface
stride = ImageSurface.format_stride_for_width(format, width)
data = bytearray(stride * height)
surface = ImageSurface(format, width, height, data, stride)
:param format: A :ref:`FORMAT` string.
:param width: The desired width of the surface, in pixels.
:type format: str
:type width: int
:returns:
The appropriate stride to use given the desired format and width,
or -1 if either the format is invalid or the width too large.
'''
pass
@classmethod
def create_from_png(cls, source):
'''Decode a PNG file into a new image surface.
:param source:
A filename or
a binary mode :term:`file object` with a ``read`` method.
If you already have a byte string in memory,
use :class:`io.BytesIO`.
:returns: A new :class:`ImageSurface` instance.
'''
pass
def get_data(self):
'''Return the buffer pointing to the image’s pixel data,
encoded according to the surface’s :ref:`FORMAT` string.
A call to :meth:`~Surface.flush` is required before accessing the pixel
data to ensure that all pending drawing operations are finished.
A call to :meth:`~Surface.mark_dirty` is required
after the data is modified.
:returns: A read-write CFFI buffer object.
'''
pass
def get_format(self):
'''Return the :ref:`FORMAT` string of the surface.'''
pass
def get_width(self):
'''Return the width of the surface, in pixels.'''
pass
def get_height(self):
'''Return the height of the surface, in pixels.'''
pass
def get_stride(self):
'''Return the stride of the image surface in bytes
(or 0 if surface is not an image surface).
The stride is the distance in bytes
from the beginning of one row of the image data
to the beginning of the next row.
'''
pass
| 13 | 9 | 11 | 1 | 5 | 5 | 1 | 1.71 | 1 | 1 | 0 | 0 | 6 | 0 | 9 | 34 | 146 | 25 | 45 | 18 | 32 | 77 | 33 | 15 | 23 | 4 | 2 | 2 | 13 |
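A sketch of the stride-then-buffer workflow described above, assuming cairocffi on CPython (where a bytearray can back the surface):

import cairocffi

width, height = 4, 4
stride = cairocffi.ImageSurface.format_stride_for_width(
    cairocffi.FORMAT_ARGB32, width)
data = bytearray(stride * height)

surface = cairocffi.ImageSurface(
    cairocffi.FORMAT_ARGB32, width, height, data, stride)
context = cairocffi.Context(surface)
context.set_source_rgb(1, 1, 1)
context.paint()

surface.flush()   # required before touching the pixel data
assert bytes(surface.get_data())[:4] != b'\x00\x00\x00\x00'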
143,745 |
Kozea/pygal
|
Kozea_pygal/pygal/graph/line.py
|
pygal.graph.line.Line
|
class Line(Graph):
"""Line graph class"""
def __init__(self, *args, **kwargs):
"""Set _self_close as False, it's True for Radar like Line"""
self._self_close = False
super(Line, self).__init__(*args, **kwargs)
@cached_property
def _values(self):
"""Getter for series values (flattened)"""
return [
val[1] for serie in self.series for val in
(serie.interpolated if self.interpolate else serie.points)
if val[1] is not None and (not self.logarithmic or val[1] > 0)
]
@cached_property
def _secondary_values(self):
"""Getter for secondary series values (flattened)"""
return [
val[1] for serie in self.secondary_series for val in
(serie.interpolated if self.interpolate else serie.points)
if val[1] is not None and (not self.logarithmic or val[1] > 0)
]
def _fill(self, values):
"""Add extra values to fill the line"""
zero = self.view.y(min(max(self.zero, self._box.ymin), self._box.ymax))
# Check to see if the data has been padded with "none's"
# Fill doesn't work correctly otherwise
end = len(values) - 1
while end > 0:
x, y = values[end]
if self.missing_value_fill_truncation == "either":
if x is not None and y is not None:
break
elif self.missing_value_fill_truncation == "x":
if x is not None:
break
elif self.missing_value_fill_truncation == "y":
if y is not None:
break
else:
raise ValueError(
"Invalid value ({}) for config key "
"'missing_value_fill_truncation';"
" Use 'x', 'y' or 'either'".format(
self.missing_value_fill_truncation
)
)
end -= 1
return ([(values[0][0], zero)] + values + [(values[end][0], zero)])
def line(self, serie, rescale=False):
"""Draw the line serie"""
serie_node = self.svg.serie(serie)
if rescale and self.secondary_series:
points = self._rescale(serie.points)
else:
points = serie.points
view_values = list(map(self.view, points))
if serie.show_dots:
for i, (x, y) in enumerate(view_values):
if None in (x, y):
continue
if self.logarithmic:
if points[i][1] is None or points[i][1] <= 0:
continue
if (serie.show_only_major_dots and self.x_labels
and i < len(self.x_labels)
and self.x_labels[i] not in self._x_labels_major):
continue
metadata = serie.metadata.get(i)
classes = []
if x > self.view.width / 2:
classes.append('left')
if y > self.view.height / 2:
classes.append('top')
classes = ' '.join(classes)
self._confidence_interval(
serie_node['overlay'], x, y, serie.values[i], metadata
)
dots = decorate(
self.svg,
self.svg.node(serie_node['overlay'], class_="dots"),
metadata
)
val = self._format(serie, i)
alter(
self.svg.transposable_node(
dots,
'circle',
cx=x,
cy=y,
r=serie.dots_size,
class_='dot reactive tooltip-trigger'
), metadata
)
self._tooltip_data(
dots, val, x, y, xlabel=self._get_x_label(i)
)
self._static_value(
serie_node, val, x + self.style.value_font_size,
y + self.style.value_font_size, metadata
)
if serie.stroke:
if self.interpolate:
points = serie.interpolated
if rescale and self.secondary_series:
points = self._rescale(points)
view_values = list(map(self.view, points))
if serie.fill:
view_values = self._fill(view_values)
if serie.allow_interruptions:
# view_values are in form [(x1, y1), (x2, y2)]. We
# need to split that into multiple sequences if a
# None is present here
sequences = []
cur_sequence = []
for x, y in view_values:
if y is None and len(cur_sequence) > 0:
# emit current subsequence
sequences.append(cur_sequence)
cur_sequence = []
elif y is None: # just discard
continue
else:
cur_sequence.append((x, y)) # append the element
if len(cur_sequence) > 0: # emit last possible sequence
sequences.append(cur_sequence)
else:
# plain vanilla rendering
sequences = [view_values]
if self.logarithmic:
for seq in sequences:
for ele in seq[::-1]:
y = points[seq.index(ele)][1]
if y is None or y <= 0:
del seq[seq.index(ele)]
for seq in sequences:
self.svg.line(
serie_node['plot'],
seq,
close=self._self_close,
class_='line reactive' +
(' nofill' if not serie.fill else '')
)
def _compute(self):
"""Compute y min and max and y scale and set labels"""
# X Labels
if self.horizontal:
self._x_pos = [
x / (self._len - 1) for x in range(self._len)
][::-1] if self._len != 1 else [.5] # Center if only one value
else:
self._x_pos = [
x / (self._len - 1) for x in range(self._len)
] if self._len != 1 else [.5] # Center if only one value
self._points(self._x_pos)
if self.include_x_axis:
# Y Label
self._box.ymin = min(self._min or 0, 0)
self._box.ymax = max(self._max or 0, 0)
else:
self._box.ymin = self._min
self._box.ymax = self._max
def _plot(self):
"""Plot the serie lines and secondary serie lines"""
for serie in self.series:
self.line(serie)
for serie in self.secondary_series:
self.line(serie, True)
|
class Line(Graph):
'''Line graph class'''
def __init__(self, *args, **kwargs):
'''Set _self_close as False, it's True for Radar like Line'''
pass
@cached_property
def _values(self):
'''Getter for series values (flattened)'''
pass
@cached_property
def _secondary_values(self):
'''Getter for secondary series values (flattened)'''
pass
def _fill(self, values):
'''Add extra values to fill the line'''
pass
def line(self, serie, rescale=False):
'''Draw the line serie'''
pass
def _compute(self):
'''Compute y min and max and y scale and set labels'''
pass
def _plot(self):
'''Plot the serie lines and secondary serie lines'''
pass
| 10 | 8 | 25 | 2 | 21 | 3 | 7 | 0.15 | 1 | 6 | 0 | 4 | 7 | 3 | 7 | 76 | 188 | 20 | 151 | 31 | 141 | 22 | 94 | 26 | 86 | 25 | 4 | 5 | 46 |
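A minimal pygal sketch exercising the Line class above; series names and values are illustrative:

import pygal

chart = pygal.Line(fill=True)
chart.add('Serie A', [1, 3, None, 5])   # None interrupts/pads the line
chart.add('Serie B', [2, 2, 4, 1])

svg = chart.render()                    # SVG document as bytes
assert b'<svg' in svg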
143,746 |
Kozea/pygal
|
Kozea_pygal/pygal/graph/horizontalstackedline.py
|
pygal.graph.horizontalstackedline.HorizontalStackedLine
|
class HorizontalStackedLine(HorizontalGraph, StackedLine):
"""Horizontal Stacked Line graph"""
def _plot(self):
"""Draw the lines in reverse order"""
for serie in self.series[::-1]:
self.line(serie)
for serie in self.secondary_series[::-1]:
self.line(serie, True)
|
class HorizontalStackedLine(HorizontalGraph, StackedLine):
'''Horizontal Stacked Line graph'''
def _plot(self):
'''Draw the lines in reverse order'''
pass
| 2 | 2 | 6 | 0 | 5 | 1 | 3 | 0.33 | 2 | 0 | 0 | 0 | 1 | 0 | 1 | 87 | 9 | 1 | 6 | 3 | 4 | 2 | 6 | 3 | 4 | 3 | 6 | 1 | 3 |
143,747 |
Kozea/pygal
|
Kozea_pygal/pygal/graph/horizontalstackedbar.py
|
pygal.graph.horizontalstackedbar.HorizontalStackedBar
|
class HorizontalStackedBar(HorizontalGraph, StackedBar):
"""Horizontal Stacked Bar graph"""
|
class HorizontalStackedBar(HorizontalGraph, StackedBar):
'''Horizontal Stacked Bar graph'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 85 | 2 | 0 | 1 | 1 | 0 | 1 | 1 | 1 | 0 | 0 | 6 | 0 | 0 |