Dataset schema (column | dtype | observed range):

| column | dtype | range |
|---|---|---|
| id | int64 | 0 to 843k |
| repository_name | string | lengths 7 to 55 |
| file_path | string | lengths 9 to 332 |
| class_name | string | lengths 3 to 290 |
| human_written_code | string | lengths 12 to 4.36M |
| class_skeleton | string | lengths 19 to 2.2M |
| total_program_units | int64 | 1 to 9.57k |
| total_doc_str | int64 | 0 to 4.2k |
| AvgCountLine | float64 | 0 to 7.89k |
| AvgCountLineBlank | float64 | 0 to 300 |
| AvgCountLineCode | float64 | 0 to 7.89k |
| AvgCountLineComment | float64 | 0 to 7.89k |
| AvgCyclomatic | float64 | 0 to 130 |
| CommentToCodeRatio | float64 | 0 to 176 |
| CountClassBase | float64 | 0 to 48 |
| CountClassCoupled | float64 | 0 to 589 |
| CountClassCoupledModified | float64 | 0 to 581 |
| CountClassDerived | float64 | 0 to 5.37k |
| CountDeclInstanceMethod | float64 | 0 to 4.2k |
| CountDeclInstanceVariable | float64 | 0 to 299 |
| CountDeclMethod | float64 | 0 to 4.2k |
| CountDeclMethodAll | float64 | 0 to 4.2k |
| CountLine | float64 | 1 to 115k |
| CountLineBlank | float64 | 0 to 9.01k |
| CountLineCode | float64 | 0 to 94.4k |
| CountLineCodeDecl | float64 | 0 to 46.1k |
| CountLineCodeExe | float64 | 0 to 91.3k |
| CountLineComment | float64 | 0 to 27k |
| CountStmt | float64 | 1 to 93.2k |
| CountStmtDecl | float64 | 0 to 46.1k |
| CountStmtExe | float64 | 0 to 90.2k |
| MaxCyclomatic | float64 | 0 to 759 |
| MaxInheritanceTree | float64 | 0 to 16 |
| MaxNesting | float64 | 0 to 34 |
| SumCyclomatic | float64 | 0 to 6k |

Each record below is shown as one `id | repository_name | file_path | class_name |` line, followed by the human_written_code column, a lone `|` separator, the class_skeleton column, and finally the 29 metric values (total_program_units through SumCyclomatic) on a single `|`-separated line.
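The metric columns appear to follow the Understand (SciTools) static-metric naming scheme (CountLineCode, AvgCyclomatic, MaxInheritanceTree, and so on). As a quick orientation, here is a minimal pandas sketch for querying such records; the Parquet filename is hypothetical and assumes the table has been exported locally:

```python
# Minimal sketch, assuming this table was exported to 'class_metrics.parquet'
# (hypothetical filename; any pandas-readable export works the same way).
import pandas as pd

df = pd.read_parquet("class_metrics.parquet")

# Example query: large classes with little commenting.
big_undocumented = df[(df["CountLineCode"] > 100) & (df["CommentToCodeRatio"] < 0.1)]
print(big_undocumented[["repository_name", "class_name", "CountLineCode"]].head())
```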
143,548 | Kortemme-Lab/klab | Kortemme-Lab_klab/klab/box_backup.py | klab.box_backup.OAuthConnector |
class OAuthConnector(boxsdk.OAuth2):
'''
Overrides the Box OAuth class with calls to the matching oauth2client Credentials functions
'''
def __init__(
self,
credentials
):
self._credentials = credentials
self._current_chunked_upload_abort_url = None
@property
def access_token(self):
"""
Get the current access token.
:return:
current access token
:rtype:
`unicode`
"""
return self._credentials.get_access_token().access_token
def get_authorization_url(self, redirect_url):
raise NotImplementedError('Not implemented')
def authenticate(self, auth_code):
"""
:return:
(access_token, refresh_token)
:rtype:
(`unicode`, `unicode`)
"""
return self.access_token, None
def refresh(self, access_token_to_refresh):
return self.access_token, None
def send_token_request(self, data, access_token, expect_refresh_token=True):
"""
:return:
The access token and refresh token.
:rtype:
(`unicode`, `unicode`)
"""
return self.access_token, None
def revoke(self):
"""
Revoke the authorization for the current access/refresh token pair.
"""
http = transport.get_http_object()
self._credentials.revoke(http)
|
class OAuthConnector(boxsdk.OAuth2):
'''
Overrides the Box OAuth class with calls to the matching oauth2client Credentials functions
'''
def __init__(
self,
credentials
):
pass
@property
def access_token(self):
'''
Get the current access token.
:return:
current access token
:rtype:
`unicode`
'''
pass
def get_authorization_url(self, redirect_url):
pass
def authenticate(self, auth_code):
'''
:return:
(access_token, refresh_token)
:rtype:
(`unicode`, `unicode`)
'''
pass
def refresh(self, access_token_to_refresh):
pass
def send_token_request(self, data, access_token, expect_refresh_token=True):
'''
:return:
The access token and refresh token.
:rtype:
(`unicode`, `unicode`)
'''
pass
def revoke(self):
'''
Revoke the authorization for the current access/refresh token pair.
'''
pass
| 9 | 5 | 6 | 0 | 3 | 3 | 1 | 1.19 | 1 | 1 | 0 | 0 | 7 | 2 | 7 | 7 | 52 | 6 | 21 | 15 | 9 | 25 | 17 | 11 | 9 | 1 | 1 | 0 | 7 |
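A usage sketch for the connector above: boxsdk accepts any OAuth2-compatible object, so the class can be handed straight to a Client. This assumes boxsdk and oauth2client are installed; the credentials file below is hypothetical.

```python
# Sketch only: 'box_credentials.json' is a hypothetical oauth2client Storage file.
import boxsdk
from oauth2client.file import Storage

credentials = Storage('box_credentials.json').get()  # oauth2client Credentials object
client = boxsdk.Client(OAuthConnector(credentials))  # token refresh is delegated to oauth2client
print(client.user().get().login)                     # simple smoke test of the connection
```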
143,549 | Kortemme-Lab/klab | Kortemme-Lab_klab/klab/bio/pymolmod/colors.py | klab.bio.pymolmod.colors.PyMOLStructure |
class PyMOLStructure(PyMOLStructureBase):
'''A simple structure-containing class to store parameters used to display a structure. Open to heavy modification as we add more
customization.'''
def __init__(self, pdb_object, structure_name, residues_of_interest = [], label_all_residues_of_interest = False, **kwargs):
'''The chain_seed_color kwarg can be either:
- a triple of R,G,B values e.g. [0.5, 1.0, 0.75] where each value is between 0.0 and 1.0;
- a hex string #RRGGBB e.g. #77ffaa;
- a name defined in the predefined dict above e.g. "aquamarine".
'''
self.pdb_object = pdb_object
self.structure_name = structure_name
self.add_residues_of_interest(residues_of_interest)
self.label_all_residues_of_interest = label_all_residues_of_interest
self.chain_colors = kwargs.get('chain_colors') or {}
# Set up per-chain colors
try:
if not self.chain_colors and kwargs.get('chain_seed_color'):
chain_seed_color = kwargs.get('chain_seed_color')
if isinstance(chain_seed_color, str):
chain_seed_color = str(chain_seed_color)
if chain_seed_color.startswith('#'):
if len(chain_seed_color) != 7:
chain_seed_color = None
else:
trpl = predefined.get(chain_seed_color)
chain_seed_color = None
if trpl:
chain_seed_color = mpl_colors.rgb2hex(trpl)
elif isinstance(chain_seed_color, list) and len(chain_seed_color) == 3:
chain_seed_color = mpl_colors.rgb2hex(chain_seed_color)
if chain_seed_color.startswith('#') and len(chain_seed_color) == 7:
# todo: We are moving between color spaces multiple times so are probably introducing artifacts due to rounding. Rewrite this to minimize this movement.
chain_seed_color = chain_seed_color[1:]
hsl_color = colorsys.rgb_to_hls(int(chain_seed_color[0:2], 16)/255.0, int(chain_seed_color[2:4], 16)/255.0, int(chain_seed_color[4:6], 16)/255.0)
chain_seed_hue = int(360.0 * hsl_color[0])
chain_seed_saturation = max(0.15, hsl_color[1]) # otherwise some colors e.g. near-black will not yield any alternate colors
chain_seed_lightness = max(0.15, hsl_color[2]) # otherwise some colors e.g. near-black will not yield any alternate colors
min_colors_in_wheel = 4 # choose at least 4 colors - this usually results in a wider variety of colors and prevents clashes e.g. given 2 chains in both mut and wt, wt seeded with blue, and mut seeded with yellow, we will get a clash
chain_ids = sorted(pdb_object.atom_sequences.keys())
# Choose complementary colors, respecting the original saturation and lightness values
chain_colors = ggplot_color_wheel(max(len(chain_ids), min_colors_in_wheel), start = chain_seed_hue, saturation_adjustment = None, saturation = chain_seed_saturation, lightness = chain_seed_lightness)
assert(len(chain_colors) >= len(chain_ids))
self.chain_colors = {}
for i in range(len(chain_ids)):
self.chain_colors[chain_ids[i]] = str(list(mpl_colors.hex2color('#' + chain_colors[i])))
# Force use of the original seed as this may have been altered above in the "= max(" statements
self.chain_colors[chain_ids[0]] = str(list(mpl_colors.hex2color('#' + chain_seed_color)))
except Exception as e:
print('An exception occurred setting the chain colors. Ignoring exception and resuming with default colors.')
print(str(e))
print(traceback.format_exc())
super(PyMOLStructure, self).__init__(
backbone_color = kwargs.get('backbone_color'), backbone_display = kwargs.get('backbone_display'),
sidechain_color = kwargs.get('sidechain_color'), sidechain_display = kwargs.get('sidechain_display'),
hetatm_color = kwargs.get('hetatm_color'), hetatm_display = kwargs.get('hetatm_display'),
visible = kwargs.get('visible', True),
)
def add_residues_of_interest(self, residues_of_interest):
# todo: we should check the residue IDs against the PDB object to make sure that the coordinates exist
# For now, do a simple assignment
if residues_of_interest:
self.residues_of_interest = residues_of_interest
|
class PyMOLStructure(PyMOLStructureBase):
'''A simple structure-containing class to store parameters used to display a structure. Open to heavy modification as we add more
customization.'''
def __init__(self, pdb_object, structure_name, residues_of_interest = [], label_all_residues_of_interest = False, **kwargs):
'''The chain_seed_color kwarg can be either:
- a triple of R,G,B values e.g. [0.5, 1.0, 0.75] where each value is between 0.0 and 1.0;
- a hex string #RRGGBB e.g. #77ffaa;
- a name defined in the predefined dict above e.g. "aquamarine".
'''
pass
def add_residues_of_interest(self, residues_of_interest):
pass
| 3 | 2 | 34 | 5 | 24 | 9 | 6 | 0.41 | 1 | 6 | 0 | 0 | 2 | 5 | 2 | 3 | 77 | 15 | 49 | 19 | 46 | 20 | 42 | 18 | 39 | 10 | 2 | 5 | 12 |
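The chain-coloring logic above converts a seed color to HLS, clamps near-black components, and then asks ggplot_color_wheel (internal to klab) for evenly spaced hues. A self-contained sketch of the same idea using only the standard library; the helper name and exact spacing are illustrative, not klab's implementation:

```python
import colorsys

def complementary_colors(seed_hex, n):
    """Evenly space n hues around the wheel, starting from a '#RRGGBB' seed."""
    seed_hex = seed_hex.lstrip('#')
    r, g, b = (int(seed_hex[i:i + 2], 16) / 255.0 for i in (0, 2, 4))
    h, l, s = colorsys.rgb_to_hls(r, g, b)       # note: colorsys returns (h, l, s)
    l, s = max(0.15, l), max(0.15, s)            # clamp near-black seeds, as above
    colors = []
    for i in range(n):
        rgb = colorsys.hls_to_rgb((h + i / float(n)) % 1.0, l, s)
        colors.append('#%02x%02x%02x' % tuple(int(round(255 * c)) for c in rgb))
    return colors

print(complementary_colors('#77ffaa', 4))  # seed hue plus three complementary chain colors
```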
143,550 | Kortemme-Lab/klab | Kortemme-Lab_klab/klab/bio/smallmolecule.py | klab.bio.smallmolecule.Molecule |
class Molecule:
def __init__(self, pdb_path, molecule_name, chain = None):
self.atom_types = []
self.atom_type_nums = []
self.coords = []
self.molecule_name = molecule_name
self.chain = chain
self.lines = []
self.resnum = None
with open(pdb_path, 'r') as f:
for line in f:
residue_name = line[17:20].strip()
if line.startswith('HETATM') and residue_name == self.molecule_name:
chain = line[21]
if self.chain and self.chain != chain:
continue
resnum = int( line[22:26].strip() )
if self.resnum == None:
self.resnum = resnum
else:
assert( resnum == self.resnum )
name = line[12:16].strip()
self.atom_types.append( name[0] )
self.atom_type_nums.append( name[1:] )
x = float( line[30:38].strip() )
y = float( line[38:46].strip() )
z = float( line[46:54].strip() )
# note: 'names' is a derived property (atom_types + atom_type_nums); appending to it was a no-op
self.coords.append( (x, y, z) )
self.lines.append(line)
def lines_to_write(self):
coords_as_strings = self.coords_as_strings
lines = []
for i, line in enumerate(self.lines):
if self.chain:
chain = self.chain
else:
chain = line[21]
lines.append(
line[:21] + chain +
('%d' % self.resnum).rjust(4) +
line[26:30] +
coords_as_strings[i][0] +
coords_as_strings[i][1] +
coords_as_strings[i][2] +
line[54:]
)
return lines
def write_to_file(self, pdb_path, file_mode = 'w'):
with open(pdb_path, file_mode) as f:
for line in self.lines_to_write():
f.write(line)
@property
def coords_as_strings(self):
ret = []
for coords in self.coords:
x, y, z = coords
x = '%.3f' % x
y = '%.3f' % y
z = '%.3f' % z
x = x.rjust(8)
y = y.rjust(8)
z = z.rjust(8)
ret.append( (x, y, z) )
return ret
def get_coords_for_name(self, atom_name):
index = self.get_index_for_name(atom_name)
return self.coords[index]
def set_coords_for_name(self, atom_name, new_coords):
assert( len(new_coords) == 3 )
index = self.get_index_for_name(atom_name)
self.coords[index] = new_coords
def get_index_for_name(self, atom_name):
names = self.names
names_set = set(names)
assert( len(names) == len(names_set) )
index = names.index(atom_name)
return index
@property
def names(self):
return [ '%s%s' % (x,y) for x,y in zip(self.atom_types, self.atom_type_nums) ]
@property
def names_set(self):
s = set()
for name in self.names:
assert( name not in s )
s.add(name)
return s
def align_to_other(self, other, mapping, self_root_pair, other_root_pair = None):
'''
Root atoms are the atoms off of which all other unmapped atoms are mapped.
'''
if other_root_pair == None:
other_root_pair = self_root_pair
assert( len(self_root_pair) == len(other_root_pair) )
unmoved_atom_names = []
new_coords = [ None for x in range( len(self_root_pair) ) ]
for atom in self.names:
if atom in self_root_pair:
i = self_root_pair.index(atom)
assert( new_coords[i] == None )
new_coords[i] = self.get_coords_for_name(atom)
if atom in mapping:
other_atom = mapping[atom]
self.set_coords_for_name( atom, other.get_coords_for_name(other_atom) )
else:
unmoved_atom_names.append(atom)
# Move unmoved coordinates after all other atoms have been moved (so that
# references will have been moved already)
if None in new_coords:
print(new_coords)
assert( None not in new_coords )
ref_coords = [other.get_coords_for_name(x) for x in other_root_pair]
# Calculate translation and rotation matrices
U, new_centroid, ref_centroid = calc_rotation_translation_matrices( ref_coords, new_coords )
for atom in unmoved_atom_names:
original_coord = self.get_coords_for_name(atom)
self.set_coords_for_name( atom, rotate_and_translate_coord(original_coord, U, new_centroid, ref_centroid) )
self.chain = other.chain
def replace_in_pdb(self, pdb_path, name_to_replace):
# First pass to read lines, remove old molecule, and find position to insert
lines = []
position_to_insert = None
with open(pdb_path, 'r') as f:
for line in f:
residue_name = line[17:20].strip()
if len(line) >= 22:
chain = line[21]
else:
chain = None
if line.startswith('HETATM') and residue_name == name_to_replace and ((self.chain == None) or (chain == self.chain)):
if not position_to_insert:
position_to_insert = len(lines)
self.resnum = int( line[22:26].strip() ) # Change resnum to match molecule to replace
elif line.startswith('HETNAM'):
lines.append( line.replace(name_to_replace, self.molecule_name) )
elif not line.startswith('CONECT'):
lines.append(line)
assert( position_to_insert != None )
# Insert lines for this molecule
lines = lines[:position_to_insert] + self.lines_to_write() + lines[position_to_insert:]
# Renumber atoms
lines = renumber_atoms(lines)
# Now overwrite
with open(pdb_path, 'w') as f:
for line in lines:
f.write(line)
|
class Molecule:
def __init__(self, pdb_path, molecule_name, chain = None):
pass
def lines_to_write(self):
pass
def write_to_file(self, pdb_path, file_mode = 'w'):
pass
@property
def coords_as_strings(self):
pass
def get_coords_for_name(self, atom_name):
pass
def set_coords_for_name(self, atom_name, new_coords):
pass
def get_index_for_name(self, atom_name):
pass
@property
def names(self):
pass
@property
def names_set(self):
pass
def align_to_other(self, other, mapping, self_root_pair, other_root_pair = None):
'''
Root atoms are the atoms off of which all other unmapped atoms are mapped.
'''
pass
def replace_in_pdb(self, pdb_path, name_to_replace):
pass
| 15 | 1 | 14 | 1 | 12 | 1 | 3 | 0.08 | 0 | 6 | 0 | 0 | 11 | 7 | 11 | 11 | 173 | 26 | 137 | 60 | 122 | 11 | 120 | 54 | 108 | 8 | 0 | 4 | 33 |
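A usage sketch for Molecule: it slices fixed-width HETATM records (atom name, residue name, residue number, x/y/z), so any PDB file containing the named het group works. 'input.pdb' and the residue name 'LG1' below are hypothetical.

```python
# Usage sketch: 'input.pdb' and residue name 'LG1' are hypothetical; the file
# just needs HETATM records for that residue (optionally restricted to chain A).
mol = Molecule('input.pdb', 'LG1', chain = 'A')
print(mol.names)                                        # atom names, e.g. ['C1', 'O1', ...]

x, y, z = mol.get_coords_for_name(mol.names[0])
mol.set_coords_for_name(mol.names[0], (x + 1.0, y, z))  # shift one atom by 1 Angstrom
mol.write_to_file('shifted.pdb')
```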
143,551 | Kortemme-Lab/klab | Kortemme-Lab_klab/klab/chainsequence.py | klab.chainsequence.ChainSequences |
class ChainSequences(UserDict.DictMixin):
"""A class for holding PDB chain sequences"""
def __init__(self):
self.seqs = {}
self.chains = []
def __getitem__(self, key):
if type(key) == int:
return self.seqs[self.chains[key]]
else:
return self.seqs[key]
def __setitem__(self, key, value):
if type(key) == int:
self.seqs[self.chains[key]] = value
else:
self.seqs[key] = value
if key not in self.chains:
self.chains += [key]
def __delitem__(self, key):
if type(key) == int:
del self.seqs[self.chains[key]]
del self.chains[key]
else:
del self.seqs[key]
self.chains.remove(key)
def keys(self):
return self.chains
def parse_seqres(self, pdb):
"""Parse the SEQRES entries into the object"""
seqresre = re.compile("SEQRES")
seqreslines = [line for line in pdb.lines if seqresre.match(line)]
for line in seqreslines:
chain = line[11]
resnames = line[19:70].strip()
self.setdefault(chain, [])
self[chain] += resnames.split()
def parse_atoms(self, pdb):
"""Parse the ATOM entries into the object"""
atomre = re.compile("ATOM")
atomlines = [line for line in pdb.lines if atomre.match(line)]
chainresnums = {}
for line in atomlines:
chain = line[21]
resname = line[17:20]
resnum = line[22:27]
#print resnum
chainresnums.setdefault(chain, [])
if resnum in chainresnums[chain]:
assert self[chain][chainresnums[chain].index(resnum)] == resname
else:
if resnum[-1] == ' ':
self.setdefault(chain, [])
self[chain] += [resname]
chainresnums[chain] += [resnum]
return chainresnums
def seqres_lines(self):
"""Generate SEQRES lines representing the contents"""
lines = []
for chain in list(self.keys()):
seq = self[chain]
serNum = 1
startidx = 0
while startidx < len(seq):
endidx = min(startidx+13, len(seq))
lines += ["SEQRES %2i %s %4i %s\n" % (serNum, chain, len(seq), " ".join(seq[startidx:endidx]))]
serNum += 1
startidx += 13
return lines
def replace_seqres(self, pdb, update_atoms = True):
"""Replace SEQRES lines with a new sequence, optionally removing
mutated sidechains"""
newpdb = PDB()
inserted_seqres = False
entries_before_seqres = set(["HEADER", "OBSLTE", "TITLE", "CAVEAT", "COMPND", "SOURCE",
"KEYWDS", "EXPDTA", "AUTHOR", "REVDAT", "SPRSDE", "JRNL",
"REMARK", "DBREF", "SEQADV"])
mutated_resids = {}
if update_atoms:
old_seqs = ChainSequences()
chainresnums = old_seqs.parse_atoms(pdb)
assert list(self.keys()) == list(old_seqs.keys())
for chain in list(self.keys()):
assert len(self[chain]) == len(old_seqs[chain])
for i in range(len(self[chain])):
if self[chain][i] != old_seqs[chain][i]:
resid = chain + chainresnums[chain][i]
mutated_resids[resid] = self[chain][i]
for line in pdb.lines:
entry = line[0:6]
if (not inserted_seqres) and entry not in entries_before_seqres:
inserted_seqres = True
newpdb.lines += self.seqres_lines()
if update_atoms and entry == "ATOM ":
resid = line[21:27]
atom = line[12:16].strip()
if resid not in mutated_resids:
newpdb.lines += [line]
else:
newpdb.lines += [line[:17] + mutated_resids[resid] + line[20:]]
elif entry != "SEQRES":
newpdb.lines += [line]
if update_atoms:
newpdb.remove_nonbackbone_atoms(list(mutated_resids.keys()))
return newpdb
|
class ChainSequences(UserDict.DictMixin):
'''A class for holding PDB chain sequences'''
def __init__(self):
pass
def __getitem__(self, key):
pass
def __setitem__(self, key, value):
pass
def __delitem__(self, key):
pass
def keys(self):
pass
def parse_seqres(self, pdb):
'''Parse the SEQRES entries into the object'''
pass
def parse_atoms(self, pdb):
'''Parse the ATOM entries into the object'''
pass
def seqres_lines(self):
'''Generate SEQRES lines representing the contents'''
pass
def replace_seqres(self, pdb, update_atoms = True):
'''Replace SEQRES lines with a new sequence, optionally removing
mutated sidechains'''
pass
| 10 | 5 | 14 | 2 | 11 | 1 | 3 | 0.07 | 1 | 5 | 0 | 0 | 9 | 2 | 9 | 9 | 134 | 31 | 96 | 40 | 86 | 7 | 88 | 40 | 78 | 11 | 1 | 4 | 29 |
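parse_seqres only touches the `.lines` attribute of the object passed in, so a tiny stand-in is enough to exercise the class. A sketch with one illustrative SEQRES record (note the class targets Python 2, where UserDict.DictMixin still exists):

```python
# parse_seqres only reads pdb.lines, so a minimal stand-in object suffices.
class FakePDB(object):
    lines = ["SEQRES   1 A    8  MET LYS ALA ILE PHE VAL LEU LYS\n"]

seqs = ChainSequences()
seqs.parse_seqres(FakePDB())
print(seqs['A'])              # ['MET', 'LYS', 'ALA', 'ILE', 'PHE', 'VAL', 'LEU', 'LYS']
print(seqs[0] is seqs['A'])   # True: integer keys index chains in insertion order
```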
143,552 | Kortemme-Lab/klab | Kortemme-Lab_klab/klab/bio/sifts.py | klab.bio.sifts.SIFTSResidue |
class SIFTSResidue(object):
# Same as AtomSite but no x, y, z data
fields = [
# Field name SIFTS attribute name Values Expected type that we store
# SEQRES / FASTA fields
'PDBeChainID', # residue PDB chain ID Character
'PDBeResidueID', # residue PDBe sequence index Integer
'PDBeResidue3AA', # residue PDBe residue type String (length=3)
# ATOM record fields
'PDBChainID', # dbChainId PDB chain ID Character
'PDBResidueID', # dbResNum PDB residue ID String (alphanumeric)
'PDBResidue3AA', # dbResName PDB residue type String (length=3)
'WasNotObserved', # residueDetail.Annotation Not_Observed Boolean
# UniProt record fields
'UniProtAC', # dbAccessionId AC e.g. "P00734" String (length=6)
'UniProtResidueIndex', # dbResNum UniProt/UniParc sequence index Integer
'UniProtResidue1AA', # dbResName UniProt/UniParc residue type Character
]
def __init__(self, PDBeChainID, PDBeResidueID, PDBeResidue3AA):
self.clear()
self._add_pdbe_residue(PDBeChainID, PDBeResidueID, PDBeResidue3AA)
def clear(self):
d = self.__dict__
for f in self.__class__.fields:
d[f] = None
d['WasNotObserved'] = False
def add_pdb_residue(self, PDBChainID, PDBResidueID, PDBResidue3AA):
assert(not(self.PDBChainID))
assert(len(PDBChainID) == 1)
assert(not(self.PDBResidueID))
assert(PDBResidueID.isalnum() or int(PDBResidueID) != None)
assert(not(self.PDBResidue3AA))
assert(len(PDBResidue3AA) == 3)
self.PDBChainID, self.PDBResidueID, self.PDBResidue3AA = PDBChainID, PDBResidueID, PDBResidue3AA
def _add_pdbe_residue(self, PDBeChainID, PDBeResidueID, PDBeResidue3AA):
assert(not(self.PDBeChainID))
assert(len(PDBeChainID) == 1)
assert(not(self.PDBeResidueID))
assert(PDBeResidueID.isdigit())
assert(not(self.PDBeResidue3AA))
assert(len(PDBeResidue3AA) == 3)
self.PDBeChainID, self.PDBeResidueID, self.PDBeResidue3AA = PDBeChainID, int(PDBeResidueID), PDBeResidue3AA
def add_uniprot_residue(self, UniProtAC, UniProtResidueIndex, UniProtResidue1AA):
assert(not(self.UniProtAC))
assert(len(UniProtAC) == 6)
assert(not(self.UniProtResidueIndex))
assert(UniProtResidueIndex.isdigit())
assert(not(self.UniProtResidue1AA))
assert(len(UniProtResidue1AA) == 1)
self.UniProtAC, self.UniProtResidueIndex, self.UniProtResidue1AA = UniProtAC, int(UniProtResidueIndex), UniProtResidue1AA
def has_pdb_to_uniprot_mapping(self):
return self.PDBChainID and self.UniProtAC
def get_pdb_residue_id(self):
d = self.__dict__
if d['PDBResidueID'].isdigit():
residue_identifier = '%s%s ' % (d['PDBChainID'], str(d['PDBResidueID']).rjust(4))
else:
residue_identifier = '%s%s' % (d['PDBChainID'], str(d['PDBResidueID']).rjust(5))
assert(len(residue_identifier) == 6)
return residue_identifier
def __repr__(self):
# For debugging
return '\n'.join([('%s : %s' % (f.ljust(23), self.__dict__[f])) for f in self.__class__.fields if self.__dict__[f] != None])
|
class SIFTSResidue(object):
def __init__(self, PDBeChainID, PDBeResidueID, PDBeResidue3AA):
pass
def clear(self):
pass
def add_pdb_residue(self, PDBChainID, PDBResidueID, PDBResidue3AA):
pass
def _add_pdbe_residue(self, PDBeChainID, PDBeResidueID, PDBeResidue3AA):
pass
def add_uniprot_residue(self, UniProtAC, UniProtResidueIndex, UniProtResidue1AA):
pass
def has_pdb_to_uniprot_mapping(self):
pass
def get_pdb_residue_id(self):
pass
def __repr__(self):
pass
| 9 | 0 | 7 | 2 | 6 | 0 | 1 | 0.28 | 1 | 2 | 0 | 0 | 8 | 9 | 8 | 8 | 84 | 21 | 57 | 17 | 48 | 16 | 45 | 17 | 36 | 2 | 1 | 1 | 10 |
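An illustrative construction of the record above; the identifiers are made up, and each add_* call enforces its format with assertions (single-character chain IDs, six-character UniProt ACs, and so on):

```python
# Illustrative construction; every value below is made up for the sketch.
res = SIFTSResidue('A', '42', 'LYS')              # SEQRES/PDBe coordinates
res.add_pdb_residue('A', '42', 'LYS')             # ATOM-record coordinates
res.add_uniprot_residue('P00734', '310', 'K')     # UniProt coordinates

print(repr(res.get_pdb_residue_id()))             # 'A  42 ' -- always 6 characters
print(bool(res.has_pdb_to_uniprot_mapping()))     # True once both sides are set
```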
143,553 | Kortemme-Lab/klab | Kortemme-Lab_klab/klab/bio/sifts.py | klab.bio.sifts.NoSIFTSPDBUniParcMapping |
class NoSIFTSPDBUniParcMapping(Exception): pass
|
class NoSIFTSPDBUniParcMapping(Exception):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
143,554 | Kortemme-Lab/klab | Kortemme-Lab_klab/klab/bio/pymolmod/psebuilder.py | klab.bio.pymolmod.psebuilder.BatchBuilder |
class BatchBuilder(object):
def __init__(self, pymol_executable = 'pymol'):
self.visualization_shell = 6
self.visualization_pymol = pymol_executable # the command used to run pymol - change as necessary for Mac OS X
self.PSE_files = []
self.PSE_scripts = []
def run(self, builder_class, list_of_pdb_containers, settings = {}):
PSE_files = []
PSE_scripts = []
for pdb_containers in list_of_pdb_containers:
b = builder_class(pdb_containers, settings)
b.visualization_shell = self.visualization_shell
b.visualization_pymol = self.visualization_pymol
b.run()
PSE_files.append(b.PSE)
PSE_scripts.append(b.script)
self.PSE_files = PSE_files
self.PSE_scripts = PSE_scripts
return PSE_files
|
class BatchBuilder(object):
def __init__(self, pymol_executable = 'pymol'):
pass
def run(self, builder_class, list_of_pdb_containers, settings = {}):
pass
| 3 | 0 | 10 | 1 | 9 | 1 | 2 | 0.05 | 1 | 0 | 0 | 0 | 2 | 4 | 2 | 2 | 23 | 4 | 19 | 11 | 16 | 1 | 19 | 11 | 16 | 2 | 1 | 1 | 3 |
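A sketch of driving a batch build with one of the concrete builders that appear further down this table; the PDB filenames and residue IDs are hypothetical, and PyMOL must be callable as 'pymol':

```python
# Sketch only: scaffold.pdb/model.pdb and the residue IDs are hypothetical;
# PDBContainer and ScaffoldModelDesignBuilder appear further down this table.
pdb_containers = PDBContainer.from_filename_triple([
    ('Scaffold', 'scaffold.pdb', ['A  12 ']),
    ('Model',    'model.pdb',    ['A  12 ']),
])

batch = BatchBuilder(pymol_executable = 'pymol')
pse_files = batch.run(ScaffoldModelDesignBuilder, [pdb_containers])
# Each element of pse_files holds the bytes of one generated PyMOL session.
```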
143,555 | Kortemme-Lab/klab | Kortemme-Lab_klab/klab/bio/pymolmod/multi_structure_builder.py | klab.bio.pymolmod.multi_structure_builder.MultiStructureBuilder |
class MultiStructureBuilder(PyMOLSessionBuilder):
def __init__(self, structures, settings = {}, rootdir = '/tmp'):
super(MultiStructureBuilder, self).__init__(structures, settings, rootdir)
self.structures = self.pdb_containers
def _create_input_files(self):
for s in self.structures:
write_file(self._filepath('{0}.pdb'.format(s.structure_name)), str(s.pdb_object))
def _add_preamble(self):
self.script.append("cd %(outdir)s" % self.__dict__)
def _add_load_section(self):
self.script.append("### Load the structures")
for s in self.structures:
self.script.append('load {0}.pdb, {0}'.format(s.structure_name))
def _add_view_settings_section(self):
self.script.append('''
# Set general view options and hide waters
viewport 1200,800
hide eve
remove resn hoh
bg_color %(global.background-color)s
''' % self.color_scheme)
def _add_generic_chain_settings_section(self):
self.script.append('''
# Set generic chain and HETATM view options
show cartoon
# color by chain
util.cbc
# Hide selenomethionines and selenocysteines
hide sticks, resn CSE+SEC+MSE
# Color atoms according to type
util.cnc
set cartoon_side_chain_helper
set cartoon_rect_length, 0.9
set cartoon_oval_length, 0.9
set stick_radius, 0.2
''')
def _add_specific_chain_settings_section(self):
# todo: use s.backbone_display to get 'cartoon' etc.
for s in self.structures:
self.script.append('''
# {0} display
hide lines, {0}
hide ribbon, {0}
show {1}, {0}
color {2}, {0}
'''.format(s.structure_name, s.backbone_display or self.display_scheme['GenericProtein'].backbone_display, s.backbone_color or self.display_scheme['GenericProtein'].backbone_color))
for c, clr in s.chain_colors.items():
self.script.append('set_color {0}_{1} = {2}'.format(s.structure_name, c, clr))
self.script.append('color {0}_{1}, {0} and chain {1}'.format(s.structure_name, c))
if not s.visible:
self.script.append('disable {0}\n'.format(s.structure_name))
def _add_superimposition_section(self):
for i in range(1, len(self.structures)):
self.script.append('''
# Superimpose the structures on backbone atoms
align {0} and name n+ca+c+o, {1} and name n+ca+c+o
#super {0}, {1}'''.format(self.structures[i].structure_name, self.structures[0].structure_name))
def _add_orient_view_section(self):
pass
def _add_residue_highlighting_section(self):
for s in self.structures:
residue_selection = '{0} and {1}'.format(s.structure_name, create_pymol_selection_from_PDB_residue_ids(s.residues_of_interest))
self.script.append('''
### {0} objects ###
# {0} residues of interest
has_rois = cmd.count_atoms('{1}') > 0
if has_rois: cmd.select('{0}_roi_s', '{1}');
if has_rois: cmd.create('{0}_roi', '{1}');
if has_rois: cmd.show('{2}', '{0}_roi')
if has_rois: cmd.color('{3}', '{0}_roi')
'''.format(s.structure_name, residue_selection, s.sidechain_display or self.display_scheme['GenericProtein'].sidechain_display, s.sidechain_color or self.display_scheme['GenericProtein'].sidechain_color))
def _add_hetatm_highlighting_section(self):
for s in self.structures:
self.script.append('''
### {0} HETATMs ###
has_hetatms = cmd.count_atoms('{0} and het and !(resn CSE+SEC+MSE)') > 0
if has_hetatms: cmd.create('{0}_HETATMs', '{0} and het and !(resn CSE+SEC+MSE)');
if has_hetatms: cmd.show('{1}', '{0}_HETATMs')
if has_hetatms: cmd.disable('{0}_HETATMs')
if has_hetatms: cmd.create('spheres_{0}_HETATMs', '{0} and het and !(resn CSE+SEC+MSE)');
if has_hetatms: cmd.show('spheres', 'spheres_{0}_HETATMs')
if has_hetatms: cmd.disable('spheres_{0}_HETATMs')
if has_hetatms: cmd.color('{2}', '{0}_roi')
'''.format(s.structure_name, s.hetatm_display or self.display_scheme['GenericProtein'].hetatm_display, s.hetatm_color or self.display_scheme['GenericProtein'].hetatm_color))
def _add_residue_label_section(self):
# Note: This only creates one label per residue position.
# This will work fine for mutant/wildtype ensembles but not in general.
# Use PyMOLStructure.label_all_residues_of_interest to fix this when necessary.
residue_labels = {}
self.script.append('\nset label_color, black\n')
for s in self.structures:
for roi in s.residues_of_interest:
residue_labels[roi] = s
for roi, s in residue_labels.items():
self.script.append('\nlabel n. O and {0} and chain {1} and i. {2}, "{1}{2}"\n'.format(s.structure_name, roi[0], roi[1:].strip()))
def _add_raytracing_section(self):
self.script.append('''
# Atom coloring
select none
util.cnc
# Set lighting
set two_sided_lighting, on
''')
def _add_postamble(self):
self.script.append('''
# Show only polar hydrogens
hide (hydro)
# Set zoom
zoom
# Re-order the objects in the right pane
#order *,yes
''')
for s in self.structures:
self.script.append('''order {0}_roi, location=bottom'''.format(s.structure_name))
for s in self.structures:
self.script.append('''order spheres_{0}_HETATMs, location=bottom'''.format(s.structure_name))
self.script.append('order input, location=top')
self.script.append('''
save session.pse
quit
''')
def _create_script(self):
self.script = []
self._add_preamble()
self._add_load_section()
self._add_view_settings_section()
self._add_generic_chain_settings_section()
self._add_specific_chain_settings_section()
self._add_superimposition_section()
self._add_orient_view_section()
self._add_residue_highlighting_section()
self._add_hetatm_highlighting_section()
self._add_residue_label_section()
self._add_raytracing_section()
self._add_postamble()
self.script = '\n'.join(self.script)
|
class MultiStructureBuilder(PyMOLSessionBuilder):
def __init__(self, structures, settings = {}, rootdir = '/tmp'):
pass
def _create_input_files(self):
pass
def _add_preamble(self):
pass
def _add_load_section(self):
pass
def _add_view_settings_section(self):
pass
def _add_generic_chain_settings_section(self):
pass
def _add_specific_chain_settings_section(self):
pass
def _add_superimposition_section(self):
pass
def _add_orient_view_section(self):
pass
def _add_residue_highlighting_section(self):
pass
def _add_hetatm_highlighting_section(self):
pass
def _add_residue_label_section(self):
pass
def _add_raytracing_section(self):
pass
def _add_postamble(self):
pass
def _create_script(self):
pass
| 16 | 0 | 11 | 2 | 8 | 1 | 2 | 0.19 | 1 | 3 | 0 | 0 | 15 | 2 | 15 | 22 | 189 | 53 | 116 | 30 | 100 | 22 | 70 | 30 | 54 | 4 | 2 | 2 | 28 |
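A wiring sketch for the class above, combining it with PyMOLStructure from earlier in this table; pdb_wt and pdb_mut stand in for klab PDB objects and are hypothetical:

```python
# Wiring sketch: pdb_wt and pdb_mut are hypothetical klab PDB objects;
# PyMOLStructure is the colors.py class shown earlier in this table.
wt  = PyMOLStructure(pdb_wt,  'wildtype', residues_of_interest = ['A  42 '])
mut = PyMOLStructure(pdb_mut, 'mutant',   residues_of_interest = ['A  42 '])

builder = MultiStructureBuilder([wt, mut], settings = {}, rootdir = '/tmp')
builder.run()                          # run() is inherited from PyMOLSessionBuilder
with open('session.pse', 'wb') as f:
    f.write(builder.PSE)               # PSE holds the session bytes on success
```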
143,556 | Kortemme-Lab/klab | Kortemme-Lab_klab/klab/bio/pymolmod/loop_removal.py | klab.bio.pymolmod.loop_removal.LoopRemovalBuilder |
class LoopRemovalBuilder(PyMOLSessionBuilder):
def __init__(self, pdb_containers, settings = {}, rootdir = '/tmp'):
super(LoopRemovalBuilder, self).__init__(pdb_containers, settings, rootdir)
main_label = settings.get('Main', 'Main')
loop_label = settings.get('Loop', 'Loop')
self.main_label = main_label
self.loop_label = loop_label
self.MainStructure = pdb_containers.get(main_label)
self.Loop = pdb_containers[loop_label]
self.color_scheme = loops_color_scheme
def _create_input_files(self):
write_file(self._filepath('main.pdb'), self.MainStructure.pdb_contents)
write_file(self._filepath('loop.pdb'), self.Loop.pdb_contents)
def _add_preamble(self):
self.script.append("cd %(outdir)s" % self.__dict__)
def _add_load_section(self):
self.script.append("### Load the structures")
self.script.append("load main.pdb, {0}".format(self.main_label))
self.script.append("load loop.pdb, {0}".format(self.loop_label))
def _add_view_settings_section(self):
self.script.append('''
# Set general view options and hide waters
viewport 1200,800
hide eve
remove resn hoh
bg_color %(global.background-color)s
''' % self.color_scheme)
def _add_generic_chain_settings_section(self):
self.script.append('''
# Set generic chain and HETATM view options
show cartoon
util.cbc
# Hide selenomethionines and selenocysteines
hide sticks, resn CSE+SEC+MSE
util.cnc
set cartoon_side_chain_helper
set cartoon_rect_length, 0.9
set cartoon_oval_length, 0.9
set stick_radius, 0.2
''')
def _add_specific_chain_settings_section(self):
self.script.append('''
# Main structure display
color {0}, {1}
# Loop display
hide lines, {3}
hide ribbon, {3}
show car, {3}
show sticks, {3}
color {2}, {3}
'''.format(self.color_scheme['Main.bb'], self.main_label, self.color_scheme['LoopShell.bb'], self.loop_label))
def _add_orient_view_section(self):
pass
def _add_main_structure_view_section(self):
self.script.append('''
# Main structure view options
hide lines, {0}
hide ribbon, {0}
show car, {0}
show sticks, {0}
util.cbc Scaffold'''.format(self.main_label))
def _add_residue_highlighting_section(self):
loop_label = self.loop_label
loop_selection = '{0} and ({1})'.format(self.loop_label, create_pymol_selection_from_PDB_residue_ids(self.Loop.residues_of_interest))
loop_color = self.color_scheme['Loop.bb']
self.script.append('''
### Loop objects ###
show car, %(loop_selection)s
show sticks, %(loop_selection)s
color %(loop_color)s, %(loop_selection)s
''' % vars())
def _add_raytracing_section(self):
self.script.append('''
# Atom coloring
select none
util.cnc
# Set lighting
set two_sided_lighting, on
''')
def _add_postamble(self):
self.script.append('''
# Show only polar hydrogens
hide (hydro)
# Set zoom
zoom
# Re-order the objects in the right pane
order *,yes
order {0}_residues_s, location=bottom
save session.pse
quit
'''.format(self.loop_label))
def _create_script(self):
self.script = []
self._add_preamble()
self._add_load_section()
self._add_view_settings_section()
self._add_generic_chain_settings_section()
self._add_specific_chain_settings_section()
self._add_orient_view_section()
self._add_main_structure_view_section()
self._add_residue_highlighting_section()
self._add_raytracing_section()
self._add_postamble()
self.script = '\n'.join(self.script)
|
class LoopRemovalBuilder(PyMOLSessionBuilder):
def __init__(self, pdb_containers, settings = {}, rootdir = '/tmp'):
pass
def _create_input_files(self):
pass
def _add_preamble(self):
pass
def _add_load_section(self):
pass
def _add_view_settings_section(self):
pass
def _add_generic_chain_settings_section(self):
pass
def _add_specific_chain_settings_section(self):
pass
def _add_orient_view_section(self):
pass
def _add_main_structure_view_section(self):
pass
def _add_residue_highlighting_section(self):
pass
def _add_raytracing_section(self):
pass
def _add_postamble(self):
pass
def _create_script(self):
pass
| 14 | 0 | 9 | 1 | 7 | 1 | 1 | 0.14 | 1 | 1 | 0 | 0 | 13 | 6 | 13 | 20 | 129 | 25 | 92 | 25 | 78 | 13 | 51 | 25 | 37 | 1 | 2 | 0 | 13 |
143,557 | Kortemme-Lab/klab | Kortemme-Lab_klab/klab/bio/pymolmod/colors.py | klab.bio.pymolmod.colors.PyMOLStructureBase |
class PyMOLStructureBase(object):
'''A simple structure-less class to store parameters used to display a structure. Open to heavy modification as we add more
customization.'''
def __init__(self, backbone_color = 'white', backbone_display = 'cartoon',
sidechain_color = 'grey80', sidechain_display = 'sticks',
hetatm_color = 'grey60', hetatm_display = 'sticks',
visible = True):
self.backbone_color = backbone_color or 'white'
self.backbone_display = backbone_display or 'cartoon'
self.sidechain_color = sidechain_color or 'grey80'
self.sidechain_display = sidechain_display or 'sticks'
self.hetatm_color = hetatm_color or 'grey60'
self.hetatm_display = hetatm_display or 'sticks'
self.visible = visible
|
class PyMOLStructureBase(object):
'''A simple structure-less class to store parameters used to display a structure. Open to heavy modification as we add more
customization.'''
def __init__(self, backbone_color = 'white', backbone_display = 'cartoon',
sidechain_color = 'grey80', sidechain_display = 'sticks',
hetatm_color = 'grey60', hetatm_display = 'sticks',
visible = True):
pass
| 2 | 1 | 11 | 0 | 11 | 0 | 1 | 0.17 | 1 | 0 | 0 | 1 | 1 | 7 | 1 | 1 | 17 | 3 | 12 | 12 | 7 | 2 | 9 | 9 | 7 | 1 | 1 | 0 | 1 |
143,558 | Kortemme-Lab/klab | Kortemme-Lab_klab/klab/bio/pymolmod/psebuilder.py | klab.bio.pymolmod.psebuilder.PDBContainer |
class PDBContainer(object):
def __init__(self, pymol_name, pdb_contents, residues_of_interest = []):
self.pymol_name = pymol_name
self.pdb_contents = pdb_contents
self.residues_of_interest = residues_of_interest
@staticmethod
def from_file(pymol_name, pdb_filename, residues_of_interest = []):
return PDBContainer(pymol_name, read_file(pdb_filename), residues_of_interest)
@staticmethod
def from_triples(tpls):
pdb_containers = {}
for t in tpls:
pdb_containers[t[0]] = PDBContainer(t[0], t[1], t[2])
return pdb_containers
@staticmethod
def from_filename_triple(tpls):
pdb_containers = {}
for t in tpls:
pdb_containers[t[0]] = PDBContainer.from_file(t[0], t[1], t[2])
return pdb_containers
@staticmethod
def from_content_triple(tpls):
pdb_containers = {}
for t in tpls:
pdb_containers[t[0]] = PDBContainer(t[0], t[1], t[2])
return pdb_containers
|
class PDBContainer(object):
def __init__(self, pymol_name, pdb_contents, residues_of_interest = []):
pass
@staticmethod
def from_file(pymol_name, pdb_filename, residues_of_interest = []):
pass
@staticmethod
def from_triples(tpls):
pass
@staticmethod
def from_filename_triple(tpls):
pass
@staticmethod
def from_content_triple(tpls):
pass
| 10 | 0 | 4 | 0 | 4 | 0 | 2 | 0 | 1 | 0 | 0 | 0 | 1 | 3 | 5 | 5 | 31 | 5 | 26 | 19 | 16 | 0 | 22 | 15 | 16 | 2 | 1 | 1 | 8 |
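The factory methods above all take (name, contents-or-filename, residues_of_interest) triples and return a dict keyed by pymol_name. A small in-memory sketch (the single ATOM record is illustrative, not a complete PDB file):

```python
# from_triples keys the returned dict by pymol_name; the single ATOM record
# below is illustrative, not a complete PDB file.
pdb_text = "ATOM      1  N   ALA A   1      11.104   6.134  -6.504  1.00  0.00           N\n"
containers = PDBContainer.from_triples([('Model', pdb_text, ['A   1 '])])

model = containers['Model']
print(model.pymol_name, model.residues_of_interest)   # Model ['A   1 ']
```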
143,559 | Kortemme-Lab/klab | Kortemme-Lab_klab/klab/bio/pymolmod/psebuilder.py | klab.bio.pymolmod.psebuilder.PyMOLSessionBuilder |
class PyMOLSessionBuilder(object):
def __init__(self, pdb_containers, settings = {}, rootdir = '/tmp'):
self.visualization_shell = 6
self.visualization_pymol = 'pymol'
self.pdb_containers = pdb_containers
self.match_posfiles_interface_distance = 15
self.rootdir = rootdir
self.PSE = None
self.outdir = None
self.script = None
self.stdout = None
self.stderr = None
self.return_code = None
self.settings = {'colors' : {'global' : {'background-color' : 'black'}}}
self.settings.update(settings)
self.color_scheme = ColorScheme(settings.get('colors', {}))
del self.settings['colors'] # to avoid confusion, remove the duplicated dict as the ColorScheme object may get updated
def __del__(self):
if self.outdir:
if os.path.exists(self.outdir):
shutil.rmtree(self.outdir)
def _filepath(self, filename):
return os.path.join(self.outdir, filename)
def _create_temp_directory(self):
self.outdir = create_temp_755_path(self.rootdir)
def _create_input_files(self):
raise NotImplementedError('Subclasses must implement this function.')
def _create_script(self):
raise NotImplementedError('Subclasses must implement this function.')
def run(self):
# Create input files
self._create_temp_directory()
self._create_input_files()
self._create_script()
write_file(self._filepath('script.pml'), self.script)
# Run PyMOL
#colortext.message(self.visualization_pymol +' -c ' + self._filepath('script.pml'))
po = tprocess.Popen(self.outdir, [self.visualization_pymol, '-c', self._filepath('script.pml')])
#colortext.message(po.stdout)
#colortext.warning(po.errorcode)
#colortext.error(po.stderr)
self.stdout = po.stdout
self.stderr = po.stderr
self.return_code = po.errorcode
if self.return_code != 0:
raise Exception('Error: %s' % str(self.stderr))
pse_path = self._filepath('session.pse')
if os.path.exists(pse_path):
self.PSE = read_file(pse_path, binary = True)
|
class PyMOLSessionBuilder(object):
def __init__(self, pdb_containers, settings = {}, rootdir = '/tmp'):
pass
def __del__(self):
pass
def _filepath(self, filename):
pass
def _create_temp_directory(self):
pass
def _create_input_files(self):
pass
def _create_script(self):
pass
def run(self):
pass
| 8 | 0 | 7 | 1 | 6 | 1 | 2 | 0.16 | 1 | 3 | 1 | 5 | 7 | 13 | 7 | 7 | 60 | 11 | 43 | 23 | 35 | 7 | 43 | 23 | 35 | 3 | 1 | 2 | 11 |
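run() above fixes the pipeline order (temp directory, input files, script, then PyMOL); subclasses only supply the two hooks that raise. A minimal sketch, assuming dict-shaped pdb_containers and the klab write_file helper used elsewhere in this table:

```python
# Minimal subclass sketch: only the two abstract hooks are filled in; run(),
# temp-directory handling and the PyMOL invocation are inherited unchanged.
class TrivialBuilder(PyMOLSessionBuilder):

    def _create_input_files(self):
        # one PDB file per container, written into the session's temp directory
        for name, container in self.pdb_containers.items():
            write_file(self._filepath('%s.pdb' % name), container.pdb_contents)

    def _create_script(self):
        loads = '\n'.join('load {0}.pdb, {0}'.format(n) for n in self.pdb_containers)
        self.script = 'cd %s\n%s\nsave session.pse\nquit\n' % (self.outdir, loads)
```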
143,560 | Kortemme-Lab/klab | Kortemme-Lab_klab/klab/bio/pymolmod/scaffold_model_design.py | klab.bio.pymolmod.scaffold_model_design.ScaffoldModelDesignBuilder |
class ScaffoldModelDesignBuilder(PyMOLSessionBuilder):
def __init__(self, pdb_containers, settings = {}, rootdir = '/tmp'):
super(ScaffoldModelDesignBuilder, self).__init__(pdb_containers, settings, rootdir)
self.Scaffold = pdb_containers.get('Scaffold')
self.Model = pdb_containers['Model']
self.ExpStructure = pdb_containers.get('ExpStructure')
def _create_input_files(self):
#colortext.message('self.outdir: ' + self.outdir)
if self.Scaffold:
write_file(self._filepath('scaffold.pdb'), self.Scaffold.pdb_contents)
write_file(self._filepath('model.pdb'), self.Model.pdb_contents)
if self.ExpStructure:
write_file(self._filepath('design.pdb'), self.ExpStructure.pdb_contents)
def _add_preamble(self):
self.script.append("cd %(outdir)s" % self.__dict__)
def _add_load_section(self):
self.script.append("### Load the structures")
if self.ExpStructure:
self.script.append("load design.pdb, ExpStructure")
self.script.append("load model.pdb, RosettaModel")
self.script.append("load scaffold.pdb, Scaffold")
def _add_view_settings_section(self):
self.script.append('''
# Set general view options and hide waters
viewport 1200,800
hide eve
remove resn hoh
bg_color %(global.background-color)s
''' % self.color_scheme)
def _add_generic_chain_settings_section(self):
self.script.append('''
# Set generic chain and HETATM view options
show cartoon
util.cbc
# Hide selenomethionines and selenocysteines
hide sticks, resn CSE+SEC+MSE
util.cnc
set cartoon_side_chain_helper
set cartoon_rect_length, 0.9
set cartoon_oval_length, 0.9
set stick_radius, 0.2
''')
def _add_specific_chain_settings_section(self):
self.script.append('''
# Scaffold display
color %(Scaffold.bb)s, Scaffold
# RosettaModel display
show car, RosettaModel
color %(RosettaModel.bb)s, RosettaModel
''' % self.color_scheme)
if self.ExpStructure:
self.script.append('''
# ExpStructure display
show car, ExpStructure
color %(ExpStructure.bb)s, ExpStructure
''' % self.color_scheme)
def _add_superimposition_section(self):
self.script.append('''
# Superimpose the structures
super Scaffold, RosettaModel''')
if self.ExpStructure:
self.script.append("super ExpStructure, RosettaModel")
def _add_orient_view_section(self):
pass
def _add_scaffold_view_section(self):
self.script.append('''
# Scaffold view options
hide lines, Scaffold
hide ribbon, Scaffold
show car, Scaffold
util.cbc Scaffold''')
if self.ExpStructure:
# Hide the scaffold if there is an experimental structure
self.script.append('''
disable Scaffold''')
def _add_residue_highlighting_section(self):
if self.Scaffold:
scaffold_selection = 'Scaffold and (%s)' % (create_pymol_selection_from_PDB_residue_ids(self.Scaffold.residues_of_interest))
self.script.append('''
### Scaffold objects ###
# Scaffold mutations
has_mutations = cmd.count_atoms('%(scaffold_selection)s') > 0
if has_mutations: cmd.select('Scaffold_mutations_s', '%(scaffold_selection)s');
if has_mutations: cmd.create('Scaffold_mutations', '%(scaffold_selection)s');
if has_mutations: cmd.show('sticks', 'Scaffold_mutations')
''' % vars())
self.script.append('''
if has_mutations: cmd.color('%(Scaffold.mutations)s', 'Scaffold_mutations')
# Scaffold HETATMs - create
has_hetatms = cmd.count_atoms('Scaffold and het and !(resn CSE+SEC+MSE)') > 0
if has_hetatms: cmd.create('Scaffold_HETATMs', 'Scaffold and het and !(resn CSE+SEC+MSE)');
if has_hetatms: cmd.show('sticks', 'Scaffold_HETATMs')
if has_hetatms: cmd.disable('Scaffold_HETATMs')
if has_hetatms: cmd.create('spheres_Scaffold_HETATMs', 'Scaffold and het and !(resn CSE+SEC+MSE)');
if has_hetatms: cmd.show('spheres', 'spheres_Scaffold_HETATMs')
if has_hetatms: cmd.disable('spheres_Scaffold_HETATMs')
''' % self.color_scheme)
#self.script.append('set label_color, black')
#self.script.append('label n. CA and Scaffold and chain A and i. 122, "A122" ')
model_selection = 'RosettaModel and (%s)' % (create_pymol_selection_from_PDB_residue_ids(self.Model.residues_of_interest))
self.script.append('''
### Rosetta model objects ###
# Rosetta model mutations
has_mutations = cmd.count_atoms('%(model_selection)s') > 0
if has_mutations: cmd.select('RosettaModel_mutations_s', '%(model_selection)s');
if has_mutations: cmd.create('RosettaModel_mutations', '%(model_selection)s');
if has_mutations: cmd.show('sticks', 'RosettaModel_mutations')
''' % vars())
self.script.append('''
if has_mutations: cmd.color('%(RosettaModel.mutations)s', 'RosettaModel_mutations')
# Rosetta model HETATMs - create and display
has_hetatms = cmd.count_atoms('RosettaModel and het and !(resn CSE+SEC+MSE)') > 0
if has_hetatms: cmd.create('RosettaModel_HETATMs', 'RosettaModel and het and !(resn CSE+SEC+MSE)');
if has_hetatms: cmd.show('sticks', 'RosettaModel_HETATMs')
if has_hetatms: cmd.create('spheres_RosettaModel_HETATMs', 'RosettaModel and het and !(resn CSE+SEC+MSE)');
if has_hetatms: cmd.show('spheres', 'spheres_RosettaModel_HETATMs')
if has_hetatms: cmd.disable('spheres_RosettaModel_HETATMs')
''' % self.color_scheme)
if self.ExpStructure:
exp_structure_selection = 'ExpStructure and (%s)' % (create_pymol_selection_from_PDB_residue_ids(self.ExpStructure.residues_of_interest))
self.script.append('''
### ExpStructure objects ###
# ExpStructure mutations
has_mutations = cmd.count_atoms('%(exp_structure_selection)s') > 0
if has_mutations: cmd.select('ExpStructure_mutations_s', '%(exp_structure_selection)s');
if has_mutations: cmd.create('ExpStructure_mutations', '%(exp_structure_selection)s');
if has_mutations: cmd.show('sticks', 'ExpStructure_mutations')
''' % vars())
self.script.append('''if has_mutations: cmd.color('%(ExpStructure.mutations)s', 'ExpStructure_mutations')
# ExpStructure HETATMs - create and display
has_hetatms = cmd.count_atoms('ExpStructure and het and !(resn CSE+SEC+MSE)') > 0
if has_hetatms: cmd.create('ExpStructure_HETATMs', 'ExpStructure and het and !(resn CSE+SEC+MSE)');
if has_hetatms: cmd.show('sticks', 'ExpStructure_HETATMs')
if has_hetatms: cmd.create('spheres_ExpStructure_HETATMs', 'ExpStructure and het and !(resn CSE+SEC+MSE)');
if has_hetatms: cmd.show('spheres', 'spheres_ExpStructure_HETATMs')
if has_hetatms: cmd.disable('spheres_ExpStructure_HETATMs')
#ExpStructure and het and !(resn CSE+SEC+MSE)')
''' % self.color_scheme)
def _add_raytracing_section(self):
self.script.append('''
# Atom coloring
select none
util.cnc
# Set lighting
set two_sided_lighting, on
''')
def _add_postamble(self):
self.script.append('''
# Show only polar hydrogens
hide (hydro)
# Set zoom
zoom
# Re-order the objects in the right pane
order *,yes
order Scaffold_mutations_s, location=bottom
order RosettaModel_mutations_s, location=bottom
order ExpStructure_mutations_s, location=bottom
order spheres_Scaffold_HETATMs, location=bottom
order spheres_RosettaModel_HETATMs, location=bottom
order spheres_ExpStructure_HETATMs, location=bottom
save session.pse
quit
''')
def _create_script(self):
self.script = []
self._add_preamble()
self._add_load_section()
self._add_view_settings_section()
self._add_generic_chain_settings_section()
self._add_specific_chain_settings_section()
self._add_superimposition_section()
self._add_orient_view_section()
self._add_scaffold_view_section()
self._add_residue_highlighting_section()
self._add_raytracing_section()
self._add_postamble()
self.script = '\n'.join(self.script)
|
class ScaffoldModelDesignBuilder(PyMOLSessionBuilder):
def __init__(self, pdb_containers, settings = {}, rootdir = '/tmp'):
pass
def _create_input_files(self):
pass
def _add_preamble(self):
pass
def _add_load_section(self):
pass
def _add_view_settings_section(self):
pass
def _add_generic_chain_settings_section(self):
pass
def _add_specific_chain_settings_section(self):
pass
def _add_superimposition_section(self):
pass
def _add_orient_view_section(self):
pass
def _add_scaffold_view_section(self):
pass
def _add_residue_highlighting_section(self):
pass
def _add_raytracing_section(self):
pass
def _add_postamble(self):
pass
def _create_script(self):
pass
| 15 | 0 | 14 | 2 | 11 | 2 | 2 | 0.19 | 1 | 1 | 0 | 0 | 14 | 4 | 14 | 21 | 216 | 39 | 150 | 22 | 135 | 28 | 68 | 22 | 53 | 3 | 2 | 1 | 22 |
143,561 | Kortemme-Lab/klab | Kortemme-Lab_klab/klab/bio/pymolmod/scaffold_model_ligand.py | klab.bio.pymolmod.scaffold_model_ligand.ScaffoldModelLigandBuilder |
class ScaffoldModelLigandBuilder(PyMOLSessionBuilder):
def __init__(self, pdb_containers, rootdir = '/tmp'):
super(ScaffoldModelLigandBuilder, self).__init__(pdb_containers, rootdir = rootdir)  # rootdir must be passed by keyword, or it is consumed as 'settings'
self.Scaffold = pdb_containers['Scaffold']
self.Model = pdb_containers['Model']
self.Crystal = pdb_containers.get('Crystal')
def _create_input_files(self):
colortext.message('self.outdir: ' + self.outdir)
write_file(self._filepath('scaffold.pdb'), self.Scaffold.pdb_contents)
write_file(self._filepath('model.pdb'), self.Model.pdb_contents)
if self.Crystal:
write_file(self._filepath('crystal.pdb'), self.Crystal.pdb_contents)
def _create_script(self):
script = '''
# load structures
cd %(outdir)s''' % self.__dict__
if self.Crystal:
script += '''
load crystal.pdb, Crystal'''
script += '''
load model.pdb, Model
load scaffold.pdb, Scaffold
# set general view options
viewport 1200,800
hide eve
# set seq_view
bg_color white
# set cartoon_fancy_helices
set cartoon_side_chain_helper
set cartoon_rect_length, 0.9
set cartoon_oval_length, 0.9
set stick_radius, 0.2
# set cartoon_flat_sheets, off
show car, Model'''
if self.Crystal:
script += '''
color forest, Crystal
show car, Crystal'''
script += '''
color gray, Model
#cmd.color("'+color_H+'", selector.process("Model and chain '+heavy_chain.id+'"))
#cmd.color("'+color_L+'", selector.process("Model and chain '+light_chain.id+'"))
#superpose template and model based on the ligand
select model_ligand, Model and resn LG1
show sticks, model_ligand'''
if self.Crystal:
script += '''
select template_ligand, Crystal and resn LG1
color gray, template_ligand
show sticks, template_ligand
pair_fit template_ligand, model_ligand'''
if self.Crystal:
script += '''
disable Crystal'''
script += '''
# orient view
set_view (0.750491261, -0.332692802, -0.570965469, -0.572279274, 0.104703479, -0.813287377, 0.330366731, 0.937145591, -0.111799516, 0.000000000, 0.000000000, -129.595489502, 36.783428192, 36.119152069, 77.293815613, 112.102447510, 147.088562012, -20.000000000 )
# superimpose original scaffold onto the model
super Scaffold, Model
# preset.ligands(selection="Scaffold")
hide lines, Scaffold
hide ribbon, Scaffold
show car, Scaffold
util.cbc Scaffold
disable Scaffold
# highlight motif residues'''
# if self.Crystal:
# script += '''
#create template_motif_residues, Crystal and not resn LG1 and resi ''' + self.Crystal.residues_of_interest
if self.Scaffold.residues_of_interest and self.Model.residues_of_interest:
script += '''
create template_motif_residues, Scaffold and not resn LG1 and resi ''' + "+".join(self.Scaffold.residues_of_interest) + '''
select model_motif_residues, Model and not resn LG1 and resi ''' + "+".join(self.Model.residues_of_interest) + '''
set stick_radius, 0.1, template_motif_residues
show sticks, template_motif_residues and not symbol h and not name C+N+O
show sticks, model_motif_residues and not symbol h
color brightorange, template_motif_residues
color tv_yellow, model_motif_residues'''
# create ligand environment
if self.Crystal:
script += '''
create template_env, Crystal and byres template_ligand around %(visualization_shell)s''' % self.__dict__
script += '''
create model_env, Model and byres model_ligand around %(visualization_shell)s
create scaffold_env, Scaffold and byres model_ligand around %(visualization_shell)s''' % self.__dict__
if self.Crystal:
script += '''
set stick_radius, 0.1, template_env
show sticks, template_env and not symbol h and not name C+N+O'''
script += '''
show sticks, model_env and not symbol h
show sticks, scaffold_env and not symbol h'''
if self.Crystal:
script += '''
set cartoon_transparency, 1, template_env'''
script += '''
set cartoon_transparency, 1, model_env
set cartoon_transparency, 1, scaffold_env
# hide ligand environment'''
if self.Crystal:
script += '''
disable template_env'''
script += '''
disable model_env
disable scaffold_env
# create binding pocket'''
if self.Crystal:
script += '''
create template_env_surface, template_env
hide sticks, template_env_surface
show surface, template_env_surface
#color gray70, template_env_surface
set cartoon_transparency, 1, template_env_surface'''
script += '''
create model_env_surface, model_env
hide sticks, model_env_surface
show surface, model_env_surface
set transparency, 0.25
#color gray70, model_env_surface
set cartoon_transparency, 1, model_env_surface
# hide binding pocket'''
if self.Crystal:
script += '''
disable template_env_surface'''
script += '''
disable model_env_surface
# ray tracing and output
select none
select env, model_ligand around 6
set transparency, 0.5
util.cnc
show surface, Model and env
set two_sided_lighting, on
zoom model_ligand, 10
heavy_chain_residues=[]
light_chain_residues=[]
one_letter ={"VAL":"V", "ILE":"I", "LEU":"L", "GLU":"E", "GLN":"Q", "ASP":"D", "ASN":"N", "HIS":"H", "TRP":"W", "PHE":"F", "TYR":"Y", "ARG":"R", "LYS":"K", "SER":"S", "THR":"T", "MET":"M", "ALA":"A", "GLY":"G", "PRO":"P", "CYS":"C"}
#cmd.iterate(selector.process("Model and chain '+heavy_chain.id+' and name ca"),"heavy_chain_residues.append(resn)")
#cmd.iterate(selector.process("Model and chain '+light_chain.id+' and name ca"),"light_chain_residues.append(resn)")
#heavy_chain_seq=""
#light_chain_seq=""
#for residue in heavy_chain_residues: heavy_chain_seq+=one_letter[residue]
#for residue in light_chain_residues: light_chain_seq+=one_letter[residue]
#print
#print "Heavy chain ('+heavy_chain.id+','+color_H+')",heavy_chain_seq
#print
#print "Light chain ('+light_chain.id+','+color_L+')",light_chain_seq
print
save session.pse
quit
''' % self.__dict__
self.script = script
|
class ScaffoldModelLigandBuilder(PyMOLSessionBuilder):
def __init__(self, pdb_containers, rootdir = '/tmp'):
pass
def _create_input_files(self):
pass
def _create_script(self):
pass
| 4 | 0 | 58 | 8 | 40 | 11 | 5 | 0.27 | 1 | 1 | 0 | 0 | 3 | 4 | 3 | 10 | 179 | 27 | 120 | 9 | 116 | 32 | 46 | 9 | 42 | 12 | 2 | 1 | 15 |
143,562 | Kortemme-Lab/klab | Kortemme-Lab_klab/klab/bio/pymolmod/single_structure.py | klab.bio.pymolmod.single_structure.SingleStructureBuilder |
class SingleStructureBuilder(PyMOLSessionBuilder):
def __init__(self, pdb_containers, settings = {}, rootdir = '/tmp'):
super(SingleStructureBuilder, self).__init__(pdb_containers, settings, rootdir)
assert(len(pdb_containers) == 1)
self.structure = pdb_containers[list(pdb_containers.keys())[0]]
def _create_input_files(self):
write_file(self._filepath('%s.pdb' % self.structure.pymol_name), self.structure.pdb_contents)
def _add_preamble(self):
self.script.append("cd %(outdir)s" % self.__dict__)
def _add_load_section(self):
self.script.append("### Load the structures")
self.script.append("load %s.pdb, %s" % (self.structure.pymol_name, self.structure.pymol_name))
def _add_view_settings_section(self):
self.script.append('''
# Set general view options and hide waters
viewport 1200,800
hide eve
remove resn hoh
bg_color %(global.background-color)s
''' % self.color_scheme)
def _add_generic_chain_settings_section(self):
self.script.append('''
# Set generic chain and HETATM view options
show cartoon
util.cbc
# Hide selenomethionines and selenocysteines
hide sticks, resn CSE+SEC+MSE
util.cnc
set cartoon_side_chain_helper
set cartoon_rect_length, 0.9
set cartoon_oval_length, 0.9
set stick_radius, 0.2
''')
def _add_specific_chain_settings_section(self):
self.script.append('''
# Structure display
show car, %s''' % self.structure.pymol_name)
self.script.append('''
color %s, %s
''' % (self.color_scheme['RosettaModel.bb'], self.structure.pymol_name))
def _add_residue_highlighting_section(self):
if self.structure.residues_of_interest:
pymol_name = self.structure.pymol_name
structure_selection = '%s and (%s)' % (self.structure.pymol_name, create_pymol_selection_from_PDB_residue_ids(self.structure.residues_of_interest))
self.script.append('''
### Structure objects ###
# Structure residues
has_mutations = cmd.count_atoms('%(structure_selection)s') > 0
if has_mutations: cmd.select('%(pymol_name)s_mutations_s', '%(structure_selection)s');
if has_mutations: cmd.create('%(pymol_name)s_mutations', '%(structure_selection)s');
if has_mutations: cmd.show('sticks', '%(pymol_name)s_mutations')
''' % vars())
self.script.append('''
if has_mutations: cmd.color('%s', '%s_mutations')''' % (self.color_scheme['RosettaModel.mutations'], pymol_name))
self.script.append('''
# Rosetta model HETATMs - create and display
has_hetatms = cmd.count_atoms('%(pymol_name)s and het and !(resn CSE+SEC+MSE)') > 0
if has_hetatms: cmd.create('%(pymol_name)s_HETATMs', '%(pymol_name)s and het and !(resn CSE+SEC+MSE)');
if has_hetatms: cmd.show('sticks', '%(pymol_name)s_HETATMs')
if has_hetatms: cmd.create('spheres_%(pymol_name)s_HETATMs', '%(pymol_name)s and het and !(resn CSE+SEC+MSE)');
if has_hetatms: cmd.show('spheres', 'spheres_%(pymol_name)s_HETATMs')
if has_hetatms: cmd.disable('spheres_%(pymol_name)s_HETATMs')
''' % vars())
def _add_raytracing_section(self):
self.script.append('''
# Atom coloring
select none
util.cnc
# Set lighting
set two_sided_lighting, on
''')
def _add_postamble(self):
self.script.append('''
# Show only polar hydrogens
hide (hydro)
# Set zoom
zoom
save session.pse
quit
''')
def _create_script(self):
self.script = []
self._add_preamble()
self._add_load_section()
self._add_view_settings_section()
self._add_generic_chain_settings_section()
self._add_specific_chain_settings_section()
self._add_residue_highlighting_section()
self._add_raytracing_section()
self._add_postamble()
self.script = '\n'.join(self.script)
|
class SingleStructureBuilder(PyMOLSessionBuilder):
def __init__(self, pdb_containers, settings = {}, rootdir = '/tmp'):
pass
def _create_input_files(self):
pass
def _add_preamble(self):
pass
def _add_load_section(self):
pass
def _add_view_settings_section(self):
pass
def _add_generic_chain_settings_section(self):
pass
def _add_specific_chain_settings_section(self):
pass
def _add_residue_highlighting_section(self):
pass
def _add_raytracing_section(self):
pass
def _add_postamble(self):
pass
def _create_script(self):
pass
| 12 | 0 | 9 | 1 | 7 | 1 | 1 | 0.15 | 1 | 2 | 0 | 0 | 11 | 2 | 11 | 18 | 114 | 23 | 80 | 16 | 68 | 12 | 41 | 16 | 29 | 2 | 2 | 1 | 12 |
143,563 | Kortemme-Lab/klab | Kortemme-Lab_klab/klab/bio/relatrix.py | klab.bio.relatrix.ResidueRelatrix |
class ResidueRelatrix(object):
''' A class for relating residue IDs from different schemes.
Note: we assume throughout that there is one map from SEQRES to UniParc. This is not always true e.g. Polyubiquitin-C (UPI000000D74D) has 9 copies of the ubiquitin sequence.'''
schemes = ['rosetta', 'atom', 'seqres', 'fasta', 'uniparc']
def __init__(self, pdb_id, rosetta_scripts_path, rosetta_database_path = None, chains_to_keep = [], min_clustal_cut_off = 80, cache_dir = None, silent = False, acceptable_sequence_percentage_match = 80.0, acceptable_sifts_sequence_percentage_match = None, starting_clustal_cut_off = 100, bio_cache = None): # keep_HETATMS = False
''' acceptable_sequence_percentage_match is used when checking whether the SEQRES sequences have a mapping. Usually
90.00% works but some cases e.g. 1AR1, chain C, have a low matching score mainly due to extra residues. I set
this to 80.00% to cover most cases.'''
# todo: add an option to not use the Clustal sequence aligner and only use the SIFTS mapping. This could be useful for a web interface where we do not want to have to fix things manually.
if acceptable_sifts_sequence_percentage_match == None:
acceptable_sifts_sequence_percentage_match = acceptable_sequence_percentage_match
assert(0.0 <= acceptable_sequence_percentage_match <= 100.0)
assert(0.0 <= acceptable_sifts_sequence_percentage_match <= 100.0)
if not((type(pdb_id) == bytes or type(pdb_id) == type('')) and len(pdb_id) == 4 and pdb_id.isalnum()):
raise Exception("Expected an 4-character long alphanumeric PDB identifer. Received '%s'." % str(pdb_id))
self.pdb_id = pdb_id.upper()
self.silent = silent
self.rosetta_scripts_path = rosetta_scripts_path
self.rosetta_database_path = rosetta_database_path
self.bio_cache = bio_cache
self.cache_dir = cache_dir
if (not self.cache_dir) and self.bio_cache:
self.cache_dir = self.bio_cache.cache_dir
self.alignment_cutoff = None
self.acceptable_sequence_percentage_match = acceptable_sequence_percentage_match
self.acceptable_sifts_sequence_percentage_match = acceptable_sifts_sequence_percentage_match
self.replacement_pdb_id = None
self.FASTA = None
self.pdb = None
self.pdbml = None
self.PDB_UniParc_SA = None
self.sifts = None
self.uniparc_sequences = None
self.fasta_sequences = None
self.seqres_sequences = None
self.atom_sequences = None
self.rosetta_sequences = None
self.pdb_to_rosetta_residue_map_error = False
self.rosetta_to_atom_sequence_maps = None
self.atom_to_seqres_sequence_maps = None
self.seqres_to_uniparc_sequence_maps = None
self.atom_to_rosetta_sequence_maps = None
self.seqres_to_atom_sequence_maps = None
self.uniparc_to_seqres_sequence_maps = None # This map is indexed by PDB chain IDs
self.pdbml_atom_to_seqres_sequence_maps = None
self.clustal_seqres_to_uniparc_sequence_maps = None
self.sifts_atom_to_seqres_sequence_maps = None
self.sifts_seqres_to_uniparc_sequence_maps = None
self.sifts_atom_to_uniparc_sequence_maps = None
self.pdb_chain_to_uniparc_chain_mapping = {}
self._create_objects(chains_to_keep, starting_clustal_cut_off, min_clustal_cut_off, True) # todo: at present, we always strip HETATMs. We may want to change this in the future.
self._create_sequences()
self._create_sequence_maps()
self._merge_sifts_maps()
self._prune_maps_to_sequences()
self._validate()
self._create_inverse_maps()
### API functions ###
def convert(self, chain_id, residue_id, from_scheme, to_scheme):
'''The API conversion function. This converts between the different residue ID schemes.'''
# At the cost of three function calls, we ignore the case of the scheme parameters to be more user-friendly.
from_scheme = from_scheme.lower()
to_scheme = to_scheme.lower()
assert(from_scheme in ResidueRelatrix.schemes)
assert(to_scheme in ResidueRelatrix.schemes)
return self._convert(chain_id, residue_id, from_scheme, to_scheme)
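# Illustrative usage (a sketch with made-up residue IDs and return values;
# building a relatrix requires a local RosettaScripts binary at the
# hypothetical path below and network/cache access):
#   rr = ResidueRelatrix('1A2P', '/path/to/rosetta_scripts')
#   rr.convert('A', 'A  42 ', 'atom', 'seqres')   # -> e.g. 44 (int SEQRES index)
#   rr.convert('A', 44, 'seqres', 'uniparc')      # -> e.g. ('UPI0000136D7D', 43), a (UniParc ID, position) pair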
def _convert(self, chain_id, residue_id, from_scheme, to_scheme):
'''The actual 'private' conversion function.'''
# There are 12 valid combinations but rather than write them all out explicitly, we will use recursion, sacrificing speed for brevity
if from_scheme == 'rosetta':
atom_id = self.rosetta_to_atom_sequence_maps.get(chain_id, {})[residue_id]
if to_scheme == 'atom':
return atom_id
else:
return self._convert(chain_id, atom_id, 'atom', to_scheme)
if from_scheme == 'atom':
if to_scheme == 'rosetta':
return self.atom_to_rosetta_sequence_maps.get(chain_id, {})[residue_id]
else:
seqres_id = self.atom_to_seqres_sequence_maps.get(chain_id, {})[residue_id]
if to_scheme == 'seqres':
return seqres_id
return self.convert(chain_id, seqres_id, 'seqres', to_scheme)
if from_scheme == 'seqres':
if to_scheme == 'uniparc':
return self.seqres_to_uniparc_sequence_maps.get(chain_id, {})[residue_id]
else:
atom_id = self.seqres_to_atom_sequence_maps.get(chain_id, {})[residue_id]
if to_scheme == 'atom':
return atom_id
return self.convert(chain_id, atom_id, 'atom', to_scheme)
if from_scheme == 'uniparc':
seqres_id = self.uniparc_to_seqres_sequence_maps.get(chain_id, {})[residue_id]
if to_scheme == 'seqres':
return seqres_id
else:
return self._convert(chain_id, seqres_id, 'seqres', to_scheme)
raise Exception("We should never reach this line.")
def convert_from_rosetta(self, residue_id, to_scheme):
'''A simpler conversion function to convert from Rosetta numbering without requiring the chain identifier.'''
assert(type(residue_id) == int)
# Find the chain_id associated with the residue_id
# Scan *all* sequences without breaking out to make sure that we do not have any duplicate maps
chain_id = None
for c, sequence in self.rosetta_sequences.items():
for id, r in sequence:
if r.ResidueID == residue_id:
assert(chain_id == None)
chain_id = c
if chain_id:
return self.convert(chain_id, residue_id, 'rosetta', to_scheme)
else:
return None
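# Illustrative usage (hypothetical numbering): Rosetta residues are numbered
# contiguously across the pose, so no chain ID is needed:
#   rr.convert_from_rosetta(57, 'atom')    # -> e.g. 'B  61 ' if residue 57 lies in chain B
#   rr.convert_from_rosetta(9999, 'atom')  # -> None when no chain contains that residue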
### Private validation methods ###
def _validate(self):
'''Validate the mappings.'''
self._validate_fasta_vs_seqres()
self._validate_mapping_signature()
self._validate_id_types()
self._validate_residue_types()
def _validate_fasta_vs_seqres(self):
'''Check that the FASTA and SEQRES sequences agree (they sometimes differ)'''
pdb_id = self.pdb_id
for chain_id, sequence in self.pdb.seqres_sequences.items():
if str(sequence) != self.FASTA[pdb_id][chain_id]:
if self.pdb_id in use_seqres_sequence_for_fasta_sequence:
self.FASTA.replace_sequence(self.pdb_id, chain_id, str(sequence))
elif self.pdb_id in use_fasta_sequence_for_seqres_sequence:
self.pdb.seqres_sequences[chain_id] = Sequence.from_sequence(chain_id, self.FASTA[pdb_id][chain_id], self.sequence_types[chain_id])
sequence = self.FASTA[pdb_id][chain_id]
if str(sequence) != self.FASTA[pdb_id][chain_id]:
raise colortext.Exception("The SEQRES and FASTA sequences disagree for chain %s in %s. This can happen but special-case handling (use_seqres_sequence_for_fasta_sequence) should be added to the file containing the %s class." % (chain_id, pdb_id, self.__class__.__name__))
def _validate_mapping_signature(self):
'''Make sure the domains and ranges of the SequenceMaps match the Sequences.'''
# rosetta_to_atom_sequence_maps
for chain_id, sequence_map in self.rosetta_to_atom_sequence_maps.items():
# Check that all Rosetta residues have a mapping
assert(sorted(sequence_map.keys()) == sorted(self.rosetta_sequences[chain_id].ids()))
# Check that all ATOM residues in the mapping exist and that the mapping is injective
rng = set(sequence_map.values())
atom_residue_ids = set(self.atom_sequences[chain_id].ids())
assert(rng.intersection(atom_residue_ids) == rng)
assert(len(rng) == len(list(sequence_map.values())))
# atom_to_seqres_sequence_maps
for chain_id, sequence_map in self.atom_to_seqres_sequence_maps.items():
# Check that all ATOM residues have a mapping
#print(sorted(sequence_map.keys()))
#print(sorted(self.atom_sequences[chain_id].ids()))
assert(sorted(sequence_map.keys()) == sorted(self.atom_sequences[chain_id].ids()))
# Check that all SEQRES residues in the mapping exist and that the mapping is injective
rng = set(sequence_map.values())
seqres_residue_ids = set(self.seqres_sequences[chain_id].ids())
assert(rng.intersection(seqres_residue_ids) == rng)
assert(len(rng) == len(list(sequence_map.values())))
# seqres_to_uniparc_sequence_maps
for chain_id, sequence_map in self.seqres_to_uniparc_sequence_maps.items():
# Check that acceptable_sequence_percentage_match% of all SEQRES residues have a mapping (there may have been
# insertions or bad mismatches i.e. low BLOSUM62/PAM250 scores). I chose 80% arbitrarily but this can be overridden
# with the acceptable_sequence_percentage_match argument to the constructor.
if self.sequence_types[chain_id] == 'Protein' or self.sequence_types[chain_id] == 'Protein skeleton':
if sequence_map:
mapped_SEQRES_residues = set(sequence_map.keys())
all_SEQRES_residues = set(self.seqres_sequences[chain_id].ids())
if len(all_SEQRES_residues) >= 20:
match_percentage = 100.0 * (float(len(mapped_SEQRES_residues))/float((len(all_SEQRES_residues))))
if not (self.acceptable_sequence_percentage_match <= match_percentage <= 100.0):
if not set(list(str(self.seqres_sequences[chain_id]))) == set(['X']):
# Skip cases where all residues are unknown e.g. 1DEQ, chain M
raise Exception("Chain %s in %s only had a match percentage of %0.2f%%" % (chain_id, self.pdb_id, match_percentage))
# Check that all UniParc residues in the mapping exist and that the mapping is injective
if self.pdb_chain_to_uniparc_chain_mapping.get(chain_id):
rng = set([v[1] for v in list(sequence_map.values())])
uniparc_chain_id = self.pdb_chain_to_uniparc_chain_mapping[chain_id]
uniparc_residue_ids = set(self.uniparc_sequences[uniparc_chain_id].ids())
assert(rng.intersection(uniparc_residue_ids) == rng)
if len(rng) != len(list(sequence_map.values())):
rng_vals = set()
for x in list(sequence_map.values()):
if x[1] in rng_vals:
err_msg = ['The SEQRES to UniParc map is not injective for %s, chain %s; the element %s occurs more than once in the range.' % (self.pdb_id, chain_id, str(x))]
err_msg.append(colortext.make('The seqres_to_uniparc_sequence_maps mapping is:', color = 'green'))
for k, v in sequence_map.map.items():
err_msg.append(' %s -> %s' % (str(k).ljust(7), str(v).ljust(20)))
err_msg.append(colortext.make('The clustal_seqres_to_uniparc_sequence_maps mapping is:', color = 'green'))
for k, v in self.clustal_seqres_to_uniparc_sequence_maps[chain_id].map.items():
err_msg.append(' %s -> %s' % (str(k).ljust(7), str(v).ljust(20)))
err_msg.append(colortext.make('The sifts_seqres_to_uniparc_sequence_maps mapping is:', color = 'green'))
for k, v in self.sifts_seqres_to_uniparc_sequence_maps[chain_id].map.items():
err_msg.append(' %s -> %s' % (str(k).ljust(7), str(v).ljust(20)))
raise Exception('\n'.join(err_msg))
rng_vals.add(x[1])
def _validate_id_types(self):
'''Check that the ID types are integers for Rosetta, SEQRES, and UniParc sequences and 6-character PDB IDs for the ATOM sequences.'''
for sequences in [self.uniparc_sequences, self.fasta_sequences, self.seqres_sequences, self.rosetta_sequences]:
for chain_id, sequence in sequences.items():
sequence_id_types = set(map(type, sequence.ids()))
if sequence_id_types:
assert(len(sequence_id_types) == 1)
assert(sequence_id_types.pop() == int)
for chain_id, sequence in self.atom_sequences.items():
sequence_id_types = set(map(type, sequence.ids()))
assert(len(sequence_id_types) == 1)
sequence_id_type = sequence_id_types.pop()
assert(sequence_id_type == bytes or sequence_id_type == str)
def _validate_residue_types(self):
'''Make sure all the residue types map through translation.'''
for chain_id, sequence_map in self.rosetta_to_atom_sequence_maps.items():
rosetta_sequence = self.rosetta_sequences[chain_id]
atom_sequence = self.atom_sequences[chain_id]
for rosetta_id, atom_id, _ in sequence_map:
assert(rosetta_sequence[rosetta_id].ResidueAA == atom_sequence[atom_id].ResidueAA)
for chain_id, sequence_map in self.atom_to_seqres_sequence_maps.items():
atom_sequence = self.atom_sequences[chain_id]
seqres_sequence = self.seqres_sequences[chain_id]
for atom_id, seqres_id, _ in sorted(sequence_map):
assert(atom_sequence[atom_id].ResidueAA == seqres_sequence[seqres_id].ResidueAA)
for chain_id, sequence_map in self.seqres_to_uniparc_sequence_maps.items():
if self.pdb_chain_to_uniparc_chain_mapping.get(chain_id):
seqres_sequence = self.seqres_sequences[chain_id]
uniparc_sequence = self.uniparc_sequences[self.pdb_chain_to_uniparc_chain_mapping[chain_id]]
for seqres_id, uniparc_id_resid_pair, substitution_match in sequence_map:
uniparc_id = uniparc_id_resid_pair[1]
# Some of the matches may not be identical but all the '*' Clustal Omega matches should be identical
if substitution_match and substitution_match.clustal == 1:
assert(seqres_sequence[seqres_id].ResidueAA == uniparc_sequence[uniparc_id].ResidueAA)
### Private Sequence and SequenceMap collection functions ###
def _create_inverse_maps(self):
'''Create the inverse mappings (UniParc -> SEQRES -> ATOM -> Rosetta).'''
# We have already determined that the inverse maps are well-defined (the normal maps are injective). The inverse maps will be partial maps in general.
self.atom_to_rosetta_sequence_maps = {}
for chain_id, sequence_map in self.rosetta_to_atom_sequence_maps.items():
s = SequenceMap()
for k, v, substitution_match in sequence_map:
s.add(v, k, substitution_match)
self.atom_to_rosetta_sequence_maps[chain_id] = s
self.seqres_to_atom_sequence_maps = {}
for chain_id, sequence_map in self.atom_to_seqres_sequence_maps.items():
s = SequenceMap()
for k, v, substitution_match in sequence_map:
s.add(v, k, substitution_match)
self.seqres_to_atom_sequence_maps[chain_id] = s
# This map uses PDB chain IDs as PDB chains may map to zero or one UniParc IDs whereas UniParc IDs may map to many PDB chains
self.uniparc_to_seqres_sequence_maps = {}
for chain_id, sequence_map in self.seqres_to_uniparc_sequence_maps.items():
s = UniParcPDBSequenceMap()
for k, v, substitution_match in sequence_map:
s.add(v, k, substitution_match)
self.uniparc_to_seqres_sequence_maps[chain_id] = s
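# The inversions above just swap (key, value) pairs, which is well-defined
# because the forward maps were verified to be injective. The same idea with
# plain dicts instead of SequenceMap objects (illustration only):
#   forward = {1: 'A  10 ', 2: 'A  11 '}          # injective: no repeated values
#   inverse = {v: k for k, v in forward.items()}  # {'A  10 ': 1, 'A  11 ': 2}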
def _create_sequence_maps(self):
'''Get all of the SequenceMaps - Rosetta->ATOM, ATOM->SEQRES/FASTA, SEQRES->UniParc.'''
if self.sifts:
self.sifts_atom_to_seqres_sequence_maps = self.sifts.atom_to_seqres_sequence_maps
self.sifts_seqres_to_uniparc_sequence_maps = self.sifts.seqres_to_uniparc_sequence_maps
self.sifts_atom_to_uniparc_sequence_maps = self.sifts.atom_to_uniparc_sequence_maps
if self.pdb_id in pdbs_with_do_not_use_SIFTS_for_these_chains:
for chain_id in list(self.sifts_atom_to_seqres_sequence_maps.keys()) + list(self.sifts_seqres_to_uniparc_sequence_maps.keys()) + list(self.sifts_atom_to_uniparc_sequence_maps.keys()):
if (self.pdb_id, chain_id) in do_not_use_SIFTS_for_these_chains:
self.sifts_atom_to_seqres_sequence_maps[chain_id] = SequenceMap()
self.sifts_seqres_to_uniparc_sequence_maps[chain_id] = SequenceMap()
self.sifts_atom_to_uniparc_sequence_maps[chain_id] = SequenceMap()
if self.pdb_to_rosetta_residue_map_error:
self.rosetta_to_atom_sequence_maps = {}
for c in list(self.atom_sequences.keys()):
self.rosetta_to_atom_sequence_maps[c] = SequenceMap()
else:
self.rosetta_to_atom_sequence_maps = self.pdb.rosetta_to_atom_sequence_maps
# If we removed atoms from the PDB file, we need to remove them from the maps so that our validations hold later on
self.pdbml_atom_to_seqres_sequence_maps = self.pdbml.atom_to_seqres_sequence_maps
if self.pdb_id in ROSETTA_HACKS_residues_to_remove:
for residue_to_remove in ROSETTA_HACKS_residues_to_remove[self.pdb_id]:
chain_id = residue_to_remove[0]
self.pdbml_atom_to_seqres_sequence_maps[chain_id].remove(residue_to_remove)
#if self.sifts:
# self.sifts_atom_to_seqres_sequence_maps[chain_id].remove(residue_to_remove)
if self.pdb_id not in do_not_use_the_sequence_aligner:
self.clustal_seqres_to_uniparc_sequence_maps = self.PDB_UniParc_SA.seqres_to_uniparc_sequence_maps
def _merge_sifts_maps(self):
''' Make sure that the pdbml_atom_to_seqres_sequence_maps and clustal_seqres_to_uniparc_sequence_maps agree with SIFTS and merge the maps.
SIFTS may have more entries since we discard PDB residues which break Rosetta.
SIFTS may have fewer entries for some cases e.g. 1AR1, chain C where SIFTS does not map ATOMs 99-118.
SIFTS does not seem to contain ATOM to SEQRES mappings for (at least some) DNA chains e.g. 1APL, chain A
Because of these cases, we just assert that the overlap agrees so that we can perform a gluing of maps.'''
if self.pdb_id in do_not_use_the_sequence_aligner:
assert(self.sifts)
self.atom_to_seqres_sequence_maps = self.sifts_atom_to_seqres_sequence_maps
self.seqres_to_uniparc_sequence_maps = self.sifts_seqres_to_uniparc_sequence_maps
elif self.sifts:
self.atom_to_seqres_sequence_maps = {}
self.seqres_to_uniparc_sequence_maps = {}
for c, seqmap in sorted(self.pdbml_atom_to_seqres_sequence_maps.items()):
if self.sequence_types[c] == 'Protein' or self.sequence_types[c] == 'Protein skeleton':
try:
if self.sifts_atom_to_seqres_sequence_maps.get(c):
assert(self.pdbml_atom_to_seqres_sequence_maps[c].matches(self.sifts_atom_to_seqres_sequence_maps[c]))
self.atom_to_seqres_sequence_maps[c] = self.pdbml_atom_to_seqres_sequence_maps[c] + self.sifts_atom_to_seqres_sequence_maps[c]
else:
self.atom_to_seqres_sequence_maps[c] = self.pdbml_atom_to_seqres_sequence_maps[c]
except Exception as e:
raise colortext.Exception("Mapping cross-validation failed checking atom to seqres sequence maps between PDBML and SIFTS in %s, chain %s: %s" % (self.pdb_id, c, str(e)))
else:
self.atom_to_seqres_sequence_maps[c] = seqmap
for c, seqmap in sorted(self.clustal_seqres_to_uniparc_sequence_maps.items()):
if self.sequence_types[c] == 'Protein' or self.sequence_types[c] == 'Protein skeleton':
if (self.pdb_id, c) in use_SIFTS_match_for_seqres_sequence:
#assert(seqres_sequence[seqres_id].ResidueAA == uniparc_sequence[uniparc_id].ResidueAA)
if (self.pdb_id, c) not in known_bad_clustal_to_sifts_mappings:
# Flag cases for manual inspection
assert(list(self.clustal_seqres_to_uniparc_sequence_maps[c].keys()) == list(self.sifts_seqres_to_uniparc_sequence_maps[c].keys()))
for k in list(self.clustal_seqres_to_uniparc_sequence_maps[c].keys()):
v_1 = self.clustal_seqres_to_uniparc_sequence_maps[c][k]
v_2 = self.sifts_seqres_to_uniparc_sequence_maps[c][k]
if (self.pdb_id, c) not in known_bad_clustal_to_sifts_mappings and v_2:
# Make sure the UniParc IDs agree
assert(v_1[0] == v_2[0])
if (self.pdb_id, c) not in known_bad_clustal_to_sifts_mappings:
# Make sure the residue types agree
assert(self.uniparc_sequences[v_1[0]][v_1[1]].ResidueAA == self.uniparc_sequences[v_1[0]][v_2[1]].ResidueAA)
# Copy the substitution scores over. Since the residue types agree, this is valid
self.sifts_seqres_to_uniparc_sequence_maps[c].substitution_scores[k] = self.clustal_seqres_to_uniparc_sequence_maps[c].substitution_scores[k]
self.clustal_seqres_to_uniparc_sequence_maps[c] = self.sifts_seqres_to_uniparc_sequence_maps[c]
try:
if self.sifts_seqres_to_uniparc_sequence_maps.get(c):
if not self.clustal_seqres_to_uniparc_sequence_maps[c].matches(self.sifts_seqres_to_uniparc_sequence_maps[c]):
mismatched_keys = self.clustal_seqres_to_uniparc_sequence_maps[c].get_mismatches(self.sifts_seqres_to_uniparc_sequence_maps[c])
raise Exception("The Clustal and SIFTS SEQRES to UniParc maps disagree for %s, chain %s on these SEQRES residues: %s." % (self.pdb_id, c, str(mismatched_keys)))
self.seqres_to_uniparc_sequence_maps[c] = self.clustal_seqres_to_uniparc_sequence_maps[c] + self.sifts_seqres_to_uniparc_sequence_maps[c]
else:
self.seqres_to_uniparc_sequence_maps[c] = self.clustal_seqres_to_uniparc_sequence_maps[c]
except Exception as e:
colortext.warning(traceback.format_exc())
colortext.error(str(e))
raise colortext.Exception("Mapping cross-validation failed checking atom to seqres sequence maps between Clustal and SIFTS in %s, chain %s." % (self.pdb_id, c))
else:
self.clustal_seqres_to_uniparc_sequence_maps[c] = seqmap
else:
self.atom_to_seqres_sequence_maps = self.pdbml_atom_to_seqres_sequence_maps
self.seqres_to_uniparc_sequence_maps = self.clustal_seqres_to_uniparc_sequence_maps
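# The '+' used above glues two partial maps after matches() has verified that
# they agree on their overlap. A sketch of the gluing semantics with plain
# dicts (illustration only; the real SequenceMap objects also track
# substitution scores):
#   pdbml_map = {'A  10 ': 10, 'A  11 ': 11}
#   sifts_map = {'A  11 ': 11, 'A  12 ': 12}   # agrees on the shared key
#   glued = dict(pdbml_map); glued.update(sifts_map)
#   # -> {'A  10 ': 10, 'A  11 ': 11, 'A  12 ': 12}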
def _prune_maps_to_sequences(self):
''' When we merge the SIFTS maps, we can extend the sequence maps such that they have elements in their domain that we removed
from the sequence e.g. 1A2P, residue 'B 3 ' is removed because Rosetta barfs on it. Here, we prune the maps so that their
domains do not have elements that were removed from sequences.'''
for c, seq in self.atom_sequences.items():
res_ids = [r[0] for r in seq]
for_removal = []
for k, _, _ in self.atom_to_seqres_sequence_maps[c]:
if k not in res_ids:
for_removal.append(k)
for res_id in for_removal:
self.atom_to_seqres_sequence_maps[c].remove(res_id)
#print(self.fasta_sequences)
#print(self.seqres_sequences)
#self.atom_to_seqres_sequence_maps = None
#self.seqres_to_uniparc_sequence_maps = None
#self.pdbml_atom_to_seqres_sequence_maps = None
#self.clustal_seqres_to_uniparc_sequence_maps = None
#self.sifts_atom_to_seqres_sequence_maps = None
#self.sifts_seqres_to_uniparc_sequence_maps = None
#self.sifts_atom_to_uniparc_sequence_maps = None
def _create_sequences(self):
'''Get all of the Sequences - Rosetta, ATOM, SEQRES, FASTA, UniParc.'''
# Create the Rosetta sequences and the maps from the Rosetta sequences to the ATOM sequences
try:
self.pdb.construct_pdb_to_rosetta_residue_map(self.rosetta_scripts_path, rosetta_database_path = self.rosetta_database_path, cache_dir = self.cache_dir)
except PDBMissingMainchainAtomsException:
self.pdb_to_rosetta_residue_map_error = True
# Get all the Sequences
if self.pdb_id not in do_not_use_the_sequence_aligner:
self.uniparc_sequences = self.PDB_UniParc_SA.uniparc_sequences
else:
self.uniparc_sequences = self.sifts.get_uniparc_sequences()
self.fasta_sequences = self.FASTA.get_sequences(self.pdb_id)
self.seqres_sequences = self.pdb.seqres_sequences
self.atom_sequences = self.pdb.atom_sequences
if self.pdb_to_rosetta_residue_map_error:
self.rosetta_sequences = {}
for c in list(self.atom_sequences.keys()):
self.rosetta_sequences[c] = Sequence()
else:
self.rosetta_sequences = self.pdb.rosetta_sequences
# Update the chain types for the UniParc sequences
uniparc_pdb_chain_mapping = {}
if self.pdb_id not in do_not_use_the_sequence_aligner:
for pdb_chain_id, matches in self.PDB_UniParc_SA.clustal_matches.items():
if matches:
# we are not guaranteed to have a match e.g. the short chain J in 1A2C, chimeras, etc.
uniparc_chain_id = list(matches.keys())[0]
assert(len(matches) == 1)
uniparc_pdb_chain_mapping[uniparc_chain_id] = uniparc_pdb_chain_mapping.get(uniparc_chain_id, [])
uniparc_pdb_chain_mapping[uniparc_chain_id].append(pdb_chain_id)
else:
for pdb_chain_id, uniparc_chain_ids in self.sifts.get_pdb_chain_to_uniparc_id_map().items():
for uniparc_chain_id in uniparc_chain_ids:
uniparc_pdb_chain_mapping[uniparc_chain_id] = uniparc_pdb_chain_mapping.get(uniparc_chain_id, [])
uniparc_pdb_chain_mapping[uniparc_chain_id].append(pdb_chain_id)
for uniparc_chain_id, pdb_chain_ids in uniparc_pdb_chain_mapping.items():
sequence_type = set([self.seqres_sequences[p].sequence_type for p in pdb_chain_ids])
assert(len(sequence_type) == 1)
sequence_type = sequence_type.pop()
assert(self.uniparc_sequences[uniparc_chain_id].sequence_type == None)
self.uniparc_sequences[uniparc_chain_id].set_type(sequence_type)
for p in pdb_chain_ids:
self.pdb_chain_to_uniparc_chain_mapping[p] = uniparc_chain_id
# Update the chain types for the FASTA sequences
for chain_id, sequence in self.seqres_sequences.items():
self.fasta_sequences[chain_id].set_type(sequence.sequence_type)
### Private object creation functions ###
def _create_objects(self, chains_to_keep, starting_clustal_cut_off, min_clustal_cut_off, strip_HETATMS):
pdb_id = self.pdb_id
assert(20 <= min_clustal_cut_off <= starting_clustal_cut_off <= 100)
# Create the FASTA object
if not self.silent:
colortext.message("Creating the FASTA object.")
try:
if self.bio_cache:
self.FASTA = self.bio_cache.get_fasta_object(pdb_id)
else:
self.FASTA = FASTA.retrieve(pdb_id, cache_dir = self.cache_dir)
except:
raise colortext.Exception("Relatrix construction failed creating the FASTA object for %s.\n%s" % (pdb_id, traceback.format_exc()))
# Create the PDB object
if not self.silent:
colortext.message("Creating the PDB object.")
try:
if self.bio_cache:
self.pdb = self.bio_cache.get_pdb_object(pdb_id)
else:
self.pdb = PDB.retrieve(pdb_id, cache_dir = self.cache_dir)
if chains_to_keep:
self.pdb.strip_to_chains(chains_to_keep)
if strip_HETATMS:
self.pdb.strip_HETATMs()
except:
raise colortext.Exception("Relatrix construction failed creating the PDB object for %s.\n%s" % (pdb_id, traceback.format_exc()))
# Copy PDB properties
if self.pdb.deprecated:
self.replacement_pdb_id = self.pdb.replacement_pdb_id
self.sequence_types = self.pdb.chain_types
# todo: benchmark why PDBML creation is slow for some files e.g. 3ZKB.xml (lots of repeated chains)
# Create the PDBML object
if not self.silent:
colortext.message("Creating the PDBML object.")
try:
if self.bio_cache:
self.pdbml = self.bio_cache.get_pdbml_object(pdb_id)
else:
self.pdbml = PDBML.retrieve(pdb_id, cache_dir = self.cache_dir, bio_cache = self.bio_cache)
except:
raise colortext.Exception("Relatrix construction failed creating the PDBML object for %s.\n%s" % (pdb_id, traceback.format_exc()))
# Copy PDBML properties
if self.pdbml.deprecated:
if self.replacement_pdb_id:
assert(self.replacement_pdb_id == self.pdbml.replacement_pdb_id)
else:
self.replacement_pdb_id = self.pdbml.replacement_pdb_id
# Create the SIFTS object
try:
if self.bio_cache:
self.sifts = self.bio_cache.get_sifts_object(pdb_id, acceptable_sequence_percentage_match = self.acceptable_sifts_sequence_percentage_match)
else:
self.sifts = SIFTS.retrieve(pdb_id, cache_dir = self.cache_dir, acceptable_sequence_percentage_match = self.acceptable_sifts_sequence_percentage_match)
except MissingSIFTSRecord:
colortext.warning("No SIFTS entry was found for %s." % pdb_id)
except BadSIFTSMapping:
colortext.warning("The SIFTS mapping for %s was considered a bad mapping at the time of writing." % pdb_id)
except NoSIFTSPDBUniParcMapping:
colortext.warning("The PDB file %s has a known bad SIFTS mapping at the time of writing." % pdb_id)
# Create the PDBUniParcSequenceAligner object. We try the best alignment at first (100%) and then fall back to more relaxed alignments down to min_clustal_cut_off percent.
if not self.silent:
colortext.message("Creating the PDB to UniParc SequenceAligner object.")
cut_off = 0
try:
matched_chains = set()
matched_all_chains = False
self.PDB_UniParc_SA = None
if self.pdb_id not in do_not_use_the_sequence_aligner:
cut_off = None
for x in range(starting_clustal_cut_off, min_clustal_cut_off - 1, -1):
cut_off = x
if not self.silent:
colortext.warning("\tTrying to align sequences with a cut-off of %d%%." % cut_off)
if not self.PDB_UniParc_SA:
# Initialize the PDBUniParcSequenceAligner the first time through
self.PDB_UniParc_SA = PDBUniParcSequenceAligner(pdb_id, cache_dir = self.cache_dir, cut_off = cut_off, sequence_types = self.sequence_types, replacement_pdb_id = self.replacement_pdb_id, added_uniprot_ACs = self.pdb.get_UniProt_ACs())
else:
# We have already retrieved the UniParc entries. We just need to try the mapping again. This saves
# lots of time for entries with large numbers of UniProt entries e.g. 1HIO even if disk caching is used.
# We also stop trying to match a chain once a match has been found in a previous iteration.
# This speeds up the matching in multiple ways. First, we do not waste time by recreating the same UniParcEntry.
# Next, we do not waste time rematching chains we previously matched. Finally, we only match equivalence
# classes of chains where the equivalence is defined as having an identical sequence.
# For example we sped up:
# matching 1YGV (3 protein chains, 2 identical) starting at 100% by 67% (down from 86s to 28.5s with a match at 85%); (this case may be worth profiling)
# speed ups at each stage; not recreating PDB_UniParc_SA (due to low match%), only matching chains once (as A, C are found at 95%), and skipping sequence-equivalent chains (as A and C have the same sequence)
# matching 1HIO (4 protein chains, all unique) starting at 100% by 83% down from 33s to 5.5s (match at 95%);
# main speed-up due to not recreating PDB_UniParc_SA
# matching 1H38 (4 identical protein chains) starting at 100% by 5% down from 57s to 54s (match at 100%);
# no real speed-up since the match is at 100%
# matching the extreme case 487D (7 protein chains, all unique) starting at 100% by 94% down from 1811s to 116s (with a min_clustal_cut_off of 71%). A lot of the time in this case is in constructing the UniParcEntry object.
# main speed-up due to not recreating PDB_UniParc_SA
# matching 3ZKB (16 protein chains, all identical) starting at 100% by 90% (down from 31s to 3s with a match at 98%); (this case may be worth profiling)
# a lot of time was spent in PDBML creation (another optimization problem) so I only profiled this PDB_UniParc_SA section
# minor speed-up (31s to 27s) by not recreating PDB_UniParc_SA (match at 98%), main speed-up due to skipping sequence-equivalent chain (we only have to match one sequence)
self.PDB_UniParc_SA.realign(cut_off, chains_to_skip = matched_chains)
# We only care about protein chain matches so early out as soon as we have them all matched
protein_chain_matches = {}
for _c, _st in self.sequence_types.items():
if _st == 'Protein' or _st == 'Protein skeleton':
protein_chain_matches[_c] = self.PDB_UniParc_SA.clustal_matches[_c]
if protein_chain_matches[_c]:
matched_chains.add(_c)
num_matches_per_chain = set(map(len, list(protein_chain_matches.values())))
if len(num_matches_per_chain) == 1 and num_matches_per_chain.pop() == 1:
# We have exactly one match per protein chain. Early out.
if not self.silent:
colortext.message("\tSuccessful match with a cut-off of %d%%." % cut_off)
matched_all_chains = True
self.alignment_cutoff = cut_off
break
else:
# We have ambiguity - more than one match per protein chain. Exception.
if [n for n in num_matches_per_chain if n > 1]:
raise MultipleAlignmentException("Too many matches found at cut-off %d." % cut_off)
if not matched_all_chains:
protein_chains = [c for c in self.sequence_types if self.sequence_types[c].startswith('Protein')]
if not self.silent:
colortext.warning('\nNote: Not all chains were matched:')
for c in protein_chains:
if protein_chain_matches.get(c):
colortext.message(' %s matched %s' % (c, protein_chain_matches[c]))
else:
colortext.warning(' %s was not matched' % c)
print('')
num_matches_per_chain = set(map(len, list(self.PDB_UniParc_SA.clustal_matches.values())))
if num_matches_per_chain == set([0, 1]):
# We got matches but are missing chains
self.alignment_cutoff = cut_off
except MultipleAlignmentException as e:
# todo: this will probably fail with DNA or RNA so do not include those in the alignment
raise colortext.Exception("Relatrix construction failed creating the PDBUniParcSequenceAligner object for %s. The cut-off level reached %d%% without finding a match for all chains but at that level, the mapping from chains to UniParc IDs was not injective.\n%s" % (pdb_id, cut_off, str(e)))
except:
raise colortext.Exception("Relatrix construction failed creating the PDBUniParcSequenceAligner object for %s.\n%s" % (pdb_id, traceback.format_exc()))
|
class ResidueRelatrix(object):
''' A class for relating residue IDs from different schemes.
Note: we assume throughout that there is one map from SEQRES to UniParc. This is not always true e.g. Polyubiquitin-C (UPI000000D74D) has 9 copies of the ubiquitin sequence.'''
def __init__(self, pdb_id, rosetta_scripts_path, rosetta_database_path = None, chains_to_keep = [], min_clustal_cut_off = 80, cache_dir = None, silent = False, acceptable_sequence_percentage_match = 80.0, acceptable_sifts_sequence_percentage_match = None, starting_clustal_cut_off = 100, bio_cache = None):
''' acceptable_sequence_percentage_match is used when checking whether the SEQRES sequences have a mapping. Usually
90.00% works but some cases e.g. 1AR1, chain C, have a low matching score mainly due to extra residues. I set
this to 80.00% to cover most cases.'''
pass
def convert(self, chain_id, residue_id, from_scheme, to_scheme):
'''The API conversion function. This converts between the different residue ID schemes.'''
pass
def _convert(self, chain_id, residue_id, from_scheme, to_scheme):
'''The actual 'private' conversion function.'''
pass
def convert_from_rosetta(self, residue_id, to_scheme):
'''A simpler conversion function to convert from Rosetta numbering without requiring the chain identifier.'''
pass
def _validate(self):
'''Validate the mappings.'''
pass
def _validate_fasta_vs_seqres(self):
'''Check that the FASTA and SEQRES sequences agree (they sometimes differ)'''
pass
def _validate_mapping_signature(self):
'''Make sure the domains and ranges of the SequenceMaps match the Sequences.'''
pass
def _validate_id_types(self):
'''Check that the ID types are integers for Rosetta, SEQRES, and UniParc sequences and 6-character PDB IDs for the ATOM sequences.'''
pass
def _validate_residue_types(self):
'''Make sure all the residue types map through translation.'''
pass
def _create_inverse_maps(self):
'''Create the inverse mappings (UniParc -> SEQRES -> ATOM -> Rosetta).'''
pass
def _create_sequence_maps(self):
'''Get all of the SequenceMaps - Rosetta->ATOM, ATOM->SEQRES/FASTA, SEQRES->UniParc.'''
pass
def _merge_sifts_maps(self):
''' Make sure that the pdbml_atom_to_seqres_sequence_maps and clustal_seqres_to_uniparc_sequence_maps agree with SIFTS and merge the maps.
SIFTS may have more entries since we discard PDB residues which break Rosetta.
SIFTS may have less entries for some cases e.g. 1AR1, chain C where SIFTS does not map ATOMs 99-118.
SIFTS does not seem to contain ATOM to SEQRES mappings for (at least some) DNA chains e.g. 1APL, chain A
Because of these cases, we just assert that the overlap agrees so that we can perform a gluing of maps.'''
pass
def _prune_maps_to_sequences(self):
''' When we merge the SIFTS maps, we can extend the sequence maps such that they have elements in their domain that we removed
from the sequence e.g. 1A2P, residue 'B 3 ' is removed because Rosetta barfs on it. Here, we prune the maps so that their
domains do not have elements that were removed from sequences.'''
pass
def _create_sequences(self):
'''Get all of the Sequences - Rosetta, ATOM, SEQRES, FASTA, UniParc.'''
pass
def _create_objects(self, chains_to_keep, starting_clustal_cut_off, min_clustal_cut_off, strip_HETATMS):
pass
| 16 | 15 | 40 | 5 | 29 | 6 | 10 | 0.24 | 1 | 23 | 13 | 0 | 15 | 34 | 15 | 15 | 646 | 105 | 437 | 119 | 421 | 107 | 413 | 117 | 397 | 37 | 1 | 6 | 147 |
143,564 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/scop.py
|
klab.bio.scop.SCOPeDatabase
|
class SCOPeDatabase(DatabaseInterface):
def __init__(self, passwd = None, username = 'anonymous', use_utf=False, fallback_on_failures = True, cache_dir = '/kortemmelab/data/oconchus/SIFTS'):
super(SCOPeDatabase, self).__init__({},
isInnoDB = True,
numTries = 32,
host = "guybrush.ucsf.edu",
db = installed_database,
user = username,
passwd = passwd,
port = 3306,
unix_socket = "/var/lib/mysql/mysql.sock",
use_utf = use_utf)
self.cache_dir = cache_dir
self.fallback_on_failures = fallback_on_failures
self.levels = self.get_SCOPe_levels()
del self.levels[1] # remove the root level
level_names = [v for k, v in sorted(self.levels.items()) if k != 1] # skip the root level
search_fields = ['SCOPe_sources', 'SCOPe_search_fields', 'SCOPe_trust_level']
search_headers = ['SCOPe sources', 'Search fields', 'Trustiness']
self.pfam_api = None
self.SIFTS = {}
# Set up CSV fields
self.pdb_csv_fields = [
'pdb_id', 'chain', 'is_polypeptide', 'chain_description', 'resolution', 'pdbe_residue_range',
'sunid', 'sccs', 'sid']
self.pdb_csv_headers = [
'PDB id', 'Chain', 'Is polypeptide', 'Description', 'Resolution', 'PDBe residues',
'sunid', 'sccs', 'sid']
self.pdb_csv_fields += level_names + search_fields
self.pdb_csv_headers += level_names + search_headers
self.pfam_csv_fields = [
'pfam_accession', 'pfam_name', 'pfam_description', 'pfam_type_description', 'pfam_length',
'sunid', 'sccs', 'sid', 'SCOPe_sources', 'SCOPe_search_fields']
self.pfam_csv_headers = [
'Pfam accession', 'Name', 'Description', 'Type', 'Length',
'sunid', 'sccs', 'sid', 'SCOPe sources', 'Search fields']
self.pfam_csv_fields += level_names[:4] + search_fields
self.pfam_csv_headers += level_names[:4] + search_headers
assert(len(self.pdb_csv_fields) == len(self.pdb_csv_headers))
assert(len(self.pfam_csv_fields) == len(self.pfam_csv_headers))
def get_SCOPe_levels(self):
d = {}
results = self.execute_select('SELECT * FROM scop_level ORDER BY id')
for r in results:
d[r['id']] = r['description']
return d
def get_sifts(self, pdb_id, fail_on_error = False, require_uniprot_residue_mapping = False):
try:
pdb_id = pdb_id.lower()
if self.SIFTS.get(pdb_id):
return self.SIFTS[pdb_id]
self.SIFTS[pdb_id] = SIFTS.retrieve(pdb_id, cache_dir = self.cache_dir, acceptable_sequence_percentage_match = 70.0, require_uniprot_residue_mapping = require_uniprot_residue_mapping)
return self.SIFTS[pdb_id]
except Exception as e:
colortext.error('An exception happened retrieving the SIFTS file for %s: "%s". Ignoring this exception and continuing on...' % (pdb_id, str(e)))
colortext.error(traceback.format_exc())
if fail_on_error:
raise
return None
def get_pfam_api(self):
if not(self.pfam_api):
self.pfam_api = Pfam()
return self.pfam_api
def get_basic_pdb_chain_information(self, pdb_id, chain_id):
is_polypeptide, chain_description, resolution = None, None, None
results = self.execute_select('''
SELECT DISTINCT pdb_entry.code, pdb_chain.chain, pdb_chain.is_polypeptide, pdb_entry.description AS ChainDescription, pdb_release.resolution
FROM pdb_chain
INNER JOIN pdb_release ON pdb_release_id = pdb_release.id
INNER JOIN pdb_entry ON pdb_entry_id = pdb_entry.id
WHERE pdb_entry.code=%s AND pdb_chain.chain=%s
ORDER BY pdb_release.revision_date DESC''', parameters = (pdb_id, chain_id))
if results:
is_polypeptide = results[0]['is_polypeptide']
chain_description = results[0]['ChainDescription']
resolution = results[0]['resolution']
return dict(
pdb_id = pdb_id,
chain = chain_id,
is_polypeptide = is_polypeptide,
chain_description = chain_description,
resolution = resolution)
def get_common_fields(self, family_details):
# Get the common SCOPe fields. For the sccs class, we take the longest common prefix
sunid = set([f['sunid'] for f in family_details if f['sunid']]) or None
sccs = set([f['sccs'] for f in family_details if f['sccs']]) or None
sid = set([f['sid'] for f in family_details if f['sid']]) or None
scop_release_id = set([f['scop_release_id'] for f in family_details if f['scop_release_id']]) or None
if sunid:
if len(sunid) > 1:
sunid = None
else:
sunid = sunid.pop()
if sccs:
# take the longest common prefix
sccs = os.path.commonprefix(sccs) or None
if sccs and sccs.endswith('.'):
sccs = sccs[:-1]
if sid:
if len(sid) > 1:
sid = None
else:
sid = sid.pop()
if scop_release_id:
if len(scop_release_id) > 1:
scop_release_id = None
else:
scop_release_id = scop_release_id.pop()
return dict(
sunid = sunid,
sccs = sccs,
sid = sid,
scop_release_id = scop_release_id,
)
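# Example of the sccs longest-common-prefix rule above (hypothetical values):
#   os.path.commonprefix(['b.1.1.1', 'b.1.1.2'])  # -> 'b.1.1.'
# and the trailing-'.' strip then yields 'b.1.1', i.e. agreement down to the
# superfamily level while the differing family digit is dropped.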
def get_common_hierarchy(self, family_details):
d = {}
level = 2
while level < 9:
classification_level = self.levels[level]
family_values = set([f[classification_level] for f in family_details]) # allow null fields - if we get a filled in field for one Pfam accession number and a null field for another then we should discount this field entirely and break out
if len(family_values) == 1:
family_value = family_values.pop()
if family_value == None:
break
else:
d[classification_level] = family_value
else:
break
level += 1
return d
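# Worked example of the walk above, with hypothetical level descriptions: if
# two Pfam hits carry {'Class': 'b', 'Fold': 'b.1', 'Superfamily': 'b.1.1'}
# and {'Class': 'b', 'Fold': 'b.1', 'Superfamily': 'b.1.2'}, the loop keeps
# Class and Fold and breaks at Superfamily, the first level where they differ.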
def get_chain_details_by_related_pdb_chains(self, pdb_id, chain_id, pfam_accs):
''' Returns a dict of SCOPe details using information from PDB chains related through shared Pfam accession numbers.
This returns Pfam-level information for a PDB chain i.e. no details on the protein, species, or domain will be returned.
If there are SCOPe entries for the associated Pfam accession numbers which agree then this function returns
pretty complete information.
'''
if not pfam_accs:
return None
associated_pdb_chains = set()
pfam_api = self.get_pfam_api()
for pfam_acc in pfam_accs:
associated_pdb_chains = associated_pdb_chains.union(pfam_api.get_pdb_chains_from_pfam_accession_number(pfam_acc))
hits = []
#class_count = {}
pfam_scop_mapping = {}
for pdb_chain_pair in associated_pdb_chains:
ass_pdb_id, ass_chain_id = pdb_chain_pair[0], pdb_chain_pair[1]
hit = self.get_chain_details(ass_pdb_id, chain = ass_chain_id, internal_function_call = True, pfam_scop_mapping = pfam_scop_mapping)
if hit and hit.get('chains'):
assert(len(hit['chains']) == 1)
hits.append(hit['chains'][ass_chain_id])
#for k, v in hit.iteritems():
#class_count[v['sccs']] = class_count.get(v['sccs'], 0)
#class_count[v['sccs']] += 1
#print(' %s, %s: %s' % (v['pdb_id'], k, v['sccs']))
#pprint.pprint(class_count)
# Intersect the SCOP matches across all Pfam accession numbers
allowed_scop_domains = list(map(int, set.intersection(*list(pfam_scop_mapping.values()))))
allowed_scop_domains = list(set((allowed_scop_domains or []) + (self.get_sunid_for_pfam_accs(pfam_accs) or [])))
filtered_hits = []
print(pfam_accs)
print(allowed_scop_domains)
print('%d hits' % len(hits))
for hit in hits:
for k, v in hit['domains'].items():
if v['sunid'] in allowed_scop_domains:
filtered_hits.append(v)
print('%d filtered_hits' % len(filtered_hits))
if not filtered_hits:
return None
d = self.get_basic_pdb_chain_information(pdb_id, chain_id)
d.update(self.get_common_fields(filtered_hits))
d.update(dict(
SCOPe_sources = 'Pfam + SCOPe',
SCOPe_search_fields = 'Pfam + link_pdb.pdb_chain_id',
SCOPe_trust_level = 3
))
# Add the lowest common classification over all related Pfam families
for k, v in sorted(self.levels.items()):
d[v] = None
d.update(dict(self.get_common_hierarchy(filtered_hits)))
return d
def get_chain_details_by_pfam(self, pdb_id, chain = None):
''' Returns a dict pdb_id -> chain(s) -> chain and SCOPe details.
This returns Pfam-level information for a PDB chain i.e. no details on the protein, species, or domain will be returned.
If there are SCOPe entries for the associated Pfam accession numbers which agree then this function returns
pretty complete information.
'''
pfam_api = self.get_pfam_api()
if chain:
pfam_accs = pfam_api.get_pfam_accession_numbers_from_pdb_chain(pdb_id, chain)
if pfam_accs:
pfam_accs = {chain : pfam_accs}
else:
pfam_accs = pfam_api.get_pfam_accession_numbers_from_pdb_id(pdb_id)
if not pfam_accs:
# There were no associated Pfam accession numbers so we return
return None
d = {}
for chain_id, pfam_acc_set in pfam_accs.items():
family_details = []
for pfam_accession in pfam_acc_set:
family_details.append(self.get_pfam_details(pfam_accession))
family_details = [f for f in family_details if f]
if not family_details:
if self.fallback_on_failures:
# Fallback - There were no associated SCOPe entries with the associated Pfam accession numbers so we will
# search all PDB chains associated with those Pfam accession numbers instead
d[chain_id] = self.get_chain_details_by_related_pdb_chains(pdb_id, chain_id, pfam_accs.get(chain_id))
else:
d[chain_id] = None
continue
# Get the common SCOPe fields. For the sccs class, we take the longest common prefix
d[chain_id] = self.get_basic_pdb_chain_information(pdb_id, chain_id)
d[chain_id].update(self.get_common_fields(family_details))
d[chain_id].update(dict(
SCOPe_sources = 'Pfam + SCOPe',
SCOPe_search_fields = 'Pfam + link_pfam.pfam_accession',
SCOPe_trust_level = 2
))
# Add the lowest common classification over all related Pfam families
for k, v in sorted(self.levels.items()):
d[chain_id][v] = None
d[chain_id].update(dict(self.get_common_hierarchy(family_details)))
return d
def get_list_of_pdb_chains(self, pdb_id):
results = self.execute_select('''
SELECT DISTINCT pdb_chain.chain, pdb_release.id as release_id
FROM pdb_chain
INNER JOIN pdb_release ON pdb_release_id = pdb_release.id
INNER JOIN pdb_entry ON pdb_entry_id = pdb_entry.id
WHERE pdb_entry.code=%s''', parameters = (pdb_id,))
if results:
max_release_id = max([r['release_id'] for r in results])
return set([r['chain'] for r in results if r['release_id'] == max_release_id])
return None
def get_chain_details(self, pdb_id, chain = None, internal_function_call = False, pfam_scop_mapping = {}):
''' Returns a dict pdb_id -> chain(s) -> chain and SCOPe details.
This is the main function for getting details for a PDB chain. If there is an associated SCOPe entry for this
chain then this function returns the most information.
internal_function_call is used to prevent potential infinite loops
'''
query = '''
SELECT DISTINCT scop_node.id AS scop_node_id, scop_node.*, pdb_entry.code, pdb_chain_id, pdb_chain.chain, pdb_chain.is_polypeptide, pdb_entry.description AS ChainDescription, pdb_release.resolution
FROM `link_pdb`
INNER JOIN scop_node on node_id=scop_node.id
INNER JOIN pdb_chain ON pdb_chain_id = pdb_chain.id
INNER JOIN pdb_release ON pdb_release_id = pdb_release.id
INNER JOIN pdb_entry ON pdb_entry_id = pdb_entry.id
WHERE pdb_entry.code=%s'''
if chain:
query += ' AND pdb_chain.chain=%s'
parameters = (pdb_id, chain)
else:
parameters = (pdb_id, )
query += ' ORDER BY release_id DESC'
results = self.execute_select(query, parameters = parameters)
if not results:
if self.fallback_on_failures and not internal_function_call:
# Fallback - use any Pfam accession numbers associated with the chain to get partial information
# Note: this fallback has another fallback in case none of the Pfam entries exist in SCOPe
return self.get_chain_details_by_pfam(pdb_id, chain)
else:
return None
# I am making the assumption here that sids are consistent through releases i.e. that if d1aqt_1 is used in release
# 3 then it will be used for any other releases where the domain is named
sid_map = {}
for r in results:
sid = r['sid']
c_id = r['chain']
if not(sid_map.get(sid)) or sid_map[sid] == ' ':
sid_map[sid] = c_id
chain_to_sid_map = {}
for k, v in sid_map.items():
chain_to_sid_map[v] = chain_to_sid_map.get(v, set())
chain_to_sid_map[v].add(k)
leaf_node_chains = set()
searched_deeper = False
if pdb_id and chain:
leaf_node_chains.add(chain)
else:
pdb_chain_ids = self.get_list_of_pdb_chains(pdb_id)
if pdb_chain_ids:
leaf_node_chains = pdb_chain_ids
else:
return None
leaf_nodes = {}
for c in leaf_node_chains:
if c in chain_to_sid_map:
for sid in chain_to_sid_map[c]:
leaf_nodes[(c, sid)] = None
# Only consider the most recent records
for r in results:
chain_id = r['chain']
sid = r['sid']
k = (chain_id, sid)
if (not leaf_nodes.get(k)) or (r['release_id'] > leaf_nodes[k]['release_id']):
leaf_nodes[k] = r
# Older revisions of SCOPe have blank chain IDs for some records while newer revisions have the chain ID
# The best solution to avoid redundant results seems to be to remove all blank chain records if at least one
# more recent named chain exists. There could be some nasty cases - we only keep the most recent unnamed chain
# but this may correspond to many chains if the PDB has multiple chains since we only look at the chain ID.
# I think that it should be *unlikely* that we will have much if any bad behavior though.
for k1, v1 in list(leaf_nodes.items()):
if k1[0] == ' ':
release_id_of_blank_record = leaf_nodes[k1]['release_id']
for k2, v2 in leaf_nodes.items():
if k2[0] != ' ':
assert(k2[0].isalpha() and len(k2[0]) == 1)
if v2['release_id'] > release_id_of_blank_record:
del leaf_nodes[k1] # safe to delete: the outer loop iterates over a list snapshot and we break out of the inner loop immediately afterwards
break
d = {}
for chain_sid_pair, details in leaf_nodes.items():
chain_id = chain_sid_pair[0]
sid = chain_sid_pair[1]
if sid.strip() == '':
colortext.warning('FOUND AN EMPTY SID FIELD')
assert(sid == details['sid'])
# Get the details for all chains
if details:
if d.get('resolution'):
assert(d['resolution'] == details['resolution'])
else:
d['resolution'] = details['resolution']
d['chains'] = d.get('chains', {})
if d['chains'].get(chain_id):
assert(d['chains'][chain_id]['is_polypeptide'] == details['is_polypeptide'])
assert(d['chains'][chain_id]['chain_description'] == details['ChainDescription'])
else:
d['chains'][chain_id] = {}
d['chains'][chain_id]['is_polypeptide'] = details['is_polypeptide']
d['chains'][chain_id]['chain_description'] = details['ChainDescription']
d['chains'][chain_id]['domains'] = d['chains'][chain_id].get('domains', {})
domain_information = dict(
#pdb_id = details['code'],
#chain = details['chain'],
#is_polypeptide = details['is_polypeptide'],
#chain_description = details['ChainDescription'],
sunid = details['sunid'],
sccs = details['sccs'],
sid = details['sid'],
scop_release_id = details['release_id'],
SCOPe_sources = 'SCOPe',
SCOPe_search_fields = 'link_pdb.pdb_chain_id',
SCOPe_trust_level = 1
)
for k, v in sorted(self.levels.items()):
domain_information[v] = None
pfam = None
level, parent_node_id = details['level_id'], details['parent_node_id']
pfam = pfam or self.get_pfam_for_node(details['scop_node_id'])
# Store the top-level description
domain_information[self.levels[level]] = details['description']
# Wind up the level hierarchy and retrieve the descriptions
c = 0
while level > 2:
parent_details = self.execute_select('SELECT * FROM scop_node WHERE id=%s', parameters = (parent_node_id,))
assert(len(parent_details) <= 1)
if parent_details:
parent_details = parent_details[0]
level, parent_node_id = parent_details['level_id'], parent_details['parent_node_id']
pfam = pfam or self.get_pfam_for_node(parent_details['id'])
domain_information[self.levels[level]] = parent_details['description']
else:
break
# This should never trigger but just in case...
c += 1
if c > 20:
raise Exception('There is a logical error in the script or database which may result in an infinite lookup loop.')
domain_information['Pfam'] = pfam
# Fill in the residue range data
domain_information['pdbe_residue_range'] = None
sifts_object = self.get_sifts(pdb_id)
if sifts_object:
colortext.message(pdb_id)
region_mapping = sifts_object.region_mapping
ps_map = sifts_object.pfam_scop_mapping or {}
for k, v in ps_map.items():
pfam_scop_mapping[k] = pfam_scop_mapping.get(k, set())
pfam_scop_mapping[k] = pfam_scop_mapping[k].union(v.get_matches('SCOP'))
residue_ranges = region_mapping.get(chain_id, {}).get('SCOP', {}).get(str(details['sunid']))
if residue_ranges:
residue_ranges = ', '.join(['%d-%d' % (t[0], t[1]) for t in residue_ranges])
domain_information['pdbe_residue_range'] = residue_ranges
d['chains'][chain_id]['domains'][sid] = domain_information
else:
if self.fallback_on_failures and not(internal_function_call) and not(searched_deeper):
fallback_results = self.get_chain_details_by_pfam(pdb_id, chain_id)
if fallback_results and fallback_results.get(chain_id):
domain_information = fallback_results[chain_id]
return d
def get_pfam_for_node(self, scop_node_id):
results = self.execute_select('SELECT pfam_accession FROM link_pfam WHERE node_id=%s', parameters = (scop_node_id,))
if results:
return results[0]['pfam_accession']
return None
def get_sunid_for_pfam_accs(self, pfam_accs):
sunids = set()
for pfam_acc in pfam_accs:
results = self.execute_select('SELECT scop_node.sunid FROM link_pfam INNER JOIN scop_node ON node_id=scop_node.id WHERE pfam_accession=%s', parameters = (pfam_acc,))
if results:
sunids.add(results[0]['sunid'])
return list(sunids) or None
def get_pdb_list_details(self, pdb_ids):
d = {}
for pdb_id in pdb_ids:
results = self.get_chain_details(pdb_id)
d[pdb_id] = results
return d
def get_pdb_list_details_as_table(self, pdb_ids):
t = SCOPeTableCollection(self)
d = self.get_pdb_list_details(list(set(pdb_ids)))
failed_pdb_ids = []
if d:
for pdb_id, pdb_details in sorted(d.items()):
if pdb_details:
for chain_id, chain_details in sorted(pdb_details.items()):
t.add_pdb_line(chain_details)
else:
failed_pdb_ids.append(pdb_id)
return t
def get_pdb_list_details_as_csv(self, pdb_ids, field_separator = '\t', line_separator = '\n'):
return self.get_details_as_csv(self.get_pdb_list_details_as_table(pdb_ids), field_separator = field_separator, line_separator = line_separator)
def get_details_as_csv(self, tbl, field_separator = '\t', line_separator = '\n'):
return tbl.get_csv_tables(field_separator, line_separator)
def get_pfam_details(self, pfam_accession):
'''Returns a dict pdb_id -> chain(s) -> chain and SCOPe details.'''
results = self.execute_select('''
SELECT DISTINCT scop_node.*, scop_node.release_id AS scop_node_release_id,
pfam.release_id AS pfam_release_id, pfam.name AS pfam_name, pfam.accession, pfam.description AS pfam_description, pfam.length AS pfam_length,
pfam_type.description AS pfam_type_description
FROM `link_pfam`
INNER JOIN scop_node on node_id=scop_node.id
INNER JOIN pfam ON link_pfam.pfam_accession = pfam.accession
INNER JOIN pfam_type ON pfam.pfam_type_id = pfam_type.id
WHERE pfam.accession=%s ORDER BY scop_node.release_id DESC''', parameters = (pfam_accession,))
if not results:
return None
# Only consider the most recent Pfam releases and most recent SCOPe records, giving priority to SCOPe revisions over Pfam revisions
most_recent_record = None
for r in results:
accession = r['accession']
if (not most_recent_record) or (r['scop_node_release_id'] > most_recent_record['scop_node_release_id']):
most_recent_record = r
elif r['pfam_release_id'] > most_recent_record['pfam_release_id']:
most_recent_record = r
d = dict(
pfam_accession = most_recent_record['accession'],
pfam_name = most_recent_record['pfam_name'],
pfam_description = most_recent_record['pfam_description'],
pfam_type_description = most_recent_record['pfam_type_description'],
pfam_length = most_recent_record['pfam_length'],
pfam_release_id = most_recent_record['pfam_release_id'],
sunid = most_recent_record['sunid'],
sccs = most_recent_record['sccs'],
sid = most_recent_record['sid'],
scop_release_id = most_recent_record['scop_node_release_id'],
SCOPe_sources = 'SCOPe',
SCOPe_search_fields = 'link_pfam.pfam_accession',
SCOPe_trust_level = 1
)
for k, v in sorted(self.levels.items()):
d[v] = None
level, parent_node_id = most_recent_record['level_id'], most_recent_record['parent_node_id']
# Store the top-level description
d[self.levels[level]] = most_recent_record['description']
# Wind up the level hierarchy and retrieve the descriptions
c = 0
while level > 2:
parent_details = self.execute_select('SELECT * FROM scop_node WHERE id=%s', parameters = (parent_node_id,))
assert(len(parent_details) <= 1)
if parent_details:
parent_details = parent_details[0]
level, parent_node_id = parent_details['level_id'], parent_details['parent_node_id']
d[self.levels[level]] = parent_details['description']
else:
break
# This should never trigger but just in case...
c += 1
if c > 20:
raise Exception('There is a logical error in the script or database which may result in an infinite lookup loop.')
assert(d['Protein'] == d['Species'] == d['PDB Entry Domain'] == None)
return d
def get_pfam_list_details(self, pfam_accs):
d = {}
for pfam_accession in pfam_accs:
results = self.get_pfam_details(pfam_accession)
d[pfam_accession] = results
return d
def get_pfam_list_details_as_table(self, pfam_accs):
t = SCOPeTableCollection(self)
d = self.get_pfam_list_details(pfam_accs)
if d:
for pfam_accession, pfam_details in sorted(d.items()):
if pfam_details:
t.add_pfam_line(pfam_details)
return t
def get_pfam_list_details_as_csv(self, pfam_accs, field_separator = '\t', line_separator = '\n'):
return self.get_details_as_csv(self.get_pfam_list_details_as_table(pfam_accs), field_separator = field_separator, line_separator = line_separator)
def determine_SCOPe_class_of_pdb_residue(self, pdb_id, pdb_chain_id, pdb_residue_id):
sifts_object = self.get_sifts(pdb_id, fail_on_error = True, require_uniprot_residue_mapping = False)
scop_class = None
if sifts_object:
PDBeResidueID = sifts_object.atom_to_seqres_sequence_maps.get(pdb_chain_id, {}).get(PDB.ChainResidueID2String(pdb_chain_id, pdb_residue_id))
if PDBeResidueID:
scop_hits = set()
scop_regions = sifts_object.region_mapping.get(pdb_chain_id, {}).get('SCOP')
if scop_regions:
for sunid, ranges in scop_regions.items():
for r in ranges:
assert(r[0] <= r[1])
if r[0] <= PDBeResidueID <= r[1]:
scop_hits.add(sunid)
if scop_hits:
assert(len(scop_hits) == 1)
scop_class = self.execute_select('SELECT sccs FROM scop_node WHERE sunid=%s', parameters = (scop_hits.pop(),))
if scop_class:
scop_class = scop_class[0]['sccs']
return scop_class
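# Illustrative call (hypothetical residue; the exact residue ID format is
# whatever PDB.ChainResidueID2String expects, and a cached SIFTS entry is
# required):
#   scop_db = SCOPeDatabase()
#   scop_db.determine_SCOPe_class_of_pdb_residue('1a2p', 'A', '42 ')
#   # -> e.g. 'd.1.1.2' when PDBe residue 42 falls inside exactly one SCOP range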
def determine_Pfam_class_of_pdb_residue(self, pdb_id, pdb_chain_id, pdb_residue_id):
sifts_object = self.get_sifts(pdb_id, fail_on_error = True, require_uniprot_residue_mapping = False)
pfam_accs = []
if sifts_object:
PDBeResidueID = sifts_object.atom_to_seqres_sequence_maps.get(pdb_chain_id, {}).get(PDB.ChainResidueID2String(pdb_chain_id, pdb_residue_id))
if PDBeResidueID:
pfam_hits = set()
pfam_regions = sifts_object.region_mapping.get(pdb_chain_id, {}).get('Pfam')
if pfam_regions:
for pfam_acc, ranges in pfam_regions.items():
for r in ranges:
assert(r[0] <= r[1])
if r[0] <= PDBeResidueID <= r[1]:
pfam_hits.add(pfam_acc)
if pfam_hits:
pfam_accs = sorted(pfam_hits)
return pfam_accs
|
class SCOPeDatabase(DatabaseInterface):
def __init__(self, passwd = None, username = 'anonymous', use_utf=False, fallback_on_failures = True, cache_dir = '/kortemmelab/data/oconchus/SIFTS'):
pass
def get_SCOPe_levels(self):
pass
def get_sifts(self, pdb_id, fail_on_error = False, require_uniprot_residue_mapping = False):
pass
def get_pfam_api(self):
pass
def get_basic_pdb_chain_information(self, pdb_id, chain_id):
pass
def get_common_fields(self, family_details):
pass
def get_common_hierarchy(self, family_details):
pass
def get_chain_details_by_related_pdb_chains(self, pdb_id, chain_id, pfam_accs):
''' Returns a dict of SCOPe details using information from related PDB chains.
This returns Pfam-level information for a PDB chain i.e. no details on the protein, species, or domain will be returned.
If there are SCOPe entries for the associated Pfam accession numbers which agree then this function returns
pretty complete information.
'''
pass
def get_chain_details_by_pfam(self, pdb_id, chain = None):
''' Returns a dict pdb_id -> chain(s) -> chain and SCOPe details.
This returns Pfam-level information for a PDB chain i.e. no details on the protein, species, or domain will be returned.
If there are SCOPe entries for the associated Pfam accession numbers which agree then this function returns
pretty complete information.
'''
pass
def get_list_of_pdb_chains(self, pdb_id):
pass
def get_chain_details(self, pdb_id, chain = None, internal_function_call = False):
''' Returns a dict pdb_id -> chain(s) -> chain and SCOPe details.
This is the main function for getting details for a PDB chain. If there is an associated SCOPe entry for this
chain then this function returns the most information.
internal_function_call is used to prevent potential infinite loops
'''
pass
def get_pfam_for_node(self, scop_node_id):
pass
def get_sunid_for_pfam_accs(self, pfam_accs):
pass
def get_pdb_list_details(self, pdb_ids):
pass
def get_pdb_list_details_as_table(self, pdb_ids):
pass
def get_pdb_list_details_as_csv(self, pdb_ids, field_separator = '\t', line_separator = '\n'):
pass
def get_details_as_csv(self, tbl, field_separator = '\t', line_separator = '\n'):
pass
def get_pfam_details(self, pfam_accession):
'''Returns a dict pdb_id -> chain(s) -> chain and SCOPe details.'''
pass
def get_pfam_list_details(self, pfam_accs):
pass
def get_pfam_list_details_as_table(self, pfam_accs):
pass
def get_pfam_list_details_as_csv(self, pfam_accs, field_separator = '\t', line_separator = '\n'):
pass
def determine_SCOPe_class_of_pdb_residue(self, pdb_id, pdb_chain_id, pdb_residue_id):
pass
def determine_Pfam_class_of_pdb_residue(self, pdb_id, pdb_chain_id, pdb_residue_id):
pass
| 24 | 4 | 26 | 2 | 21 | 3 | 5 | 0.12 | 1 | 11 | 3 | 0 | 23 | 9 | 23 | 51 | 636 | 97 | 484 | 143 | 460 | 59 | 383 | 142 | 359 | 33 | 2 | 6 | 125 |
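Both determine_* methods above reduce to the same step: convert the PDB residue to its PDBe (SEQRES) index, then test that index against each domain's inclusive ranges. A minimal sketch of that range-membership step on its own (the function name, accessions and ranges below are invented for illustration):

def accessions_containing(residue_index, region_ranges):
    # region_ranges: accession -> list of inclusive (start, end) pairs
    hits = set()
    for accession, ranges in region_ranges.items():
        for start, end in ranges:
            assert start <= end
            if start <= residue_index <= end:
                hits.add(accession)
    return sorted(hits)

example_ranges = {'PF00069': [(10, 120)], 'PF07714': [(90, 250)]}
print(accessions_containing(100, example_ranges))  # ['PF00069', 'PF07714']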
143,565 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/scop.py
|
klab.bio.scop.SCOPeTableCollection
|
class SCOPeTableCollection(object):
def __init__(self, SCOPe_database):
self.SCOPe_database = SCOPe_database
self.pdb_table = []
self.pfam_table = []
def __add__(self, other):
new_t = SCOPeTableCollection(self.SCOPe_database)
new_t.pdb_table = self.pdb_table + other.pdb_table
new_t.pfam_table = self.pfam_table + other.pfam_table
return new_t
def add_pdb_line(self, details):
if details:
self.pdb_table.append([str(details[f] or '') for f in self.SCOPe_database.pdb_csv_fields])
def add_pfam_line(self, details):
if details:
self.pfam_table.append([str(details[f] or '') for f in self.SCOPe_database.pfam_csv_fields])
def get_csv_tables(self, field_separator = '\t', line_separator = '\n'):
d = dict.fromkeys(['PDB', 'Pfam'], None)
if self.pfam_table:
d['Pfam'] = line_separator.join([field_separator.join(l) for l in [self.SCOPe_database.pfam_csv_headers] + self.pfam_table])
if self.pdb_table:
d['PDB'] = line_separator.join([field_separator.join(l) for l in [self.SCOPe_database.pdb_csv_headers] + self.pdb_table])
return d
def get_tables(self):
d = dict.fromkeys(['PDB', 'Pfam'], None)
if self.pfam_table:
d['Pfam'] = [self.SCOPe_database.pfam_csv_headers] + self.pfam_table
if self.pdb_table:
d['PDB'] = [self.SCOPe_database.pdb_csv_headers] + self.pdb_table
return d
|
class SCOPeTableCollection(object):
def __init__(self, SCOPe_database):
pass
def __add__(self, other):
pass
def add_pdb_line(self, details):
pass
def add_pfam_line(self, details):
pass
def get_csv_tables(self, field_separator = '\t', line_separator = '\n'):
pass
def get_tables(self):
pass
| 7 | 0 | 5 | 0 | 5 | 0 | 2 | 0 | 1 | 2 | 0 | 0 | 6 | 3 | 6 | 6 | 36 | 6 | 30 | 13 | 23 | 0 | 30 | 13 | 23 | 3 | 1 | 1 | 12 |
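SCOPeTableCollection only touches its database object for the *_csv_fields and *_csv_headers attributes, so a lightweight stand-in is enough to exercise the CSV assembly. A usage sketch under that assumption (StubDB and its field names are invented; the class above is assumed importable):

class StubDB(object):
    pdb_csv_fields = ['pdb_id', 'chain']
    pdb_csv_headers = ['PDB ID', 'Chain']
    pfam_csv_fields = ['pfam_accession']
    pfam_csv_headers = ['Pfam accession']

t = SCOPeTableCollection(StubDB())
t.add_pdb_line({'pdb_id': '1ABC', 'chain': 'A'})
print(t.get_csv_tables(field_separator=',')['PDB'])
# PDB ID,Chain
# 1ABC,A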
143,566 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/sifts.py
|
klab.bio.sifts.BadSIFTSMapping
|
class BadSIFTSMapping(Exception): pass
|
class BadSIFTSMapping(Exception):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
143,567 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/sifts.py
|
klab.bio.sifts.DomainMatch
|
class DomainMatch(object):
def __init__(self, domain_accession, domain_type):
self.domain_accession = domain_accession
self.domain_type = domain_type
self.matches = {}
def add(self, domain_accession, domain_type, match_quality):
'''match_quality should be a value between 0 and 1.'''
self.matches[domain_type] = self.matches.get(domain_type, {})
self.matches[domain_type][domain_accession] = match_quality
def get_matches(self, domain_type):
return set(self.matches.get(domain_type, {}).keys())
def to_dict(self):
d = {}
for other_domain_type, v in sorted(self.matches.items()):
for domain_accession, match_quality in sorted(v.items()):
d[other_domain_type] = d.get(other_domain_type, set())
d[other_domain_type].add(domain_accession)
return {self.domain_accession : d}
def __repr__(self):
s = ''
for other_domain_type, v in sorted(self.matches.items()):
s += '%s -> %s\n' % (self.domain_type, other_domain_type)
for domain_accession, match_quality in sorted(v.items()):
s += ' %s -> %s: matched at %0.2f\n' % (self.domain_accession, domain_accession, match_quality)
return s
|
class DomainMatch(object):
def __init__(self, domain_accession, domain_type):
pass
def add(self, domain_accession, domain_type, match_quality):
'''match_quality should be a value between 0 and 1.'''
pass
def get_matches(self, domain_type):
pass
def to_dict(self):
pass
def __repr__(self):
pass
| 6 | 1 | 5 | 0 | 5 | 0 | 2 | 0.04 | 1 | 1 | 0 | 0 | 5 | 3 | 5 | 5 | 34 | 9 | 24 | 15 | 18 | 1 | 24 | 15 | 18 | 3 | 1 | 2 | 9 |
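A short usage sketch of DomainMatch, assuming the class above is in scope (the accession identifiers and match qualities are invented):

m = DomainMatch('PF00069', 'Pfam')
m.add('d1abca_', 'SCOP', 0.93)
m.add('d2xyzb1', 'SCOP', 0.91)
print(sorted(m.get_matches('SCOP')))  # ['d1abca_', 'd2xyzb1']
print(m.to_dict())                    # {'PF00069': {'SCOP': {both SCOP sids}}}
print(m)                              # one line per match with its quality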
143,568 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/sifts.py
|
klab.bio.sifts.MissingSIFTSRecord
|
class MissingSIFTSRecord(Exception): pass
|
class MissingSIFTSRecord(Exception):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
143,569 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/sifts.py
|
klab.bio.sifts.SIFTS
|
class SIFTS(xml.sax.handler.ContentHandler):
def __init__(self, xml_contents, pdb_contents, acceptable_sequence_percentage_match = 70.0, cache_dir = None, domain_overlap_cutoff = 0.88, require_uniprot_residue_mapping = True, bio_cache = None, pdb_id = None):
''' The PDB contents should be passed so that we can deal with HETATM records as the XML does not contain the necessary information.
If require_uniprot_residue_mapping is set and there is no PDB residue -> UniProt sequence index mapping (e.g. 2IMM at the time of writing) then we raise an exception.
Otherwise, we store the information we can which can still be useful e.g. SCOP domain data.
bio_cache should be a klab.bio.cache.py::BioCache object and is used to avoid reading/downloading cached files repeatedly.
'''
self.atom_to_uniparc_sequence_maps = {} # PDB Chain -> PDBUniParcSequenceMap(PDB ResidueID -> (UniParc ID, UniParc sequence index)) where the UniParc sequence index is 1-based (first element has index 1)
# Note: These maps map from PDB residue IDs to PDBe residue IDs
self.atom_to_seqres_sequence_maps = {} # PDB Chain -> SequenceMap(PDB ResidueID -> SEQRES sequence index) where the SEQRES sequence index is 1-based (first element has index 1)
self.seqres_to_uniparc_sequence_maps = {} # PDB Chain -> PDBUniParcSequenceMap(SEQRES index -> (UniParc ID, UniParc sequence index)) where the SEQRES index and UniParc sequence index is 1-based (first element has index 1)
self.counters = {}
self.pdb_id = pdb_id
self.bio_cache = bio_cache
self.acceptable_sequence_percentage_match = acceptable_sequence_percentage_match
self.tag_data = []
self.cache_dir = cache_dir
self.uniparc_sequences = {}
self.uniparc_objects = {}
self.pdb_chain_to_uniparc_id_map = {}
self.region_mapping = {}
self.region_map_coordinate_systems = {}
self.domain_overlap_cutoff = domain_overlap_cutoff # the percentage (measured in the range [0, 1.0]) at which we consider two domains to be the same e.g. if a Pfam domain of length 60 overlaps with a SCOP domain on 54 residues then the overlap would be 54/60 = 0.9
self.require_uniprot_residue_mapping = require_uniprot_residue_mapping
self.xml_contents = xml_contents
if bio_cache and pdb_id:
self.modified_residues = bio_cache.get_pdb_object(pdb_id).modified_residues
else:
self.modified_residues = PDB(pdb_contents).modified_residues
self._STACK = [] # This is used to create a simple FSA for the parsing
self.current_residue = None
self.residues = []
self.reading_unobserved_property = False
self.uniparc_ids = set()
assert(0 <= acceptable_sequence_percentage_match <= 100)
assert(xml_contents.find("encoding='UTF-8'") != -1)
def get_pdb_chain_to_uniparc_id_map(self):
if self.pdb_chain_to_uniparc_id_map:
return self.pdb_chain_to_uniparc_id_map
else:
self.pdb_chain_to_uniparc_id_map = {}
for c, mp in self.atom_to_uniparc_sequence_maps.items():
self.pdb_chain_to_uniparc_id_map[c] = self.pdb_chain_to_uniparc_id_map.get(c, set())
for _, v, _ in mp:
self.pdb_chain_to_uniparc_id_map[c].add(v[0])
for c, mp in self.seqres_to_uniparc_sequence_maps.items():
self.pdb_chain_to_uniparc_id_map[c] = self.pdb_chain_to_uniparc_id_map.get(c, set())
for _, v, _ in mp:
self.pdb_chain_to_uniparc_id_map[c].add(v[0])
for c, s in self.pdb_chain_to_uniparc_id_map.items():
self.pdb_chain_to_uniparc_id_map[c] = sorted(s)
return self.pdb_chain_to_uniparc_id_map
def get_uniparc_sequences(self):
if self.uniparc_sequences:
return self.uniparc_sequences
else:
self.uniparc_sequences = {}
self.uniparc_objects = {}
for UniParcID in self.uniparc_ids:
entry = UniParcEntry(UniParcID, cache_dir = self.cache_dir)
self.uniparc_sequences[entry.UniParcID] = Sequence.from_sequence(entry.UniParcID, entry.sequence)
self.uniparc_objects[entry.UniParcID] = entry
return self.uniparc_sequences
@staticmethod
def retrieve(pdb_id, cache_dir = None, acceptable_sequence_percentage_match = 70.0, require_uniprot_residue_mapping = True, bio_cache = None):
'''Creates a SIFTS object by using a cached copy of the files if they exist or by retrieving the files from the RCSB.
bio_cache should be a klab.bio.cache.py::BioCache object and is used to avoid reading/downloading cached files repeatedly.
'''
pdb_contents = None
xml_contents = None
pdb_id = pdb_id.upper()
l_pdb_id = pdb_id.lower()
if len(pdb_id) != 4 or not pdb_id.isalnum():
raise Exception("Bad PDB identifier '%s'." % pdb_id)
if bio_cache:
pdb_contents = bio_cache.get_pdb_contents(pdb_id)
xml_contents = bio_cache.get_sifts_xml_contents(pdb_id)
if cache_dir:
if not pdb_contents:
# Check to see whether we have a cached copy of the PDB file
filename = os.path.join(cache_dir, "%s.pdb" % pdb_id)
if os.path.exists(filename):
pdb_contents = read_file(filename)
if not xml_contents:
# Check to see whether we have a cached copy of the XML file
filename = os.path.join(cache_dir, "%s.sifts.xml.gz" % l_pdb_id)
if os.path.exists(filename):
xml_contents = read_file(filename)
# Get any missing files from the RCSB and create cached copies if appropriate
if not pdb_contents:
pdb_contents = rcsb.retrieve_pdb(pdb_id)
if cache_dir:
write_file(os.path.join(cache_dir, "%s.pdb" % pdb_id), pdb_contents)
if not xml_contents:
try:
xml_contents = retrieve_xml(pdb_id, silent = False)
if cache_dir:
write_file(os.path.join(cache_dir, "%s.sifts.xml.gz" % l_pdb_id), xml_contents)
except FTPException550:
raise MissingSIFTSRecord('The file "%s.sifts.xml.gz" could not be found on the EBI FTP server.' % l_pdb_id)
# Return the object
handler = SIFTS(xml_contents, pdb_contents, acceptable_sequence_percentage_match = acceptable_sequence_percentage_match, cache_dir = cache_dir, require_uniprot_residue_mapping = require_uniprot_residue_mapping, bio_cache = bio_cache, pdb_id = pdb_id)
xml.sax.parseString(xml_contents, handler)
return handler
def stack_push(self, lvl, data):
if lvl == 0:
assert(not(self._STACK))
else:
assert(self._STACK and (len(self._STACK) == lvl))
for x in range(lvl):
assert(self._STACK[x][0] == x)
self._STACK.append((lvl, data))
def stack_pop(self, lvl):
num_levels = lvl + 1
assert(self._STACK and (len(self._STACK) == num_levels))
for x in range(num_levels):
assert(self._STACK[x][0] == x)
self._STACK.pop()
if lvl == 0:
assert(not(self._STACK))
def check_stack(self, lvl):
assert(self._STACK and (len(self._STACK) == lvl))
for x in range(lvl):
assert(self._STACK[x][0] == x)
def start_document(self):
'''"The SAX parser will invoke this method only once, before any other methods in this interface or in DTDHandler (except for setDocumentLocator())."'''
pass
# Example of the <entity>/<segment>/<listMapRegion> structure that this handler parses:
# <entity type="protein" entityId="A">
#   <segment segId="1aqt_A_1_2" start="1" end="2">
#     <listResidue>
#     <listMapRegion>
#       <mapRegion start="3" end="138">
#         <db dbSource="PDB" dbCoordSys="PDBresnum" dbAccessionId="1aqt" dbChainId="A" start="3" end="138"/>
#       </mapRegion>
def add_region_mapping(self, attributes):
chain_id = (self._get_current_PDBe_chain())
mapRegion_attributes = self._STACK[3][1]
segment_range = (int(mapRegion_attributes['start']), int(mapRegion_attributes['end']))
dbSource = attributes['dbSource']
dbAccessionId = attributes['dbAccessionId']
self.region_mapping[chain_id] = self.region_mapping.get(chain_id, {})
self.region_mapping[chain_id][dbSource] = self.region_mapping[chain_id].get(dbSource, {})
self.region_mapping[chain_id][dbSource][dbAccessionId] = self.region_mapping[chain_id][dbSource].get(dbAccessionId, [])
self.region_mapping[chain_id][dbSource][dbAccessionId].append(segment_range)
# Note: I do not currently store the coordinate system type on a range level since I am assuming that each mapping uses one coordinate system
if attributes.get('dbCoordSys'):
self.region_map_coordinate_systems[dbSource] = self.region_map_coordinate_systems.get(dbSource, set())
self.region_map_coordinate_systems[dbSource].add(attributes['dbCoordSys'])
def start_element(self, name, attributes):
self.tag_data = ''
# Residue details and mappings
if name == 'crossRefDb':
self.start_crossRefDb(attributes)
elif name == 'residueDetail':
self.stack_push(4, None)
self.start_residueDetail(attributes)
elif name == 'residue':
self.stack_push(3, None)
assert(attributes.get('dbSource'))
assert(attributes.get('dbCoordSys'))
assert(attributes.get('dbResNum'))
assert(attributes.get('dbResName'))
assert(attributes['dbSource'] == 'PDBe')
assert(attributes['dbCoordSys'] == 'PDBe')
self.current_residue = SIFTSResidue(self._get_current_PDBe_chain(), attributes['dbResNum'], attributes['dbResName'])
elif name == 'listResidue':
self.stack_push(2, None)
# Region mappings
elif name == 'db':
if len(self._STACK) == 4 and self._STACK[3][1].get('nodeType') == 'mapRegion':
assert(attributes.get('dbSource'))
assert(attributes.get('dbAccessionId'))
self.add_region_mapping(attributes)
elif name == 'mapRegion':
assert(attributes.get('start'))
assert(attributes.get('end'))
self.stack_push(3, dict(start=attributes['start'], end=attributes['end'], nodeType = 'mapRegion'))
elif name == 'listMapRegion':
self.stack_push(2, None)
# Entities and segments
elif name == 'segment':
assert(attributes.get('segId'))
assert(attributes.get('start'))
assert(attributes.get('end'))
self.stack_push(1, dict(segId=attributes['segId'], start=attributes['start'], end=attributes['end']))
elif name == 'entity':
assert(attributes.get('type'))
entityId = None
if attributes['type'] == 'protein':
entityId = attributes.get('entityId')
self.stack_push(0, entityId)
elif name == 'entry':
self.counters['entry'] = self.counters.get('entry', 0) + 1
self.parse_header(attributes)
def parse_header(self, attributes):
if attributes.get('dbAccessionId'):
pdb_id = attributes.get('dbAccessionId').upper()
if self.pdb_id:
assert(self.pdb_id.upper() == pdb_id)
self.pdb_id = pdb_id
else:
raise Exception('Could not verify the PDB ID from the <entry> tag.')
def start_residueDetail(self, attributes):
self.check_stack(5)
self.reading_unobserved_property = False
dbSource = attributes.get('dbSource')
assert(dbSource)
if dbSource == 'PDBe':
residue_detail_property = attributes.get('property')
if residue_detail_property and residue_detail_property == 'Annotation':
self.reading_unobserved_property = True
def start_crossRefDb(self, attributes):
self.check_stack(4)
dbSource = attributes.get('dbSource')
assert(dbSource)
if dbSource == 'PDB' or dbSource == 'UniProt':
current_residue = self.current_residue
dbCoordSys = attributes.get('dbCoordSys')
dbAccessionId = attributes.get('dbAccessionId')
dbResNum = attributes.get('dbResNum')
dbResName = attributes.get('dbResName')
if dbSource == 'PDB':
dbChainId = attributes.get('dbChainId')
assert(dbCoordSys == "PDBresnum")
assert(dbAccessionId.upper() == self.pdb_id.upper())
#assert(dbChainId == self._STACK[0][1]) # this is not always true e.g. 1lmb has entityId="C" but dbChainId="3"
if not dbChainId == self._STACK[0][1]: # use the dbChainId chain ID since that is what is used in the logic later on. Note: this may introduce bugs if the dbChainIds differ amongst themselves
self._STACK[0] = (0, dbChainId)
assert(dbCoordSys and dbAccessionId and dbResNum and dbResName and dbChainId )
current_residue.add_pdb_residue(dbChainId, dbResNum, dbResName)
elif dbSource == 'UniProt':
assert(dbCoordSys and dbAccessionId and dbResNum and dbResName)
assert(dbCoordSys == "UniProt")
current_residue.add_uniprot_residue(dbAccessionId, dbResNum, dbResName)
def _get_current_PDBe_chain(self):
return self._STACK[0][1]
def _get_current_segment_range(self):
return (self._STACK[1][1]['start'], self._STACK[1][1]['end'])
def end_element(self, name):
tag_content = self.tag_data
# Residue details and mappings
if name == 'residueDetail':
self.stack_pop(4)
if self.reading_unobserved_property and (tag_content == 'Not_Observed'):
self.current_residue.WasNotObserved = True
self.reading_unobserved_property = False
elif name == 'residue':
self.stack_pop(3)
current_residue = self.current_residue
#assert(self._get_current_PDBe_chain() == current_residue.PDBChainID) # this is not always true e.g. 1lmb has entityId="C" but dbChainId="3"
self.residues.append(current_residue)
self.current_residue = None
elif name == 'listResidue':
self.stack_pop(2)
# Region mappings
elif name == 'mapRegion':
self.stack_pop(3)
elif name == 'listMapRegion':
self.stack_pop(2)
# Entities and segments
elif name == 'segment':
self.stack_pop(1)
elif name == 'entity':
self.stack_pop(0)
def end_document(self):
assert(self.counters['entry'] == 1)
residue_count = 0
residues_matched = {}
residues_encountered = set()
atom_to_uniparc_residue_map = {}
atom_to_seqres_residue_map = {}
seqres_to_uniparc_residue_map = {}
UniProtACs = set()
for r in self.residues:
if r.UniProtAC:
UniProtACs.add(r.UniProtAC)
ACC_to_UPARC_mapping = uniprot_map('ACC', 'UPARC', list(UniProtACs), cache_dir = self.cache_dir)
assert(sorted(ACC_to_UPARC_mapping.keys()) == sorted(list(UniProtACs)))
for k, v in ACC_to_UPARC_mapping.items():
assert(len(v) == 1)
ACC_to_UPARC_mapping[k] = v[0]
map_chains = set()
for r in self.residues:
if r.PDBResidueID.lstrip('-').isdigit() and int(r.PDBResidueID) < 0:
# These are not valid PDB residue IDs - the SIFTS XML convention sometimes assigns negative residue IDs to unobserved residues before the first ATOM record
# (only if the first residue ID is 1?)
pass
# Store the PDB->UniProt mapping
if r.has_pdb_to_uniprot_mapping():
UniProtAC = r.UniProtAC
UniParcID = ACC_to_UPARC_mapping[UniProtAC]
self.uniparc_ids.add(UniParcID)
full_pdb_residue_ID = r.get_pdb_residue_id()
PDBChainID = r.PDBChainID
map_chains.add(PDBChainID)
residues_matched[PDBChainID] = residues_matched.get(PDBChainID, 0)
if not r.WasNotObserved:
# Do not add ATOM mappings when the ATOM data does not exist
if r.has_pdb_to_uniprot_mapping():
atom_to_uniparc_residue_map[PDBChainID] = atom_to_uniparc_residue_map.get(PDBChainID, {})
atom_to_uniparc_residue_map[PDBChainID][full_pdb_residue_ID] = (UniParcID, r.UniProtResidueIndex)
atom_to_seqres_residue_map[PDBChainID] = atom_to_seqres_residue_map.get(PDBChainID, {})
atom_to_seqres_residue_map[PDBChainID][full_pdb_residue_ID] = r.PDBeResidueID
if r.has_pdb_to_uniprot_mapping():
seqres_to_uniparc_residue_map[PDBChainID] = seqres_to_uniparc_residue_map.get(PDBChainID, {})
seqres_to_uniparc_residue_map[PDBChainID][r.PDBeResidueID] = (UniParcID, r.UniProtResidueIndex)
# Make sure we only have at most one match per PDB residue
assert(full_pdb_residue_ID not in residues_encountered)
residues_encountered.add(full_pdb_residue_ID)
# Count the number of exact sequence matches
PDBResidue3AA = r.PDBResidue3AA
pdb_residue_type = residue_type_3to1_map.get(PDBResidue3AA) or self.modified_residues.get(PDBResidue3AA) or protonated_residue_type_3to1_map.get(PDBResidue3AA) or non_canonical_amino_acids.get(PDBResidue3AA)
if r.has_pdb_to_uniprot_mapping():
if pdb_residue_type == r.UniProtResidue1AA:
residues_matched[PDBChainID] += 1
residue_count += 1
# Create the SequenceMaps
for c in map_chains:
if residues_matched[c] > 0:
# 1IR3 has chains A and B:
# Chain A has mappings from atom and seqres (PDBe) residues to UniParc as usual
# Chain B (18 residues long) has mappings from atom to seqres residues but not to UniParc residues
self.atom_to_uniparc_sequence_maps[c] = PDBUniParcSequenceMap.from_dict(atom_to_uniparc_residue_map[c])
self.seqres_to_uniparc_sequence_maps[c] = PDBUniParcSequenceMap.from_dict(seqres_to_uniparc_residue_map[c])
self.atom_to_seqres_sequence_maps[c] = SequenceMap.from_dict(atom_to_seqres_residue_map[c])
# Check the match percentage
total_residues_matched = sum([residues_matched[c] for c in list(residues_matched.keys())])
if total_residues_matched == 0:
if self.pdb_id and self.pdb_id in NoSIFTSPDBUniParcMappingCases:
if self.require_uniprot_residue_mapping:
raise NoSIFTSPDBUniParcMapping('The PDB file %s has a bad or missing SIFTS mapping at the time of writing.' % self.pdb_id)
else:
colortext.error('Warning: The PDB file %s has a bad or missing SIFTS mapping at the time of writing so there is no PDB -> UniProt residue mapping.' % self.pdb_id)
else:
if self.require_uniprot_residue_mapping:
raise Exception('No residue information matching PDB residues to UniProt residues was found.')
else:
colortext.error('Warning: No residue information matching PDB residues to UniProt residues was found.')
else:
percentage_matched = float(total_residues_matched)*100.0/float(residue_count)
if percentage_matched < self.acceptable_sequence_percentage_match:
if self.pdb_id and self.pdb_id in BadSIFTSMappingCases:
raise BadSIFTSMapping('The PDB file %s has a known bad SIFTS mapping at the time of writing.' % self.pdb_id)
else:
raise Exception('Expected %.2f%% sequence match on matched residues but the SIFTS results only gave us %.2f%%.' % (self.acceptable_sequence_percentage_match, percentage_matched))
# Merge the ranges for the region mappings i.e. so [1-3],[3-86] becomes [1-86]
region_mapping = self.region_mapping
for chain_id, chain_details in region_mapping.items():
for dbSource, source_details in chain_details.items():
for dbAccessionId, range_list in source_details.items():
source_details[dbAccessionId] = merge_range_pairs(range_list)
# Check to see if the expected numbering schemes hold
for k, v in expected_residue_numbering_schemes.items():
if self.region_map_coordinate_systems.get(k):
assert(self.region_map_coordinate_systems[k] == set([v]))
pfam_scop_mapping = {}
scop_pfam_mapping = {}
for chain_id, chain_details in region_mapping.items():
if chain_details.get('Pfam') and chain_details.get('SCOP'):
for pfamAccessionId, pfam_range_lists in chain_details['Pfam'].items():
pfam_residues = parse_range(','.join(['%d-%d' % (r[0], r[1]) for r in pfam_range_lists]))
for scopAccessionId, scop_range_lists in chain_details['SCOP'].items():
scop_residues = parse_range(','.join(['%d-%d' % (r[0], r[1]) for r in scop_range_lists]))
num_same_residues = len(set(pfam_residues).intersection(set(scop_residues)))
if num_same_residues > 10:
Pfam_match_quality = float(num_same_residues) / float(len(pfam_residues))
SCOP_match_quality = float(num_same_residues) / float(len(scop_residues))
if (Pfam_match_quality >= self.domain_overlap_cutoff) or (SCOP_match_quality >= self.domain_overlap_cutoff):
pfam_scop_mapping[pfamAccessionId] = pfam_scop_mapping.get(pfamAccessionId, DomainMatch(pfamAccessionId, 'Pfam'))
pfam_scop_mapping[pfamAccessionId].add(scopAccessionId, 'SCOP', SCOP_match_quality)
scop_pfam_mapping[scopAccessionId] = scop_pfam_mapping.get(scopAccessionId, DomainMatch(scopAccessionId, 'SCOP'))
scop_pfam_mapping[scopAccessionId].add(pfamAccessionId, 'Pfam', Pfam_match_quality)
self.pfam_scop_mapping = pfam_scop_mapping
self.scop_pfam_mapping = scop_pfam_mapping
self._validate()
def _validate(self):
'''Tests that the maps agree through composition.'''
# I used to use the assertion "self.atom_to_uniparc_sequence_maps.keys() == self.atom_to_seqres_sequence_maps.keys() == self.seqres_to_uniparc_sequence_maps.keys()"
# but that failed for 2IMM where "self.atom_to_uniparc_sequence_maps.keys() == self.seqres_to_uniparc_sequence_maps.keys() == []" but THAT fails for 1IR3 so I removed
# the assertions entirely.
for c, m in self.atom_to_seqres_sequence_maps.items():
if list(self.seqres_to_uniparc_sequence_maps.keys()):
atom_uniparc_keys = set(self.atom_to_uniparc_sequence_maps.get(c, {}).keys())
atom_seqres_keys = set(self.atom_to_seqres_sequence_maps.get(c, {}).keys())
assert(atom_uniparc_keys.intersection(atom_seqres_keys) == atom_uniparc_keys)
for k, v in m.map.items():
uparc_id_1, uparc_id_2 = None, None
try:
uparc_id_1 = self.seqres_to_uniparc_sequence_maps[c].map[v]
uparc_id_2 = self.atom_to_uniparc_sequence_maps[c].map[k]
except KeyError:
continue
assert(uparc_id_1 == uparc_id_2)
def characters(self, chrs):
self.tag_data += chrs
startDocument = start_document
endDocument = end_document
startElement = start_element
endElement = end_element
|
class SIFTS(xml.sax.handler.ContentHandler):
def __init__(self, xml_contents, pdb_contents, acceptable_sequence_percentage_match = 70.0, cache_dir = None, domain_overlap_cutoff = 0.88, require_uniprot_residue_mapping = True, bio_cache = None, pdb_id = None):
''' The PDB contents should be passed so that we can deal with HETATM records as the XML does not contain the necessary information.
If require_uniprot_residue_mapping is set and there is no PDB residue -> UniProt sequence index mapping (e.g. 2IMM at the time of writing) then we raise an exception.
Otherwise, we store the information we can which can still be useful e.g. SCOP domain data.
bio_cache should be a klab.bio.cache.py::BioCache object and is used to avoid reading/downloading cached files repeatedly.
'''
pass
def get_pdb_chain_to_uniparc_id_map(self):
pass
def get_uniparc_sequences(self):
pass
@staticmethod
def retrieve(pdb_id, cache_dir = None, acceptable_sequence_percentage_match = 70.0, require_uniprot_residue_mapping = True, bio_cache = None):
'''Creates a SIFTS object by using a cached copy of the files if they exist or by retrieving the files from the RCSB.
bio_cache should be a klab.bio.cache.py::BioCache object and is used to avoid reading/downloading cached files repeatedly.
'''
pass
def stack_push(self, lvl, data):
pass
def stack_pop(self, lvl):
pass
def check_stack(self, lvl):
pass
def start_document(self):
'''"The SAX parser will invoke this method only once, before any other methods in this interface or in DTDHandler (except for setDocumentLocator())."'''
pass
def add_region_mapping(self, attributes):
pass
def start_element(self, name, attributes):
pass
def parse_header(self, attributes):
pass
def start_residueDetail(self, attributes):
pass
def start_crossRefDb(self, attributes):
pass
def _get_current_PDBe_chain(self):
pass
def _get_current_segment_range(self):
pass
def end_element(self, name):
pass
def end_document(self):
pass
def _validate(self):
'''Tests that the maps agree through composition.'''
pass
def characters(self, chrs):
pass
| 21 | 4 | 24 | 4 | 18 | 2 | 6 | 0.13 | 1 | 18 | 11 | 0 | 18 | 25 | 19 | 19 | 514 | 112 | 362 | 121 | 341 | 46 | 328 | 120 | 308 | 31 | 1 | 6 | 108 |
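The stack_push/stack_pop/check_stack trio above acts as a small finite-state check: each nesting level of interest must be entered and left in strict order, so unexpected XML fails fast instead of being silently misparsed. A stripped-down sketch of the same idea using three of the SIFTS tag names (the handler is a simplification for illustration, not the real parser):

import xml.sax

class DepthCheckedHandler(xml.sax.handler.ContentHandler):
    LEVELS = {'entity': 0, 'segment': 1, 'listResidue': 2}  # tag -> expected depth

    def __init__(self):
        xml.sax.handler.ContentHandler.__init__(self)
        self._stack = []

    def startElement(self, name, attrs):
        if name in self.LEVELS:
            assert len(self._stack) == self.LEVELS[name], 'unexpected <%s>' % name
            self._stack.append(name)

    def endElement(self, name):
        if name in self.LEVELS:
            assert self._stack and self._stack[-1] == name
            self._stack.pop()

xml.sax.parseString(b'<entity><segment><listResidue/></segment></entity>', DepthCheckedHandler())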
143,570 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/cluster/cluster_interface.py
|
klab.cluster.cluster_interface.JobInitializationException
|
class JobInitializationException(Exception): pass
|
class JobInitializationException(Exception):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
143,571 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/cluster/find_owned_files.py
|
klab.cluster.find_owned_files.Reporter
|
class Reporter:
def __init__(self,task):
self.start=time.time()
self.lastreport=self.start
self.task=task
self.a=0
self.b=0
print('Starting '+task)
def report(self,a,b=0):
t=time.time()
self.a=a
self.b=b
if self.lastreport<(t-report_interval):
self.lastreport=t
self.output_report()
def output_report(self):
if self.b==0:
sys.stdout.write(" Processed files: %d\r"%(self.a))
else:
sys.stdout.write(" Found %d files owned by user out of %d files total\r"%(self.a,self.b))
sys.stdout.flush()
def done(self):
self.output_report()
sys.stdout.write("\n")
print('Done %s, took %.3f seconds\n' % (self.task,time.time()-self.start))
|
class Reporter:
def __init__(self,task):
pass
def report(self,a,b=0):
pass
def output_report(self):
pass
def done(self):
pass
| 5 | 0 | 6 | 0 | 6 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 4 | 5 | 4 | 4 | 25 | 0 | 25 | 11 | 20 | 0 | 24 | 11 | 19 | 2 | 0 | 1 | 6 |
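Reporter throttles itself to one carriage-return progress line every report_interval seconds (a module-level setting in this file), so calling report() in a tight loop stays cheap. A typical call pattern, assuming Reporter and report_interval from the module above are in scope (the loop is invented):

r = Reporter('scanning files')
for i in range(100000):
    # ... per-file work here ...
    r.report(i)   # prints at most once per report_interval seconds
r.done()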
143,572 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/cluster/resubmit_eqw_jobs.py
|
klab.cluster.resubmit_eqw_jobs.qmod
|
class qmod(object):
'''This should be moved into a submodule and the functions here turned into module functions.'''
@staticmethod
def cj(job_ids):
'''Simple implementation where joblist is expected to be a list of integers (job ids). The full grammar for this command allows more granular control.'''
job_id_types = set(map(type, job_ids))
assert(len(job_id_types) == 1 and type(1) == job_id_types.pop())
for job_id in job_ids:
args = shlex.split('qmod -cj {0}'.format(job_id))
subprocess.call(args, shell=False)
|
class qmod(object):
'''This should be moved into a submodule and the functions here turned into module functions.'''
@staticmethod
def cj(job_ids):
'''Simple implementation where joblist is expected to be a list of integers (job ids). The full grammar for this command allows more granular control.'''
pass
| 3 | 2 | 7 | 0 | 6 | 1 | 2 | 0.25 | 1 | 3 | 0 | 0 | 0 | 0 | 1 | 1 | 10 | 0 | 8 | 6 | 5 | 2 | 7 | 5 | 5 | 2 | 1 | 1 | 2 |
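cj builds each command with shlex.split and shell=False, so job IDs are passed as argv entries rather than interpolated into a shell string. The same pattern with the type check hoisted out of the loop and a dry-run switch for testing (the function name and dry_run flag are invented; this is a sketch, not the klab API):

import shlex, subprocess

def clear_error_state(job_ids, dry_run=True):
    assert all(isinstance(j, int) for j in job_ids)  # validate once, up front
    for job_id in job_ids:
        args = shlex.split('qmod -cj {0}'.format(job_id))
        if dry_run:
            print(' '.join(args))   # e.g. "qmod -cj 101"
        else:
            subprocess.call(args, shell=False)

clear_error_state([101, 102])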
143,573 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/bonsai.py
|
klab.bio.bonsai.PDBSection
|
class PDBSection(object):
def __init__(self, Chain, StartResidueID, EndResidueID, Sequence = None):
'''StartResidueID and EndResidueID are expected to be PDB identifiers (resSeq + iCode, columns 23-27).'''
assert(len(StartResidueID) == len(EndResidueID) == 5)
self.Chain = Chain
self.StartResidueID = StartResidueID
self.EndResidueID = EndResidueID
self.Sequence = Sequence
@staticmethod
def from_non_aligned_residue_IDs(Chain, StartResidueID, EndResidueID, Sequence = None):
'''A more forgiving method that does not care about the padding of the residue IDs.'''
return PDBSection(Chain, PDB.ResidueID2String(StartResidueID), PDB.ResidueID2String(EndResidueID), Sequence = Sequence)
def __repr__(self):
seq = ''
if self.Sequence: seq = ' ({0})'.format(self.Sequence)
return 'Chain {0}, residues {1}-{2}{3}'.format(self.Chain, self.StartResidueID.strip(), self.EndResidueID.strip(), seq)
|
class PDBSection(object):
def __init__(self, Chain, StartResidueID, EndResidueID, Sequence = None):
'''StartResidueID and EndResidueID are expected to be PDB identifiers (resSeq + iCode, columns 23-27).'''
pass
@staticmethod
def from_non_aligned_residue_IDs(Chain, StartResidueID, EndResidueID, Sequence = None):
'''A more forgiving method that does not care about the padding of the residue IDs.'''
pass
def __repr__(self):
pass
| 5 | 2 | 5 | 0 | 4 | 1 | 1 | 0.14 | 1 | 1 | 1 | 0 | 2 | 4 | 3 | 3 | 21 | 5 | 14 | 10 | 9 | 2 | 14 | 9 | 10 | 2 | 1 | 1 | 4 |
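from_non_aligned_residue_IDs exists because the constructor asserts five-column residue IDs (resSeq plus insertion code); it delegates the padding to PDB.ResidueID2String. A rough standalone equivalent of that normalisation (pad_residue_id is an assumption for illustration, not the klab implementation):

def pad_residue_id(resid):
    resid = resid.strip()
    # Split a trailing insertion code (e.g. '100A') from the numeric resSeq
    if resid and resid[-1].isalpha():
        resseq, icode = resid[:-1], resid[-1]
    else:
        resseq, icode = resid, ' '
    return '%4s%s' % (resseq, icode)  # 5 columns: right-justified resSeq + iCode

assert len(pad_residue_id('7')) == 5
print(repr(pad_residue_id('100A')))  # ' 100A'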
143,574 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/bonsai.py
|
klab.bio.bonsai.Residue
|
class Residue(object):
def __init__(self, chain, resid, amino_acid):
self.chain = chain
self.residue_id = resid
self.amino_acid = amino_acid
self.records = dict(ATOM = [], HETATM = [], ANISOU = [])
def add(self, record_type, atom):
self.records[record_type].append(atom)
def get(self, record_type):
return self.records[record_type]
def get_amino_acid_code(self):
return residue_type_3to1_map.get(self.amino_acid) or protonated_residue_type_3to1_map.get(self.amino_acid) or non_canonical_amino_acids.get(self.amino_acid, 'X')
def id(self):
return self.chain, self.residue_id
def __repr__(self):
return pprint.pformat(self.records)
|
class Residue(object):
def __init__(self, chain, resid, amino_acid):
pass
def add(self, record_type, atom):
pass
def get(self, record_type):
pass
def get_amino_acid_code(self):
pass
def id(self):
pass
def __repr__(self):
pass
| 7 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 6 | 4 | 6 | 6 | 28 | 12 | 16 | 11 | 9 | 0 | 16 | 11 | 9 | 1 | 1 | 0 | 6 |
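get_amino_acid_code falls back through three lookup tables before defaulting to 'X'; the or-chain works because dict.get returns None (falsy) on a miss. The same pattern in isolation (the tables here are abbreviated stand-ins for the real module-level maps):

residue_type_3to1_map = {'ALA': 'A'}
protonated_residue_type_3to1_map = {'HID': 'H'}
non_canonical_amino_acids = {'MSE': 'M'}

def three_to_one(code):
    return (residue_type_3to1_map.get(code)
            or protonated_residue_type_3to1_map.get(code)
            or non_canonical_amino_acids.get(code, 'X'))

print(three_to_one('HID'), three_to_one('UNK'))  # H X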
143,575 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/bonsai.py
|
klab.bio.bonsai.ResidueIndexedPDBFile
|
class ResidueIndexedPDBFile(object):
### Constructors
def __init__(self, pdb_content, buffer = 0.05, bin_size = 5.1, safe_mode = True):
'''Takes the contents of a PDB file as a string (see from_filepath/from_lines for other input types).
safe_mode checks to make sure that certain assertions hold but adds to the runtime.
residues only contains residue details for ATOM, HETATM, and ANISOU records.
atoms only contains ATOM or HETATM records.
indexed_lines is a document-order tagged list of PDB file lines
Each item in the list is one of three types:
ATOM and HETATM records correspond to a quadruple (record_type, serial_number, line, new_atom)
ANISOU records correspond to a triple (record_type, serial_number, line)
Other lines correspond to a double (None, line)
'''
self.pdb_content = pdb_content
self.lines = pdb_content.split("\n")
self.min_x, self.min_y, self.min_z, self.max_x, self.max_y, self.max_z, self.max_dimension = None, None, None, None, None, None, None
self.buffer = buffer
self.bin_size = float(bin_size)
self.atom_bins = None
self.indexed_lines = None
self.atom_bin_dimensions = None
self.atom_name_to_group = {}
self.safe_mode = safe_mode
self.parse()
self.bin_atoms()
if self.safe_mode:
self.check_residues()
@classmethod
def from_filepath(cls, filepath):
'''A function to replace the old constructor call where a filename was passed in.'''
assert(os.path.exists(filepath))
return cls(read_file(filepath))
@classmethod
def from_lines(cls, pdb_file_lines):
'''A function to replace the old constructor call where a list of the file's lines was passed in.'''
return cls("\n".join(pdb_file_lines))
@classmethod
def retrieve(cls, pdb_id, cache_dir = None):
'''Creates a PDB object by using a cached copy of the file if it exists or by retrieving the file from the RCSB.'''
# Check to see whether we have a cached copy
pdb_id = pdb_id.upper()
if cache_dir:
filename = os.path.join(cache_dir, "%s.pdb" % pdb_id)
if os.path.exists(filename):
return cls(read_file(filename))
# Get a copy from the RCSB
contents = rcsb.retrieve_pdb(pdb_id)
# Create a cached copy if appropriate
if cache_dir:
write_file(os.path.join(cache_dir, "%s.pdb" % pdb_id), contents)
# Return the object
return cls(contents)
### Initialization
def parse(self, lines = None):
'''Parses the PDB file into an indexed representation (a tagged list of lines, see constructor docstring).
A set of Atoms is created, including x, y, z coordinates when applicable. These atoms are grouped into Residues.
Finally, the limits of the 3D space are calculated to be used for Atom binning.
ATOM serial numbers appear to be sequential within a model regardless of alternate locations (altLoc, see 1ABE). This
code assumes that this holds as this serial number is used as an index.
If an ATOM has a corresponding ANISOU record, the latter record uses the same serial number.
'''
indexed_lines = []
MODEL_count = 0
records_types_with_atom_serial_numbers = set(['ATOM', 'HETATM', 'TER', 'ANISOU'])
removable_records_types_with_atom_serial_numbers = set(['ATOM', 'HETATM', 'ANISOU'])
removable_xyz_records_types_with_atom_serial_numbers = set(['ATOM', 'HETATM'])
xs, ys, zs = [], [], []
# atoms maps ATOM/HETATM serial numbers to Atom objects. Atom objects know which Residue object they belong to and Residue objects maintain a list of their Atoms.
atoms = {}
# atoms maps chain -> residue IDs to Residue objects. Residue objects remember which ATOM/HETATM/ANISOU records (stored as Atom objects) belong to them
residues = {}
atom_name_to_group = {}
for line in self.lines:
record_type = line[:6].strip()
if record_type in removable_records_types_with_atom_serial_numbers:
#altLoc = line[16]
atom_name = line[12:16].strip()
chain = line[21]
resid = line[22:27] # residue ID + insertion code
serial_number = int(line[6:11])
element_name = None
if record_type == 'ATOM':
element_name = line[12:14].strip() # see the ATOM section of PDB format documentation. The element name is stored in these positions, right-justified.
element_name = ''.join([w for w in element_name if w.isalpha()]) # e.g. 1
if atom_name not in atom_name_to_group:
atom_name_to_group[atom_name] = element_name
else:
assert(atom_name_to_group[atom_name] == element_name)
residues[chain] = residues.get(chain, {})
residues[chain][resid] = residues[chain].get(resid, Residue(chain, resid, line[17:20]))
new_atom = Atom(residues[chain][resid], atom_name, element_name, serial_number, line[16])
residues[chain][resid].add(record_type.strip(), new_atom)
if record_type in removable_xyz_records_types_with_atom_serial_numbers:
x, y, z = float(line[30:38]), float(line[38:46]), float(line[46:54])
xs.append(x)
ys.append(y)
zs.append(z)
assert(serial_number not in atoms) # the logic of this class relies on this assertion - that placed records have a unique identifier
atoms[serial_number] = new_atom
atoms[serial_number].place(x, y, z, record_type)
indexed_lines.append((record_type, serial_number, line, new_atom))
else:
indexed_lines.append((record_type, serial_number, line))
else:
if record_type == 'MODEL ':
MODEL_count += 1
if MODEL_count > 1:
raise Exception('This code needs to be updated to properly handle NMR structures.')
indexed_lines.append((None, line))
if not xs:
raise Exception('No coordinates found.')
# Calculate the side size needed for a cube to contain all of the points, with buffers to account for edge-cases
self.min_x, self.min_y, self.min_z, self.max_x, self.max_y, self.max_z = min(xs)-self.buffer, min(ys)-self.buffer, min(zs)-self.buffer, max(xs)+self.buffer, max(ys)+self.buffer, max(zs)+self.buffer
self.max_dimension = (self.buffer * 4) + max(self.max_x - self.min_x, self.max_y - self.min_y, self.max_z - self.min_z)
self.residues = residues
self.atoms = atoms
self.indexed_lines = indexed_lines
self.atom_name_to_group = atom_name_to_group
def bin_atoms(self):
'''This function bins the Atoms into fixed-size sections of the protein space in 3D.'''
# Create the atom bins
low_point = numpy.array([self.min_x, self.min_y, self.min_z])
high_point = numpy.array([self.max_x, self.max_y, self.max_z])
atom_bin_dimensions = numpy.ceil((high_point - low_point) / self.bin_size)
self.atom_bin_dimensions = (int(atom_bin_dimensions[0]) - 1, int(atom_bin_dimensions[1]) - 1, int(atom_bin_dimensions[2]) - 1)
atom_bins = []
for x in range(int(atom_bin_dimensions[0])):
atom_bins.append([])
for y in range(int(atom_bin_dimensions[1])):
atom_bins[x].append([])
for z in range(int(atom_bin_dimensions[2])):
atom_bins[x][y].append(Bin(x, y, z))
# Assign each Atom to a bin
for serial_number, atom in self.atoms.items():
bin_location = numpy.trunc((atom.point - low_point) / self.bin_size)
bin = atom_bins[int(bin_location[0])][int(bin_location[1])][int(bin_location[2])]
bin.append(atom)
atom.set_bin(bin)
# Sanity_check
if self.safe_mode:
num_atoms = 0
for x in range(int(atom_bin_dimensions[0])):
for y in range(int(atom_bin_dimensions[1])):
for z in range(int(atom_bin_dimensions[2])):
num_atoms += len(atom_bins[x][y][z])
assert(num_atoms == len(self.atoms))
# Snip empty sections (saves a little space after garbage collection - space savings increase with the number of empty arrays in the matrix)
blank_section = ()
for x in range(int(atom_bin_dimensions[0])):
for y in range(int(atom_bin_dimensions[1])):
for z in range(int(atom_bin_dimensions[2])):
if not atom_bins[x][y][z]:
atom_bins[x][y][z] = blank_section
self.atom_bins = atom_bins
### Safety checks
def check_residues(self):
'''Checks to make sure that each atom type is unique per residue.'''
for chain, residue_ids in self.residues.items():
for residue_id, residue in residue_ids.items():
for record_type, atoms in residue.records.items():
freq = {}
for atom in atoms:
rec_id = atom.name + atom.conformation
freq[rec_id] = freq.get(rec_id, 0)
freq[rec_id] += 1
for atom_type, count in list(freq.items()):
if count > 1:
raise Exception('{0} occurrences of atom type {1} for record type {2} occur in residue {3} in chain {4}.'.format(count, atom_type, record_type, residue_id.strip(), chain))
### API functions
def get_atom_serial_numbers_from_pdb_residue_ids(self, pdb_residue_ids, ignore_these_atoms = [], ignore_these_conformations = []):
'''Returns the serial numbers of the atoms in the given residues, optionally ignoring specific atom names or conformations.'''
atom_list = []
for pdb_residue_id in pdb_residue_ids:
chain = pdb_residue_id[0]
residue_id = pdb_residue_id[1:]
if chain in self.residues and residue_id in self.residues[chain]:
residue = self.residues[chain][residue_id]
for record_type, atoms in residue.records.items():
for atom in atoms:
if atom.name not in ignore_these_atoms and atom.conformation not in ignore_these_conformations:
atom_list.append(atom.serial_number)
return atom_list
|
class ResidueIndexedPDBFile(object):
def __init__(self, pdb_content, buffer = 0.05, bin_size = 5.1, safe_mode = True):
'''Takes the contents of a PDB file as a string (see from_filepath/from_lines for other input types).
safe_mode checks to make sure that certain assertions hold but adds to the runtime.
residues only contains residue details for ATOM, HETATM, and ANISOU records.
atoms only contains ATOM or HETATM records.
indexed_lines is a document-order tagged list of PDB file lines
Each item in the list is one of three types:
ATOM and HETATM records correspond to a quadruple (record_type, serial_number, line, new_atom)
ANISOU records correspond to a triple (record_type, serial_number, line)
Other lines correspond to a double (None, line)
'''
pass
@classmethod
def from_filepath(cls, filepath):
'''A function to replace the old constructor call where a filename was passed in.'''
pass
@classmethod
def from_lines(cls, pdb_file_lines):
'''A function to replace the old constructor call where a list of the file's lines was passed in.'''
pass
@classmethod
def retrieve(cls, pdb_id, cache_dir = None):
'''Creates a PDB object by using a cached copy of the file if it exists or by retrieving the file from the RCSB.'''
pass
def parse(self, lines = None):
'''Parses the PDB file into an indexed representation (a tagged list of lines, see constructor docstring).
A set of Atoms is created, including x, y, z coordinates when applicable. These atoms are grouped into Residues.
Finally, the limits of the 3D space are calculated to be used for Atom binning.
ATOM serial numbers appear to be sequential within a model regardless of alternate locations (altLoc, see 1ABE). This
code assumes that this holds as this serial number is used as an index.
If an ATOM has a corresponding ANISOU record, the latter record uses the same serial number.
'''
pass
def bin_atoms(self):
'''This function bins the Atoms into fixed-size sections of the protein space in 3D.'''
pass
def check_residues(self):
'''Checks to make sure that each atom type is unique per residue.'''
pass
def get_atom_serial_numbers_from_pdb_residue_ids(self, pdb_residue_ids, ignore_these_atoms = [], ignore_these_conformations = []):
'''Returns the serial numbers of the atoms in the given residues, optionally ignoring specific atom names or conformations.'''
pass
| 12 | 8 | 24 | 2 | 18 | 5 | 5 | 0.3 | 1 | 9 | 3 | 1 | 5 | 18 | 8 | 8 | 226 | 42 | 145 | 71 | 133 | 43 | 139 | 68 | 130 | 13 | 1 | 5 | 43 |
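bin_atoms divides the structure's bounding box into cubes of side bin_size so that a later neighbour query only has to inspect a point's own bin and the 26 adjacent ones, instead of every atom. A minimal sketch of that indexing step using a dict keyed by bin coordinates instead of the nested lists above (points and bin size are invented):

import math

def bin_points(points, low, bin_size):
    bins = {}
    for p in points:
        key = tuple(int(math.floor((p[i] - low[i]) / bin_size)) for i in range(3))
        bins.setdefault(key, []).append(p)
    return bins

points = [(0.5, 0.5, 0.5), (1.2, 0.1, 0.0), (7.9, 3.3, 2.2)]
print(sorted(bin_points(points, low=(0.0, 0.0, 0.0), bin_size=5.1)))
# [(0, 0, 0), (1, 0, 0)]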
143,576 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/cache.py
|
klab.bio.cache.BioCache
|
class BioCache(object):
'''Class to store a cache of klab.bio objects. This can be used to avoid reading the same data in from disk over
and over again.
'''
def __init__(self, cache_dir = None, max_capacity = None, silent = True):
'''max_capacity is currently used to set the maximum capacity of all object lists i.e. you cannot currently set different
max capacities for different lists.'''
if cache_dir:
assert(os.path.exists(cache_dir))
if max_capacity != None:
max_capacity = int(max_capacity)
assert(max_capacity >= 1)
self.cache_dir = cache_dir
# PDB files
self.pdb_contents = CacheNodeDict()
self.pdb_objects = CacheNodeDict()
# SIFTS XML files
self.sifts_xml_contents = CacheNodeDict()
self.sifts_objects = CacheNodeDict()
# PDBML files
self.pdbml_contents = CacheNodeDict()
self.pdbml_objects = CacheNodeDict()
# FASTA files
self.fasta_contents = CacheNodeDict()
self.fasta_objects = CacheNodeDict()
self.max_capacity = max_capacity
self.silent = silent
def log(self, msg):
if not self.silent:
colortext.plightpurple(msg)
def log_lookup(self, msg):
self.log('CACHE LOOKUP: {0}'.format(msg))
#self.log('CACHE LOOKUP: {0}.\n{1}'.format(msg, '\n'.join([l[:-1] for l in traceback.format_stack()])))
def add_node(self, container, k, v):
if self.max_capacity and (len(container) + 1) > self.max_capacity:
# Truncate container contents
keys_to_delete = [t[0] for t in sorted(list(container.items()), key=operator.itemgetter(1))[:-(self.max_capacity - 1)]] # sort by datetime of insertion and keep the last self.max_capacity minus one objects (to allow space for one more object)
for dk in keys_to_delete:
del container[dk]
container[k] = CacheNode(v)
######################
# PDB files
######################
def add_pdb_contents(self, pdb_id, contents):
self.add_node(self.pdb_contents, pdb_id.upper(), contents)
def get_pdb_contents(self, pdb_id):
self.log_lookup('pdb contents {0}'.format(pdb_id))
pdb_id = pdb_id.upper()
if not self.pdb_contents.get(pdb_id):
if self.pdb_objects.get(pdb_id):
self.add_pdb_contents(pdb_id, '\n'.join(self.pdb_objects[pdb_id].lines))
elif self.cache_dir:
self.add_pdb_contents(pdb_id, download_pdb(pdb_id, self.cache_dir, silent = True))
else:
self.add_pdb_contents(pdb_id, retrieve_pdb(pdb_id, silent = True))
return self.pdb_contents[pdb_id]
def add_pdb_object(self, pdb_id, pdb_object):
self.add_node(self.pdb_objects, pdb_id.upper(), pdb_object)
def get_pdb_object(self, pdb_id):
self.log_lookup('pdb object {0}'.format(pdb_id))
pdb_id = pdb_id.upper()
if not self.pdb_objects.get(pdb_id):
if not self.pdb_contents.get(pdb_id):
if self.cache_dir:
self.add_pdb_contents(pdb_id, download_pdb(pdb_id, self.cache_dir, silent = True))
else:
self.add_pdb_contents(pdb_id, retrieve_pdb(pdb_id, silent = True))
self.add_pdb_object(pdb_id, PDB(self.pdb_contents[pdb_id]))
return self.pdb_objects[pdb_id]
######################
# SIFTS XML files
######################
def add_sifts_xml_contents(self, pdb_id, sifts_xml_contents):
self.add_node(self.sifts_xml_contents, pdb_id.upper(), sifts_xml_contents)
def get_sifts_xml_contents(self, pdb_id):
self.log_lookup('SIFTS xml {0}'.format(pdb_id))
pdb_id = pdb_id.upper()
if not self.sifts_xml_contents.get(pdb_id):
if self.sifts_objects.get(pdb_id):
self.add_sifts_xml_contents(pdb_id, self.sifts_objects[pdb_id].xml_contents)
elif self.cache_dir:
self.add_sifts_xml_contents(pdb_id, download_sifts_xml(pdb_id, self.cache_dir, silent = True))
else:
self.add_sifts_xml_contents(pdb_id, retrieve_sifts_xml(pdb_id, silent = True))
return self.sifts_xml_contents[pdb_id]
def add_sifts_object(self, pdb_id, sifts_object):
self.add_node(self.sifts_objects, pdb_id.upper(), sifts_object)
def get_sifts_object(self, pdb_id, acceptable_sequence_percentage_match = 90.0, restrict_match_percentage_errors_to_these_uniparc_ids = None):
# todo: we need to store all/important parameters for object creation and key on those as well e.g. "give me the SIFTS object with restrict_match_percentage_errors_to_these_uniparc_ids = <some_set>";
# otherwise, unexpected behavior may occur
self.log_lookup('SIFTS object {0}'.format(pdb_id))
pdb_id = pdb_id.upper()
if not self.sifts_objects.get(pdb_id):
if not self.sifts_xml_contents.get(pdb_id):
if self.cache_dir:
self.add_sifts_xml_contents(pdb_id, download_sifts_xml(pdb_id, self.cache_dir, silent = True))
else:
self.add_sifts_xml_contents(pdb_id, retrieve_sifts_xml(pdb_id, silent = True))
self.add_sifts_object(pdb_id, SIFTS.retrieve(pdb_id, cache_dir = self.cache_dir, acceptable_sequence_percentage_match = acceptable_sequence_percentage_match, bio_cache = self, restrict_match_percentage_errors_to_these_uniparc_ids = restrict_match_percentage_errors_to_these_uniparc_ids))
return self.sifts_objects[pdb_id]
######################
# PDBML files
######################
def add_pdbml_contents(self, pdb_id, pdbml_contents):
self.add_node(self.pdbml_contents, pdb_id.upper(), pdbml_contents)
def get_pdbml_contents(self, pdb_id):
self.log_lookup('PDBML {0}'.format(pdb_id))
pdb_id = pdb_id.upper()
if not self.pdbml_contents.get(pdb_id):
if self.pdbml_objects.get(pdb_id):
self.add_pdbml_contents(pdb_id, self.pdbml_objects[pdb_id].xml_contents)
elif self.cache_dir:
self.add_pdbml_contents(pdb_id, download_pdbml(pdb_id, self.cache_dir, silent = True))
else:
self.add_pdbml_contents(pdb_id, retrieve_pdbml(pdb_id, silent = True))
return self.pdbml_contents[pdb_id]
def add_pdbml_object(self, pdb_id, pdbml_object):
self.add_node(self.pdbml_objects, pdb_id.upper(), pdbml_object)
def get_pdbml_object(self, pdb_id, acceptable_sequence_percentage_match = 90.0):
self.log_lookup('PDBML object {0}'.format(pdb_id))
pdb_id = pdb_id.upper()
if not self.pdbml_objects.get(pdb_id):
if not self.pdbml_contents.get(pdb_id):
if self.cache_dir:
self.add_pdbml_contents(pdb_id, download_pdbml(pdb_id, self.cache_dir, silent = True))
else:
self.add_pdbml_contents(pdb_id, retrieve_pdbml(pdb_id, silent = True))
self.add_pdbml_object(pdb_id, PDBML.retrieve(pdb_id, cache_dir = self.cache_dir, bio_cache = self))
return self.pdbml_objects[pdb_id]
######################
# FASTA files
######################
def add_fasta_contents(self, pdb_id, fasta_contents):
self.add_node(self.fasta_contents, pdb_id.upper(), fasta_contents)
def get_fasta_contents(self, pdb_id):
self.log_lookup('FASTA {0}'.format(pdb_id))
pdb_id = pdb_id.upper()
if not self.fasta_contents.get(pdb_id):
if self.fasta_objects.get(pdb_id):
self.add_fasta_contents(pdb_id, self.fasta_objects[pdb_id].fasta_contents)
elif self.cache_dir:
self.add_fasta_contents(pdb_id, download_fasta(pdb_id, self.cache_dir, silent = True))
else:
self.add_fasta_contents(pdb_id, retrieve_fasta(pdb_id, silent = True))
return self.fasta_contents[pdb_id]
def add_fasta_object(self, pdb_id, fasta_object):
self.add_node(self.fasta_objects, pdb_id.upper(), fasta_object)
def get_fasta_object(self, pdb_id, acceptable_sequence_percentage_match = 90.0):
self.log_lookup('FASTA object {0}'.format(pdb_id))
pdb_id = pdb_id.upper()
if not self.fasta_objects.get(pdb_id):
if not self.fasta_contents.get(pdb_id):
if self.cache_dir:
self.add_fasta_contents(pdb_id, download_fasta(pdb_id, self.cache_dir, silent = True))
else:
self.add_fasta_contents(pdb_id, retrieve_fasta(pdb_id, silent = True))
self.add_fasta_object(pdb_id, FASTA.retrieve(pdb_id, cache_dir = self.cache_dir, bio_cache = self))
return self.fasta_objects[pdb_id]
######################
# BLAST results
######################
def _get_blast_pdb_filepath(self, pdb_id, chain_id, cut_off, matrix, sequence_identity_cut_off):
assert(self.cache_dir)
return os.path.join(self.cache_dir, '{0}_{1}_{2}_{3}_{4}.BLAST.json'.format(pdb_id.upper(), chain_id, cut_off, matrix, sequence_identity_cut_off))
def load_pdb_chain_blast(self, pdb_id, chain_id, cut_off, matrix, sequence_identity_cut_off):
if self.cache_dir:
filepath = self._get_blast_pdb_filepath(pdb_id, chain_id, cut_off, matrix, sequence_identity_cut_off)
if os.path.exists(filepath):
return json.loads(read_file(filepath))
return None
def save_pdb_chain_blast(self, pdb_id, chain_id, cut_off, matrix, sequence_identity_cut_off, data):
if self.cache_dir:
filepath = self._get_blast_pdb_filepath(pdb_id, chain_id, cut_off, matrix, sequence_identity_cut_off)
write_file(filepath, json.dumps(data))
return True
return False
def _get_blast_sequence_filepath(self, sequence, cut_off, matrix, sequence_identity_cut_off):
assert(self.cache_dir)
id = '{0}_{1}_{2}_{3}'.format(CRC64digest(sequence), len(sequence), sequence[:5], sequence[-5:])
return os.path.join(self.cache_dir, '{0}_{1}_{2}_{3}.BLAST.json'.format(id, cut_off, matrix, sequence_identity_cut_off))
def load_sequence_blast(self, sequence, cut_off, matrix, sequence_identity_cut_off):
if self.cache_dir:
filepath = self._get_blast_sequence_filepath(sequence, cut_off, matrix, sequence_identity_cut_off)
if os.path.exists(filepath):
for sequence_hits in json.loads(read_file(filepath)):
if sequence_hits['sequence'] == sequence:
return sequence_hits
return None
def save_sequence_blast(self, sequence, cut_off, matrix, sequence_identity_cut_off, data):
assert(data['sequence'] == sequence)
sequence_data = [data] # put the new hit at the start of the file
if self.cache_dir:
filepath = self._get_blast_sequence_filepath(sequence, cut_off, matrix, sequence_identity_cut_off)
if os.path.exists(filepath):
for sequence_hits in json.loads(read_file(filepath)):
if sequence_hits['sequence'] != sequence:
sequence_data.append(sequence_hits)
write_file(filepath, json.dumps(sequence_data))
return True
return False
######################
# Static methods
######################
@staticmethod
def static_get_pdb_object(pdb_id, bio_cache = None, cache_dir = None):
'''This method does not necessarily use a BioCache but it seems to fit here.'''
pdb_id = pdb_id.upper()
if bio_cache:
return bio_cache.get_pdb_object(pdb_id)
if cache_dir:
# Check to see whether we have a cached copy of the PDB file
filepath = os.path.join(cache_dir, '{0}.pdb'.format(pdb_id))
if os.path.exists(filepath):
return PDB.from_filepath(filepath)
# Get any missing files from the RCSB and create cached copies if appropriate
pdb_contents = retrieve_pdb(pdb_id)
if cache_dir:
write_file(os.path.join(cache_dir, "%s.pdb" % pdb_id), pdb_contents)
return PDB(pdb_contents)
|
class BioCache(object):
'''Class to store a cache of klab.bio objects. This can be used to avoid reading the same data in from disk over
and over again.
'''
def __init__(self, cache_dir = None, max_capacity = None, silent = True):
'''max_capacity is currently used to set the maximum capacity of all object lists i.e. you cannot currently set different
max capacities for different lists.'''
pass
def log(self, msg):
pass
def log_lookup(self, msg):
pass
def add_node(self, container, k, v):
pass
def add_pdb_contents(self, pdb_id, contents):
pass
def get_pdb_contents(self, pdb_id):
pass
def add_pdb_object(self, pdb_id, pdb_object):
pass
def get_pdb_object(self, pdb_id):
pass
def add_sifts_xml_contents(self, pdb_id, sifts_xml_contents):
pass
def get_sifts_xml_contents(self, pdb_id):
pass
def add_sifts_object(self, pdb_id, sifts_object):
pass
def get_sifts_object(self, pdb_id, acceptable_sequence_percentage_match = 90.0, restrict_match_percentage_errors_to_these_uniparc_ids = None):
pass
def add_pdbml_contents(self, pdb_id, pdbml_contents):
pass
def get_pdbml_contents(self, pdb_id):
pass
def add_pdbml_object(self, pdb_id, pdbml_object):
pass
def get_pdbml_object(self, pdb_id, acceptable_sequence_percentage_match = 90.0):
pass
def add_fasta_contents(self, pdb_id, fasta_contents):
pass
def get_fasta_contents(self, pdb_id):
pass
def add_fasta_object(self, pdb_id, fasta_object):
pass
def get_fasta_object(self, pdb_id, acceptable_sequence_percentage_match = 90.0):
pass
def _get_blast_pdb_filepath(self, pdb_id, chain_id, cut_off, matrix, sequence_identity_cut_off):
pass
def load_pdb_chain_blast(self, pdb_id, chain_id, cut_off, matrix, sequence_identity_cut_off):
pass
def save_pdb_chain_blast(self, pdb_id, chain_id, cut_off, matrix, sequence_identity_cut_off, data):
pass
def _get_blast_sequence_filepath(self, sequence, cut_off, matrix, sequence_identity_cut_off):
pass
def load_sequence_blast(self, sequence, cut_off, matrix, sequence_identity_cut_off):
pass
def save_sequence_blast(self, sequence, cut_off, matrix, sequence_identity_cut_off, data):
pass
@staticmethod
def static_get_pdb_object(pdb_id, bio_cache = None, cache_dir = None):
'''This method does not necessarily use a BioCache but it seems to fit here.'''
pass
| 29 | 3 | 8 | 0 | 7 | 1 | 3 | 0.19 | 1 | 9 | 6 | 0 | 26 | 11 | 27 | 27 | 293 | 74 | 185 | 51 | 156 | 36 | 172 | 50 | 144 | 5 | 1 | 4 | 71 |
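A minimal usage sketch of the BioCache class above, assuming the klab package is importable and the RCSB is reachable; the PDB ID '1A2P', the cache directory, and the capacity are illustrative values, not taken from the source:

# Hedged sketch: the second lookup should be served from the in-memory cache
# rather than re-read from disk or re-fetched from the RCSB.
from klab.bio.cache import BioCache

bio_cache = BioCache(cache_dir = '/tmp/pdb_cache', max_capacity = 100, silent = False)
pdb_object = bio_cache.get_pdb_object('1A2P')   # read from disk or the RCSB, then cached
pdb_object = bio_cache.get_pdb_object('1A2P')   # expected to hit the object cache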
143,577 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/basics.py
|
klab.bio.basics.Residue
|
class Residue(object):
# For residues ResidueID
def __init__(self, Chain, ResidueID, ResidueAA, residue_type = None):
if residue_type:
if residue_type == 'Protein' or residue_type == 'Protein skeleton':
assert((ResidueAA in residue_types_1) or (ResidueAA in protonated_residues_types_1) or (ResidueAA == 'X') or (ResidueAA == 'B') or (ResidueAA == 'Z'))
elif residue_type == 'Unknown':
assert(ResidueAA == 'X')
else:
assert(ResidueAA in nucleotide_types_1)
self.Chain = Chain
self.ResidueID = ResidueID
self.ResidueAA = ResidueAA
self.residue_type = residue_type
def __repr__(self):
return "%s:%s %s" % (self.Chain, str(self.ResidueID).strip(), self.ResidueAA)
def __ne__(self, other):
return not(self.__eq__(other))
def __eq__(self, other):
'''Basic form of equality, just checking the amino acid types. This lets us check equality over different chains with different residue IDs.'''
if type(other) == type(None):
return False
return (self.ResidueAA == other.ResidueAA) and (self.residue_type == other.residue_type)
def get_residue_id(self):
return self.ResidueID
|
class Residue(object):
def __init__(self, Chain, ResidueID, ResidueAA, residue_type = None):
pass
def __repr__(self):
pass
def __ne__(self, other):
pass
def __eq__(self, other):
'''Basic form of equality, just checking the amino acid types. This lets us check equality over different chains with different residue IDs.'''
pass
def get_residue_id(self):
pass
| 6 | 1 | 5 | 0 | 4 | 0 | 2 | 0.09 | 1 | 2 | 0 | 1 | 5 | 4 | 5 | 5 | 32 | 7 | 23 | 10 | 17 | 2 | 21 | 10 | 15 | 4 | 1 | 2 | 9 |
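A short, self-contained illustration of the equality semantics defined above (assuming klab is importable): only ResidueAA and residue_type are compared, so residues on different chains with different residue IDs can compare equal.

from klab.bio.basics import Residue

r1 = Residue('A', ' 10 ', 'G')
r2 = Residue('B', ' 55 ', 'G')
print(r1 == r2)                         # True: same amino acid type and residue_type
print(r1 != Residue('A', ' 10 ', 'W'))  # True: different amino acid type
print(repr(r1))                         # 'A:10 G'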
143,578 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/db/schema.py
|
klab.db.schema.EmptyDiagramException
|
class EmptyDiagramException(Exception): pass
|
class EmptyDiagramException(Exception):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
143,579 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/pdbml.py
|
klab.bio.pdbml.PDBML_slow
|
class PDBML_slow(object):
def __init__(self, xml_contents, pdb_contents):
'''The PDB contents should be passed so that we can deal with HETATM records as the XML does not contain the necessary information.'''
self.pdb_id = None
self.contents = xml_contents
self.xml_version = None
self._dom = parseString(xml_contents)
self.deprecated = False
self.replacement_pdb_id = None
self.modified_residues = PDB(pdb_contents).modified_residues
self.main_tag = None
self.parse_header()
self.parse_deprecation()
self.parse_atoms()
@staticmethod
def retrieve(pdb_id, cache_dir = None):
'''Creates a PDBML object by using a cached copy of the files if they exist or by retrieving the files from the RCSB.'''
pdb_contents = None
xml_contents = None
pdb_id = pdb_id.upper()
if cache_dir:
# Check to see whether we have a cached copy of the PDB file
filename = os.path.join(cache_dir, "%s.pdb" % pdb_id)
if os.path.exists(filename):
pdb_contents = read_file(filename)
# Check to see whether we have a cached copy of the XML file
filename = os.path.join(cache_dir, "%s.xml" % pdb_id)
if os.path.exists(filename):
xml_contents = read_file(filename)
# Get any missing files from the RCSB and create cached copies if appropriate
if not pdb_contents:
pdb_contents = rcsb.retrieve_pdb(pdb_id)
if cache_dir:
write_file(os.path.join(cache_dir, "%s.pdb" % pdb_id), pdb_contents)
if not xml_contents:
xml_contents = rcsb.retrieve_xml(pdb_id)
if cache_dir:
write_file(os.path.join(cache_dir, "%s.xml" % pdb_id), xml_contents)
# Return the object
return PDBML_slow(xml_contents, pdb_contents)
def parse_header(self):
main_tags = self._dom.getElementsByTagName("PDBx:datablock")
assert(len(main_tags) == 1)
self.main_tag = main_tags[0]
if self.main_tag.hasAttribute('datablockName'):
self.pdb_id = self.main_tag.getAttribute('datablockName').upper()
xsd_version = os.path.split(self.main_tag.getAttribute('xmlns:PDBx'))[1]
if xsd_versions.get(xsd_version):
self.xml_version = xsd_versions[xsd_version]
else:
raise Exception("XML version is %s. This module only handles versions %s so far." % (xsd_version, ", ".join(list(xsd_versions.keys()))))
def parse_deprecation(self):
'''Checks to see if the PDB file has been deprecated and, if so, what the new ID is.'''
deprecation_tag = self.main_tag.getElementsByTagName("PDBx:pdbx_database_PDB_obs_sprCategory")
assert(len(deprecation_tag) <= 1)
if deprecation_tag:
deprecation_tag = deprecation_tag[0]
deprecation_subtag = deprecation_tag.getElementsByTagName("PDBx:pdbx_database_PDB_obs_spr")
assert(len(deprecation_subtag) == 1)
deprecation_subtag = deprecation_subtag[0]
assert(deprecation_subtag.hasAttribute('replace_pdb_id'))
assert(deprecation_subtag.hasAttribute('pdb_id'))
old_pdb_id = deprecation_subtag.getAttribute('replace_pdb_id').upper()
new_pdb_id = deprecation_subtag.getAttribute('pdb_id').upper()
if self.pdb_id == old_pdb_id:
self.deprecated = True
self.replacement_pdb_id = new_pdb_id
else:
assert(self.pdb_id == new_pdb_id)
def parse_atoms(self):
'''All ATOM lines are parsed even though only one per residue needs to be parsed. The reason for parsing all the
lines is just to sanity-check that the ATOMs within one residue are consistent with each other.'''
atom_site_header_tag = self.main_tag.getElementsByTagName("PDBx:atom_siteCategory")
assert(len(atom_site_header_tag) == 1)
atom_site_header_tag = atom_site_header_tag[0]
atom_site_tags = atom_site_header_tag.getElementsByTagName("PDBx:atom_site")
residue_map = {}
residues_read = {}
int_type = int
for t in atom_site_tags:
r, seqres, ResidueAA, Residue3AA = PDBML_slow.parse_atom_site(t, self.modified_residues)
if r:
# skip certain ACE residues
if not(self.pdb_id in cases_with_ACE_residues_we_can_ignore and Residue3AA == 'ACE'):
full_residue_id = str(r)
if residues_read.get(full_residue_id):
assert(residues_read[full_residue_id] == (r.ResidueAA, seqres))
else:
residues_read[full_residue_id] = (r.ResidueAA, seqres)
residue_map[r.Chain] = residue_map.get(r.Chain, {})
assert(type(seqres) == int_type)
residue_map[r.Chain][str(r)] = seqres
## Create SequenceMap objects to map the ATOM Sequences to the SEQRES Sequences
atom_to_seqres_sequence_maps = {}
for chain_id, atom_seqres_mapping in residue_map.items():
atom_to_seqres_sequence_maps[chain_id] = SequenceMap.from_dict(atom_seqres_mapping)
self.atom_to_seqres_sequence_maps = atom_to_seqres_sequence_maps
@staticmethod
def parse_atom_site(t, modified_residues):
# Only parse ATOM records
if parse_singular_string(t, 'PDBx:group_PDB') == 'HETATM':
return None, None, None, None
assert(parse_singular_string(t, 'PDBx:group_PDB') == 'ATOM')
# NOTE: x, y, z values are per-ATOM but we do not use them yet
x, y, z = parse_singular_float(t, "PDBx:Cartn_x"), parse_singular_float(t, "PDBx:Cartn_y"), parse_singular_float(t, "PDBx:Cartn_z")
PDB_chain_id = parse_singular_alphabetic_character(t, 'PDBx:auth_asym_id')
ATOM_residue_id = parse_singular_int(t, 'PDBx:auth_seq_id')
# Parse insertion code. Sometimes this tag exists but is set as nil in its attributes (xsi:nil = "true").
PDB_insertion_code = " "
insertion_code_tags = t.getElementsByTagName('PDBx:pdbx_PDB_ins_code')
if insertion_code_tags:
assert(len(insertion_code_tags) == 1)
insertion_code_tag = insertion_code_tags[0]
if not(insertion_code_tag.hasAttribute('xsi:nil') and insertion_code_tag.getAttribute('xsi:nil') == 'true'):
PDB_insertion_code = parse_singular_alphabetic_character(t, 'PDBx:pdbx_PDB_ins_code')
SEQRES_index = parse_singular_int(t, 'PDBx:label_seq_id')
residue_a = parse_singular_string(t, 'PDBx:auth_comp_id')
residue_b = parse_singular_string(t, 'PDBx:label_comp_id')
assert(residue_a == residue_b)
residue_3_letter = residue_a
residue_1_letter = residue_type_3to1_map.get(residue_3_letter) or protonated_residue_type_3to1_map.get(residue_3_letter) or non_canonical_amino_acids.get(residue_3_letter)
if not residue_1_letter:
residue_identifier = '%s%s%s' % (PDB_chain_id, str(ATOM_residue_id).rjust(4), PDB_insertion_code)
if modified_residues.get(residue_identifier):
residue_1_letter = modified_residues[residue_identifier]['original_residue_1']
if not residue_1_letter:
'''Too many cases to worry about... we will have to use residue_3_letter to sort those out.'''
residue_1_letter = 'X'
r = IdentifyingPDBResidue(PDB_chain_id, ("%d%s" % (ATOM_residue_id, PDB_insertion_code)).rjust(5), residue_1_letter, None, residue_3_letter)
r.add_position(x, y, z)
return r, SEQRES_index, residue_1_letter, residue_3_letter
|
class PDBML_slow(object):
def __init__(self, xml_contents, pdb_contents):
'''The PDB contents should be passed so that we can deal with HETATM records as the XML does not contain the necessary information.'''
pass
@staticmethod
def retrieve(pdb_id, cache_dir = None):
'''Creates a PDBML object by using a cached copy of the files if they exist or by retrieving the files from the RCSB.'''
pass
def parse_header(self):
pass
def parse_deprecation(self):
'''Checks to see if the PDB file has been deprecated and, if so, what the new ID is.'''
pass
def parse_atoms(self):
'''All ATOM lines are parsed even though only one per residue needs to be parsed. The reason for parsing all the
lines is just to sanity-check that the ATOMs within one residue are consistent with each other.'''
pass
@staticmethod
def parse_atom_site(t, modified_residues):
pass
| 9 | 4 | 26 | 5 | 19 | 3 | 5 | 0.13 | 1 | 8 | 3 | 0 | 4 | 9 | 6 | 6 | 167 | 36 | 116 | 50 | 107 | 15 | 111 | 48 | 104 | 8 | 1 | 4 | 28 |
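A hedged usage sketch of the retrieve() entry point above, assuming klab is importable and the RCSB is reachable; the PDB ID and cache directory are illustrative:

from klab.bio.pdbml import PDBML_slow

p = PDBML_slow.retrieve('1A2P', cache_dir = '/tmp/pdb_cache')
print(p.pdb_id, p.xml_version, p.deprecated)
# Per-chain SequenceMap objects from ATOM residue IDs to SEQRES indices.
for chain_id, seq_map in sorted(p.atom_to_seqres_sequence_maps.items()):
    print(chain_id, seq_map)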
143,580 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/pdbml.py
|
klab.bio.pdbml.PDBML
|
class PDBML(xml.sax.handler.ContentHandler):
def __init__(self, xml_contents, pdb_contents, bio_cache = None, pdb_id = None):
'''The PDB contents should be passed so that we can deal with HETATM records as the XML does not contain the necessary information.'''
self.xml_contents = xml_contents
self.atom_to_seqres_sequence_maps = {}
self.counters = {}
self.pdb_id = pdb_id
self.xml_version = None
self.tag_data = []
self.in_atom_sites_block = False
self._residue_map = {}
self._residues_read = {}
self._BLOCK = None # This is used to create a simple FSA for the parsing
self.current_atom_site = AtomSite()
self.bio_cache = bio_cache
# Create the PDB
if bio_cache and pdb_id:
self.modified_residues = bio_cache.get_pdb_object(pdb_id).modified_residues
else:
self.modified_residues = PDB(pdb_contents).modified_residues
self.deprecated = False  # set to True by parse_deprecated_tags if this entry has been superseded
self.replacement_pdb_id = None
self._start_handlers = {
1 : self.parse_deprecated_tags,
2 : self.parse_atom_site,
}
self._end_handlers = {
1 : None,
2 : self.parse_atom_tag_data,
}
assert(xml_contents.find('encoding="UTF-8"') != -1)
@staticmethod
def retrieve(pdb_id, cache_dir = None, bio_cache = None):
'''Creates a PDBML object by using a cached copy of the files if they exist or by retrieving the files from the RCSB.'''
pdb_contents = None
xml_contents = None
pdb_id = pdb_id.upper()
if bio_cache:
pdb_contents = bio_cache.get_pdb_contents(pdb_id)
xml_contents = bio_cache.get_pdbml_contents(pdb_id)
if cache_dir:
if not pdb_contents:
# Check to see whether we have a cached copy of the PDB file
filename = os.path.join(cache_dir, "%s.pdb" % pdb_id)
if os.path.exists(filename):
pdb_contents = read_file(filename)
if not xml_contents:
# Check to see whether we have a cached copy of the XML file
filename = os.path.join(cache_dir, "%s.xml" % pdb_id)
if os.path.exists(filename):
xml_contents = read_file(filename)
# Get any missing files from the RCSB and create cached copies if appropriate
if not pdb_contents:
pdb_contents = rcsb.retrieve_pdb(pdb_id)
if cache_dir:
write_file(os.path.join(cache_dir, "%s.pdb" % pdb_id), pdb_contents)
if not xml_contents:
xml_contents = rcsb.retrieve_xml(pdb_id)
if cache_dir:
write_file(os.path.join(cache_dir, "%s.xml" % pdb_id), xml_contents)
# Return the object
handler = PDBML(xml_contents, pdb_contents, bio_cache = bio_cache, pdb_id = pdb_id)
xml.sax.parseString(xml_contents, handler)
return handler
def start_document(self): pass
def start_element(self, name, attributes):
self.tag_data = []
if self._BLOCK != None:
self._start_handlers[self._BLOCK](name, attributes)
elif name == 'PDBx:atom_site':
# All ATOM lines are parsed even though only one per residue needs to be parsed. The reason for parsing all the
# lines is just to sanity-checks that the ATOMs within one residue are consistent with each other.
self._BLOCK = 2
self.current_atom_site.clear()
assert(self.in_atom_sites_block)
elif name == 'PDBx:atom_siteCategory':
self.in_atom_sites_block = True
self.counters['PDBx:atom_siteCategory'] = self.counters.get('PDBx:atom_siteCategory', 0) + 1
elif name == 'PDBx:datablock':
self.counters['PDBx:datablock'] = self.counters.get('PDBx:datablock', 0) + 1
self.parse_header(attributes)
elif name == "PDBx:pdbx_database_PDB_obs_sprCategory":
self._BLOCK = 1
self.counters['PDBx:pdbx_database_PDB_obs_sprCategory'] = self.counters.get('PDBx:pdbx_database_PDB_obs_sprCategory', 0) + 1
def parse_deprecated_tags(self, name, attributes):
'''Checks to see if the PDB file has been deprecated and, if so, what the new ID is.'''
if name == 'PDBx:pdbx_database_PDB_obs_spr':
self.counters['PDBx:pdbx_database_PDB_obs_spr'] = self.counters.get('PDBx:pdbx_database_PDB_obs_spr', 0) + 1
old_pdb_id = attributes.get('replace_pdb_id').upper()
new_pdb_id = attributes.get('pdb_id').upper()
assert(old_pdb_id and new_pdb_id)
if self.pdb_id == old_pdb_id:
self.deprecated = True
self.replacement_pdb_id = new_pdb_id
else:
assert(self.pdb_id == new_pdb_id)
def end_element(self, name):
tag_content = ("".join(self.tag_data)).strip()
if self._BLOCK != None:
handler = self._end_handlers.get(self._BLOCK)
if handler:
handler(name, tag_content)
if name == 'PDBx:atom_site' or name == "PDBx:pdbx_database_PDB_obs_sprCategory":
self._BLOCK = None
elif name == 'PDBx:atom_siteCategory':
self.in_atom_sites_block = False
## Create SequenceMap objects to map the ATOM Sequences to the SEQRES Sequences
atom_to_seqres_sequence_maps = {}
for chain_id, atom_seqres_mapping in self._residue_map.items():
atom_to_seqres_sequence_maps[chain_id] = SequenceMap.from_dict(atom_seqres_mapping)
self.atom_to_seqres_sequence_maps = atom_to_seqres_sequence_maps
def parse_header(self, attributes):
if attributes.get('datablockName'):
pdb_id = attributes.get('datablockName').upper()
if self.pdb_id:
assert(pdb_id == self.pdb_id)
self.pdb_id = pdb_id
xsd_version = os.path.split(attributes.get('xmlns:PDBx'))[1]
if xsd_versions.get(xsd_version):
self.xml_version = xsd_versions[xsd_version]
else:
raise Exception("XML version is %s. This module only handles versions %s so far." % (xsd_version, ", ".join(list(xsd_versions.keys()))))
def parse_atom_site(self, name, attributes):
'''Parse the atom tag attributes. Most atom tags do not have attributes.'''
if name == "PDBx:pdbx_PDB_ins_code":
assert(not(self.current_atom_site.ATOMResidueiCodeIsNull))
if attributes.get('xsi:nil') == 'true':
self.current_atom_site.ATOMResidueiCodeIsNull = True
if name == "PDBx:auth_asym_id":
assert(not(self.current_atom_site.PDBChainIDIsNull))
if attributes.get('xsi:nil') == 'true':
self.current_atom_site.PDBChainIDIsNull = True
def parse_atom_tag_data(self, name, tag_content):
'''Parse the atom tag data.'''
current_atom_site = self.current_atom_site
if current_atom_site.IsHETATM:
# Early out - do not parse HETATM records
return
elif name == 'PDBx:atom_site':
# We have to handle the atom_site close tag here since we jump based on self._BLOCK first in end_element
# Add the residue to the residue map.
self._BLOCK = None
current_atom_site = self.current_atom_site
current_atom_site.validate()
if current_atom_site.IsATOM:
# Only parse ATOM records
r, seqres, ResidueAA, Residue3AA = current_atom_site.convert_to_residue(self.modified_residues)
if r:
if not(self.pdb_id in cases_with_ACE_residues_we_can_ignore and Residue3AA == 'ACE'):
# skip certain ACE residues
full_residue_id = str(r)
if self._residues_read.get(full_residue_id):
assert(self._residues_read[full_residue_id] == (r.ResidueAA, seqres))
else:
self._residues_read[full_residue_id] = (r.ResidueAA, seqres)
self._residue_map[r.Chain] = self._residue_map.get(r.Chain, {})
assert(type(seqres) == int_type)
self._residue_map[r.Chain][str(r)] = seqres
# Record type
elif name == 'PDBx:group_PDB':
# ATOM or HETATM
if tag_content == 'ATOM':
current_atom_site.IsATOM = True
elif tag_content == 'HETATM':
current_atom_site.IsHETATM = True
else:
raise Exception("PDBx:group_PDB was expected to be 'ATOM' or 'HETATM'. '%s' read instead." % tag_content)
# Residue identifier - chain ID, residue ID, insertion code
elif name == 'PDBx:auth_asym_id':
assert(not(current_atom_site.PDBChainID))
current_atom_site.PDBChainID = tag_content
if not tag_content:
assert(current_atom_site.PDBChainIDIsNull)
if self.pdb_id.upper() == '2MBP':
current_atom_site.PDBChainID = 'A' # e.g. 2MBP
else:
current_atom_site.PDBChainID = ' '
elif name == 'PDBx:auth_seq_id':
assert(not(current_atom_site.ATOMResidueID))
current_atom_site.ATOMResidueID = int(tag_content)
elif name == "PDBx:pdbx_PDB_ins_code":
if current_atom_site.ATOMResidueiCodeIsNull:
assert(len(tag_content) == 0)
else:
assert(current_atom_site.ATOMResidueiCode == ' ')
current_atom_site.ATOMResidueiCode = tag_content
elif name == "PDBx:auth_comp_id":
assert(not(current_atom_site.ATOMResidueAA))
current_atom_site.ATOMResidueAA = tag_content
elif name == "PDBx:label_seq_id":
assert(not(current_atom_site.SEQRESIndex))
current_atom_site.SEQRESIndex = int(tag_content)
elif name == "PDBx:label_comp_id":
assert(not(current_atom_site.ATOMSeqresResidueAA))
current_atom_site.ATOMSeqresResidueAA = tag_content
def create_atom_data(self):
'''The atom site work is split into two parts. This function type-converts the tags.'''
current_atom_site = self.current_atom_site
# Only parse ATOM records
if current_atom_site.IsHETATM:
# Early out - do not parse HETATM records
return None, None, None, None
elif current_atom_site.IsATOM:
return current_atom_site.convert_to_residue(self.modified_residues)
else:
raise Exception('current_atom_site')
def end_document(self):
assert(self.counters.get('PDBx:datablock') == 1)
assert(self.counters.get('PDBx:atom_siteCategory') == 1)
assert(self.counters.get('PDBx:pdbx_database_PDB_obs_sprCategory', 0) <= 1)
assert(self.counters.get('PDBx:pdbx_database_PDB_obs_spr', 0) >= self.counters.get('PDBx:pdbx_database_PDB_obs_sprCategory', 0))
def characters(self, chrs):
# Note: I use a list to store self.tag_data, append to the list, then join the contents into a string. In general,
# this is a better approach than string concatenation since there is less garbage created. However, if the strings
# are small and the list only ever contains one string (which could be the case with this particular class), my
# approach may create more garbage. I tested this with 3ZKB is a good single test case and there is no noticeable
# difference in the amount of garbage created (1 extra piece of garbage was created).
self.tag_data.append(chrs)
startDocument = start_document
endDocument = end_document
startElement = start_element
endElement = end_element
|
class PDBML(xml.sax.handler.ContentHandler):
def __init__(self, xml_contents, pdb_contents, bio_cache = None, pdb_id = None):
'''The PDB contents should be passed so that we can deal with HETATM records as the XML does not contain the necessary information.'''
pass
@staticmethod
def retrieve(pdb_id, cache_dir = None, bio_cache = None):
'''Creates a PDBML object by using a cached copy of the files if they exist or by retrieving the files from the RCSB.'''
pass
def start_document(self):
pass
def start_element(self, name, attributes):
pass
def parse_deprecated_tags(self, name, attributes):
'''Checks to see if the PDB file has been deprecated and, if so, what the new ID is.'''
pass
def end_element(self, name):
pass
def parse_header(self, attributes):
pass
def parse_atom_site(self, name, attributes):
'''Parse the atom tag attributes. Most atom tags do not have attributes.'''
pass
def parse_atom_tag_data(self, name, tag_content):
'''Parse the atom tag data.'''
pass
def create_atom_data(self):
'''The atom site work is split into two parts. This function type-converts the tags.'''
pass
def end_document(self):
pass
def characters(self, chrs):
pass
| 14 | 6 | 20 | 2 | 16 | 3 | 5 | 0.16 | 1 | 8 | 3 | 1 | 11 | 17 | 12 | 12 | 267 | 45 | 193 | 51 | 180 | 31 | 164 | 50 | 151 | 19 | 1 | 5 | 62 |
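The characters() comment above notes that SAX may deliver an element's text in several chunks, which is why tag_data is a list that is joined once per element. A self-contained, stdlib-only sketch of that accumulation pattern (the XML snippet is illustrative):

import xml.sax

class TagTextHandler(xml.sax.handler.ContentHandler):
    def __init__(self):
        xml.sax.handler.ContentHandler.__init__(self)
        self._chunks = []
    def startElement(self, name, attrs):
        self._chunks = []                     # reset the accumulator per element
    def characters(self, data):
        self._chunks.append(data)             # may be called several times per element
    def endElement(self, name):
        text = ''.join(self._chunks).strip()  # join once, avoiding repeated concatenation
        self._chunks = []
        if text:
            print(name, text)

xml.sax.parseString(b'<atom_site><group_PDB>ATOM</group_PDB></atom_site>', TagTextHandler())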
143,581 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/db/schema.py
|
klab.db.schema.MySQLSchema
|
class MySQLSchema(object):
def __init__(self, settings = {}, isInnoDB=True, numTries=32, host=None, db=None, user=None, passwd=None, port=3306, unix_socket="/var/lib/mysql/mysql.sock", passwdfile=None, use_utf=False):
self.db = db
self.host = host
self.original_schema = []
if not(os.path.exists(unix_socket)):
unix_socket = '/var/run/mysqld/mysqld.sock' # Ubuntu hack
if not passwd and passwdfile:
if os.path.exists(passwdfile):
passwd = read_file(passwdfile).strip()
else:
passwd = getpass.getpass("Enter password to connect to MySQL database:")
dbinterface = MySQLDatabaseInterface(settings, isInnoDB = isInnoDB, numTries = numTries, host = host, db = db, user = user, passwd = passwd, port = port, unix_socket = unix_socket, use_locking = False)
# Get the DB schema, normalizing for sqlt-diagram
db_schema = []
self.num_tables = 0
try:
for t in sorted(dbinterface.TableNames):
creation_string = dbinterface.execute_select('SHOW CREATE TABLE `%s`' % t)
assert(len(creation_string) == 1)
if creation_string[0].get('Create Table') == None: # e.g. for views
continue
self.num_tables += 1
creation_string = '%s;' % creation_string[0]['Create Table'].strip()
self.original_schema.append(creation_string)
# Fix input for sqlt-diagram (it is fussy)
creation_string = creation_string.replace("default ''", "")
creation_string = creation_string.replace("DEFAULT ''", "")
creation_string = creation_string.replace("DEFERRABLE INITIALLY DEFERRED", "") # sqlt-diagram doesn't like this syntax for MySQL
creation_string = creation_string.replace("AUTOINCREMENT", "") # sqlt-diagram doesn't like this syntax for MySQL
creation_string = creation_string.replace("auto_increment", "") # sqlt-diagram doesn't like this syntax for MySQL
# Note: re.sub's fourth positional argument is count, not flags, so re.DOTALL must be passed by keyword.
creation_string = re.sub("COMMENT.*'.*'", "", creation_string, flags = re.DOTALL) # sqlt-diagram doesn't like this syntax for MySQL
creation_string = re.sub("CONSTRAINT.*?CHECK.*?,", "", creation_string, flags = re.DOTALL) # sqlt-diagram doesn't like this syntax for MySQL
creation_string = re.sub("CONSTRAINT.*?CHECK.*?[)][)]", ")", creation_string, flags = re.DOTALL) # sqlt-diagram doesn't like this syntax for MySQL
creation_string = re.sub(" AUTO_INCREMENT=\d+", "", creation_string, flags = re.DOTALL)
creation_string = creation_string.replace("''", "")
creation_string = creation_string.replace('tg_', 'auth_')
db_schema.append(creation_string)
except: raise
db_schema = '\n\n'.join(db_schema)
self.db_schema = db_schema
self.mysqldump_schema = self.get_schema(host, user, passwd, db)
def print_schema(self):
c = 1
for x in self.sanitize_schema().split('\n'):
colortext.warning('%04d: %s' % (c, x))
c += 1
def sanitize_schema(self):
# Fix input for sqlt-diagram (it is fussy)
creation_string = self.mysqldump_schema
creation_string = creation_string.replace("default ''", "")
creation_string = creation_string.replace("DEFAULT ''", "")
creation_string = creation_string.replace("DEFERRABLE INITIALLY DEFERRED", "") # sqlt-diagram doesn't like this syntax for MySQL
creation_string = creation_string.replace("AUTOINCREMENT", "") # sqlt-diagram doesn't like this syntax for MySQL
creation_string = creation_string.replace("auto_increment", "") # sqlt-diagram doesn't like this syntax for MySQL
creation_string = re.sub("COMMENT.*'.*'", "", creation_string, flags = re.DOTALL) # sqlt-diagram doesn't like this syntax for MySQL
creation_string = re.sub("CONSTRAINT.*?CHECK.*?,", "", creation_string, flags = re.DOTALL) # sqlt-diagram doesn't like this syntax for MySQL
creation_string = re.sub("CONSTRAINT.*?CHECK.*?[)][)]", ")", creation_string, flags = re.DOTALL) # sqlt-diagram doesn't like this syntax for MySQL
creation_string = re.sub(" AUTO_INCREMENT=\d+", "", creation_string, flags = re.DOTALL)
creation_string = creation_string.replace("''' ,", "' ,")
creation_string = creation_string.replace("''',", "',")
creation_string = creation_string.replace("'' ,", "")
creation_string = creation_string.replace("'',", "")
creation_string = creation_string.replace("''", "")
#write_file('/tmp/failed_schema.sql', creation_string)
return creation_string
def get_schema(self, host, username, passwd, database_name):
try:
outfile, outfilename = open_temp_file('/tmp', "w")
p = subprocess.Popen(shlex.split("mysqldump -h %s -u %s -p%s --skip-add-drop-table --no-data %s" % (host, username, passwd, database_name)), stdout=outfile)
p.wait()
outfile.close()
contents = read_file(outfilename)
os.remove(outfilename)
return contents
except Exception as e:
if os.path.exists(outfilename):
os.remove(outfilename)
raise
def get_full_schema(self):
# todo: rename this to get_definition as this is more appropriate
return '\n\n'.join(self.original_schema)
def generate_schema_diagram(self, output_filepath = None, show_fk_only = False):
if self.num_tables == 0:
raise EmptyDiagramException('No tables in schema.')
tempfiles = self._generate_schema_diagram(show_fk_only)
self.schema_diagram = read_file(tempfiles[1])
for fname in tempfiles:
if os.path.exists(fname):
os.remove(fname)
if output_filepath:
write_file(output_filepath, self.schema_diagram)
def _generate_schema_diagram(self, show_fk_only):
tempfiles = []
output_handle, sql_schema_filepath = open_temp_file('/tmp', ftype = 'w')
tempfiles.append(sql_schema_filepath)
try:
#output_handle.write('%s\n\n' % self.db_schema)
output_handle.write('%s\n\n' % self.sanitize_schema())
output_handle.close()
except:
output_handle.close()
try:
png_handle, png_filepath = open_temp_file('/tmp', ftype = 'w')
png_handle.close()
tempfiles.append(png_filepath)
c = [
"sqlt-diagram",
"-d=MySQL",
"-i=png",
"-t=%s database on %s" % (self.db, self.host),
"-o=%s" % png_filepath,
"--color",
sql_schema_filepath,
]
if show_fk_only:
# Useful to print a smaller schema of just the primary/foreign keys
c.append("--show-fk-only")
p = subprocess.Popen(c, stdout=subprocess.PIPE)
stdout, stderr = p.communicate()
if not p.returncode == 0:
if stderr:
raise colortext.Exception("Error - sqlt-diagram exited with %d: '%s'." % (p.returncode, stderr))
else:
raise colortext.Exception("Error - sqlt-diagram exited with %d." % (p.returncode))
except Exception as e:
colortext.error('Failed!')
print((str(e)))
return tempfiles
|
class MySQLSchema(object):
def __init__(self, settings = {}, isInnoDB=True, numTries=32, host=None, db=None, user=None, passwd=None, port=3306, unix_socket="/var/lib/mysql/mysql.sock", passwdfile=None, use_utf=False):
pass
def print_schema(self):
pass
def sanitize_schema(self):
pass
def get_schema(self, host, username, passwd, database_name):
pass
def get_full_schema(self):
pass
def generate_schema_diagram(self, output_filepath = None, show_fk_only = False):
pass
def _generate_schema_diagram(self, show_fk_only):
pass
| 8 | 0 | 20 | 2 | 17 | 3 | 4 | 0.18 | 1 | 5 | 2 | 0 | 7 | 7 | 7 | 7 | 152 | 23 | 122 | 35 | 114 | 22 | 113 | 33 | 105 | 7 | 1 | 3 | 25 |
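A hedged usage sketch of the class above. It assumes a reachable MySQL server plus the mysqldump and sqlt-diagram executables on the PATH; the host, database, user, and password are hypothetical placeholders:

from klab.db.schema import MySQLSchema, EmptyDiagramException

schema = MySQLSchema(host = 'localhost', db = 'mydb', user = 'reader', passwd = 'secret')
print(schema.get_full_schema())   # the unmodified CREATE TABLE statements
try:
    schema.generate_schema_diagram(output_filepath = 'mydb_schema.png')
except EmptyDiagramException:
    print('The database has no tables to diagram.')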
143,582 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/db/sqlalchemy_interface.py
|
klab.db.sqlalchemy_interface.IntermediateField
|
class IntermediateField(object):
def __init__(self, field_name, field_type, not_null = False, default_type = None, default_value = None, comment = None, is_primary_key = False, unicode_collation_or_character_set = False):
self.field_name = field_name
self.field_type = field_type
self.not_null = not_null
self.default_type = default_type
self.default_value = default_value
self.comment = comment
self.is_primary_key = is_primary_key
self.unicode_collation_or_character_set = unicode_collation_or_character_set
def to_sql_alchemy(self, typedefs):
s = ''
s += self.field_name + ' = Column('
is_string_type = None
is_numeric_type = None
if self.field_type.startswith('varchar'):
mtchs = re.match("varchar[(](\d+)[)]", self.field_type)
assert(mtchs)
length = int(mtchs.group(1))
is_string_type = True
if self.unicode_collation_or_character_set:
s += 'Unicode(%d)' % length
typedefs['sqlalchemy.types'].add('Unicode')
else:
typedefs['sqlalchemy.types'].add('String')
s += 'String(%d)' % length
elif self.field_type == 'double':
s += 'DOUBLE'
is_numeric_type = True
typedefs['sqlalchemy.dialects.mysql'].add('DOUBLE')
elif self.field_type == 'float':
s += 'Float'
is_numeric_type = True
typedefs['sqlalchemy.types'].add('Float')
elif self.field_type == 'longtext' or self.field_type == 'text' or self.field_type == 'mediumtext':
s += 'Text'
is_string_type = True  # text defaults are quoted like other string types
typedefs['sqlalchemy.types'].add('Text')
elif self.field_type == 'date' or self.field_type == 'datetime':
s += 'DateTime'
is_numeric_type = True
typedefs['sqlalchemy.types'].add('DateTime')
elif self.field_type == 'timestamp':
s += 'TIMESTAMP'
is_numeric_type = True
typedefs['sqlalchemy.types'].add('TIMESTAMP')
elif self.field_type.startswith('enum('):
s += self.field_type.replace('enum', 'Enum')
is_string_type = True
typedefs['sqlalchemy.types'].add('Enum')
elif self.field_type.startswith('int(') or self.field_type.startswith('bigint('):
s += 'Integer'
is_numeric_type = True
typedefs['sqlalchemy.types'].add('Integer')
elif self.field_type.startswith('tinyint('):
s += self.field_type.upper()
is_numeric_type = True
typedefs['sqlalchemy.dialects.mysql'].add('TINYINT')
elif self.field_type == 'blob':
s += 'BLOB'
is_numeric_type = True
typedefs['sqlalchemy.dialects.mysql'].add('BLOB')
elif self.field_type == 'longblob':
s += 'LONGBLOB'
is_numeric_type = True
typedefs['sqlalchemy.dialects.mysql'].add('LONGBLOB')
else:
raise Exception("Unhandled type: '%s'" % self.field_type)
if self.not_null:
s += ', nullable=False'
else:
s += ', nullable=True'
if self.is_primary_key:
s += ', primary_key=True'
if self.default_type != None:
if self.default_type == 'string':
if is_string_type:
s += ", default=u'%s'" % self.default_value
elif is_numeric_type:
s += ", default=%s" % self.default_value
else:
assert(0)
s += ')'
return s
|
class IntermediateField(object):
def __init__(self, field_name, field_type, not_null = False, default_type = None, default_value = None, comment = None, is_primary_key = False, unicode_collation_or_character_set = False):
pass
def to_sql_alchemy(self, typedefs):
pass
| 3 | 0 | 50 | 9 | 42 | 0 | 10 | 0 | 1 | 2 | 0 | 0 | 2 | 8 | 2 | 2 | 105 | 21 | 84 | 16 | 81 | 0 | 69 | 16 | 66 | 19 | 1 | 3 | 20 |
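A small, self-contained illustration of to_sql_alchemy() above (assuming klab is importable): each call returns one Column definition line and records the SQLAlchemy types it needed in the shared typedefs dictionary. The field name and type are illustrative:

from klab.db.sqlalchemy_interface import IntermediateField

typedefs = {'sqlalchemy.types' : set(), 'sqlalchemy.dialects.mysql' : set()}
field = IntermediateField('PDBFileID', 'varchar(10)', not_null = True, is_primary_key = True)
print(field.to_sql_alchemy(typedefs))
# PDBFileID = Column(String(10), nullable=False, primary_key=True)
print(sorted(typedefs['sqlalchemy.types']))   # ['String']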
143,583 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/db/sqlalchemy_interface.py
|
klab.db.sqlalchemy_interface.MySQLSchemaConverter
|
class MySQLSchemaConverter(object):
def __init__(self, user, host, db, passwd, port = 3306, socket = '/var/lib/mysql/mysql.sock'):
try:
self.db_interface = DatabaseInterface({}, isInnoDB=True, numTries=1, host=host, db=db, user=user, passwd=passwd, port=port,
unix_socket=socket, passwdfile=None, use_utf=False, use_locking=True)
except Exception as e:
colortext.error('An exception was thrown trying to connect to the database.')
colortext.warning(str(e))
print((traceback.format_exc()))
sys.exit(1)
self.intermediate_schema = {}
self.tables = self.db_interface.TableNames
self._parse_schema()
def _parse_schema(self):
for tbl in self.tables:
self._create_intermediate_schema(tbl)
def get_sqlalchemy_schema(self, restrict_to_tables = []):
colortext.warning(' *** MySQL schema ***')
schema = []
#print(self.intermediate_schema)
typedefs = {'sqlalchemy.types' : set(), 'sqlalchemy.dialects.mysql' : set()}
for tbl in self.tables:
if (not restrict_to_tables) or (tbl in restrict_to_tables):
colortext.message(tbl)
print((self.db_interface.execute("SHOW CREATE TABLE %s" % tbl))[0]['Create Table'])
print('')
code = []
code.append("class %s(DeclarativeBase):" % tbl)
code.append(" __tablename__ = '%s'\n" % tbl)
#print('\n'.join(code))
intermediate_table = self.intermediate_schema[tbl]
for field in intermediate_table:
s = field.to_sql_alchemy(typedefs)
code.append(' {0}'.format(s))
#print(s)
code.append('\n')
#print('')
schema.extend(code)
imports = []
for module, types in sorted(typedefs.items()):
imports.append('from %s import %s' % (module, ', '.join(sorted(types))))
schema = imports + [''] + schema
colortext.warning('*** SQLAlchemy class definitions ***')
print(('\n'.join(schema)))
def _create_intermediate_schema(self, tbl):
code = (self.db_interface.execute("SHOW CREATE TABLE %s" % tbl))
assert(len(code) == 1)
schema = code[0]['Create Table']
#colortext.message(tbl)
#print(schema)
#print(schema)
fields = [f.strip() for f in schema[schema.find('(') + 1:schema.find('PRIMARY KEY')].strip().split('\n') if f.strip()]  # use str.strip directly; string.strip was removed in Python 3
pk_fields = re.match('.*PRIMARY\s+KEY\s*[(](.*?)[)]\s*[,)].*', schema, re.DOTALL)
assert(pk_fields)
pk_fields = [s.strip() for s in pk_fields.group(1).replace('`', '').split(',') if s.strip()]
#colortext.warning(fields)
for f in fields:
#print('')
#colortext.message(f)
if f.endswith(','):
f = f[:-1]
field_name = f.split()[0].replace('`', '')
if f.split()[1].startswith('enum('):
mtchs = re.match(".* (enum[(].*?[)])(.*)", f)
assert(mtchs)
#print('ENUM', mtchs.group(1))
field_type = mtchs.group(1)
remaining_description = mtchs.group(2)
else:
field_type = f.split()[1]
remaining_description = (' '.join(f.split()[2:])).strip()
unicode_collation_or_character_set = False
if remaining_description.find('utf') != -1:
unicode_collation_or_character_set = True
not_null = False
if remaining_description.find('NOT NULL') != -1:
not_null = True
remaining_description = remaining_description.replace('NOT NULL', '').strip()
default = False
default_type = None
default_value = None
if remaining_description.find('default CURRENT_TIMESTAMP') != -1:
default_type = 'TIMESTAMP'
default_value = None
remaining_description = remaining_description.replace('default CURRENT_TIMESTAMP', '')
elif remaining_description.find('default NULL') != -1:
default_type = 'null'
default_value = None
remaining_description = remaining_description.replace('default NULL', '')
elif remaining_description.find('default') != -1:
mtchs = re.match(".*default '(.*?)'.*", remaining_description)
if mtchs:
#print('mtchs', mtchs.group(1))
default_type = 'string'
default_value = mtchs.group(1)
remaining_description = remaining_description.replace("default '%s'" % default_value, "")
else:
colortext.error('Unexpected default value string: "{0}".'.format(remaining_description))
pass
#mtchs = re.match(".*default (.*?)(\s.*)*$", remaining_description)
#if mtchs:
# print('mtchs non-string', mtchs.group(1))
# if mtchs.group(1) == 'NULL':
# default_type = 'null'
# default_value = None
# remaining_description = remaining_description.replace('')
comment = None
mtchs = re.match(".*(COMMENT '.*?').*", remaining_description)
if mtchs:
comment = mtchs.group(1)
remaining_description = remaining_description.replace(mtchs.group(1), "")
remaining_description = remaining_description.strip()
self.intermediate_schema[tbl] = self.intermediate_schema.get(tbl, [])
self.intermediate_schema[tbl].append(IntermediateField(field_name, field_type, not_null = not_null, default_type = default_type, default_value = default_value, comment = comment, is_primary_key = field_name in pk_fields, unicode_collation_or_character_set = unicode_collation_or_character_set))
#print('field_name : %s' % field_name)
#print('field_type : %s' % field_type)
#print('not_null : %s' % not_null)
if default_type != None:
pass
#print('default: %s, %s' % (default_type, default_value))
#print('comment : %s' % comment)
if remaining_description:
#colortext.error('remaining_description : %s' % remaining_description)
pass
|
class MySQLSchemaConverter(object):
def __init__(self, user, host, db, passwd, port = 3306, socket = '/var/lib/mysql/mysql.sock'):
pass
def _parse_schema(self):
pass
def get_sqlalchemy_schema(self, restrict_to_tables = []):
pass
def _create_intermediate_schema(self, tbl):
pass
| 5 | 0 | 36 | 5 | 25 | 6 | 6 | 0.25 | 1 | 6 | 2 | 0 | 4 | 3 | 4 | 4 | 152 | 28 | 99 | 33 | 94 | 25 | 94 | 32 | 89 | 13 | 1 | 3 | 22 |
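A hedged usage sketch of the converter above; a reachable MySQL server is assumed and the credentials and table name are hypothetical:

from klab.db.sqlalchemy_interface import MySQLSchemaConverter

converter = MySQLSchemaConverter('reader', 'localhost', 'mydb', 'secret')
# Print SQLAlchemy class definitions for a subset of the schema.
converter.get_sqlalchemy_schema(restrict_to_tables = ['PDBFile'])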
143,584 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/pdbml.py
|
klab.bio.pdbml.AtomSite_xyz
|
class AtomSite_xyz(AtomSite):
fields = [
# Field name PDBML tag name Values Expected type that we store
'IsHETATM', # PDBx:group_PDB True iff record type is HETATM Boolean
'IsATOM', # PDBx:group_PDB True iff record type is ATOM Boolean
'PDBChainID', # PDBx:auth_asym_id PDB chain ID Character (alphanumeric)
'ATOMResidueID', # PDBx:auth_seq_id Residue ID (not including insertion code) Int
'ATOMResidueiCode', # PDBx:pdbx_PDB_ins_code Residue insertion code Character (alpha)
'ATOMResidueiCodeIsNull', # PDBx:pdbx_PDB_ins_code Need to determine if icode is nil Boolean
'x', # PDBx:Cartn_x x coordinate Float
'y', # PDBx:Cartn_y y coordinate Float
'z', # PDBx:Cartn_z z coordinate Float
'SEQRESIndex', # PDBx:label_seq_id The SEQRES index Int
'ATOMResidueAA', # PDBx:auth_comp_id The residue type in the ATOM sequence String (we seem to assume 3 letter protein residues here...)
'ATOMSeqresResidueAA', # PDBx:label_comp_id The residue type in the SEQRES sequence String (we seem to assume 3 letter protein residues here...)
]
def convert_to_residue(self, modified_residues):
residue_3_letter = self.ATOMResidueAA
residue_1_letter = residue_type_3to1_map.get(residue_3_letter) or protonated_residue_type_3to1_map.get(residue_3_letter) or non_canonical_amino_acids.get(residue_3_letter)
if not residue_1_letter:
residue_identifier = self.get_pdb_residue_id()
if modified_residues.get(residue_identifier):
residue_1_letter = modified_residues[residue_identifier]['original_residue_1']
if not residue_1_letter:
'''Too many cases to worry about... we will have to use residue_3_letter to sort those out.'''
residue_1_letter = 'X'
pdb_residue = IdentifyingPDBResidue(self.PDBChainID, ("%d%s" % (self.ATOMResidueID, self.ATOMResidueiCode)).rjust(5), residue_1_letter, None, residue_3_letter)
pdb_residue.add_position(self.x, self.y, self.z)
return pdb_residue, self.SEQRESIndex, residue_1_letter, residue_3_letter
|
class AtomSite_xyz(AtomSite):
def convert_to_residue(self, modified_residues):
pass
| 2 | 0 | 15 | 2 | 12 | 1 | 4 | 0.52 | 1 | 1 | 1 | 0 | 1 | 0 | 1 | 7 | 32 | 3 | 27 | 7 | 25 | 14 | 14 | 7 | 12 | 4 | 2 | 2 | 4 |
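convert_to_residue() above resolves a three-letter residue code through three lookup tables before falling back to 'X'. A self-contained sketch of that cascade; the dictionaries here are illustrative stubs, not the full klab tables:

residue_type_3to1_map = {'ALA' : 'A', 'GLY' : 'G'}
protonated_residue_type_3to1_map = {'HID' : 'H'}
non_canonical_amino_acids = {'MSE' : 'M'}

def to_one_letter(residue_3_letter):
    # Canonical types first, then protonated variants, then non-canonical residues.
    return (residue_type_3to1_map.get(residue_3_letter)
            or protonated_residue_type_3to1_map.get(residue_3_letter)
            or non_canonical_amino_acids.get(residue_3_letter)
            or 'X')

print(to_one_letter('MSE'))   # 'M'
print(to_one_letter('UNK'))   # 'X'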
143,585 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/pdbml.py
|
klab.bio.pdbml.AtomSite
|
class AtomSite(object):
# Same as AtomSite_xyz but without the x, y, z coordinate data
fields = [
# Field name PDBML tag name Values Expected type that we store
'IsHETATM', # PDBx:group_PDB True iff record type is HETATM Boolean
'IsATOM', # PDBx:group_PDB True iff record type is ATOM Boolean
'PDBChainID', # PDBx:auth_asym_id PDB chain ID Character (alphanumeric)
'PDBChainIDIsNull', # PDBx:auth_asym_id PDB chain ID Boolean
'ATOMResidueID', # PDBx:auth_seq_id Residue ID (not including insertion code) Int
'ATOMResidueiCode', # PDBx:pdbx_PDB_ins_code Residue insertion code Character (alpha)
'ATOMResidueiCodeIsNull', # PDBx:pdbx_PDB_ins_code Need to determine if icode is nil Boolean
'SEQRESIndex', # PDBx:label_seq_id The SEQRES index Int
'ATOMResidueAA', # PDBx:auth_comp_id The residue type in the ATOM sequence String (we seem to assume 3 letter protein residues here...)
'ATOMSeqresResidueAA', # PDBx:label_comp_id The residue type in the SEQRES sequence String (we seem to assume 3 letter protein residues here...)
]
def __init__(self):
self.clear()
def clear(self):
d = self.__dict__
for f in self.__class__.fields:
d[f] = None
d['IsHETATM'] = False
d['IsATOM'] = False
d['ATOMResidueiCode'] = ' '
#self.__dict__['ATOMResidueiCodeIsNull'] = True
def get_pdb_residue_id(self):
d = self.__dict__
residue_identifier = '%s%s%s' % (d['PDBChainID'], str(d['ATOMResidueID']).rjust(4), d['ATOMResidueiCode'])
assert(len(residue_identifier) == 6)
return residue_identifier
def validate(self):
# Assertions
assert(not(self.IsHETATM and self.IsATOM))
assert(self.IsHETATM or self.IsATOM)
if self.ATOMResidueiCode != ' ': # Sometimes the insertion code tag exists but is empty. In this case, its attribute xsi:nil should be "true"
assert(self.ATOMResidueiCodeIsNull == None)
assert(len(self.ATOMResidueiCode) == 1)
assert(self.ATOMResidueiCode.isalpha())
if self.ATOMResidueiCodeIsNull:
assert(self.ATOMResidueiCode == ' ')
assert(self.ATOMResidueAA == self.ATOMSeqresResidueAA)
assert(len(self.PDBChainID) == 1)
assert(self.PDBChainID.isalnum() or self.PDBChainID == ' ') # e.g. 2MBP
def convert_to_residue(self, modified_residues):
residue_3_letter = self.ATOMResidueAA
residue_1_letter = residue_type_3to1_map.get(residue_3_letter) or protonated_residue_type_3to1_map.get(residue_3_letter) or non_canonical_amino_acids.get(residue_3_letter)
if not residue_1_letter:
residue_identifier = self.get_pdb_residue_id()
if modified_residues.get(residue_identifier):
residue_1_letter = modified_residues[residue_identifier]['original_residue_1']
if not residue_1_letter:
'''Too many cases to worry about... we will have to use residue_3_letter to sort those out.'''
residue_1_letter = 'X'
pdb_residue = IdentifyingPDBResidue(self.PDBChainID, ("%d%s" % (self.ATOMResidueID, self.ATOMResidueiCode)).rjust(5), residue_1_letter, None, residue_3_letter)
return pdb_residue, self.SEQRESIndex, residue_1_letter, residue_3_letter
def __repr__(self):
# For debugging
return '\n'.join([('%s : %s' % (f.ljust(23), self.__dict__[f])) for f in self.__class__.fields if self.__dict__[f] != None])
|
class AtomSite(object):
def __init__(self):
pass
def clear(self):
pass
def get_pdb_residue_id(self):
pass
def validate(self):
pass
def convert_to_residue(self, modified_residues):
pass
def __repr__(self):
pass
| 7 | 0 | 8 | 1 | 7 | 1 | 2 | 0.35 | 1 | 2 | 1 | 1 | 6 | 0 | 6 | 6 | 75 | 17 | 52 | 16 | 45 | 18 | 41 | 16 | 34 | 4 | 1 | 2 | 12 |
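A hedged sketch of the reusable-record pattern above (assuming klab is importable): a single AtomSite instance is cleared and refilled for each <PDBx:atom_site> element instead of allocating a new object per atom.

from klab.bio.pdbml import AtomSite

site = AtomSite()
site.IsATOM = True
site.PDBChainID = 'A'
site.ATOMResidueID = 42
print(site.get_pdb_residue_id())   # 'A  42 ' - six characters: chain, padded ID, icode
print(site)                        # __repr__ lists every field that is not None
site.clear()                       # reset all fields before the next atom record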
143,586 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/cache.py
|
klab.bio.cache.CacheNode
|
class CacheNode(object):
'''Simple class to store an object and the time of insertion.'''
def __init__(self, payload):
self.t = datetime.datetime.now()
self.o = payload
def get(self):
'''Refresh the access time and return the object.'''
self.t = datetime.datetime.now()
return self.o
def __repr__(self): return '{0}: {1}'.format(self.t, self.o.__repr__()[:50])
def __cmp__(self, other): return (self.t).__cmp__(other.t)
def __gt__(self, other): return (self.t).__gt__(other.t)
def __ge__(self, other): return (self.t).__ge__(other.t)
def __lt__(self, other): return (self.t).__lt__(other.t)
def __le__(self, other): return (self.t).__le__(other.t)
def __eq__(self, other): return (self.t).__eq__(other.t)
def __ne__(self, other): return (self.t).__ne__(other.t)
|
class CacheNode(object):
'''Simple class to store an object and the time of insertion.'''
def __init__(self, payload):
pass
def get(self):
'''Refresh the access time and return the object.'''
pass
def __repr__(self):
pass
def __cmp__(self, other):
pass
def __gt__(self, other):
pass
def __ge__(self, other):
pass
def __lt__(self, other):
pass
def __le__(self, other):
pass
def __eq__(self, other):
pass
def __ne__(self, other):
pass
| 11 | 2 | 2 | 0 | 1 | 0 | 1 | 0.13 | 1 | 1 | 0 | 0 | 10 | 2 | 10 | 10 | 21 | 4 | 15 | 13 | 12 | 2 | 23 | 13 | 12 | 1 | 1 | 0 | 10 |
143,587 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/cache.py
|
klab.bio.cache.CacheNodeDict
|
class CacheNodeDict(dict):
def __getitem__(self, k):
return dict.__getitem__(self, k).get()
|
class CacheNodeDict(dict):
def __getitem__(self, k):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 28 | 4 | 1 | 3 | 2 | 1 | 0 | 3 | 2 | 1 | 1 | 2 | 0 | 1 |
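A hedged sketch of how the two small classes above combine into a least-recently-used policy (assuming klab is importable): CacheNodeDict.__getitem__ refreshes a node's timestamp on access, and the nodes' comparison operators order them by that timestamp, so the minimum node is the eviction candidate. The keys and payloads are illustrative:

import time
from klab.bio.cache import CacheNode, CacheNodeDict

cache = CacheNodeDict()
cache['1A2P'] = CacheNode('contents of 1A2P')
time.sleep(0.01)
cache['2BTF'] = CacheNode('contents of 2BTF')
time.sleep(0.01)
_ = cache['1A2P']   # transparent payload access; also refreshes the timestamp
oldest_key, oldest_node = min(cache.items(), key = lambda kv: kv[1])
del cache[oldest_key]   # evicts '2BTF', the least recently accessed entry
print(list(cache))      # ['1A2P']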
143,588 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/bonsai.py
|
klab.bio.bonsai.Bonsai
|
class Bonsai(ResidueIndexedPDBFile):
### Constructors
def __init__(self, pdb_content, buffer = 0.05, bin_size = 5.1, safe_mode = True, FASTA_line_length = 80):
super(Bonsai, self).__init__(pdb_content, buffer = buffer, bin_size = bin_size, safe_mode = safe_mode)
self.FASTA_line_length = FASTA_line_length
### Queries
def get_atom_names_by_group(self, groups):
names = set()
groups = set(groups)
for nm, g in self.atom_name_to_group.items():
if g in groups:
names.add(nm)
return names
### Base functionality
def find_heavy_atoms_near_atom(self, source_atom, search_radius, atom_hit_cache = set(), restrict_to_CA = False):
'''atom_hit_cache is a set of atom serial numbers which have already been tested. We keep track of these to avoid recalculating the distance.
'''
#todo: Benchmark atom_hit_cache to see if it actually speeds up the search
non_heavy_atoms = self.get_atom_names_by_group(set(['H', 'D', 'T']))
return self.find_atoms_near_atom(source_atom, search_radius, atom_names_to_exclude = non_heavy_atoms, atom_hit_cache = atom_hit_cache, restrict_to_CA = restrict_to_CA)
def find_atoms_near_atom(self, source_atom, search_radius, atom_hit_cache = set(), atom_names_to_include = set(), atom_names_to_exclude = set(), restrict_to_CA = False):
'''It is advisable to set up and use an atom hit cache object. This reduces the number of distance calculations and gives better performance.
See find_sidechain_atoms_within_radius_of_residue_objects for an example of how to set this up e.g.
atom_hit_cache = set()
for x in some_loop:
this_object.find_atoms_near_atom(source_atom, search_radius, atom_hit_cache = atom_hit_cache)
'''
if len(atom_names_to_include) > 0 and len(atom_names_to_exclude) > 0:
raise Exception('Error: either one of the set of atoms types to include or the set of atom types to exclude can be set but not both.')
atom_names_to_exclude = set(atom_names_to_exclude)
if atom_names_to_include:
atom_names_to_exclude = set(self.atom_name_to_group.keys()).difference(atom_names_to_include)
radius = float(search_radius) + self.buffer # add buffer to account for edge cases in searching
bin_size = self.bin_size
atom_bins = self.atom_bins
if source_atom:
bin_radius = int(math.ceil(radius / bin_size)) # search this many bins in all directions
xrange = list(range(max(0, source_atom.bin.x - bin_radius), min(self.atom_bin_dimensions[0], source_atom.bin.x + bin_radius) + 1))
yrange = list(range(max(0, source_atom.bin.y - bin_radius), min(self.atom_bin_dimensions[1], source_atom.bin.y + bin_radius) + 1))
zrange = list(range(max(0, source_atom.bin.z - bin_radius), min(self.atom_bin_dimensions[2], source_atom.bin.z + bin_radius) + 1))
for x in xrange:
for y in yrange:
for z in zrange:
for atom in atom_bins[x][y][z]:
if atom not in atom_hit_cache:
if restrict_to_CA:
if atom.name == 'CA' and (source_atom - atom <= search_radius):
atom_hit_cache.add(atom)
else:
if (source_atom - atom <= search_radius) and (atom.name not in atom_names_to_exclude):
atom_hit_cache.add(atom)
return atom_hit_cache
def get_atom(self, atom_serial_number):
source_atom = self.atoms.get(atom_serial_number)
if source_atom:
return source_atom
else:
raise Exception('ATOM {0} was not found.'.format(atom_serial_number))
def get_atom_set_complement(self, atoms):
complement = set()
serial_numbers = set([a.serial_number for a in atoms])
for serial_number, atom in self.atoms.items():
if serial_number not in serial_numbers:
complement.add(atom)
return complement
### Main functionality
def find_sidechain_atoms_within_radius_of_residues(self, source_residue_ids, search_radius):
#for residue in all residues:
# for all heavy atoms in residue
# find all heavy atoms within radius which are within residues (ATOM records)
# return the residue ID
#for all found residues
# identify all non-backbone_atoms
# split the Bonsai by these atoms
pass
def find_residues_within_radius_of_residue_id(self, chain_id, residue_id, search_radius, find_ATOM_atoms = True, find_HETATM_atoms = False, restrict_to_CA = False):
r = Residue(chain_id, PDB.ResidueID2String(residue_id), 'X')
return self.find_residues_within_radius_of_residue_objects([r], search_radius, find_ATOM_atoms = find_ATOM_atoms, find_HETATM_atoms = find_HETATM_atoms, restrict_to_CA = restrict_to_CA)
def find_residues_within_radius_of_residue_objects(self, source_residues, search_radius, find_ATOM_atoms = True, find_HETATM_atoms = False, restrict_to_CA = False):
found_residues = set()
for source_residue in source_residues:
r = self.residues[source_residue.chain][source_residue.residue_id]
sidechain_atom_serial_numbers = self.find_sidechain_atoms_within_radius_of_residue_objects([r], search_radius, find_ATOM_atoms = find_ATOM_atoms, find_HETATM_atoms = find_HETATM_atoms, restrict_to_CA = restrict_to_CA)
for serial_no in sidechain_atom_serial_numbers:
found_residues.add(self.atoms[serial_no].residue)
return sorted(found_residues)
def find_sidechain_atoms_within_radius_of_residue_objects(self, source_residues, search_radius, find_ATOM_atoms = True, find_HETATM_atoms = False, restrict_to_CA = False):
'''for residue in source_residues:
for all heavy atoms in residue
find all heavy atoms within radius which are within residues (ATOM records)
for all heavy atoms found
determine the associated residue
for all found residues not in source_residues
identify all non-backbone atoms
return the non-backbone atoms'''
atom_hit_cache = set()
for residue in source_residues:
if find_ATOM_atoms:
for aatom in residue.get('ATOM'):
self.find_heavy_atoms_near_atom(aatom, search_radius, atom_hit_cache = atom_hit_cache, restrict_to_CA = restrict_to_CA)
if find_HETATM_atoms:
for hatom in residue.get('HETATM'):
self.find_heavy_atoms_near_atom(hatom, search_radius, atom_hit_cache = atom_hit_cache, restrict_to_CA = restrict_to_CA)
# Get the list of source_residues
loop_residue_ids = set()
for sres in source_residues:
loop_residue_ids.add(sres.id())
# Get the list of atoms to be removed (all sidechain atoms - including non-heavy atoms - of the found residues which are not in source_residues)
sidechain_atom_serial_numbers = set()
nearby_residues = set()
nearby_residue_ids = set()
for a in atom_hit_cache:
residue_id = a.residue.id()
if residue_id not in loop_residue_ids:
nearby_residues.add(a.residue)
nearby_residue_ids.add(residue_id)
for nearby_residue in nearby_residues:
for aatom in nearby_residue.get('ATOM'):
if aatom.name not in backbone_atoms:
sidechain_atom_serial_numbers.add(aatom.serial_number)
assert(len(nearby_residue_ids.intersection(loop_residue_ids)) == 0)
return sidechain_atom_serial_numbers
### Higher-level functionality
def prune_loop_for_kic(self, loops_segments, search_radius, expected_min_loop_length = None, expected_max_loop_length = None, generate_pymol_session = False):
'''A wrapper for prune_structure_according_to_loop_definitions suitable for the Rosetta kinematic closure (KIC) loop modeling method.'''
return self.prune_structure_according_to_loop_definitions(loops_segments, search_radius, expected_min_loop_length = expected_min_loop_length, expected_max_loop_length = expected_max_loop_length, generate_pymol_session = generate_pymol_session, check_sequence = True, keep_Ca_buttress_atoms = True)
def prune_structure_according_to_loop_definitions(self, loops_segments, search_radius, expected_min_loop_length = None, expected_max_loop_length = None, generate_pymol_session = True, check_sequence = False, keep_Ca_buttress_atoms = True):
'''Removes the loop residues identified by the residues in loops_segments and all sidechains with heavy atoms
within search_radius of any heavy atom of the loop.
If keep_Ca_buttress_atoms is set then the N and Ca backbone atoms of the first loop residue are kept and the
Ca and C backbone atoms of the last loop residue are kept. This is a useful option to use for Rosetta as
discarding those atoms can negatively affect some of the loop modeling protocols.
'''
# Extract the list of Residues defined in the loops definitions
loop_residues = []
loop_N_terminii = []
loop_C_terminii = []
parsed_sequences = []
for loop in loops_segments:
# Identify the Residues corresponding to the loops file definition
start_id = loop.StartResidueID
end_id = loop.EndResidueID
chain_id = loop.Chain
start_residue, end_residue = None, None
try:
start_residue = self.residues[chain_id][loop.StartResidueID]
end_residue = self.residues[chain_id][loop.EndResidueID]
except Exception as e:
raise Exception('Could not find the start or end residue in the chain.')
if start_residue in loop_residues or end_residue in loop_residues:
raise Exception('Error: The loops segments overlap.')
# We assume that the loops file identifies a range in document order
loop_residue_ids = []
loop_start = False
for l in self.indexed_lines:
if l[0] == 'ATOM' or l[0] == 'HETATM':
atom_residue = l[3].residue
if not loop_start:
if atom_residue == start_residue:
loop_start = True
if atom_residue not in loop_residues:
if atom_residue.residue_id not in loop_residue_ids:
loop_residue_ids.append(atom_residue.residue_id)
loop_residues.append(atom_residue)
else:
if atom_residue not in loop_residues:
if atom_residue.residue_id not in loop_residue_ids:
loop_residue_ids.append(atom_residue.residue_id)
loop_residues.append(atom_residue)
if atom_residue == end_residue:
break
parsed_sequence = ''.join([loop_residue.get_amino_acid_code() for loop_residue in loop_residues])
if check_sequence:
if not parsed_sequence == loop.Sequence:
raise Exception('Expected to find sequence {0} but found sequence {1}.'.format(loop.Sequence, parsed_sequence))
parsed_sequences.append((parsed_sequence, ';'.join([chain_id + lrid.strip() for lrid in loop_residue_ids])))
# These parameters currently only makes sense for a single loop
if expected_min_loop_length != None and ((expected_min_loop_length > len(loop_residues)) or (expected_min_loop_length > len(loop_residue_ids))):
raise Exception('Expected to identify at least {0} residues but {1} were identified.'.format(expected_min_loop_length, len(loop_residues)))
if expected_max_loop_length != None and ((expected_max_loop_length < len(loop_residues)) or (expected_max_loop_length < len(loop_residue_ids))):
raise Exception('Expected to identify at most {0} residues but {1} were identified.'.format(expected_max_loop_length, len(loop_residues)))
# Keep track of the loop terminii
loop_N_terminii.append(start_residue)
loop_C_terminii.append(end_residue)
# Determine the sidechain atoms to be removed
sidechain_atom_serial_numbers = self.find_sidechain_atoms_within_radius_of_residue_objects(loop_residues, search_radius)
# Determine the loop residue atoms to be removed
loop_residues_ids = set()
loop_atom_serial_numbers = set()
atoms_serial_numbers_to_keep_in_cutting = set()
for loop_residue in loop_residues:
for aatom in loop_residue.get('ATOM'):
if keep_Ca_buttress_atoms:
# Keep the N and CA atoms of the N-terminus side and the CA and C atoms of the C-terminus side
if (loop_residue in loop_N_terminii) and (aatom.name == 'N' or aatom.name == 'CA'):
atoms_serial_numbers_to_keep_in_cutting.add(aatom.serial_number)
continue
elif (loop_residue in loop_C_terminii) and (aatom.name == 'CA' or aatom.name == 'C'):
atoms_serial_numbers_to_keep_in_cutting.add(aatom.serial_number)
continue
loop_atom_serial_numbers.add(aatom.serial_number)
assert(len(sidechain_atom_serial_numbers.intersection(loop_atom_serial_numbers)) == 0)
# Create a FASTA file with the loops' sequences
FASTA = []
FASTA_line_length = self.FASTA_line_length
for x in range(len(parsed_sequences)):
parsed_sequence = parsed_sequences[x][0]
FASTA.append('>loop_{0}|Residues {1}'.format(x + 1, parsed_sequences[x][1].strip()))
for idx in range(0, len(parsed_sequence), FASTA_line_length):
FASTA.append(parsed_sequence[idx:idx + FASTA_line_length])
FASTA_file = '\n'.join(FASTA)
bonsai_pdb_content, cutting_pdb_content, PSE_file, PSE_script = self.prune(loop_atom_serial_numbers, sidechain_atom_serial_numbers, atoms_serial_numbers_to_keep_in_cutting = atoms_serial_numbers_to_keep_in_cutting, generate_pymol_session = generate_pymol_session)
return bonsai_pdb_content, cutting_pdb_content, PSE_file, PSE_script, FASTA_file
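# Illustrative sketch (not part of the original file): the FASTA wrapping that
# the method above performs, pulled out as a standalone helper. The helper name
# '_wrap_fasta' and the sample header/sequence are made up for the example.
def _wrap_fasta(header, sequence, line_length = 80):
    # One '>' header line, then the sequence split into fixed-width lines.
    lines = ['>' + header]
    for idx in range(0, len(sequence), line_length):
        lines.append(sequence[idx:idx + line_length])
    return '\n'.join(lines)
# e.g. _wrap_fasta('loop_1|Residues A23;A24;A25', 'ACDEFGHIKLMNPQRSTVWY' * 5)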
def prune_structure_according_to_loops_file(self, loops_file_content, search_radius, expected_loop_length = None, generate_pymol_session = True):
'''todo: this needs to be rewritten to include the logic in prune_structure_according_to_loop_definitions.'''
lf = LoopsFile(loops_file_content)
assert(len(lf.data) == 1) # todo: remove
# Extract the list of Residues defined in the loops file
loop_residues = []
for loop in lf.data:
# Identify the Residues corresponding to the loops file definition
start_id = str(loop['start'])
end_id = str(loop['end'])
start = []
end = []
for chain, chain_residues in self.residues.items():
for res, residue_object in chain_residues.items():
if res.strip() == start_id:
start.append((chain, res, residue_object))
if res.strip() == end_id:
end.append((chain, res, residue_object))
if len(start) != 1 or len(end) != 1:
raise Exception('The PDB is ambiguous with respect to the loops file i.e. zero or more than one PDB residue corresponds to the start or end residue defined in the loops file.')
start, end = start[0], end[0]
# We assume that the loops file identifies a range in document order
loop_start = False
for l in self.indexed_lines:
if l[0] == 'ATOM' or l[0] == 'HETATM':
atom_residue = l[3].residue
if not loop_start:
if atom_residue == start[2]:
loop_start = True
if atom_residue not in loop_residues:
loop_residues.append(atom_residue)
else:
if atom_residue not in loop_residues:
loop_residues.append(atom_residue)
if atom_residue == end[2]:
break
if expected_loop_length != None and expected_loop_length != len(loop_residues):
raise Exception('Expected to identify {0} residues but {1} were identified.'.format(expected_loop_length, len(loop_residues)))
# Determine the sidechain atoms to be removed
sidechain_atom_serial_numbers = self.find_sidechain_atoms_within_radius_of_residue_objects(loop_residues, search_radius)
# Determine the loop residue atoms to be removed
loop_residues_ids = set()
loop_atom_serial_numbers = set()
for loop_residue in loop_residues:
for aatom in loop_residue.get('ATOM'):
loop_atom_serial_numbers.add(aatom.serial_number)
assert(len(sidechain_atom_serial_numbers.intersection(loop_atom_serial_numbers)) == 0)
return self.prune(loop_atom_serial_numbers, sidechain_atom_serial_numbers, generate_pymol_session = generate_pymol_session)
def prune(self, arbitrary_atom_serial_numbers, sidechain_atom_serial_numbers = set(), atoms_serial_numbers_to_keep_in_cutting = set(), keep_CA_in_cutting = True, generate_pymol_session = True, bonsai_label = 'Bonsai', cutting_label = 'Cutting', pymol_executable = 'pymol'):
'''Returns the content of two PDB files and (optionally) a PyMOL session and associated script.
The first returned PDB file ("bonsai") is missing the ATOM (and any related ANISOU) and HETATM records identified by atom_serial_numbers.
The second returned PDB file ("cutting") only contains ATOM, ANISOU, and HETATM records which are identified by atom_serial_numbers.
Both PDB objects contain all records from the original PDB which are not ATOM, ANISOU, or HETATM records.
If keep_CA_in_cutting is set, the cutting will also contain the associated Calpha atoms. This is useful purely
to visualize the cutting in the PyMOL session. If a PyMOL session is not to be generated, this option should
be set to False.
'''
bonsai = []
cutting = []
# Determine the set of sidechain residues in case keep_CA_in_cutting is True and we wish to keep those atoms in the cutting
sidechain_residues = set()
if keep_CA_in_cutting and sidechain_atom_serial_numbers:
for line in self.indexed_lines:
if line[0] == 'ATOM' and line[1] in sidechain_atom_serial_numbers:
residue_id = line[3].residue.id()
sidechain_residues.add(residue_id[0] + residue_id[1])
atom_serial_numbers_to_remove = arbitrary_atom_serial_numbers.union(sidechain_atom_serial_numbers)
for line in self.indexed_lines:
if line[0]: # record type
PDB_line = line[2]
if line[1] in atom_serial_numbers_to_remove:
cutting.append(PDB_line)
else:
if atoms_serial_numbers_to_keep_in_cutting and int(PDB_line[6:11]) in atoms_serial_numbers_to_keep_in_cutting:
cutting.append(PDB_line)
elif keep_CA_in_cutting and PDB_line[21:27] in sidechain_residues and PDB_line[12:16] == ' CA ':
cutting.append(PDB_line)
bonsai.append(PDB_line)
else:
bonsai.append(line[1])
cutting.append(line[1])
bonsai_pdb_content = '\n'.join(bonsai)
cutting_pdb_content = '\n'.join(cutting)
PSE_file, PSE_script = None, None
try:
PSE_file, PSE_script = self.generate_pymol_session(bonsai_pdb_content, cutting_pdb_content, bonsai_label = bonsai_label, cutting_label = cutting_label, pymol_executable = pymol_executable, settings = {})
except Exception as e:
colortext.error('Failed to generate the PyMOL session: "{0}"'.format(e))
return bonsai_pdb_content, cutting_pdb_content, PSE_file, PSE_script
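# Illustrative sketch (not part of the original file): the fixed-column PDB
# fields that prune() slices above, demonstrated on a made-up ATOM record.
# In the standard PDB format the serial number sits in columns 7-11, the atom
# name in columns 13-16, and the chain plus residue id in columns 22-27.
sample_atom_line = 'ATOM      2  CA  ALA A  23      11.104   6.134   2.228  1.00 10.00           C'
assert int(sample_atom_line[6:11]) == 2       # serial number
assert sample_atom_line[12:16] == ' CA '      # atom name
assert sample_atom_line[21:27] == 'A  23 '    # chain + residue id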
def generate_pymol_session(self, bonsai_pdb_content, cutting_pdb_content, bonsai_label = 'Bonsai', cutting_label = 'Cutting', pymol_executable = 'pymol', settings = {}):
''' Generates the PyMOL session for the scaffold, model, and design structures.
Returns this session and the script which generated it.'''
if not pymol_load_failed:
b = BatchBuilder(pymol_executable = pymol_executable)
loop_residues = set()
for l in cutting_pdb_content.split('\n'):
if l.startswith('ATOM ') and l[12:16] == ' C ':
loop_residues.add(l[21:27])
loop_residues = sorted(loop_residues)
structures_list = [
(bonsai_label, bonsai_pdb_content, set()),
(cutting_label, cutting_pdb_content, loop_residues),
]
settings['Main'] = bonsai_label
settings['Loop'] = cutting_label
PSE_files = b.run(LoopRemovalBuilder, [PDBContainer.from_content_triple(structures_list)], settings = settings)
return PSE_files[0], b.PSE_scripts[0]
@staticmethod
def convert_to_pse(bonzai, cutting):
'''Returns a PyMOL session containing the two parts of the PDB file split using prune.'''
pass
|
class Bonsai(ResidueIndexedPDBFile):
def __init__(self, pdb_content, buffer = 0.05, bin_size = 5.1, safe_mode = True, FASTA_line_length = 80):
pass
def get_atom_names_by_group(self, groups):
pass
def find_heavy_atoms_near_atom(self, source_atom, search_radius, atom_hit_cache = set(), restrict_to_CA = False):
'''atom_hit_cache is a set of atom serial numbers which have already been tested. We keep track of these to avoid recalculating the distance.
'''
pass
def find_atoms_near_atom(self, source_atom, search_radius, atom_hit_cache = set(), atom_names_to_include = set(), atom_names_to_exclude = set(), restrict_to_CA = False):
'''It is advisable to set up and use an atom hit cache object. This reduces the number of distance calculations and gives better performance.
See find_sidechain_atoms_within_radius_of_residue_objects for an example of how to set this up e.g.
atom_hit_cache = set()
for x in some_loop:
this_object.find_atoms_near_atom(source_atom, search_radius, atom_hit_cache = atom_hit_cache)
'''
pass
def get_atom_names_by_group(self, groups):
pass
def get_atom_set_complement(self, atoms):
pass
def find_sidechain_atoms_within_radius_of_residues(self, source_residue_ids, search_radius):
pass
def find_residues_within_radius_of_residue_id(self, chain_id, residue_id, search_radius, find_ATOM_atoms = True, find_HETATM_atoms = False, restrict_to_CA = False):
pass
def find_residues_within_radius_of_residue_objects(self, source_residues, search_radius, find_ATOM_atoms = True, find_HETATM_atoms = False, restrict_to_CA = False):
pass
def find_sidechain_atoms_within_radius_of_residue_objects(self, source_residues, search_radius, find_ATOM_atoms = True, find_HETATM_atoms = False, restrict_to_CA = False):
'''for residue in source_residues:
for all heavy atoms in residue
find all heavy atoms within radius which are within residues (ATOM records)
for all heavy atoms found
determine the associated residue
for all found residues not in source_residues
identify all non-backbone atoms
return the non-backbone atoms'''
pass
def prune_loop_for_kic(self, loops_segments, search_radius, expected_min_loop_length = None, expected_max_loop_length = None, generate_pymol_session = False):
'''A wrapper for prune_structure_according_to_loop_definitions suitable for the Rosetta kinematic closure (KIC) loop modeling method.'''
pass
def prune_structure_according_to_loop_definitions(self, loops_segments, search_radius, expected_min_loop_length = None, expected_max_loop_length = None, generate_pymol_session = True, check_sequence = False, keep_Ca_buttress_atoms = True):
'''Removes the loop residues identified by the residues in loops_segments and all sidechains with heavy atoms
within 10A of any heavy atom of the loop.
If keep_Ca_buttress_atoms is set then the N and Ca backbone atoms of the first loop residue are kept and the
Ca and C backbone atoms of the last loop residue are kept. This is a useful option to use for Rosetta as
discarding those atoms can negatively affect some of the loop modeling protocols.
'''
pass
def prune_structure_according_to_loops_file(self, loops_file_content, search_radius, expected_loop_length = None, generate_pymol_session = True):
'''todo: this needs to be rewritten to include the logic in prune_structure_according_to_loop_definitions.'''
pass
def prune(self, arbitrary_atom_serial_numbers, sidechain_atom_serial_numbers = set(), atoms_serial_numbers_to_keep_in_cutting = set(), keep_CA_in_cutting = True, generate_pymol_session = True, bonsai_label = 'Bonsai', cutting_label = 'Cutting', pymol_executable = 'pymol'):
'''Returns the content of two PDB files and (optionally) a PyMOL session and associated script.
The first returned PDB file ("bonsai") is missing the ATOM (and any related ANISOU) and HETATM records identified by atom_serial_numbers.
The second returned PDB file ("cutting") only contains ATOM, ANISOU, and HETATM records which are identified by atom_serial_numbers.
Both PDB objects contain all records from the original PDB which are not ATOM, ANISOU, or HETATM records.
If keep_CA_in_cutting is set, the cutting will also contain the associated Calpha atoms. This is useful purely
to visualize the cutting in the PyMOL session. If a PyMOL session is not to be generated, this option should
be set to False.
'''
pass
def generate_pymol_session(self, bonsai_pdb_content, cutting_pdb_content, bonsai_label = 'Bonsai', cutting_label = 'Cutting', pymol_executable = 'pymol', settings = {}):
''' Generates the PyMOL session for the scaffold, model, and design structures.
Returns this session and the script which generated it.'''
pass
@staticmethod
def convert_to_pse(bonzai, cutting):
'''Returns a PyMOL session containing the two parts of the PDB file split using prune.'''
pass
| 18 | 9 | 22 | 2 | 16 | 4 | 6 | 0.27 | 1 | 14 | 6 | 1 | 15 | 1 | 16 | 24 | 395 | 71 | 259 | 114 | 241 | 69 | 247 | 111 | 230 | 24 | 2 | 8 | 96 |
143,589 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/bonsai.py
|
klab.bio.bonsai.Bin
|
class Bin(object):
def __init__(self, x, y, z):
self.x, self.y, self.z = x, y, z
self.items = []
def append(self, i):
self.items.append(i)
def __len__(self):
return len(self.items)
def __iter__(self):
return self.items.__iter__()
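# Illustrative sketch (not part of the original file): how a spatial grid might
# hand atoms to these bins. bin_index and the 5.1 Angstrom bin width are
# assumptions for the example, echoing the bin_size default used elsewhere in
# this module.
import math

def bin_index(coord, bin_size = 5.1):
    # Map a 1-D coordinate onto an integer grid cell index.
    return int(math.floor(coord / bin_size))

b = Bin(bin_index(11.1), bin_index(6.1), bin_index(2.2))
b.append('some atom')
assert len(b) == 1 and list(b) == ['some atom']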
|
class Bin(object):
def __init__(self, x, y, z):
pass
def append(self, i):
pass
def __len__(self):
pass
def __iter__(self):
pass
| 5 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 4 | 4 | 4 | 4 | 14 | 4 | 10 | 7 | 5 | 0 | 10 | 7 | 5 | 1 | 1 | 0 | 4 |
143,590 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/bonsai.py
|
klab.bio.bonsai.Atom
|
class Atom(object):
def __init__(self, residue, name, group, serial_number, conformation):
self.residue = residue
self.x, self.y, self.z = None, None, None
self.point = None
self.serial_number = serial_number
self.name = name
self.group = group
self.bin = None
self.record_type = None
self.conformation = conformation
def place(self, x, y, z, record_type = None):
self.x = x
self.y = y
self.z = z
self.record_type = record_type
self.point = numpy.array([x, y, z])
def set_bin(self, bin):
assert(self.bin == None or self.bin == bin)
self.bin = bin
def __sub__(self, other):
'''Returns the distance (Euclidean/Frobenius norm) between this point and the other point.'''
return numpy.linalg.norm(self.point - other.point)
def __repr__(self):
if self.point is None:
return '{0} {1}'.format(self.name, self.serial_number)
else:
return '{0}: {1} {2} at ({3}, {4}, {5})'.format(self.record_type, self.name, self.serial_number, self.x, self.y, self.z)
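# Illustrative sketch (not part of the original file), assuming numpy is
# imported as in the module above: Atom.__sub__ returns the Euclidean distance
# between two placed atoms via numpy.linalg.norm. The None residue/conformation
# arguments are placeholders for the example.
a1 = Atom(None, 'CA', 'ATOM', 1, None)
a1.place(0.0, 0.0, 0.0)
a2 = Atom(None, 'CB', 'ATOM', 2, None)
a2.place(3.0, 4.0, 0.0)
assert abs((a1 - a2) - 5.0) < 1e-9  # a 3-4-5 right triangle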
|
class Atom(object):
def __init__(self, residue, name, group, serial_number, conformation):
pass
def place(self, x, y, z, record_type = None):
pass
def set_bin(self, bin):
pass
def __sub__(self, other):
'''Returns the distance (Euclidean/Frobenius norm) between this point and the other point.'''
pass
def __repr__(self):
pass
| 6 | 1 | 5 | 0 | 5 | 0 | 1 | 0.04 | 1 | 0 | 0 | 0 | 5 | 11 | 5 | 5 | 38 | 10 | 27 | 15 | 21 | 1 | 26 | 15 | 20 | 2 | 1 | 1 | 6 |
143,591 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/blast.py
|
klab.bio.blast.BLAST
|
class BLAST(object):
'''Using a class makes it easier to set the BLAST parameters once.'''
date_format = '%Y-%m-%dT%H:%M:%S'
def __init__(self, bio_cache = None, cache_dir = None, matrix = 'BLOSUM62', silent = False, cut_off = 0.001, sequence_identity_cut_off = 70, stale_period_in_hours = 7 * 24, min_sequence_length = 20, force_lookup = False):
'''If data is staler than stale_period_in_hours then we query it anew from the source e.g. BLAST results.'''
self.bio_cache = bio_cache
self.cache_dir = cache_dir
if not(bio_cache) and (cache_dir and os.path.exists(cache_dir)):
self.bio_cache = BioCache(cache_dir = cache_dir , max_capacity = 1000, silent = True)
self.silent = silent
self.matrix = matrix
self.cut_off = cut_off
self.sequence_identity_cut_off = sequence_identity_cut_off
self.stale_period_in_hours = stale_period_in_hours
self.min_sequence_length = min_sequence_length
self.force_lookup = force_lookup
#########################
# Utility functions
#########################
def log(self, msg, silent, pfunk = None):
if silent == None:
silent = self.silent
if not silent:
if not pfunk:
print(msg)
else:
pfunk(msg)
#########################
# BLAST functions
#########################
def by_pdb(self, pdb_id, take_top_percentile = 30.0, cut_off = None, matrix = None, sequence_identity_cut_off = None, silent = None):
'''Returns a list of all PDB files which contain protein sequences similar to the protein sequences of pdb_id.
Only protein chains are considered in the matching so e.g. some results may have DNA or RNA chains or ligands
while some may not.
'''
self.log('BLASTing {0}'.format(pdb_id), silent, colortext.pcyan)
# Preamble
matrix = matrix or self.matrix
cut_off = cut_off or self.cut_off
sequence_identity_cut_off = sequence_identity_cut_off or self.sequence_identity_cut_off
# Parse PDB file
p = self.bio_cache.get_pdb_object(pdb_id)
chain_ids = sorted(p.seqres_sequences.keys())
assert(chain_ids)
# Run BLAST over all chains
hits = set(self.blast_by_pdb_chain(pdb_id, chain_ids[0], cut_off = cut_off, matrix = matrix, sequence_identity_cut_off = sequence_identity_cut_off, take_top_percentile = take_top_percentile, silent = silent))
for chain_id in chain_ids[1:]:
chain_hits = self.blast_by_pdb_chain(pdb_id, chain_id, cut_off = cut_off, matrix = matrix, sequence_identity_cut_off = sequence_identity_cut_off, take_top_percentile = take_top_percentile)
if chain_hits != None:
# None suggests that the chain was not a protein chain whereas an empty list suggest a protein chain with no hits
hits = hits.intersection(set(chain_hits))
return sorted(hits)
def blast_by_pdb_chain(self, pdb_id, chain_id, take_top_percentile = 30.0, cut_off = None, matrix = None, sequence_identity_cut_off = None, silent = None):
# Checks
pdb_id, chain_id = pdb_id.strip(), chain_id.strip()
if len(pdb_id) != 4:
raise Exception('A PDB ID of four characters was expected. "{0}" was passed.'.format(pdb_id))
if not (1 <= len(chain_id) <= 4):
raise Exception('A chain ID of between 1-4 characters was expected. "{0}" was passed.'.format(chain_id))
self.log('BLASTing {0}:{1}'.format(pdb_id, chain_id), silent)
# Construct query
query_data = dict(
structureId = pdb_id,
chainId = chain_id,
)
xml_query = self._construct_query(query_data, cut_off = cut_off, matrix = matrix, sequence_identity_cut_off = sequence_identity_cut_off)
# Read cached results
if self.bio_cache:
data = self.bio_cache.load_pdb_chain_blast(pdb_id, chain_id, query_data['eCutOff'], query_data['matrix'], query_data['sequenceIdentityCutoff'])
if data:
assert('query_date' in data)
query_date = datetime.datetime.strptime(data['query_date'], BLAST.date_format)
age_in_hours = ((datetime.datetime.now() - query_date).total_seconds()) / (3600.0)
assert(age_in_hours > -24.01)
if not self.force_lookup:
if age_in_hours < self.stale_period_in_hours:
return data['hits']
# POST the request and parse the PDB hits
result = self._post(xml_query)
hits = [l.strip().split(':')[0] for l in result.split('\n') if l.strip()]
if pdb_id not in hits:
if not hits:
try:
p = self.bio_cache.get_pdb_object(pdb_id)
chain_type = p.chain_types[chain_id]
sequence_length = len(p.seqres_sequences[chain_id])
if not(chain_type == 'Protein' or chain_type == 'Protein skeleton'):
colortext.warning('Chain {1} of {0} is a {2} chain.'.format(pdb_id, chain_id, chain_type))
hits = None # None suggests that the chain was not a protein chain whereas an empty list suggest a protein chain with no hits
elif sequence_length < self.min_sequence_length:
colortext.warning('Chain {1} of {0} only contains {2} residues. The minimum sequence length is set to {3} residues so we will ignore this chain in matching.'.format(pdb_id, chain_id, sequence_length, self.min_sequence_length))
hits = None # None suggests that the chain was not a protein chain whereas an empty list suggest a protein chain with no hits
except:
raise colortext.Exception('Failed to determine the chain type for chain {1} of {0}.'.format(pdb_id, chain_id))
else:
raise Exception('A BLAST of {0} chain {1} failed to find any hits for {0}. Is the chain a polypeptide chain?'.format(pdb_id, chain_id))
query_data['hits'] = hits
# Cache the results
if self.bio_cache:
self.bio_cache.save_pdb_chain_blast(pdb_id, chain_id, query_data['eCutOff'], query_data['matrix'], query_data['sequenceIdentityCutoff'], query_data)
return query_data['hits']
def by_sequence(self, sequence, take_top_percentile = 30.0, cut_off = None, matrix = None, sequence_identity_cut_off = None, silent = None):
# Checks
if set(sequence).intersection(upper_case_letters) != set(sequence): # We allow all characters just in case these are valid. Alternatively, we could check against basics.py::residue_type_1to3_map.keys() - 'X'
raise Exception('The sequence {0} contained unexpected characters: {1}.'.format(colortext.myellow(sequence), colortext.morange(','.join(sorted(set(sequence).difference(upper_case_letters))))))
self.log('BLASTing sequence {0}'.format(sequence), silent)
# Construct query
query_data = dict(sequence = sequence)
xml_query = self._construct_query(query_data, cut_off = cut_off, matrix = matrix, sequence_identity_cut_off = sequence_identity_cut_off)
# Read cached results
if self.bio_cache:
data = self.bio_cache.load_sequence_blast(sequence, query_data['eCutOff'], query_data['matrix'], query_data['sequenceIdentityCutoff'])
if data:
assert('query_date' in data)
query_date = datetime.datetime.strptime(data['query_date'], BLAST.date_format)
age_in_hours = ((datetime.datetime.now() - query_date).total_seconds()) / (3600.0)
assert(age_in_hours > -24.01)
if age_in_hours < self.stale_period_in_hours:
return data['hits']
# POST the request and parse the PDB hits
result = self._post(xml_query)
hits = list(map(str, [l.strip().split(':')[0] for l in result.split('\n') if l.strip()]))
query_data['hits'] = hits
# Cache the results
if self.bio_cache:
self.bio_cache.save_sequence_blast(sequence, query_data['eCutOff'], query_data['matrix'], query_data['sequenceIdentityCutoff'], query_data)
return query_data['hits']
#########################
# Private functions
#########################
def _construct_query(self, query_data, cut_off = None, matrix = None, sequence_identity_cut_off = None):
if not 'matrix' in query_data:
query_data['matrix'] = matrix or self.matrix
if not 'eCutOff' in query_data:
query_data['eCutOff'] = cut_off or self.cut_off
if not 'sequenceIdentityCutoff' in query_data:
query_data['sequenceIdentityCutoff'] = sequence_identity_cut_off or self.sequence_identity_cut_off
query_data['query_date'] = datetime.datetime.strftime(datetime.datetime.now(), BLAST.date_format)
description = ''
extra_lines = []
if 'structureId' in query_data and 'chainId' in query_data:
description = 'Sequence Search (Structure:Chain = {structureId}:{chainId}, Expectation Value = {eCutOff}, Search Tool = BLAST)'
extra_lines += ['\t<structureId>{structureId}</structureId>'.format(**query_data), '\t<chainId>{chainId}</chainId>'.format(**query_data)]
elif 'sequence' in query_data:
description = 'Sequence Search (Sequence = {sequence}, Expectation Value = {eCutOff}, Search Tool = BLAST)'
extra_lines += ['\t<sequence>{sequence}</sequence>'.format(**query_data)]
xml_query = '\n'.join([
'<orgPdbQuery>',
'\t<queryType>org.pdb.query.simple.SequenceQuery</queryType>',
'\t<description>' + description + '</description>',
] + extra_lines + [
'\t<eCutOff>{eCutOff}</eCutOff>',
'\t<searchTool>blast</searchTool>',
'\t<sequenceIdentityCutoff>{sequenceIdentityCutoff}</sequenceIdentityCutoff>',
'</orgPdbQuery>']).format(**query_data)
return xml_query
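# Illustrative sketch (not part of the original file): the XML a structure/chain
# query renders to, with placeholder values substituted into the templates above
# (the real output uses tab indentation).
#
# <orgPdbQuery>
#   <queryType>org.pdb.query.simple.SequenceQuery</queryType>
#   <description>Sequence Search (Structure:Chain = 1ABC:A, Expectation Value = 0.001, Search Tool = BLAST)</description>
#   <structureId>1ABC</structureId>
#   <chainId>A</chainId>
#   <eCutOff>0.001</eCutOff>
#   <searchTool>blast</searchTool>
#   <sequenceIdentityCutoff>70</sequenceIdentityCutoff>
# </orgPdbQuery>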
def _post(self, xml_query):
'''POST the request.'''
req = urllib.request.Request(url = 'http://www.rcsb.org/pdb/rest/search', data=xml_query)
f = urllib.request.urlopen(req)
return f.read().strip()
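# Illustrative sketch (not part of the original file): the staleness test the
# caching code above applies before trusting saved hits. The saved timestamp is
# made up; 7 * 24 is the default stale_period_in_hours.
import datetime

saved = datetime.datetime.strptime('2015-01-01T00:00:00', '%Y-%m-%dT%H:%M:%S')
age_in_hours = (datetime.datetime.now() - saved).total_seconds() / 3600.0
use_cached_hits = age_in_hours < 7 * 24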
|
class BLAST(object):
'''Using a class makes it easier to set the BLAST parameters once.'''
def __init__(self, bio_cache = None, cache_dir = None, matrix = 'BLOSUM62', silent = False, cut_off = 0.001, sequence_identity_cut_off = 70, stale_period_in_hours = 7 * 24, min_sequence_length = 20, force_lookup = False):
'''If data is staler than stale_period_in_hours then we query it anew from the source e.g. BLAST results.'''
pass
def log(self, msg, silent, pfunk = None):
pass
def by_pdb(self, pdb_id, take_top_percentile = 30.0, cut_off = None, matrix = None, sequence_identity_cut_off = None, silent = None):
'''Returns a list of all PDB files which contain protein sequences similar to the protein sequences of pdb_id.
Only protein chains are considered in the matching so e.g. some results may have DNA or RNA chains or ligands
while some may not.
'''
pass
def blast_by_pdb_chain(self, pdb_id, chain_id, take_top_percentile = 30.0, cut_off = None, matrix = None, sequence_identity_cut_off = None, silent = None):
pass
def by_sequence(self, sequence, take_top_percentile = 30.0, cut_off = None, matrix = None, sequence_identity_cut_off = None, silent = None):
pass
def _construct_query(self, query_data, cut_off = None, matrix = None, sequence_identity_cut_off = None):
pass
def _post(self, xml_query):
'''POST the request.'''
pass
| 8 | 4 | 25 | 3 | 18 | 3 | 5 | 0.25 | 1 | 10 | 2 | 0 | 7 | 9 | 7 | 7 | 204 | 43 | 131 | 45 | 123 | 33 | 116 | 45 | 108 | 13 | 1 | 4 | 35 |
143,592 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/cluster/run_mix.py
|
klab.cluster.run_mix.Reporter
|
class Reporter:
def __init__(self,task,report_interval=1):
self.start=time.time()
self.lastreport=self.start
self.task=task
self.report_interval=report_interval
print('Starting '+task)
def report(self,n):
t=time.time()
if self.lastreport<(t-self.report_interval):
self.lastreport=t
sys.stdout.write(" Processed: "+str(n)+"\r" )
sys.stdout.flush()
def done(self):
print('Done %s, took %.3f seconds\n' % (self.task,time.time()-self.start))
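# Illustrative usage sketch (not part of the original file), assuming the
# Reporter class above is in scope: throttled progress output for a long loop.
r = Reporter('counting widgets', report_interval = 1)
for n in range(200000):
    r.report(n)
r.done()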
|
class Reporter:
def __init__(self,task,report_interval=1):
pass
def report(self,n):
pass
def done(self):
pass
| 4 | 0 | 5 | 0 | 5 | 0 | 1 | 0 | 0 | 2 | 0 | 0 | 3 | 4 | 3 | 3 | 15 | 0 | 15 | 9 | 11 | 0 | 15 | 9 | 11 | 2 | 0 | 1 | 4 |
143,593 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/cluster_template/cluster_template.py
|
klab.cluster_template.cluster_template.ClusterTemplate
|
class ClusterTemplate():
def __init__(self, num_steps, settings_dict = None):
# Defaults
self.job_dict_template_name = 'job_dict-%d.pickle'
self.script_template_name = 'run'
self.settings_dict = None
with open(template_file, 'r') as f:
self.template_file_lines = f.readlines()
self.arguments_with_defaults = global_arguments_with_defaults
self.list_required_arguments = global_list_required_arguments
self.list_required_arguments.extend( self.arguments_with_defaults )
# Arguments
self.num_steps = num_steps
assert( num_steps > 0 )
self.job_dicts = [{} for x in range(num_steps)]
if settings_dict:
self.set_settings_dict(settings_dict)
def set_job_dict(self, job_dict, step_num = 0):
self.job_dicts[step_num] = job_dict
def set_script_template_name(self, script_template_name):
# Check for invalid script name
if script_template_name[0].isdigit():
script_template_name = 'run_' + script_template_name
self.script_template_name = script_template_name
def set_job_dict_template_name(self, job_dict_template_name):
if '%d' not in job_dict_template_name:
job_dict_template_name += '-%d'
self.job_dict_template_name = job_dict_template_name
def set_settings_dict(self, settings_dict):
if 'job_dict_template_name' in settings_dict:
self.job_dict_template_name = settings_dict['job_dict_template_name']
required_arguments = [
'numjobs', 'mem_free',
'output_dir',
]
unrequired_arguments = [
'add_extra_ld_path',
'numclusterjobs',
'run_from_database',
'db_id',
]
for arg in required_arguments:
if arg not in settings_dict:
print('ERROR: Data dictionary missing argument', arg)
sys.exit(1)
for arg in unrequired_arguments:
if arg in settings_dict:
print('ERROR: Data dictionary cannot contain argument', arg)
sys.exit(1)
arguments_with_defaults = self.arguments_with_defaults
for arg in arguments_with_defaults:
if arg not in settings_dict and arg + '_list' not in settings_dict:
settings_dict[arg] = ''
# Handle arguments which used to be single but are now lists (for steps)
list_required_arguments = self.list_required_arguments
# Most of the if statements below could be simplified by making format_list_to_string recursive
for arg in list_required_arguments:
if arg in settings_dict and arg + '_list' in settings_dict:
raise Exception("Can't have list and arg versions of settings arg: " + str(arg))
if arg in settings_dict:
if arg == 'rosetta_args_list':
if not isinstance(settings_dict['rosetta_args_list'], str):
args_str = format_list_to_string(settings_dict['rosetta_args_list'])
settings_dict['rosetta_args_list_list'] = [str(args_str) for x in range(self.num_steps)]
else:
args_str = settings_dict['rosetta_args_list']
settings_dict['rosetta_args_list_list'] = [" ['" + str(args_str) + "']\n" for x in range(self.num_steps)]
else:
settings_dict[ arg + '_list' ] = [str(settings_dict[arg]) for x in range(self.num_steps)]
del( settings_dict[arg] )
elif arg + '_list' in settings_dict:
if arg == 'rosetta_args_list':
subl = ''
for l in settings_dict['rosetta_args_list_list']:
subl += ' [' + format_list_to_string(l) + '],\n'
settings_dict[ 'rosetta_args_list_list' ] = subl
else:
if isinstance(settings_dict[ arg + '_list' ], str):
settings_dict[ arg + '_list' ] = '[' + format_list_to_string(settings_dict[ arg + '_list' ]) + ']'
else:
raise Exception("Missing required argument (in _list form or otherwise): " + str(arg))
# There once was a way to run from a database, but it is no more, so we set False
settings_dict['run_from_database'] = 'False'
# Handle LD paths
if 'extra_ld_path' in settings_dict:
settings_dict['add_extra_ld_path'] = 'True'
else:
settings_dict['add_extra_ld_path'] = 'False'
settings_dict['extra_ld_path'] = ''
# Handle other options
if 'db_id' not in settings_dict:
settings_dict['db_id'] = ''
if 'tasks_per_process' not in settings_dict or settings_dict['tasks_per_process'] == 1:
settings_dict['tasks_per_process'] = 1
settings_dict['numclusterjobs'] = settings_dict['numjobs']
elif settings_dict['tasks_per_process'] > 1:
settings_dict['numclusterjobs'] = int(
math.ceil( float(settings_dict['numjobs']) / float(settings_dict['tasks_per_process']) )
)
if not os.path.isdir(settings_dict['output_dir']):
os.makedirs(settings_dict['output_dir'])
if 'scriptname' in settings_dict:
self.set_script_template_name(settings_dict['scriptname'])
self.settings_dict = settings_dict
def format_settings_dict(self):
formatted_settings_dict = {}
for arg in self.settings_dict:
new_arg = '#$#%s#$#' % arg
formatted_settings_dict[new_arg] = str(self.settings_dict[arg])
self.formatted_settings_dict = formatted_settings_dict
def verify_internal_data(self):
first_job_dict_len = len(self.job_dicts[0])
for job_dict in self.job_dicts[1:]:
if len(job_dict) != first_job_dict_len:
print(self.job_dicts[0])
print()
print(job_dict)
print(first_job_dict_len, len(job_dict))
raise AssertionError
for arg in self.list_required_arguments:
assert( arg + '_list' in self.settings_dict )
self.format_settings_dict()
def write_runs(self, script_name_template = None):
self.verify_internal_data()
job_pickle_file_relpaths = []
output_dir = self.settings_dict['output_dir']
for step_num in range(self.num_steps):
output_data_dir = os.path.join(output_dir, 'data-%d' % step_num)
job_pickle_file = os.path.join(output_data_dir, self.job_dict_template_name % step_num)
job_pickle_file_relpaths.append( os.path.relpath(job_pickle_file, output_dir) )
if not os.path.isdir(output_data_dir):
os.makedirs(output_data_dir)
if 'pickle_protocol' in self.settings_dict:
pickle_protocol = self.settings_dict['pickle_protocol']
else:
pickle_protocol = 2
with open(job_pickle_file, 'wb') as f:
pickle.dump(self.job_dicts[step_num], f, protocol = pickle_protocol)
self.settings_dict['job_pickle_files'] = '[ ' + format_list_to_string(job_pickle_file_relpaths) + ' ]'
self.format_settings_dict()
new_lines = []
for line in self.template_file_lines:
for arg in self.formatted_settings_dict:
line = line.replace(arg, self.formatted_settings_dict[arg])
new_lines.append(line)
with open(os.path.join(self.settings_dict['output_dir'], '%s.py' % self.script_template_name), 'w') as f:
for line in new_lines:
f.write(line)
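# Illustrative sketch (not part of the original file): the '#$#name#$#'
# placeholder substitution performed by format_settings_dict/write_runs above,
# reduced to a standalone snippet with made-up values.
settings = {'output_dir': '/tmp/run', 'mem_free': '2G'}
formatted = dict(('#$#%s#$#' % k, str(v)) for k, v in settings.items())
line = 'output_dir = "#$#output_dir#$#"  # requested mem_free: #$#mem_free#$#'
for placeholder, value in formatted.items():
    line = line.replace(placeholder, value)
assert line == 'output_dir = "/tmp/run"  # requested mem_free: 2G'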
|
class ClusterTemplate():
def __init__(self, num_steps, settings_dict = None):
pass
def set_job_dict(self, job_dict, step_num = 0):
pass
def set_script_template_name(self, script_template_name):
pass
def set_job_dict_template_name(self, job_dict_template_name):
pass
def set_settings_dict(self, settings_dict):
pass
def format_settings_dict(self):
pass
def verify_internal_data(self):
pass
def write_runs(self, script_name_template = None):
pass
| 9 | 0 | 22 | 3 | 18 | 1 | 5 | 0.06 | 0 | 6 | 0 | 0 | 8 | 9 | 8 | 8 | 181 | 32 | 141 | 45 | 132 | 9 | 123 | 42 | 114 | 23 | 0 | 4 | 43 |
143,594 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/colortext.py
|
klab.colortext.Exception
|
class Exception(Exception):
def __init__(self, msg):
self.message = make_error(msg)
def __str__(self):
return self.message
|
class Exception(Exception):
def __init__(self, msg):
pass
def __str__(self):
pass
| 3 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 0 | 0 | 0 | 2 | 2 | 1 | 2 | 2 | 5 | 0 | 5 | 4 | 2 | 0 | 5 | 4 | 2 | 1 | 0 | 0 | 2 |
143,595 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/comms/ftp.py
|
klab.comms.ftp.FTPException550
|
class FTPException550(Exception): pass
|
class FTPException550(Exception):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
143,596 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/comms/http.py
|
klab.comms.http.Connection
|
class Connection(object):
'''A class to keep a HTTPConnection open for multiple requests.'''
def __init__(self, url, timeout = None, attempts = 3):
self.timeout = timeout
self.attempts = attempts
self.url = url
self.connection = None
def _get_connection(self):
if not self.connection:
if self.timeout:
self.connection = HTTPConnection(self.url, timeout = self.timeout)
else:
self.connection = HTTPConnection(self.url)
return self.connection
def __del__(self):
if self.connection:
self.connection.close()
def _close(self):
if self.connection: self.connection.close()
def get(self, resource):
attempts_left = self.attempts
while attempts_left > 0:
try:
self._get_connection()
self.connection.request("GET", resource)
response = self.connection.getresponse()
contents = response.read()
if contents[0:6] == b"<html>":
raise Exception("Error retrieving %s." % os.path.split(self.url)[1])
if attempts_left != self.attempts:
print('Success.')
return contents
except Exception as e:
print(('Error retrieving {0} {1}.'.format(os.path.split(self.url)[1], resource)))
print((str(e)))
print((traceback.format_exc()))
attempts_left -= 1
if attempts_left > 0:
print('Retrying.')
self._close()
time.sleep(2)
raise Exception('get {0} failed'.format(resource))
@classmethod
def get_resource(cls, url, resource):
c = cls(url)
return c.get(resource)
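# Illustrative usage sketch (not part of the original file), assuming the class
# above: one connection reused for several retried GETs. The host and paths are
# placeholders and the calls would hit the network, so they are left commented.
# c = Connection('files.rcsb.org', timeout = 30, attempts = 3)
# pdb_a = c.get('/view/1UBQ.pdb')
# pdb_b = c.get('/view/2LZM.pdb')
# one_off = Connection.get_resource('files.rcsb.org', '/view/1UBQ.pdb')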
|
class Connection(object):
'''A class to keep a HTTPConnection open for multiple requests.'''
def __init__(self, url, timeout = None, attempts = 3):
pass
def _get_connection(self):
pass
def __del__(self):
pass
def _close(self):
pass
def get(self, resource):
pass
@classmethod
def get_resource(cls, url, resource):
pass
| 8 | 1 | 7 | 0 | 7 | 0 | 3 | 0.02 | 1 | 3 | 0 | 0 | 5 | 4 | 6 | 6 | 58 | 11 | 46 | 17 | 38 | 1 | 44 | 15 | 37 | 6 | 1 | 3 | 15 |
143,597 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/comms/mail.py
|
klab.comms.mail.MailServer
|
class MailServer(object):
def __init__(self, host = None, port = None):
self.host = host
self.port = port
def sendmail(self, subject, sender, recipients, plaintext, htmltext=None, cc=None, debug=False, useMIMEMultipart=True):
if recipients:
if type(recipients) == type(""):
recipients = [recipients]
elif type(recipients) != type([]):
raise Exception("Unexpected type for recipients.")
if cc:
if type(cc) == type(""):
recipients.append(cc)
elif type(cc) == type([]):
recipients.extend(cc)
else:
raise Exception("Unexpected type for cc.")
recipients = ";".join(recipients)
if plaintext and htmltext and useMIMEMultipart:
msg = MIMEMultipart('alternative')
else:
msg = email.Message.Message()
msg['Subject'] = subject
msg['From'] = sender
msg['To'] = recipients
msg['Reply-To'] = sender
if plaintext and htmltext and useMIMEMultipart:
part1 = MIMEText(plaintext, 'plain')
part2 = MIMEText(htmltext, 'html')
msg.attach(part1)
msg.attach(part2)
else:
msg.set_type("text/plain")
msg.set_payload(plaintext)
if debug:
print(msg)
else:
if self.host and self.port:
s = smtplib.SMTP(self.host, self.port)
elif self.host:
s = smtplib.SMTP(self.host)
else:
s = smtplib.SMTP()
s.connect()
s.sendmail(msg['From'], recipients, msg.as_string())
s.close()
return True
return False
def sendgmail(self, subject, recipients, plaintext, htmltext=None, cc=None, debug=False, useMIMEMultipart=True, gmail_account = 'kortemmelab@gmail.com', pw_filepath = None):
'''For this function to work, the password for the gmail user must be colocated with this file or passed in.'''
smtpserver = smtplib.SMTP("smtp.gmail.com", 587)
smtpserver.ehlo()
smtpserver.starttls()
smtpserver.ehlo()
gmail_account = 'kortemmelab@gmail.com'
if pw_filepath:
smtpserver.login(gmail_account, read_file(pw_filepath))
else:
smtpserver.login(gmail_account, read_file('pw'))
for recipient in recipients:
if htmltext:
msg = MIMEText(htmltext, 'html')
msg['From'] = gmail_account
msg['To'] = recipient
msg['Subject'] = subject
smtpserver.sendmail(gmail_account, recipient, msg.as_string())
else:
header = 'To:' + recipient + '\n' + 'From: ' + gmail_account + '\n' + 'Subject:' + subject + '\n'
msg = header + '\n ' + plaintext + '\n\n'
smtpserver.sendmail(gmail_account, recipient, msg)
smtpserver.close()
def sendgmail2(self, subject, recipients, plaintext, htmltext=None, cc=None, debug=False, useMIMEMultipart=True, gmail_account = 'kortemmelab@gmail.com', pw_filepath = None):
'''For this function to work, the password for the gmail user must be colocated with this file or passed in.'''
smtpserver = smtplib.SMTP("smtp.gmail.com", 587)
smtpserver.ehlo()
smtpserver.starttls()
smtpserver.ehlo()
gmail_account = 'kortemmelab@gmail.com'
if pw_filepath:
smtpserver.login(gmail_account, read_file(pw_filepath))
else:
smtpserver.login(gmail_account, read_file('pw'))
for recipient in recipients:
header = 'To:' + recipient + '\n' + 'From: ' + gmail_account + '\n' + 'Subject:' + subject + '\n'
if htmltext:
msg = header + '\n ' + htmltext + '\n\n'
else:
msg = header + '\n ' + plaintext + '\n\n'
smtpserver.sendmail(gmail_account, recipient, msg)
smtpserver.close()
|
class MailServer(object):
def __init__(self, host = None, port = None):
pass
def sendmail(self, subject, sender, recipients, plaintext, htmltext=None, cc=None, debug=False, useMIMEMultipart=True):
pass
def sendgmail(self, subject, recipients, plaintext, htmltext=None, cc=None, debug=False, useMIMEMultipart=True, gmail_account = 'kortemmelab@gmail.com', pw_filepath = None):
'''For this function to work, the password for the gmail user must be colocated with this file or passed in.'''
pass
def sendgmail2(self, subject, recipients, plaintext, htmltext=None, cc=None, debug=False, useMIMEMultipart=True, gmail_account = 'kortemmelab@gmail.com', pw_filepath = None):
'''For this function to work, the password for the gmail user must be colocated with this file or passed in.'''
pass
| 5 | 2 | 23 | 1 | 22 | 1 | 5 | 0.02 | 1 | 4 | 0 | 0 | 4 | 2 | 4 | 4 | 98 | 8 | 88 | 19 | 83 | 2 | 76 | 19 | 71 | 12 | 1 | 3 | 21 |
143,598 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/basics.py
|
klab.bio.basics.SubstitutionScore
|
class SubstitutionScore(object):
''' Container class to store substitution matrix scores for a residue match.
The clustal score is based on the Clustal Omega alignment output (clustal format). A score of 1 (asterisk) means
identical residue types, 0 (colon) indicates "conservation between groups of strongly similar properties -
scoring > 0.5 in the Gonnet PAM 250 matrix." A score of -1 (period) indicates "conservation between groups of
weakly similar properties - scoring =< 0.5 in the Gonnet PAM 250 matrix." '''
clustal_symbols = {1 : '*', 0 : ':', -1 : '.', -2 : ' '}
def __init__(self, clustal, from_residue, to_residue):
if not blosum62 or not pam250:
raise colortext.Exception('ERROR: Biopython is required for blosum62 and pam250 matrices.')
assert(-2 <= clustal <= 1)
self.clustal = clustal
self.blosum62 = blosum62[(from_residue, to_residue)]
self.pam250 = pam250[(from_residue, to_residue)]
def __repr__(self):
return "(%s, b%d, p%d)" % (SubstitutionScore.clustal_symbols[self.clustal], self.blosum62, self.pam250)
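# Illustrative sketch (not part of the original file): equivalent lookups with
# the current Biopython API (Bio.Align.substitution_matrices), which replaces
# the module-level blosum62/pam250 dicts this class expects.
from Bio.Align import substitution_matrices

blosum62_m = substitution_matrices.load('BLOSUM62')
pam250_m = substitution_matrices.load('PAM250')
print(blosum62_m['A', 'S'], pam250_m['A', 'S'])  # pairwise substitution scores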
|
class SubstitutionScore(object):
''' Container class to store substitution matrix scores for a residue match.
The clustal score is based on the Clustal Omega alignment output (clustal format). A score of 1 (asterisk) means
identical residue types, 0 (colon) indicates "conservation between groups of strongly similar properties -
scoring > 0.5 in the Gonnet PAM 250 matrix." A score of -1 (period) indicates "conservation between groups of
weakly similar properties - scoring =< 0.5 in the Gonnet PAM 250 matrix." '''
def __init__(self, clustal, from_residue, to_residue):
pass
def __repr__(self):
pass
| 3 | 1 | 5 | 1 | 5 | 0 | 2 | 0.45 | 1 | 1 | 1 | 0 | 2 | 3 | 2 | 2 | 20 | 4 | 11 | 7 | 8 | 5 | 11 | 7 | 8 | 2 | 1 | 1 | 3 |
143,599 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/pdb.py
|
klab.bio.pdb.RequestedIonsWithoutParsingException
|
class RequestedIonsWithoutParsingException(Exception): pass
|
class RequestedIonsWithoutParsingException(Exception):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
143,600 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/pymolmod/colors.py
|
klab.bio.pymolmod.colors.ColorScheme
|
class ColorScheme(object):
'''A dict wrapper class. The dict that is stored is intended to have a tree structure. The paths of the tree describe
how the color should be used e.g. RosettaModel.bb should be used to color the backbone of a Rosetta model. The leaves of the
tree are colors. If a new color is needed, use the create_new_color_command function to define the new color in
the script before use.'''
def __init__(self, custom_color_scheme = {}):
'''If a color_scheme is passed in then this is merged with the default color scheme.'''
color_scheme = {}
color_scheme.update(default_color_scheme)
display_scheme = {}
display_scheme.update(default_display_scheme)
if custom_color_scheme:
assert(type(custom_color_scheme) == type(predefined))
color_scheme.update(custom_color_scheme)
self.color_scheme = color_scheme
self.name = 'Default'
def update(self, path, node):
'''Update the dict with a new color using a 'path' through the dict. You can either pass an existing path e.g.
'Scaffold.mutations' to override a color or part of the hierarchy or you can add a new leaf node or dict.'''
assert(type(path) == type(self.name))
assert(type(node) == type(self.name) or type(node) == type(predefined))
d = self.color_scheme
tokens = path.split('.')
for t in tokens[:-1]:
d = d.get(t)
if d == None:
raise Exception("Path '%s' not found." % path)
d[tokens[-1]] = node
def lookup(self, path, must_be_leaf = False):
'''Looks up a part of the color scheme. If used for looking up colors, must_be_leaf should be True.'''
assert(type(path) == type(self.name))
d = self.color_scheme
tokens = path.split('.')
for t in tokens[:-1]:
d = d.get(t)
if d == None:
raise Exception("Path '%s' not found." % path)
if must_be_leaf:
assert(type(d[tokens[-1]]) == type(self.name))
return d[tokens[-1]]
def __repr__(self):
return str(self.color_scheme)
def __getitem__(self, path):
'''This lets us use the object somewhat like a dict where we do a lookup using a path e.g. cs['Scaffold.mutations']
This also lets us use the object in a string formatting e.g. print('%(Scaffold.mutations)s' % cs) which is useful
for the PyMOL script generators.'''
return self.lookup(path)
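# Illustrative usage sketch (not part of the original file), assuming the class
# and its module-level default schemes are in scope: path-based update/lookup
# plus the %-formatting hook supplied by __getitem__.
cs = ColorScheme({'Scaffold': {'mutations': 'yellow'}})
cs.update('Scaffold.bb', 'grey50')
assert cs.lookup('Scaffold.mutations', must_be_leaf = True) == 'yellow'
print('color %(Scaffold.mutations)s, scaffold_muts' % cs)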
|
class ColorScheme(object):
'''A dict wrapper class. The dict that is stored is intended to have a tree structure. The paths of the tree describe
how the color should be used e.g. RosettaModel.bb should be used to color the backbone of a Rosetta model. The leaves of the
tree are colors. If a new color is needed, use the create_new_color_command function to define the new color in
the script before use.'''
def __init__(self, custom_color_scheme = {}):
'''If a color_scheme is passed in then this is merged with the default color scheme.'''
pass
def update(self, path, node):
'''Update the dict with a new color using a 'path' through the dict. You can either pass an existing path e.g.
'Scaffold.mutations' to override a color or part of the hierarchy or you can add a new leaf node or dict.'''
pass
def lookup(self, path, must_be_leaf = False):
'''Looks up a part of the color scheme. If used for looking up colors, must_be_leaf should be True.'''
pass
def __repr__(self):
pass
def __getitem__(self, path):
'''This lets us use the object somewhat like a dict where we do a lookup using a path e.g. cs['Scaffold.mutations']
This also lets us use the object in a string formatting e.g. print('%(Scaffold.mutations)s' % cs) which is useful
for the PyMOL script generators.'''
pass
| 6 | 5 | 9 | 0 | 7 | 1 | 2 | 0.31 | 1 | 3 | 0 | 0 | 5 | 2 | 5 | 5 | 54 | 7 | 36 | 16 | 30 | 11 | 36 | 16 | 30 | 4 | 1 | 2 | 11 |
143,601 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/pdbtm.py
|
klab.bio.pdbtm.record_iterator
|
class record_iterator(object):
'''This class is deprecated by PDBTM.get_xml.'''
def __init__(self, xml_contents):
self.records = re.findall('(<pdbtm\s*.*?</pdbtm>)', xml_contents, re.DOTALL)
def get(self, pdb_id):
for r in self.records:
id = re.match(r'<pdbtm.*?ID="(.*?)".*>', r, re.DOTALL)
assert(id)
id = id.group(1)
if id.upper() == pdb_id.upper():
return r
|
class record_iterator(object):
'''This class is deprecated by PDBTM.get_xml.'''
def __init__(self, xml_contents):
pass
def get(self, pdb_id):
pass
| 3 | 1 | 5 | 0 | 5 | 0 | 2 | 0.1 | 1 | 0 | 0 | 0 | 2 | 1 | 2 | 2 | 12 | 1 | 10 | 5 | 7 | 1 | 10 | 5 | 7 | 3 | 1 | 2 | 4 |
143,602 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/pdbtm.py
|
klab.bio.pdbtm.PDBTM
|
class PDBTM(object):
PDBTM_entry_tag_type = '{http://pdbtm.enzim.hu}pdbtm'
PDBTM_membrane_tag_type = '{http://pdbtm.enzim.hu}MEMBRANE'
PDBTM_rawres_tag_type = '{http://pdbtm.enzim.hu}RAWRES'
PDBTM_tmtype_tag_type = '{http://pdbtm.enzim.hu}TMTYPE'
non_transmembrane_tmtypes = set(['Soluble', 'No_Protein', 'Nucleotide', 'Virus', 'Pilus', 'Ca_Globular', 'Tm_Part'])
transmembrane_tmtypes = set(['Tm_Alpha', 'Tm_Beta', 'Tm_Coil', 'Tm_Ca'])
def __init__(self, xml_contents, restrict_to_transmembrane_proteins = True):
self.xml_contents = xml_contents.strip()
# At some point, this tag crept into the PDBTM XML which the parser below cannot handle
self.xml_contents = self.xml_contents.replace('''<?xml version="1.0"?>''', '')
self.restrict_to_transmembrane_proteins = restrict_to_transmembrane_proteins
@staticmethod
def _get_tm_type(elem):
for child in elem:
if child.tag == PDBTM.PDBTM_rawres_tag_type:
for gchild in child:
if gchild.tag == PDBTM.PDBTM_tmtype_tag_type:
return gchild.text.strip()
return 'N/A'
def _get_pdb_id(self, elem, **kwargs):
'''If self.restrict_to_transmembrane_proteins is False then this adds all ids to self.ids. Otherwise, only transmembrane protein ids are added.'''
id = elem.attrib['ID']
if self.restrict_to_transmembrane_proteins:
tmp = elem.attrib['TMP']
assert(tmp == 'no' or tmp == 'yes' or tmp == 'not')
if tmp == 'yes':
self.ids[id] = PDBTM._get_tm_type(elem)
else:
self.ids[id] = self.ids.get(id, 0) + 1
def get_pdb_ids(self):
'''Returns the sorted list of PDB IDs from the records.'''
return sorted(self.get_pdb_id_map().keys())
def get_pdb_id_map(self):
''' Returns a dict mapping PDB IDs to:
i) their number of associated records, if self.restrict_to_transmembrane_proteins is False;
ii) the type of transmembrane protein if self.restrict_to_transmembrane_proteins is True.
At the time of writing this (2014-12-03), there were 106,094 PDB IDs and 106,090 unique IDs.
These records had duplicate entries: '2amk', '2ar1', '3b4r', '4k5y'.'''
self.ids = {}
context = etree.iterparse(io.BytesIO(self.xml_contents), events=('end',), tag=self.PDBTM_entry_tag_type)
fast_iter(context, self._get_pdb_id)
return self.ids
def _get_membrane_xml(self, elem, pdb_id):
assert(elem.tag == self.PDBTM_entry_tag_type)
id = elem.attrib['ID'] or ''
if id.upper() == pdb_id:
for child in elem:
if child.tag == self.PDBTM_membrane_tag_type:
self.tmp_string = etree.tostring(child)
raise EarlyOut()
def get_membrane_xml(self, pdb_id):
''' Returns the <MEMBRANE> tag XML for pdb_id if the tag exists.'''
self.tmp_string = None
context = etree.iterparse(io.BytesIO(self.xml_contents), events=('end',), tag=self.PDBTM_entry_tag_type)
try:
fast_iter(context, self._get_membrane_xml, pdb_id = pdb_id.upper())
except EarlyOut: pass
return self.tmp_string
def _get_xml(self, elem, pdb_id):
assert(elem.tag == self.PDBTM_entry_tag_type)
id = elem.attrib['ID'] or ''
if id.upper() == pdb_id:
self.tmp_string = etree.tostring(elem)
raise EarlyOut()
def get_xml(self, pdb_id):
''' Returns the XML for pdb_id if the tag exists.'''
self.tmp_string = None
context = etree.iterparse(io.BytesIO(self.xml_contents), events=('end',), tag=self.PDBTM_entry_tag_type)
try:
fast_iter(context, self._get_xml, pdb_id = pdb_id.upper())
except EarlyOut: pass
return self.tmp_string
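# Illustrative sketch (not part of the original file): a fast_iter in the style
# these methods rely on, following the common lxml pattern of clearing each
# element after use so a full PDBTM dump streams in near-constant memory. The
# real klab fast_iter may differ in detail.
def fast_iter(context, func, **kwargs):
    for event, elem in context:
        func(elem, **kwargs)
        elem.clear()
        # lxml-specific: also free already-processed siblings held by the parent.
        while elem.getprevious() is not None:
            del elem.getparent()[0]
    del context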
|
class PDBTM(object):
def __init__(self, xml_contents, restrict_to_transmembrane_proteins = True):
pass
@staticmethod
def _get_tm_type(elem):
pass
def _get_pdb_id(self, elem, **kwargs):
'''If self.restrict_to_transmembrane_proteins is False then this adds all ids to self.ids. Otherwise, only transmembrane protein ids are added.'''
pass
def get_pdb_ids(self):
'''Returns the sorted list of PDB IDs from the records.'''
pass
def get_pdb_id_map(self):
''' Returns a dict mapping PDB IDs to:
i) their number of associated records, if self.restrict_to_transmembrane_proteins is False;
ii) the type of transmembrane protein if self.restrict_to_transmembrane_proteins is True.
At the time of writing this (2014-12-03), there were 106,094 PDB IDs and 106,090 unique IDs.
These records had duplicate entries: '2amk', '2ar1', '3b4r', '4k5y'.'''
pass
def _get_membrane_xml(self, elem, pdb_id):
pass
def get_membrane_xml(self, pdb_id):
''' Returns the <MEMBRANE> tag XML for pdb_id if the tag exists.'''
pass
def _get_xml(self, elem, pdb_id):
pass
def get_xml(self, pdb_id):
''' Returns the XML for pdb_id if the tag exists.'''
pass
| 11 | 5 | 7 | 0 | 6 | 1 | 2 | 0.16 | 1 | 1 | 1 | 0 | 8 | 4 | 9 | 9 | 92 | 19 | 63 | 28 | 52 | 10 | 63 | 27 | 53 | 5 | 1 | 4 | 21 |
143,603 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/pdbtm.py
|
klab.bio.pdbtm.EarlyOut
|
class EarlyOut(Exception): pass
|
class EarlyOut(Exception):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
143,604 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/db/mysql.py
|
klab.db.mysql.DatabaseInterface
|
class DatabaseInterface(object):
def __init__(self, settings, isInnoDB=True, numTries=1, host=None, db=None, user=None, passwd=None, port=None,
unix_socket=None, passwdfile=None, use_utf=False, use_locking=True):
self.connection = None
self.StdCursor_connection = None
self.SSDictCursor_connection = None
self.queries_run = 0
self.procedures_run = 0
self.use_utf = use_utf
self.isInnoDB = isInnoDB
self.host = host or settings["SQLHost"]
self.db = db or settings["SQLDatabase"]
self.user = user or settings["SQLUser"]
self.passwd = passwd or settings.get("SQLPassword") or '' # allow for empty passwords e.g. for anonymous accounts with read-only access
self.port = port or settings["SQLPort"]
self.unix_socket = unix_socket or settings["SQLSocket"]
if use_locking == True or use_locking == False:
self.use_locking = use_locking
else:
self.use_locking = settings["SQLUseLocking"]
self.numTries = numTries
self.lastrowid = None
if (not self.passwd) and passwdfile:
if os.path.exists(passwdfile):
self.passwd = read_file(passwdfile).strip()
else:
self.passwd = getpass.getpass("Enter password to connect to MySQL database:")
self.locked = False
if use_locking:
self.lockstring = "LOCK TABLES %s" % ", ".join(["%s WRITE" % list(r.values())[0] for r in self.execute("SHOW TABLES")])
self.unlockstring = "UNLOCK TABLES"
else:
self.lockstring = ""
self.unlockstring = ""
# Store a list of the table names
self.TableNames = [list(r.values())[0] for r in self.execute("SHOW TABLES")]
# Store a hierarchy of objects corresponding to the table names and their field names
self.FieldNames = _FieldNames(None)
self.FlatFieldNames = _FieldNames(None)
tablenames = self.TableNames
for tbl in tablenames:
setattr(self.FieldNames, tbl, _FieldNames(tbl))
fieldDescriptions = self.execute("SHOW COLUMNS FROM `%s`" % tbl)
for field in fieldDescriptions:
fieldname = field["Field"]
setattr(getattr(self.FieldNames, tbl), fieldname, fieldname)
setattr(self.FlatFieldNames, fieldname, fieldname)
getattr(self.FieldNames, tbl).makeReadOnly()
self.FieldNames.makeReadOnly()
self.FlatFieldNames.makeReadOnly()
def __del__(self):
if self.connection and self.connection.open:
self.connection.close()
if self.StdCursor_connection and self.StdCursor_connection.open:
self.StdCursor_connection.close()
def close(self):
if self.connection and self.connection.open:
self.connection.close()
if self.StdCursor_connection and self.StdCursor_connection.open:
self.StdCursor_connection.close()
def checkIsClosed(self):
assert ((not (self.connection) or not (self.connection.open)) and (not (self.StdCursor_connection) or not (self.StdCursor_connection.open)))
def _get_connection(self, force = False):
if force or not (self.connection and self.connection.open):
if self.use_utf:
self.connection = MySQLdb.connect(host=self.host, db=self.db, user=self.user, passwd=self.passwd,
port=self.port, unix_socket=self.unix_socket, cursorclass=DictCursor,
charset='utf8', use_unicode=True)
else:
self.connection = MySQLdb.connect(host=self.host, db=self.db, user=self.user, passwd=self.passwd,
port=self.port, unix_socket=self.unix_socket, cursorclass=DictCursor)
def _get_StdCursor_connection(self):
if not (self.StdCursor_connection and self.StdCursor_connection.open):
if self.use_utf:
self.StdCursor_connection = MySQLdb.connect(host=self.host, db=self.db, user=self.user, passwd=self.passwd,
port=self.port, unix_socket=self.unix_socket, cursorclass=StdCursor,
charset='utf8', use_unicode=True)
else:
self.StdCursor_connection = MySQLdb.connect(host=self.host, db=self.db, user=self.user, passwd=self.passwd,
port=self.port, unix_socket=self.unix_socket, cursorclass=StdCursor)
def _get_SSDictCursor_connection(self):
if not (self.SSDictCursor_connection and self.SSDictCursor_connection.open):
if self.use_utf:
self.SSDictCursor_connection = MySQLdb.connect(host=self.host, db=self.db, user=self.user, passwd=self.passwd,
port=self.port, unix_socket=self.unix_socket, cursorclass=SSDictCursor,
charset='utf8', use_unicode=True)
else:
self.SSDictCursor_connection = MySQLdb.connect(host=self.host, db=self.db, user=self.user, passwd=self.passwd,
port=self.port, unix_socket=self.unix_socket, cursorclass=SSDictCursor)
def _close_connection(self):
self.close()
def iterate_query(self, query, arraysize=100000):
self._get_SSDictCursor_connection()
c = self.SSDictCursor_connection.cursor()
c.execute(query)
while True:
nextrows = c.fetchmany(arraysize)
if not nextrows:
break
for row in nextrows:
yield row
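# Illustrative usage sketch (not part of the original file): streaming a large
# table through the server-side cursor above without holding every row in
# memory. The table name is a placeholder and a live connection is required,
# so the call is left commented.
# for row in db.iterate_query('SELECT * FROM SomeLargeTable'):
#     handle(row)  # each row is a dict keyed by column name (SSDictCursor)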
def getLastRowID(self):
return self.lastrowid
def locked_execute(self, sql, parameters=None, cursorClass=DictCursor, quiet=False):
'''We are lock-happy here but SQL performance is not currently an issue daemon-side.'''
return self.execute(sql, parameters=parameters, quiet=quiet, locked=True, do_commit=True)
def transaction_insert_dict_auto_inc(self, transaction_cursor, tblname, d, unique_id_fields = [], fields = None, check_existing = False, id_field = 'ID'):
'''A transaction wrapper for inserting dicts into fields with an autoincrementing ID. Insert the record and return the associated ID (long).'''
sql, params, record_exists = self.create_insert_dict_string(tblname, d, PKfields=unique_id_fields, fields=fields, check_existing = check_existing)
if not record_exists:
transaction_cursor.execute(sql, params)
id = transaction_cursor.lastrowid
if id == None:
id = self.get_unique_record('SELECT * FROM {0} WHERE {1}'.format(tblname, ' AND '.join([f + '=%s' for f in unique_id_fields])), parameters = tuple([d[f] for f in unique_id_fields]))[id_field]
assert(id)
return id
def get_unique_record(self, sql, parameters = None, quiet = False, locked = False):
'''I use this pattern a lot. Return the single record corresponding to the query.'''
results = self.execute_select(sql, parameters = parameters, quiet = quiet, locked = locked)
assert(len(results) == 1)
return results[0]
def execute_select(self, sql, parameters = None, quiet = False, locked = False):
if locked:
print(('LOCKED execute_select {0} {1}'.format(sql, parameters)))
return self.execute(sql, parameters=parameters, quiet=quiet, locked=locked, do_commit=False)
def execute_select_StdCursor(self, sql, parameters=None, quiet=False, locked=False):
return self.execute_StdCursor(sql, parameters=parameters, quiet=quiet, locked=locked, do_commit=False)
def execute_select_SSDictCursor(self, sql, parameters=None, quiet=False, locked=False):
return self.execute_SSDictCursor(sql, parameters=parameters, quiet=quiet, locked=locked, do_commit=False)
def execute_SSDictCursor(self, sql, parameters=None, quiet=False, locked=False, do_commit=True):
"""Execute SQL query using a server-side dictionary cursor (SSDictCursor)."""
self.queries_run += 1
i = 0
errcode = 0
caughte = None
cursor = None
cursorClass = SSDictCursor
if sql.find(";") != -1 or sql.find("\\G") != -1:
# Catches some injections
raise Exception("The SQL command '%s' contains a semi-colon or \\G. This is a potential SQL injection." % sql)
while i < self.numTries:
i += 1
try:
self._get_SSDictCursor_connection()
cursor = self.SSDictCursor_connection.cursor()
if locked:
if self.lockstring:
print((sql, parameters))
print('LOCKING')
cursor.execute(self.lockstring)
self.locked = True
if parameters:
errcode = cursor.execute(sql, parameters)
else:
errcode = cursor.execute(sql)
self.lastrowid = int(cursor.lastrowid)
if do_commit and self.isInnoDB:
self.SSDictCursor_connection.commit()
results = cursor.fetchall()
if locked:
if self.unlockstring:
print('UNLOCKING')
cursor.execute(self.unlockstring)
self.locked = False
cursor.close()
return results
except MySQLdb.OperationalError as e:
if cursor:
if self.locked:
if self.unlockstring:
print('UNLOCKING')
cursor.execute(self.unlockstring)
self.locked = False
cursor.close()
caughte = str(e)
errcode = e.args[0]
continue
except Exception as e:
if cursor:
if self.locked:
if self.unlockstring:
print('UNLOCKING')
cursor.execute(self.unlockstring)
self.locked = False
cursor.close()
caughte = str(e)
traceback.print_exc()
break
sleep(0.2)
if not quiet:
sys.stderr.write(
"\nSQL execution error in query %s at %s:" % (sql, datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
sys.stderr.write("\nErrorcode/Error: %d - '%s'.\n" % (errcode, str(caughte)))
sys.stderr.flush()
raise MySQLdb.OperationalError(caughte)
def execute_StdCursor(self, sql, parameters=None, quiet=False, locked=False, do_commit=True):
"""Execute SQL query. This uses DictCursor by default."""
self.queries_run += 1
i = 0
errcode = 0
caughte = None
cursor = None
cursorClass = StdCursor
if sql.find(";") != -1 or sql.find("\\G") != -1:
# Catches some injections
raise Exception("The SQL command '%s' contains a semi-colon or \\G. This is a potential SQL injection." % sql)
while i < self.numTries:
i += 1
try:
self._get_StdCursor_connection()
cursor = self.StdCursor_connection.cursor()
if locked:
if self.lockstring:
print((sql, parameters))
print('LOCKING')
cursor.execute(self.lockstring)
self.locked = True
if parameters:
errcode = cursor.execute(sql, parameters)
else:
errcode = cursor.execute(sql)
self.lastrowid = int(cursor.lastrowid)
if do_commit and self.isInnoDB:
self.StdCursor_connection.commit()
results = cursor.fetchall()
if locked:
if self.unlockstring:
print('UNLOCKING')
cursor.execute(self.unlockstring)
self.locked = False
cursor.close()
return results
except MySQLdb.OperationalError as e:
if cursor:
if self.locked:
if self.unlockstring:
print('UNLOCKING')
cursor.execute(self.unlockstring)
self.locked = False
cursor.close()
caughte = str(e)
errcode = e.args[0]
continue
except Exception as e:
if cursor:
if self.locked:
if self.unlockstring:
print('UNLOCKING')
cursor.execute(self.unlockstring)
self.locked = False
cursor.close()
caughte = str(e)
traceback.print_exc()
break
sleep(0.2)
if not quiet:
sys.stderr.write(
"\nSQL execution error in query %s at %s:" % (sql, datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
sys.stderr.write("\nErrorcode/Error: %d - '%s'.\n" % (errcode, str(caughte)))
sys.stderr.flush()
raise MySQLdb.OperationalError(caughte)
def list_stored_procedures(self):
return [r['Name'] for r in self.execute("SHOW PROCEDURE STATUS")]
def run_transaction(self, command_list, do_commit=True):
'''This can be used to stage multiple commands and roll back the transaction if an error occurs. This is useful
if you want to remove multiple records in multiple tables for one entity but do not want the deletion to occur
if the entity is tied to table not specified in the list of commands. Performing this as a transaction avoids
the situation where the records are partially removed. If do_commit is false, the entire transaction is cancelled.'''
# I decided against creating this for now.
# It may be more useful to create a stored procedure like in e.g. _create_protein_deletion_stored_procedure
# in the DDGadmin project and then use callproc
for c in command_list:
if c.find(";") != -1 or c.find("\\G") != -1:
# Catches *some* injections
raise Exception("The SQL command '%s' contains a semi-colon or \\G. This is a potential SQL injection." % c)
if do_commit:
sql = "START TRANSACTION;\n%s;\nCOMMIT" % "\n".join(command_list)
else:
sql = "START TRANSACTION;\n%s;" % "\n".join(command_list)
#print(sql)
return
def execute(self, sql, parameters=None, quiet=False, locked=False, do_commit=True, allow_unsafe_query=False):
"""Execute SQL query. This uses DictCursor by default."""
self.queries_run += 1
i = 0
errcode = 0
caughte = None
cursor = None
cursorClass = DictCursor
if not(allow_unsafe_query) and (sql.find(";") != -1 or sql.find("\\G") != -1):
# Catches some injections
raise Exception("The SQL command '%s' contains a semi-colon or \\G. This is a potential SQL injection." % sql)
while i < self.numTries:
i += 1
try:
self._get_connection(force = i > 1)
cursor = self.connection.cursor()
if locked:
if self.lockstring:
print((sql, parameters))
print('LOCKING')
cursor.execute(self.lockstring)
self.locked = True
if parameters:
errcode = cursor.execute(sql, parameters)
else:
errcode = cursor.execute(sql)
self.lastrowid = int(cursor.lastrowid)
if do_commit and self.isInnoDB:
self.connection.commit()
results = cursor.fetchall()
if locked:
if self.unlockstring:
print('UNLOCKING')
cursor.execute(self.unlockstring)
self.locked = False
cursor.close()
return results
except MySQLdb.OperationalError as e:
if not quiet:
print((i, "MySQLdb.OperationalError", str(e)))
if cursor:
if self.locked:
if self.unlockstring:
print('UNLOCKING')
cursor.execute(self.unlockstring)
self.locked = False
cursor.close()
caughte = str(e)
errcode = e.args[0]
continue
except Exception as e:
if not quiet:
print((i, "Exception", str(e)))
if cursor:
if self.locked:
if self.unlockstring:
print('UNLOCKING')
cursor.execute(self.unlockstring)
self.locked = False
cursor.close()
caughte = str(e)
traceback.print_exc()
break
sleep(0.2)
if not quiet:
sys.stderr.write(
"\nSQL execution error in query '%s' at %s:" % (sql, datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
sys.stderr.write("\nErrorcode/Error: %d - '%s'.\n" % (errcode, str(caughte)))
sys.stderr.flush()
raise MySQLdb.OperationalError(caughte)
def call_select_proc(self, procname, parameters=(), quiet=False):
"""Calls a MySQL stored procedure procname and returns the set of results."""
self.procedures_run += 1
i = 0
errcode = 0
caughte = None
if not re.match(r"^\s*\w+\s*$", procname):
raise Exception("Expected a stored procedure name in callproc but received '%s'." % procname)
while i < self.numTries:
i += 1
try:
self._get_connection()
cursor = self.connection.cursor()
if type(parameters) != type(()):
parameters = (parameters,)
errcode = cursor.callproc(procname, parameters)
results = cursor.fetchall()
self.lastrowid = int(cursor.lastrowid)
cursor.close()
return results
except MySQLdb.OperationalError as e:
self._close_connection()
errcode = e.args[0]
caughte = e
continue
except:
self._close_connection()
traceback.print_exc()
break
if not quiet:
sys.stderr.write("\nSQL execution error call stored procedure %s at %s:" % (
procname, datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
sys.stderr.write("\nErrorcode/Error: %d - '%s'.\n" % (errcode, str(caughte)))
sys.stderr.flush()
raise MySQLdb.OperationalError(caughte)
def callproc(self, procname, parameters=(), quiet=False, expect_return_value=False):
"""Calls a MySQL stored procedure procname and returns the return values. This uses DictCursor.
To get return values back out of a stored procedure, prefix the parameter with a @ character.
"""
self.procedures_run += 1
i = 0
errcode = 0
caughte = None
out_param_indices = []
for j in range(len(parameters)):
p = parameters[j]
if type(p) == type('') and p[0] == '@':
assert(p.find(' ') == -1)
out_param_indices.append(j)
if procname not in self.list_stored_procedures():
raise Exception("The stored procedure '%s' does not exist." % procname)
if not re.match(r"^\s*\w+\s*$", procname):
raise Exception("Expected a stored procedure name in callproc but received '%s'." % procname)
while i < self.numTries:
i += 1
try:
self._get_connection()
cursor = self.connection.cursor()
if type(parameters) != type(()):
parameters = (parameters,)
errcode = cursor.callproc(procname, parameters)
self.lastrowid = int(cursor.lastrowid)
cursor.close()
# Get the out parameters
out_param_results = []
if out_param_indices:
out_param_results = self.execute('SELECT %s' % ", ".join(['@_%s_%d AS %s' % (procname, pindex, parameters[pindex][1:]) for pindex in out_param_indices]))
return out_param_results
except MySQLdb.OperationalError as e:
self._close_connection()
errcode = e.args[0]
caughte = e
continue
except:
self._close_connection()
traceback.print_exc()
break
if not quiet:
sys.stderr.write("\nSQL execution error call stored procedure %s at %s:" % (
procname, datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
sys.stderr.write("\nErrorcode/Error: %d - '%s'.\n" % (errcode, str(caughte)))
sys.stderr.flush()
raise MySQLdb.OperationalError(caughte)
def insertDict(self, tblname, d, fields=None):
'''Simple function for inserting a dictionary whose keys match the fieldnames of tblname.'''
self.queries_run += 1
if fields == None:
fields = sorted(d.keys())
values = None
SQL = None
try:
SQL = 'INSERT INTO %s (%s) VALUES (%s)' % (
tblname, ", ".join(fields), ','.join(['%s' for x in range(len(fields))]))
values = tuple([d[k] for k in fields])
self.locked_execute(SQL, parameters=values)
except Exception as e:
if SQL and values:
sys.stderr.write("\nSQL execution error in query '%s' %% %s at %s:" % (
SQL, values, datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
sys.stderr.write("\nError: '%s'.\n" % (str(e)))
sys.stderr.flush()
raise Exception("Error occurred during database insertion: '%s'." % str(e))
def t_insert_dict_if_new(self, tblname, d, PKfields, fields=None):
'''A version of insertDictIfNew for transactions. This does not call commit.'''
SQL, values = self._insert_dict_if_new_inner(tblname, d, PKfields, fields=fields)
if SQL != False:
self.execute_select(SQL, parameters=values, locked=True)
return True, d
return False, values
def insertDictIfNew(self, tblname, d, PKfields, fields=None, locked = True):
'''Simple function for inserting a dictionary whose keys match the fieldnames of tblname. The function returns two values, the
second of which is a dict containing the primary keys of the record. If a record already exists then no insertion is performed and
(False, the dictionary of existing primary keys) is returned. Otherwise, the record is inserted into the database and (True, d)
is returned.'''
SQL, values = self._insert_dict_if_new_inner(tblname, d, PKfields, fields=fields, locked = locked)
if SQL != False:
if locked:
self.locked_execute(SQL, parameters=values)
else:
self.execute(SQL, parameters=values, locked = False)
return True, d
return False, values
def _insert_dict_if_new_inner(self, tblname, d, PKfields, fields=None, locked = True):
'''The main function of the insert_dict functions.
This creates and returns the SQL query and parameters used by the other functions but does not insert any data into the database.
Simple function for inserting a dictionary whose keys match the fieldnames of tblname. The function returns two values, the
second of which is a dict containing the primary keys of the record. If a record already exists then no insertion is performed and
(False, the dictionary of existing primary keys) is returned. Otherwise, the record is inserted into the database and (True, d)
is returned.'''
self.queries_run += 1
if type(PKfields) == type(""):
PKfields = [PKfields]
if fields == None:
fields = sorted(d.keys())
values = None
SQL = None
try:
# Search for existing records
wherestr = []
PKvalues = []
for PKfield in PKfields:
if d[PKfield] == None:
wherestr.append("%s IS NULL" % PKfield)
else:
wherestr.append("%s=%%s" % PKfield)
PKvalues.append(d[PKfield])
PKfields = ",".join(PKfields)
wherestr = " AND ".join(wherestr)
existingRecord = self.execute_select("SELECT %s FROM %s" % (PKfields, tblname) + " WHERE %s" % wherestr, parameters=tuple(PKvalues), locked = locked)
if existingRecord:
return False, existingRecord[0]
SQL = 'INSERT INTO %s (%s) VALUES (%s)' % (
tblname, ", ".join(fields), ','.join(['%s' for x in range(len(fields))]))
values = tuple([d[k] for k in fields])
return SQL, values
except Exception as e:
if SQL and values:
sys.stderr.write("\nSQL execution error in query '%s' %% %s at %s:" % (
SQL, values, datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
sys.stderr.write("\nError: '%s'.\n" % (str(e)))
sys.stderr.flush()
raise Exception("Error occurred during database insertion: '%s'. %s" % (str(e), traceback.format_exc()))
def create_insert_dict_string(self, tblname, d, PKfields=[], fields=None, check_existing = False):
'''The main function of the insert_dict functions.
This creates and returns the SQL query and parameters used by the other functions but does not insert any data into the database.
Simple function for inserting a dictionary whose keys match the fieldnames of tblname. The function returns two values, the
second of which is a dict containing the primary keys of the record. If a record already exists then no insertion is performed and
(False, the dictionary of existing primary keys) is returned. Otherwise, the record is inserted into the database and (True, d)
is returned.'''
if type(PKfields) == type(""):
PKfields = [PKfields]
if fields == None:
fields = sorted(d.keys())
values = None
SQL = None
try:
# Search for existing records
wherestr = []
PKvalues = []
for PKfield in PKfields:
if d[PKfield] == None:
wherestr.append("%s IS NULL" % PKfield)
else:
wherestr.append("%s=%%s" % PKfield)
PKvalues.append(d[PKfield])
PKfields = ",".join(PKfields)
wherestr = " AND ".join(wherestr)
record_exists = None
if check_existing:
record_exists = not(not(self.execute_select("SELECT %s FROM %s" % (PKfields, tblname) + " WHERE %s" % wherestr, parameters=tuple(PKvalues), locked = False)))
SQL = 'INSERT INTO %s (%s) VALUES (%s)' % (
tblname, ", ".join(fields), ','.join(['%s' for x in range(len(fields))]))
values = tuple([d[k] for k in fields])
return SQL, values, record_exists
except Exception as e:
raise Exception("Error occurred during database insertion: '%s'. %s" % (str(e), traceback.format_exc()))
|
class DatabaseInterface(object):
def __init__(self, settings, isInnoDB=True, numTries=1, host=None, db=None, user=None, passwd=None, port=None,
unix_socket=None, passwdfile=None, use_utf=False, use_locking=True):
pass
def __del__(self):
pass
def close(self):
pass
def checkIsClosed(self):
pass
def _get_connection(self, force = False):
pass
def _get_StdCursor_connection(self):
pass
def _get_SSDictCursor_connection(self):
pass
def _close_connection(self):
pass
def iterate_query(self, query, arraysize=100000):
pass
def getLastRowID(self):
pass
def locked_execute(self, sql, parameters=None, cursorClass=DictCursor, quiet=False):
'''We are lock-happy here but SQL performance is not currently an issue daemon-side.'''
pass
def transaction_insert_dict_auto_inc(self, transaction_cursor, tblname, d, unique_id_fields = [], fields = None, check_existing = False, id_field = 'ID'):
'''A transaction wrapper for inserting dicts into fields with an autoincrementing ID. Insert the record and return the associated ID (long).'''
pass
def get_unique_record(self, sql, parameters = None, quiet = False, locked = False):
'''I use this pattern a lot. Return the single record corresponding to the query.'''
pass
def execute_select(self, sql, parameters = None, quiet = False, locked = False):
pass
def execute_select_StdCursor(self, sql, parameters=None, quiet=False, locked=False):
pass
def execute_select_SSDictCursor(self, sql, parameters=None, quiet=False, locked=False):
pass
def execute_SSDictCursor(self, sql, parameters=None, quiet=False, locked=False, do_commit=True):
'''Execute SQL query. This uses SSDictCursor.'''
pass
def execute_StdCursor(self, sql, parameters=None, quiet=False, locked=False, do_commit=True):
'''Execute SQL query. This uses StdCursor.'''
pass
def list_stored_procedures(self):
pass
def run_transaction(self, command_list, do_commit=True):
'''This can be used to stage multiple commands and roll back the transaction if an error occurs. This is useful
if you want to remove multiple records in multiple tables for one entity but do not want the deletion to occur
if the entity is tied to table not specified in the list of commands. Performing this as a transaction avoids
the situation where the records are partially removed. If do_commit is false, the entire transaction is cancelled.'''
pass
def execute(self, sql, parameters=None, quiet=False, locked=False, do_commit=True, allow_unsafe_query=False):
'''Execute SQL query. This uses DictCursor by default.'''
pass
def call_select_proc(self, procname, parameters=(), quiet=False):
'''Calls a MySQL stored procedure procname and returns the set of results.'''
pass
def callproc(self, procname, parameters=(), quiet=False, expect_return_value=False):
'''Calls a MySQL stored procedure procname and returns the return values. This uses DictCursor.
To get return values back out of a stored procedure, prefix the parameter with a @ character.
'''
pass
def insertDict(self, tblname, d, fields=None):
'''Simple function for inserting a dictionary whose keys match the fieldnames of tblname.'''
pass
def t_insert_dict_if_new(self, tblname, d, PKfields, fields=None):
'''A version of insertDictIfNew for transactions. This does not call commit.'''
pass
def insertDictIfNew(self, tblname, d, PKfields, fields=None, locked = True):
'''Simple function for inserting a dictionary whose keys match the fieldnames of tblname. The function returns two values, the
second of which is a dict containing the primary keys of the record. If a record already exists then no insertion is performed and
(False, the dictionary of existing primary keys) is returned. Otherwise, the record is inserted into the database and (True, d)
is returned.'''
pass
def _insert_dict_if_new_inner(self, tblname, d, PKfields, fields=None, locked = True):
'''The main function of the insert_dict functions.
This creates and returns the SQL query and parameters used by the other functions but does not insert any data into the database.
Simple function for inserting a dictionary whose keys match the fieldnames of tblname. The function returns two values, the
second of which is a dict containing the primary keys of the record. If a record already exists then no insertion is performed and
(False, the dictionary of existing primary keys) is returned. Otherwise, the record is inserted into the database and (True, d)
is returned.'''
pass
def create_insert_dict_string(self, tblname, d, PKfields=[], fields=None, check_existing = False):
'''The main function of the insert_dict functions.
This creates and returns the SQL query and parameters used by the other functions but does not insert any data into the database.
Simple function for inserting a dictionary whose keys match the fieldnames of tblname. The function returns two values, the
second of which is a dict containing the primary keys of the record. If a record already exists then no insertion is performed and
(False, the dictionary of existing primary keys) is returned. Otherwise, the record is inserted into the database and (True, d)
is returned.'''
pass
| 29 | 14 | 22 | 1 | 19 | 2 | 5 | 0.09 | 1 | 9 | 1 | 2 | 28 | 22 | 28 | 28 | 660 | 75 | 538 | 128 | 508 | 49 | 501 | 114 | 472 | 21 | 1 | 5 | 149 |
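A minimal usage sketch of DatabaseInterface as defined above; the connection values and table names are placeholders, and the keyword arguments are assumed to override anything read from `settings` (that parsing happens in __init__, outside this excerpt):

db = DatabaseInterface({}, host='localhost', db='mydb', user='me', passwd='secret')
# Parameterized SELECT; DictCursor returns a list of dicts.
rows = db.execute_select('SELECT * FROM Users WHERE Status=%s', parameters=('active',))
# Insert-if-new: (True, d) on insertion, (False, existing primary keys) otherwise.
inserted, keys = db.insertDictIfNew('Users', {'ID': 1, 'Name': 'jane'}, PKfields=['ID'])
# Stream a large result set without materializing it in memory.
for row in db.iterate_query('SELECT * FROM BigTable'):
    print(row)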
143,605 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/db/mysql.py
|
klab.db.mysql.ReusableDatabaseInterface
|
class ReusableDatabaseInterface(DatabaseInterface):
def __init__(self, settings, isInnoDB=True, numTries=1, host=None, db=None, user=None, passwd=None, port=None,
unix_socket=None, passwdfile=None, use_utf=False):
print("THE ReusableDatabaseInterface CLASS IS DEPRECATED. PLEASE REPLACE 'ReusableDatabaseInterface' WITH 'DatabaseInterface' IN YOUR CODE.")
super(ReusableDatabaseInterface, self).__init__(settings, isInnoDB=isInnoDB, numTries=numTries, host=host, db=db, user=user, passwd=passwd, port=port,
unix_socket=unix_socket, passwdfile=passwdfile, use_utf=use_utf)
|
class ReusableDatabaseInterface(DatabaseInterface):
def __init__(self, settings, isInnoDB=True, numTries=1, host=None, db=None, user=None, passwd=None, port=None,
unix_socket=None, passwdfile=None, use_utf=False):
pass
| 2 | 0 | 6 | 1 | 5 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 1 | 0 | 1 | 29 | 7 | 1 | 6 | 3 | 3 | 0 | 4 | 2 | 2 | 1 | 2 | 0 | 1 |
143,606 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/db/mysql.py
|
klab.db.mysql._FieldNames
|
class _FieldNames(object):
'''This class is used to store the database structure accessed via element access rather than using raw strings or doing a dict lookup.
The class can be made read-only to prevent accidental updates.'''
def __init__(self, name):
try:
# If we are creating a new class and the class has already been made read-only then we need to remove the lock.
# It is the responsibility of the programmer to lock the class as read-only again after creation.
# A better implementation may be to append this instance to a list and change readonly_setattr to allow updates only to elements in that list.
getattr(self.__class__, 'original_setattr')
self.__class__.__setattr__ = self.__class__.original_setattr
except AttributeError:
self.__class__.original_setattr = self.__class__.__setattr__
self._name = name
def makeReadOnly(self):
self.__class__.__setattr__ = self.readonly_setattr
def readonly_setattr(self, name, value):
raise Exception("Attempted to add/change an element of a read-only class.")
|
class _FieldNames(object):
'''This class is used to store the database structure accessed via element access rather than using raw strings or doing a dict lookup.
The class can be made read-only to prevent accidental updates.'''
def __init__(self, name):
pass
def makeReadOnly(self):
pass
def readonly_setattr(self, name, value):
pass
| 4 | 1 | 5 | 0 | 4 | 1 | 1 | 0.42 | 1 | 1 | 0 | 0 | 3 | 1 | 3 | 3 | 20 | 3 | 12 | 5 | 8 | 5 | 12 | 5 | 8 | 2 | 1 | 1 | 4 |
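A short sketch of the read-only lock implemented by _FieldNames; the field names are illustrative:

fields = _FieldNames('Users')
fields.Name = 'Name'        # allowed while the class is unlocked
fields.Status = 'Status'
fields.makeReadOnly()
fields.Status = 'Other'     # now fails: the class-level __setattr__ has been replaced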
143,607 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/basics.py
|
klab.bio.basics.UniParcPDBSequenceMap
|
class UniParcPDBSequenceMap(SequenceMap):
''' A class to map the IDs of UniParc residue pairs (UniParcID, sequence index) to a PDB chain's Sequence (ATOM/SEQRES/FASTA).
Mapping from tuples is necessary for some cases e.g. for chimeras like 1M7T.
'''
def __setitem__(self, key, value):
assert(len(key) == 2)
assert(type(value) == int or type(value) == bytes or type(value) == str)
assert((type(key[0]) == bytes or type(key[0]) == str) and (type(key[1]) == int))
self.map[key] = value
self.substitution_scores[key] = None
@staticmethod
def from_dict(d):
for k, v in d.items():
assert(len(k) == 2)
assert(type(v) == int or type(v) == bytes or type(v) == str)
assert((type(k[0]) == bytes or type(k[0]) == str) and (type(k[1]) == int))
s = UniParcPDBSequenceMap()
s.map = d
s.substitution_scores = dict.fromkeys(list(d.keys()), None)
return s
def reverse(self):
sm = PDBUniParcSequenceMap()
for v in self:
sm.add(v[1], v[0], v[2])
return sm
|
class UniParcPDBSequenceMap(SequenceMap):
''' A class to map the IDs of UniParc residue pairs (UniParcID, sequence index) to a PDB chain's Sequence (ATOM/SEQRES/FASTA).
Mapping from tuples is necessary for some cases e.g. for chimeras like 1M7T.
'''
def __setitem__(self, key, value):
pass
@staticmethod
def from_dict(d):
pass
def reverse(self):
pass
| 5 | 1 | 7 | 1 | 7 | 0 | 2 | 0.14 | 1 | 7 | 1 | 0 | 2 | 0 | 3 | 22 | 31 | 6 | 22 | 9 | 17 | 3 | 21 | 8 | 17 | 2 | 2 | 1 | 5 |
143,608 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/pfam.py
|
klab.bio.pfam.Pfam
|
class Pfam(object):
def __init__(self):
pdb_chain_to_pfam_mapping = {}
pfam_to_pdb_chain_mapping = {}
lines = read_file(pdb_to_pfam_mapping_file).split('\n')
for c in range(len(lines)):
if not lines[c].startswith('#'):
break
assert(lines[c].split() == ['PDB', 'CHAIN', 'SP_PRIMARY', 'PFAM_ID'])
for l in lines[c:]:
if l.strip():
tokens = l.split()
pdb_id = tokens[0].lower()
chain_id = tokens[1]
pfam_acc = tokens[3]
pdb_key = (pdb_id, chain_id)
pdb_chain_to_pfam_mapping[pdb_id] = pdb_chain_to_pfam_mapping.get(pdb_id, {})
pdb_chain_to_pfam_mapping[pdb_id][chain_id] = pdb_chain_to_pfam_mapping[pdb_id].get(chain_id, set())
pdb_chain_to_pfam_mapping[pdb_id][chain_id].add(pfam_acc)
pfam_to_pdb_chain_mapping[pfam_acc] = pfam_to_pdb_chain_mapping.get(pfam_acc, set())
pfam_to_pdb_chain_mapping[pfam_acc].add(pdb_key)
self.pdb_chain_to_pfam_mapping = pdb_chain_to_pfam_mapping
self.pfam_to_pdb_chain_mapping = pfam_to_pdb_chain_mapping
def get_pfam_accession_numbers_from_pdb_id(self, pdb_id):
'''Note: an alternative is to use the RCSB API e.g. http://www.rcsb.org/pdb/rest/hmmer?structureId=1cdg.'''
pdb_id = pdb_id.lower()
if self.pdb_chain_to_pfam_mapping.get(pdb_id):
return self.pdb_chain_to_pfam_mapping[pdb_id].copy()
def get_pfam_accession_numbers_from_pdb_chain(self, pdb_id, chain):
'''Note: an alternative is to use the RCSB API e.g. http://www.rcsb.org/pdb/rest/hmmer?structureId=1cdg.'''
return self.pdb_chain_to_pfam_mapping.get(pdb_id.lower(), {}).get(chain)
def get_pdb_chains_from_pfam_accession_number(self, pfam_acc):
return self.pfam_to_pdb_chain_mapping.get(pfam_acc)
|
class Pfam(object):
def __init__(self):
pass
def get_pfam_accession_numbers_from_pdb_id(self, pdb_id):
'''Note: an alternative is to use the RCSB API e.g. http://www.rcsb.org/pdb/rest/hmmer?structureId=1cdg.'''
pass
def get_pfam_accession_numbers_from_pdb_chain(self, pdb_id, chain):
'''Note: an alternative is to use the RCSB API e.g. http://www.rcsb.org/pdb/rest/hmmer?structureId=1cdg.'''
pass
def get_pdb_chains_from_pfam_accession_number(self, pfam_acc):
pass
| 5 | 2 | 9 | 1 | 8 | 1 | 2 | 0.1 | 1 | 2 | 0 | 0 | 4 | 2 | 4 | 4 | 42 | 9 | 31 | 17 | 26 | 3 | 31 | 17 | 26 | 5 | 1 | 2 | 9 |
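A hedged usage sketch of Pfam; pdb_to_pfam_mapping_file is a module-level path defined outside this excerpt, and the accession below is only illustrative:

pfam = Pfam()
# Set of Pfam accessions for chain A of 1CDG, or None if unmapped.
print(pfam.get_pfam_accession_numbers_from_pdb_chain('1cdg', 'A'))
# Reverse lookup: set of (pdb_id, chain) pairs for an accession.
print(pfam.get_pdb_chains_from_pfam_accession_number('PF00128'))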
143,609 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/basics.py
|
klab.bio.basics.PDBUniParcSequenceMap
|
class PDBUniParcSequenceMap(SequenceMap):
''' A class to map the IDs of a PDB chain's Sequence (ATOM/SEQRES/FASTA) to a UniParc residue pairs (UniParcID, sequence index).
Mapping to tuples is necessary for some cases e.g. for chimeras like 1M7T.
'''
def __setitem__(self, key, value):
assert(len(value) == 2)
assert(type(key) == int or type(key) == bytes or type(key) == str)
assert((type(value[0]) == bytes or type(value[0]) == str) and (type(value[1]) == int))
self.map[key] = value
self.substitution_scores[key] = None
@staticmethod
def from_dict(d):
for k, v in d.items():
assert(len(v) == 2)
assert(type(k) == int or type(k) == bytes or type(k) == str)
assert((type(v[0]) == bytes or type(v[0]) == str) and (type(v[1]) == int))
s = PDBUniParcSequenceMap()
s.map = d
s.substitution_scores = dict.fromkeys(list(d.keys()), None)
return s
def reverse(self):
sm = UniParcPDBSequenceMap()
for v in self:
sm.add(v[1], v[0], v[2])
return sm
|
class PDBUniParcSequenceMap(SequenceMap):
''' A class to map the IDs of a PDB chain's Sequence (ATOM/SEQRES/FASTA) to a UniParc residue pairs (UniParcID, sequence index).
Mapping to tuples is necessary for some cases e.g. for chimeras like 1M7T.
'''
def __setitem__(self, key, value):
pass
@staticmethod
def from_dict(d):
pass
def reverse(self):
pass
| 5 | 1 | 7 | 1 | 7 | 0 | 2 | 0.14 | 1 | 7 | 1 | 0 | 2 | 0 | 3 | 22 | 31 | 6 | 22 | 9 | 17 | 3 | 21 | 8 | 17 | 2 | 2 | 1 | 5 |
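UniParcPDBSequenceMap and PDBUniParcSequenceMap are mirror images of each other; a plain-dict sketch of the inversion their reverse() methods implement (identifiers are illustrative):

# PDB residue index -> (UniParcID, UniParc sequence index)
pdb_to_uniparc = {1: ('UPI0000000001', 5), 2: ('UPI0000000001', 6)}
# Reversing swaps keys and values: (UniParcID, index) -> PDB residue index
uniparc_to_pdb = {v: k for k, v in pdb_to_uniparc.items()}
assert uniparc_to_pdb[('UPI0000000001', 5)] == 1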
143,610 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/unmerged/rpache/PDB_files.py
|
klab.unmerged.rpache.PDB_files.Residue
|
class Residue:
def __init__(self,res_seq='',res_name='',is_canonical=True,atoms=None):
self.res_seq=res_seq
self.res_name=res_name
self.is_canonical=is_canonical
self.atoms=atoms if atoms is not None else []
def out(self):
print(self.res_seq,self.res_name)
for atom in self.atoms:
atom.out()
def write(self):
outstring=''
for atom in self.atoms:
outstring+=atom.write()
#-
return outstring
|
class Residue:
def __init__(self,res_seq='',res_name='',is_canonical=True,atoms=None):
pass
def out(self):
pass
def write(self):
pass
| 4 | 0 | 5 | 0 | 5 | 0 | 2 | 0.07 | 0 | 0 | 0 | 0 | 3 | 4 | 3 | 3 | 18 | 2 | 15 | 11 | 11 | 1 | 15 | 11 | 11 | 2 | 0 | 1 | 5 |
143,611 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/biblio/doi.py
|
klab.biblio.doi.DOIRetrievalException
|
class DOIRetrievalException(Exception): pass
|
class DOIRetrievalException(Exception):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
143,612 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/basics.py
|
klab.bio.basics.IdentifyingPDBResidue
|
class IdentifyingPDBResidue(PDBResidue):
'''A sortable subclass.'''
def __eq__(self, other):
if type(other) == type(None):
return False
return (self.Chain == other.Chain) and (self.ResidueID == other.ResidueID) and (self.ResidueAA == other.ResidueAA) and (self.residue_type == other.residue_type)
def __cmp__(self, other):
'''Only checks chains and residue IDs.'''
if type(other) == type(None):
return 1
if self.Chain != other.Chain:
if ord(self.Chain) < ord(other.Chain):
return -1
else:
return 1
selfResidueID = self.ResidueID
otherResidueID = other.ResidueID
if selfResidueID != otherResidueID:
if not selfResidueID.isdigit():
spair = (int(selfResidueID[:-1]), ord(selfResidueID[-1]))
else:
spair = (int(selfResidueID), 0)
if not otherResidueID.isdigit():
opair = (int(otherResidueID[:-1]), ord(otherResidueID[-1]))
else:
opair = (int(otherResidueID), 0)
if spair < opair:
return -1
else:
return 1
return 0
|
class IdentifyingPDBResidue(PDBResidue):
'''A sortable subclass.'''
def __eq__(self, other):
pass
def __cmp__(self, other):
'''Only checks chains and residue IDs.'''
pass
| 3 | 2 | 15 | 0 | 14 | 1 | 5 | 0.07 | 1 | 2 | 0 | 0 | 2 | 0 | 2 | 11 | 33 | 2 | 29 | 7 | 26 | 2 | 25 | 7 | 22 | 8 | 3 | 2 | 10 |
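__cmp__ above orders residue IDs numerically, using a trailing insertion code as a secondary key; note that Python 3 ignores __cmp__, so sorting there needs __lt__ or an explicit key function. A standalone sketch of the same key logic:

def residue_sort_key(residue_id):
    # '10' -> (10, 0); '10A' -> (10, ord('A')), so 10 < 10A < 11
    if residue_id.isdigit():
        return (int(residue_id), 0)
    return (int(residue_id[:-1]), ord(residue_id[-1]))

assert sorted(['11', '10A', '10'], key=residue_sort_key) == ['10', '10A', '11']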
143,613 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/fragments/utils.py
|
klab.bio.fragments.utils.colorprinter
|
class colorprinter(object):
@staticmethod
def error(s):
print(('\033[91m%s\033[0m' %s)) #\x1b\x5b1;31;40m%s\x1b\x5b0;40;40m' % s)
@staticmethod
def warning(s):
print(('\033[93m%s\033[0m' %s)) #\x1b\x5b1;31;40m%s\x1b\x5b0;40;40m' % s)
@staticmethod
def prompt(s = None):
if s:
print(('\033[93m%s\033[0m' %s)) #\x1b\x5b1;31;40m%s\x1b\x5b0;40;40m' % s)
else:
sys.stdout.write("\033[93m $ \033[0m")
@staticmethod
def message(s):
print(('\033[92m%s\033[0m' %s))
|
class colorprinter(object):
@staticmethod
def error(s):
pass
@staticmethod
def warning(s):
pass
@staticmethod
def prompt(s = None):
pass
@staticmethod
def message(s):
pass
| 9 | 0 | 3 | 0 | 3 | 1 | 1 | 0.25 | 1 | 0 | 0 | 0 | 0 | 0 | 4 | 4 | 20 | 4 | 16 | 9 | 7 | 4 | 11 | 5 | 6 | 2 | 1 | 1 | 5 |
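The escape sequences in colorprinter are standard ANSI SGR codes: 91 is bright red, 92 bright green, 93 bright yellow, and 0 resets. For example:

print('\033[91m' + 'error text' + '\033[0m')   # bright red, then reset attributes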
143,614 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/deprecated/rosettahelper.py
|
klab.deprecated.rosettahelper.WebsiteSettings
|
class WebsiteSettings(object):
settings = {}
base_dir = None
def __init__(self, argv, scriptname):
self.settings["ServerScript"] = scriptname
if argv[0].find("/") != -1:
self.base_dir = self._getSourceRoot(argv[0])
else:
self.base_dir = self._getSourceRoot(scriptname)
self._read_config_file()
def _read_config_file(self):
base_dir = self.base_dir
settings = self.settings
settingsFilename = os.path.join(base_dir, "settings.ini")
# Read settings from file
try:
handle = open(settingsFilename, 'r')
lines = handle.readlines()
handle.close()
for line in lines:
if line[0] != '#' and len(line) > 1: # skip comments and empty lines
# format is: "parameter = value"
list_data = line.split()
if len(list_data) < 3:
raise IndexError(line)
settings[list_data[0]] = list_data[2]
except IOError:
raise Exception("settings.ini could not be found in %s." % base_dir)
# Create derived settings
settings["BaseDir"] = base_dir
settings["BinDir"] = os.path.join(base_dir, "bin")
settings["DataDir"] = os.path.join(base_dir, "data")
settings["ShortServerName"] = string.split(settings["ServerName"], ".")[0]
settings["LiveWebserver"] = bool(int(settings["LiveWebserver"]))
settings["ClusterDebugMode"] = bool(int(settings["ClusterDebugMode"]))
settings["SQLPort"] = int(settings["SQLPort"])
settings["MaxLocalProcesses"] = int(settings["MaxLocalProcesses"])
settings["MaxClusterJobs"] = int(settings["MaxClusterJobs"])
# Constant settings (these can be optionally overwritten in the settings file)
if not settings.get("TempDir"):
settings["TempDir"] = os.path.join(base_dir, "temp")
if not settings.get("EnsembleTempDir"):
settings["EnsembleTempDir"] = os.path.join(base_dir, "temp")
if not settings.get("DownloadDir"):
settings["DownloadDir"] = os.path.join(base_dir, "downloads")
if not settings.get("RemoteDownloadDir"):
settings["RemoteDownloadDir"] = os.path.join(base_dir, "remotedownloads")
if not settings.get("ErrorDir"):
settings["ErrorDir"] = os.path.join(base_dir, "error")
if not settings.get("StoreTime"):
settings["StoreTime"] = 60
if not settings.get("CookieExpiration"):
settings["CookieExpiration"] = 60 * 60
if not settings.get("ClusterDownloadDir"):
settings["ClusterDownloadDir"] = os.path.join(base_dir, "downloads")
if not settings.get("ClusterDownloadDir"):
settings["ClusterDownloadDir"] = os.path.join(base_dir, "ddgdownloads")
if not settings.get("ClusterRemoteDownloadDir"):
settings["ClusterRemoteDownloadDir"] = os.path.join(base_dir, "remotedownloads")
if not settings.get("ClusterTemp"):
settings["ClusterTemp"] = os.path.join(base_dir, "temp", "cluster")
if not settings.get("ddGTemp"):
settings["ddGTemp"] = os.path.join(base_dir, "temp", "ddG")
if not settings.get("ClusterddGDir"):
settings["ClusterddGDir"] = None
def _getSourceRoot(self, scriptfilename):
fe = scriptfilename.find("frontend")
if fe != -1:
return scriptfilename[:fe]
be = scriptfilename.find("daemon")
if be != -1:
return scriptfilename[:be]
raise Exception("Cannot determine source root for %s." % scriptfilename)
def __setitem__(self, index, value):
self.settings[index] = value
def __getitem__(self, index):
return self.settings[index]
|
class WebsiteSettings(object):
def __init__(self, argv, scriptname):
pass
def _read_config_file(self):
pass
def _getSourceRoot(self, scriptfilename):
pass
def __setitem__(self, index, value):
pass
def __getitem__(self, index):
pass
| 6 | 0 | 16 | 1 | 14 | 1 | 5 | 0.07 | 1 | 4 | 0 | 0 | 5 | 0 | 5 | 5 | 86 | 8 | 74 | 17 | 68 | 5 | 73 | 17 | 67 | 18 | 1 | 4 | 25 |
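_read_config_file expects every non-comment settings.ini line to split into at least three whitespace-separated tokens ('parameter = value'). A hypothetical file covering the keys the class reads (values are illustrative):

# settings.ini
ServerName = kortemmelab.ucsf.edu
LiveWebserver = 1
ClusterDebugMode = 0
SQLPort = 3306
MaxLocalProcesses = 4
MaxClusterJobs = 100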
143,615 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/docopt.py
|
klab.docopt.AnyOptions
|
class AnyOptions(Optional):
"""Marker/placeholder for [options] shortcut."""
|
class AnyOptions(Optional):
'''Marker/placeholder for [options] shortcut.'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 3 | 1 | 1 | 1 | 0 | 1 | 1 | 1 | 0 | 0 | 4 | 0 | 0 |
143,616 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/docopt.py
|
klab.docopt.Argument
|
class Argument(ChildPattern):
def single_match(self, left):
for n, p in enumerate(left):
if type(p) is Argument:
return n, Argument(self.name, p.value)
return None, None
@classmethod
def parse(class_, source):
name = re.findall(r'(<\S*?>)', source)[0]
value = re.findall(r'\[default: (.*)\]', source, flags=re.I)
return class_(name, value[0] if value else None)
|
class Argument(ChildPattern):
def single_match(self, left):
pass
@classmethod
def parse(class_, source):
pass
| 4 | 0 | 5 | 0 | 5 | 0 | 3 | 0 | 1 | 2 | 0 | 1 | 1 | 0 | 2 | 12 | 13 | 2 | 11 | 7 | 7 | 0 | 10 | 6 | 7 | 3 | 3 | 2 | 5 |
143,617 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/docopt.py
|
klab.docopt.ChildPattern
|
class ChildPattern(Pattern):
def __init__(self, name, value=None):
self.name = name
self.value = value
def __repr__(self):
return '%s(%r, %r)' % (self.__class__.__name__, self.name, self.value)
def flat(self, *types):
return [self] if not types or type(self) in types else []
def match(self, left, collected=None):
collected = [] if collected is None else collected
pos, match = self.single_match(left)
if match is None:
return False, left, collected
left_ = left[:pos] + left[pos + 1:]
same_name = [a for a in collected if a.name == self.name]
if type(self.value) in (int, list):
if type(self.value) is int:
increment = 1
else:
increment = ([match.value] if type(match.value) is str
else match.value)
if not same_name:
match.value = increment
return True, left_, collected + [match]
same_name[0].value += increment
return True, left_, collected
return True, left_, collected + [match]
|
class ChildPattern(Pattern):
def __init__(self, name, value=None):
pass
def __repr__(self):
pass
def flat(self, *types):
pass
def match(self, left, collected=None):
pass
| 5 | 0 | 7 | 0 | 7 | 0 | 3 | 0 | 1 | 4 | 0 | 2 | 4 | 2 | 4 | 10 | 31 | 4 | 27 | 11 | 22 | 0 | 25 | 11 | 20 | 7 | 2 | 2 | 11 |
143,618 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/biblio/doi.py
|
klab.biblio.doi.NoAuthorsFoundException
|
class NoAuthorsFoundException(Exception): pass
|
class NoAuthorsFoundException(Exception):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
143,619 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/docopt.py
|
klab.docopt.Dict
|
class Dict(dict):
def __repr__(self):
return '{%s}' % ',\n '.join('%r: %r' % i for i in sorted(self.items()))
|
class Dict(dict):
def __repr__(self):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 28 | 3 | 0 | 3 | 2 | 1 | 0 | 3 | 2 | 1 | 1 | 2 | 0 | 1 |
143,620 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/docopt.py
|
klab.docopt.DocoptExit
|
class DocoptExit(SystemExit):
"""Exit in case user invoked program with incorrect arguments."""
usage = ''
def __init__(self, message=''):
SystemExit.__init__(self, (message + '\n' + self.usage).strip())
|
class DocoptExit(SystemExit):
'''Exit in case user invoked program with incorrect arguments.'''
def __init__(self, message=''):
pass
| 2 | 1 | 2 | 0 | 2 | 0 | 1 | 0.25 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 11 | 8 | 3 | 4 | 3 | 2 | 1 | 4 | 3 | 2 | 1 | 3 | 0 | 1 |
143,621 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/basics.py
|
klab.bio.basics.InconsistentMappingException
|
class InconsistentMappingException(Exception): pass
|
class InconsistentMappingException(Exception):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
143,622 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/basics.py
|
klab.bio.basics.ElementCounter
|
class ElementCounter(FrequencyCounter):
'''This class can be used to collect atoms and then print them in Hill notation e.g.
e = ElementCounter()
e.add('C')
e.add('C')
e.add('O')
e.add('H')
print(e) # prints "C2 H O".
'''
def __repr__(self):
'''Return the Hill notation.'''
return self.to_str()
def get_order(self):
order = []
# Convert the atom names to element names
element_frequencies = {}
for k, ct in self.items.items():
a = pdb_atom_name_to_element(k)
element_frequencies[a] = element_frequencies.get(a, 0)
element_frequencies[a] += 1
carbon_exists = 'C' in element_frequencies
if carbon_exists:
order.append(('C', element_frequencies['C']))
if 'H' in element_frequencies:
order.append(('H', element_frequencies['H']))
for element_name, freq in sorted(element_frequencies.items()):
if (element_name != 'C' and element_name != 'H'):
order.append((element_name, freq))
else:
for element_name, freq in sorted(element_frequencies.items()):
order.append((element_name, freq))
return order
def merge(self, other):
'''Merge two element counters. For all elements, we take the max count from both counters.'''
our_element_frequencies = self.items
their_element_frequencies = other.items
for element_name, freq in sorted(our_element_frequencies.items()):
our_element_frequencies[element_name] = max(our_element_frequencies.get(element_name, 0), their_element_frequencies.get(element_name, 0))
for element_name, freq in sorted(their_element_frequencies.items()):
if element_name not in our_element_frequencies:
our_element_frequencies[element_name] = their_element_frequencies[element_name]
def to_str(self, sep = ' ', olelem = '', orelem = '', iblelem = '', ibrelem = ''):
order = self.get_order()
t = []
for o in order:
if o[1] > 1:
t.append('{2}{0}{4}{1}{5}{3}'.format(o[0], o[1], olelem, orelem, iblelem, ibrelem))
else:
t.append('{1}{0}{2}'.format(o[0], olelem, orelem)) # do not print 1
return sep.join(t) #['{0}{1}'.format(*o) for o in order])
def to_html(self, oelem = 'span'):
return self.to_str(olelem = '<{0}>'.format(oelem), orelem = '</{0}>'.format(oelem), iblelem = '<sub>', ibrelem = '</sub>')
|
class ElementCounter(FrequencyCounter):
'''This class can be used to collect atoms and then print them in Hill notation e.g.
e = ElementCounter()
e.add('C')
e.add('C')
e.add('O')
e.add('H')
print(e) # prints "C2 H O".
'''
def __repr__(self):
'''Return the Hill notation.'''
pass
def get_order(self):
pass
def merge(self, other):
'''Merge two element counters. For all elements, we take the max count from both counters.'''
pass
def to_str(self, sep = ' ', olelem = '', orelem = '', iblelem = '', ibrelem = ''):
pass
def to_html(self, oelem = 'span'):
pass
| 6 | 3 | 9 | 0 | 8 | 1 | 3 | 0.32 | 1 | 0 | 0 | 0 | 5 | 0 | 5 | 7 | 63 | 11 | 41 | 18 | 35 | 13 | 39 | 18 | 33 | 7 | 2 | 3 | 16 |
143,623 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/basics.py
|
klab.bio.basics.Mutation
|
class Mutation(SimpleMutation):
'''A class to describe mutations to structures.
For legacy support, we store SecondaryStructurePosition and AccessibleSurfaceArea. This should be rethought.
We probably want to store SS and ASA for wildtype *and* mutant. This is ambiguous at present.
'''
def __init__(self, WildTypeAA, ResidueID, MutantAA, Chain = None, SecondaryStructurePosition = None, AccessibleSurfaceArea = None):
super(Mutation, self).__init__(WildTypeAA, ResidueID, MutantAA, Chain = Chain)
self.SecondaryStructurePosition = SecondaryStructurePosition
self.AccessibleSurfaceArea = AccessibleSurfaceArea
def __repr__(self):
suffix = ''
if self.SecondaryStructurePosition:
suffix = ' (%s)' % self.SecondaryStructurePosition
if self.AccessibleSurfaceArea:
suffix += ' ASA=%s' % self.AccessibleSurfaceArea
if self.Chain:
return "%s:%s %s->%s%s" % (self.Chain, self.WildTypeAA, str(self.ResidueID), self.MutantAA, suffix)
else:
return "?:%s %s->%s%s" % (self.WildTypeAA, str(self.ResidueID), self.MutantAA, suffix)
|
class Mutation(SimpleMutation):
'''A class to describe mutations to structures.
For legacy support, we store SecondaryStructurePosition and AccessibleSurfaceArea. This should be rethought.
We probably want to store SS and ASA for wildtype *and* mutant. This is ambiguous at present.
'''
def __init__(self, WildTypeAA, ResidueID, MutantAA, Chain = None, SecondaryStructurePosition = None, AccessibleSurfaceArea = None):
pass
def __repr__(self):
pass
| 3 | 1 | 7 | 0 | 7 | 0 | 3 | 0.27 | 1 | 2 | 0 | 1 | 2 | 2 | 2 | 6 | 21 | 2 | 15 | 6 | 12 | 4 | 14 | 6 | 11 | 4 | 2 | 1 | 5 |
143,624 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/docopt.py
|
klab.docopt.Either
|
class Either(ParentPattern):
def match(self, left, collected=None):
collected = [] if collected is None else collected
outcomes = []
for p in self.children:
matched, _, _ = outcome = p.match(left, collected)
if matched:
outcomes.append(outcome)
if outcomes:
return min(outcomes, key=lambda outcome: len(outcome[1]))
return False, left, collected
|
class Either(ParentPattern):
def match(self, left, collected=None):
pass
| 2 | 0 | 10 | 0 | 10 | 0 | 5 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 10 | 12 | 1 | 11 | 5 | 9 | 0 | 11 | 5 | 9 | 5 | 3 | 2 | 5 |
143,625 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/docopt.py
|
klab.docopt.OneOrMore
|
class OneOrMore(ParentPattern):
def match(self, left, collected=None):
assert len(self.children) == 1
collected = [] if collected is None else collected
l = left
c = collected
l_ = None
matched = True
times = 0
while matched:
# could it be that something didn't match but changed l or c?
matched, l, c = self.children[0].match(l, c)
times += 1 if matched else 0
if l_ == l:
break
l_ = l
if times >= 1:
return True, l, c
return False, left, collected
|
class OneOrMore(ParentPattern):
def match(self, left, collected=None):
pass
| 2 | 0 | 18 | 0 | 17 | 1 | 6 | 0.06 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 10 | 20 | 1 | 18 | 7 | 16 | 1 | 18 | 7 | 16 | 6 | 3 | 2 | 6 |
143,626 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/docopt.py
|
klab.docopt.Option
|
class Option(ChildPattern):
def __init__(self, short=None, long=None, argcount=0, value=False):
assert argcount in (0, 1)
self.short, self.long = short, long
self.argcount, self.value = argcount, value
self.value = None if value is False and argcount else value
@classmethod
def parse(class_, option_description):
short, long, argcount, value = None, None, 0, False
options, _, description = option_description.strip().partition('  ')
options = options.replace(',', ' ').replace('=', ' ')
for s in options.split():
if s.startswith('--'):
long = s
elif s.startswith('-'):
short = s
else:
argcount = 1
if argcount:
matched = re.findall(r'\[default: (.*)\]', description, flags=re.I)
value = matched[0] if matched else None
return class_(short, long, argcount, value)
def single_match(self, left):
for n, p in enumerate(left):
if self.name == p.name:
return n, p
return None, None
@property
def name(self):
return self.long or self.short
def __repr__(self):
return 'Option(%r, %r, %r, %r)' % (self.short, self.long,
self.argcount, self.value)
|
class Option(ChildPattern):
def __init__(self, short=None, long=None, argcount=0, value=False):
pass
@classmethod
def parse(class_, option_description):
pass
def single_match(self, left):
pass
@property
def name(self):
pass
def __repr__(self):
pass
| 8 | 0 | 6 | 0 | 6 | 0 | 3 | 0 | 1 | 2 | 0 | 0 | 4 | 4 | 5 | 15 | 38 | 5 | 33 | 16 | 25 | 0 | 28 | 14 | 22 | 6 | 3 | 2 | 13 |
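A sketch of Option.parse on docopt-style option descriptions; note the two-space separator between the option text and its description:

Option.parse('-h, --help  Show this message.')
# -> Option('-h', '--help', 0, False)
Option.parse('-o FILE  Output file [default: out.txt]')
# -> Option('-o', None, 1, 'out.txt')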
143,627 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/docopt.py
|
klab.docopt.Optional
|
class Optional(ParentPattern):
def match(self, left, collected=None):
collected = [] if collected is None else collected
for p in self.children:
m, left, collected = p.match(left, collected)
return True, left, collected
|
class Optional(ParentPattern):
def match(self, left, collected=None):
pass
| 2 | 0 | 5 | 0 | 5 | 0 | 3 | 0 | 1 | 0 | 0 | 1 | 1 | 0 | 1 | 10 | 7 | 1 | 6 | 4 | 4 | 0 | 6 | 4 | 4 | 3 | 3 | 1 | 3 |
143,628 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/docopt.py
|
klab.docopt.ParentPattern
|
class ParentPattern(Pattern):
def __init__(self, *children):
self.children = list(children)
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__,
', '.join(repr(a) for a in self.children))
def flat(self, *types):
if type(self) in types:
return [self]
return sum([c.flat(*types) for c in self.children], [])
|
class ParentPattern(Pattern):
def __init__(self, *children):
pass
def __repr__(self):
pass
def flat(self, *types):
pass
| 4 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 1 | 2 | 0 | 4 | 3 | 1 | 3 | 9 | 13 | 3 | 10 | 5 | 6 | 0 | 9 | 5 | 5 | 2 | 2 | 1 | 4 |
143,629 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/docopt.py
|
klab.docopt.Pattern
|
class Pattern(object):
def __eq__(self, other):
return repr(self) == repr(other)
def __hash__(self):
return hash(repr(self))
def fix(self):
self.fix_identities()
self.fix_repeating_arguments()
return self
def fix_identities(self, uniq=None):
"""Make pattern-tree tips point to same object if they are equal."""
if not hasattr(self, 'children'):
return self
uniq = list(set(self.flat())) if uniq is None else uniq
for i, c in enumerate(self.children):
if not hasattr(c, 'children'):
assert c in uniq
self.children[i] = uniq[uniq.index(c)]
else:
c.fix_identities(uniq)
def fix_repeating_arguments(self):
"""Fix elements that should accumulate/increment values."""
either = [list(c.children) for c in self.either.children]
for case in either:
for e in [c for c in case if case.count(c) > 1]:
if type(e) is Argument or type(e) is Option and e.argcount:
if e.value is None:
e.value = []
elif type(e.value) is not list:
e.value = e.value.split()
if type(e) is Command or type(e) is Option and e.argcount == 0:
e.value = 0
return self
@property
def either(self):
"""Transform pattern into an equivalent, with only top-level Either."""
# Currently the pattern will not be equivalent, but more "narrow",
# although good enough to reason about list arguments.
ret = []
groups = [[self]]
while groups:
children = groups.pop(0)
types = [type(c) for c in children]
if Either in types:
either = [c for c in children if type(c) is Either][0]
children.pop(children.index(either))
for c in either.children:
groups.append([c] + children)
elif Required in types:
required = [c for c in children if type(c) is Required][0]
children.pop(children.index(required))
groups.append(list(required.children) + children)
elif Optional in types:
optional = [c for c in children if type(c) is Optional][0]
children.pop(children.index(optional))
groups.append(list(optional.children) + children)
elif AnyOptions in types:
optional = [c for c in children if type(c) is AnyOptions][0]
children.pop(children.index(optional))
groups.append(list(optional.children) + children)
elif OneOrMore in types:
oneormore = [c for c in children if type(c) is OneOrMore][0]
children.pop(children.index(oneormore))
groups.append(list(oneormore.children) * 2 + children)
else:
ret.append(children)
return Either(*[Required(*e) for e in ret])
|
class Pattern(object):
def __eq__(self, other):
pass
def __hash__(self):
pass
def fix(self):
pass
def fix_identities(self, uniq=None):
'''Make pattern-tree tips point to same object if they are equal.'''
pass
def fix_repeating_arguments(self):
'''Fix elements that should accumulate/increment values.'''
pass
@property
def either(self):
'''Transform pattern into an equivalent, with only top-level Either.'''
pass
| 8 | 3 | 11 | 0 | 10 | 1 | 4 | 0.08 | 1 | 12 | 8 | 2 | 6 | 0 | 6 | 6 | 73 | 6 | 62 | 20 | 54 | 5 | 54 | 19 | 47 | 8 | 1 | 4 | 23 |
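A minimal sketch of matching with the pattern classes above; Required succeeds only if every child consumes something from the token list:

pattern = Required(Option('-v'), Argument('<file>'))
matched, left, collected = pattern.match([Option('-v', None, 0, True), Argument(None, 'in.txt')])
# matched is True, left is empty, and collected holds
# Option('-v', None, 0, True) and Argument('<file>', 'in.txt')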
143,630 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/docopt.py
|
klab.docopt.Required
|
class Required(ParentPattern):
def match(self, left, collected=None):
collected = [] if collected is None else collected
l = left
c = collected
for p in self.children:
matched, l, c = p.match(l, c)
if not matched:
return False, left, collected
return True, l, c
|
class Required(ParentPattern):
def match(self, left, collected=None):
pass
| 2 | 0 | 9 | 0 | 9 | 0 | 4 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 10 | 11 | 1 | 10 | 6 | 8 | 0 | 10 | 6 | 8 | 4 | 3 | 2 | 4 |
143,631 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/docopt.py
|
klab.docopt.TokenStream
|
class TokenStream(list):
def __init__(self, source, error):
self += source.split() if hasattr(source, 'split') else source
self.error = error
def move(self):
return self.pop(0) if len(self) else None
def current(self):
return self[0] if len(self) else None
|
class TokenStream(list):
def __init__(self, source, error):
pass
def move(self):
pass
def current(self):
pass
| 4 | 0 | 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 0 | 0 | 3 | 1 | 3 | 36 | 11 | 3 | 8 | 5 | 4 | 0 | 8 | 5 | 4 | 2 | 2 | 0 | 6 |
143,632 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/fcm/fcm.py
|
klab.fcm.fcm.FCSFile
|
class FCSFile:
def __init__ (self, filepath, plate_position_str):
self.filepath = filepath
self.plate_position_obj = PlatePos(plate_position_str)
@property
def plate_position(self):
return str( self.plate_position_obj )
@property
def plate_row(self):
return self.plate_position_obj.row
@property
def plate_col(self):
return self.plate_position_obj.col
def __lt__ (self, other):
return self.plate_position < other.plate_position
def __repr__(self):
return self.plate_position
|
class FCSFile:
def __init__ (self, filepath, plate_position_str):
pass
@property
def plate_position(self):
pass
@property
def plate_row(self):
pass
@property
def plate_col(self):
pass
def __lt__ (self, other):
pass
def __repr__(self):
pass
| 10 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 0 | 2 | 1 | 0 | 6 | 2 | 6 | 6 | 22 | 5 | 17 | 12 | 7 | 0 | 14 | 9 | 7 | 1 | 0 | 0 | 6 |
143,633 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/fcm/fcm.py
|
klab.fcm.fcm.Plate
|
class Plate:
def __init__ (self, plate_info_list, sample_dir=None, verbose=False, name=None):
self.name = name
self.info_dict = {}
self.samples = {}
self.sample_dir = sample_dir
for plate_info in plate_info_list:
if plate_info.name not in self.info_dict:
self.info_dict[plate_info.name] = {}
assert( plate_info.value not in self.info_dict[plate_info.name] )
self.info_dict[plate_info.name][plate_info.value] = plate_info
if sample_dir != None:
self.load_fcs_dir(sample_dir, verbose=verbose)
def __repr__(self):
return str(self.info_dict)
@property
def all_position_set(self):
s = set()
for name in self.info_dict:
for value in self.info_dict[name]:
s = s.union(self.info_dict[name][value].position_set)
return s
def get_by_well(self, well_pos):
search_pos = PlatePos(well_pos)
for pos in self.all_position_set:
if pos == search_pos:
return self.samples[pos]
def parameter_values(self, parameter_name):
return sorted( self.info_dict[parameter_name].keys() )
def well_set(self, parameter_name, parameter_value=np.nan):
if parameter_name not in self.info_dict or parameter_value not in self.info_dict[parameter_name]:
return set()
else:
return self.info_dict[parameter_name][parameter_value].position_set
def single_well_from_set(self, well_set):
well_list = list(well_set)
assert( len(well_list) == 1 )
return self.samples[well_list[0]]
@property
def experimental_parameters(self):
experimental_parameters = []
for parameter_name in list(self.info_dict.keys()):
if 'blank' not in parameter_name.lower():
if len(self.info_dict[parameter_name]) == 1 and np.nan in self.info_dict[parameter_name]:
experimental_parameters.append(parameter_name)
return experimental_parameters
def gate(self, gate):
if use_multiprocessing:
pool = mp.Pool()
for pos in self.samples:
pool.apply_async(gate_data, (pos, self.samples[pos], gate), callback=self.set_gate)
pool.close()
pool.join()
else:
for pos in self.samples:
self.samples[pos] = self.samples[pos].gate(gate)
def gate_sample(self, gate, pos):
self.samples[pos] = self.samples[pos].gate(gate)
def set_gate(self, tup):
pos, fcs_data = tup
self.samples[pos] = fcs_data
def load_fcs_dir(self, sample_directory, verbose=False):
fcs_files = find_fcs_files(sample_directory)
for plate_pos, filepath in fcs_files:
assert(plate_pos not in self.samples)
self.samples[plate_pos] = FCMeasurement(ID=str(plate_pos), datafile=filepath)
if verbose:
print('Loaded %d FCS files from directory %s' % (len(fcs_files), sample_directory))
|
class Plate:
def __init__ (self, plate_info_list, sample_dir=None, verbose=False, name=None):
pass
def __repr__(self):
pass
@property
def all_position_set(self):
pass
def get_by_well(self, well_pos):
pass
def parameter_values(self, parameter_name):
pass
def well_set(self, parameter_name, parameter_value=np.nan):
pass
def single_well_from_set(self, well_set):
pass
@property
def experimental_parameters(self):
pass
def gate(self, gate):
pass
def gate_sample(self, gate, pos):
pass
def set_gate(self, tup):
pass
def load_fcs_dir(self, sample_directory, verbose=False):
pass
| 15 | 0 | 5 | 0 | 5 | 0 | 2 | 0 | 0 | 4 | 1 | 0 | 12 | 4 | 12 | 12 | 79 | 11 | 68 | 33 | 53 | 0 | 64 | 31 | 51 | 4 | 0 | 3 | 28 |
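Plate.gate fans work out with apply_async and collects each (position, gated sample) pair through a callback; a self-contained sketch of that pattern with a stand-in worker (gate_one is hypothetical, not the real gate_data):

import multiprocessing as mp

def gate_one(pos, data):
    return pos, data * 2        # (key, gated result)

if __name__ == '__main__':
    samples = {'A1': 1, 'A2': 2}
    results = {}
    pool = mp.Pool()
    for pos, data in samples.items():
        pool.apply_async(gate_one, (pos, data), callback=lambda tup: results.update([tup]))
    pool.close()
    pool.join()
    # results == {'A1': 2, 'A2': 4}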
143,634 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/docopt.py
|
klab.docopt.DocoptLanguageError
|
class DocoptLanguageError(Exception):
"""Error in construction of usage-message by developer."""
|
class DocoptLanguageError(Exception):
'''Error in construction of usage-message by developer.'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 3 | 1 | 1 | 1 | 0 | 1 | 1 | 1 | 0 | 0 | 3 | 0 | 0 |
143,635 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/ligand.py
|
klab.bio.ligand.Ligand
|
class Ligand(object):
'''A class used to store ligand information from the RCSB. The most useful way to use this class is to call
Ligand.retrieve_data_from_rcsb to lookup the RCSB for the ligand data.'''
def __init__(self, ligand_code):
self.PDBCode = ligand_code
self.LigandCode = None
self.Formula = None
self.MolecularWeight = None
self.LigandType = None
self.Diagram = None
self.SimilarCompoundsDiagram = None
self.InChI = None
self.InChIKey = None
self.pdb_id = None
self.has_many_atoms = None # The PDB file for 1BIZ lists CAC as having one atom. This property is intended to simply record when a ligand file has many atoms so we treat it as a ligand rather than a charged atom.
if ligand_code == 'UNL':
# Unknown ligand
self.has_many_atoms = True
elif ligand_code == 'UNX':
# Unknown atom or ion
self.has_many_atoms = False
# These fields are not filled in automatically
self.Solubility = None
self.CellPermeability = None
self.AssaysToDetermineConcentrationInCells = None
self.ProductionInCells = None
self.ProductionInCellsNotes = None
# These fields are not guaranteed to be filled in
self.Diagram = None
self.SimilarCompoundsDiagram = None
self.descriptors = []
self.identifiers = []
self.synonyms = []
def __repr__(self):
s = 'Ligand : {0}, {1}, {2}\n'.format(self.PDBCode, self.LigandCode, self.Formula)
if self.synonyms:
s += ' AKA {0}\n'.format(', '.join(self.synonyms))
s += 'Type : {0}\n'.format(self.LigandType)
s += 'Weight : {0} g/mol\n'.format(self.MolecularWeight)
s += 'InChI : {0} ({1})\n'.format(self.InChI, self.InChIKey)
if self.descriptors:
s += 'Descriptors :\n'
for d in self.descriptors:
s += ' {0} ({1}, {2} {3})\n'.format(d['Descriptor'], d['DescriptorType'], d['Program'], d['Version'])
if self.identifiers:
s += 'Identifiers :\n'
for i in self.identifiers:
s += ' {0} ({1}, {2} {3})\n'.format(i['Identifier'], i['IDType'], i['Program'], i['Version'])
if self.Diagram:
from PIL import Image
file = BytesIO(self.Diagram)
img = Image.open(file)
w, h = img.size
s += 'Diagram : {0}x{1}'.format(w,h)
if self.pdb_id:
s += '\nAss. PDB : {0}'.format(self.pdb_id)
return s
@classmethod
def retrieve_data_from_rcsb(cls, ligand_code, pdb_id = None, silent = True, cached_dir = None):
'''Retrieve and parse ligand data from the RCSB.'''
if not silent:
colortext.printf("Retrieving data from RCSB")
if cached_dir:
assert(os.path.exists(cached_dir))
ligand_info_path, ligand_info, pdb_ligand_info, pdb_ligand_info_path = None, None, None, None
if cached_dir:
ligand_info_path = os.path.join(cached_dir, '{0}.cif'.format(ligand_code))
if os.path.exists(ligand_info_path):
ligand_info = read_file(ligand_info_path)
if not ligand_info:
ligand_info = retrieve_ligand_cif(ligand_code)
if cached_dir:
write_file(ligand_info_path, ligand_info)
# Parse .cif
l = cls(ligand_code)
l.parse_cif(ligand_info)
l.pdb_id = pdb_id or l.pdb_id
has_pdb_id = l.pdb_id and (len(l.pdb_id) == 4) and (l.pdb_id != '?') # the last case is unnecessary and will be short-cut but I included it to show possible values
# Parse PDB XML
if has_pdb_id:
if cached_dir:
pdb_ligand_info_path = os.path.join(cached_dir, '{0}.pdb.ligandinfo'.format(l.pdb_id.lower()))
if os.path.exists(pdb_ligand_info_path):
pdb_ligand_info = read_file(pdb_ligand_info_path)
else:
pdb_ligand_info = retrieve_pdb_ligand_info(l.pdb_id)
write_file(pdb_ligand_info_path, pdb_ligand_info)
else:
pdb_ligand_info = retrieve_pdb_ligand_info(l.pdb_id)
if pdb_ligand_info:
l.parse_pdb_ligand_info(pdb_ligand_info)
# Retrieve the diagram image
l.get_diagram()
return l
def parse_cif(self, cif):
'''See http://www.iucr.org/__data/iucr/cif/standard/cifstd4.html, Acta Cryst. (1991). A47, 655-685.'''
found_cif_header = False
found_cif_descriptors = False
found_cif_identifiers = False
blocks = []
blocklines = []
for l in cif.split('\n'):
if l.strip() == '#':
if blocklines:
blocks.append('\n'.join(blocklines))
blocklines = []
else:
blocklines.append(l)
if blocklines:
blocks.append('\n'.join(blocklines))
for block in blocks:
if block.find('_chem_comp.id') != -1:
assert(not found_cif_header)
self.parse_cif_header(block)
found_cif_header = True
elif block.find('_chem_comp_atom.model_Cartn_x') != -1:
continue
elif block.find('_chem_comp_atom.pdbx_stereo_config') != -1:
continue
elif block.find('_pdbx_chem_comp_descriptor.comp_id') != -1:
assert(not found_cif_descriptors)
self.parse_cif_descriptor(block)
found_cif_descriptors = True
elif block.find('_pdbx_chem_comp_identifier.comp_id') != -1:
assert(not found_cif_identifiers)
self.parse_cif_identifier(block)
found_cif_identifiers = True
elif block.find('_pdbx_chem_comp_audit.comp_id') != -1:
continue
else:
continue
def parse_cif_header(self, b):
b = b.strip()
assert(b[0] == '_')
lines = b.split('\n_')
header = {}
for l in lines:
l = l.strip().replace('\n', '').replace('"', '')
if l[0] != '_':
l = '_' + l
assert(l.startswith('_chem'))
idx = l.find(' ')
k = l[:idx]
v = l[idx:].strip()
header[k] = v
assert(self.PDBCode.upper() == header['_chem_comp.id'])
for k in list(header.keys()):
assert(k and k[0] == '_')
assert(header[k].strip())
assert(self.PDBCode.upper() == header['_chem_comp.id'])
self.LigandCode = header['_chem_comp.name'].replace('"', '')
self.Formula = header['_chem_comp.formula'].replace('"', '')
self.MolecularWeight = header['_chem_comp.formula_weight']
self.LigandType = header['_chem_comp.type'].replace('"', '')
self.pdb_id = header['_chem_comp.pdbx_model_coordinates_db_code']
# Does this molecule have many atoms?
if '_chem_comp.formula' in header:
normalized_formula = header['_chem_comp.formula'].replace('?', '').strip()
if (len(normalized_formula.split()) > 1) or (len([c for c in normalized_formula if c.isdigit()]) > 1):
assert(self.has_many_atoms == True or self.has_many_atoms == None)
self.has_many_atoms = True
if not header['_chem_comp.id'] == header['_chem_comp.three_letter_code']:
raise Exception('Handle this case.')
if header.get('_chem_comp.pdbx_synonyms') != '?':
self.synonyms = [s for s in [s.strip() for s in header['_chem_comp.pdbx_synonyms'].replace('"', '').split(';')] if s.strip()]
@staticmethod
def parse_loop_section(b, expected_headers = None, header_map = {}):
columns = []
descriptors = []
lines = [l.strip() for l in b.split('\n') if l.strip()]
assert(lines[0] == 'loop_')
ligand_id = None
for l in lines[1:]:
if l[0] == '_':
assert(len(l.split()) == 1)
columns.append(l)
elif l[0] == ';':
# "A data item is assumed to be of data type text if it extends over more than one line, i.e. it starts and ends with a semicolon as the first character of a line."
assert(len(descriptors) > 0)
if descriptors[-1][-1] != ' ':
descriptors[-1] += ' '
descriptors[-1] += '"' + l[1:]
elif ligand_id and not(l.startswith(ligand_id)):
# This does not seem to be a valid case but it does occur e.g. the .cif entry for 0Z6 has a newline in the InChI record
assert(len(descriptors) > 0)
if descriptors[-1][-1] != ' ':
descriptors[-1] += ' '
descriptors[-1] += '' + l
else:
if ligand_id == None:
ligand_id = l.split()[0]
assert(l.startswith(ligand_id))
descriptors.append(l)
if expected_headers:
assert(columns == expected_headers)
num_columns = len(columns)
data = []
for d in descriptors:
tokens = []
current_token = ''
instr = False
for c in d:
if c == ' ':
if instr:
current_token += c
elif current_token:
tokens.append(current_token.strip())
current_token = ''
elif (c == '"'):
if instr:
tokens.append(current_token.strip())
current_token = ''
instr = False
else:
instr = True
else:
current_token += c
if current_token:
tokens.append(current_token.strip())
assert(len(columns) == len(tokens))
dct = {}
for x in range(len(columns)):
dct[header_map.get(columns[x], columns[x])] = tokens[x]
data.append(dct)
return data
def parse_cif_descriptor(self, b):
descriptors = Ligand.parse_loop_section(b,
expected_headers = ['_pdbx_chem_comp_descriptor.comp_id', '_pdbx_chem_comp_descriptor.type', '_pdbx_chem_comp_descriptor.program', '_pdbx_chem_comp_descriptor.program_version', '_pdbx_chem_comp_descriptor.descriptor'],
header_map = {
'_pdbx_chem_comp_descriptor.comp_id' : 'PDBCode',
'_pdbx_chem_comp_descriptor.type' : 'DescriptorType',
'_pdbx_chem_comp_descriptor.program' : 'Program',
'_pdbx_chem_comp_descriptor.program_version' : 'Version',
'_pdbx_chem_comp_descriptor.descriptor' : 'Descriptor',
})
inchi_record = [d for d in descriptors if d['DescriptorType'] == 'InChI']
if self.PDBCode != 'UNX':
assert(len(inchi_record) == 1)
inchi_key_record = [d for d in descriptors if d['DescriptorType'] == 'InChIKey']
assert(len(inchi_key_record) == 1)
self.InChI = inchi_record[0]['Descriptor']
self.InChIKey = inchi_key_record[0]['Descriptor']
for d in descriptors:
assert(d['PDBCode'] == self.PDBCode)
self.descriptors = descriptors
def parse_cif_identifier(self, b):
identifiers = Ligand.parse_loop_section(b,
expected_headers = ['_pdbx_chem_comp_identifier.comp_id', '_pdbx_chem_comp_identifier.type', '_pdbx_chem_comp_identifier.program', '_pdbx_chem_comp_identifier.program_version', '_pdbx_chem_comp_identifier.identifier'],
header_map = {
'_pdbx_chem_comp_identifier.comp_id' : 'PDBCode',
'_pdbx_chem_comp_identifier.type' : 'IDType',
'_pdbx_chem_comp_identifier.program' : 'Program',
'_pdbx_chem_comp_identifier.program_version' : 'Version',
'_pdbx_chem_comp_identifier.identifier' : 'Identifier',
})
self.identifiers = identifiers
for i in identifiers:
assert(i['PDBCode'] == self.PDBCode)
def parse_pdb_ligand_info(self, pdb_ligand_info):
'''This only parses the ligand type as all the other information should be in the .cif file. The XML file has
proper capitalization whereas the .cif file uses all caps for the ligand type.'''
mtchs = re.findall('(<ligand.*?</ligand>)', pdb_ligand_info, re.DOTALL)
for m in mtchs:
if m.upper().find('CHEMICALID="{0}"'.format(self.PDBCode.upper())) != -1:
ligand_type = re.match('<ligand.*?\stype="(.*?)".*?>', m, re.DOTALL)
if ligand_type:
self.LigandType = ligand_type.group(1)
def get_diagram(self):
'''In-memory usage:
1. write_file(out_file, self.Diagram, ftype = 'wb')
2. from io import BytesIO
from PIL import Image
file = BytesIO(self.Diagram)
img = Image.open(file)
'''
self.Diagram = retrieve_ligand_diagram(self.PDBCode)
|
class Ligand(object):
'''A class used to store ligand information from the RCSB. The most useful way to use this class is to call
Ligand.retrieve_data_from_rcsb to look up the ligand data in the RCSB.'''
def __init__(self, ligand_code):
pass
def __repr__(self):
pass
@classmethod
def retrieve_data_from_rcsb(cls, ligand_code, pdb_id = None, silent = True, cached_dir = None):
'''Retrieve and parse ligand data from the RCSB.'''
pass
def parse_cif(self, cif):
'''See http://www.iucr.org/__data/iucr/cif/standard/cifstd4.html, Acta Cryst. (1991). A47, 655-685.'''
pass
def parse_cif_header(self, b):
pass
@staticmethod
def parse_loop_section(b, expected_headers = None, header_map = {}):
pass
def parse_cif_descriptor(self, b):
pass
def parse_cif_identifier(self, b):
pass
def parse_pdb_ligand_info(self, pdb_ligand_info):
'''This only parses the ligand type as all the other information should be in the .cif file. The XML file has
proper capitalization whereas the .cif file uses all caps for the ligand type.'''
pass
def get_diagram(self):
'''In-memory usage:
1. write_file(out_file, self.Diagram, ftype = 'wb')
2. from io import BytesIO
from PIL import Image
file = BytesIO(self.Diagram)
img = Image.open(file)
'''
pass
| 13 | 5 | 29 | 2 | 25 | 2 | 7 | 0.1 | 1 | 3 | 0 | 1 | 8 | 19 | 10 | 10 | 316 | 40 | 253 | 77 | 239 | 26 | 218 | 75 | 206 | 18 | 1 | 4 | 70 |
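As the docstring suggests, the usual entry point for Ligand is the retrieve_data_from_rcsb classmethod, which fetches and parses the .cif record (plus the PDB ligand XML when a PDB ID is known) and downloads the diagram. A minimal sketch; 'GTP' is just an illustrative het code and the calls hit the RCSB over the network.

from klab.bio.ligand import Ligand

lig = Ligand.retrieve_data_from_rcsb('GTP', silent=True)
print(lig.LigandCode, lig.Formula, lig.MolecularWeight)  # parsed from the .cif header block
print(lig.InChI, lig.InChIKey)                           # parsed from the descriptor loop
print(repr(lig))                                         # the formatted summary defined by __repr__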
143,636 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/basics.py
|
klab.bio.basics.ChainMutation
|
class ChainMutation(Mutation):
'''Refines Mutation by adding the chain as a parameter of the equality function.'''
def __eq__(self, other):
if other == None:
return False
if self.WildTypeAA != other.WildTypeAA:
return False
if self.ResidueID != other.ResidueID:
return False
if self.MutantAA != other.MutantAA:
return False
if self.Chain != other.Chain:
return False
return True
def is_the_same_position(self, other):
if self.Chain != other.Chain:
return False
if self.ResidueID != other.ResidueID:
return False
if self.WildTypeAA != other.WildTypeAA:
raise Exception('The two residues have the same chain and residue ID but different wildtypes so they are incomparable.')
return True
|
class ChainMutation(Mutation):
'''Refines Mutation by adding the chain as a parameter of the equality function.'''
def __eq__(self, other):
pass
def is_the_same_position(self, other):
pass
| 3 | 1 | 10 | 0 | 10 | 0 | 5 | 0.05 | 1 | 1 | 0 | 1 | 2 | 0 | 2 | 8 | 25 | 3 | 21 | 3 | 18 | 1 | 21 | 3 | 18 | 6 | 3 | 1 | 10 |
143,637 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/alignment.py
|
klab.bio.alignment.SingleSequencePrinter
|
class SingleSequencePrinter(object):
'''A class for generating formatted strings for a single sequence in the same fashion as the MultipleSequenceAlignmentPrinter.'''
def __init__(self, sequence_name, sequence, sequence_tooltips = None):
# Make sure that if the sequence has tooltips then there is an injection between the residues and the tooltips (a tooltip
# may be None rather than a string)
if sequence_tooltips:
assert(len(str(sequence).replace('-', '')) == len(sequence_tooltips))
# Record the sequence length and the label width
self.sequence_length = len(str(sequence))
self.label_width = len(sequence_name)
self.sequence_name = sequence_name
self.sequence = sequence
self.sequence_tooltips = sequence_tooltips
def to_lines(self, width = 80, reversed = False, line_separator = '\n'): raise Exception('I have not written this function yet.')
def to_html(self, width = 80, header_separator = '_', add_tooltips = True, extra_tooltip_class = ''):
html = []
html.append('<div class="chain_alignment">')
sequence, sequence_name, sequence_tooltips = self.sequence, self.sequence_name, self.sequence_tooltips
# Turn off tooltips if requested
if not(add_tooltips):
sequence_tooltips = None
if self.label_width + 2 < width:
# headers is a list of pairs split by header_separator. If header_separator is not specified then the
# second element will be an empty string
if header_separator:
headers = sequence_name.split(header_separator)
else:
headers = [sequence_name, '']
num_residues_per_line = width - self.label_width
sequence_str = str(sequence)
# x iterates over a chunk of the sequence alignment; tooltip_index tracks the next tooltip for non-gap residues
tooltip_index = 0
for x in range(0, self.sequence_length, num_residues_per_line):
html.append('<div class="sequence_block">')
# residue_substring collects the marked-up residues for this chunk of the sequence
residue_substring = []
subsequence = sequence_str[x:x+num_residues_per_line]
# Iterate over all residues in the subsequence, adding a tooltip span where a tooltip is available
for z in range(len(subsequence)):
residue_type = subsequence[z]
tooltip = None
if sequence_tooltips and residue_type != '-':
tooltip = sequence_tooltips[tooltip_index]
tooltip_index += 1
if tooltip:
residue_substring.append('<span class="%s" title="%s %s">%s</span>' % (extra_tooltip_class, residue_type_1to3_map[residue_type], tooltip.strip(), residue_type))
elif sequence_tooltips and residue_type != '-':
residue_substring.append('<span class="%s missing_ATOMs" title="No ATOM records">%s</span>' % (extra_tooltip_class, residue_type))
else:
residue_substring.append(residue_type)
html.append('<div class="sequence_alignment_line sequence_alignment_line_%s"><span>%s</span><span>%s</span><span>%s</span></div>' % (headers[0], headers[0], headers[1], ''.join(residue_substring)))
html.append('</div>') # sequence_block
else:
raise Exception('The width (%d characters) is not large enough to display the sequence alignment.' % width)
html.append('</div>')
return '\n'.join(html).replace(' class=""', '')
|
class SingleSequencePrinter(object):
'''A class for generating formatted strings for a single sequence in the same fashion as the MultipleSequenceAlignmentPrinter.'''
def __init__(self, sequence_name, sequence, sequence_tooltips = None):
pass
def to_lines(self, width = 80, reversed = False, line_separator = '\n'):
pass
def to_html(self, width = 80, header_separator = '_', add_tooltips = True, extra_tooltip_class = ''):
pass
| 4 | 1 | 20 | 3 | 14 | 3 | 4 | 0.26 | 1 | 4 | 0 | 0 | 3 | 5 | 3 | 3 | 66 | 13 | 43 | 19 | 40 | 11 | 40 | 19 | 36 | 8 | 1 | 4 | 11 |
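A plain string works as the sequence argument to SingleSequencePrinter because the class only calls str() and len() on it. The sketch below exercises the tooltip-free branch of to_html; the name '1ABC_A' is illustrative and relies on the default '_' header separator.

from klab.bio.alignment import SingleSequencePrinter

sp = SingleSequencePrinter('1ABC_A', 'MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ')
html = sp.to_html(width=60, add_tooltips=False)
print(html)  # one <div class="sequence_block"> per 54-residue chunk (60 minus the 6-character label)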
143,638 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/deprecated/rosettahelper.py
|
klab.deprecated.rosettahelper.RosettaError
|
class RosettaError(Exception):
def __init__(self, task, ID):
self.task = task
self.ID = ID
def __str__(self):
return repr(self.ID + ' ' + self.task)
|
class RosettaError(Exception):
def __init__(self, task, ID):
pass
def __str__(self):
pass
| 3 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 2 | 2 | 2 | 12 | 6 | 0 | 6 | 5 | 3 | 0 | 6 | 5 | 3 | 1 | 3 | 0 | 2 |
143,639 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/MultiWorker.py
|
klab.MultiWorker.MultiWorker
|
class MultiWorker:
def __init__(self, func, n_cpu = None, reporter=None, task='multiprocessing', entries='jobs', cb_func=None):
if not reporter:
self.reporter = Reporter(task, entries=entries)
else:
self.reporter = reporter
self.func = func
if n_cpu:
self.pool = mp.Pool(n_cpu)
else:
self.pool = mp.Pool()
self.cb_func = cb_func
if self.cb_func:
self.queue = mp.Queue()
self.reader_p = mp.Process(target=self.reader_helper)
self.reader_p.daemon = True
self.reader_p.start()
else:
self.data = []
def reader_helper(self):
while True:
while not self.queue.empty():
msg = self.queue.get()
if (msg == '_QUEUEDONE'):
return # the sentinel must end the reader process; a plain break would only exit the inner loop and finishJobs() would hang on reader_p.join()
else:
args = msg[0]
kwargs = msg[1]
self.cb_func(*args, **kwargs)
time.sleep(2)
def list_cb(self, results):
self.reporter.increment_report()
self.data.append(results)
def queue_cb(self, results):
self.queue.put(results)
self.reporter.increment_report()
def addJob(self, argsTuple):
if self.cb_func:
self.pool.apply_async(self.func, argsTuple, callback=self.queue_cb)
else:
self.pool.apply_async(self.func, argsTuple, callback=self.list_cb)
def finishJobs(self):
self.pool.close()
self.pool.join()
if self.cb_func:
self.queue.put('_QUEUEDONE')
print('Pool finished, waiting to process results')
self.reader_p.join()
self.reporter.done()
|
class MultiWorker:
def __init__(self, func, n_cpu = None, reporter=None, task='multiprocessing', entries='jobs', cb_func=None):
pass
def reader_helper(self):
pass
def list_cb(self, results):
pass
def queue_cb(self, results):
pass
def addJob(self, argsTuple):
pass
def finishJobs(self):
pass
| 7 | 0 | 8 | 0 | 8 | 0 | 2 | 0 | 0 | 1 | 1 | 0 | 6 | 7 | 6 | 6 | 51 | 0 | 51 | 17 | 44 | 0 | 46 | 17 | 39 | 4 | 0 | 3 | 14 |
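MultiWorker wraps a multiprocessing.Pool together with a Reporter. Without cb_func, results accumulate in .data through list_cb. A sketch, assuming the worker function lives at module level so the pool can pickle it.

from klab.MultiWorker import MultiWorker

def square(x):
    return x * x

if __name__ == '__main__':        # needed on spawn-based platforms so workers can import square
    w = MultiWorker(square, n_cpu=4, task='squaring', entries='numbers')
    for i in range(100):
        w.addJob((i,))            # the args tuple is passed straight through to pool.apply_async
    w.finishJobs()                # closes and joins the pool, then reports completion
    print(sorted(w.data)[:5])     # results arrive in completion order, hence the sort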
143,640 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/Reporter.py
|
klab.Reporter.Reporter
|
class Reporter:
def __init__( self, task, entries = 'files', print_output = True, eol_char = '\r' ):
self._lock = threading.Lock()
self.print_output = print_output
self.start = datetime.datetime.now()
self.entries = entries
self.lastreport = self.start
self.task = task
self.report_interval = datetime.timedelta( seconds = 1 ) # Interval to print progress
self.n = 0
self.completion_time = None
if self.print_output:
print('\nStarting ' + task)
self.total_count = None # Total tasks to be processed
self.maximum_output_string_length = 0
self.rolling_est_total_time = collections.deque( maxlen = 50 )
self.kv_callback_results = {}
self.list_results = []
self.eol_char = eol_char
def set_total_count(self, x):
self.total_count = x
self.rolling_est_total_time = collections.deque( maxlen = max(1, int( .05 * x )) )
def decrement_total_count(self):
if self.total_count:
self.total_count -= 1
def report(self, n):
with self._lock:
self.n = n
time_now = datetime.datetime.now()
if self.print_output and self.lastreport < (time_now - self.report_interval):
self.lastreport = time_now
if self.total_count:
percent_done = float(self.n) / float(self.total_count)
est_total_time_seconds = ts(time_now - self.start) * (1.0 / percent_done)
self.rolling_est_total_time.append( est_total_time_seconds )
est_total_time = datetime.timedelta( seconds = mean(self.rolling_est_total_time) )
time_remaining = est_total_time - (time_now - self.start)
eta = time_now + time_remaining
time_remaining_str = 'ETA: %s Est. time remaining: ' % eta.strftime("%Y-%m-%d %H:%M:%S")
time_remaining_str += str( datetime.timedelta( seconds = int(ts(time_remaining)) ) )
output_string = " Processed: %d %s (%.1f%%) %s" % (n, self.entries, percent_done*100.0, time_remaining_str)
else:
output_string = " Processed: %d %s" % (n, self.entries)
output_string += self.eol_char
if len(output_string) > self.maximum_output_string_length:
self.maximum_output_string_length = len(output_string)
elif len(output_string) < self.maximum_output_string_length:
output_string = output_string.ljust(self.maximum_output_string_length)
sys.stdout.write( output_string )
sys.stdout.flush()
def increment_report(self):
self.report(self.n + 1)
def increment_report_callback(self, cb_value):
self.increment_report()
def increment_report_keyval_callback(self, kv_pair):
key, value = kv_pair
self.kv_callback_results[key] = value
self.increment_report()
def increment_report_list_callback(self, new_list_items):
self.list_results.extend(new_list_items)
self.increment_report()
def decrement_report(self):
self.report(self.n - 1)
def add_to_report(self, x):
self.report(self.n + x)
def done(self):
self.completion_time = datetime.datetime.now()
if self.print_output:
print('Done %s, processed %d %s, took %s\n' % (self.task, self.n, self.entries, self.completion_time-self.start))
def elapsed_time(self):
if self.completion_time:
return self.completion_time - self.start
else:
return datetime.datetime.now() - self.start # self.start is a datetime, so subtracting it from time.time() would raise a TypeError
|
class Reporter:
def __init__( self, task, entries = 'files', print_output = True, eol_char = '\r' ):
pass
def set_total_count(self, x):
pass
def decrement_total_count(self):
pass
def report(self, n):
pass
def increment_report(self):
pass
def increment_report_callback(self, cb_value):
pass
def increment_report_keyval_callback(self, kv_pair):
pass
def increment_report_list_callback(self, new_list_items):
pass
def decrement_report(self):
pass
def add_to_report(self, x):
pass
def done(self):
pass
def elapsed_time(self):
pass
| 13 | 0 | 6 | 0 | 6 | 0 | 2 | 0.03 | 0 | 5 | 0 | 0 | 12 | 15 | 12 | 12 | 89 | 15 | 74 | 37 | 61 | 2 | 71 | 37 | 58 | 5 | 0 | 3 | 20 |
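Reporter prints throttled progress (at most once per report_interval) and, once set_total_count has been called, a rolling ETA. A minimal sketch; the sleep stands in for real work.

import time
from klab.Reporter import Reporter

r = Reporter('parsing structures', entries='structures')
r.set_total_count(250)
for i in range(250):
    time.sleep(0.01)      # stand-in for real work
    r.increment_report()  # thread-safe via the internal lock
r.done()                  # prints the task name, count, and elapsed time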
143,641 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/RosettaProtocols.py
|
klab.RosettaProtocols.References
|
class References:
def __init__(self):
self.refs = {
"DavisEtAl:2006" : 'Davis IW, Arendall III WB, Richardson DC, Richardson JS. <i>The Backrub Motion: How Protein Backbone Shrugs When a Sidechain Dances</i>.<br><a href="http://dx.doi.org/10.1016/j.str.2005.10.007" style="font-size: 10pt">Structure, Volume 14, Issue 2, 2<sup>nd</sup> February 2006, Pages 265-274</a>',
"SmithKortemme:2008" : 'Smith CA, Kortemme T. <i>Backrub-Like Backbone Simulation Recapitulates Natural Protein Conformational Variability and Improves Mutant Side-Chain Prediction</i>.<br><a href="http://dx.doi.org/10.1016/j.jmb.2008.05.023" style="font-size: 10pt"> Journal of Molecular Biology, Volume 380, Issue 4, 18<sup>th</sup> July 2008, Pages 742-756 </a>',
"HumphrisKortemme:2008" : 'Humphris EL, Kortemme T. <i>Prediction of Protein-Protein Interface Sequence Diversity using Flexible Backbone Computational Protein Design</i>.<br><a href="http://dx.doi.org/10.1016/j.str.2008.09.012" style="font-size: 10pt"> Structure, Volume 16, Issue 12, 12<sup>th</sup> December 2008, Pages 1777-1788</a>',
"FriedlandEtAl:2009" : 'Friedland GD, Lakomek NA, Griesinger C, Meiler J, Kortemme T. <i>A Correspondence between Solution-State Dynamics of an Individual Protein and the Sequence and Conformational Diversity of its Family</i>.<br><a href="http://dx.doi.org/10.1371/journal.pcbi.1000393" style="font-size: 10pt"> PLoS Computational Biology, May 2009</a>',
"LauckEtAl:2010" : 'Lauck F, Smith CA, Friedland GD, Humphris EL, Kortemme T. <i>RosettaBackrub - A web server for flexible backbone protein structure modeling and design</i>.<br><a href="http://dx.doi.org/10.1093/nar/gkq369" style="font-size: 10pt">Nucleic Acids Research, Volume 38, Issue suppl. 2, Pages W569-W575</a>',
"SmithKortemme:2010" : 'Smith CA, Kortemme T. <i>Structure-Based Prediction of the Peptide Sequence Space Recognized by Natural and Synthetic PDZ Domains</i>.<br><a href="http://dx.doi.org/10.1016/j.jmb.2010.07.032" style="font-size: 10pt">Journal of Molecular Biology, Volume 402, Issue 2, 17<sup>th</sup> September 2010, Pages 460-474</a>',
"SmithKortemme:2011" : 'Smith CA, Kortemme T. <i>Predicting the Tolerated Sequences for Proteins and Protein Interfaces Using Rosetta Backrub Flexible Backbone Design</i>.<br><a href="http://dx.doi.org/10.1371/journal.pone.0020451" style="font-size: 10pt">PLoS ONE 6(7): e20451. doi:10.1371/journal.pone.0020451</a>',
}
# todo: This was hacked in. Really the refs table above should be separated out to look as below using tuples
self.refsDOIs = {
"DavisEtAl:2006" : ('Davis et al.', 'http://dx.doi.org/10.1016/j.str.2005.10.007'),
"SmithKortemme:2008" : ('Smith and Kortemme, 2008', 'http://dx.doi.org/10.1016/j.jmb.2008.05.023'),
"HumphrisKortemme:2008" : ('Humphris and Kortemme, 2008', 'http://dx.doi.org/10.1016/j.str.2008.09.012'),
"FriedlandEtAl:2009" : ('Friedland et al., 2008', 'http://dx.doi.org/10.1371/journal.pcbi.1000393'),
"LauckEtAl:2010" : ('Lauck et al., 2010', 'http://dx.doi.org/10.1093/nar/gkq369'),
"SmithKortemme:2010" : ('Smith and Kortemme, 2010', 'http://dx.doi.org/10.1016/j.jmb.2010.07.032'),
"SmithKortemme:2011" : ('Smith and Kortemme, 2011', 'http://dx.doi.org/10.1371/journal.pone.0020451'),
}
def __getitem__(self, index):
return self.refs[index]
def iteritems(self):
return iter(self.refs.items())
def getReferences(self):
i = 0
refIDs = {}
refs = sorted(self.refs.items())
for reftag, reference in refs:
i += 1
refIDs[reftag] = i
return refIDs
|
class References:
def __init__(self):
pass
def __getitem__(self, index):
pass
def iteritems(self):
pass
def getReferences(self):
pass
| 5 | 0 | 9 | 1 | 8 | 1 | 1 | 0.06 | 0 | 0 | 0 | 0 | 4 | 2 | 4 | 4 | 42 | 6 | 34 | 12 | 29 | 2 | 18 | 12 | 13 | 2 | 0 | 1 | 5 |
143,642 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/RosettaProtocols.py
|
klab.RosettaProtocols.RosettaProtocol
|
class RosettaProtocol:
def __init__(self, name, dbname):
self.name = name
self.dbname = dbname
self.group = None
self.datadirfunction = None
self.submitfunction = None
self.binaries = None
self.references = None
self.nos = None
self.StoreFunction = None
self.resultsfunction = None
self.startFunction = None
self.checkFunction = None
self.endFunction = None
self.description = None
self.progressDisplayHeight = None
# Setters and getters
def setBackendFunctions(self, startFunction, checkFunction, endFunction):
self.startFunction = startFunction
self.checkFunction = checkFunction
self.endFunction = endFunction
def getDescription(self):
return self.description
def setDescription(self, desc):
self.description = desc
def setStoreFunction(self, storefunction):
self.StoreFunction = storefunction
#
def setSubmitFunction(self, submitfunction):
self.submitfunction = submitfunction
def getSubmitfunction(self):
return self.submitfunction
#
def setShowResultsFunction(self, resultsfunction):
self.resultsfunction = resultsfunction
def getShowResultsFunction(self):
return self.resultsfunction
#
def setBinaries(self, *binaries):
self.binaries = binaries
#
def setReferences(self, *references):
self.references = references
#
def setNumStructures(self, minimum, recommended, maximum):
self.nos = (minimum, recommended, maximum)
def getNumStructures(self):
return self.nos
#
def setGroup(self, group):
self.group = group
#
def setDataDirFunction(self, datadirfunction):
self.datadirfunction = datadirfunction
def getDataDirFunction(self):
return self.datadirfunction
#
def getName(self):
return self.name
#
def getReferences(self):
return self.references
# Utility functions
def canUseMini(self):
miniAvailable = False
for binary in self.binaries:
miniAvailable = miniAvailable or RosettaBinaries[binary]["mini"]
return miniAvailable
|
class RosettaProtocol:
def __init__(self, name, dbname):
pass
def setBackendFunctions(self, startFunction, checkFunction, endFunction):
pass
def getDescription(self):
pass
def setDescription(self, desc):
pass
def setStoreFunction(self, storefunction):
pass
def setSubmitFunction(self, submitfunction):
pass
def getSubmitfunction(self):
pass
def setShowResultsFunction(self, resultsfunction):
pass
def getShowResultsFunction(self):
pass
def setBinaries(self, *binaries):
pass
def setReferences(self, *references):
pass
def setNumStructures(self, minimum, recommended, maximum):
pass
def getNumStructures(self):
pass
def setGroup(self, group):
pass
def setDataDirFunction(self, datadirfunction):
pass
def getDataDirFunction(self):
pass
def getName(self):
pass
def getReferences(self):
pass
def canUseMini(self):
pass
| 20 | 0 | 3 | 0 | 3 | 0 | 1 | 0.19 | 0 | 0 | 0 | 0 | 19 | 15 | 19 | 19 | 77 | 8 | 58 | 37 | 38 | 11 | 58 | 37 | 38 | 2 | 0 | 1 | 20 |
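RosettaProtocol is a plain settings holder wired up through setters; only canUseMini has logic, and it consults the module-level RosettaBinaries dict, so binary keys are omitted below. The protocol and database names in this sketch are hypothetical.

from klab.RosettaProtocols import RosettaProtocol

protocol = RosettaProtocol('Point Mutation', 'point_mutation')
protocol.setDescription('Models a single point mutation with backrub sampling.')
protocol.setNumStructures(2, 10, 50)  # (minimum, recommended, maximum)
minimum, recommended, maximum = protocol.getNumStructures()
print(protocol.getName(), recommended)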
143,643 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/alignment.py
|
klab.bio.alignment.BasePDBChainMapper
|
class BasePDBChainMapper(object):
def get_sequence_alignment_strings(self, reversed = True, width = 80, line_separator = '\n'):
raise Exception('Implement this function.')
def get_sequence_alignment_strings_as_html(self, reversed = True, width = 80, line_separator = '\n', extra_tooltip_class = ''):
raise Exception('Implement this function.')
|
class BasePDBChainMapper(object):
def get_sequence_alignment_strings(self, reversed = True, width = 80, line_separator = '\n'):
pass
def get_sequence_alignment_strings_as_html(self, reversed = True, width = 80, line_separator = '\n', extra_tooltip_class = ''):
pass
| 3 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 1 | 0 | 2 | 2 | 0 | 2 | 2 | 5 | 0 | 5 | 3 | 2 | 0 | 5 | 3 | 2 | 1 | 1 | 0 | 2 |
143,644 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/alignment.py
|
klab.bio.alignment.ChainMatchingException
|
class ChainMatchingException(Exception): pass
|
class ChainMatchingException(Exception):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
143,645 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/alignment.py
|
klab.bio.alignment.DecoyChainMapper
|
class DecoyChainMapper(PipelinePDBChainMapper):
'''A convenience class for the special case where we are mapping specifically between structures which are based on the
same protein but may include a small number of mutations.
Use cases:
- creating PyMOL sessions for backrub ensembles;
- creating PyMOL sessions for wildtype and mutant ensembles;
- creating PyMOL sessions for mutants/designs from the same scaffold.
'''
def __init__(self, cut_off = 60.0, use_seqres_sequences_if_possible = True, strict = True):
self.cut_off = cut_off
self.use_seqres_sequences_if_possible = use_seqres_sequences_if_possible
self.strict = strict
self.structures = []
self.fixed = False
self.structure_names = set()
def finalize_colors(self):
pass
def add(self, pdb_object, structure_name, chain_seed_color = None, backbone_color = None, sidechain_color = None, backbone_display = 'cartoon', sidechain_display = 'sticks'):
if structure_name in self.structure_names:
raise Exception('Structure names must be unique. The name {0} has already been used.'.format(structure_name))
self.structure_names.add(structure_name)
self.structures.append(PyMOLStructure(pdb_object, structure_name, chain_seed_color = chain_seed_color, backbone_color = backbone_color, sidechain_color = sidechain_color,
backbone_display = backbone_display, sidechain_display = sidechain_display, residues_of_interest = []))
def fix(self):
if self.fixed:
raise Exception('This object has already been aligned. It cannot be aligned again.')
super(DecoyChainMapper, self).__init__(
[s.pdb_object for s in self.structures],
[s.structure_name for s in self.structures],
cut_off = self.cut_off, use_seqres_sequences_if_possible = self.use_seqres_sequences_if_possible, strict = self.strict)
self.fixed = True
def generate_pymol_session(self, pymol_executable = 'pymol', settings = {}):
''' Generates the PyMOL session for the scaffold, model, and design structures.
Returns this session and the script which generated it.'''
if not self.fixed:
self.fix()
b = BatchBuilder(pymol_executable = pymol_executable)
for s in self.structures:
s.add_residues_of_interest(self.get_differing_atom_residue_ids(s.structure_name))
PSE_files = b.run(MultiStructureBuilder, [self.structures], settings = settings)
return PSE_files[0], b.PSE_scripts[0]
|
class DecoyChainMapper(PipelinePDBChainMapper):
'''A convenience class for the special case where we are mapping specifically between structures which are based on the
same protein but may include a small number of mutations.
Use cases:
- creating PyMOL sessions for backrub ensembles;
- creating PyMOL sessions for wildtype and mutant ensembles;
- creating PyMOL sessions for mutants/designs from the same scaffold.
'''
def __init__(self, cut_off = 60.0, use_seqres_sequences_if_possible = True, strict = True):
pass
def finalize_colors(self):
pass
def add(self, pdb_object, structure_name, chain_seed_color = None, backbone_color = None, sidechain_color = None, backbone_display = 'cartoon', sidechain_display = 'sticks'):
pass
def fix(self):
pass
def generate_pymol_session(self, pymol_executable = 'pymol', settings = {}):
''' Generates the PyMOL session for the scaffold, model, and design structures.
Returns this session and the script which generated it.'''
pass
| 6 | 2 | 8 | 1 | 6 | 0 | 2 | 0.28 | 1 | 6 | 3 | 0 | 5 | 6 | 5 | 17 | 57 | 16 | 32 | 15 | 26 | 9 | 28 | 15 | 22 | 3 | 3 | 1 | 9 |
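The intended flow for DecoyChainMapper is add() once per structure, then generate_pymol_session(), which calls fix() implicitly to run the alignment. A sketch under stated assumptions: the PDB class lives in klab.bio.pdb as seen elsewhere in this dump, but the from_filepath constructor and the file names are assumptions, not taken from this record.

from klab.bio.alignment import DecoyChainMapper
from klab.bio.pdb import PDB  # assumed to offer a from_filepath constructor

mapper = DecoyChainMapper(cut_off=60.0)
mapper.add(PDB.from_filepath('wildtype.pdb'), 'wildtype', chain_seed_color='blue')
mapper.add(PDB.from_filepath('mutant.pdb'), 'mutant', chain_seed_color='red')
session, script = mapper.generate_pymol_session(pymol_executable='pymol')  # PSE bytes plus the generating script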
143,646 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/pdb.py
|
klab.bio.pdb.MissingRecordsException
|
class MissingRecordsException(Exception): pass
|
class MissingRecordsException(Exception):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
143,647 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/pdb.py
|
klab.bio.pdb.JRNL
|
class JRNL(object):
def __init__(self, lines):
if not lines:
raise Exception("Could not parse JRNL: No lines to parse.")
self.d = {}
self.d["lines"] = lines
self.parse_REF()
self.parse_REFN()
self.parse_DOI()
def get_info(self):
return self.d
def parse_REF(self):
lines = [line for line in self.d["lines"] if line.startswith("JRNL REF ")]
mainline = lines[0]
if not mainline[19:34].startswith("TO BE PUBLISHED"):
numContinuationLines = mainline[16:18].strip()
if numContinuationLines:
numContinuationLines = int(numContinuationLines)
if not numContinuationLines + 1 == len(lines):
raise Exception("There are %d REF lines but the continuation field (%d) suggests there should be %d." % (len(lines), numContinuationLines, numContinuationLines + 1))
else:
numContinuationLines = 0
pubName = [mainline[19:47].rstrip()]
volumeV = mainline[49:51].strip()
volume = mainline[51:55].strip()
if volumeV:
assert(volume)
page = mainline[56:61].strip()
year = mainline[62:66]
# Count the number of periods, discounting certain abbreviations
plines = []
for line in lines:
pline = line.replace("SUPPL.", "")
pline = pline.replace("V.", "")
pline = pline.replace("NO.", "")
pline = pline.replace("PT.", "")
plines.append(pline)
numperiods = len([1 for c in "".join(plines) if c == "."]) # string.join was removed in Python 3; the rest of this class already uses Python 3 idioms
# Reconstruct the publication name
for n in range(1, numContinuationLines + 1):
line = lines[n]
pubNameField = line[19:47]
lastFieldCharacter = pubNameField[-1]
lastLineCharacter = line.strip()[-1]
txt = pubNameField.rstrip()
pubName.append(txt)
if lastFieldCharacter == "-" or lastFieldCharacter == "." or lastLineCharacter == "-" or (lastLineCharacter == "." and numperiods == 1):
pubName.append(" ")
pubName = "".join(pubName)
self.d["REF"] = {
"pubName" : pubName,
"volume" : volume or None,
"page" : page or None,
"year" : year or None,
}
self.d["published"] = True
else:
self.d["REF"] = {}
self.d["published"] = False
def parse_REFN(self):
PRELUDE = "JRNL REFN"
if not self.d.get("published"):
self.parse_REF()
isPublished = self.d["published"]
lines = [line for line in self.d["lines"] if line.startswith(PRELUDE)]
if not len(lines) == 1:
raise Exception("Exactly one JRNL REF line expected in the PDB title.")
line = lines[0]
if isPublished:
type = line[35:39]
ID = line[40:65].strip()
if type != "ISSN" and type != "ESSN":
if type.strip():
raise Exception("Invalid type for REFN field (%s)" % type)
if not type.strip():
pass # e.g. 1BXI has a null reference
else:
self.d["REFN"] = {"type" : type, "ID" : ID}
else:
assert(line.strip() == PRELUDE)
def parse_DOI(self):
lines = [line for line in self.d["lines"] if line.startswith("JRNL DOI ")]
if lines:
line = "".join([line[19:79].strip() for line in lines])
if line.lower().startswith("doi:"):
self.d["DOI"] = ["%s" % line[4:]]
else:
self.d["DOI"] = line
else:
self.d["DOI"] = None
|
class JRNL(object):
def __init__(self, lines):
pass
def get_info(self):
pass
def parse_REF(self):
pass
def parse_REFN(self):
pass
def parse_DOI(self):
pass
| 6 | 0 | 19 | 1 | 17 | 1 | 4 | 0.03 | 1 | 3 | 0 | 0 | 5 | 1 | 5 | 5 | 103 | 13 | 88 | 29 | 82 | 3 | 77 | 29 | 71 | 8 | 1 | 3 | 21 |
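JRNL expects the raw JRNL records from a PDB header and exposes the parsed fields through get_info(). A sketch; '1abc.pdb' is a placeholder path for any PDB file with a journal reference.

from klab.bio.pdb import JRNL

with open('1abc.pdb') as f:
    jrnl_lines = [l.rstrip('\n') for l in f if l.startswith('JRNL')]

j = JRNL(jrnl_lines)
info = j.get_info()
print(info['published'], info.get('DOI'))  # set by parse_REF and parse_DOI
print(info.get('REF', {}).get('pubName'))  # the reconstructed publication name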