id (int64, 0-843k) | repository_name (string, length 7-55) | file_path (string, length 9-332) | class_name (string, length 3-290) | human_written_code (string, length 12-4.36M) | class_skeleton (string, length 19-2.2M) | total_program_units (int64, 1-9.57k) | total_doc_str (int64, 0-4.2k) | AvgCountLine (float64, 0-7.89k) | AvgCountLineBlank (float64, 0-300) | AvgCountLineCode (float64, 0-7.89k) | AvgCountLineComment (float64, 0-7.89k) | AvgCyclomatic (float64, 0-130) | CommentToCodeRatio (float64, 0-176) | CountClassBase (float64, 0-48) | CountClassCoupled (float64, 0-589) | CountClassCoupledModified (float64, 0-581) | CountClassDerived (float64, 0-5.37k) | CountDeclInstanceMethod (float64, 0-4.2k) | CountDeclInstanceVariable (float64, 0-299) | CountDeclMethod (float64, 0-4.2k) | CountDeclMethodAll (float64, 0-4.2k) | CountLine (float64, 1-115k) | CountLineBlank (float64, 0-9.01k) | CountLineCode (float64, 0-94.4k) | CountLineCodeDecl (float64, 0-46.1k) | CountLineCodeExe (float64, 0-91.3k) | CountLineComment (float64, 0-27k) | CountStmt (float64, 1-93.2k) | CountStmtDecl (float64, 0-46.1k) | CountStmtExe (float64, 0-90.2k) | MaxCyclomatic (float64, 0-759) | MaxInheritanceTree (float64, 0-16) | MaxNesting (float64, 0-34) | SumCyclomatic (float64, 0-6k) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
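Each record below spans several lines: the first six cells are metadata and source text (id, repository_name, file_path, class_name, human_written_code, class_skeleton), and the trailing pipe-separated row holds the 29 numeric metrics in the order given in the header. A minimal sketch of how a table with this schema could be loaded and sliced with the `datasets` library follows; the dataset identifier used here is a placeholder, not the real name of this dataset.

```python
# Minimal sketch, assuming this preview comes from a Hugging Face-style dataset
# with the columns listed in the header. "org/python-class-metrics" is a
# placeholder identifier, not the dataset's real name.
from datasets import load_dataset

ds = load_dataset("org/python-class-metrics", split="train")

# Keep only the swigibpy classes, mirroring the rows shown below.
swigibpy = ds.filter(lambda row: row["repository_name"] == "Komnomnomnom/swigibpy")

for row in swigibpy:
    print(row["id"], row["class_name"], row["CountLineCode"], row["CommentToCodeRatio"])
```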
143,448 |
Komnomnomnom/swigibpy
|
Komnomnomnom_swigibpy/swigibpy.py
|
swigibpy.EClientSocketBase
|
class EClientSocketBase(EClient):
"""Proxy of C++ EClientSocketBase class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
__swig_destroy__ = _swigibpy.delete_EClientSocketBase
def eConnect(self, host, port, clientId=0, extraAuth=False):
"""eConnect(EClientSocketBase self, char const * host, unsigned int port, int clientId=0, bool extraAuth=False) -> bool"""
return _swigibpy.EClientSocketBase_eConnect(self, host, port, clientId, extraAuth)
def eDisconnect(self):
"""eDisconnect(EClientSocketBase self)"""
return _swigibpy.EClientSocketBase_eDisconnect(self)
def clientId(self):
"""clientId(EClientSocketBase self) -> int"""
return _swigibpy.EClientSocketBase_clientId(self)
def isConnected(self):
"""isConnected(EClientSocketBase self) -> bool"""
return _swigibpy.EClientSocketBase_isConnected(self)
def isInBufferEmpty(self):
"""isInBufferEmpty(EClientSocketBase self) -> bool"""
return _swigibpy.EClientSocketBase_isInBufferEmpty(self)
def isOutBufferEmpty(self):
"""isOutBufferEmpty(EClientSocketBase self) -> bool"""
return _swigibpy.EClientSocketBase_isOutBufferEmpty(self)
def serverVersion(self):
"""serverVersion(EClientSocketBase self) -> int"""
return _swigibpy.EClientSocketBase_serverVersion(self)
def TwsConnectionTime(self):
"""TwsConnectionTime(EClientSocketBase self) -> IBString"""
return _swigibpy.EClientSocketBase_TwsConnectionTime(self)
def reqMktData(self, id, contract, genericTicks, snapshot, mktDataOptions):
"""reqMktData(EClientSocketBase self, TickerId id, Contract contract, IBString const & genericTicks, bool snapshot, TagValueListSPtr const & mktDataOptions)"""
return _swigibpy.EClientSocketBase_reqMktData(self, id, contract, genericTicks, snapshot, mktDataOptions)
def cancelMktData(self, id):
"""cancelMktData(EClientSocketBase self, TickerId id)"""
return _swigibpy.EClientSocketBase_cancelMktData(self, id)
def placeOrder(self, id, contract, order):
"""placeOrder(EClientSocketBase self, OrderId id, Contract contract, Order order)"""
return _swigibpy.EClientSocketBase_placeOrder(self, id, contract, order)
def cancelOrder(self, id):
"""cancelOrder(EClientSocketBase self, OrderId id)"""
return _swigibpy.EClientSocketBase_cancelOrder(self, id)
def reqOpenOrders(self):
"""reqOpenOrders(EClientSocketBase self)"""
return _swigibpy.EClientSocketBase_reqOpenOrders(self)
def reqAccountUpdates(self, subscribe, acctCode):
"""reqAccountUpdates(EClientSocketBase self, bool subscribe, IBString const & acctCode)"""
return _swigibpy.EClientSocketBase_reqAccountUpdates(self, subscribe, acctCode)
def reqExecutions(self, reqId, filter):
"""reqExecutions(EClientSocketBase self, int reqId, ExecutionFilter filter)"""
return _swigibpy.EClientSocketBase_reqExecutions(self, reqId, filter)
def reqIds(self, numIds):
"""reqIds(EClientSocketBase self, int numIds)"""
return _swigibpy.EClientSocketBase_reqIds(self, numIds)
def checkMessages(self):
"""checkMessages(EClientSocketBase self) -> bool"""
return _swigibpy.EClientSocketBase_checkMessages(self)
def reqContractDetails(self, reqId, contract):
"""reqContractDetails(EClientSocketBase self, int reqId, Contract contract)"""
return _swigibpy.EClientSocketBase_reqContractDetails(self, reqId, contract)
def reqMktDepth(self, tickerId, contract, numRows, mktDepthOptions):
"""reqMktDepth(EClientSocketBase self, TickerId tickerId, Contract contract, int numRows, TagValueListSPtr const & mktDepthOptions)"""
return _swigibpy.EClientSocketBase_reqMktDepth(self, tickerId, contract, numRows, mktDepthOptions)
def cancelMktDepth(self, tickerId):
"""cancelMktDepth(EClientSocketBase self, TickerId tickerId)"""
return _swigibpy.EClientSocketBase_cancelMktDepth(self, tickerId)
def reqNewsBulletins(self, allMsgs):
"""reqNewsBulletins(EClientSocketBase self, bool allMsgs)"""
return _swigibpy.EClientSocketBase_reqNewsBulletins(self, allMsgs)
def cancelNewsBulletins(self):
"""cancelNewsBulletins(EClientSocketBase self)"""
return _swigibpy.EClientSocketBase_cancelNewsBulletins(self)
def setServerLogLevel(self, level):
"""setServerLogLevel(EClientSocketBase self, int level)"""
return _swigibpy.EClientSocketBase_setServerLogLevel(self, level)
def reqAutoOpenOrders(self, bAutoBind):
"""reqAutoOpenOrders(EClientSocketBase self, bool bAutoBind)"""
return _swigibpy.EClientSocketBase_reqAutoOpenOrders(self, bAutoBind)
def reqAllOpenOrders(self):
"""reqAllOpenOrders(EClientSocketBase self)"""
return _swigibpy.EClientSocketBase_reqAllOpenOrders(self)
def reqManagedAccts(self):
"""reqManagedAccts(EClientSocketBase self)"""
return _swigibpy.EClientSocketBase_reqManagedAccts(self)
def requestFA(self, pFaDataType):
"""requestFA(EClientSocketBase self, faDataType pFaDataType)"""
return _swigibpy.EClientSocketBase_requestFA(self, pFaDataType)
def replaceFA(self, pFaDataType, cxml):
"""replaceFA(EClientSocketBase self, faDataType pFaDataType, IBString const & cxml)"""
return _swigibpy.EClientSocketBase_replaceFA(self, pFaDataType, cxml)
def reqHistoricalData(self, id, contract, endDateTime, durationStr, barSizeSetting, whatToShow, useRTH, formatDate, chartOptions):
"""reqHistoricalData(EClientSocketBase self, TickerId id, Contract contract, IBString const & endDateTime, IBString const & durationStr, IBString const & barSizeSetting, IBString const & whatToShow, int useRTH, int formatDate, TagValueListSPtr const & chartOptions)"""
return _swigibpy.EClientSocketBase_reqHistoricalData(self, id, contract, endDateTime, durationStr, barSizeSetting, whatToShow, useRTH, formatDate, chartOptions)
def exerciseOptions(self, tickerId, contract, exerciseAction, exerciseQuantity, account, override):
"""exerciseOptions(EClientSocketBase self, TickerId tickerId, Contract contract, int exerciseAction, int exerciseQuantity, IBString const & account, int override)"""
return _swigibpy.EClientSocketBase_exerciseOptions(self, tickerId, contract, exerciseAction, exerciseQuantity, account, override)
def cancelHistoricalData(self, tickerId):
"""cancelHistoricalData(EClientSocketBase self, TickerId tickerId)"""
return _swigibpy.EClientSocketBase_cancelHistoricalData(self, tickerId)
def reqRealTimeBars(self, id, contract, barSize, whatToShow, useRTH, realTimeBarsOptions):
"""reqRealTimeBars(EClientSocketBase self, TickerId id, Contract contract, int barSize, IBString const & whatToShow, bool useRTH, TagValueListSPtr const & realTimeBarsOptions)"""
return _swigibpy.EClientSocketBase_reqRealTimeBars(self, id, contract, barSize, whatToShow, useRTH, realTimeBarsOptions)
def cancelRealTimeBars(self, tickerId):
"""cancelRealTimeBars(EClientSocketBase self, TickerId tickerId)"""
return _swigibpy.EClientSocketBase_cancelRealTimeBars(self, tickerId)
def cancelScannerSubscription(self, tickerId):
"""cancelScannerSubscription(EClientSocketBase self, int tickerId)"""
return _swigibpy.EClientSocketBase_cancelScannerSubscription(self, tickerId)
def reqScannerParameters(self):
"""reqScannerParameters(EClientSocketBase self)"""
return _swigibpy.EClientSocketBase_reqScannerParameters(self)
def reqScannerSubscription(self, tickerId, subscription, scannerSubscriptionOptions):
"""reqScannerSubscription(EClientSocketBase self, int tickerId, ScannerSubscription subscription, TagValueListSPtr const & scannerSubscriptionOptions)"""
return _swigibpy.EClientSocketBase_reqScannerSubscription(self, tickerId, subscription, scannerSubscriptionOptions)
def reqCurrentTime(self):
"""reqCurrentTime(EClientSocketBase self)"""
return _swigibpy.EClientSocketBase_reqCurrentTime(self)
def reqFundamentalData(self, reqId, arg3, reportType):
"""reqFundamentalData(EClientSocketBase self, TickerId reqId, Contract arg3, IBString const & reportType)"""
return _swigibpy.EClientSocketBase_reqFundamentalData(self, reqId, arg3, reportType)
def cancelFundamentalData(self, reqId):
"""cancelFundamentalData(EClientSocketBase self, TickerId reqId)"""
return _swigibpy.EClientSocketBase_cancelFundamentalData(self, reqId)
def calculateImpliedVolatility(self, reqId, contract, optionPrice, underPrice):
"""calculateImpliedVolatility(EClientSocketBase self, TickerId reqId, Contract contract, double optionPrice, double underPrice)"""
return _swigibpy.EClientSocketBase_calculateImpliedVolatility(self, reqId, contract, optionPrice, underPrice)
def calculateOptionPrice(self, reqId, contract, volatility, underPrice):
"""calculateOptionPrice(EClientSocketBase self, TickerId reqId, Contract contract, double volatility, double underPrice)"""
return _swigibpy.EClientSocketBase_calculateOptionPrice(self, reqId, contract, volatility, underPrice)
def cancelCalculateImpliedVolatility(self, reqId):
"""cancelCalculateImpliedVolatility(EClientSocketBase self, TickerId reqId)"""
return _swigibpy.EClientSocketBase_cancelCalculateImpliedVolatility(self, reqId)
def cancelCalculateOptionPrice(self, reqId):
"""cancelCalculateOptionPrice(EClientSocketBase self, TickerId reqId)"""
return _swigibpy.EClientSocketBase_cancelCalculateOptionPrice(self, reqId)
def reqGlobalCancel(self):
"""reqGlobalCancel(EClientSocketBase self)"""
return _swigibpy.EClientSocketBase_reqGlobalCancel(self)
def reqMarketDataType(self, marketDataType):
"""reqMarketDataType(EClientSocketBase self, int marketDataType)"""
return _swigibpy.EClientSocketBase_reqMarketDataType(self, marketDataType)
def reqPositions(self):
"""reqPositions(EClientSocketBase self)"""
return _swigibpy.EClientSocketBase_reqPositions(self)
def cancelPositions(self):
"""cancelPositions(EClientSocketBase self)"""
return _swigibpy.EClientSocketBase_cancelPositions(self)
def reqAccountSummary(self, reqId, groupName, tags):
"""reqAccountSummary(EClientSocketBase self, int reqId, IBString const & groupName, IBString const & tags)"""
return _swigibpy.EClientSocketBase_reqAccountSummary(self, reqId, groupName, tags)
def cancelAccountSummary(self, reqId):
"""cancelAccountSummary(EClientSocketBase self, int reqId)"""
return _swigibpy.EClientSocketBase_cancelAccountSummary(self, reqId)
def verifyRequest(self, apiName, apiVersion):
"""verifyRequest(EClientSocketBase self, IBString const & apiName, IBString const & apiVersion)"""
return _swigibpy.EClientSocketBase_verifyRequest(self, apiName, apiVersion)
def verifyMessage(self, apiData):
"""verifyMessage(EClientSocketBase self, IBString const & apiData)"""
return _swigibpy.EClientSocketBase_verifyMessage(self, apiData)
def queryDisplayGroups(self, reqId):
"""queryDisplayGroups(EClientSocketBase self, int reqId)"""
return _swigibpy.EClientSocketBase_queryDisplayGroups(self, reqId)
def subscribeToGroupEvents(self, reqId, groupId):
"""subscribeToGroupEvents(EClientSocketBase self, int reqId, int groupId)"""
return _swigibpy.EClientSocketBase_subscribeToGroupEvents(self, reqId, groupId)
def updateDisplayGroup(self, reqId, contractInfo):
"""updateDisplayGroup(EClientSocketBase self, int reqId, IBString const & contractInfo)"""
return _swigibpy.EClientSocketBase_updateDisplayGroup(self, reqId, contractInfo)
def unsubscribeFromGroupEvents(self, reqId):
"""unsubscribeFromGroupEvents(EClientSocketBase self, int reqId)"""
return _swigibpy.EClientSocketBase_unsubscribeFromGroupEvents(self, reqId)
|
class EClientSocketBase(EClient):
'''Proxy of C++ EClientSocketBase class'''
def __init__(self, *args, **kwargs):
pass
def eConnect(self, host, port, clientId=0, extraAuth=False):
'''eConnect(EClientSocketBase self, char const * host, unsigned int port, int clientId=0, bool extraAuth=False) -> bool'''
pass
def eDisconnect(self):
'''eDisconnect(EClientSocketBase self)'''
pass
def clientId(self):
'''clientId(EClientSocketBase self) -> int'''
pass
def isConnected(self):
'''isConnected(EClientSocketBase self) -> bool'''
pass
def isInBufferEmpty(self):
'''isInBufferEmpty(EClientSocketBase self) -> bool'''
pass
def isOutBufferEmpty(self):
'''isOutBufferEmpty(EClientSocketBase self) -> bool'''
pass
def serverVersion(self):
'''serverVersion(EClientSocketBase self) -> int'''
pass
def TwsConnectionTime(self):
'''TwsConnectionTime(EClientSocketBase self) -> IBString'''
pass
def reqMktData(self, id, contract, genericTicks, snapshot, mktDataOptions):
'''reqMktData(EClientSocketBase self, TickerId id, Contract contract, IBString const & genericTicks, bool snapshot, TagValueListSPtr const & mktDataOptions)'''
pass
def cancelMktData(self, id):
'''cancelMktData(EClientSocketBase self, TickerId id)'''
pass
def placeOrder(self, id, contract, order):
'''placeOrder(EClientSocketBase self, OrderId id, Contract contract, Order order)'''
pass
def cancelOrder(self, id):
'''cancelOrder(EClientSocketBase self, OrderId id)'''
pass
def reqOpenOrders(self):
'''reqOpenOrders(EClientSocketBase self)'''
pass
def reqAccountUpdates(self, subscribe, acctCode):
'''reqAccountUpdates(EClientSocketBase self, bool subscribe, IBString const & acctCode)'''
pass
def reqExecutions(self, reqId, filter):
'''reqExecutions(EClientSocketBase self, int reqId, ExecutionFilter filter)'''
pass
def reqIds(self, numIds):
'''reqIds(EClientSocketBase self, int numIds)'''
pass
def checkMessages(self):
'''checkMessages(EClientSocketBase self) -> bool'''
pass
def reqContractDetails(self, reqId, contract):
'''reqContractDetails(EClientSocketBase self, int reqId, Contract contract)'''
pass
def reqMktDepth(self, tickerId, contract, numRows, mktDepthOptions):
'''reqMktDepth(EClientSocketBase self, TickerId tickerId, Contract contract, int numRows, TagValueListSPtr const & mktDepthOptions)'''
pass
def cancelMktDepth(self, tickerId):
'''cancelMktDepth(EClientSocketBase self, TickerId tickerId)'''
pass
def reqNewsBulletins(self, allMsgs):
'''reqNewsBulletins(EClientSocketBase self, bool allMsgs)'''
pass
def cancelNewsBulletins(self):
'''cancelNewsBulletins(EClientSocketBase self)'''
pass
def setServerLogLevel(self, level):
'''setServerLogLevel(EClientSocketBase self, int level)'''
pass
def reqAutoOpenOrders(self, bAutoBind):
'''reqAutoOpenOrders(EClientSocketBase self, bool bAutoBind)'''
pass
def reqAllOpenOrders(self):
'''reqAllOpenOrders(EClientSocketBase self)'''
pass
def reqManagedAccts(self):
'''reqManagedAccts(EClientSocketBase self)'''
pass
def requestFA(self, pFaDataType):
'''requestFA(EClientSocketBase self, faDataType pFaDataType)'''
pass
def replaceFA(self, pFaDataType, cxml):
'''replaceFA(EClientSocketBase self, faDataType pFaDataType, IBString const & cxml)'''
pass
def reqHistoricalData(self, id, contract, endDateTime, durationStr, barSizeSetting, whatToShow, useRTH, formatDate, chartOptions):
'''reqHistoricalData(EClientSocketBase self, TickerId id, Contract contract, IBString const & endDateTime, IBString const & durationStr, IBString const & barSizeSetting, IBString const & whatToShow, int useRTH, int formatDate, TagValueListSPtr const & chartOptions)'''
pass
def exerciseOptions(self, tickerId, contract, exerciseAction, exerciseQuantity, account, override):
'''exerciseOptions(EClientSocketBase self, TickerId tickerId, Contract contract, int exerciseAction, int exerciseQuantity, IBString const & account, int override)'''
pass
def cancelHistoricalData(self, tickerId):
'''cancelHistoricalData(EClientSocketBase self, TickerId tickerId)'''
pass
def reqRealTimeBars(self, id, contract, barSize, whatToShow, useRTH, realTimeBarsOptions):
'''reqRealTimeBars(EClientSocketBase self, TickerId id, Contract contract, int barSize, IBString const & whatToShow, bool useRTH, TagValueListSPtr const & realTimeBarsOptions)'''
pass
def cancelRealTimeBars(self, tickerId):
'''cancelRealTimeBars(EClientSocketBase self, TickerId tickerId)'''
pass
def cancelScannerSubscription(self, tickerId):
'''cancelScannerSubscription(EClientSocketBase self, int tickerId)'''
pass
def reqScannerParameters(self):
'''reqScannerParameters(EClientSocketBase self)'''
pass
def reqScannerSubscription(self, tickerId, subscription, scannerSubscriptionOptions):
'''reqScannerSubscription(EClientSocketBase self, int tickerId, ScannerSubscription subscription, TagValueListSPtr const & scannerSubscriptionOptions)'''
pass
def reqCurrentTime(self):
'''reqCurrentTime(EClientSocketBase self)'''
pass
def reqFundamentalData(self, reqId, arg3, reportType):
'''reqFundamentalData(EClientSocketBase self, TickerId reqId, Contract arg3, IBString const & reportType)'''
pass
def cancelFundamentalData(self, reqId):
'''cancelFundamentalData(EClientSocketBase self, TickerId reqId)'''
pass
def calculateImpliedVolatility(self, reqId, contract, optionPrice, underPrice):
'''calculateImpliedVolatility(EClientSocketBase self, TickerId reqId, Contract contract, double optionPrice, double underPrice)'''
pass
def calculateOptionPrice(self, reqId, contract, volatility, underPrice):
'''calculateOptionPrice(EClientSocketBase self, TickerId reqId, Contract contract, double volatility, double underPrice)'''
pass
def cancelCalculateImpliedVolatility(self, reqId):
'''cancelCalculateImpliedVolatility(EClientSocketBase self, TickerId reqId)'''
pass
def cancelCalculateOptionPrice(self, reqId):
'''cancelCalculateOptionPrice(EClientSocketBase self, TickerId reqId)'''
pass
def reqGlobalCancel(self):
'''reqGlobalCancel(EClientSocketBase self)'''
pass
def reqMarketDataType(self, marketDataType):
'''reqMarketDataType(EClientSocketBase self, int marketDataType)'''
pass
def reqPositions(self):
'''reqPositions(EClientSocketBase self)'''
pass
def cancelPositions(self):
'''cancelPositions(EClientSocketBase self)'''
pass
def reqAccountSummary(self, reqId, groupName, tags):
'''reqAccountSummary(EClientSocketBase self, int reqId, IBString const & groupName, IBString const & tags)'''
pass
def cancelAccountSummary(self, reqId):
'''cancelAccountSummary(EClientSocketBase self, int reqId)'''
pass
def verifyRequest(self, apiName, apiVersion):
'''verifyRequest(EClientSocketBase self, IBString const & apiName, IBString const & apiVersion)'''
pass
def verifyMessage(self, apiData):
'''verifyMessage(EClientSocketBase self, IBString const & apiData)'''
pass
def queryDisplayGroups(self, reqId):
'''queryDisplayGroups(EClientSocketBase self, int reqId)'''
pass
def subscribeToGroupEvents(self, reqId, groupId):
'''subscribeToGroupEvents(EClientSocketBase self, int reqId, int groupId)'''
pass
def updateDisplayGroup(self, reqId, contractInfo):
'''updateDisplayGroup(EClientSocketBase self, int reqId, IBString const & contractInfo)'''
pass
def unsubscribeFromGroupEvents(self, reqId):
'''unsubscribeFromGroupEvents(EClientSocketBase self, int reqId)'''
pass
| 57 | 56 | 3 | 0 | 2 | 1 | 1 | 0.48 | 1 | 1 | 0 | 1 | 56 | 0 | 56 | 108 | 282 | 110 | 116 | 60 | 59 | 56 | 116 | 60 | 59 | 1 | 2 | 0 | 56 |
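Comparing the two code cells in the record above, class_skeleton looks like human_written_code with class-level assignments dropped and each method body reduced to its docstring plus `pass`. The sketch below is one rough way to derive such a skeleton; it is an illustrative approximation (quote style and spacing will differ), not the dataset's actual extraction pipeline.

```python
# Illustrative only: approximates the human_written_code -> class_skeleton mapping
# seen in this record. The dataset's real extraction code is not shown here.
import ast

def skeletonize(source: str) -> str:
    tree = ast.parse(source)
    for node in ast.walk(tree):
        if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
            # Keep the docstring (if present), replace the rest of the body with pass.
            keep = node.body[:1] if ast.get_docstring(node) is not None else []
            node.body = keep + [ast.Pass()]
        elif isinstance(node, ast.ClassDef):
            # Drop class-level assignments (e.g. the SWIG properties); keep the
            # docstring expression and the method definitions.
            node.body = [
                n for n in node.body
                if isinstance(n, (ast.FunctionDef, ast.AsyncFunctionDef))
                or (isinstance(n, ast.Expr) and isinstance(n.value, ast.Constant))
            ] or [ast.Pass()]
    return ast.unparse(tree)  # requires Python 3.9+
```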
143,449 |
Komnomnomnom/swigibpy
|
Komnomnomnom_swigibpy/swigibpy.py
|
swigibpy.EClient
|
class EClient(object):
"""Proxy of C++ EClient class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
__swig_destroy__ = _swigibpy.delete_EClient
def eConnect(self, host, port, clientId=0, extraAuth=False):
"""eConnect(EClient self, char const * host, unsigned int port, int clientId=0, bool extraAuth=False) -> bool"""
return _swigibpy.EClient_eConnect(self, host, port, clientId, extraAuth)
def eDisconnect(self):
"""eDisconnect(EClient self)"""
return _swigibpy.EClient_eDisconnect(self)
def serverVersion(self):
"""serverVersion(EClient self) -> int"""
return _swigibpy.EClient_serverVersion(self)
def TwsConnectionTime(self):
"""TwsConnectionTime(EClient self) -> IBString"""
return _swigibpy.EClient_TwsConnectionTime(self)
def reqMktData(self, id, contract, genericTicks, snapshot, mktDataOptions):
"""reqMktData(EClient self, TickerId id, Contract contract, IBString const & genericTicks, bool snapshot, TagValueListSPtr const & mktDataOptions)"""
return _swigibpy.EClient_reqMktData(self, id, contract, genericTicks, snapshot, mktDataOptions)
def cancelMktData(self, id):
"""cancelMktData(EClient self, TickerId id)"""
return _swigibpy.EClient_cancelMktData(self, id)
def placeOrder(self, id, contract, order):
"""placeOrder(EClient self, OrderId id, Contract contract, Order order)"""
return _swigibpy.EClient_placeOrder(self, id, contract, order)
def cancelOrder(self, id):
"""cancelOrder(EClient self, OrderId id)"""
return _swigibpy.EClient_cancelOrder(self, id)
def reqOpenOrders(self):
"""reqOpenOrders(EClient self)"""
return _swigibpy.EClient_reqOpenOrders(self)
def reqAccountUpdates(self, subscribe, acctCode):
"""reqAccountUpdates(EClient self, bool subscribe, IBString const & acctCode)"""
return _swigibpy.EClient_reqAccountUpdates(self, subscribe, acctCode)
def reqExecutions(self, reqId, filter):
"""reqExecutions(EClient self, int reqId, ExecutionFilter filter)"""
return _swigibpy.EClient_reqExecutions(self, reqId, filter)
def reqIds(self, numIds):
"""reqIds(EClient self, int numIds)"""
return _swigibpy.EClient_reqIds(self, numIds)
def checkMessages(self):
"""checkMessages(EClient self) -> bool"""
return _swigibpy.EClient_checkMessages(self)
def reqContractDetails(self, reqId, contract):
"""reqContractDetails(EClient self, int reqId, Contract contract)"""
return _swigibpy.EClient_reqContractDetails(self, reqId, contract)
def reqMktDepth(self, id, contract, numRows, mktDepthOptions):
"""reqMktDepth(EClient self, TickerId id, Contract contract, int numRows, TagValueListSPtr const & mktDepthOptions)"""
return _swigibpy.EClient_reqMktDepth(self, id, contract, numRows, mktDepthOptions)
def cancelMktDepth(self, id):
"""cancelMktDepth(EClient self, TickerId id)"""
return _swigibpy.EClient_cancelMktDepth(self, id)
def reqNewsBulletins(self, allMsgs):
"""reqNewsBulletins(EClient self, bool allMsgs)"""
return _swigibpy.EClient_reqNewsBulletins(self, allMsgs)
def cancelNewsBulletins(self):
"""cancelNewsBulletins(EClient self)"""
return _swigibpy.EClient_cancelNewsBulletins(self)
def setServerLogLevel(self, level):
"""setServerLogLevel(EClient self, int level)"""
return _swigibpy.EClient_setServerLogLevel(self, level)
def reqAutoOpenOrders(self, bAutoBind):
"""reqAutoOpenOrders(EClient self, bool bAutoBind)"""
return _swigibpy.EClient_reqAutoOpenOrders(self, bAutoBind)
def reqAllOpenOrders(self):
"""reqAllOpenOrders(EClient self)"""
return _swigibpy.EClient_reqAllOpenOrders(self)
def reqManagedAccts(self):
"""reqManagedAccts(EClient self)"""
return _swigibpy.EClient_reqManagedAccts(self)
def requestFA(self, pFaDataType):
"""requestFA(EClient self, faDataType pFaDataType)"""
return _swigibpy.EClient_requestFA(self, pFaDataType)
def replaceFA(self, pFaDataType, cxml):
"""replaceFA(EClient self, faDataType pFaDataType, IBString const & cxml)"""
return _swigibpy.EClient_replaceFA(self, pFaDataType, cxml)
def reqHistoricalData(self, id, contract, endDateTime, durationStr, barSizeSetting, whatToShow, useRTH, formatDate, chartOptions):
"""reqHistoricalData(EClient self, TickerId id, Contract contract, IBString const & endDateTime, IBString const & durationStr, IBString const & barSizeSetting, IBString const & whatToShow, int useRTH, int formatDate, TagValueListSPtr const & chartOptions)"""
return _swigibpy.EClient_reqHistoricalData(self, id, contract, endDateTime, durationStr, barSizeSetting, whatToShow, useRTH, formatDate, chartOptions)
def exerciseOptions(self, id, contract, exerciseAction, exerciseQuantity, account, override):
"""exerciseOptions(EClient self, TickerId id, Contract contract, int exerciseAction, int exerciseQuantity, IBString const & account, int override)"""
return _swigibpy.EClient_exerciseOptions(self, id, contract, exerciseAction, exerciseQuantity, account, override)
def cancelHistoricalData(self, tickerId):
"""cancelHistoricalData(EClient self, TickerId tickerId)"""
return _swigibpy.EClient_cancelHistoricalData(self, tickerId)
def reqRealTimeBars(self, id, contract, barSize, whatToShow, useRTH, realTimeBarsOptions):
"""reqRealTimeBars(EClient self, TickerId id, Contract contract, int barSize, IBString const & whatToShow, bool useRTH, TagValueListSPtr const & realTimeBarsOptions)"""
return _swigibpy.EClient_reqRealTimeBars(self, id, contract, barSize, whatToShow, useRTH, realTimeBarsOptions)
def cancelRealTimeBars(self, tickerId):
"""cancelRealTimeBars(EClient self, TickerId tickerId)"""
return _swigibpy.EClient_cancelRealTimeBars(self, tickerId)
def cancelScannerSubscription(self, tickerId):
"""cancelScannerSubscription(EClient self, int tickerId)"""
return _swigibpy.EClient_cancelScannerSubscription(self, tickerId)
def reqScannerParameters(self):
"""reqScannerParameters(EClient self)"""
return _swigibpy.EClient_reqScannerParameters(self)
def reqScannerSubscription(self, tickerId, subscription, scannerSubscriptionOptions):
"""reqScannerSubscription(EClient self, int tickerId, ScannerSubscription subscription, TagValueListSPtr const & scannerSubscriptionOptions)"""
return _swigibpy.EClient_reqScannerSubscription(self, tickerId, subscription, scannerSubscriptionOptions)
def reqCurrentTime(self):
"""reqCurrentTime(EClient self)"""
return _swigibpy.EClient_reqCurrentTime(self)
def reqFundamentalData(self, reqId, arg3, reportType):
"""reqFundamentalData(EClient self, TickerId reqId, Contract arg3, IBString const & reportType)"""
return _swigibpy.EClient_reqFundamentalData(self, reqId, arg3, reportType)
def cancelFundamentalData(self, reqId):
"""cancelFundamentalData(EClient self, TickerId reqId)"""
return _swigibpy.EClient_cancelFundamentalData(self, reqId)
def calculateImpliedVolatility(self, reqId, contract, optionPrice, underPrice):
"""calculateImpliedVolatility(EClient self, TickerId reqId, Contract contract, double optionPrice, double underPrice)"""
return _swigibpy.EClient_calculateImpliedVolatility(self, reqId, contract, optionPrice, underPrice)
def calculateOptionPrice(self, reqId, contract, volatility, underPrice):
"""calculateOptionPrice(EClient self, TickerId reqId, Contract contract, double volatility, double underPrice)"""
return _swigibpy.EClient_calculateOptionPrice(self, reqId, contract, volatility, underPrice)
def cancelCalculateImpliedVolatility(self, reqId):
"""cancelCalculateImpliedVolatility(EClient self, TickerId reqId)"""
return _swigibpy.EClient_cancelCalculateImpliedVolatility(self, reqId)
def cancelCalculateOptionPrice(self, reqId):
"""cancelCalculateOptionPrice(EClient self, TickerId reqId)"""
return _swigibpy.EClient_cancelCalculateOptionPrice(self, reqId)
def reqGlobalCancel(self):
"""reqGlobalCancel(EClient self)"""
return _swigibpy.EClient_reqGlobalCancel(self)
def reqMarketDataType(self, marketDataType):
"""reqMarketDataType(EClient self, int marketDataType)"""
return _swigibpy.EClient_reqMarketDataType(self, marketDataType)
def reqPositions(self):
"""reqPositions(EClient self)"""
return _swigibpy.EClient_reqPositions(self)
def cancelPositions(self):
"""cancelPositions(EClient self)"""
return _swigibpy.EClient_cancelPositions(self)
def reqAccountSummary(self, reqId, groupName, tags):
"""reqAccountSummary(EClient self, int reqId, IBString const & groupName, IBString const & tags)"""
return _swigibpy.EClient_reqAccountSummary(self, reqId, groupName, tags)
def cancelAccountSummary(self, reqId):
"""cancelAccountSummary(EClient self, int reqId)"""
return _swigibpy.EClient_cancelAccountSummary(self, reqId)
def verifyRequest(self, apiName, apiVersion):
"""verifyRequest(EClient self, IBString const & apiName, IBString const & apiVersion)"""
return _swigibpy.EClient_verifyRequest(self, apiName, apiVersion)
def verifyMessage(self, apiData):
"""verifyMessage(EClient self, IBString const & apiData)"""
return _swigibpy.EClient_verifyMessage(self, apiData)
def queryDisplayGroups(self, reqId):
"""queryDisplayGroups(EClient self, int reqId)"""
return _swigibpy.EClient_queryDisplayGroups(self, reqId)
def subscribeToGroupEvents(self, reqId, groupId):
"""subscribeToGroupEvents(EClient self, int reqId, int groupId)"""
return _swigibpy.EClient_subscribeToGroupEvents(self, reqId, groupId)
def updateDisplayGroup(self, reqId, contractInfo):
"""updateDisplayGroup(EClient self, int reqId, IBString const & contractInfo)"""
return _swigibpy.EClient_updateDisplayGroup(self, reqId, contractInfo)
def unsubscribeFromGroupEvents(self, reqId):
"""unsubscribeFromGroupEvents(EClient self, int reqId)"""
return _swigibpy.EClient_unsubscribeFromGroupEvents(self, reqId)
|
class EClient(object):
'''Proxy of C++ EClient class'''
def __init__(self, *args, **kwargs):
pass
def eConnect(self, host, port, clientId=0, extraAuth=False):
'''eConnect(EClient self, char const * host, unsigned int port, int clientId=0, bool extraAuth=False) -> bool'''
pass
def eDisconnect(self):
'''eDisconnect(EClient self)'''
pass
def serverVersion(self):
'''serverVersion(EClient self) -> int'''
pass
def TwsConnectionTime(self):
'''TwsConnectionTime(EClient self) -> IBString'''
pass
def reqMktData(self, id, contract, genericTicks, snapshot, mktDataOptions):
'''reqMktData(EClient self, TickerId id, Contract contract, IBString const & genericTicks, bool snapshot, TagValueListSPtr const & mktDataOptions)'''
pass
def cancelMktData(self, id):
'''cancelMktData(EClient self, TickerId id)'''
pass
def placeOrder(self, id, contract, order):
'''placeOrder(EClient self, OrderId id, Contract contract, Order order)'''
pass
def cancelOrder(self, id):
'''cancelOrder(EClient self, OrderId id)'''
pass
def reqOpenOrders(self):
'''reqOpenOrders(EClient self)'''
pass
def reqAccountUpdates(self, subscribe, acctCode):
'''reqAccountUpdates(EClient self, bool subscribe, IBString const & acctCode)'''
pass
def reqExecutions(self, reqId, filter):
'''reqExecutions(EClient self, int reqId, ExecutionFilter filter)'''
pass
def reqIds(self, numIds):
'''reqIds(EClient self, int numIds)'''
pass
def checkMessages(self):
'''checkMessages(EClient self) -> bool'''
pass
def reqContractDetails(self, reqId, contract):
'''reqContractDetails(EClient self, int reqId, Contract contract)'''
pass
def reqMktDepth(self, id, contract, numRows, mktDepthOptions):
'''reqMktDepth(EClient self, TickerId id, Contract contract, int numRows, TagValueListSPtr const & mktDepthOptions)'''
pass
def cancelMktDepth(self, id):
'''cancelMktDepth(EClient self, TickerId id)'''
pass
def reqNewsBulletins(self, allMsgs):
'''reqNewsBulletins(EClient self, bool allMsgs)'''
pass
def cancelNewsBulletins(self):
'''cancelNewsBulletins(EClient self)'''
pass
def setServerLogLevel(self, level):
'''setServerLogLevel(EClient self, int level)'''
pass
def reqAutoOpenOrders(self, bAutoBind):
'''reqAutoOpenOrders(EClient self, bool bAutoBind)'''
pass
def reqAllOpenOrders(self):
'''reqAllOpenOrders(EClient self)'''
pass
def reqManagedAccts(self):
'''reqManagedAccts(EClient self)'''
pass
def requestFA(self, pFaDataType):
'''requestFA(EClient self, faDataType pFaDataType)'''
pass
def replaceFA(self, pFaDataType, cxml):
'''replaceFA(EClient self, faDataType pFaDataType, IBString const & cxml)'''
pass
def reqHistoricalData(self, id, contract, endDateTime, durationStr, barSizeSetting, whatToShow, useRTH, formatDate, chartOptions):
'''reqHistoricalData(EClient self, TickerId id, Contract contract, IBString const & endDateTime, IBString const & durationStr, IBString const & barSizeSetting, IBString const & whatToShow, int useRTH, int formatDate, TagValueListSPtr const & chartOptions)'''
pass
def exerciseOptions(self, id, contract, exerciseAction, exerciseQuantity, account, override):
'''exerciseOptions(EClient self, TickerId id, Contract contract, int exerciseAction, int exerciseQuantity, IBString const & account, int override)'''
pass
def cancelHistoricalData(self, tickerId):
'''cancelHistoricalData(EClient self, TickerId tickerId)'''
pass
def reqRealTimeBars(self, id, contract, barSize, whatToShow, useRTH, realTimeBarsOptions):
'''reqRealTimeBars(EClient self, TickerId id, Contract contract, int barSize, IBString const & whatToShow, bool useRTH, TagValueListSPtr const & realTimeBarsOptions)'''
pass
def cancelRealTimeBars(self, tickerId):
'''cancelRealTimeBars(EClient self, TickerId tickerId)'''
pass
def cancelScannerSubscription(self, tickerId):
'''cancelScannerSubscription(EClient self, int tickerId)'''
pass
def reqScannerParameters(self):
'''reqScannerParameters(EClient self)'''
pass
def reqScannerSubscription(self, tickerId, subscription, scannerSubscriptionOptions):
'''reqScannerSubscription(EClient self, int tickerId, ScannerSubscription subscription, TagValueListSPtr const & scannerSubscriptionOptions)'''
pass
def reqCurrentTime(self):
'''reqCurrentTime(EClient self)'''
pass
def reqFundamentalData(self, reqId, arg3, reportType):
'''reqFundamentalData(EClient self, TickerId reqId, Contract arg3, IBString const & reportType)'''
pass
def cancelFundamentalData(self, reqId):
'''cancelFundamentalData(EClient self, TickerId reqId)'''
pass
def calculateImpliedVolatility(self, reqId, contract, optionPrice, underPrice):
'''calculateImpliedVolatility(EClient self, TickerId reqId, Contract contract, double optionPrice, double underPrice)'''
pass
def calculateOptionPrice(self, reqId, contract, volatility, underPrice):
'''calculateOptionPrice(EClient self, TickerId reqId, Contract contract, double volatility, double underPrice)'''
pass
def cancelCalculateImpliedVolatility(self, reqId):
'''cancelCalculateImpliedVolatility(EClient self, TickerId reqId)'''
pass
def cancelCalculateOptionPrice(self, reqId):
'''cancelCalculateOptionPrice(EClient self, TickerId reqId)'''
pass
def reqGlobalCancel(self):
'''reqGlobalCancel(EClient self)'''
pass
def reqMarketDataType(self, marketDataType):
'''reqMarketDataType(EClient self, int marketDataType)'''
pass
def reqPositions(self):
'''reqPositions(EClient self)'''
pass
def cancelPositions(self):
'''cancelPositions(EClient self)'''
pass
def reqAccountSummary(self, reqId, groupName, tags):
'''reqAccountSummary(EClient self, int reqId, IBString const & groupName, IBString const & tags)'''
pass
def cancelAccountSummary(self, reqId):
'''cancelAccountSummary(EClient self, int reqId)'''
pass
def verifyRequest(self, apiName, apiVersion):
'''verifyRequest(EClient self, IBString const & apiName, IBString const & apiVersion)'''
pass
def verifyMessage(self, apiData):
'''verifyMessage(EClient self, IBString const & apiData)'''
pass
def queryDisplayGroups(self, reqId):
'''queryDisplayGroups(EClient self, int reqId)'''
pass
def subscribeToGroupEvents(self, reqId, groupId):
'''subscribeToGroupEvents(EClient self, int reqId, int groupId)'''
pass
def updateDisplayGroup(self, reqId, contractInfo):
'''updateDisplayGroup(EClient self, int reqId, IBString const & contractInfo)'''
pass
def unsubscribeFromGroupEvents(self, reqId):
'''unsubscribeFromGroupEvents(EClient self, int reqId)'''
pass
| 53 | 52 | 3 | 0 | 2 | 1 | 1 | 0.48 | 1 | 1 | 0 | 1 | 52 | 0 | 52 | 52 | 262 | 102 | 108 | 56 | 55 | 52 | 108 | 56 | 55 | 1 | 1 | 0 | 52 |
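The numeric columns look like Understand-style size and complexity counts. As a quick sanity check (an inference from the rows shown here, not something documented in this preview), CommentToCodeRatio matches CountLineComment divided by CountLineCode: for the EClient record above, 52 / 108 ≈ 0.48.

```python
# Sanity check of the inferred relationship (values taken from the EClient row above).
count_line_comment, count_line_code = 52, 108
print(round(count_line_comment / count_line_code, 2))  # 0.48, the row's CommentToCodeRatio
```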
143,450 |
Komnomnomnom/swigibpy
|
Komnomnomnom_swigibpy/swigibpy.py
|
swigibpy.ContractDetails
|
class ContractDetails(object):
"""Proxy of C++ ContractDetails class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self):
"""__init__(ContractDetails self) -> ContractDetails"""
_swigibpy.ContractDetails_swiginit(self, _swigibpy.new_ContractDetails())
summary = _swig_property(_swigibpy.ContractDetails_summary_get, _swigibpy.ContractDetails_summary_set)
marketName = _swig_property(_swigibpy.ContractDetails_marketName_get, _swigibpy.ContractDetails_marketName_set)
minTick = _swig_property(_swigibpy.ContractDetails_minTick_get, _swigibpy.ContractDetails_minTick_set)
orderTypes = _swig_property(_swigibpy.ContractDetails_orderTypes_get, _swigibpy.ContractDetails_orderTypes_set)
validExchanges = _swig_property(_swigibpy.ContractDetails_validExchanges_get, _swigibpy.ContractDetails_validExchanges_set)
priceMagnifier = _swig_property(_swigibpy.ContractDetails_priceMagnifier_get, _swigibpy.ContractDetails_priceMagnifier_set)
underConId = _swig_property(_swigibpy.ContractDetails_underConId_get, _swigibpy.ContractDetails_underConId_set)
longName = _swig_property(_swigibpy.ContractDetails_longName_get, _swigibpy.ContractDetails_longName_set)
contractMonth = _swig_property(_swigibpy.ContractDetails_contractMonth_get, _swigibpy.ContractDetails_contractMonth_set)
industry = _swig_property(_swigibpy.ContractDetails_industry_get, _swigibpy.ContractDetails_industry_set)
category = _swig_property(_swigibpy.ContractDetails_category_get, _swigibpy.ContractDetails_category_set)
subcategory = _swig_property(_swigibpy.ContractDetails_subcategory_get, _swigibpy.ContractDetails_subcategory_set)
timeZoneId = _swig_property(_swigibpy.ContractDetails_timeZoneId_get, _swigibpy.ContractDetails_timeZoneId_set)
tradingHours = _swig_property(_swigibpy.ContractDetails_tradingHours_get, _swigibpy.ContractDetails_tradingHours_set)
liquidHours = _swig_property(_swigibpy.ContractDetails_liquidHours_get, _swigibpy.ContractDetails_liquidHours_set)
evRule = _swig_property(_swigibpy.ContractDetails_evRule_get, _swigibpy.ContractDetails_evRule_set)
evMultiplier = _swig_property(_swigibpy.ContractDetails_evMultiplier_get, _swigibpy.ContractDetails_evMultiplier_set)
secIdList = _swig_property(_swigibpy.ContractDetails_secIdList_get, _swigibpy.ContractDetails_secIdList_set)
cusip = _swig_property(_swigibpy.ContractDetails_cusip_get, _swigibpy.ContractDetails_cusip_set)
ratings = _swig_property(_swigibpy.ContractDetails_ratings_get, _swigibpy.ContractDetails_ratings_set)
descAppend = _swig_property(_swigibpy.ContractDetails_descAppend_get, _swigibpy.ContractDetails_descAppend_set)
bondType = _swig_property(_swigibpy.ContractDetails_bondType_get, _swigibpy.ContractDetails_bondType_set)
couponType = _swig_property(_swigibpy.ContractDetails_couponType_get, _swigibpy.ContractDetails_couponType_set)
callable = _swig_property(_swigibpy.ContractDetails_callable_get, _swigibpy.ContractDetails_callable_set)
putable = _swig_property(_swigibpy.ContractDetails_putable_get, _swigibpy.ContractDetails_putable_set)
coupon = _swig_property(_swigibpy.ContractDetails_coupon_get, _swigibpy.ContractDetails_coupon_set)
convertible = _swig_property(_swigibpy.ContractDetails_convertible_get, _swigibpy.ContractDetails_convertible_set)
maturity = _swig_property(_swigibpy.ContractDetails_maturity_get, _swigibpy.ContractDetails_maturity_set)
issueDate = _swig_property(_swigibpy.ContractDetails_issueDate_get, _swigibpy.ContractDetails_issueDate_set)
nextOptionDate = _swig_property(_swigibpy.ContractDetails_nextOptionDate_get, _swigibpy.ContractDetails_nextOptionDate_set)
nextOptionType = _swig_property(_swigibpy.ContractDetails_nextOptionType_get, _swigibpy.ContractDetails_nextOptionType_set)
nextOptionPartial = _swig_property(_swigibpy.ContractDetails_nextOptionPartial_get, _swigibpy.ContractDetails_nextOptionPartial_set)
notes = _swig_property(_swigibpy.ContractDetails_notes_get, _swigibpy.ContractDetails_notes_set)
__swig_destroy__ = _swigibpy.delete_ContractDetails
|
class ContractDetails(object):
'''Proxy of C++ ContractDetails class'''
def __init__(self):
'''__init__(ContractDetails self) -> ContractDetails'''
pass
| 2 | 2 | 3 | 0 | 2 | 1 | 1 | 0.05 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 42 | 1 | 39 | 37 | 37 | 2 | 39 | 37 | 37 | 1 | 1 | 0 | 1 |
143,451 |
Komnomnomnom/swigibpy
|
Komnomnomnom_swigibpy/swigibpy.py
|
swigibpy.Contract
|
class Contract(object):
"""Proxy of C++ Contract class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self):
"""__init__(Contract self) -> Contract"""
_swigibpy.Contract_swiginit(self, _swigibpy.new_Contract())
conId = _swig_property(_swigibpy.Contract_conId_get, _swigibpy.Contract_conId_set)
symbol = _swig_property(_swigibpy.Contract_symbol_get, _swigibpy.Contract_symbol_set)
secType = _swig_property(_swigibpy.Contract_secType_get, _swigibpy.Contract_secType_set)
expiry = _swig_property(_swigibpy.Contract_expiry_get, _swigibpy.Contract_expiry_set)
strike = _swig_property(_swigibpy.Contract_strike_get, _swigibpy.Contract_strike_set)
right = _swig_property(_swigibpy.Contract_right_get, _swigibpy.Contract_right_set)
multiplier = _swig_property(_swigibpy.Contract_multiplier_get, _swigibpy.Contract_multiplier_set)
exchange = _swig_property(_swigibpy.Contract_exchange_get, _swigibpy.Contract_exchange_set)
primaryExchange = _swig_property(_swigibpy.Contract_primaryExchange_get, _swigibpy.Contract_primaryExchange_set)
currency = _swig_property(_swigibpy.Contract_currency_get, _swigibpy.Contract_currency_set)
localSymbol = _swig_property(_swigibpy.Contract_localSymbol_get, _swigibpy.Contract_localSymbol_set)
tradingClass = _swig_property(_swigibpy.Contract_tradingClass_get, _swigibpy.Contract_tradingClass_set)
includeExpired = _swig_property(_swigibpy.Contract_includeExpired_get, _swigibpy.Contract_includeExpired_set)
secIdType = _swig_property(_swigibpy.Contract_secIdType_get, _swigibpy.Contract_secIdType_set)
secId = _swig_property(_swigibpy.Contract_secId_get, _swigibpy.Contract_secId_set)
comboLegsDescrip = _swig_property(_swigibpy.Contract_comboLegsDescrip_get, _swigibpy.Contract_comboLegsDescrip_set)
comboLegs = _swig_property(_swigibpy.Contract_comboLegs_get, _swigibpy.Contract_comboLegs_set)
underComp = _swig_property(_swigibpy.Contract_underComp_get, _swigibpy.Contract_underComp_set)
def CloneComboLegs(dst, src):
"""CloneComboLegs(Contract::ComboLegListSPtr & dst, Contract::ComboLegListSPtr const & src)"""
return _swigibpy.Contract_CloneComboLegs(dst, src)
CloneComboLegs = staticmethod(CloneComboLegs)
__swig_destroy__ = _swigibpy.delete_Contract
|
class Contract(object):
'''Proxy of C++ Contract class'''
def __init__(self):
'''__init__(Contract self) -> Contract'''
pass
def CloneComboLegs(dst, src):
'''CloneComboLegs(Contract::ComboLegListSPtr & dst, Contract::ComboLegListSPtr const & src)'''
pass
| 3 | 3 | 3 | 0 | 2 | 1 | 1 | 0.11 | 1 | 0 | 0 | 0 | 2 | 0 | 2 | 2 | 33 | 3 | 27 | 24 | 24 | 3 | 27 | 24 | 24 | 1 | 1 | 0 | 2 |
143,452 |
Komnomnomnom/swigibpy
|
Komnomnomnom_swigibpy/swigibpy.py
|
swigibpy.CommissionReport
|
class CommissionReport(object):
"""Proxy of C++ CommissionReport class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self):
"""__init__(CommissionReport self) -> CommissionReport"""
_swigibpy.CommissionReport_swiginit(self, _swigibpy.new_CommissionReport())
execId = _swig_property(_swigibpy.CommissionReport_execId_get, _swigibpy.CommissionReport_execId_set)
commission = _swig_property(_swigibpy.CommissionReport_commission_get, _swigibpy.CommissionReport_commission_set)
currency = _swig_property(_swigibpy.CommissionReport_currency_get, _swigibpy.CommissionReport_currency_set)
realizedPNL = _swig_property(_swigibpy.CommissionReport_realizedPNL_get, _swigibpy.CommissionReport_realizedPNL_set)
_yield = _swig_property(_swigibpy.CommissionReport__yield_get, _swigibpy.CommissionReport__yield_set)
yieldRedemptionDate = _swig_property(_swigibpy.CommissionReport_yieldRedemptionDate_get, _swigibpy.CommissionReport_yieldRedemptionDate_set)
__swig_destroy__ = _swigibpy.delete_CommissionReport
|
class CommissionReport(object):
'''Proxy of C++ CommissionReport class'''
def __init__(self):
'''__init__(CommissionReport self) -> CommissionReport'''
pass
| 2 | 2 | 3 | 0 | 2 | 1 | 1 | 0.17 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 15 | 1 | 12 | 11 | 10 | 2 | 12 | 11 | 10 | 1 | 1 | 0 | 1 |
143,453 |
Komnomnomnom/swigibpy
|
Komnomnomnom_swigibpy/swigibpy.py
|
swigibpy.EWrapperVerbose
|
class EWrapperVerbose(EWrapper):
'''Implements all EWrapper methods and prints to standard out when a method
is invoked.
'''
def _print_call(self, name, *args, **kwargs):
argspec = []
if args:
argspec.append(', '.join(str(a) for a in args))
if kwargs:
            argspec.append(', '.join('%s=%s' % (k, v) for k, v in kwargs.items()))
print('TWS call ignored - %s(%s)' % (name, ', '.join(argspec)))
|
class EWrapperVerbose(EWrapper):
'''Implements all EWrapper methods and prints to standard out when a method
is invoked.
'''
def _print_call(self, name, *args, **kwargs):
pass
| 2 | 1 | 7 | 0 | 7 | 0 | 3 | 0.38 | 1 | 1 | 0 | 0 | 1 | 0 | 1 | 50 | 12 | 1 | 8 | 4 | 6 | 3 | 8 | 3 | 6 | 3 | 2 | 1 | 3 |
143,454 |
Komnomnomnom/swigibpy
|
Komnomnomnom_swigibpy/swigibpy.py
|
swigibpy.ComboLegList
|
class ComboLegList(object):
"""Proxy of C++ std::vector<(shared_ptr<(ComboLeg)>)> class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def iterator(self):
"""iterator(ComboLegList self) -> SwigPyIterator"""
return _swigibpy.ComboLegList_iterator(self)
def __iter__(self):
return self.iterator()
def __nonzero__(self):
"""__nonzero__(ComboLegList self) -> bool"""
return _swigibpy.ComboLegList___nonzero__(self)
def __bool__(self):
"""__bool__(ComboLegList self) -> bool"""
return _swigibpy.ComboLegList___bool__(self)
def __len__(self):
"""__len__(ComboLegList self) -> std::vector< shared_ptr< ComboLeg > >::size_type"""
return _swigibpy.ComboLegList___len__(self)
def pop(self):
"""pop(ComboLegList self) -> std::vector< shared_ptr< ComboLeg > >::value_type"""
return _swigibpy.ComboLegList_pop(self)
def __getslice__(self, i, j):
"""__getslice__(ComboLegList self, std::vector< shared_ptr< ComboLeg > >::difference_type i, std::vector< shared_ptr< ComboLeg > >::difference_type j) -> ComboLegList"""
return _swigibpy.ComboLegList___getslice__(self, i, j)
def __setslice__(self, *args, **kwargs):
"""__setslice__(ComboLegList self, std::vector< shared_ptr< ComboLeg > >::difference_type i, std::vector< shared_ptr< ComboLeg > >::difference_type j, ComboLegList v)"""
return _swigibpy.ComboLegList___setslice__(self, *args, **kwargs)
def __delslice__(self, i, j):
"""__delslice__(ComboLegList self, std::vector< shared_ptr< ComboLeg > >::difference_type i, std::vector< shared_ptr< ComboLeg > >::difference_type j)"""
return _swigibpy.ComboLegList___delslice__(self, i, j)
def __delitem__(self, *args):
"""
__delitem__(ComboLegList self, std::vector< shared_ptr< ComboLeg > >::difference_type i)
__delitem__(ComboLegList self, PySliceObject * slice)
"""
return _swigibpy.ComboLegList___delitem__(self, *args)
def __getitem__(self, *args):
"""
__getitem__(ComboLegList self, PySliceObject * slice) -> ComboLegList
__getitem__(ComboLegList self, std::vector< shared_ptr< ComboLeg > >::difference_type i) -> std::vector< shared_ptr< ComboLeg > >::value_type const &
"""
return _swigibpy.ComboLegList___getitem__(self, *args)
def __setitem__(self, *args):
"""
__setitem__(ComboLegList self, PySliceObject * slice, ComboLegList v)
__setitem__(ComboLegList self, PySliceObject * slice)
__setitem__(ComboLegList self, std::vector< shared_ptr< ComboLeg > >::difference_type i, std::vector< shared_ptr< ComboLeg > >::value_type const & x)
"""
return _swigibpy.ComboLegList___setitem__(self, *args)
def append(self, x):
"""append(ComboLegList self, std::vector< shared_ptr< ComboLeg > >::value_type const & x)"""
return _swigibpy.ComboLegList_append(self, x)
def empty(self):
"""empty(ComboLegList self) -> bool"""
return _swigibpy.ComboLegList_empty(self)
def size(self):
"""size(ComboLegList self) -> std::vector< shared_ptr< ComboLeg > >::size_type"""
return _swigibpy.ComboLegList_size(self)
def clear(self):
"""clear(ComboLegList self)"""
return _swigibpy.ComboLegList_clear(self)
def swap(self, v):
"""swap(ComboLegList self, ComboLegList v)"""
return _swigibpy.ComboLegList_swap(self, v)
def get_allocator(self):
"""get_allocator(ComboLegList self) -> std::vector< shared_ptr< ComboLeg > >::allocator_type"""
return _swigibpy.ComboLegList_get_allocator(self)
def begin(self):
"""begin(ComboLegList self) -> std::vector< shared_ptr< ComboLeg > >::iterator"""
return _swigibpy.ComboLegList_begin(self)
def end(self):
"""end(ComboLegList self) -> std::vector< shared_ptr< ComboLeg > >::iterator"""
return _swigibpy.ComboLegList_end(self)
def rbegin(self):
"""rbegin(ComboLegList self) -> std::vector< shared_ptr< ComboLeg > >::reverse_iterator"""
return _swigibpy.ComboLegList_rbegin(self)
def rend(self):
"""rend(ComboLegList self) -> std::vector< shared_ptr< ComboLeg > >::reverse_iterator"""
return _swigibpy.ComboLegList_rend(self)
def pop_back(self):
"""pop_back(ComboLegList self)"""
return _swigibpy.ComboLegList_pop_back(self)
def erase(self, *args):
"""
erase(ComboLegList self, std::vector< shared_ptr< ComboLeg > >::iterator pos) -> std::vector< shared_ptr< ComboLeg > >::iterator
erase(ComboLegList self, std::vector< shared_ptr< ComboLeg > >::iterator first, std::vector< shared_ptr< ComboLeg > >::iterator last) -> std::vector< shared_ptr< ComboLeg > >::iterator
"""
return _swigibpy.ComboLegList_erase(self, *args)
def __init__(self, *args):
"""
__init__(std::vector<(shared_ptr<(ComboLeg)>)> self) -> ComboLegList
__init__(std::vector<(shared_ptr<(ComboLeg)>)> self, ComboLegList arg2) -> ComboLegList
__init__(std::vector<(shared_ptr<(ComboLeg)>)> self, std::vector< shared_ptr< ComboLeg > >::size_type size) -> ComboLegList
__init__(std::vector<(shared_ptr<(ComboLeg)>)> self, std::vector< shared_ptr< ComboLeg > >::size_type size, std::vector< shared_ptr< ComboLeg > >::value_type const & value) -> ComboLegList
"""
_swigibpy.ComboLegList_swiginit(self, _swigibpy.new_ComboLegList(*args))
def push_back(self, x):
"""push_back(ComboLegList self, std::vector< shared_ptr< ComboLeg > >::value_type const & x)"""
return _swigibpy.ComboLegList_push_back(self, x)
def front(self):
"""front(ComboLegList self) -> std::vector< shared_ptr< ComboLeg > >::value_type const &"""
return _swigibpy.ComboLegList_front(self)
def back(self):
"""back(ComboLegList self) -> std::vector< shared_ptr< ComboLeg > >::value_type const &"""
return _swigibpy.ComboLegList_back(self)
def assign(self, n, x):
"""assign(ComboLegList self, std::vector< shared_ptr< ComboLeg > >::size_type n, std::vector< shared_ptr< ComboLeg > >::value_type const & x)"""
return _swigibpy.ComboLegList_assign(self, n, x)
def resize(self, *args):
"""
resize(ComboLegList self, std::vector< shared_ptr< ComboLeg > >::size_type new_size)
resize(ComboLegList self, std::vector< shared_ptr< ComboLeg > >::size_type new_size, std::vector< shared_ptr< ComboLeg > >::value_type const & x)
"""
return _swigibpy.ComboLegList_resize(self, *args)
def insert(self, *args):
"""
insert(ComboLegList self, std::vector< shared_ptr< ComboLeg > >::iterator pos, std::vector< shared_ptr< ComboLeg > >::value_type const & x) -> std::vector< shared_ptr< ComboLeg > >::iterator
insert(ComboLegList self, std::vector< shared_ptr< ComboLeg > >::iterator pos, std::vector< shared_ptr< ComboLeg > >::size_type n, std::vector< shared_ptr< ComboLeg > >::value_type const & x)
"""
return _swigibpy.ComboLegList_insert(self, *args)
def reserve(self, n):
"""reserve(ComboLegList self, std::vector< shared_ptr< ComboLeg > >::size_type n)"""
return _swigibpy.ComboLegList_reserve(self, n)
def capacity(self):
"""capacity(ComboLegList self) -> std::vector< shared_ptr< ComboLeg > >::size_type"""
return _swigibpy.ComboLegList_capacity(self)
__swig_destroy__ = _swigibpy.delete_ComboLegList
|
class ComboLegList(object):
'''Proxy of C++ std::vector<(shared_ptr<(ComboLeg)>)> class'''
def iterator(self):
'''iterator(ComboLegList self) -> SwigPyIterator'''
pass
def __iter__(self):
pass
def __nonzero__(self):
'''__nonzero__(ComboLegList self) -> bool'''
pass
def __bool__(self):
'''__bool__(ComboLegList self) -> bool'''
pass
def __len__(self):
'''__len__(ComboLegList self) -> std::vector< shared_ptr< ComboLeg > >::size_type'''
pass
def pop(self):
'''pop(ComboLegList self) -> std::vector< shared_ptr< ComboLeg > >::value_type'''
pass
def __getslice__(self, i, j):
'''__getslice__(ComboLegList self, std::vector< shared_ptr< ComboLeg > >::difference_type i, std::vector< shared_ptr< ComboLeg > >::difference_type j) -> ComboLegList'''
pass
def __setslice__(self, *args, **kwargs):
'''__setslice__(ComboLegList self, std::vector< shared_ptr< ComboLeg > >::difference_type i, std::vector< shared_ptr< ComboLeg > >::difference_type j, ComboLegList v)'''
pass
def __delslice__(self, i, j):
'''__delslice__(ComboLegList self, std::vector< shared_ptr< ComboLeg > >::difference_type i, std::vector< shared_ptr< ComboLeg > >::difference_type j)'''
pass
def __delitem__(self, *args):
'''
__delitem__(ComboLegList self, std::vector< shared_ptr< ComboLeg > >::difference_type i)
__delitem__(ComboLegList self, PySliceObject * slice)
'''
pass
def __getitem__(self, *args):
'''
__getitem__(ComboLegList self, PySliceObject * slice) -> ComboLegList
__getitem__(ComboLegList self, std::vector< shared_ptr< ComboLeg > >::difference_type i) -> std::vector< shared_ptr< ComboLeg > >::value_type const &
'''
pass
def __setitem__(self, *args):
'''
__setitem__(ComboLegList self, PySliceObject * slice, ComboLegList v)
__setitem__(ComboLegList self, PySliceObject * slice)
__setitem__(ComboLegList self, std::vector< shared_ptr< ComboLeg > >::difference_type i, std::vector< shared_ptr< ComboLeg > >::value_type const & x)
'''
pass
def append(self, x):
'''append(ComboLegList self, std::vector< shared_ptr< ComboLeg > >::value_type const & x)'''
pass
def empty(self):
'''empty(ComboLegList self) -> bool'''
pass
def size(self):
'''size(ComboLegList self) -> std::vector< shared_ptr< ComboLeg > >::size_type'''
pass
def clear(self):
'''clear(ComboLegList self)'''
pass
def swap(self, v):
'''swap(ComboLegList self, ComboLegList v)'''
pass
def get_allocator(self):
'''get_allocator(ComboLegList self) -> std::vector< shared_ptr< ComboLeg > >::allocator_type'''
pass
def begin(self):
'''begin(ComboLegList self) -> std::vector< shared_ptr< ComboLeg > >::iterator'''
pass
def end(self):
'''end(ComboLegList self) -> std::vector< shared_ptr< ComboLeg > >::iterator'''
pass
def rbegin(self):
'''rbegin(ComboLegList self) -> std::vector< shared_ptr< ComboLeg > >::reverse_iterator'''
pass
def rend(self):
'''rend(ComboLegList self) -> std::vector< shared_ptr< ComboLeg > >::reverse_iterator'''
pass
def pop_back(self):
'''pop_back(ComboLegList self)'''
pass
def erase(self, *args):
'''
erase(ComboLegList self, std::vector< shared_ptr< ComboLeg > >::iterator pos) -> std::vector< shared_ptr< ComboLeg > >::iterator
erase(ComboLegList self, std::vector< shared_ptr< ComboLeg > >::iterator first, std::vector< shared_ptr< ComboLeg > >::iterator last) -> std::vector< shared_ptr< ComboLeg > >::iterator
'''
pass
def __init__(self, *args):
'''
__init__(std::vector<(shared_ptr<(ComboLeg)>)> self) -> ComboLegList
__init__(std::vector<(shared_ptr<(ComboLeg)>)> self, ComboLegList arg2) -> ComboLegList
__init__(std::vector<(shared_ptr<(ComboLeg)>)> self, std::vector< shared_ptr< ComboLeg > >::size_type size) -> ComboLegList
__init__(std::vector<(shared_ptr<(ComboLeg)>)> self, std::vector< shared_ptr< ComboLeg > >::size_type size, std::vector< shared_ptr< ComboLeg > >::value_type const & value) -> ComboLegList
'''
pass
def push_back(self, x):
'''push_back(ComboLegList self, std::vector< shared_ptr< ComboLeg > >::value_type const & x)'''
pass
def front(self):
'''front(ComboLegList self) -> std::vector< shared_ptr< ComboLeg > >::value_type const &'''
pass
def back(self):
'''back(ComboLegList self) -> std::vector< shared_ptr< ComboLeg > >::value_type const &'''
pass
def assign(self, n, x):
'''assign(ComboLegList self, std::vector< shared_ptr< ComboLeg > >::size_type n, std::vector< shared_ptr< ComboLeg > >::value_type const & x)'''
pass
def resize(self, *args):
'''
resize(ComboLegList self, std::vector< shared_ptr< ComboLeg > >::size_type new_size)
resize(ComboLegList self, std::vector< shared_ptr< ComboLeg > >::size_type new_size, std::vector< shared_ptr< ComboLeg > >::value_type const & x)
'''
pass
def insert(self, *args):
'''
insert(ComboLegList self, std::vector< shared_ptr< ComboLeg > >::iterator pos, std::vector< shared_ptr< ComboLeg > >::value_type const & x) -> std::vector< shared_ptr< ComboLeg > >::iterator
insert(ComboLegList self, std::vector< shared_ptr< ComboLeg > >::iterator pos, std::vector< shared_ptr< ComboLeg > >::size_type n, std::vector< shared_ptr< ComboLeg > >::value_type const & x)
'''
pass
def reserve(self, n):
'''reserve(ComboLegList self, std::vector< shared_ptr< ComboLeg > >::size_type n)'''
pass
def capacity(self):
'''capacity(ComboLegList self) -> std::vector< shared_ptr< ComboLeg > >::size_type'''
pass
| 34 | 33 | 4 | 0 | 2 | 2 | 1 | 0.81 | 1 | 0 | 0 | 0 | 33 | 0 | 33 | 33 | 190 | 63 | 70 | 37 | 36 | 57 | 70 | 37 | 36 | 1 | 1 | 0 | 33 |
143,455 |
Komnomnomnom/swigibpy
|
Komnomnomnom_swigibpy/swigibpy.py
|
swigibpy.UnderComp
|
class UnderComp(object):
"""Proxy of C++ UnderComp class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self):
"""__init__(UnderComp self) -> UnderComp"""
_swigibpy.UnderComp_swiginit(self, _swigibpy.new_UnderComp())
conId = _swig_property(_swigibpy.UnderComp_conId_get, _swigibpy.UnderComp_conId_set)
delta = _swig_property(_swigibpy.UnderComp_delta_get, _swigibpy.UnderComp_delta_set)
price = _swig_property(_swigibpy.UnderComp_price_get, _swigibpy.UnderComp_price_set)
__swig_destroy__ = _swigibpy.delete_UnderComp
|
class UnderComp(object):
'''Proxy of C++ UnderComp class'''
def __init__(self):
'''__init__(UnderComp self) -> UnderComp'''
pass
| 2 | 2 | 3 | 0 | 2 | 1 | 1 | 0.22 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 12 | 1 | 9 | 8 | 7 | 2 | 9 | 8 | 7 | 1 | 1 | 0 | 1 |
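A minimal usage sketch for the UnderComp proxy above, assuming swigibpy is importable; the Contract fields used here (symbol, secType, underComp) come from the wider IB API and are assumptions, not part of this row:

from swigibpy import Contract, UnderComp

# Describe a delta-neutral underlying component using the three properties
# exposed by the proxy (conId, delta, price).
under = UnderComp()
under.conId = 43645865    # example contract id
under.delta = 0.5
under.price = 182.30

# Attaching it to an option Contract is how the IB API normally consumes it
# (the underComp attribute on Contract is assumed here).
contract = Contract()
contract.symbol = 'IBM'
contract.secType = 'OPT'
contract.underComp = under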
143,456 |
Komnomnomnom/swigibpy
|
Komnomnomnom_swigibpy/setup.py
|
setup.SwigibpyBuildExt
|
class SwigibpyBuildExt(build_ext):
def build_extensions(self):
compiler = self.compiler.compiler_type
if compiler == 'msvc':
extra = ('/D_CRT_SECURE_NO_DEPRECATE',
'/EHsc', '/wd4355', '/wd4800')
else:
extra = ('-Wno-switch',)
for ext in self.extensions:
ext.extra_compile_args += extra
build_ext.build_extensions(self)
|
class SwigibpyBuildExt(build_ext):
def build_extensions(self):
pass
| 2 | 0 | 10 | 0 | 10 | 0 | 3 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 70 | 11 | 0 | 11 | 5 | 9 | 0 | 9 | 5 | 7 | 3 | 3 | 1 | 3 |
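A hedged sketch of how a build_ext subclass like the one above is typically registered through cmdclass in setup.py; the Extension name, source path, and the setup_helpers module are placeholders, not taken from this repository:

from setuptools import setup, Extension

from setup_helpers import SwigibpyBuildExt  # hypothetical module holding the class above

setup(
    name='swigibpy',
    ext_modules=[Extension('_swigibpy', sources=['IB/swig_wrap.cpp'])],  # placeholder sources
    cmdclass={'build_ext': SwigibpyBuildExt},  # extra compiler flags applied at build time
)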
143,457 |
Komnomnomnom/swigibpy
|
Komnomnomnom_swigibpy/examples/historicaldata.py
|
historicaldata.HistoricalDataExample
|
class HistoricalDataExample(EWrapper):
'''Callback object passed to TWS, these functions will be called directly
by TWS.
'''
def __init__(self):
super(HistoricalDataExample, self).__init__()
self.got_history = Event()
def orderStatus(self, id, status, filled, remaining, avgFillPrice, permId,
parentId, lastFilledPrice, clientId, whyHeld):
pass
def openOrder(self, orderID, contract, order, orderState):
pass
def nextValidId(self, orderId):
'''Always called by TWS but not relevant for our example'''
pass
def openOrderEnd(self):
'''Always called by TWS but not relevant for our example'''
pass
def managedAccounts(self, openOrderEnd):
'''Called by TWS but not relevant for our example'''
pass
def historicalData(self, reqId, date, open, high,
low, close, volume,
barCount, WAP, hasGaps):
if date[:8] == 'finished':
print("History request complete")
self.got_history.set()
else:
date = datetime.strptime(date, "%Y%m%d").strftime("%d %b %Y")
print(("History %s - Open: %s, High: %s, Low: %s, Close: "
"%s, Volume: %d") % (date, open, high, low, close, volume))
|
class HistoricalDataExample(EWrapper):
'''Callback object passed to TWS, these functions will be called directly
by TWS.
'''
def __init__(self):
pass
def orderStatus(self, id, status, filled, remaining, avgFillPrice, permId,
parentId, lastFilledPrice, clientId, whyHeld):
pass
def openOrder(self, orderID, contract, order, orderState):
pass
def nextValidId(self, orderId):
'''Always called by TWS but not relevant for our example'''
pass
def openOrderEnd(self):
'''Always called by TWS but not relevant for our example'''
pass
def managedAccounts(self, openOrderEnd):
'''Called by TWS but not relevant for our example'''
pass
def historicalData(self, reqId, date, open, high,
low, close, volume,
barCount, WAP, hasGaps):
pass
| 8 | 4 | 4 | 0 | 3 | 0 | 1 | 0.24 | 1 | 3 | 0 | 0 | 7 | 1 | 7 | 56 | 40 | 9 | 25 | 12 | 14 | 6 | 20 | 9 | 12 | 2 | 2 | 1 | 8 |
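A hedged sketch of wiring the callback above into swigibpy; EPosixClientSocket and the connection parameters are assumptions about the wider swigibpy/TWS API, and the historical-data request itself is omitted because its signature varies across API versions:

from swigibpy import EPosixClientSocket

from historicaldata import HistoricalDataExample  # the example module above

wrapper = HistoricalDataExample()
tws = EPosixClientSocket(wrapper)        # TWS callbacks are delivered to `wrapper`
tws.eConnect("", 7496, 42)               # host, port, clientId
# ... issue a historical data request here; TWS then calls wrapper.historicalData() ...
wrapper.got_history.wait(timeout=60)     # Event set when the 'finished' bar arrives
tws.eDisconnect()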
143,458 |
Komnomnomnom/swigibpy
|
Komnomnomnom_swigibpy/setup.py
|
setup.Patchify
|
class Patchify(Command):
description = "Apply swigibpy's patches to the TWS API"
user_options = [
('reverse', 'r', 'Un-apply the patches')
]
def initialize_options(self):
self.cwd = None
self.reverse = False
self.patch_opts = ['-v']
def finalize_options(self):
self.cwd = getcwd()
if self.reverse:
self.patch_opts.append('-R')
def run(self):
chdir(root_dir)
for patch in listdir(join(root_dir, 'patches')):
patch_cmd = ['git', 'apply'] + self.patch_opts
patch_cmd.append(join(root_dir, 'patches', patch))
subprocess.call(patch_cmd)
chdir(self.cwd)
|
class Patchify(Command):
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
pass
| 4 | 0 | 5 | 0 | 5 | 0 | 2 | 0 | 1 | 0 | 0 | 0 | 3 | 3 | 3 | 42 | 23 | 3 | 20 | 11 | 16 | 0 | 18 | 11 | 14 | 2 | 2 | 1 | 5 |
143,459 |
Komnomnomnom/swigibpy
|
Komnomnomnom_swigibpy/swigibpy.py
|
swigibpy.ComboLeg
|
class ComboLeg(object):
"""Proxy of C++ ComboLeg class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self):
"""__init__(ComboLeg self) -> ComboLeg"""
_swigibpy.ComboLeg_swiginit(self, _swigibpy.new_ComboLeg())
conId = _swig_property(_swigibpy.ComboLeg_conId_get, _swigibpy.ComboLeg_conId_set)
ratio = _swig_property(_swigibpy.ComboLeg_ratio_get, _swigibpy.ComboLeg_ratio_set)
action = _swig_property(_swigibpy.ComboLeg_action_get, _swigibpy.ComboLeg_action_set)
exchange = _swig_property(_swigibpy.ComboLeg_exchange_get, _swigibpy.ComboLeg_exchange_set)
openClose = _swig_property(_swigibpy.ComboLeg_openClose_get, _swigibpy.ComboLeg_openClose_set)
shortSaleSlot = _swig_property(_swigibpy.ComboLeg_shortSaleSlot_get, _swigibpy.ComboLeg_shortSaleSlot_set)
designatedLocation = _swig_property(_swigibpy.ComboLeg_designatedLocation_get, _swigibpy.ComboLeg_designatedLocation_set)
exemptCode = _swig_property(_swigibpy.ComboLeg_exemptCode_get, _swigibpy.ComboLeg_exemptCode_set)
def __eq__(self, other):
"""__eq__(ComboLeg self, ComboLeg other) -> bool"""
return _swigibpy.ComboLeg___eq__(self, other)
__swig_destroy__ = _swigibpy.delete_ComboLeg
|
class ComboLeg(object):
'''Proxy of C++ ComboLeg class'''
def __init__(self):
'''__init__(ComboLeg self) -> ComboLeg'''
pass
def __eq__(self, other):
'''__eq__(ComboLeg self, ComboLeg other) -> bool'''
pass
| 3 | 3 | 3 | 0 | 2 | 1 | 1 | 0.19 | 1 | 0 | 0 | 0 | 2 | 0 | 2 | 2 | 22 | 3 | 16 | 14 | 13 | 3 | 16 | 14 | 13 | 1 | 1 | 0 | 2 |
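A short sketch combining the ComboLeg proxy above with the ComboLegList container from the earlier row; whether push_back accepts the proxy directly depends on the SWIG shared_ptr typemaps, so treat this as illustrative only:

from swigibpy import ComboLeg, ComboLegList

leg1 = ComboLeg()
leg1.conId = 43645865    # example contract ids
leg1.ratio = 1
leg1.action = 'BUY'
leg1.exchange = 'SMART'

leg2 = ComboLeg()
leg2.conId = 9408
leg2.ratio = 1
leg2.action = 'SELL'
leg2.exchange = 'SMART'

legs = ComboLegList()
legs.push_back(leg1)     # the list wraps a vector of shared_ptr<ComboLeg>
legs.push_back(leg2)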
143,460 |
Komnomnomnom/swigibpy
|
Komnomnomnom_swigibpy/setup.py
|
setup.Swigify
|
class Swigify(Command):
description = "Regenerate swigibpy's wrapper code (requires SWIG)"
user_options = []
def initialize_options(self):
self.swig_opts = None
self.cwd = None
def finalize_options(self):
self.cwd = getcwd()
self.swig_opts = [
'-v',
'-c++',
'-python',
'-threads',
'-keyword',
'-w511',
'-outdir',
root_dir,
'-modern',
'-fastdispatch',
'-nosafecstrings',
'-noproxydel',
'-fastproxy',
'-fastinit',
'-fastunpack',
'-fastquery',
'-modernargs',
'-nobuildnone'
]
def run(self):
chdir(join(root_dir, IB_DIR))
try:
swig_cmd = ['swig'] + self.swig_opts + ['-o', 'swig_wrap.cpp']
swig_cmd.append(join(root_dir, 'swigify_ib.i'))
print('Running SWIG command: %s' % ' '.join(swig_cmd))
subprocess.check_call(swig_cmd)
print('Removing boost namespace')
# Remove boost namespace, added to support IB's custom shared_ptr
swig_files = [
join(root_dir, IB_DIR, 'swig_wrap.cpp'),
join(root_dir, IB_DIR, 'swig_wrap.h'),
join(root_dir, 'swigibpy.py')
]
for swig_file in swig_files:
with open(swig_file, 'r+') as swig_file_handle:
contents = swig_file_handle.read()
contents = contents.replace(
"boost::shared_ptr", "shared_ptr")
contents = re.sub(
r'(shared_ptr<[^>]+>\([^)]+ )'
r'(SWIG_NO_NULL_DELETER_0)\)',
r'\1)',
contents
)
swig_file_handle.seek(0)
swig_file_handle.truncate()
swig_file_handle.write(contents)
except subprocess.CalledProcessError as cpe:
pass
finally:
chdir(self.cwd)
|
class Swigify(Command):
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
pass
| 4 | 0 | 20 | 1 | 19 | 0 | 2 | 0.02 | 1 | 1 | 0 | 0 | 3 | 2 | 3 | 42 | 66 | 6 | 59 | 14 | 55 | 1 | 29 | 12 | 25 | 3 | 2 | 3 | 5 |
143,461 |
Kong/analytics-agent-python
|
Kong_analytics-agent-python/tests/helpers.py
|
tests.helpers.make_server.QuietHandler
|
class QuietHandler(WSGIRequestHandler):
def log_request(*args, **kw): pass
|
class QuietHandler(WSGIRequestHandler):
def log_request(*args, **kw):
pass
| 2 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 2 | 0 | 2 | 2 | 1 | 0 | 3 | 2 | 1 | 1 | 1 | 0 | 1 |
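The one-liner above silences wsgiref's per-request logging. A self-contained sketch of the same idea outside the test helper (the app below is a placeholder):

from wsgiref.simple_server import WSGIRequestHandler, make_server


class QuietHandler(WSGIRequestHandler):
    def log_request(*args, **kw):
        pass             # drop the usual '"GET / HTTP/1.1" 200 ...' stderr lines


def app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'ok']


server = make_server('localhost', 8080, app, handler_class=QuietHandler)
server.handle_request()  # serve a single request without logging it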
143,462 |
Kong/analytics-agent-python
|
Kong_analytics-agent-python/mashapeanalytics/alf.py
|
mashapeanalytics.alf.Alf
|
class Alf(object):
'''
API Logging Format (ALF) Object
'''
def __init__(self, serviceToken, environment, clientIp = None):
self.serviceToken = serviceToken
self.environment = environment
self.clientIp = clientIp
self.entries = []
def __str__(self):
return json.dumps(self.to_json(), indent=2)
def to_json(self):
alf = {
'version': '1.0.0',
'serviceToken': self.serviceToken,
'environment': self.environment,
'har': {
'log': {
'version': '1.2',
'creator': {
'name': 'mashape-analytics-agent-python',
'version': '1.1.0'
},
'entries': self.entries
}
}
}
if (self.clientIp):
alf['clientIPAddress'] = self.clientIp
return alf
@property
def json(self):
return self.to_json()
def addEntry(self, entry):
self.entries.append(entry)
|
class Alf(object):
'''
API Logging Format (ALF) Object
'''
def __init__(self, serviceToken, environment, clientIp = None):
pass
def __str__(self):
pass
def to_json(self):
pass
@property
def json(self):
pass
def addEntry(self, entry):
pass
| 7 | 1 | 7 | 1 | 6 | 0 | 1 | 0.09 | 1 | 0 | 0 | 0 | 5 | 4 | 5 | 5 | 43 | 8 | 32 | 12 | 25 | 3 | 17 | 11 | 11 | 2 | 1 | 1 | 6 |
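A usage sketch for the Alf container above; the entry is trimmed to a minimum, whereas a real ALF entry carries the full HAR request/response structure built by the middlewares in the later rows:

from mashapeanalytics.alf import Alf

alf = Alf('SERVICE-TOKEN', 'PRODUCTION', clientIp='203.0.113.7')
alf.addEntry({
    'startedDateTime': '2015-01-01T00:00:00Z',
    'time': 12,
    'request': {'method': 'GET', 'url': 'http://example.com/'},
    'response': {'status': 200},
})
print(alf)            # pretty-printed JSON via __str__ / to_json()
payload = alf.json    # plain dict, as handed to the HTTP transport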
143,463 |
Kong/analytics-agent-python
|
Kong_analytics-agent-python/mashapeanalytics/middleware/django_middleware.py
|
mashapeanalytics.middleware.django_middleware.DjangoMiddleware
|
class DjangoMiddleware(object):
def __init__(self):
self.serviceToken = getattr(settings, 'MASHAPE_ANALYTICS_SERVICE_TOKEN', None)
self.environment = getattr(settings, 'MASHAPE_ANALYTICS_ENVIRONMENT', None)
host = getattr(settings, 'MASHAPE_ANALYTICS_HOST', 'collector.galileo.mashape.com')
port = int(getattr(settings, 'MASHAPE_ANALYTICS_PORT', 443))
connection_timeout = int(getattr(settings, 'MASHAPE_ANALYTICS_CONNECTION_TIMEOUT', 30))
retry_count = int(getattr(settings, 'MASHAPE_ANALYTICS_RETRY_COUNT', 0))
self.transport = HttpTransport(host, port, connection_timeout, retry_count)
if self.serviceToken is None:
raise AttributeError("'MASHAPE_ANALYTICS_SERVICE_TOKEN' setting is not found.")
def process_request(self, request):
request.META['MASHAPE_ANALYTICS.STARTED_DATETIME'] = datetime.utcnow()
request.META['galileo.request'] = Request(request.META)
def request_header_size(self, request):
# {METHOD} {URL} HTTP/1.1\r\n = 12 extra characters for space between method and url, and ` HTTP/1.1\r\n`
first_line = len(request.META.get('REQUEST_METHOD')) + len(request.get_full_path()) + 12
# {KEY}: {VALUE}\n\r = 4 extra characters for `: ` and `\n\r` minus `HTTP_` in the KEY is -1
header_fields = sum([(len(header) + len(value) - 1) for (header, value) in request.META.items() if header.startswith('HTTP_')])
last_line = 2 # trailing \r\n
return first_line + header_fields + last_line
def client_address(self, request):
ip = request.META.get('HTTP_X_FORWARDED_FOR', request.META.get('REMOTE_ADDR', None))
if ip:
return ip.split(',')[0]
def response_header_size(self, response):
# HTTP/1.1 {STATUS} {STATUS_TEXT} = 10 extra characters
first_line = len(str(response.status_code)) + len(response.reason_phrase) + 10
# {KEY}: {VALUE}\n\r = 4 extra characters `: ` and `\n\r`
header_fields = sum([(len(header) + len(value) + 4) for (header, value) in response._headers.items()])
return first_line + header_fields
def process_response(self, request, response):
startedDateTime = request.META.get('MASHAPE_ANALYTICS.STARTED_DATETIME', datetime.utcnow())
requestHeaders = [{'name': re.sub('^HTTP_', '', header), 'value': value} for (header, value) in request.META.items() if header.startswith('HTTP_')]
requestHeaderSize = self.request_header_size(request)
requestQueryString = [{'name': name, 'value': (value[0] if len(value) > 0 else None)} for name, value in parse_qs(request.META.get('QUERY_STRING', '')).items()]
r = request.META.get('galileo.request')
requestContentSize = r.content_length or 0
responseHeaders = [{'name': header, 'value': value[-1]} for (header, value) in response._headers.items()]
responseHeadersSize = self.response_header_size(response)
responseContentSize = len(response.content)
alf = Alf(self.serviceToken, self.environment, self.client_address(request))
alf.addEntry({
'startedDateTime': startedDateTime.isoformat() + 'Z',
'serverIpAddress': socket.gethostbyname(socket.gethostname()),
'time': int(round((datetime.utcnow() - startedDateTime).total_seconds() * 1000)),
'request': {
'method': request.method,
'url': request.build_absolute_uri(),
'httpVersion': 'HTTP/1.1',
'cookies': [],
'queryString': requestQueryString,
'headers': requestHeaders,
'headersSize': requestHeaderSize,
'content': {
'size': requestContentSize,
'mimeType': request.META.get('CONTENT_TYPE', 'application/octet-stream')
},
'bodySize': requestContentSize
},
'response': {
'status': response.status_code,
'statusText': response.reason_phrase,
'httpVersion': 'HTTP/1.1',
'cookies': [],
'headers': responseHeaders,
'headersSize': responseHeadersSize,
'content': {
'size': responseContentSize,
'mimeType': response._headers.get('content-type', (None, 'application/octet-stream'))[-1]
},
'bodySize': responseHeadersSize + responseContentSize,
'redirectURL': response._headers.get('location', ('location', ''))[-1]
},
'cache': {},
'timings': {
'blocked': -1,
'dns': -1,
'connect': -1,
'send': 0,
'wait': int(round((datetime.utcnow() - startedDateTime).total_seconds() * 1000)),
'receive': 0,
'ssl': -1
}
})
self.transport.send(alf.json)
return response
|
class DjangoMiddleware(object):
def __init__(self):
pass
def process_request(self, request):
pass
def request_header_size(self, request):
pass
def client_address(self, request):
pass
def response_header_size(self, response):
pass
def process_response(self, request, response):
pass
| 7 | 0 | 17 | 2 | 14 | 1 | 2 | 0.06 | 1 | 6 | 2 | 0 | 6 | 3 | 6 | 6 | 107 | 20 | 83 | 30 | 76 | 5 | 41 | 30 | 34 | 2 | 1 | 1 | 9 |
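A hedged sketch of the Django settings this middleware reads (the setting names are taken from the getattr calls above); the dotted middleware path mirrors this row's file path, and MIDDLEWARE_CLASSES reflects the Django style of that era:

# settings.py fragment (illustrative values)
MASHAPE_ANALYTICS_SERVICE_TOKEN = 'SERVICE-TOKEN'
MASHAPE_ANALYTICS_ENVIRONMENT = 'PRODUCTION'
MASHAPE_ANALYTICS_HOST = 'collector.galileo.mashape.com'   # default used above
MASHAPE_ANALYTICS_PORT = 443
MASHAPE_ANALYTICS_CONNECTION_TIMEOUT = 30
MASHAPE_ANALYTICS_RETRY_COUNT = 0

MIDDLEWARE_CLASSES = (
    'mashapeanalytics.middleware.django_middleware.DjangoMiddleware',
    # ... remaining middleware ...
)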
143,464 |
Kong/analytics-agent-python
|
Kong_analytics-agent-python/mashapeanalytics/middleware/wsgi_middleware.py
|
mashapeanalytics.middleware.wsgi_middleware.WsgiMiddleware
|
class WsgiMiddleware(object):
def __init__(self, app, serviceToken, environment=None, host='collector.galileo.mashape.com', port=443, connection_timeout=30, retry_count=0):
self.app = app
self.serviceToken = serviceToken
self.environment = environment
self.transport = HttpTransport(host, int(port), int(connection_timeout), int(retry_count))
def count_response_content_size(self, env, data):
env['MashapeAnalytics.responseContentSize'] += len(data)
return data
def host(self, env):
if env.get('HTTP_X_FORWARDED_HOST', False):
return env['HTTP_X_FORWARDED_HOST'].split(',')[-1]
elif (env['wsgi.url_scheme'] == 'http' and env['SERVER_PORT'] == '80') or (env['wsgi.url_scheme'] == 'https' and env['SERVER_PORT'] == '443'):
return env['HTTP_HOST'] or env['SERVER_NAME']
else:
return env['HTTP_HOST'] or '{SERVER_NAME}:{SERVER_PORT}'.format(**env)
def absolute_uri(self, env):
queryString = ('?' if env.get('QUERY_STRING', False) else '')
queryString += env.get('QUERY_STRING', '')
return '{0}://{1}{2}{3}'.format(env['wsgi.url_scheme'], self.host(env), env['PATH_INFO'], queryString)
def request_header_size(self, env):
# {METHOD} {URL} {HTTP_PROTO}\r\n = 4 extra characters for space between method and url, and `\r\n`
queryString = (1 if env.get('QUERY_STRING', False) else 0) # `?` to start query string if exists
queryString += len(env.get('QUERY_STRING', '')) # Rest of query string
first_line = len(env['REQUEST_METHOD']) + len(env['PATH_INFO']) + queryString + len(env['SERVER_PROTOCOL']) + 4
# {KEY}: {VALUE}\n\r = 4 extra characters for `: ` and `\n\r` minus `HTTP_` in the KEY is -1
header_fields = sum([(len(header) + len(value) - 1) for (header, value) in env.items() if header.startswith('HTTP_')])
last_line = 2 # trailing \r\n
return first_line + header_fields + last_line
def request_header_name(self, header):
return re.sub('_', '-', re.sub('^HTTP_', '', header))
def response_header_size(self, env):
# HTTP/1.1 {STATUS} {STATUS_TEXT} = 11 extra spaces
first_line = len(str(env['MashapeAnalytics.responseStatusCode'])) + len(env['MashapeAnalytics.responseReasonPhrase']) + 11
# {KEY}: {VALUE}\n\r = 4 extra characters `: ` and `\n\r`
header_fields = sum([(len(header) + len(value) + 4) for (header, value) in env['MashapeAnalytics.responseHeaders']])
return first_line + header_fields
def client_address(self, env):
ip = env.get('HTTP_X_FORWARDED_FOR', env.get('REMOTE_ADDR', None))
if ip:
return ip.split(',')[0]
def wrap_start_response(self, env, start_response):
def wrapped_start_response(status, response_headers, exc_info=None):
env['MashapeAnalytics.responseStatusCode'] = int(status[0:3])
env['MashapeAnalytics.responseReasonPhrase'] = status[4:]
env['MashapeAnalytics.responseHeaders'] = response_headers
write = start_response(status, response_headers, exc_info)
def wrapped_write(body): write(self.count_response_content_size(env, body))
return wrapped_write
return wrapped_start_response
def __call__(self, env, start_response):
env['MashapeAnalytics.startedDateTime'] = datetime.utcnow()
env['MashapeAnalytics.responseContentSize'] = 0
# Capture response body from iterable
iterable = None
try:
for data in self.app(env, self.wrap_start_response(env, start_response)):
yield self.count_response_content_size(env, data)
finally:
if hasattr(iterable, 'close'):
iterable.close()
# Construct and send ALF
r = Request(env)
requestHeaders = [{'name': self.request_header_name(header), 'value': value} for (header, value) in env.items() if header.startswith('HTTP_')]
requestHeaderSize = self.request_header_size(env)
requestQueryString = [{'name': name, 'value': value[0]} for name, value in parse_qs(env.get('QUERY_STRING', '')).items()]
requestContentSize = r.content_length or 0
responseHeaders = [{'name': header, 'value': value} for (header, value) in env['MashapeAnalytics.responseHeaders']]
responseHeadersSize = self.response_header_size(env)
responseContentTypeHeaders = [header for header in env['MashapeAnalytics.responseHeaders'] if header[0] == 'Content-Type']
if len(responseContentTypeHeaders) > 0:
responseMimeType = responseContentTypeHeaders[0][1]
else:
responseMimeType = 'application/octet-stream'
alf = Alf(self.serviceToken, self.environment, self.client_address(env))
entry = {
'startedDateTime': env['MashapeAnalytics.startedDateTime'].isoformat() + 'Z', # HACK for MashapeAnalytics server to validate date
'serverIPAddress': socket.gethostbyname(socket.gethostname()),
'time': int(round((datetime.utcnow() - env['MashapeAnalytics.startedDateTime']).total_seconds() * 1000)),
'request': {
'method': env['REQUEST_METHOD'],
'url': self.absolute_uri(env),
'httpVersion': env['SERVER_PROTOCOL'],
'cookies': [],
'queryString': requestQueryString,
'headers': requestHeaders,
'headersSize': requestHeaderSize,
'bodySize': requestContentSize
},
'response': {
'status': env['MashapeAnalytics.responseStatusCode'],
'statusText': env['MashapeAnalytics.responseReasonPhrase'],
'httpVersion': 'HTTP/1.1',
'cookies': [],
'headers': responseHeaders,
'headersSize': responseHeadersSize,
'content': {
'size': env['MashapeAnalytics.responseContentSize'],
'mimeType': responseMimeType
},
'bodySize': env['MashapeAnalytics.responseContentSize'],
'redirectURL': next((value for (header, value) in env['MashapeAnalytics.responseHeaders'] if header == 'Location'), '')
},
'cache': {},
'timings': {
'blocked': -1,
'dns': -1,
'connect': -1,
'send': 0,
'wait': int(round((datetime.utcnow() - env['MashapeAnalytics.startedDateTime']).total_seconds() * 1000)),
'receive': 0,
'ssl': -1
}
}
if 'CONTENT_LENGTH' in env and env['CONTENT_LENGTH'] != '0':
entry['request']['content'] = {
'size': requestContentSize,
'mimeType': env['CONTENT_TYPE'] or 'application/octet-stream'
}
alf.addEntry(entry)
self.transport.send(alf.json)
|
class WsgiMiddleware(object):
def __init__(self, app, serviceToken, environment=None, host='collector.galileo.mashape.com', port=443, connection_timeout=30, retry_count=0):
pass
def count_response_content_size(self, env, data):
pass
def host(self, env):
pass
def absolute_uri(self, env):
pass
def request_header_size(self, env):
pass
def request_header_name(self, header):
pass
def response_header_size(self, env):
pass
def client_address(self, env):
pass
def wrap_start_response(self, env, start_response):
pass
def wrapped_start_response(status, response_headers, exc_info=None):
pass
def wrapped_write(body):
pass
def __call__(self, env, start_response):
pass
| 13 | 0 | 12 | 2 | 10 | 1 | 2 | 0.09 | 1 | 5 | 2 | 0 | 10 | 4 | 10 | 10 | 149 | 28 | 115 | 39 | 103 | 10 | 71 | 39 | 58 | 5 | 1 | 2 | 21 |
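A sketch of wrapping a plain WSGI app with the middleware above; the constructor arguments follow the signature shown in this row, and the app itself is a placeholder:

from mashapeanalytics.middleware.wsgi_middleware import WsgiMiddleware


def app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'Hello']


app = WsgiMiddleware(app, 'SERVICE-TOKEN', 'PRODUCTION',
                     host='collector.galileo.mashape.com', port=443)
# Each request/response pair is now turned into an ALF entry and posted
# to the collector on a background thread.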
143,465 |
Kong/analytics-agent-python
|
Kong_analytics-agent-python/mashapeanalytics/transport.py
|
mashapeanalytics.transport.HttpTransport
|
class HttpTransport(object):
def __init__(self, host, port=443, connection_timeout=30, retry_count=0):
if port == 443:
self.url = 'https://%s/1.0.0/single' % host
elif port == 80:
self.url = 'http://%s/1.0.0/single' % host
else:
self.url = 'http://%s:%d/1.0.0/single' % (host, port)
self.connection_timeout = connection_timeout
self.retry_count = retry_count
def send(self, alf):
''' Non-blocking send '''
send_alf = SendThread(self.url, alf, self.connection_timeout, self.retry_count)
send_alf.start()
|
class HttpTransport(object):
def __init__(self, host, port=443, connection_timeout=30, retry_count=0):
pass
def send(self, alf):
''' Non-blocking send '''
pass
| 3 | 1 | 7 | 0 | 6 | 1 | 2 | 0.08 | 1 | 1 | 1 | 0 | 2 | 3 | 2 | 2 | 15 | 1 | 13 | 7 | 10 | 1 | 11 | 7 | 8 | 3 | 1 | 1 | 4 |
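Usage sketch for the transport above: it derives the collector URL from host and port and hands the payload to a background SendThread (next row), so send() returns immediately:

from mashapeanalytics.transport import HttpTransport

payload = {'version': '1.0.0', 'serviceToken': 'SERVICE-TOKEN',
           'har': {'log': {'version': '1.2', 'entries': []}}}   # minimal ALF-shaped dict

transport = HttpTransport('collector.galileo.mashape.com', port=443,
                          connection_timeout=30, retry_count=2)
transport.send(payload)   # non-blocking; the thread POSTs the JSON to /1.0.0/single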
143,466 |
Kong/analytics-agent-python
|
Kong_analytics-agent-python/mashapeanalytics/transport.py
|
mashapeanalytics.transport.SendThread
|
class SendThread(Thread):
'''Send ALF thread'''
def __init__(self, url, alf, connection_timeout=30, retry_count=0):
Thread.__init__(self)
self._stop = Event()
# self.threadID = threadID
self.name = 'alf-send'
self.url = url
self.alf = alf
self.connection_timeout = connection_timeout
self.retry_count = retry_count
def stop(self):
self._stop.set()
@property
def stopped(self):
return self._stop.isSet()
def run(self):
payload = ujson.dumps(dict(self.alf))
with requests.Session() as s:
s.mount('http://', requests.adapters.HTTPAdapter(max_retries=self.retry_count))
s.mount('https://', requests.adapters.HTTPAdapter(max_retries=self.retry_count))
response = s.post(self.url, data=payload, timeout=self.connection_timeout, headers={'Content-Type': 'application/json'})
if response.status_code != 200:
warn(response.text)
|
class SendThread(Thread):
'''Send ALF thread'''
def __init__(self, url, alf, connection_timeout=30, retry_count=0):
pass
def stop(self):
pass
@property
def stopped(self):
pass
def run(self):
pass
| 6 | 1 | 6 | 1 | 5 | 0 | 1 | 0.09 | 1 | 3 | 0 | 0 | 4 | 6 | 4 | 29 | 29 | 5 | 22 | 15 | 16 | 2 | 21 | 13 | 16 | 2 | 1 | 2 | 5 |
143,467 |
Kong/analytics-agent-python
|
Kong_analytics-agent-python/tests/test_wsgi.py
|
tests.test_wsgi.WsgiMiddewareTest
|
class WsgiMiddewareTest(TestCase):
def setUp(self):
self.app = WsgiMiddleware(create_app(), 'SERVICE-TOKEN', 'ENVIRONMENT', 'localhost', 56000)
def tearDown(self):
pass
@property
def middleware(self):
return self._middleware
def test_get(self):
status = '200 OK' # HTTP Status
headers = [('Content-type', 'application/json')] # HTTP Headers
# Mock collector
with mock_server(56000, status, headers, 'Yo!') as collector:
client = Client(self.app)
data, status, headers = client.open()
data = ''.join(data)
self.assertIn('Hello', data)
request = collector.get()
self.assertEqual(request.get('url'), u'http://localhost:56000/1.0.0/single')
alf = ujson.loads(request.get('body'))
self.assertEqual(alf['serviceToken'], 'SERVICE-TOKEN')
self.assertEqual(alf['har']['log']['creator']['name'], 'mashape-analytics-agent-python')
self.assertEqual(alf['har']['log']['entries'][0]['request']['method'], 'GET')
self.assertEqual(alf['har']['log']['entries'][0]['request']['url'], 'http://localhost/')
self.assertEqual(alf['har']['log']['entries'][0]['response']['status'], 200)
self.assertEqual(alf['har']['log']['entries'][0]['response']['statusText'], 'OK CUSTOM')
self.assertEqual(alf['har']['log']['entries'][0]['response']['content']['mimeType'], 'text/plain')
self.assertEqual(alf['har']['log']['entries'][0]['response']['content']['size'], 11)
self.assertTrue(alf['har']['log']['entries'][0]['timings']['wait'] >= 10)
|
class WsgiMiddewareTest(TestCase):
def setUp(self):
pass
def tearDown(self):
pass
@property
def middleware(self):
pass
def test_get(self):
pass
| 6 | 0 | 8 | 1 | 7 | 1 | 1 | 0.11 | 1 | 2 | 2 | 0 | 4 | 1 | 4 | 76 | 35 | 6 | 28 | 14 | 22 | 3 | 27 | 12 | 22 | 1 | 2 | 1 | 4 |
143,468 |
Kong/analytics-agent-python
|
Kong_analytics-agent-python/tests/test_pyramid.py
|
tests.test_pyramid.PyramidMiddewareTest
|
class PyramidMiddewareTest(TestCase):
def setUp(self):
self.app = WsgiMiddleware(create_app(), 'SERVICE_TOKEN', 'ENVIRONMENT', 'localhost', 56000)
def tearDown(self):
pass
def test_get(self):
status = '200 OK' # HTTP Status
headers = [('Content-type', 'application/json')] # HTTP Headers
# Mock collector
with mock_server(56000, status, headers, 'Yo!') as collector:
client = Client(self.app)
data, status, headers = client.open()
data = (b'').join(data)
self.assertIn('Hello', str(data))
request = collector.get()
self.assertEqual(request.get('url'), u'http://localhost:56000/1.0.0/single')
alf = ujson.loads(request.get('body'))
self.assertTrue(alf['har']['log']['entries'][0]['timings']['wait'] >= 10)
|
class PyramidMiddewareTest(TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_get(self):
pass
| 4 | 0 | 7 | 1 | 5 | 1 | 1 | 0.18 | 1 | 3 | 2 | 0 | 3 | 1 | 3 | 75 | 24 | 6 | 17 | 12 | 13 | 3 | 17 | 11 | 13 | 1 | 2 | 1 | 3 |
143,469 |
Kong/analytics-agent-python
|
Kong_analytics-agent-python/tests/helpers.py
|
tests.helpers.http_server
|
class http_server(object):
def __init__(self, port, handler):
self.queue = Queue()
self.server = make_server(port, handler)
def __enter__(self):
def run_app(server):
server.handle_request()
self.process = Process(target=run_app, args=(self.server,))
self.process.start()
return self.queue
def __exit__(self, exc_type, exc_val, exc_tb):
self.server.server_close()
self.server.socket.close()
self.process.terminate()
self.process.join()
|
class http_server(object):
def __init__(self, port, handler):
pass
def __enter__(self):
pass
def run_app(server):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
pass
| 5 | 0 | 5 | 1 | 4 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 3 | 3 | 3 | 3 | 19 | 4 | 15 | 8 | 10 | 0 | 15 | 8 | 10 | 1 | 1 | 0 | 4 |
143,470 |
Kong/analytics-agent-python
|
Kong_analytics-agent-python/tests/helpers.py
|
tests.helpers.mock_server
|
class mock_server(object):
def __init__(self, port, status, headers, response):
def handler(environ, start_response):
request = Request(environ)
content_length = request.content_length
self.queue.put({'url': request.url, 'body': environ['wsgi.input'].read(content_length)})
start_response(status, headers)
return [response]
self.queue = Queue()
self.server = make_server(port, handler)
def __enter__(self):
def run_app(server):
server.handle_request()
# self.process = Process(target=run_app, args=(self.port, self.queue, self.status, self.headers, self.response))
self.process = Process(target=run_app, args=(self.server,))
self.process.start()
return self.queue
def __exit__(self, exc_type, exc_val, exc_tb):
self.server.server_close()
self.server.socket.close()
self.process.terminate()
self.process.join()
|
class mock_server(object):
def __init__(self, port, status, headers, response):
pass
def handler(environ, start_response):
pass
def __enter__(self):
pass
def run_app(server):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
pass
| 6 | 0 | 6 | 1 | 6 | 0 | 1 | 0.05 | 1 | 0 | 0 | 0 | 3 | 3 | 3 | 3 | 27 | 5 | 21 | 11 | 15 | 1 | 21 | 11 | 15 | 1 | 1 | 0 | 5 |
143,471 |
Kong/analytics-agent-python
|
Kong_analytics-agent-python/tests/test_django.py
|
tests.test_django.DjangoMiddewareTest
|
class DjangoMiddewareTest(TestCase):
def setUp(self):
self._middleware = DjangoMiddleware()
def tearDown(self):
pass
@property
def middleware(self):
return self._middleware
def test_get(self):
status = '200 OK' # HTTP Status
headers = [('Content-type', 'application/json')] # HTTP Headers
with mock_server(56000, status, headers, 'Yo!') as collector:
req = requestFactory.get('/get?foo=bar', {'query': 'string'})
res = createResponse(200, {
'Content-Type': 'text/html; charset=UTF-8'
}, 'Test Body')
self.middleware.process_request(req)
time.sleep(0.01) # Sleep for 10 ms
response = self.middleware.process_response(req, res)
request = collector.get()
self.assertEqual(request.get('url'), u'http://localhost:56000/1.0.0/single')
alf = ujson.loads(request.get('body'))
self.assertIn('text/html', response['Content-Type'])
self.assertTrue(alf['har']['log']['entries'][0]['timings']['wait'] >= 10)
def test_post(self):
status = '200 OK' # HTTP Status
headers = [('Content-type', 'application/json')] # HTTP Headers
with mock_server(56000, status, headers, 'Yo!') as collector:
req = requestFactory.post('/post?foo=bar', {'query': 'string'})
res = createResponse(200, {
'Content-Type': 'application/json'
}, '{"foo": "bar"}')
self.middleware.process_request(req)
time.sleep(0.01) # Sleep for 10 ms
response = self.middleware.process_response(req, res)
request = collector.get()
self.assertEqual(request.get('url'), u'http://localhost:56000/1.0.0/single')
alf = ujson.loads(request.get('body'))
self.assertIn('json', response['Content-Type'])
|
class DjangoMiddewareTest(TestCase):
def setUp(self):
pass
def tearDown(self):
pass
@property
def middleware(self):
pass
def test_get(self):
pass
def test_post(self):
pass
| 7 | 0 | 9 | 1 | 7 | 1 | 1 | 0.15 | 1 | 2 | 2 | 0 | 5 | 1 | 5 | 77 | 49 | 10 | 39 | 24 | 32 | 6 | 34 | 21 | 28 | 1 | 2 | 1 | 5 |
143,472 |
Kong/analytics-agent-python
|
Kong_analytics-agent-python/tests/test_flask.py
|
tests.test_flask.FlaskMiddewareTest
|
class FlaskMiddewareTest(TestCase):
def setUp(self):
self.app = create_app()
self.app.wsgi_app = FlaskMiddleware(self.app.wsgi_app, 'SERVICE-TOKEN', 'ENVIRONMENT', 'localhost', 56000)
self.client = self.app.test_client()
def tearDown(self):
pass
@property
def middleware(self):
return self._middleware
def test_get(self):
status = '200 OK' # HTTP Status
headers = [('Content-type', 'application/json')] # HTTP Headers
# Mock collector
with mock_server(56000, status, headers, 'Yo!') as collector:
recv = self.client.get('/get?foo=bar', headers={'CONTENT_TYPE': 'text/plain', 'X-Custom': 'custom'})
self.assertIn('200 OK', recv.status)
self.assertIn('Hello', str(recv.data))
request = collector.get()
self.assertEqual(request.get('url'), u'http://localhost:56000/1.0.0/single')
alf = ujson.loads(request.get('body'))
self.assertTrue(alf['har']['log']['entries'][0]['timings']['wait'] >= 10)
def test_post(self):
status = '200 OK' # HTTP Status
headers = [('Content-type', 'application/json')] # HTTP Headers
# Mock collector
with mock_server(56000, status, headers, 'Yo!') as collector:
recv = self.client.post('/post', data='post data')
self.assertIn('200 OK', recv.status)
self.assertIn('Hello', str(recv.data))
request = collector.get()
self.assertEqual(request.get('url'), u'http://localhost:56000/1.0.0/single')
alf = ujson.loads(request.get('body'))
self.assertTrue(alf['har']['log']['entries'][0]['timings']['wait'] >= 10)
|
class FlaskMiddewareTest(TestCase):
def setUp(self):
pass
def tearDown(self):
pass
@property
def middleware(self):
pass
def test_get(self):
pass
def test_post(self):
pass
| 7 | 0 | 8 | 2 | 6 | 1 | 1 | 0.19 | 1 | 2 | 1 | 0 | 5 | 2 | 5 | 77 | 47 | 13 | 32 | 21 | 25 | 6 | 31 | 18 | 25 | 1 | 2 | 1 | 5 |
143,473 |
Kong/unirest-python
|
Kong_unirest-python/unirest/test/test_unirest.py
|
unirest.test.test_unirest.UnirestTestCase
|
class UnirestTestCase(unittest.TestCase):
def test_get(self):
response = unirest.get(
'http://httpbin.org/get?name=Mark', params={"nick": "thefosk"})
self.assertEqual(response.code, 200)
self.assertEqual(len(response.body['args']), 2)
self.assertEqual(response.body['args']['name'], "Mark")
self.assertEqual(response.body['args']['nick'], "thefosk")
def test_get2(self):
response = unirest.get(
'http://httpbin.org/get?name=Mark', params={"nick": "the fosk"})
self.assertEqual(response.code, 200)
self.assertEqual(len(response.body['args']), 2)
self.assertEqual(response.body['args']['name'], "Mark")
self.assertEqual(response.body['args']['nick'], "the fosk")
def test_get_unicode_param(self):
response = unirest.get(
'http://httpbin.org/get?name=Shimada', params={"nick": u"しまりん"})
self.assertEqual(response.code, 200)
self.assertEqual(len(response.body['args']), 2)
self.assertEqual(response.body['args']['name'], "Shimada")
self.assertEqual(response.body['args']['nick'], u"しまりん")
def test_get_none_param(self):
response = unirest.get('http://httpbin.org/get?name=Mark',
params={"nick": "thefosk", "age": None, "third": ""})
self.assertEqual(response.code, 200)
self.assertEqual(len(response.body['args']), 3)
self.assertEqual(response.body['args']['name'], "Mark")
self.assertEqual(response.body['args']['nick'], "thefosk")
self.assertEqual(response.body['args']['third'], "")
def test_post(self):
response = unirest.post('http://httpbin.org/post',
params={"name": "Mark", "nick": "thefosk"})
self.assertEqual(response.code, 200)
self.assertEqual(len(response.body['args']), 0)
self.assertEqual(len(response.body['form']), 2)
self.assertEqual(response.body['form']['name'], "Mark")
self.assertEqual(response.body['form']['nick'], "thefosk")
def test_post_none_param(self):
response = unirest.post(
'http://httpbin.org/post', params={"name": "Mark", "nick": "thefosk", "age": None, "third": ""})
self.assertEqual(response.code, 200)
self.assertEqual(len(response.body['args']), 0)
self.assertEqual(len(response.body['form']), 3)
self.assertEqual(response.body['form']['name'], "Mark")
self.assertEqual(response.body['form']['nick'], "thefosk")
self.assertEqual(response.body['form']['third'], "")
def test_delete(self):
response = unirest.delete(
'http://httpbin.org/delete', params={"name": "Mark", "nick": "thefosk"})
self.assertEqual(response.code, 200)
self.assertEqual(response.body['form']['name'], "Mark")
self.assertEqual(response.body['form']['nick'], "thefosk")
def test_put(self):
response = unirest.put('http://httpbin.org/put',
params={"name": "Mark", "nick": "thefosk"})
self.assertEqual(response.code, 200)
self.assertEqual(len(response.body['args']), 0)
self.assertEqual(len(response.body['form']), 2)
self.assertEqual(response.body['form']['name'], "Mark")
self.assertEqual(response.body['form']['nick'], "thefosk")
def test_patch(self):
response = unirest.patch(
'http://httpbin.org/patch', params={"name": "Mark", "nick": "thefosk"})
self.assertEqual(response.code, 200)
self.assertEqual(len(response.body['args']), 0)
self.assertEqual(len(response.body['form']), 2)
self.assertEqual(response.body['form']['name'], "Mark")
self.assertEqual(response.body['form']['nick'], "thefosk")
def test_post_entity(self):
response = unirest.post('http://httpbin.org/post', headers={
'Content-Type': 'text/plain'}, params="hello this is custom data")
self.assertEqual(response.code, 200)
self.assertEqual(response.body['data'], "hello this is custom data")
def test_gzip(self):
response = unirest.get('http://httpbin.org/gzip',
params={"name": "Mark"})
self.assertEqual(response.code, 200)
self.assertTrue(response.body['gzipped'])
def test_basicauth(self):
response = unirest.get('http://httpbin.org/get',
auth=('marco', 'password'))
self.assertEqual(response.code, 200)
self.assertEqual(
response.body['headers']['Authorization'], "Basic bWFyY286cGFzc3dvcmQ=")
def test_defaultheaders(self):
unirest.default_header('custom', 'custom header')
response = unirest.get('http://httpbin.org/get')
self.assertEqual(response.code, 200)
self.assertTrue('Custom' in response.body['headers'])
self.assertEqual(response.body['headers']['Custom'], "custom header")
# Make another request
response = unirest.get('http://httpbin.org/get')
self.assertEqual(response.code, 200)
self.assertTrue('Custom' in response.body['headers'])
self.assertEqual(response.body['headers']['Custom'], "custom header")
# Clear the default headers
unirest.clear_default_headers()
response = unirest.get('http://httpbin.org/get')
self.assertEqual(response.code, 200)
self.assertFalse('Custom' in response.body['headers'])
def test_timeout(self):
unirest.timeout(3)
response = unirest.get('http://httpbin.org/delay/1')
self.assertEqual(response.code, 200)
unirest.timeout(1)
try:
response = unirest.get('http://httpbin.org/delay/3')
self.fail("The timeout didn't work")
except:
pass
|
class UnirestTestCase(unittest.TestCase):
def test_get(self):
pass
def test_get2(self):
pass
def test_get_unicode_param(self):
pass
def test_get_none_param(self):
pass
def test_post(self):
pass
def test_post_none_param(self):
pass
def test_delete(self):
pass
def test_put(self):
pass
def test_patch(self):
pass
def test_post_entity(self):
pass
def test_gzip(self):
pass
def test_basicauth(self):
pass
def test_defaultheaders(self):
pass
def test_timeout(self):
pass
| 15 | 0 | 7 | 0 | 7 | 0 | 1 | 0.02 | 1 | 0 | 0 | 0 | 14 | 0 | 14 | 86 | 114 | 16 | 96 | 29 | 81 | 2 | 96 | 29 | 81 | 2 | 2 | 1 | 15 |
143,474 |
Kopachris/seshet
|
Kopachris_seshet/seshet/utils.py
|
seshet.utils.Storage
|
class Storage(dict):
"""A Storage object is like a dictionary except `obj.foo` can be used
in addition to `obj['foo']`, and setting obj.foo = None deletes item foo.
Example:
>>> o = Storage(a=1)
>>> print o.a
1
>>> o['a']
1
>>> o.a = 2
>>> print o['a']
2
>>> del o.a
>>> print o.a
None
"""
__slots__ = ()
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
__getitem__ = dict.get
__getattr__ = dict.get
__getnewargs__ = lambda self: getattr(dict,self).__getnewargs__(self)
__repr__ = lambda self: '<Storage %s>' % dict.__repr__(self)
__getstate__ = lambda self: None
__copy__ = lambda self: Storage(self)
def getlist(self, key):
"""Returns a Storage value as a list.
If the value is a list it will be returned as-is.
If object is None, an empty list will be returned.
Otherwise, `[value]` will be returned.
Example output for a query string of `?x=abc&y=abc&y=def`::
>>> request = Storage()
>>> request.vars = Storage()
>>> request.vars.x = 'abc'
>>> request.vars.y = ['abc', 'def']
>>> request.vars.getlist('x')
['abc']
>>> request.vars.getlist('y')
['abc', 'def']
>>> request.vars.getlist('z')
[]
"""
value = self.get(key, [])
if value is None or isinstance(value, (list, tuple)):
return value
else:
return [value]
def getfirst(self, key, default=None):
"""Returns the first value of a list or the value itself when given a
`request.vars` style key.
If the value is a list, its first item will be returned;
otherwise, the value will be returned as-is.
Example output for a query string of `?x=abc&y=abc&y=def`::
>>> request = Storage()
>>> request.vars = Storage()
>>> request.vars.x = 'abc'
>>> request.vars.y = ['abc', 'def']
>>> request.vars.getfirst('x')
'abc'
>>> request.vars.getfirst('y')
'abc'
>>> request.vars.getfirst('z')
"""
values = self.getlist(key)
return values[0] if values else default
def getlast(self, key, default=None):
"""Returns the last value of a list or value itself when given a
`request.vars` style key.
If the value is a list, the last item will be returned;
otherwise, the value will be returned as-is.
Simulated output with a query string of `?x=abc&y=abc&y=def`::
>>> request = Storage()
>>> request.vars = Storage()
>>> request.vars.x = 'abc'
>>> request.vars.y = ['abc', 'def']
>>> request.vars.getlast('x')
'abc'
>>> request.vars.getlast('y')
'def'
>>> request.vars.getlast('z')
"""
values = self.getlist(key)
return values[-1] if values else default
|
class Storage(dict):
'''A Storage object is like a dictionary except `obj.foo` can be used
in addition to `obj['foo']`, and setting obj.foo = None deletes item foo.
Example:
>>> o = Storage(a=1)
>>> print o.a
1
>>> o['a']
1
>>> o.a = 2
>>> print o['a']
2
>>> del o.a
>>> print o.a
None
'''
def getlist(self, key):
'''Returns a Storage value as a list.
If the value is a list it will be returned as-is.
If object is None, an empty list will be returned.
Otherwise, `[value]` will be returned.
Example output for a query string of `?x=abc&y=abc&y=def`::
>>> request = Storage()
>>> request.vars = Storage()
>>> request.vars.x = 'abc'
>>> request.vars.y = ['abc', 'def']
>>> request.vars.getlist('x')
['abc']
>>> request.vars.getlist('y')
['abc', 'def']
>>> request.vars.getlist('z')
[]
'''
pass
def getfirst(self, key, default=None):
'''Returns the first value of a list or the value itself when given a
`request.vars` style key.
If the value is a list, its first item will be returned;
otherwise, the value will be returned as-is.
Example output for a query string of `?x=abc&y=abc&y=def`::
>>> request = Storage()
>>> request.vars = Storage()
>>> request.vars.x = 'abc'
>>> request.vars.y = ['abc', 'def']
>>> request.vars.getfirst('x')
'abc'
>>> request.vars.getfirst('y')
'abc'
>>> request.vars.getfirst('z')
'''
pass
def getlast(self, key, default=None):
'''Returns the last value of a list or value itself when given a
`request.vars` style key.
If the value is a list, the last item will be returned;
otherwise, the value will be returned as-is.
Simulated output with a query string of `?x=abc&y=abc&y=def`::
>>> request = Storage()
>>> request.vars = Storage()
>>> request.vars.x = 'abc'
>>> request.vars.y = ['abc', 'def']
>>> request.vars.getlast('x')
'abc'
>>> request.vars.getlast('y')
'def'
>>> request.vars.getlast('z')
'''
pass
| 4 | 4 | 24 | 5 | 4 | 15 | 2 | 2.77 | 1 | 2 | 0 | 0 | 3 | 0 | 3 | 30 | 108 | 25 | 22 | 16 | 18 | 61 | 21 | 16 | 17 | 2 | 2 | 1 | 6 |
143,475 |
Kopachris/seshet
|
Kopachris_seshet/seshet/utils.py
|
seshet.utils.KVStore
|
class KVStore:
"""Create a key/value store in the bot's database for each
command module to use for persistent storage. Can be accessed
either like a class:
>>> store = KVStore(db)
>>> store.foo = 'bar'
>>> store.foo
'bar'
Or like a dict:
>>> store['spam'] = 'eggs'
>>> store['spam']
'eggs'
The KVStore object uses `inspect` to determine which module
the object is being accessed from and will automatically create
a database table as needed or determine which one to use if it
already exists, so that each module the object is used from has
its own namespace.
KVStore has most of the same interfaces as an ordinary `dict`, but
is not a subclass of `dict` or `collections.UserDict` because
so many functions had to be completely rewritten to work with
KVStore's database model.
"""
def __init__(self, db):
# make sure some tables are defined:
if 'namespaces' not in db:
# list of registered modules
db.define_table('namespaces', Field('name'))
for m in db().select(db.namespaces.ALL):
# these are modules' own "namespaces"
tbl_name = 'kv_' + m.name
if tbl_name not in db:
db.define_table(tbl_name,
Field('k', 'string', unique=True),
Field('v', 'text'),
)
self._db = db # pydal DAL instance
# It's recommended to use a separate database
# for the bot and for the KV store to avoid
# accidental or malicious name collisions
#
# (Then why doesn't the default implementation?)
def __getattr__(self, k):
if k.startswith('_'):
return self.__dict__[k]
db = self._db
tbl = self._get_calling_module()
tbl_name = 'kv_' + tbl if tbl is not None else None
if tbl is None or tbl_name not in db:
# table doesn't exist
return None
r = db(db[tbl_name].k == k)
if r.isempty():
# no db entry for this key
return None
r = r.select().first()
# db should return string, pickle expects bytes
return pickle.loads(r.v.encode(errors='ignore'))
def __setattr__(self, k, v):
if k.startswith('_'):
self.__dict__[k] = v
return
elif k in self.__dict__:
# instance attributes should be read-only-ish
raise AttributeError("Name already in use: %s" % k)
db = self._db
if v is not None:
v = pickle.dumps(v).decode(errors='ignore')
tbl = self._get_calling_module()
tbl_name = 'kv_' + tbl if tbl is not None else None
if tbl is None or tbl_name not in db:
if v is not None:
# module not registered, need to create
# a new table
self._register_module(tbl)
db[tbl_name].insert(k=k, v=repr(v))
else:
# no need to delete a non-existent key
return None
else:
if v is not None:
db[tbl_name].update_or_insert(db[tbl_name].k == k, k=k, v=v)
else:
db(db[tbl_name].k == k).delete()
db.commit()
self._db = db
def __delattr__(self, k):
self.__setattr__(k, None)
def __getitem__(self, k):
return self.__getattr__(k)
def __setitem__(self, k, v):
self.__setattr__(k, v)
def __delitem__(self, k):
self.__setattr__(k, None)
def _register_module(self, name):
db = self._db
tbl_name = 'kv_' + name
if db(db.namespaces.name == name).isempty():
db.namespaces.insert(name=name)
db.commit()
if tbl_name not in db:
db.define_table(tbl_name,
Field('k', 'string', unique=True),
Field('v', 'text'),
)
self._db = db
def _get_calling_module(self):
# in theory, bot modules will be registered with register_module
# when they're uploaded and installed
curfrm = inspect.currentframe()
for f in inspect.getouterframes(curfrm)[1:]:
if self.__module__.split('.')[-1] not in f[1]:
calling_file = f[1]
break
caller_mod = inspect.getmodulename(calling_file)
db = self._db
mod = db(db.namespaces.name == caller_mod)
if mod.isempty():
return None
else:
return caller_mod
def keys(self):
db = self._db
tbl = self._get_calling_module()
tbl_name = 'kv_' + tbl if tbl is not None else None
if tbl is None or tbl_name not in db:
return []
all_items = db().select(db[tbl_name].ALL)
all_keys = [r.k for r in all_items]
return all_keys
def values(self):
all_keys = self.keys()
all_vals = list()
for k in all_keys:
all_vals.append(self[k])
return all_vals
def update(self, other):
for k, v in other.items():
self[k] = v
return None
def items(self):
return zip(self.keys(), self.values())
def iterkeys(self):
return iter(self.keys())
def itervalues(self):
return iter(self.values())
def iteritems(self):
return iter(self.items())
def __iter__(self):
return iter(self.keys())
def __contains__(self, k):
if self[k] is not None:
return True
else:
return False
def __copy__(self):
"""Return a dict representing the current table"""
d = dict()
d.update(self.items())
return d
def copy(self):
"""Return a dict representing the current table"""
return self.__copy__()
def pop(self, k):
v = self[k]
self[k] = None
return v
def popitem(self):
"""Unlike `dict.popitem()`, this is actually random"""
all_items = self.items()
removed_item = random.choice(all_items)
self[removed_item[0]] = None
return removed_item
def setdefault(self, k, v=None):
existing_v = self[k]
if existing_v is None:
self[k] = v
return v
return existing_v
def has_key(self, k):
return k in self
def get(self, k, v=None):
existing_v = self[k]
if existing_v is None:
return v
else:
return existing_v
def clear(self):
for k in self.keys():
self[k] = None
|
class KVStore:
'''Create a key/value store in the bot's database for each
command module to use for persistent storage. Can be accessed
either like a class:
>>> store = KVStore(db)
>>> store.foo = 'bar'
>>> store.foo
'bar'
Or like a dict:
>>> store['spam'] = 'eggs'
>>> store['spam']
'eggs'
The KVStore object uses `inspect` to determine which module
the object is being accessed from and will automatically create
a database table as needed or determine which one to use if it
already exists, so that each module the object is used from has
its own namespace.
KVStore has most of the same interfaces as an ordinary `dict`, but
is not a subclass of `dict` or `collections.UserDict` because
so many functions had to be completely rewritten to work with
KVStore's database model.
'''
def __init__(self, db):
pass
def __getattr__(self, k):
pass
def __setattr__(self, k, v):
pass
def __delattr__(self, k):
pass
def __getitem__(self, k):
pass
def __setitem__(self, k, v):
pass
def __delitem__(self, k):
pass
def _register_module(self, name):
pass
def _get_calling_module(self):
pass
def keys(self):
pass
def values(self):
pass
def update(self, other):
pass
def items(self):
pass
def iterkeys(self):
pass
def itervalues(self):
pass
def iteritems(self):
pass
def __iter__(self):
pass
def __contains__(self, k):
pass
def __copy__(self):
'''Return a dict representing the current table'''
pass
def copy(self):
'''Return a dict representing the current table'''
pass
def pop(self, k):
pass
def popitem(self):
'''Unlike `dict.popitem()`, this is actually random'''
pass
def setdefault(self, k, v=None):
pass
def has_key(self, k):
pass
def get(self, k, v=None):
pass
def clear(self):
pass
| 27 | 4 | 7 | 1 | 6 | 1 | 2 | 0.28 | 0 | 4 | 0 | 0 | 26 | 1 | 26 | 26 | 237 | 48 | 148 | 61 | 121 | 42 | 135 | 61 | 108 | 8 | 0 | 2 | 53 |
143,476 |
Kopachris/seshet
|
Kopachris_seshet/seshet/utils.py
|
seshet.utils.IRCstr
|
class IRCstr(UserString):
"""Implement str, overriding case-changing methods to only handle ASCII
cases plus "{}|^" and "[]\~" as defined by RFC 2812.
Hashing and equality testing is case insensitive! That is, __hash__ will
return the hash of the lowercase version of the string, and __eq__ will
convert both operands to lowercase before testing equality.
"""
def casefold(self):
return self.lower()
def lower(self):
return self.data.translate(upper_to_lower)
def upper(self):
return self.data.translate(lower_to_upper)
def islower(self):
return self.data == self.lower()
def isupper(self):
return self.data == self.upper()
def __hash__(self):
return hash(self.lower())
def __eq__(self, other):
if isinstance(other, IRCstr):
return self.lower() == other.lower()
elif isinstance(other, str):
# Use our custom lowercasing for IRC on other
return self.lower() == other.translate(upper_to_lower)
else:
return False
|
class IRCstr(UserString):
'''Implement str, overriding case-changing methods to only handle ASCII
cases plus "{}|^" and "[]\~" as defined by RFC 2812.
Hashing and equality testing is case insensitive! That is, __hash__ will
return the hash of the lowercase version of the string, and __eq__ will
convert both operands to lowercase before testing equality.
'''
def casefold(self):
pass
def lower(self):
pass
def upper(self):
pass
def islower(self):
pass
def isupper(self):
pass
def __hash__(self):
pass
def __eq__(self, other):
pass
| 8 | 1 | 3 | 0 | 3 | 0 | 1 | 0.35 | 1 | 1 | 0 | 0 | 7 | 0 | 7 | 109 | 35 | 8 | 20 | 8 | 12 | 7 | 18 | 8 | 10 | 3 | 7 | 1 | 9 |
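A quick check of the case mapping the class above implements, assuming the module-level upper_to_lower and lower_to_upper translation tables cover A-Z plus the RFC 2812 bracket characters:

from seshet.utils import IRCstr

nick_a = IRCstr('Guest[away]')
nick_b = IRCstr('guest{away}')

print(nick_a == nick_b)           # True: '[' folds to '{' under IRC casemapping
print(nick_a == 'GUEST{AWAY}')    # True: plain str operands are folded the same way

seen = {nick_a}
print(nick_b in seen)             # True: __hash__ uses the lowercased form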
143,477 |
Kopachris/seshet
|
Kopachris_seshet/seshet/bot.py
|
seshet.bot.SeshetUser
|
class SeshetUser(object):
"""Represent one IRC user."""
def __init__(self, nick, user, host):
logging.debug("Building new SeshetUser, %s", nick)
self.nick = IRCstr(nick)
self.user = user
self.host = host
self.channels = []
def join(self, channel):
"""Add this user to the channel's user list and add the channel to this
user's list of joined channels.
"""
if channel not in self.channels:
channel.users.add(self.nick)
self.channels.append(channel)
def part(self, channel):
"""Remove this user from the channel's user list and remove the channel
from this user's list of joined channels.
"""
if channel in self.channels:
channel.users.remove(self.nick)
self.channels.remove(channel)
def quit(self):
"""Remove this user from all channels and reinitialize the user's list
of joined channels.
"""
for c in self.channels:
c.users.remove(self.nick)
self.channels = []
def change_nick(self, nick):
"""Update this user's nick in all joined channels."""
old_nick = self.nick
self.nick = IRCstr(nick)
for c in self.channels:
c.users.remove(old_nick)
c.users.add(self.nick)
def __str__(self):
return "{}!{}@{}".format(self.nick, self.user, self.host)
def __repr__(self):
temp = "<SeshetUser {}!{}@{} in channels {}>"
return temp.format(self.nick, self.user, self.host, self.channels)
|
class SeshetUser(object):
'''Represent one IRC user.'''
def __init__(self, nick, user, host):
pass
def join(self, channel):
'''Add this user to the channel's user list and add the channel to this
user's list of joined channels.
'''
pass
def part(self, channel):
'''Remove this user from the channel's user list and remove the channel
from this user's list of joined channels.
'''
pass
def quit(self):
'''Remove this user from all channels and reinitialize the user's list
of joined channels.
'''
pass
def change_nick(self, nick):
'''Update this user's nick in all joined channels.'''
pass
def __str__(self):
pass
def __repr__(self):
pass
| 8 | 5 | 6 | 1 | 4 | 1 | 2 | 0.37 | 1 | 1 | 1 | 0 | 7 | 4 | 7 | 7 | 53 | 12 | 30 | 16 | 22 | 11 | 30 | 16 | 22 | 2 | 1 | 1 | 11 |
143,478 |
Kopachris/seshet
|
Kopachris_seshet/seshet/bot.py
|
seshet.bot.SeshetChannel
|
class SeshetChannel(object):
"""Represent one IRC channel."""
def __init__(self, name, users, log_size=100):
self.name = IRCstr(name)
self.users = users
self.message_log = []
self._log_size = log_size
def log_message(self, user, message):
"""Log a channel message.
This log acts as a sort of cache so that recent activity can be searched
by the bot and command modules without querying the database.
"""
if isinstance(user, SeshetUser):
user = user.nick
elif not isinstance(user, IRCstr):
user = IRCstr(user)
time = datetime.utcnow()
self.message_log.append((time, user, message))
while len(self.message_log) > self._log_size:
del self.message_log[0]
def __str__(self):
return str(self.name)
def __repr__(self):
temp = "<SeshetChannel {} with {} users>"
return temp.format(self.name, len(self.users))
|
class SeshetChannel(object):
'''Represent one IRC channel.'''
def __init__(self, name, users, log_size=100):
pass
def log_message(self, user, message):
'''Log a channel message.
This log acts as a sort of cache so that recent activity can be searched
by the bot and command modules without querying the database.
'''
pass
def __str__(self):
pass
def __repr__(self):
pass
| 5 | 2 | 7 | 1 | 5 | 1 | 2 | 0.25 | 1 | 4 | 2 | 0 | 4 | 4 | 4 | 4 | 34 | 9 | 20 | 11 | 15 | 5 | 19 | 11 | 14 | 4 | 1 | 1 | 7 |
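A small sketch of the bounded message log above, assuming seshet and its dependencies are installed: once log_size entries are held, the oldest is evicted on the next log_message call. The import path follows this row's file path:

from seshet.bot import SeshetChannel

chan = SeshetChannel('#seshet', users=set(), log_size=2)
chan.log_message('alice', 'first')
chan.log_message('bob', 'second')
chan.log_message('alice', 'third')

print(len(chan.message_log))      # 2 -- 'first' has been evicted
print(chan.message_log[0][1])     # bob (entries are (time, nick, message) tuples)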
143,479 |
Kopachris/seshet
|
Kopachris_seshet/seshet/bot.py
|
seshet.bot.SeshetBot
|
class SeshetBot(bot.SimpleBot):
"""Extend `ircutils3.bot.SimpleBot`.
Each instance represents one bot, connected to one IRC network.
Each instance should have its own database, but can make use of
any shared command modules. The modules may have to be added to
the bot's database if the bot wasn't created using the
`seshet --config` or `seshet --new` commands.
"""
def __init__(self, nick='Seshet', db=None, debug_file=None, verbosity=99):
"""Extend `ircutils3.bot.SimpleBot.__init__()`.
Keyword argument `db` is required for running commands other
than core commands and should be an instance of pydal.DAL.
"""
# initialize debug logging
if debug_file is None:
logging.basicConfig(level=verbosity)
else:
logging.basicConfig(filename=os.path.expanduser(debug_file),
level=verbosity
)
logging.debug("Running `SimpleBot.__init__`...")
bot.SimpleBot.__init__(self, nick, auto_handle=False)
# define defaults
self.session = Storage()
self.log_file = 'seshet.log'
self.log_formats = {}
self.locale = {}
self.channels = {}
self.users = {}
if db is None:
# no database connection, only log to file and run
# core command modules
logging.info("No db, IRC logging will be done to file")
self.log = self._log_to_file
self.run_modules = self._run_only_core
# dummy KV store since no db
self.storage = Storage()
else:
logging.info("Using database %s", db)
self.db = db
self.storage = KVStore(db)
# Add default handlers
logging.debug("Adding default handlers...")
self.events["any"].add_handler(client._update_client_info)
self.events["ctcp_version"].add_handler(client._reply_to_ctcp_version)
self.events["name_reply"].add_handler(_add_channel_names)
def log(self, etype, source, msg='', target='', hostmask='', params=''):
"""Log an event in the database.
Required:
`etype` - event type. One of 'PRIVMSG', 'QUIT', 'PART', 'ACTION',
'NICK', 'JOIN', 'MODE', 'KICK', 'CTCP', or 'ERROR'. Enforced
by database model.
`source` - source of the event. Usually a user. For NICK events,
the user's original nickname. For ERROR events, this should be
the exception name, the module name, and the line number. The
full traceback will be logged in `msg`.
Optional:
`msg` - a message associated with the event.
`target` - the target the message was directed to. For MODE and KICK
events, this will be the user the event was performed on. For
NICK events, this will be the channel the event was seen in (an event
will be created for each channel the user is seen by the bot in).
`hostmask` - a hostmask associated with the event.
`params` - any additional parameters associated with the event, such as
a new nickname (for NICK events), mode switches (for MODE events),
or a dump of local variables (for ERROR events).
"""
self.db.event_log.insert(event_type=etype,
event_time=datetime.utcnow(),
source=source,
target=target,
message=msg,
host=hostmask,
params=params,
)
self.db.commit()
def run_modules(self, e):
# grab local pointer to self.db for faster lookup
db = self.db
# get initial list of modules handling this event type
event_types = db.modules.event_types
mod_enabled = db.modules.enabled
init_mods = db(event_types.contains(e.command) & mod_enabled).select()
logging.debug(("Running modules for {} command. "
"Initial module list:\n{}").format(e.command, init_mods)
)
if e.command in ('PRIVMSG', 'CTCP_ACTION', 'NOTICE'):
# narrow down list of modules to run based on event parameters
# lowercase for non-caps comparisons
m_low = e.message.lower()
bot_n = self.nickname.lower()
bot_u = self.user.lower()
bot_r = self.real_name.lower()
# indicates whether or not name has already been stripped from
# original message
for_us = False
if e.target.startswith('#'):
chan_msg = True
chan_nicks = self.channels[e.target].users
else:
chan_msg = False
fin_mods = list() # final list of modules to run
for mod in init_mods:
if e.source in mod.whitelist:
fin_mods.append(mod)
elif e.source in mod.blacklist:
pass
if self.nickname in mod.enicks:
if e.target == self.nickname or for_us:
fin_mods.append(mod)
elif m_low.startswith(bot_n):
# strip nickname from original message so modules can
# process it correctly
e.message = e.message[len(bot_n):].lstrip(',: ')
fin_mods.append(mod)
for_us = True
elif m_low.startswith(bot_u):
e.message = e.message[len(bot_u):].lstrip(',: ')
fin_mods.append(mod)
for_us = True
elif m_low.startswith(bot_r):
e.message = e.message[len(bot_r):].lstrip(',: ')
fin_mods.append(mod)
for_us = True
if chan_msg:
if e.target in mod.dchannels:
pass
elif set(mod.dnicks) & chan_nicks:
pass
elif e.target in mod.echannels:
fin_mods.append(mod)
elif set(mod.enicks) & chan_nicks:
fin_mods.append(mod)
argv = m_low.split()
for mod in fin_mods:
# run each module
m = __import__(mod.name) # TODO: use importlib
# TODO: add authentication and rate limiting
for cmd, fun in m.commands.items():
if (mod.cmd_prefix + cmd) == argv[0]:
fun(self, e)
break
def get_unique_users(self, chan):
"""Get the set of users that are unique to the given channel (i.e. not
present in any other channel the bot is in).
"""
chan = IRCstr(chan)
these_users = self.channels[chan].users
other_users = set()
for c in self.channels.values():
if c.name != chan:
other_users |= c.users
return these_users - other_users
def on_message(self, e):
self.log('privmsg',
source=e.source,
msg=e.message,
target=e.target,
)
if e.target in self.channels:
# TODO: move this to self.log() so we don't have to get time twice?
self.channels[e.target].log_message(e.source, e.message)
self.run_modules(e)
def on_join(self, e):
self.log('join',
source=e.source,
target=e.target,
hostmask=e.user+'@'+e.host,
)
chan = IRCstr(e.target)
nick = IRCstr(e.source)
if e.source != self.nickname:
if nick not in self.users:
self.users[nick] = SeshetUser(nick, e.user, e.host)
self.users[nick].join(self.channels[chan])
self.run_modules(e)
def on_part(self, e):
self.log('part',
source=e.source,
hostmask=e.user+'@'+e.host,
msg=' '.join(e.params[1:]),
target=e.target,
)
chan = IRCstr(e.target)
nick = IRCstr(e.source)
channel = self.channels[chan]
user = self.users[nick]
user.part(channel)
if nick == self.nickname:
# bot parted, remove that channel from all users and
# remove any users with empty channel lists
for u in list(self.users.values()):
if channel in u.channels:
u.channels.remove(channel)
if len(u.channels) == 0:
del self.users[u.nick]
def on_quit(self, e):
nick = IRCstr(e.source)
for chan in self.channels.values():
if nick in chan.users:
self.log('quit',
source=e.source,
hostmask=e.user+'@'+e.host,
msg=' '.join(e.params),
target=chan.name,
)
self.users[nick].quit()
del self.users[nick]
def on_disconnect(self, e):
pass
def on_kick(self, e):
self.log('kick',
source=e.source,
target=e.target,
params=e.params[0],
msg=' '.join(e.params[1:]),
hostmask=e.user+'@'+e.host,
)
chan = IRCstr(e.target)
nick = IRCstr(e.source)
channel = self.channels[chan]
user = self.users[nick]
user.part(channel)
if nick == self.nickname:
# bot parted, remove that channel from all users and
# remove any users with empty channel lists
for u in list(self.users.values()):
if channel in u.channels:
u.channels.remove(channel)
if len(u.channels) == 0:
del self.users[u.nick]
def on_nick_change(self, e):
new_nick = IRCstr(e.target)
old_nick = IRCstr(e.source)
for chan in self.channels.values():
if e.source in chan.user_list:
self.log('nick',
source=e.source,
hostmask=e.user+'@'+e.host,
target=chan.name,
params=e.target,
)
self.users[old_nick].change_nick(new_nick)
self.users[new_nick] = self.users[old_nick]
del self.users[old_nick]
def on_ctcp_action(self, e):
self.log('action',
source=e.source,
target=e.target,
msg=' '.join(e.params),
)
def on_welcome(self, e):
pass
def on_mode(self, e):
self.log('mode',
source=e.source,
msg=' '.join(e.params),
target=e.target,
)
def before_poll(self):
"""Called each loop before polling sockets for I/O."""
pass
def after_poll(self):
"""Called each loop after polling sockets for I/O and
handling any queued events.
"""
pass
def connect(self, *args, **kwargs):
"""Extend `client.SimpleClient.connect()` with defaults"""
defaults = {}
for i, k in enumerate(('host', 'port', 'channel', 'use_ssl', 'password')):
if i < len(args):
defaults[k] = args[i]
elif k in kwargs:
defaults[k] = kwargs[k]
else:
def_k = 'default_' + k
defaults[k] = getattr(self, def_k, None)
if defaults['use_ssl'] is None:
defaults['use_ssl'] = False
if defaults['host'] is None:
raise TypeError("missing 1 required positional argument: 'host'")
logging.info("Connecting to %s:%s and joining channels %s",
defaults['host'],
defaults['port'],
defaults['channel'],
)
client.SimpleClient.connect(self, **defaults)
def start(self):
logging.debug("Beginning poll loop")
self._loop(self.conn._map)
def _log_to_file(self, etype, source, msg='', target='', hostmask='', params=''):
"""Override `log()` if bot is not initialized with a database
connection. Do not call this method directly.
"""
today = datetime.utcnow()
# TODO: Use self.locale['timezone'] for changing time
date = today.strftime(self.locale['date_fmt'])
time = today.strftime(self.locale['time_fmt'])
datetime_s = today.strftime(self.locale['short_datetime_fmt'])
datetime_l = today.strftime(self.locale['long_datetime_fmt'])
if target == self.nickname and etype in ('privmsg', 'action'):
target = source
if etype in self.log_formats:
file_path = os.path.expanduser(self.log_file.format(**locals()))
file_dir = os.path.dirname(file_path)
if not os.path.isdir(file_dir):
os.makedirs(file_dir)
line = self.log_formats[etype].format(**locals())
with open(file_path, 'a') as log:
log.write(line+'\n')
# else do nothing
def _run_only_core(self, *args, **kwargs):
"""Override `_run_commands()` if bot is not initialized with a
database connection. Do not call this method directly.
Rather than getting a list of enabled modules from the database,
Seshet will only run the commands defined by `core` in this package.
The bot will only run commands given in private message ("query")
by either an authenticated user defined in the instance's config file,
or by any user with the same hostmask if authentication isn't set up
in the instance's config file.
The `core` command module from this package can only be overridden if
the bot is initialized with a database connection and a new `core`
module is entered into the database.
"""
pass
def _loop(self, map):
"""The main loop. Poll sockets for I/O and run any other functions
that need to be run every loop.
"""
try:
from asyncore import poll
except ImportError:
raise Exception("Couldn't find poll function. Cannot start bot.")
while map:
self.before_poll()
poll(timeout=30.0, map=map)
self.after_poll()
|
class SeshetBot(bot.SimpleBot):
'''Extend `ircutils3.bot.SimpleBot`.
Each instance represents one bot, connected to one IRC network.
Each instance should have its own database, but can make use of
any shared command modules. The modules may have to be added to
the bot's database if the bot wasn't created using the
`seshet --config` or `seshet --new` commands.
'''
def __init__(self, nick='Seshet', db=None, debug_file=None, verbosity=99):
'''Extend `ircutils3.bot.SimpleBot.__init__()`.
Keyword argument `db` is required for running commands other
than core commands and should be an instance of pydal.DAL.
'''
pass
def log(self, etype, source, msg='', target='', hostmask='', params=''):
'''Log an event in the database.
Required:
`etype` - event type. One of 'PRIVMSG', 'QUIT', 'PART', 'ACTION',
'NICK', 'JOIN', 'MODE', 'KICK', 'CTCP', or 'ERROR'. Enforced
by database model.
`source` - source of the event. Usually a user. For NICK events,
the user's original nickname. For ERROR events, this should be
the exception name, the module name, and the line number. The
full traceback will be logged in `msg`.
Optional:
`msg` - a message associated with the event.
`target` - the target the message was directed to. For MODE and KICK
events, this will be the user the event was performed on. For
NICK events, this will be the channel the event was seen in (an event
will be created for each channel the user is seen by the bot in).
`hostmask` - a hostmask associated with the event.
`params` - any additional parameters associated with the event, such as
a new nickname (for NICK events), mode switches (for MODE events),
or a dump of local variables (for ERROR events).
'''
pass
def run_modules(self, e):
pass
def get_unique_users(self, chan):
'''Get the set of users that are unique to the given channel (i.e. not
present in any other channel the bot is in).
'''
pass
def on_message(self, e):
pass
def on_join(self, e):
pass
def on_part(self, e):
pass
def on_quit(self, e):
pass
def on_disconnect(self, e):
pass
def on_kick(self, e):
pass
def on_nick_change(self, e):
pass
def on_ctcp_action(self, e):
pass
def on_welcome(self, e):
pass
def on_mode(self, e):
pass
def before_poll(self):
'''Called each loop before polling sockets for I/O.'''
pass
def after_poll(self):
'''Called each loop after polling sockets for I/O and
handling any queued events.
'''
pass
def connect(self, *args, **kwargs):
'''Extend `client.SimpleClient.connect()` with defaults'''
pass
def start(self):
pass
def _log_to_file(self, etype, source, msg='', target='', hostmask='', params=''):
'''Override `log()` if bot is not initialized with a database
connection. Do not call this method directly.
'''
pass
def _run_only_core(self, *args, **kwargs):
'''Override `_run_commands()` if bot is not initialized with a
database connection. Do not call this method directly.
Rather than getting a list of enabled modules from the database,
Seshet will only run the commands defined by `core` in this package.
The bot will only run commands given in private message ("query")
by either an authenticated user defined in the instance's config file,
or by any user with the same hostmask if authentication isn't set up
in the instance's config file.
The `core` command module from this package can only be overridden if
the bot is initialized with a database connection and a new `core`
module is entered into the database.
'''
pass
def _loop(self, map):
'''The main loop. Poll sockets for I/O and run any other functions
that need to be run every loop.
'''
pass
| 22 | 10 | 18 | 2 | 12 | 4 | 3 | 0.32 | 1 | 11 | 4 | 0 | 21 | 9 | 21 | 21 | 411 | 73 | 258 | 80 | 235 | 83 | 194 | 78 | 171 | 19 | 1 | 4 | 68 |
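An illustrative start-up sketch for SeshetBot in its no-database mode (file logging plus core commands only), assuming the seshet package shown in this row is importable; the server, channel, log path, and locale values are hypothetical.
from seshet.bot import SeshetBot

bot = SeshetBot(nick='SeshetDemo', db=None, verbosity=20)  # INFO-level debug logging

# _log_to_file() formats these; log_formats maps event types to line templates.
bot.log_file = 'logs/{target}/{date}.log'
bot.log_formats = {'privmsg': '[{time}] <{source}> {msg}'}
bot.locale = {
    'date_fmt': '%Y-%m-%d',
    'time_fmt': '%H:%M:%S',
    'short_datetime_fmt': '%Y-%m-%d %H:%M',
    'long_datetime_fmt': '%A, %d %B %Y %H:%M:%S',
}

# connect() fills host/port/channel/use_ssl/password from positional args,
# keyword args, or default_* attributes, in that order of precedence.
bot.connect('irc.example.org', 6667, '#demo', False)
bot.start()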
143,480 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/fragments/hpc/SGE.py
|
klab.bio.fragments.hpc.SGE.FragmentsJob
|
class FragmentsJob(object):
# The command line used to call the fragment generating script. Add -noporter into cmd below to skip running Porter
cmd = 'make_fragments_QB3_cluster.pl -verbose -id %(pdbid)s%(chain)s %(no_homologs)s %(fasta)s'
def __init__(self, make_fragments_perl_script, options, test_mode = False):
# Check the options
required_options = set(['queue', 'jobname', 'outpath', 'runtime', 'memfree', 'scratch', 'job_inputs', 'no_homologs', 'frag_sizes', 'n_frags', 'n_candidates', 'no_zip'])
assert(sorted(set(options.keys()).intersection(required_options)) == sorted(required_options))
self.make_fragments_perl_script = make_fragments_perl_script
self.options = options
self.test_mode = test_mode
self._set_script_header_parameters()
self._create_script()
def _set_script_header_parameters(self):
options = self.options
self.queues = self.options['queue'] or ['long.q']
if 'short.q' in self.queues:
self.test_mode = True
self.jobname = options['jobname']
self.job_directory = options['outpath']
if self.test_mode:
self.runtime_string = '0:29:00'
else:
self.runtime_string = '%d:00:00' % int(self.options['runtime'])
if self.test_mode:
self.memory_in_GB = 2
self.scratch_space_in_GB = 1
else:
self.memory_in_GB = int(self.options['memfree'])
self.scratch_space_in_GB = int(self.options['scratch'])
self.num_tasks = len(options['job_inputs'])
def _create_script(self):
options = self.options
# Create the data arrays
job_inputs = options['job_inputs']
job_data_arrays = []
job_data_arrays.append('chains = %s' % str([ji.chain for ji in job_inputs]))
job_data_arrays.append('pdb_ids = %s' % str([ji.pdb_id for ji in job_inputs]))
job_data_arrays.append('fasta_files = %s' % str([ji.fasta_file for ji in job_inputs]))
self.job_data_arrays = '\n'.join(job_data_arrays)
# Create the setup commands
self.job_setup_commands = '''
chain = chains[array_idx]
pdb_id = pdb_ids[array_idx]
fasta_file = fasta_files[array_idx]
task_root_dir = os.path.split(fasta_file)[0]
job_root_dir = os.path.split(task_root_dir)[0]
print_tag('job_root_dir', job_root_dir)
sys.path.insert(0, job_root_dir)
from post_processing import post_process
# Copy resources
shutil.copy(fasta_file, scratch_path)
'''
# Create the main task execution commands
make_fragments_perl_script = self.make_fragments_perl_script
no_homologs = options['no_homologs']
frag_sizes = '-frag_sizes %s' % ','.join(map(str, options['frag_sizes']))
n_frags = '-n_frags %d' % options['n_frags']
n_candidates = '-n_candidates %d' % options['n_candidates']
zip_files = str(not(options['no_zip']))
#has_segment_mapping = options['has_segment_mapping']
self.job_execution_commands = '''
cmd_args = [c for c in ['%(make_fragments_perl_script)s', '-verbose', '-id', pdb_id + chain, '%(no_homologs)s', '%(frag_sizes)s', '%(n_frags)s', '%(n_candidates)s', fasta_file] if c]
print_tag('cmd', ' '.join(cmd_args))
subp = Popen(scratch_path, cmd_args)
sys.stdout.write(subp.stdout)
if %(zip_files)s:
print("<gzip>")
for f in glob.glob(os.path.join(scratch_path, "*mers")) + [os.path.join(scratch_path, 'ss_blast')]:
if os.path.exists(f):
subpzip = Popen(scratch_path, ['gzip', f])
print(f)
print("</gzip>")
os.remove(fasta_file)
''' % locals()
self.job_post_processing_commands = '''
# Run post-processing script
task_dirname = os.path.split(task_root_dir)[1]
post_process(task_dirname)
''' % locals()
self.script = sge_interface.create_script(self.jobname, self.job_directory,
job_data_arrays = self.job_data_arrays, job_setup_commands = self.job_setup_commands, job_execution_commands = self.job_execution_commands, job_post_processing_commands = self.job_post_processing_commands,
architecture = 'linux-x64', num_tasks = self.num_tasks, memory_in_GB = self.memory_in_GB, scratch_space_in_GB = self.scratch_space_in_GB,
runtime_string = self.runtime_string, queues = self.queues)
|
class FragmentsJob(object):
def __init__(self, make_fragments_perl_script, options, test_mode = False):
pass
def _set_script_header_parameters(self):
pass
def _create_script(self):
pass
| 4 | 0 | 32 | 5 | 24 | 2 | 2 | 0.11 | 1 | 4 | 0 | 0 | 3 | 15 | 3 | 3 | 104 | 21 | 75 | 31 | 71 | 8 | 44 | 31 | 40 | 4 | 1 | 1 | 6 |
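A sketch of the options dictionary that FragmentsJob validates in its constructor, assuming the klab fragments module shown in this row is importable; JobInput is a hypothetical stand-in for the real job-input objects (which only need pdb_id, chain, and fasta_file attributes) and all values below are made up.
from collections import namedtuple

JobInput = namedtuple('JobInput', ['pdb_id', 'chain', 'fasta_file'])

options = {
    'queue': ['long.q'],          # 'short.q' switches the job into test mode
    'jobname': 'fragments_demo',
    'outpath': '/tmp/fragments_demo',
    'runtime': 6,                 # hours; rendered as '6:00:00'
    'memfree': 2,                 # GB
    'scratch': 1,                 # GB
    'job_inputs': [JobInput('1XYZ', 'A', '/tmp/fragments_demo/1XYZA/1XYZA.fasta')],
    'no_homologs': '',            # flag string passed through to the perl script
    'frag_sizes': [3, 9],
    'n_frags': 200,
    'n_candidates': 1000,
    'no_zip': False,
}

# With the klab environment available, the job script could then be built:
# job = FragmentsJob('make_fragments_QB3_cluster.pl', options, test_mode=True)
# job.script now holds the generated SGE submission script.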
143,481 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/clustalo.py
|
klab.bio.clustalo.NoPDBUniParcMappingExists
|
class NoPDBUniParcMappingExists(Exception): pass
|
class NoPDBUniParcMappingExists(Exception):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
143,482 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/clustalo.py
|
klab.bio.clustalo.MultipleAlignmentException
|
class MultipleAlignmentException(Exception):
'''This exception gets thrown when there are more alignments found than expected.'''
def __init__(self, chain_id, max_expected_matches_per_chain, num_actual_matches, match_list, msg = ''):
if msg: msg = '\n' + msg
super(MultipleAlignmentException, self).__init__("Each chain was expected to match at most %d other sequences but chain %s matched %d chains: %s.%s" % (max_expected_matches_per_chain, chain_id, num_actual_matches, ", ".join(match_list), msg))
|
class MultipleAlignmentException(Exception):
'''This exception gets thrown when there are more alignments found than expected.'''
def __init__(self, chain_id, max_expected_matches_per_chain, num_actual_matches, match_list, msg = ''):
pass
| 2 | 1 | 3 | 0 | 3 | 0 | 2 | 0.25 | 1 | 1 | 0 | 0 | 1 | 0 | 1 | 11 | 5 | 0 | 4 | 2 | 2 | 1 | 5 | 2 | 3 | 2 | 3 | 1 | 2 |
143,483 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/clustalo.py
|
klab.bio.clustalo.MalformedSequenceException
|
class MalformedSequenceException(Exception): pass
|
class MalformedSequenceException(Exception):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
143,484 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/pdb.py
|
klab.bio.pdb.PDBMissingMainchainAtomsException
|
class PDBMissingMainchainAtomsException(Exception): pass
|
class PDBMissingMainchainAtomsException(Exception):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
143,485 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/clustalo.py
|
klab.bio.clustalo.PDBChainSequenceAligner
|
class PDBChainSequenceAligner(object):
'''This is a useful utility class which allows you to quickly figure out when sequences are identical on their overlap or what the mutations are.
I used this in the DDG project to investigate PDB files to determine overlap between the binding affinity datasets.
Example usage:
pcsa = PDBChainSequenceAligner(initial_chains = [('2GOO', 'A'), ('2GOO', 'D'), ('2H62', 'A'), ('2H62', 'B')], cache_dir = '/tmp')
output, best_matches = pcsa.align()
colortext.warning(pprint.pformat(best_matches))
'''
def __init__(self, initial_chains = [], cache_dir = None):
'''initial_chains should be a list of (pdb_id, chain_id) tuples/lists.'''
self.cache_dir = cache_dir
self.pdb_chains = []
for ic in initial_chains:
self.add(ic[0], ic[1])
def add(self, pdb_id, chain_id, sequence = None):
assert(len(chain_id) == 1)
if len(pdb_id) == 4 and not sequence:
# RCSB files
f = FASTA.retrieve(pdb_id, cache_dir = self.cache_dir)
#print(f[pdb_id][chain_id])
sequence = f[pdb_id][chain_id]
self.pdb_chains.append(dict(
pdb_id = pdb_id,
chain_id = chain_id,
sequence = sequence,
))
def align(self, alignment_tool = 'clustalw', gap_opening_penalty = 0.2, ignore_bad_chains = False):
'''If ignore_bad_chains is True then any chains containing all Xs as the sequence will be silently skipped.
The default behavior is to raise a MalformedSequenceException in this case.'''
if len(self.pdb_chains) > 1:
sa = SequenceAligner(alignment_tool = alignment_tool, gap_opening_penalty = gap_opening_penalty)
for pdb_chain in self.pdb_chains:
sa.add_sequence('%s_%s' % (pdb_chain['pdb_id'], pdb_chain['chain_id']), pdb_chain['sequence'], ignore_bad_chains = ignore_bad_chains)
best_matches = sa.align()
return sa.alignment_output, best_matches
else:
raise Exception('Cannot align sequences - less than two chains were specified.')
|
class PDBChainSequenceAligner(object):
'''This is a useful utility class which allows you to quickly figure out when sequences are identical on their overlap or what the mutations are.
I used this in the DDG project to investigate PDB files to determine overlap between the binding affinity datasets.
Example usage:
pcsa = PDBChainSequenceAligner(initial_chains = [('2GOO', 'A'), ('2GOO', 'D'), ('2H62', 'A'), ('2H62', 'B')], cache_dir = '/tmp')
output, best_matches = pcsa.align()
colortext.warning(pprint.pformat(best_matches))
'''
def __init__(self, initial_chains = [], cache_dir = None):
'''initial_chains should be a list of (pdb_id, chain_id) tuples/lists.'''
pass
def add(self, pdb_id, chain_id, sequence = None):
pass
def align(self, alignment_tool = 'clustalw', gap_opening_penalty = 0.2, ignore_bad_chains = False):
'''If ignore_bad_chains is True then any chains containing all Xs as the sequence will be silently skipped.
The default behavior is to raise a MalformedSequenceException in this case.'''
pass
| 4 | 3 | 10 | 0 | 8 | 2 | 2 | 0.48 | 1 | 4 | 2 | 0 | 3 | 2 | 3 | 3 | 45 | 8 | 25 | 11 | 21 | 12 | 20 | 11 | 16 | 3 | 1 | 2 | 7 |
143,486 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/fragments/utils.py
|
klab.bio.fragments.utils.LogFile.LogFileException
|
class LogFileException(Exception): pass
|
class LogFileException(Exception):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
143,487 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/scripting.py
|
klab.scripting.catch_and_print_errors
|
class catch_and_print_errors:
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
if exc_type == KeyboardInterrupt:
print()
return True
if getattr(exc_value, 'no_stack_trace', False):
print_warning(str(exc_value))
return True
def __call__(self, function):
@wraps(function)
def wrapper(*args, **kwargs):
with self:
return function(*args, **kwargs)
return wrapper
|
class catch_and_print_errors:
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
pass
def __call__(self, function):
pass
@wraps(function)
def wrapper(*args, **kwargs):
pass
| 6 | 0 | 5 | 0 | 5 | 0 | 2 | 0 | 0 | 2 | 0 | 0 | 3 | 0 | 3 | 3 | 19 | 3 | 16 | 6 | 10 | 0 | 15 | 5 | 10 | 3 | 0 | 1 | 6 |
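A short sketch showing catch_and_print_errors used both as a context manager and as a decorator, assuming klab.scripting is importable; the failing function below is hypothetical.
from klab.scripting import catch_and_print_errors

# As a context manager: a KeyboardInterrupt inside the block is swallowed
# (a blank line is printed) instead of dumping a traceback.
with catch_and_print_errors():
    print('work that might raise KeyboardInterrupt goes here')

# As a decorator: exceptions flagged with no_stack_trace are reported via
# print_warning() rather than raised.
@catch_and_print_errors()
def main():
    err = ValueError('something went wrong')
    err.no_stack_trace = True
    raise err

main()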
143,488 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/pdb.py
|
klab.bio.pdb.PDB
|
class PDB(object):
"""A class to store and manipulate PDB data"""
### Constructor ###
_std_ligand_parsing_error_message = 'Ligand information was requested but ligands are not parsed by default. Please set parse_ligands=True in the constructor for the PDB object.'
_std_ion_parsing_error_message = 'Ion information was requested but ions are not parsed by default. Please set parse_ligands=True in the constructor for the PDB object.'
def __init__(self, pdb_content, pdb_id = None, strict = True, parse_ligands = False):
'''Takes either a pdb file, a list of strings = lines of a pdb file, or another object.'''
self.pdb_content = pdb_content
if type(pdb_content) is bytes:
self.lines = pdb_content.decode().split("\n")
else:
#self.lines = [line.strip() for line in pdb_content]
# Change - for python3 compatibility?
self.lines = pdb_content.split('\n')
self.parsed_lines = {}
self.structure_lines = [] # For ATOM and HETATM records
self.ddGresmap = None
self.ddGiresmap = None
self.journal = None
self.chain_types = {}
self.format_version = None
self.modified_residues = None
self.modified_residue_mapping_3 = {}
self.pdb_id = None
self.strict = strict
self.cache_dir = None # todo: Add a cache dir for when we download data e.g. ligand info
self.seqres_chain_order = [] # A list of the PDB chains in document-order of SEQRES records
self.seqres_sequences = {} # A dict mapping chain IDs to SEQRES Sequence objects
self.atom_chain_order = [] # A list of the PDB chains in document-order of ATOM records (not necessarily the same as seqres_chain_order)
self.atom_sequences = {} # A dict mapping chain IDs to ATOM Sequence objects
self.chain_atoms = {} # A dict mapping chain IDs to a set of ATOM types. This is useful to test whether some chains only have CA entries e.g. in 1LRP, 1AIN, 1C53, 1HIO, 1XAS, 2TMA
self.solution = {} # A dict mapping chain IDs to solution molecules (typically water)
self.bfactors = None # A dict containing overall and per-residue B-factors. Filled in on request by get_B_factors.
# Heterogen fields
self.ligands = None # A dict mapping chain IDs to ligands. This is initialized to None so we can distinguish between two cases: i) the user is not parsing any ligand information (self.ligands should be set to None after initialization); and ii) the user is parsing ligand information but this PDB file has no information (self.ligands should be set to {} after initialization).
self.ligand_objects = None # A dict mapping PDB codes to object files retrieved via the ligand module from the RCSB. See comment for self.ligands above.
self.ions = None # A dict mapping chain IDs to ions. See comment for self.ligands above.
# PDB deprecation fields
self.deprecation_date = None
self.deprecated = False
self.replacement_pdb_id = None
self.rosetta_to_atom_sequence_maps = {}
self.rosetta_residues = []
self.residue_types = set() # the set of 3-letter residue types present in the file (useful for checking against e.g. CSE, MSE)
self.fix_pdb()
self._split_lines()
self.pdb_id = pdb_id
self.pdb_id = self.get_pdb_id() # parse the PDB ID if it is not passed in
self._apply_hacks()
self._get_pdb_format_version()
self._get_modified_residues()
self._get_replacement_pdb_id()
if missing_chain_ids.get(self.pdb_id):
self._update_structure_lines()
self._get_SEQRES_sequences()
self._get_ATOM_sequences()
self.hetatm_formulae = PDB.convert_hetatms_to_Hill_notation(self.parsed_lines['HETATM'])
if parse_ligands:
self._get_heterogens()
def __repr__(self):
return '\n'.join(self.lines)
def fix_pdb(self):
'''A function to fix fatal errors in PDB files when they can be automatically fixed. At present, this only runs if
self.strict is False. We may want a separate property for this since we may want to keep strict mode but still
allow PDBs to be fixed.
The only fixes at the moment are for missing chain IDs which get filled in with a valid PDB ID, if possible.'''
if self.strict:
return
# Get the list of chains
chains = set()
for l in self.lines:
if l.startswith('ATOM ') or l.startswith('HETATM'):
chains.add(l[21])
# If there is a chain with a blank ID, change that ID to a valid unused ID
if ' ' in chains:
fresh_id = None
allowed_chain_ids = list(string.ascii_uppercase) + list(string.ascii_lowercase) + list(map(str, list(range(10))))
for c in chains:
try: allowed_chain_ids.remove(c)
except: pass
if allowed_chain_ids:
fresh_id = allowed_chain_ids[0]
# Rewrite the lines
new_lines = []
if fresh_id:
for l in self.lines:
if (l.startswith('ATOM ') or l.startswith('HETATM')) and l[21] == ' ':
new_lines.append('%s%s%s' % (l[:21], fresh_id, l[22:]))
else:
new_lines.append(l)
self.lines = new_lines
def _apply_hacks(self):
if self.pdb_id:
pdb_id = self.pdb_id.upper()
if pdb_id == '2MBP':
newlines = []
added_COMPND = False
for l in self.lines:
if l.startswith('COMPND'):
if not added_COMPND:
newlines.append('COMPND MOL_ID: 1;')
newlines.append('COMPND 2 MOLECULE: MALTODEXTRIN-BINDING PROTEIN;')
newlines.append('COMPND 3 CHAIN: A;')
newlines.append('COMPND 4 ENGINEERED: YES')
added_COMPND = True
elif l.startswith("ATOM ") or l.startswith("HETATM") or l.startswith("TER"):
newlines.append('%s%s%s' % (l[0:21], 'A', l[22:]))
elif l.startswith("HET "):
newlines.append('%s%s%s' % (l[0:12], 'A', l[13:]))
elif l.startswith("SEQRES"):
newlines.append('%s%s%s' % (l[0:12], 'A', l[13:]))
else:
newlines.append(l)
self.lines = newlines
elif pdb_id == '1HE1':
newlines = []
for l in self.lines:
if l.startswith('HET NI C 204 2'):
newlines.append('HET NI C 204 1') # There is one atom in two possible locations. Marking this entry as having 2 atoms breaks an assertion below.
else:
newlines.append(l)
self.lines = newlines
elif pdb_id == '1R0R':
newlines = []
for l in self.lines:
if l.startswith('HET CA E 303 2'):
newlines.append('HET CA E 303 1') # There is one atom in two possible locations. Marking this entry as having 2 atoms breaks an assertion below.
elif l.startswith('HET CA E 305 2'):
newlines.append('HET CA E 305 1') # There is one atom in two possible locations. Marking this entry as having 2 atoms breaks an assertion below.
else:
newlines.append(l)
self.lines = newlines
elif pdb_id == '3H7P':
newlines = []
for l in self.lines:
if l.startswith('HET CD A 78 2'):
newlines.append('HET CD A 78 1') # There is one atom in two possible locations. Marking this entry as having 2 atoms breaks an assertion below.
elif l.startswith('HET CD A 80 2'):
newlines.append('HET CD A 80 1') # There is one atom in two possible locations. Marking this entry as having 2 atoms breaks an assertion below.
elif l.startswith('HET CD B 78 2'):
newlines.append('HET CD B 78 1') # There is one atom in two possible locations. Marking this entry as having 2 atoms breaks an assertion below.
else:
newlines.append(l)
self.lines = newlines
elif ROSETTA_HACKS_residues_to_remove.get(pdb_id):
hacks = ROSETTA_HACKS_residues_to_remove[pdb_id]
self.lines = [l for l in self.lines if not(l.startswith('ATOM' )) or (l[21:27] not in hacks)]
self._split_lines()
### Class functions ###
@staticmethod
def replace_headers(source_pdb_content, target_pdb_content):
'''Takes the headers from source_pdb_content and adds them to target_pdb_content, removing any headers that
target_pdb_content had.
Only the content up to the first structural line are taken from source_pdb_content and only the content from
the first structural line in target_pdb_content are taken.
'''
s = PDB(source_pdb_content)
t = PDB(target_pdb_content)
source_headers = []
for l in s.lines:
if l[:6].strip() in non_header_records:
break
else:
source_headers.append(l)
target_body = []
in_header = True
for l in t.lines:
if l[:6].strip() in non_header_records:
in_header = False
if not in_header:
target_body.append(l)
return '\n'.join(source_headers + target_body)
@staticmethod
def from_filepath(filepath, strict = True, parse_ligands = False):
'''A function to replace the old constructor call where a filename was passed in.'''
return PDB(read_file(filepath), strict = strict, parse_ligands = parse_ligands)
@staticmethod
def from_lines(pdb_file_lines, strict = True, parse_ligands = False):
'''A function to replace the old constructor call where a list of the file's lines was passed in.'''
return PDB("\n".join(pdb_file_lines), strict = strict, parse_ligands = parse_ligands)
@staticmethod
def retrieve(pdb_id, cache_dir = None, strict = True, parse_ligands = False):
'''Creates a PDB object by using a cached copy of the file if it exists or by retrieving the file from the RCSB.'''
# Check to see whether we have a cached copy
pdb_id = pdb_id.upper()
if cache_dir:
filename = os.path.join(cache_dir, "%s.pdb" % pdb_id)
if os.path.exists(filename):
return PDB(read_file(filename), strict = strict, parse_ligands = parse_ligands)
# Get a copy from the RCSB
contents = rcsb.retrieve_pdb(pdb_id)
# Create a cached copy if appropriate
if cache_dir:
write_file(os.path.join(cache_dir, "%s.pdb" % pdb_id), contents)
# Return the object
return PDB(contents, strict = strict, parse_ligands = parse_ligands)
### Private functions ###
def _split_lines(self):
'''Creates the parsed_lines dict which keeps all record data in document order indexed by the record type.'''
parsed_lines = {}
for rt in all_record_types:
parsed_lines[rt] = []
parsed_lines[0] = []
for line in self.lines:
linetype = line[0:6]
if linetype in all_record_types:
parsed_lines[linetype].append(line)
else:
parsed_lines[0].append(line)
self.parsed_lines = parsed_lines
self._update_structure_lines() # This does a second loop through the lines. We could do this logic above but I prefer a little performance hit for the cleaner code
def _update_structure_lines(self):
'''ATOM and HETATM lines may be altered by function calls. When this happens, this function should be called to keep self.structure_lines up to date.'''
structure_lines = []
atom_chain_order = []
chain_atoms = {}
for line in self.lines:
linetype = line[0:6]
# Not sure why 'TER' is here...
if linetype == 'ATOM ' or linetype == 'HETATM': #or linetype == 'TER ':
chain_id = line[21]
self.residue_types.add(line[17:20].strip())
if missing_chain_ids.get(self.pdb_id):
chain_id = missing_chain_ids[self.pdb_id]
structure_lines.append(line)
if (chain_id not in atom_chain_order) and (chain_id != ' '):
atom_chain_order.append(chain_id)
if linetype == 'ATOM ':
atom_type = line[12:16].strip()
if atom_type:
chain_atoms[chain_id] = chain_atoms.get(chain_id, set())
chain_atoms[chain_id].add(atom_type)
if linetype == 'ENDMDL':
colortext.warning("ENDMDL detected: Breaking out early. We do not currently handle NMR structures properly.")
break
self.structure_lines = structure_lines
self.atom_chain_order = atom_chain_order
self.chain_atoms = chain_atoms
### Basic functions ###
def clone(self, parse_ligands = False):
'''A function to replace the old constructor call where a PDB object was passed in and 'cloned'.'''
return PDB("\n".join(self.lines), pdb_id = self.pdb_id, strict = self.strict, parse_ligands = parse_ligands) # todo: we should copy the ligand information rather than reparse it
def get_content(self):
'''A function to replace the old constructor call where a PDB object was passed in and 'cloned'.'''
return '\n'.join(self.lines)
def write(self, pdbpath, separator = '\n'):
write_file(pdbpath, separator.join(self.lines))
def get_pdb_id(self):
'''Return the PDB ID. If one was passed in to the constructor, this takes precedence, otherwise the header is
parsed to try to find an ID. The header does not always contain a PDB ID in regular PDB files and appears to
always have an ID of 'XXXX' in biological units so the constructor override is useful.'''
if self.pdb_id:
return self.pdb_id
else:
header = self.parsed_lines["HEADER"]
assert(len(header) <= 1)
if header:
self.pdb_id = header[0][62:66]
return self.pdb_id
return None
def get_ATOM_and_HETATM_chains(self):
'''todo: remove this function as it now just returns a member element'''
return self.atom_chain_order
def get_annotated_chain_sequence_string(self, chain_id, use_seqres_sequences_if_possible, raise_Exception_if_not_found = True):
'''A helper function to return the Sequence for a chain. If use_seqres_sequences_if_possible then we return the SEQRES
Sequence if it exists. We return a tuple of values, the first identifying which sequence was returned.'''
if use_seqres_sequences_if_possible and self.seqres_sequences and self.seqres_sequences.get(chain_id):
return ('SEQRES', self.seqres_sequences[chain_id])
elif self.atom_sequences.get(chain_id):
return ('ATOM', self.atom_sequences[chain_id])
elif raise_Exception_if_not_found:
raise Exception('Error: Chain %s expected but not found.' % (str(chain_id)))
else:
return None
def get_chain_sequence_string(self, chain_id, use_seqres_sequences_if_possible, raise_Exception_if_not_found = True):
'''Similar to get_annotated_chain_sequence_string except that we only return the Sequence and do not state which sequence it was.'''
chain_pair = self.get_annotated_chain_sequence_string(chain_id, use_seqres_sequences_if_possible, raise_Exception_if_not_found = raise_Exception_if_not_found)
if chain_pair:
return chain_pair[1]
return None
def _get_modified_residues(self):
if not self.modified_residues:
modified_residues = {}
modified_residue_mapping_3 = {}
# Add in the patch
for k, v in modified_residues_patch.get(self.pdb_id, {}).items():
modified_residue_mapping_3[k] = v
for line in self.parsed_lines["MODRES"]:
restype = line[24:27].strip()
restype_1 = residue_type_3to1_map.get(restype) or dna_nucleotides_2to1_map.get(restype)
if not restype_1:
assert(restype in rna_nucleotides)
restype_1 = restype
modified_residues["%s%s" % (line[16], line[18:23])] = {'modified_residue' : line[12:15], 'original_residue_3' : restype, 'original_residue_1' : restype_1}
modified_residue_mapping_3[line[12:15]] = restype
self.modified_residue_mapping_3 = modified_residue_mapping_3
self.modified_residues = modified_residues
def _get_replacement_pdb_id(self):
'''Checks to see if the PDB file has been deprecated and, if so, what the new ID is.'''
deprecation_lines = self.parsed_lines['OBSLTE']
date_regex = re.compile('(\d+)-(\w{3})-(\d+)')
if deprecation_lines:
assert(len(deprecation_lines) == 1)
tokens = deprecation_lines[0].split()[1:]
if tokens[1].upper() in obsolete_pdb_ids_with_no_replacement_entries:
assert(len(tokens) == 2)
else:
assert(len(tokens) == 3)
if self.pdb_id:
mtchs = date_regex.match(tokens[0])
assert(mtchs)
_day = int(mtchs.group(1))
_month = mtchs.group(2)
_year = int(mtchs.group(3)) # only two characters?
assert(tokens[1] == self.pdb_id)
self.deprecation_date = (_day, _month, _year) # no need to do anything fancier unless this is ever needed
self.deprecated = True
if len(tokens) == 3:
assert(len(tokens[2]) == 4)
self.replacement_pdb_id = tokens[2]
### PDB mutating functions ###
def strip_to_chains(self, chains, break_at_endmdl = True):
'''Throw away all ATOM/HETATM/ANISOU/TER lines for chains that are not in the chains list.'''
if chains:
chains = set(chains)
# Remove any structure lines not associated with the chains
self.lines = [l for l in self.lines if not(l.startswith('ATOM ') or l.startswith('HETATM') or l.startswith('ANISOU') or l.startswith('TER')) or l[21] in chains]
# For some Rosetta protocols, only one NMR model should be kept
if break_at_endmdl:
new_lines = []
for l in self.lines:
if l.startswith('ENDMDL'):
new_lines.append(l)
break
new_lines.append(l)
self.lines = new_lines
self._update_structure_lines()
# todo: this logic should be fine if no other member elements rely on these lines e.g. residue mappings otherwise we need to update or clear those elements here
else:
raise Exception('The chains argument needs to be supplied.')
def strip_HETATMs(self, only_strip_these_chains = []):
'''Throw away all HETATM lines. If only_strip_these_chains is specified then only strip HETATMs lines for those chains.'''
if only_strip_these_chains:
self.lines = [l for l in self.lines if not(l.startswith('HETATM')) or l[21] not in only_strip_these_chains]
else:
self.lines = [l for l in self.lines if not(l.startswith('HETATM'))]
self._update_structure_lines()
# todo: this logic should be fine if no other member elements rely on these lines e.g. residue mappings otherwise we need to update those elements here
def generate_all_point_mutations_for_chain(self, chain_id):
mutations = []
if self.atom_sequences.get(chain_id):
aas = sorted(residue_type_3to1_map.values())
aas.remove('X')
seq = self.atom_sequences[chain_id]
for res_id in seq.order:
r = seq.sequence[res_id]
assert(chain_id == r.Chain)
for mut_aa in aas:
if mut_aa != r.ResidueAA:
mutations.append(ChainMutation(r.ResidueAA, r.ResidueID, mut_aa, Chain = chain_id))
return mutations
def generate_all_paired_mutations_for_position(self, chain_ids, chain_sequence_mappings = {}, residue_ids_to_ignore = [], typed_residue_ids_to_ignore = [], silent = True):
'''Generates a set of mutations for the chains in chain_ids where each set corresponds to the "same" residue (see
below) in both chains and where the wildtype residues match.
e.g. if chain A and B both have K19 then the set of mutations K19A, ... K19I, K19L, K19Y will be included in
in the returned results unless 19 is in residue_ids_to_ignore or typed_residue_ids_to_ignore.
residue_ids_to_ignore should be a list/set of residue IDs.
typed_residue_ids_to_ignore should be a dict residue ID -> residue AA. It is used similarly to residue_ids_to_ignore
but we also assert that the residue types match the sequences in the chains.
By default, "same residue" is inferred by residue ID i.e. the generation assumes that a residue with some ID
in one chain corresponds to the residue with the same ID in another chain. If this is not true then a mapping
between chain residues is necessary and should be provided using the chain_sequence_mappings parameter.
chain_sequence_mappings should be a dict from pairs of chain IDs to SequenceMap objects. As all sequences are
compared with the first chain in chain_ids, only mappings from that first chain to any other chain are used.
This function is useful in certain cases e.g. generating a set of mutations where we make the same mutation in
both chains of a homodimer or a quasi-homodimer (where we only mutate the positions which agree).
'''
residue_ids_to_ignore = set([str(r).strip() for r in residue_ids_to_ignore])
for k, v in typed_residue_ids_to_ignore.items():
typed_residue_ids_to_ignore[k] = v.strip()
assert(len(chain_ids) > 0)
first_chain = chain_ids[0]
mutations = []
if sorted(set(self.atom_sequences.keys()).intersection(set(chain_ids))) == sorted(chain_ids):
aas = sorted(residue_type_3to1_map.values())
aas.remove('X')
sequence = self.atom_sequences[first_chain]
for res_id in sequence.order:
chain_res_ids = {}
for c in chain_ids:
chain_res_ids[c] = c + res_id[1:]
if c != first_chain and chain_sequence_mappings.get((first_chain, c)):
chain_res_ids[c] = chain_sequence_mappings[(first_chain, c)][res_id]
sres_id = str(res_id)[1:].strip()
skip = sres_id in residue_ids_to_ignore
if not skip and sres_id in typed_residue_ids_to_ignore:
for c in chain_ids:
if chain_res_ids[c] in self.atom_sequences[c].sequence:
if not typed_residue_ids_to_ignore[sres_id] == self.atom_sequences[c][chain_res_ids[c]].ResidueAA:
raise Exception('Expected to find {0} at residue {1} but found {2} in chain {3} at this position.'.format(typed_residue_ids_to_ignore[sres_id], sres_id, self.atom_sequences[c][chain_res_ids[c]].ResidueAA, c))
skip = True
if skip:
if not silent:
print(('Skipping residue {0} as requested.'.format(res_id)))
continue
for c in chain_ids:
if (chain_res_ids[c]) not in self.atom_sequences[c].sequence:
if not silent:
print(('Skipping residue {0} as it is missing from chain {1}.'.format(res_id, c)))
skip = True
if skip:
continue
chain_res_aas = set([self.atom_sequences[c][chain_res_ids[c]].ResidueAA for c in chain_ids if chain_res_ids[c] in self.atom_sequences[c].sequence])
if len(chain_res_aas) > 1:
if not silent:
colortext.warning('Skipping residue {0} as the amino acid type differs between the specified chains.'.format(res_id))
continue
wt_aa = chain_res_aas.pop()
for mut_aa in aas:
if mut_aa != wt_aa:
mutations.append([ChainMutation(wt_aa, str(chain_res_ids[c])[1:].strip(), mut_aa, Chain = c) for c in chain_ids])
return mutations
else:
raise Exception('Chain(s) {0} could not be found in the PDB file.'.format(', '.join(sorted(set(chain_ids).difference(set(self.atom_sequences.keys()))))))
### FASTA functions ###
def create_fasta(self, length = 80, prefer_seqres_order = True, header = True):
fasta_string = ''
if prefer_seqres_order:
chain_order, sequences = self.seqres_chain_order or self.atom_chain_order, self.seqres_sequences or self.atom_sequences
else:
chain_order, sequences = self.atom_chain_order or self.seqres_chain_order, self.atom_sequences or self.seqres_sequences
for c in chain_order:
if c not in sequences:
continue
seq = str(sequences[c])
if header:
fasta_string += '>%s|%s|PDBID|CHAIN|SEQUENCE\n' % (self.pdb_id, c)
for line in [seq[x:x+length] for x in range(0, len(seq), length)]:
fasta_string += line + '\n'
else:
fasta_string += seq
return fasta_string
### PDB file parsing functions ###
def _get_pdb_format_version(self):
'''Remark 4 indicates the version of the PDB File Format used to generate the file.'''
if not self.format_version:
version = None
version_lines = None
try:
version_lines = [line for line in self.parsed_lines['REMARK'] if int(line[7:10]) == 4 and line[10:].strip()]
except: pass
if version_lines:
assert(len(version_lines) == 1)
version_line = version_lines[0]
version_regex = re.compile('.*?FORMAT V.(.*),')
mtch = version_regex.match(version_line)
if mtch and mtch.groups(0):
try:
version = float(mtch.groups(0)[0])
except:
pass
self.format_version = version
def get_resolution(self):
resolution = None
resolution_lines_exist = False
for line in self.parsed_lines["REMARK"]:
if line[9] == "2" and line[11:22] == "RESOLUTION.":
#if id == :
# line = "REMARK 2 RESOLUTION. 3.00 ANGSTROMS.
# This code SHOULD work but there are badly formatted PDBs in the RCSB database.
# e.g. "1GTX"
#if line[31:41] == "ANGSTROMS.":
# try:
# resolution = float(line[23:30])
# except:
# raise Exception("Error parsing PDB file to determine resolution. The resolution line\n '%s'\ndoes not match the PDB standard. Expected data for diffraction experiments." % line )
#if line[23:38] == "NOT APPLICABLE.":
# resolution = "N/A"
#else:
# raise Exception("Error parsing PDB file to determine resolution. The resolution line\n '%s'\ndoes not match the PDB standard." % line )
#
# Instead, we use the code below:
if resolution:
raise Exception("Found multiple RESOLUTION lines.")
resolution_lines_exist = True
strippedline = line[22:].strip()
Aindex = strippedline.find("ANGSTROMS.")
if strippedline == "NOT APPLICABLE.":
resolution = "N/A"
elif Aindex != -1 and strippedline.endswith("ANGSTROMS."):
if strippedline[:Aindex].strip() == "NULL":
resolution = "N/A" # Yes, yes, yes, I know. Look at 1WSY.pdb.
else:
try:
resolution = float(strippedline[:Aindex].strip())
except:
raise PDBParsingException("Error parsing PDB file to determine resolution. The resolution line\n '%s'\ndoes not match the PDB standard. Expected data for diffraction experiments." % line )
else:
raise PDBParsingException("Error parsing PDB file to determine resolution. The resolution line\n '%s'\ndoes not match the PDB standard." % line )
if resolution_lines_exist and not resolution:
raise PDBParsingException("Could not determine resolution.")
return resolution
def get_title(self):
if self.parsed_lines.get("TITLE "):
return " ".join([line[10:80].strip() for line in self.parsed_lines["TITLE "] if line[10:80].strip()])
return None
def get_techniques(self):
techniques = None
technique_lines_exist = False
for line in self.parsed_lines["EXPDTA"]:
technique_lines_exist = True
techniques = line[10:71].split(";")
for k in range(len(techniques)):
techniques[k] = techniques[k].strip()
techniques = ";".join(techniques)
if technique_lines_exist and not techniques:
raise PDBParsingException("Could not determine techniques used.")
return techniques
def get_UniProt_ACs(self):
return [v['dbAccession'] for k, v in self.get_DB_references().get(self.pdb_id, {}).get('UNIPROT', {}).items()]
def get_DB_references(self):
''' "The DBREF record provides cross-reference links between PDB sequences (what appears in SEQRES record) and
a corresponding database sequence." - http://www.wwpdb.org/documentation/format33/sect3.html#DBREF
'''
_database_names = {
'GB' : 'GenBank',
'PDB' : 'Protein Data Bank',
'UNP' : 'UNIPROT',
'NORINE': 'Norine',
'TREMBL': 'UNIPROT',
}
DBref = {}
for l in self.parsed_lines["DBREF "]: # [l for l in self.lines if l.startswith('DBREF')]
pdb_id = l[7:11]
chain_id = l[12]
seqBegin = int(l[14:18])
insertBegin = l[18]
seqEnd = int(l[20:24])
insertEnd = l[24]
database = _database_names[l[26:32].strip()]
dbAccession = l[33:41].strip()
dbIdCode = l[42:54].strip()
dbseqBegin = int(l[55:60])
idbnsBeg = l[60]
dbseqEnd = int(l[62:67])
dbinsEnd = l[67]
DBref[pdb_id] = DBref.get(pdb_id, {})
DBref[pdb_id][database] = DBref[pdb_id].get(database, {})
if DBref[pdb_id][database].get(chain_id):
if not(DBref[pdb_id][database][chain_id]['dbAccession'] == dbAccession and DBref[pdb_id][database][chain_id]['dbIdCode'] == dbIdCode):
raise PDBParsingException('This code needs to be generalized. dbIdCode should really be a list to handle chimera cases.')
else:
DBref[pdb_id][database][chain_id] = {'dbAccession' : dbAccession, 'dbIdCode' : dbIdCode, 'PDBtoDB_mapping' : []}
DBref[pdb_id][database][chain_id]['PDBtoDB_mapping'].append(
{'PDBRange' : ("%d%s" % (seqBegin, insertBegin), "%d%s" % (seqEnd, insertEnd)),
'dbRange' : ("%d%s" % (dbseqBegin, idbnsBeg), "%d%s" % (dbseqEnd, dbinsEnd)),
}
)
return DBref
def get_molecules_and_source(self):
# Check the COMPND lines
COMPND_lines = self.parsed_lines["COMPND"]
for x in range(1, len(COMPND_lines)):
assert(int(COMPND_lines[x][7:10]) == x+1)
if not COMPND_lines:
raise MissingRecordsException("No COMPND records were found. Handle this gracefully.")
# Concatenate the COMPND lines into one string, removing double spaces
COMPND_lines = " ".join([line[10:].strip() for line in COMPND_lines])
COMPND_lines = COMPND_lines.replace("  ", " ")
# Split the COMPND lines into separate molecule entries
molecules = {}
MOL_DATA = ["MOL_ID:%s".strip() % s for s in COMPND_lines.split('MOL_ID:') if s]
# Parse the molecule entries
# The hacks below are due to some PDBs breaking the grammar by not following the standard which states:
# Specification: A String composed of a token and its associated value separated by a colon.
# Specification List: A sequence of Specifications, separated by semi-colons.
# COMPND records are a specification list so semi-colons should not appear inside entries.
# The hacks below could probably be removed if I assumed that the standard was not followed (valid) by
# e.g. splitting the COMPND data by allowed tokens (the keys of COMPND_field_map)
# but I would want lots of tests in place first.
for MD in MOL_DATA:
# Hack for 2OMT
MD = MD.replace('EPITHELIAL-CADHERIN; E-CAD/CTF1', 'EPITHELIAL-CADHERIN: E-CAD/CTF1')
# Hack for 1M2T
MD = MD.replace('SYNONYM: BETA-GALACTOSIDE SPECIFIC LECTIN I A CHAIN; MLA; ML-I A;', 'SYNONYM: BETA-GALACTOSIDE SPECIFIC LECTIN I A CHAIN, MLA, ML-I A,')
# Hack for 1IBR
MD = MD.replace('SYNONYM: RAN; TC4; RAN GTPASE; ANDROGEN RECEPTOR- ASSOCIATED PROTEIN 24;', 'SYNONYM: RAN TC4, RAN GTPASE, ANDROGEN RECEPTOR-ASSOCIATED PROTEIN 24;')
# Hack for 1IBR
MD = MD.replace('SYNONYM: KARYOPHERIN BETA-1 SUBUNIT; P95; NUCLEAR FACTOR P97; IMPORTIN 90', 'SYNONYM: KARYOPHERIN BETA-1 SUBUNIT, P95, NUCLEAR FACTOR P97, IMPORTIN 90')
# Hack for 1NKH
MD = MD.replace('SYNONYM: B4GAL-T1; BETA4GAL-T1; BETA-1,4-GALTASE 1; BETA-1, 4-GALACTOSYLTRANSFERASE 1; UDP-GALACTOSE:BETA-N- ACETYLGLUCOSAMINE BETA-1,4-GALACTOSYLTRANSFERASE 1; EC: 2.4.1.22, 2.4.1.90, 2.4.1.38; ENGINEERED: YES; OTHER_DETAILS: CHAINS A AND B FORM FIRST, C AND D SECOND LACTOSE SYNTHASE COMPLEX',
'SYNONYM: B4GAL-T1, BETA4GAL-T1, BETA-1,4-GALTASE 1, BETA-1, 4-GALACTOSYLTRANSFERASE 1, UDP-GALACTOSE:BETA-N- ACETYLGLUCOSAMINE BETA-1,4-GALACTOSYLTRANSFERASE 1, EC: 2.4.1.22, 2.4.1.90, 2.4.1.38, ENGINEERED: YES, OTHER_DETAILS: CHAINS A AND B FORM FIRST, C AND D SECOND LACTOSE SYNTHASE COMPLEX')
# Hacks for 2PMI
MD = MD.replace('SYNONYM: SERINE/THREONINE-PROTEIN KINASE PHO85; NEGATIVE REGULATOR OF THE PHO SYSTEM;',
'SYNONYM: SERINE/THREONINE-PROTEIN KINASE PHO85, NEGATIVE REGULATOR OF THE PHO SYSTEM;')
MD = MD.replace('SYNONYM: PHOSPHATE SYSTEM CYCLIN PHO80; AMINOGLYCOSIDE ANTIBIOTIC SENSITIVITY PROTEIN 3;',
'SYNONYM: PHOSPHATE SYSTEM CYCLIN PHO80, AMINOGLYCOSIDE ANTIBIOTIC SENSITIVITY PROTEIN 3;')
# Hack for 1JRH
MD = MD.replace('FAB FRAGMENT;PEPSIN DIGESTION OF INTACT ANTIBODY', 'FAB FRAGMENT,PEPSIN DIGESTION OF INTACT ANTIBODY')
# Hack for 1KJ1
MD = MD.replace('SYNONYM: MANNOSE-SPECIFIC AGGLUTININ; LECGNA ', 'SYNONYM: MANNOSE-SPECIFIC AGGLUTININ, LECGNA ')
# Hack for 1OCC - The Dean and I
MD = MD.replace('SYNONYM: FERROCYTOCHROME C\:OXYGEN OXIDOREDUCTASE', 'SYNONYM: FERROCYTOCHROME C, OXYGEN OXIDOREDUCTASE')
# Hack for 2AKY
MD = MD.replace('SYNONYM: ATP\:AMP PHOSPHOTRANSFERASE, MYOKINASE', 'SYNONYM: ATP, AMP PHOSPHOTRANSFERASE, MYOKINASE')
# Hack for 3BCI
MD = MD.replace('SYNONYM: THIOL:DISULFIDE OXIDOREDUCTASE DSBA', 'SYNONYM: THIOL, DISULFIDE OXIDOREDUCTASE DSBA')
# Hack for 3BCI
MD = MD.replace('SYNONYM: THIOL:DISULFIDE OXIDOREDUCTASE DSBA', 'SYNONYM: THIOL, DISULFIDE OXIDOREDUCTASE DSBA')
# Hack for 1ELV
MD = MD.replace('FRAGMENT: CCP2-SP CATALYTIC FRAGMENT: ASP363-ASP-673 SEGMENT PRECEDED BY AN ASP-LEU SEQUENCE ADDED AT THE N-TERMINAL END',
'FRAGMENT: CCP2-SP CATALYTIC FRAGMENT; ASP363-ASP-673 SEGMENT PRECEDED BY AN ASP-LEU SEQUENCE ADDED AT THE N-TERMINAL END')
# Hack for 1E6E
MD = MD.replace('MOLECULE: NADPH\:ADRENODOXIN OXIDOREDUCTASE;', 'MOLECULE: NADPH;ADRENODOXIN OXIDOREDUCTASE;')
# Hack for 1JZD
MD = MD.replace('MOLECULE: THIOL:DISULFIDE INTERCHANGE PROTEIN', 'MOLECULE: THIOL;DISULFIDE INTERCHANGE PROTEIN')
# Hack for 1N2C
MD = MD.replace('OTHER_DETAILS: 2\:1 COMPLEX OF HOMODIMERIC FE-PROTEIN', 'OTHER_DETAILS: 2;1 COMPLEX OF HOMODIMERIC FE-PROTEIN')
# Hack for 1S6P
MD = MD.replace('MOLECULE: POL POLYPROTEIN [CONTAINS: REVERSE TRANSCRIPTASE]', 'MOLECULE: POL POLYPROTEIN [CONTAINS; REVERSE TRANSCRIPTASE]')
# Hack for 1Z9E
MD = MD.replace('FRAGMENT: SEQUENCE DATABASE RESIDUES 347-471 CONTAINS: HIV- 1 INTEGRASE-BINDING DOMAIN', 'FRAGMENT: SEQUENCE DATABASE RESIDUES 347-471 CONTAINS; HIV- 1 INTEGRASE-BINDING DOMAIN')
# Hacks for 2GOX
MD = MD.replace('FRAGMENT: FRAGMENT OF ALPHA CHAIN: RESIDUES 996-1287;', 'FRAGMENT: FRAGMENT OF ALPHA CHAIN; RESIDUES 996-1287;')
MD = MD.replace('FRAGMENT: C-TERMINAL DOMAIN: RESIDUES 101-165;', 'FRAGMENT: C-TERMINAL DOMAIN; RESIDUES 101-165;')
MOL_fields = [s.strip() for s in MD.split(';') if s.strip()]
molecule = {}
for field in MOL_fields:
field = field.split(":")
if not(1 <= len(field) <= 2):
print((MD, field))
assert(1 <= len(field) <= 2)
if len(field) == 2: # Hack for 1MBG - missing field value
field_name = COMPND_field_map[field[0].strip()]
field_data = field[1].strip()
molecule[field_name] = field_data
### Normalize and type the fields ###
# Required (by us) fields
molecule['MoleculeID'] = int(molecule['MoleculeID'])
molecule['Chains'] = [c.strip() for c in molecule['Chains'].split(',')]
for c in molecule['Chains']:
assert(len(c) == 1)
# Optional fields
if not molecule.get('Engineered'):
molecule['Engineered'] = None
elif molecule.get('Engineered') == 'YES':
molecule['Engineered'] = True
elif molecule.get('Engineered') == 'NO':
molecule['Engineered'] = False
else:
raise PDBParsingException("Error parsing ENGINEERED field of COMPND lines. Expected 'YES' or 'NO', got '%s'." % molecule['Engineered'])
if molecule.get('Mutation'):
if molecule['Mutation'] != 'YES':
raise PDBParsingException("Error parsing MUTATION field of COMPND lines. Expected 'YES', got '%s'." % molecule['Mutation'])
else:
molecule['Mutation'] = True
else:
molecule['Mutation'] = None
# Add missing fields
for k in list(COMPND_field_map.values()):
if k not in list(molecule.keys()):
molecule[k] = None
molecules[molecule['MoleculeID']] = molecule
# Extract the SOURCE lines
SOURCE_lines = self.parsed_lines["SOURCE"]
for x in range(1, len(SOURCE_lines)):
assert(int(SOURCE_lines[x][7:10]) == x+1)
if not SOURCE_lines:
raise MissingRecordsException("No SOURCE records were found. Handle this gracefully.")
# Concatenate the SOURCE lines into one string, removing double spaces
SOURCE_lines = " ".join([line[10:].strip() for line in SOURCE_lines])
SOURCE_lines = SOURCE_lines.replace("  ", " ")
# Split the SOURCE lines into separate molecule entries
MOL_DATA = ["MOL_ID:%s".strip() % s for s in SOURCE_lines.split('MOL_ID:') if s]
# Parse the molecule entries
for MD in MOL_DATA:
MOL_fields = [s.strip() for s in MD.split(';') if s.strip()]
new_molecule = {}
for field in MOL_fields:
field = field.split(":")
if SOURCE_field_map.get(field[0].strip()):
field_name = SOURCE_field_map[field[0].strip()]
field_data = field[1].strip()
new_molecule[field_name] = field_data
MoleculeID = int(new_molecule['MoleculeID'])
assert(MoleculeID in molecules)
molecule = molecules[MoleculeID]
for field_name, field_data in new_molecule.items():
if field_name != 'MoleculeID':
molecule[field_name] = field_data
# Normalize and type the fields
if not molecule.get('Synthetic'):
molecule['Synthetic'] = None
elif molecule.get('Synthetic') == 'YES':
molecule['Synthetic'] = True
elif molecule.get('Synthetic') == 'NO':
molecule['Synthetic'] = False
else:
raise PDBParsingException("Error parsing SYNTHETIC field of SOURCE lines. Expected 'YES' or 'NO', got '%s'." % molecule['Synthetic'])
# Add missing fields
for k in list(SOURCE_field_map.values()):
if k not in list(molecule.keys()):
molecule[k] = None
return [v for k, v in sorted(molecules.items())]
def get_journal(self):
if self.parsed_lines["JRNL "]:
if not self.journal:
self.journal = JRNL(self.parsed_lines["JRNL "])
return self.journal.get_info()
return None
### Sequence-related functions ###
def _get_SEQRES_sequences(self):
'''Creates the SEQRES Sequences and stores the chains in order of their appearance in the SEQRES records. This order of chains
in the SEQRES sequences does not always agree with the order in the ATOM records.'''
pdb_id = self.get_pdb_id()
SEQRES_lines = self.parsed_lines["SEQRES"]
modified_residue_mapping_3 = self.modified_residue_mapping_3
# I commented this out since we do not need it for my current test cases
#for k, v in self.modified_residues.iteritems():
# assert(v['modified_residue'] not in modified_residues)
# modified_residues[v['modified_residue']] = v['original_residue_3']
for x in range(0, len(SEQRES_lines)):
assert(SEQRES_lines[x][7:10].strip().isdigit())
if not SEQRES_lines:
#colortext.warning("WARNING: No SEQRES records were found. Kyle is trying to handle this gracefully, but Shane may need to fix it")
return
seqres_chain_order = []
SEQRES_lines = [line[11:].rstrip() for line in SEQRES_lines] # we cannot strip the left characters as some cases e.g. 2MBP are missing chain identifiers
# Collect all residues for all chains, remembering the chain order
chain_tokens = {}
for line in SEQRES_lines:
chainID = line[0]
if missing_chain_ids.get(self.pdb_id):
chainID = missing_chain_ids[self.pdb_id]
if chainID not in seqres_chain_order:
seqres_chain_order.append(chainID)
chain_tokens[chainID] = chain_tokens.get(chainID, [])
chain_tokens[chainID].extend(line[6:].strip().split())
sequences = {}
self.chain_types = {}
canonical_acid_types = set(residue_type_3to1_map.keys())
canonical_acid_types.remove('UNK')
assert(len(canonical_acid_types) == 20)
for chain_id, tokens in chain_tokens.items():
# Determine whether the chain is DNA, RNA, or a protein chain
# 1H38 is a good test for this - it contains DNA (chains E and G and repeated by H, K, N, J, M, P), RNA (chain F, repeated by I, L, O) and protein (chain D, repeated by A,B,C) sequences
# 1ZC8 is similar but also has examples of DU
# 4IHY has examples of DI (I is inosine)
# 2GRB has RNA examples of I and U
# 1LRP has protein chains with only CA atoms
# This will throw an exception when a non-canonical is found which is not listed in basics.py. In that case, the list in basics.py should be updated.
chain_type = None
set_of_tokens = set(tokens)
if (set_of_tokens.union(all_recognized_dna) == all_recognized_dna):# or (len(set_of_tokens) <= 5 and len(set_of_tokens.union(dna_nucleotides)) == len(set_of_tokens) + 1): # allow one unknown DNA residue
chain_type = 'DNA'
elif (set_of_tokens.union(all_recognized_rna) == all_recognized_rna):# or (len(set_of_tokens) <= 5 and len(set_of_tokens.union(dna_nucleotides)) == len(set_of_tokens) + 1): # allow one unknown DNA residue
chain_type = 'RNA'
elif len(set_of_tokens) == 1 and 'UNK' in set_of_tokens:
chain_type = 'Unknown'
elif not(set_of_tokens.intersection(canonical_acid_types)):
# Zero canonical residues may imply a ligand or a heterogen chain
chain_type = PDB._determine_heterogen_chain_type(set_of_tokens)
else:
assert(len(set(tokens).intersection(dna_nucleotides)) == 0)
assert(len(set(tokens).intersection(rna_nucleotides)) == 0)
chain_type = 'Protein'
if not self.chain_atoms.get(chain_id):
# possible for biological unit files
continue
if self.chain_atoms[chain_id] == set(['CA']):
chain_type = 'Protein skeleton'
# Get the sequence, mapping non-canonicals to the appropriate letter
self.chain_types[chain_id] = chain_type
sequence = []
if chain_type == 'DNA':
for r in tokens:
if dna_nucleotides_2to1_map.get(r):
sequence.append(dna_nucleotides_2to1_map[r])
else:
if non_canonical_dna.get(r):
sequence.append(non_canonical_dna[r])
else:
raise Exception("Unknown DNA residue %s." % r)
elif chain_type == 'RNA':
for r in tokens:
if r in rna_nucleotides:
sequence.append(r)
else:
if non_canonical_rna.get(r):
sequence.append(non_canonical_rna[r])
else:
raise Exception("Unknown RNA residue %s." % r)
else:
token_counter = 0
for r in tokens:
token_counter += 1
if residue_type_3to1_map.get(r):
sequence.append(residue_type_3to1_map[r])
else:
if self.modified_residue_mapping_3.get(r):
sequence.append(residue_type_3to1_map[self.modified_residue_mapping_3.get(r)])
elif non_canonical_amino_acids.get(r):
#print('Mapping non-canonical residue %s to %s.' % (r, non_canonical_amino_acids[r]))
#print(SEQRES_lines)
#print(line)
sequence.append(non_canonical_amino_acids[r])
elif r == 'UNK':
continue
# Skip these residues
elif r == 'ACE' and token_counter == 1:
# Always allow ACE as the first residue of a chain
sequence.append('X')
elif r == 'ACE' and pdb_id in cases_with_ACE_residues_we_can_ignore:
sequence.append('X')
#continue
# End of skipped residues
else:
#print(modified_residue_mapping_3)
if modified_residue_mapping_3.get(r):
if modified_residue_mapping_3[r] == 'UNK':
sequence.append('X')
else:
assert(modified_residue_mapping_3[r] in residue_types_3)
sequence.append(residue_type_3to1_map[modified_residue_mapping_3[r]])
else:
raise Exception("Unknown protein residue %s in chain %s." % (r, chain_id))
sequences[chain_id] = "".join(sequence)
self.seqres_chain_order = seqres_chain_order
# Create Sequence objects for the SEQRES sequences
for chain_id, sequence in sequences.items():
self.seqres_sequences[chain_id] = Sequence.from_sequence(chain_id, sequence, self.chain_types[chain_id])
def _get_ATOM_sequences(self):
'''Creates the ATOM Sequences.'''
# Get a list of all residues with ATOM or HETATM records
atom_sequences = {}
structural_residue_IDs_set = set() # use a set for a quicker lookup
ignore_HETATMs = True # todo: fix this if we need to deal with HETATMs
residue_lines_by_chain = []
structural_residue_IDs_set = []
present_chain_ids = {}
for l in self.structure_lines:
if len(l) > 21 and l[:3] != 'TER':
present_chain_ids[l[21]] = present_chain_ids.get(l[21], set())
present_chain_ids[l[21]].add(l[:6])
model_index = 0
residue_lines_by_chain.append([])
structural_residue_IDs_set.append(set())
full_code_map = {}
hetatm_map = {}
full_atom_map = {}
for l in self.structure_lines:
chain_id = None
if l.startswith("TER "):
model_index += 1
residue_lines_by_chain.append([])
structural_residue_IDs_set.append(set())
else:
residue_id = l[21:27]
if residue_id not in structural_residue_IDs_set[model_index]:
residue_lines_by_chain[model_index].append(l)
structural_residue_IDs_set[model_index].add(residue_id)
if l.startswith('ATOM'):
chain_id = l[21]
# Only use ATOM records to build the code map as chains can have ligands HETATMs
full_code_map[chain_id] = full_code_map.get(chain_id, set())
full_code_map[chain_id].add(l[17:20].strip())
# Only use ATOM records to build the atom map as CA-only chains can have ligands described in full as HETATMs
full_atom_map[chain_id] = full_atom_map.get(chain_id, set())
full_atom_map[chain_id].add(l[12:16].strip())
elif l.startswith('HETATM'):
chain_id = l[21]
hetatm_map[chain_id] = hetatm_map.get(chain_id, set())
hetatm_map[chain_id].add(l[17:20].strip())
# Get the residues used by the residue lines. These can be used to determine the chain type if the header is missing.
for chain_id in self.atom_chain_order:
if full_code_map.get(chain_id):
# The chains may contain other molecules e.g. MG or HOH so before we decide their type based on residue types alone,
# we subtract out those non-canonicals
canonical_molecules = full_code_map[chain_id].intersection(dna_nucleotides.union(rna_nucleotides).union(residue_types_3))
determined_chain_type = None
if canonical_molecules.union(dna_nucleotides) == dna_nucleotides:
determined_chain_type = 'DNA'
elif canonical_molecules.union(rna_nucleotides) == rna_nucleotides:
determined_chain_type = 'RNA'
elif len(full_code_map[chain_id]) == 1 and 'UNK' in full_code_map[chain_id]:
determined_chain_type = 'Unknown'
elif canonical_molecules:
if len(full_atom_map[chain_id]) == 1 and 'CA' in full_atom_map[chain_id]:
determined_chain_type = 'Protein skeleton'
else:
determined_chain_type = 'Protein'
else:
determined_chain_type = PDB._determine_heterogen_chain_type(canonical_molecules)
if self.chain_types.get(chain_id):
assert(self.chain_types[chain_id] == determined_chain_type)
else:
self.chain_types[chain_id] = determined_chain_type
line_types_by_chain = []
chain_ids = []
for model_index in range(len(residue_lines_by_chain)):
line_types = set()
if residue_lines_by_chain[model_index]:
if missing_chain_ids.get(self.pdb_id):
chain_ids.append(missing_chain_ids[self.pdb_id])
else:
chain_ids.append(residue_lines_by_chain[model_index][0][21])
for l in residue_lines_by_chain[model_index]:
line_types.add(l[0:6])
if line_types == set(['ATOM']):
line_types_by_chain.append('ATOM')
elif line_types == set(['HETATM']):
line_types_by_chain.append('HETATM')
else:
line_types_by_chain.append('Mixed')
for x in range(0, len(residue_lines_by_chain)):
residue_lines = residue_lines_by_chain[x]
line_types = line_types_by_chain[x]
if ignore_HETATMs and line_types == 'HETATM':
continue
for y in range(len(residue_lines)):
l = residue_lines[y]
residue_type = l[17:20].strip()
if l.startswith("HETATM"):
if self.modified_residue_mapping_3.get(residue_type):
residue_type = self.modified_residue_mapping_3[residue_type]
elif y == (len(residue_lines) - 1):
# last residue in the chain
if residue_type == 'NH2':
residue_type = 'UNK' # fixes a few cases e.g. 1MBG, 1K9Q, 1KA6
elif ignore_HETATMs:
continue
elif ignore_HETATMs:
continue
residue_id = l[21:27]
chain_id = l[21]
if missing_chain_ids.get(self.pdb_id):
chain_id = missing_chain_ids[self.pdb_id]
if chain_id in self.chain_types:
# This means the pdb had SEQRES and we constructed atom_sequences
chain_type = self.chain_types[chain_id]
else:
# Otherwise assume this is protein
chain_type = 'Protein'
atom_sequences[chain_id] = atom_sequences.get(chain_id, Sequence(chain_type))
residue_type = self.modified_residue_mapping_3.get(residue_type, residue_type)
short_residue_type = None
if residue_type == 'UNK':
short_residue_type = 'X'
elif chain_type == 'Unknown':
assert(False) # we should not reach here - Unknown chains should only contain UNK records
elif chain_type == 'Protein' or chain_type == 'Protein skeleton':
short_residue_type = residue_type_3to1_map.get(residue_type) or protonated_residue_type_3to1_map.get(residue_type) or non_canonical_amino_acids.get(residue_type)
elif chain_type == 'DNA':
short_residue_type = dna_nucleotides_2to1_map.get(residue_type) or non_canonical_dna.get(residue_type)
elif chain_type == 'RNA':
short_residue_type = non_canonical_rna.get(residue_type) or residue_type
if not short_residue_type:
if l.startswith("ATOM") and l[12:16] == ' OH2' and l[17:20] == 'TIP':
continue
elif not self.strict:
short_residue_type = 'X'
else:
raise NonCanonicalResidueException("Unrecognized residue type %s in PDB file '%s', residue ID '%s'." % (residue_type, str(self.pdb_id), str(residue_id)))
#structural_residue_IDs.append((residue_id, short_residue_type))
# KAB - way to allow for multiresidue noncanonical AA's
if len(short_residue_type) == 1:
atom_sequences[chain_id].add(PDBResidue(residue_id[0], residue_id[1:], short_residue_type, chain_type))
else:
for char in short_residue_type:
atom_sequences[chain_id].add(PDBResidue(residue_id[0], residue_id[1:], char, chain_type))
# Assign 'Ligand' or 'Heterogen' to all HETATM-only chains
for chain_id in list(present_chain_ids.keys()):
if chain_id not in self.chain_types:
assert('ATOM ' not in present_chain_ids[chain_id])
self.chain_types[chain_id] = PDB._determine_heterogen_chain_type(hetatm_map.get(chain_id, set()))
self.atom_sequences = atom_sequences
def _get_ATOM_sequences_2(self):
'''Creates the ATOM Sequences.'''
# Get a list of all residues with ATOM or HETATM records
atom_sequences = {}
structural_residue_IDs_set = set() # use a set for a quicker lookup
ignore_HETATMs = True # todo: fix this if we need to deal with HETATMs
for l in self.structure_lines:
residue_type = l[17:20].strip()
if l.startswith("HETATM"):
if self.modified_residue_mapping_3.get(residue_type):
residue_type = self.modified_residue_mapping_3[residue_type]
elif ignore_HETATMs:
continue
residue_id = l[21:27]
if residue_id not in structural_residue_IDs_set:
chain_id = l[21]
chain_type = self.chain_types[chain_id]
atom_sequences[chain_id] = atom_sequences.get(chain_id, Sequence(chain_type))
residue_type = l[17:20].strip()
residue_type = self.modified_residue_mapping_3.get(residue_type, residue_type)
short_residue_type = None
if residue_type == 'UNK':
short_residue_type = 'X'
elif chain_type == 'Unknown':
assert(False) # we should not reach here - Unknown chains should only contain UNK records
elif chain_type == 'Protein' or chain_type == 'Protein skeleton':
short_residue_type = residue_type_3to1_map.get(residue_type) or protonated_residue_type_3to1_map.get(residue_type)
elif chain_type == 'DNA':
short_residue_type = dna_nucleotides_2to1_map.get(residue_type) or non_canonical_dna.get(residue_type)
elif chain_type == 'RNA':
short_residue_type = non_canonical_rna.get(residue_type) or residue_type
elif not self.strict:
short_residue_type = 'X'
else:
raise NonCanonicalResidueException("Unrecognized residue type %s in PDB file '%s', residue ID '%s'." % (residue_type, str(self.pdb_id), str(residue_id)))
#structural_residue_IDs.append((residue_id, short_residue_type))
atom_sequences[chain_id].add(PDBResidue(residue_id[0], residue_id[1:], short_residue_type, chain_type))
structural_residue_IDs_set.add(residue_id)
self.atom_sequences = atom_sequences
def construct_seqres_to_atom_residue_map(self):
'''Uses the SequenceAligner to align the SEQRES and ATOM sequences and return the mappings.
If the SEQRES sequence does not exist for a chain, the mappings are None.
Note: The ResidueRelatrix is better equipped for this job since it can use the SIFTS mappings. This function
is provided for cases where it is not possible to use the ResidueRelatrix.'''
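# Illustrative usage sketch (comments only, not executed). This assumes `p` is a PDB object
# parsed from a file containing both SEQRES and ATOM records:
#
#   seqres_to_atom_maps, atom_to_seqres_maps = p.construct_seqres_to_atom_residue_map()
#   # seqres_to_atom_maps['A'] maps each SEQRES residue ID of chain A to the ATOM residue ID
#   # it aligned to; residues without a counterpart are simply absent from the map.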
from klab.bio.clustalo import SequenceAligner
seqres_to_atom_maps = {}
atom_to_seqres_maps = {}
for c in self.seqres_chain_order:
if c in self.atom_chain_order:
# Get the sequences for chain c
seqres_sequence = self.seqres_sequences[c]
atom_sequence = self.atom_sequences[c]
# Align the sequences. mapping will be a mapping between the sequence *strings* (1-indexed)
sa = SequenceAligner()
sa.add_sequence('seqres_%s' % c, str(seqres_sequence))
sa.add_sequence('atom_%s' % c, str(atom_sequence))
mapping, match_mapping = sa.get_residue_mapping()
# Use the mapping from the sequence strings to look up the residue IDs and then create a mapping between these residue IDs
seqres_to_atom_maps[c] = {}
atom_to_seqres_maps[c] = {}
for seqres_residue_index, atom_residue_index in mapping.items():
seqres_residue_id = seqres_sequence.order[seqres_residue_index - 1] # order is a 0-based list
atom_residue_id = atom_sequence.order[atom_residue_index - 1] # order is a 0-based list
seqres_to_atom_maps[c][seqres_residue_id] = atom_residue_id
atom_to_seqres_maps[c][atom_residue_id] = seqres_residue_id
return seqres_to_atom_maps, atom_to_seqres_maps
def construct_pdb_to_rosetta_residue_map(self, rosetta_scripts_path, rosetta_database_path = None, extra_command_flags = None, cache_dir = None):
''' Uses the features database to create a mapping from Rosetta-numbered residues to PDB ATOM residues.
Next, the object's rosetta_sequences (a dict of Sequences) element is created.
Finally, a SequenceMap object is created mapping the Rosetta Sequences to the ATOM Sequences.
The extra_command_flags parameter expects a string e.g. "-ignore_zero_occupancy false".
If cache_dir is passed then the file <self.pdb_id>.rosetta2pdb.rawmap.json in that directory is used to cache the raw residue map.
'''
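# Rough usage sketch (comments only, not executed; the Rosetta paths below are placeholders):
#
#   p = PDB(...)
#   p.construct_pdb_to_rosetta_residue_map('/path/to/rosetta_scripts', rosetta_database_path = '/path/to/rosetta_database')
#   # Afterwards, p.rosetta_sequences maps chain -> Sequence of Rosetta-numbered residues and
#   # p.rosetta_to_atom_sequence_maps maps chain -> SequenceMap from Rosetta residue IDs to ATOM residue IDs.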
## Create a mapping from Rosetta-numbered residues to PDB ATOM residues
import json
# Apply any PDB-specific hacks
specific_flag_hacks = None
if self.pdb_id and HACKS_pdb_specific_hacks.get(self.pdb_id):
specific_flag_hacks = HACKS_pdb_specific_hacks[self.pdb_id]
skeletal_chains = sorted([k for k in list(self.chain_types.keys()) if self.chain_types[k] == 'Protein skeleton'])
if skeletal_chains:
raise PDBMissingMainchainAtomsException('The PDB to Rosetta residue map could not be created as chains %s only have CA atoms present.' % ", ".join(skeletal_chains))
# Get the residue mapping using the features database
mapping = None
cached_json_mapping_filepath = None
if cache_dir:
cached_json_mapping_filepath = os.path.join(cache_dir, '{0}.rosetta2pdb.rawmap.json'.format(self.pdb_id)) # note: the resmap.json file created by self.get_atom_sequence_to_rosetta_json_map is more involved - rawmap is simply what is returned by get_pdb_contents_to_pose_residue_map
if self.pdb_id and cache_dir and os.path.exists(cached_json_mapping_filepath):
# Read cached file
try:
mapping = json.loads(read_file(cached_json_mapping_filepath))
except: pass
if mapping == None:
pdb_file_contents = "\n".join(self.structure_lines)
success, mapping = get_pdb_contents_to_pose_residue_map(pdb_file_contents, rosetta_scripts_path, rosetta_database_path = rosetta_database_path, pdb_id = self.pdb_id, extra_flags = ((specific_flag_hacks or '') + ' ' + (extra_command_flags or '')).strip())
if not success:
raise colortext.Exception("An error occurred mapping the PDB ATOM residue IDs to the Rosetta numbering.\n%s" % "\n".join(mapping))
if self.pdb_id and cache_dir:
write_file(cached_json_mapping_filepath, json.dumps(mapping, indent = 4, sort_keys = True))
## Create Sequences for the Rosetta residues (self.rosetta_sequences)
# Initialize maps
rosetta_residues = {}
rosetta_sequences = {}
for chain_id in self.atom_chain_order:
chain_type = self.chain_types[chain_id]
rosetta_residues[chain_id] = {}
rosetta_sequences[chain_id] = Sequence(chain_type)
# Create a map rosetta_residues, Chain -> Rosetta residue ID -> Rosetta residue information
rosetta_pdb_mappings = {}
for chain_id in self.atom_chain_order:
rosetta_pdb_mappings[chain_id] = {}
for k, v in mapping.items():
rosetta_residues[k[0]][v['pose_residue_id']] = v
rosetta_pdb_mappings[k[0]][v['pose_residue_id']] = k
# Create rosetta_sequences map Chain -> Sequence(Residue)
for chain_id, v in sorted(rosetta_residues.items()):
chain_type = self.chain_types[chain_id]
for rosetta_id, residue_info in sorted(v.items()):
short_residue_type = None
residue_type = None
if chain_type == 'Protein':
residue_type = residue_info['name3'].strip()
short_residue_type = residue_type_3to1_map.get(residue_type, 'X') # some HETATMs can be passed here e.g. MG so we can not map those cases
else:
residue_type = residue_info['res_type'].strip()
if chain_type == 'DNA':
if residue_type.find('UpperDNA') != -1 or residue_type.find('LowerDNA') != -1:
residue_type = residue_type[:3]
short_residue_type = dna_nucleotides_3to1_map.get(residue_type) # Commenting this out since Rosetta does not seem to handle these "or non_canonical_dna.get(residue_type)"
else:
assert(chain_type == 'RNA')
if residue_type.find('UpperRNA') != -1 or residue_type.find('LowerRNA') != -1 or (len(residue_type) > 3 and residue_type[3] == ':'):
residue_type = residue_type[:3]
short_residue_type = rna_nucleotides_3to1_map.get(residue_type)
if short_residue_type == None:
raise colortext.Exception('Could not determine the one-letter code of the residue: chain {0}, chain_type "{1}", residue "{2}", residue type "{3}".'.format(chain_id, chain_type, rosetta_id, residue_type))
rosetta_sequences[chain_id].add(Residue(chain_id, rosetta_id, short_residue_type, chain_type))
## Create SequenceMap objects to map the Rosetta Sequences to the ATOM Sequences
rosetta_to_atom_sequence_maps = {}
for chain_id, rosetta_pdb_mapping in rosetta_pdb_mappings.items():
rosetta_to_atom_sequence_maps[chain_id] = SequenceMap.from_dict(rosetta_pdb_mapping)
self.rosetta_to_atom_sequence_maps = rosetta_to_atom_sequence_maps
self.rosetta_sequences = rosetta_sequences
def get_atom_sequence_to_rosetta_map(self):
'''Uses the Rosetta->ATOM injective map to construct an injective mapping from ATOM->Rosetta.
We do not extend the injection to include ATOM residues which have no corresponding Rosetta residue i.e. we do not map
those residues to None.
e.g. atom_sequence_to_rosetta_mapping[c].map.get('A 45 ') will return None if there is no corresponding Rosetta residue.
Likewise, if a PDB chain c is not present in the Rosetta model then atom_sequence_to_rosetta_mapping[c].map.get(s) returns None.
'''
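# Sketch of how the two directions relate (comments only, not executed; the residue ID is a placeholder):
#
#   atom_to_rosetta = p.get_atom_sequence_to_rosetta_map()
#   rosetta_id = atom_to_rosetta['A'].map.get('A 45 ')   # None if ATOM residue A 45 has no Rosetta counterpart
#   # p.rosetta_to_atom_sequence_maps['A'] holds the inverse (Rosetta ID -> ATOM residue ID) direction.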
if not (self.rosetta_to_atom_sequence_maps and self.rosetta_sequences):
raise Exception('The PDB to Rosetta mapping has not been determined. Please call construct_pdb_to_rosetta_residue_map first.')
atom_sequence_to_rosetta_mapping = {}
for chain_id, mapping in self.rosetta_to_atom_sequence_maps.items():
chain_mapping = {}
for k in mapping:
chain_mapping[k[1]] = k[0]
atom_sequence_to_rosetta_mapping[chain_id] = SequenceMap.from_dict(chain_mapping)
# Add empty maps for missing chains
for chain_id, sequence in self.atom_sequences.items():
if not atom_sequence_to_rosetta_mapping.get(chain_id):
atom_sequence_to_rosetta_mapping[chain_id] = SequenceMap()
return atom_sequence_to_rosetta_mapping
def get_atom_sequence_to_rosetta_json_map(self):
'''Returns the mapping from PDB ATOM residue IDs to Rosetta residue IDs in JSON format.'''
import json
d = {}
atom_sequence_to_rosetta_mapping = self.get_atom_sequence_to_rosetta_map()
for c, sm in atom_sequence_to_rosetta_mapping.items():
for k, v in sm.map.items():
d[k] = v
return json.dumps(d, indent = 4, sort_keys = True)
def get_rosetta_sequence_to_atom_json_map(self):
'''Returns the mapping from Rosetta residue IDs to PDB ATOM residue IDs in JSON format.'''
import json
if not (self.rosetta_to_atom_sequence_maps and self.rosetta_sequences):
raise Exception('The PDB to Rosetta mapping has not been determined. Please call construct_pdb_to_rosetta_residue_map first.')
d = {}
for c, sm in self.rosetta_to_atom_sequence_maps.items():
for k, v in sm.map.items():
d[k] = v
#d[c] = sm.map
return json.dumps(d, indent = 4, sort_keys = True)
def map_pdb_residues_to_rosetta_residues(self, mutations):
'''This function takes a list of ChainMutation objects and uses the PDB to Rosetta mapping to return the corresponding
list of SimpleMutation objects using Rosetta numbering.
e.g.
p = PDB(...)
p.construct_pdb_to_rosetta_residue_map()
rosetta_mutations = p.map_pdb_residues_to_rosetta_residues(pdb_mutations)
'''
if not (self.rosetta_to_atom_sequence_maps and self.rosetta_sequences):
raise Exception('The PDB to Rosetta mapping has not been determined. Please call construct_pdb_to_rosetta_residue_map first.')
rosetta_mutations = []
atom_sequence_to_rosetta_mapping = self.get_atom_sequence_to_rosetta_map()
for m in mutations:
rosetta_residue_id = atom_sequence_to_rosetta_mapping[m.Chain].get('%s%s' % (m.Chain, m.ResidueID))
rosetta_mutations.append(SimpleMutation(m.WildTypeAA, rosetta_residue_id, m.MutantAA))
return rosetta_mutations
def assert_wildtype_matches(self, mutation):
'''Check that the wildtype of the Mutation object matches the PDB sequence.'''
readwt = self.getAminoAcid(self.getAtomLine(mutation.Chain, mutation.ResidueID))
assert(mutation.WildTypeAA == residue_type_3to1_map[readwt])
### Chain type determination ###
@staticmethod
def _determine_heterogen_chain_type(residue_types):
'''We distinguish three types of heterogen chain: i) all solution; ii) all ligand; or iii) other (a mix of solution, ligand, and/or ions).
residue_types should be a Set of sequence identifers e.g. GTP, ZN, HOH.
'''
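# Illustrative classifications (comments only; these assume, for example, that 'HOH' appears in
# common_solution_ids, which is defined elsewhere):
#   {'HOH'}       -> 'Solution'   (only solution molecules)
#   {'GTP'}       -> 'Ligand'     (all three-letter codes, none of them solution molecules)
#   {'ZN', 'HOH'} -> 'Heterogen'  (mixed identifier lengths / mixed content)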
residue_type_id_lengths = set(map(len, residue_types))
if (len(residue_types) > 0):
if len(residue_types.difference(common_solution_ids)) == 0:
return 'Solution'
elif (len(residue_type_id_lengths) == 1) and (3 in residue_type_id_lengths) and (len(residue_types.difference(common_solution_ids)) > 0):
# The last expression discounts chains which only contain solution molecules e.g. HOH
return 'Ligand'
return 'Heterogen'
### Ligand functions ###
def get_ligand_formulae_as_html(self, oelem = 'span'):
html_list = {}
for k, v in self.ligand_formulae.items():
html_list[k] = v.to_html(oelem = oelem)
return html_list
@staticmethod
def convert_hetatms_to_Hill_notation(lines, ignore_list = []):#['HOH']):
'''From the PDB site:
The elements of the chemical formula are given in the order following Hill ordering. The order of elements depends
on whether carbon is present or not. If carbon is present, the order should be: C, then H, then the other elements
in alphabetical order of their symbol. If carbon is not present, the elements are listed purely in alphabetic order
of their symbol. This is the 'Hill' system used by Chemical Abstracts.
WARNING: This assumes that all atoms are present in the PDB file. This is not usually the case, so the formulae
will often be missing atoms. To account for some of the missing data, we merge the element counters so as to use
as much information as we can.
In general, the FORMUL lines should be used. This function can be used in files with missing headers.
'''
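# Worked example of Hill ordering (comments only; the exact string rendering is determined by
# ElementCounter, which is defined elsewhere):
#   - glucose (C6H12O6) contains carbon, so C is listed first, then H, then the rest alphabetically: "C6 H12 O6"
#   - sulfuric acid (H2SO4) contains no carbon, so all elements are listed alphabetically: "H2 O4 S"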
ignore_list = set(ignore_list)
hetatms = {}
for l in lines:
if l.startswith('HETATM'):
het_id = l[17:20].strip()
if het_id in ignore_list:
continue
res_id = l[21:27]
atom_name = l[12:16]
alt_loc = l[16]
hetatms[het_id] = hetatms.get(het_id, {})
hetatms[het_id][res_id] = hetatms[het_id].get(res_id, {})
hetatms[het_id][res_id][alt_loc] = hetatms[het_id][res_id].get(alt_loc, ElementCounter())
hetatms[het_id][res_id][alt_loc].add(atom_name)
for het_id, res_atoms in hetatms.items():
res_ids = list(res_atoms.keys())
for res_id in res_ids:
ecs = list(hetatms[het_id][res_id].values())
for x in range(1, len(ecs)):
ecs[0].merge(ecs[x])
hetatms[het_id][res_id] = ecs[0]
str_mapping = {}
mapping = {}
for het_id, res_atoms in hetatms.items():
res_ids = list(res_atoms.keys())
for res_id in res_ids:
Hill_notation = hetatms[het_id][res_id]
if str_mapping.get(het_id):
if not str_mapping[het_id] == str(Hill_notation):
mapping[het_id].merge(Hill_notation)
str_mapping[het_id] = str(mapping[het_id])
else:
str_mapping[het_id] = str(Hill_notation)
mapping[het_id] = Hill_notation
return mapping
def get_ligand_codes(self):
if self.ligands == None:
raise RequestedLigandsWithoutParsingException(PDB._std_ligand_parsing_error_message)
ligand_codes = set()
for c, cligands in self.ligands.items():
for seq_id, l in cligands.items():
ligand_codes.add(l.PDBCode)
return sorted(ligand_codes)
def get_ion_codes(self):
if self.ions == None:
raise RequestedIonsWithoutParsingException(PDB._std_ion_parsing_error_message)
ion_codes = set()
for c, cions in self.ions.items():
for seq_id, i in cions.items():
ion_codes.add(i.Element)
return sorted(ion_codes)
def get_solution_residue_ids(self, chain_id = None, solution_id = None):
if chain_id:
solution = self.solution.get(chain_id, {})
if solution_id:
return self.solution.get(chain_id, {}).get(solution_id, [])
return self.solution.get(chain_id, {})
else:
if solution_id:
d = {}
for chain_id, solution_residue_ids in self.solution.items():
d[chain_id] = copy.deepcopy(self.solution[chain_id].get(solution_id, []))
return d
else:
return self.solution
def _get_heterogens(self):
# Initialize properties
self.ligands = {}
self.ligand_objects = {}
self.ions = {}
het_ids = set()
# Parse HETATM names
het_names = {}
for hetname_line in self.parsed_lines.get('HETNAM', []):
continuation = hetname_line[8:10].strip()
het_id = hetname_line[11:14].strip()
het_ids.add(het_id)
description = hetname_line[15:]
if continuation:
assert(het_id in het_names)
het_names[het_id] += description.rstrip() # keep leading space
else:
assert(het_id not in het_names)
het_names[het_id] = description.strip()
# Read in the associated CIF files for the ligand
for het_id in het_ids:
if not self.ligand_objects.get(het_id):
try:
self.ligand_objects[het_id] = Ligand.retrieve_data_from_rcsb(het_id.strip(), cached_dir = self.cache_dir)
except:
colortext.error('Failed to retrieve/parse the ligand .cif entry.')
# Parse HETSYN names
het_synonyms = {}
for hetsyn_line in self.parsed_lines.get('HETSYN', []):
continuation = hetsyn_line[8:10].strip()
het_id = hetsyn_line[11:14].strip()
het_ids.add(het_id)
description = hetsyn_line[15:]
if continuation:
assert(het_id in het_synonyms)
het_synonyms[het_id] += description.rstrip() # keep leading space
else:
assert(het_id not in het_synonyms)
het_synonyms[het_id] = description.strip()
for het_id, synonymns_str in het_synonyms.items():
het_synonyms[het_id] = [s.strip() for s in synonymns_str.split(';')]
for het_id in het_ids:
if het_names.get(het_id):
het_names[het_id] = [het_names[het_id]] + het_synonyms.get(het_id, [])
else:
het_names[het_id] = het_synonyms[het_id]
# Parse FORMUL names
het_formulae = {}
for formul_line in self.parsed_lines.get('FORMUL', []):
component_number = formul_line[8:10].strip()
het_id = formul_line[12:15].strip()
continuation = formul_line[16:18].strip()
asterisk = formul_line[18].strip()
if asterisk:
assert(het_id in common_solutions) # ignore waters: "PDB entries do not have HET records for water molecules, deuterated water, or methanol (when used as solvent)"
formula = formul_line[19:]
if continuation:
assert(het_id in het_formulae)
het_formulae[het_id] += formula.rstrip() # keep leading space
else:
assert(het_id not in het_formulae)
het_formulae[het_id] = formula.strip()
for het_line in self.parsed_lines.get('HET ', []):
# 0:3 = "HET", 7:10 = hetID, 12 = ChainID, 13:17 = seqNum, 17 = iCode, 20:25 = numHetAtoms, 30:70 = description
het_id = het_line[7:10].strip()
chain_id = het_line[12]
het_seq_id = het_line[13:18] # similar to 5-character residue ID
description = het_line[30:].strip() or None
numHetAtoms = int(het_line[20:25].strip())
assert(het_id not in common_solutions)
has_many_atoms = ((het_id in self.ligand_objects) and (self.ligand_objects[het_id].has_many_atoms)) or (numHetAtoms > 1) # Note: the first expression can be None so the order is important here (None or False = False but False or None = None).
if ((het_id == 'UNL') or (len(het_id) == 3 and has_many_atoms)) and (het_id not in three_letter_ion_codes):
# If a HET record has exactly one atom specified in the formula, we treat it as an ion e.g. FE2 in 1ZZ7.pdb.
# This may not be the correct approach for all cases but has fit cases I have come across so far and seems reasonable.
lig = SimplePDBLigand(het_id, het_seq_id, description = description, chain_id = chain_id, names = het_names.get(het_id), formula = het_formulae.get(het_id), number_of_atoms = numHetAtoms)
#colortext.pcyan('Adding ligand: case 1, {0} {1}{2}'.format(het_id, chain_id, het_seq_id))
self.ligands[chain_id] = self.ligands.get(chain_id, {})
assert(chain_id == ' ' or het_seq_id not in self.ligands[chain_id]) # the first expression is a hack to handle bad cases - see 1UOX
self.ligands[chain_id][het_seq_id] = lig
else:
assert((1 <= len(het_id) <= 2) or (numHetAtoms == 1))
assert(numHetAtoms == 1) # this should be true
#colortext.pcyan('Adding ion: case 1, {0} {1}{2}'.format(het_id, chain_id, het_seq_id))
ion = PDBIon(het_id, het_seq_id, description = description, chain_id = chain_id, names = het_names.get(het_id), formula = het_formulae.get(het_id), number_of_atoms = numHetAtoms)
self.ions[chain_id] = self.ions.get(chain_id, {})
assert(chain_id == ' ' or het_seq_id not in self.ions[chain_id]) # the first expression is a hack to handle bad cases - see 1UOX
self.ions[chain_id][het_seq_id] = ion
# Some output files (e.g. from Rosetta) may contain HETATM records with no corresponding HET records due to
# preprocessing of the PDB file.
# We have a lot less information to work with here but should still record the presence of a HETATM.
# The atom counts may be less than those present in the molecule if the coordinates of some atoms were not
# determined.
# Another special case is water molecules or methanol solvent molecules which we still wish to record.
hetatm_molecules = {}
for het_line in self.parsed_lines.get('HETATM', []):
het_id = het_line[17:20].strip()
chain_id = het_line[21]
het_seq_id = het_line[22:27] # similar to 5-character residue ID
if not((len(het_id) == 3 and (chain_id in self.ligands and het_seq_id in self.ligands[chain_id])) or
(1 <= len(het_id) <= 3 and (chain_id in self.ions and het_seq_id in self.ions[chain_id]))): # some ions use three-letter codes e.g. FE2 in 1ZZ7
# Otherwise this case was handled above
hetatm_molecules[chain_id] = hetatm_molecules.get(chain_id, {})
if het_seq_id in hetatm_molecules[chain_id]:
assert(hetatm_molecules[chain_id][het_seq_id][0] == het_id)
hetatm_molecules[chain_id][het_seq_id][1] += 1 # count the number of atoms
else:
hetatm_molecules[chain_id][het_seq_id] = [het_id, 1]
for chain_id, seq_ids in sorted(hetatm_molecules.items()):
for het_seq_id, tpl in sorted(seq_ids.items()):
het_id = tpl[0]
numHetAtoms = tpl[1]
description = common_solutions.get(het_id, het_id)
formula = None
has_many_atoms = ((het_id in self.ligand_objects) and (self.ligand_objects[het_id].has_many_atoms)) or (numHetAtoms > 1) # Note: the first expression can be None so the order is important here (None or False = False but False or None = None).
if het_id in common_solutions:
formula = het_id
assert(1 <= numHetAtoms <= 3)
self.solution[chain_id] = self.solution.get(chain_id, {})
self.solution[chain_id][het_id] = self.solution[chain_id].get(het_id, set())
assert(chain_id == ' ' or het_seq_id not in self.solution[chain_id][het_id]) # the first expression is a hack to handle bad cases - see 1UOX
self.solution[chain_id][het_id].add(het_seq_id)
elif ((het_id == 'UNL') or (len(het_id) == 3 and has_many_atoms)) and (het_id not in three_letter_ion_codes):
# NOTE: Just using len(het_id) == 3 can falsely identify ions as ligands.
# Since there may be missing ATOM records, we cannot infer that a heterogen is an ion based on
# the existence of only one ATOM record.
#
# Instead, we require that has_many_atoms is True. This fixes the problem above but now we can
# cascade into the next case and falsely identify ligands as ions. However, this should only
# happen if there is no corresponding .cif entry for the ligand or if that .cif entry is missing
# the _chem_comp.formula field which is presumably unlikely.
#
# A wrong classification could also occur if a user changed the ligand entry from the standard
# RCSB code to something else e.g. "GTP" -> "LIG". It is difficult to protect against this case
# but it seems likely that most users would use a three-letter code in this case.
lig = SimplePDBLigand(het_id, het_seq_id, description = description, chain_id = chain_id, names = [], formula = None)
#colortext.pcyan('Adding ligand: case 2, {0} {1}{2}'.format(het_id, chain_id, het_seq_id))
self.ligands[chain_id] = self.ligands.get(chain_id, {})
assert(chain_id == ' ' or het_seq_id not in self.ligands[chain_id]) # the first expression is a hack to handle bad cases - see 1UOX
self.ligands[chain_id][het_seq_id] = lig
else:
assert(1 <= len(het_id) <= 2)
ion = PDBIon(het_id, het_seq_id, description = description, chain_id = chain_id, names = [], formula = None)
#colortext.pcyan('Adding ion: case 2, {0} {1}{2}'.format(het_id, chain_id, het_seq_id))
self.ions[chain_id] = self.ions.get(chain_id, {})
assert(chain_id == ' ' or het_seq_id not in self.ions[chain_id]) # the first expression is a hack to handle bad cases - see 1UOX
self.ions[chain_id][het_seq_id] = ion
def get_B_factors(self, force = False):
'''This reads in all ATOM lines and computes the mean and standard deviation of each
residue's B-factors. It returns a table of the mean and standard deviation per
residue as well as the mean and standard deviation over all residues with each
residue having equal weighting.
Whether the atom is occupied or not is not taken into account.'''
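# Shape of the returned dictionary (comments only; the numbers are placeholders):
#
#   {
#       'Overall'   : {'mean' : 25.1, 'stddev' : 6.3},
#       'PerResidue': {'A  45 ': {'mean' : 21.7, 'stddev' : 3.2}, ...},
#   }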
# Read in the list of bfactors for each ATOM line.
if (not self.bfactors) or (force == True):
bfactors = {}
old_chain_residue_id = None
for line in self.lines:
if line[0:4] == "ATOM":
chain_residue_id = line[21:27]
if chain_residue_id != old_chain_residue_id:
bfactors[chain_residue_id] = []
old_chain_residue_id = chain_residue_id
bfactors[chain_residue_id].append(float(line[60:66]))
# Compute the mean and standard deviation for the list of B-factors of each residue
B_factor_per_residue = {}
mean_per_residue = []
for chain_residue_id, bfactor_list in bfactors.items():
mean, stddev, variance = get_mean_and_standard_deviation(bfactor_list)
B_factor_per_residue[chain_residue_id] = dict(mean = mean, stddev = stddev)
mean_per_residue.append(mean)
total_average, total_standard_deviation, variance = get_mean_and_standard_deviation(mean_per_residue)
self.bfactors = dict(
Overall = dict(mean = total_average, stddev = total_standard_deviation),
PerResidue = B_factor_per_residue,
)
return self.bfactors
### END OF REFACTORED CODE
def GetATOMSequences(self, ConvertMSEToAtom = False, RemoveIncompleteFinalResidues = False, RemoveIncompleteResidues = False):
raise Exception('This code looks to be deprecated.')
sequences, residue_map = self.GetRosettaResidueMap(ConvertMSEToAtom = ConvertMSEToAtom, RemoveIncompleteFinalResidues = RemoveIncompleteFinalResidues, RemoveIncompleteResidues = RemoveIncompleteResidues)
return sequences
def GetRosettaResidueMap(self, ConvertMSEToAtom = False, RemoveIncompleteFinalResidues = False, RemoveIncompleteResidues = False):
'''Note: This function ignores any DNA.'''
raise Exception('This code looks to be deprecated. Use construct_pdb_to_rosetta_residue_map instead.')
chain = None
sequences = {}
residue_map = {}
resid_set = set()
resid_list = []
DNA_residues = set([' DA', ' DC', ' DG', ' DT'])
chains = []
self.RAW_ATOM_SEQUENCE = []
essential_atoms_1 = set(['CA', 'C', 'N'])#, 'O'])
essential_atoms_2 = set(['CA', 'C', 'N'])#, 'OG'])
current_atoms = set()
atoms_read = {}
oldchainID = None
removed_residue = {}
for line in self.lines:
if line[0:4] == 'ATOM' or (ConvertMSEToAtom and (line[0:6] == 'HETATM') and (line[17:20] == 'MSE')):
chainID = line[21]
if missing_chain_ids.get(self.pdb_id):
chainID = missing_chain_ids[self.pdb_id]
if chainID not in chains:
chains.append(chainID)
residue_longname = line[17:20]
if residue_longname in DNA_residues:
# Skip DNA
continue
if residue_longname == 'UNK':
# Skip unknown residues
continue
if residue_longname not in allowed_PDB_residues_types and not(ConvertMSEToAtom and residue_longname == 'MSE'):
if not self.strict:
# Skip unknown residues
continue
else:
raise NonCanonicalResidueException("Residue %s encountered: %s" % (line[17:20], line))
else:
resid = line[21:27]
#print(chainID, residue_longname, resid)
#print(line)
#print(resid_list)
if resid not in resid_set:
removed_residue[chainID] = False
add_residue = True
if current_atoms:
if RemoveIncompleteResidues and essential_atoms_1.intersection(current_atoms) != essential_atoms_1 and essential_atoms_2.intersection(current_atoms) != essential_atoms_2:
oldChain = resid_list[-1][0]
oldResidueID = resid_list[-1][1:]
print(("The last residue '%s', %s, in chain %s is missing these atoms: %s." % (resid_list[-1], residue_longname, oldChain, essential_atoms_1.difference(current_atoms) or essential_atoms_2.difference(current_atoms))))
resid_set.remove(resid_list[-1])
#print("".join(resid_list))
resid_list = resid_list[:-1]
if oldchainID:
removed_residue[oldchainID] = True
#print("".join(resid_list))
#print(sequences[oldChain])
if sequences.get(oldChain):
sequences[oldChain] = sequences[oldChain][:-1]
if residue_map.get(oldChain):
residue_map[oldChain] = residue_map[oldChain][:-1]
#print(sequences[oldChain]
else:
assert(not(resid_set))
current_atoms = set()
atoms_read[chainID] = set()
atoms_read[chainID].add(line[12:15].strip())
resid_set.add(resid)
resid_list.append(resid)
chainID = line[21]
sequences[chainID] = sequences.get(chainID, [])
if residue_longname in non_canonical_amino_acids:
sequences[chainID].append(non_canonical_amino_acids[residue_longname])
else:
sequences[chainID].append(residue_type_3to1_map[residue_longname])
residue_map[chainID] = residue_map.get(chainID, [])
if residue_longname in non_canonical_amino_acids:
residue_map[chainID].append((resid, non_canonical_amino_acids[residue_longname]))
else:
residue_map[chainID].append((resid, residue_type_3to1_map[residue_longname]))
oldchainID = chainID
else:
#atoms_read[chainID] = atoms_read.get(chainID, set())
atoms_read[chainID].add(line[12:15].strip())
current_atoms.add(line[12:15].strip())
if RemoveIncompleteFinalResidues:
# These are (probably) necessary for Rosetta to keep the residue. Rosetta does throw away residues where only the N atom is present if that residue is at the end of a chain.
for chainID, sequence_list in sequences.items():
if not(removed_residue[chainID]):
if essential_atoms_1.intersection(atoms_read[chainID]) != essential_atoms_1 and essential_atoms_2.intersection(atoms_read[chainID]) != essential_atoms_2:
print(("The last residue %s of chain %s is missing these atoms: %s." % (sequence_list[-1], chainID, essential_atoms_1.difference(atoms_read[chainID]) or essential_atoms_2.difference(atoms_read[chainID]))))
oldResidueID = sequence_list[-1][1:]
residue_map[chainID] = residue_map[chainID][0:-1]
sequences[chainID] = sequence_list[0:-1]
for chainID, sequence_list in sequences.items():
sequences[chainID] = "".join(sequence_list)
assert(sequences[chainID] == "".join([res_details[1] for res_details in residue_map[chainID]]))
for chainID in chains:
for a_acid in sequences.get(chainID, ""):
self.RAW_ATOM_SEQUENCE.append((chainID, a_acid))
residue_objects = {}
for chainID in list(residue_map.keys()):
residue_objects[chainID] = []
for chainID, residue_list in residue_map.items():
for res_pair in residue_list:
resid = res_pair[0]
resaa = res_pair[1]
assert(resid[0] == chainID)
residue_objects[chainID].append((resid[1:].strip(), resaa))
return sequences, residue_objects
@staticmethod
def ChainResidueID2String(chain, residueID):
'''Takes a chain ID e.g. 'A' and a residueID e.g. '123' or '123A' and returns the 6-character identifier spaced as in the PDB format.'''
return "%s%s" % (chain, PDB.ResidueID2String(residueID))
@staticmethod
def ResidueID2String(residueID):
'''Takes a residueID e.g. '123' or '123A' and returns the 5-character identifier spaced as in the PDB format.'''
if residueID.isdigit():
return "%s " % (residueID.rjust(4))
else:
return "%s" % (residueID.rjust(5))
def validate_mutations(self, mutations):
'''This function has been refactored to use the SimpleMutation class.
The parameter is a list of Mutation objects. The function has no return value but raises a PDBValidationException
if the wildtype in the Mutation m does not match the residue type corresponding to residue m.ResidueID in the PDB file.
'''
# Chain, ResidueID, WildTypeAA, MutantAA
resID2AA = self.get_residue_id_to_type_map()
badmutations = []
for m in mutations:
wildtype = resID2AA.get(PDB.ChainResidueID2String(m.Chain, m.ResidueID), "")
if m.WildTypeAA != wildtype:
badmutations.append(m)
if badmutations:
raise PDBValidationException("The mutation(s) %s could not be matched against the PDB %s." % (", ".join(map(str, badmutations)), self.pdb_id))
def remove_nonbackbone_atoms(self, resid_list):
backbone_atoms = set(["N", "CA", "C", "O", "OXT"])
resid_set = set(resid_list)
self.lines = [line for line in self.lines if line[0:4] != "ATOM" or
line[21:26] not in resid_set or
line[12:16].strip() in backbone_atoms]
@staticmethod
def getOccupancy(line):
''' Handles the cases of missing occupancy by omission '''
occstring = line[54:60]
if not(occstring):
return 0
else:
try:
return float(occstring)
except (ValueError, TypeError):
return 0
def removeUnoccupied(self):
self.lines = [line for line in self.lines if not (line.startswith("ATOM") and PDB.getOccupancy(line) == 0)]
def fillUnoccupied(self):
for i in range(len(self.lines)):
line = self.lines[i]
if line.startswith("ATOM") and PDB.getOccupancy(line) == 0:
self.lines[i] = line[:54] + " 1.00" + line[60:]
# Unused function
def fix_backbone_occupancy(self):
backbone_atoms = set(["N", "CA", "C", "O"])
for i in range(len(self.lines)):
line = self.lines[i]
if line.startswith("ATOM") and line[12:16].strip() in backbone_atoms and PDB.getOccupancy(line) == 0:
self.lines[i] = line[:54] + " 1.00" + line[60:]
def fix_chain_id(self):
"""fill in missing chain identifier"""
for i in range(len(self.lines)):
line = self.lines[i]
if line.startswith("ATOM") and line[21] == ' ':
self.lines[i] = line[:21] + 'A' + line[22:]
def remove_hetatm(self):
self.lines = [line for line in self.lines if not line.startswith("HETATM")]
def get_ddGResmap(self):
return self.ddGresmap
def get_ddGInverseResmap(self):
return self.ddGiresmap
def getAminoAcid(self, line):
return line[17:20]
def getAtomLine(self, chain, resid):
'''This function assumes that all lines are ATOM or HETATM lines.
resid should have the proper PDB format i.e. an integer left-padded
to length 4 followed by the insertion code which may be a blank space.'''
for line in self.lines:
fieldtype = line[0:6].strip()
assert(fieldtype == "ATOM" or fieldtype == "HETATM")
if line[21:22] == chain and resid == line[22:27]:
return line
raise Exception("Could not find the ATOM/HETATM line corresponding to chain '%(chain)s' and residue '%(resid)s'." % vars())
def getAtomLinesForResidueInRosettaStructure(self, resid):
'''We assume a Rosetta-generated structure where residues are uniquely identified by number.'''
lines = [line for line in self.lines if line[0:4] == "ATOM" and resid == int(line[22:27])]
if not lines:
#print('Failed searching for residue %d.' % resid)
#print("".join([line for line in self.lines if line[0:4] == "ATOM"]))
raise Exception("Could not find the ATOM/HETATM line corresponding to residue '%(resid)s'." % vars())
return lines
def remapMutations(self, mutations, pdbID = '?'):
'''Takes in a list of (Chain, ResidueID, WildTypeAA, MutantAA) mutation tuples and returns the remapped
mutations based on the ddGResmap (which must be previously instantiated).
This function checks that the mutated positions exist and that the wild-type matches the PDB.
'''
raise Exception('This code is deprecated. Please use map_pdb_residues_to_rosetta_residues instead.')
remappedMutations = []
ddGResmap = self.get_ddGResmap()
for m in mutations:
ns = (PDB.ChainResidueID2String(m['Chain'], str(ddGResmap['ATOM-%s' % PDB.ChainResidueID2String(m['Chain'], m['ResidueID'])])))
remappedMutations.append(Mutation(m['WildTypeAA'], ns[1:].strip(), m['MutantAA'], ns[0]))
# Validate the mutations against the Rosetta residues
sequences, residue_map = self.GetRosettaResidueMap()
for rm in remappedMutations:
offset = int(residue_map[rm.Chain][0][0])
pr = residue_map[rm.Chain][int(rm.ResidueID) - offset]
assert(pr[0] == rm.ResidueID)
assert(pr[1] == rm.WildTypeAA)
return remappedMutations
def stripForDDG(self, chains = True, keepHETATM = False, numberOfModels = None, raise_exception = True):
'''Strips a PDB to ATOM lines. If keepHETATM is True then also retain HETATM lines.
By default all PDB chains are kept. The chains parameter should be True or a list.
In the latter case, only those chains in the list are kept.
Unoccupied ATOM lines are discarded.
This function also builds maps from PDB numbering to Rosetta numbering and vice versa.
'''
if raise_exception:
raise Exception('This code is deprecated.')
from Bio.PDB import PDBParser
resmap = {}
iresmap = {}
newlines = []
residx = 0
oldres = None
model_number = 1
for line in self.lines:
fieldtype = line[0:6].strip()
if fieldtype == "ENDMDL":
model_number += 1
if numberOfModels and (model_number > numberOfModels):
break
if not numberOfModels:
raise Exception("The logic here does not handle multiple models yet.")
if (fieldtype == "ATOM" or (fieldtype == "HETATM" and keepHETATM)) and (float(line[54:60]) != 0):
chain = line[21:22]
if (chains == True) or (chain in chains):
resid = line[21:27] # Chain, residue sequence number, insertion code
iCode = line[26:27]
if resid != oldres:
residx += 1
newnumbering = "%s%4.i " % (chain, residx)
assert(len(newnumbering) == 6)
id = fieldtype + "-" + resid
resmap[id] = residx
iresmap[residx] = id
oldres = resid
oldlength = len(line)
# Add the original line back including the chain [21] and inserting a blank for the insertion code
line = "%s%4.i %s" % (line[0:22], resmap[fieldtype + "-" + resid], line[27:])
assert(len(line) == oldlength)
newlines.append(line)
self.lines = newlines
self.ddGresmap = resmap
self.ddGiresmap = iresmap
# Sanity check against a known library
tmpfile = "/tmp/ddgtemp.pdb"
self.lines = self.lines or ["\n"] # necessary to avoid a crash in the Bio Python module
F = open(tmpfile,'w')
F.write("\n".join(self.lines))
F.close()
parser=PDBParser()
structure=parser.get_structure('tmp', tmpfile)
os.remove(tmpfile)
count = 0
for residue in structure.get_residues():
count += 1
assert(count == residx)
assert(len(resmap) == len(iresmap))
def mapRosettaToPDB(self, resnumber):
res = self.ddGiresmap.get(resnumber)
if res:
res = res.split("-")
return res[1], res[0]
return None
def mapPDBToRosetta(self, chain, resnum, iCode = " ", ATOM = True):
if ATOM:
key = "ATOM-%s%4.i%s" % (chain, resnum, iCode)
else:
key = "HETATM-%s%4.i%s" % (chain, resnum, iCode)
res = self.ddGresmap.get(key)
if res:
return res
return None
def aa_resids(self, only_res=None):
if only_res:
atomlines = [line for line in self.lines if line[0:4] == "ATOM" and line[17:20] in allowed_PDB_residues_types and line[26] == ' ']
else:
atomlines = [line for line in self.lines if line[0:4] == "ATOM" and (line[17:20].strip() in allowed_PDB_residues_and_nucleotides) and line[26] == ' ']
resid_set = set()
resid_list = []
# todo: Seems a little expensive to create a set, check 'not in', and do fn calls to add to the set. Use a dict instead?
for line in atomlines:
resid = line[21:26]
if resid not in resid_set:
resid_set.add(resid)
resid_list.append(resid)
return resid_list # format: "A 123" or: '%s%4.i' % (chain,resid)
def ComputeBFactors(self):
raise Exception('Use get_B_factors() instead.')
def CheckForPresenceOf(self, reslist):
'''This checks whether residues in reslist exist in the ATOM lines.
It returns a list of the residues in reslist which did exist.'''
if type(reslist) == type(""):
reslist = [reslist]
foundRes = {}
for line in self.lines:
resname = line[17:20]
if line[0:4] == "ATOM":
if resname in reslist:
foundRes[resname] = True
return list(foundRes.keys())
def get_residue_id_to_type_map(self):
'''Returns a dictionary mapping 6-character residue IDs (Chain, residue number, insertion code e.g. "A 123B") to the
corresponding one-letter amino acid.
Caveat: This function ignores occupancy - this function should be called once occupancy has been dealt with appropriately.'''
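# For example (comments only), the returned map might look like {'A 123 ': 'G', 'A 124A': 'W', ...},
# with one entry per residue that has a CA atom on an ATOM line.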
resid2type = {}
atomlines = self.parsed_lines['ATOM ']
for line in atomlines:
resname = line[17:20]
if resname in allowed_PDB_residues_types and line[13:16] == 'CA ':
resid2type[line[21:27]] = residue_type_3to1_map.get(resname) or protonated_residue_type_3to1_map.get(resname)
return resid2type
def pruneChains(self, chainsChosen):
# If chainsChosen is non-empty then removes any ATOM lines of chains not in chainsChosen
if chainsChosen and (sorted(chainsChosen) != sorted(self.chain_ids())):
templines = []
for line in self.lines:
shortRecordName = line[0:4]
if shortRecordName == "ATOM" and line[17:20] in allowed_PDB_residues_types and line[26] == ' ':
chain = line[21:22]
if chain in chainsChosen:
# Only keep ATOM lines for chosen chains
templines.append(line)
elif shortRecordName == "TER ":
chain = line[21:22]
if chain in chainsChosen:
# Only keep TER lines for chosen chains
templines.append(line)
else:
# Keep all non-ATOM lines
templines.append(line)
self.lines = templines
def chain_ids(self):
chain_ids = set()
chainlist = []
for line in self.lines:
if line[0:4] == "ATOM" and line[17:20] in allowed_PDB_residues_types and line[26] == ' ':
chain = line[21:22]
if chain not in chain_ids:
chain_ids.add(chain)
chainlist.append(chain)
return chainlist
def number_of_models(self):
return len([line for line in self.lines if line[0:5] == 'MODEL'])
def fix_residue_numbering(self):
"""this function renumbers the res ids in order to avoid strange behaviour of Rosetta"""
resid_list = self.aa_resids()
resid_set = set(resid_list)
resid_lst1 = list(resid_set)
resid_lst1.sort()
map_res_id = {}
x = 1
old_chain = resid_lst1[0][0]
for resid in resid_lst1:
    if resid[0] != old_chain:
        x = 1
        old_chain = resid[0]
    map_res_id[resid] = resid[0] + '%4.i' % x
    x += 1
atomlines = []
for line in self.lines:
if line[0:4] == "ATOM" and line[21:26] in resid_set and line[26] == ' ':
lst = [char for char in line]
#lst.remove('\n')
lst[21:26] = map_res_id[line[21:26]]
atomlines.append(''.join(lst))
#print(''.join(lst))
else:
atomlines.append(line)
self.lines = atomlines
return map_res_id
def get_residue_mapping(self):
"""this function maps the chain and res ids "A 234" to values from [1-N]"""
resid_list = self.aa_resids()
# resid_set = set(resid_list)
# resid_lst1 = list(resid_set)
# resid_lst1.sort()
map_res_id = {}
x = 1
for resid in resid_list:
# map_res_id[ int(resid[1:].strip()) ] = x
map_res_id[ resid ] = x
x+=1
return map_res_id
def GetAllATOMLines(self):
return [line for line in self.lines if line[0:4] == "ATOM"]
def atomlines(self, resid_list = None):
if resid_list == None:
resid_list = self.aa_resids()
resid_set = set(resid_list)
return [line for line in self.lines if line[0:4] == "ATOM" and line[21:26] in resid_set and line[26] == ' ' ]
def neighbors(self, distance, residue, atom = None, resid_list = None): #atom = " CA "
if atom == None: # consider all atoms
lines = [line for line in self.atomlines(resid_list)]
else: # consider only given atoms
lines = [line for line in self.atomlines(resid_list) if line[12:16] == atom]
shash = spatialhash.SpatialHash(distance)
#resid_pos = []
for line in lines:
pos = (float(line[30:38]), float(line[38:46]), float(line[46:54]))
shash.insert(pos, line[21:26])
neighbor_list = []
for line in lines:
#print line
resid = line[21:26]
#print resid[1:-1], str(residue).rjust(4), resid[1:-1] == str(residue).rjust(4)
if resid[1:] == str(residue).rjust(4):
pos = (float(line[30:38]), float(line[38:46]), float(line[46:54]))
for data in shash.nearby(pos, distance):
if data[1] not in neighbor_list:
#print data
neighbor_list.append(data[1])
neighbor_list.sort()
return neighbor_list
#todo 29: Optimise all callers of this function by using fastneighbors2 instead
def neighbors2(self, distance, chain_residue, atom = None, resid_list = None):
#atom = " CA "
'''this one is more precise since it uses the chain identifier also'''
if atom == None: # consider all atoms
lines = [line for line in self.atomlines(resid_list) if line[17:20] in allowed_PDB_residues_types]
else: # consider only given atoms
lines = [line for line in self.atomlines(resid_list) if line[17:20] in allowed_PDB_residues_types and line[12:16] == atom]
shash = spatialhash.SpatialHash(distance)
for line in lines:
pos = (float(line[30:38]), float(line[38:46]), float(line[46:54]))
shash.insert(pos, line[21:26])
neighbor_list = []
for line in lines:
resid = line[21:26]
if resid == chain_residue:
pos = (float(line[30:38]), float(line[38:46]), float(line[46:54]))
for data in shash.nearby(pos, distance):
if data[1] not in neighbor_list:
neighbor_list.append(data[1])
neighbor_list.sort()
return neighbor_list
def fastneighbors2(self, distance, chain_residues, atom = None, resid_list = None):
# Create the spatial hash and construct a list of positions matching chain_residue
#chainResPositions holds all positions related to a chain residue (A1234) defined on ATOM lines
chainResPositions = {}
for res in chain_residues:
chainResPositions[res] = []
shash = spatialhash.SpatialHash3D(distance)
# This could be made fast by inlining atomlines and avoiding creating line[21:26] twice and by reusing resids rather than recomputing them
# However, the speedup may not be too great and would need profiling
for line in self.atomlines(resid_list):
if line[17:20] in allowed_PDB_residues_types:
if atom == None or line[12:16] == atom:
resid = line[21:26]
pos = (float(line[30:38]), float(line[38:46]), float(line[46:54]))
shash.insert(pos, resid)
if resid in chain_residues:
chainResPositions[resid].append(pos)
neighbors = {}
# for all residues ids (A1234) in chain residues and all their positions,
# get a list of all ((x,y,z),resid) tuples within a radius of distance and add them uniquely to neighbor_list
# sort the list and store in neighbors
for resid in chain_residues:
neighbor_list = {}
for pos in chainResPositions[resid]:
for data in shash.nearby(pos):
neighbor_list[data[1]] = True
neighbors[resid] = list(neighbor_list.keys())
return neighbors
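# Rough usage sketch for the spatial-hash neighbour search above (comments only, not executed;
# the residue IDs and distance are placeholders):
#
#   neighbours = p.fastneighbors2(8.0, ['A 123', 'A 124'], atom = ' CA ')
#   # neighbours['A 123'] is then the list of 5-character chain+residue IDs whose selected atoms
#   # lie within 8.0 Angstroms of any selected atom of residue A 123.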
def neighbors3(self, distance, chain_residue, atom = None, resid_list = None):
'''this is used by the sequence tolerance scripts to find the sc-sc interactions only'''
backbone_atoms = [' N ',' CA ',' C ',' O ']
lines = [line for line in self.atomlines(resid_list) if line[12:16] not in backbone_atoms] # this excludes backbone atoms
lines = [line for line in lines if line[13] != 'H'] # exclude hydrogens too!
shash = spatialhash.SpatialHash(distance)
#resid_pos = []
for line in lines:
pos = (float(line[30:38]), float(line[38:46]), float(line[46:54]))
shash.insert(pos, line[21:26])
neighbor_list = [] #
for line in lines:
resid = line[21:26]
if resid == chain_residue:
pos = (float(line[30:38]), float(line[38:46]), float(line[46:54]))
for data in shash.nearby(pos, distance):
if data[1] not in neighbor_list:
neighbor_list.append(data[1])
neighbor_list.sort()
return neighbor_list
def get_stats(self):
counts = {}
counts["models"] = self.number_of_models()
counts["residues"] = len(self.aa_resids())
counts["chains"] = len(self.chain_ids())
counts["atoms"] = len(self.atomlines())
counts["cys"] = len([line for line in self.lines if line[0:4] == "ATOM" and line[13:16] == 'CA ' and line[17:20] == "CYS" and line[26] == ' '])
return counts
# This function can be expanded to allow us to use non-standard PDB files such as the ones given
# as examples in the RosettaCon 2010 sequence tolerance protocol based on Smith, Kortemme 2010.
def check_custom_format(self, line, lineidx):
if line[0:9] == "FOLD_TREE":
return True
return False
def check_format(self, usingClassic, ableToUseMini):
warnings = []
errors = []
lineidx = 1
# remove leading and trailing empty lines
# Use while loops here: removing items from self.lines while iterating over it
# would skip elements when consecutive empty lines occur.
while self.lines and len(self.lines[0].strip()) == 0:
    self.lines.pop(0)
    lineidx = lineidx + 1
while self.lines and len(self.lines[-1].strip()) == 0:
    self.lines.pop()
currentChain = None
oldChain = None
TERidx = 0
ATOMidx = 0
# Unused but handy to have for debugging
residueNumber = 0
# Variables for checking missing backbone residues
missingBackboneResidues = False
lastReadResidue = None
currentResidue = None
bbatoms = ["N", "O", "C", "CA"]#, "CB"]
# For readability
NOT_OCCUPIED = -1.0
NINDEX = 0
OINDEX = 1
CINDEX = 2
CAINDEX = 3
#CBINDEX = 4
missingSomeBBAtoms = False
someBBAtomsAreUnoccupied = False
backboneAtoms = {}
backboneAtoms[" "] = [0.0, 0.0, 0.0, 0.0]#, 0.0]
commonConformationIsPresent = False
oldres = ""
# Check for bad resfile input to classic
resfileEntries = {}
classicErrors = []
# We add these dummy lines to avoid messy edge-case logic in the loop below.
self.lines.append("ATOM 9999 N VAL ^ 999 0.000 0.000 0.000 1.00 00.00 N")
self.lines.append("ATOM 9999 CA VAL ^ 999 0.000 0.000 0.000 1.00 00.00 C")
self.lines.append("ATOM 9999 C VAL ^ 999 0.000 0.000 0.000 1.00 00.00 C")
self.lines.append("ATOM 9999 O VAL ^ 999 0.000 0.000 0.000 1.00 00.00 O")
#self.lines.append("ATOM 9999 CB VAL ^ 999 0.000 0.000 0.000 1.00 00.00 C")
for line in self.lines:
if line[0:4] == "ATOM":
# http://www.wwpdb.org/documentation/format32/sect9.html#ATOM
alternateConformation = line[16]
residue = line[17:20]
currentChain = line[21]
if currentChain == " ":
errors.append("Missing chain identifier (e.g. 'A', 'B') on line %d." % lineidx)
currentResidue = line[21:27]
classicCurrentResidue = line[21:26] # classic did not handle the insertion code in resfiles until revision 29386
occupancy = PDB.getOccupancy(line)
if usingClassic and (residue not in allowed_PDB_residues_types):
# Check for residues outside the list classic can handle
classicErrors.append("Residue %s on line %d is not recognized by classic." % (residue, lineidx))
elif (oldChain != None) and (currentChain == oldChain):
# Check for bad TER fields
oldChain = None
errors.append("A TER field on line %d interrupts two ATOMS on lines %d and %d with the same chain %s." % (TERidx, ATOMidx, lineidx, currentChain))
ATOMidx = lineidx
if not lastReadResidue:
if currentResidue == '^ 999 ':
# We reached the end of the file
break
lastReadResidue = (residue, lineidx, currentResidue)
if lastReadResidue[2] == currentResidue:
if alternateConformation == ' ':
commonConformationIsPresent = True
if lastReadResidue[2] != currentResidue:
residueNumber += 1
# Check for malformed resfiles for classic
if usingClassic:
if not resfileEntries.get(classicCurrentResidue):
resfileEntries[classicCurrentResidue] = (currentResidue, lineidx)
else:
oldRes = resfileEntries[classicCurrentResidue][0]
oldLine = resfileEntries[classicCurrentResidue][1]
if currentResidue == resfileEntries[classicCurrentResidue][0]:
classicErrors.append("Residue %(currentResidue)s on line %(lineidx)d was already defined on line %(oldLine)d." % vars())
else:
classicErrors.append("Residue %(currentResidue)s on line %(lineidx)d has the same sequence number (ignoring iCode) as residue %(oldRes)s on line %(oldLine)d." % vars())
# Check for missing backbone residues
# Add the backbone atoms common to all alternative conformations to the common conformation
# todo: I've changed this to always take the union in all versions rather than just in Rosetta 3. This was to fix a false positive with 3OGB.pdb on residues A13 and A55 which run fine under point mutation.
# This may now be too permissive.
if True or not usingClassic:
commonToAllAlternatives = [0, 0, 0, 0]#, 0]
for conformation, bba in list(backboneAtoms.items()):
for atomocc in range(CAINDEX + 1):
if conformation != " " and backboneAtoms[conformation][atomocc]:
commonToAllAlternatives[atomocc] += backboneAtoms[conformation][atomocc]
for atomocc in range(CAINDEX + 1):
backboneAtoms[" "][atomocc] = backboneAtoms[" "][atomocc] or 0
backboneAtoms[" "][atomocc] += commonToAllAlternatives[atomocc]
# Check whether the common conformation has all atoms
commonConformationHasAllBBAtoms = True
for atomocc in range(CAINDEX + 1):
commonConformationHasAllBBAtoms = backboneAtoms[" "][atomocc] and commonConformationHasAllBBAtoms
ps = ""
for conformation, bba in list(backboneAtoms.items()):
# Add the backbone atoms of the common conformation to all alternatives
if not usingClassic:
for atomocc in range(CAINDEX + 1):
if backboneAtoms[" "][atomocc]:
backboneAtoms[conformation][atomocc] = backboneAtoms[conformation][atomocc] or 0
backboneAtoms[conformation][atomocc] += backboneAtoms[" "][atomocc]
missingBBAtoms = False
for atomocc in range(CAINDEX + 1):
if not backboneAtoms[conformation][atomocc]:
missingBBAtoms = True
break
if not commonConformationHasAllBBAtoms and missingBBAtoms:
missing = []
unoccupied = []
for m in range(CAINDEX + 1):
if backboneAtoms[conformation][m] == 0:
unoccupied.append(bbatoms[m])
someBBAtomsAreUnoccupied = True
elif not(backboneAtoms[conformation][m]):
missing.append(bbatoms[m])
missingSomeBBAtoms = True
s1 = ""
s2 = ""
if len(missing) > 1:
s1 = "s"
if len(unoccupied) > 1:
s2 = "s"
missing = ",".join(missing)
unoccupied = ",".join(unoccupied)
failedClassic = False
haveAllAtoms = True
for atomocc in range(CAINDEX + 1):
if backboneAtoms[conformation][atomocc] <= 0 or backboneAtoms[" "][atomocc] <= 0:
haveAllAtoms = False
break
if haveAllAtoms:
failedClassic = True
ps = " The common conformation correctly has these atoms."
if conformation != " " or commonConformationIsPresent:
# We assume above that the common conformation exists. However, it is valid for it not to exist at all.
if conformation == " ":
conformation = "common"
if missing:
errstring = "The %s residue %s on line %d is missing the backbone atom%s %s in the %s conformation.%s" % (lastReadResidue[0], lastReadResidue[2], lastReadResidue[1], s1, missing, conformation, ps)
if ps:
classicErrors.append(errstring)
else:
errors.append(errstring)
if unoccupied:
errstring = "The %s residue %s on line %d has the backbone atom%s %s set as unoccupied in the %s conformation.%s" % (lastReadResidue[0], lastReadResidue[2], lastReadResidue[1], s2, unoccupied, conformation, ps)
if ps:
classicErrors.append(errstring)
else:
errors.append(errstring)
backboneAtoms = {}
backboneAtoms[" "] = [None, None, None, None, None]
commonConformationIsPresent = False
lastReadResidue = (residue, lineidx, currentResidue)
oldres = residue
atom = line[12:16]
backboneAtoms[alternateConformation] = backboneAtoms.get(alternateConformation) or [None, None, None, None, None]
if occupancy >= 0:
if atom == ' N ':
backboneAtoms[alternateConformation][NINDEX] = occupancy
elif atom == ' O ' or atom == ' OT1' or atom == ' OT2':
backboneAtoms[alternateConformation][OINDEX] = occupancy
elif atom == ' C ':
backboneAtoms[alternateConformation][CINDEX] = occupancy
elif atom == ' CA ':
backboneAtoms[alternateConformation][CAINDEX] = occupancy
#if atom == ' CB ' or residue == 'GLY':
# backboneAtoms[alternateConformation][CBINDEX] = occupancy
elif line[0:3] == "TER":
oldChain = currentChain
TERidx = lineidx
# print len(line),'\t', line[0:6]
# remove all white spaces, and check if the line is empty or too long:
if len(line.strip()) == 0:
errors.append("Empty line found on line %d." % lineidx)
elif len(line.rstrip()) > 81:
errors.append("Line %d is too long." % lineidx)
# check if the file contains tabs
elif '\t' in line:
errors.append("The file contains tabs on line %d." % lineidx)
# check whether the records in the file are conform with the PDB format
elif not line[0:6].rstrip() in all_record_types:
if not self.check_custom_format(line, lineidx):
errors.append("Unknown record (%s) on line %d." % (line[0:6], lineidx))
else:
warnings.append("The PDB file contains the following non-standard line which is allowed by the server:\n line %d: %s" % (lineidx, line))
lineidx = lineidx + 1
# Remove the extra ATOM lines added above
self.lines = self.lines[0:len(self.lines) - (CAINDEX + 1)]
if not lastReadResidue:
errors.append("No valid ATOM lines were found.")
if not missingSomeBBAtoms and someBBAtomsAreUnoccupied:
errors.insert(0, "The PDB has some backbone atoms set as unoccupied. You can set these as occupied using the checkbox on the submission page.<br>")
if classicErrors:
if ableToUseMini:
errors.insert(0, "The PDB is incompatible with the classic version of Rosetta. Try using the mini version of Rosetta or else altering the PDB.<br>")
else:
errors.insert(0, "The PDB is incompatible with the classic version of Rosetta. No mini version is available for this protocol so the PDB will need to be altered.<br>")
errors.append("<br>The classic-specific errors are as follows:<ul style='text-align:left'>")
errors.append("<li>%s" % string.join(classicErrors, "<li>"))
errors.append("</ul>")
if errors:
if usingClassic:
errors.insert(0, "Version: Rosetta++")
else:
errors.insert(0, "Version: Rosetta 3")
return errors, None
return True, warnings
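# Illustrative sketch (hypothetical calling code, not part of the original module) of
# how check_format's two return shapes might be consumed:
#
#   result, extra = p.check_format(usingClassic = False, ableToUseMini = True)
#   if result is True:
#       warnings = extra        # the PDB passed; extra holds any warnings
#   else:
#       errors = result         # a list of error strings; extra is None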
def extract_xyz_matrix_from_chain(self, chain_id, atoms_of_interest = []):
'''Create a pandas coordinates dataframe from the lines in the specified chain.'''
chains = [l[21] for l in self.structure_lines if len(l) > 21]
chain_lines = [l for l in self.structure_lines if len(l) > 21 and l[21] == chain_id]
return PDB.extract_xyz_matrix_from_pdb(chain_lines, atoms_of_interest = atoms_of_interest, include_all_columns = True)
@staticmethod
def extract_xyz_matrix_from_pdb(pdb_lines, atoms_of_interest = backbone_atoms, expected_num_residues = None, expected_num_residue_atoms = None, fail_on_model_records = True, include_all_columns = False):
'''Returns a pandas dataframe of X, Y, Z coordinates for all chains in the PDB.
Note: This function is not intended to handle structures with MODELs e.g. from NMR although the fail_on_model_records
check is optional for convenience in case the first model is to be parsed.
Otherwise, the file should be split apart and passed into this function model by model.'''
if fail_on_model_records and [l for l in pdb_lines if l.startswith('MODEL')]:
raise Exception('This function does not handle files with MODEL records. Please split those files by model first.')
chain_ids = set([l[21] for l in pdb_lines if l.startswith('ATOM ')])
dataframes = []
for chain_id in chain_ids:
dataframes.append(PDB.extract_xyz_matrix_from_pdb_chain(pdb_lines, chain_id, atoms_of_interest = atoms_of_interest, expected_num_residues = expected_num_residues, expected_num_residue_atoms = expected_num_residue_atoms, include_all_columns = include_all_columns))
if dataframes:
return(pandas.concat(dataframes, verify_integrity = True))
else:
return None
@staticmethod
def extract_xyz_matrix_from_pdb_chain(pdb_lines, chain_id, atoms_of_interest = backbone_atoms, expected_num_residues = None, expected_num_residue_atoms = None, fail_on_model_records = True, include_all_columns = False):
'''Returns a pandas dataframe of X, Y, Z coordinates for the PDB chain.
Note: This function is not intended to handle structures with MODELs e.g. from NMR although the fail_on_model_records
check is optional for convenience in case the chain of the first model is to be parsed.'''
if fail_on_model_records and [l for l in pdb_lines if l.startswith('MODEL')]:
raise Exception('This function does not handle files with MODEL records. Please split those files by model first.')
new_pdb_lines = []
found_chain = False
for l in pdb_lines:
if l.startswith('ATOM '):
if l[21] == chain_id:
found_chain = True
if found_chain:
new_pdb_lines.append(l)
if found_chain and (l.strip() == 'TER' or l.startswith('MODEL') or (len(l) > 21 and l[21] != chain_id)):
# Do not cross over into other chains or models
break
return PDB.extract_xyz_matrix_from_pdb_residue_range(new_pdb_lines, atoms_of_interest = atoms_of_interest, expected_num_residues = expected_num_residues, expected_num_residue_atoms = expected_num_residue_atoms, include_all_columns = include_all_columns)
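# Hedged example (assumes pandas is available and pdb_lines is a list of PDB file
# lines, e.g. from open('1abc.pdb').read().splitlines() - the file name is hypothetical):
#
#   df = PDB.extract_xyz_matrix_from_pdb_chain(pdb_lines, 'A')
#   # df is indexed by '<resid>_<atom>_<altloc>' strings and restricted to the
#   # default backbone_atoms; pass atoms_of_interest = [] to keep every atom.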
@staticmethod
def extract_xyz_matrix_from_loop_json(pdb_lines, parsed_loop_json_contents, atoms_of_interest = backbone_atoms, expected_num_residues = None, expected_num_residue_atoms = None, allow_overlaps = False, include_all_columns = False):
'''A utility wrapper to extract_xyz_matrix_from_pdb_residue_range.
This accepts PDB file lines and a loop.json file (a defined Rosetta format) and returns a pandas dataframe of
the X, Y, Z coordinates for the requested atom types for all residues in all loops defined by the loop.json
file. The dataframe is indexed by a string identifying the PDB residue and atom type.
parsed_loop_json_contents should be a Python dict read in from a loop.json file e.g. json.loads(file_contents).'''
# Create one dataframe per loop segment
dataframes = []
for loop_set in parsed_loop_json_contents['LoopSet']:
start_pdb_residue_id = PDB.ChainResidueID2String(loop_set['start']['chainID'], str(loop_set['start']['resSeq']) + loop_set['start']['iCode'])
stop_pdb_residue_id = PDB.ChainResidueID2String(loop_set['stop']['chainID'], str(loop_set['stop']['resSeq']) + loop_set['stop']['iCode'])
dataframes.append(PDB.extract_xyz_matrix_from_pdb_residue_range(pdb_lines, start_pdb_residue_id = start_pdb_residue_id, stop_pdb_residue_id = stop_pdb_residue_id, atoms_of_interest = atoms_of_interest, expected_num_residues = None, expected_num_residue_atoms = expected_num_residue_atoms, include_all_columns = include_all_columns))
# Concatenate the dataframes
dataframe = pandas.concat(dataframes, verify_integrity = (allow_overlaps == False)) # note: the pandas documentation notes that verify_integrity is relatively expensive
if expected_num_residues != None and expected_num_residue_atoms != None:
assert(dataframe.shape[0] == expected_num_residues * expected_num_residue_atoms)
return dataframe
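# Hedged sketch of the loop.json structure this helper expects. The field names are
# taken from the code above; the concrete values are made up for illustration:
#
#   parsed_loops = {
#       'LoopSet': [
#           {'start': {'chainID': 'A', 'resSeq': 23, 'iCode': ' '},
#            'stop':  {'chainID': 'A', 'resSeq': 30, 'iCode': ' '}},
#       ]
#   }
#   df = PDB.extract_xyz_matrix_from_loop_json(pdb_lines, parsed_loops)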
@staticmethod
def extract_xyz_matrix_from_pdb_residue_range(pdb_lines, start_pdb_residue_id = None, stop_pdb_residue_id = None, atoms_of_interest = backbone_atoms, expected_num_residues = None, expected_num_residue_atoms = None, break_on_chain_end = True, include_all_columns = False):
'''Creates a pandas dataframe of X, Y, Z coordinates for the residues identified in the range from
start_pdb_residue_id to stop_pdb_residue_id inclusive. The dataframe is indexed by a string identifying the
PDB residue and atom type.
pdb_lines should be an array of lines from a PDB file (only ATOM lines are currently considered).
The residue IDs should be 6-character strings corresponding to columns 22-27 (inclusive, where columns are 1-indexed)
of the PDB file. The start and stop residues are both optional - omitting either removes the lower and upper
bound of the residue range respectively.
The returned coordinates will be restricted to atoms_of_interest.
expected_num_residues can be set if the number of residues is known in advance and the user wishes to assert this.
If expected_num_residue_atoms is set then each residue will be checked to ensure that this number of atoms was read.
This method does not handle MODELs e.g. from PDB files determined via NMR. To handle those files, an intermediate
function should be written which identifies the lines for one MODEL and then calls this function.'''
atoms = {}
found_start, found_end = start_pdb_residue_id == None, None
res_id_list, x_list, y_list, z_list = [], [], [], []
if include_all_columns:
chain_id_list, plain_res_id_list, atom_type_list, loc_list, residue_aa_list = [], [], [], [], []
for l in pdb_lines:
res_id = None
atom_type = None
alt_loc = None
if (break_on_chain_end and l.strip() == 'TER') or l.startswith('MODEL'):
# Do not cross over into other chains (unless specified) or models
break
if l.startswith('ATOM '):
res_id = l[21:27]
atom_type = l[12:16].strip()
alt_loc = l[16]
if res_id == start_pdb_residue_id:
# Saw the first residue - enter parsing
found_start = True
if (stop_pdb_residue_id != None) and (res_id == stop_pdb_residue_id):
assert(found_start)
found_end = True
if found_end and res_id != stop_pdb_residue_id:
# Passed the last residue - exit parsing
break
if found_start and l.startswith('ATOM ') and (not(atoms_of_interest) or (atom_type in atoms_of_interest)):
if expected_num_residues or expected_num_residue_atoms:
# Only build the atoms dict if the assertions are turned on
assert(res_id and atom_type and not(atoms.get(res_id, {}).get(atom_type)))
atoms[res_id] = atoms.get(res_id, {})
atoms[res_id][atom_type] = True
# Add the dataframe elements
res_id_list.append('{0}_{1}_{2}'.format(res_id.strip(), atom_type, alt_loc))
x_list.append(float(l[30:38]))
y_list.append(float(l[38:46]))
z_list.append(float(l[46:54]))
if include_all_columns:
chain_id_list.append(l[21])
plain_res_id_list.append(l[22:27])
atom_type_list.append(l[12:16])
loc_list.append(l[16])
residue_aa_list.append(l[17:20])
assert(found_start and ((stop_pdb_residue_id == None) or found_end))
if expected_num_residues != None:
assert(len(atoms) == expected_num_residues)
if expected_num_residue_atoms != None:
for res_id, atom_details in atoms.items():
assert(len(atom_details) == expected_num_residue_atoms)
if include_all_columns:
return pandas.DataFrame(dict(chain = chain_id_list, res_id = plain_res_id_list, atom = atom_type_list, altloc = loc_list, AA = residue_aa_list, X = x_list, Y = y_list, Z = z_list), index = res_id_list)
else:
return pandas.DataFrame(dict(X = x_list, Y = y_list, Z = z_list), index = res_id_list)
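# Hedged example of a residue-range extraction. The residue identifiers below follow
# the 6-character chain + residue number + insertion-code format described in the
# docstring; the concrete values are hypothetical:
#
#   df = PDB.extract_xyz_matrix_from_pdb_residue_range(
#            pdb_lines,
#            start_pdb_residue_id = 'A  23 ',
#            stop_pdb_residue_id  = 'A  30 ',
#            atoms_of_interest = ['N', 'CA', 'C', 'O'])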
|
class PDB(object):
'''A class to store and manipulate PDB data'''
def __init__(self, pdb_content, pdb_id = None, strict = True, parse_ligands = False):
'''Takes either a pdb file, a list of strings = lines of a pdb file, or another object.'''
pass
def __repr__(self):
pass
def fix_pdb(self):
'''A function to fix fatal errors in PDB files when they can be automatically fixed. At present, this only runs if
self.strict is False. We may want a separate property for this since we may want to keep strict mode but still
allow PDBs to be fixed.
The only fixes at the moment are for missing chain IDs which get filled in with a valid chain ID, if possible.'''
pass
def _apply_hacks(self):
pass
@staticmethod
def replace_headers(source_pdb_content, target_pdb_content):
'''Takes the headers from source_pdb_content and adds them to target_pdb_content, removing any headers that
target_pdb_content had.
Only the content up to the first structural line are taken from source_pdb_content and only the content from
the first structural line in target_pdb_content are taken.
'''
pass
@staticmethod
def from_filepath(filepath, strict = True, parse_ligands = False):
'''A function to replace the old constructor call where a filename was passed in.'''
pass
@staticmethod
def from_lines(pdb_file_lines, strict = True, parse_ligands = False):
'''A function to replace the old constructor call where a list of the file's lines was passed in.'''
pass
@staticmethod
def retrieve(pdb_id, cache_dir = None, strict = True, parse_ligands = False):
'''Creates a PDB object by using a cached copy of the file if it exists or by retrieving the file from the RCSB.'''
pass
def _split_lines(self):
'''Creates the parsed_lines dict which keeps all record data in document order indexed by the record type.'''
pass
def _update_structure_lines(self):
'''ATOM and HETATM lines may be altered by function calls. When this happens, this function should be called to keep self.structure_lines up to date.'''
pass
def clone(self, parse_ligands = False):
'''A function to replace the old constructor call where a PDB object was passed in and 'cloned'.'''
pass
def get_content(self):
'''A function to replace the old constructor call where a PDB object was passed in and 'cloned'.'''
pass
def write(self, pdbpath, separator = '\n'):
pass
def get_pdb_id(self):
'''Return the PDB ID. If one was passed in to the constructor, this takes precedence, otherwise the header is
parsed to try to find an ID. The header does not always contain a PDB ID in regular PDB files and appears to
always have an ID of 'XXXX' in biological units so the constructor override is useful.'''
pass
def get_ATOM_and_HETATM_chains(self):
'''todo: remove this function as it now just returns a member element'''
pass
def get_annotated_chain_sequence_string(self, chain_id, use_seqres_sequences_if_possible, raise_Exception_if_not_found = True):
'''A helper function to return the Sequence for a chain. If use_seqres_sequences_if_possible then we return the SEQRES
Sequence if it exists. We return a tuple of values, the first identifying which sequence was returned.'''
pass
def get_chain_sequence_string(self, chain_id, use_seqres_sequences_if_possible, raise_Exception_if_not_found = True):
'''Similar to get_annotated_chain_sequence_string except that we only return the Sequence and do not state which sequence it was.'''
pass
def _get_modified_residues(self):
pass
def _get_replacement_pdb_id(self):
'''Checks to see if the PDB file has been deprecated and, if so, what the new ID is.'''
pass
def strip_to_chains(self, chains, break_at_endmdl = True):
'''Throw away all ATOM/HETATM/ANISOU/TER lines for chains that are not in the chains list.'''
pass
def strip_HETATMs(self, only_strip_these_chains = []):
'''Throw away all HETATM lines. If only_strip_these_chains is specified then only strip HETATMs lines for those chains.'''
pass
def generate_all_point_mutations_for_chain(self, chain_id):
pass
def generate_all_paired_mutations_for_position(self, chain_ids, chain_sequence_mappings = {}, residue_ids_to_ignore = [], typed_residue_ids_to_ignore = [], silent = True):
'''Generates a set of mutations for the chains in chain_ids where each set corresponds to the "same" residue (see
below) in both chains and where the wildtype residues match.
e.g. if chain A and B both have K19 then the set of mutations K19A, ... K19I, K19L, K19Y will be included in
in the returned results unless 19 is in residue_ids_to_ignore or typed_residue_ids_to_ignore.
residue_ids_to_ignore should be a list/set of residue IDs.
typed_residue_ids_to_ignore should be a dict residue ID -> residue AA. It is used similarly to residue_ids_to_ignore
but we also assert that the residue types match the sequences in the chains.
By default, "same residue" is inferred by residue ID i.e. the generation assumes that a residue with some ID
in one chain corresponds to the residue with the same ID in another chain. If this is not true then a mapping
between chain residues is necessary and should be provided using the chain_sequence_mappings parameter.
chain_sequence_mappings should be a dict from pairs of chain IDs to SequenceMap objects. As all sequences are
compared with the first chain in chain_ids, only mappings from that first chain to any other chain are used.
This function is useful in certain cases e.g. generating a set of mutations where we make the same mutation in
both chains of a homodimer or a quasi-homodimer (where we only mutate the positions which agree).
'''
pass
def create_fasta(self, length = 80, prefer_seqres_order = True, header = True):
pass
def _get_pdb_format_version(self):
'''Remark 4 indicates the version of the PDB File Format used to generate the file.'''
pass
def get_resolution(self):
pass
def get_title(self):
pass
def get_techniques(self):
pass
def get_UniProt_ACs(self):
pass
def get_DB_references(self):
''' "The DBREF record provides cross-reference links between PDB sequences (what appears in SEQRES record) and
a corresponding database sequence." - http://www.wwpdb.org/documentation/format33/sect3.html#DBREF
'''
pass
def get_molecules_and_source(self):
pass
def get_journal(self):
pass
def _get_SEQRES_sequences(self):
'''Creates the SEQRES Sequences and stores the chains in order of their appearance in the SEQRES records. This order of chains
in the SEQRES sequences does not always agree with the order in the ATOM records.'''
pass
def _get_ATOM_sequences(self):
'''Creates the ATOM Sequences.'''
pass
def _get_ATOM_sequences_2(self):
'''Creates the ATOM Sequences.'''
pass
def construct_seqres_to_atom_residue_map(self):
'''Uses the SequenceAligner to align the SEQRES and ATOM sequences and return the mappings.
If the SEQRES sequence does not exist for a chain, the mappings are None.
Note: The ResidueRelatrix is better equipped for this job since it can use the SIFTS mappings. This function
is provided for cases where it is not possible to use the ResidueRelatrix.'''
pass
def construct_pdb_to_rosetta_residue_map(self, rosetta_scripts_path, rosetta_database_path = None, extra_command_flags = None, cache_dir = None):
''' Uses the features database to create a mapping from Rosetta-numbered residues to PDB ATOM residues.
Next, the object's rosetta_sequences (a dict of Sequences) element is created.
Finally, a SequenceMap object is created mapping the Rosetta Sequences to the ATOM Sequences.
The extra_command_flags parameter expects a string e.g. "-ignore_zero_occupancy false".
If cache_dir is passed then the file <self.pdb_id>.
'''
pass
def get_atom_sequence_to_rosetta_map(self):
'''Uses the Rosetta->ATOM injective map to construct an injective mapping from ATOM->Rosetta.
We do not extend the injection to include ATOM residues which have no corresponding Rosetta residue.
e.g. atom_sequence_to_rosetta_mapping[c].map.get('A 45 ') will return None if there is no corresponding Rosetta residue rather than mapping those residues to None.
Likewise, if a PDB chain c is not present in the Rosetta model then atom_sequence_to_rosetta_mapping[c].map.get(s) returns None.
'''
pass
def get_atom_sequence_to_rosetta_json_map(self):
'''Returns the mapping from PDB ATOM residue IDs to Rosetta residue IDs in JSON format.'''
pass
def get_rosetta_sequence_to_atom_json_map(self):
'''Returns the mapping from Rosetta residue IDs to PDB ATOM residue IDs in JSON format.'''
pass
def map_pdb_residues_to_rosetta_residues(self, mutations):
'''This function takes a list of ChainMutation objects and uses the PDB to Rosetta mapping to return the corresponding
list of SimpleMutation objects using Rosetta numbering.
e.g.
p = PDB(...)
p.construct_pdb_to_rosetta_residue_map()
rosetta_mutations = p.map_pdb_residues_to_rosetta_residues(pdb_mutations)
'''
pass
def assert_wildtype_matches(self, mutation):
'''Check that the wildtype of the Mutation object matches the PDB sequence.'''
pass
@staticmethod
def _determine_heterogen_chain_type(residue_types):
'''We distinguish three types of heterogen chain: i) all solution; ii) all ligand; or iii) other (a mix of solution, ligand, and/or ions).
residue_types should be a Set of sequence identifers e.g. GTP, ZN, HOH.
'''
pass
def get_ligand_formulae_as_html(self, oelem = 'span'):
pass
@staticmethod
def convert_hetatms_to_Hill_notation(lines, ignore_list = []):
'''From the PDB site:
The elements of the chemical formula are given in the order following Hill ordering. The order of elements depends
on whether carbon is present or not. If carbon is present, the order should be: C, then H, then the other elements
in alphabetical order of their symbol. If carbon is not present, the elements are listed purely in alphabetic order
of their symbol. This is the 'Hill' system used by Chemical Abstracts.
WARNING: This assumes that all atoms are in the PDB. This is not usually the case so the formulae will be missing
atoms in those cases. To account for some missing data, we merge the element counters to use the most
amount of information we can.
In general, the FORMUL lines should be used. This function can be used in files with missing headers.
'''
pass
def get_ligand_codes(self):
pass
def get_ion_codes(self):
pass
def get_solution_residue_ids(self, chain_id = None, solution_id = None):
pass
def _get_heterogens(self):
pass
def get_B_factors(self, force = False):
'''This reads in all ATOM lines and computes the mean and standard deviation of each
residue's B-factors. It returns a table of the mean and standard deviation per
residue as well as the mean and standard deviation over all residues with each
residue having equal weighting.
Whether the atom is occupied or not is not taken into account.'''
pass
def GetATOMSequences(self, ConvertMSEToAtom = False, RemoveIncompleteFinalResidues = False, RemoveIncompleteResidues = False):
pass
def GetRosettaResidueMap(self, ConvertMSEToAtom = False, RemoveIncompleteFinalResidues = False, RemoveIncompleteResidues = False):
'''Note: This function ignores any DNA.'''
pass
@staticmethod
def ChainResidueID2String(chain, residueID):
'''Takes a chain ID e.g. 'A' and a residueID e.g. '123' or '123A' and returns the 6-character identifier spaced as in the PDB format.'''
pass
@staticmethod
def ResidueID2String(residueID):
'''Takes a residueID e.g. '123' or '123A' and returns the 5-character identifier spaced as in the PDB format.'''
pass
def validate_mutations(self, mutations):
'''This function has been refactored to use the SimpleMutation class.
The parameter is a list of Mutation objects. The function has no return value but raises a PDBValidationException
if the wildtype in the Mutation m does not match the residue type corresponding to residue m.ResidueID in the PDB file.
'''
pass
def remove_nonbackbone_atoms(self, resid_list):
pass
@staticmethod
def getOccupancy(line):
''' Handles the cases of missing occupancy by omission '''
pass
def removeUnoccupied(self):
pass
def fillUnoccupied(self):
pass
def fix_backbone_occupancy(self):
pass
def fix_chain_id(self):
'''fill in missing chain identifier'''
pass
def remove_hetatm(self):
pass
def get_ddGResmap(self):
pass
def get_ddGInverseResmap(self):
pass
def getAminoAcid(self, line):
pass
def getAtomLine(self, chain, resid):
'''This function assumes that all lines are ATOM or HETATM lines.
resid should have the proper PDB format i.e. an integer left-padded
to length 4 followed by the insertion code which may be a blank space.'''
pass
def getAtomLinesForResidueInRosettaStructure(self, resid):
'''We assume a Rosetta-generated structure where residues are uniquely identified by number.'''
pass
def remapMutations(self, mutations, pdbID = '?'):
'''Takes in a list of (Chain, ResidueID, WildTypeAA, MutantAA) mutation tuples and returns the remapped
mutations based on the ddGResmap (which must be previously instantiated).
This function checks that the mutated positions exist and that the wild-type matches the PDB.
'''
pass
def stripForDDG(self, chains = True, keepHETATM = False, numberOfModels = None, raise_exception = True):
'''Strips a PDB to ATOM lines. If keepHETATM is True then also retain HETATM lines.
By default all PDB chains are kept. The chains parameter should be True or a list.
In the latter case, only those chains in the list are kept.
Unoccupied ATOM lines are discarded.
This function also builds maps from PDB numbering to Rosetta numbering and vice versa.
'''
pass
def mapRosettaToPDB(self, resnumber):
pass
def mapPDBToRosetta(self, chain, resnum, iCode = " ", ATOM = True):
pass
def aa_resids(self, only_res=None):
pass
def ComputeBFactors(self):
pass
def CheckForPresenceOf(self, reslist):
'''This checks whether residues in reslist exist in the ATOM lines.
It returns a list of the residues in reslist which did exist.'''
pass
def get_residue_id_to_type_map(self):
'''Returns a dictionary mapping 6-character residue IDs (Chain, residue number, insertion code e.g. "A 123B") to the
corresponding one-letter amino acid.
Caveat: This function ignores occupancy - this function should be called once occupancy has been dealt with appropriately.'''
pass
def pruneChains(self, chainsChosen):
pass
def chain_ids(self):
pass
def number_of_models(self):
pass
def fix_residue_numbering(self):
'''this function renumbers the res ids in order to avoid strange behaviour of Rosetta'''
pass
def get_residue_mapping(self):
'''this function maps the chain and res ids "A 234" to values from [1-N]'''
pass
def GetAllATOMLines(self):
pass
def atomlines(self, resid_list = None):
pass
def neighbors(self, distance, residue, atom = None, resid_list = None):
pass
def neighbors2(self, distance, chain_residue, atom = None, resid_list = None):
'''this one is more precise since it uses the chain identifier also'''
pass
def fastneighbors2(self, distance, chain_residues, atom = None, resid_list = None):
pass
def neighbors3(self, distance, chain_residue, atom = None, resid_list = None):
'''this is used by the sequence tolerance scripts to find the sc-sc interactions only'''
pass
def get_stats(self):
pass
def check_custom_format(self, line, lineidx):
pass
def check_format(self, usingClassic, ableToUseMini):
pass
def extract_xyz_matrix_from_chain(self, chain_id, atoms_of_interest = []):
'''Create a pandas coordinates dataframe from the lines in the specified chain.'''
pass
@staticmethod
def extract_xyz_matrix_from_pdb(pdb_lines, atoms_of_interest = backbone_atoms, expected_num_residues = None, expected_num_residue_atoms = None, fail_on_model_records = True, include_all_columns = False):
'''Returns a pandas dataframe of X, Y, Z coordinates for all chains in the PDB.
Note: This function is not intended to handle structures with MODELs e.g. from NMR although the fail_on_model_records
check is optional for convenience in case the first model is to be parsed.
Otherwise, the file should be split apart and passed into this function model by model.'''
pass
@staticmethod
def extract_xyz_matrix_from_pdb_chain(pdb_lines, chain_id, atoms_of_interest = backbone_atoms, expected_num_residues = None, expected_num_residue_atoms = None, fail_on_model_records = True, include_all_columns = False):
'''Returns a pandas dataframe of X, Y, Z coordinates for the PDB chain.
Note: This function is not intended to handle structures with MODELs e.g. from NMR although the fail_on_model_records
check is optional for convenience in case the chain of the first model is to be parsed.'''
pass
@staticmethod
def extract_xyz_matrix_from_loop_json(pdb_lines, parsed_loop_json_contents, atoms_of_interest = backbone_atoms, expected_num_residues = None, expected_num_residue_atoms = None, allow_overlaps = False, include_all_columns = False):
'''A utility wrapper to extract_xyz_matrix_from_pdb_residue_range.
This accepts PDB file lines and a loop.json file (a defined Rosetta format) and returns a pandas dataframe of
the X, Y, Z coordinates for the requested atom types for all residues in all loops defined by the loop.json
file. The dataframe is indexed by a string identifying the PDB residue and atom type.
parsed_loop_json_contents should be a Python dict read in from a loop.json file e.g. json.loads(file_contents).'''
pass
@staticmethod
def extract_xyz_matrix_from_pdb_residue_range(pdb_lines, start_pdb_residue_id = None, stop_pdb_residue_id = None, atoms_of_interest = backbone_atoms, expected_num_residues = None, expected_num_residue_atoms = None, break_on_chain_end = True, include_all_columns = False):
'''Creates a pandas dataframe of X, Y, Z coordinates for the residues identified in the range from
start_pdb_residue_id to stop_pdb_residue_id inclusive. The dataframe is indexed by a string identifying the
PDB residue and atom type.
pdb_lines should be an array of lines from a PDB file (only ATOM lines are currently considered).
The residue IDs should be 6-character strings corresponding to columns 22-27 (inclusive, where columns are 1-indexed)
of the PDB file. The start and stop residues are both optional - omitting either removes the lower and upper
bound of the residue range respectively.
The returned coordinates will be restricted to atoms_of_interest.
expected_num_residues can be set if the number of residues is known in advance and the user wishes to assert this.
If expected_num_residue_atoms is set then each residue will be checked to ensure that this number of atoms was read.
This method does not handle MODELs e.g. from PDB files determined via NMR. To handle those files, an intermediate
function should be written which identifies the lines for one MODEL and then calls this function.'''
pass
| 108 | 55 | 27 | 3 | 20 | 5 | 6 | 0.25 | 1 | 36 | 23 | 0 | 81 | 33 | 94 | 94 | 2,766 | 447 | 1,916 | 619 | 1,803 | 482 | 1,750 | 604 | 1,650 | 62 | 1 | 8 | 602 |
143,489 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/clustalo.py
|
klab.bio.clustalo.PDBSeqresSequenceAligner
|
class PDBSeqresSequenceAligner(object):
'''This is a useful utility class to compare unique chains in RCSB PDB files and print the sequence alignments.
Example usage:
pssa = PDBSeqresSequenceAligner('2AJF', '3D0G')
representative_alignment_output, chain_mapping, summary = pssa.get_representative_alignment()
print(representative_alignment_output)
colortext.warning(pprint.pformat(chain_mapping))
'''
def __init__(self, pdb_id_1, pdb_id_2, pdb_1 = None, pdb_2 = None, bio_cache = None, cache_dir = None, cut_off = 70.0, alignment_tool = 'clustalw', gap_opening_penalty = 0.2, ignore_bad_chains = False):
self.pdb_id_1 = pdb_id_1
self.pdb_id_2 = pdb_id_2
self.bio_cache = bio_cache
self.cache_dir = cache_dir
if (not self.cache_dir) and self.bio_cache:
self.cache_dir = self.bio_cache.cache_dir
if self.bio_cache:
self.pdb_1 = self.bio_cache.get_pdb_object(pdb_id_1)
self.pdb_2 = self.bio_cache.get_pdb_object(pdb_id_2)
else:
self.pdb_1 = PDB.retrieve(pdb_id_1, cache_dir = self.cache_dir)
self.pdb_2 = PDB.retrieve(pdb_id_2, cache_dir = self.cache_dir)
self.best_matches = None
self.complete_alignment_output = None
self.representative_alignment_output = None
self.summary = None
self.alignment_tool = alignment_tool
self.gap_opening_penalty = gap_opening_penalty
self.ignore_bad_chains = ignore_bad_chains
self.cut_off = cut_off
self.alignments = {} # Chain in pdb_1 -> Chain in pdb_2 -> alignment object
self.seqres_sequence_maps = {} # Chain in pdb_1 -> Chain in pdb_2 -> residue map (from sequence index to sequence index)
self.atom_sequence_maps = {}
self.seqres_to_atom_maps_1, self.atom_to_seqres_maps_1 = self.pdb_1.construct_seqres_to_atom_residue_map()
self.seqres_to_atom_maps_2, self.atom_to_seqres_maps_2 = self.pdb_2.construct_seqres_to_atom_residue_map()
def align(self):
alignment_tool, gap_opening_penalty, ignore_bad_chains = self.alignment_tool, self.gap_opening_penalty, self.ignore_bad_chains
if not(self.best_matches) or not(self.complete_alignment_output):
sa = SequenceAligner(alignment_tool = alignment_tool, gap_opening_penalty = gap_opening_penalty)
for chain_id, seq in sorted(self.pdb_1.seqres_sequences.items()):
sa.add_sequence('{0}_{1}'.format(self.pdb_id_1, chain_id), str(seq), ignore_bad_chains = ignore_bad_chains)
for chain_id, seq in sorted(self.pdb_2.seqres_sequences.items()):
sa.add_sequence('{0}_{1}'.format(self.pdb_id_2, chain_id), str(seq), ignore_bad_chains = ignore_bad_chains)
self.best_matches = sa.align()
self.complete_alignment_output = sa.alignment_output
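# Note on the data structure produced above (inferred from how best_matches is used in
# get_representative_alignment below): it is assumed to be a nested dict of the form
#
#   best_matches['2AJF_A']['3D0G_A'] -> percentage sequence identity (float)
#
# keyed by '<pdb_id>_<chain_id>' labels for every pair of added sequences.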
def get_representative_alignment(self):
# Perform a global alignment of all chains
self.align()
# Based on the alignment, determine which chains map best to each other
pdb_1_self_mapping = {}
pdb_2_self_mapping = {}
chain_mapping = {}
covered_pdb_1_chains = set()
covered_pdb_2_chains = set()
for chain_id in sorted(self.pdb_1.seqres_sequences.keys()):
if chain_id in covered_pdb_1_chains:
continue
covered_pdb_1_chains.add(chain_id)
for pdb_chain, match in sorted(list(self.best_matches['{0}_{1}'.format(self.pdb_id_1, chain_id)].items()), key = lambda x: x[1], reverse = True):
other_pdb_chain_letter = pdb_chain.split('_')[1]
if pdb_chain.startswith(self.pdb_id_1):
if match == 100.0:
covered_pdb_1_chains.add(other_pdb_chain_letter)
pdb_1_self_mapping[chain_id] = pdb_1_self_mapping.get(chain_id, [])
pdb_1_self_mapping[chain_id].append(other_pdb_chain_letter)
else:
assert(pdb_chain.startswith(self.pdb_id_2))
if not(chain_mapping.get(chain_id)):
chain_mapping[chain_id] = (other_pdb_chain_letter, match)
for chain_id in sorted(self.pdb_2.seqres_sequences.keys()):
if chain_id in covered_pdb_2_chains:
continue
covered_pdb_2_chains.add(chain_id)
for pdb_chain, match in sorted(list(self.best_matches['{0}_{1}'.format(self.pdb_id_2, chain_id)].items()), key = lambda x: x[1], reverse = True):
other_pdb_chain_letter = pdb_chain.split('_')[1]
if pdb_chain.startswith(self.pdb_id_2):
if match == 100.0:
covered_pdb_2_chains.add(other_pdb_chain_letter)
pdb_2_self_mapping[chain_id] = pdb_2_self_mapping.get(chain_id, [])
pdb_2_self_mapping[chain_id].append(other_pdb_chain_letter)
# chain_mapping is a mapping of pdb_1 chains to a representative of the best match in pdb_2
# we now create individual alignments for each case in the mapping
#self.alignments
self.representative_alignment_output = ''
for chain_1, chain_2_pair in chain_mapping.items():
chain_2 = chain_2_pair[0]
sa = self._align_chains(chain_1, chain_2)
self.representative_alignment_output += sa.alignment_output + '\n'
#get_residue_mapping
self.summary = ''
for chain_id, related_chains in pdb_1_self_mapping.items():
self.summary += 'Chain {0} of {1} represents chains {2} of {1}.\n'.format(chain_id, self.pdb_id_1, ', '.join(sorted(related_chains)))
for chain_id, related_chains in pdb_2_self_mapping.items():
self.summary += 'Chain {0} of {1} represents chains {2} of {1}.\n'.format(chain_id, self.pdb_id_2, ', '.join(sorted(related_chains)))
for chain_id, mtch in sorted(chain_mapping.items()):
self.summary += 'Chain {0} of {1} matches chain {2} of {3} (and its represented chains) at {4}%.\n'.format(chain_id, self.pdb_id_1, mtch[0], self.pdb_id_2, mtch[1])
return self.representative_alignment_output, chain_mapping, self.summary
def _align_chains(self, chain_1, chain_2):
alignment_tool, gap_opening_penalty, ignore_bad_chains = self.alignment_tool, self.gap_opening_penalty, self.ignore_bad_chains
if (chain_1, chain_2) in self.alignments:
return self.alignments[(chain_1, chain_2)]
else:
sa = SequenceAligner(alignment_tool=alignment_tool, gap_opening_penalty=gap_opening_penalty)
sa.add_sequence('{0}_{1}'.format(self.pdb_id_1, chain_1), str(self.pdb_1.seqres_sequences[chain_1]), ignore_bad_chains=ignore_bad_chains)
sa.add_sequence('{0}_{1}'.format(self.pdb_id_2, chain_2), str(self.pdb_2.seqres_sequences[chain_2]), ignore_bad_chains=ignore_bad_chains)
sa.align()
self.alignments[(chain_1, chain_2)] = sa
return sa
def build_residue_mappings(self, from_chain = None, to_chain = None):
alignment_tool, gap_opening_penalty, ignore_bad_chains = self.alignment_tool, self.gap_opening_penalty, self.ignore_bad_chains
# ...
if not self.alignments:
self.get_representative_alignment()
assert(self.alignments)
#matched_chains = self.alignments = {} # Chain in pdb_1 -> Chain in pdb_2 -> alignment object
# Perform individual alignments for all best-matched chains
for chain_1 in sorted(self.pdb_1.seqres_sequences.keys()):
if from_chain != None and from_chain != chain_1:
continue
#self.alignments[chain_1] = self.alignments.get(chain_1, {})
self.seqres_sequence_maps[chain_1] = self.seqres_sequence_maps.get(chain_1, {})
self.atom_sequence_maps[chain_1] = self.atom_sequence_maps.get(chain_1, {})
best_match_percentage = None
for pdb_chain, match in sorted(list(self.best_matches['{0}_{1}'.format(self.pdb_id_1, chain_1)].items()), key=lambda x: x[1], reverse=True):
if pdb_chain.startswith(self.pdb_id_1):
continue
chain_2 = pdb_chain.split('_')[1]
if best_match_percentage == None or best_match_percentage == match:
best_match_percentage = match
if match < self.cut_off:
# Do not bother aligning sequences below the sequence identity cut-off
continue
elif from_chain != None and from_chain != chain_1:
continue
elif to_chain != None and to_chain != chain_2:
continue
else:
sa = self._align_chains(chain_1, chain_2)
self.seqres_sequence_maps[chain_1][chain_2] = sa.get_residue_mapping()
# self.seqres_sequence_maps contains the mappings between SEQRES sequences
#
for chain_1, matched_chains in self.seqres_sequence_maps.items():
self.atom_sequence_maps[chain_1] = self.atom_sequence_maps.get(chain_1, {})
for chain_2, residue_mappings in matched_chains.items():
if not self.atom_sequence_maps[chain_1].get(chain_2):
self.atom_sequence_maps[chain_1][chain_2] = {}
# mapping is a SEQRES -> SEQRES mapping
mapping, match_mapping = residue_mappings
if chain_1 in self.seqres_to_atom_maps_1 and chain_2 in self.seqres_to_atom_maps_2:
for seqres_res_1, atom_res_1 in sorted(self.seqres_to_atom_maps_1[chain_1].items()):
if seqres_res_1 in mapping:
seqres_res_2 = mapping[seqres_res_1]
if seqres_res_2 in self.seqres_to_atom_maps_2[chain_2]:
atom_res_2 = self.seqres_to_atom_maps_2[chain_2][seqres_res_2]
self.atom_sequence_maps[chain_1][chain_2][atom_res_1] = atom_res_2
def get_atom_residue_mapping(self, from_chain, to_chain = None):
self.build_residue_mappings(from_chain, to_chain)
return self.atom_sequence_maps.get(from_chain, {}).get(to_chain, None)
def get_matching_chains(self, from_chain):
self.build_residue_mappings()
return sorted([p[1] for p in list(self.alignments.keys()) if p[0] == from_chain])
def map_atom_residue(self, from_chain, to_chain, atom_res_1):
# atom_res_1 should not include the chain ID
self.build_residue_mappings(from_chain, to_chain)
return self.atom_sequence_maps.get(from_chain, {}).get(to_chain, {}).get('{0}{1}'.format(from_chain, atom_res_1), None)
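# Hedged usage sketch (the PDB IDs reuse the ones from the class docstring; the residue
# id ' 123 ' is hypothetical). map_atom_residue prepends the chain letter itself, so
# only the residue part of the identifier is passed in:
#
#   pssa = PDBSeqresSequenceAligner('2AJF', '3D0G')
#   matching = pssa.get_matching_chains('A')
#   mapped = pssa.map_atom_residue('A', matching[0], ' 123 ')
#   # mapped is the corresponding ATOM residue id in 3D0G, or None if unmapped.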
|
class PDBSeqresSequenceAligner(object):
'''This is a useful utility class to compare unique chains in RCSB PDB files and print the sequence alignments.
Example usage:
pssa = PDBSeqresSequenceAligner('2AJF', '3D0G')
representative_alignment_output, chain_mapping, summary = pssa.get_representative_alignment()
print(representative_alignment_output)
colortext.warning(pprint.pformat(chain_mapping))
'''
def __init__(self, pdb_id_1, pdb_id_2, pdb_1 = None, pdb_2 = None, bio_cache = None, cache_dir = None, cut_off = 70.0, alignment_tool = 'clustalw', gap_opening_penalty = 0.2, ignore_bad_chains = False):
pass
def align(self):
pass
def get_representative_alignment(self):
pass
def _align_chains(self, chain_1, chain_2):
pass
def build_residue_mappings(self, from_chain = None, to_chain = None):
pass
def get_atom_residue_mapping(self, from_chain, to_chain = None):
pass
def get_matching_chains(self, from_chain):
pass
def map_atom_residue(self, from_chain, to_chain, atom_res_1):
pass
| 9 | 1 | 23 | 3 | 18 | 2 | 6 | 0.17 | 1 | 5 | 2 | 0 | 8 | 21 | 8 | 8 | 205 | 41 | 142 | 58 | 133 | 24 | 136 | 58 | 127 | 17 | 1 | 7 | 45 |
143,490 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/complexes.py
|
klab.bio.complexes.ProteinProteinComplex
|
class ProteinProteinComplex(object):
'''A class to represent the notion of protein-protein complex and return records which store the data for a specific
database schema.
Suggested usage:
ppcomplex = ProteinProteinComplex(...)
ppcomplex.add_pdb_set(...)
...
ppcomplex.add_pdb_set(...)
complex_id = db_api.add_complex(ppcomplex.get_complex())
for pdb_set in ppcomplex.get_pdb_sets():
db_api.add_pdb_set(pdb_set['pdb_set'])
for chain_record in pdb_set['chain_records']:
db_api.add_chain_record(chain_record)
'''
def __init__(self, lname, lshortname, rname, rshortname, lhtmlname = None, rhtmlname = None,
id = None,
functional_class_id = None, functional_class_id_ppdbm = None,
difficulty_ppdbm = None,
is_wildtype = None, wildtype_complex = None,
notes = None, warnings = None
):
self.id = id # can be used for database entry
# Partner 1
self.lname = lname
self.lshortname = lshortname
self.lhtmlname = lhtmlname
# Partner 2
self.rname = rname
self.rshortname = rshortname
self.rhtmlname = rhtmlname
# Classification
self.functional_class_id = functional_class_id # Generic
self.functional_class_id_ppdbm = functional_class_id_ppdbm # Protein-Protein Docking Benchmark
# Benchmark ratings
self.difficulty_ppdbm = difficulty_ppdbm # Protein-Protein Docking Benchmark
# Relationships
self.is_wildtype = is_wildtype
self.wildtype_complex = wildtype_complex
# Notes
self.notes = notes
self.warnings = warnings
self.pdb_sets = []
def add_pdb_set(self, pdb_names, lchains, rchains, is_complex, notes = None):
'''lchains and rchains should be lists of tuples (p, c, n) where p is a PDB object (bio/pdb.py:PDB), c is a chain
identifier (character), and n is the NMR model number (or zero if this is not applicable).
This allows us to represent combinations of unbound chains.
is_complex should be set to True if the PDB chains collectively form a complex.'''
# If PDB objects differ and is_complex is set, raise an exception
# This is not foolproof - we use the PDB object reference which could differ if the same PDB were loaded more than
# once and passed in. However, that would be bad practice and this check is much cheaper than comparing the PDB content.
if len(set([pc[0] for pc in lchains + rchains])) > 1 and is_complex:
raise Exception('The PDB set cannot be marked as a complex as it is defined using multiple PDB objects.')
# Check for unique occurrences of PDB chains (same caveat applies as above)
all_chains = [(pc[0], pc[1]) for pc in lchains + rchains]
if not len(all_chains) == len(set(all_chains)):
raise Exception('Each PDB chain should be included at most once.')
# Make sure that the chains exist in the PDB objects
for pc in lchains + rchains:
assert(pc[0] in pdb_names)
assert(pc[1] in pc[0].atom_sequences)
# Create the metadata
set_number = len(self.pdb_sets)
pdb_set = dict(
set_number = set_number,
is_complex = is_complex,
notes = notes,
chains = dict(L = [], R = []),
pdb_set_id = None
)
# Add the PDB chains
pdb_set_id = []
for chain_set_def in ((lchains, 'L'), (rchains, 'R')):
for pc in sorted(chain_set_def[0]):
chain_set = pdb_set['chains'][chain_set_def[1]]
nmr_model = None
if len(pc) > 2:
nmr_model = pc[2]
chain_set.append(dict(
chain_index = len(chain_set),
pdb_file_id = pdb_names[pc[0]],
chain_id = pc[1],
nmr_model = nmr_model,
))
pdb_set_id.append('{0}:{1}:{2}:{3}'.format(chain_set_def[1], pdb_names[pc[0]], pc[1], nmr_model))
pdb_set['pdb_set_id'] = sorted(pdb_set_id)
print((pdb_set['pdb_set_id']))
# Make sure we do not already have this set defined (the Complex should contain a unique list of bags of chains).
if pdb_set['pdb_set_id'] in [ps['pdb_set_id'] for ps in self.pdb_sets]:
raise Exception('This PDB set has already been defined (same PDB chains/NMR models).')
self.pdb_sets.append(pdb_set)
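# Illustrative sketch of the inputs expected above (the object and chain names are
# hypothetical). pdb_names maps each PDB object to the identifier stored in the
# database records, and each chain tuple is (pdb_object, chain_id, nmr_model):
#
#   pdb_names = {pdb_a: '1ABC', pdb_b: '2XYZ'}
#   ppcomplex.add_pdb_set(pdb_names,
#                         lchains = [(pdb_a, 'A', 0)],
#                         rchains = [(pdb_b, 'B', 0)],
#                         is_complex = False)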
@db_entry
def get_complex(self):
'''Returns the record for the complex definition to be used for database storage.'''
d = dict(
LName = self.lname,
LShortName = self.lshortname,
LHTMLName = self.lhtmlname,
RName = self.rname,
RShortName = self.rshortname,
RHTMLName = self.rhtmlname,
FunctionalClassID = self.functional_class_id,
PPDBMFunctionalClassID = self.functional_class_id_ppdbm,
PPDBMDifficulty = self.difficulty_ppdbm,
IsWildType = self.is_wildtype,
WildTypeComplexID = self.wildtype_complex,
Notes = self.notes,
Warnings = self.warnings,
)
if self.id:
d['ID'] = self.id
return d
@db_entry
def get_pdb_sets(self):
'''Return a record to be used for database storage. This only makes sense if self.id is set. See usage example
above.'''
assert(self.id != None)
data = []
for pdb_set in self.pdb_sets:
pdb_set_record = dict(
PPComplexID = self.id,
SetNumber = pdb_set['set_number'],
IsComplex = pdb_set['is_complex'],
Notes = pdb_set['notes'],
)
chain_records = []
for side, chain_detail_list in sorted(pdb_set['chains'].items()):
    # Each side ('L' or 'R') maps to a list of chain detail dicts built in add_pdb_set
    for chain_details in chain_detail_list:
        chain_records.append(dict(
            PPComplexID = self.id,
            SetNumber = pdb_set['set_number'],
            Side = side,
            ChainIndex = chain_details['chain_index'],
            PDBFileID = chain_details['pdb_file_id'],
            Chain = chain_details['chain_id'],
            NMRModel = chain_details['nmr_model'],
        ))
data.append(dict(pdb_set = pdb_set_record, chain_records = chain_records))
return data
|
class ProteinProteinComplex(object):
'''A class to represent the notion of protein-protein complex and return records which store the data for a specific
database schema.
Suggested usage:
ppcomplex = ProteinProteinComplex(...)
ppcomplex.add_pdb_set(...)
...
ppcomplex.add_pdb_set(...)
complex_id = db_api.add_complex(ppcomplex.get_complex())
for pdb_set in ppcomplex.get_pdb_sets():
db_api.add_pdb_set(pdb_set['pdb_set'])
for chain_record in pdb_set['chain_records']:
db_api.add_chain_record(chain_record)
'''
def __init__(self, lname, lshortname, rname, rshortname, lhtmlname = None, rhtmlname = None,
id = None,
functional_class_id = None, functional_class_id_ppdbm = None,
difficulty_ppdbm = None,
is_wildtype = None, wildtype_complex = None,
notes = None, warnings = None
):
pass
def add_pdb_set(self, pdb_names, lchains, rchains, is_complex, notes = None):
'''lchains and rchains should be lists of tuples (p, c, n) where p is a PDB object (bio/pdb.py:PDB), c is a chain
identifier (character), and n is the NMR model number (or zero if this is not applicable).
This allows us to represent combinations of unbound chains.
is_complex should be set to True if the PDB chains collectively form a complex.'''
pass
@db_entry
def get_complex(self):
'''Returns the record for the complex definition to be used for database storage.'''
pass
@db_entry
def get_pdb_sets(self):
'''Return a record to be used for database storage. This only makes sense if self.id is set. See usage example
above.'''
pass
| 7 | 4 | 36 | 5 | 25 | 6 | 4 | 0.37 | 1 | 3 | 0 | 0 | 4 | 15 | 4 | 4 | 167 | 30 | 103 | 41 | 90 | 38 | 57 | 33 | 52 | 8 | 1 | 3 | 14 |
143,491 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/clustalo.py
|
klab.bio.clustalo.SIFTSChainMutatorSequenceAligner
|
class SIFTSChainMutatorSequenceAligner(object):
'''This is a useful utility class to generate a list of mutations between PDB files covered by SIFTS. It is used in the
SpiderWebs project.
Example usage:
import pprint
from klab.bio.clustalo import SIFTSChainMutatorSequenceAligner
scmsa = SIFTSChainMutatorSequenceAligner(bio_cache = bio_cache)
mutations = scmsa.get_mutations('2AJF', 'A', '3D0G', 'A')
if mutations:
print('{0} mismatches:\n{1}'.format(len(mutations), pprint.pformat(mutations)))
'''
def __init__(self, bio_cache = None, cache_dir = None, acceptable_sifts_sequence_percentage_match = 60.0, acceptable_sequence_similarity = 85.0):
self.pdbs = {}
self.alignments = {}
self.chain_map = {} # maps PDB ID 1 -> chain ID in PDB ID 1 -> PDB ID 2 -> Set[chain IDs in PDB ID 2]
self.bio_cache = bio_cache
self.cache_dir = cache_dir
self.acceptable_sequence_similarity = acceptable_sequence_similarity
self.seqres_sequence_maps = {} # maps Tuple(PDB ID 1, chain ID in PDB ID 1) -> Tuple(PDB ID 2, chain ID in PDB ID 2) -> SequenceMap object based on an alignment from the first chain to the second
self.acceptable_sifts_sequence_percentage_match = acceptable_sifts_sequence_percentage_match
if (not self.cache_dir) and self.bio_cache:
self.cache_dir = self.bio_cache.cache_dir
def add_pdb(self, pdb_id):
from klab.bio.sifts import SIFTS
pdb_id = pdb_id.upper()
if not self.pdbs.get(pdb_id):
# Create the SIFTS objects
try:
if self.bio_cache:
sifts_object = self.bio_cache.get_sifts_object(pdb_id, acceptable_sequence_percentage_match = self.acceptable_sifts_sequence_percentage_match)
else:
sifts_object = SIFTS.retrieve(pdb_id, cache_dir = self.cache_dir, acceptable_sequence_percentage_match = self.acceptable_sifts_sequence_percentage_match)
sifts_object._create_inverse_maps()
except:
colortext.error('An exception occurred creating the SIFTS object for {0}.'.format(pdb_id))
raise
try:
if self.bio_cache:
pdb_object = self.bio_cache.get_pdb_object(pdb_id)
else:
pdb_object = PDB(sifts_object.pdb_contents)
except:
colortext.error('An exception occurred creating the PDB object for {0}.'.format(pdb_id))
raise
self.pdbs[pdb_id.upper()] = dict(
id = pdb_id,
sifts = sifts_object,
pdb = pdb_object,
)
return self.pdbs[pdb_id]
def get_alignment(self, pdb_id_1, pdb_id_2, alignment_tool = 'clustalw', gap_opening_penalty = 0.2):
# Set up the objects
p1 = self.add_pdb(pdb_id_1)
p2 = self.add_pdb(pdb_id_2)
pdb_1 = p1['pdb']
pdb_2 = p2['pdb']
# Run a sequence alignment on the sequences
if not self.alignments.get(pdb_id_1, {}).get(pdb_id_2):
self.alignments[pdb_id_1] = self.alignments.get(pdb_id_1, {})
sa = SequenceAligner(alignment_tool = alignment_tool, gap_opening_penalty = gap_opening_penalty)
for chain_id, seq in sorted(pdb_1.seqres_sequences.items()):
sa.add_sequence('{0}_{1}'.format(pdb_id_1, chain_id), str(seq), ignore_bad_chains = True)
for chain_id, seq in sorted(pdb_2.seqres_sequences.items()):
sa.add_sequence('{0}_{1}'.format(pdb_id_2, chain_id), str(seq), ignore_bad_chains = True)
self.alignments[pdb_id_1][pdb_id_2] = dict(
alignment = sa,
best_matches = sa.align()
)
return self.alignments[pdb_id_1][pdb_id_2]
def get_mutations(self, pdb_id_1, pdb_id_2, alignment_tool = 'clustalw', gap_opening_penalty = 0.2):
'''Returns a mapping chain_of_pdb_1 -> List[PDBMutationPair] representing the mutations needed to transform each chain of pdb_1 into the respective chains of pdb_2.
This function also sets self.seqres_sequence_maps, a mapping Tuple(pdb_id_1, chain ID in pdb_id_1) -> Tuple(pdb_id_2, chain ID in pdb_id_2) -> a SequenceMap representing the mapping of residues between the chains based on the alignment.
Warning: This function does not consider what happens if a chain in pdb_id_1 matches two chains in pdb_id_2
which have differing sequences. In this case, a self-inconsistent set of mutations is returned.
One solution would be to extend the mapping to:
chain_m_of_pdb_1 -> common_mutations -> List[PDBMutationPair]
-> chain_x_of_pdb_2 -> List[PDBMutationPair]
-> chain_y_of_pdb_2 -> List[PDBMutationPair]
...
where common_mutations contains the set of mutations common to the mapping m->x and m->y etc. whereas the
other two mappings contain the set of mutations from m->x or m->y etc. respectively which do not occur in
common_mutations. In general, both chain_x_of_pdb_2 and chain_y_of_pdb_2 will be empty excepting the
considered case where x and y differ in sequence.
'''
# Set up the objects
p1 = self.add_pdb(pdb_id_1)
p2 = self.add_pdb(pdb_id_2)
self.chain_map[pdb_id_1] = self.chain_map.get(pdb_id_1, {})
# Determine which chains map to which
alignment = self.get_alignment(pdb_id_1, pdb_id_2, alignment_tool = alignment_tool, gap_opening_penalty = gap_opening_penalty)
best_matches = alignment['best_matches']
# Create the list of mutations
mutations = {}
for from_chain, mtches in sorted(best_matches.items()):
from_pdb_id, from_chain_id = from_chain.split('_')
# Only consider matches from pdb_id_1 to pdb_id_2
if from_pdb_id == pdb_id_1:
self.seqres_sequence_maps[(from_pdb_id, from_chain_id)] = {}
self.chain_map[from_pdb_id][from_chain_id] = self.chain_map[from_pdb_id].get(from_chain_id, {})
# Do not consider matches from pdb_id_1 to itself or matches with poor sequence similarity
restricted_mtchs = {}
for to_chain, similarity in sorted(mtches.items()):
if to_chain.split('_')[0] == pdb_id_2 and similarity >= self.acceptable_sequence_similarity:
restricted_mtchs[to_chain] = similarity
# Take the best matching chains and create a list of mutations needed to transform from_chain to those chains
# Warning: This does NOT take into account whether the sequences of the best matches differ.
if restricted_mtchs:
top_similarity = max(restricted_mtchs.values())
#todo: if the sequences of the best matches differ, raise an Exception. Use 2ZNW and 1DQJ as an example (2ZNW chain A matches with 48% to both 1DQJ chain A and chain B)
#top_matches = [to_chain for to_chain, similarity in sorted(restricted_mtchs.iteritems()) if similarity == top_similarity]
#pprint.pprint(restricted_mtchs)
#print(from_pdb_id, from_chain, 'top_matches', top_matches)
#sys.exit(0)
for to_chain, similarity in sorted(restricted_mtchs.items()):
to_pdb_id, to_chain_id = to_chain.split('_')
if similarity == top_similarity:
#print(from_pdb_id, from_chain_id)
#print(restricted_mtchs)
#print(to_pdb_id, to_chain, similarity)
self.chain_map[from_pdb_id][from_chain_id][to_pdb_id] = self.chain_map[from_pdb_id][from_chain_id].get(to_pdb_id, set())
self.chain_map[from_pdb_id][from_chain_id][to_pdb_id].add(to_chain)
mutations[from_chain_id] = mutations.get(from_chain_id, [])
chain_mutations = self.get_chain_mutations(from_pdb_id, from_chain_id, to_pdb_id, to_chain_id)
mutations[from_chain_id].extend(chain_mutations)
# mutations can contain duplicates so we remove those
for chain_id, mlist in mutations.items():
mutations[chain_id] = sorted(set(mlist))
return mutations
def get_corresponding_chains(self, from_pdb_id, from_chain_id, to_pdb_id):
'''Should be called after get_mutations.'''
chains = self.chain_map.get(from_pdb_id, {}).get(from_chain_id, {}).get(to_pdb_id, [])
return sorted(chains)
def get_chain_mutations(self, pdb_id_1, chain_1, pdb_id_2, chain_2):
'''Returns a list of tuples each containing a SEQRES Mutation object and an ATOM Mutation object representing the
mutations from pdb_id_1, chain_1 to pdb_id_2, chain_2.
SequenceMaps are constructed in this function between the chains based on the alignment.
PDBMutationPair are returned as they are hashable and amenable to Set construction to eliminate duplicates.
'''
# Set up the objects
p1 = self.add_pdb(pdb_id_1)
p2 = self.add_pdb(pdb_id_2)
sifts_1, pdb_1 = p1['sifts'], p1['pdb']
sifts_2, pdb_2 = p2['sifts'], p2['pdb']
# Set up the sequences
#pprint.pprint(sifts_1.seqres_to_atom_sequence_maps)
seqres_to_atom_sequence_maps_1 = sifts_1.seqres_to_atom_sequence_maps.get(chain_1, {}) # this is not guaranteed to exist e.g. 2ZNW chain A
seqres_1, atom_1 = pdb_1.seqres_sequences.get(chain_1), pdb_1.atom_sequences.get(chain_1)
seqres_2, atom_2 = pdb_2.seqres_sequences.get(chain_2), pdb_2.atom_sequences.get(chain_2)
if not seqres_1: raise Exception('No SEQRES sequence for chain {0} of {1}.'.format(chain_1, pdb_1))
if not atom_1: raise Exception('No ATOM sequence for chain {0} of {1}.'.format(chain_1, pdb_1))
if not seqres_2: raise Exception('No SEQRES sequence for chain {0} of {1}.'.format(chain_2, pdb_2))
if not atom_2: raise Exception('No ATOM sequence for chain {0} of {1}.'.format(chain_2, pdb_2))
seqres_str_1 = str(seqres_1)
seqres_str_2 = str(seqres_2)
# Align the SEQRES sequences
sa = SequenceAligner()
sa.add_sequence('{0}_{1}'.format(pdb_id_1, chain_1), seqres_str_1)
sa.add_sequence('{0}_{1}'.format(pdb_id_2, chain_2), seqres_str_2)
sa.align()
seqres_residue_mapping, seqres_match_mapping = sa.get_residue_mapping()
#colortext.pcyan(sa.alignment_output)
# Create a SequenceMap
seqres_sequence_map = SequenceMap()
assert(sorted(seqres_residue_mapping.keys()) == sorted(seqres_match_mapping.keys()))
for k, v in seqres_residue_mapping.items():
seqres_sequence_map.add(k, v, seqres_match_mapping[k])
self.seqres_sequence_maps[(pdb_id_1, chain_1)][(pdb_id_2, chain_2)] = seqres_sequence_map
# Determine the mutations between the SEQRES sequences and use these to generate a list of ATOM mutations
mutations = []
clustal_symbols = SubstitutionScore.clustal_symbols
#print(pdb_id_1, chain_1, pdb_id_2, chain_2)
#print(seqres_to_atom_sequence_maps_1)
for seqres_res_id, v in seqres_match_mapping.items():
# Look at all positions which differ. seqres_res_id is 1-indexed, following the SEQRES and UniProt convention. However, so are our Sequence objects.
if clustal_symbols[v.clustal] != '*':
# Get the wildtype Residue objects
seqres_wt_residue = seqres_1[seqres_res_id]
#print(seqres_wt_residue)
seqres_mutant_residue = seqres_2[seqres_residue_mapping[seqres_res_id]] # todo: this will probably fail for some cases where there is no corresponding mapping
# If there is an associated ATOM record for the wildtype residue, get its residue ID
atom_res_id = None
atom_chain_res_id = seqres_to_atom_sequence_maps_1.get(seqres_res_id)
try:
if atom_chain_res_id:
assert(atom_chain_res_id[0] == chain_1)
atom_residue = atom_1[atom_chain_res_id]
atom_res_id = atom_chain_res_id[1:]
assert(atom_residue.ResidueAA == seqres_wt_residue.ResidueAA)
assert(atom_residue.ResidueID == atom_res_id)
except:
atom_res_id = None
if seqres_wt_residue.ResidueAA != 'X':
# we do not seem to keep ATOM records for unknown/non-canonicals: see 2BTF chain A -> 2PBD chain A
raise
# Create two mutations - one for the SEQRES residue and one for the corresponding (if any) ATOM residue
# We create both so that the user is informed whether there is a mutation between the structures which is
# not captured by the coordinates.
# If there are no ATOM coordinates, there is no point creating an ATOM mutation object so we instead use
# the None type. This also fits with the approach in the SpiderWeb framework.
seqres_mutation = ChainMutation(seqres_wt_residue.ResidueAA, seqres_res_id,seqres_mutant_residue.ResidueAA, Chain = chain_1)
atom_mutation = None
if atom_res_id:
atom_mutation = ChainMutation(seqres_wt_residue.ResidueAA, atom_res_id, seqres_mutant_residue.ResidueAA, Chain = chain_1)
mutations.append(PDBMutationPair(seqres_mutation, atom_mutation))
return mutations
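# A minimal usage sketch for the aligner above (the helper name is illustrative). It assumes
# ClustalW is on the path and that the SIFTS/PDB files for 2AJF and 3D0G (the structures from
# the class docstring) can be retrieved; no bio_cache is used here.
def _example_sifts_chain_mutations():
    import pprint
    scmsa = SIFTSChainMutatorSequenceAligner(cache_dir = '/tmp')
    mutations = scmsa.get_mutations('2AJF', '3D0G')     # chain of 2AJF -> sorted list of PDBMutationPair
    for chain_id, mutation_pairs in sorted(mutations.items()):
        print('Chain {0}: {1} mutation(s)'.format(chain_id, len(mutation_pairs)))
        pprint.pprint(mutation_pairs)
    # Only meaningful after get_mutations has populated self.chain_map
    print(scmsa.get_corresponding_chains('2AJF', 'A', '3D0G'))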
|
class SIFTSChainMutatorSequenceAligner(object):
'''This is a useful utility class to generate a list of mutations between PDB files covered by SIFTS. It is used in the
SpiderWebs project.
Example usage:
import pprint
from klab.bio.clustalo import SIFTSChainMutatorSequenceAligner
scmsa = SIFTSChainMutatorSequenceAligner(bio_cache = bio_cache)
mutations = scmsa.get_mutations('2AJF', '3D0G')
if mutations:
print('{0} mismatches:\n{1}'.format(len(mutations), pprint.pformat(mutations)))
'''
def __init__(self, bio_cache = None, cache_dir = None, acceptable_sifts_sequence_percentage_match = 60.0, acceptable_sequence_similarity = 85.0):
pass
def add_pdb(self, pdb_id):
pass
def get_alignment(self, pdb_id_1, pdb_id_2, alignment_tool = 'clustalw', gap_opening_penalty = 0.2):
pass
def get_mutations(self, pdb_id_1, pdb_id_2, alignment_tool = 'clustalw', gap_opening_penalty = 0.2):
'''Returns a mapping chain_of_pdb_1 -> List[PDBMutationPair] representing the mutations needed to transform each chain of pdb_1 into the respective chains of pdb_2.
This function also sets self.seqres_sequence_maps, a mapping Tuple(pdb_id_1, chain ID in pdb_id_1) -> Tuple(pdb_id_2, chain ID in pdb_id_2) -> a SequenceMap representing the mapping of residues between the chains based on the alignment.
Warning: This function does not consider what happens if a chain in pdb_id_1 matches two chains in pdb_id_2
which have differing sequences. In this case, a self-inconsistent set of mutations is returned.
One solution would be to extend the mapping to:
chain_m_of_pdb_1 -> common_mutations -> List[PDBMutationPair]
-> chain_x_of_pdb_2 -> List[PDBMutationPair]
-> chain_y_of_pdb_2 -> List[PDBMutationPair]
...
where common_mutations contains the set of mutations common to the mapping m->x and m->y etc. whereas the
other two mappings contain the set of mutations from m->x or m->y etc. respectively which do not occur in
common_mutations. In general, both chain_x_of_pdb_2 and chain_y_of_pdb_2 will be empty excepting the
considered case where x and y differ in sequence.
'''
pass
def get_corresponding_chains(self, from_pdb_id, from_chain_id, to_pdb_id):
'''Should be called after get_mutations.'''
pass
def get_chain_mutations(self, pdb_id_1, chain_1, pdb_id_2, chain_2):
'''Returns a list of tuples each containing a SEQRES Mutation object and an ATOM Mutation object representing the
mutations from pdb_id_1, chain_1 to pdb_id_2, chain_2.
SequenceMaps are constructed in this function between the chains based on the alignment.
PDBMutationPair are returned as they are hashable and amenable to Set construction to eliminate duplicates.
'''
pass
| 7 | 4 | 39 | 6 | 23 | 10 | 6 | 0.53 | 1 | 11 | 7 | 0 | 6 | 8 | 6 | 6 | 256 | 51 | 137 | 61 | 129 | 72 | 132 | 61 | 124 | 12 | 1 | 5 | 34 |
143,492 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/clustalo.py
|
klab.bio.clustalo.SequenceAligner
|
class SequenceAligner(object):
''' This class is used to align sequences. To use it, first add sequences using the add_sequence function. Next, call the align function to perform
the alignment. Alignment results are stored in the following object variables:
matrix : the 1-indexed matrix returned from clustalw
named_matrix : [finish this help section...]
e.g.
sa = SequenceAligner()
sa.add_sequence('1A2P_A', 'AQVINTFDGVADYLQTYHKLPDNYITKSEAQALGWVASKGNLADVAPGKSIGGDIFSNREGKLPGKSGRTWREADINYTSGFRNSDRILYSSDWLIYKTTDHYQTFTKIR')
sa.add_sequence('1B20_A', 'AQVINTFDGVADYLQTYHKLPDNYITKSEAQALGWVASKGNLADVAPGKSIGGDIFSNREGKLPGKSGSTWREADINYTSGFRNSDRILYSSDWLIYKTTDHYQTFTKIR')
sa.add_sequence('2KF4_A', 'AQVINTFDGVADYLQTYHKLPDNYITKSEAQALGWVASKGNLADVAPGKSIGGDIFSNREGKLPGKSGRTWREADINYTSGFRNSDRILYSSDWLIYKTTDAYQTFTKIR')
best_matches = sa.align() # {'2KF4_A': {'2KF4_A': 100.0, '1A2P_A': 99.0, '1B20_A': 98.0}, '1A2P_A': {'2KF4_A': 99.0, '1A2P_A': 100.0, '1B20_A': 99.0}, '1B20_A': {'2KF4_A': 98.0, '1A2P_A': 99.0, '1B20_A': 100.0}}
best_matches_by_id = sa.get_best_matches_by_id('2KF4_A') # {'1A2P_A': 99.0, '1B20_A': 98.0}
get_residue_mapping returns the mapping between the sequences. Note that this mapping is only based on the sequence strings
and not e.g. on the residue IDs in the PDB. Since the sequences are 1-indexed, the mapping is also 1-indexed. In the example
above, for 1A2P to 1B20, the residue mapping would be 1->1 for residue A, 2->2 for residue Q, 3->3 for residue V etc.
'''
### Constructor ###
def __init__(self, alignment_tool = 'clustalw', gap_opening_penalty = 0.2):
'''The constructor accepts an alignment tool used to create the alignment and a gap opening penalty. Note that
the gap opening penalty is currently only used by ClustalW.'''
assert(alignment_tool == 'clustalw' or alignment_tool == 'clustalo')
gap_opening_penalty = float(gap_opening_penalty)
self.records = []
self.sequence_ids = {} # A 1-indexed list of the sequences in the order that they were added (1-indexing to match Clustal numbering)
self.matrix = None
self.named_matrix = None
self.alignment_output = None
self.alignment_tool = alignment_tool
self.gap_opening_penalty = gap_opening_penalty
### Class methods
@staticmethod
def from_list_of_FASTA_content(FASTA_content_list):
f = FASTA(FASTA_content_list[0])
for x in FASTA_content_list[1:]:
f += FASTA(x)
return SequenceAligner.from_FASTA(f)
@staticmethod
def from_FASTA(f):
sa = SequenceAligner()
for sequence in f.sequences:
sa.add_sequence('%s_%s' % (sequence[0], sequence[1]), sequence[2])
best_matches = sa.align()
return sa
### API methods ###
def __repr__(self):
s = []
best_matches = self.align()
for k, v in sorted(best_matches.items()):
s.append("%s: %s" % (k, ["%s, %s" % (x, y) for x, y in sorted(iter(v.items()), key=lambda x:-x[1]) if x !=k ]))
return "\n".join(s)
def add_sequence(self, sequence_id, sequence, ignore_bad_chains = False):
'''If ignore_bad_chains is True then any chains containing all Xs as the sequence will be silently skipped.
The default behavior is to raise a MalformedSequenceException in this case.'''
# This is a sanity check. ClustalO allows ':' in the chain ID but ClustalW replaces ':' with '_' which breaks our parsing
# All callers to add_sequence now need to replace ':' with '_' so that we can use ClustalW
assert(sequence_id.find(':') == -1)
if sequence_id in list(self.sequence_ids.values()):
raise Exception("Sequence IDs must be unique")
if list(set(sequence)) == ['X']:
if ignore_bad_chains:
return
else:
raise MalformedSequenceException('The sequence contains only X characters. This will crash Clustal Omega.')
self.records.append(">%s\n%s" % (sequence_id, "\n".join([sequence[i:i+80] for i in range(0, len(sequence), 80)])))
self.sequence_ids[len(self.sequence_ids) + 1] = sequence_id
def align(self):
records = self.records
percentage_identity_output = None
fasta_handle, fasta_filename = open_temp_file('/tmp')
clustal_output_handle, clustal_filename = open_temp_file('/tmp')
stats_output_handle, stats_filename = open_temp_file('/tmp')
tempfiles = [fasta_filename, clustal_filename, stats_filename]
# Create FASTA file
fasta_handle.write("\n".join(records))
fasta_handle.close()
try:
# Note: By default, ClustalW can rearrange the sequence order in the alignment i.e. the order in which we add
# the sequences is not necessarily the order in which they appear in the output. For simplicity, the parsing
# logic assumes (and asserts) that order is maintained so we add the -OUTORDER=INPUT command to ClustalW to
# ensure this.
if self.alignment_tool == 'clustalo':
p = _Popen('.', shlex.split('clustalo --infile %(fasta_filename)s --verbose --outfmt clustal --outfile %(clustal_filename)s --force' % vars()))
if p.errorcode:
raise Exception('An error occurred while calling clustalo to align sequences:\n%s' % p.stderr)
self.alignment_output = read_file(clustal_filename)
p = _Popen('.', shlex.split('clustalw -INFILE=%(clustal_filename)s -PIM -TYPE=PROTEIN -STATS=%(stats_filename)s -OUTFILE=/dev/null -OUTORDER=INPUT' % vars()))
if p.errorcode:
raise Exception('An error occurred while calling clustalw to generate the Percent Identity Matrix:\n%s' % p.stderr)
percentage_identity_output = p.stdout
elif self.alignment_tool == 'clustalw':
gap_opening_penalty = self.gap_opening_penalty
p = _Popen('.', shlex.split('clustalw -INFILE=%(fasta_filename)s -PIM -TYPE=PROTEIN -STATS=%(stats_filename)s -GAPOPEN=%(gap_opening_penalty)0.2f -OUTFILE=%(clustal_filename)s -OUTORDER=INPUT' % vars()))
if p.errorcode:
raise Exception('An error occurred while calling clustalw to generate the Percent Identity Matrix:\n%s' % p.stderr)
self.alignment_output = read_file(clustal_filename)
percentage_identity_output = p.stdout
else:
raise Exception("An unexpected alignment tool ('%s') was specified" % alignment_tool)
#colortext.pcyan(self.alignment_output)
except Exception as e:
colortext.error(str(e))
colortext.error(traceback.format_exc())
for t in tempfiles:
os.remove(t)
raise
for t in tempfiles:
try:
os.remove(t)
except: pass
return self._parse_percentage_identity_output(percentage_identity_output)
def get_best_matches_by_id(self, id, cut_off = 98.0):
if not self.alignment_output:
self.align()
best_matches = {}
named_matrix = self.named_matrix
for k, v in named_matrix[id].items():
if k != id and v >= cut_off:
best_matches[k] = v
return best_matches
def get_residue_mapping(self):
'''Returns a mapping between the sequences ONLY IF there are exactly two. This restriction makes the code much simpler.'''
if len(self.sequence_ids) == 2:
if not self.alignment_output:
self.align()
assert(self.alignment_output)
return self._create_residue_map(self._get_alignment_lines(), self.sequence_ids[1], self.sequence_ids[2])
else:
return None
### Private methods ###
def _create_residue_map(self, alignment_lines, from_sequence_id, to_sequence_id):
from_sequence = alignment_lines[from_sequence_id]
to_sequence = alignment_lines[to_sequence_id]
match_sequence = alignment_lines[-1]
mapping = {}
match_mapping = {}
assert(len(from_sequence) == len(to_sequence) and len(to_sequence) == len(match_sequence))
from_residue_id = 0
to_residue_id = 0
for x in range(len(from_sequence)):
c = 0
from_residue = from_sequence[x]
to_residue = to_sequence[x]
match_type = match_sequence[x]
if from_residue != '-':
from_residue_id += 1
assert('A' <= from_residue <= 'Z')
c += 1
if to_residue != '-':
to_residue_id += 1
assert('A' <= to_residue <= 'Z')
c += 1
if c == 2:
if from_residue == to_residue:
assert(match_type == '*')
# We do not want to include matches which are distant from other matches
has_surrounding_matches = ((x > 0) and (match_sequence[x - 1] != ' ')) or (((x + 1) < len(match_sequence)) and (match_sequence[x + 1] != ' '))
if match_type == '*':
# "a single, fully conserved residue" - http://www.ebi.ac.uk/Tools/msa/clustalw2/help/faq.html
mapping[from_residue_id] = to_residue_id
match_mapping[from_residue_id] = SubstitutionScore(1, from_residue, to_residue)
elif match_type == ':':
# "conservation between groups of strongly similar properties - scoring > 0.5 in the Gonnet PAM 250 matrix" - ibid.
if has_surrounding_matches:
mapping[from_residue_id] = to_residue_id
match_mapping[from_residue_id] = SubstitutionScore(0, from_residue, to_residue)
elif match_type == '.':
# "conservation between groups of weakly similar properties - scoring =< 0.5 in the Gonnet PAM 250 matrix" - ibid.
if has_surrounding_matches:
mapping[from_residue_id] = to_residue_id
match_mapping[from_residue_id] = SubstitutionScore(-1, from_residue, to_residue)
elif match_type == ' ':
# not conserved
# Allow unmatched residues if they have surrounding matches
if has_surrounding_matches:
mapping[from_residue_id] = to_residue_id
match_mapping[from_residue_id] = SubstitutionScore(-2, from_residue, to_residue)
else:
assert(False)
### Prune the mapping
# We probably do not want to consider all partial matches that Clustal reports as some may be coincidental
# e.g. part of a HIS-tag partially matching a tyrosine so we will prune the mapping.
# Remove any matches where there are no matches which are either direct neighbors or the neighbor of a direct
# neighbor e.g. the colon in this match " ***..*** : .*****" is on its own
while True:
remove_count = 0
all_keys = sorted(mapping.keys())
for k in all_keys:
current_keys = list(mapping.keys())
if (k - 2 not in current_keys) and (k - 1 not in current_keys) and (k + 1 not in current_keys) and (k + 2 not in current_keys):
del mapping[k]
del match_mapping[k]
remove_count += 1
if remove_count == 0:
break
# Remove all leading partial matches except the last one
keys_so_far = set()
for k in sorted(mapping.keys()):
if match_mapping[k].clustal == 1:
break
else:
keys_so_far.add(k)
for k in sorted(keys_so_far)[:-1]:
del mapping[k]
del match_mapping[k]
# Remove all trailing partial matches except the first one
keys_so_far = set()
for k in sorted(list(mapping.keys()), reverse = True):
if match_mapping[k].clustal == 1:
break
else:
keys_so_far.add(k)
for k in sorted(keys_so_far)[1:]:
del mapping[k]
del match_mapping[k]
return mapping, match_mapping
def _get_alignment_lines(self):
''' This function parses the Clustal Omega alignment output and returns the aligned sequences in a dict: sequence_id -> sequence_string.
The special key -1 is reserved for the match line (e.g. ' .:******* *').'''
# Strip the boilerplate lines
lines = self.alignment_output.split("\n")
assert(lines[0].startswith('CLUSTAL'))
lines = '\n'.join(lines[1:]).lstrip().split('\n')
# The sequence IDs should be unique. Reassert this here
assert(len(list(self.sequence_ids.values())) == len(set(self.sequence_ids.values())))
# Create the list of sequence IDs
id_list = [v for k, v in sorted(self.sequence_ids.items())]
# Determine the indentation level
first_id = id_list[0]
header_regex = re.compile("(.*?\s+)(.*)")
alignment_regex = re.compile("^([A-Z\-]+)\s*$")
mtchs = header_regex.match(lines[0])
assert(mtchs.group(1).strip() == first_id)
indentation = len(mtchs.group(1))
sequence = mtchs.group(2)
assert(sequence)
assert(alignment_regex.match(sequence))
# Create empty lists for the sequences
sequences = {}
for id in id_list:
sequences[id] = []
sequences[-1] = []
# Get the lists of sequences
num_ids = len(id_list)
for x in range(0, len(lines), num_ids + 2):
for y in range(num_ids):
id = id_list[y]
assert(lines[x + y][:indentation].strip() == id)
assert(lines[x + y][indentation - 1] == ' ')
sequence = lines[x + y][indentation:].strip()
assert(alignment_regex.match(sequence))
sequences[id].append(sequence)
# Get the length of the sequence lines
length_of_sequences = list(set(map(len, [v[-1] for k, v in sequences.items() if k != -1])))
assert(len(length_of_sequences) == 1)
length_of_sequences = length_of_sequences[0]
# Parse the Clustal match line
assert(lines[x + num_ids][:indentation].strip() == '')
match_sequence = lines[x + num_ids][indentation:indentation + length_of_sequences]
assert(match_sequence.strip() == lines[x + num_ids].strip())
assert(lines[x + y][indentation - 1] == ' ')
sequences[-1].append(match_sequence)
# Check for the empty line
assert(lines[x + num_ids + 1].strip() == '')
# Create the sequences, making sure that all sequences are the same length
lengths = set()
for k, v in sequences.items():
sequences[k] = "".join(v)
lengths.add(len(sequences[k]))
assert(len(lengths) == 1)
return sequences
def _parse_percentage_identity_output(self, percentage_identity_output):
# Initialize matrix
matrix = dict.fromkeys(list(self.sequence_ids.keys()), None)
for x in range(len(self.sequence_ids)):
matrix[x + 1] = {}
for y in range(len(self.sequence_ids)):
matrix[x + 1][y + 1] = None
matrix[x + 1][x + 1] = 100.0
matches = alignment_results_regex.match(percentage_identity_output)
if matches:
assert(len(matches.groups(0)) == 1)
for l in matches.group(1).strip().split('\n'):
line_matches = alignment_line_regex.match(l)
if line_matches:
from_index = int(line_matches.group(1))
to_index = int(line_matches.group(2))
score = float(line_matches.group(3))
assert(matrix[from_index][to_index] == None)
assert(matrix[to_index][from_index] == None)
matrix[from_index][to_index] = score
matrix[to_index][from_index] = score
else:
raise colortext.Exception("Error parsing alignment line for alignment scores. The line was:\n%s" % l)
else:
raise colortext.Exception("Error parsing alignment output for alignment scores. The output was:\n%s" % percentage_identity_output)
self.matrix = matrix
return self._create_named_matrix()
def _create_named_matrix(self):
matrix = self.matrix
named_matrix = {}
for x, line in matrix.items():
named_matrix[self.sequence_ids[x]] = {}
for y, value in line.items():
named_matrix[self.sequence_ids[x]][self.sequence_ids[y]] = value
self.named_matrix = named_matrix
return named_matrix
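# A short sketch of the alignment/residue-mapping API above (helper name illustrative), assuming
# ClustalW is installed. The two sequences are copied from the class docstring (1A2P chain A and
# 1B20 chain A), which differ at a single position.
def _example_sequence_alignment():
    sa = SequenceAligner()
    sa.add_sequence('1A2P_A', 'AQVINTFDGVADYLQTYHKLPDNYITKSEAQALGWVASKGNLADVAPGKSIGGDIFSNREGKLPGKSGRTWREADINYTSGFRNSDRILYSSDWLIYKTTDHYQTFTKIR')
    sa.add_sequence('1B20_A', 'AQVINTFDGVADYLQTYHKLPDNYITKSEAQALGWVASKGNLADVAPGKSIGGDIFSNREGKLPGKSGSTWREADINYTSGFRNSDRILYSSDWLIYKTTDHYQTFTKIR')
    best_matches = sa.align()                           # percentage identity matrix keyed by sequence ID
    mapping, match_mapping = sa.get_residue_mapping()   # available only when exactly two sequences were added
    # mapping is 1-indexed; match_mapping values are SubstitutionScore objects whose .clustal field
    # encodes the Clustal consensus symbol (1 = '*', 0 = ':', -1 = '.', -2 = ' ').
    print(best_matches['1A2P_A']['1B20_A'])
    print(mapping[1], match_mapping[1].clustal)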
|
class SequenceAligner(object):
''' This class is used to align sequences. To use it, first add sequences using the add_sequence function. Next, call the align function to perform
the alignment. Alignment results are stored in the following object variables:
matrix : the 1-indexed matrix returned from clustalw
named_matrix : [finish this help section...]
e.g.
sa = SequenceAligner()
sa.add_sequence('1A2P_A', 'AQVINTFDGVADYLQTYHKLPDNYITKSEAQALGWVASKGNLADVAPGKSIGGDIFSNREGKLPGKSGRTWREADINYTSGFRNSDRILYSSDWLIYKTTDHYQTFTKIR')
sa.add_sequence('1B20_A', 'AQVINTFDGVADYLQTYHKLPDNYITKSEAQALGWVASKGNLADVAPGKSIGGDIFSNREGKLPGKSGSTWREADINYTSGFRNSDRILYSSDWLIYKTTDHYQTFTKIR')
sa.add_sequence('2KF4_A', 'AQVINTFDGVADYLQTYHKLPDNYITKSEAQALGWVASKGNLADVAPGKSIGGDIFSNREGKLPGKSGRTWREADINYTSGFRNSDRILYSSDWLIYKTTDAYQTFTKIR')
best_matches = sa.align() # {'2KF4_A': {'2KF4_A': 100.0, '1A2P_A': 99.0, '1B20_A': 98.0}, '1A2P_A': {'2KF4_A': 99.0, '1A2P_A': 100.0, '1B20_A': 99.0}, '1B20_A': {'2KF4_A': 98.0, '1A2P_A': 99.0, '1B20_A': 100.0}}
best_matches_by_id = sa.get_best_matches_by_id('2KF4_A') # {'1A2P_A': 99.0, '1B20_A': 98.0}
get_residue_mapping returns the mapping between the sequences. Note that this mapping is only based on the sequence strings
and not e.g. on the residue IDs in the PDB. Since the sequences are 1-indexed, the mapping is also 1-indexed. In the example
above, for 1A2P to 1B20, the residue mapping would be 1->1 for residue A, 2->2 for residue Q, 3->3 for residue V etc.
'''
def __init__(self, alignment_tool = 'clustalw', gap_opening_penalty = 0.2):
'''The constructor accepts an alignment tool used to create the alignment and a gap opening penalty. Note that
the gap opening penalty is currently only used by ClustalW.'''
pass
@staticmethod
def from_list_of_FASTA_content(FASTA_content_list):
pass
@staticmethod
def from_FASTA(f):
pass
def __repr__(self):
pass
def add_sequence(self, sequence_id, sequence, ignore_bad_chains = False):
'''If ignore_bad_chains is True then any chains containing all Xs as the sequence will be silently skipped.
The default behavior is to raise a MalformedSequenceException in this case.'''
pass
def align(self):
pass
def get_best_matches_by_id(self, id, cut_off = 98.0):
pass
def get_residue_mapping(self):
'''Returns a mapping between the sequences ONLY IF there are exactly two. This restriction makes the code much simpler.'''
pass
def _create_residue_map(self, alignment_lines, from_sequence_id, to_sequence_id):
pass
def _get_alignment_lines(self):
''' This function parses the Clustal Omega alignment output and returns the aligned sequences in a dict: sequence_id -> sequence_string.
The special key -1 is reserved for the match line (e.g. ' .:******* *').'''
pass
def _parse_percentage_identity_output(self, percentage_identity_output):
pass
def _create_named_matrix(self):
pass
| 15 | 5 | 26 | 3 | 20 | 3 | 5 | 0.24 | 1 | 13 | 4 | 0 | 10 | 7 | 12 | 12 | 355 | 49 | 248 | 90 | 233 | 59 | 235 | 87 | 222 | 23 | 1 | 4 | 65 |
143,493 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/fragments/generate_fragments.py
|
klab.bio.fragments.generate_fragments.OptionParserWithNewlines
|
class OptionParserWithNewlines(OptionParser):
'''Override the help section with a function which does not strip the newline characters.'''
def format_epilog(self, formatter):
return self.epilog
|
class OptionParserWithNewlines(OptionParser):
'''Override the help section with a function which does not strip the newline characters.'''
def format_epilog(self, formatter):
pass
| 2 | 1 | 2 | 0 | 2 | 0 | 1 | 0.33 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 54 | 4 | 0 | 3 | 2 | 1 | 1 | 3 | 2 | 1 | 1 | 2 | 0 | 1 |
143,494 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/dssp.py
|
klab.bio.dssp.ComplexDSSP
|
class ComplexDSSP(MonomerDSSP):
'''
A class wrapper for DSSP.
Note: This class does *not* strip the PDB file.
Once initialized, the dssp element of the object should contain a mapping from protein chain IDs to dicts.
The dict associated with a protein chain ID is a dict from PDB residue IDs to details about that residue.
For example:
dssp -> 'A' -> ' 64 ' -> {
'3LC': 'LEU',
'acc': 171,
'bp_1': 0,
'bp_2': 0,
'chain_id': 'I',
'dssp_res_id': 10,
'dssp_residue_aa': 'L',
'exposure': 0.95,
'is_buried': False,
'pdb_res_id': ' 64 ',
'residue_aa': 'L',
'sheet_label': ' ',
'ss': None,
'ss_details': '< '}
Description of the fields:
- residue_aa and 3LC contain the residue 1-letter and 3-letter codes respectively;
- acc is the number of water molecules in contact with this residue (according to DSSP);
- exposure is a normalized measure of exposure where 0.0 denotes total burial and 1.0 denotes total exposure;
- exposure is calculated by dividing the number of water molecules in contact with this residue (according to DSSP) by the residue_max_acc value from the appropriate value table;
- is_buried is either None (could not be determined), True if exposure < cut_off, or False if exposure >= cut_off;
- ss is the assigned secondary structure type, using the DSSP secondary structure types (see basics.py/dssp_secondary_structure_types). This value may be None if no secondary structure was assigned.
Usage:
d = ComplexDSSP.from_RCSB('1HAG')
# access the dict directly
print(d.dssp['I'][' 64 ']['exposure'])
print(d.dssp['I'][' 64 ']['is_buried'])
# use dot notation
print(d.dsspb.I.get(' 64 ').exposure)
print(d.dsspb.I.get(' 64 ').is_buried)
# iterate through the residues
for chain_id, mapping in d:
for residue_id, residue_details in sorted(mapping.items()):
print(residue_id, residue_details['exposure'])
'''
@classmethod
def from_pdb_contents(cls, pdb_contents, cut_off = 0.25, acc_array = 'Miller', tmp_dir = '/tmp', read_only = False):
return cls(PDB(pdb_contents), cut_off = cut_off, acc_array = acc_array, tmp_dir = tmp_dir, read_only = read_only)
@classmethod
def from_pdb_filepath(cls, pdb_filepath, cut_off = 0.25, acc_array = 'Miller', tmp_dir = '/tmp', read_only = False):
return cls(PDB(read_file(pdb_filepath)), cut_off = cut_off, acc_array = acc_array, tmp_dir = tmp_dir, read_only = read_only)
@classmethod
def from_RCSB(cls, pdb_id, cut_off = 0.25, acc_array = 'Miller', tmp_dir = '/tmp', read_only = False):
return cls(PDB(retrieve_pdb(pdb_id)), cut_off = cut_off, acc_array = acc_array, tmp_dir = tmp_dir, read_only = read_only)
def __init__(self, p, cut_off = 0.25, acc_array = 'Miller', tmp_dir = '/tmp', read_only = False):
'''This function runs DSSP on the complex; unlike MonomerDSSP, the PDB file is not stripped to individual chains.
p should be a PDB object (see pdb.py).
'''
try:
_Popen('.', shlex.split('mkdssp --version'))
except:
raise colortext.Exception('mkdssp does not seem to be installed in a location declared in the environment path.')
self.cut_off = cut_off
self.tmp_dir = tmp_dir
self.residue_max_acc = residue_max_acc[acc_array]
self.read_only = read_only
if not self.read_only:
self.pdb = p.clone() # make a local copy in case this gets modified externally
else:
self.pdb = p
self.chain_order = self.pdb.atom_chain_order
self.dssp_output = None
self.dssp = {}
self.compute()
self.chain_order = [c for c in self.chain_order if c in self.dssp]
self.dsspb = NestedBunch(self.dssp)
def compute(self):
tmp_dir = self.tmp_dir
if not self.read_only:
pdb_object = self.pdb.clone()
else:
pdb_object = self.pdb # in general, we should not be modifying the structure in this class
input_filepath = write_temp_file(tmp_dir, pdb_object.get_content(), ftype = 'w', prefix = 'dssp_')
output_filepath = write_temp_file(tmp_dir, '', ftype = 'w', prefix = 'dssp_')
try:
p = _Popen('.', shlex.split('mkdssp -i {input_filepath} -o {output_filepath}'.format(**locals())))
if p.errorcode:
if p.stderr.find('empty protein, or no valid complete residues') != -1:
raise MissingAtomException(p.stdout)
else:
raise Exception('An error occurred while calling DSSP:\n%s' % p.stderr)
self.dssp_output = read_file(output_filepath)
self.dssp = self.parse_output()
except MissingAtomException as e:
os.remove(input_filepath)
os.remove(output_filepath)
raise
except Exception as e:
os.remove(input_filepath)
os.remove(output_filepath)
raise colortext.Exception('%s\n%s' % (str(e), traceback.format_exc()))
os.remove(input_filepath)
os.remove(output_filepath)
def parse_output(self):
d = {}
dssp_output = self.dssp_output
assert(dssp_output.startswith('===='))
header_line = ' # RESIDUE AA STRUCTURE BP1 BP2 ACC N-H-->O O-->H-N N-H-->O O-->H-N TCO KAPPA ALPHA PHI PSI X-CA Y-CA Z-CA'
idx = dssp_output.find(header_line)
assert(idx != -1)
data_lines = [l for l in dssp_output[idx + len(header_line):].split('\n') if l.strip()]
for dl in data_lines:
l = self.parse_data_line(dl)
if l:
d[l['chain_id']] = d.get(l['chain_id'], {})
d[l['chain_id']][l['pdb_res_id']] = l
return d
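# A minimal sketch of the complex-level DSSP wrapper above (helper name illustrative), assuming
# mkdssp is installed and 1HAG (the structure used in the class docstring) can be fetched from the RCSB.
def _example_complex_dssp():
    d = ComplexDSSP.from_RCSB('1HAG')
    for chain_id, residue_map in d:                     # iteration comes from MonomerDSSP.__iter__
        for residue_id, details in sorted(residue_map.items()):
            if details['is_buried'] is False:           # None means burial could not be determined
                print(chain_id, residue_id, details['exposure'], details['ss'])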
|
class ComplexDSSP(MonomerDSSP):
'''
A class wrapper for DSSP.
Note: This class does *not* strip the PDB file.
Once initialized, the dssp element of the object should contain a mapping from protein chain IDs to dicts.
The dict associated with a protein chain ID is a dict from PDB residue IDs to details about that residue.
For example:
dssp -> 'A' -> ' 64 ' -> {
'3LC': 'LEU',
'acc': 171,
'bp_1': 0,
'bp_2': 0,
'chain_id': 'I',
'dssp_res_id': 10,
'dssp_residue_aa': 'L',
'exposure': 0.95,
'is_buried': False,
'pdb_res_id': ' 64 ',
'residue_aa': 'L',
'sheet_label': ' ',
'ss': None,
'ss_details': '< '}
Description of the fields:
- residue_aa and 3LC contain the residue 1-letter and 3-letter codes respectively;
- acc is the number of water molecules in contact with this residue (according to DSSP);
- exposure is a normalized measure of exposure where 0.0 denotes total burial and 1.0 denotes total exposure;
- exposure is calculated by dividing the number of water molecules in contact with this residue (according to DSSP) by the residue_max_acc value from the appropriate value table;
- is_buried is either None (could not be determined), True if exposure < cut_off, or False if exposure >= cut_off;
- ss is the assigned secondary structure type, using the DSSP secondary structure types (see basics.py/dssp_secondary_structure_types). This value may be None if no secondary structure was assigned.
Usage:
d = ComplexDSSP.from_RCSB('1HAG')
# access the dict directly
print(d.dssp['I'][' 64 ']['exposure'])
print(d.dssp['I'][' 64 ']['is_buried'])
# use dot notation
print(d.dsspb.I.get(' 64 ').exposure)
print(d.dsspb.I.get(' 64 ').is_buried)
# iterate through the residues
for chain_id, mapping in d:
for residue_id, residue_details in sorted(mapping.items()):
print(residue_id, residue_details['exposure'])
'''
@classmethod
def from_pdb_contents(cls, pdb_contents, cut_off = 0.25, acc_array = 'Miller', tmp_dir = '/tmp', read_only = False):
pass
@classmethod
def from_pdb_filepath(cls, pdb_filepath, cut_off = 0.25, acc_array = 'Miller', tmp_dir = '/tmp', read_only = False):
pass
@classmethod
def from_RCSB(cls, pdb_id, cut_off = 0.25, acc_array = 'Miller', tmp_dir = '/tmp', read_only = False):
pass
def __init__(self, p, cut_off = 0.25, acc_array = 'Miller', tmp_dir = '/tmp', read_only = False):
'''This function runs DSSP on the complex; unlike MonomerDSSP, the PDB file is not stripped to individual chains.
p should be a PDB object (see pdb.py).
'''
pass
def compute(self):
pass
def parse_output(self):
pass
| 10 | 2 | 12 | 0 | 11 | 1 | 3 | 0.67 | 1 | 6 | 4 | 0 | 3 | 9 | 6 | 18 | 130 | 16 | 70 | 31 | 60 | 47 | 64 | 27 | 57 | 6 | 2 | 3 | 15 |
143,495 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/dssp.py
|
klab.bio.dssp.MissingAtomException
|
class MissingAtomException(Exception): pass
|
class MissingAtomException(Exception):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
143,496 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/fragments/utils.py
|
klab.bio.fragments.utils.LogFile
|
class LogFile(object):
class LogFileException(Exception): pass
def __init__(self, flname):
self.logfile = flname
self.format = "%s: Job ID %s results will be saved in %s.%s"
self.regex = re.compile(self.format % ("^(.*)", "(\\d+)", "(.+)\\", "$"))
def getName(self):
return self.logfile
def readFromLogfile(self):
joblist = {}
F = open(self.logfile, "r")
lines = F.read().strip().split("\n")
F.close()
for line in lines:
mtchs = self.regex.match(line)
if mtchs:
jobID = int(mtchs.group(2))
jobdir = mtchs.group(3)
nt = mtchs.group(1)
nt = nt.replace("-", "")
nt = nt[:nt.find(".")]
timetaken = datetime.datetime.now() - datetime.datetime(*time.strptime(nt, "%Y%m%dT%H:%M:%S")[0:6])
jobtime = timetaken.seconds
joblist[jobID] = {"Directory" : jobdir, "TimeInSeconds" : jobtime}
else:
raise LogFileException("Error parsing logfile: '%s' does not match regex." % line)
return joblist
def writeToLogfile(self, o_datetime, jobid, location):
F = open(self.logfile, "a")
F.write(self.format % (o_datetime.isoformat(), jobid, location, "\n"))
F.close()
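# A small round-trip sketch for the log-file helper above (the helper name and paths are illustrative).
def _example_logfile_roundtrip():
    import datetime
    log = LogFile('/tmp/fragment_jobs.log')
    log.writeToLogfile(datetime.datetime.now(), 1234, '/tmp/fragment_job_1234')
    joblist = log.readFromLogfile()                     # job ID -> {'Directory': ..., 'TimeInSeconds': ...}
    print(joblist[1234]['Directory'])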
|
class LogFile(object):
class LogFileException(Exception): pass
def __init__(self, flname):
pass
def getName(self):
pass
def readFromLogfile(self):
pass
def writeToLogfile(self, o_datetime, jobid, location):
pass
| 6 | 0 | 7 | 0 | 7 | 0 | 2 | 0 | 1 | 3 | 1 | 0 | 4 | 3 | 4 | 4 | 36 | 5 | 31 | 20 | 26 | 0 | 31 | 20 | 25 | 3 | 1 | 2 | 6 |
143,497 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/dssp.py
|
klab.bio.dssp.MonomerDSSP
|
class MonomerDSSP(object):
'''
A class wrapper for DSSP.
Note: This class strips a PDB file to each chain and computes the DSSP for that chain alone. If you want to run DSSP
on complexes, subclass this class and do not perform the stripping.
Once initialized, the dssp element of the object should contain a mapping from protein chain IDs to dicts.
The dict associated with a protein chain ID is a dict from PDB residue IDs to details about that residue.
For example:
dssp -> 'A' -> ' 64 ' -> {
'3LC': 'LEU',
'acc': 171,
'bp_1': 0,
'bp_2': 0,
'chain_id': 'I',
'dssp_res_id': 10,
'dssp_residue_aa': 'L',
'exposure': 0.95,
'is_buried': False,
'pdb_res_id': ' 64 ',
'residue_aa': 'L',
'sheet_label': ' ',
'ss': None,
'ss_details': '< '}
Description of the fields:
- residue_aa and 3LC contain the residue 1-letter and 3-letter codes respectively;
- acc is the number of water molecules in contact with this residue (according to DSSP);
- exposure is a normalized measure of exposure where 0.0 denotes total burial and 1.0 denotes total exposure;
- exposure is calculated by dividing the number of water molecules in contact with this residue (according to DSSP) by the residue_max_acc value from the appropriate value table;
- is_buried is either None (could not be determined), True if exposure < cut_off, or False if exposure >= cut_off;
- ss is the assigned secondary structure type, using the DSSP secondary structure types (see basics.py/dssp_secondary_structure_types). This value may be None if no secondary structure was assigned.
Usage:
d = MonomerDSSP.from_RCSB('1HAG')
# access the dict directly
print(d.dssp['I'][' 64 ']['exposure'])
print(d.dssp['I'][' 64 ']['is_buried'])
# use dot notation
print(d.dsspb.I.get(' 64 ').exposure)
print(d.dsspb.I.get(' 64 ').is_buried)
# iterate through the residues
for chain_id, mapping in d:
for residue_id, residue_details in sorted(mapping.items()):
print(residue_id, residue_details['exposure'])
'''
@classmethod
def from_pdb_contents(cls, pdb_contents, cut_off = 0.25, acc_array = 'Miller', tmp_dir = '/tmp', read_only = False):
return cls(PDB(pdb_contents), cut_off = cut_off, acc_array = acc_array, tmp_dir = tmp_dir, read_only = read_only)
@classmethod
def from_pdb_filepath(cls, pdb_filepath, cut_off = 0.25, acc_array = 'Miller', tmp_dir = '/tmp', read_only = False):
return cls(PDB(read_file(pdb_filepath)), cut_off = cut_off, acc_array = acc_array, tmp_dir = tmp_dir, read_only = read_only)
@classmethod
def from_RCSB(cls, pdb_id, cut_off = 0.25, acc_array = 'Miller', tmp_dir = '/tmp', read_only = False):
return cls(PDB(retrieve_pdb(pdb_id)), cut_off = cut_off, acc_array = acc_array, tmp_dir = tmp_dir, read_only = read_only)
def __init__(self, p, cut_off = 0.25, acc_array = 'Miller', tmp_dir = '/tmp', read_only = False):
'''This function strips a PDB file to one chain and then runs DSSP on this new file.
p should be a PDB object (see pdb.py).
'''
try:
_Popen('.', shlex.split('mkdssp --version'))
except:
raise colortext.Exception('mkdssp does not seem to be installed in a location declared in the environment path.')
self.cut_off = cut_off
self.tmp_dir = tmp_dir
self.residue_max_acc = residue_max_acc[acc_array]
self.read_only = read_only
if not self.read_only:
self.pdb = p.clone() # make a local copy in case this gets modified externally
else:
self.pdb = p
self.chain_order = self.pdb.atom_chain_order
self.dssp_output = {}
self.dssp = {}
for chain_id in [c for c in list(self.pdb.atom_sequences.keys()) if self.pdb.chain_types[c] == 'Protein']:
self.compute(chain_id)
self.chain_order = [c for c in self.chain_order if c in self.dssp]
self.dsspb = NestedBunch(self.dssp)
def __iter__(self):
self._iter_keys = [c for c in self.chain_order]
self._iter_keys.reverse() # we pop from the list
return self
def __next__(self):
try:
chain_id = self._iter_keys.pop()
return chain_id, self.dssp[chain_id]
except:
raise StopIteration
def __repr__(self):
return pprint.pformat(self.dssp)
def compute(self, chain_id):
tmp_dir = self.tmp_dir
pdb_object = self.pdb.clone() # we are immediately modifying the PDB file by stripping chains so we need to make a copy
pdb_object.strip_to_chains(chain_id)
input_filepath = write_temp_file(tmp_dir, pdb_object.get_content(), ftype = 'w', prefix = 'dssp_')
output_filepath = write_temp_file(tmp_dir, '', ftype = 'w', prefix = 'dssp_')
try:
p = _Popen('.', shlex.split('mkdssp -i {input_filepath} -o {output_filepath}'.format(**locals())))
if p.errorcode:
if p.stderr.find('empty protein, or no valid complete residues') != -1:
raise MissingAtomException(p.stdout)
else:
raise Exception('An error occurred while calling DSSP:\n%s' % p.stderr)
self.dssp_output[chain_id] = read_file(output_filepath)
self.dssp[chain_id] = self.parse_output(chain_id)
except MissingAtomException as e:
os.remove(input_filepath)
os.remove(output_filepath)
raise
except Exception as e:
os.remove(input_filepath)
os.remove(output_filepath)
raise colortext.Exception('%s\n%s' % (str(e), traceback.format_exc()))
os.remove(input_filepath)
os.remove(output_filepath)
def parse_output(self, chain_id):
d = {}
dssp_output = self.dssp_output[chain_id]
assert(dssp_output.startswith('===='))
header_line = ' # RESIDUE AA STRUCTURE BP1 BP2 ACC N-H-->O O-->H-N N-H-->O O-->H-N TCO KAPPA ALPHA PHI PSI X-CA Y-CA Z-CA'
idx = dssp_output.find(header_line)
assert(idx != -1)
data_lines = [l for l in dssp_output[idx + len(header_line):].split('\n') if l.strip()]
for dl in data_lines:
l = self.parse_data_line(dl)
if l:
d[l['pdb_res_id']] = l
return d
#Sample line:
# # RESIDUE AA STRUCTURE BP1 BP2 ACC N-H-->O O-->H-N N-H-->O O-->H-N TCO KAPPA ALPHA PHI PSI X-CA Y-CA Z-CA
# 1 55 I D 0 0 210 0, 0.0 2,-0.2 0, 0.0 0, 0.0 0.000 360.0 360.0 360.0 -37.4 5.2 -12.5 1.9
# 13 5 E P T 345S+ 0 0 13 0, 0.0 -1,-0.1 0, 0.0 136,-0.1 0.852 103.9 38.3 -44.1 -45.0 1.8 14.6 14.5
# 54 33 E L E -IJ 64 97B 1 10,-2.7 9,-1.4 -2,-0.4 10,-0.9 -0.789 25.5-160.5 -94.1 131.9 15.6 -3.3 9.4
#Sample line with insertion code
# 1 1HE T 0 0 152 0, 0.0 2,-0.3 0, 0.0 4,-0.0 0.000 360.0 360.0 360.0 53.3 22.6 20.0 16.5
#0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345
# 1 2 3 4 5 6 7 8 9 0 1 2 3
def parse_data_line(self, data_line):
d = dict(
dssp_res_id = data_line[0:5],
pdb_res_id = data_line[5:11], # this includes an insertion code e.g. run against 1HAG
chain_id = data_line[11],
dssp_residue_aa = data_line[13], # e.g. a-z can be used for SS-bridge cysteines
ss = data_line[16].strip() or ' ',
ss_details = data_line[18:25],
bp_1 = data_line[25:29],
bp_2 = data_line[29:33],
sheet_label = data_line[33],
acc = data_line[34:38],
)
if d['dssp_residue_aa'] != '!': # e.g. 1A22, chain A, between PDB residue IDs 129 and 136
self.check_line(d) # note: this function call has side-effects
self.compute_burial(d)
return d
else:
return None
def check_line(self, d):
d['dssp_res_id'] = int(d['dssp_res_id'])
int(d['pdb_res_id'][:-1])
assert(d['pdb_res_id'][0] == ' ') # I think this is the case
assert(d['pdb_res_id'][-1] == ' ' or d['pdb_res_id'][-1].isalpha())
d['pdb_res_id'] = d['pdb_res_id'][1:]
assert(d['chain_id'].isalnum())
d['residue_aa'] = residue_type_3to1_map[residue_type_1to3[d['dssp_residue_aa']]] # convert the DSSP residue type into a canonical 1-letter code or 'X'
assert(d['residue_aa'] in residue_types_1)
assert(d['ss'] in secondary_structure_types)
d['bp_1'] = int(d['bp_1'])
d['bp_2'] = int(d['bp_2'])
d['acc'] = int(d['acc'])
d['3LC'] = residue_type_1to3.get(d['dssp_residue_aa'])
def compute_burial(self, d):
cut_off = self.cut_off
if d['3LC'] == 'UNK':
d['is_buried'] = None
d['exposure'] = None
else:
acc = float(d['acc']) / float(self.residue_max_acc[d['residue_aa']])
d['exposure'] = acc
if acc < self.cut_off:
d['is_buried'] = True
else:
d['is_buried'] = False
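# A worked example of the burial calculation in compute_burial above (helper name illustrative).
# It assumes the module-level residue_max_acc table with the 'Miller' array, as used in __init__
# and compute_burial; the ACC value below is hypothetical.
def _example_burial_calculation():
    max_acc = residue_max_acc['Miller']['L']            # maximum accessible surface area for leucine
    acc = 42.0                                          # hypothetical DSSP ACC value for one residue
    exposure = acc / float(max_acc)                     # 0.0 = fully buried, 1.0 = fully exposed
    is_buried = exposure < 0.25                         # 0.25 is the default cut_off used above
    print(exposure, is_buried)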
|
class MonomerDSSP(object):
'''
A class wrapper for DSSP.
Note: This class strips a PDB file to each chain and computes the DSSP for that chain alone. If you want to run DSSP
on complexes, subclass this class and do not perform the stripping.
Once initialized, the dssp element of the object should contain a mapping from protein chain IDs to dicts.
The dict associated with a protein chain ID is a dict from PDB residue IDs to details about that residue.
For example:
dssp -> 'A' -> ' 64 ' -> {
'3LC': 'LEU',
'acc': 171,
'bp_1': 0,
'bp_2': 0,
'chain_id': 'I',
'dssp_res_id': 10,
'dssp_residue_aa': 'L',
'exposure': 0.95,
'is_buried': False,
'pdb_res_id': ' 64 ',
'residue_aa': 'L',
'sheet_label': ' ',
'ss': None,
'ss_details': '< '}
Description of the fields:
- residue_aa and 3LC contain the residue 1-letter and 3-letter codes respectively;
- acc is the number of water molecules in contact with this residue (according to DSSP);
- exposure is a normalized measure of exposure where 0.0 denotes total burial and 1.0 denotes total exposure;
- exposure is calculated by dividing the number of water molecules in contact with this residue (according to DSSP) by the residue_max_acc value from the appropriate value table;
- is_buried is either None (could not be determined), True if exposure < cut_off, or False if exposure >= cut_off;
- ss is the assigned secondary structure type, using the DSSP secondary structure types (see basics.py/dssp_secondary_structure_types). This value may be None if no secondary structure was assigned.
Usage:
d = MonomerDSSP.from_RCSB('1HAG')
# access the dict directly
print(d.dssp['I'][' 64 ']['exposure'])
print(d.dssp['I'][' 64 ']['is_buried'])
# use dot notation
print(d.dsspb.I.get(' 64 ').exposure)
print(d.dsspb.I.get(' 64 ').is_buried)
# iterate through the residues
for chain_id, mapping in d:
for residue_id, residue_details in sorted(mapping.items()):
print(residue_id, residue_details['exposure'])
'''
@classmethod
def from_pdb_contents(cls, pdb_contents, cut_off = 0.25, acc_array = 'Miller', tmp_dir = '/tmp', read_only = False):
pass
@classmethod
def from_pdb_filepath(cls, pdb_filepath, cut_off = 0.25, acc_array = 'Miller', tmp_dir = '/tmp', read_only = False):
pass
@classmethod
def from_RCSB(cls, pdb_id, cut_off = 0.25, acc_array = 'Miller', tmp_dir = '/tmp', read_only = False):
pass
def __init__(self, p, cut_off = 0.25, acc_array = 'Miller', tmp_dir = '/tmp', read_only = False):
'''This function strips a PDB file to one chain and then runs DSSP on this new file.
p should be a PDB object (see pdb.py).
'''
pass
def __iter__(self):
pass
def __next__(self):
pass
def __repr__(self):
pass
def compute(self, chain_id):
pass
def parse_output(self, chain_id):
pass
def parse_data_line(self, data_line):
pass
def check_line(self, d):
pass
def compute_burial(self, d):
pass
| 16 | 2 | 10 | 0 | 10 | 1 | 2 | 0.52 | 1 | 11 | 4 | 1 | 9 | 10 | 12 | 12 | 201 | 22 | 125 | 43 | 109 | 65 | 106 | 39 | 93 | 5 | 1 | 3 | 25 |
143,498 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/fasta.py
|
klab.bio.fasta.FASTA
|
class FASTA(dict):
''' This class may replace the one in rcsb.py. I am trying to make the class more generally useful.
This class inherits from dict. This lets us use some useful shortcuts e.g.
f = FASTA.retrieve('1A2C', cache_dir = '~/temp')
print(f['1A2C']['I'])
will print the sequence for chain I. We index by both the PDB ID and chain ID as FASTA files could be
merged and chain letters may not be unique identifiers.
When you print the object, the chains are color-coded such that chains with the same sequence have the same color.
'''
fasta_chain_header_ = "|PDBID|CHAIN|SEQUENCE"
def __init__(self, fasta_contents, strict = True, *args, **kw):
super(FASTA,self).__init__(*args, **kw)
self.fasta_contents = fasta_contents
self.strict = strict
self.itemlist = list(super(FASTA,self).keys())
self.unique_sequences = {}
self.sequences = []
self.sequence_string_length = 120
self._parse(fasta_contents)
self._find_identical_sequences()
def replace_sequence(self, pdb_ID, chain_id, replacement_sequence):
'''Replaces a sequence with another. Typically not useful but I use it in the ResidueRelatrix to make sure that the FASTA and SEQRES sequences match.'''
old_sequences = self.sequences
old_unique_sequences = self.unique_sequences
self.sequences = []
self.unique_sequences = {}
for s in old_sequences:
if s[0] == pdb_ID and s[1] == chain_id:
self._add_sequence(pdb_ID, chain_id, replacement_sequence)
else:
self._add_sequence(s[0], s[1], s[2])
self._find_identical_sequences()
def __add__(self, other):
return FASTA("\n".join([self.fasta_contents, other.fasta_contents]))
@staticmethod
def combine(pdb_ids, cache_dir = None):
if pdb_ids:
FASTAs = [FASTA.retrieve(pdb_id, cache_dir) for pdb_id in pdb_ids]
f = FASTAs[0]
for x in range(1, len(FASTAs)):
f = f + FASTAs[x]
return f
return None
@staticmethod
def retrieve(pdb_id, cache_dir = None, bio_cache = None):
'''Creates a FASTA object by using a cached copy of the file if it exists or by retrieving the file from the RCSB.'''
pdb_id = pdb_id.upper()
if bio_cache:
return FASTA(bio_cache.get_fasta_contents(pdb_id))
# Check to see whether we have a cached copy
if cache_dir:
filename = os.path.join(cache_dir, "%s.fasta" % pdb_id)
if os.path.exists(filename):
return FASTA(read_file(filename))
else:
filename += ".txt"
if os.path.exists(filename):
return FASTA(read_file(filename))
# Get a copy from the RCSB
contents = rcsb.retrieve_fasta(pdb_id)
# Create a cached copy if appropriate
if cache_dir:
write_file(os.path.join(cache_dir, "%s.fasta" % pdb_id), contents)
# Return the object
return FASTA(contents)
### Private methods
def _parse(self, fasta_contents):
sequences = []
chains = [c for c in fasta_contents.split(">") if c]
for c in chains:
if self.strict:
assert(c[4:5] == ":")
assert(c[6:].startswith(FASTA.fasta_chain_header_))
self._add_sequence(c[0:4], c[5:6], c[6 + len(FASTA.fasta_chain_header_):].replace("\n","").strip())
else:
lines = c.split('\n')
header = lines[0]
sequence = ''.join(lines[1:]).replace("\n","").strip()
tokens = header.split('|')
pdbID, chain = tokens[0], ' '
if len(tokens) > 1 and len(tokens[1]) == 1:
chain = tokens[1]
self._add_sequence(pdbID, chain, sequence)
def _add_sequence(self, pdbID, chainID, sequence):
'''This is a 'private' function. If you call it directly, call _find_identical_sequences() afterwards to update identical_sequences.'''
pdbID = pdbID.upper()
self[pdbID] = self.get(pdbID, {})
self[pdbID][chainID] = sequence
self.sequences.append((pdbID, chainID, sequence))
if not self.unique_sequences.get(sequence):
self.unique_sequences[sequence] = visible_colors[len(self.unique_sequences) % len(visible_colors)]
self.identical_sequences = None
def _find_identical_sequences(self):
''' Stores the identical sequences in a map with the structure pdb_id -> chain_id -> List(identical chains)
where the identical chains have the format 'pdb_id:chain_id'
e.g. for 1A2P, we get {'1A2P': {'A': ['1A2P:B', '1A2P:C'], 'C': ['1A2P:A', '1A2P:B'], 'B': ['1A2P:A', '1A2P:C']}}
'''
sequences = self.sequences
identical_sequences = {}
numseq = len(self.sequences)
for x in range(numseq):
for y in range(x + 1, numseq):
pdbID1 = sequences[x][0]
pdbID2 = sequences[y][0]
chain1 = sequences[x][1]
chain2 = sequences[y][1]
if sequences[x][2] == sequences[y][2]:
identical_sequences[pdbID1] = identical_sequences.get(pdbID1, {})
identical_sequences[pdbID1][chain1]=identical_sequences[pdbID1].get(chain1, [])
identical_sequences[pdbID1][chain1].append("%s:%s" % (pdbID2, chain2))
identical_sequences[pdbID2] = identical_sequences.get(pdbID2, {})
identical_sequences[pdbID2][chain2]=identical_sequences[pdbID2].get(chain2, [])
identical_sequences[pdbID2][chain2].append("%s:%s" % (pdbID1, chain1))
self.identical_sequences = identical_sequences
### Public methods
def get_sequences(self, pdb_id = None):
'''Create Sequence objects for each FASTA sequence.'''
sequences = {}
if pdb_id:
for chain_id, sequence in self.get(pdb_id, {}).items():
sequences[chain_id] = Sequence.from_sequence(chain_id, sequence)
else:
for pdb_id, v in self.items():
sequences[pdb_id] = {}
for chain_id, sequence in v.items():
sequences[pdb_id][chain_id] = Sequence.from_sequence(chain_id, sequence)
return sequences
def get_number_of_unique_sequences(self):
return len(self.unique_sequences)
def get_chain_ids(self, pdb_id = None, safe_call = False):
'''If the FASTA file only has one PDB ID, pdb_id does not need to be specified. Otherwise, the list of chains identifiers for pdb_id is returned.'''
if pdb_id == None and len(list(self.keys())) == 1:
return list(self[list(self.keys())[0]].keys())
pdb_id = pdb_id.upper()
if not self.get(pdb_id):
if not safe_call:
raise Exception("FASTA object does not contain sequences for PDB %s." % pdbID)
else:
return []
return list(self[pdb_id].keys())
def match(self, other):
''' This is a noisy terminal-printing function at present since there is no need to make it a proper API function.'''
colortext.message("FASTA Match")
for frompdbID, fromchains in sorted(self.items()):
matched_pdbs = {}
matched_chains = {}
for fromchain, fromsequence in fromchains.items():
for topdbID, tochains in other.items():
for tochain, tosequence in tochains.items():
if fromsequence == tosequence:
matched_pdbs[topdbID] = matched_pdbs.get(topdbID, set())
matched_pdbs[topdbID].add(fromchain)
matched_chains[fromchain] = matched_chains.get(fromchain, [])
matched_chains[fromchain].append((topdbID, tochain))
foundmatches = []
colortext.printf(" %s" % frompdbID, color="silver")
for mpdbID, mchains in matched_pdbs.items():
if mchains == set(fromchains.keys()):
foundmatches.append(mpdbID)
colortext.printf(" PDB %s matched PDB %s on all chains" % (mpdbID, frompdbID), color="white")
if foundmatches:
for fromchain, fromsequence in fromchains.items():
colortext.printf(" %s" % (fromchain), color = "silver")
colortext.printf(" %s" % (fromsequence), color = self.unique_sequences[fromsequence])
mstr = []
for mchain in matched_chains[fromchain]:
if mchain[0] in foundmatches:
mstr.append("%s chain %s" % (mchain[0], mchain[1]))
colortext.printf(" Matches: %s" % ", ".join(mstr))
else:
colortext.error(" No matches found.")
def __repr__(self):
splitsize = self.sequence_string_length
self._find_identical_sequences()
identical_sequences = self.identical_sequences
s = []
s.append(colortext.make("FASTA: Contains these PDB IDs - %s" % ", ".join(list(self.keys())), color="green"))
s.append("Number of unique sequences : %d" % len(self.unique_sequences))
s.append("Chains:")
for pdbID, chains_dict in sorted(self.items()):
s.append(" %s" % pdbID)
for chainID, sequence in chains_dict.items():
s.append(" %s" % chainID)
color = self.unique_sequences[sequence]
split_sequence = [sequence[i:i+splitsize] for i in range(0, len(sequence), splitsize)]
for seqpart in split_sequence:
s.append(colortext.make(" %s" % seqpart, color=color))
if identical_sequences.get(pdbID) and identical_sequences[pdbID].get(chainID):
iseqas = identical_sequences[pdbID][chainID]
s.append(" Identical sequences: %s" % ", ".join(iseqas))
return "\n".join(s)
|
class FASTA(dict):
''' This class may replace the one in rcsb.py. I am trying to make the class more generally useful.
This class inherits from dict. This lets us use some useful shortcuts e.g.
f = FASTA.retrieve('1A2C', cache_dir = '~/temp')
print(f['1A2C']['I'])
will print the sequence for chain I. We index by both the PDB ID and chain ID as FASTA files could be
merged and chain letters may not be unique identifiers.
When you print the object, the chains are color-coded such that chains with the same sequence have the same color.
'''
def __init__(self, fasta_contents, strict = True, *args, **kw):
pass
def replace_sequence(self, pdb_ID, chain_id, replacement_sequence):
'''Replaces a sequence with another. Typically not useful but I use it in the ResidueRelatrix to make sure that the FASTA and SEQRES sequences match.'''
pass
def __add__(self, other):
pass
@staticmethod
def combine(pdb_ids, cache_dir = None):
pass
@staticmethod
def retrieve(pdb_id, cache_dir = None, bio_cache = None):
'''Creates a FASTA object by using a cached copy of the file if it exists or by retrieving the file from the RCSB.'''
pass
def _parse(self, fasta_contents):
pass
def _add_sequence(self, pdbID, chainID, sequence):
'''This is a 'private' function. If you call it directly, call _find_identical_sequences() afterwards to update identical_sequences.'''
pass
def _find_identical_sequences(self):
''' Stores the identical sequences in a map with the structure pdb_id -> chain_id -> List(identical chains)
where the identical chains have the format 'pdb_id:chain_id'
e.g. for 1A2P, we get {'1A2P': {'A': ['1A2P:B', '1A2P:C'], 'C': ['1A2P:A', '1A2P:B'], 'B': ['1A2P:A', '1A2P:C']}}
'''
pass
def get_sequences(self, pdb_id = None):
'''Create Sequence objects for each FASTA sequence.'''
pass
def get_number_of_unique_sequences(self):
pass
def get_chain_ids(self, pdb_id = None, safe_call = False):
'''If the FASTA file only has one PDB ID, pdb_id does not need to be specified. Otherwise, the list of chain identifiers for pdb_id is returned.'''
pass
def match(self, other):
''' This is a noisy terminal-printing function at present since there is no need to make it a proper API function.'''
pass
def __repr__(self):
pass
| 16 | 8 | 14 | 1 | 13 | 1 | 4 | 0.14 | 1 | 6 | 1 | 0 | 11 | 7 | 13 | 40 | 220 | 29 | 167 | 71 | 151 | 24 | 159 | 69 | 145 | 12 | 2 | 5 | 51 |
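A minimal, self-contained sketch of the pairwise grouping idea used by _find_identical_sequences above, assuming plain (pdb_id, chain_id, sequence) triples instead of the full FASTA object; the sample PDB ID and sequences are invented for illustration.

def find_identical_sequences(sequences):
    # sequences: list of (pdb_id, chain_id, sequence) triples.
    # Returns pdb_id -> chain_id -> ['pdb_id:chain_id', ...] for chains whose sequences are equal.
    identical = {}
    for i in range(len(sequences)):
        for j in range(i + 1, len(sequences)):
            p1, c1, s1 = sequences[i]
            p2, c2, s2 = sequences[j]
            if s1 == s2:
                identical.setdefault(p1, {}).setdefault(c1, []).append('%s:%s' % (p2, c2))
                identical.setdefault(p2, {}).setdefault(c2, []).append('%s:%s' % (p1, c1))
    return identical

# Toy example (data invented):
seqs = [('1XYZ', 'A', 'MKT'), ('1XYZ', 'B', 'MKT'), ('1XYZ', 'C', 'GGG')]
print(find_identical_sequences(seqs))
# {'1XYZ': {'A': ['1XYZ:B'], 'B': ['1XYZ:A']}}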
143,499 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/fragments/generate_fragments.py
|
klab.bio.fragments.generate_fragments.FastaException
|
class FastaException(Exception): pass
|
class FastaException(Exception):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
143,500 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/fragments/generate_fragments.py
|
klab.bio.fragments.generate_fragments.JobInput
|
class JobInput(object):
def __init__(self, fasta_file, pdb_id, chain):
self.fasta_file = fasta_file
self.pdb_id = pdb_id
self.chain = chain
|
class JobInput(object):
def __init__(self, fasta_file, pdb_id, chain):
pass
| 2 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 3 | 1 | 1 | 6 | 1 | 5 | 5 | 3 | 0 | 5 | 5 | 3 | 1 | 1 | 0 | 1 |
143,501 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/fragments/generate_fragments.py
|
klab.bio.fragments.generate_fragments.MultiOption
|
class MultiOption(Option):
'''From http://docs.python.org/2/library/optparse.html'''
ACTIONS = Option.ACTIONS + ("extend",)
STORE_ACTIONS = Option.STORE_ACTIONS + ("extend",)
TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",)
ALWAYS_TYPED_ACTIONS = Option.ALWAYS_TYPED_ACTIONS + ("extend",)
def take_action(self, action, dest, opt, value, values, parser):
if action == "extend":
lvalue = value.split(",")
values.ensure_value(dest, []).extend(lvalue)
else:
Option.take_action(self, action, dest, opt, value, values, parser)
|
class MultiOption(Option):
'''From http://docs.python.org/2/library/optparse.html'''
def take_action(self, action, dest, opt, value, values, parser):
pass
| 2 | 1 | 6 | 0 | 6 | 0 | 2 | 0.09 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 19 | 13 | 1 | 11 | 7 | 9 | 1 | 10 | 7 | 8 | 2 | 1 | 1 | 2 |
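A short usage sketch of the optparse "extend" action pattern shown by MultiOption above. The class name, the --chains option and the argument list here are illustrative, not part of generate_fragments.py.

from optparse import OptionParser, Option

class MultiOptionSketch(Option):
    # Same comma-separated "extend" action idea as MultiOption above.
    ACTIONS = Option.ACTIONS + ("extend",)
    STORE_ACTIONS = Option.STORE_ACTIONS + ("extend",)
    TYPED_ACTIONS = Option.TYPED_ACTIONS + ("extend",)
    ALWAYS_TYPED_ACTIONS = Option.ALWAYS_TYPED_ACTIONS + ("extend",)

    def take_action(self, action, dest, opt, value, values, parser):
        if action == "extend":
            # Split the comma-separated value and append the pieces to the destination list.
            values.ensure_value(dest, []).extend(value.split(","))
        else:
            Option.take_action(self, action, dest, opt, value, values, parser)

parser = OptionParser(option_class=MultiOptionSketch)
parser.add_option("--chains", action="extend", type="string", dest="chains")
options, args = parser.parse_args(["--chains", "A,B", "--chains", "C"])
print(options.chains)  # ['A', 'B', 'C']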
143,502 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/retrospect.py
|
klab.retrospect.SimpleScript
|
class SimpleScript(Script):
def __init__(self, allowedLapse):
self.allowedLapse = allowedLapse
def check(self, dateOfLastSuccess, extralapse):
lapse = self.getDaysSinceLastSuccess(dateOfLastSuccess) - extralapse
if lapse > self.allowedLapse:
return False
else:
return True
def getDaysSinceLastSuccess(self, dateOfLastSuccess):
td = (datetime.datetime.today() - dateOfLastSuccess)
return td.days + (float(td.seconds) / float(SECONDS_IN_A_DAY))
|
class SimpleScript(Script):
def __init__(self, allowedLapse):
pass
def check(self, dateOfLastSuccess, extralapse):
pass
def getDaysSinceLastSuccess(self, dateOfLastSuccess):
pass
| 4 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 1 | 2 | 0 | 0 | 3 | 1 | 3 | 5 | 15 | 3 | 12 | 7 | 8 | 0 | 11 | 7 | 7 | 2 | 2 | 1 | 4 |
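For reference, a tiny standalone sketch of the fractional-day arithmetic used by getDaysSinceLastSuccess above. SECONDS_IN_A_DAY is assumed to be 86400 (it is defined elsewhere in retrospect.py); the dates are invented.

import datetime

SECONDS_IN_A_DAY = 86400  # assumed value

def days_since(date_of_last_success, now=None):
    # Express the elapsed time as a float number of days, e.g. 1.5 for 36 hours.
    now = now or datetime.datetime.today()
    td = now - date_of_last_success
    return td.days + (float(td.seconds) / float(SECONDS_IN_A_DAY))

print(days_since(datetime.datetime(2020, 1, 1, 0, 0), now=datetime.datetime(2020, 1, 2, 12, 0)))  # 1.5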
143,503 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/clustalo.py
|
klab.bio.clustalo.PDBUniParcSequenceAligner
|
class PDBUniParcSequenceAligner(object):
### Constructor methods ###
def __init__(self, pdb_id, cache_dir = None, cut_off = 98.0, sequence_types = {}, replacement_pdb_id = None, added_uniprot_ACs = [], restrict_to_uniparc_values = []):
''' The sequences are matched up to a percentage identity specified by cut_off (0.0 - 100.0).
sequence_types e.g. {'A' : 'Protein', 'B' : 'RNA',...} should be passed in if the PDB file contains DNA or RNA chains. Otherwise, the aligner will attempt to match their sequences.
replacement_pdb_id is used to get a mapping from deprecated PDB IDs to uniparc sequences. It should be the new PDB ID corresponding to the obsolete pdb_id.
The PDB and PDBML classes parse the deprecation information from the PDB file or XML respectively and store the new PDB ID in their replacement_pdb_id variable.
'''
# todo: We could speed up the matching by only matching unique sequences rather than matching all sequences
# Remove any deprecated UniProt AC records that may be stored in PDB files. This depends on the cache directory being up to date.
added_uniprot_ACs = list(set(added_uniprot_ACs))
for AC in added_uniprot_ACs:
try:
UniProtACEntry(AC, cache_dir = cache_dir)
except:
added_uniprot_ACs.remove(AC)
self.pdb_id = pdb_id
self.replacement_pdb_id = replacement_pdb_id
self.cut_off = cut_off
self.added_uniprot_ACs = added_uniprot_ACs
self.sequence_types = sequence_types
self.restrict_to_uniparc_values = list(map(str, restrict_to_uniparc_values)) # can be used to remove ambiguity - see comments in relatrix.py about this
assert(0.0 <= cut_off <= 100.0)
# Retrieve the FASTA record
f = FASTA.retrieve(pdb_id, cache_dir = cache_dir)
self.identical_sequences = {}
if f.identical_sequences.get(pdb_id):
self.identical_sequences = f.identical_sequences[pdb_id]
f = f[pdb_id]
self.chains = sorted(f.keys())
self.fasta = f
self.clustal_matches = dict.fromkeys(self.chains, None)
self.substring_matches = dict.fromkeys(self.chains, None)
self.alignment = {}
self.seqres_to_uniparc_sequence_maps = {}
self.uniparc_sequences = {}
self.uniparc_objects = {}
self.equivalence_fiber = {}
self.representative_chains = []
# Retrieve the list of associated UniParc entries
self._get_uniparc_sequences_through_uniprot(cache_dir)
# Reduce the set of chains to a set of chains where there is exactly one chain from the equivalence class where equivalence is defined as sequence equality
# This is used later to reduce the amount of matching we need to do by not matching the same sequences again
self._determine_representative_chains()
# All of the above only needs to be run once
# The alignment can be run multiple times with different cut-offs
# Run an initial alignment with clustal using the supplied cut-off
self._align_with_clustal()
self._align_with_substrings()
self._check_alignments()
self._get_residue_mapping()
### Object methods ###
def __getitem__(self, chain):
return self.alignment.get(chain)
def __repr__(self):
s = []
for c in sorted(self.chains):
if self.clustal_matches.get(c):
match_string = ['%s (%.2f%%)' % (k, v) for k, v in sorted(iter(self.clustal_matches[c].items()), key = lambda x: x[1])] # this list should have at most one element unless the matching did not go as expected
s.append("%s -> %s" % (c, ", ".join(match_string)))
elif self.alignment.get(c):
s.append("%s -> %s" % (c, self.alignment[c]))
else:
s.append("%s -> ?" % c)
return "\n".join(s)
### API methods ###
def realign(self, cut_off, chains_to_skip = set()):
''' Alter the cut-off and run alignment again. This is much quicker than creating a new PDBUniParcSequenceAligner
object as the UniParcEntry creation etc. in the constructor does not need to be repeated.
The chains_to_skip argument (a Set) allows us to skip chains that were already matched which speeds up the alignment even more.
'''
if cut_off != self.cut_off:
self.cut_off = cut_off
# Wipe any existing information for chains not in chains_to_skip
for c in self.chains:
if c not in chains_to_skip:
self.clustal_matches[c] = None
self.substring_matches[c] = None
if self.alignment.get(c):
del self.alignment[c]
if self.seqres_to_uniparc_sequence_maps.get(c):
del self.seqres_to_uniparc_sequence_maps[c]
# Run alignment for the remaining chains
self._align_with_clustal(chains_to_skip = chains_to_skip)
self._align_with_substrings(chains_to_skip = chains_to_skip)
self._check_alignments(chains_to_skip = chains_to_skip)
self._get_residue_mapping(chains_to_skip = chains_to_skip)
def get_alignment_percentage_identity(self, chain):
vals = list(self.clustal_matches[chain].values())
if len(vals) == 1:
return vals[0]
return None
def get_uniparc_object(self, chain):
if self.alignment.get(chain):
return self.uniparc_objects.get(self.alignment[chain])
return None
### Private methods ###
def _determine_representative_chains(self):
''' Quotient the chains to get equivalence classes of chains. These will be used for the actual mapping.'''
# todo: This logic should be moved into the FASTA class or a more general module (maybe a fast implementation exists which uses a C/C++ library?) but at present it is easier to write here since we do not need to worry about other PDB IDs.
equivalence_fiber = {}
matched_chains = set()
for chain_id, equivalent_chains in self.identical_sequences.items():
matched_chains.add(chain_id)
equivalent_chain_ids = set()
for equivalent_chain in equivalent_chains:
assert(len(equivalent_chain) == 6)
assert((equivalent_chain[:5] == '%s_' % self.pdb_id) or (equivalent_chain[:5] == '%s:' % self.pdb_id)) # ClustalW changes e.g. 1KI1:A to 1KI1_A in its output
equivalent_chain_ids.add(equivalent_chain[5])
found = False
for equivalent_chain_id in equivalent_chain_ids:
if equivalence_fiber.get(equivalent_chain_id):
found = True
assert(equivalence_fiber[equivalent_chain_id] == equivalent_chain_ids.union(set([chain_id])))
break
if not found:
equivalence_fiber[chain_id] = set(equivalent_chain_ids)
equivalence_fiber[chain_id].add(chain_id)
for c in self.chains:
if c not in matched_chains:
equivalence_fiber[c] = set([c])
self.equivalence_fiber = equivalence_fiber
self.representative_chains = list(equivalence_fiber.keys())
# we could remove each chain from its image in the fiber which would be marginally more efficient in the logic below but that destroys the reflexivity in the equivalence class. Mathematics would cry a little.
#pprint.pprint(self.representative_chains)
#pprint.pprint(self.equivalence_fiber)
def _get_uniparc_sequences_through_uniprot(self, cache_dir):
# Retrieve the related UniParc sequences
pdb_id = self.pdb_id
replacement_pdb_id = self.replacement_pdb_id
# This is the usual path. We get a PDB->UniProt/UniParc mapping using the UniProt web API. This usually works
# if there are matches.
# todo: We *either* use the UniProt web API *or (exclusively)* use the DBREF entries. In cases where the UniProt API has mappings for, say chain A in a PDB file but not chain B but the DBREF maps B, we will not have a mapping for B. In this case, a hybrid method would be best.
uniparc_sequences = {}
uniparc_objects = {}
mapping_pdb_id = pdb_id
pdb_uniparc_mapping = pdb_to_uniparc([pdb_id], cache_dir = cache_dir, manual_additions = {self.pdb_id : self.added_uniprot_ACs}) # we could pass both pdb_id and replacement_pdb_id here but I prefer the current (longer) logic at present
if not pdb_uniparc_mapping.get(pdb_id):
if replacement_pdb_id:
mapping_pdb_id = replacement_pdb_id
pdb_uniparc_mapping = pdb_to_uniparc([replacement_pdb_id], cache_dir = cache_dir)
dbref_exists = False
if not pdb_uniparc_mapping:
# We could not get a UniProt mapping using the UniProt web API. Instead, try using the PDB DBREF fields.
# This fixes some cases e.g. 3ZKB (at the time of writing) where the UniProt database is not up-to-date.
uniprot_ACs = set()
p = PDB.retrieve(pdb_id, cache_dir = cache_dir)
uniprot_mapping = p.get_DB_references().get(pdb_id).get('UNIPROT')
if uniprot_mapping:
dbref_exists = True
for chain_id, details in uniprot_mapping.items():
uniprot_ACs.add(details['dbAccession'])
if not(uniprot_ACs) and replacement_pdb_id:
p = PDB.retrieve(replacement_pdb_id, cache_dir = cache_dir)
uniprot_mapping = p.get_DB_references().get(replacement_pdb_id).get('UNIPROT')
if uniprot_mapping:
for chain_id, details in uniprot_mapping.items():
uniprot_ACs.add(details['dbAccession'])
mapping_pdb_id = replacement_pdb_id
else:
mapping_pdb_id = pdb_id
pdb_uniparc_mapping = self._get_uniparc_sequences_through_uniprot_ACs(mapping_pdb_id, list(uniprot_ACs), cache_dir)
# If there is no mapping whatsoever found from PDB chains to UniParc sequences then we cannot continue. Again, the hybrid method mentioned in the to-do above would solve some of these cases.
if not pdb_uniparc_mapping:
extra_str = ''
if not(dbref_exists):
extra_str = ' No DBREF records were found in the PDB file.'
if replacement_pdb_id:
raise NoPDBUniParcMappingExists('No PDB->UniParc mapping was found for %s (obsolete) or its replacement %s.%s' % (pdb_id, replacement_pdb_id, extra_str))
else:
raise NoPDBUniParcMappingExists('No PDB->UniParc mapping was found for %s.%s' % (pdb_id, extra_str))
for upe in pdb_uniparc_mapping[mapping_pdb_id]:
uniparc_sequences[upe.UniParcID] = Sequence.from_sequence(upe.UniParcID, upe.sequence)
uniparc_objects[upe.UniParcID] = upe
#print(upe.UniParcID, upe.sequence)
self.uniparc_sequences = uniparc_sequences
self.uniparc_objects = uniparc_objects
def _get_uniparc_sequences_through_uniprot_ACs(self, mapping_pdb_id, uniprot_ACs, cache_dir):
'''Get the UniParc sequences associated with the UniProt accession number.'''
# Map the UniProt ACs to the UniParc IDs
m = uniprot_map('ACC', 'UPARC', uniprot_ACs, cache_dir = cache_dir)
UniParcIDs = []
for _, v in m.items():
UniParcIDs.extend(v)
# Create a mapping from the mapping_pdb_id to the UniParcEntry objects. This must match the return type from pdb_to_uniparc.
mapping = {mapping_pdb_id : []}
for UniParcID in UniParcIDs:
entry = UniParcEntry(UniParcID, cache_dir = cache_dir)
mapping[mapping_pdb_id].append(entry)
return mapping
def _align_with_clustal(self, chains_to_skip = set()):
if not(self.uniparc_sequences):
raise NoPDBUniParcMappingExists("No matches were found to any UniParc sequences.")
for c in self.representative_chains:
# Skip specified chains
if c not in chains_to_skip:
# Only align protein chains
chain_type = self.sequence_types.get(c, 'Protein')
#print('chain_type', chain_type, c)
if chain_type == 'Protein' or chain_type == 'Protein skeleton':
pdb_chain_id = '%s_%s' % (self.pdb_id, c)
sa = SequenceAligner()
try:
sa.add_sequence(pdb_chain_id, self.fasta[c])
except MalformedSequenceException:
self.clustal_matches[c] = {}
continue
for uniparc_id, uniparc_sequence in sorted(self.uniparc_sequences.items()):
sa.add_sequence(uniparc_id, str(uniparc_sequence))
best_matches = sa.align()
#colortext.pcyan(sa.alignment_output)
self.clustal_matches[c] = sa.get_best_matches_by_id(pdb_chain_id, cut_off = self.cut_off)
#colortext.plightpurple(self.cut_off)
#pprint.pprint(sa.get_best_matches_by_id(pdb_chain_id, cut_off = self.cut_off))
#colortext.plightpurple(60.0)
#pprint.pprint(sa.get_best_matches_by_id(pdb_chain_id, cut_off = 60.0))
else:
# Do not try to match DNA or RNA
self.clustal_matches[c] = {}
# Restrict the matches to a given set of UniParc IDs. This can be used to remove ambiguity when the correct mapping has been determined e.g. from the SIFTS database.
if self.restrict_to_uniparc_values:
for c in self.representative_chains:
if len(set(map(str, list(self.clustal_matches[c].keys()))).intersection(set(self.restrict_to_uniparc_values))) > 0:
# Only restrict in cases where there is at least one match in self.restrict_to_uniparc_values
# Otherwise, chains which are not considered in self.restrict_to_uniparc_values may throw away valid matches
# e.g. when looking for structures related to 1KTZ (A -> P10600 -> UPI000000D8EC, B -> P37173 -> UPI000011DD7E),
# we find the close match 2PJY. However, 2PJY has 3 chains: A -> P10600, B -> P37173, and C -> P36897 -> UPI000011D62A
restricted_matches = dict((str(k), self.clustal_matches[c][k]) for k in list(self.clustal_matches[c].keys()) if str(k) in self.restrict_to_uniparc_values)
if len(restricted_matches) != len(self.clustal_matches[c]):
removed_matches = sorted(set(self.clustal_matches[c].keys()).difference(set(restricted_matches)))
#todo: add silent option to class else colortext.pcyan('Ignoring {0} as those chains were not included in the list self.restrict_to_uniparc_values ({1}).'.format(', '.join(removed_matches), ', '.join(self.restrict_to_uniparc_values)))
self.clustal_matches[c] = restricted_matches
# Use the representatives' alignments for their respective equivalent classes
for c_1, related_chains in self.equivalence_fiber.items():
for c_2 in related_chains:
self.clustal_matches[c_2] = self.clustal_matches[c_1]
def _align_with_substrings(self, chains_to_skip = set()):
'''Simple substring-based matching'''
for c in self.representative_chains:
# Skip specified chains
if c not in chains_to_skip:
#colortext.pcyan(c)
#colortext.warning(self.fasta[c])
fasta_sequence = self.fasta[c]
substring_matches = {}
for uniparc_id, uniparc_sequence in sorted(self.uniparc_sequences.items()):
uniparc_sequence = str(uniparc_sequence)
idx = uniparc_sequence.find(fasta_sequence)
if idx != -1:
substring_matches[uniparc_id] = 0
elif len(fasta_sequence) > 30:
idx = uniparc_sequence.find(fasta_sequence[5:-5])
if idx != -1:
substring_matches[uniparc_id] = 5
else:
idx = uniparc_sequence.find(fasta_sequence[7:-7])
if idx != -1:
substring_matches[uniparc_id] = 7
elif len(fasta_sequence) > 15:
idx = uniparc_sequence.find(fasta_sequence[3:-3])
if idx != -1:
substring_matches[uniparc_id] = 3
self.substring_matches[c] = substring_matches
# Restrict the matches to a given set of UniParc IDs. This can be used to remove ambiguity when the correct mapping has been determined e.g. from the SIFTS database.
colortext.pcyan('*' * 100)
pprint.pprint(self.substring_matches)
if self.restrict_to_uniparc_values:
for c in self.representative_chains:
#print('HERE!')
#print(c)
if len(set(map(str, list(self.substring_matches[c].keys()))).intersection(set(self.restrict_to_uniparc_values))) > 0:
# Only restrict in cases where there is at least one match in self.restrict_to_uniparc_values
# Otherwise, chains which are not considered in self.restrict_to_uniparc_values may throw away valid matches
# e.g. when looking for structures related to 1KTZ (A -> P10600 -> UPI000000D8EC, B -> P37173 -> UPI000011DD7E),
# we find the close match 2PJY. However, 2PJY has 3 chains: A -> P10600, B -> P37173, and C -> P36897 -> UPI000011D62A
restricted_matches = dict((str(k), self.substring_matches[c][k]) for k in list(self.substring_matches[c].keys()) if str(k) in self.restrict_to_uniparc_values)
if len(restricted_matches) != len(self.substring_matches[c]):
removed_matches = sorted(set(self.substring_matches[c].keys()).difference(set(restricted_matches)))
# todo: see above re:quiet colortext.pcyan('Ignoring {0} as those chains were not included in the list self.restrict_to_uniparc_values ({1}).'.format(', '.join(removed_matches), ', '.join(self.restrict_to_uniparc_values)))
self.substring_matches[c] = restricted_matches
#pprint.pprint(self.substring_matches)
#colortext.pcyan('*' * 100)
# Use the representatives' alignments for their respective equivalent classes
for c_1, related_chains in self.equivalence_fiber.items():
for c_2 in related_chains:
self.substring_matches[c_2] = self.substring_matches[c_1]
def _check_alignments(self, chains_to_skip = set()):
max_expected_matches_per_chain = 1
for c in self.representative_chains:
# Skip specified chains
if c not in chains_to_skip:
if not(len(self.clustal_matches[c]) <= max_expected_matches_per_chain):
raise MultipleAlignmentException(c, max_expected_matches_per_chain, len(self.clustal_matches[c]), self.clustal_matches[c])
#colortext.message('Chain {0}'.format(c))
#pprint.pprint(self.substring_matches)
#pprint.pprint(self.clustal_matches)
#pprint.pprint(self.substring_matches[c])
#pprint.pprint(self.clustal_matches[c])
if not (len(self.substring_matches[c]) == 1 or len(self.substring_matches[c]) <= len(self.clustal_matches[c])):
#pprint.pprint(self.clustal_matches[c])
#pprint.pprint(self.substring_matches[c])
match_list = sorted(set((list(self.clustal_matches[c].keys()) or []) + (list(self.substring_matches[c].keys()) or [])))
raise MultipleAlignmentException(c, max_expected_matches_per_chain, max(len(self.substring_matches[c]), len(self.clustal_matches[c])), match_list, msg = 'More matches were found using the naive substring matching than the Clustal matching. Try lowering the cut-off (currently set to {0}).'.format(self.cut_off))
if self.clustal_matches[c]:
if not (len(list(self.clustal_matches[c].keys())) == max_expected_matches_per_chain):
match_list = sorted(set((list(self.clustal_matches[c].keys()) or []) + (list(self.substring_matches[c].keys()) or [])))
raise MultipleAlignmentException(c, max_expected_matches_per_chain, len(list(self.clustal_matches[c].keys())), match_list)
if self.substring_matches[c]:
if list(self.substring_matches[c].keys()) != list(self.clustal_matches[c].keys()):
print("ERROR: An inconsistent alignment was found between Clustal Omega and a substring search.")
else:
self.alignment[c] = list(self.clustal_matches[c].keys())[0]
else:
self.alignment[c] = list(self.clustal_matches[c].keys())[0]
# Use the representatives' alignments for their respective equivalent classes. This saves memory as the same SequenceMap is used.
for c_1, related_chains in self.equivalence_fiber.items():
for c_2 in related_chains:
if self.alignment.get(c_1):
self.alignment[c_2] = self.alignment[c_1]
def _get_residue_mapping(self, chains_to_skip = set()):
'''Creates a mapping between the residues of the chains and the associated UniParc entries.'''
for c in self.representative_chains:
# Skip specified chains
if c not in chains_to_skip:
if self.alignment.get(c):
uniparc_entry = self.get_uniparc_object(c)
sa = SequenceAligner()
sa.add_sequence(c, self.fasta[c])
sa.add_sequence(uniparc_entry.UniParcID, uniparc_entry.sequence)
sa.align()
residue_mapping, residue_match_mapping = sa.get_residue_mapping()
# Create a SequenceMap
s = PDBUniParcSequenceMap()
assert(sorted(residue_mapping.keys()) == sorted(residue_match_mapping.keys()))
for k, v in residue_mapping.items():
s.add(k, (uniparc_entry.UniParcID, v), residue_match_mapping[k])
self.seqres_to_uniparc_sequence_maps[c] = s
else:
self.seqres_to_uniparc_sequence_maps[c] = PDBUniParcSequenceMap()
# Use the representatives' alignments for their respective equivalent classes. This saves memory as the same SequenceMap is used.
for c_1, related_chains in self.equivalence_fiber.items():
for c_2 in related_chains:
if self.seqres_to_uniparc_sequence_maps.get(c_1):
self.seqres_to_uniparc_sequence_maps[c_2] = self.seqres_to_uniparc_sequence_maps[c_1]
|
class PDBUniParcSequenceAligner(object):
def __init__(self, pdb_id, cache_dir = None, cut_off = 98.0, sequence_types = {}, replacement_pdb_id = None, added_uniprot_ACs = [], restrict_to_uniparc_values = []):
''' The sequences are matched up to a percentage identity specified by cut_off (0.0 - 100.0).
sequence_types e.g. {'A' : 'Protein', 'B' : 'RNA',...} should be passed in if the PDB file contains DNA or RNA chains. Otherwise, the aligner will attempt to match their sequences.
replacement_pdb_id is used to get a mapping from deprecated PDB IDs to uniparc sequences. It should be the new PDB ID corresponding to the obsolete pdb_id.
The PDB and PDBML classes parse the deprecation information from the PDB file or XML respectively and store the new PDB ID in their replacement_pdb_id variable.
'''
pass
def __getitem__(self, chain):
pass
def __repr__(self):
pass
def realign(self, cut_off, chains_to_skip = set()):
''' Alter the cut-off and run alignment again. This is much quicker than creating a new PDBUniParcSequenceAligner
object as the UniParcEntry creation etc. in the constructor does not need to be repeated.
The chains_to_skip argument (a Set) allows us to skip chains that were already matched which speeds up the alignment even more.
'''
pass
def get_alignment_percentage_identity(self, chain):
pass
def get_uniparc_object(self, chain):
pass
def _determine_representative_chains(self):
''' Quotient the chains to get equivalence classes of chains. These will be used for the actual mapping.'''
pass
def _get_uniparc_sequences_through_uniprot(self, cache_dir):
pass
def _get_uniparc_sequences_through_uniprot_ACs(self, mapping_pdb_id, uniprot_ACs, cache_dir):
'''Get the UniParc sequences associated with the UniProt accession number.'''
pass
def _align_with_clustal(self, chains_to_skip = set()):
pass
def _align_with_substrings(self, chains_to_skip = set()):
'''Simple substring-based matching'''
pass
def _check_alignments(self, chains_to_skip = set()):
pass
def _get_residue_mapping(self, chains_to_skip = set()):
'''Creates a mapping between the residues of the chains and the associated UniParc entries.'''
pass
| 14 | 6 | 29 | 3 | 20 | 6 | 7 | 0.34 | 1 | 15 | 10 | 0 | 13 | 17 | 13 | 13 | 420 | 76 | 260 | 97 | 246 | 88 | 249 | 97 | 235 | 16 | 1 | 6 | 92 |
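The representative-chain logic in _determine_representative_chains above amounts to quotienting chains by sequence equality so that each distinct sequence is aligned only once. A minimal sketch of that idea over a plain chain_id -> sequence dict (a toy stand-in for the FASTA/ClustalW data the class actually consumes; the values are invented):

def equivalence_classes(chain_sequences):
    # chain_sequences: dict chain_id -> sequence string.
    by_sequence = {}
    for chain_id in sorted(chain_sequences):
        by_sequence.setdefault(chain_sequences[chain_id], set()).add(chain_id)
    # Use the alphabetically first chain of each class as its representative.
    return {sorted(chains)[0]: chains for chains in by_sequence.values()}

fiber = equivalence_classes({'A': 'MKT', 'B': 'MKT', 'C': 'GGG'})
print(fiber)  # e.g. {'A': {'A', 'B'}, 'C': {'C'}} (set element order may vary)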
143,504 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/pdb.py
|
klab.bio.pdb.NonCanonicalResidueException
|
class NonCanonicalResidueException(Exception): pass
|
class NonCanonicalResidueException(Exception):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
143,505 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/retrospect.py
|
klab.retrospect.Script
|
class Script(object):
def __init__(self):
raise Exception(
"This is an abstract class. Define a concrete derived class.")
def check(self, dateOfLastSuccess):
raise Exception(
"This is an abstract class. Define a concrete derived class.")
|
class Script(object):
def __init__(self):
pass
def check(self, dateOfLastSuccess):
pass
| 3 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 1 | 0 | 1 | 2 | 0 | 2 | 2 | 7 | 2 | 5 | 3 | 2 | 0 | 5 | 3 | 2 | 1 | 1 | 0 | 2 |
143,506 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/retrospect.py
|
klab.retrospect.ExpectedScripts
|
class ExpectedScripts(object):
def __init__(self):
self.scripts = {}
def add(self, scriptname, script):
self.scripts[scriptname] = script
def get(self, scriptname):
return self.scripts.get(scriptname)
def checkAll(self):
result = True
for k in list(self.scripts.keys()):
result = self.check(k) and result
def check(self, scriptname, dateOfLastSuccess, criteria):
return self.scripts[scriptname].check(dateOfLastSuccess, criteria)
def SymmetricDifference(self, scriptnames):
'''Takes in a set, list, or tuple scriptnames and returns the symmetric difference (as a list)
of scriptnames and the stored names.'''
scriptnames = set(scriptnames)
myscripts = set(self.scripts.keys())
return list(scriptnames.difference(myscripts).union(myscripts.difference(scriptnames)))
|
class ExpectedScripts(object):
def __init__(self):
pass
def add(self, scriptname, script):
pass
def get(self, scriptname):
pass
def checkAll(self):
pass
def check(self, scriptname, dateOfLastSuccess, criteria):
pass
def SymmetricDifference(self, scriptnames):
'''Takes in a set, list, or tuple scriptnames and returns the symmetric difference (as a list)
of scriptnames and the stored names.'''
pass
| 7 | 1 | 3 | 0 | 3 | 0 | 1 | 0.12 | 1 | 2 | 0 | 0 | 6 | 1 | 6 | 6 | 25 | 6 | 17 | 11 | 10 | 2 | 17 | 11 | 10 | 2 | 1 | 1 | 7 |
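SymmetricDifference above returns the script names that appear in exactly one of the two name sets. A quick worked example with invented script names; Python's built-in set.symmetric_difference performs the same operation as the union-of-differences expression in the method:

expected = {'Nightly backup', 'Weekly offsite'}
seen = {'Nightly backup', 'Restore'}
# Names in one set but not both.
print(sorted(expected.symmetric_difference(seen)))  # ['Restore', 'Weekly offsite']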
143,507 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/basics.py
|
klab.bio.basics.SKEMPIMutation
|
class SKEMPIMutation(ChainMutation):
'''Adds a Location member to the ChainMutation class.'''
def __init__(self, WildTypeAA, ResidueID, MutantAA, Location, Chain = None):
super(SKEMPIMutation, self).__init__(WildTypeAA, ResidueID, MutantAA, Chain = Chain, SecondaryStructurePosition = None, AccessibleSurfaceArea = None)
self.Location = Location
def __repr__(self):
return "%s:%s %s->%s in %s" % (self.Chain, self.WildTypeAA, str(self.ResidueID), self.MutantAA, self.Location)
|
class SKEMPIMutation(ChainMutation):
'''Adds a Location member to the ChainMutation class.'''
def __init__(self, WildTypeAA, ResidueID, MutantAA, Location, Chain = None):
pass
def __repr__(self):
pass
| 3 | 1 | 3 | 0 | 3 | 0 | 1 | 0.17 | 1 | 2 | 0 | 0 | 2 | 1 | 2 | 10 | 9 | 2 | 6 | 4 | 3 | 1 | 6 | 4 | 3 | 1 | 4 | 0 | 2 |
143,508 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/basics.py
|
klab.bio.basics.Sequence
|
class Sequence(object):
''' A class to hold a list of Residues in the same chain.
This class maintains two elements:
1) order List(ID) : a list of residue IDs in the order of addition;
2) sequence Dict(ID->Residue) : a map from residue IDs to a Residue object (chain, residue ID, residue type, sequence_type).
'''
def __init__(self, sequence_type = None):
self.order = []
self.sequence = {}
self.sequence_type = sequence_type
if sequence_type:
assert(sequence_type == 'Protein' or sequence_type == 'DNA' or sequence_type == 'RNA' or sequence_type == 'Protein skeleton' or sequence_type == 'Ligand' or sequence_type == 'Unknown')
self.special_insertion_count = 1
def __iter__(self):
self._iter_index = 0
return self
def __getitem__(self, item):
return self.sequence[item]
def __len__(self):
return len(self.sequence)
def ids(self):
return list(self.sequence.keys())
def __next__(self): # todo: This is __next__ in Python 3.x
try:
id = self.order[self._iter_index]
self._iter_index += 1
return id, self.sequence[id]
except:
raise StopIteration
def __eq__(self, other):
'''Equality is defined on residue ID and type.'''
num_res = len(self.order)
if num_res != len(other.order):
return False
for x in range(num_res):
if self.order[x] != other.order[x]:
return False
if self.sequence[self.order[x]] != other.sequence[other.order[x]]:
return False
return True
def add(self, r):
'''Takes an id and a Residue r and adds them to the Sequence.'''
id = r.get_residue_id()
if self.order:
last_id = self.order[-1]
# KAB - allow for multiresidue noncanonicals
if id in self.order:
raise colortext.Exception('Warning: using code to "allow for multiresidue noncanonicals" - check this case manually.')
id = '%s.%d'%(str(id),self.special_insertion_count)
self.special_insertion_count += 1
assert(r.Chain == self.sequence[last_id].Chain)
assert(r.residue_type == self.sequence[last_id].residue_type)
self.order.append(id)
self.sequence[id] = r
def set_type(self, sequence_type):
'''Set the type of a Sequence if it has not been set.'''
if not(self.sequence_type):
for id, r in self.sequence.items():
assert(r.residue_type == None)
r.residue_type = sequence_type
self.sequence_type = sequence_type
def __repr__(self):
sequence = self.sequence
return "".join([sequence[id].ResidueAA for id in self.order])
@staticmethod
def from_sequence(chain, list_of_residues, sequence_type = None):
'''Takes in a chain identifier and protein sequence and returns a Sequence object of Residues, indexed from 1.'''
s = Sequence(sequence_type)
count = 1
for ResidueAA in list_of_residues:
s.add(Residue(chain, count, ResidueAA, sequence_type))
count += 1
return s
|
class Sequence(object):
''' A class to hold a list of Residues in the same chain.
This class maintains two elements:
1) order List(ID) : a list of residue IDs in the order of addition;
2) sequence Dict(ID->Residue) : a map from residue IDs to a Residue object (chain, residue ID, residue type, sequence_type).
'''
def __init__(self, sequence_type = None):
pass
def __iter__(self):
pass
def __getitem__(self, item):
pass
def __len__(self):
pass
def ids(self):
pass
def __next__(self):
pass
def __eq__(self, other):
'''Equality is defined on residue ID and type.'''
pass
def add(self, r):
'''Takes an id and a Residue r and adds them to the Sequence.'''
pass
def set_type(self, sequence_type):
'''Set the type of a Sequence if it has not been set.'''
pass
def __repr__(self):
pass
@staticmethod
def from_sequence(chain, list_of_residues, sequence_type = None):
'''Takes in a chain identifier and protein sequence and returns a Sequence object of Residues, indexed from 1.'''
pass
| 13 | 5 | 6 | 0 | 6 | 1 | 2 | 0.17 | 1 | 6 | 2 | 0 | 10 | 5 | 11 | 11 | 92 | 19 | 63 | 27 | 50 | 11 | 62 | 25 | 50 | 5 | 1 | 2 | 22 |
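A standalone sketch of the two bookkeeping structures Sequence maintains (the order list and the id -> residue map), using bare tuples in place of Residue objects so the example needs nothing from klab; the chain and sequence are invented.

def from_string(chain, residue_string):
    # Mirror of the Sequence.from_sequence idea: index residues from 1 in order of addition.
    order = []
    sequence = {}
    for i, residue_aa in enumerate(residue_string, start=1):
        order.append(i)
        sequence[i] = (chain, i, residue_aa)  # stand-in for a Residue object
    return order, sequence

order, sequence = from_string('A', 'MKT')
print(order)        # [1, 2, 3]
print(sequence[2])  # ('A', 2, 'K')
print("".join(sequence[i][2] for i in order))  # 'MKT', i.e. what __repr__ reconstructs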
143,509 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/basics.py
|
klab.bio.basics.SequenceMap
|
class SequenceMap(object):
''' A class to map the IDs of one Sequence to another.'''
def __init__(self):
self.map = {}
self.substitution_scores = {}
@staticmethod
def from_dict(d):
for k, v in d.items():
assert(type(k) == int or type(k) == bytes or type(k) == str)
assert(type(v) == int or type(v) == bytes or type(v) == str)
s = SequenceMap()
s.map = d
s.substitution_scores = dict.fromkeys(list(d.keys()), None)
return s
def add(self, key, value, substitution_score):
self[key] = value
self.substitution_scores[key] = substitution_score
def remove(self, key):
if self.map.get(key):
del self.map[key]
if self.substitution_scores.get(key):
del self.substitution_scores[key]
def matches(self, other):
overlap = set(self.keys()).intersection(set(other.keys()))
for k in overlap:
if self[k] != other[k]:
return False
return True
def get_mismatches(self, other):
overlap = set(self.keys()).intersection(set(other.keys()))
return [k for k in overlap if self[k] != other[k]]
def substitution_scores_match(self, other):
'''Check to make sure that the substitution scores agree. If one map has a null score and the other has a non-null score, we trust the other's score and vice versa.'''
overlap = set(self.substitution_scores.keys()).intersection(set(other.substitution_scores.keys()))
for k in overlap:
if not(self.substitution_scores[k] == None or other.substitution_scores[k] == None):
if self.substitution_scores[k] != other.substitution_scores[k]:
return False
return True
def get(self, k, default_value = None):
return self.map.get(k, default_value)
def keys(self):
return list(self.map.keys())
def values(self):
return list(self.map.values())
def __getitem__(self, item):
return self.map.get(item)
def __setitem__(self, key, value):
assert(type(key) == int or type(key) == bytes or type(key) == str)
assert(type(value) == int or type(value) == bytes or type(value) == str)
self.map[key] = value
self.substitution_scores[key] = None
def __next__(self): # todo: This is __next__ in Python 3.x
try:
id = self._iter_keys.pop()
return id, self.map[id], self.substitution_scores[id]
except:
raise StopIteration
def __iter__(self):
self._iter_keys = set(self.map.keys())
return self
def __eq__(self, other):
if list(self.keys()) == list(other.keys()):
for k in list(self.keys()):
if self[k] != other[k]:
return False
return True
else:
return False
def __le__(self, other):
if set(self.keys()).issubset(set(other.keys())):
for k in list(self.keys()):
if self[k] != other[k]:
return False
return True
else:
return False
def glue(self, other):
return self + other
def __add__(self, other):
'''Glue two maps together. The operation is defined on maps which agree on the intersection of their domain as:
(f + g)(x) = f(x) if x not in dom(f)
(f + g)(x) = g(x) if x not in dom(g)
(f + g)(x) = f(x) = g(x) if x in dom(f) n dom(g)
'''
if not self.matches(other):
overlap = set(self.keys()).intersection(set(other.keys()))
mismatches = [k for k in overlap if self[k] != other[k]]
raise InconsistentMappingException('The two maps disagree on the common domain elements %s.' % str(mismatches))
elif not self.substitution_scores_match(other):
overlap = set(self.substitution_scores.keys()).intersection(set(other.substitution_scores.keys()))
mismatches = [k for k in overlap if self.substitution_scores[k] != other.substitution_scores[k]]
raise InconsistentMappingException('The two maps scores disagree on the common domain elements %s.' % str(mismatches))
elif not self.__class__ == other.__class__:
raise InconsistentMappingException('''The two maps have different classes: '%s' and '%s'.''' % ( self.__class__, other.__class__))
else:
d, s = {}, {}
other_domain = set(other.keys()).difference(set(self.keys()))
for k in list(self.keys()):
d[k] = self.map[k]
s[k] = self.substitution_scores[k]
for k in other_domain:
assert(self.map.get(k) == None)
assert(self.substitution_scores.get(k) == None)
d[k] = other.map[k]
s[k] = other.substitution_scores[k]
o = self.__class__.from_dict(d)
o.substitution_scores = s
return o
def __repr__(self):
s = []
substitution_scores = self.substitution_scores
for k, v in sorted(self.map.items()):
if type(k) == bytes or type(k) == str:
key = "'%s'" % k
else:
key = str(k)
if type(v) == bytes or type(v) == str:
val = "'%s'" % v
else:
val = str(v)
if substitution_scores.get(k):
s.append('%s->%s (%s)' % (str(key), str(val), str(substitution_scores[k])))
else:
s.append('%s->%s' % (str(key), str(val)))
return ", ".join(s)
|
class SequenceMap(object):
''' A class to map the IDs of one Sequence to another.'''
def __init__(self):
pass
@staticmethod
def from_dict(d):
pass
def add(self, key, value, substitution_score):
pass
def remove(self, key):
pass
def matches(self, other):
pass
def get_mismatches(self, other):
pass
def substitution_scores_match(self, other):
'''Check to make sure that the substitution scores agree. If one map has a null score and the other has a non-null score, we trust the other's score and vice versa.'''
pass
def get(self, k, default_value = None):
pass
def keys(self):
pass
def values(self):
pass
def __getitem__(self, item):
pass
def __setitem__(self, key, value):
pass
def __next__(self):
pass
def __iter__(self):
pass
def __eq__(self, other):
pass
def __le__(self, other):
pass
def glue(self, other):
pass
def __add__(self, other):
'''Glue two maps together. The operation is defined on maps which agree on the intersection of their domain as:
(f + g)(x) = f(x) if x not in dom(f)
(f + g)(x) = g(x) if x not in dom(g)
(f + g)(x) = f(x) = g(x) if x in dom(f) n dom(g)
'''
pass
def __repr__(self):
pass
| 21 | 3 | 7 | 0 | 6 | 0 | 2 | 0.07 | 1 | 9 | 1 | 2 | 18 | 3 | 19 | 19 | 147 | 21 | 119 | 43 | 98 | 8 | 110 | 42 | 90 | 6 | 1 | 3 | 43 |
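The __add__ docstring above defines gluing as the union of two maps that must agree wherever both are defined. A small worked example of that rule on plain dicts (the keys and values are invented; the real method also carries substitution scores and class checks):

def glue(f, g):
    # (f + g)(x) is only defined when f and g agree on the overlap of their domains.
    overlap = set(f) & set(g)
    mismatches = [k for k in overlap if f[k] != g[k]]
    if mismatches:
        raise ValueError('The two maps disagree on the common domain elements %s.' % mismatches)
    glued = dict(f)
    glued.update(g)
    return glued

print(glue({1: 'A', 2: 'B'}, {2: 'B', 3: 'C'}))  # {1: 'A', 2: 'B', 3: 'C'}
# glue({1: 'A'}, {1: 'X'}) would raise ValueError because the maps disagree on key 1.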
143,510 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/basics.py
|
klab.bio.basics.SimpleMutation
|
class SimpleMutation(object):
'''A class to describe mutations to (PDB) structures.'''
def __init__(self, WildTypeAA, ResidueID, MutantAA, Chain = None):
self.WildTypeAA = WildTypeAA
self.ResidueID = ResidueID
self.MutantAA = MutantAA
self.Chain = Chain
def __repr__(self):
suffix = ''
if self.Chain:
return "%s:%s %s->%s%s" % (self.Chain, self.WildTypeAA, str(self.ResidueID), self.MutantAA, suffix)
else:
return "?:%s %s->%s%s" % (self.WildTypeAA, str(self.ResidueID), self.MutantAA, suffix)
def __eq__(self, other):
'''Only checks amino acid types and residue ID.'''
if other == None:
return False
if self.WildTypeAA != other.WildTypeAA:
return False
if self.ResidueID != other.ResidueID:
return False
if self.MutantAA != other.MutantAA:
return False
return True
def __cmp__(self, other):
if other == None:
return 1
if self.Chain != other.Chain:
if ord(self.Chain) < ord(other.Chain):
return -1
else:
return 1
selfResidueID = str(self.ResidueID)
otherResidueID = str(other.ResidueID)
if selfResidueID != otherResidueID:
if not selfResidueID.isdigit():
spair = (int(selfResidueID[:-1]), ord(selfResidueID[-1]))
else:
spair = (int(selfResidueID), 0)
if not otherResidueID.isdigit():
opair = (int(otherResidueID[:-1]), ord(otherResidueID[-1]))
else:
opair = (int(otherResidueID), 0)
if spair < opair:
return -1
else:
return 1
return 0
|
class SimpleMutation(object):
'''A class to describe mutations to (PDB) structures.'''
def __init__(self, WildTypeAA, ResidueID, MutantAA, Chain = None):
pass
def __repr__(self):
pass
def __eq__(self, other):
'''Only checks amino acid types and residue ID.'''
pass
def __cmp__(self, other):
pass
| 5 | 2 | 12 | 0 | 11 | 0 | 4 | 0.04 | 1 | 2 | 0 | 1 | 4 | 4 | 4 | 4 | 55 | 7 | 46 | 14 | 41 | 2 | 41 | 14 | 36 | 8 | 1 | 2 | 16 |
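__cmp__ above orders residue IDs such as '17' and '17A' by splitting off a trailing insertion code. A compact sketch of the same ordering as a key function usable with sorted(); the residue IDs are invented.

def residue_sort_key(residue_id):
    # '17' -> (17, 0); '17A' -> (17, ord('A')), so plain residues sort before inserted ones.
    residue_id = str(residue_id)
    if residue_id.isdigit():
        return (int(residue_id), 0)
    return (int(residue_id[:-1]), ord(residue_id[-1]))

print(sorted(['100', '17A', '17', '2'], key=residue_sort_key))  # ['2', '17', '17A', '100']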
143,511 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/biblio/doi.py
|
klab.biblio.doi.RecordTypeParsingNotImplementedException
|
class RecordTypeParsingNotImplementedException(Exception): pass
|
class RecordTypeParsingNotImplementedException(Exception):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
143,512 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/biblio/doi.py
|
klab.biblio.doi.UnexpectedRecordTypeException
|
class UnexpectedRecordTypeException(Exception): pass
|
class UnexpectedRecordTypeException(Exception):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
143,513 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/biblio/publication.py
|
klab.biblio.publication.PublicationInterface
|
class PublicationInterface(object):
def __init__(self): pass
@staticmethod
def get_author_name_in_short_format(author):
names = []
if author.get('FirstName'):
names.append(author['FirstName'])
if author.get('MiddleNames'):
names.extend(author['MiddleNames'].split())
initials = ''.join([n[0] for n in names])
return "%s, %s" % (author['Surname'], initials)
@staticmethod
def get_page_range_in_abbreviated_format(startpage, endpage):
if startpage and endpage:
# Abbreviate e.g. '6026-6029' to '6026-9'.
endpage_prefix = commonprefix([startpage, endpage])
if len(endpage_prefix) == len(endpage):
return startpage
else:
endpage = endpage[len(endpage_prefix):]
return "%s-%s" % (startpage, endpage)
elif startpage or endpage:
return startpage or endpage
else:
return ''
@staticmethod
def _normalize_journal_name(j):
return j.strip().replace(".", "").replace(",", "").replace(" ", "").lower()
def to_dict(self): raise Exception('This function needs to be implemented by the subclasses.')
def get_earliest_date(self): raise Exception('This function needs to be implemented by the subclasses.')
def get_year(self): raise Exception('This function needs to be implemented by the subclasses.')
def get_url(self): raise Exception('This function needs to be implemented by the subclasses.')
def to_json(self):
import json
return json.dumps(self.to_dict())
def to_string(self, abbreviate_journal = True, html = False, add_url = False):
d = self.to_dict()
author_str = ', '.join([PublicationInterface.get_author_name_in_short_format(author) for author in d['authors']])
# author_str.append(('%s %s' % (author.get('Surname'), initials or '').strip()))
#author_str = (', '.join(author_str))
if html and author_str:
author_str = '<span class="publication_authors">%s.</span>' % author_str
title_str = d.get('Title', '')
if title_str:
if add_url:
title_str = '<a href="%s" target="_blank">%s</a>' % (self.get_url(), title_str)
if title_str:
if html:
if d['RecordType'] == "Book":
title_str = '<span class="publication_title">%s</span> <span>in</span>' % title_str
else:
title_str = '<span class="publication_title">%s.</span>' % title_str
else:
if d['RecordType'] == "Book":
title_str += ' in'
issue_str = ''
if d.get('PublicationName'):
if abbreviate_journal and d['RecordType'] == "Journal":
issue_str += publication_abbreviations.get(PublicationInterface._normalize_journal_name(d['PublicationName']), d['PublicationName'])
else:
issue_str += d['PublicationName']
if d.get('Volume'):
if d.get('Issue'):
issue_str += ' %(Volume)s(%(Issue)s)' % d
else:
issue_str += ' %(Volume)s' % d
page_string = PublicationInterface.get_page_range_in_abbreviated_format(d.get('StartPage'), d.get('EndPage'))
if page_string:
issue_str += ':%s' % page_string
if html and issue_str:
issue_str = '<span class="publication_issue">%s.</span>' % issue_str
if title_str and issue_str:
if html or d['RecordType'] == "Book":
article_str = '%s %s' % (title_str, issue_str)
else:
article_str = '%s. %s' % (title_str, issue_str)
earliest_date = self.get_earliest_date()
if earliest_date:
article_date = earliest_date
else:
article_date = self.get_year()
if html and article_date:
article_date = '<span class="publication_date">%s.</span>' % article_date
s = None
if html:
s = ' '.join([c for c in [author_str, title_str, issue_str, article_date] if c])
else:
s = '. '.join([c for c in [author_str, title_str, issue_str, article_date] if c])
if s:
s = s + '.'
return s
|
class PublicationInterface(object):
def __init__(self):
pass
@staticmethod
def get_author_name_in_short_format(author):
pass
@staticmethod
def get_page_range_in_abbreviated_format(startpage, endpage):
pass
@staticmethod
def _normalize_journal_name(j):
pass
def to_dict(self):
pass
def get_earliest_date(self):
pass
def get_year(self):
pass
def get_url(self):
pass
def to_json(self):
pass
def to_string(self, abbreviate_journal = True, html = False, add_url = False):
pass
| 14 | 0 | 9 | 1 | 8 | 0 | 3 | 0.03 | 1 | 1 | 0 | 2 | 7 | 0 | 10 | 10 | 107 | 16 | 88 | 27 | 78 | 3 | 80 | 24 | 68 | 20 | 1 | 3 | 34 |
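get_page_range_in_abbreviated_format above shortens the end page by stripping the prefix it shares with the start page. A self-contained sketch of that step using os.path.commonprefix (the page numbers are invented):

from os.path import commonprefix

def abbreviate_page_range(startpage, endpage):
    if startpage and endpage:
        prefix = commonprefix([startpage, endpage])
        if len(prefix) == len(endpage):
            return startpage  # e.g. '6026' to '6026' collapses to just '6026'
        return "%s-%s" % (startpage, endpage[len(prefix):])
    return startpage or endpage or ''

print(abbreviate_page_range('6026', '6029'))  # '6026-9'
print(abbreviate_page_range('199', '205'))    # '199-205' (no shared prefix, so the end page is kept whole)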
143,514 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/biblio/pubmed.py
|
klab.biblio.pubmed.NoCorrespondingDOIMappingException
|
class NoCorrespondingDOIMappingException(Exception): pass
|
class NoCorrespondingDOIMappingException(Exception):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
143,515 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/biblio/pubmed.py
|
klab.biblio.pubmed.PubMed
|
class PubMed(DOI):
'''This is essentially a wrapper onto the DOI class.'''
def __init__(self, pubmed_id):
# Allow for 'pmid:23717507' or '23717507'
self.pubmed_id = pubmed_id
pubmed_id = pubmed_id.strip()
if pubmed_id.lower().startswith('pmid:'):
pubmed_id = pubmed_id[5:].strip()
# Convert the PMID to a DOI identifier
if not pubmed_id.isdigit():
raise PubMedIDRetrievalException("PubMed identifiers are expected to be numeric strings with or without a prefix of 'pmid'. The passed value '%s' does not meet this requirement." % pubmed_id)
doi = convert_single(pubmed_id, 'pmid', 'doi')
if doi == None:
raise NoCorrespondingDOIMappingException
else:
super(PubMed, self).__init__(doi)
def get_pubmed_id(self):
return self.pubmed_id
|
class PubMed(DOI):
'''This is essentially a wrapper onto the DOI class.'''
def __init__(self, pubmed_id):
pass
def get_pubmed_id(self):
pass
| 3 | 1 | 10 | 2 | 7 | 1 | 3 | 0.2 | 1 | 3 | 2 | 0 | 2 | 1 | 2 | 26 | 22 | 4 | 15 | 5 | 12 | 3 | 14 | 5 | 11 | 4 | 3 | 1 | 5 |
143,516 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/biblio/pubmed.py
|
klab.biblio.pubmed.PubMedConverterTypeException
|
class PubMedConverterTypeException(Exception):
'''Exception class thrown when incorrect conversion types are passed.'''
def __init__(self, bad_type):
self.bad_type = bad_type
def __str__(self):
return "\nThe type '%s' is not a valid type for the PubMed ID Converter API (or else this code needs to be updated).\nValid types are: '%s'." % (self.bad_type, "', '".join(converter_types))
|
class PubMedConverterTypeException(Exception):
'''Exception class thrown when incorrect conversion types are passed.'''
def __init__(self, bad_type):
pass
def __str__(self):
pass
| 3 | 1 | 2 | 0 | 2 | 0 | 1 | 0.2 | 1 | 0 | 0 | 0 | 2 | 1 | 2 | 12 | 7 | 1 | 5 | 4 | 2 | 1 | 5 | 4 | 2 | 1 | 3 | 0 | 2 |
143,517 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/biblio/pubmed.py
|
klab.biblio.pubmed.PubMedIDRetrievalException
|
class PubMedIDRetrievalException(Exception): pass
|
class PubMedIDRetrievalException(Exception):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
143,518 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/retrospect.py
|
klab.retrospect.LogReader
|
class LogReader(object):
def __init__(self, logfile, expectedScripts, maxchars=DEFAULT_LOG_SIZE):
'''This function reads up to maxchars of the logfile.
It creates a dict for the object called log which maps job dates to dicts with the keys "script" (jobname),
"status" (a number made up from the flags above, "lines" (an array of [line attributes, line] pairs),
and "type" (job type e.g. "Restore", "Backup").
If also creates a dict called scriptsRun mapping script names to dicts with the keys "status" (a number
from the most recent run made up from the flags above), "lastRun" (the date of the most recent run), and
"lastSuccess" (the date of the most recent success).
Finally, we check whether all scripts in expectedScripts are found and report the missing scripts as
failures in scriptsRun.
'''
self.log = None
self.scriptsRun = None
self.expectedScripts = expectedScripts
self.earliestEntry = None
errorStrings = {
"Script error": "Script error",
"Execution incomplete": "Execution incomplete.",
"Catalog File invalid/damaged": "error -2241",
"Network communication failed": "error -519",
"Cannot access volume": "Can't access volume ",
}
warningStrings = {
"Read error (feature unsupported)": "error -1012",
"Read error (unknown)": "error -39",
"File/directory not found": "error -1101",
"Backup stopped manually": "error -1201",
"Transaction already complete": "error -557",
}
# Read in up to maxchars from the file and store in memory
sz = os.path.getsize(logfile)
F = codecs.open(logfile, "r", "utf-16")
contents = F.read(4)
if maxchars < sz:
F.seek(sz - maxchars - 4)
else:
maxchars = sz
contents = F.read(maxchars)
F.close()
# Remove all nested $[...] sections from the log
internalregex = re.compile(r"\$\[[^[]*?\]")
lasts = None
while contents != lasts:
lasts = contents
contents = internalregex.sub("", contents)
# Convert from UTF-16 to ASCII ignoring any conversion errors
contents = contents.encode('ascii', 'ignore')
# Optional: Remove whitespace from lines
contents = re.sub("\n\t+", "\n", contents)
contents = re.sub("\n\n", "\n", contents)
contents = re.sub("-\t", "-", contents)
# Entries seem to begin with a plus sign. Skip to the first full entry or use the entire string if no full entry exists.
# Each element of contents will contain the record for a retrospect event
contents = contents[contents.find("\n+"):]
contents = contents.split("\n+")
if not contents[0]:
contents = contents[1:]
# After this, each element of contents will contain a list of lines of the record for a retrospect event
for i in range(len(contents)):
contents[i] = contents[i].split("\n")
# Regular expressions used to match common strings
backupJob = re.compile(
".*Normal backup using (.*?) at (.*?)/(.*?)/(.*?) (.*?):(.*?) (.M).*")
scriptJob = re.compile(
".*Executing (.*?) at (.*?)/(.*?)/(.*?) (.*?):(.*?) (.M).*")
restoreJob1 = re.compile(
".*Executing (Restore Assistant) .* at (.*?)/(.*?)/(.*?) (.*?):(.*?) (.M).*")
restoreJob2 = re.compile(
".*Restore using Restore Assistant - (.*?) at (.*?)/(.*?)/(.*?) (.*?):(.*?) (.M).*")
engineStart = re.compile(".*Retrospect version [.\d]+\s*")
engineStartTime = re.compile(
".*(Launched at) (.*?)/(.*?)/(.*?) (.*?):(.*?) (.M).*")
# Parse the log, record by record
log = {}
scriptsRun = {}
for record in contents:
firstline = record[0]
type = None
jobmatch = restoreJob1.match(
firstline) or restoreJob2.match(firstline)
if jobmatch:
type = "Restore"
else:
jobmatch = backupJob.match(
firstline) or scriptJob.match(firstline)
if jobmatch:
type = "Backup"
else:
jobmatch = engineStart.match(firstline)
if jobmatch:
type = "Engine start"
jobmatch = engineStartTime.match(record[1])
# NOTE: If we could not match the record to one of the above types, it is not stored in the log.
# Instead, we just print out the record for debugging.
if type and jobmatch:
record[-1] = record[-1].strip()
# Parse the date
hour = int(jobmatch.group(5))
if jobmatch.group(7) == "PM":
hour = (hour + 12) % 24
dt = datetime.datetime(int(jobmatch.group(4)), int(jobmatch.group(2)), int(
jobmatch.group(3)), hour, int(jobmatch.group(6)))
# Store the date of the earliest entry
if not (self.earliestEntry) or dt < self.earliestEntry:
self.earliestEntry = dt
# rscript is typically the name of the script as assigned in Retrospect
rscript = None
# We assign attributes to lines of records. Lines can have multiple attributes. These allow us to format lines appropriately to the user.
# The first line of a record is a RETROSPECT_HEADER indicating that it is the title of a job.
# RETROSPECT_SUBHEADER is used to mark lines which indicate the beginning of a part of the job e.g. "Copying...", "Verifying...", etc.
# RETROSPECT_EVENT indicates that a line contains log information.
# RETROSPECT_WARNING indicates that something did not go smoothly but not that a critical error necessarily occurred.
# RETROSPECT_FAIL indicates that the job failed.
# RETROSPECT_UNHANDLED_ERROR indicates that a possible error occurred which this function does not yet handle.
# RETROSPECT_PLAIN is used for all other lines.
record[0] = (RETROSPECT_HEADER, firstline.strip() + "\n")
if type == "Engine start":
rscript = "Engine start"
status = RETROSPECT_EVENT
for i in range(1, len(record)):
record[i] = (RETROSPECT_EVENT, record[i])
elif type in ["Backup", "Restore"]:
if type == "Restore":
rscript = "Restore"
else:
rscript = jobmatch.group(1)
# Iterate through the lines of a record, setting the attributes of each appropriately
status = 0
for i in range(1, len(record)):
line = record[i]
if line:
if line[0] == '-':
record[i] = (RETROSPECT_SUBHEADER, line[1:])
continue
skipOtherCases = False
for k, s in warningStrings.items():
if line.find(s) != -1:
record[i] = (RETROSPECT_WARNING, line)
status |= RETROSPECT_WARNING
skipOtherCases = True
break
for k, s in errorStrings.items():
if line.find(s) != -1:
# This message does not seem to indicate a failure?
if line.find("#Script error: no source Media Set specified") == -1:
record[i] = (RETROSPECT_FAIL, line)
status |= RETROSPECT_FAIL
skipOtherCases = True
break
if skipOtherCases:
continue
if line.find("error -") != -1:
# Unhandled error
record[i] = (RETROSPECT_UNHANDLED_ERROR, line)
status |= RETROSPECT_UNHANDLED_ERROR
else:
record[i] = (RETROSPECT_PLAIN, line)
status |= RETROSPECT_PLAIN
# We index jobs by date which are not necessarily unique. This is a hack to avoid throwing
# away records in case multiple jobs are started simultaneously
while log.get(dt):
dt = dt + datetime.timedelta(0, 1)
# Store the record
log[dt] = {
"script": rscript,
"status": status,
"lines": record,
"type": type,
}
lastSuccess = None
success = False
if not (status & RETROSPECT_FAIL):
success = True
lastSuccess = dt
# Record the most recent successful date and/or the most recent run of a particular script
if scriptsRun.get(rscript):
data = scriptsRun[rscript]
if success:
if not (data["lastSuccess"]) or (dt > data["lastSuccess"]):
data["lastSuccess"] = dt
if dt > data["lastRun"]:
data["lastRun"] = dt
data["status"] = status
else:
scriptsRun[rscript] = {
"status": status, "lastRun": dt, "lastSuccess": lastSuccess}
else:
# Not pretty but it will be displayed pretty obviously
print(("<br>".join(record) + "<br><br>"))
# Check against expected scripts
missingScripts = expectedScripts.SymmetricDifference(
list(scriptsRun.keys()))
# NOTE: We ignore these special script types. This list is probably not comprehensive and so may need to be added to.
for t in ['Kortemme (offsite)', 'Ming', 'Ming (offsite)', 'Restore', 'Grooming', 'Engine start', 'Rebuild', 'Retrieve Snapshot']:
if t in missingScripts:
missingScripts.remove(t)
del scriptsRun[t]
for ms in missingScripts:
scriptsRun[ms] = {"status": RETROSPECT_FAIL,
"lastRun": None, "lastSuccess": None}
self.log = log
self.scriptsRun = scriptsRun
def getFailedJobIDs(self, extraLapse=TYPICAL_LAPSE):
        '''Returns two lists which identify failed jobs in the scriptsRun table.
        Where a time stamp for a failed job can be found it is returned in the first list; these time stamps can be used to index the log.
        Failed jobs for which no time stamp was found are returned by script name in the second list.
'''
scriptsRun = self.scriptsRun
failedJobTimestamps = []
nodata = []
for name, details in sorted(scriptsRun.items()):
if details["lastSuccess"] and expectedScripts.get(name):
if not expectedScripts.check(name, details["lastSuccess"], extraLapse):
if details["lastRun"]:
failedJobTimestamps.append(details["lastRun"])
else:
nodata.append(name)
continue
else:
if details["lastRun"]:
failedJobTimestamps.append(details["lastRun"])
else:
nodata.append(name)
continue
if details["status"] & RETROSPECT_FAIL:
failedJobTimestamps.append(details["lastRun"])
elif details["status"] & RETROSPECT_WARNING:
failedJobTimestamps.append(details["lastRun"])
return failedJobTimestamps, nodata
def generateSummary(self, extraLapse=TYPICAL_LAPSE):
        '''Generates a summary of the status of the expected scripts, broken down based on the log.
        This summary (a list of strings) is returned along with a list of strings describing the
        most recent attempts at the failed jobs.
'''
scriptsRun = self.scriptsRun
body = []
numberOfFailed = 0
numberWithWarnings = 0
failedList = []
successList = []
warningsList = []
for name, details in sorted(scriptsRun.items()):
status = None
daysSinceSuccess = None
if details["lastSuccess"] and expectedScripts.get(name):
daysSinceSuccess = expectedScripts.get(
name).getDaysSinceLastSuccess(details["lastSuccess"])
if not expectedScripts.check(name, details["lastSuccess"], extraLapse):
status = "FAILED"
else:
status = "FAILED"
if not status:
if details["status"] & RETROSPECT_FAIL:
status = "FAILED"
elif details["status"] & RETROSPECT_WARNING:
status = "WARNINGS"
elif status != "FAILED":
status = "OK"
if details["lastSuccess"] and daysSinceSuccess:
lastSuccessDetails = "Last successful run on %s (%0.1f days ago)" % (
details["lastSuccess"], daysSinceSuccess)
else:
lastSuccessDetails = "No recent successful run."
if details["lastRun"]:
lastRunDetails = "Last run on %s (%s)" % (
details["lastRun"], status)
else:
lastRunDetails = "No recent run (%s)" % status
if status == "FAILED":
numberOfFailed += 1
failedList.append("%s: %s. %s" %
(name, lastRunDetails, lastSuccessDetails))
elif status == "WARNINGS":
numberWithWarnings += 1
warningsList.append("%s: %s. %s" %
(name, lastRunDetails, lastSuccessDetails))
else:
successList.append("%s: %s. %s" %
(name, lastRunDetails, lastSuccessDetails))
body = []
if failedList:
body.append("FAILED JOBS (%d)" % numberOfFailed)
body.append("****************")
for j in failedList:
body.append(j)
body.append("\n")
if warningsList:
body.append("JOBS WITH WARNINGS (%d)" % numberWithWarnings)
body.append("***********************")
for j in warningsList:
body.append(j)
body.append("\n")
if successList:
body.append("SUCCESSFUL JOBS")
body.append("***************")
for j in successList:
body.append(j)
return body, failedList
def createAnchorID(self, scriptname, date):
'''This creates a string to be used in an anchor (<a>) HTML tag.'''
return ("%s%s" % (scriptname, str(date))).replace(" ", "")
def generateSummaryHTMLTable(self, extraLapse=TYPICAL_LAPSE):
        '''Generates a summary in HTML of the status of the expected scripts, broken down based on the log.
This summary is returned as a list of strings.
'''
scriptsRun = self.scriptsRun
html = []
# Start summary table
html.append(
"<table style='text-align:center;border:1px solid black;margin-left: auto;margin-right: auto;'>\n")
html.append(
' <tr><td colspan="4" style="text-align:center"></td></tr>\n')
html.append(' <tr style="font-weight:bold;background-color:#cccccc;text-align:center"><td>Script</td><td>Last status</td><td>Last run</td><td>Last success</td></tr>\n')
# Alternate shades between rows
tablestyle = ['background-color:#33dd33;', 'background-color:#33ff33;']
warningstyle = ['background-color:#EA8737;',
'background-color:#f5b767;']
failstyle = ['background-color:#dd3333;', 'background-color:#ff3333;']
count = 0
for name, details in sorted(scriptsRun.items()):
status = None
rowstyle = tablestyle[count % 2]
if details["lastSuccess"] and expectedScripts.get(name):
if not expectedScripts.check(name, details["lastSuccess"], extraLapse):
status = "STOPPED"
else:
rowstyle = failstyle[count % 2]
status = "FAIL"
laststatusstyle = tablestyle[count % 2]
if details["status"] & RETROSPECT_FAIL:
laststatusstyle = failstyle[count % 2]
status = "FAIL"
elif status != "STOPPED" and details["status"] & RETROSPECT_WARNING:
laststatusstyle = warningstyle[count % 2]
status = "WARNINGS"
elif status != "FAIL" and status != "STOPPED":
status = "OK"
# Start a row
html.append('<tr style="text-align:left;%s">\n' % rowstyle)
# Script name field
if status == "STOPPED":
html.append('\t<td style="%s">%s</td>\n' %
(failstyle[count % 2], name))
else:
html.append('\t<td style="%s">%s</td>' %
(tablestyle[count % 2], name))
# Last status field
if details["lastRun"]:
if status == "STOPPED":
html.append('\t<td style="%s"><a href="#%s">%s</a></td>\n' % (
failstyle[count % 2], self.createAnchorID(name, details["lastRun"]), status))
else:
html.append('\t<td style="%s"><a href="#%s">%s</a></td>\n' %
(laststatusstyle, self.createAnchorID(name, details["lastRun"]), status))
else:
html.append('\t<td style="%s">%s</td>\n' %
(laststatusstyle, status))
# Last run field
if details["lastRun"]:
html.append('\t<td style="%s"><a href="#%s">%s</a></td>\n' % (laststatusstyle,
self.createAnchorID(name, details["lastRun"]), details["lastRun"]))
else:
html.append('\t<td style="%s">none found</td>\n' %
laststatusstyle)
# Last success field
if details["lastSuccess"]:
html.append('\t<td><a href="#%s">%s</a></td>\n' %
(self.createAnchorID(name, details["lastSuccess"]), details["lastSuccess"]))
else:
html.append('\t<td>none found</td>\n')
html.append('</tr>\n')
count += 1
html.append("</table>")
return html
# ACCESSORS
def getLog(self):
return self.log
def getEarliestEntry(self):
return self.earliestEntry
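
A minimal consumption sketch for the class above. The RETROSPECT_* flags are module-level bit-flag constants defined alongside the class (their values are not shown here), so this assumes they and an already-constructed LogReader instance are in scope; it is illustrative, not part of the repository.

def print_backup_report(reader):
    # reader: an already-constructed LogReader instance.
    summary_lines, failed_descriptions = reader.generateSummary()
    print("\n".join(summary_lines))
    # Each job's "status" is a bitmask built with |=, so individual conditions are tested with &.
    for dt, details in sorted(reader.getLog().items()):
        if details["status"] & RETROSPECT_FAIL:
            print("%s (%s): FAILED" % (dt, details["script"]))
        elif details["status"] & RETROSPECT_WARNING:
            print("%s (%s): completed with warnings" % (dt, details["script"]))
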
|
class LogReader(object):
def __init__(self, logfile, expectedScripts, maxchars=DEFAULT_LOG_SIZE):
'''This function reads up to maxchars of the logfile.
It creates a dict for the object called log which maps job dates to dicts with the keys "script" (jobname),
"status" (a number made up from the flags above, "lines" (an array of [line attributes, line] pairs),
and "type" (job type e.g. "Restore", "Backup").
        It also creates a dict called scriptsRun mapping script names to dicts with the keys "status" (a number
from the most recent run made up from the flags above), "lastRun" (the date of the most recent run), and
"lastSuccess" (the date of the most recent success).
Finally, we check whether all scripts in expectedScripts are found and report the missing scripts as
failures in scriptsRun.
'''
pass
def getFailedJobIDs(self, extraLapse=TYPICAL_LAPSE):
        '''Returns two lists which identify failed jobs in the scriptsRun table.
        Where a time stamp for a failed job can be found it is returned in the first list; these time stamps can be used to index the log.
        Failed jobs for which no time stamp was found are returned by script name in the second list.
'''
pass
def generateSummary(self, extraLapse=TYPICAL_LAPSE):
        '''Generates a summary of the status of the expected scripts, broken down based on the log.
        This summary (a list of strings) is returned along with a list of strings describing the
        most recent attempts at the failed jobs.
'''
pass
def createAnchorID(self, scriptname, date):
'''This creates a string to be used in an anchor (<a>) HTML tag.'''
pass
def generateSummaryHTMLTable(self, extraLapse=TYPICAL_LAPSE):
        '''Generates a summary in HTML of the status of the expected scripts, broken down based on the log.
This summary is returned as a list of strings.
'''
pass
def getLog(self):
pass
def getEarliestEntry(self):
pass
| 8 | 5 | 58 | 8 | 41 | 10 | 11 | 0.24 | 1 | 6 | 0 | 0 | 7 | 4 | 7 | 7 | 413 | 62 | 290 | 72 | 282 | 71 | 246 | 72 | 238 | 35 | 1 | 7 | 76 |
143,519 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/biblio/ris.py
|
klab.biblio.ris.RISEntry
|
class RISEntry(PublicationInterface):
record_types = {
'JOUR' : 'Journal',
'JFULL' : 'Journal',
'EJOUR' : 'Journal',
'CHAP' : 'Book',
'BOOK' : 'Book',
'EBOOK' : 'Book',
'ECHAP' : 'Book',
'EDBOOK' : 'Book',
'CONF' : 'Conference',
'CPAPER' : 'Conference',
'THES' : 'Dissertation',
'RPRT' : 'Report',
'STAND' : 'Standard',
'DBASE' : 'Database',
}
def __init__(self, RIS, quiet = True, lenient_on_tag_order = False):
if type(RIS) != unicode_type:
raise Exception("RIS records should always be passed as unicode.")
self.RIS = RIS
self.quiet = quiet
        # Setting member elements explicitly here so that developers can see which variables are expected to be set after parsing
self.publication_type = None
self.authors = []
self.ReferralURL = None
self.title = None
self.subtitle = None
self.journal = None
self.volume = None
self.issue = None
self.startpage = None
self.endpage = None
self.date = None
self.year = None
self.doi = None
self.url = None
self.errors, self.warnings = self.parse(lenient_on_tag_order = lenient_on_tag_order)
def parse(self, lenient_on_tag_order = False):
errors = []
warnings = []
d = {}
for v in list(tag_map.values()):
d[v] = []
RIS = self.RIS
lines = [l.strip() for l in RIS.split("\n") if l.strip()]
# Check the first entry
if not lenient_on_tag_order:
if lines[0][0:5] != 'TY -':
raise Exception("Bad RIS record. Expected a TY entry as the first entry, received '%s' instead." % lines[0])
if lines[-1][0:5] != 'ER -':
raise Exception("Bad RIS record. Expected an ER entry as the last entry, received '%s' instead." % lines[-1])
# Parse the record
tag_data = {}
for line in lines:
tag_type = line[0:2]
if not tag_type in taglist:
# Note: I removed 'elif key == "DOI": key = "M3"' from the old code - this may be required here if this is something I added to the RIS records (it breaks the RIS format)
raise Exception("Unrecognized bibliography tag '%s'." % tag_type)
if not (line[2:5] == ' -'): # there should be a space at position 5 as well but besides stripping above, some RIS entries do not have this for the 'ER' line
raise Exception("Unexpected characters '%s' at positions 2-4." % line[2:5])
content = line[5:].strip()
if content:
tag_data[tag_type] = tag_data.get(tag_type, [])
tag_data[tag_type].append(content)
if tag_type in tag_map:
if tag_type == 'JO':
d["journal"].insert(-1, content) # give precedence to the JO entry over the other journal tags
else:
d[tag_map[tag_type]].append(content)
for k, v in tag_data.items():
if len(v) == 1:
tag_data[k] = v[0]
for k, v in d.items():
# Remove
if len(v) == 0:
d[k] = None
elif len(v) == 1 and k != 'authors':
d[k] = v[0]
elif len(set(v)) == 1 and k != 'authors':
d[k] = v[0]
elif len(v) > 1:
if k == 'journal':
d[k] = v[0]
elif k == 'date':
found = False
for val in v:
if len(val.split("/")) == 3 or len(val.split("/")) == 4:
found = True
d[k] = val
if not found:
d[k] = v[0]
assert(found)
elif k == 'url':
d[k] = v[0]
else:
assert(k in ['authors'])
        assert(type(d['authors']) == lsttype)
d['year'] = None
if not d['date']:
raise Exception("Error: The RIS record is missing information about the publication date.")
else:
try:
pubdate = d['date']
tokens = [t for t in pubdate.split("/")]
if len(tokens) == 4: # YYYY/MM/DD/other info
tokens = tokens[0:3]
tokens = list(map(int, [t for t in tokens if t]))
assert (1 <= len(tokens) <= 3)
if len(tokens) > 1:
assert (1 <= tokens[1] <= 12)
assert (1900 <= tokens[0] <= datetime.datetime.today().year)
d['year'] = tokens[0]
if len(tokens) == 3:
assert (1 <= tokens[2] <= 31)
d['date'] = datetime.date(tokens[0], tokens[1], tokens[2])
except Exception as e:
if not self.quiet:
print((traceback.format_exc()))
raise colortext.Exception("Exception in date line '%s'.\n %s" % (d['date'].strip(), str(e)))
if not d['year']:
errors.append("The year of publication could not be determined.")
author_order = 0
authors = []
for author in d['authors']:
surname = author.split(",")[0].strip()
firstnames = author.split(",")[1].strip().split()
firstname = firstnames[0]
middlenames = []
if len(firstnames) > 1:
middlenames = firstnames[1:]
assert (firstname)
assert (surname)
details = {
"AuthorOrder": author_order,
"FirstName": firstname,
"MiddleNames": middlenames,
"Surname": surname,
}
authors.append(details)
author_order += 1
d['authors'] = authors
if d['publication_type'] == "JOUR" or d['publication_type'] == "CONF":
pass # d['journal'] already set to JO, JA, J2, or JF data
elif d['publication_type'] == "CHAP":
d['journal'] = tag_data.get("BT")
else:
errors.append("Could not determine publication type.")
for k, v in d.items():
self.__setattr__(k, v)
if d['volume']:
if not(d['issue']) and d['publication_type'] != "CHAP":
errors.append("No issue found.")
if not (PublicationInterface.get_page_range_in_abbreviated_format(self.startpage, self.endpage)):
warnings.append("No start or endpage found.")
#Doesn't seem to make sense for electronic journals without an endpage
#elif not(self.startpage and self.endpage and self.startpage.isdigit() and self.endpage.isdigit()):
# warnings.append("No start or endpage found.")
if not(self.journal):
errors.append("No journal name found.")
# doi parsing
if not(self.doi):
doi = None
for k, v in tag_data.items():
if type(v) == type(''):
if v.startswith("doi:"):
self.doi = v[4:].strip()
break
else:
doi_idx = v.find('dx.doi.org/')
if doi_idx != -1:
self.doi = urllib.parse.unquote(tag_data['UR'][doi_idx+(len('dx.doi.org/')):])
break
if self.doi and self.doi.startswith("doi:"):
self.doi = self.doi[4:].strip()
if not self.doi:
if not tag_data.get("UR"):
errors.append("No DOI or URL available.")
else:
warnings.append("No DOI available.")
else:
if not doi_regex.match(self.doi):
errors.append("Invalid doi string '%s'." % self.doi)
self.doi = None
if not(self.authors and self.title and self.journal and self.year):
errors.append("Missing crucial information (author, title, journal, or year) - skipping entry.")
else:
if self.publication_type != "CHAP" and not(publication_abbreviations.get(self.journal)):
matched = False
normalized_journal_name = PublicationInterface._normalize_journal_name(self.journal)
for k, v in publication_abbreviations.items():
if PublicationInterface._normalize_journal_name(k) == normalized_journal_name or PublicationInterface._normalize_journal_name(v) == normalized_journal_name:
self.journal = k
matched = True
break
if not matched:
errors.append("Missing abbreviation for journal '%s'." % self.journal)
else:
assert(publication_abbreviations.get(self.journal))
return errors, warnings
def format(self, abbreviate_journal = True, abbreviate_author_names = True, show_year = True, html = True, allow_errors = False):
raise Exception('This function is deprecated in favor of PublicationInterface.to_string. Some functionality needs to be added to that function e.g. ReferralURL_link.')
if self.errors and not allow_errors:
if not self.quiet:
colortext.error("There were parsing errors: %s" % self.errors)
return None
# Abbreviate the journal name
journal = self.journal
if abbreviate_journal and self.publication_type != "CHAP":
journal = publication_abbreviations.get(self.journal, self.journal)
# Abbreviate the authors' names
authors_str = None
if abbreviate_author_names:
authors_str = ", ".join(self.get_author_names_in_short_format())
else:
raise Exception("This code needs to be written with whatever is needed.")
# Create string for the publication year
year_str = ""
if show_year:
year_str = ", %s" % self.year
ReferralURL_link = ""
if self.ReferralURL:
ReferralURL_link = " <a class='publist' href='%s'>[free download]</a>" % self.ReferralURL
titlesuffix = '.'
if self.publication_type == "CHAP":
titlesuffix = " in"
# The entry format is fixed. This could be passed as a variable for different styles.
entry = ""
if self.volume:
entry = self.volume
if self.subtitle:
entry += " (%s)" % self.subtitle
if self.issue:
entry += "(%s)" % self.issue
pagerange = PublicationInterface.get_page_range_in_abbreviated_format(self.startpage, self.endpage)
if pagerange:
entry += ":%s" % pagerange
else:
if self.startpage and self.endpage and self.startpage.isdigit() and self.endpage.isdigit():
if self.subtitle:
entry = " (%s)" % self.subtitle
pagerange = PublicationInterface.get_page_range_in_abbreviated_format(self.startpage, self.endpage)
if pagerange:
entry += ":%s" % pagerange
s = ['%s. ' % authors_str]
if html:
if self.doi:
s.append('%s%s %s %s%s.' % (self.title, titlesuffix, self.journal, entry, year_str))
s.append('doi: <a class="publication_doi" href="http://dx.doi.org/%s">%s</a>''' % (self.doi, self.doi))
s.append(ReferralURL_link)
elif self.url:
s.append('<a class="publication_link" href="%s">%s</a>%s' % (self.url, self.title, titlesuffix))
s.append('%s %s%s.' % (self.journal, entry, year_str))
s.append(ReferralURL_link)
else:
s.append('%s%s %s %s%s.' % (self.title, titlesuffix, self.journal, entry, year_str))
s.append(ReferralURL_link)
else:
s.append('%s%s %s %s%s.' % (self.title, titlesuffix, self.journal, entry, year_str))
if self.doi:
s.append('doi: %s' % self.doi)
elif self.url:
s.append('url: %s' % self.url)
return " ".join(s)
def get_earliest_date(self):
return str(self.date).replace('-', '/')
def get_url(self):
if self.doi:
return 'http://dx.doi.org/%s' % self.doi
elif self.url:
return self.url
return None
def get_year(self):
return self.year
def to_dict(self):
        '''A representation of the publication data that matches the schema we use in our databases.'''
author_list = []
for author in self.authors:
author_list.append(
dict(
AuthorOrder = author['AuthorOrder'] + 1, # we should always use 1-based indexing but since this is shared code, I do not want to change the logic above without checking to make sure I don't break dependencies
FirstName = author['FirstName'],
MiddleNames = ' '.join(author['MiddleNames']), # this is the main difference with the code above - the database expects a string, we maintain a list
Surname = author['Surname']
)
)
pub_url = None
if self.url or self.doi:
pub_url = self.url or ('http://dx.doi.org/%s' % self.doi)
return dict(
Title = self.title,
PublicationName = self.journal,
Volume = self.volume,
Issue = self.issue,
StartPage = self.startpage,
EndPage = self.endpage,
PublicationYear = self.year,
PublicationDate = self.date,
RIS = self.RIS,
DOI = self.doi,
PubMedID = None,
URL = pub_url,
ISSN = None, # eight-digit number
authors = author_list,
#
RecordType = RISEntry.record_types.get(self.publication_type)
)
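
A usage sketch for the parser above, with a fabricated RIS record. Which tags map to which fields depends on the module-level tag_map/taglist globals (not shown here), and the constructor raises on structural problems while collecting softer issues in entry.errors and entry.warnings, so treat this purely as an illustration of the calling pattern.

from klab.biblio.ris import RISEntry

# Fabricated record; the two-space "TAG  - value" layout is the standard RIS convention.
ris_text = u"\n".join([
    u"TY  - JOUR",
    u"AU  - Doe, Jane A.",
    u"TI  - An example title",
    u"JO  - Example Journal",
    u"VL  - 12",
    u"IS  - 3",
    u"SP  - 100",
    u"EP  - 110",
    u"Y1  - 2015/06/01/",
    u"DO  - 10.0000/example.0001",
    u"ER  - ",
])

try:
    entry = RISEntry(ris_text, quiet=False)
except Exception as e:
    print("Structural problem with the record: %s" % e)
else:
    print(entry.get_year(), entry.get_url())
    print(entry.errors, entry.warnings)   # soft issues, e.g. a missing journal abbreviation
    record = entry.to_dict()              # database-style dict (Title, PublicationName, ...)
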
|
class RISEntry(PublicationInterface):
def __init__(self, RIS, quiet = True, lenient_on_tag_order = False):
pass
def parse(self, lenient_on_tag_order = False):
pass
def format(self, abbreviate_journal = True, abbreviate_author_names = True, show_year = True, html = True, allow_errors = False):
pass
def get_earliest_date(self):
pass
def get_url(self):
pass
def get_year(self):
pass
def to_dict(self):
        '''A representation of the publication data that matches the schema we use in our databases.'''
pass
| 8 | 1 | 46 | 5 | 39 | 3 | 12 | 0.08 | 1 | 11 | 1 | 0 | 7 | 18 | 7 | 17 | 352 | 45 | 292 | 65 | 284 | 22 | 227 | 64 | 219 | 54 | 2 | 5 | 84 |
143,520 |
Kortemme-Lab/klab
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Kortemme-Lab_klab/klab/bio/ligand.py
|
klab.bio.ligand.LigandMap._MapPoint
|
class _MapPoint(object):
'''A mapping from a single ligand in one PDB to a single ligand in another.'''
def __init__(self, from_pdb_code, from_pdb_residue_id, to_pdb_code, to_pdb_residue_id, strict=True):
'''PDB codes are the contents of columns [17:20] (Python format i.e. zero-indexed) of HETATM lines.
PDB residue IDs are the contents of columns [21:27] of HETATM lines.'''
assert ((len(from_pdb_residue_id) == 6)
and (len(to_pdb_residue_id) == 6))
assert (from_pdb_residue_id[1:5].strip().isdigit(
) and to_pdb_residue_id[1:5].strip().isdigit())
if strict:
assert ((len(from_pdb_code) == 3) and (len(to_pdb_code) == 3))
else:
assert ((1 <= len(from_pdb_code) <= 3)
and (1 <= len(to_pdb_code) <= 3))
if len(from_pdb_code) < 3:
from_pdb_code = from_pdb_code.strip().rjust(3)
if len(to_pdb_code) < 3:
to_pdb_code = to_pdb_code.strip().rjust(3)
self.from_pdb_code = from_pdb_code
self.to_pdb_code = to_pdb_code
self.from_pdb_residue_id = from_pdb_residue_id
self.to_pdb_residue_id = to_pdb_residue_id
def __repr__(self):
return '{0} ({1}) -> {2} ({3})'.format(self.from_pdb_residue_id, self.from_pdb_code, self.to_pdb_residue_id, self.to_pdb_code)
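
A small sketch of how the column ranges in the docstring map onto raw HETATM lines. The lines and het codes below are fabricated, and _MapPoint is a private helper nested in LigandMap (the import path follows the class name above), so this is only for illustration.

from klab.bio.ligand import LigandMap

# Fabricated HETATM lines, padded so that the [17:20] and [21:27] slices line up.
from_line = "HETATM 1234  C1  ABC A 201      11.111  22.222  33.333  1.00 20.00           C"
to_line   = "HETATM 5678  C1  XYZ B 305      44.444  55.555  66.666  1.00 20.00           C"

point = LigandMap._MapPoint(
    from_pdb_code       = from_line[17:20],   # three-letter het code, e.g. 'ABC'
    from_pdb_residue_id = from_line[21:27],   # chain + residue number + insertion code (6 chars)
    to_pdb_code         = to_line[17:20],
    to_pdb_residue_id   = to_line[21:27],
)
print(point)   # e.g. "A 201  (ABC) -> B 305  (XYZ)"
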
|
class _MapPoint(object):
'''A mapping from a single ligand in one PDB to a single ligand in another.'''
def __init__(self, from_pdb_code, from_pdb_residue_id, to_pdb_code, to_pdb_residue_id, strict=True):
'''PDB codes are the contents of columns [17:20] (Python format i.e. zero-indexed) of HETATM lines.
PDB residue IDs are the contents of columns [21:27] of HETATM lines.'''
pass
def __repr__(self):
pass
| 3 | 2 | 11 | 2 | 9 | 1 | 3 | 0.17 | 1 | 0 | 0 | 0 | 2 | 4 | 2 | 2 | 27 | 6 | 18 | 7 | 15 | 3 | 17 | 7 | 14 | 4 | 1 | 2 | 5 |
143,521 |
Kortemme-Lab/klab
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Kortemme-Lab_klab/klab/cluster/run_mix.py
|
klab.cluster.run_mix.run_local.MultiWorker
|
class MultiWorker:
def __init__(self, task, func):
self.reporter = Reporter(task)
self.func = func
self.pool = Pool()
self.number_finished = 0
def cb(self, time_return):
self.number_finished += 1
self.reporter.report(self.number_finished)
def addJob(self, argsTuple):
self.pool.apply_async(self.func, argsTuple, callback=self.cb)
def finishJobs(self):
self.pool.close()
self.pool.join()
self.reporter.done()
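
A usage sketch for the helper above. MultiWorker is defined inside run_local (so it is assumed to be in scope rather than imported), Reporter comes from the surrounding module, and the job function must be defined at module level so multiprocessing can pickle it.

import time

def slow_square(x):
    # Runs in a worker process; must be picklable, hence module-level.
    time.sleep(0.1)
    return x * x

def run_jobs(values):
    worker = MultiWorker("squaring numbers", slow_square)   # task label is only used for reporting
    for v in values:
        worker.addJob((v,))        # args are passed as a tuple to Pool.apply_async
    worker.finishJobs()            # close + join the pool, then report completion
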
|
class MultiWorker:
def __init__(self, task, func):
pass
def cb(self, time_return):
pass
def addJob(self, argsTuple):
pass
def finishJobs(self):
pass
| 5 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 0 | 1 | 1 | 0 | 4 | 4 | 4 | 4 | 15 | 0 | 15 | 9 | 10 | 0 | 15 | 9 | 10 | 1 | 0 | 0 | 4 |
143,522 |
Kortemme-Lab/klab
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Kortemme-Lab_klab/klab/deprecated/rosettadb.py
|
klab.deprecated.rosettadb.DatabaseInterface
|
class DatabaseInterface(object):
data = {}
def __init__(self, settings, isInnoDB=True, numTries=1, host=None, db=None, user=None, passwd=None, port=None, unix_socket=None, passwdfile=None):
self.connection = None
self.isInnoDB = isInnoDB
self.host = host or settings["SQLHost"]
self.db = db or settings["SQLDatabase"]
self.user = user or settings["SQLUser"]
self.passwd = passwd or settings["SQLPassword"]
self.port = port or settings["SQLPort"]
self.unix_socket = unix_socket or settings["SQLSocket"]
self.numTries = numTries
self.lastrowid = None
if (not self.passwd) and passwdfile:
if os.path.exists(passwdfile):
passwd = rosettahelper.readFile(passwdfile).strip()
else:
passwd = getpass.getpass(
"Enter password to connect to MySQL database:")
self.locked = False
self.lockstring = "LOCK TABLES %s" % join(
["%s WRITE" % r[0] for r in self.execute("SHOW TABLES", cursorClass=StdCursor)], ", ")
self.unlockstring = "UNLOCK TABLES"
# Store a list of the table names
self.TableNames = [r[0] for r in self.execute(
"SHOW TABLES", cursorClass=StdCursor)]
# Store a hierarchy of objects corresponding to the table names and their field names
self.FieldNames = _FieldNames(None)
self.FlatFieldNames = _FieldNames(None)
tablenames = self.TableNames
for tbl in tablenames:
setattr(self.FieldNames, tbl, _FieldNames(tbl))
fieldDescriptions = self.execute("SHOW COLUMNS FROM %s" % tbl)
for field in fieldDescriptions:
fieldname = field["Field"]
setattr(getattr(self.FieldNames, tbl), fieldname, fieldname)
setattr(self.FlatFieldNames, fieldname, fieldname)
getattr(self.FieldNames, tbl).makeReadOnly()
self.FieldNames.makeReadOnly()
self.FlatFieldNames.makeReadOnly()
def __del__(self):
if self.connection and self.connection.open:
self.connection.close()
def _get_connection(self, cursorClass):
self.connection = MySQLdb.connect(host=self.host, db=self.db, user=self.user, passwd=self.passwd,
port=self.port, unix_socket=self.unix_socket, cursorclass=cursorClass)
def _close_connection(self):
if self.connection and self.connection.open:
self.connection.close()
def getLastRowID(self):
return self.lastrowid
def locked_execute(self, sql, parameters=None, cursorClass=DictCursor, quiet=False):
'''We are lock-happy here but SQL performance is not currently an issue daemon-side.'''
return self.execute(sql, parameters, cursorClass, quiet=quiet, locked=True)
def execute_select(self, sql, parameters=None, cursorClass=DictCursor, quiet=False, locked=False):
self.execute(sql, parameters, cursorClass,
quiet, locked, do_commit=False)
def execute(self, sql, parameters=None, cursorClass=DictCursor, quiet=False, locked=False, do_commit=True):
"""Execute SQL query. This uses DictCursor by default."""
i = 0
errcode = 0
caughte = None
cursor = None
if sql.find(";") != -1 or sql.find("\\G") != -1:
# Catches some injections
raise Exception(
"The SQL command '%s' contains a semi-colon or \\G. This is a potential SQL injection." % sql)
while i < self.numTries:
i += 1
try:
assert (not (self.connection) or not (self.connection.open))
self._get_connection(cursorClass)
cursor = self.connection.cursor()
if locked:
cursor.execute(self.lockstring)
self.locked = True
if parameters:
errcode = cursor.execute(sql, parameters)
else:
errcode = cursor.execute(sql)
self.lastrowid = int(cursor.lastrowid)
if do_commit and self.isInnoDB:
self.connection.commit()
results = cursor.fetchall()
if locked:
cursor.execute(self.unlockstring)
self.locked = False
cursor.close()
self._close_connection()
return results
except MySQLdb.OperationalError as e:
if cursor:
if self.locked:
cursor.execute(self.unlockstring)
self.locked = False
cursor.close()
self._close_connection()
caughte = str(e)
errcode = e[0]
continue
except Exception as e:
if cursor:
if self.locked:
cursor.execute(self.unlockstring)
self.locked = False
cursor.close()
self._close_connection()
caughte = str(e)
traceback.print_exc()
break
sleep(0.2)
if not quiet:
sys.stderr.write("\nSQL execution error in query %s at %s:" % (
sql, datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
sys.stderr.write("\nErrorcode/Error: %d - '%s'.\n" %
(errcode, str(caughte)))
sys.stderr.flush()
raise MySQLdb.OperationalError(caughte)
def insertDict(self, tblname, d, fields=None):
'''Simple function for inserting a dictionary whose keys match the fieldnames of tblname.'''
if fields == None:
fields = sorted(d.keys())
values = None
try:
SQL = 'INSERT INTO %s (%s) VALUES (%s)' % (tblname, join(
fields, ", "), join(['%s' for x in range(len(fields))], ','))
values = tuple([d[k] for k in fields])
self.locked_execute(SQL, parameters=values)
except Exception as e:
if SQL and values:
sys.stderr.write("\nSQL execution error in query '%s' %% %s at %s:" % (
SQL, values, datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
sys.stderr.write("\nError: '%s'.\n" % (str(e)))
sys.stderr.flush()
raise Exception(
"Error occurred during database insertion: '%s'." % str(e))
def insertDictIfNew(self, tblname, d, PKfields, fields=None):
'''Simple function for inserting a dictionary whose keys match the fieldnames of tblname. The function returns two values, the
second of which is a dict containing the primary keys of the record. If a record already exists then no insertion is performed and
(False, the dictionary of existing primary keys) is returned. Otherwise, the record is inserted into the database and (True, d)
is returned.'''
if type(PKfields) == type(""):
PKfields = [PKfields]
if fields == None:
fields = sorted(d.keys())
values = None
SQL = None
try:
# Search for existing records
wherestr = []
PKvalues = []
for PKfield in PKfields:
wherestr.append("%s=%%s" % PKfield)
PKvalues.append(d[PKfield])
PKfields = join(PKfields, ",")
wherestr = join(wherestr, " AND ")
existingRecord = self.locked_execute("SELECT %s FROM %s" % (
PKfields, tblname) + " WHERE %s" % wherestr, parameters=tuple(PKvalues))
if existingRecord:
return False, existingRecord[0]
SQL = 'INSERT INTO %s (%s) VALUES (%s)' % (tblname, join(
fields, ", "), join(['%s' for x in range(len(fields))], ','))
values = tuple([d[k] for k in fields])
self.locked_execute(SQL, parameters=values)
return True, d
except Exception as e:
if SQL and values:
sys.stderr.write("\nSQL execution error in query '%s' %% %s at %s:" % (
SQL, values, datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
sys.stderr.write("\nError: '%s'.\n" % (str(e)))
sys.stderr.flush()
raise Exception(
"Error occurred during database insertion: '%s'." % str(e))
def callproc(self, procname, parameters=(), cursorClass=DictCursor, quiet=False):
"""Calls a MySQL stored procedure procname. This uses DictCursor by default."""
i = 0
errcode = 0
caughte = None
if not re.match("^\s*\w+\s*$", procname):
raise Exception(
"Expected a stored procedure name in callproc but received '%s'." % procname)
while i < self.numTries:
i += 1
try:
assert (not (self.connection) or not (self.connection.open))
self._get_connection(cursorClass)
cursor = self.connection.cursor()
if type(parameters) != type(()):
parameters = (parameters,)
errcode = cursor.callproc(procname, parameters)
results = cursor.fetchall()
self.lastrowid = int(cursor.lastrowid)
cursor.close()
self._close_connection()
return results
except MySQLdb.OperationalError as e:
self._close_connection()
errcode = e[0]
caughte = e
continue
except:
self._close_connection()
traceback.print_exc()
break
if not quiet:
sys.stderr.write("\nSQL execution error call stored procedure %s at %s:" % (
procname, datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
sys.stderr.write("\nErrorcode/Error: %d - '%s'.\n" %
(errcode, str(caughte)))
sys.stderr.flush()
raise MySQLdb.OperationalError(caughte)
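
A sketch of the insert-if-new pattern above. The settings keys mirror the ones read in __init__; the table name, columns and credentials are hypothetical.

from klab.deprecated.rosettadb import DatabaseInterface

settings = {
    "SQLHost": "localhost", "SQLDatabase": "mydb", "SQLUser": "me",
    "SQLPassword": "secret", "SQLPort": 3306,
    "SQLSocket": "/var/run/mysqld/mysqld.sock",
}
db = DatabaseInterface(settings, numTries=3)

# Hypothetical table 'Job' with primary key column 'JobID'.
inserted, keys = db.insertDictIfNew("Job", {"JobID": 42, "Status": "queued"}, PKfields="JobID")
if inserted:
    print("New record created: %s" % keys)
else:
    print("Record already existed with primary key: %s" % keys)
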
|
class DatabaseInterface(object):
def __init__(self, settings, isInnoDB=True, numTries=1, host=None, db=None, user=None, passwd=None, port=None, unix_socket=None, passwdfile=None):
pass
def __del__(self):
pass
def _get_connection(self, cursorClass):
pass
def _close_connection(self):
pass
def getLastRowID(self):
pass
def locked_execute(self, sql, parameters=None, cursorClass=DictCursor, quiet=False):
'''We are lock-happy here but SQL performance is not currently an issue daemon-side.'''
pass
def execute_select(self, sql, parameters=None, cursorClass=DictCursor, quiet=False, locked=False):
pass
    def execute(self, sql, parameters=None, cursorClass=DictCursor, quiet=False, locked=False, do_commit=True):
'''Execute SQL query. This uses DictCursor by default.'''
pass
def insertDict(self, tblname, d, fields=None):
'''Simple function for inserting a dictionary whose keys match the fieldnames of tblname.'''
pass
def insertDictIfNew(self, tblname, d, PKfields, fields=None):
'''Simple function for inserting a dictionary whose keys match the fieldnames of tblname. The function returns two values, the
second of which is a dict containing the primary keys of the record. If a record already exists then no insertion is performed and
(False, the dictionary of existing primary keys) is returned. Otherwise, the record is inserted into the database and (True, d)
is returned.'''
pass
def callproc(self, procname, parameters=(), cursorClass=DictCursor, quiet=False):
'''Calls a MySQL stored procedure procname. This uses DictCursor by default.'''
pass
| 12 | 5 | 18 | 1 | 16 | 1 | 4 | 0.07 | 1 | 8 | 1 | 1 | 11 | 16 | 11 | 11 | 215 | 22 | 181 | 57 | 169 | 12 | 179 | 52 | 167 | 14 | 1 | 4 | 45 |
143,523 |
Kortemme-Lab/klab
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Kortemme-Lab_klab/klab/deprecated/rosettadb.py
|
klab.deprecated.rosettadb.ReusableDatabaseInterface
|
class ReusableDatabaseInterface(DatabaseInterface):
def __init__(self, settings, isInnoDB=True, numTries=1, host=None, db=None, user=None, passwd=None, port=None, unix_socket=None, passwdfile=None, use_utf=False):
# super(ReusableDatabaseInterface, self).__init__(settings = settings, isInnoDB = isInnoDB, numTries = numTries, host = host, db = db, user = user, passwd = passwd, port = port, unix_socket = unix_socket, passwdfile = passwdfile)
self.connection = None
self.use_utf = use_utf
self.isInnoDB = isInnoDB
self.host = host or settings["SQLHost"]
self.db = db or settings["SQLDatabase"]
self.user = user or settings["SQLUser"]
self.passwd = passwd or settings.get("SQLPassword")
self.port = port or settings["SQLPort"]
self.unix_socket = unix_socket or settings["SQLSocket"]
self.numTries = numTries
self.lastrowid = None
if (not self.passwd) and passwdfile:
if os.path.exists(passwdfile):
self.passwd = rosettahelper.readFile(passwdfile).strip()
else:
self.passwd = getpass.getpass(
"Enter password to connect to MySQL database:")
self.locked = False
self.lockstring = "LOCK TABLES %s" % join(
["%s WRITE" % list(r.values())[0] for r in self.execute("SHOW TABLES")], ", ")
self.unlockstring = "UNLOCK TABLES"
# Store a list of the table names
self.TableNames = [list(r.values())[0]
for r in self.execute("SHOW TABLES")]
# Store a hierarchy of objects corresponding to the table names and their field names
self.FieldNames = _FieldNames(None)
self.FlatFieldNames = _FieldNames(None)
tablenames = self.TableNames
for tbl in tablenames:
setattr(self.FieldNames, tbl, _FieldNames(tbl))
fieldDescriptions = self.execute("SHOW COLUMNS FROM %s" % tbl)
for field in fieldDescriptions:
fieldname = field["Field"]
setattr(getattr(self.FieldNames, tbl), fieldname, fieldname)
setattr(self.FlatFieldNames, fieldname, fieldname)
getattr(self.FieldNames, tbl).makeReadOnly()
self.FieldNames.makeReadOnly()
self.FlatFieldNames.makeReadOnly()
def close(self):
if self.connection and self.connection.open:
self.connection.close()
def checkIsClosed(self):
assert (not (self.connection) or not (self.connection.open))
def _get_connection(self):
if not (self.connection and self.connection.open):
if self.use_utf:
self.connection = MySQLdb.connect(host=self.host, db=self.db, user=self.user, passwd=self.passwd,
port=self.port, unix_socket=self.unix_socket, cursorclass=DictCursor, charset='utf8', use_unicode=True)
else:
self.connection = MySQLdb.connect(host=self.host, db=self.db, user=self.user, passwd=self.passwd,
port=self.port, unix_socket=self.unix_socket, cursorclass=DictCursor)
def locked_execute(self, sql, parameters=None, cursorClass=DictCursor, quiet=False):
'''We are lock-happy here but SQL performance is not currently an issue daemon-side.'''
return self.execute(sql, parameters=parameters, quiet=quiet, locked=True, do_commit=True)
def execute_select(self, sql, parameters=None, quiet=False, locked=False):
return self.execute(sql, parameters=parameters, quiet=quiet, locked=locked, do_commit=False)
def execute_select_StdCursor(self, sql, parameters=None, quiet=False, locked=False):
return self.execute_StdCursor(sql, parameters=parameters, quiet=quiet, locked=locked, do_commit=False)
def execute_StdCursor(self, sql, parameters=None, quiet=False, locked=False, do_commit=True):
"""Execute SQL query. This uses DictCursor by default."""
i = 0
errcode = 0
caughte = None
cursor = None
cursorClass = StdCursor
if sql.find(";") != -1 or sql.find("\\G") != -1:
# Catches some injections
raise Exception(
"The SQL command '%s' contains a semi-colon or \\G. This is a potential SQL injection." % sql)
while i < self.numTries:
i += 1
try:
self._get_connection()
# if not self.connection:
# self.connection = MySQLdb.connect(host = self.host, db = self.db, user = self.user, passwd = self.passwd, port = self.port, unix_socket = self.unix_socket, cursorclass = cursorClass)
cursor = self.connection.cursor()
if locked:
cursor.execute(self.lockstring)
self.locked = True
if parameters:
errcode = cursor.execute(sql, parameters)
else:
errcode = cursor.execute(sql)
self.lastrowid = int(cursor.lastrowid)
if do_commit and self.isInnoDB:
self.connection.commit()
results = cursor.fetchall()
if locked:
cursor.execute(self.unlockstring)
self.locked = False
cursor.close()
return results
except MySQLdb.OperationalError as e:
if cursor:
if self.locked:
cursor.execute(self.unlockstring)
self.locked = False
cursor.close()
caughte = str(e)
errcode = e[0]
continue
except Exception as e:
if cursor:
if self.locked:
cursor.execute(self.unlockstring)
self.locked = False
cursor.close()
caughte = str(e)
traceback.print_exc()
break
sleep(0.2)
if not quiet:
sys.stderr.write("\nSQL execution error in query %s at %s:" % (
sql, datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
sys.stderr.write("\nErrorcode/Error: %d - '%s'.\n" %
(errcode, str(caughte)))
sys.stderr.flush()
raise MySQLdb.OperationalError(caughte)
def execute(self, sql, parameters=None, quiet=False, locked=False, do_commit=True):
"""Execute SQL query. This uses DictCursor by default."""
i = 0
errcode = 0
caughte = None
cursor = None
cursorClass = DictCursor
if sql.find(";") != -1 or sql.find("\\G") != -1:
# Catches some injections
raise Exception(
"The SQL command '%s' contains a semi-colon or \\G. This is a potential SQL injection." % sql)
while i < self.numTries:
i += 1
try:
self._get_connection()
# if not self.connection:
# self.connection = MySQLdb.connect(host = self.host, db = self.db, user = self.user, passwd = self.passwd, port = self.port, unix_socket = self.unix_socket, cursorclass = cursorClass)
cursor = self.connection.cursor()
if locked:
cursor.execute(self.lockstring)
self.locked = True
if parameters:
errcode = cursor.execute(sql, parameters)
else:
errcode = cursor.execute(sql)
self.lastrowid = int(cursor.lastrowid)
if do_commit and self.isInnoDB:
self.connection.commit()
results = cursor.fetchall()
if locked:
cursor.execute(self.unlockstring)
self.locked = False
cursor.close()
return results
except MySQLdb.OperationalError as e:
if cursor:
if self.locked:
cursor.execute(self.unlockstring)
self.locked = False
cursor.close()
caughte = str(e)
errcode = e[0]
continue
except Exception as e:
if cursor:
if self.locked:
cursor.execute(self.unlockstring)
self.locked = False
cursor.close()
caughte = str(e)
traceback.print_exc()
break
sleep(0.2)
if not quiet:
sys.stderr.write("\nSQL execution error in query %s at %s:" % (
sql, datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
sys.stderr.write("\nErrorcode/Error: %d - '%s'.\n" %
(errcode, str(caughte)))
sys.stderr.flush()
raise MySQLdb.OperationalError(caughte)
def callproc(self, procname, parameters=(), quiet=False):
"""Calls a MySQL stored procedure procname. This uses DictCursor by default."""
i = 0
errcode = 0
caughte = None
if not re.match("^\s*\w+\s*$", procname):
raise Exception(
"Expected a stored procedure name in callproc but received '%s'." % procname)
while i < self.numTries:
i += 1
try:
self._get_connection()
cursor = self.connection.cursor()
if type(parameters) != type(()):
parameters = (parameters,)
errcode = cursor.callproc(procname, parameters)
results = cursor.fetchall()
self.lastrowid = int(cursor.lastrowid)
cursor.close()
return results
except MySQLdb.OperationalError as e:
self._close_connection()
errcode = e[0]
caughte = e
continue
except:
self._close_connection()
traceback.print_exc()
break
if not quiet:
sys.stderr.write("\nSQL execution error call stored procedure %s at %s:" % (
procname, datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
sys.stderr.write("\nErrorcode/Error: %d - '%s'.\n" %
(errcode, str(caughte)))
sys.stderr.flush()
raise MySQLdb.OperationalError(caughte)
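
A sketch of a parameterised read with the connection-reusing variant above; keeping values in the parameters tuple (rather than formatting them into the SQL string) also keeps them clear of the semicolon check in execute. The table and column names are hypothetical, and settings uses the same keys as the base class.

from klab.deprecated.rosettadb import ReusableDatabaseInterface

db = ReusableDatabaseInterface(settings)    # settings: SQLHost, SQLDatabase, SQLUser, ...
rows = db.execute_select("SELECT JobID, Status FROM Job WHERE Status=%s", parameters=("queued",))
for row in rows:                            # DictCursor: each row is a dict keyed by column name
    print(row["JobID"], row["Status"])
db.close()
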
|
class ReusableDatabaseInterface(DatabaseInterface):
def __init__(self, settings, isInnoDB=True, numTries=1, host=None, db=None, user=None, passwd=None, port=None, unix_socket=None, passwdfile=None, use_utf=False):
pass
def close(self):
pass
def checkIsClosed(self):
pass
def _get_connection(self):
pass
def locked_execute(self, sql, parameters=None, cursorClass=DictCursor, quiet=False):
'''We are lock-happy here but SQL performance is not currently an issue daemon-side.'''
pass
def execute_select(self, sql, parameters=None, quiet=False, locked=False):
pass
def execute_select_StdCursor(self, sql, parameters=None, quiet=False, locked=False):
pass
def execute_StdCursor(self, sql, parameters=None, quiet=False, locked=False, do_commit=True):
        '''Execute SQL query. This uses StdCursor rather than DictCursor.'''
pass
    def execute(self, sql, parameters=None, quiet=False, locked=False, do_commit=True):
'''Execute SQL query. This uses DictCursor by default.'''
pass
def callproc(self, procname, parameters=(), quiet=False):
'''Calls a MySQL stored procedure procname. This uses DictCursor by default.'''
pass
| 11 | 4 | 21 | 1 | 19 | 1 | 5 | 0.07 | 1 | 7 | 1 | 0 | 10 | 17 | 10 | 21 | 221 | 18 | 190 | 53 | 179 | 13 | 186 | 50 | 175 | 14 | 2 | 4 | 49 |
143,524 |
Kortemme-Lab/klab
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Kortemme-Lab_klab/klab/deprecated/rosettadb.py
|
klab.deprecated.rosettadb.RosettaDB
|
class RosettaDB:
data = {}
    store_time = 7 # default number of days the data will be stored (overridden by settings["StoreTime"] in __init__)
def close(self):
self.connection.close()
def __init__(self, settings, numTries=1, host=None, db=None, user=None, passwd=None, port=None, unix_socket=None):
host = host or settings["SQLHost"]
db = db or settings["SQLDatabase"]
user = user or settings["SQLUser"]
passwd = passwd or settings["SQLPassword"]
port = port or settings["SQLPort"]
unix_socket = unix_socket or settings["SQLSocket"]
self.connection = MySQLdb.Connection(
host=host, db=db, user=user, passwd=passwd, port=port, unix_socket=unix_socket)
self.store_time = settings["StoreTime"]
self.numTries = numTries
self.lastrowid = None
def getLastRowID(self):
return self.lastrowid
def getData4ID(self, tablename, ID):
"""get the whole row from the database and store it in a dict"""
fields = self._getFieldsInDB(tablename)
# DATE_ADD(EndDate, INTERVAL 8 DAY), TIMEDIFF(DATE_ADD(EndDate, INTERVAL 7 DAY), NOW()), TIMEDIFF(EndDate, StartDate)
SQL = '''SELECT *,DATE_ADD(EndDate, INTERVAL %s DAY),TIMEDIFF(DATE_ADD(EndDate, INTERVAL %s DAY), NOW()),TIMEDIFF(EndDate, StartDate)
FROM %s WHERE ID=%s''' % (self.store_time, self.store_time, tablename, ID)
array_data = self.execQuery(SQL)
if len(array_data) > 0:
for x in range(len(fields)):
self.data[fields[x]] = array_data[0][x]
self.data['date_expiration'] = array_data[0][-3]
self.data['time_expiration'] = array_data[0][-2]
self.data['time_computation'] = array_data[0][-1]
return self.data
def getData4cryptID(self, tablename, ID):
"""get the whole row from the database and store it in a dict"""
fields = self._getFieldsInDB(tablename)
SQL = 'SELECT *,MAKETIME(0,0,TIMESTAMPDIFF(SECOND, StartDate, EndDate)),DATE_ADD(EndDate, INTERVAL %s DAY),TIMESTAMPDIFF(DAY,DATE_ADD(EndDate, INTERVAL %s DAY), NOW()),TIMESTAMPDIFF(HOUR,DATE_ADD(EndDate, INTERVAL %s DAY), NOW()) FROM %s WHERE cryptID="%s"' % (
self.store_time, self.store_time, self.store_time, tablename, ID)
array_data = self.execQuery(SQL)
if len(array_data) > 0:
for x in range(len(fields)):
self.data[fields[x]] = array_data[0][x]
self.data['date_expiration'] = array_data[0][-3]
time_expiration = None
if array_data[0][-2] and array_data[0][-1]:
time_expiration = "%d days, %d hours" % (
abs(array_data[0][-2]), abs(array_data[0][-1]) - abs(array_data[0][-2] * 24))
self.data['time_expiration'] = time_expiration
self.data['time_computation'] = array_data[0][-4]
return self.data
def insertData(self, tablename, list_value_pairs, list_SQLCMD_pairs=None):
"""insert data into table
- ID: identifier of the updated value
- list_value_pairs: contains the table field ID and the according value
- list_SQLCMD_pairs: contains the table field ID and a SQL command
"""
fields = self._getFieldsInDB(tablename)
lst_field = []
lst_value = []
# normal field-value-pairs
for pair in list_value_pairs:
if pair[0] in fields:
lst_field.append(pair[0])
lst_value.append('"%s"' % pair[1])
else:
print("err: field %s can't be found in the table" % pair[0])
return False
# field-SQL-command-pairs: the only difference is the missing double quotes in the SQL command
if list_SQLCMD_pairs != None:
for pair in list_SQLCMD_pairs:
if pair[0] in fields:
lst_field.append(pair[0])
lst_value.append(pair[1])
else:
print("err: field %s can't be found in the table" %
pair[0])
return False
# build the command
SQL = 'INSERT INTO %s (%s) VALUES (%s)' % (
tablename, join(lst_field, ','), join(lst_value, ','))
self.execQuery(SQL)
return True
def getData4User(self, ID):
"""get all rows for a user from the database and store it to a dict
do we need this?
function is empty
"""
pass
def callproc(self, procname, parameters=(), cursorClass=DictCursor, quiet=False):
"""Calls a MySQL stored procedure procname. This uses DictCursor by default."""
i = 0
errcode = 0
caughte = None
while i < self.numTries:
i += 1
try:
cursor = self.connection.cursor(cursorClass)
if type(parameters) != type(()):
parameters = (parameters,)
errcode = cursor.callproc(procname, parameters)
results = cursor.fetchall()
self.lastrowid = int(cursor.lastrowid)
cursor.close()
return results
except MySQLdb.OperationalError as e:
errcode = e[0]
self.connection.ping()
caughte = e
continue
except:
traceback.print_exc()
break
if not quiet:
sys.stderr.write("\nSQL execution error call stored procedure %s at %s:" % (
procname, datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
sys.stderr.write("\nErrorcode/Error: %d - '%s'.\n" %
(errcode, str(caughte)))
sys.stderr.flush()
raise MySQLdb.OperationalError(caughte)
def execInnoDBQuery(self, sql, parameters=None, cursorClass=MySQLdb.cursors.Cursor):
self.connection.ping(True)
return self.execQuery(sql, parameters, cursorClass, InnoDB=True)
def execQuery(self, sql, parameters=None, cursorClass=MySQLdb.cursors.Cursor, InnoDB=False):
"""Execute SQL query."""
i = 0
errcode = 0
caughte = None
while i < self.numTries:
i += 1
try:
cursor = self.connection.cursor(cursorClass)
if parameters:
errcode = cursor.execute(sql, parameters)
else:
errcode = cursor.execute(sql)
if InnoDB:
self.connection.commit()
results = cursor.fetchall()
self.lastrowid = int(cursor.lastrowid)
cursor.close()
return results
except MySQLdb.OperationalError as e:
errcode = e[0]
# errcodes of 2006 or 2013 usually indicate a dropped connection
# errcode 1100 is an error with table locking
print(e)
self.connection.ping(True)
caughte = e
continue
except:
traceback.print_exc()
break
sys.stderr.write("\nSQL execution error in query at %s:" %
datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
sys.stderr.write("\n %s." % sql)
sys.stderr.flush()
sys.stderr.write("\nErrorcode: '%s'.\n" % (str(caughte)))
sys.stderr.flush()
raise MySQLdb.OperationalError(caughte)
# return None
def _getFieldsInDB(self, tablename):
"""get all the fields from a specific table"""
SQL = 'SELECT COLUMN_NAME FROM INFORMATION_SCHEMA.Columns where TABLE_NAME="%s"' % tablename
array_data = self.execQuery(SQL)
return [x[0] for x in array_data]
def generateHash(self, ID, debug=False):
# create a hash key for the entry we just made
sql = '''SELECT PDBComplex, PDBComplexFile, Mini, EnsembleSize, task, ProtocolParameters FROM backrub WHERE ID="%s" ''' % ID # get data
result = self.execQuery(sql)
value_string = ""
for value in result[0][0:5]: # combine it to a string
value_string += str(value)
# We sort the complex datatypes to get deterministic hashes
# todo: This works better than before (it works!) but could be cleverer.
value_string += _getSortedString(pickle.loads(result[0][5]))
hash_key = md5.new(value_string.encode('utf-8')
).hexdigest() # encode this string
sql = 'UPDATE backrub SET hashkey="%s" WHERE ID="%s"' % (
hash_key, ID) # store it in the database
if not debug:
result = self.execQuery(sql)
else:
print(sql)
return hash_key
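
A sketch of insertData above, which quotes list_value_pairs as string literals but splices list_SQLCMD_pairs into the SQL verbatim; the column choices are illustrative and must exist in the target table.

from klab.deprecated.rosettadb import RosettaDB

db = RosettaDB(settings)   # settings: SQLHost, SQLDatabase, SQLUser, SQLPassword, SQLPort, SQLSocket, StoreTime
ok = db.insertData(
    "backrub",
    list_value_pairs=[("task", "example_task"), ("Mini", "classic")],   # values are double-quoted
    list_SQLCMD_pairs=[("StartDate", "NOW()")],                         # inserted as raw SQL
)
if not ok:
    print("One of the field names was not found in the table")
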
|
class RosettaDB:
def close(self):
pass
def __init__(self, settings, numTries=1, host=None, db=None, user=None, passwd=None, port=None, unix_socket=None):
pass
def getLastRowID(self):
pass
def getData4ID(self, tablename, ID):
'''get the whole row from the database and store it in a dict'''
pass
def getData4cryptID(self, tablename, ID):
'''get the whole row from the database and store it in a dict'''
pass
def insertData(self, tablename, list_value_pairs, list_SQLCMD_pairs=None):
'''insert data into table
- ID: identifier of the updated value
- list_value_pairs: contains the table field ID and the according value
- list_SQLCMD_pairs: contains the table field ID and a SQL command
'''
pass
def getData4User(self, ID):
'''get all rows for a user from the database and store it to a dict
do we need this?
function is empty
'''
pass
def callproc(self, procname, parameters=(), cursorClass=DictCursor, quiet=False):
'''Calls a MySQL stored procedure procname. This uses DictCursor by default.'''
pass
def execInnoDBQuery(self, sql, parameters=None, cursorClass=MySQLdb.cursors.Cursor):
pass
def execQuery(self, sql, parameters=None, cursorClass=MySQLdb.cursors.Cursor, InnoDB=False):
'''Execute SQL query.'''
pass
def _getFieldsInDB(self, tablename):
'''get all the fields from a specific table'''
pass
def generateHash(self, ID, debug=False):
pass
| 13 | 7 | 16 | 2 | 12 | 2 | 3 | 0.19 | 0 | 5 | 0 | 0 | 12 | 3 | 12 | 12 | 210 | 36 | 150 | 51 | 137 | 29 | 145 | 49 | 132 | 6 | 0 | 3 | 34 |
143,525 |
Kortemme-Lab/klab
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Kortemme-Lab_klab/klab/deprecated/rosettadb.py
|
klab.deprecated.rosettadb._FieldNames
|
class _FieldNames(object):
'''This class is used to store the database structure accessed via element access rather than using raw strings or doing a dict lookup.
The class can be made read-only to prevent accidental updates.'''
def __init__(self, name):
try:
# If we are creating a new class and the class has already been made read-only then we need to remove the lock.
# It is the responsibility of the programmer to lock the class as read-only again after creation.
# A better implementation may be to append this instance to a list and change readonly_setattr to allow updates only to elements in that list.
getattr(self.__class__, 'original_setattr')
self.__class__.__setattr__ = self.__class__.original_setattr
except:
self.__class__.original_setattr = self.__class__.__setattr__
self._name = name
def makeReadOnly(self):
self.__class__.__setattr__ = self.readonly_setattr
def readonly_setattr(self, name, value):
raise Exception(
"Attempted to add/change an element of a read-only class.")
|
class _FieldNames(object):
'''This class is used to store the database structure accessed via element access rather than using raw strings or doing a dict lookup.
The class can be made read-only to prevent accidental updates.'''
def __init__(self, name):
pass
def makeReadOnly(self):
pass
def readonly_setattr(self, name, value):
pass
| 4 | 1 | 5 | 0 | 4 | 1 | 1 | 0.42 | 1 | 1 | 0 | 0 | 3 | 1 | 3 | 3 | 19 | 2 | 12 | 5 | 8 | 5 | 12 | 5 | 8 | 2 | 1 | 1 | 4 |
143,526 |
Kortemme-Lab/klab
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Kortemme-Lab_klab/klab/klfilesystem.py
|
klab.klfilesystem.DiskStats
|
class DiskStats(FileSysStats):
def __init__(self, filelocation, unit=None):
        unit = unit or self.MB
super(DiskStats, self).__init__(unit)
s = os.statvfs(filelocation)
self.stats = s
self.m_size = float(s.f_blocks * s.f_frsize)
self.m_free = float(s.f_bsize * s.f_bavail)
self.m_unit = unit or self.m_unit
def getSize(self, unit=None):
return self.m_size / (unit or self.m_unit)
def getFree(self, unit=None):
return self.m_free / (unit or self.m_unit)
def getUsagePercentage(self):
return 100 - 100 * (float(self.getFree()) / float(self.getSize()))
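
A usage sketch (POSIX-only, since os.statvfs is used); the path is arbitrary.

from klab.klfilesystem import DiskStats

disk = DiskStats("/tmp")    # any path on the filesystem of interest
print("Size: %.1f GB" % disk.getSize(DiskStats.GB))
print("Free: %.1f GB" % disk.getFree(DiskStats.GB))
print("Used: %.1f%%" % disk.getUsagePercentage())
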
|
class DiskStats(FileSysStats):
def __init__(self, filelocation, unit=None):
pass
def getSize(self, unit=None):
pass
def getFree(self, unit=None):
pass
def getUsagePercentage(self):
pass
| 5 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 1 | 2 | 0 | 0 | 4 | 4 | 4 | 7 | 19 | 4 | 15 | 10 | 10 | 0 | 15 | 10 | 10 | 1 | 2 | 0 | 4 |
143,527 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/pdb.py
|
klab.bio.pdb.PDBParsingException
|
class PDBParsingException(Exception): pass
|
class PDBParsingException(Exception):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
143,528 |
Kortemme-Lab/klab
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Kortemme-Lab_klab/klab/klfilesystem.py
|
klab.klfilesystem.FileStats
|
class FileStats(FileSysStats):
def __init__(self, filelocation, unit=None):
        unit = unit or self.MB
super(FileStats, self).__init__(unit)
s = os.stat(filelocation)
self.stats = s
self.m_size = float(s.st_size)
def getSize(self, unit=None):
return self.m_size / (unit or self.m_unit)
def getHumanReadableSize(self):
return self.getHumanReadable(self.m_size)
|
class FileStats(FileSysStats):
def __init__(self, filelocation, unit=None):
pass
def getSize(self, unit=None):
pass
def getHumanReadableSize(self):
pass
| 4 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 1 | 2 | 0 | 0 | 3 | 2 | 3 | 6 | 14 | 3 | 11 | 7 | 7 | 0 | 11 | 7 | 7 | 1 | 2 | 0 | 3 |
143,529 |
Kortemme-Lab/klab
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Kortemme-Lab_klab/klab/klfilesystem.py
|
klab.klfilesystem.FileSysStats
|
class FileSysStats(object):
B = 1
KB = 1024.0
MB = 1024.0 * KB
GB = 1024.0 * MB
TB = 1024.0 * GB
def __init__(self, unit=None):
self.m_unit = unit or self.B
def setDefaultUnit(self, unit):
self.m_unit = unit
def getHumanReadable(self, v):
if v < self.KB:
return "%b B" % v
elif v < self.MB:
return "%.2f KB" % (v / self.KB)
elif v < self.GB:
return "%.2f MB" % (v / self.MB)
elif v < self.TB:
return "%.2f GB" % (v / self.GB)
else:
return "%.2f TB" % (v / self.TB)
|
class FileSysStats(object):
def __init__(self, unit=None):
pass
def setDefaultUnit(self, unit):
pass
def getHumanReadable(self, v):
pass
| 4 | 0 | 5 | 0 | 5 | 0 | 2 | 0 | 1 | 0 | 0 | 3 | 3 | 1 | 3 | 3 | 24 | 3 | 21 | 10 | 17 | 0 | 17 | 10 | 13 | 5 | 1 | 1 | 7 |
143,530 |
Kortemme-Lab/klab
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Kortemme-Lab_klab/klab/klfilesystem.py
|
klab.klfilesystem.FolderStats
|
class FolderStats(FileSysStats):
def __init__(self, folderpath, unit=None):
        unit = unit or self.MB
super(FolderStats, self).__init__(unit)
p = subprocess.Popen(["du", "-b", folderpath],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdoutdata, stderrdata = p.communicate()
stdoutdata = stdoutdata.strip().split("\n")[-1]
if stderrdata:
            raise colortext.Exception(stderrdata)
self.m_size = float(stdoutdata.split()[0])
def getSize(self, unit=None):
return self.m_size / (unit or self.m_unit)
def getHumanReadableSize(self):
return self.getHumanReadable(self.m_size)
|
class FolderStats(FileSysStats):
def __init__(self, folderpath, unit=None):
pass
def getSize(self, unit=None):
pass
def getHumanReadableSize(self):
pass
| 4 | 0 | 5 | 1 | 4 | 0 | 1 | 0 | 1 | 3 | 0 | 0 | 3 | 1 | 3 | 6 | 20 | 6 | 14 | 7 | 10 | 0 | 14 | 7 | 10 | 2 | 2 | 1 | 4 |
143,531 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/view_commit_log.py
|
klab.view_commit_log.RosettaRevision
|
class RosettaRevision:
def __init__(self,rev_id):
self.rev_id=rev_id
self.get_sha1() # Fetch corresponding hash from testing server
self.author=None
self.status=None
self.interesting=None
self.commit_message=None
self.parents=None
self.short_sha1=None
self.date_time=None
def __repr__(self):
return '%d->%s'%(self.rev_id,self.sha1)
def get_info_from_repo(self,repo):
# Check if commit is in repo
try:
commit=repo.commit(self.sha1)
except gitdb.exc.BadObject:
print("Couldn't find sha1 %s, trying fetching" % self.sha1)
repo.remotes.origin.fetch()
try:
commit=repo.commit(self.sha1)
except gitdb.exc.BadObject:
print("Failed. Try fetching from origin and attempting again...")
raise Exception('Try fetching')
self.get_commit_message(commit)
self.get_parents(commit)
self.get_author(commit)
self.get_date_time(commit)
self.get_short_sha1(repo)
def get_short_sha1(self,repo):
self.short_sha1=repo.git.rev_parse('--short',self.sha1)
def get_date_time(self,commit):
self.date_time=datetime.fromtimestamp(commit.committed_date)
def get_author(self,commit):
self.author=str(commit.author)
def get_parents(self,commit):
self.parents=[commit.hexsha for commit in commit.parents]
def get_commit_message(self,commit):
self.commit_message=commit.message
def get_sha1(self):
self.sha1=None
if self.rev_id < first_git_rev_id:
raise Exception('Revision id %d corresponds to an SVN commit'%(self.rev_id))
soup = BeautifulSoup(urllib.request.urlopen(test_server_url+'%d'%(self.rev_id)).read())
links = soup.find_all('a')
if not len(links) > 0:
raise Exception('Test server page not parsed correctly')
for link in links:
href = link.attrs['href']
if 'github.com/RosettaCommons/main/commit' in href:
self.sha1 = get_hash_from_github_url(href)
        if self.sha1 is None:
raise Exception("Couldn't look up hash for revision id: %d"%(self.rev_id))
|
class RosettaRevision:
def __init__(self,rev_id):
pass
def __repr__(self):
pass
def get_info_from_repo(self,repo):
pass
def get_short_sha1(self,repo):
pass
def get_date_time(self,commit):
pass
def get_author(self,commit):
pass
def get_parents(self,commit):
pass
def get_commit_message(self,commit):
pass
def get_sha1(self):
pass
| 10 | 0 | 7 | 1 | 6 | 0 | 2 | 0.04 | 0 | 4 | 0 | 0 | 9 | 9 | 9 | 9 | 71 | 17 | 53 | 24 | 43 | 2 | 53 | 24 | 43 | 6 | 0 | 2 | 16 |
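get_hash_from_github_url and first_git_rev_id are referenced above but defined elsewhere in view_commit_log.py. A plausible sketch of the URL helper (an assumption, not the klab implementation) simply takes the last path component of a GitHub commit URL:

def get_hash_from_github_url(href):
    # e.g. 'https://github.com/RosettaCommons/main/commit/0a1b2c3d' -> '0a1b2c3d'
    return href.rstrip('/').split('/')[-1]

print(get_hash_from_github_url('https://github.com/RosettaCommons/main/commit/0a1b2c3d'))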
143,532 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/pdb.py
|
klab.bio.pdb.PDBValidationException
|
class PDBValidationException(Exception): pass
|
class PDBValidationException(Exception):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
143,533 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/pdbml.py
|
klab.bio.pdbml.PDBML_xyz
|
class PDBML_xyz(PDBML):
    '''A subclass which parses x, y, z coordinates (we do not use this at present).'''
def __init__(self, xml_contents, pdb_contents):
super(PDBML_xyz, self).__init__(xml_contents, pdb_contents)
self.current_atom_site = AtomSite()
def parse_atom_tag_data(self, name, tag_content):
'''Parse the atom tag data.'''
current_atom_site = self.current_atom_site
if current_atom_site.IsHETATM:
# Early out - do not parse HETATM records
return
elif name == 'PDBx:atom_site':
# We have to handle the atom_site close tag here since we jump based on self._BLOCK first in end_element
# To keep the code a little neater, I separate the logic out into end_atom_tag at the cost of one function call per atom
#self.end_atom_tag()
#'''Add the residue to the residue map.'''
self._BLOCK = None
current_atom_site = self.current_atom_site
current_atom_site.validate()
if current_atom_site.IsATOM:
# Only parse ATOM records
r, seqres, ResidueAA, Residue3AA = current_atom_site.convert_to_residue(self.modified_residues)
if r:
if not(self.pdb_id in cases_with_ACE_residues_we_can_ignore and Residue3AA == 'ACE'):
# skip certain ACE residues
full_residue_id = str(r)
if self._residues_read.get(full_residue_id):
assert(self._residues_read[full_residue_id] == (r.ResidueAA, seqres))
else:
self._residues_read[full_residue_id] = (r.ResidueAA, seqres)
self._residue_map[r.Chain] = self._residue_map.get(r.Chain, {})
assert(type(seqres) == int_type)
self._residue_map[r.Chain][str(r)] = seqres
# Record type
elif name == 'PDBx:group_PDB':
# ATOM or HETATM
if tag_content == 'ATOM':
current_atom_site.IsATOM = True
elif tag_content == 'HETATM':
current_atom_site.IsHETATM = True
else:
raise Exception("PDBx:group_PDB was expected to be 'ATOM' or 'HETATM'. '%s' read instead." % tag_content)
# Residue identifier - chain ID, residue ID, insertion code
elif name == 'PDBx:auth_asym_id':
assert(not(current_atom_site.PDBChainID))
current_atom_site.PDBChainID = tag_content
elif name == 'PDBx:auth_seq_id':
assert(not(current_atom_site.ATOMResidueID))
current_atom_site.ATOMResidueID = int(tag_content)
elif name == "PDBx:pdbx_PDB_ins_code":
if current_atom_site.ATOMResidueiCodeIsNull:
assert(len(tag_content) == 0)
else:
assert(current_atom_site.ATOMResidueiCode == ' ')
current_atom_site.ATOMResidueiCode = tag_content
elif name == "PDBx:auth_comp_id":
assert(not(current_atom_site.ATOMResidueAA))
current_atom_site.ATOMResidueAA = tag_content
elif name == "PDBx:label_seq_id":
assert(not(current_atom_site.SEQRESIndex))
current_atom_site.SEQRESIndex = int(tag_content)
elif name == "PDBx:label_comp_id":
assert(not(current_atom_site.ATOMSeqresResidueAA))
current_atom_site.ATOMSeqresResidueAA = tag_content
# Coordinates
elif name == "PDBx:Cartn_x":
assert(not(current_atom_site.x))
current_atom_site.x = float(tag_content)
elif name == "PDBx:Cartn_y":
assert(not(current_atom_site.y))
current_atom_site.y = float(tag_content)
elif name == "PDBx:Cartn_z":
assert(not(current_atom_site.z))
current_atom_site.z = float(tag_content)
|
class PDBML_xyz(PDBML):
    '''A subclass which parses x, y, z coordinates (we do not use this at present).'''
def __init__(self, xml_contents, pdb_contents):
pass
def parse_atom_tag_data(self, name, tag_content):
'''Parse the atom tag data.'''
pass
| 3 | 2 | 39 | 3 | 30 | 6 | 11 | 0.21 | 1 | 7 | 1 | 0 | 2 | 3 | 2 | 14 | 82 | 8 | 61 | 9 | 58 | 13 | 46 | 8 | 43 | 20 | 2 | 5 | 21 |
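A stripped-down sketch of the per-tag dispatch used by parse_atom_tag_data above; _MiniAtomSite and handle_tag are hypothetical stand-ins for the real AtomSite and SAX machinery in klab.bio.pdbml.

class _MiniAtomSite(object):
    def __init__(self):
        self.x = self.y = self.z = None

def handle_tag(site, name, tag_content):
    # Coordinate tags are converted to floats and stored on the current atom site.
    if name == 'PDBx:Cartn_x':
        site.x = float(tag_content)
    elif name == 'PDBx:Cartn_y':
        site.y = float(tag_content)
    elif name == 'PDBx:Cartn_z':
        site.z = float(tag_content)

site = _MiniAtomSite()
for name, content in [('PDBx:Cartn_x', '1.0'), ('PDBx:Cartn_y', '2.5'), ('PDBx:Cartn_z', '-0.3')]:
    handle_tag(site, name, content)
print(site.x, site.y, site.z)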
143,534 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/pdb.py
|
klab.bio.pdb.RequestedLigandsWithoutParsingException
|
class RequestedLigandsWithoutParsingException(Exception): pass
|
class RequestedLigandsWithoutParsingException(Exception):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
143,535 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/spackle.py
|
klab.bio.spackle.Spackler
|
class Spackler(Bonsai):
def __init__(self, pdb_content, buffer = 0.05, bin_size = 5.1, safe_mode = True, FASTA_line_length = 80):
super(Spackler, self).__init__(pdb_content, buffer = buffer, bin_size = bin_size, safe_mode = safe_mode, FASTA_line_length = FASTA_line_length)
self.pdb = PDB(pdb_content)
def add_backbone_atoms_linearly_from_loop_filepaths(self, loop_json_filepath, fasta_filepath, residue_ids):
'''A utility wrapper around add_backbone_atoms_linearly. Adds backbone atoms in a straight line from the first to
the last residue of residue_ids.
loop_json_filepath is a path to a JSON file using the JSON format for Rosetta loops files. This file identifies
the insertion points of the sequence.
fasta_filepath is a path to a FASTA file with one sequence. This sequence will be used as the sequence for
the inserted residues (between the start and stop residues defined in loop_json_filepath).
residue_ids is a list of PDB chain residues (columns 22-27 of ATOM lines in the PDB format). It is assumed that
        they are sequential although the logic does not depend on that. This list should have the same length as the
sequence identified in the FASTA file.
'''
# Parse the loop file
loop_def = json.loads(read_file(loop_json_filepath))
assert(len(loop_def['LoopSet']) == 1)
start_res = loop_def['LoopSet'][0]['start']
end_res = loop_def['LoopSet'][0]['stop']
start_res = PDB.ChainResidueID2String(start_res['chainID'], (str(start_res['resSeq']) + start_res['iCode']).strip())
end_res = PDB.ChainResidueID2String(end_res['chainID'], (str(end_res['resSeq']) + end_res['iCode']).strip())
assert(start_res in residue_ids)
assert(end_res in residue_ids)
# Parse the FASTA file and extract the sequence
f = FASTA(read_file(fasta_filepath), strict = False)
assert(len(f.get_sequences()) == 1)
insertion_sequence = f.sequences[0][2]
if not len(residue_ids) == len(insertion_sequence):
raise Exception('The sequence in the FASTA file must have the same length as the list of residues.')
# Create the insertion sequence (a sub-sequence of the FASTA sequence)
# The post-condition is that the start and end residues are the first and last elements of kept_residues respectively
kept_residues = []
insertion_residue_map = {}
in_section = False
found_end = False
for x in range(len(residue_ids)):
residue_id = residue_ids[x]
if residue_id == start_res:
in_section = True
if in_section:
kept_residues.append(residue_id)
insertion_residue_map[residue_id] = insertion_sequence[x]
if residue_id == end_res:
found_end = True
break
if not kept_residues:
raise Exception('The insertion sequence is empty (check the start and end residue ids).')
if not found_end:
raise Exception('The end residue was not encountered when iterating over the insertion sequence (check the start and end residue ids).')
# Identify the start and end Residue objects
try:
start_res = self.residues[start_res[0]][start_res[1:]]
end_res = self.residues[end_res[0]][end_res[1:]]
except Exception as e:
raise Exception('The start or end residue could not be found in the PDB file.')
return self.add_backbone_atoms_linearly(start_res, end_res, kept_residues, insertion_residue_map)
def add_backbone_atoms_linearly(self, start_residue, end_residue, insertion_residues, insertion_residue_map):
'''This function returns the PDB content for a structure with the missing backbone atoms - i.e. it adds the
N, Ca, C atoms spaced evenly between the last existing backbone atom of start_residue and the first existing
backbone atom of end_residue. O-atoms are not currently added although we could arbitrarily add them at 90
degrees to the plane: If resiC_x + x = resjC_x and resiC_y + y = resjC_y, i + 1 = j, then the resiO atom could
have coordinates (resiC_x - y, resiC_y + x).
Adds backbone atoms for insertion_residues in a straight line from start_residue to end_residue. This is useful
for some computational methods which do not require the atoms to be in the correct coordinates but expect N, CA, and C backbone atoms
to exist for all residues (O-atoms are currently ignored here).
start_residue and end_residue are Residue objects. insertion_residues is a list of PDB residue IDs (columns 22-27
of ATOM lines in the PDB format). insertion_residue_map is a mapping from PDB residue IDs to 1-letter amino acid
codes. The keys of insertion_residue_map must be insertion_residues.
start_residue and end_residue must exist in insertion_residues and the PDB file. There is no technical requirement for this;
we just do not handle the alternate case yet. residue_ids are presumed to be ordered in sequence (N -> C) order.
Existing N, CA, and C atoms corresponding to these two residues will be retained as long as their atoms which
connect to the side of those residues not identified by residue_ids are present e.g.
- if the CA atom of the first residue is present, it will be kept as long as the N atom is present and regardless of whether the C atom is present
- if the CA atom of the last residue is present, it will be kept as long as the C atom is present and regardless of whether the N atom is present
All O atoms of residues in residue_ids are discarded. ANISOU records corresponding to any removed ATOMS will be removed.
1st 2nd n-1 n
... N-CA-C- N-CA-C- ... N-CA-C- N-CA-C- ..
Note: This function currently only supports canonical amino acids.
'''
assert(sorted(insertion_residues) == sorted(insertion_residue_map.keys()))
assert(start_residue.chain + start_residue.residue_id in insertion_residues)
assert(end_residue.chain + end_residue.residue_id in insertion_residues)
assert(start_residue.chain == end_residue.chain)
atoms_to_remove = []
discarded_atoms = []
# Remove atoms from the segment's N-terminus residue
# if N and CA and C, keep C else discard C
start_res_atoms_ids = self.get_atom_serial_numbers_from_pdb_residue_ids([insertion_residues[0]])
start_res_atoms = [self.atoms[id] for id in start_res_atoms_ids]
start_res_atom_types = [a.name for a in start_res_atoms]
start_atoms = [None, None, None]
for a in start_res_atoms:
if a.name == 'N': start_atoms[0] = a
elif a.name == 'CA': start_atoms[1] = a
elif a.name == 'C': start_atoms[2] = a
else: discarded_atoms.append(a.serial_number)
if 'C' in start_res_atom_types and 'CA' not in start_res_atom_types:
            discarded_atoms.append(start_atoms[2].serial_number)
start_atoms[2] = None
if not start_atoms[0]:
raise Exception('The N atom for the start residue must exist.')
start_atoms = [a for a in start_atoms if a]
start_atom = start_atoms[-1]
# Remove atoms from the segment's C-terminus residue
# if N and CA and C, keep N else discard N
end_res_atoms_ids = self.get_atom_serial_numbers_from_pdb_residue_ids([insertion_residues[-1]])
end_res_atoms = [self.atoms[id] for id in end_res_atoms_ids]
end_res_atom_types = [a.name for a in end_res_atoms]
end_atoms = [None, None, None]
for a in end_res_atoms:
if a.name == 'N': end_atoms[0] = a
elif a.name == 'CA': end_atoms[1] = a
elif a.name == 'C': end_atoms[2] = a
else: discarded_atoms.append(a.serial_number)
if 'N' in end_res_atom_types and 'CA' not in end_res_atom_types:
            discarded_atoms.append(end_atoms[0].serial_number)
end_atoms[0] = None
if not end_atoms[-1]:
raise Exception('The C atom for the end residue must exist.')
end_atoms = [a for a in end_atoms if a]
end_atom = end_atoms[0]
# Remove all atoms from the remainder of the segment
discarded_atoms += self.get_atom_serial_numbers_from_pdb_residue_ids(insertion_residues[1:-1])
# Remove the atoms from the PDB
bonsai_pdb_content, cutting_pdb_content, PSE_file, PSE_script = self.prune(set(discarded_atoms), generate_pymol_session = False)
self.__init__(bonsai_pdb_content, buffer = self.buffer, bin_size = self.bin_size, safe_mode = self.safe_mode)
# Create a list of all N, CA, C atoms for the insertion residues not including those present in the start and end residue
# Find last of N CA C of first residue
# Find last of N CA C of first residue
new_atoms = []
assert(len(start_atoms) >= 1) # N is guaranteed to exist
if len(start_atoms) == 2:
# add a C atom
residue_id = insertion_residues[0]
residue_type = insertion_residue_map[residue_id]
assert(residue_type != 'X' and residue_type in residue_type_1to3_map)
new_atoms.append((residue_id, residue_type_1to3_map[residue_type], 'C'))
for insertion_residue in insertion_residues[1:-1]:
# add an N, CA, C atoms
residue_type = insertion_residue_map[insertion_residue]
assert(residue_type != 'X' and residue_type in residue_type_1to3_map)
residue_type = residue_type_1to3_map[residue_type]
new_atoms.append((insertion_residue, residue_type, 'N'))
new_atoms.append((insertion_residue, residue_type, 'CA'))
new_atoms.append((insertion_residue, residue_type, 'C'))
assert(len(end_atoms) >= 1) # C is guaranteed to exist
if len(end_atoms) == 2:
# add an N atom
residue_id = insertion_residues[-1]
residue_type = insertion_residue_map[residue_id]
assert(residue_type != 'X' and residue_type in residue_type_1to3_map)
new_atoms.append((residue_id, residue_type_1to3_map[residue_type], 'N'))
return self.add_atoms_linearly(start_atom, end_atom, new_atoms)
def add_atoms_linearly(self, start_atom, end_atom, new_atoms, jitterbug = 0.2):
'''A low-level function which adds new_atoms between start_atom and end_atom. This function does not validate the
input i.e. the calling functions are responsible for ensuring that the insertion makes sense.
Returns the PDB file content with the new atoms added. These atoms are given fresh serial numbers, starting
from the first serial number larger than the current serial numbers i.e. the ATOM serial numbers do not now
necessarily increase in document order.
The jitter adds some X, Y, Z variability to the new atoms. This is important in the Rosetta software suite when
placing backbone atoms as colinearly placed atoms will break the dihedral angle calculations (the dihedral angle
over 4 colinear atoms is undefined).
'''
atom_name_map = {
'CA' : ' CA ',
'C' : ' C ',
'N' : ' N ',
'O' : ' O ',
}
assert(start_atom.residue.chain == end_atom.residue.chain)
chain_id = start_atom.residue.chain
# Initialize steps
num_new_atoms = float(len(new_atoms))
X, Y, Z = start_atom.x, start_atom.y, start_atom.z
x_step = (end_atom.x - X) / (num_new_atoms + 1.0)
y_step = (end_atom.y - Y) / (num_new_atoms + 1.0)
z_step = (end_atom.z - Z) / (num_new_atoms + 1.0)
D = math.sqrt(x_step * x_step + y_step * y_step + z_step * z_step)
jitter = 0
if jitterbug:
jitter = (((x_step + y_step + z_step) / 3.0) * jitterbug) / D
new_lines = []
next_serial_number = max(sorted(self.atoms.keys())) + 1
round = 0
for new_atom in new_atoms:
X, Y, Z = X + x_step, Y + y_step, Z + z_step
if jitter:
if round % 3 == 0:
X, Y = X + jitter, Y - jitter
elif round % 3 == 1:
Y, Z = Y + jitter, Z - jitter
elif round % 3 == 2:
Z, X = Z + jitter, X - jitter
round += 1
residue_id, residue_type, atom_name = new_atom
assert(len(residue_type) == 3)
assert(len(residue_id) == 6)
new_lines.append('ATOM {0} {1} {2} {3} {4:>8.3f}{5:>8.3f}{6:>8.3f} 1.00 0.00 '.format(str(next_serial_number).rjust(5), atom_name_map[atom_name], residue_type, residue_id, X, Y, Z))
next_serial_number += 1
new_pdb = []
in_start_residue = False
for l in self.indexed_lines:
if l[0] and l[3].serial_number == start_atom.serial_number:
in_start_residue = True
if in_start_residue and l[3].serial_number != start_atom.serial_number:
new_pdb.extend(new_lines)
#colortext.warning('\n'.join(new_lines))
in_start_residue = False
if l[0]:
#print(l[2])
new_pdb.append(l[2])
else:
#print(l[1])
new_pdb.append(l[1])
return '\n'.join(new_pdb)
|
class Spackler(Bonsai):
def __init__(self, pdb_content, buffer = 0.05, bin_size = 5.1, safe_mode = True, FASTA_line_length = 80):
pass
def add_backbone_atoms_linearly_from_loop_filepaths(self, loop_json_filepath, fasta_filepath, residue_ids):
'''A utility wrapper around add_backbone_atoms_linearly. Adds backbone atoms in a straight line from the first to
the last residue of residue_ids.
loop_json_filepath is a path to a JSON file using the JSON format for Rosetta loops files. This file identifies
the insertion points of the sequence.
fasta_filepath is a path to a FASTA file with one sequence. This sequence will be used as the sequence for
the inserted residues (between the start and stop residues defined in loop_json_filepath).
residue_ids is a list of PDB chain residues (columns 22-27 of ATOM lines in the PDB format). It is assumed that
        they are sequential although the logic does not depend on that. This list should have the same length as the
sequence identified in the FASTA file.
'''
pass
    def add_backbone_atoms_linearly(self, start_residue, end_residue, insertion_residues, insertion_residue_map):
'''This function returns the PDB content for a structure with the missing backbone atoms - i.e. it adds the
N, Ca, C atoms spaced evenly between the last existing backbone atom of start_residue and the first existing
backbone atom of end_residue. O-atoms are not currently added although we could arbitrarily add them at 90
degrees to the plane: If resiC_x + x = resjC_x and resiC_y + y = resjC_y, i + 1 = j, then the resiO atom could
have coordinates (resiC_x - y, resiC_y + x).
Adds backbone atoms for insertion_residues in a straight line from start_residue to end_residue. This is useful
for some computational methods which do not require the atoms to be in the correct coordinates but expect N, CA, and C backbone atoms
to exist for all residues (O-atoms are currently ignored here).
start_residue and end_residue are Residue objects. insertion_residues is a list of PDB residue IDs (columns 22-27
of ATOM lines in the PDB format). insertion_residue_map is a mapping from PDB residue IDs to 1-letter amino acid
codes. The keys of insertion_residue_map must be insertion_residues.
start_residue and end_residue must exist in insertion_residues and the PDB file. There is no technical requirement for this;
we just do not handle the alternate case yet. residue_ids are presumed to be ordered in sequence (N -> C) order.
Existing N, CA, and C atoms corresponding to these two residues will be retained as long as their atoms which
connect to the side of those residues not identified by residue_ids are present e.g.
- if the CA atom of the first residue is present, it will be kept as long as the N atom is present and regardless of whether the C atom is present
- if the CA atom of the last residue is present, it will be kept as long as the C atom is present and regardless of whether the N atom is present
All O atoms of residues in residue_ids are discarded. ANISOU records corresponding to any removed ATOMS will be removed.
1st 2nd n-1 n
... N-CA-C- N-CA-C- ... N-CA-C- N-CA-C- ..
Note: This function currently only supports canonical amino acids.
'''
pass
def add_atoms_linearly(self, start_atom, end_atom, new_atoms, jitterbug = 0.2):
'''A low-level function which adds new_atoms between start_atom and end_atom. This function does not validate the
input i.e. the calling functions are responsible for ensuring that the insertion makes sense.
Returns the PDB file content with the new atoms added. These atoms are given fresh serial numbers, starting
from the first serial number larger than the current serial numbers i.e. the ATOM serial numbers do not now
necessarily increase in document order.
The jitter adds some X, Y, Z variability to the new atoms. This is important in the Rosetta software suite when
placing backbone atoms as colinearly placed atoms will break the dihedral angle calculations (the dihedral angle
over 4 colinear atoms is undefined).
'''
pass
| 5 | 3 | 61 | 8 | 38 | 16 | 9 | 0.42 | 1 | 8 | 2 | 0 | 4 | 3 | 4 | 28 | 254 | 38 | 154 | 52 | 149 | 64 | 148 | 50 | 143 | 16 | 3 | 3 | 37 |
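A self-contained sketch of the linear coordinate interpolation (with a small jitter to avoid exactly colinear atoms) performed in add_atoms_linearly above. The function and variable names are hypothetical; only the arithmetic is taken from the code.

import math

def interpolate_points(start, end, n, jitterbug=0.2):
    # Place n points on the straight line from start to end (start != end assumed).
    (x0, y0, z0), (x1, y1, z1) = start, end
    xs, ys, zs = (x1 - x0) / (n + 1.0), (y1 - y0) / (n + 1.0), (z1 - z0) / (n + 1.0)
    d = math.sqrt(xs * xs + ys * ys + zs * zs)
    jitter = (((xs + ys + zs) / 3.0) * jitterbug) / d if jitterbug else 0
    points, (X, Y, Z) = [], (x0, y0, z0)
    for i in range(n):
        X, Y, Z = X + xs, Y + ys, Z + zs
        if jitter:
            # Rotate which pair of coordinates gets nudged so no four points are colinear.
            if i % 3 == 0:
                X, Y = X + jitter, Y - jitter
            elif i % 3 == 1:
                Y, Z = Y + jitter, Z - jitter
            else:
                Z, X = Z + jitter, X - jitter
        points.append((X, Y, Z))
    return points

print(interpolate_points((0.0, 0.0, 0.0), (4.0, 0.0, 0.0), 3))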
143,536 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/test.py
|
klab.bio.test.SpecificException
|
class SpecificException(Exception): pass
|
class SpecificException(Exception):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
143,537 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/uniprot.py
|
klab.bio.uniprot.EmptyUniProtACXMLException
|
class EmptyUniProtACXMLException(Exception): pass
|
class EmptyUniProtACXMLException(Exception):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
143,538 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/uniprot.py
|
klab.bio.uniprot.ProteinSubsection
|
class ProteinSubsection(object):
def __init__(self, att_type, description, begin_position, end_position):
self.att_type = att_type
self.description = description
assert(type(begin_position) == type(1))
assert(type(end_position) == type(1))
self.begin_position = begin_position
self.end_position = end_position
#self.parent = None - Unused at present
def to_db(self):
return {
'StartResidue' : self.begin_position,
'EndResidue' : self.end_position,
'Type' : self.att_type,
'Description' : self.description,
}
def __repr__(self):
s = []
s.append('%s-%s: ' % (str(self.begin_position).rjust(5), str(self.end_position).ljust(5)))
s.append(self.att_type)
if self.description:
s.append(' (%s)' % self.description)
return "".join(s)
def __lt__(self, other):
return self.begin_position < other.begin_position
def __le__(self, other):
return self.begin_position <= other.begin_position
def __eq__(self, other):
return self.att_type == other.att_type and self.description == other.description and self.begin_position == other.begin_position and self.end_position == other.end_position
|
class ProteinSubsection(object):
def __init__(self, att_type, description, begin_position, end_position):
pass
def to_db(self):
pass
def __repr__(self):
pass
def __lt__(self, other):
pass
def __le__(self, other):
pass
def __eq__(self, other):
pass
| 7 | 0 | 5 | 0 | 5 | 0 | 1 | 0.04 | 1 | 2 | 0 | 0 | 6 | 4 | 6 | 6 | 35 | 6 | 28 | 12 | 21 | 1 | 23 | 12 | 16 | 2 | 1 | 1 | 7 |
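A short usage sketch (hypothetical values), assuming klab is installed so that the class above imports cleanly; instances sort by begin_position thanks to the __lt__/__le__ methods defined above.

from klab.bio.uniprot import ProteinSubsection

a = ProteinSubsection('chain', 'Lysozyme C', 19, 147)
b = ProteinSubsection('signal peptide', 'Signal', 1, 18)
print(sorted([a, b])[0].att_type)   # -> 'signal peptide'
print(a.to_db()['StartResidue'])    # -> 19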
143,539 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/uniprot.py
|
klab.bio.uniprot.ProteinSubsectionHolder
|
class ProteinSubsectionHolder(object):
def __init__(self, _length):
self.sections = []
self._length = _length
#self.add('SEQUENCE', 'WHOLE SEQUENCE', 1, _length)
#def verify(self):
# pass
def add(self, att_type, description, begin_position, end_position):
# This is fairly brute force. Another implementation is to allow all pairs to be added then have a verify function.
# The verify function would sort sections by (ascending beginning index, descending end index).
# Iterating through the sections (variable s), we would see if a section is contained within a 'parent' (we set the whole sequence to be the root parent).
# if so then we tie a child/parent link and set the current section s to be the new parent
# if there is an overlap, we raise an exception
# if the sections are disparate, we go up the tree to find the first parent which contains s, tie the links, then make s the new parent
# A successful verification should partition the sequence into subsequences inside subsequences.
new_section = ProteinSubsection(att_type, description, begin_position, end_position)
for s in self.sections:
# Sort by beginning index.
# Valid pairs are then: i) 2nd section is disparate; or ii) 2nd section in contained in the first.
# We check for the second case below.
s_pair = sorted([new_section, s], key=lambda x: (x.begin_position, -x.end_position))
overlap = False
assert(s_pair[0].begin_position <= s_pair[1].begin_position)
if (s_pair[0].begin_position <= s_pair[1].begin_position <= s_pair[0].end_position) and (s_pair[1].end_position > s_pair[0].end_position):
overlap = True
elif (s_pair[0].begin_position <= s_pair[1].end_position <= s_pair[0].end_position) and (s_pair[1].begin_position < s_pair[0].begin_position):
overlap = True
if overlap:
#colortext.error("\n1: Overlap in protein sections.\nExisting sections:\n%s\nNew section:\n%s" % (s_pair[0], s_pair[1]))
raise ProteinSubsectionOverlapException("\n1: Overlap in protein sections.\nExisting sections:\n%s\nNew section:\n%s" % (s_pair[0], s_pair[1]))
self.sections.append(new_section)
self.sections = sorted(self.sections, key=lambda x:(x.begin_position, -x.end_position))
def __add__(self, other):
assert(self._length == other._length)
holder = ProteinSubsectionHolder(self._length)
for s in self.sections:
holder.add(s.att_type, s.description, s.begin_position, s.end_position)
for o in other.sections:
already_exists = False
for s in self.sections:
if o.begin_position == s.begin_position and o.end_position == s.end_position:
assert(o.att_type == s.att_type)
if o.description and s.description:
if o.description != s.description:
# Ignore case differences for equality but favor the case where the first letter is capitalized
if o.description.upper() != s.description.upper():
#colortext.error("\nSubsection descriptions do not match for '%s', range %d-%d.\nFirst description: '%s'\nSecond description: '%s'\n" % (s.att_type, s.begin_position, s.end_position, o.description, s.description))
raise ProteinSubsectionOverlapException("\nSubsection descriptions do not match for '%s', range %d-%d.\nFirst description: '%s'\nSecond description: '%s'\n" % (s.att_type, s.begin_position, s.end_position, o.description, s.description))
elif o.description[0].upper() == o.description[0]:
s.description = o.description
else:
o.description = s.description
else:
o.description = o.description or s.description
s.description = o.description or s.description
already_exists = True
#elif o.begin_position <= s.begin_position <= o.end_position:
# raise ProteinSubsectionOverlapException("\n2: Overlap in protein sections.\nFirst set of sections:\n%s\nSecond set of sections:\n%s" % (s, o))
#elif o.end_position <= s.end_position <= o.end_position:
# raise ProteinSubsectionOverlapException("\n3: Overlap in protein sections.\nFirst set of sections:\n%s\nSecond set of sections:\n%s" % (s, o))
if not(already_exists):
holder.add(o.att_type, o.description, o.begin_position, o.end_position)
return holder
def __len__(self):
return len(self.sections)
def __repr__(self):
return "\n".join([str(s) for s in self.sections])
|
class ProteinSubsectionHolder(object):
def __init__(self, _length):
pass
def add(self, att_type, description, begin_position, end_position):
pass
def __add__(self, other):
pass
def __len__(self):
pass
def __repr__(self):
pass
| 6 | 0 | 13 | 0 | 9 | 3 | 4 | 0.43 | 1 | 3 | 2 | 0 | 5 | 2 | 5 | 5 | 72 | 5 | 47 | 16 | 41 | 20 | 43 | 16 | 37 | 10 | 1 | 6 | 18 |
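A standalone sketch of the overlap test performed in ProteinSubsectionHolder.add above: after sorting two ranges by (begin, -end), the second range may only be disjoint from or fully contained in the first. The function name is hypothetical.

def ranges_overlap(r1, r2):
    # Sort so the first range starts earliest (ties broken by the longer range first).
    first, second = sorted([r1, r2], key=lambda r: (r[0], -r[1]))
    return first[0] <= second[0] <= first[1] and second[1] > first[1]

print(ranges_overlap((1, 10), (5, 20)))   # True  (partial overlap)
print(ranges_overlap((1, 10), (3, 7)))    # False (containment is allowed)
print(ranges_overlap((1, 10), (11, 20)))  # False (disjoint)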
143,540 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/uniprot.py
|
klab.bio.uniprot.ProteinSubsectionOverlapException
|
class ProteinSubsectionOverlapException(colortext.Exception): pass
|
class ProteinSubsectionOverlapException(colortext.Exception):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 1 | 0 | 0 |
143,541 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/uniprot.py
|
klab.bio.uniprot.UniParcEntry
|
class UniParcEntry(object):
def __init__(self, UniParcID, UniProtACs = None, UniProtIDs = None, cache_dir = None, silent = False):
if cache_dir and not(os.path.exists(os.path.abspath(cache_dir))):
raise Exception("The cache directory %s does not exist." % os.path.abspath(cache_dir))
self.UniParcID = UniParcID
self.cache_dir = cache_dir
self.recommended_name = None
self.silent = silent
# Get AC mapping
if not UniProtACs or UniParcID=='UPI0000047CA3': # todo: is this UPI0000047CA3 special handling necessary?
mapping = uniprot_map('UPARC', 'ACC', [UniParcID], cache_dir = cache_dir, silent = silent)[UniParcID]
self.UniProtACs = mapping
else:
self.UniProtACs = UniProtACs
# Get ID mapping
if not UniProtIDs:
mapping = uniprot_map('UPARC', 'ID', [UniParcID], cache_dir = cache_dir, silent = silent)[UniParcID]
self.UniProtIDs = mapping
else:
self.UniProtIDs = UniProtIDs
# Get FASTA
cached_filepath = None
if cache_dir:
cached_filepath = os.path.join(cache_dir, '%s.fasta' % UniParcID)
if cached_filepath and os.path.exists(cached_filepath):
fasta = read_file(cached_filepath)
else:
if not silent:
print("Getting FASTA file")
url = 'http://www.uniprot.org/uniparc/%s.fasta' % UniParcID
fasta = http_get(url)
if cached_filepath:
write_file(cached_filepath, fasta)
# Get sequence
header = fasta.split("\n")[0].split()
assert(len(header) == 2)
assert(header[0] == ">%s" % UniParcID)
assert(header[1].startswith("status="))
sequence = "".join(map(string.strip, fasta.split("\n")[1:]))
self.sequence = sequence
# Get atomic mass (and sequence again)
self.atomic_mass = None
self.CRC64Digest = None
recommended_names = []
alternative_names = []
submitted_names = []
self.AC_entries = {}
subsections = ProteinSubsectionHolder(len(sequence))
for UniProtAC in self.UniProtACs:
#colortext.write("%s\n" % UniProtAC, 'cyan')
try:
AC_entry = UniProtACEntry(UniProtAC, cache_dir = self.cache_dir, silent = silent)
except EmptyUniProtACXMLException:
continue
self.AC_entries[UniProtAC] = AC_entry
# Mass sanity check
if self.atomic_mass != None:
assert(self.atomic_mass == AC_entry.atomic_mass)
self.atomic_mass = AC_entry.atomic_mass
# Sequence sanity check
assert(self.sequence == AC_entry.sequence)
# CRC 64 sanity check
if self.CRC64Digest != None:
assert(self.CRC64Digest == AC_entry.CRC64Digest)
self.CRC64Digest = AC_entry.CRC64Digest
assert(CRC64.CRC64digest(self.sequence) == self.CRC64Digest)
if AC_entry.recommended_name:
found = False
for n in recommended_names:
if n[0] == AC_entry.recommended_name:
n[1] += 1
found = True
break
if not found:
recommended_names.append([AC_entry.recommended_name, 1])
for alternative_name in AC_entry.alternative_names:
found = False
for n in alternative_names:
if n[0] == alternative_name:
n[1] += 1
found = True
break
if not found:
alternative_names.append([alternative_name, 1])
for submitted_name in AC_entry.submitted_names:
found = False
for n in submitted_names:
if n[0] == submitted_name:
n[1] += 1
found = True
break
if not found:
submitted_names.append([submitted_name, 1])
subsections += AC_entry.subsections
self.subsections = subsections
assert(len(set(UniParcMergedRecommendedNamesRemap.keys()).intersection(set(UniParcMergedSubmittedNamesRemap.keys()))) == 0)
if UniParcID in UniParcMergedRecommendedNamesRemap:
recommended_names = [[UniParcMergedRecommendedNamesRemap[UniParcID], 1]]
elif UniParcID in UniParcMergedSubmittedNamesRemap:
recommended_names = [[UniParcMergedSubmittedNamesRemap[UniParcID], 1]]
if not silent:
colortext.write('Subsections\n', 'orange')
#print(subsections)
if len(recommended_names) == 0 and len(alternative_names) == 0 and len(submitted_names) == 0:
raise UniParcEntryStandardizationException("UniParcID %s has no recommended names." % UniParcID)
elif len(recommended_names) == 0:
s = ["UniParcID %s has no recommended names.\n" % UniParcID]
if alternative_names:
s.append("It has the following alternative names:")
for tpl in sorted(alternative_names, key=lambda x:-x[1]):
s.append("\n count=%s: %s" % (str(tpl[1]).ljust(5), tpl[0]['Name']))
if tpl[0]['Short names']:
s.append(" (short names: %s)" % ",".join(tpl[0]['Short names']))
if tpl[0]['EC numbers']:
s.append(" (EC numbers: %s)" % ",".join(tpl[0]['EC numbers']))
if submitted_names:
s.append("It has the following submitted names:")
for tpl in sorted(submitted_names, key=lambda x:-x[1]):
s.append("\n count=%s: %s" % (str(tpl[1]).ljust(5), tpl[0]['Name']))
if tpl[0]['Short names']:
s.append(" (short names: %s)" % ",".join(tpl[0]['Short names']))
if tpl[0]['EC numbers']:
s.append(" (EC numbers: %s)" % ",".join(tpl[0]['EC numbers']))
#raise UniParcEntryStandardizationException("".join(s))
elif len(recommended_names) > 1:
s = ["UniParcID %s has multiple recommended names: " % UniParcID]
for tpl in sorted(recommended_names, key=lambda x:-x[1]):
s.append("\n count=%s: %s" % (str(tpl[1]).ljust(5), tpl[0]['Name']))
if tpl[0]['Short names']:
s.append(" (short names: %s)" % ",".join(tpl[0]['Short names']))
if tpl[0]['EC numbers']:
s.append(" (EC numbers: %s)" % ",".join(tpl[0]['EC numbers']))
raise UniParcEntryStandardizationException("".join(s))
#assert(len(recommended_names) == 1) # todo: this is not always available
#print(recommended_names)
self.recommended_name = None
if len(recommended_names) == 1:
self.recommended_name = recommended_names[0][0]
self.get_organisms()
def _get_XML(self):
uparc_xml = None
cached_filepath = None
if self.cache_dir:
cached_filepath = os.path.join(self.cache_dir, '%s.xml' % self.UniParcID)
if cached_filepath and os.path.exists(cached_filepath):
uparc_xml = read_file(cached_filepath)
else:
if not self.silent:
colortext.write("Retrieving %s\n" % self.UniParcID, "cyan")
url = 'http://www.uniprot.org/uniparc/%s.xml' % self.UniParcID
uparc_xml = http_get(url)
if cached_filepath:
write_file(cached_filepath, uparc_xml)
self.XML = uparc_xml
# Get DOM
self._dom = parseString(uparc_xml)
main_tags = self._dom.getElementsByTagName("uniparc")
assert(len(main_tags) == 1)
entry_tags = main_tags[0].getElementsByTagName("entry")
assert(len(entry_tags) == 1)
self.entry_tag = entry_tags[0]
def _get_active_ACCs(self):
entry_tag = self.entry_tag
db_reference_tags = [child for child in entry_tag.childNodes if child.nodeType == child.ELEMENT_NODE and child.tagName == 'dbReference']
ACCs = []
for db_reference_tag in db_reference_tags:
assert(db_reference_tag.hasAttribute('type') and db_reference_tag.hasAttribute('active') and db_reference_tag.hasAttribute('id'))
att_type = db_reference_tag.getAttribute('type')
is_active = db_reference_tag.getAttribute('active')
dbref_id = db_reference_tag.getAttribute('id')
assert(is_active == 'Y' or is_active == 'N')
is_active = (is_active == 'Y')
if att_type == 'UniProtKB/Swiss-Prot' or att_type == 'UniProtKB/TrEMBL':
if is_active:
#colortext.message(att_type + dbref_id)
ACCs.append(dbref_id)
else:
pass#colortext.warning(att_type + dbref_id)
return ACCs
def get_organisms(self):
self.organisms = {}
self._get_XML()
ACCs = self._get_active_ACCs()
#print(ACCs)
name_count = {}
for UniProtAC in ACCs:
#print(UniProtAC)
if UniProtAC in self.AC_entries:
AC_entry = self.AC_entries[UniProtAC]
else:
if UniProtAC in ['N2XE95', 'N1E9H6', 'N2JUB3', 'N2Z3Z2']: # hack for bad XML documents at time of writing
continue
if not self.silent:
colortext.warning("Retrieving %s" % UniProtAC)
try:
AC_entry = UniProtACEntry(UniProtAC, cache_dir = self.cache_dir, silent = self.silent)
except EmptyUniProtACXMLException:
continue
for o in AC_entry.organisms:
name_count[o['scientific']] = name_count.get(o['scientific'], 0)
name_count[o['scientific']] += 1
assert(len(AC_entry.organisms) == 1)
self.organisms[UniProtAC] = AC_entry.organisms[0]
def to_dict(self):
return {
'UniParcID' : self.UniParcID,
'UniProtAC' : self.UniProtACs,
'UniProtKB' : self.UniProtIDs,
'sequence' : self.sequence,
'atomic_mass' : self.atomic_mass,
'CRC64Digest' : self.CRC64Digest,
'Subsections' : self.subsections,
'Recommended Name' : self.recommended_name,
}
def __repr__(self):
a = []
for k, v in sorted(self.to_dict().items()):
if k == 'Subsections':
a.append(colortext.make("Subsections\n", 'green'))
a.append("%s\n" % str(v))
else:
a.append(colortext.make(k.ljust(20), 'green'))
a.append(colortext.make("%s\n" % str(v), 'silver'))
return "".join(a)
|
class UniParcEntry(object):
def __init__(self, UniParcID, UniProtACs = None, UniProtIDs = None, cache_dir = None, silent = False):
pass
def _get_XML(self):
pass
def _get_active_ACCs(self):
pass
def get_organisms(self):
pass
def to_dict(self):
pass
def __repr__(self):
pass
| 7 | 0 | 40 | 3 | 34 | 3 | 10 | 0.1 | 1 | 8 | 4 | 0 | 6 | 15 | 6 | 6 | 249 | 25 | 207 | 59 | 200 | 20 | 188 | 59 | 181 | 42 | 1 | 4 | 62 |
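A Python 3 sketch of the cache-or-fetch pattern used throughout UniParcEntry and UniProtACEntry above (read a cached .fasta/.xml if present, otherwise fetch over HTTP and write it to the cache). The klab helpers read_file/write_file/http_get are replaced here by plain standard-library equivalents; the function name is hypothetical.

import os
from urllib.request import urlopen

def cached_get(url, cache_dir, filename):
    # Reuse a cached copy when present; otherwise fetch over HTTP and cache it.
    cached_filepath = os.path.join(cache_dir, filename) if cache_dir else None
    if cached_filepath and os.path.exists(cached_filepath):
        with open(cached_filepath) as f:
            return f.read()
    contents = urlopen(url).read().decode()
    if cached_filepath:
        with open(cached_filepath, 'w') as f:
            f.write(contents)
    return contents

# Hypothetical usage mirroring the URL pattern above:
# fasta = cached_get('http://www.uniprot.org/uniparc/UPI0000047CA3.fasta', '/tmp', 'UPI0000047CA3.fasta')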
143,542 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/uniprot.py
|
klab.bio.uniprot.UniParcEntryStandardizationException
|
class UniParcEntryStandardizationException(colortext.Exception): pass
|
class UniParcEntryStandardizationException(colortext.Exception):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 1 | 0 | 0 |
143,543 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/uniprot.py
|
klab.bio.uniprot.UniProtACEntry
|
class UniProtACEntry(object):
organism_name_types = set(['common', 'full', 'scientific', 'synonym', 'abbreviation'])
molecule_processing_subsections = set([
"signal peptide",
"chain",
"transit peptide",
"propeptide",
"peptide",
"initiator methionine",
])
sampling_methods = {
'EM' : 'electron microscopy',
'Fiber' : 'fiber diffraction',
'Model' : 'model',
'NMR' : 'nuclear magnetic resonance',
'X-ray' : 'X-ray crystallography',
'Neutron' : 'neutron diffraction',
'Other' : 'other'
}
def __init__(self, UniProtAC, XML = None, cache_dir = None, silent = True):
if cache_dir and not(os.path.exists(cache_dir)):
raise Exception("The cache directory %s does not exist." % cache_dir)
self.UniProtAC = UniProtAC
self.silent = silent
# Get XML
if XML == None:
protein_xml = None
cached_filepath = None
if cache_dir:
cached_filepath = os.path.join(cache_dir, '%s.xml' % UniProtAC)
if cached_filepath and os.path.exists(cached_filepath):
protein_xml = read_file(cached_filepath)
else:
if not silent:
colortext.write("Retrieving %s\n" % UniProtAC, "cyan")
url = 'http://www.uniprot.org/uniprot/%s.xml' % UniProtAC
protein_xml = http_get(url)
if not(protein_xml.strip()):
raise EmptyUniProtACXMLException('The file %s is empty.' % UniProtAC)
if cached_filepath:
write_file(cached_filepath, protein_xml)
self.XML = protein_xml
else:
self.XML = XML
self.recommended_name = None
self.submitted_names = []
self.alternative_names = []
# Get DOM
try:
self._dom = parseString(protein_xml)
except:
if cached_filepath:
raise Exception("The UniProtAC XML for '%s' was invalid. The cached file is located at %s. Check this file - if it is not valid XML then delete the file and rerun the script." % (UniProtAC, cached_filepath))
else:
raise Exception("The UniProtAC XML for '%s' was invalid." % UniProtAC)
main_tags = self._dom.getElementsByTagName("uniprot")
assert(len(main_tags) == 1)
entry_tags = main_tags[0].getElementsByTagName("entry")
assert(len(entry_tags) == 1)
self.entry_tag = entry_tags[0]
self._parse_evidence_tag()
self._parse_sequence_tag()
self._parse_protein_tag()
self._parse_organism_tag()
self._parse_subsections()
self._parse_PDB_mapping()
def _parse_PDB_mapping(self):
entry_tag = self.entry_tag
mapping = {}
dbReference_tags = [child for child in entry_tag.childNodes if child.nodeType == child.ELEMENT_NODE and child.tagName == 'dbReference']
for t in dbReference_tags:
db_type = t.getAttribute('type')
assert(db_type)
if db_type == 'PDB':
pdb_id = t.getAttribute('id')
assert(len(pdb_id) == 4)
#print(pdb_id)
method = None
resolution = None
chains = []
for p in t.getElementsByTagName('property'):
if p.getAttribute('type') == 'method':
method = p.getAttribute('value')
elif p.getAttribute('type') == 'resolution':
resolution = float(p.getAttribute('value'))
elif p.getAttribute('type') == 'chains':
chains_groups = [x.strip() for x in p.getAttribute('value').split(",") if x.strip()]
for cg in chains_groups:
cg_tokens = cg.split("=")
assert(len(cg_tokens) == 2)
chain_ids = cg_tokens[0].strip().split("/")
for chain_id in chain_ids:
assert(len(chain_id) == 1)
#print(chain_id)
range = cg_tokens[1].strip().split("-")
assert(len(range) == 2)
starting_index = None
ending_index = None
try:
starting_index = int(range[0])
ending_index = int(range[1])
except:
mmkey = "/".join(sorted(chain_ids))
if missing_mapping_for_AC_PDB_chains.get(self.UniProtAC, {}).get(pdb_id, {}).get(mmkey):
starting_index, ending_index = missing_mapping_for_AC_PDB_chains.get(self.UniProtAC, {}).get(pdb_id, {}).get(mmkey)
if not self.silent:
colortext.error("Fixing starting_index, ending_index to %d, %d for PDB chains %s." % (starting_index, ending_index, str(chain_ids)))
else:
if not set(chain_ids) in broken_mapping_for_AC_PDB_chains.get(self.UniProtAC, {}).get(pdb_id, []):
raise colortext.Exception("The starting index and ending index for %s, chains %s in UniProtKB AC entry %s is broken or missing. Fix the mapping or mark it as missing in uniprot_patches.py" % (pdb_id, ",".join(chain_ids), self.UniProtAC))
continue
for chain_id in chain_ids:
assert(len(chain_id) == 1)
if fixed_mapping_for_AC_PDB_chains.get(self.UniProtAC, {}).get(pdb_id, {}).get(chain_id):
fixed_chain_id = fixed_mapping_for_AC_PDB_chains.get(self.UniProtAC, {}).get(pdb_id, {}).get(chain_id)
if not self.silent:
colortext.error("Fixing PDB chain from %s to %s." % (chain_id, fixed_chain_id))
chain_id = fixed_chain_id
chains.append((chain_id, starting_index, ending_index))
else:
raise Exception("Unhandled dbReference property tag type.")
if not method:
temp_method = missing_AC_PDB_methods.get(self.UniProtAC, {}).get(pdb_id, [])
if temp_method:
method = temp_method[0]
if not self.silent:
colortext.error("Fixing method to %s for PDB %s." % (method, pdb_id))
if not chains:
assert(pdb_id in broken_mapping_for_AC_PDB_chains.get(self.UniProtAC, {}))
continue
if not method and chains:
raise colortext.Exception("Missing method and chains for %s in UniProtKB AC entry %s. Fix the mapping or mark it as missing in uniprot_patches.py" % (pdb_id, self.UniProtAC))
if not method in list(UniProtACEntry.sampling_methods.keys()):
raise colortext.Exception("Unknown method '%s' found in UniProtKB AC entry %s." % (method, self.UniProtAC))
if method in ['X-ray'] and resolution: # resolution can be null e.g. in P00698 with 2A6U (POWDER DIFFRACTION)
assert(pdb_id not in mapping)
if pdb_id not in PDBs_marked_as_XRay_with_no_resolution:
assert(resolution)
mapping[pdb_id] = {'method' : method, 'resolution' : resolution, 'chains' : {}}
import pprint
for chain in chains:
#assert(chain[0] not in mapping[pdb_id]['chains']) # todo: I disabled this when calling get_common_PDB_IDs as it hit the assertion while looking up 1REW with 4N1D. Is this assertion necessary?
mapping[pdb_id]['chains'][chain[0]] = (chain[1], chain[2])
if False:
for pdb_id, details in sorted(mapping.items()):
if not self.silent:
colortext.message("%s, %s, %sA" % (str(pdb_id), str(details['method']), str(details['resolution'])))
for chain, indices in sorted(details['chains'].items()):
if not self.silent:
colortext.warning(" Chain %s: %s-%s" % (chain, str(indices[0]).rjust(5), str(indices[1]).ljust(5)))
def _parse_evidence_tag(self):
entry_tag = self.entry_tag
protein_Existence_tags = entry_tag.getElementsByTagName("proteinExistence")
assert(len(protein_Existence_tags) == 1)
self.existence_type = protein_Existence_tags[0].getAttribute('type')
#print(self.existence_type)
def _parse_subsections(self):
molecule_processing_subsections = UniProtACEntry.molecule_processing_subsections
assert(self.sequence_length)
subsections = ProteinSubsectionHolder(self.sequence_length)
entry_tag = self.entry_tag
feature_tags = [child for child in entry_tag.childNodes if child.nodeType == child.ELEMENT_NODE and child.tagName == 'feature']
for additional_subsection in subsections_for_addition.get(self.UniProtAC, []):
if not self.silent:
colortext.warning("Adding additional subsection %s." % str(additional_subsection))
subsections.add(additional_subsection[0], additional_subsection[1], additional_subsection[2], additional_subsection[3])
if self.UniProtAC not in AC_entries_where_we_ignore_the_subsections:
for feature_tag in feature_tags:
if feature_tag.hasAttribute('type'):
att_type = feature_tag.getAttribute('type')
if att_type in molecule_processing_subsections:
description = feature_tag.getAttribute('description')
locations = feature_tag.getElementsByTagName("location")
assert(len(locations) == 1)
subsection_for_addition = None
begin_position = None
end_position = None
position_tag = locations[0].getElementsByTagName("position")
if position_tag:
assert(len(position_tag) == 1)
position_tag = locations[0].getElementsByTagName("position")
if position_tag[0].hasAttribute('position'):
begin_position = int(position_tag[0].getAttribute('position'))
end_position = begin_position
else:
begin_pos = locations[0].getElementsByTagName("begin")
end_pos = locations[0].getElementsByTagName("end")
assert(len(begin_pos) == 1 and len(end_pos) == 1)
if begin_pos[0].hasAttribute('position'):
begin_position = int(begin_pos[0].getAttribute('position'))
if end_pos[0].hasAttribute('position'):
end_position = int(end_pos[0].getAttribute('position'))
if (begin_position, end_position) in differing_subsection_name_patch.get(self.UniProtAC, {}):
description_pair = differing_subsection_name_patch[self.UniProtAC][(begin_position, end_position)]
if description_pair[0] == description:
if not self.silent:
colortext.warning("Changing subsection name from '%s' to '%s'." % description_pair)
description = description_pair[1]
if begin_position and end_position:
subsection_for_addition = (att_type, description, begin_position, end_position)
if subsection_for_addition not in clashing_subsections_for_removal.get(self.UniProtAC, []):
if subsection_for_addition not in overlapping_subsections_for_removal.get(self.UniProtAC, []): # This may be overkill
#colortext.message("Adding subsection %s." % str(subsection_for_addition))
subsections.add(subsection_for_addition[0], subsection_for_addition[1], subsection_for_addition[2], subsection_for_addition[3])
else:
if not self.silent:
colortext.warning("Skipping overlapping subsection %s." % str(subsection_for_addition))
else:
if not self.silent:
colortext.warning("Skipping clashing subsection %s." % str(subsection_for_addition))
self.subsections = subsections
def _parse_sequence_tag(self):
'''Parses the sequence and atomic mass.'''
#main_tags = self._dom.getElementsByTagName("uniprot")
#assert(len(main_tags) == 1)
#entry_tags = main_tags[0].getElementsByTagName("entry")
#assert(len(entry_tags) == 1)
#entry_tags[0]
entry_tag = self.entry_tag
# only get sequence tags that are direct children of the entry tag (sequence tags can also be children of entry.comment.conflict)
sequence_tags = [child for child in entry_tag.childNodes if child.nodeType == child.ELEMENT_NODE and child.tagName == 'sequence']
assert(len(sequence_tags) == 1)
sequence_tag = sequence_tags[0]
# atomic mass, sequence, CRC64 digest
self.atomic_mass = float(sequence_tag.getAttribute("mass"))
self.sequence = "".join(sequence_tag.firstChild.nodeValue.strip().split("\n"))
self.sequence_length = int(sequence_tag.getAttribute("length"))
self.CRC64Digest = sequence_tag.getAttribute("checksum")
def _parse_organism_tag(self):
'''Parses the protein tag to get the names and EC numbers.'''
organism_name_types = UniProtACEntry.organism_name_types
self.organisms = []
organism_tags = [child for child in self.entry_tag.childNodes if child.nodeType == child.ELEMENT_NODE and child.tagName == 'organism']
assert(len(organism_tags) == 1)
for organism_tag in organism_tags:
names = dict.fromkeys(organism_name_types, None)
for name_tag in [child for child in organism_tag.childNodes if child.nodeType == child.ELEMENT_NODE and child.tagName == 'name']:
name_type = name_tag.getAttribute("type")
assert(name_type in organism_name_types)
names[name_type] = name_tag.firstChild.nodeValue.strip()
assert(names.get('scientific'))
self.organisms.append(names)
def _parse_protein_tag(self):
'''Parses the protein tag to get the names and EC numbers.'''
protein_nodes = self._dom.getElementsByTagName('protein')
assert(len(protein_nodes) == 1)
self.protein_node = protein_nodes[0]
self._get_recommended_name()
self._get_submitted_names()
self._get_alternative_names()
def get_names(self):
if self.recommended_name:
return [self.recommended_name] + self.alternative_names + self.submitted_names
else:
return self.alternative_names + self.submitted_names
@staticmethod
def parse_names(tags):
names = []
for tag in tags:
fullNames = tag.getElementsByTagName('fullName')
assert(len(fullNames) == 1)
fullName = fullNames[0].firstChild.nodeValue
EC_numbers = []
EC_number_tags = tag.getElementsByTagName('ecNumber')
for EC_number_tag in EC_number_tags:
EC_numbers.append(EC_number_tag.firstChild.nodeValue)
short_names = []
short_name_tags = tag.getElementsByTagName('shortName')
for short_name_tag in short_name_tags:
short_names.append(short_name_tag.firstChild.nodeValue)
for n in names:
assert(n['Name'] != fullName)
names.append({'Name' : fullName, 'EC numbers' : EC_numbers, 'Short names' : short_names})
return names
def _get_recommended_name(self):
# only get recommendedName tags that are direct children of the protein tag (recommendedName tags can also be children of protein.component tags)
recommended_names = [child for child in self.protein_node.childNodes if child.nodeType == child.ELEMENT_NODE and child.tagName == 'recommendedName']
if recommended_names:
assert(len(recommended_names) == 1)
recommended_names = UniProtACEntry.parse_names(recommended_names)
assert(len(recommended_names) == 1)
self.recommended_name = recommended_names[0]
def _get_submitted_names(self):
submitted_names = self.protein_node.getElementsByTagName('submittedName')
if submitted_names:
for submitted_name in submitted_names:
# According to the schema (http://www.uniprot.org/docs/uniprot.xsd), submitted names have no short names
assert(len(submitted_name.getElementsByTagName('shortName')) == 0)
self.submitted_names = UniProtACEntry.parse_names(submitted_names)
def _get_alternative_names(self):
alternative_names = self.protein_node.getElementsByTagName('alternativeName')
if alternative_names:
self.alternative_names = UniProtACEntry.parse_names(alternative_names)
|
class UniProtACEntry(object):
def __init__(self, UniProtAC, XML = None, cache_dir = None, silent = True):
pass
def _parse_PDB_mapping(self):
pass
def _parse_evidence_tag(self):
pass
def _parse_subsections(self):
pass
def _parse_sequence_tag(self):
'''Parses the sequence and atomic mass.'''
pass
def _parse_organism_tag(self):
'''Parses the protein tag to get the names and EC numbers.'''
pass
def _parse_protein_tag(self):
'''Parses the protein tag to get the names and EC numbers.'''
pass
def get_names(self):
pass
@staticmethod
def parse_names(tags):
pass
def _get_recommended_name(self):
pass
def _get_submitted_names(self):
pass
def _get_alternative_names(self):
pass
| 14 | 3 | 25 | 2 | 21 | 2 | 7 | 0.08 | 1 | 10 | 3 | 0 | 11 | 16 | 12 | 12 | 337 | 44 | 274 | 106 | 259 | 21 | 247 | 105 | 233 | 30 | 1 | 8 | 79 |
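A minimal sketch of the minidom-based name extraction done in parse_names and _get_recommended_name above, run on a tiny inline XML fragment (the fragment and values are hypothetical, not real UniProt output).

from xml.dom.minidom import parseString

xml = """<protein>
  <recommendedName>
    <fullName>Lysozyme C</fullName>
    <ecNumber>3.2.1.17</ecNumber>
  </recommendedName>
</protein>"""

dom = parseString(xml)
tag = dom.getElementsByTagName('recommendedName')[0]
full_name = tag.getElementsByTagName('fullName')[0].firstChild.nodeValue
ec_numbers = [e.firstChild.nodeValue for e in tag.getElementsByTagName('ecNumber')]
print(full_name, ec_numbers)   # -> Lysozyme C ['3.2.1.17']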
143,544 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/xml_pdb_util.py
|
klab.bio.xml_pdb_util.Residue
|
class Residue:
def __init__ (self, atom_data):
self.atoms = {}
atom_num, entity_id, chain, resn, resi, x, y, z = atom_data
self.entity_id = entity_id
self.chain = chain
self.resn = resn
self.resi = resi
self.add_atom(atom_data)
def add_atom(self, atom_data):
atom_num, entity_id, chain, resn, resi, x, y, z = atom_data
# print atom_data
# print self.entity_id, self.selection_tup
assert( self.entity_id == entity_id )
assert( self.chain == chain )
assert( self.resn == resn )
assert( self.resi == resi )
assert( atom_num not in self.atoms )
self.atoms[atom_num] = np.array( (x, y, z) )
def within_dist(self, other, dist_cutoff):
for self_atom in list(self.atoms.values()):
for other_atom in list(other.atoms.values()):
if np.linalg.norm( self_atom - other_atom ) <= dist_cutoff:
return True
return False
@property
def selection_tup(self):
return (self.resi, self.resn, self.chain)
|
class Residue:
def __init__ (self, atom_data):
pass
def add_atom(self, atom_data):
pass
def within_dist(self, other, dist_cutoff):
pass
@property
def selection_tup(self):
pass
| 6 | 0 | 7 | 0 | 6 | 1 | 2 | 0.08 | 0 | 1 | 0 | 0 | 4 | 5 | 4 | 4 | 31 | 3 | 26 | 15 | 20 | 2 | 25 | 14 | 20 | 4 | 0 | 3 | 7 |
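A self-contained sketch of the all-pairs distance test in Residue.within_dist above (hypothetical coordinates; only numpy is required).

import numpy as np

def within_dist(atoms_a, atoms_b, dist_cutoff):
    # Return True as soon as any atom pair across the two residues is within the cutoff.
    for a in atoms_a:
        for b in atoms_b:
            if np.linalg.norm(a - b) <= dist_cutoff:
                return True
    return False

res_a = [np.array((0.0, 0.0, 0.0)), np.array((1.5, 0.0, 0.0))]
res_b = [np.array((3.0, 4.0, 0.0))]
print(within_dist(res_a, res_b, 6.0))  # True  (closest pair is ~4.3 apart)
print(within_dist(res_a, res_b, 4.0))  # False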
143,545 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/bio/xml_pdb_util.py
|
klab.bio.xml_pdb_util.xmlpdb
|
class xmlpdb:
def __init__(self, xml_pdb_path, remove_dup_alt_ids = False):
assert( os.path.isfile(xml_pdb_path) )
if xml_pdb_path.endswith('.gz'):
with gzip.open(xml_pdb_path, 'rb') as f:
et = etree.parse(f)
else:
with open(xml_pdb_path, 'r') as f:
et = etree.parse(f)
root = et.getroot()
ns = root.nsmap
# Get entity to chain mapping
# self.entity_to_chain_mapping = {}
# struct_asymCategory_tag = etree.QName(ns['PDBx'], 'struct_asymCategory').text
# struct_asym_tag = etree.QName(ns['PDBx'], 'struct_asym').text
# entity_id_tag = etree.QName(ns['PDBx'], 'entity_id').text
# struct_asymCategory = root.find(struct_asymCategory_tag)
# for struct_asym in struct_asymCategory.iter(struct_asym_tag):
# chain = struct_asym.attrib['id']
# entity_id = long( struct_asym.findtext(entity_id_tag) )
# print entity_id, chain
# sys.exit(0)
# Tags for later searching
atom_tag = etree.QName(ns['PDBx'], 'atom_site').text
atom_name_tag = etree.QName(ns['PDBx'], 'label_atom_id').text
entity_tag = etree.QName(ns['PDBx'], 'label_entity_id').text
resn_tag = etree.QName(ns['PDBx'], 'label_comp_id').text
resi_tag = etree.QName(ns['PDBx'], 'auth_seq_id').text
chain_tag = etree.QName(ns['PDBx'], 'label_asym_id').text
x_tag = etree.QName(ns['PDBx'], 'Cartn_x').text
y_tag = etree.QName(ns['PDBx'], 'Cartn_y').text
z_tag = etree.QName(ns['PDBx'], 'Cartn_z').text
# for child in root:
# print child.tag, child.attrib
self.residues = {}
if remove_dup_alt_ids:
previously_seen_atoms = set()
for atom_site in root.iter(atom_tag):
atom_name = atom_site.findtext(atom_name_tag).strip()
atom_num = int( atom_site.attrib['id'] )
entity_id = int( atom_site.findtext(entity_tag) )
chain = atom_site.findtext(chain_tag)
resn = atom_site.findtext(resn_tag)
resi = int( atom_site.findtext(resi_tag) )
x = float( atom_site.findtext(x_tag) )
y = float( atom_site.findtext(y_tag) )
z = float( atom_site.findtext(z_tag) )
if remove_dup_alt_ids:
previously_seen_atom_tup = (atom_name, resn, chain, resi)
if previously_seen_atom_tup in previously_seen_atoms:
continue
else:
previously_seen_atoms.add( previously_seen_atom_tup )
atom_data = (atom_num, entity_id, chain, resn, resi, x, y, z)
if chain not in self.residues:
self.residues[chain] = {}
if resi in self.residues[chain]:
# print
# print chain, resi
self.residues[chain][resi].add_atom( atom_data )
else:
self.residues[chain][resi] = Residue( atom_data )
def get_neighbors_by_chain(self, neighbor_chains, dist_cutoff, protein_only = True):
# Returns a selection of any residues within dist_cutoff (angstroms) of given neighbor chains
# Return selection format: (residue number, three letter wildtype residue type, chain letter)
selection = set()
all_chains = sorted( self.residues.keys() )
for search_chain in neighbor_chains:
for search_resi, search_residue in self.residues[search_chain].items():
if not protein_only or search_residue.resn in one_letter:
for chain in all_chains:
if chain not in neighbor_chains:
for resi, residue in self.residues[chain].items():
if not protein_only or residue.resn in one_letter:
selection_tup = residue.selection_tup
if selection_tup not in selection:
if search_residue.within_dist(residue, dist_cutoff):
selection.add( selection_tup )
return sorted( selection )
def get_neighbors_at_dimer_interface(self, interface_chains, dist_cutoff, protein_only = True, filter_only_chains = []):
# Returns a selection of any residues within dist_cutoff (angstroms) of given neighbor chains
# Return selection format: (residue number, three letter wildtype residue type, chain letter)
selection = set()
all_chains = sorted( self.residues.keys() )
for search_chain in interface_chains:
for search_resi, search_residue in self.residues[search_chain].items():
if not protein_only or search_residue.resn in one_letter:
for chain in interface_chains:
if len( filter_only_chains ) > 0 and chain not in filter_only_chains:
continue
if chain != search_chain:
for resi, residue in self.residues[chain].items():
if not protein_only or residue.resn in one_letter:
selection_tup = residue.selection_tup
if selection_tup not in selection:
if search_residue.within_dist(residue, dist_cutoff):
selection.add( selection_tup )
return sorted( selection )
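    # --- Usage sketch (added for illustration; not part of the original module) ---
    # Assumes '1abc.xml' is a hypothetical PDBML/XML structure file containing protein chains A and B;
    # the class and method names below are the ones defined in this file.
    #
    #   pdb = xmlpdb('1abc.xml', remove_dup_alt_ids = True)
    #   # Residues of all other chains within 8 A of chain A or B:
    #   near_ab = pdb.get_neighbors_by_chain(['A', 'B'], 8.0)
    #   # Residues of chains A and B within 5 A of the partner chain (the dimer interface):
    #   interface_ab = pdb.get_neighbors_at_dimer_interface(['A', 'B'], 5.0)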
|
class xmlpdb:
def __init__(self, xml_pdb_path, remove_dup_alt_ids = False):
pass
def get_neighbors_by_chain(self, neighbor_chains, dist_cutoff, protein_only = True):
pass
def get_neighbors_at_dimer_interface(self, interface_chains, dist_cutoff, protein_only = True, filter_only_chains = []):
pass
| 4 | 0 | 35 | 2 | 26 | 7 | 10 | 0.26 | 0 | 4 | 1 | 0 | 3 | 1 | 3 | 3 | 108 | 10 | 78 | 45 | 74 | 20 | 75 | 44 | 71 | 11 | 0 | 9 | 29 |
143,546 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/box_backup.py
|
klab.box_backup.BoxAPI
|
class BoxAPI:
def __init__(self):
storage = Storage('klab_box_sync', getpass.getuser())
self.credentials = storage.get()
if self.credentials == None:
parser = argparse.ArgumentParser(parents=[tools.argparser])
flags = parser.parse_args()
flow = oauth2client.client.flow_from_clientsecrets(CLIENT_SECRETS_PATH, scope='', redirect_uri = 'http://localhost:8080')
self.credentials = tools.run_flow(flow, storage, flags)
self.oauth_connector = OAuthConnector(self.credentials)
self.client = Client( self.oauth_connector ) # Replace this with LoggingClient for debugging
self.root_folder = self.client.folder( folder_id = '0' )
self._upload_test_only = False # Don't perform actual uploads if True. Was used to debug memory leaks.
def _find_folder_by_name_inner( self, folder_id, name, limit = 500 ):
search_folder = self.client.folder( folder_id = folder_id )
offset = 0
search_folders = search_folder.get_items( limit = limit, offset = offset )
while len(search_folders) > 0:
folders = [ f for f in search_folders if f['name'] == name and f['type'] == 'folder' ]
if len( folders ) == 1:
return folders[0]['id']
offset += limit
search_folders = search_folder.get_items( limit = limit, offset = offset )
return None
def create_folder( self, root_folder_id, folder_name ):
        # Creates a folder named folder_name inside the Box folder root_folder_id if it doesn't
        # already exist, and returns the folder's Box id either way
folder_id = self._find_folder_by_name_inner( root_folder_id, folder_name )
if folder_id == None:
return self.client.folder( folder_id = root_folder_id ).create_subfolder(folder_name ).id
else:
return folder_id
def find_file( self, folder_id, basename, limit = 500 ):
'''
Finds a file based on a box path
Returns a list of file IDs
Returns multiple file IDs if the file was split into parts with the extension '.partN' (where N is an integer)
'''
search_folder = self.client.folder( folder_id = folder_id )
offset = 0
search_items = search_folder.get_items( limit = limit, offset = offset )
found_files = []
while len(search_items) > 0:
files = [ (f['id'], f['name']) for f in search_items if f['name'].startswith( basename ) and f['type'] == 'file' ]
files.sort()
for f_id, f_name in files:
assert(
f_name == basename
or
( f_name.startswith( basename ) and f_name[len(basename):len(basename)+5] == '.part' )
)
found_files.extend( files )
offset += limit
search_items = search_folder.get_items( limit = limit, offset = offset )
return [f[0] for f in found_files]
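        # Illustrative only: for a hypothetical large backup uploaded in three pieces named
        # 'data.tar.part0', 'data.tar.part1' and 'data.tar.part2', calling
        #   find_file(folder_id, 'data.tar')
        # returns the three corresponding file ids in part order, whereas an unsplit upload named
        # exactly 'data.tar' yields a single-element list.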
def find_folder_path( self, folder_path ):
current_folder_id = '0'
for folder_name in os.path.normpath(folder_path).split(os.path.sep):
if len(folder_name) > 0:
current_folder_id = self._find_folder_by_name_inner( current_folder_id, folder_name )
return current_folder_id
def upload( self,
destination_folder_id,
source_path,
preflight_check = True,
verify = False, # After upload, check sha1 sums
lock_file = True, # By default, lock uploaded files to prevent changes (unless manually unlocked)
maximum_attempts = 5, # Number of times to retry upload after any exception is encountered
verbose = True,
chunked_upload_threads = 5,
):
for trial_counter in range( maximum_attempts ):
try:
file_size = os.stat(source_path).st_size
uploaded_file_ids = []
if file_size >= BOX_MAX_FILE_SIZE:
uploaded_file_ids = self._upload_in_splits( destination_folder_id, source_path, preflight_check, verbose = verbose, chunked_upload_threads = chunked_upload_threads )
else:
                        # The file is small enough that it will not be uploaded in splits (the split
                        # path checks whether each part already exists itself), so check here whether
                        # the whole file already exists in the destination folder. Content equality is
                        # not checked at this point; that happens below at the verify step.
uploaded_box_file_ids = self.find_file( destination_folder_id, os.path.basename( source_path ) )
if len(uploaded_box_file_ids) != 1:
if file_size >= BOX_MIN_CHUNK_UPLOAD_SIZE: # 55 MB
uploaded_file_ids = [ self._chunked_upload( destination_folder_id, source_path, preflight_check = preflight_check, verbose = verbose, upload_threads = chunked_upload_threads, ) ]
else:
if not self._upload_test_only:
uploaded_file_ids = [ self.client.folder( folder_id = destination_folder_id ).upload( file_path = source_path, preflight_check = preflight_check, preflight_expected_size = file_size ).get().response_object['id'] ]
if lock_file:
self.lock_files( uploaded_file_ids )
if verify:
if not self.verify_uploaded_file( destination_folder_id, source_path ):
return False
return True
except:
if maximum_attempts > 1 and verbose:
print(( 'Uploading file {0} failed attempt {1} of {2}'.format(source_path, trial_counter+1, maximum_attempts) ))
elif maximum_attempts == 1:
raise
return False
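        # Usage sketch (added for illustration; the folder id and file path below are hypothetical):
        #
        #   api = BoxAPI()
        #   dest_id = api.create_folder('0', 'lab_backups')   # '0' is the Box root folder
        #   ok = api.upload(dest_id, '/data/archive.tar.gz', verify = True)
        #
        # upload() dispatches automatically: files of at least BOX_MAX_FILE_SIZE go through
        # _upload_in_splits(), files of at least BOX_MIN_CHUNK_UPLOAD_SIZE use a chunked upload
        # session, and smaller files are uploaded in a single request (skipping files that
        # already exist in the destination folder).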
def lock_files( self, file_ids, prevent_download = False ):
for file_id in file_ids:
self.lock_file( file_id, prevent_download = prevent_download )
def lock_file( self, file_id, prevent_download = False ):
self.client.file( file_id = file_id ).lock()
def verify_uploaded_file(
self,
destination_folder_id,
source_path,
verbose = True,
):
'''
Verifies the integrity of a file uploaded to Box
'''
source_file_size = os.stat(source_path).st_size
total_part_size = 0
file_position = 0
uploaded_box_file_ids = self.find_file( destination_folder_id, os.path.basename( source_path ) )
total_sha1 = hashlib.sha1()
for i, file_id in enumerate(uploaded_box_file_ids):
file_info = self.client.file( file_id = file_id ).get()
uploaded_sha1 = file_info.response_object['sha1']
uploaded_size = file_info.response_object['size']
part_sha1 = read_sha1( source_path, start_byte = file_position, read_size = uploaded_size, extra_hashers = [total_sha1] )
if part_sha1.hexdigest() != uploaded_sha1:
print( '\n' )
print(( 'Part sha1: ' + part_sha1.hexdigest() ))
print(( 'Uploaded sha1: ' + uploaded_sha1 ))
print(('Sha1 hash of uploaded file {0} ({1}) does not match'.format(file_info.response_object['name'], file_id) ))
return False
file_position += uploaded_size
total_part_size += uploaded_size
if len(uploaded_box_file_ids) > 1:
print(( 'Finished verifying part {0} of {1} of {2}'.format( i+1, len(uploaded_box_file_ids), file_id ) ))
assert( source_file_size == total_part_size )
if verbose:
print(( 'Verified uploaded file {0} ({1}) with sha1: {2}'.format(source_path, file_id, total_sha1.hexdigest()) ))
return True
def _upload_in_splits( self, destination_folder_id, source_path, preflight_check, verbose = True, chunked_upload_threads = 5 ):
'''
Since Box has a maximum file size limit (15 GB at time of writing),
we need to split files larger than this into smaller parts, and chunk upload each part
'''
file_size = os.stat(source_path).st_size
split_size = BOX_MAX_FILE_SIZE
# Make sure that the last split piece is still big enough for a chunked upload
while file_size % split_size < BOX_MIN_CHUNK_UPLOAD_SIZE:
split_size -= 1000
if split_size < BOX_MIN_CHUNK_UPLOAD_SIZE:
                raise Exception('Lazy programming error: could not find a split size that leaves the last piece large enough for a chunked upload')
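        # Worked example (illustrative numbers): with BOX_MAX_FILE_SIZE = 15 GB and
        # BOX_MIN_CHUNK_UPLOAD_SIZE = 55 MB (the values implied by comments elsewhere in this file),
        # a 30.01 GB file initially leaves a ~10 MB final piece (30.01 GB % 15 GB). The loop above
        # shrinks split_size in 1000-byte steps until file_size % split_size is at least 55 MB,
        # so every piece remains large enough for a chunked upload.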
split_start_byte = 0
part_count = 0
uploaded_file_ids = []
while split_start_byte < file_size:
dest_file_name = '{0}.part{1}'.format( os.path.basename(source_path), part_count)
prev_uploaded_file_ids = self.find_file( destination_folder_id, dest_file_name )
if len( prev_uploaded_file_ids ) == 1:
if verbose:
print(( '\nSkipping upload of split {0} of {1}; already exists'.format( part_count + 1, math.ceil(file_size / split_size) ) ))
uploaded_file_ids.extend( prev_uploaded_file_ids )
else:
if verbose:
print(( '\nUploading split {0} of {1}'.format( part_count + 1, math.ceil(file_size / split_size) ) ))
uploaded_file_ids.append( self._chunked_upload(
destination_folder_id, source_path,
dest_file_name = dest_file_name,
split_start_byte = split_start_byte,
file_size = min(split_size, file_size - split_start_byte), # Take the min of file_size - split_start_byte so that the last part of a split doesn't read into the next split
preflight_check = preflight_check,
verbose = verbose,
upload_threads = chunked_upload_threads,
) )
part_count += 1
split_start_byte += split_size
return uploaded_file_ids
def _abort_chunked_upload(self):
        delete_response = self.client.session.delete( self._current_chunked_upload_abort_url, expect_json_response = False )
assert( delete_response.status_code == 204 )
assert( len(delete_response.content) == 0 )
self._current_chunked_upload_abort_url = None
def _chunked_upload(
self,
destination_folder_id,
source_path,
dest_file_name = None,
split_start_byte = 0,
file_size = None,
preflight_check = True,
upload_threads = 5, # Your results may vary
verbose = True,
):
dest_file_name = dest_file_name or os.path.basename( source_path )
file_size = file_size or os.stat(source_path).st_size
destination_folder = self.client.folder( folder_id = destination_folder_id )
if preflight_check and not self._upload_test_only:
destination_folder.preflight_check( size = file_size, name = dest_file_name )
url = '{0}/files/upload_sessions'.format(UPLOAD_URL)
data = json.dumps({
'folder_id' : destination_folder_id,
'file_size' : file_size,
'file_name' : dest_file_name,
})
if self._upload_test_only:
json_response = {
'id' : 0,
'part_size' : 5000000, # 5 MB
'session_endpoints' : { 'abort' : None },
'total_parts' : math.ceil( float(file_size) / float(5000000) ),
}
else:
json_response = self.client.session.post(url, data=data, expect_json_response=True).json()
self._current_chunked_upload_abort_url = json_response['session_endpoints']['abort']
upload_responses = {
'create' : json_response,
'parts' : {},
}
session_id = json_response['id']
part_size = json_response['part_size']
reporter = Reporter( 'uploading ' + source_path + ' as ' + dest_file_name, entries = 'chunks', print_output = verbose )
reporter.set_total_count( json_response['total_parts'] )
uploads_complete = threading.Event()
totally_failed = threading.Event()
chunk_queue = queue.PriorityQueue()
results_queue = queue.PriorityQueue()
def upload_worker():
while (not uploads_complete.is_set()) and (not totally_failed.is_set()):
try:
part_n, args = chunk_queue.get(True, 0.3)
except queue.Empty:
continue
source_path, start_byte, header_start_byte, read_amount, attempt_number = args
attempt_number += 1
try:
sha1 = hashlib.sha1()
with open( source_path, 'rb' ) as f:
f.seek( start_byte )
data = f.read( read_amount )
sha1.update(data)
                    # Build this part's headers in a local dict so that concurrent upload workers
                    # do not race on shared mutable header state from the enclosing scope
                    part_headers = {
                        'content-type' : 'application/octet-stream',
                        'digest' : 'sha=' + base64.b64encode(sha1.digest()).decode(),
                        'content-range' : 'bytes {0}-{1}/{2}'.format( header_start_byte, header_start_byte + len(data) - 1, file_size ),
                    }
if self._upload_test_only:
results_queue.put( (part_n, {'part' : part_n}) )
else:
                        part_response = self.client.session.put(url, headers = part_headers, data = data, expect_json_response = True)
results_queue.put( (part_n, dict(part_response.json())) )
reporter.increment_report()
except:
if attempt_number >= MAX_CHUNK_ATTEMPTS:
if verbose:
print(( '\nSetting total failure after attempt {0} for part_n {1}\n'.format( attempt_number, part_n ) ))
totally_failed.set()
else:
chunk_queue.put( (part_n, (source_path, start_byte, header_start_byte, read_amount, attempt_number) ) )
chunk_queue.task_done()
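        # Note on the retry scheme in upload_worker() above: chunks are pulled from a PriorityQueue
        # keyed by part number, and a chunk whose upload raises is re-queued with an incremented
        # attempt counter. Once a chunk has failed MAX_CHUNK_ATTEMPTS times, the totally_failed
        # event is set, which stops all workers and leads to _abort_chunked_upload() further below.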
upload_worker_threads = []
for i in range( upload_threads ):
t = threading.Thread( target = upload_worker )
t.start()
upload_worker_threads.append(t)
for part_n in range( json_response['total_parts'] ):
header_start_byte = part_n * part_size
start_byte = split_start_byte + header_start_byte
url = '{0}/files/upload_sessions/{1}'.format( UPLOAD_URL, session_id )
headers = {
'content-type' : 'application/octet-stream',
}
read_amount = min(part_size, file_size - header_start_byte) # Make sure the last part of a split doesn't read into the next split
if not read_amount > 0:
if verbose:
print((read_amount, part_size, file_size, start_byte))
raise Exception('read_amount failure')
upload_args = (source_path, start_byte, header_start_byte, read_amount, 0) # Last 0 is attempt number
chunk_queue.put( (part_n, upload_args) )
total_sha = hashlib.sha1()
def read_total_hash_worker():
            # We read the file a second time here purely for hashing, which seems better than
            # holding the whole file in memory to hash at the end. The chunk uploads are slower and
            # proceed concurrently in the background threads, so this extra read should not be the
            # bottleneck.
for part_n in range( json_response['total_parts'] ):
if totally_failed.is_set():
break
header_start_byte = part_n * part_size
start_byte = split_start_byte + part_n * part_size
read_amount = min(part_size, file_size - header_start_byte) # Make sure the last part of a split doesn't read into the next split
with open( source_path, 'rb' ) as f:
f.seek( start_byte )
data = f.read( read_amount )
total_sha.update(data)
total_hasher = threading.Thread( target = read_total_hash_worker )
total_hasher.start()
# Wait for everything to finish or fail
chunk_queue.join()
uploads_complete.set()
if totally_failed.is_set():
# Cancel chunked upload upon exception
self._abort_chunked_upload()
if verbose:
print(( 'Chunk upload of file {0} (in {1} parts) cancelled'.format(source_path, json_response['total_parts']) ))
raise Exception('Totally failed upload')
reporter.done()
        if total_hasher.is_alive():
if verbose:
print( 'Waiting to compute total hash of file' )
total_hasher.join()
while not results_queue.empty():
part_n, part_response = results_queue.get()
upload_responses['parts'][part_n] = part_response['part']
# Commit
try:
if verbose:
print( 'Committing file upload' )
url = '{0}/files/upload_sessions/{1}/commit'.format( UPLOAD_URL, session_id )
data = json.dumps({
'parts' : [ upload_responses['parts'][part_n] for part_n in range( json_response['total_parts'] ) ],
})
headers = {}
headers['digest'] = 'sha=' + base64.b64encode(total_sha.digest()).decode()
if self._upload_test_only:
commit_response = {}
else:
commit_response = self.client.session.post(url, headers=headers, data=data, expect_json_response=True).json()
upload_responses['commit'] = commit_response
except:
# Cancel chunked upload upon exception
self._abort_chunked_upload()
if verbose:
print(( 'Chunk upload of file {0} (in {1} parts) cancelled'.format(source_path, json_response['total_parts']) ))
raise
self._current_chunked_upload_abort_url = None
if self._upload_test_only:
return None
else:
file_ids = self.find_file( destination_folder_id, dest_file_name )
assert( len(file_ids) == 1 )
return file_ids[0]
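        # Summary of the chunked-upload flow implemented above (as used by this class; Box's
        # upload-session documentation is the authoritative reference for the endpoints):
        #   1. POST {UPLOAD_URL}/files/upload_sessions with the folder id, file size and file name
        #      to open a session and learn part_size, total_parts and the abort URL.
        #   2. PUT each part to the session URL with 'digest' (base64 sha1 of the part) and
        #      'content-range' headers, from several worker threads in parallel.
        #   3. POST .../commit with the ordered part records and the sha1 of the whole (sub)file,
        #      or DELETE the abort URL if anything failed.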
def upload_path( self, upload_folder_id, fpath, verbose = True, lock_files = True, maximum_attempts = 5, retry_already_uploaded_files = False, write_marker_files = False, outer_upload_threads = 5, upload_in_random_order = True ):
        # Uploads a single file, or recursively uploads a folder, optionally leaving behind
        # '<name>.uploadedtobox' marker files (when write_marker_files is True) to record which
        # files have already been uploaded. Returns a list of files that could not be uploaded.
assert( os.path.exists( fpath ) )
big_batch_threshold = 10 # Verbosity is higher if the total files to upload is less than this
def find_files_recursive( search_path, outer_folder_id ):
# This function also creates missing Box folders as it searches the local filesystem
if os.path.isfile(search_path):
if search_path.endswith('.uploadedtobox'):
return []
return [ (search_path, outer_folder_id) ]
else:
                inner_folder_id = self.create_folder( outer_folder_id, os.path.basename(search_path) )
found_files = []
for x in os.listdir( search_path ):
found_files.extend( find_files_recursive( os.path.join( search_path, x ), inner_folder_id ) )
return found_files
if verbose:
print(( 'Recursively searching for files to upload in:', fpath ))
files_to_upload = find_files_recursive( fpath, upload_folder_id )
if verbose:
print(( 'Found {} files to upload'.format(len(files_to_upload)) ))
if len(files_to_upload) >= big_batch_threshold:
r = Reporter( 'uploading big batch of files to Box', entries = 'files', eol_char = '\r' )
else:
r = Reporter( 'uploading batch of files to Box', entries = 'files', eol_char = '\n' )
r.set_total_count( len(files_to_upload) )
files_to_upload.sort()
files_to_upload_queue = queue.PriorityQueue()
results_queue = queue.Queue()
uploads_complete = threading.Event()
def upload_worker():
while not uploads_complete.is_set():
try:
i, source_path_upload, folder_to_upload_id, call_upload_verbose, uploaded_marker_file = files_to_upload_queue.get(True, 0.3)
except queue.Empty:
continue
upload_successful = False
file_totally_failed = False
for trial_counter in range( maximum_attempts ):
if file_totally_failed:
break
try:
upload_successful = self.upload( folder_to_upload_id, source_path_upload, verify = False, lock_file = lock_files, maximum_attempts = 1, verbose = call_upload_verbose, chunked_upload_threads = 3 )
except Exception as e:
print(e)
upload_successful = False
if not upload_successful:
if maximum_attempts > 1:
print(( 'Uploading file {0} failed upload in attempt {1} of {2}'.format(source_path_upload, trial_counter+1, maximum_attempts) ))
continue
try:
if not self.verify_uploaded_file( folder_to_upload_id, source_path_upload, verbose = call_upload_verbose ):
upload_successful = False
except Exception as e:
print(e)
upload_successful = False
if not upload_successful:
if maximum_attempts > 1:
print(( 'Uploading file {0} failed verification in attempt {1} of {2}. Removing and potentially retrying upload.'.format(source_path_upload, trial_counter+1, maximum_attempts) ))
try:
file_ids = self.find_file( folder_to_upload_id, os.path.basename( source_path_upload ) )
except Exception as e:
print(e)
file_ids = []
for file_id in file_ids:
try:
self.client.file( file_id = file_id ).delete()
except:
print(( 'Delete failed, skipping file ' + source_path_upload ))
file_totally_failed = True
upload_successful = False
continue
break
results_queue.put( (source_path_upload, folder_to_upload_id, upload_successful, uploaded_marker_file) )
files_to_upload_queue.task_done()
if len(files_to_upload) >= big_batch_threshold:
inner_verbosity = False
else:
inner_verbosity = True
i = 0
for file_path, inner_folder_id in files_to_upload:
uploaded_marker_file = file_path + '.uploadedtobox'
if os.path.isfile( uploaded_marker_file ):
if retry_already_uploaded_files:
os.remove( uploaded_marker_file )
else:
print(( 'Skipping already uploaded file: ' + file_path ))
r.decrement_total_count()
continue
# Since we are putting into a sorted PriorityQueue, we add a random first tuple member if randomness is desired
if upload_in_random_order:
worker_args = (random.random(), file_path, inner_folder_id, inner_verbosity, uploaded_marker_file)
else:
worker_args = (i, file_path, inner_folder_id, inner_verbosity, uploaded_marker_file)
files_to_upload_queue.put( worker_args )
i += 1
upload_worker_threads = []
for i in range( outer_upload_threads ):
t = threading.Thread( target = upload_worker )
t.start()
upload_worker_threads.append(t)
failed_files = queue.PriorityQueue()
def results_worker():
while not uploads_complete.is_set():
try:
source_path_upload, folder_to_upload_id, upload_successful, uploaded_marker_file = results_queue.get(True, 0.95)
except queue.Empty:
continue
if upload_successful:
if write_marker_files:
try:
with open(uploaded_marker_file, 'w') as f:
f.write( str( datetime.datetime.now() ) )
except:
# Sometimes this might fail if we have a permissions error (e.g. uploading a file in a directory where we only have read permission), so we just ignore
pass
else:
                    print(( 'Totally failed:', source_path_upload ))
                    failed_files.put( source_path_upload )
if os.path.isfile(uploaded_marker_file):
os.remove(uploaded_marker_file)
r.increment_report()
results_worker_thread = threading.Thread( target = results_worker )
results_worker_thread.start()
files_to_upload_queue.join()
uploads_complete.set()
for t in upload_worker_threads:
t.join()
results_worker_thread.join()
failed_files_list = []
while not failed_files.empty():
failed_files_list.append( failed_files.get() )
return failed_files_list
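    # --- Usage sketch (added for illustration; not part of the original module) ---
    # Assumes the OAuth client secrets referenced by CLIENT_SECRETS_PATH are in place; the local
    # path '/data/experiments' and the Box folder path 'backups/2018' are hypothetical examples.
    #
    #   box = BoxAPI()
    #   dest_id = box.find_folder_path('backups/2018')
    #   failed = box.upload_path(dest_id, '/data/experiments', write_marker_files = True)
    #   if failed:
    #       print('These files could not be uploaded:', failed)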
|
class BoxAPI:
def __init__(self):
pass
def _find_folder_by_name_inner( self, folder_id, name, limit = 500 ):
pass
def create_folder( self, root_folder_id, folder_name ):
pass
def find_file( self, folder_id, basename, limit = 500 ):
'''
Finds a file based on a box path
Returns a list of file IDs
Returns multiple file IDs if the file was split into parts with the extension '.partN' (where N is an integer)
'''
pass
def find_folder_path( self, folder_path ):
pass
def upload( self,
destination_folder_id,
source_path,
preflight_check = True,
verify = False, # After upload, check sha1 sums
lock_file = True, # By default, lock uploaded files to prevent changes (unless manually unlocked)
maximum_attempts = 5, # Number of times to retry upload after any exception is encountered
verbose = True,
chunked_upload_threads = 5,
):
pass
def lock_files( self, file_ids, prevent_download = False ):
pass
    def lock_file( self, file_id, prevent_download = False ):
        pass
def verify_uploaded_file(
self,
destination_folder_id,
source_path,
verbose = True,
):
'''
Verifies the integrity of a file uploaded to Box
'''
pass
def _upload_in_splits( self, destination_folder_id, source_path, preflight_check, verbose = True, chunked_upload_threads = 5 ):
'''
Since Box has a maximum file size limit (15 GB at time of writing),
we need to split files larger than this into smaller parts, and chunk upload each part
'''
pass
def _abort_chunked_upload(self):
pass
def _chunked_upload(
self,
destination_folder_id,
source_path,
dest_file_name = None,
split_start_byte = 0,
file_size = None,
preflight_check = True,
upload_threads = 5, # Your results may vary
verbose = True,
):
pass
def upload_worker():
pass
def read_total_hash_worker():
pass
def upload_path( self, upload_folder_id, fpath, verbose = True, lock_files = True, maximum_attempts = 5, retry_already_uploaded_files = False, write_marker_files = False, outer_upload_threads = 5, upload_in_random_order = True ):
pass
def find_files_recursive( search_path, outer_folder_id ):
pass
def upload_worker():
pass
def results_worker():
pass
| 19 | 3 | 37 | 4 | 31 | 3 | 6 | 0.09 | 0 | 14 | 2 | 0 | 13 | 6 | 13 | 13 | 542 | 75 | 439 | 154 | 396 | 41 | 371 | 126 | 352 | 17 | 0 | 6 | 106 |
143,547 |
Kortemme-Lab/klab
|
Kortemme-Lab_klab/klab/box_backup.py
|
klab.box_backup.FolderTraversalException
|
class FolderTraversalException(Exception):
pass
|
class FolderTraversalException(Exception):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |