Columns (dtype and observed min/max; for string columns the range is the string length):

| Column | dtype | Min | Max |
|---|---|---|---|
| id | int64 | 0 | 843k |
| repository_name | string (length) | 7 | 55 |
| file_path | string (length) | 9 | 332 |
| class_name | string (length) | 3 | 290 |
| human_written_code | string (length) | 12 | 4.36M |
| class_skeleton | string (length) | 19 | 2.2M |
| total_program_units | int64 | 1 | 9.57k |
| total_doc_str | int64 | 0 | 4.2k |
| AvgCountLine | float64 | 0 | 7.89k |
| AvgCountLineBlank | float64 | 0 | 300 |
| AvgCountLineCode | float64 | 0 | 7.89k |
| AvgCountLineComment | float64 | 0 | 7.89k |
| AvgCyclomatic | float64 | 0 | 130 |
| CommentToCodeRatio | float64 | 0 | 176 |
| CountClassBase | float64 | 0 | 48 |
| CountClassCoupled | float64 | 0 | 589 |
| CountClassCoupledModified | float64 | 0 | 581 |
| CountClassDerived | float64 | 0 | 5.37k |
| CountDeclInstanceMethod | float64 | 0 | 4.2k |
| CountDeclInstanceVariable | float64 | 0 | 299 |
| CountDeclMethod | float64 | 0 | 4.2k |
| CountDeclMethodAll | float64 | 0 | 4.2k |
| CountLine | float64 | 1 | 115k |
| CountLineBlank | float64 | 0 | 9.01k |
| CountLineCode | float64 | 0 | 94.4k |
| CountLineCodeDecl | float64 | 0 | 46.1k |
| CountLineCodeExe | float64 | 0 | 91.3k |
| CountLineComment | float64 | 0 | 27k |
| CountStmt | float64 | 1 | 93.2k |
| CountStmtDecl | float64 | 0 | 46.1k |
| CountStmtExe | float64 | 0 | 90.2k |
| MaxCyclomatic | float64 | 0 | 759 |
| MaxInheritanceTree | float64 | 0 | 16 |
| MaxNesting | float64 | 0 | 34 |
| SumCyclomatic | float64 | 0 | 6k |

Each record below gives these fields in order, separated by `|`: id, repository_name, file_path, class_name, the full human_written_code, the class_skeleton, and then the numeric metric columns. A minimal sketch of a single record follows the table.
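To make the column listing concrete, here is a minimal sketch (not part of the dataset itself) of how one record could be written out as a plain Python dict. The field names come from the column table above; the values are copied from record 143,248 (the StripEnd class) shown below, the two long text fields are elided, and only a few of the metric columns are spelled out.

# Illustrative only: one record of this dataset as a plain Python dict.
record = {
    "id": 143248,
    "repository_name": "KnowledgeLinks/rdfframework",
    "file_path": "KnowledgeLinks_rdfframework/rdfframework/datasets/qryprocessors.py",
    "class_name": "rdfframework.datasets.qryprocessors.StripEnd",
    "human_written_code": "...",   # full class source, elided (see the record below)
    "class_skeleton": "...",       # stubbed class: signatures, docstrings, `pass` bodies
    "total_program_units": 3,
    "total_doc_str": 1,
    # the remaining columns are per-class code metrics (floats); a few examples:
    "CommentToCodeRatio": 0.29,
    "CountLine": 11.0,
    "CountDeclMethod": 2.0,
    "SumCyclomatic": 2.0,
}
assert record["class_name"].rsplit(".", 1)[-1] == "StripEnd"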
143,248 | KnowledgeLinks/rdfframework | KnowledgeLinks_rdfframework/rdfframework/datasets/qryprocessors.py | rdfframework.datasets.qryprocessors.StripEnd |
class StripEnd(JsonQryProcessor):
""" strips off the provided characters from the end of strings
"""
key = "stripend"
def __init__(self, characters):
self.regex = "[%s]+$" % characters
def __call__(self, action_list):
return [XsdString(re.sub(self.regex, '', str(action)))\
for action in action_list]
|
class StripEnd(JsonQryProcessor):
''' strips off the provided characters from the end of strings
'''
def __init__(self, characters):
pass
def __call__(self, action_list):
pass
| 3 | 1 | 3 | 0 | 3 | 0 | 1 | 0.29 | 1 | 2 | 1 | 0 | 2 | 1 | 2 | 21 | 11 | 2 | 7 | 5 | 4 | 2 | 6 | 5 | 3 | 1 | 4 | 0 | 2 |
143,249 | KnowledgeLinks/rdfframework | KnowledgeLinks_rdfframework/rdfframework/connections/connmanager.py | rdfframework.connections.connmanager.ConnManager |
class ConnManager(metaclass=ConnManagerMeta):
""" class for managing database connections """
log_level = logging.INFO
conn_mapping = RdfwConnections
__is_initialized__ = False
def __init__(self, connections=None, **kwargs):
self.conns = {}
self.log_level = kwargs.get('log_level', self.log_level)
if connections:
self.load(connections, **kwargs)
def set_conn(self, **kwargs):
""" takes a connection and creates the connection """
# log = logging.getLogger("%s.%s" % (self.log, inspect.stack()[0][3]))
log.setLevel(kwargs.get('log_level',self.log_level))
conn_name = kwargs.get("name")
if not conn_name:
raise NameError("a connection requires a 'name': %s" % kwargs)
elif self.conns.get(conn_name):
raise KeyError("connection '%s' has already been set" % conn_name)
if not kwargs.get("active", True):
log.warning("Connection '%s' is set as inactive" % conn_name)
return
conn_type = kwargs.get("conn_type")
if not conn_type or conn_type not in self.conn_mapping.nested:
err_msg = ["a connection requires a valid 'conn_type':\n",
"%s"]
raise NameError("".join(err_msg) % (list(self.conn_mapping.nested)))
log.info("Setting '%s' connection", conn_name)
if conn_type == "triplestore":
conn = make_tstore_conn(kwargs)
else:
conn = RdfwConnections[conn_type][kwargs['vendor']](**kwargs)
self.conns[conn_name] = conn
self.__is_initialized__ = True
@initialized
def get(self, conn_name, default=None, **kwargs):
""" returns the specified connection
args:
conn_name: the name of the connection
"""
if isinstance(conn_name, RdfwConnections):
return conn_name
try:
return self.conns[conn_name]
except KeyError:
if default:
return self.get(default, **kwargs)
raise LookupError("'%s' connection has not been set" % conn_name)
def load(self, conn_list, **kwargs):
""" Takes a list of connections and sets them in the manager
args:
conn_list: list of connection defitions
"""
for conn in conn_list:
conn['delay_check'] = kwargs.get('delay_check', False)
self.set_conn(**conn)
if kwargs.get('delay_check'):
test = self.wait_for_conns(**kwargs)
if not test:
log.critical("\n\nEXITING:Unable to establish connections \n"
"%s", test)
@property
def failing(self):
""" Tests to see if all connections are working
returns:
dictionary of all failing connections
"""
log_levels = {key: conn.log_level for key, conn in self.conns.items()
if hasattr(conn, 'log_level')}
for key in log_levels:
self.conns[key].log_level = logging.CRITICAL
failing_conns = {key: conn for key, conn in self.active.items()
if not conn.check_status}
for key, level in log_levels.items():
self.conns[key].log_level = level
return failing_conns
def wait_for_conns(self, timeout=60, start_delay=0, interval=5, **kwargs):
''' delays unitil all connections are working
args:
timeout: number of seconds to try to connecting. Error out when
timeout is reached
start_delay: number of seconds to wait before checking status
interval: number of seconds to wait between checks
'''
log.setLevel(kwargs.get('log_level',self.log_level))
timestamp = time.time()
last_check = time.time() + start_delay - interval
last_delay_notification = time.time() - interval
timeout += 1
failing = True
up_conns = {}
# loop until the server is up or the timeout is reached
while((time.time()-timestamp) < timeout) and failing:
# if delaying, the start of the check, print waiting to start
if start_delay > 0 and time.time() - timestamp < start_delay \
and (time.time()-last_delay_notification) > 5:
print("Delaying server status check until %ss. Current time: %ss" \
% (start_delay, int(time.time() - timestamp)))
last_delay_notification = time.time()
# check status at the specified 'interval' until the server is up
first_check = True
while ((time.time()-last_check) > interval) and failing:
msg = ["\tChecked status of servers at %ss" % \
int((time.time()-timestamp)),
"\t** CONNECTION STATUS:"]
last_check = time.time()
failing = self.failing
new_up = (self.active.keys() - failing.keys()) - \
up_conns.keys()
msg += ["\t\t UP - %s: %s" % (key, self.conns[key])
for key in new_up]
up_conns.update({key: self.conns[key] for key in new_up})
msg.append("\t*** '%s' connection(s) up" % len(up_conns))
msg += ["\t\t FAILING - %s: %s" % (key, self.conns[key])
for key in failing]
log.info("** CONNECTION STATUS:\n%s", "\n".join(msg))
if not failing:
log.info("**** Servers up at %ss" % \
int((time.time()-timestamp)))
break
if failing:
raise RuntimeError("Unable to establish connection(s): ",
failing)
for conn in up_conns.values():
conn.delay_check_pass()
return not failing
@property
def list_conns(self):
""" returns a list of established connections """
return list(self.conns)
def __getattr__(self, attr):
return self.get(attr)
def __getitem__(self, item):
return self.get(item)
def __iter__(self):
return iter(self.conns.items())
def __repr__(self):
rtn_val = super().__repr__()
conns = "\n\t".join(sorted(["%s: %s" % (key.ljust(16,' '),
value.__repr__()[:60])
for key, value in self.conns.items()]))
return "{standard}\n\t{conns}".format(standard=rtn_val,
conns=conns)
@property
def active(self):
""" returns a dictionary of connections set as active.
"""
return {key: value for key, value in self.conns.items()
if value.active}
|
class ConnManager(metaclass=ConnManagerMeta):
''' class for managing database connections '''
def __init__(self, connections=None, **kwargs):
pass
def set_conn(self, **kwargs):
''' takes a connection and creates the connection '''
pass
@initialized
def get(self, conn_name, default=None, **kwargs):
''' returns the specified connection
args:
conn_name: the name of the connection
'''
pass
def load(self, conn_list, **kwargs):
''' Takes a list of connections and sets them in the manager
args:
conn_list: list of connection defitions
'''
pass
@property
def failing(self):
''' Tests to see if all connections are working
returns:
dictionary of all failing connections
'''
pass
def wait_for_conns(self, timeout=60, start_delay=0, interval=5, **kwargs):
''' delays unitil all connections are working
args:
timeout: number of seconds to try to connecting. Error out when
timeout is reached
start_delay: number of seconds to wait before checking status
interval: number of seconds to wait between checks
'''
pass
@property
def list_conns(self):
''' returns a list of established connections '''
pass
def __getattr__(self, attr):
pass
def __getitem__(self, item):
pass
def __iter__(self):
pass
def __repr__(self):
pass
@property
def active(self):
''' returns a dictionary of connections set as active.
'''
pass
| 17 | 8 | 12 | 1 | 9 | 2 | 3 | 0.23 | 1 | 8 | 1 | 0 | 12 | 1 | 12 | 27 | 169 | 20 | 121 | 41 | 104 | 28 | 98 | 37 | 85 | 7 | 3 | 3 | 32 |
143,250 | KnowledgeLinks/rdfframework | KnowledgeLinks_rdfframework/rdfframework/datasets/qryprocessors.py | rdfframework.datasets.qryprocessors.NoLangTag |
class NoLangTag(JsonQryProcessor):
""" Removes language tags from string items """
key = "no_lang_tag"
def __init__(self, active=True):
self.active = cbool(active)
def __call__(self, action_list):
if not self.active:
return action_list
rtn_list = []
for action in action_list:
rtn_list.append(XsdString(str(action)))
return rtn_list
|
class NoLangTag(JsonQryProcessor):
''' Removes language tags from string items '''
def __init__(self, active=True):
pass
def __call__(self, action_list):
pass
| 3 | 1 | 5 | 0 | 5 | 0 | 2 | 0.09 | 1 | 2 | 1 | 0 | 2 | 1 | 2 | 21 | 14 | 2 | 11 | 7 | 8 | 1 | 11 | 7 | 8 | 3 | 4 | 1 | 4 |
143,251 | KnowledgeLinks/rdfframework | KnowledgeLinks_rdfframework/rdfframework/datasets/qryprocessors.py | rdfframework.datasets.qryprocessors.MakeDistinct |
class MakeDistinct(JsonQryProcessor):
""" Takes a list when called and removes dulplicates """
key = "distinct"
def __init__(self, active=True):
self.active = cbool(active)
def __call__(self, action_list):
if not self.active:
return action_list
rtn_list = UniqueList()
for action in action_list:
rtn_list.append(action)
return rtn_list
|
class MakeDistinct(JsonQryProcessor):
''' Takes a list when called and removes dulplicates '''
def __init__(self, active=True):
pass
def __call__(self, action_list):
pass
| 3 | 1 | 5 | 0 | 5 | 0 | 2 | 0.09 | 1 | 1 | 1 | 0 | 2 | 1 | 2 | 21 | 14 | 2 | 11 | 7 | 8 | 1 | 11 | 7 | 8 | 3 | 4 | 1 | 4 |
143,252 | KnowledgeLinks/rdfframework | KnowledgeLinks_rdfframework/rdfframework/datasets/qryprocessors.py | rdfframework.datasets.qryprocessors.ListLimiter |
class ListLimiter(JsonQryProcessor):
""" takes a list and a length limit and returns the list of appropriate
length
"""
key = "limit"
def __init__(self, length):
self.length = int(length)
def __call__(self, action_list):
if self.length >= 0:
return action_list[:self.length]
return action_list[self.length:]
|
class ListLimiter(JsonQryProcessor):
''' takes a list and a length limit and returns the list of appropriate
length
'''
def __init__(self, length):
pass
def __call__(self, action_list):
pass
| 3 | 1 | 3 | 0 | 3 | 0 | 2 | 0.38 | 1 | 1 | 0 | 0 | 2 | 1 | 2 | 21 | 13 | 2 | 8 | 5 | 5 | 3 | 8 | 5 | 5 | 2 | 4 | 1 | 3 |
143,253 | KnowledgeLinks/rdfframework | KnowledgeLinks_rdfframework/rdfframework/datasets/qryprocessors.py | rdfframework.datasets.qryprocessors.JsonQryProcessor |
class JsonQryProcessor(metaclass=KeyRegistryMeta):
""" Base class for json query processors. Provides a 'key' registry
for all inherited classes.
To make a new processor create an inherited class with the following
attributes/methods at the class level
Example:
class NewProcessor(JsonQryProcessor):
key = "testkey" #this the string the json_qry will use
def __init__(self, query_str_arg):
# intialize base on query_str_arg
def __call__(self, action_list):
# do something to item in the action list
"""
__required_idx_attrs__ = ['key']
pass
|
class JsonQryProcessor(metaclass=KeyRegistryMeta):
''' Base class for json query processors. Provides a 'key' registry
for all inherited classes.
To make a new processor create an inherited class with the following
attributes/methods at the class level
Example:
class NewProcessor(JsonQryProcessor):
key = "testkey" #this the string the json_qry will use
def __init__(self, query_str_arg):
# intialize base on query_str_arg
def __call__(self, action_list):
# do something to item in the action list
'''
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 4 | 1 | 0 | 0 | 5 | 0 | 0 | 0 | 19 | 20 | 5 | 3 | 2 | 2 | 12 | 3 | 2 | 2 | 0 | 3 | 0 | 0 |
143,254 | KnowledgeLinks/rdfframework | KnowledgeLinks_rdfframework/rdfframework/datasets/qryprocessors.py | rdfframework.datasets.qryprocessors.First |
class First(JsonQryProcessor):
""" Returns first item or None. Removes list enclosure """
key = "first"
def __init__(self, active=True):
self.active = cbool(active)
def __call__(self, action_list):
if not self.active:
return action_list
if action_list:
return action_list[0]
else:
return None
|
class First(JsonQryProcessor):
''' Returns first item or None. Removes list enclosure '''
def __init__(self, active=True):
pass
def __call__(self, action_list):
pass
| 3 | 1 | 5 | 0 | 5 | 0 | 2 | 0.09 | 1 | 0 | 0 | 0 | 2 | 1 | 2 | 21 | 14 | 2 | 11 | 5 | 8 | 1 | 10 | 5 | 7 | 3 | 4 | 1 | 4 |
143,255 | KnowledgeLinks/rdfframework | KnowledgeLinks_rdfframework/rdfframework/datasets/dataconverter.py | rdfframework.datasets.dataconverter.SharedManager |
class SharedManager(managers.BaseManager):
pass
|
class SharedManager(managers.BaseManager):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 14 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 2 | 0 | 0 |
143,256 | KnowledgeLinks/rdfframework | KnowledgeLinks_rdfframework/tests/test_frameworkutilities.py | tests.test_frameworkutilities.Test_make_triple |
class Test_make_triple(unittest.TestCase):
def setUp(self):
self.SCHEMA = rdflib.Namespace("https://schema.org/")
def test_triple(self):
subject = rdflib.URIRef("http://peopleuid.com/42")
object_ = rdflib.Literal("Josey McNamera")
self.assertEqual(
make_triple(subject, self.SCHEMA.name, object_),
"{0} {1} {2} .".format(subject, self.SCHEMA.name, object_))
|
class Test_make_triple(unittest.TestCase):
def setUp(self):
pass
def test_triple(self):
pass
| 3 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 2 | 1 | 2 | 74 | 11 | 2 | 9 | 6 | 6 | 0 | 7 | 6 | 4 | 1 | 2 | 0 | 2 |
143,257 | KnowledgeLinks/rdfframework | KnowledgeLinks_rdfframework/rdfframework/connections/connmanager.py | rdfframework.connections.connmanager.ConnManagerMeta |
class ConnManagerMeta(type):
""" Metaclass ensures that there is only one instance of the RdfConnManager
"""
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(ConnManagerMeta,
cls).__call__(*args, **kwargs)
else:
conns = None
if args:
conns = args[0]
elif 'connections' in kwargs:
conns = kwargs['connections']
if conns:
cls._instances[cls].load(conns, **kwargs)
return cls._instances[cls]
def clear(cls):
cls._instances = {}
|
class ConnManagerMeta(type):
''' Metaclass ensures that there is only one instance of the RdfConnManager
'''
def __call__(cls, *args, **kwargs):
pass
def clear(cls):
pass
| 3 | 1 | 8 | 0 | 8 | 0 | 3 | 0.12 | 1 | 1 | 0 | 1 | 2 | 0 | 2 | 15 | 21 | 2 | 17 | 5 | 14 | 2 | 14 | 5 | 11 | 5 | 2 | 2 | 6 |
143,258 | KnowledgeLinks/rdfframework | KnowledgeLinks_rdfframework/rdfframework/connections/connmanager.py | rdfframework.connections.connmanager.RdfwConnections |
class RdfwConnections(metaclass=KeyRegistryMeta):
__required_idx_attrs__ = {'vendor', 'conn_type'}
__nested_idx_attrs__ = {"conn_type"}
delay_check = None
def __repr__(self):
if self.__class__ == RdfwConnections:
return "<RdfwConnections: %s" % pprint.pformat(self.__registry__)
attrs = ['namespace', 'active', 'check_status']
msg_attrs = ["'%s': '%s'" % (attr, getattr(self, attr))
for attr in attrs
if hasattr(self, attr)]
url = self.ext_url
if self.url:
url = self.url
msg_attrs = ["url: %s" % url] + msg_attrs
return "<%s([{%s}])>" % (self.vendor.capitalize(), ", ".join(msg_attrs))
def __set_mgr__(self, **kwargs):
""" set the data file management for the connection
kwargs:
data_file_manager: instance of DataFileManager
data_upload: list of tuples describing the files to upload
"""
self.mgr = None
# if kwargs.get("data_upload"):
import rdfframework.datamanager as dm
log.info("Setting Data Manager for conn '%s'", kwargs.get('name'))
mgr = kwargs.get('data_file_manager', kwargs.get("name", True))
if isinstance(mgr, dm.DataFileManager):
self.mgr = mgr
elif mgr == 'active_defs':
self.mgr = dm.DefinitionManager(conn=self, **kwargs)
elif mgr == 'datastore':
self.mgr = dm.DatastoreManager(conn=self, **kwargs)
else:
self.mgr = dm.DataFileManager(conn=self, **kwargs)
if self.mgr: # and kwargs.get('data_upload'):
self.mgr.add_file_locations(kwargs.get('data_upload',[]))
if kwargs.get("delay_check"):
self.delay_check = kwargs
else:
if self.check_status:
# kwargs['log_level'] = logging.DEBUG
self.mgr.load(**kwargs)
else:
log.warn("conn '%s' check_status failed",
kwargs.get('name'))
def delay_check_pass(self):
if self.delay_check:
try:
self.mgr.load(**self.delay_check)
except AttributeError:
pass
self.delay_check = None
|
class RdfwConnections(metaclass=KeyRegistryMeta):
def __repr__(self):
pass
def __set_mgr__(self, **kwargs):
''' set the data file management for the connection
kwargs:
data_file_manager: instance of DataFileManager
data_upload: list of tuples describing the files to upload
'''
pass
def delay_check_pass(self):
pass
| 4 | 1 | 17 | 0 | 14 | 3 | 4 | 0.17 | 1 | 4 | 3 | 4 | 3 | 1 | 3 | 22 | 57 | 4 | 46 | 13 | 41 | 8 | 38 | 13 | 33 | 7 | 3 | 3 | 13 |
143,259 | KnowledgeLinks/rdfframework | KnowledgeLinks_rdfframework/rdfframework/connections/elasticconn.py | rdfframework.connections.elasticconn.Elastic |
class Elastic(EsBase, RdfwConnections):
""" An API for interacting between elasticsearch and the rdfframework
this is a simple extension of the elasticsearch package
args:
url: The url to the repository
local_directory: the path to the file data directory as python
reads the file path.
container_dir: the path to the file data directory as the docker
container/Blazegraph see the file path.
kwargs:
local_url: alternate url to use if primary is not working
"""
vendor = "elastic"
conn_type = "search"
log_name = "%s-Elastic" % MNAME
log_level = logging.INFO
default_url = 'http://localhost:9200'
def __init__(self,
url=None,
local_directory=None,
container_dir=None,
**kwargs):
self.local_directory = pick(local_directory, CFG.LOCAL_DATA_PATH)
self.ext_url = pick(url, self.default_url)
self.local_url = pick(kwargs.get('local_url'), self.default_url)
self.url = None
self.active = kwargs.get('active', True)
if not kwargs.get('delay_check'):
self.check_status
if self.url:
kwargs['es_url'] = self.url
else:
kwargs['es_url'] = self.ext_url
super(Elastic, self).__init__(**kwargs)
self.container_dir = container_dir
if self.ext_url is None:
msg = ["A Elasticsearch url must be defined. Either pass 'url'",
"or initialize the 'RdfConfigManager'"]
raise AttributeError(" ".join(msg))
def __repr__(self):
url = self.ext_url
if self.url:
url = self.url
return "<Elastic([{'host': '%s'}])>" % url
@property
def check_status(self):
""" tests both the ext_url and local_url to see if the database is
running
returns:
True if a connection can be made
False if the connection cannot me made
"""
log = logging.getLogger("%s.%s" % (self.log_name,
inspect.stack()[0][3]))
log.setLevel(self.log_level)
if self.url:
return True
try:
result = requests.get(self.ext_url)
self.url = self.ext_url
self.es_url = self.url
self.es = Elasticsearch([self.url])
return True
except requests.exceptions.ConnectionError:
pass
try:
result = requests.get(self.local_url)
log.warning("Url '%s' not connecting. Using local_url '%s'" % \
(self.ext_url, self.local_url))
self.url = self.local_url
self.es_url = self.url
self.es = Elasticsearch([self.url])
return True
except requests.exceptions.ConnectionError:
self.url = None
log.warning("Unable to connect using urls: %s" % set([self.ext_url,
self.local_url]))
return False
|
class Elastic(EsBase, RdfwConnections):
''' An API for interacting between elasticsearch and the rdfframework
this is a simple extension of the elasticsearch package
args:
url: The url to the repository
local_directory: the path to the file data directory as python
reads the file path.
container_dir: the path to the file data directory as the docker
container/Blazegraph see the file path.
kwargs:
local_url: alternate url to use if primary is not working
'''
def __init__(self,
url=None,
local_directory=None,
container_dir=None,
**kwargs):
pass
def __repr__(self):
pass
@property
def check_status(self):
''' tests both the ext_url and local_url to see if the database is
running
returns:
True if a connection can be made
False if the connection cannot me made
'''
pass
| 5 | 2 | 22 | 2 | 18 | 2 | 3 | 0.28 | 2 | 3 | 0 | 0 | 3 | 8 | 3 | 33 | 89 | 11 | 61 | 26 | 52 | 17 | 51 | 21 | 47 | 4 | 4 | 1 | 10 |
143,260 | KnowledgeLinks/rdfframework | KnowledgeLinks_rdfframework/rdfframework/connections/fedoracommons.py | rdfframework.connections.fedoracommons.FedoraCommons |
class FedoraCommons(RdfwConnections):
""" An API for interacting between a Fedora 4 Commans digital repository
and the rdfframework
args:
url: The url to the repository
local_directory: the path to the file data directory as python
reads the file path.
container_dir: the path to the file data directory as the docker
container/Blazegraph see the file path.
kwargs:
local_url: alternate url to use if primary is not working
"""
vendor = "fedora"
conn_type = "repository"
log_name = "%s-FedoraCommons" % MNAME
log_level = logging.INFO
default_url = ''
qry_results_formats = {'rdf': 'application/sparql-results+xml',
'xml': 'application/sparql-results+xml',
'json': 'application/sparql-results+json',
'binary': 'application/x-binary-rdf-results-table',
'tsv': 'text/tab-separated-values',
'cxv': 'text/csv'}
def __init__(self,
url=None,
local_directory=None,
container_dir=None,
**kwargs):
self.local_directory = pick(local_directory, CFG.LOCAL_DATA_PATH)
self.ext_url = pick(url, self.default_url)
self.local_url = pick(kwargs.get('local_url'), self.default_url)
self.container_dir = container_dir
self.url = None
self.active = kwargs.get('active', True)
if self.ext_url is None:
msg = ["A Blazegraph url must be defined. Either pass 'url'",
"or initialize the 'RdfConfigManager'"]
raise AttributeError(" ".join(msg))
if not kwargs.get('delay_check'):
self.check_status
@property
def check_status(self):
""" tests both the ext_url and local_url to see if the database is
running
returns:
True if a connection can be made
False if the connection cannot me made
"""
log = logging.getLogger("%s.%s" % (self.log_name,
inspect.stack()[0][3]))
log.setLevel(self.log_level)
if self.url:
return True
try:
result = requests.get(self.ext_url)
self.url = self.ext_url
return True
except requests.exceptions.ConnectionError:
pass
try:
result = requests.get(self.local_url)
log.warning("Url '%s' not connecting. Using local_url '%s'" % \
(self.ext_url, self.local_url))
self.url = self.local_url
return True
except requests.exceptions.ConnectionError:
self.url = None
log.warning("Unable to connect using urls: %s" % set([self.ext_url,
self.local_url]))
return False
|
class FedoraCommons(RdfwConnections):
''' An API for interacting between a Fedora 4 Commans digital repository
and the rdfframework
args:
url: The url to the repository
local_directory: the path to the file data directory as python
reads the file path.
container_dir: the path to the file data directory as the docker
container/Blazegraph see the file path.
kwargs:
local_url: alternate url to use if primary is not working
'''
def __init__(self,
url=None,
local_directory=None,
container_dir=None,
**kwargs):
pass
@property
def check_status(self):
''' tests both the ext_url and local_url to see if the database is
running
returns:
True if a connection can be made
False if the connection cannot me made
'''
pass
| 4 | 2 | 25 | 2 | 20 | 3 | 4 | 0.32 | 1 | 2 | 0 | 0 | 2 | 6 | 2 | 24 | 78 | 8 | 53 | 23 | 45 | 17 | 39 | 18 | 36 | 4 | 4 | 1 | 7 |
143,261 | KnowledgeLinks/rdfframework | KnowledgeLinks_rdfframework/rdfframework/connections/rdflibconn.py | rdfframework.connections.rdflibconn.RdflibTriplestore |
class RdflibTriplestore(metaclass=RdflibTstoreSingleton):
""" psuedo triplestore functionality for managing graphs and namespaces
similar to a triplestore like blazegraph"""
namespaces = {'kb': ConjunctiveGraph()}
def has_namespace(self, name):
""" sees if the namespace exists
args:
name(str): the name of the namespace
returns:
bool
"""
if name in self.namespaces:
return True
else:
return False
def create_namespace(self, name, ignore_errors=False):
"""creates a namespace if it does not exist
args:
name: the name of the namespace
ignore_errors(bool): Will ignore if a namespace already exists or
there is an error creating the namespace
returns:
True if created
False if not created
error if namespace already exists
"""
if not self.has_namespace(name):
self.namespaces[name] = ConjunctiveGraph()
return True
elif ignore_errors:
return True
else:
raise RuntimeError("namespace '%s' already exists" % name)
def delete_namespace(self, name, ignore_errors=False):
""" deletes a namespace
args:
name: the name of the namespace
ignore_errors(bool): Will ignore if a namespace doesn not exist or
there is an error deleting the namespace
returns:
True if deleted
False if not deleted
error if namespace already exists
"""
if self.has_namespace(name):
del self.namespaces[name]
return True
elif ignore_errors:
return True
else:
raise RuntimeError("namespace '%s' does not exist" % name)
def get_namespace(self, namespace):
""" returns the rdflib graph for the specified namespace
args:
namespace: the name of the namespace
"""
if namespace == 'temp':
return Graph()
else:
return self.namespaces[namespace]
|
class RdflibTriplestore(metaclass=RdflibTstoreSingleton):
''' psuedo triplestore functionality for managing graphs and namespaces
similar to a triplestore like blazegraph'''
def has_namespace(self, name):
''' sees if the namespace exists
args:
name(str): the name of the namespace
returns:
bool
'''
pass
def create_namespace(self, name, ignore_errors=False):
'''creates a namespace if it does not exist
args:
name: the name of the namespace
ignore_errors(bool): Will ignore if a namespace already exists or
there is an error creating the namespace
returns:
True if created
False if not created
error if namespace already exists
'''
pass
def delete_namespace(self, name, ignore_errors=False):
''' deletes a namespace
args:
name: the name of the namespace
ignore_errors(bool): Will ignore if a namespace doesn not exist or
there is an error deleting the namespace
returns:
True if deleted
False if not deleted
error if namespace already exists
'''
pass
def get_namespace(self, namespace):
''' returns the rdflib graph for the specified namespace
args:
namespace: the name of the namespace
'''
pass
| 5 | 5 | 16 | 2 | 7 | 8 | 3 | 1.14 | 1 | 1 | 0 | 0 | 4 | 0 | 4 | 18 | 72 | 12 | 28 | 6 | 23 | 32 | 22 | 6 | 17 | 3 | 3 | 1 | 10 |
143,262 | KnowledgeLinks/rdfframework | KnowledgeLinks_rdfframework/rdfframework/connections/rdflibconn.py | rdfframework.connections.rdflibconn.RdflibTstoreSingleton |
class RdflibTstoreSingleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(RdflibTstoreSingleton,
cls).__call__(*args, **kwargs)
return cls._instances[cls]
|
class RdflibTstoreSingleton(type):
def __call__(cls, *args, **kwargs):
pass
| 2 | 0 | 5 | 0 | 5 | 0 | 2 | 0 | 1 | 1 | 0 | 1 | 1 | 0 | 1 | 14 | 7 | 0 | 7 | 3 | 5 | 0 | 6 | 3 | 4 | 2 | 2 | 1 | 2 |
143,263 | KnowledgeLinks/rdfframework | KnowledgeLinks_rdfframework/rdfframework/datamanager/datastoremanager.py | rdfframework.datamanager.datastoremanager.DatastoreManager |
class DatastoreManager(DataFileManager, metaclass=DatastoreManagerMeta):
"""
Datastore file manager. This class manages all of the RDF vocabulary
for the rdfframework
"""
log_level = logging.INFO
is_initialized = False
def __init__(self, file_locations=[], conn=None, **kwargs):
self.conn = None
if not conn:
conn = kwargs.get("conn", __CONNS__.datastore)
if conn:
super(DatastoreManager, self).__init__(file_locations,
conn,
**kwargs)
if self.__file_locations__:
self.load(self.__file_locations__, **kwargs)
else:
self.add_file_locations(file_locations)
|
class DatastoreManager(DataFileManager, metaclass=DatastoreManagerMeta):
'''
Datastore file manager. This class manages all of the RDF vocabulary
for the rdfframework
'''
def __init__(self, file_locations=[], conn=None, **kwargs):
pass
| 2 | 1 | 12 | 0 | 12 | 0 | 4 | 0.27 | 2 | 1 | 0 | 0 | 1 | 1 | 1 | 31 | 20 | 1 | 15 | 5 | 13 | 4 | 12 | 5 | 10 | 4 | 3 | 2 | 4 |
143,264 | KnowledgeLinks/rdfframework | KnowledgeLinks_rdfframework/rdfframework/datamanager/datastoremanager.py | rdfframework.datamanager.datastoremanager.DatastoreManagerMeta |
class DatastoreManagerMeta(type):
""" Metaclass ensures that there is only one instance manager
"""
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(DatastoreManagerMeta,
cls).__call__(*args, **kwargs)
else:
values = None
if kwargs.get("conn"):
cls._instances[cls].conn = kwargs['conn']
if args:
values = args[0]
if values:
cls._instances[cls].load(values, **kwargs)
return cls._instances[cls]
def __init__(self, *args, **kwargs):
pass
def clear(cls):
cls._instances = {}
|
class DatastoreManagerMeta(type):
''' Metaclass ensures that there is only one instance manager
'''
def __call__(cls, *args, **kwargs):
pass
def __init__(self, *args, **kwargs):
pass
def clear(cls):
pass
| 4 | 1 | 6 | 0 | 6 | 0 | 2 | 0.11 | 1 | 1 | 0 | 1 | 3 | 0 | 3 | 16 | 24 | 3 | 19 | 6 | 15 | 2 | 17 | 6 | 13 | 5 | 2 | 2 | 7 |
143,265 | KnowledgeLinks/rdfframework | KnowledgeLinks_rdfframework/rdfframework/datamanager/defmanager.py | rdfframework.datamanager.defmanager.DefManagerMeta |
class DefManagerMeta(type):
""" Metaclass ensures that there is only one instance of the RdfConnManager
"""
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(DefManagerMeta,
cls).__call__(*args, **kwargs)
else:
values = None
if kwargs.get("conn"):
cls._instances[cls].conn = kwargs['conn']
if args:
values = args[0]
elif 'rdf_defs' in kwargs:
values = kwargs['vocabularies']
if values:
cls._instances[cls].load(values, **kwargs)
return cls._instances[cls]
def __init__(self, *args, **kwargs):
pass
def clear(cls):
cls._instances = {}
|
class DefManagerMeta(type):
''' Metaclass ensures that there is only one instance of the RdfConnManager
'''
def __call__(cls, *args, **kwargs):
pass
def __init__(self, *args, **kwargs):
pass
def clear(cls):
pass
| 4 | 1 | 6 | 0 | 6 | 0 | 3 | 0.1 | 1 | 1 | 0 | 1 | 3 | 0 | 3 | 16 | 26 | 3 | 21 | 6 | 17 | 2 | 18 | 6 | 14 | 6 | 2 | 2 | 8 |
143,266 | KnowledgeLinks/rdfframework | KnowledgeLinks_rdfframework/rdfframework/datamanager/defmanager.py | rdfframework.datamanager.defmanager.DefinitionManager |
class DefinitionManager(DataFileManager, metaclass=DefManagerMeta):
"""
RDF Vocabulary Manager. This class manages all of the RDF vocabulary
for the rdfframework
"""
log_level = logging.INFO
is_initialized = False
vocab_dir = os.path.join(os.path.split(os.path.realpath(__file__))[0],
"vocabularies")
vocab_map = {
"rdf": {
"filename": "rdf.ttl",
"download": "https://www.w3.org/1999/02/22-rdf-syntax-ns#",
"namespace": "http://www.w3.org/1999/02/22-rdf-syntax-ns#"
},
"owl": {
"filename": "owl.ttl",
"download": "http://www.w3.org/2002/07/owl#",
"namespace": "http://www.w3.org/2002/07/owl#"
},
"schema": {
"filename": "schema.nt",
"download": "http://schema.org/version/latest/schema.nt",
"namespace": "http://schema.org/"
},
"rdfs": {
"filename": "rdfs.ttl",
"download": "https://www.w3.org/2000/01/rdf-schema#",
"namespace": "http://www.w3.org/2000/01/rdf-schema#"
},
"skos": {
"filename": "skos.rdf",
"namespace": "http://www.w3.org/2004/02/skos/core#",
"download": "https://www.w3.org/2009/08/skos-reference/skos.rdf"
},
"dc": {
"filename": "dc.ttl",
"namespace": "http://purl.org/dc/elements/1.1/",
"download": ["http://purl.org/dc/elements/1.1/",
"http://dublincore.org/2012/06/14/dcelements"]
},
"dcterm": {
"filename": "dcterm.ttl",
"download": ["http://purl.org/dc/terms/",
"http://dublincore.org/2012/06/14/dcterms"],
"namespace": "http://purl.org/dc/terms/"
},
"void": {
"filename": "void.ttl",
"namespace": "http://rdfs.org/ns/void#",
"download": "http://vocab.deri.ie/void.ttl"
},
"adms": {
"filename": "adms.ttl",
"namespace": "https://www.w3.org/ns/adms#",
"download": "https://www.w3.org/ns/adms#"
},
"vcard": {
"filename": "vcard.ttl",
"namespace": "http://www.w3.org/2006/vcard/ns#",
"download": "https://www.w3.org/2006/vcard/ns#"
},
"foaf": {
"filename": "foaf.rdf",
"namespace": "http://xmlns.com/foaf/0.1/",
"download": "http://xmlns.com/foaf/spec/20140114.rdf"
},
"bf": {
"filename": "bf.rdf",
"namespace": "http://id.loc.gov/ontologies/bibframe/",
"download": "http://id.loc.gov/ontologies/bibframe.rdf"
}
}
def __init__(self, file_locations=[], conn=None, **kwargs):
# add all namespaces to the RdfNsManager to ensure that there are no
# conflicts with the config file
[__NSM__.bind(prefix, val['namespace'], override=False, calc=False)
for prefix, val in self.vocab_map.items()]
self.conn = None
if not conn:
conn = kwargs.get("conn", __CONNS__.active_defs)
if conn:
super(DefinitionManager, self).__init__(file_locations,
conn,
**kwargs)
if self.__file_locations__:
self.load(self.__file_locations__, **kwargs)
else:
self.add_file_locations(file_locations)
def __get_conn__(self, **kwargs):
if not self.conn:
self.conn = kwargs.get("conn", __CONNS__.active_defs)
return kwargs.get("conn", self.conn)
def load(self, file_locations=[], **kwargs):
""" Loads the file_locations into the triplestores
args:
file_locations: list of tuples to load
[('vocabularies', [list of vocabs to load])
('directory', '/directory/path')
('filepath', '/path/to/a/file')
('package_all', 'name.of.a.package.with.defs')
('package_file','name.of.package', 'filename')]
custom: list of custom definitions to load
"""
self.__set_cache_dir__(**kwargs)
conn = self.__get_conn__(**kwargs)
self.set_load_state(**kwargs)
super(DefinitionManager, self).load(file_locations, **kwargs)
if not file_locations:
file_locations = self.__file_locations__
if file_locations:
log.info("loading vocabs into conn '%s'", conn)
for item in file_locations:
if item[0] == 'vocabularies':
vocabs = item[1]
if item[1] == "all":
vocabs = self.vocab_map
for vocab in vocabs:
self.load_vocab(vocab)
self.loaded_files(reset=True)
self.loaded_times = self.load_times(**kwargs)
def __set_cache_dir__(self, cache_dirs=[], **kwargs):
""" sets the cache directory by test write permissions for various
locations
args:
directories: list of directories to test. First one with read-write
permissions is selected.
"""
# add a path for a subfolder 'vocabularies'
test_dirs = [self.vocab_dir] + cache_dirs
try:
test_dirs += [os.path.join(__CFG__.CACHE_DATA_PATH,
"vocabularies")]
except (RuntimeWarning, TypeError):
pass
super(DefinitionManager, self).__set_cache_dir__(test_dirs, **kwargs)
def load_vocab(self, vocab_name, **kwargs):
""" loads a vocabulary into the defintion triplestore
args:
vocab_name: the prefix, uri or filename of a vocabulary
"""
log.setLevel(kwargs.get("log_level", self.log_level))
vocab = self.get_vocab(vocab_name , **kwargs)
if vocab['filename'] in self.loaded:
if self.loaded_times.get(vocab['filename'],
datetime.datetime(2001,1,1)).timestamp() \
< vocab['modified']:
self.drop_file(vocab['filename'], **kwargs)
else:
return
conn = kwargs.get("conn", self.conn)
conn.load_data(graph=getattr(__NSM__.kdr, vocab['filename']).clean_uri,
data=vocab['data'],
datatype=vocab['filename'].split(".")[-1],
log_level=logging.WARNING)
self.__update_time__(vocab['filename'], **kwargs)
log.warning("\n\tvocab: '%s' loaded \n\tconn: '%s'",
vocab['filename'],
conn)
self.loaded.append(vocab['filename'])
def __get_vocab_dict__(self, vocab_name, **kwargs):
""" dictionary for the specified vocabulary
args:
vocab_name: the name or uri of the vocab to return
"""
try:
vocab_dict = self.vocab_map[vocab_name].copy()
except KeyError:
vocab_dict = {key: value for key, value in self.vocab_map.items()
if vocab_name in value.values()}
vocab_name = list(vocab_dict)[0]
vocab_dict = vocab_dict.pop(vocab_name)
return vocab_dict
def get_vocab(self, vocab_name, **kwargs):
""" Returns data stream of an rdf vocabulary
args:
vocab_name: the name or uri of the vocab to return
"""
vocab_dict = self.__get_vocab_dict__(vocab_name, **kwargs)
filepaths = list(set([os.path.join(self.cache_dir,
vocab_dict['filename']),
os.path.join(self.vocab_dir,
vocab_dict['filename'])]))
for path in filepaths:
if os.path.exists(path):
with open(path, 'rb') as f_obj:
vocab_dict.update({"name": vocab_name,
"data": f_obj.read(),
"modified": os.path.getmtime(path)})
return vocab_dict
download_locs = make_list(vocab_dict.get('download',[]))
for loc in download_locs:
loc_web = urllib.request.urlopen(loc)
# loc_file_date = date_parse(loc_web.info()['Last-Modified'])
urllib.request.urlretrieve(loc, filepaths[0])
with open(filepaths[0], 'rb') as f_obj:
vocab_dict.update({"name": vocab_name,
"data": f_obj.read(),
"modified": os.path.getmtime(filepaths[0])})
return vocab_dict
def drop_vocab(self, vocab_name, **kwargs):
""" Removes the vocab from the definiton triplestore
args:
vocab_name: the name or uri of the vocab to return
"""
vocab_dict = self.__get_vocab_dict__(vocab_name, **kwargs)
return self.drop_file(vocab_dict['filename'], **kwargs)
|
class DefinitionManager(DataFileManager, metaclass=DefManagerMeta):
'''
RDF Vocabulary Manager. This class manages all of the RDF vocabulary
for the rdfframework
'''
def __init__(self, file_locations=[], conn=None, **kwargs):
pass
def __get_conn__(self, **kwargs):
pass
def load(self, file_locations=[], **kwargs):
''' Loads the file_locations into the triplestores
args:
file_locations: list of tuples to load
[('vocabularies', [list of vocabs to load])
('directory', '/directory/path')
('filepath', '/path/to/a/file')
('package_all', 'name.of.a.package.with.defs')
('package_file','name.of.package', 'filename')]
custom: list of custom definitions to load
'''
pass
def __set_cache_dir__(self, cache_dirs=[], **kwargs):
''' sets the cache directory by test write permissions for various
locations
args:
directories: list of directories to test. First one with read-write
permissions is selected.
'''
pass
def load_vocab(self, vocab_name, **kwargs):
''' loads a vocabulary into the defintion triplestore
args:
vocab_name: the prefix, uri or filename of a vocabulary
'''
pass
def __get_vocab_dict__(self, vocab_name, **kwargs):
''' dictionary for the specified vocabulary
args:
vocab_name: the name or uri of the vocab to return
'''
pass
def get_vocab(self, vocab_name, **kwargs):
''' Returns data stream of an rdf vocabulary
args:
vocab_name: the name or uri of the vocab to return
'''
pass
def drop_vocab(self, vocab_name, **kwargs):
''' Removes the vocab from the definiton triplestore
args:
vocab_name: the name or uri of the vocab to return
'''
pass
| 9 | 7 | 18 | 1 | 12 | 5 | 3 | 0.31 | 2 | 7 | 0 | 0 | 8 | 2 | 8 | 38 | 228 | 21 | 167 | 31 | 158 | 52 | 82 | 30 | 73 | 7 | 3 | 3 | 25 |
143,267 | KnowledgeLinks/rdfframework | KnowledgeLinks_rdfframework/rdfframework/datasets/rdfdatasets.py | rdfframework.datasets.rdfdatasets.RdfDataset |
class RdfDataset(dict):
""" A container for holding rdf data """
log_level = logging.INFO
_reserved = ['add_triple',
'_format',
'_reserved',
'_link_objects',
'load_data',
'class_types',
'subj_list',
'non_defined',
'',
]
__slots__ = ['classes',
'subj_list',
'non_defined',
'base_uri',
'base_class',
'relate_obj_types',
'smap',
'pmap',
'omap',
'rmap']
def __init__(self, data=None, base_uri=None, **kwargs):
start = datetime.datetime.now()
self.smap = 's'
self.pmap = 'p'
self.omap = 'o'
self.rmap = {}
if base_uri:
base_uri = Uri(base_uri)
self.base_uri = base_uri
if kwargs.get("debug"):
log.setLevel(logging.DEBUG)
# realate_bnode_obj_types sets whether to relate the object of a class
# back to itself
self.relate_obj_types = ['bnode','uri']
if kwargs.get("bnode_only"):
self.relate_obj_types = ['bnode']
if data:
self.load_data(data, **kwargs)
log.debug("loaded %s triples in %s" % (len(data),
(datetime.datetime.now()-start)))
def __repr__(self):
return "<Dataset([{'base_uri': '%s',\n'keys': '%s'}])>" % \
(self.base_uri,
[key.sparql for key in self if key.type != 'bnode'])
def __set_map__(self, **kwargs):
""" sets the subject predicat object json mapping
kwargs:
map: dictionary mapping 's', 'p', 'o' keys
"""
if kwargs.get('map'):
map = kwargs.pop('map',{})
self.smap = map.get('s','s')
self.pmap = map.get('p','p')
self.omap = map.get('o','o')
def add_triple(self, sub, pred=None, obj=None, **kwargs):
""" Adds a triple to the dataset
args:
sub: The subject of the triple or dictionary contaning a
triple
pred: Optional if supplied in sub, predicate of the triple
obj: Optional if supplied in sub, object of the triple
kwargs:
map: Optional, a ditionary mapping for a supplied dictionary
strip_orphans: Optional, remove triples that have an orphan
blanknode as the object
obj_method: if "list" than the object will be returned in the
form of a list
"""
self.__set_map__(**kwargs)
strip_orphans = kwargs.get("strip_orphans", False)
obj_method = kwargs.get("obj_method")
if isinstance(sub, DictClass) or isinstance(sub, dict):
pred = sub[self.pmap]
obj = sub[self.omap]
sub = sub[self.smap]
pred = pyrdf(pred)
obj = pyrdf(obj)
sub = pyrdf(sub)
# reference existing attr for bnodes and uris
if obj.type in self.relate_obj_types :
if strip_orphans and not self.get(obj):
return
obj = self.get(obj,obj)
try:
self[sub].add_property(pred, obj)
except KeyError:
self[sub] = RdfClassBase(sub, self, **kwargs)
self[sub].add_property(pred, obj)
def format(self, **kwargs):
uri_format = kwargs.get('uri_format', "pyuri")
output = kwargs.get('output', "dict")
pretty = kwargs.get('pretty', False)
remove = make_list(kwargs.get('remove', None))
compress = kwargs.get('compress', False)
sort = kwargs.get('sort', False)
base_uri = kwargs.get("base_uri",None)
add_ids = kwargs.get("add_ids", False)
base_only = kwargs.get("base_only", False)
if compress:
new_obj = copy.copy(self)
for key, value in self.items():
for skey, svalue in value.items():
if isinstance(svalue, list) and len(svalue) == 1:
try:
new_obj[key][skey] = svalue[0]
except KeyError:
new_obj[key] = {skey: svalue[0]}
format_obj = new_obj
else:
format_obj = self
if remove:
remove = make_list(remove)
conv_data = {key: value.conv_json(uri_format, add_ids)
for key, value in format_obj.items()
if value.subject.type not in remove}
if base_only:
try:
conv_data = conv_data[self.base_uri]
except KeyError:
return "Base_uri undefined or not in dataset"
if output.lower() == 'json':
indent = None
if pretty:
indent = 4
return json.dumps(conv_data, indent=indent, sort_keys=sort)
elif output.lower() == 'dict':
return conv_data
@property
def view(self):
""" prints the dataset in an easy to read format """
print(self.format(remove='bnode',
sort=True,
pretty=True,
compress=True,
output='json',
add_ids=True))
@property
def view_main(self):
""" prints the dataset in an easy to read format """
print(self.format(remove='bnode',
sort=True,
pretty=True,
compress=True,
output='json',
base_only = True,
add_ids=True))
def load_data(self, data, **kwargs):
"""
Bulk adds rdf data to the class
args:
data: the data to be loaded
kwargs:
strip_orphans: True or False - remove triples that have an
orphan blanknode as the object
obj_method: "list", or None: if "list" the object of a method
will be in the form of a list.
"""
self.__set_map__(**kwargs)
start = datetime.datetime.now()
log.debug("Dataload stated")
if isinstance(data, list):
data = self._convert_results(data, **kwargs)
class_types = self.__group_data__(data, **kwargs)
# generate classes and add attributes to the data
self._generate_classes(class_types, self.non_defined, **kwargs)
# add triples to the dataset
for triple in data:
self.add_triple(sub=triple, **kwargs)
log.debug("Dataload completed in '%s'",
(datetime.datetime.now() - start))
def __group_data__(self, data, **kwargs):
""" processes the data in to groups prior to loading into the
dataset
args:
data: a list of triples
"""
# strip all of the rdf_type triples and merge
class_types = self._merge_classtypes(self.__get_classtypes__(data))
self.subj_list = list([item[self.smap] for item in class_types])
# get non defined classes
self.non_defined = self._get_non_defined(data, class_types)
return class_types
def triples(self, output=None):
rtn_list = []
for sub, props in self.items():
for pred, obj in props.items():
# if not isinstance(pred, Uri):
# pred = Uri(pred)
if isinstance(obj, list):
for oo in obj:
if isinstance(oo, RdfClassBase):
oo = oo.subject
rtn_list.append((sub, pred, oo))
else:
if isinstance(obj, RdfClassBase):
obj = obj.subject
rtn_list.append((sub, pred, obj))
# rtn_list.sort(key=lambda tup: tup[0]+tup[1]+tup[2])
if output:
def size(value):
if len(value) > 42:
value = "... %s" % value[-39:]
spaces = 45 - len(value)
return "%s%s" %(value," " * spaces)
if output == "view":
print("\n".join(
["%s %s%s%s" %
(i,
size(trip[0].sparql),
size(trip[1].sparql),
size(trip[2].sparql))
for i, trip in enumerate(rtn_list)]))
else:
return rtn_list
@property
def __set_classes__(self):
def add_class(key, value):
nonlocal rtn_obj
try:
rtn_obj[value].append(key)
except AttributeError:
rtn_obj[value] = [rtn_obj[value['rdf_type']]]
rtn_obj[value].append(key)
except KeyError:
rtn_obj[value] = [key]
except TypeError:
for item in value:
add_class(key, item)
rtn_obj = {}
for key, value in self.items():
try:
add_class(key, value['rdf_type'])
except KeyError:
pass
self.classes = rtn_obj
#return rtn_obj
def __get_classtypes__(self, data):
""" returns all of the triples where rdf:type is the predicate and
removes them from the data list
agrs:
data: a list of triples
"""
rtn_list = []
remove_index = []
for i, triple in enumerate(data):
if triple[self.pmap] == __a__:
remove_index.append(i)
rtn_list.append(triple)
for i in reversed(remove_index):
data.pop(i)
return rtn_list
def add_rmap_item(self, subj, pred, obj):
"""
adds a triple to the inverted dataset index
"""
def add_item(self, subj, pred, obj):
try:
self.rmap[obj][pred].append(subj)
except KeyError:
try:
self.rmap[obj][pred] = [subj]
except KeyError:
self.rmap[obj] = {pred: [subj]}
if isinstance(obj, list):
for item in obj:
add_item(self, subj, pred, item)
else:
add_item(self, subj, pred, obj)
def _generate_classes(self, class_types, non_defined, **kwargs):
""" creates the class for each class in the data set
args:
class_types: list of class_types in the dataset
non_defined: list of subjects that have no defined class
"""
# kwargs['dataset'] = self
for class_type in class_types:
self[class_type[self.smap]] = self._get_rdfclass(class_type,
**kwargs)\
(class_type,
self,
**kwargs)
self.add_rmap_item(self[class_type[self.smap]],
class_type[self.pmap],
class_type[self.omap])
for class_type in non_defined:
self[class_type] = RdfClassBase(class_type, self, **kwargs)
self.add_rmap_item(self[class_type], __a__, None)
self.__set_classes__
try:
self.base_class = self[self.base_uri]
except KeyError:
self.base_class = None
def _get_rdfclass(self, class_type, **kwargs):
""" returns the instanticated class from the class list
args:
class_type: dictionary with rdf_types
"""
def select_class(class_name):
""" finds the class in the rdfclass Module"""
try:
return getattr(MODULE.rdfclass, class_name.pyuri)
except AttributeError:
return RdfClassBase
if kwargs.get("def_load"):
return RdfClassBase
if isinstance(class_type[self.omap], list):
bases = [select_class(class_name)
for class_name in class_type[self.omap]]
bases = [base for base in bases if base != RdfClassBase]
if len(bases) == 0:
return RdfClassBase
elif len(bases) == 1:
return bases[0]
else:
bases = remove_parents(bases)
if len(bases) == 1:
return bases[0]
else:
name = "_".join(sorted(class_type[self.omap]))
# if the the class has already been created return it
if hasattr(MODULE.rdfclass, name):
return getattr(MODULE.rdfclass, name)
new_class = type(name,
tuple(bases),
{})
new_class.hierarchy = list_hierarchy(class_type[self.omap][0],
bases)
new_class.class_names = sorted([base.__name__ \
for base in bases \
if base not in [RdfClassBase, dict]])
setattr(MODULE.rdfclass, name, new_class)
return new_class
else:
return select_class(class_type[self.omap])
def _merge_classtypes(self, data):
obj = {}
for triple in data:
try:
obj[triple[self.smap]][self.omap].append(triple[self.omap])
except AttributeError:
obj[triple[self.smap]][self.omap] = [obj[triple[self.smap]][self.omap]]
obj[triple[self.smap]][self.omap].append(triple[self.omap])
except KeyError:
obj[triple[self.smap]] = triple
return list(obj.values())
def _get_non_defined(self, data, class_types):
"""
returns a list of URIs and blanknodes that are not defined within
the dataset. For example: schema:Person has an associated rdf:type
then it is considered defined.
args:
data: a list of triples
class_types: list of subjects that are defined in the dataset
"""
subj_set = set([item[self.smap] for item in class_types])
non_def_set = set([item[self.smap] for item in data])
return list(non_def_set - subj_set)
def _convert_results(self, data, **kwargs):
""" converts the results of a query to RdfDatatype instances
args:
data: a list of triples
"""
if kwargs.get("multiprocessing", False):
m = mp.Manager()
output = m.Queue()
pdb.set_trace()
# processes = [mp.Process(target=convert_row_main,
# args=(row, output,))
# for row in data]
# # Run processes
# for p in processes:
# p.start()
# # Exit the completed processes
# for p in processes:
# p.join()
# # Get process results from the output queue
# return [output.get() for p in processes]
pool = mp.Pool(processes=pool_size)
for i, row in enumerate(data):
for key, val in row.items():
try:
pool.apply(convert_row_main, args=(val, i, key, output,))
except:
pass #
# run = [pool.apply(convert_row_main, args=(row, i, output))
# for i, row in enumerate(data)]
for item in output:
pdb.set_trace()
return output
# with multiprocessing.Pool(processes=pool_size) as pool:
# results = [convert_row_main, (row,))
# for row in data]
# converted = [r.get() for r in results]
# return converted #pool_outputs
else:
return [{key:pyrdf(value) for key, value in row.items()}
for row in data]
# def json_qry(self, qry_str, params):
def json_qry(*args):
""" Takes a json query string and returns the results
args:
qry_str: query string
"""
return json_qry(*args)
|
class RdfDataset(dict):
''' A container for holding rdf data '''
def __init__(self, data=None, base_uri=None, **kwargs):
pass
def __repr__(self):
pass
def __set_map__(self, **kwargs):
''' sets the subject predicat object json mapping
kwargs:
map: dictionary mapping 's', 'p', 'o' keys
'''
pass
def add_triple(self, sub, pred=None, obj=None, **kwargs):
''' Adds a triple to the dataset
args:
sub: The subject of the triple or dictionary contaning a
triple
pred: Optional if supplied in sub, predicate of the triple
obj: Optional if supplied in sub, object of the triple
kwargs:
map: Optional, a ditionary mapping for a supplied dictionary
strip_orphans: Optional, remove triples that have an orphan
blanknode as the object
obj_method: if "list" than the object will be returned in the
form of a list
'''
pass
def format(self, **kwargs):
pass
@property
def view(self):
''' prints the dataset in an easy to read format '''
pass
@property
def view_main(self):
''' prints the dataset in an easy to read format '''
pass
def load_data(self, data, **kwargs):
'''
Bulk adds rdf data to the class
args:
data: the data to be loaded
kwargs:
strip_orphans: True or False - remove triples that have an
orphan blanknode as the object
obj_method: "list", or None: if "list" the object of a method
will be in the form of a list.
'''
pass
def __group_data__(self, data, **kwargs):
''' processes the data in to groups prior to loading into the
dataset
args:
data: a list of triples
'''
pass
def triples(self, output=None):
pass
def size(value):
pass
@property
def __set_classes__(self):
pass
def add_class(key, value):
pass
def __get_classtypes__(self, data):
''' returns all of the triples where rdf:type is the predicate and
removes them from the data list
agrs:
data: a list of triples
'''
pass
def add_rmap_item(self, subj, pred, obj):
'''
adds a triple to the inverted dataset index
'''
pass
def add_item(self, subj, pred, obj):
pass
def _generate_classes(self, class_types, non_defined, **kwargs):
''' creates the class for each class in the data set
args:
class_types: list of class_types in the dataset
non_defined: list of subjects that have no defined class
'''
pass
def _get_rdfclass(self, class_type, **kwargs):
''' returns the instanticated class from the class list
args:
class_type: dictionary with rdf_types
'''
pass
def select_class(class_name):
''' finds the class in the rdfclass Module'''
pass
def _merge_classtypes(self, data):
pass
def _get_non_defined(self, data, class_types):
'''
returns a list of URIs and blanknodes that are not defined within
the dataset. For example: schema:Person has an associated rdf:type
then it is considered defined.
args:
data: a list of triples
class_types: list of subjects that are defined in the dataset
'''
pass
def _convert_results(self, data, **kwargs):
''' converts the results of a query to RdfDatatype instances
args:
data: a list of triples
'''
pass
def json_qry(*args):
''' Takes a json query string and returns the results
args:
qry_str: query string
'''
pass
| 27 | 15 | 19 | 1 | 14 | 4 | 4 | 0.33 | 1 | 13 | 3 | 0 | 19 | 10 | 19 | 46 | 455 | 48 | 306 | 90 | 278 | 102 | 239 | 87 | 214 | 12 | 2 | 5 | 85 |
143,268 | KnowledgeLinks/rdfframework | KnowledgeLinks_rdfframework/tests/test_frameworkutilities.py | tests.test_frameworkutilities.Test_make_set |
class Test_make_set(unittest.TestCase):
def test_make_set_str(self):
test_str = "this is a test"
self.assertEqual(make_set(test_str),
set([test_str,]))
def test_make_set_list(self):
test_list = ["ab", "cd"]
self.assertEqual(make_set(test_list),
set(test_list))
def test_make_set_set(self):
test_set = set(range(0,5))
self.assertEqual(make_set(test_set),
test_set)
|
class Test_make_set(unittest.TestCase):
def test_make_set_str(self):
pass
def test_make_set_list(self):
pass
def test_make_set_set(self):
pass
| 4 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 1 | 2 | 0 | 0 | 3 | 0 | 3 | 75 | 16 | 3 | 13 | 7 | 9 | 0 | 10 | 7 | 6 | 1 | 2 | 0 | 3 |
143,269 | KnowledgeLinks/rdfframework | KnowledgeLinks_rdfframework/rdfframework/rml/processor.py | rdfframework.rml.processor.XMLProcessor |
class XMLProcessor(Processor):
"""XML RDF Mapping Processor"""
rdf_name = Uri("kdr:RmlXMLProcessor")
def __init__(self, **kwargs):
if "rml_rules" in kwargs:
rml_rules = kwargs.pop("rml_rules")
super(XMLProcessor, self).__init__(rml_rules, **kwargs)
if "namespaces" in kwargs:
self.xml_ns = kwargs.pop("namespaces")
else:
self.xml_ns = dict()
self.constants.update(kwargs)
def __generate_reference__(self, triple_map, **kwargs):
"""Internal method takes a triple_map and returns the result of
applying to XPath to the current DOM context
Args:
-----
triple_map: SimpleNamespace
element: etree.Element
"""
element = kwargs.get("element")
found_elements = element.xpath(
triple_map.reference,
namespaces=self.xml_ns)
for elem in found_elements:
raw_text = elem.text.strip()
#! Quick and dirty test for valid URI
if not raw_text.startswith("http"):
continue
return rdflib.URIRef(raw_text)
def __reference_handler__(self, output, **kwargs):
"""Internal method for handling rr:reference in triples map
Keyword Args:
-------------
predicate_obj_map: SimpleNamespace
element: etree.Element
subject: rdflib.URIRef
"""
subjects = []
pred_obj_map = kwargs.get("predicate_obj_map")
element = kwargs.get("element")
subject = kwargs.get("subject")
if pred_obj_map.reference is None:
return subjects
predicate = pred_obj_map.predicate
found_elements = element.xpath(
str(pred_obj_map.reference),
namespaces=self.xml_ns)
for found_elem in found_elements:
if not hasattr(pred_obj_map, "datatype") or \
pred_obj_map.datatype is None:
datatype = None
else:
datatype = pred_obj_map.datatype
if isinstance(found_elem, str): # Handle xpath attributes
object_ = self.__generate_object_term__(datatype, found_elem)
output.add((subject, predicate, object_))
continue
if found_elem.text is None or len(found_elem.text) < 1:
continue
if pred_obj_map.constant is not None:
output.add((subject,
predicate,
pred_obj_map.constant))
continue
if pred_obj_map.delimiters != []:
subjects.extend(
self.__generate_delimited_objects__(
output,
triple_map=pred_obj_map,
subject=subject,
predicate=predicate,
element=found_elem,
delimiters=pred_obj_map.delimiters,
datatype=datatype))
else:
object_ = self.__generate_object_term__(datatype, found_elem.text)
output.add((subject, predicate, object_))
return subjects
def execute(self, triple_map, output, **kwargs):
"""Method executes mapping between source
Args:
-----
triple_map: SimpleNamespace, Triple Map
"""
subjects = []
found_elements = self.source.xpath(
str(triple_map.logicalSource.iterator),
namespaces=self.xml_ns)
for element in found_elements:
subject = self.generate_term(term_map=triple_map.subjectMap,
element=element,
**kwargs)
start = len(output)
for row in triple_map.predicateObjectMap:
predicate = row.predicate
if row.template is not None:
obj_ = self.generate_term(term_map=row, **kwargs)
output.add((subject, predicate, obj_))
if row.parentTriplesMap is not None:
self.__handle_parents__(
output,
parent_map=row.parentTriplesMap,
subject=subject,
predicate=predicate,
**kwargs)
new_subjects = self.__reference_handler__(
output,
predicate_obj_map=row,
element=element,
subject=subject)
subjects.extend(new_subjects)
if row.constant is not None:
output.add((subject, predicate, row.constant))
if start < len(output):
if triple_map.subjectMap.class_ is not None:
output.add((subject,
NS_MGR.rdf.type.rdflib,
triple_map.subjectMap.class_))
subjects.append(subject)
return subjects
def run(self, xml, **kwargs):
"""Method takes either an etree.ElementTree or raw XML text
as the first argument.
Args:
xml (etree.ElementTree or str): the XML source to map
"""
kwargs['output'] = self.__graph__()
if isinstance(xml, str):
try:
self.source = etree.XML(xml)
except ValueError:
try:
self.source = etree.XML(xml.encode())
except:
raise ValueError("Cannot run error {}".format(sys.exc_info()[0]))
else:
self.source = xml
super(XMLProcessor, self).run(**kwargs)
self.output = kwargs['output']
return kwargs['output']
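A minimal usage sketch for the processor above; the rules file name, namespace map, and XML snippet are hypothetical placeholders, and whether rml_rules takes a single rules reference or a list depends on the parent Processor class:

from rdfframework.rml.processor import XMLProcessor

# hypothetical RML rules file and XML namespace prefix map
processor = XMLProcessor(rml_rules="example_mods_map.ttl",
                         namespaces={"mods": "http://www.loc.gov/mods/v3"})
# run() accepts raw XML text or an etree.ElementTree and returns the output graph
xml_record = "<mods:mods xmlns:mods='http://www.loc.gov/mods/v3'></mods:mods>"
graph = processor.run(xml_record)
print(len(graph))  # number of triples produced by the mapping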
|
class XMLProcessor(Processor):
'''XML RDF Mapping Processor'''
def __init__(self, **kwargs):
pass
def __generate_reference__(self, triple_map, **kwargs):
'''Internal method that takes a triple_map and returns the result of
applying the XPath reference to the current DOM context
Args:
-----
triple_map: SimpleNamespace
element: etree.Element
'''
pass
def __reference_handler__(self, output, **kwargs):
'''Internal method for handling rr:reference in triples map
Keyword Args:
-------------
predicate_obj_map: SimpleNamespace
element: etree.Element
subject: rdflib.URIRef
'''
pass
def execute(self, triple_map, output, **kwargs):
'''Method executes mapping between source
Args:
-----
triple_map: SimpleNamespace, Triple Map
'''
pass
def run(self, xml, **kwargs):
'''Method takes either an etree.ElementTree or raw XML text
as the first argument.
Args:
xml (etree.ElementTree or str): the XML source to map
'''
pass
| 6 | 5 | 29 | 2 | 23 | 5 | 5 | 0.23 | 1 | 4 | 0 | 0 | 5 | 3 | 5 | 42 | 158 | 17 | 115 | 33 | 109 | 27 | 81 | 33 | 75 | 8 | 4 | 3 | 26 |
143,270 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/tests/test_frameworkutilities.py
|
tests.test_frameworkutilities.Test_is_not_null
|
class Test_is_not_null(unittest.TestCase):
def test_is_not_null(self):
self.assertFalse(is_not_null(None))
self.assertFalse(is_not_null(""))
def test_is_not_null_true(self):
self.assertTrue(is_not_null("Test"))
self.assertTrue(is_not_null(1234))
|
class Test_is_not_null(unittest.TestCase):
def test_is_not_null(self):
pass
def test_is_not_null_true(self):
pass
| 3 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 2 | 0 | 2 | 74 | 9 | 2 | 7 | 3 | 4 | 0 | 7 | 3 | 4 | 1 | 2 | 0 | 2 |
143,271 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/rdfclass/rdffactories.py
|
rdfframework.rdfclass.rdffactories.RdfBaseFactory
|
class RdfBaseFactory(object):
log_level = logging.INFO
cache_file = "base.json"
cache_filepath = ""
def __init__(self, conn, sparql_template, reset=False, nsm=NSM, cfg=CFG):
start = datetime.datetime.now()
log.info("Starting '%s' class creation", self.__class__.__name__)
self.conn = conn
self.cfg = cfg
self.nsm = nsm
self.def_sparql = sparql_template #
self.cache_filepath = os.path.join(self.cfg.dirs.cache,
CACHE_DIR,
self.cache_file)
if not os.path.isdir(os.path.join(self.cfg.dirs.cache,
CACHE_DIR)):
os.makedirs(os.path.join(self.cfg.dirs.cache,
CACHE_DIR))
self.loaded_filepath = os.path.join(self.cfg.dirs.cache,
CACHE_DIR,
CACHE_FILE_LIST)
self.get_defs(not reset)
self.conv_defs()
self.make()
setattr(self.cfg, "props_initialized", True)
log.info(" completed in %s", (datetime.datetime.now() - start))
def __use_cache__(self, cache):
"""
checks for changes in the vocabulary and mod times of the files
to see if the cache should be used.
Args:
cache: the kwarg passed in to use the cache during __init__
Returns:
Bool: True = use the cache files
False = requery the triplestore
"""
# check for changes in the file mod times
try:
cache_mod = os.path.getmtime(self.cache_filepath)
except FileNotFoundError:
return False
last_file_mod = sorted( \
self.conn.mgr.loaded_times.values())[-1].timestamp()
if last_file_mod > cache_mod:
return False
curr_load = set(self.conn.mgr.loaded)
# check to see if there is a change in the loaded files
try:
with open(self.loaded_filepath, "r") as fo:
loaded_files = set(json.loads(fo.read()))
if curr_load != loaded_files:
return False
except FileNotFoundError:
return False
# otherwise return the original cache init kwarg value
return cache
def get_defs(self, cache=True):
""" Gets the defitions
args:
cache: True will read from the file cache, False queries the
triplestore
"""
log.debug(" *** Started")
cache = self.__use_cache__(cache)
if cache:
log.info(" loading json cache")
try:
with open(self.cache_filepath) as file_obj:
self.results = json.loads(file_obj.read())
except FileNotFoundError:
self.results = []
if not cache or len(self.results) == 0:
log.info(" NO CACHE, querying the triplestore")
sparql = render_without_request(self.def_sparql,
graph=self.conn.graph,
prefix=self.nsm.prefix())
start = datetime.datetime.now()
log.info(" Starting query")
self.results = self.conn.query(sparql)
log.info("query complete in: %s | %s triples retrieved.",
(datetime.datetime.now() - start),
len(self.results))
with open(self.cache_filepath, "w") as file_obj:
file_obj.write(json.dumps(self.results, indent=4))
with open(self.loaded_filepath, "w") as file_obj:
file_obj.write((json.dumps(self.conn.mgr.loaded)))
def conv_defs(self):
""" Reads through the JSON object and converts them to Dataset """
log.setLevel(self.log_level)
start = datetime.datetime.now()
log.debug(" Converting to a Dataset: %s Triples", len(self.results))
self.defs = RdfDataset(self.results,
def_load=True,
bnode_only=True)
# self.cfg.__setattr__('rdf_prop_defs', self.defs, True)
log.debug(" conv complete in: %s" % (datetime.datetime.now() - start))
|
class RdfBaseFactory(object):
def __init__(self, conn, sparql_template, reset=False, nsm=NSM, cfg=CFG):
pass
def __use_cache__(self, cache):
'''
checks for changes in the vocabulary and mod times of the files
to see if the cache should be used.
Args:
cache: the kwarg passed in to use the cache during __init__
Returns:
Bool: True = use the cache files
False = requery the triplestore
'''
pass
def get_defs(self, cache=True):
''' Gets the definitions
args:
cache: True will read from the file cache, False queries the
triplestore
'''
pass
def conv_defs(self):
''' Reads through the JSON results and converts them to an RdfDataset '''
pass
| 5 | 3 | 25 | 2 | 18 | 5 | 3 | 0.26 | 1 | 4 | 1 | 2 | 4 | 7 | 4 | 4 | 108 | 12 | 77 | 25 | 72 | 20 | 64 | 23 | 59 | 5 | 1 | 3 | 12 |
143,272 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/rdfclass/rdfclass.py
|
rdfframework.rdfclass.rdfclass.RegistryMeta
|
class RegistryMeta(type):
""" Registry meta class for use with the Registry class """
def __getitem__(cls, key):
try:
return cls._registry[key]
except KeyError:
return []
|
class RegistryMeta(type):
''' Registry meta class for use with the Registry class '''
def __getitem__(cls, key):
pass
| 2 | 1 | 5 | 0 | 5 | 0 | 2 | 0.17 | 1 | 1 | 0 | 1 | 1 | 0 | 1 | 14 | 7 | 0 | 6 | 2 | 4 | 1 | 6 | 2 | 4 | 2 | 2 | 1 | 2 |
143,273 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/rdfclass/rdfclass.py
|
rdfframework.rdfclass.rdfclass.Registry
|
class Registry(type, metaclass=RegistryMeta):
""" Registery for for rdf class registration """
_registry = {}
def __new__(mcs, name, bases, clsdict):
cls = super(Registry, mcs).__new__(mcs, name, bases, clsdict)
# if the RdfClassBase is not in the bases then the class is merged
# from multiple classes and should not be registered
try:
if RdfClassBase not in bases:
return cls
except NameError:
pass
if bases[:-1] and len(bases[0].class_names) == 1:
# pdb.set_trace()
try:
mcs._registry[bases[0].__name__].append(cls)
except KeyError:
mcs._registry[bases[0].__name__] = [cls]
return cls
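RegistryMeta and Registry together form a small subclass registry: a newly created class is recorded under the name of its immediate base, and the registered classes can be read back by subscripting the registry. A standalone sketch of the same pattern, kept independent of the rdfframework internals:

class _RegistryMeta(type):
    """makes the registry subscriptable: _Registry['Base'] -> registered classes"""
    def __getitem__(cls, key):
        return cls._registry.get(key, [])

class _Registry(type, metaclass=_RegistryMeta):
    _registry = {}

    def __new__(mcs, name, bases, clsdict):
        new_cls = super().__new__(mcs, name, bases, clsdict)
        if bases:  # record the new class under its first base's name
            mcs._registry.setdefault(bases[0].__name__, []).append(new_cls)
        return new_cls

class Base(metaclass=_Registry):
    pass

class Child(Base):
    pass

print(_Registry['Base'])  # [<class 'Child'>]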
|
class Registry(type, metaclass=RegistryMeta):
''' Registry for rdf class registration '''
def __new__(mcs, name, bases, clsdict):
pass
| 2 | 1 | 16 | 0 | 13 | 3 | 5 | 0.27 | 2 | 4 | 1 | 1 | 1 | 0 | 1 | 15 | 20 | 1 | 15 | 4 | 13 | 4 | 15 | 4 | 13 | 5 | 3 | 2 | 5 |
143,274 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/rdfclass/rdfclass.py
|
rdfframework.rdfclass.rdfclass.RdfClassMeta
|
class RdfClassMeta(Registry):
""" Metaclass for generating RdfClasses. This metaclass will take the
rdf defined class definitions and convert them to a python class.
"""
@property
def doc(cls):
""" Prints the docstring for the class."""
print_doc(cls)
@property
def subclasses(cls):
return Registry[cls.__name__]
@classmethod
def __prepare__(mcs, name, bases, **kwargs):
# if name == 'bf_UsageAndAccessPolicy':
# pdb.set_trace()
try:
cls_defs = kwargs.pop('cls_defs')
props = get_properties(name) #cls_defs)
doc_string = make_doc_string(name,
cls_defs,
bases,
props)
new_def = {}
# if name == 'bf_Topic': pdb.set_trace()
new_def['__doc__'] = doc_string
new_def['doc'] = property(print_doc)
new_def['properties'] = list_base_properties(bases) #property(list_properties)
# new_def['json_def'] = cls_defs
new_def['hierarchy'] = list_hierarchy(name, bases)
new_def['id'] = None
new_def['class_names'] = [name]
es_defs = es_get_class_defs(cls_defs, name)
if hasattr(bases[0], 'es_defs'):
es_defs.update(bases[0].es_defs)
new_def['es_defs'] = get_rml_processors(es_defs)
new_def['query_kwargs'] = get_query_kwargs(es_defs)
new_def['uri'] = Uri(name).sparql_uri
for prop, value in props.items():
new_def[prop] = MODULE.rdfclass.make_property(value,
prop,
new_def['class_names'])
new_def['properties'][prop] = new_def[prop]
if __a__ not in new_def.keys():
new_def[__a__] = MODULE.rdfclass.properties.get(__a__)
new_def['properties'][__a__] = new_def[__a__]
new_def['cls_defs'] = cls_defs #cls_defs.pop(name)
new_def['es_props'] = []
for prop_name, prop in new_def['properties'].items():
rng_def = get_prop_range_def(\
get_prop_range_defs(new_def['class_names'],
prop.kds_rangeDef))
if rng_def.get('kds_esLookup'):
new_def['es_props'].append(prop_name)
return new_def
except KeyError:
return {}
# return {'_cls_name': name}
def __new__(mcs, name, bases, clsdict, **kwargs):
return super().__new__(mcs, name, bases, clsdict)
def __init__(cls, name, bases, namespace, **kwargs):
super().__init__(name, bases, namespace)
|
class RdfClassMeta(Registry):
''' Metaclass for generating RdfClasses. This metaclass will take the
rdf defined class definitions and convert them to a python class.
'''
@property
def doc(cls):
''' Prints the docstring for the class.'''
pass
@property
def subclasses(cls):
pass
@classmethod
def __prepare__(mcs, name, bases, **kwargs):
pass
def __new__(mcs, name, bases, clsdict, **kwargs):
pass
def __init__(cls, name, bases, namespace, **kwargs):
pass
| 9 | 2 | 11 | 0 | 10 | 2 | 2 | 0.23 | 1 | 4 | 1 | 1 | 4 | 0 | 5 | 20 | 66 | 5 | 52 | 17 | 43 | 12 | 42 | 14 | 36 | 7 | 4 | 3 | 11 |
143,275 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/rdfclass/rdfclass.py
|
rdfframework.rdfclass.rdfclass.RdfClassBase
|
class RdfClassBase(dict, metaclass=RdfClassMeta):
""" This is the base class for the generation of all RDF Class type
Args:
subject: the instance URI for the rdf class. This is the subject URI
Kwargs:
dataset(RdfDataset): The linked RdfDataset that this instance of a class
is part of.
"""
class_names = []
uri_format = 'sparql_uri'
def __init__(self, subject=None, dataset=None, **kwargs):
super().__init__(self)
self.dataset = dataset
self._set_subject(subject)
if self.__class__ != RdfClassBase:
self._initilize_props()
def __hash__(self):
return hash(self.subject)
def __eq__(self, other):
if self.subject == other:
return True
return False
def __lt__(self, other):
if hasattr(other, 'subject'):
if other.subject.type == 'bnode':
other = other.bnode_id()
else:
other = other.subject
if self.subject.type == 'bnode':
return self.bnode_id() < other
return self.subject < other
def add_property(self, pred, obj):
""" adds a property and its value to the class instance
args:
pred: the predicate/property to add
obj: the value/object to add
obj_method: *** No longer used.
"""
pred = Uri(pred)
try:
self[pred].append(obj)
# except AttributeError:
# new_list = [self[pred]]
# new_list.append(obj)
# self[pred] = new_list
except KeyError:
try:
new_prop = self.properties[pred]
except AttributeError:
self.properties = {}
self.add_property(pred, obj)
return
except KeyError:
try:
new_prop = MODULE.rdfclass.properties[pred]
except KeyError:
new_prop = MODULE.rdfclass.make_property({},
pred, self.class_names)
try:
self.properties[pred] = new_prop
except AttributeError:
self.properties = {pred: new_prop}
init_prop = new_prop(self, get_attr(self, "dataset"))
setattr(self,
pred,
init_prop)
self[pred] = init_prop
self[pred].append(obj)
if self.dataset:
self.dataset.add_rmap_item(self, pred, obj)
@property
def subclasses(self):
""" returns a list of sublcasses to the current class """
return Registry[self.__class__.__name__]
@property
def to_json(self):
""" converts the class to a json compatable python dictionary """
return self.conv_json(self.uri_format)
def conv_json(self, uri_format="sparql_uri", add_ids=False):
""" converts the class to a json compatable python dictionary
Args:
uri_format('sparql_uri','pyuri'): The format that uri values will
be returned
Returns:
dict: a json compatible python dictionary
"""
def convert_item(ivalue):
""" converts an idividual value to a json value
Args:
ivalue: value of the item to convert
Returns:
JSON serializable value
"""
nvalue = ivalue
if isinstance(ivalue, BaseRdfDataType):
if ivalue.type == 'uri':
if ivalue.startswith("pyuri") and uri_format == "pyuri":
nvalue = getattr(ivalue, "sparql")
else:
nvalue = getattr(ivalue, uri_format)
else:
nvalue = ivalue.to_json
elif isinstance(ivalue, RdfClassBase):
if ivalue.subject.type == "uri":
nvalue = ivalue.conv_json(uri_format, add_ids)
elif ivalue.subject.type == "bnode":
nvalue = ivalue.conv_json(uri_format, add_ids)
elif isinstance(ivalue, list):
nvalue = []
for item in ivalue:
temp = convert_item(item)
nvalue.append(temp)
return nvalue
rtn_val = {key: convert_item(value) for key, value in self.items()}
#pdb.set_trace()
if add_ids:
if self.subject.type == 'uri':
rtn_val['uri'] = self.subject.sparql_uri
rtn_val['id'] = sha1(rtn_val['uri'].encode()).hexdigest()
#return {key: convert_item(value) for key, value in self.items()}
return rtn_val
@classmethod
def es_mapping(cls, base_class=None, role='rdf_class', **kwargs):
""" Returns the es mapping for the class
args:
-----
base_class: The root class being indexed
role: the role states how the class should be mapped depending
upon whether it is used as a subject or an object. options
are es_Nested or rdf_class
"""
def _prop_filter(prop, value, **kwargs):
""" filters out props that should not be used for es_mappings:
These include props that of the owl:inverseOf the parent_props.
Use of these props will cause a recursion depth error
Args:
prop: the name of the prop
value: the prop value(an instance of the prop's class)
Returns:
bool: whether the prop should be used
"""
try:
use_prop = len(set(value.owl_inverseOf) - parent_props) > 0
except AttributeError:
use_prop = True
# if not use_prop:
# print(prop)
if prop in nested_props and use_prop:
return True
return False
if not base_class:
base_class = cls
es_map = {}
# pdb.set_trace()
if kwargs.get("depth"): # and kwargs.get('class') == cls.__name__:
kwargs['depth'] += 1
initial = False
else:
initial = True
kwargs['depth'] = 1
kwargs['class'] = cls.__name__
kwargs['class_obj'] = cls
if kwargs.get('class_obj'):
parent_props = set(cls.properties)
else:
parent_props = set()
if role == 'rdf_class':
es_map = {}
es_map = {prop: value.es_mapping(base_class) \
for prop, value in cls.properties.items()}
elif role == 'es_Nested':
# print(locals())
# pdb.set_trace()
if cls == base_class:
nested_props = LABEL_FIELDS
else:
nested_props = cls.es_defs.get('kds_esNestedProps',
list(cls.properties.keys()))
es_map = {prop: value.es_mapping(base_class, **kwargs) \
for prop, value in cls.properties.items() \
if _prop_filter(prop, value, **kwargs)}
ref_map = {
"type" : "keyword"
}
lower_map = {
"type": "text",
"fields": {
"lower": es_idx_types['es_Lower']['lower'],
'keyword': {'type': 'keyword'}
}
}
ignore_map = {
"index": False,
"type": "text"
}
if cls == base_class:
es_map['label'] = ref_map
es_map['value'] = lower_map
if cls.cls_defs.get('kds_storageType',[None])[0] != "blanknode" \
and cls == base_class:
es_map['id'] = ref_map
es_map['uri'] = ref_map
rml_procs = cls.es_defs.get("kds_esRmlProcessor", [])
rml_procs = [proc for proc in rml_procs
if role == 'rdf_class' or
proc['force']]
if rml_procs:
rml_maps = {}
for rml in rml_procs:
rml_maps[rml['name']] = ignore_map
if rml_maps:
es_map['rml_map'] = {"properties": rml_maps}
# es_map['turtle'] = ignore_map
return es_map
@classmethod
def es_indexers(cls, base_class=None, role='rdf_class', **kwargs):
""" Returns the es mapping for the class
args:
-----
base_class: The root class being indexed
role: the role states how the class should be mapped depending
upon whether it is used as a subject or an object. options
are es_Nested or rdf_class
"""
def _prop_filter(prop, value, **kwargs):
""" filters out props that should not be used for es_mappings:
These include props that are the owl:inverseOf of the parent_props.
Use of these props will cause a recursion depth error
Args:
prop: the name of the prop
value: the prop value(an instance of the prop's class)
Returns:
bool: whether the prop should be used
"""
try:
use_prop = len(set(value.owl_inverseOf) - parent_props) > 0
except AttributeError:
use_prop = True
if prop in nested_props and use_prop:
return True
return False
if not base_class:
base_class = cls
rtn_list = []
# pdb.set_trace()
if kwargs.get("depth"): # and kwargs.get('class') == cls.__name__:
kwargs['depth'] += 1
initial = False
else:
initial = True
kwargs['depth'] = 1
kwargs['class'] = cls.__name__
kwargs['class_obj'] = cls
if kwargs.get('class_obj'):
parent_props = set(cls.properties)
else:
parent_props = set()
if role == 'rdf_class':
for value in cls.properties.values():
# pdb.set_trace()
rtn_list += value.es_indexers(base_class, **kwargs)
elif role == 'es_Nested':
if cls == base_class:
nested_props = LABEL_FIELDS
else:
nested_props = cls.es_defs.get('kds_esNestedProps',
list(cls.properties.keys()))
used_props = [value
for prop, value in cls.properties.items() \
if _prop_filter(prop, value, **kwargs)]
for value in cls.properties.values():
# pdb.set_trace()
rtn_list += value.es_indexers(base_class, **kwargs)
if cls.es_defs.get('kds_esIndex',[None])[0]:
rtn_list += [cls]
return list(set(rtn_list))
def bnode_id(self):
"""
calculates the bnode id for the class
"""
if self.subject.type != 'bnode':
return self.subject
rtn_list = []
for prop in sorted(self):
for value in sorted(self[prop]):
rtn_list.append("%s%s" % (prop, value))
return sha1("".join(rtn_list).encode()).hexdigest()
def es_json(self, role='rdf_class', remove_empty=True, **kwargs):
""" Returns a JSON object of the class for insertion into es
args:
role: the role states how the class data should be returned
depending upon whether it is used as a subject or an object.
options are kds_esNested or rdf_class
remove_empty: True removes empty items from es object
"""
def test_idx_status(cls_inst, **kwargs):
"""
Return True if the class has already been indexed in elasticsearch
Args:
-----
cls_inst: the rdfclass instance
Kwargs:
-------
force[boolean]: True will return false to force a reindex of the
class
"""
if kwargs.get("force") == True:
return False
idx_time = cls_inst.get("kds_esIndexTime", [None])[0]
mod_time = cls_inst.get("dcterm_modified", [None])[0]
error_msg = cls_inst.get("kds_esIndexError", [None])[0]
if (not idx_time) or \
error_msg or \
(idx_time and mod_time and idx_time < mod_time):
return False
return True
# if self.__class__.__name__ == 'rdf_type':
# pdb.set_trace()
rtn_obj = {}
if kwargs.get("depth"):
kwargs['depth'] += 1
else:
kwargs['depth'] = 1
if role == 'rdf_class':
if test_idx_status(self, **kwargs):
return None
for prop, value in self.items():
if prop in ['kds_esIndexTime', 'kds_esIndexError']:
continue
new_val = value.es_json()
rtn_method = get_attr(self[prop], 'kds_esObjectType', [])
if 'kdr_Array' in rtn_method:
rtn_obj[prop] = new_val
elif (remove_empty and new_val) or not remove_empty:
if len(new_val) == 1:
rtn_obj[prop] = new_val[0]
else:
rtn_obj[prop] = new_val
nested_props = None
else:
try:
nested_props = self.es_defs.get('kds_esNestedProps',
list(self.keys())).copy()
except AttributeError:
nested_props = list(self.keys())
for prop, value in self.items():
# if prop == 'bf_hasInstance':
# pdb.set_trace()
if prop in ['kds_esIndexTime', 'kds_esIndexError']:
continue
new_val = value.es_json(**kwargs)
rtn_method = get_attr(self[prop], 'kds_esObjectType', [])
if 'kdr_Array' in rtn_method:
rtn_obj[prop] = new_val
elif (remove_empty and new_val) or not remove_empty:
if len(new_val) == 1:
rtn_obj[prop] = new_val[0] \
if not isinstance(new_val, dict) \
else new_val
else:
rtn_obj[prop] = new_val
# if 'bf_Work' in self.hierarchy:
# pdb.set_trace()
rtn_obj = get_es_label(rtn_obj, self)
rtn_obj = get_es_value(rtn_obj, self)
rtn_obj = get_es_ids(rtn_obj, self)
if nested_props:
nested_props += ['value', 'id', 'uri']
rtn_obj = {key: value
for key, value in rtn_obj.items()
if key in nested_props}
# rml_procs = self.es_defs.get("kds_esRmlProcessor", [])
# # if role == 'rdf_class':
# # pdb.set_trace()
# rml_procs = [proc for proc in rml_procs
# if role == 'rdf_class' or
# proc['force']]
# if rml_procs:
# rml_maps = {}
# for rml in rml_procs:
# proc_kwargs = {rml['subj']: self.subject,
# "dataset": self.dataset}
# proc_kwargs.update(rml['proc_kwargs'])
# rml_maps[rml['name']] = rml['processor'](**proc_kwargs)
# if rml_maps:
# rtn_obj['rml_map'] = rml_maps
rml_maps = self.get_all_rml(role=role)
if rml_maps:
rtn_obj['rml_map'] = rml_maps
# if self.get('bf_contribution'):
# pdb.set_trace()
return rtn_obj
def get_rml(self, rml_def, **kwargs):
"""
returns the rml mapping output for specified mapping
Args:
-----
rml_def: The name of the mapping or a dictionary definition
"""
if isinstance(rml_def, str):
rml_procs = self.es_defs.get("kds_esRmlProcessor", [])
for item in rml_procs:
if item['name'] == rml_def:
rml_def = item
break
proc_kwargs = {rml_def['subj']: self.subject,
"dataset": self.dataset}
proc_kwargs.update(rml_def['proc_kwargs'])
return rml_def['processor'](**proc_kwargs)
def get_all_rml(self, **kwargs):
"""
Returns a dictionary with the output of all the rml processor results
"""
rml_procs = self.es_defs.get("kds_esRmlProcessor", [])
role = kwargs.get('role')
if role:
rml_procs = [proc for proc in rml_procs
if role == 'rdf_class' or
proc['force']]
rml_maps = {}
for rml in rml_procs:
rml_maps[rml['name']] = self.get_rml(rml, **kwargs)
return rml_maps
def _set_subject(self, subject):
""" sets the subject value for the class instance
Args:
subject(dict, Uri, str): the subject for the class instance
"""
# if not subject:
# self.subject =
def test_uri(value):
""" test to see if the value is a uri or bnode
Returns: Uri or Bnode """
# .__wrapped__
if not isinstance(value, (Uri, BlankNode)):
try:
if value.startswith("_:"):
return BlankNode(value)
else:
return Uri(value)
except:
return BlankNode()
else:
return value
if isinstance(subject, dict):
self.subject = test_uri(subject['s'])
if isinstance(subject['o'], list):
for item in subject['o']:
self.add_property(subject['p'],
item)
else:
self.add_property(subject['p'],
subject['o'])
else:
self.subject = test_uri(subject)
def _initilize_props(self):
""" Adds an intialized property to the class dictionary """
# if self.subject == "pyuri_aHR0cDovL3R1dHQuZWR1Lw==_":
# pdb.set_trace()
try:
# pdb.set_trace()
for prop in self.es_props:
self[prop] = self.properties[prop](self, self.dataset)
setattr(self, prop, self[prop])
self[__a__] = self.properties[__a__](self, self.dataset)
setattr(self, __a__, self[__a__])
# for prop, prop_class in self.properties.items():
# # passing in the current dataset tie
# self[prop] = prop_class(self, self.dataset)
# setattr(self, prop, self[prop])
# bases = remove_parents((self.__class__,) +
# self.__class__.__bases__)
# for base in bases:
# if base.__name__ not in IGNORE_CLASSES:
# base_name = Uri(base.__name__)
# try:
# self['rdf_type'].append(base_name)
# except KeyError:
# self[Uri('rdf_type')] = MODULE.rdfclass.make_property({},
# 'rdf_type',
# self.__class__.__name__)(self, self.dataset)
# self['rdf_type'].append(base_name)
except (AttributeError, TypeError):
pass
@property
def sparql(self):
return self.subject.sparql
@property
def rdflib(self):
return self.subject.rdflib
@property
def sparql_uri(self):
return self.subject.sparql_uri
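A small illustrative sketch of the public surface of the class above; the subject URI and property values are hypothetical, the import paths are assumed, and in normal use instances come from the generated subclasses with the framework's definitions already loaded rather than from RdfClassBase directly:

from rdfframework.datatypes import Uri
from rdfframework.rdfclass import RdfClassBase

item = RdfClassBase("http://example.org/item/1")
item.add_property(Uri("rdfs:label"), "Example item")  # assumes property defs are loaded
print(item.subject.sparql_uri)  # the subject URI of the instance
print(item.to_json)             # json-compatible dictionary of the data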
|
class RdfClassBase(dict, metaclass=RdfClassMeta):
''' This is the base class for the generation of all RDF Class type
Args:
subject: the instance URI for the rdf class. This is the subject URI
Kwargs:
dataset(RdfDataset): The linked RdfDataset that this instance of a class
is part of.
'''
def __init__(self, subject=None, dataset=None, **kwargs):
pass
def __hash__(self):
pass
def __eq__(self, other):
pass
def __lt__(self, other):
pass
def add_property(self, pred, obj):
''' adds a property and its value to the class instance
args:
pred: the predicate/property to add
obj: the value/object to add
obj_method: *** No longer used.
'''
pass
@property
def subclasses(self):
''' returns a list of subclasses of the current class '''
pass
@property
def to_json(self):
''' converts the class to a json compatible python dictionary '''
pass
def conv_json(self, uri_format="sparql_uri", add_ids=False):
''' converts the class to a json compatible python dictionary
Args:
uri_format('sparql_uri','pyuri'): The format that uri values will
be returned
Returns:
dict: a json compatible python dictionary
'''
pass
def convert_item(ivalue):
''' converts an individual value to a json value
Args:
ivalue: value of the item to convert
Returns:
JSON serializable value
'''
pass
@classmethod
def es_mapping(cls, base_class=None, role='rdf_class', **kwargs):
''' Returns the es mapping for the class
args:
-----
base_class: The root class being indexed
role: the role states how the class should be mapped depending
upon whether it is used as a subject or an object. options
are es_Nested or rdf_class
'''
pass
def _prop_filter(prop, value, **kwargs):
''' filters out props that should not be used for es_mappings:
These include props that are the owl:inverseOf of the parent_props.
Use of these props will cause a recursion depth error
Args:
prop: the name of the prop
value: the prop value(an instance of the prop's class)
Returns:
bool: whether the prop should be used
'''
pass
@classmethod
def es_indexers(cls, base_class=None, role='rdf_class', **kwargs):
''' Returns the es mapping for the class
args:
-----
base_class: The root class being indexed
role: the role states how the class should be mapped depending
upon whether it is used as a subject or an object. options
are es_Nested or rdf_class
'''
pass
def _prop_filter(prop, value, **kwargs):
''' filters out props that should not be used for es_mappings:
These include props that are the owl:inverseOf of the parent_props.
Use of these props will cause a recursion depth error
Args:
prop: the name of the prop
value: the prop value(an instance of the prop's class)
Returns:
bool: whether the prop should be used
'''
pass
def bnode_id(self):
'''
calculates the bnode id for the class
'''
pass
def es_json(self, role='rdf_class', remove_empty=True, **kwargs):
''' Returns a JSON object of the class for insertion into es
args:
role: the role states how the class data should be returned
depending upon whether it is used as a subject or an object.
options are kds_esNested or rdf_class
remove_empty: True removes empty items from es object
'''
pass
def test_idx_status(cls_inst, **kwargs):
'''
Return True if the class has already been indexed in elasticsearch
Args:
-----
cls_inst: the rdfclass instance
Kwargs:
-------
force[boolean]: True will return false to force a reindex of the
class
'''
pass
def get_rml(self, rml_def, **kwargs):
'''
returns the rml mapping output for specified mapping
Args:
-----
rml_def: The name of the mapping or a dictionary definition
'''
pass
def get_all_rml(self, **kwargs):
'''
Returns a dictionary with the output of all the rml processor results
'''
pass
def _set_subject(self, subject):
''' sets the subject value for the class instance
Args:
subject(dict, Uri, str): the subject for the class instance
'''
pass
def test_uri(value):
''' test to see if the value is a uri or bnode
Returns: Uri or Bnode '''
pass
def _initilize_props(self):
''' Adds an initialized property to the class dictionary '''
pass
@property
def sparql(self):
pass
@property
def rdflib(self):
pass
@property
def sparql_uri(self):
pass
| 32 | 18 | 26 | 2 | 16 | 8 | 4 | 0.48 | 2 | 10 | 3 | 0 | 17 | 3 | 19 | 66 | 547 | 54 | 335 | 83 | 303 | 160 | 268 | 75 | 243 | 18 | 5 | 4 | 104 |
143,276 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/processors/propertyprocessors.py
|
rdfframework.processors.propertyprocessors.PropertyProcessor
|
class PropertyProcessor(metaclass=KeyRegistryMeta):
"""
Base class for all property processors. All subclasses are registered
to this class. To add an additional property processor outside of the
rdfframework, subclass this class and add a class attribute called
'definition_uri' set to a rdfframework.datatypes.Uri instance of the
property processor name.
Args:
-----
prop(RdfPropertyBase): The instance of the rdf property
classnames: list of applied classnames
data_attr: the name of the attribute to manipulate in the supplied
property during the call
"""
__required_idx_attrs__ = {"definition_uri"}
__optional_idx_attrs__ = {"datatype", "py_type"}
__special_idx_attrs__ = [{"datatype": ["sparql",
"sparql_uri",
"pyuri",
"clean_uri",
"rdflib"]}]
__nested_idx_attrs__ = {"type"}
def __init__(self, params=None, data_attr=None, classnames=[]):
self.params = params
self.classnames = set(classnames)
self.data_attr = data_attr
def __use_processor__(self, prop):
"""
Tests to see if the processor should be used for a particular instance
of a calling property based on the tied rdfclass for the property
"""
if not set(prop.classnames).difference(set(self.classnames)) < \
prop.classnames:
return False
return True
def __data_source__(self, prop):
"""
selects the appropriate data source
"""
if self.data_attr:
return getattr(prop, self.data_attr)
else:
return prop
def __set_data__(self, prop, data):
"""
sets the processed data to the appropriate property attribute
Args:
-----
prop: the property being manipulated
data: the list of processed data
"""
if self.data_attr:
setattr(prop, self.data_attr, data)
else:
rm_idxs = []
for i, val in enumerate(prop):
if val not in data:
rm_idxs.append(i)
for idx in sorted(rm_idxs, reverse=True):
prop.pop(idx)
for val in data:
if val not in prop:
prop.append(val)
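Following the registration rule described in the docstring above, a sketch of a custom property processor; the kdr: URI and the upper-casing behaviour are illustrative, not a processor shipped with rdfframework:

from rdfframework.datatypes import Uri

class UppercaseProcessor(PropertyProcessor):
    """illustrative processor that upper-cases every string value"""
    definition_uri = Uri('kdr:UppercaseProcessor')

    def __call__(self, prop):
        data = self.__data_source__(prop)
        new_vals = [str(val).upper() for val in data]
        self.__set_data__(prop, new_vals)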
|
class PropertyProcessor(metaclass=KeyRegistryMeta):
'''
Base class for all property processors. All subclasses are registered
to this class. To add an additional property processor outside of the
rdfframework, subclass this class and add a class attribute called
'definition_uri' set to a rdfframework.datatypes.Uri instance of the
property processor name.
Args:
-----
prop(RdfPropertyBase): The instance of the rdf property
classnames: list of applied classnames
data_attr: the name of the attribute to manipulate in the supplied
property during the call
'''
def __init__(self, params=None, data_attr=None, classnames=[]):
pass
def __use_processor__(self, prop):
'''
Tests to see if the processor should be used for a particular instance
of a calling property based on the tied rdfclass for the property
'''
pass
def __data_source__(self, prop):
'''
selects the appropriate data source
'''
pass
def __set_data__(self, prop, data):
'''
sets the processed data to the appropriate property attribute
Args:
-----
prop: the property being manipulated
data: the list of processed data
'''
pass
| 5 | 4 | 11 | 0 | 7 | 4 | 3 | 0.75 | 1 | 2 | 0 | 3 | 4 | 3 | 4 | 23 | 71 | 8 | 36 | 15 | 31 | 27 | 29 | 15 | 24 | 7 | 3 | 3 | 12 |
143,277 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/processors/propertyprocessors.py
|
rdfframework.processors.propertyprocessors.PropSingleton
|
class PropSingleton(KeyRegistryMeta):
"""singleton class for processors that do not utilize any params """
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(PropSingleton,
cls).__call__(*args, **kwargs)
return cls._instances[cls]
|
class PropSingleton(KeyRegistryMeta):
'''singleton class for processors that do not utilize any params '''
def __call__(cls, *args, **kwargs):
pass
| 2 | 1 | 5 | 0 | 5 | 0 | 2 | 0.14 | 1 | 1 | 0 | 2 | 1 | 0 | 1 | 20 | 9 | 1 | 7 | 3 | 5 | 1 | 6 | 3 | 4 | 2 | 3 | 1 | 2 |
143,278 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/processors/propertyprocessors.py
|
rdfframework.processors.propertyprocessors.ConvertObjectToStringProcessor
|
class ConvertObjectToStringProcessor(PropertyProcessor):
"""
Converts the object values of the property to a string
Args:
-----
params: {'kds_lookupProperty': the name of the rdf property in the
object value to convert to a string}
Returns:
--------
strings for each object value
"""
definition_uri = Uri('kdr:ConvertObjectToStringProcessor')
def __init__(self, params=[{}], data_attr=None, classnames=[]):
super().__init__(params, data_attr, classnames)
str_prop = params[0].get('kds_lookupProperty')
if str_prop:
self.str_prop = str_prop[0]
else:
self.str_prop = None
def __call__(self, prop):
data = self.__data_source__(prop)
rtn_list = []
if self.str_prop:
for val in data:
if val.get(self.str_prop):
rtn_list = [str(item) for item in val[self.str_prop]]
else:
rtn_list = [str(item) for item in data]
self.__set_data__(prop, rtn_list)
|
class ConvertObjectToStringProcessor(PropertyProcessor):
'''
Converts the object values of the property to a string
Args:
-----
params: {'kds_lookupProperty': the name of the rdf property in the
object value to convert to a string}
Returns:
--------
strings for each object value
'''
def __init__(self, params=[{}], data_attr=None, classnames=[]):
pass
def __call__(self, prop):
pass
| 3 | 1 | 9 | 1 | 9 | 0 | 3 | 0.53 | 1 | 2 | 0 | 0 | 2 | 1 | 2 | 25 | 36 | 7 | 19 | 9 | 16 | 10 | 17 | 9 | 14 | 4 | 4 | 3 | 6 |
143,279 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/datatypes/xsdtypes.py
|
rdfframework.datatypes.xsdtypes.RdfXMLLiteral
|
class RdfXMLLiteral(XsdString):
datatype = Uri("rdf:XMLLiteral")
class_type = "RdfXMLLiteral"
py_type = None
es_type = "text"
|
class RdfXMLLiteral(XsdString):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 102 | 5 | 0 | 5 | 5 | 4 | 0 | 5 | 5 | 4 | 0 | 6 | 0 | 0 |
143,280 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/processors/propertyprocessors.py
|
rdfframework.processors.propertyprocessors.AddClassProcessor
|
class AddClassProcessor(PropertyProcessor, metaclass=PropSingleton):
"""
Adds the rdf:Class URI to the property's list of values
"""
definition_uri = Uri('kdr:AddClassProcessor')
def __call__(self, prop):
prop += prop.bound_class.class_names
|
class AddClassProcessor(PropertyProcessor, metaclass=PropSingleton):
'''
Adds the rdf:Class URI to the property's list of values
'''
def __call__(self, prop):
pass
| 2 | 1 | 2 | 0 | 2 | 0 | 1 | 0.75 | 2 | 0 | 0 | 0 | 1 | 0 | 1 | 25 | 8 | 1 | 4 | 3 | 2 | 3 | 4 | 3 | 2 | 1 | 4 | 0 | 1 |
143,281 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/processors/classprocessors.py
|
rdfframework.processors.classprocessors.ClassProcessor
|
class ClassProcessor(metaclass=KeyRegistryMeta):
"""
Base class for all class processors. All subclasses are registered
to this class. To add an additional class processor outside of the
rdfframework, subclass this class and add a class attribute called
'definition_uri' set to a rdfframework.datatypes.Uri instance of the
class processor name.
Args:
-----
prop(RdfPropertyBase): The instance of the rdf property
classnames: list of applied classnames
data_attr: the name of the attribute to manipulate in the supplied
property during the call
"""
__required_idx_attrs__ = {"definition_uri"}
__optional_idx_attrs__ = {"datatype", "py_type"}
__special_idx_attrs__ = [{"datatype": ["sparql",
"sparql_uri",
"pyuri",
"clean_uri",
"rdflib"]}]
__nested_idx_attrs__ = {"type"}
def __init__(self, params=None, data_attr=None, classnames=[]):
self.params = params
self.classnames = set(classnames)
self.data_attr = data_attr
def __use_processor__(self, rdf_class):
"""
Tests to see if the processor should be used for a particular instance
of a calling property based on the tied rdfclass for the property
"""
return True
def __data_source__(self, prop):
"""
selects the appropriate data source
"""
if self.data_attr:
return getattr(prop, self.data_attr)
else:
return prop
def __set_data__(self, prop, data):
"""
sets the processed data to the appropriate property attribute
Args:
-----
prop: the property being manipulated
data: the list of processed data
"""
pass
|
class ClassProcessor(metaclass=KeyRegistryMeta):
'''
Base class for all class processors. All subclasses are registered
to this class. To add an additional class processor outside of the
rdfframework, subclass this class and add a class attribute called
'definition_uri' set to a rdfframework.datatypes.Uri instance of the
class processor name.
Args:
-----
prop(RdfPropertyBase): The instance of the rdf property
classnames: list of applied classnames
data_attr: the name of the attribute to manipulate in the supplied
property during the call
'''
def __init__(self, params=None, data_attr=None, classnames=[]):
pass
def __use_processor__(self, rdf_class):
'''
Tests to see if the processor should be used for a particular instance
of a calling property based on the tied rdfclass for the property
'''
pass
def __data_source__(self, prop):
'''
selects the appropriate data source
'''
pass
def __set_data__(self, prop, data):
'''
sets the processed data to the appropriate property attribute
Args:
-----
prop: the property being manipulated
data: the list of processed data
'''
pass
| 5 | 4 | 7 | 0 | 3 | 4 | 1 | 1.23 | 1 | 1 | 0 | 0 | 4 | 3 | 4 | 23 | 57 | 8 | 22 | 12 | 17 | 27 | 17 | 12 | 12 | 2 | 3 | 1 | 5 |
143,282 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/ingesters/xmlingester.py
|
rdfframework.ingesters.xmlingester.Updater
|
class Updater(object):
"""
Updates specified nodes in the xml file
"""
pass
|
class Updater(object):
'''
Updates specified nodes in the xml file
'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1.5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5 | 0 | 2 | 1 | 1 | 3 | 2 | 1 | 1 | 0 | 1 | 0 | 0 |
143,283 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/ingesters/xmlingester.py
|
rdfframework.ingesters.xmlingester.Extractor
|
class Extractor(object):
"""
Extracts all specified nodes from an xml file
Args:
-----
source: the filepath to the xml file
output: the filepath to output the results
"""
def __init__(self, source, output=None, **kwargs):
self.source = source
self.output = output
self.filter_tag = Uri("rdf:type")
self.filter_val = Uri("bf:Topic")
self.rdf_type = Uri("rdf:type")
def run(self, tag=None, output=None, **kwargs):
"""
runs the extractor
Args:
-----
output: ['filepath', None]
"""
start = datetime.datetime.now()
count = 0
if tag:
tag = Uri(tag)
xml_generator = etree.iterparse(self.source,
#events=("start", "end"),
tag=tag.etree)
else:
xml_generator = etree.iterparse(self.source) #,
#events=("start", "end"))
i = 0
for event, element in xml_generator:
type_tags = element.findall(_RDF_TYPE_TAG)
rdf_types = [el.get(_RES_TAG)
for el in type_tags
if el.get(_RES_TAG)]
# print(rdf_types)
if str(self.filter_val) in rdf_types:
pdb.set_trace()
# print("%s - %s - %s - %s" % (event,
# element.tag,
# element.attrib,
# element.text))
count += 1
# if i == 100:
# break
i += 1
element.clear()
print("Found '{}' items in {}".format(count,
(datetime.datetime.now() - start)))
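A short usage sketch for the extractor above; the source file path and the tag to iterate on are hypothetical:

# count the bf:Topic resources in a large XML export
extractor = Extractor("/tmp/example_export.xml")
extractor.run(tag="rdf:Description")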
|
class Extractor(object):
'''
Extracts all specified nodes from an xml file
Args:
-----
source: the filepath to the xml file
output: the filepath to output the results
'''
def __init__(self, source, output=None, **kwargs):
pass
def run(self, tag=None, output=None, **kwargs):
'''
runs the extractor
Args:
-----
output: ['filepath', None]
'''
pass
| 3 | 2 | 23 | 1 | 14 | 8 | 3 | 0.79 | 1 | 3 | 1 | 0 | 2 | 5 | 2 | 2 | 56 | 5 | 29 | 15 | 26 | 23 | 24 | 15 | 21 | 4 | 1 | 2 | 5 |
143,284 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/ingesters/xmlingester.py
|
rdfframework.ingesters.xmlingester.BlankConverter
|
class BlankConverter(object):
"""
Changes blanknodes to URIs
"""
pass
|
class BlankConverter(object):
'''
Changes blanknodes to URIs
'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1.5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5 | 0 | 2 | 1 | 1 | 3 | 2 | 1 | 1 | 0 | 1 | 0 | 0 |
143,285 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/framework.py
|
rdfframework.framework.RdfFrameworkSingleton
|
class RdfFrameworkSingleton(type):
"""Singleton class for the RdfFramewodk that will allow for only one
instance of the RdfFramework to be created."""
_instances = {}
def __call__(cls, *args, **kwargs):
if not CFG.is_initialized:
# print("The RdfConfigManager has not been initialized!")
pass
if cls not in cls._instances:
cls._instances[cls] = super(RdfFrameworkSingleton,
cls).__call__(*args, **kwargs)
return cls._instances[cls]
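The metaclass above is a conventional singleton: repeated instantiation of a class that uses it returns the cached instance. A sketch of that behaviour with a hypothetical class standing in for the real RdfFramework:

class ExampleFramework(metaclass=RdfFrameworkSingleton):
    def __init__(self, name="default"):
        self.name = name

first = ExampleFramework(name="one")
second = ExampleFramework(name="two")
print(first is second)  # True - the second call returns the cached instance
print(second.name)      # 'one' - __init__ is not re-run for later calls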
|
class RdfFrameworkSingleton(type):
'''Singleton class for the RdfFramework that will allow for only one
instance of the RdfFramework to be created.'''
def __call__(cls, *args, **kwargs):
pass
| 2 | 1 | 8 | 0 | 7 | 1 | 3 | 0.33 | 1 | 1 | 0 | 1 | 1 | 0 | 1 | 14 | 13 | 1 | 9 | 3 | 7 | 3 | 8 | 3 | 6 | 3 | 2 | 1 | 3 |
143,286 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/utilities/baseutilities.py
|
rdfframework.utilities.baseutilities.RegistryDictionary
|
class RegistryDictionary(dict):
"""
Extends the basic dictionary for access of items in the dictionary
"""
def find(self, value):
"""
returns a dictionary of items based on a lowercase search
args:
value: the value to search by
"""
value = str(value).lower()
rtn_dict = RegistryDictionary()
for key, item in self.items():
if value in key.lower():
rtn_dict[key] = item
return rtn_dict
def __getattr__(self, value):
return self[value]
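A quick sketch of the dictionary above; the keys and values are hypothetical:

lookup = RegistryDictionary()
lookup['XsdString'] = str
lookup['XsdInteger'] = int
print(lookup.find('xsd'))   # both entries match the lowercase search
print(lookup.XsdInteger)    # attribute-style access via __getattr__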
|
class RegistryDictionary(dict):
'''
Extends the basic dictionary for access of items in the dictionary
'''
def find(self, value):
'''
returns a dictionary of items based on a lowercase search
args:
value: the value to search by
'''
pass
def __getattr__(self, value):
pass
| 3 | 2 | 8 | 1 | 5 | 3 | 2 | 0.8 | 1 | 1 | 0 | 0 | 2 | 0 | 2 | 29 | 20 | 2 | 10 | 5 | 7 | 8 | 10 | 5 | 7 | 3 | 2 | 2 | 4 |
143,287 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/utilities/baseutilities.py
|
rdfframework.utilities.baseutilities.UniqueList
|
class UniqueList(list):
""" Extends python list preventing double elements from being added to the
list """
def append(self, value):
# pdb.set_trace()
if value not in self:
super(self.__class__, self).append(value)
def __iadd__(self, value):
if isinstance(value, list):
for item in value:
self.append(item)
else:
self.append(value)
return self
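A quick sketch of the list behaviour above; the values are hypothetical:

tags = UniqueList()
tags.append("schema_Person")
tags.append("schema_Person")               # duplicate is silently ignored
tags += ["schema_Person", "schema_Thing"]  # += accepts a list or a single value
print(tags)                                # ['schema_Person', 'schema_Thing']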
|
class UniqueList(list):
''' Extends python list preventing duplicate elements from being added to the
list '''
def append(self, value):
pass
def __iadd__(self, value):
pass
| 3 | 1 | 6 | 0 | 5 | 1 | 3 | 0.27 | 1 | 1 | 0 | 0 | 2 | 0 | 2 | 35 | 16 | 2 | 11 | 4 | 8 | 3 | 10 | 4 | 7 | 3 | 2 | 2 | 5 |
143,288 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/utilities/baseutilities.py
|
rdfframework.utilities.baseutilities.memorize
|
class memorize():
def __init__(self, function):
self.__wrapped__ = function
self.memorized = {}
self.__len = 0
self.lastused = []
self.full = False
def pop_first_used(self):
if self.full or self.__len > 10000000:
self.full = False
counter = 1000
while counter:
key = self.lastused.pop(0)
del self.memorized[key]
counter -= 1
self.__len -= 1
return
self.__len += 1
def __call__(self, *args, **kwargs):
# return self.__wrapped__(*args, **kwargs)
# pdb.set_trace()
try:
return self.memorized[args]
except KeyError:
self.memorized[args] = self.__wrapped__(*args, **kwargs)
self.lastused.append(args)
self.pop_first_used()
return self.memorized[args]
except TypeError:
try:
return self.memorized[str(args)]
except KeyError:
nargs = str(args)
self.memorized[nargs] = self.__wrapped__(*args, **kwargs)
self.lastused.append(nargs)
self.pop_first_used()
return self.memorized[nargs]
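The class above is a memoizing decorator with a rough cap on cache size; a quick sketch of its use with a hypothetical function:

@memorize
def slow_square(value):
    # stand-in for an expensive computation
    return value * value

print(slow_square(12))  # computed and cached
print(slow_square(12))  # returned from the memorized dictionary on the second call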
|
class memorize():
def __init__(self, function):
pass
def pop_first_used(self):
pass
def __call__(self, *args, **kwargs):
pass
| 4 | 0 | 12 | 0 | 11 | 1 | 3 | 0.06 | 0 | 3 | 0 | 0 | 3 | 5 | 3 | 3 | 39 | 2 | 35 | 12 | 31 | 2 | 35 | 12 | 31 | 4 | 0 | 2 | 8 |
143,289 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/processors/propertyprocessors.py
|
rdfframework.processors.propertyprocessors.AddClassHierarchyProcessor
|
class AddClassHierarchyProcessor(PropertyProcessor, metaclass=PropSingleton):
"""
Adds the rdf:Class hierarchy URIs to the property's list of values.
This is useful for indexing in elasticsearch when dealing with rdf:type.
This way when doing a term search for a particular rdf:type all of the
subclasses for that type will be included as well.
Example:
--------
For a property with 'schema_Person' as the associated class,
['schema:Thing', 'schema:Person'] will be added to the property list
of values since 'schema:Person' is a subclass of 'schema:Thing'
"""
definition_uri = Uri('kdr:AddClassHierarchyProcessor')
def __call__(self, prop):
data = self.__data_source__(prop)
rtn_list = [item for item in data]
for prop_uri in prop.bound_class.hierarchy:
rtn_list.append(prop_uri)
rtn_list = list(set(rtn_list))
self.__set_data__(prop, rtn_list)
|
class AddClassHierarchyProcessor(PropertyProcessor, metaclass=PropSingleton):
'''
Adds the rdf:Class hierarchy URIs to the property's list of values.
This is useful for indexing in elasticsearch when dealing with rdf:type.
This way when doing a term search for a particular rdf:type all of the
subclasses for that type will be included as well.
Example:
--------
For a property with 'schema_Person' as the associated class,
['schema:Thing', 'schema:Person'] will be added to the property list
of values since 'schema:Person' is a subclass of 'schema:Thing'
'''
def __call__(self, prop):
pass
| 2 | 1 | 9 | 2 | 7 | 0 | 2 | 1.22 | 2 | 2 | 0 | 0 | 1 | 0 | 1 | 25 | 26 | 6 | 9 | 6 | 7 | 11 | 9 | 6 | 7 | 2 | 4 | 1 | 2 |
143,290 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/utilities/baseutilities.py
|
rdfframework.utilities.baseutilities.reg_patterns
|
class reg_patterns():
"""
Class of pattern matchers for regular expression matching
"""
url = re.compile(r"^(https?):\/\/\w+(\.\w+)*(:[0-9]+)?\/?(\/[-_%#.\w]*)*$", re.IGNORECASE)
url_no_http = re.compile(r"^\w+(\.\w+)*(:[0-9]+)?\/?(\/[-_%.#\w]*)*$", re.IGNORECASE)
email = re.compile(r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)")
dir_win = re.compile(r'^[a-zA-Z]:\\(((?![<>:"/\\|?*]).)+((?<![ .])\\)?)*$')
# dir_linux = re.compile(r'^(\/[\w^ ]+)+\/?([\w.])+[^.]$')
dir_linux = re.compile(r'^(\/\w+)(\/\w+)(\/\w+)$')
isbn = re.compile(r"^(\d+)\b")
|
class reg_patterns():
'''
Class of pattern matchers for regular expression matching
'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0.86 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 0 | 7 | 7 | 6 | 6 | 7 | 7 | 6 | 0 | 0 | 0 | 0 |
143,291 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/rdfclass/rdffactories.py
|
rdfframework.rdfclass.rdffactories.RdfClassFactory
|
class RdfClassFactory(RdfBaseFactory):
""" Extends RdfBaseFactory to property creation specific querying """
log_level = logging.INFO #MLOG_LVL #
cache_file = "classes.json"
classes_key = set([Uri(item) for item in RDF_CLASSES])
inferred_key = set([Uri(item) for item in INFERRED_CLASS_PROPS])
rdf_type = Uri('rdf_type')
def __init__(self, conn, reset=False, nsm=NSM, cfg=CFG):
if cfg.props_initialized != True:
err_msg = ["RdfPropertyFactory must be run prior to",
"the intialization of RdfClassFactory!"]
raise RuntimeError(" ".join(err_msg))
sparql_template = "sparqlDefinitionClassesAll.rq"
super().__init__(conn, sparql_template, reset, nsm, cfg)
def make(self):
""" reads through the definitions and generates an python class for each
definition """
log.setLevel(self.log_level)
created = []
self.set_class_dict()
start = datetime.datetime.now()
log.info(" # of classes to create: %s" % len(self.class_dict))
log.debug(" creating classes that are not subclassed")
for name, cls_defs in self.class_dict.items():
# if name in ['bf_Organization', 'bf_Agent']:
# pdb.set_trace()
if not self.class_dict[name].get('rdfs_subClassOf'):
created.append(name)
setattr(MODULE.rdfclass,
name,
types.new_class(name,
(RdfClassBase,),
{#'metaclass': RdfClassMeta,
'cls_defs': cls_defs}))
log.debug(" created %s classes in: %s",
len(created),
(datetime.datetime.now() - start))
for name in created:
del self.class_dict[name]
left = len(self.class_dict)
classes = []
while left > 0:
new = []
for name, cls_defs in self.class_dict.items():
# if name in ['bf_Organization', 'bf_Agent']:
# pdb.set_trace()
parents = self.class_dict[name].get('rdfs_subClassOf')
bases = tuple()  # start with an empty bases tuple for this class
if not parents:
bases += (RdfClassBase, )
else:
for parent in make_list(parents):
if parent in created or parent in classes:
if parent in classes:
bases += (RdfClassBase, )
else:
base = getattr(MODULE.rdfclass, parent)
bases += (base,) + base.__bases__
if len(bases) > 0:
created.append(name)
setattr(MODULE.rdfclass,
name,
types.new_class(name,
bases,
{#'metaclass': RdfClassMeta,
'cls_defs': cls_defs}))
for name in created:
try:
del self.class_dict[name]
except KeyError:
pass
if left == len(self.class_dict):
# c_list = [self.class_dict[name].get('rdfs_subClassOf') \
# for name in self.class_dict]
missing_parents = []
for name in self.class_dict:
missing_parents += \
self.class_dict[name].get('rdfs_subClassOf', [])
missing_parents = set(missing_parents)
still_valid = set([name for name in self.class_dict
if name not in missing_parents])
classes = list(missing_parents.difference(\
set(self.class_dict.keys())))
# classess = []
# for cl in c_list:
# for item in cl:
# classes.append(item)
for name in self.class_dict:
if name in classes:
classes.remove(name)
for p_name in self.class_dict[name].get('rdfs_subClassOf',
[]).copy():
if p_name in classes:
self.class_dict[name]['rdfs_subClassOf'].remove(\
p_name)
# pdb.set_trace()
left = len(self.class_dict)
# self.tie_properties(created)
log.info(" created all classes in %s",
(datetime.datetime.now() - start))
def set_class_dict(self):
""" Reads through the dataset and assigns self.class_dict the key value
pairs for the classes in the dataset
"""
self.class_dict = {}
for name, cls_defs in self.defs.items():
def_type = set(cls_defs.get(self.rdf_type, []))
if name.type == 'bnode':
continue
# a class can be determined by checking to see if it is of an
# rdf_type listed in the classes_key or has a property that is
# listed in the inferred_key
if def_type.intersection(self.classes_key) or \
list([cls_defs.get(item) for item in self.inferred_key]):
self.class_dict[name] = cls_defs
def tie_properties(self, class_list):
""" Runs through the classess and ties the properties to the class
args:
class_list: a list of class names to run
"""
log.setLevel(self.log_level)
start = datetime.datetime.now()
log.info(" Tieing properties to the class")
for cls_name in class_list:
cls_obj = getattr(MODULE.rdfclass, cls_name)
prop_dict = dict(cls_obj.properties)
for prop_name, prop_obj in cls_obj.properties.items():
setattr(cls_obj, prop_name, link_property(prop_obj, cls_obj))
log.info(" Finished tieing properties in: %s",
(datetime.datetime.now() - start))
|
class RdfClassFactory(RdfBaseFactory):
''' Extends RdfBaseFactory to property creation specific querying '''
def __init__(self, conn, reset=False, nsm=NSM, cfg=CFG):
pass
def make(self):
''' reads through the definitions and generates a python class for each
definition '''
pass
def set_class_dict(self):
''' Reads through the dataset and assigns self.class_dict the key value
pairs for the classes in the dataset
'''
pass
def tie_properties(self, class_list):
''' Runs through the classes and ties the properties to the class
args:
class_list: a list of class names to run
'''
pass
| 5 | 4 | 32 | 1 | 25 | 7 | 7 | 0.28 | 1 | 9 | 1 | 0 | 4 | 1 | 4 | 8 | 137 | 7 | 105 | 32 | 100 | 29 | 82 | 32 | 77 | 19 | 2 | 6 | 28 |
143,292 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/rdfclass/rdfproperty.py
|
rdfframework.rdfclass.rdfproperty.RdfLinkedPropertyMeta
|
class RdfLinkedPropertyMeta(RdfPropertyMeta):
""" Metaclass for generating rdfproperty classes """
@classmethod
def __prepare__(mcs, name, bases, **kwargs):
# print(' RdfClassMeta.__prepare__(\n\t\t%s)' % (p_args(args, kwargs)))
cls_name = kwargs.pop('cls_name')
if cls_name == 'RdfClassBase':
return {}
linked_cls = kwargs.pop('linked_cls')
prop_defs = {attr: getattr(bases[0], attr)
for attr in dir(bases[0])
if isinstance(attr, Uri.__wrapped__)}
prop_name = bases[0]._prop_name
new_def = filter_prop_defs(prop_defs,
linked_cls.hierarchy,
[cls_name])
new_def['__doc__'] = bases[0].__doc__
new_def['_cls_name'] = cls_name
new_def['_linked_cls'] = linked_cls
new_def['_prop_name'] = prop_name
new_def['_init_processors'] = get_processors('kds_initProcessor',
new_def)
new_def['_es_processors'] = get_processors('kds_esProcessor',
new_def)
return new_def
|
class RdfLinkedPropertyMeta(RdfPropertyMeta):
''' Metaclass for generating rdfproperty classes '''
@classmethod
def __prepare__(mcs, name, bases, **kwargs):
pass
| 3 | 1 | 24 | 2 | 21 | 1 | 2 | 0.09 | 1 | 1 | 1 | 0 | 0 | 0 | 1 | 18 | 28 | 3 | 23 | 8 | 20 | 2 | 16 | 7 | 14 | 2 | 3 | 1 | 2 |
143,293 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/rml/rmlmanager.py
|
rdfframework.rml.rmlmanager.RmlSingleton
|
class RmlSingleton(type):
"""Singleton class that will allow for only one instance to be created.
"""
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(RmlSingleton,
cls).__call__(*args, **kwargs)
return cls._instances[cls]
|
class RmlSingleton(type):
'''Singleton class that will allow for only one instance to be created.
'''
def __call__(cls, *args, **kwargs):
pass
| 2 | 1 | 5 | 0 | 5 | 0 | 2 | 0.29 | 1 | 1 | 0 | 1 | 1 | 0 | 1 | 14 | 9 | 0 | 7 | 3 | 5 | 2 | 6 | 3 | 4 | 2 | 2 | 1 | 2 |
143,294 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/search/elasticsearchbase.py
|
rdfframework.search.elasticsearchbase.EsBase
|
class EsBase():
''' Base elasticsearch rdfframework class for common es operations'''
ln = "%s:EsBase" % MODULE_NAME
log_level = logging.INFO
def __init__(self, **kwargs):
self.es_url = kwargs.get('es_url', config.ES_URL)
self.es = kwargs.get("es",Elasticsearch([self.es_url]))
self.op_type = kwargs.get("op_type", "index")
self.es_index = kwargs.get("es_index")
self.doc_type = kwargs.get("doc_type")
self.reset_index = kwargs.get("reset_index",False)
self.reset_doc_type = kwargs.get("reset_doc_type",False)
self.es_mapping = kwargs.get("es_mapping")
def make_action_list(self, item_list, **kwargs):
''' Generates a list of actions for sending to Elasticsearch '''
action_list = []
es_index = get2(kwargs, "es_index", self.es_index)
action_type = kwargs.get("action_type","index")
action_settings = {'_op_type': action_type,
'_index': es_index}
doc_type = kwargs.get("doc_type", self.doc_type)
if not doc_type:
doc_type = "unk"
id_field = kwargs.get("id_field")
for item in item_list:
action = get_es_action_item(item,
action_settings,
doc_type,
id_field)
action_list.append(action)
return action_list
def bulk_save(self, action_list, **kwargs):
''' sends a passed in action_list to elasticsearch '''
lg = logging.getLogger("%s.%s" % (self.ln, inspect.stack()[0][3]))
lg.setLevel(self.log_level)
err_log = logging.getLogger("index.errors")
es = self.es
es_index = get2(kwargs, "es_index", self.es_index)
reset_index = kwargs.get("reset_index",self.reset_index)
doc_type = kwargs.get("doc_type", self.doc_type)
lg.info("Sending %s items to Elasticsearch",len(action_list))
# bulk_stream = helpers.streaming_bulk(es,
result = helpers.bulk(es,
action_list,
chunk_size=400,
raise_on_error=False)
lg.info("FINISHED sending to Elasticsearch")
if result[1]:
lg.info("Formating Error results")
# action_keys = {item['_id']:i for i, item in enumerate(action_list)}
new_result = []
for item in result[1][:5]:
for action_item in action_list:
if action_item['_id'] == item[list(item)[0]]['_id']:
new_result.append((item, action_item,))
break
err_log.info("Results for batch '%s'\n(%s,\n%s\n%s)",
kwargs.get('batch', "No Batch Number provided"),
result[0],
json.dumps(new_result, indent=4),
json.dumps(result[1]))
del new_result
lg.info("Finished Error logging")
# for success, result in bulk_stream:
# lg.debug("\nsuccess: %s \nresult:\n%s", success, pp.pformat(result))
return result
def save(self, data, **kwargs):
""" sends a passed in action_list to elasticsearch
args:
data: that data dictionary to save
kwargs:
id: es id to use / None = auto
"""
lg = logging.getLogger("%s.%s" % (self.ln, inspect.stack()[0][3]))
lg.setLevel(self.log_level)
es = self.es
es_index = get2(kwargs, "es_index", self.es_index)
reset_index = kwargs.get("reset_index",self.reset_index)
doc_type = kwargs.get("doc_type", self.doc_type)
op_type = kwargs.get("op_type", self.op_type)
id_value = kwargs.get("id")
id_field = kwargs.get("id_field")
if id_field:
id_value = data.get(id_field)
if op_type == "index":
result = es.index(index=es_index,
id=id_value,
doc_type=doc_type,
body=data)
elif op_type == "create":
result = es.create(index=es_index,
id=id_value,
doc_type=doc_type,
body=data)
lg.debug("Result = \n%s",pp.pformat(result))
return result
def _find_ids(self,
data_list,
prop,
lookup_index,
lookup_doc_type,
lookup_field):
""" Reads a list of data and replaces the ids with es id of the item
args:
data_list: list of items to find in replace
prop: full prop name in es format i.e. make.id
lookup_src: dictionary with index doc_type ie.
{"es_index": "reference", "doc_type": "device_make"}
lookup_fld: field to do the lookup against in full es naming
convention i.e. make.raw
"""
lg = logging.getLogger("%s.%s" % (self.ln, inspect.stack()[0][3]))
lg.setLevel(self.log_level)
rtn_list = []
first_time = IsFirst()
for item in data_list:
# the Dot class will retrieve and set dictionary values via dot
# notation
val = Dot(item).get(prop)
if val.startswith("#;lookup#;"):
lookup_val = val.replace("#;lookup#;", "")
lookup_obj = self.get_item(lookup_val, lookup_field)
if first_time.first():
lg.debug(" lookup_obj:\n%s", pp.pformat(lookup_obj))
if lookup_obj:
rtn_list.append(Dot(item).set(prop, lookup_obj['_id']))
return rtn_list
def get_doc(self, item_id, id_field="_id", **kwargs):
""" returns a single item data record/document based on specified
criteria
args:
item_id: the id value of the desired item. Can be used in
combination with the id_field for a paired lookup.
id_field: the field that is related to the item_id; default = '_id'
**Example**: selecting a country using a different
identifier than the record id. The United States's '_id'
value is 'US' however the record can be found by
specifying item_id='USA', id_field='ISO 3166-1 A3'
kwargs:
used to override any of the initialization values for the class
"""
lg = logging.getLogger("%s.%s" % (self.ln, inspect.stack()[0][3]))
lg.setLevel(self.log_level)
args = inspect.getargvalues(inspect.currentframe())[3]
lg.debug("\n****** Args *****:\n%s",
pp.pformat(args))
es = kwargs.get("es",self.es)
doc_type = kwargs.get("doc_type", self.doc_type)
if id_field == "_id":
lg.debug("*** _id lookup: index: %s item_id: %s",
self.es_index,
item_id)
result = es.get(index=self.es_index,
id=item_id)
else:
dsl = {
"query": {
"constant_score": {
"filter": {
"term": { id_field: item_id }
}
}
}
}
lg.debug("*** id_field lookup: index: %s item_id: %s \nDSL: %s",
self.es_index,
item_id,
pp.pformat(dsl))
result = es.search(index=self.es_index,
doc_type=doc_type,
body=dsl)
result = first(result.get("hits",{}).get("hits",[]))
lg.debug("\tresult:\n%s", pp.pformat(result))
self.item_data = result
self.form_data = MultiDict(result)
return result
def get_list(self, method="list", **kwargs):
""" returns a key value list of items based on the specfied criteria
"""
lg = logging.getLogger("%s.%s" % (self.ln, inspect.stack()[0][3]))
lg.setLevel(self.log_level)
args = inspect.getargvalues(inspect.currentframe())[3]
lg.debug("\n****** Args *****:\n%s",
pp.pformat(args))
es = kwargs.get("es",self.es)
doc_type = get2(kwargs, "doc_type", self.doc_type)
id_field = get2(kwargs, "id_field", "_id")
value_fld = kwargs.get("value_fld")
fields = kwargs.get("fields")
sort_dir = get2(kwargs,"sort_dir", "asc")
sort_fields = get2(kwargs,"sort_fields", get2(kwargs, "fields", [value_fld]))
size = get2(kwargs,"size",2000)
term = get2(kwargs,"term",'').replace("/","//")
filter_field = kwargs.get('filter_field')
filter_value = kwargs.get('filter_value')
dsl = {}
# set return to only return the fields specified or return the whole
# document if not specified
if fields is not None:
dsl["_source"] = fields
elif value_fld is not None:
dsl["_source"] = [value_fld]
fields = [value_fld]
else:
fields = []
# set query parameters based on the return method "list" or "search"
if sort_dir != "none" and method == "list":
dsl["sort"] = []
for fld in sort_fields:
if fld is not None:
dsl["sort"].append({ fld: sort_dir })
if method == "search":
# query in elasticsearch breaks if there is a single open parenthesis
# remove a single parenthesis from the search term
if "(" in term and ")" not in term:
search_term = term.replace("(", "")
else:
search_term = term
size = 5
dsl['query'] = {
"bool": {
"should": [
{
"query_string" : {
"analyze_wildcard": {
"query": "*%s*" % search_term
}
}
},
{
"query_string" : {
"query": "*%s*" % search_term,
"analyzer": "default",
"analyze_wildcard": True,
"fields": fields,
"boost": 10
}
}
]
}
}
else:
pass
if filter_value:
dsl['filter'] = {
"term": { filter_field: filter_value }
}
lg.info("\n-------- size: %s\ndsl:\n%s", size, json.dumps(dsl,indent=4))
result = es.search(index=self.es_index,
size=size,
doc_type=doc_type,
body=dsl)
if kwargs.get("calc"):
result = self._calc_result(result, kwargs['calc'])
lg.debug(pp.pformat(result))
return result
def _calc_result(self, results, calc):
""" parses the calc string and then reads the results and returns
the new value Elasticsearch no longer allow dynamic in scripting
args:
results: the list of results from Elasticsearch
calc: the calculation sting
returns:
refomated results list with calculation added to the '__calc' field
in _source
examples:
concatenation: use + field_names and double quotes to add text
fld1 +", " + fld2 = "fld1, fld2"
"""
lg = logging.getLogger("%s.%s" % (self.ln, inspect.stack()[0][3]))
lg.setLevel(self.log_level)
# if the calculation is empty exit
if calc is None:
return results
lg.debug("calc %s", calc)
# perform concatenation
hits = results.get('hits',{}).get('hits',[])
for item in hits:
lg.debug("\n*** item:\n%s", pp.pformat(item))
if "+" in calc:
calc_parts = calc.split("+")
calc_str = ""
for i, part in enumerate(calc_parts):
if '"' in part:
calc_parts[i] = part.replace('"','')
else:
if part.startswith("_"):
calc_parts[i] = item.get(part)
else:
calc_parts[i] = Dot(item['_source']).get(part)
lg.debug(" calc result: %s", "".join(calc_parts))
item['_source']['__calc'] = "".join(calc_parts)
lg.debug("calc %s", calc)
return results
|
class EsBase():
''' Base elasticsearch rdfframework class for common es operations'''
def __init__(self, **kwargs):
pass
def make_action_list(self, item_list, **kwargs):
''' Generates a list of actions for sending to Elasticsearch '''
pass
def bulk_save(self, action_list, **kwargs):
''' sends a passed in action_list to elasticsearch '''
pass
def save(self, data, **kwargs):
''' sends a passed in action_list to elasticsearch
args:
data: that data dictionary to save
kwargs:
id: es id to use / None = auto
'''
pass
def _find_ids(self,
data_list,
prop,
lookup_index,
lookup_doc_type,
lookup_field):
''' Reads a list of data and replaces the ids with es id of the item
args:
data_list: list of items to find in replace
prop: full prop name in es format i.e. make.id
lookup_src: dictionary with index doc_type ie.
{"es_index": "reference", "doc_type": "device_make"}
lookup_fld: field to do the lookup against in full es naming
convention i.e. make.raw
'''
pass
def get_doc(self, item_id, id_field="_id", **kwargs):
''' returns a single item data record/document based on specified
criteria
args:
item_id: the id value of the desired item. Can be used in
combination with the id_field for a paired lookup.
id_field: the field that is related to the item_id; default = '_id'
**Example**: selecting a country using a different
identifier than the record id. The United States's '_id'
value is 'US' however the record can be found by
specifying item_id='USA', id_field='ISO 3166-1 A3'
kwargs:
used to override any of the initialization values for the class
'''
pass
def get_list(self, method="list", **kwargs):
''' returns a key value list of items based on the specified criteria
'''
pass
def _calc_result(self, results, calc):
''' parses the calc string, reads the results, and returns the new value.
Elasticsearch no longer allows dynamic scripting
args:
results: the list of results from Elasticsearch
calc: the calculation string
returns:
reformatted results list with calculation added to the '__calc' field
in _source
examples:
concatenation: use + field_names and double quotes to add text
fld1 +", " + fld2 = "fld1, fld2"
'''
pass
| 9 | 8 | 39 | 3 | 29 | 7 | 5 | 0.25 | 0 | 4 | 2 | 1 | 8 | 10 | 8 | 8 | 326 | 32 | 236 | 89 | 222 | 60 | 162 | 84 | 153 | 10 | 0 | 5 | 37 |
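A hypothetical usage sketch of the EsBase class above; the URL, index, doc type, id and search term are made-up values, and a running Elasticsearch instance is assumed.
searcher = EsBase(es_url="http://localhost:9200",
                  es_index="catalog",
                  doc_type="work")
doc = searcher.get_doc("a1b2c3")            # lookup by the es '_id' field
hits = searcher.get_list(method="search",   # wildcard search over two fields
                         term="pride and prejudice",
                         fields=["title", "author"])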
143,295 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/search/esloaders.py
|
rdfframework.search.esloaders.EsRdfBulkLoader
|
class EsRdfBulkLoader(object):
""" Bulk loads data from the triplestore to elasticsearch """
log_level = logging.DEBUG
def __init__(self, rdf_class, tstore_conn, search_conn, **kwargs):
log.setLevel(self.log_level)
self.tstore_conn = tstore_conn
self.search_conn = search_conn
try:
self.es_index = rdf_class.es_defs.get('kds_esIndex')[0]
self.es_doc_type = rdf_class.es_defs.get('kds_esDocType')[0]
except TypeError:
log.warn("'%s' is NOT cofigured for indexing to elasticsearch",
rdf_class)
return
self.search = Search(using=search_conn.es).index(self.es_index)
self.rdf_class = rdf_class
self._set_es_workers(**kwargs)
self.idx_start_time = XsdDatetime(datetime.datetime.utcnow())
# add all of the sublcasses for a rdf_class
self.rdf_types = [rdf_class.uri] + [item.uri
for item in rdf_class.subclasses]
# self.query = self.items_query_template.format(
# rdf_types="\n\t\t".join(rdf_types),
# idx_start_time=XsdDatetime(datetime.datetime.utcnow()).sparql)
EsMappings().initialize_indices()
if kwargs.get("reset_idx"):
self.delete_idx_status(self.rdf_class)
self.count = 0
kwargs['uri_list'] = self.get_uri_list()
# self._index_group_with_subgroup(**kwargs)
while len(kwargs['uri_list']) > 0:
self._index_group_with_subgroup(**kwargs)
kwargs['uri_list'] = self.get_uri_list()
def _set_es_workers(self, **kwargs):
"""
Creates index worker instances for each class to index
kwargs:
-------
idx_only_base[bool]: True will only index the base class
"""
def make_es_worker(search_conn, es_index, es_doc_type, class_name):
"""
Returns a new es_worker instance
args:
-----
search_conn: the connection to elasticsearch
es_index: the name of the elasticsearch index
es_doc_type: the name of the elasticsearch doctype
class_name: name of the rdf class that is being indexed
"""
new_esbase = copy.copy(search_conn)
new_esbase.es_index = es_index
new_esbase.doc_type = es_doc_type
log.info("Indexing '%s' into ES index '%s' doctype '%s'",
class_name.pyuri,
es_index,
es_doc_type)
return new_esbase
def additional_indexers(rdf_class):
"""
returns additional classes to index based off of the es definitions
"""
rtn_list = rdf_class.es_indexers()
rtn_list.remove(rdf_class)
return rtn_list
self.es_worker = make_es_worker(self.search_conn,
self.es_index,
self.es_doc_type,
self.rdf_class.__name__)
if not kwargs.get("idx_only_base"):
self.other_indexers = {item.__name__: make_es_worker(
self.search_conn,
item.es_defs.get('kds_esIndex')[0],
item.es_defs.get('kds_esDocType')[0],
item.__name__)
for item in additional_indexers(self.rdf_class)}
else:
self.other_indexers = {}
def _index_sub(self, uri_list, num, batch_num):
"""
Converts a list of uris to elasticsearch json objects
args:
uri_list: list of uris to convert
num: the ending count within the batch
batch_num: the batch number
"""
bname = '%s-%s' % (batch_num, num)
log.debug("batch_num '%s' starting es_json conversion",
bname)
qry_data = get_all_item_data([item[0] for item in uri_list],
self.tstore_conn,
rdfclass=self.rdf_class)
log.debug("batch_num '%s-%s' query_complete | count: %s",
batch_num,
num,
len(qry_data))
# path = os.path.join(CFG.dirs.cache, "index_pre")
# if not os.path.exists(path):
# os.makedirs(path)
# with open(os.path.join(path, bname + ".json"), "w") as fo:
# fo.write(json.dumps(qry_data))
data = RdfDataset(qry_data)
del qry_data
log.debug("batch_num '%s-%s' RdfDataset Loaded", batch_num, num)
for value in uri_list:
try:
self.batch_data[batch_num]['main'].append(\
data[value[0]].es_json())
self.count += 1
except KeyError:
pass
for name, indexer in self.other_indexers.items():
for item in data.json_qry("$.:%s" % name.pyuri):
val = item.es_json()
if val:
self.batch_data[batch_num][name].append(val)
self.batch_uris[batch_num].append(item.subject)
del data
del uri_list
log.debug("batch_num '%s-%s' converted to es_json", batch_num, num)
def get_uri_list(self, **kwargs):
"""
Returns a list of Uris to index
"""
index_status_filter = """
optional {{ ?s dcterm:modified ?modTime }} .
optional {{ ?s kds:esIndexTime ?time }} .
optional {{ ?s kds:esIndexError ?error }}
filter (
!(bound(?time)) ||
?time<?modTime ||
(bound(?error) && ?time < {idx_start_time}))
""".format(idx_start_time=self.idx_start_time.sparql)
items_query_template = """
SELECT DISTINCT ?s ?es_id
{{
VALUES ?rdftypes {{\n\t\t{rdf_types} }} .
?s a ?rdftypes .
BIND(SHA1(STR(?s)) as ?es_id) .
{status_filter}
}}
{order_by}
"""
status_filter = index_status_filter \
if not kwargs.get("no_status") else ""
order_by = kwargs.get("order_by", "")
sparql = items_query_template.format(
rdf_types="\n\t\t".join(self.rdf_types),
status_filter=status_filter,
order_by=order_by)
results = [(Uri(item['s']['value']), item['es_id']['value'],)
for item in self.tstore_conn.query(sparql=sparql)]
return results #[:100]
def _index_group_with_subgroup(self, **kwargs):
""" indexes all the URIs defined by the query into Elasticsearch """
log.setLevel(self.log_level)
# get a list of all the uri to index
uri_list = kwargs.get('uri_list', self.get_uri_list())
if not uri_list:
log.info("0 items to index")
return
# results = results[:100]
# Start processing through uri
batch_file = os.path.join(CFG.dirs.logs, "batch_list.txt")
# with open(batch_file, "w") as fo:
# fo.write("{")
log.info("'%s' items to index", len(uri_list))
self.time_start = datetime.datetime.now()
batch_size = kwargs.get("batch_size", 12000)
if len(uri_list) > batch_size:
batch_end = batch_size
else:
batch_end = len(uri_list)
batch_start = 0
batch_num = 1
self.batch_data = {}
self.batch_data[batch_num] = {}
self.batch_data[batch_num]['main'] = []
self.batch_uris = {}
self.batch_uris[batch_num] = []
for name, indexer in self.other_indexers.items():
self.batch_data[batch_num][name] = []
end = False
last = False
final_list = []
expand_index = kwargs.get("expand_index", True)
while not end:
log.debug("batch %s: %s-%s", batch_num, batch_start, batch_end)
sub_batch = []
j = 0
for i in range(batch_start, batch_end):
# for i, subj in enumerate(uri_list[batch_start:batch_end]):
qry_size = kwargs.get("qry_size", 1000)
if j < qry_size:
try:
sub_batch.append(uri_list.pop()) #subj)
except IndexError:
pass
if j == qry_size -1 or i == batch_end - 1:
try:
sub_batch.append(uri_list.pop()) #subj)
except IndexError:
pass
# with open(batch_file, "a") as fo:
# fo.write(json.dumps({str('%s-%s' % (batch_num, i+1)):
# [item[0].sparql
# for item in sub_batch]})[1:-1]+",\n")
if not kwargs.get("no_threading", False):
th = threading.Thread(name=batch_start + i + 1,
target=self._index_sub,
args=(sub_batch,
i+1,
batch_num,))
th.start()
else:
self._index_sub(sub_batch, i+1, batch_num)
j = 0
final_list += sub_batch
sub_batch = []
else:
j += 1
log.debug(datetime.datetime.now() - self.time_start)
if not kwargs.get("no_threading", False):
main_thread = threading.main_thread()
for t in threading.enumerate():
if t is main_thread:
continue
t.join()
action_list = []
for key, items in self.batch_data[batch_num].items():
if key == 'main':
es_worker = self.es_worker
else:
es_worker = self.other_indexers[key]
action_list += es_worker.make_action_list(items)
result = self.es_worker.bulk_save(action_list)
final_list += self.batch_uris[batch_num]
self._update_triplestore(result, action_list)
del action_list
del self.batch_uris[batch_num]
del self.batch_data[batch_num]
try:
del pyrdf.memorized
pyrdf.memorized = {}
except AttributeError:
pass
while gc.collect() > 0:
pass
# pdb.set_trace()
batch_end += batch_size
batch_start += batch_size
if last:
end = True
if len(uri_list) <= batch_size:
batch_end = len(uri_list)
last = True
batch_num += 1
self.batch_uris[batch_num] = []
self.batch_data[batch_num] = {}
self.batch_data[batch_num]['main'] = []
for name, indexer in self.other_indexers.items():
self.batch_data[batch_num][name] = []
log.debug(datetime.datetime.now() - self.time_start)
# with open(batch_file, 'rb+') as fo:
# fo.seek(-2, os.SEEK_END)
# fo.truncate()
# # fo.close()
# fo.write("}".encode())
def _update_triplestore(self, es_result, action_list, **kwargs):
"""
updates the triplestore with success of saves and failures of indexing
Args:
-----
es_result: the elasticsearch result list
action_list: list of elasticsearch action items that were indexed
"""
idx_time = XsdDatetime(datetime.datetime.utcnow())
uri_keys = {}
bnode_keys = {}
for item in action_list:
try:
uri_keys[item['_id']] = item['_source']["uri"]
except KeyError:
bnode_keys[item['_id']] = item['_id']
error_dict = {}
error_bnodes = {}
if es_result[1]:
for result in es_result[1]:
err_item = list(result.values())[0]
try:
error_dict[uri_keys.pop(err_item['_id'])] = \
XsdString(err_item['error']['reason'])
except KeyError:
error_bnodes[bnode_keys.pop(err_item['_id'])] = \
XsdString(err_item['error']['reason'])
if uri_keys:
sparql_good = """
DELETE
{{
?s kds:esIndexTime ?esTime .
?s kds:esIndexError ?esError .
}}
INSERT
{{
GRAPH ?g {{ ?s kds:esIndexTime {idx_time} }}.
}}
WHERE
{{
VALUES ?s {{ {subj_list} }} .
{{
SELECT DISTINCT ?g ?s ?esTime ?esError
{{
GRAPH ?g {{ ?s ?p ?o }} .
OPTIONAL {{
?s kds:esIndexTime ?esTime
}}
OPTIONAL {{
?s kds:esIndexError ?esError
}}
}}
}}
}}
""".format(idx_time=idx_time.sparql,
subj_list="<%s>" % ">\n<".join(uri_keys.values()))
self.tstore_conn.update_query(sparql_good)
# Process any errors that were found.
if not error_dict:
return
# Delete all indexing triples related to the error subjects
sparql_error = """
DELETE
{{
?s kds:esIndexTime ?esTime .
?s kds:esIndexError ?esError .
}}
WHERE
{{
VALUES ?s {{ {subj_list} }} .
OPTIONAL {{
?s kds:esIndexTime ?esTime
}}
OPTIONAL {{
?s kds:esIndexError ?esError
}}
}}
""".format(subj_list="<%s>" % ">\n<".join(error_dict.keys()))
self.tstore_conn.update_query(sparql_error)
del sparql_error
sparql_update = """
INSERT
{{
GRAPH ?g {{
?s kds:esIndexTime {idx_time} .
?s kds:esIndexError ?esError .
}}
}}
WHERE
{{
VALUES (?s ?esError) {{ {error_list} }} .
{{
SELECT DISTINCT ?g ?s
{{
graph ?g {{?s ?p ?o}}
}}
}}
}}""".format(
idx_time=idx_time.sparql,
error_list="\n".join(["(<%s> %s)" % (key, val.sparql)
for key, val in error_dict.items()]))
# Create a turtle data stream of the new errors to upload into the
# triplestore
self.tstore_conn.update_query(sparql_update)
del sparql_update
def delete_idx_status(self, rdf_class):
"""
Removes all of the index status triples from the datastore
Args:
-----
rdf_class: The class of items to remove the status from
"""
sparql_template = """
DELETE
{{
?s kds:esIndexTime ?esTime .
?s kds:esIndexError ?esError .
}}
WHERE
{{
VALUES ?rdftypes {{\n\t\t{} }} .
?s a ?rdftypes .
OPTIONAL {{
?s kds:esIndexTime ?esTime
}}
OPTIONAL {{
?s kds:esIndexError ?esError
}}
FILTER(bound(?esTime)||bound(?esError))
}}
"""
rdf_types = [rdf_class.uri] + [item.uri
for item in rdf_class.subclasses]
sparql = sparql_template.format("\n\t\t".join(rdf_types))
log.warn("Deleting index status for %s", rdf_class.uri)
return self.tstore_conn.update_query(sparql)
def get_es_ids(self):
"""
reads all the elasticsearch ids for an index
"""
search = self.search.source(['uri']).sort(['uri'])
es_ids = [item.meta.id for item in search.scan()]
return es_ids
def validate_index(self, rdf_class):
"""
Will compare the triplestore and elasticsearch index to ensure that
elasticsearch and triplestore items match. elasticsearch records
that are not in the triplestore will be deleted
"""
es_ids = set(self.get_es_ids())
tstore_ids = set([item[1]
for item in self.get_uri_list(no_status=True)])
diff = es_ids - tstore_ids
if diff:
pdb.set_trace()
action_list = self.es_worker.make_action_list(diff,
action_type="delete")
results = self.es_worker.bulk_save(action_list)
|
class EsRdfBulkLoader(object):
''' Bulk loads data from the triplestore to elasticsearch '''
def __init__(self, rdf_class, tstore_conn, search_conn, **kwargs):
pass
def _set_es_workers(self, **kwargs):
'''
Creates index worker instances for each class to index
kwargs:
-------
idx_only_base[bool]: True will only index the base class
'''
pass
def make_es_worker(search_conn, es_index, es_doc_type, class_name):
'''
Returns a new es_worker instance
args:
-----
search_conn: the connection to elasticsearch
es_index: the name of the elasticsearch index
es_doc_type: the name of the elasticsearch doctype
class_name: name of the rdf class that is being indexed
'''
pass
def additional_indexers(rdf_class):
'''
returns additional classes to index based off of the es definitions
'''
pass
def _index_sub(self, uri_list, num, batch_num):
'''
Converts a list of uris to elasticsearch json objects
args:
uri_list: list of uris to convert
num: the ending count within the batch
batch_num: the batch number
'''
pass
def get_uri_list(self, **kwargs):
'''
Returns a list of Uris to index
'''
pass
def _index_group_with_subgroup(self, **kwargs):
''' indexes all the URIs defined by the query into Elasticsearch '''
pass
def _update_triplestore(self, es_result, action_list, **kwargs):
'''
updates the triplestore with success of saves and failures of indexing
Args:
-----
es_result: the elasticsearch result list
action_list: list of elasticsearch action items that were indexed
'''
pass
def delete_idx_status(self, rdf_class):
'''
Removes all of the index status triples from the datastore
Args:
-----
rdf_class: The class of items to remove the status from
'''
pass
def get_es_ids(self):
'''
reads all the elasticsearch ids for an index
'''
pass
def validate_index(self, rdf_class):
'''
Will compare the triplestore and elasticsearch index to ensure that
elasticsearch and triplestore items match. elasticsearch records
that are not in the triplestore will be deleted
'''
pass
| 12 | 11 | 42 | 1 | 32 | 8 | 4 | 0.24 | 1 | 14 | 5 | 0 | 9 | 14 | 9 | 9 | 451 | 25 | 345 | 84 | 333 | 84 | 208 | 84 | 196 | 21 | 1 | 4 | 49 |
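A simplified, standalone sketch of the chunking pattern used by _index_group_with_subgroup above: the uri list is consumed in batches (default 12,000) and each batch is split into query-sized sub-batches (default 1,000) that are handed to _index_sub, normally on separate threads. The uri values and counts below are invented.
uris = ["http://example.org/item/%s" % i for i in range(25000)]
batch_size, qry_size = 12000, 1000
batch_num = 0
while uris:
    batch, uris = uris[:batch_size], uris[batch_size:]
    batch_num += 1
    sub_batches = [batch[i:i + qry_size]
                   for i in range(0, len(batch), qry_size)]
    # each sub_batch would be passed to self._index_sub(sub_batch, i, batch_num)
    print("batch %s -> %s sub-batches" % (batch_num, len(sub_batches)))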
143,296 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/search/esloaders_temp.py
|
rdfframework.search.esloaders_temp.EsRdfBulkLoader
|
class EsRdfBulkLoader(object):
""" Bulk loads data from the triplestore to elasticsearch """
ln = "%s-EsRdfBulkLoader" % MNAME
log_level = logging.DEBUG
def __init__(self, rdf_class, namespace):
lg = logging.getLogger("%s.%s" % (self.ln, inspect.stack()[0][3]))
lg.setLevel(self.log_level)
lg.debug(" *** Started")
self.namespace = namespace
self.es_index = rdf_class.es_defs.get('kds_esIndex')[0]
self.es_doc_type = rdf_class.es_defs.get('kds_esDocType')[0]
self.es_worker = EsBase(es_index=self.es_index,
doc_type=self.es_doc_type)
self.rdf_class = rdf_class
self.query = """
SELECT DISTINCT ?s {{ ?s a {}. }}""".format(rdf_class.uri)
EsMappings().initialize_indices()
self.count = 0
self._index_group()
def _index_item(self, uri, num, batch_num):
''' queries the triplestore for an item and sends it to elasticsearch '''
data = RdfDataset(get_all_item_data(uri, self.namespace),
uri).base_class.es_json()
self.batch_data[batch_num].append(data)
self.count += 1
def _index_sub(self, uri_list, num, batch_num):
def run_query(uri_list):
item_list = ["BIND(<%s> as ?item) ." % item['s']['value'] \
for item in uri_list]
statement = "{\n%s\n}" % "\n} UNION {\n".join(item_list)
bulk_sparql = render_without_request(\
"sparqlAllItemDataTemplate_Bulk.rq",
bulk_list=statement,
prefix=NSM.prefix())
return run_sparql_query(bulk_sparql, self.namespace)
data = RdfDataset(run_query(uri_list))
for value in data.values():
if isinstance(value, self.rdf_class):
self.batch_data[batch_num].append(value.es_json())
self.count += 1
def _index_group_with_sub(self):
""" indexes all the URIs defined by the query into Elasticsearch """
lg = logging.getLogger("%s.%s" % (self.ln, inspect.stack()[0][3]))
lg.setLevel(self.log_level)
# get a list of all the uri to index
results = run_sparql_query(sparql=self.query, namespace=self.namespace)
# results = results[:100]
# Start processing through uri
self.time_start = datetime.datetime.now()
batch_size = 12000
if len(results) > batch_size:
batch_end = batch_size
else:
batch_end = len(results)
batch_start = 0
batch_num = 1
self.batch_data = {}
self.batch_data[batch_num] = []
end = False
last = False
while not end:
lg.debug("batch %s: %s-%s", batch_num, batch_start, batch_end)
sub_batch = []
j = 0
for i, subj in enumerate(results[batch_start:batch_end]):
qry_size = 50
if j < qry_size:
sub_batch.append(subj)
if j == qry_size or i == batch_end:
sub_batch.append(subj)
th = threading.Thread(name=batch_start + i + 1,
target=self._index_sub,
args=(sub_batch,
i+1,batch_num,))
th.start()
j = 0
sub_batch = []
lg.debug(datetime.datetime.now() - self.time_start)
main_thread = threading.main_thread()
for t in threading.enumerate():
if t is main_thread:
continue
t.join()
action_list = \
self.es_worker.make_action_list(self.batch_data[batch_num])
self.es_worker.bulk_save(action_list)
del self.batch_data[batch_num]
batch_end += batch_size
batch_start += batch_size
if last:
end = True
if len(results) <= batch_end:
batch_end = len(results)
last = True
batch_num += 1
self.batch_data[batch_num] = []
lg.debug(datetime.datetime.now() - self.time_start)
|
class EsRdfBulkLoader(object):
''' Bulk loads data from the triplestore to elasticsearch '''
def __init__(self, rdf_class, namespace):
pass
def _index_item(self, uri, num, batch_num):
''' queries the triplestore for an item and sends it to elasticsearch '''
pass
def _index_sub(self, uri_list, num, batch_num):
pass
def run_query(uri_list):
pass
def _index_group_with_sub(self):
''' indexes all the URIs defined by the query into Elasticsearch '''
pass
| 6 | 3 | 21 | 1 | 19 | 1 | 3 | 0.07 | 1 | 5 | 2 | 0 | 4 | 9 | 4 | 4 | 107 | 10 | 91 | 40 | 85 | 6 | 79 | 40 | 73 | 10 | 1 | 3 | 16 |
143,297 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/search/esmappings.py
|
rdfframework.search.esmappings.EsMappings
|
class EsMappings():
""" Class for manipulating elasticsearch mappings with the rdfframework
attributes:
"""
log_level = logging.INFO
es_mapping = None
# es_settings = None
def __init__(self, conn=None, **kwargs):
if not conn:
conn = CFG.conns.search
self.conn = conn
self.es_url = conn.es_url
self.es = conn.es
self.mapping_url = '{0}/_mapping'.format(self.es_url)
@staticmethod
def list_mapped_classes():
"""
Returns all the rdfclasses that have an associated elasticsearch
mapping
Args:
None
"""
cls_dict = {key: value
for key, value in MODULE.rdfclass.__dict__.items()
if not isinstance(value, RdfConfigManager)
and key not in ['properties']
and hasattr(value, 'es_defs')
and value.es_defs.get('kds_esIndex')}
new_dict = {}
# remove items that are appearing as a subclass of a main mapping class
# the intersection of the set of the cls_dict values and a class's
# individual hierarchy will be >1 if the class is a subclass of another
# class in the list
potential_maps = set([cls_.__name__ for cls_ in cls_dict.values()])
for name, cls_ in cls_dict.items():
parents = set(cls_.hierarchy)
if len(parents.intersection(potential_maps)) <= 1:
new_dict[name] = cls_
return new_dict
@classmethod
def list_indexes(cls):
"""
Returns a dictionary with the key as the es_index name and the
object is a list of rdfclasses for that index
args:
None
"""
cls_list = cls.list_mapped_classes()
rtn_obj = {}
for key, value in cls_list.items():
idx = value.es_defs.get('kds_esIndex')[0]
try:
rtn_obj[idx].append(value)
except KeyError:
rtn_obj[idx] = [value]
return rtn_obj
@classmethod
def get_rdf_es_idx_map(cls, idx_obj):
"""
Returns an elasticsearch mapping for the specified index based off
of the mapping defined by rdf class definitions
args:
idx_obj: Dictionary of the index and a list of rdfclasses
included in the mapping
"""
idx_name = list(idx_obj)[0]
es_map = {
"index": idx_name,
"body" : {
"mappings": {},
"settings": {
# "read_only_allow_delete": False,
"index": {
# "blocks" : {
# "read_only_allow_delete" : "false"
# },
"analysis": {
"analyzer": {
"keylower": {
"tokenizer": "keyword",
"type": "custom",
"filter": "lowercase",
"ignore_above" : 256
}
}
}
}
}
}
}
for idx_cls in idx_obj[idx_name]:
# pdb.set_trace()
es_map['body']['mappings'][idx_cls.es_defs['kds_esDocType'][0]] = \
{'properties': idx_cls.es_mapping(idx_cls)}
return es_map
def send_es_mapping(self, es_map, **kwargs):
"""
sends the mapping to elasticsearch
args:
es_map: dictionary of the index mapping
kwargs:
reset_idx: WARNING! If True the current referenced es index
will be deleted destroying all data in that index in
elasticsearch. if False an incremented index will be
created and data-migration will start from the old to
the new index
"""
log.setLevel(kwargs.get('log_level', self.log_level))
def next_es_index_version(curr_alias):
""" returns the next number for a new index
args:
alias_def: the dictionary returned by es for get alias
"""
try:
alias_def = self.es.indices.get_alias(alias)
except es_except.NotFoundError:
alias_def = {alias + "_v0":{}}
old_idx = list(alias_def)[0]
parts = old_idx.split("_v")
try:
parts[1] = str(int(parts[1]) + 1)
except IndexError:
parts = [old_idx,'1']
return {'old': old_idx, 'new': "_v".join(parts)}
reset_idx= kwargs.get('reset_idx', False)
alias = es_map.pop('index')
idx_names = next_es_index_version(alias)
# Delete if the index series if reset_idx was passed
if reset_idx:
log.warning("DELETING Elasticsearch INDEX => %s ******", alias)
self.es.indices.delete(index=alias + "_v*", ignore=[400, 404])
idx_names['new'] = alias + "_v1"
# Create the new index and apply the mapping
self.es.indices.create(index=idx_names['new'],
body=es_map['body'],
update_all_types=True)
# if the index was not deleted transfer documents from old to the
# new index
if not reset_idx and self.es.indices.exists(idx_names['old']):
url = os.path.join(self.es_url,'_reindex').replace('\\','/')
data = {"source":{"index": idx_names['old']},
"dest":{"index": idx_names['new']}}
# Python elasticsearch recommends using a direct call to the
# es 5+ _reindex URL vice using their helper.
result = requests.post(url,
headers={'Content-Type':'application/json'},
data = json.dumps(data))
self.es.indices.delete_alias(index=idx_names['old'],
name=alias,
ignore=[403])
self.es.indices.delete(index=idx_names['old'], ignore=[400, 404])
# add the alias to the new index
self.es.indices.put_alias(index=idx_names['new'], name=alias)
def initialize_indices(self, **kwargs):
"""
creates all the indices that are defined in the rdf definitions
kwargs:
action: which action is to be performed
initialize: (default) tests to see if the index exists
if not creates it
reset: deletes all of the indexes and recreate them
update: starts a mapping update and reindexing process
"""
action = kwargs.get('action', 'initialize')
if action == 'update':
kwargs['reset_idx'] = False
elif action =='reset':
kwargs['reset_idx'] = True
idx_list = self.list_indexes()
for idx, values in idx_list.items():
if (action == 'initialize' and not self.es.indices.exists(idx)) \
or action != 'initialize':
self.send_es_mapping(self.get_rdf_es_idx_map({idx: values}),
**kwargs)
def get_es_mappings(self):
"""
Returns the mapping definitions present in elasticsearch
"""
es_mappings = json.loads(requests.get(self.mapping_url).text)
es_mappings = {"_".join(key.split("_")[:-1]): value['mappings'] \
for key, value in es_mappings.items()}
return es_mappings
def mapping_ref(self, es_mappings):
"""
Returns a dictionary of mappings and the field names in dot notation
args:
mappings: es mapping definitions to parse
"""
new_map = {}
for key, value in es_mappings.items():
for sub_key, sub_value in value.items():
new_map["/".join([key, sub_key])] = \
mapping_fields(sub_value['properties'])
return new_map
def key_data_map(source, mapping, parent=[]):
rtn_obj = {}
if isinstance(source, dict):
for key, value in source.items():
new_key = parent + [key]
new_key = ".".join(new_key)
rtn_obj.update({new_key: {'mapping':mapping.get(new_key)}})
if isinstance(value, list):
value = value[0]
rtn_obj.update(key_data_map(value, mapping, [new_key]))
if isinstance(value, dict):
rtn_obj[new_key]['data'] = "%s ...}" % str(value)[:60]
elif isinstance(value, dict):
rtn_obj.update(key_data_map(value, mapping, [new_key]))
rtn_obj[new_key]['data'] = "%s ...}" % str(value)[:60]
else:
rtn_obj[new_key]['data'] = value
elif isinstance(source, list):
rtn_obj.update(key_data_map(source[0], mapping, parent))
else:
rtn_obj = {"".join(parent): {'data':source,
'mapping':mapping.get("".join(parent))}}
# pdb.set_trace()
return rtn_obj
def sample_data_convert(es_url, data, es_index, doc_type):
maps = self.mapping_ref(self.get_es_mappings())
if data.get('hits'):
new_data = data['hits']['hits'][0]['_source']
elif data.get('_source'):
new_data = data['_source']
conv_data = key_data_map(new_data, maps["%s/%s" % (es_index, doc_type)])
conv_data = [(key, str(value['mapping']), str(value['data']),) \
for key, value in conv_data.items()]
conv_data.sort(key=lambda tup: es_field_sort(tup[0]))
return conv_data
def sample_data_map(es_url):
maps = mapping_ref(es_url)
rtn_obj = {}
for path, mapping in maps.items():
url = "/".join(["{}:9200".format(es_url), path, '_search'])
sample_data = json.loads(requests.get(url).text)
sample_data = sample_data['hits']['hits'][0]['_source']
conv_data = key_data_map(sample_data, mapping)
rtn_obj[path] = [(key, str(value['mapping']), str(value['data']),) \
for key, value in conv_data.items()]
rtn_obj[path].sort(key=lambda tup: es_field_sort(tup[0]))
return rtn_obj
def es_field_sort(fld_name):
""" Used with lambda to sort fields """
parts = fld_name.split(".")
if "_" not in parts[-1]:
parts[-1] = "_" + parts[-1]
return ".".join(parts)
|
class EsMappings():
''' Class for manipulating elasticsearch mappings with the rdfframework
attributes:
'''
def __init__(self, conn=None, **kwargs):
pass
@staticmethod
def list_mapped_classes():
'''
Returns all the rdfclasses that have an associated elasticsearch
mapping
Args:
None
'''
pass
@classmethod
def list_indexes(cls):
'''
Returns a dictionary with the key as the es_index name and the
object is a list of rdfclasses for that index
args:
None
'''
pass
@classmethod
def get_rdf_es_idx_map(cls, idx_obj):
'''
Returns an elasticsearch mapping for the specified index based off
of the mapping defined by rdf class definitions
args:
idx_obj: Dictionary of the index and a list of rdfclasses
included in the mapping
'''
pass
def send_es_mapping(self, es_map, **kwargs):
'''
sends the mapping to elasticsearch
args:
es_map: dictionary of the index mapping
kwargs:
reset_idx: WARNING! If True the current referenced es index
will be deleted destroying all data in that index in
elasticsearch. if False an incremented index will be
created and data-migration will start from the old to
the new index
'''
pass
def next_es_index_version(curr_alias):
''' returns the next number for a new index
args:
alias_def: the dictionary returned by es for get alias
'''
pass
def initialize_indices(self, **kwargs):
'''
creates all the indices that are defined in the rdf definitions
kwargs:
action: which action is to be performed
initialize: (default) tests to see if the index exists
if not creates it
reset: deletes all of the indexes and recreate them
update: starts a mapping update and reindexing process
'''
pass
def get_es_mappings(self):
'''
Returns the mapping definitions present in elasticsearch
'''
pass
def mapping_ref(self, es_mappings):
'''
Returns a dictionary of mappings and the field names in dot notation
args:
mappings: es mapping definitions to parse
'''
pass
def key_data_map(source, mapping, parent=[]):
pass
def sample_data_convert(es_url, data, es_index, doc_type):
pass
def sample_data_map(es_url):
pass
def es_field_sort(fld_name):
''' Used with lambda to sort fields '''
pass
| 17 | 10 | 21 | 2 | 14 | 6 | 3 | 0.42 | 0 | 8 | 1 | 0 | 9 | 4 | 12 | 12 | 286 | 39 | 174 | 64 | 157 | 73 | 126 | 61 | 112 | 7 | 0 | 4 | 39 |
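A standalone sketch of the index-versioning convention implemented by next_es_index_version above: an alias such as 'catalog' points at a versioned index name and every remap increments the '_v' suffix. The function and index names here are illustrative only.
def bump_index_version(old_idx):
    parts = old_idx.split("_v")
    try:
        parts[1] = str(int(parts[1]) + 1)
    except IndexError:
        parts = [old_idx, '1']
    return {'old': old_idx, 'new': "_v".join(parts)}
print(bump_index_version("catalog_v3"))   # {'old': 'catalog_v3', 'new': 'catalog_v4'}
print(bump_index_version("catalog"))      # {'old': 'catalog', 'new': 'catalog_v1'}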
143,298 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/utilities/baseutilities.py
|
rdfframework.utilities.baseutilities.DictClass
|
class DictClass(metaclass=DictClassMeta):
''' takes a dictionary and converts it to a class '''
__reserved = RESERVED_KEYS
__type = 'DictClass'
def __init__(self, obj=None, start=True, debug=False):
if obj and start:
for attr in dir(obj):
if not attr.startswith('_') and attr not in self.__reserved:
setattr(self, attr, getattr(obj,attr))
def __getattr__(self, attr):
return None
def __getitem__(self, item):
item = str(item)
if hasattr(self, item):
return getattr(self, item)
else:
return None
def __setitem__(self, attr, value):
self.__setattr__(attr, value)
def __str__(self):
return str(self.dict())
def __repr__(self):
return "DictClass(\n%s\n)" % pprint.pformat(self.dict())
def __setattr__(self, attr, value):
if isinstance(value, dict) or isinstance(value, list):
value = DictClass(value)
self.__dict__[attr] = value
def dict(self):
""" converts the class to a dictionary object """
return_obj = {}
for attr in dir(self):
if not attr.startswith('__') and attr not in self.__reserved:
if isinstance(getattr(self, attr), list):
return_val = []
for item in getattr(self, attr):
if isinstance(item, DictClass):
return_val.append(dict(item))
else:
return_val.append(item)
elif isinstance(getattr(self, attr), dict):
return_val = {}
for key, item in getattr(self, attr).items():
if isinstance(item, DictClass):
return_val[key] = item.dict()
else:
return_val[key] = item
elif isinstance(getattr(self, attr), DictClass):
return_val = getattr(self, attr).dict()
else:
return_val = getattr(self, attr)
return_obj[attr] = return_val
return return_obj
def get(self, attr, none_val=None, strict=False):
if attr in self.keys():
if strict and self[attr] is None:
return none_val
else:
return getattr(self, attr)
else:
return none_val
def keys(self):
return [attr for attr in dir(self) if not attr.startswith("__") and \
attr not in self.__reserved]
def values(self):
return [getattr(self, attr) for attr in dir(self) if not attr.startswith("__") and \
attr not in self.__reserved]
def items(self):
return_list = []
for attr in dir(self):
if not attr.startswith("__") and attr not in self.__reserved:
return_list.append((attr, getattr(self, attr)))
return return_list
|
class DictClass(metaclass=DictClassMeta):
''' takes a dictionary and converts it to a class '''
def __init__(self, obj=None, start=True, debug=False):
pass
def __getattr__(self, attr):
pass
def __getitem__(self, item):
pass
def __setitem__(self, attr, value):
pass
def __str__(self):
pass
def __repr__(self):
pass
def __setattr__(self, attr, value):
pass
def dict(self):
''' converts the class to a dictionary object '''
pass
def get(self, attr, none_val=None, strict=False):
pass
def keys(self):
pass
def values(self):
pass
def items(self):
pass
| 13 | 2 | 6 | 0 | 6 | 0 | 3 | 0.03 | 1 | 3 | 0 | 0 | 12 | 0 | 12 | 26 | 85 | 13 | 70 | 23 | 57 | 2 | 60 | 23 | 47 | 10 | 3 | 5 | 30 |
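A hypothetical usage sketch of the DictClass record above, assuming the make_class helper referenced by DictClassMeta (not included in this record) converts a plain nested dictionary as the docstring suggests; the configuration values are made up.
cfg = DictClass({"server": {"host": "localhost", "port": 9200},
                 "debug": True})
print(cfg.server.host)    # 'localhost' -- nested dicts become attributes
print(cfg["debug"])       # True        -- item access falls back to getattr
print(cfg.missing)        # None        -- unknown attributes return None
round_trip = cfg.dict()   # back to a plain nested dictionary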
143,299 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/utilities/baseutilities.py
|
rdfframework.utilities.baseutilities.DictClassMeta
|
class DictClassMeta(type):
""" Used to handle list generation """
def __call__(cls, *args, **kwargs):
new_class = False
if len(args) > 0:
new_class = make_class(args[0], kwargs.get('debug',False))
if new_class and isinstance(new_class, list):
return new_class
elif len(args) > 0:
vals = list(args)
vals[0] = new_class
vals = tuple(vals)
else:
vals = args
return super().__call__(*vals, **kwargs)
|
class DictClassMeta(type):
''' Used to handle list generation '''
def __call__(cls, *args, **kwargs):
pass
| 2 | 1 | 14 | 1 | 13 | 0 | 4 | 0.07 | 1 | 3 | 0 | 1 | 1 | 0 | 1 | 14 | 17 | 2 | 14 | 4 | 12 | 1 | 12 | 4 | 10 | 4 | 2 | 1 | 4 |
143,300 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/utilities/baseutilities.py
|
rdfframework.utilities.baseutilities.Dot
|
class Dot(object):
""" Takes a dictionary and gets and sets values via a "." dot notation
of the path
args:
dictionary: The dictionary object
copy_dict: Boolean - True - (default) does a deepcopy of the dictionary
before returning. False - manipulates the passed in dictionary
"""
def __init__(self, dictionary, copy_dict=True):
self.obj = dictionary
self.new_dict = {}
self.copy_dict = copy_dict
def get(self, prop):
""" get the value off the passed in dot notation
args:
prop: a string of the property to retrieve
"a.b.c" ~ dictionary['a']['b']['c']
"""
prop_parts = prop.split(".")
val = None
for part in prop_parts:
if val is None:
val = self.obj.get(part)
else:
val = val.get(part)
return val
def set(self, prop, value):
""" sets the dot notated property to the passed in value
args:
prop: a string of the property to retrieve
"a.b.c" ~ dictionary['a']['b']['c']
value: the value to set the prop object
"""
prop_parts = prop.split(".")
if self.copy_dict:
new_dict = copy.deepcopy(self.obj)
else:
new_dict = self.obj
pointer = None
parts_length = len(prop_parts) - 1
for i, part in enumerate(prop_parts):
if pointer is None and i == parts_length:
new_dict[part] = value
elif pointer is None:
pointer = new_dict.get(part)
elif i == parts_length:
pointer[part] = value
else:
pointer = pointer.get(part)
return new_dict
|
class Dot(object):
''' Takes a dictionary and gets and sets values via a "." dot notation
of the path
args:
dictionary: The dictionary object
copy_dict: Boolean - True - (default) does a deepcopy of the dictionary
before returning. False - manipulates the passed in dictionary
'''
def __init__(self, dictionary, copy_dict=True):
pass
def get(self, prop):
''' get the value off the passed in dot notation
args:
prop: a string of the property to retrieve
"a.b.c" ~ dictionary['a']['b']['c']
'''
pass
def set(self, prop, value):
''' sets the dot notated property to the passed in value
args:
prop: a string of the property to retrieve
"a.b.c" ~ dictionary['a']['b']['c']
value: the value to set the prop object
'''
pass
| 4 | 3 | 15 | 1 | 10 | 4 | 3 | 0.56 | 1 | 1 | 0 | 0 | 3 | 3 | 3 | 3 | 57 | 7 | 32 | 15 | 28 | 18 | 27 | 15 | 23 | 6 | 1 | 2 | 10 |
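A usage sketch of the Dot class above; the sample dictionary is made up.
record = {"make": {"id": "m-1", "name": "Acme"}, "model": "X200"}
print(Dot(record).get("make.name"))           # 'Acme'
updated = Dot(record).set("make.id", "m-2")   # returns a deep copy by default
print(record["make"]["id"], updated["make"]["id"])   # 'm-1' 'm-2'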
143,301 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/rdfclass/rdffactories.py
|
rdfframework.rdfclass.rdffactories.RdfPropertyFactory
|
class RdfPropertyFactory(RdfBaseFactory):
""" Extends RdfBaseFactory for property creation specific querying """
cache_file = "properties.json"
def __init__(self, conn, reset=False, nsm=NSM, cfg=CFG):
sparql_template = "sparqlDefinitionPropertiesAll.rq"
super().__init__(conn, sparql_template, reset, nsm, cfg)
def make(self):
""" reads through the definitions and generates an python class for each
definition """
log.setLevel(self.log_level)
created = []
prop_list = [item for item in self.defs if item.type == 'uri']
log.debug(" creating properties ... ")
for prop in prop_list:
make_property(self.defs[prop], prop, [])
log.info(" property count: %s", len(prop_list))
|
class RdfPropertyFactory(RdfBaseFactory):
''' Extends RdfBaseFactory for property creation specific querying '''
def __init__(self, conn, reset=False, nsm=NSM, cfg=CFG):
pass
def make(self):
''' reads through the definitions and generates a Python class for each
definition '''
pass
| 3 | 2 | 7 | 0 | 6 | 1 | 2 | 0.23 | 1 | 1 | 0 | 0 | 2 | 0 | 2 | 6 | 19 | 3 | 13 | 8 | 10 | 3 | 13 | 8 | 10 | 2 | 2 | 1 | 3 |
143,302 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/utilities/baseutilities.py
|
rdfframework.utilities.baseutilities.DummyLogger
|
class DummyLogger():
@classmethod
def no_call(*args, **kwargs):
pass
debug = no_call
info = no_call
warn = no_call
warning = no_call
setLevel = no_call
level = no_call
def __getattr__(*args, **kwargs):
return DummyLogger.no_call
|
class DummyLogger():
@classmethod
def no_call(*args, **kwargs):
pass
def __getattr__(*args, **kwargs):
pass
| 4 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 2 | 2 | 14 | 2 | 12 | 10 | 8 | 0 | 11 | 9 | 8 | 1 | 0 | 0 | 2 |
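A usage sketch of the DummyLogger record above: it can stand in for a real logger wherever log output should be silently discarded.
lg = DummyLogger()
lg.debug("never emitted")   # every logging call is a no-op
lg.setLevel("DEBUG")        # level changes are ignored as well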
143,303 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/rml/processor.py
|
rdfframework.rml.processor.SPARQLBatchProcessor
|
class SPARQLBatchProcessor(Processor):
"""Class batches all triple_maps queries into a single SPARQL query
in an attempt to reduce the time spent in the triplestore/network
bottleneck"""
rdf_name = Uri("kdr:RmlSPARQLBatchProcessor")
def __init__(self, rml_rules, triplestore_url=None, triplestore=None, **kwargs):
super(SPARQLBatchProcessor, self).__init__(rml_rules, **kwargs)
__set_prefix__()
if triplestore_url is not None:
self.triplestore_url = triplestore_url
elif triplestore is not None:
self.triplestore = triplestore
def __get_bindings__(self, sparql):
bindings = []
if self.triplestore_url is not None:
result = requests.post(
self.triplestore_url,
data={"query": sparql,
"format": "json"})
bindings = result.json().get("results").get("bindings")
elif self.triplestore is not None:
result = self.triplestore.query(sparql)
bindings = result.bindings
return bindings
def __construct_compound_query__(self, triple_map):
select_clause = PREFIX + """
SELECT"""
where_clause = """
WHERE {{"""
for pred_map in triple_map.predicateObjectMap:
if pred_map.constant is not None or\
pred_map.reference is not None:
continue
#if pred_obj_map.parentTriplesMap is not None:
# self.__handle_parents__(
# parent_map=pred_obj_map.parentTriplesMap,
# subject=entity,
# predicate=predicate,
# **kwargs)
# continue
select_line = pred_map.query.splitlines()[0]
for term in select_line.split():
if term.startswith("?") and term not in select_clause:
select_clause += " {}".format(term)
where_clause += "\nOPTIONAL{{\n\t" +\
pred_map.query +\
"\n}}\n"
return select_clause + where_clause + "}}"
def run(self, **kwargs):
kwargs['output'] = self.__graph__()
super(SPARQLBatchProcessor, self).run(**kwargs)
self.output = kwargs['output']
return kwargs['output']
def execute(self, triple_map, output, **kwargs):
"""Method iterates through triple map's predicate object maps
and processes query.
Args:
triple_map(SimpleNamespace): Triple Map
"""
sparql = PREFIX + triple_map.logicalSource.query.format(
**kwargs)
bindings = self.__get_bindings__(sparql)
iterator = str(triple_map.logicalSource.iterator)
for binding in bindings:
entity_dict = binding.get(iterator)
if isinstance(entity_dict, rdflib.term.Node):
entity = entity_dict
elif isinstance(entity_dict, dict):
raw_value = entity_dict.get('value')
if entity_dict.get('type').startswith('bnode'):
entity = rdflib.BNode(raw_value)
else:
entity = rdflib.URIRef(raw_value)
if triple_map.subjectMap.class_ is not None:
output.add(
(entity,
rdflib.RDF.type,
triple_map.subjectMap.class_))
sparql_query = self.__construct_compound_query__(
triple_map).format(**kwargs)
properties = self.__get_bindings__(sparql_query)
for pred_obj_map in triple_map.predicateObjectMap:
predicate = pred_obj_map.predicate
if pred_obj_map.constant is not None:
output.add(
(entity, predicate, pred_obj_map.constant))
continue
if "#" in str(predicate):
key = str(predicate).split("#")[-1]
else:
key = str(predicate).split("/")[-1]
for property_ in properties:
if key in property_.keys():
info = {"about": property_.get(key)}
object_ = __get_object__(info)
output.add((entity, predicate, object_))
|
class SPARQLBatchProcessor(Processor):
'''Class batches all triple_maps queries into a single SPARQL query
in an attempt to reduce the time spent in the triplestore/network
bottleneck'''
def __init__(self, rml_rules, triplestore_url=None, triplestore=None, **kwargs):
pass
def __get_bindings__(self, sparql):
pass
def __construct_compound_query__(self, triple_map):
pass
def run(self, **kwargs):
pass
def execute(self, triple_map, output, **kwargs):
'''Method iterates through triple map's predicate object maps
and processes query.
Args:
triple_map(SimpleNamespace): Triple Map
'''
pass
| 6 | 2 | 19 | 1 | 16 | 3 | 5 | 0.21 | 1 | 3 | 0 | 0 | 5 | 3 | 5 | 42 | 107 | 11 | 81 | 32 | 75 | 17 | 62 | 32 | 56 | 11 | 4 | 4 | 23 |
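A standalone sketch of the predicate-to-binding-key convention used in execute() above: the local name of the predicate URI (after '#', otherwise after the last '/') is the variable name expected back in the SPARQL bindings. The function name and URIs below are illustrative.
def binding_key(predicate_uri):
    if "#" in predicate_uri:
        return predicate_uri.split("#")[-1]
    return predicate_uri.split("/")[-1]
print(binding_key("http://schema.org/name"))                       # 'name'
print(binding_key("http://www.w3.org/2000/01/rdf-schema#label"))   # 'label'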
143,304 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/rml/processor.py
|
rdfframework.rml.processor.Processor
|
class Processor(object, metaclass=KeyRegistryMeta):
__required_idx_attrs__ = {'rdf_name', '__name__'}
"""
Base class for RDF Mapping Language Processors, child classes
encapsulate different types of Data sources
Attributes:
rml: Graph of RML rules
ext_conn: Triplestore connection used for querying and saving data
use_json_qry: If the RML mapping has the json mapping that will be
used instead of the full SPARQL query to the database
ds: RdfDataset used for json_qry of the data
"""
def __init__(self, rml_rules, **kwargs):
# log.setLevel(logging.DEBUG)
self.__setup_conn__(**kwargs)
self.use_json_qry = False
self.default_use_json_qry = False
self.rml = rdflib.Graph()
if isinstance(rml_rules, list):
for rule in rml_rules:
# First check if rule exists on the filesystem
# pdb.set_trace()
if os.path.exists(rule):
with open(rule) as file_obj:
raw_rule = file_obj.read()
else:
raw_rule = RML_MGR.get_rml(rule, 'data').decode()
self.rml.parse(data=raw_rule,
format='turtle')
elif isinstance(rml_rules, (rdflib.Graph, rdflib.ConjunctiveGraph)):
self.rml = rml_rules
elif os.path.exists(rml_rules):
self.rml.parse(rml_rules, format='turtle')
else:
self.rml.parse(data=RML_MGR.get_rml(rml_rules).decode(),
format='turtle')
# Populate Namespaces Manager
for prefix, namespace in self.rml.namespaces():
NS_MGR.bind(prefix, namespace, ignore_errors=True)
self.source, self.triplestore_url = None, None
self.parents = set()
self.constants = dict(version=kwargs.get("version", "Not Defined"))
self.triple_maps = dict()
for row in self.rml.query(GET_TRIPLE_MAPS):
triple_map_iri = row[0]
map_key = str(triple_map_iri)
self.triple_maps[map_key] = SimpleNamespace()
self.triple_maps[map_key].logicalSource = \
self.__logical_source__(triple_map_iri)
self.triple_maps[map_key].subjectMap = \
self.__subject_map__(triple_map_iri)
self.triple_maps[map_key].predicateObjectMap = \
self.__predicate_object_map__(triple_map_iri)
self.set_context()
self.set_list_predicates()
def __graph__(self):
"""
Method returns a new graph with all of the namespaces in
RML graph
"""
graph = rdflib.Graph()
for prefix, name in self.rml.namespaces():
graph.namespace_manager.bind(prefix, name)
return graph
def __setup_conn__(self, **kwargs):
"""
Sets the ext_conn based on the kwargs.
returns a triplestore connection based on the kwargs.
Order of precedence is as follows:
kwargs['conn']
kwargs['tstore_def']
kwargs['triplestore_url']
kwargs['rdflib']
RdfConfigManager.data_tstore
RdfConfigManager.TRIPLESTORE_URL
kwargs:
conn: established triplestore connection object
tstore_def: dictionary of parameters specifying the connection as
outlined in the config file
triplestore_url: url to a triplestore. A blazegraph connection
will be used if specified
rdflib: definition for an rdflib connection
"""
self.ext_conn = setup_conn(**kwargs)
def __generate_delimited_objects__(self, output, **kwargs):
"""
Internal method takes a subject, predicate, element, and a list
of delimiters that are applied to the element's text; a triple
for each value is created and associated with the subject.
Keyword Args:
-------------
triple_map: SimpleNamespace
predicate: URIRef
element: XML Element
datatype: XSD Datatype, optional
delimiters: List of delimiters to apply to string
"""
triple_map = kwargs.get("triple_map")
subject = kwargs.get('subject')
# Subject is blank-node, try to retrieve subject IRI
predicate = kwargs.get('predicate')
element = kwargs.get('element')
datatype = kwargs.get('datatype', None)
delimiters = kwargs.get('delimiters')
subjects = []
for delimiter in delimiters:
values = element.text.split(delimiter)
for row in values:
if datatype is not None:
obj_ = rdflib.Literal(row.strip(), datatype=datatype)
else:
obj_ = rdflib.Literal(row.strip())
if isinstance(subject, rdflib.BNode):
new_subject = rdflib.BNode()
class_ = triple_map.subjectMap.class_
output.add((new_subject, NS_MGR.rdf.type.rdflib, class_))
for parent_subject, parent_predicate in \
output.subject_predicates(object=subject):
output.add((parent_subject,
parent_predicate,
new_subject))
else:
new_subject = subject
subjects.append(new_subject)
output.add((new_subject, predicate, obj_))
return subjects
def __generate_reference__(self, triple_map, **kwargs):
"""Placeholder method, should be extended by child classes
Args:
-----
triple_map: SimpleNamespace
Keyword Args:
-------------
"""
pass
def __generate_object_term__(self, datatype, value):
"""Internal method takes a datatype (can be None) and returns
the RDF Object Term
Args:
-----
datatype: None, or rdflib.URIRef
value: Varies depending on ingester
"""
if datatype == NS_MGR.xsd.anyURI.rdflib:
term = rdflib.URIRef(value)
elif datatype:
term = rdflib.Literal(value, datatype=datatype)
else:
term = rdflib.Literal(value)
return term
def __handle_parents__(self, output, **kwargs):
"""Internal method handles parentTriplesMaps
Keyword args:
-------------
parent_map: SimpleNamespace of ParentTriplesMap
subject: rdflib.URIRef or rdflib.BNode
predicate: rdflib.URIRef
"""
parent_map = kwargs.pop("parent_map")
subject = kwargs.pop('subject')
predicate = kwargs.pop('predicate')
parent_objects = self.execute(
self.triple_maps[str(parent_map)],
output,
**kwargs)
for parent_obj in parent_objects:
if isinstance(parent_obj, BaseRdfDataType):
parent_obj = parent_obj.rdflib
if parent_obj == subject:
continue
output.add((
subject,
predicate,
parent_obj))
def __logical_source__(self, map_iri):
"""Creates a SimpleNamespace for the TripelMap's logicalSource
Args:
-----
map_iri: URIRef
"""
# pdb.set_trace()
logical_source = SimpleNamespace()
logical_src_bnode = self.rml.value(
subject=map_iri,
predicate=NS_MGR.rml.logicalSource.rdflib)
if logical_src_bnode is None:
return
logical_source.source = self.rml.value(
subject=logical_src_bnode,
predicate=NS_MGR.rml.source.rdflib)
logical_source.reference_formulations = [r for r in self.rml.objects(
subject=logical_src_bnode,
predicate=NS_MGR.rml.referenceFormulation.rdflib)]
logical_source.iterator = self.rml.value(
subject=logical_src_bnode,
predicate=NS_MGR.rml.iterator.rdflib)
query = self.rml.value(
subject=logical_src_bnode,
predicate=NS_MGR.rml.query.rdflib)
json_query = self.rml.value(
subject=logical_src_bnode,
predicate=NS_MGR.rml.reference.rdflib)
json_key = self.rml.value(
subject=logical_src_bnode,
predicate=NS_MGR.rml.key.rdflib)
if query is not None:
logical_source.query = query
if json_query is not None:
self.use_json_qry = True
self.default_use_json_qry = True
logical_source.json_query = json_query
logical_source.json_key = json_key
return logical_source
def __subject_map__(self, map_iri):
"""Creates a SimpleNamespace for the TripleMap's subjectMap and
populates properties from the RML RDF graph
Args:
-----
map_iri: rdflib.URIRef,TripleMap IRI
Returns:
--------
SimpleNamespace
"""
subject_map = SimpleNamespace()
subject_map_bnode = self.rml.value(
subject=map_iri,
predicate=NS_MGR.rr.subjectMap.rdflib)
if subject_map_bnode is None:
return
#! Should look at supporting multiple rr:class definitions
subject_map.class_ = self.rml.value(
subject=subject_map_bnode,
predicate=getattr(NS_MGR.rr, "class").rdflib)
subject_map.template = self.rml.value(
subject=subject_map_bnode,
predicate=NS_MGR.rr.template.rdflib)
subject_map.termType = self.rml.value(
subject=subject_map_bnode,
predicate=NS_MGR.rr.termType.rdflib)
subject_map.deduplicate = self.rml.value(
subject=subject_map_bnode,
predicate=NS_MGR.kds.deduplicate.rdflib)
subject_map.reference = self.rml.value(
subject=subject_map_bnode,
predicate=NS_MGR.rr.reference.rdflib)
return subject_map
def __predicate_object_map__(self, map_iri):
"""Iterates through rr:predicateObjectMaps for this TripleMap
creating a SimpleNamespace for each triple map and assigning the
constant, template, parentTripleMap, reference as properties.
Args:
-----
map_iri: rdflib.URIRef, TripleMap IRI
Returns:
--------
list: List of predicate_object Namespace objects
"""
pred_obj_maps = []
for pred_obj_map_bnode in self.rml.objects(
subject=map_iri,
predicate=NS_MGR.rr.predicateObjectMap.rdflib):
pred_obj_map = SimpleNamespace()
pred_obj_map.predicate = self.rml.value(
subject=pred_obj_map_bnode,
predicate=NS_MGR.rr.predicate.rdflib)
obj_map_bnode = self.rml.value(
subject=pred_obj_map_bnode,
predicate=NS_MGR.rr.objectMap.rdflib)
if obj_map_bnode is None:
continue
pred_obj_map.constant = self.rml.value(
subject=obj_map_bnode,
predicate=NS_MGR.rr.constant.rdflib)
pred_obj_map.template = self.rml.value(
subject=obj_map_bnode,
predicate=NS_MGR.rr.template.rdflib)
pred_obj_map.parentTriplesMap = self.rml.value(
subject=obj_map_bnode,
predicate=NS_MGR.rr.parentTriplesMap.rdflib)
if pred_obj_map.parentTriplesMap is not None:
self.parents.add(str(pred_obj_map.parentTriplesMap))
pred_obj_map.reference = self.rml.value(
subject=obj_map_bnode,
predicate=NS_MGR.rr.reference.rdflib)
pred_obj_map.datatype = self.rml.value(
subject=obj_map_bnode,
predicate=NS_MGR.rr.datatype.rdflib)
pred_obj_map.query = self.rml.value(
subject=obj_map_bnode,
predicate=NS_MGR.rml.query.rdflib)
pred_obj_map.json_query = self.rml.value(
subject=obj_map_bnode,
predicate=NS_MGR.rml.reference.rdflib)
json_key = None
if hasattr(self.triple_maps[str(map_iri)].logicalSource,
'json_key'):
json_key = self.triple_maps[str(map_iri)].logicalSource.json_key
pred_obj_map.json_key = pick(self.rml.value(
subject=obj_map_bnode,
predicate=NS_MGR.rml.key.rdflib),
json_key)
# BIBCAT Extensions
pred_obj_map.delimiters = []
if pred_obj_map.json_query:
self.use_json_qry = True
for obj in self.rml.objects(subject=obj_map_bnode,
predicate=NS_MGR.kds.delimiter.rdflib):
pred_obj_map.delimiters.append(obj)
pred_obj_maps.append(pred_obj_map)
return pred_obj_maps
def add_to_triplestore(self, output):
"""Method attempts to add output to Blazegraph RDF Triplestore"""
if len(output) > 0:
result = self.ext_conn.load_data(data=output.serialize(),
datatype='rdf')
def generate_term(self, **kwargs):
"""Method generates a rdflib.Term based on kwargs"""
term_map = kwargs.pop('term_map')
if hasattr(term_map, "termType") and\
term_map.termType == NS_MGR.rr.BlankNode.rdflib:
return rdflib.BNode()
if not hasattr(term_map, 'datatype'):
term_map.datatype = NS_MGR.xsd.anyURI.rdflib
if hasattr(term_map, "template") and term_map.template is not None:
template_vars = kwargs
template_vars.update(self.constants)
# Call any functions to generate values
for key, value in template_vars.items():
if hasattr(value, "__call__"):
template_vars[key] = value()
raw_value = term_map.template.format(**template_vars)
if term_map.datatype == NS_MGR.xsd.anyURI.rdflib:
return rdflib.URIRef(raw_value)
return rdflib.Literal(raw_value,
datatype=term_map.datatype)
if term_map.reference is not None:
# Each child will have different mechanisms for referencing the
# source based
return self.__generate_reference__(term_map, **kwargs)
def execute(self, triple_map, **kwargs):
"""Placeholder method should be overridden by child classes"""
        raise NotImplementedError("Execute Method required in subclass.")
def run(self, **kwargs):
"""Run method iterates through triple maps and calls the execute
method"""
if 'timestamp' not in kwargs:
kwargs['timestamp'] = datetime.datetime.utcnow().isoformat()
if 'version' not in kwargs:
kwargs['version'] = "Not Defined" #bibcat.__version__
# log.debug("kwargs: %s", pprint.pformat({k:v for k, v in kwargs.items()
# if k != "dataset"}))
# log.debug("parents: %s", self.parents)
for map_key, triple_map in self.triple_maps.items():
if map_key not in self.parents:
self.execute(triple_map, **kwargs)
def set_context(self):
"""
        Reads through the namespaces in the RML and generates a context for
json+ld output when compared to the RdfNsManager namespaces
"""
results = self.rml.query("""
SELECT ?o {
{
?s rr:class ?o
} UNION {
?s rr:predicate ?o
}
}""")
namespaces = [Uri(row[0]).value[0]
for row in results
if isinstance(row[0], rdflib.URIRef)]
self.context = {ns[0]: ns[1] for ns in namespaces if ns[0]}
def set_list_predicates(self):
"""
Reads through the rml mappings and determines all fields that should
map to a list/array with a json output
"""
results = self.rml.query("""
SELECT DISTINCT ?subj_class ?list_field
{
?bn rr:datatype rdf:List .
?bn rr:predicate ?list_field .
?s ?p ?bn .
?s rr:subjectMap ?sm_bn .
?sm_bn rr:class ?subj_class .
}""")
list_preds = [(Uri(row[0]).sparql, Uri(row[1]).sparql)
for row in results]
array_fields = {}
for tup in list_preds:
try:
array_fields[tup[0]].append(tup[1])
except KeyError:
array_fields[tup[0]] = [tup[1]]
self.array_fields = array_fields
def __call__(self, **kwargs):
output = self.run(**kwargs)
rtn_format = kwargs.get("rtn_format")
if rtn_format:
if rtn_format == "json-ld":
result = self.json_ld(output, **kwargs)
# pdb.set_trace()
return result
else:
return output.serialize(format=rtn_format).decode()
return output
def json_ld(self, output, **kwargs):
"""
        Returns the json-ld formatted result
"""
raw_json_ld = output.serialize(format='json-ld',
context=self.context).decode()
# if there are fields that should be returned as arrays convert all
# non-array fields to an array
if not self.array_fields:
return raw_json_ld
json_data = json.loads(raw_json_ld)
for i, item in enumerate(json_data['@graph']):
if item.get("@type") in self.array_fields:
test_flds = self.array_fields[item['@type']]
for key, val in item.items():
if key in test_flds and not isinstance(val, list):
json_data['@graph'][i][key] = [val]
# print(json.dumps(json_data, indent=4))
return json.dumps(json_data, indent=4)
|
class Processor(object, metaclass=KeyRegistryMeta):
def __init__(self, rml_rules, **kwargs):
pass
def __graph__(self):
'''
Method returns a new graph with all of the namespaces in
        the RML graph
'''
pass
def __setup_conn__(self, **kwargs):
'''
Sets the ext_conn based on the kwargs.
        returns a triplestore connection based on the kwargs.
        Order of precedence is as follows:
kwargs['conn']
kwargs['tstore_def']
kwargs['triplestore_url']
kwargs['rdflib']
RdfConfigManager.data_tstore
RdfConfigManager.TRIPLESTORE_URL
kwargs:
conn: established triplestore connection object
            tstore_def: dictionary of parameters specifying the connection as
outlined in the config file
triplestore_url: url to a triplestore. A blazegraph connection
will be used if specified
            rdflib: definition for an rdflib connection
'''
pass
def __generate_delimited_objects__(self, output, **kwargs):
'''
        Internal method takes a subject, predicate, element, and a list
        of delimiters that are applied to the element's text; a triple
        for each resulting value is created and associated with the subject.
Keyword Args:
-------------
triple_map: SimpleNamespace
predicate: URIRef
element: XML Element
datatype: XSD Datatype, optional
delimiters: List of delimiters to apply to string
'''
pass
def __generate_reference__(self, triple_map, **kwargs):
'''Placeholder method, should be extended by child classes
Args:
-----
triple_map: SimpleNamespace
Keyword Args:
-------------
'''
pass
def __generate_object_term__(self, datatype, value):
'''Internal method takes a datatype (can be None) and returns
the RDF Object Term
Args:
-----
datatype: None, or rdflib.URIRef
            value: Varies depending on the ingester
'''
pass
def __handle_parents__(self, output, **kwargs):
'''Internal method handles parentTriplesMaps
Keyword args:
-------------
parent_map: SimpleNamespace of ParentTriplesMap
subject: rdflib.URIRef or rdflib.BNode
predicate: rdflib.URIRef
'''
pass
def __logical_source__(self, map_iri):
        '''Creates a SimpleNamespace for the TripleMap's logicalSource
Args:
-----
map_iri: URIRef
'''
pass
def __subject_map__(self, map_iri):
'''Creates a SimpleNamespace for the TripleMap's subjectMap and
populates properties from the RML RDF graph
Args:
-----
map_iri: rdflib.URIRef,TripleMap IRI
Returns:
--------
SimpleNamespace
'''
pass
def __predicate_object_map__(self, map_iri):
'''Iterates through rr:predicateObjectMaps for this TripleMap
creating a SimpleNamespace for each triple map and assigning the
constant, template, parentTripleMap, reference as properties.
Args:
-----
map_iri: rdflib.URIRef, TripleMap IRI
Returns:
--------
list: List of predicate_object Namespace objects
'''
pass
def add_to_triplestore(self, output):
'''Method attempts to add output to Blazegraph RDF Triplestore'''
pass
def generate_term(self, **kwargs):
'''Method generates a rdflib.Term based on kwargs'''
pass
def execute(self, triple_map, **kwargs):
'''Placeholder method should be overridden by child classes'''
pass
def run(self, **kwargs):
'''Run method iterates through triple maps and calls the execute
method'''
pass
def set_context(self):
'''
        Reads through the namespaces in the RML and generates a context for
json+ld output when compared to the RdfNsManager namespaces
'''
pass
def set_list_predicates(self):
'''
Reads through the rml mappings and determines all fields that should
map to a list/array with a json output
'''
pass
def __call__(self, **kwargs):
pass
def json_ld(self, output, **kwargs):
'''
        Returns the json-ld formatted result
'''
pass
| 19 | 16 | 24 | 1 | 17 | 6 | 4 | 0.41 | 2 | 9 | 2 | 6 | 18 | 11 | 18 | 37 | 471 | 46 | 302 | 92 | 283 | 124 | 206 | 91 | 187 | 8 | 3 | 4 | 67 |
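The Processor record above documents how generate_term expands rr:template values into RDF terms. The following minimal sketch is an illustration only (it is not part of the repository or this record); it assumes just that rdflib is installed and mimics the datatype handling shown in __generate_object_term__ and generate_term.

from types import SimpleNamespace
import rdflib

# xsd:anyURI marks values that should become URIRefs rather than Literals
XSD_ANY_URI = rdflib.URIRef("http://www.w3.org/2001/XMLSchema#anyURI")

def make_term(term_map, **template_vars):
    """Expand a rr:template string and wrap the result as a URIRef or Literal."""
    raw_value = term_map.template.format(**template_vars)
    if term_map.datatype == XSD_ANY_URI:
        return rdflib.URIRef(raw_value)
    return rdflib.Literal(raw_value, datatype=term_map.datatype)

term_map = SimpleNamespace(template="http://example.org/work/{id}",
                           datatype=XSD_ANY_URI)
print(make_term(term_map, id="123"))   # http://example.org/work/123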
143,305 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/rml/processor.py
|
rdfframework.rml.processor.JSONProcessor
|
class JSONProcessor(Processor):
"""JSON RDF Mapping Processor"""
rdf_name = Uri("kdr:RmlJSONProcessor")
def __init__(self, **kwargs):
try:
rml_rules = kwargs.pop("rml_rules")
except KeyError:
rml_rules = []
super(JSONProcessor, self).__init__(rml_rules, **kwargs)
def __generate_reference__(self, triple_map, **kwargs):
json_obj = kwargs.get("obj")
path_expr = jsonpath_ng.parse(triple_map.reference)
results = [r.value.strip() for r in path_expr.find(json_obj)]
for row in results:
if rdflib.term._is_valid_uri(row):
return rdflib.URIRef(row)
def __reference_handler__(self, output, **kwargs):
"""Internal method for handling rr:reference in triples map
Keyword Args:
-------------
predicate_obj_map: SimpleNamespace
obj: dict
subject: rdflib.URIRef
"""
subjects = []
pred_obj_map = kwargs.get("predicate_obj_map")
obj = kwargs.get("obj")
subject = kwargs.get("subject")
if pred_obj_map.reference is None:
return subjects
predicate = pred_obj_map.predicate
        ref_exp = jsonpath_ng.parse(str(pred_obj_map.reference))
        found_objects = [r.value for r in ref_exp.find(obj)]
for row in found_objects:
output.add((subject, predicate, rdflib.Literal(row)))
def execute(self, triple_map, output, **kwargs):
"""Method executes mapping between JSON source and
output RDF
Args:
-----
triple_map: SimpleNamespace
"""
subjects = []
logical_src_iterator = str(triple_map.logicalSource.iterator)
json_object = kwargs.get('obj', self.source)
# Removes '.' as a generic iterator, replace with '@'
if logical_src_iterator == ".":
results = [None,]
else:
json_path_exp = jsonpath_ng.parse(logical_src_iterator)
results = [r.value for r in json_path_exp.find(json_object)][0]
for row in results:
subject = self.generate_term(term_map=triple_map.subjectMap,
**kwargs)
for pred_obj_map in triple_map.predicateObjectMap:
predicate = pred_obj_map.predicate
if pred_obj_map.template is not None:
output.add((
subject,
predicate,
self.generate_term(term_map=pred_obj_map, **kwargs)))
if pred_obj_map.parentTriplesMap is not None:
self.__handle_parents__(
output,
parent_map=pred_obj_map.parentTriplesMap,
subject=subject,
predicate=predicate,
obj=row,
**kwargs)
if pred_obj_map.reference is not None:
ref_exp = jsonpath_ng.parse(str(pred_obj_map.reference))
found_objects = [r.value for r in ref_exp.find(row)]
for obj in found_objects:
if rdflib.term._is_valid_uri(obj):
rdf_obj = rdflib.URIRef(str(obj))
else:
rdf_obj = rdflib.Literal(str(obj))
output.add((subject, predicate, rdf_obj))
if pred_obj_map.constant is not None:
output.add((subject,
predicate,
pred_obj_map.constant))
subjects.append(subject)
return subjects
def run(self, source, **kwargs):
"""Method takes a JSON source and any keywords and transforms from
JSON to Lean BIBFRAME 2.0 triples
Args:
----
source: str, dict
"""
kwargs['output'] = self.__graph__()
if isinstance(source, str):
import json
source = json.loads(source)
self.source = source
super(JSONProcessor, self).run(**kwargs)
self.output = kwargs['output']
        return self.output
|
class JSONProcessor(Processor):
'''JSON RDF Mapping Processor'''
def __init__(self, **kwargs):
pass
def __generate_reference__(self, triple_map, **kwargs):
pass
def __reference_handler__(self, output, **kwargs):
'''Internal method for handling rr:reference in triples map
Keyword Args:
-------------
predicate_obj_map: SimpleNamespace
obj: dict
subject: rdflib.URIRef
'''
pass
def execute(self, triple_map, output, **kwargs):
'''Method executes mapping between JSON source and
output RDF
Args:
-----
triple_map: SimpleNamespace
'''
pass
def run(self, source, **kwargs):
'''Method takes a JSON source and any keywords and transforms from
JSON to Lean BIBFRAME 2.0 triples
Args:
----
source: str, dict
'''
pass
| 6 | 4 | 21 | 1 | 15 | 4 | 4 | 0.27 | 1 | 3 | 0 | 0 | 5 | 2 | 5 | 42 | 112 | 13 | 78 | 36 | 71 | 21 | 64 | 36 | 57 | 10 | 4 | 5 | 20 |
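JSONProcessor.execute above relies on jsonpath_ng for both the rml:iterator and rr:reference expressions. The short sketch below is an illustration, not repository code; it assumes jsonpath_ng is installed and uses made-up data to show those two calls in isolation.

import jsonpath_ng

source = {"items": [{"name": "Work One", "url": "http://example.org/1"},
                    {"name": "Work Two", "url": "http://example.org/2"}]}

# rml:iterator style expression selecting the rows to map
iterator = jsonpath_ng.parse("items")
rows = [match.value for match in iterator.find(source)][0]

# rr:reference style expression applied to each row
name_ref = jsonpath_ng.parse("name")
for row in rows:
    print([match.value for match in name_ref.find(row)])   # ['Work One'], ['Work Two']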
143,306 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/utilities/baseutilities.py
|
rdfframework.utilities.baseutilities.IsFirst
|
class IsFirst():
    ''' tracks if it is the first time through a loop. The class must be
        initialized outside the loop.
        *args:
            true -> specify the value to return on true
            false -> specify the value to return on false '''
def __init__(self):
self.__first = True
def first(self, true=True, false=False):
if self.__first == True:
self.__first = False
return true
else:
return false
|
class IsFirst():
    ''' tracks if it is the first time through a loop. The class must be
        initialized outside the loop.
        *args:
            true -> specify the value to return on true
            false -> specify the value to return on false '''
def __init__(self):
pass
def first(self, true=True, false=False):
pass
| 3 | 1 | 4 | 0 | 4 | 0 | 2 | 0.56 | 0 | 0 | 0 | 0 | 2 | 1 | 2 | 2 | 17 | 3 | 9 | 4 | 6 | 5 | 8 | 4 | 5 | 2 | 0 | 1 | 3 |
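A minimal usage sketch for IsFirst follows (illustrative only; the import path is taken from the file_path field shown in this record and assumes the package is installed):

from rdfframework.utilities.baseutilities import IsFirst

first = IsFirst()                        # initialize outside the loop
parts = []
for word in ["?s", "?p", "?o"]:
    # no separator is added only on the first pass through the loop
    parts.append(first.first(true="", false=" ") + word)
print("".join(parts))                    # ?s ?p ?o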
143,307 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/rml/processor.py
|
rdfframework.rml.processor.CSVRowProcessor
|
class CSVRowProcessor(Processor):
"""RML Processor for CSV/TSV or other delimited file supported by the
python standard library module csv"""
rdf_name = Uri("kdr:RmlCSVRowProcessor")
def __init__(self, **kwargs):
if "rml_rules" in kwargs:
rml_rules = kwargs.pop("rml_rules")
else:
rml_rules = []
super(CSVRowProcessor, self).__init__(rml_rules, **kwargs)
def __generate_reference__(self, triple_map, **kwargs):
"""Generates a RDF entity based on triple map
Args:
triple_map(SimpleNamespace): Triple Map
"""
raw_value = self.source.get(str(triple_map.reference))
if raw_value is None or len(raw_value) < 1:
return
if hasattr(triple_map, "datatype"):
if triple_map.datatype == NS_MGR.xsd.anyURI.rdflib:
output = rdflib.URIRef(raw_value)
else:
output = rdflib.Literal(
raw_value,
datatype=triple_map.datatype)
else:
output = rdflib.Literal(raw_value)
return output
def execute(self, triple_map, output, **kwargs):
"""Method executes mapping between CSV source and
output RDF
args:
triple_map(SimpleNamespace): Triple Map
"""
subject = self.generate_term(term_map=triple_map.subjectMap,
**kwargs)
start_size = len(output)
all_subjects = []
for pred_obj_map in triple_map.predicateObjectMap:
predicate = pred_obj_map.predicate
if pred_obj_map.template is not None:
object_ = self.generate_term(term_map=pred_obj_map, **kwargs)
                if len(str(object_)) > 0:
output.add((
subject,
predicate,
object_))
if pred_obj_map.parentTriplesMap is not None:
                self.__handle_parents__(
                    output,
                    parent_map=pred_obj_map.parentTriplesMap,
                    subject=subject,
                    predicate=predicate,
                    **kwargs)
if pred_obj_map.reference is not None:
object_ = self.generate_term(term_map=pred_obj_map,
**kwargs)
if object_ and len(str(object_)) > 0:
output.add((subject, predicate, object_))
if pred_obj_map.constant is not None:
output.add((subject, predicate, pred_obj_map.constant))
finish_size = len(output)
if finish_size > start_size:
output.add((subject,
NS_MGR.rdf.type.rdflib,
triple_map.subjectMap.class_))
all_subjects.append(subject)
return all_subjects
def run(self, row, **kwargs):
"""Methods takes a row and depending if a dict or list,
runs RML rules.
Args:
-----
row(Dict, List): Row from CSV Reader
"""
self.source = row
kwargs['output'] = self.__graph__()
super(CSVRowProcessor, self).run(**kwargs)
return kwargs['output']
|
class CSVRowProcessor(Processor):
'''RML Processor for CSV/TSV or other delimited file supported by the
python standard library module csv'''
def __init__(self, **kwargs):
pass
def __generate_reference__(self, triple_map, **kwargs):
        '''Generates an RDF entity based on triple map
Args:
triple_map(SimpleNamespace): Triple Map
'''
pass
def execute(self, triple_map, output, **kwargs):
'''Method executes mapping between CSV source and
output RDF
args:
triple_map(SimpleNamespace): Triple Map
'''
pass
def run(self, row, **kwargs):
        '''Method takes a row and, depending on whether it is a dict or list,
runs RML rules.
Args:
-----
row(Dict, List): Row from CSV Reader
'''
pass
| 5 | 4 | 20 | 1 | 15 | 4 | 4 | 0.28 | 1 | 2 | 0 | 0 | 4 | 1 | 4 | 41 | 88 | 10 | 61 | 17 | 56 | 17 | 45 | 17 | 40 | 9 | 4 | 3 | 16 |
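A hedged usage sketch for CSVRowProcessor (illustrative only; it assumes rdfframework is importable from the file_path shown above and that "example_rules.ttl" and "records.csv" are files you supply yourself, neither of which is part of this record):

import csv
from rdfframework.rml.processor import CSVRowProcessor

processor = CSVRowProcessor(rml_rules=["example_rules.ttl"])   # your RML mapping
with open("records.csv", newline="") as csv_file:
    for row in csv.DictReader(csv_file):
        graph = processor.run(row)       # one rdflib graph per CSV row
        print(len(graph), "triples")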
143,308 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/rml/processor.py
|
rdfframework.rml.processor.CSVProcessor
|
class CSVProcessor(Processor):
"""CSV RDF Mapping Processor"""
rdf_name = Uri("kds:RmlCSVPRocessor")
def __init__(self, **kwargs):
if "fields" in kwargs:
self.fields = kwargs.pop("fields")
if "rml_rules" in kwargs:
rml_rules = kwargs.pop("rml_rules")
csv_file = kwargs.pop("csv_file")
self.reader = csv.DictReader(open(csv_file, 'rb'))
super(CSVProcessor, self).__init__(rml_rules, **kwargs)
def __generate_reference__(self, triple_map, **kwargs):
"""Extracts the value of either column by key or by position """
pass
def execute(self, triple_map, **kwargs):
"""Method executes mapping between CSV source and
output RDF
args:
triple_map(SimpleNamespace): Triple Map
"""
pass
def run(self, **kwargs):
"""Method runs through CSV Reader and applies rules to each
row.
"""
pass
|
class CSVProcessor(Processor):
'''CSV RDF Mapping Processor'''
def __init__(self, **kwargs):
pass
def __generate_reference__(self, triple_map, **kwargs):
'''Extracts the value of either column by key or by position '''
pass
def execute(self, triple_map, **kwargs):
'''Method executes mapping between CSV source and
output RDF
args:
triple_map(SimpleNamespace): Triple Map
'''
pass
def run(self, **kwargs):
'''Method runs through CSV Reader and applies rules to each
row.
'''
pass
| 5 | 4 | 6 | 1 | 4 | 2 | 2 | 0.63 | 1 | 2 | 0 | 0 | 4 | 2 | 4 | 41 | 32 | 6 | 16 | 10 | 11 | 10 | 16 | 10 | 11 | 3 | 4 | 1 | 6 |
143,309 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/rdfclass/rdfproperty.py
|
rdfframework.rdfclass.rdfproperty.RdfPropertyMeta
|
class RdfPropertyMeta(type):
""" Metaclass for generating rdfproperty classes """
@property
def doc(cls):
""" Prints the docstring for the class."""
print_doc(cls)
@classmethod
def __prepare__(mcs, name, bases, **kwargs):
# print(' RdfClassMeta.__prepare__(\n\t\t%s)' % (p_args(args, kwargs)))
# if name == 'bf_hasItem':
# pdb.set_trace()
prop_defs = kwargs.pop('prop_defs')
prop_name = kwargs.pop('prop_name')
cls_names = kwargs.pop('class_names')
hierarchy = kwargs.pop('hierarchy')
try:
cls_names.remove('RdfClassBase')
except ValueError:
pass
if not cls_names:
return {}
doc_string = make_doc_string(name,
prop_defs,
bases,
None)
new_def = prepare_prop_defs(prop_defs, prop_name, cls_names)
new_def = filter_prop_defs(prop_defs, hierarchy, cls_names)
new_def['__doc__'] = doc_string
new_def['doc'] = property(print_doc)
new_def['class_names'] = cls_names
new_def['_prop_name'] = prop_name
if prop_name == 'rdf_type':
new_def['append'] = unique_append
new_def['_init_processors'] = get_processors('kds_initProcessor',
prop_defs)
new_def['_es_processors'] = get_processors('kds_esProcessor',
prop_defs,
'es_values')
# pdb.set_trace()
return new_def
# x = super().__prepare__(name, bases, **new_def)
# pdb.set_trace()
# return super().__prepare__(name, bases, **new_def)
def __new__(mcs, name, bases, clsdict, **kwargs):
return type.__new__(mcs, name, bases, clsdict)
def __init__(cls, name, bases, namespace, **kwargs):
super().__init__(name, bases, namespace)
|
class RdfPropertyMeta(type):
''' Metaclass for generating rdfproperty classes '''
@property
def doc(cls):
''' Prints the docstring for the class.'''
pass
@classmethod
def __prepare__(mcs, name, bases, **kwargs):
pass
def __new__(mcs, name, bases, clsdict, **kwargs):
pass
def __init__(cls, name, bases, namespace, **kwargs):
pass
| 7 | 2 | 10 | 0 | 9 | 1 | 2 | 0.24 | 1 | 3 | 0 | 1 | 3 | 0 | 4 | 17 | 51 | 4 | 38 | 13 | 31 | 9 | 30 | 11 | 25 | 4 | 2 | 1 | 7 |
143,310 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/rdfclass/rdfproperty.py
|
rdfframework.rdfclass.rdfproperty.RdfPropertyBase
|
class RdfPropertyBase(list): # metaclass=RdfPropertyMeta):
""" Property Base Class """
def __init__(self, bound_class, dataset=None):
super().__init__(self)
self.dataset = dataset
self.bound_class = bound_class
self.class_names = bound_class.class_names
self.old_data = []
try:
self._run_processors(self._init_processors)
except AttributeError:
pass
def __call__(self, bound_class, dataset=None):
self.dataset = dataset
self.bound_class = bound_class
self.old_data = []
def _run_processors(self, processor_list):
""" cycles through a list of processors and runs them
Args:
processor_list(list): a list of processors
"""
for processor in processor_list:
processor(self)
@classmethod
def es_mapping(cls, base_class, **kwargs):
""" Returns the es mapping for the property
"""
es_map = {}
ranges = cls.rdfs_range # pylint: disable=no-member
rng_defs = get_prop_range_defs(cls.class_names, cls.kds_rangeDef)
rng_def = get_prop_range_def(rng_defs)
if rng_def.get("kds_esIndexClass"):
ranges = rng_def['kds_esIndexClass'].copy()
# pdb.set_trace()
# if cls._prop_name == "bf_partOf":
# pdb.set_trace()
idx_types = get_idx_types(rng_def, ranges)
if 'es_Ignored' in idx_types:
return {'type': 'text',
'index': es_idx_types['es_NotIndexed']}
if 'es_Nested' in idx_types:
if (kwargs.get('depth', 0) >= 1 and \
kwargs.get('class') == ranges[0]) or \
kwargs.get('depth', 0) > 2:
return {"type" : "keyword"}
nested_map = getattr(MODULE.rdfclass,
ranges[0]).es_mapping(base_class,
'es_Nested',
**kwargs)
es_map['properties'] = nested_map
es_map['type'] = "nested"
elif len(idx_types) > 1:
fields = {}
for idx in idx_types:
fields.update(es_idx_types[idx])
es_map['fields'] = fields
elif len(idx_types) == 1:
if cls._prop_name == 'rdf_type': # pylint: disable=no-member
es_map['type'] = 'keyword'
elif idx_types[0] == 'es_NotIndexed':
es_map['index'] = False
else:
es_map['type'] = es_idx_types[idx_types[0]]
try:
if not es_map.get('type'):
fld_type = BaseRdfDataType[ranges[0]].es_type
es_map['type'] = fld_type
if cls._prop_name == 'rdf_type':
es_map['type'] = 'keyword'
except (KeyError, AttributeError):
if cls._prop_name == 'rdf_type': # pylint: disable=no-member
es_map['type'] = 'keyword'
else:
es_map['type'] = 'text'
if es_map['type'] == "nested":
del es_map['type']
try:
fld_format = BaseRdfDataType[ranges[0]].es_format
es_map['format'] = fld_format
except (KeyError, AttributeError):
pass
return es_map
@classmethod
def es_indexers(cls, base_class, **kwargs):
""" Returns the es mapping for the property
"""
indexer_list = []
ranges = cls.rdfs_range # pylint: disable=no-member
rng_defs = get_prop_range_defs(cls.class_names, cls.kds_rangeDef)
rng_def = get_prop_range_def(rng_defs)
if rng_def.get("kds_esIndexClass"):
ranges = rng_def['kds_esIndexClass'].copy()
idx_types = get_idx_types(rng_def, ranges)
if 'es_Ignored' in idx_types:
return []
if 'es_Nested' in idx_types:
if (kwargs.get('depth', 0) >= 1 and \
kwargs.get('class') == ranges[0]) or \
kwargs.get('depth', 0) > 2:
return []
indexer_list = getattr(MODULE.rdfclass,
ranges[0]).es_indexers(base_class,
'es_Nested',
**kwargs)
return indexer_list
def es_json(self, **kwargs):
""" Returns a JSON object of the property for insertion into es
"""
rtn_list = []
rng_defs = get_prop_range_defs(self.class_names, self.kds_rangeDef)
# if self.__class__._prop_name == 'bf_partOf':
# pdb.set_trace()
rng_def = get_prop_range_def(rng_defs)
idx_types = rng_def.get('kds_esIndexType', []).copy()
if 'es_Ignore' in idx_types:
return rtn_list
ranges = self.rdfs_range # pylint: disable=no-member
# copy the current data into the es_values attribute then run
# the es_processors to manipulate that data
self.es_values = self.copy()
# determine if using inverseOf object
if rng_def.get('kds_esLookup'):
self.es_values += self.dataset.json_qry("%s.$" % getattr(self,
rng_def['kds_esLookup'][0])[0].pyuri,
{'$':self.bound_class.subject})
self.es_values = list(set(self.es_values))
self._run_processors(self._es_processors)
if not idx_types:
nested = False
for rng in ranges:
if range_is_obj(rng, MODULE.rdfclass):
nested = True
break
value_class = [value.__class__ for value in self.es_values
if isinstance(value, MODULE.rdfclass.RdfClassBase)]
if value_class or nested:
nested = True
else:
nested = False
if nested:
idx_types.append('es_Nested')
rtn_obj = {}
if 'es_Nested' in idx_types:
if kwargs.get('depth', 0) > 6:
return [val.subject.sparql_uri for val in self]
for value in self.es_values:
try:
new_value = value.es_json('es_Nested', **kwargs)
except AttributeError:
new_value = convert_value_to_es(value,
ranges,
self,
"missing_obj")
rtn_list.append(new_value)
if rng_def.get("kds_esField"):
es_value_fld = rng_def['kds_esValue'][0] \
if rng_def['kds_esValue'] else None
es_field = rng_def['kds_esField'][0]
for item in value.get(es_field):
if new_value.get(es_value_fld):
val = new_value.get(es_value_fld , [])
try:
rtn_obj[item.pyuri] += val
except KeyError:
rtn_obj[item.pyuri] = val
else:
for value in self.es_values:
rtn_list.append(convert_value_to_es(value, ranges, self))
if rtn_obj:
return rtn_obj
return rtn_list
|
class RdfPropertyBase(list):
''' Property Base Class '''
def __init__(self, bound_class, dataset=None):
pass
def __call__(self, bound_class, dataset=None):
pass
def _run_processors(self, processor_list):
''' cycles through a list of processors and runs them
Args:
processor_list(list): a list of processors
'''
pass
@classmethod
def es_mapping(cls, base_class, **kwargs):
''' Returns the es mapping for the property
'''
pass
@classmethod
def es_indexers(cls, base_class, **kwargs):
''' Returns the es mapping for the property
'''
pass
def es_json(self, **kwargs):
''' Returns a JSON object of the property for insertion into es
'''
pass
| 9 | 5 | 29 | 1 | 25 | 4 | 8 | 0.16 | 1 | 5 | 1 | 0 | 4 | 5 | 6 | 39 | 185 | 14 | 152 | 43 | 143 | 25 | 125 | 41 | 118 | 19 | 2 | 6 | 45 |
143,311 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/utilities/baseutilities.py
|
rdfframework.utilities.baseutilities.EmptyDot
|
class EmptyDot():
def __getattr__(self, attr):
return EmptyDot()
def __repr__(self):
return ""
def __str__(self):
return ""
def __nonzero__(self):
return False
def __bool__(self):
return False
def __call__(self, *args, **kwargs):
raise RuntimeWarning("class called before initialization by\n\t%s" %
inspect.getframeinfo(inspect.stack()[1][0]).__repr__())
|
class EmptyDot():
def __getattr__(self, attr):
pass
def __repr__(self):
pass
def __str__(self):
pass
def __nonzero__(self):
pass
def __bool__(self):
pass
def __call__(self, *args, **kwargs):
pass
| 7 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 6 | 0 | 6 | 6 | 19 | 5 | 14 | 7 | 7 | 0 | 13 | 7 | 6 | 1 | 0 | 0 | 6 |
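A minimal usage sketch for EmptyDot (illustrative only; the import path is taken from the file_path shown above and assumes the package is installed):

from rdfframework.utilities.baseutilities import EmptyDot

cfg = EmptyDot()
# arbitrary attribute chains never raise; every hop returns another EmptyDot
print(repr(cfg.database.host))           # ''
print(bool(cfg.database.host))           # False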
143,312 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/utilities/codetimer.py
|
rdfframework.utilities.codetimer.CodeTimer
|
class CodeTimer(object):
'''simple class for placing timers in the code for performance testing'''
def add_timer(self, timer_name):
''' adds a timer to the class '''
setattr(self, timer_name, [])
def log(self, timer_name, node):
        ''' logs an event in the timer '''
timestamp = time.time()
if hasattr(self, timer_name):
getattr(self, timer_name).append({
"node":node,
"time":timestamp})
else:
setattr(self, timer_name, [{"node":node, "time":timestamp}])
def print_timer(self, timer_name, **kwargs):
''' prints the timer to the terminal
keyword args:
delete -> True/False -deletes the timer after printing
'''
if hasattr(self, timer_name):
_delete_timer = kwargs.get("delete", False)
print("|-------- {} [Time Log Calculation]-----------------|".format(\
timer_name))
print("StartDiff\tLastNodeDiff\tNodeName")
time_log = getattr(self, timer_name)
start_time = time_log[0]['time']
previous_time = start_time
for entry in time_log:
time_diff = (entry['time'] - previous_time) *1000
time_from_start = (entry['time'] - start_time) * 1000
previous_time = entry['time']
print("{:.1f}\t\t{:.1f}\t\t{}".format(time_from_start,
time_diff,
entry['node']))
print("|--------------------------------------------------------|")
if _delete_timer:
self.delete_timer(timer_name)
def delete_timer(self, timer_name):
''' deletes a timer '''
if hasattr(self, timer_name):
delattr(self, timer_name)
|
class CodeTimer(object):
'''simple class for placing timers in the code for performance testing'''
def add_timer(self, timer_name):
''' adds a timer to the class '''
pass
def log(self, timer_name, node):
        ''' logs an event in the timer '''
pass
def print_timer(self, timer_name, **kwargs):
''' prints the timer to the terminal
keyword args:
delete -> True/False -deletes the timer after printing
'''
pass
def delete_timer(self, timer_name):
''' deletes a timer '''
pass
| 5 | 5 | 10 | 0 | 8 | 2 | 2 | 0.24 | 1 | 0 | 0 | 0 | 4 | 0 | 4 | 4 | 45 | 4 | 33 | 13 | 28 | 8 | 27 | 13 | 22 | 4 | 1 | 2 | 9 |
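A minimal usage sketch for CodeTimer (illustrative only; the import path is taken from the file_path shown above and assumes the package is installed):

import time
from rdfframework.utilities.codetimer import CodeTimer

timer = CodeTimer()
timer.add_timer("load")
timer.log("load", "start")
time.sleep(0.05)                         # stand-in for real work
timer.log("load", "rules parsed")
timer.print_timer("load", delete=True)   # prints the log and removes the timer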
143,313 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/utilities/colors.py
|
rdfframework.utilities.colors.__BackgroundsMeta__
|
class __BackgroundsMeta__(__ColorsBaseMeta__):
""" adds backgrounds to the __backgrounds__ class """
def __new__(mcs, name, bases, clsdict, **kwargs):
# pdb.set_trace()
new_dict = {key.lower(): __background__(value)
for key, value in Back.__dict__.items()}
new_dict.update({key: __background__(value)
for key, value in clsdict.items()
if not key.startswith("_")})
clsdict.update(new_dict)
return super().__new__(mcs, name, bases, clsdict)
def __getattr__(cls, attr):
return cls.__dict__.get(attr.lower(), __background__())
def __setattr__(cls, attr, value):
if value in dir(cls):
super().__setattr__(attr, getattr(cls, value))
else:
super().__setattr__(attr, __background__(value))
|
class __BackgroundsMeta__(__ColorsBaseMeta__):
''' adds backgrounds to the __backgrounds__ class '''
def __new__(mcs, name, bases, clsdict, **kwargs):
pass
def __getattr__(cls, attr):
pass
def __setattr__(cls, attr, value):
pass
| 4 | 1 | 5 | 0 | 5 | 0 | 1 | 0.13 | 1 | 2 | 1 | 1 | 3 | 0 | 3 | 17 | 19 | 1 | 16 | 5 | 12 | 2 | 12 | 5 | 8 | 2 | 3 | 1 | 4 |
143,314 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/utilities/colors.py
|
rdfframework.utilities.colors.__ColorBase__
|
class __ColorBase__():
"""
Base class for __styles__, __backgrounds__ and colors
args:
value: the escaped string for a colored effect
        added: additional escaped color strings
"""
value = ''
def __init__(self, value='', added=''):
self.value = added + value
def __str__(self):
return self.value
def __call__(self, string=None):
# pdb.set_trace()
global __colors_on__
if not __colors_on__:
return string
if string and self.value:
return "{}{}{}".format(self.value, string, '\x1b[0m')
# if self.value:
# return self.value
elif self.value:
return '\x1b[39m'
return string
|
class __ColorBase__():
'''
Base class for __styles__, __backgrounds__ and colors
args:
value: the escaped string for a colored effect
        added: additional escaped color strings
'''
def __init__(self, value='', added=''):
pass
def __str__(self):
pass
def __call__(self, string=None):
pass
| 4 | 1 | 5 | 0 | 4 | 1 | 2 | 0.6 | 0 | 0 | 0 | 3 | 3 | 0 | 3 | 3 | 25 | 1 | 15 | 6 | 10 | 9 | 14 | 6 | 9 | 4 | 0 | 1 | 6 |
143,315 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/utilities/metaclasses.py
|
rdfframework.utilities.metaclasses.KeyRegistryMeta
|
class KeyRegistryMeta(type):
""" Registry metaclass for a 'key' lookup specified as an attribute of a
class inheriting from the base class using this metaclass
    Calling the base class by baseclass[key] will return the inherited class
that is specified by the 'key'
The base class needs to have defined a class attribute of
__registry__
"""
def __new__(meta, name, bases, class_dict):
def __get_idx_attrs__(cls, reg_cls, name, force=False):
""" get the list of attrs that should be used as an index in the
registry
args:
cls: the new class
                    name: the attribute name storing the list of attribute names
                    force: forces the creation of the attribute 'name' if it is not
found
"""
def hashable(value):
"""Determine whether `value` can be hashed."""
try:
hash(value)
except TypeError:
return False
return True
try:
options = getattr(reg_cls, name)
if not isinstance(options, (set, list)):
raise TypeError("'%s' must be a set or list" % name)
return set(getattr(reg_cls, name))
except AttributeError:
                # if the reg_cls does not have the set of attributes listed
# attempt to create a list of keys if force is passed as true
if force:
                    # select all attribute names that are non callable and
# that are not reserved attributes
options = [attr for attr in dir(cls)
if not attr.startswith("__")
and not callable(getattr(cls, attr))
and type(getattr(cls, attr)) != property
and hashable(getattr(cls, attr))]
setattr(reg_cls, name, set(options))
return set(options)
else:
return set()
cls = super(KeyRegistryMeta, meta).__new__(meta,
name,
bases,
class_dict)
reg_cls = [base for base in cls.__bases__
if base not in [object, str, int]]
try:
reg_cls = reg_cls[-1]
if hasattr(reg_cls, "__reg_cls__"):
reg_cls = reg_cls.__reg_cls__
except IndexError:
            # if there are no classes use the current class as the
# class for registration
cls.__reg_cls__ = cls
reg_cls = cls
if cls == reg_cls:
if not hasattr(reg_cls, "__registry__"):
cls.__registry__ = {}
return cls
# get all of the attributes that should be used as registry indexes
req_attrs = __get_idx_attrs__(cls,
reg_cls,
'__required_idx_attrs__',
True)
if not req_attrs:
err_msg = " ".join( ["Unable to determine what attributes should",
"be used as registry indexes. Specify a class",
"attribute '__required_idx_attrs__' as a",
"set() of attribute names in",
"class '%s'" % cls.__bases__[-1]])
raise AttributeError(err_msg)
opt_attrs = __get_idx_attrs__(cls,
reg_cls,
'__optional_idx_attrs__')
cls_attrs = set(dir(cls))
cls_attrs.add("__name__")
if (req_attrs - cls_attrs):
raise AttributeError("'%s' is missing these required class attributes %s" \
% (cls, req_attrs - cls_attrs))
# nested attributes should be removed from required attrs so that
        # they do not error out since we expect multiple classes using the
# same nested value
nested_attrs = set()
if hasattr(reg_cls, "__nested_idx_attrs__"):
nested_attrs = set(reg_cls.__nested_idx_attrs__)
req_attrs -= nested_attrs
attr_vals = set([getattr(cls, attr) \
for attr in req_attrs.union(opt_attrs) \
if hasattr(cls, attr)])
#if cls.__dict__.get(attr)])
registry = reg_cls.__registry__
registered = [attr_val for attr_val in attr_vals \
if registry.get(attr_val)]
if hasattr(reg_cls, "__special_idx_attrs__"):
for spec_attr in reg_cls.__special_idx_attrs__:
for key, value in spec_attr.items():
if hasattr(cls, key):
for item in value:
if registry.get(getattr(getattr(cls, key), item)):
registered.append(getattr(getattr(cls, key),
item))
if registered:
err_msg = " ".join(["'%s' is trying to register these" % cls,
"indexes that have already been assigned:\n"])
err_items = ["idx '%s' in class '%s'" % (item, registry[item]) \
for item in registered]
raise LookupError(err_msg + "\n".join(err_items))
reg_vals = []
for attr in req_attrs.union(opt_attrs):
if hasattr(cls, attr):
attr_val = getattr(cls, attr)
registry[attr_val] = cls
reg_vals.append(attr_val)
if hasattr(reg_cls, "__special_idx_attrs__"):
for spec_attr in reg_cls.__special_idx_attrs__:
for key, value in spec_attr.items():
if hasattr(cls, key):
for item in value:
val = getattr(getattr(cls, key), item)
registry[val] = cls
reg_vals.append(val)
for attr in nested_attrs:
if hasattr(cls, attr):
attr_val = getattr(cls, attr)
if not registry.get(attr_val):
registry[attr_val] = {}
for val in reg_vals:
registry[attr_val][val] = cls
if not '__registry__' in cls.__dict__:
cls.__registry__ = None
return cls
def __getitem__(cls, key):
if cls != cls.__reg_cls__:
raise TypeError("'%s' object is not subscriptable" % cls)
try:
return cls.__registry__[key]
except KeyError:
raise KeyError("key '%s' has no associated class" % key)
def __iter__(cls):
if cls != cls.__reg_cls__:
raise TypeError("'%s' object is not iterable" % cls)
return iter(cls.__registry__)
def keys(cls):
if cls == cls.__reg_cls__:
return cls.__registry__.keys()
raise AttributeError("%s has not attribute 'keys'" % cls)
def values(cls):
if cls == cls.__reg_cls__:
return cls.__registry__.values()
raise AttributeError("%s has not attribute 'values'" % cls)
@property
def nested(cls):
if cls == cls.__reg_cls__:
return {key: value for key, value in cls.__registry__.items()
if isinstance(value, dict)}
raise AttributeError("%s has not attribute 'nested'" % cls)
|
class KeyRegistryMeta(type):
''' Registry metaclass for a 'key' lookup specified as an attribute of a
class inheriting from the base class using this metaclass
    Calling the base class by baseclass[key] will return the inherited class
that is specified by the 'key'
The base class needs to have defined a class attribute of
__registry__
'''
def __new__(meta, name, bases, class_dict):
pass
def __get_idx_attrs__(cls, reg_cls, name, force=False):
''' get the list of attrs that should be used as an index in the
registry
args:
cls: the new class
                name: the attribute name storing the list of attribute names
                force: forces the creation of the attribute 'name' if it is not
found
'''
pass
def hashable(value):
'''Determine whether `value` can be hashed.'''
pass
def __getitem__(cls, key):
pass
def __iter__(cls):
pass
def keys(cls):
pass
def values(cls):
pass
@property
def nested(cls):
pass
| 10 | 3 | 25 | 1 | 20 | 4 | 6 | 0.2 | 1 | 12 | 0 | 8 | 6 | 0 | 6 | 19 | 176 | 15 | 134 | 27 | 124 | 27 | 107 | 26 | 98 | 27 | 2 | 6 | 44 |
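The KeyRegistryMeta docstring above describes a key-based class registry. The sketch below is illustrative only: the Connection class names are made up, and the import path is an assumption based on the file_path shown in this record.

from rdfframework.utilities.metaclasses import KeyRegistryMeta

class Connection(metaclass=KeyRegistryMeta):
    # base class owns the registry and names the attribute used as the key
    __registry__ = {}
    __required_idx_attrs__ = {"vendor"}

class BlazegraphConn(Connection):
    vendor = "blazegraph"

class RdflibConn(Connection):
    vendor = "rdflib"

print(Connection["blazegraph"])          # <class '...BlazegraphConn'>
print(sorted(Connection.keys()))         # ['blazegraph', 'rdflib']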
143,316 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/utilities/metaclasses.py
|
rdfframework.utilities.metaclasses.PerformanceMeta
|
class PerformanceMeta(type):
""" metaclass to remove property attributes so that they can be set during
        class instantiation.
returns: the arg if is an instance or calls the class
"""
# def __call__(cls, *args, **kwargs):
# super(PerformanceMeta, cls).__call__(*args, **kwargs)
def __new__(mcs, cls, bases, clsdict, **kwds):
        # if performance mode is set rename the performance attributes
        # and break the inheritance of those attributes so that they can be
        # assigned during instantiation
if clsdict.get('performance_mode', False):
for attr in clsdict['performance_attrs']:
try:
clsdict["__%s__" % attr] = clsdict.pop(attr)
except KeyError:
pass
clsdict[attr] = None
return super(PerformanceMeta, mcs).__new__(mcs, cls, bases, clsdict)
|
class PerformanceMeta(type):
''' metaclass to remove property attributes so that they can be set during
        class instantiation.
returns: the arg if is an instance or calls the class
'''
def __new__(mcs, cls, bases, clsdict, **kwds):
pass
| 2 | 1 | 12 | 0 | 9 | 3 | 4 | 0.9 | 1 | 2 | 0 | 1 | 1 | 0 | 1 | 14 | 21 | 2 | 10 | 3 | 8 | 9 | 10 | 3 | 8 | 4 | 2 | 3 | 4 |
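A small sketch of what PerformanceMeta does to a class body (illustrative only; the Node class is made up, and the import path is an assumption based on the file_path shown above):

from rdfframework.utilities.metaclasses import PerformanceMeta

class Node(metaclass=PerformanceMeta):
    performance_mode = True
    performance_attrs = ["subject"]

    @property
    def subject(self):
        return self._subject

print(Node.subject)                          # None -- the property was moved aside
print(type(Node.__dict__["__subject__"]))    # <class 'property'>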
143,317 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/utilities/metaclasses.py
|
rdfframework.utilities.metaclasses.RegInstanceMeta
|
class RegInstanceMeta(KeyRegistryMeta, InstanceCheckMeta):
pass
|
class RegInstanceMeta(KeyRegistryMeta, InstanceCheckMeta):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 0 | 2 | 0 | 0 | 0 | 20 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
143,318 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/utilities/metaclasses.py
|
rdfframework.utilities.metaclasses.RegPerformInstanceMeta
|
class RegPerformInstanceMeta(PerformanceMeta, RegInstanceMeta):
pass
|
class RegPerformInstanceMeta(PerformanceMeta, RegInstanceMeta):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 0 | 2 | 0 | 0 | 0 | 21 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 4 | 0 | 0 |
143,319 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/utilities/statistics.py
|
rdfframework.utilities.statistics.DictionaryCounter
|
class DictionaryCounter(object):
"""
    Class will create a statistics summary of all dictionaries passed in.
Pass in dictionary objects with the __call__ method
Args:
method["simple"]: does a simple count on wether the item exists in the
passed in dictionary. "complex" will return aggreated counts
of the items for the dictionary, i.e.
Example for {'x': [1,2,3]}
simple: {'x': 1}
complex: {'x': 3}
        sub_total: the string path to use for aggregating subtotals
            Example for {'x': {'y': 'hello'}} and {'x': {'y': 'bye'}} the
path 'x|y' would create subtotal value for 'hello' and
'bye'
        list_blank: records dictionaries that are missing a value for the path
assigned in this attribute. use the same path format as
'sub_total'
Attributes:
counts: dictionary of summary counts
sub_counts: dictionary of the subtotals
        blank: list of dictionaries with missing properties as specified with
'list_blank'
"""
def __init__(self, method="simple", sub_total=None, list_blank={}):
self.counts = {}
self.sub_counts = {}
self.method = method
self.sub_total = sub_total
self.list_blank = list_blank
self.blank = []
def __call__(self, dict_obj):
kwargs = {'current': {}}
counts = self._count_objs(dict_obj, **{'current': {}})
if self.method == "simple":
self.update_counts(counts['current'])
if self.sub_total:
self.update_subtotals(counts['current'], counts['sub_val'])
self._record_blank(counts['current'], dict_obj)
def _record_blank(self, current, dict_obj):
"""
        records the dictionary in the 'blank' attribute based on the
'list_blank' path
args:
-----
            current: the current dictionary counts
dict_obj: the original dictionary object
"""
if not self.list_blank:
return
if self.list_blank not in current:
self.blank.append(dict_obj)
def _count_objs(self, obj, path=None, **kwargs):
"""
cycles through the object and adds in count values
Args:
-----
obj: the object to parse
path: the current path
kwargs:
-------
current: a dictionary of counts for current call
sub_val: the value to use for subtotal aggregation
"""
sub_val = None
# pdb.set_trace()
if isinstance(obj, dict):
for key, value in obj.items():
if isinstance(value, (list, dict)):
kwargs = self._count_objs(value,
self.make_path(key, path),
**kwargs)
else:
if self.make_path(key, path) == self.sub_total:
# pdb.set_trace()
sub_val = value
kwargs['current'] = self._increment_prop(key,
path,
**kwargs)
elif isinstance(obj, list):
for item in obj:
if isinstance(item, (list, dict)):
kwargs = self._count_objs(item, path, **kwargs)
else:
if path == self.sub_total:
sub_val = item
kwargs['current'] = self._increment_prop(path, **kwargs)
else:
kwargs['current'] = self._increment_prop(path, **kwargs)
if path == self.sub_total:
                sub_val = obj
if kwargs.get('sub_val') is None:
kwargs['sub_val'] = sub_val
return kwargs
def _increment_prop(self, prop, path=None, **kwargs):
"""
increments the property path count
args:
-----
prop: the key for the prop
path: the path to the prop
kwargs:
-------
            current: dictionary count for the current dictionary
"""
new_path = self.make_path(prop, path)
if self.method == 'simple':
counter = kwargs['current']
else:
counter = self.counts
try:
counter[new_path] += 1
except KeyError:
counter[new_path] = 1
return counter
def update_counts(self, current):
"""
updates counts for the class instance based on the current dictionary
counts
args:
-----
current: current dictionary counts
"""
for item in current:
try:
self.counts[item] += 1
except KeyError:
self.counts[item] = 1
def update_subtotals(self, current, sub_key):
"""
updates sub_total counts for the class instance based on the
current dictionary counts
args:
-----
current: current dictionary counts
sub_key: the key/value to use for the subtotals
"""
if not self.sub_counts.get(sub_key):
self.sub_counts[sub_key] = {}
for item in current:
try:
self.sub_counts[sub_key][item] += 1
except KeyError:
self.sub_counts[sub_key][item] = 1
def print(self):
"""
        prints to terminal the summary statistics
"""
print("TOTALS -------------------------------------------")
print(json.dumps(self.counts, indent=4, sort_keys=True))
if self.sub_total:
print("\nSUB TOTALS --- based on '%s' ---------" % self.sub_total)
print(json.dumps(self.sub_counts, indent=4, sort_keys=True))
if self.list_blank:
print("\nMISSING nodes for '%s':" % self.list_blank,
len(self.blank))
@staticmethod
def make_path(prop, path):
"""
makes the path string
        args:
-----
prop: the key for the current object
path: the previous path to the prop
"""
if path:
return _PATH_SEP.join([path, prop])
return prop
@staticmethod
def parse_path(path):
"""
splits the path back to its parts
args:
-----
path: the string path to parse
"""
return path.split(_PATH_SEP)
|
class DictionaryCounter(object):
'''
    Class will create a statistics summary of all dictionaries passed in.
Pass in dictionary objects with the __call__ method
Args:
method["simple"]: does a simple count on wether the item exists in the
passed in dictionary. "complex" will return aggreated counts
of the items for the dictionary, i.e.
Example for {'x': [1,2,3]}
simple: {'x': 1}
complex: {'x': 3}
        sub_total: the string path to use for aggregating subtotals
            Example for {'x': {'y': 'hello'}} and {'x': {'y': 'bye'}} the
path 'x|y' would create subtotal value for 'hello' and
'bye'
        list_blank: records dictionaries that are missing a value for the path
assigned in this attribute. use the same path format as
'sub_total'
Attributes:
counts: dictionary of summary counts
sub_counts: dictionary of the subtotals
        blank: list of dictionaries with missing properties as specified with
'list_blank'
'''
def __init__(self, method="simple", sub_total=None, list_blank={}):
pass
def __call__(self, dict_obj):
pass
def _record_blank(self, current, dict_obj):
'''
        records the dictionary in the 'blank' attribute based on the
'list_blank' path
args:
-----
            current: the current dictionary counts
dict_obj: the original dictionary object
'''
pass
def _count_objs(self, obj, path=None, **kwargs):
'''
cycles through the object and adds in count values
Args:
-----
obj: the object to parse
path: the current path
kwargs:
-------
current: a dictionary of counts for current call
sub_val: the value to use for subtotal aggregation
'''
pass
def _increment_prop(self, prop, path=None, **kwargs):
'''
increments the property path count
args:
-----
prop: the key for the prop
path: the path to the prop
kwargs:
-------
            current: dictionary count for the current dictionary
'''
pass
def update_counts(self, current):
'''
updates counts for the class instance based on the current dictionary
counts
args:
-----
current: current dictionary counts
'''
pass
def update_subtotals(self, current, sub_key):
'''
updates sub_total counts for the class instance based on the
current dictionary counts
args:
-----
current: current dictionary counts
sub_key: the key/value to use for the subtotals
'''
pass
def print(self):
'''
        prints to terminal the summary statistics
'''
pass
@staticmethod
def make_path(prop, path):
'''
makes the path string
        args:
-----
prop: the key for the current object
path: the previous path to the prop
'''
pass
@staticmethod
def parse_path(path):
'''
splits the path back to its parts
args:
-----
path: the string path to parse
'''
pass
| 13 | 9 | 16 | 1 | 9 | 6 | 3 | 0.9 | 1 | 3 | 0 | 0 | 8 | 6 | 10 | 10 | 208 | 29 | 94 | 28 | 81 | 85 | 82 | 26 | 71 | 11 | 1 | 4 | 34 |
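A minimal usage sketch for DictionaryCounter (illustrative only; the import path is an assumption based on the file_path shown above, and the data is made up):

from rdfframework.utilities.statistics import DictionaryCounter

counter = DictionaryCounter(sub_total="type")
counter({"type": "book", "title": "A Title", "author": "Someone"})
counter({"type": "article", "title": "Another Title"})

print(counter.counts)       # {'type': 2, 'title': 2, 'author': 1}
print(counter.sub_counts)   # the same counts broken down by each 'type' value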
143,320 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/tests/connections/test_blazegraph.py
|
tests.connections.test_blazegraph.TestBlazegraphDefaultInit
|
class TestBlazegraphDefaultInit(unittest.TestCase):
def setUp(self):
pass
def test_no_params(self):
blazegraph.log.setLevel(logging.CRITICAL)
self.assertTrue(blazegraph.Blazegraph().ext_url is not None)
def tearDown(self):
pass
|
class TestBlazegraphDefaultInit(unittest.TestCase):
def setUp(self):
pass
def test_no_params(self):
pass
def tearDown(self):
pass
| 4 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 3 | 0 | 3 | 75 | 11 | 3 | 8 | 4 | 4 | 0 | 8 | 4 | 4 | 1 | 2 | 0 | 3 |
143,321 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/tests/datatypes/test_namespaces.py
|
tests.datatypes.test_namespaces.TestUri
|
class TestUri(unittest.TestCase):
def setUp(self):
self.nsm = rdt.RdfNsManager()
self.test_equal_values = ("xsd:string",
"xsd_string",
"XSD:string",
"http://www.w3.org/2001/XMLSchema#string",
"<http://www.w3.org/2001/XMLSchema#string>",
self.nsm.xsd.string)
self.test_not_eq_values = ("xsd:string",
"xsd_strings",
"http://www.w3.org/2001/XMLSchema#string",
rdt.RdfNsManager().xsd.strings)
self.test_outputs_test_uri = self.nsm.xsd.string
self.test_outputs = (('sparql', 'xsd:string'),
('sparql_uri',
'<http://www.w3.org/2001/XMLSchema#string>'),
('clean_uri',
'http://www.w3.org/2001/XMLSchema#string'),
('pyuri', 'xsd_string'))
self.test_no_ns_outputs_test_uri = rdt.Uri("http://test.com/test")
self.test_no_ns_outputs = (('sparql', '<http://test.com/test>'),
('sparql_uri', '<http://test.com/test>'),
('clean_uri', 'http://test.com/test'),
('pyuri',
'pyuri_aHR0cDovL3Rlc3QuY29tLw==_test'))
self.test_no_ns_inputs = ('<http://test.com/test>',
'http://test.com/test',
'pyuri_aHR0cDovL3Rlc3QuY29tLw==_test',
rdt.Uri('<http://test.com/test>'))
def test_equal_inputs(self):
self.assertTrue(all(rdt.Uri(x)==rdt.Uri(self.test_equal_values[0]) \
for x in self.test_equal_values))
def test_not_equal_inputs(self):
self.assertFalse(all(rdt.Uri(x)==rdt.Uri(self.test_not_eq_values[0]) \
for x in self.test_not_eq_values))
def test_uri_as_arg_returns_uri(self):
test_uri = self.nsm.xsd.test
self.assertEqual(id(test_uri), id(rdt.Uri(test_uri)))
def test_ouput_formats(self):
test_uri = self.test_outputs_test_uri
for args in self.test_outputs:
self.assertEqual(getattr(test_uri, args[0]), args[1],
"format='%s'" % args[0])
def test_no_ns_ouput_formats(self):
test_uri = self.test_no_ns_outputs_test_uri
for args in self.test_no_ns_outputs:
self.assertEqual(getattr(test_uri, args[0]), args[1],
"format='%s'" % args[0])
def test_no_ns_inputs(self):
first = rdt.Uri(self.test_no_ns_inputs[0])
for val in self.test_no_ns_inputs:
self.assertEqual(rdt.Uri(val), first,
"\ninput value: %s" % val)
def tearDown(self):
pass
|
class TestUri(unittest.TestCase):
def setUp(self):
pass
def test_equal_inputs(self):
pass
def test_not_equal_inputs(self):
pass
def test_uri_as_arg_returns_uri(self):
pass
def test_ouput_formats(self):
pass
def test_no_ns_ouput_formats(self):
pass
def test_no_ns_inputs(self):
pass
def tearDown(self):
pass
| 9 | 0 | 7 | 0 | 7 | 1 | 1 | 0.09 | 1 | 2 | 2 | 0 | 8 | 7 | 8 | 80 | 64 | 8 | 56 | 23 | 47 | 5 | 31 | 23 | 22 | 2 | 2 | 1 | 11 |
143,322 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/tests/datatypes/test_rdfdatatypes.py
|
tests.datatypes.test_rdfdatatypes.Test_XsdBool
|
class Test_XsdBool(unittest.TestCase):
"""docstring for Test_XsdBool"""
def setUp(self):
self.true_inputs = [True, "1", 1, "true", "True"]
self.false_inputs = [False, "0", 0, "false", "False"]
self.error_inputs = [None, "adfa"]
self.test_sparql_values = [(True,'"true"^^xsd:boolean'),
(False, '"false"^^xsd:boolean')]
self.test_json_values = [(True, 'true'),
(False, 'false')]
def test_true(self):
for value in self.true_inputs:
self.assertTrue(rdt.XsdBoolean(value))
def test_false(self):
for value in self.false_inputs:
self.assertFalse(rdt.XsdBoolean(value))
def test_errors(self):
for value in self.error_inputs:
self.assertRaises(TypeError, lambda: rdt.XsdBoolean(value))
def test_sparql_values(self):
for tup in self.test_sparql_values:
self.assertEqual(rdt.XsdBoolean(tup[0]).sparql, tup[1])
def test_json_values(self):
for tup in self.test_json_values:
self.assertEqual(rdt.XsdBoolean(tup[0]).to_json, tup[1])
|
class Test_XsdBool(unittest.TestCase):
'''docstring for Test_XsdBool'''
def setUp(self):
pass
def test_true(self):
pass
def test_false(self):
pass
def test_errors(self):
pass
def test_sparql_values(self):
pass
def test_json_values(self):
pass
| 7 | 1 | 4 | 0 | 4 | 0 | 2 | 0.04 | 1 | 2 | 1 | 0 | 6 | 3 | 6 | 78 | 30 | 5 | 24 | 15 | 17 | 1 | 22 | 15 | 15 | 2 | 2 | 1 | 11 |
143,323 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/utilities/metaclasses.py
|
rdfframework.utilities.metaclasses.InstanceCheckMeta
|
class InstanceCheckMeta(type):
""" metaclass to check to see if the arg is an instance of the class.
returns: the arg if is an instance or calls the class
"""
def __call__(cls, *args, **kwargs):
# if the argument is already an instance of the cls return the
# argument without instanctiating a new instance
# pdb.set_trace()
try:
if isinstance(args[0], cls):
return args[0]
except IndexError:
pass
# pdb.set_trace()
return super(InstanceCheckMeta, cls).__call__(*args, **kwargs)
|
class InstanceCheckMeta(type):
''' metaclass to check to see if the arg is an instance of the class.
returns: the arg if is an instance or calls the class
'''
def __call__(cls, *args, **kwargs):
pass
| 2 | 1 | 11 | 0 | 7 | 4 | 3 | 0.88 | 1 | 2 | 0 | 1 | 1 | 0 | 1 | 14 | 16 | 1 | 8 | 2 | 6 | 7 | 8 | 2 | 6 | 3 | 2 | 2 | 3 |
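A minimal sketch of the pass-through behaviour InstanceCheckMeta provides (illustrative only; the Endpoint class is made up, and the import path is an assumption based on the file_path shown above):

from rdfframework.utilities.metaclasses import InstanceCheckMeta

class Endpoint(metaclass=InstanceCheckMeta):
    def __init__(self, url):
        self.url = url

first = Endpoint("http://localhost:9999/sparql")
second = Endpoint(first)     # already an Endpoint, so the same object is returned
print(first is second)       # True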
143,324 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/tests/datatypes/test_rdfdatatypes.py
|
tests.datatypes.test_rdfdatatypes.Test_XsdString
|
class Test_XsdString(unittest.TestCase):
def setUp(self):
self.test_str_values = ["1",
"how are you",
rdt.XsdString("xsd instance")]
self.test_non_str_values = [1,
{"lang":"en", "value":"Hello"},
datetime.datetime.now()]
self.test_sparql_values = [({"lang":"en", "value":"lang_dict"},
'"lang_dict"@en'),
({"value": "dict"}, '"dict"^^xsd:string'),
("str_input", '"str_input"^^xsd:string'),
(1, '"1"^^xsd:string')]
self.test_lang_options = [{'type': 'literal',
'value': 'test lang string',
'xml:lang': 'en'},
{'type': 'literal',
'value': 'test lang string',
'lang': 'en'}]
self.test_json_values = [({"lang":"en", "value":"lang_dict"},
'lang_dict'),
({"value": "dict"}, 'dict'),
("str_input", 'str_input'),
(1, '1')]
def test_str_instance(self):
''' Does the XsdClass test as an instance of the python str class '''
self.assertTrue(isinstance(self.test_str_values[-1],
rdt.XsdString))
def test_str_equality(self):
for value in self.test_str_values:
self.assertEqual(value, rdt.XsdString(value))
self.assertEqual(rdt.XsdString(value), value)
def test_sparql_values(self):
for tup in self.test_sparql_values:
self.assertEqual(rdt.XsdString(tup[0]).sparql, tup[1])
def test_lang_equality(self):
""" Tests to see if the value paramaters work of lang creation and then
tests for equality between each option """
values = [rdt.XsdString(value) for value in self.test_lang_options]
self.assertTrue(len(set(values)) == 1)
def test_json_values(self):
for tup in self.test_json_values:
self.assertEqual(rdt.XsdString(tup[0]).to_json, tup[1])
|
class Test_XsdString(unittest.TestCase):
def setUp(self):
pass
def test_str_instance(self):
''' Does the XsdClass test as an instance of the python str class '''
pass
def test_str_equality(self):
pass
def test_sparql_values(self):
pass
def test_lang_equality(self):
        ''' Tests whether the value parameters work for lang creation and then
            tests for equality between each option '''
pass
def test_json_values(self):
pass
| 7 | 2 | 7 | 0 | 7 | 1 | 2 | 0.07 | 1 | 3 | 1 | 0 | 6 | 3 | 6 | 78 | 50 | 7 | 40 | 14 | 33 | 3 | 22 | 14 | 15 | 2 | 2 | 1 | 9 |
143,325 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/tests/test_framework.py
|
tests.test_framework.TestRdfFramework
|
class TestRdfFramework(unittest.TestCase):
def setUp(self):
pass
def test_init_raises_no_params(self):
self.assertRaises(OSError, framework.RdfFramework)
def tearDown(self):
pass
|
class TestRdfFramework(unittest.TestCase):
def setUp(self):
pass
def test_init_raises_no_params(self):
pass
def tearDown(self):
pass
| 4 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 2 | 1 | 0 | 3 | 0 | 3 | 75 | 10 | 3 | 7 | 4 | 3 | 0 | 7 | 4 | 3 | 1 | 2 | 0 | 3 |
143,326 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/tests/test_framework.py
|
tests.test_framework.TestRdfFrameworkSingleton
|
class TestRdfFrameworkSingleton(unittest.TestCase):
def setUp(self):
self.rdf_singleton = framework.RdfFrameworkSingleton("", tuple(), dict())
def test_init(self):
new_singleton = framework.RdfFrameworkSingleton("", tuple(), dict())
self.assertIsInstance(self.rdf_singleton,
framework.RdfFrameworkSingleton)
def tearDown(self):
pass
|
class TestRdfFrameworkSingleton(unittest.TestCase):
def setUp(self):
pass
def test_init(self):
pass
def tearDown(self):
pass
| 4 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 1 | 3 | 1 | 0 | 3 | 1 | 3 | 75 | 12 | 3 | 9 | 6 | 5 | 0 | 8 | 6 | 4 | 1 | 2 | 0 | 3 |
143,327 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/tests/test_frameworkutilities.py
|
tests.test_frameworkutilities.TestDeletePropertyClass
|
class TestDeletePropertyClass(unittest.TestCase):
def test_delete_property(self):
delete_property = DeleteProperty()
self.assertTrue(delete_property.delete)
|
class TestDeletePropertyClass(unittest.TestCase):
def test_delete_property(self):
pass
| 2 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 73 | 5 | 1 | 4 | 3 | 2 | 0 | 4 | 3 | 2 | 1 | 2 | 0 | 1 |
143,328 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/tests/test_frameworkutilities.py
|
tests.test_frameworkutilities.TestIri
|
class TestIri(unittest.TestCase):
def test_iri(self):
self.assertEqual(iri("https://schema.org/Person"),
"<https://schema.org/Person>")
self.assertEqual(iri("<obi:recipient>"),
"<obi:recipient>")
def test_iri_errors(self):
#self.assertRaises(TypeError, iri, None)
self.assertEqual(iri(""),
"<>")
def test_iri_question(self):
self.assertTrue(iri("?"))
def test_iri_square_bracket(self):
self.assertTrue(iri("["))
|
class TestIri(unittest.TestCase):
def test_iri(self):
pass
def test_iri_errors(self):
pass
def test_iri_question(self):
pass
def test_iri_square_bracket(self):
pass
| 5 | 0 | 3 | 0 | 3 | 0 | 1 | 0.08 | 1 | 0 | 0 | 0 | 4 | 0 | 4 | 76 | 18 | 4 | 13 | 5 | 8 | 1 | 10 | 5 | 5 | 1 | 2 | 0 | 4 |
143,329 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/tests/test_frameworkutilities.py
|
tests.test_frameworkutilities.TestNotInFormClass
|
class TestNotInFormClass(unittest.TestCase):
def test_notinform(self):
notInForm = NotInFormClass()
self.assertTrue(notInForm.notInForm)
|
class TestNotInFormClass(unittest.TestCase):
def test_notinform(self):
pass
| 2 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 73 | 5 | 1 | 4 | 3 | 2 | 0 | 4 | 3 | 2 | 1 | 2 | 0 | 1 |
143,330 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/tests/test_frameworkutilities.py
|
tests.test_frameworkutilities.Test_cbool
|
class Test_cbool(unittest.TestCase):
def test_cbool(self):
self.assertTrue(cbool(True))
self.assertFalse(cbool(False))
self.assertEqual(cbool(None), None)
self.assertEqual(cbool(None, False), None)
def test_cbool_str_true(self):
self.assertTrue(cbool('true'))
self.assertTrue(cbool('1'))
self.assertTrue(cbool('t'))
self.assertTrue(cbool('y'))
self.assertTrue(cbool('yes'))
def test_cbool_str_false(self):
self.assertFalse(cbool('false'))
self.assertFalse(cbool('0'))
self.assertFalse(cbool('n'))
self.assertFalse(cbool('no'))
|
class Test_cbool(unittest.TestCase):
def test_cbool(self):
pass
def test_cbool_str_true(self):
pass
def test_cbool_str_false(self):
pass
| 4 | 0 | 5 | 0 | 5 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 3 | 0 | 3 | 75 | 20 | 3 | 17 | 4 | 13 | 0 | 17 | 4 | 13 | 1 | 2 | 0 | 3 |
143,331 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/tests/test_frameworkutilities.py
|
tests.test_frameworkutilities.Test_clean_iri
|
class Test_clean_iri(unittest.TestCase):
def test_clean_iri(self):
self.assertEqual(
clean_iri("<http://example.info/test>"),
"http://example.info/test")
def test_clean_iri_fail(self):
self.assertEqual(
clean_iri("<http://example.info"),
"<http://example.info")
|
class Test_clean_iri(unittest.TestCase):
def test_clean_iri(self):
pass
def test_clean_iri_fail(self):
pass
| 3 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 2 | 0 | 2 | 74 | 11 | 2 | 9 | 3 | 6 | 0 | 5 | 3 | 2 | 1 | 2 | 0 | 2 |
143,332 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/tests/test_frameworkutilities.py
|
tests.test_frameworkutilities.Test_fw_config
|
class Test_fw_config(unittest.TestCase):
def test_fw_config_is_none(self):
self.assertEqual(fw_config(),
"framework not initialized")
def test_fw_config_kw(self):
config = DictClass({"host": "local"})
self.assertEqual(fw_config(config=config),
config)
|
class Test_fw_config(unittest.TestCase):
def test_fw_config_is_none(self):
pass
def test_fw_config_kw(self):
pass
| 3 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 2 | 0 | 2 | 74 | 10 | 2 | 8 | 4 | 5 | 0 | 6 | 4 | 3 | 1 | 2 | 0 | 2 |
143,333 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/tests/processors/__init__.py
|
tests.processors.Test_csv_to_multi_prop_processor
|
class Test_csv_to_multi_prop_processor(unittest.TestCase):
def setUp(self):
self.tags = {}
def test_load_mode(self):
self.tags["prop"] = {"new": "orange"}
self.tags["dataValue"] = ["red", "green", "blue", "yellow"]
result = csv_to_multi_prop_processor(None, self.tags, mode="load")
self.assertEqual(
result,
self.tags)
|
class Test_csv_to_multi_prop_processor(unittest.TestCase):
def setUp(self):
pass
def test_load_mode(self):
pass
| 3 | 0 | 5 | 0 | 5 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 2 | 1 | 2 | 74 | 12 | 2 | 10 | 5 | 7 | 0 | 8 | 5 | 5 | 1 | 2 | 0 | 2 |
143,334 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/tests/test_frameworkutilities.py
|
tests.test_frameworkutilities.Test_make_list
|
class Test_make_list(unittest.TestCase):
def test_make_list_dict(self):
test_coordinates = { "x": 1, "y": 2}
self.assertEqual(make_list(test_coordinates),
[test_coordinates,])
def test_make_list_list(self):
test_list = [1,2,3,4]
self.assertEqual(make_list(test_list),
test_list)
def test_make_list_str(self):
test_str = "this is a string"
self.assertEqual(make_list(test_str),
[test_str,])
|
class Test_make_list(unittest.TestCase):
def test_make_list_dict(self):
pass
def test_make_list_list(self):
pass
def test_make_list_str(self):
pass
| 4 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 3 | 0 | 3 | 75 | 16 | 3 | 13 | 7 | 9 | 0 | 10 | 7 | 6 | 1 | 2 | 0 | 3 |
143,335 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/datatypes/xsdtypes.py
|
rdfframework.datatypes.xsdtypes.BlankNode
|
class BlankNode(BaseRdfDataType, metaclass=RegPerformInstanceMeta):
""" blankNode URI/IRI class for working with RDF data """
class_type = "BlankNode"
type = "bnode"
es_type = "text"
performance_mode = True
performance_attrs = PERFORMANCE_ATTRS
def __init__(self, value=None):
if value:
if not value.startswith("_:"):
self.value = "_:" + value
else:
self.value = value
else:
self.value = "_:" + new_id()
if self.performance_mode:
for attr in self.performance_attrs:
setattr(self, attr, self.value)
self.hash_val = hash(self.value)
def __hash__(self):
return self.hash_val
def __repr__(self):
return self.value
def __str__(self):
return self.value[2:]
def __eq__(self, other):
try:
return self.value == other.value
except AttributeError:
try:
return self.value == other.subject.value
except AttributeError:
return self.value == other
@property
def rdflib(self):
""" Returns the rdflibURI reference """
return rdflib.BNode(self.value[2:])
|
class BlankNode(BaseRdfDataType, metaclass=RegPerformInstanceMeta):
''' blankNode URI/IRI class for working with RDF data '''
def __init__(self, value=None):
pass
def __hash__(self):
pass
def __repr__(self):
pass
def __str__(self):
pass
def __eq__(self, other):
pass
@property
def rdflib(self):
        ''' Returns the rdflib URI reference '''
pass
| 8 | 2 | 5 | 0 | 5 | 0 | 2 | 0.06 | 2 | 1 | 0 | 0 | 6 | 2 | 6 | 38 | 43 | 6 | 35 | 16 | 27 | 2 | 32 | 15 | 25 | 5 | 5 | 2 | 12 |
143,336 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/utilities/frameworkutilities.py
|
rdfframework.utilities.frameworkutilities.DataStatus
|
class DataStatus(object):
""" Checks and updates the data status from the triplestore
args:
group: the datagroup for statuses
"""
ln = "%s-DataStatus" % MNAME
log_level = logging.DEBUG
def __init__(self, group, conn, **kwargs):
self.group = group
self.conn = conn
def get(self, status_item):
""" queries the database and returns that status of the item.
args:
status_item: the name of the item to check
"""
lg = logging.getLogger("%s.%s" % (self.ln, inspect.stack()[0][3]))
lg.setLevel(self.log_level)
sparql = '''
SELECT ?loaded
WHERE {{
kdr:{0} kds:{1} ?loaded .
}}'''
value = self.conn.query(sparql=sparql.format(self.group, status_item))
if len(value) > 0 and \
cbool(value[0].get('loaded',{}).get("value",False)):
return True
else:
return False
def set(self, status_item, status):
""" sets the status item to the passed in paramaters
args:
                status_item: the name of the item to set
status: boolean value to set the item
"""
lg = logging.getLogger("%s.%s" % (self.ln, inspect.stack()[0][3]))
lg.setLevel(self.log_level)
sparql = '''
DELETE {{
kdr:{0} kds:{1} ?o
}}
INSERT {{
kdr:{0} kds:{1} "{2}"^^xsd:boolean
}}
WHERE {{
OPTIONAL {{ kdr:{0} kds:{1} ?o }}
}}'''
return self.conn.query(sparql=sparql.format(self.group,
status_item,
str(status).lower()),
mode='update')
|
class DataStatus(object):
''' Checks and updates the data status from the triplestore
args:
group: the datagroup for statuses
'''
def __init__(self, group, conn, **kwargs):
pass
def get(self, status_item):
        ''' queries the database and returns the status of the item.
args:
status_item: the name of the item to check
'''
pass
def set(self, status_item, status):
        ''' sets the status item to the passed in parameters
args:
                status_item: the name of the item to set
status: boolean value to set the item
'''
pass
| 4 | 3 | 16 | 1 | 11 | 3 | 1 | 0.35 | 1 | 1 | 0 | 0 | 3 | 2 | 3 | 3 | 59 | 9 | 37 | 13 | 33 | 13 | 19 | 13 | 15 | 2 | 1 | 1 | 4 |
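For illustration only, a small sketch of how the SPARQL template used by DataStatus.set() above expands once formatted; the group name and status item are hypothetical.
status_template = '''
    DELETE {{
        kdr:{0} kds:{1} ?o
    }}
    INSERT {{
        kdr:{0} kds:{1} "{2}"^^xsd:boolean
    }}
    WHERE {{
        OPTIONAL {{ kdr:{0} kds:{1} ?o }}
    }}'''

# hypothetical group "default_group" and status item "loaded"
print(status_template.format("default_group", "loaded", str(True).lower()))
# the doubled braces collapse to single braces and "{2}" becomes "true"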
143,337 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/utilities/colors.py
|
rdfframework.utilities.colors.__ColorsBaseMeta__
|
class __ColorsBaseMeta__(type):
""" base meta for color calls """
def __call__(cls, *args, **kwargs):
return getattr(cls, args[0])
|
class __ColorsBaseMeta__(type):
''' base meta for color calls '''
def __call__(cls, *args, **kwargs):
pass
| 2 | 1 | 2 | 0 | 2 | 0 | 1 | 0.33 | 1 | 0 | 0 | 3 | 1 | 0 | 1 | 14 | 4 | 0 | 3 | 2 | 1 | 1 | 3 | 2 | 1 | 1 | 2 | 0 | 1 |
143,338 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/utilities/colors.py
|
rdfframework.utilities.colors.__ColorsMeta__
|
class __ColorsMeta__(__ColorsBaseMeta__):
""" adds colors to the colors class """
def __new__(mcs, name, bases, clsdict, **kwargs):
new_dict = {key.lower(): __color__(value)
for key, value in Fore.__dict__.items()}
new_dict.update({key: __color__(value)
for key, value in clsdict.items()
if not key.startswith("_")})
clsdict.update(new_dict)
return super().__new__(mcs, name, bases, clsdict)
def __getattr__(cls, attr):
return cls.__dict__.get(attr.lower(), __color__())
def __setattr__(cls, attr, value):
if value in dir(cls):
super().__setattr__(attr, getattr(cls, value))
else:
super().__setattr__(attr, __color__(value))
@property
def print_help(cls):
"""prints the documentation for the cls """
def print_colors(color_cls):
for attr, value in sorted(color_cls.__dict__.items()):
if not attr.startswith("_"):
print(value(attr))
print(cls.__doc__)
print(" ***** FORE COLORS ****\n")
# pdb.set_trace()
print_colors(cls)
print("\n **** BACKGROUND COLORS ****\n")
print_colors(__backgrounds__)
print("\n **** STYLES ****\n")
print_colors(__styles__)
@property
def turn_on(cls):
global __colors_on__
__colors_on__ = True
@property
def turn_off(cls):
global __colors_on__
__colors_on__ = False
|
class __ColorsMeta__(__ColorsBaseMeta__):
''' adds colors to the colors class '''
def __new__(mcs, name, bases, clsdict, **kwargs):
pass
def __getattr__(cls, attr):
pass
def __setattr__(cls, attr, value):
pass
@property
def print_help(cls):
'''prints the documentation for the cls '''
pass
def print_colors(color_cls):
pass
@property
def turn_on(cls):
pass
@property
def turn_off(cls):
pass
| 11 | 2 | 6 | 0 | 5 | 0 | 1 | 0.08 | 1 | 4 | 3 | 1 | 6 | 0 | 6 | 20 | 45 | 5 | 37 | 15 | 24 | 3 | 30 | 12 | 20 | 3 | 3 | 2 | 10 |
143,339 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/rml/processor.py
|
rdfframework.rml.processor.SPARQLProcessor
|
class SPARQLProcessor(Processor):
"""SPARQLProcessor provides a RML Processor for external SPARQL endpoints"""
rdf_name = Uri("kdr:RmlSPARQLProcessor")
def __init__(self, rml_rules, **kwargs):
# if "rml_rules" in kwargs:
# rml_rules = kwargs.pop("rml_rules")
super(SPARQLProcessor, self).__init__(rml_rules, **kwargs)
__set_prefix__()
#! self.triplestore = kwargs.get("triplestore", self.__graph__())
# Sets defaults
self.limit, self.offset = 5000, 0
self.data_query = self.rml.value(
subject=NS_MGR.kds.DataQuery.rdflib,
predicate=NS_MGR.rml.query.rdflib)
def __get_bindings__(self, sparql, output_format):
"""Internal method queries triplestore or remote
sparql endpont and returns the bindings
Args:
----
sparql: String of SPARQL query
            output_format: String specifying the output format
"""
return self.ext_conn.query(sparql,
rtn_format=output_format,
debug=False)
def run(self, **kwargs):
kwargs['output'] = self.__graph__()
if "limit" in kwargs:
self.limit = kwargs.get('limit')
if "offset" in kwargs:
self.offset = kwargs.get('offset')
start = datetime.datetime.now()
if kwargs.get("no_json"):
self.use_json_qry = False
else:
self.use_json_qry = self.default_use_json_qry
if self.use_json_qry:
if not kwargs.get('dataset'):
if self.data_query:
sparql = PREFIX + self.data_query.format(**kwargs)
data = self.ext_conn.query(sparql)
else:
try:
data = get_all_item_data(
items=kwargs[kwargs['iri_key']],
conn=self.ext_conn,
output='json',
debug=False)
log.debug("data triple count: %s", len(data))
except KeyError:
raise KeyError("missing kwarg['iri_key'] defining which"
" kwarg to use that contians the subject"
" uri used to query for data. Example: "
"iri_key='instance_iri, instance_iri="
"<http://some.iri>")
kwargs['dataset'] = RdfDataset(data)
# pdb.set_trace()
# start = datetime.datetime.now()
super(SPARQLProcessor, self).run(**kwargs)
# print("query time: ", (datetime.datetime.now() - start))
self.output = kwargs['output']
return kwargs['output']
def execute(self, triple_map, output, **kwargs):
"""Execute """
subjects = []
if NS_MGR.ql.JSON.rdflib in \
triple_map.logicalSource.reference_formulations:
output_format = "json"
else:
output_format = "xml"
if 'limit' not in kwargs:
kwargs['limit'] = self.limit
if 'offset' not in kwargs:
kwargs['offset'] = self.offset
# log.debug("triple_map.logicalSource: \n%s",
# pprint.pformat(triple_map.logicalSource.__dict__))
iterator = str(triple_map.logicalSource.iterator)
start = datetime.datetime.now()
key, json_query = None, None
# pdb.set_trace()
if hasattr(triple_map.logicalSource, 'json_query') \
and self.use_json_qry:
key = kwargs.get(str(triple_map.logicalSource.json_key))
if not key:
key =[val for val in kwargs.values() \
if isinstance(val, rdflib.URIRef)][0]
json_query = triple_map.logicalSource.json_query
bindings = kwargs['dataset'].json_qry(json_query, {'$': key})
else:
sparql = PREFIX + triple_map.logicalSource.query.format(**kwargs)
bindings = self.__get_bindings__(sparql, output_format)
for binding in bindings:
if key:
try:
entity_raw = binding.subject.rdflib
except AttributeError:
entity_raw = binding
else:
entity_raw = binding.get(iterator)
if isinstance(entity_raw, (rdflib.URIRef,
rdflib.BNode,
BaseRdfDataType)):
entity = entity_raw
else:
raw_value = entity_raw.get('value')
if entity_raw.get('type').startswith('bnode'):
entity = BlankNode(raw_value)
else:
entity = Uri(raw_value)
if triple_map.subjectMap.class_ is not None:
sub = entity
if isinstance(entity, BaseRdfDataType):
sub = entity.rdflib
output.add((sub,
NS_MGR.rdf.type.rdflib,
triple_map.subjectMap.class_))
# pdb.set_trace()
for pred_obj_map in triple_map.predicateObjectMap:
predicate = pred_obj_map.predicate
kwargs[iterator] = entity
if pred_obj_map.parentTriplesMap is not None:
self.__handle_parents__(
output=output,
parent_map=pred_obj_map.parentTriplesMap,
subject=entity,
predicate=predicate,
**kwargs)
continue
if pred_obj_map.reference is not None:
ref_key = str(pred_obj_map.reference)
if pred_obj_map.json_query:
# if pred_obj_map.json_query =="$.schema_logo":
# pdb.set_trace()
if ref_key in binding:
for item in binding[ref_key]:
output.add((entity,
predicate,
item.rdflib))
continue
else:
if ref_key in binding:
object_ = __get_object__(
binding[ref_key])
output.add((entity, predicate, object_))
continue
if pred_obj_map.constant is not None:
if isinstance(entity, BaseRdfDataType):
entity = entity.rdflib
output.add(
(entity, predicate, pred_obj_map.constant))
continue
json_query = None
if pred_obj_map.json_query and self.use_json_qry:
json_query = pred_obj_map.json_query
start = datetime.datetime.now()
# pdb.set_trace()
# if str(pred_obj_map.predicate) == "http://purl.org/dc/terms/creator":
# pdb.set_trace()
pre_obj_bindings = kwargs['dataset'].json_qry(json_query,
{'$': entity})
else:
sparql_query = PREFIX + pred_obj_map.query.format(**kwargs)
pre_obj_bindings = self.__get_bindings__(sparql_query,
output_format)
for row in pre_obj_bindings:
if json_query and self.use_json_qry:
if isinstance(entity, BaseRdfDataType):
entity = entity.rdflib
output.add((entity, predicate, row.rdflib))
else:
object_ = __get_object__(row)
if object_ is None:
continue
if isinstance(entity, BaseRdfDataType):
entity = entity.rdflib
output.add((entity, predicate, object_))
subjects.append(entity)
return subjects
|
class SPARQLProcessor(Processor):
    '''SPARQLProcessor provides an RML Processor for external SPARQL endpoints'''
def __init__(self, rml_rules, **kwargs):
pass
def __get_bindings__(self, sparql, output_format):
        '''Internal method queries the triplestore or remote
        SPARQL endpoint and returns the bindings
Args:
----
sparql: String of SPARQL query
            output_format: String specifying the output format
'''
pass
def run(self, **kwargs):
pass
def execute(self, triple_map, output, **kwargs):
'''Execute '''
pass
| 5 | 3 | 45 | 2 | 38 | 6 | 10 | 0.16 | 1 | 8 | 3 | 0 | 4 | 6 | 4 | 41 | 189 | 11 | 153 | 34 | 148 | 25 | 113 | 33 | 108 | 28 | 4 | 6 | 38 |
143,340 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/utilities/colors.py
|
rdfframework.utilities.colors.__StylesMeta__
|
class __StylesMeta__(__ColorsBaseMeta__):
""" addes styles to the __styles__ clasee """
def __new__(mcs, name, bases, clsdict, **kwargs):
# add the __styles__ from colorama
new_dict = {key.lower(): __style__(value)
for key, value in Style.__dict__.items()}
# add single letter abbreviation
new_dict.update({key.lower()[0]: __style__(value)
for key, value in Style.__dict__.items()})
# add __styles__ from the class definition
new_dict.update({key: __style__(value)
for key, value in clsdict.items()
if not key.startswith("_")})
# add single letter abbreviation
new_dict.update({key[0]: __style__(value)
for key, value in clsdict.items()
if not key.startswith("_")})
clsdict.update(new_dict)
return super().__new__(mcs, name, bases, clsdict)
def __getattr__(cls, attr):
return cls.__dict__.get(attr.lower(), __background__())
def __setattr__(cls, attr, value):
if value in dir(cls):
super().__setattr__(attr, getattr(cls, value))
else:
super().__setattr__(attr, __style__(value))
|
class __StylesMeta__(__ColorsBaseMeta__):
    ''' adds styles to the __styles__ class '''
def __new__(mcs, name, bases, clsdict, **kwargs):
pass
def __getattr__(cls, attr):
pass
def __setattr__(cls, attr, value):
pass
| 4 | 1 | 8 | 0 | 7 | 1 | 1 | 0.24 | 1 | 3 | 2 | 1 | 3 | 0 | 3 | 17 | 27 | 1 | 21 | 5 | 17 | 5 | 14 | 5 | 10 | 2 | 3 | 1 | 4 |
143,341 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/utilities/colors.py
|
rdfframework.utilities.colors.__background__
|
class __background__(__ColorBase__):
""" returns a string formatted with the instance of the defined
background """
def __getattr__(self, attr):
return __style__(getattr(__styles__, attr).value, self.value)
def __setattr__(self, attr, value):
if attr == 'value':
self.__dict__[attr] = value
else:
setattr(__styles__, attr, value)
|
class __background__(__ColorBase__):
''' returns a string formatted with the instance of the defined
background '''
def __getattr__(self, attr):
pass
def __setattr__(self, attr, value):
pass
| 3 | 1 | 4 | 0 | 4 | 0 | 2 | 0.25 | 1 | 2 | 2 | 0 | 2 | 0 | 2 | 5 | 10 | 0 | 8 | 3 | 5 | 2 | 7 | 3 | 4 | 2 | 1 | 1 | 3 |
143,342 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/utilities/colors.py
|
rdfframework.utilities.colors.__backgrounds__
|
class __backgrounds__(metaclass=__BackgroundsMeta__):
pass
|
class __backgrounds__(metaclass=__BackgroundsMeta__):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 17 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 4 | 0 | 0 |
143,343 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/utilities/colors.py
|
rdfframework.utilities.colors.__color__
|
class __color__(__ColorBase__):
""" returns a string formatted for the instance of the defined color """
def __getattr__(self, attr):
return __background__(getattr(__backgrounds__, attr).value, self.value)
def __setattr__(self, attr, value):
if attr == 'value':
self.__dict__[attr] = value
else:
setattr(__backgrounds__, attr, value)
|
class __color__(__ColorBase__):
''' returns a string formatted for the instance of the defined color '''
def __getattr__(self, attr):
pass
def __setattr__(self, attr, value):
pass
| 3 | 1 | 4 | 0 | 4 | 0 | 2 | 0.13 | 1 | 2 | 2 | 0 | 2 | 0 | 2 | 5 | 11 | 2 | 8 | 3 | 5 | 1 | 7 | 3 | 4 | 2 | 1 | 1 | 3 |
143,344 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/utilities/mapreduce.py
|
rdfframework.utilities.mapreduce.SimpleMapReduce
|
class SimpleMapReduce(object):
def __init__(self, map_func, reduce_func, num_workers=None):
"""
map_func
Function to map inputs to intermediate data. Takes as
argument one input value and returns a tuple with the key
and a value to be reduced.
reduce_func
Function to reduce partitioned version of intermediate data
to final output. Takes as argument a key as produced by
map_func and a sequence of the values associated with that
key.
num_workers
The number of workers to create in the pool. Defaults to the
number of CPUs available on the current host.
"""
self.map_func = map_func
self.reduce_func = reduce_func
self.pool = multiprocessing.Pool(num_workers)
def partition(self, mapped_values):
"""Organize the mapped values by their key.
Returns an unsorted sequence of tuples with a key and a sequence of values.
"""
partitioned_data = collections.defaultdict(list)
for key, value in mapped_values:
partitioned_data[key].append(value)
return partitioned_data.items()
def __call__(self, inputs, chunksize=1):
"""Process the inputs through the map and reduce functions given.
inputs
An iterable containing the input data to be processed.
chunksize=1
The portion of the input data to hand to each worker. This
can be used to tune performance during the mapping phase.
"""
map_responses = self.pool.map(self.map_func, inputs, chunksize=chunksize)
partitioned_data = self.partition(itertools.chain(*map_responses))
reduced_values = self.pool.map(self.reduce_func, partitioned_data)
return reduced_values
|
class SimpleMapReduce(object):
def __init__(self, map_func, reduce_func, num_workers=None):
'''
map_func
Function to map inputs to intermediate data. Takes as
argument one input value and returns a tuple with the key
and a value to be reduced.
reduce_func
Function to reduce partitioned version of intermediate data
to final output. Takes as argument a key as produced by
map_func and a sequence of the values associated with that
key.
num_workers
The number of workers to create in the pool. Defaults to the
number of CPUs available on the current host.
'''
pass
def partition(self, mapped_values):
'''Organize the mapped values by their key.
Returns an unsorted sequence of tuples with a key and a sequence of values.
'''
pass
def __call__(self, inputs, chunksize=1):
'''Process the inputs through the map and reduce functions given.
inputs
An iterable containing the input data to be processed.
chunksize=1
The portion of the input data to hand to each worker. This
can be used to tune performance during the mapping phase.
'''
pass
| 4 | 3 | 15 | 2 | 5 | 8 | 1 | 1.6 | 1 | 2 | 0 | 0 | 3 | 3 | 3 | 3 | 49 | 10 | 15 | 12 | 11 | 24 | 15 | 12 | 11 | 2 | 1 | 1 | 4 |
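A hedged usage sketch for the SimpleMapReduce class above, assuming the rdfframework package is importable; count_words, sum_counts, and the sample lines are invented for illustration.
from rdfframework.utilities.mapreduce import SimpleMapReduce

def count_words(line):
    # map step: emit (word, 1) pairs for a single input line
    return [(word.lower(), 1) for word in line.split()]

def sum_counts(item):
    # reduce step: receives (word, [1, 1, ...]) and totals the occurrences
    word, occurrences = item
    return (word, sum(occurrences))

if __name__ == "__main__":
    lines = ["the quick brown fox", "the lazy dog", "the fox"]
    mapper = SimpleMapReduce(count_words, sum_counts)
    print(sorted(mapper(lines)))    # e.g. [('brown', 1), ('dog', 1), ...]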
143,345 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/utilities/frameworkutilities.py
|
rdfframework.utilities.frameworkutilities.RdfJsonEncoder
|
class RdfJsonEncoder(json.JSONEncoder):
# def __init__(self, *args, **kwargs):
# if kwargs.get("uri_format"):
# self.uri_format = kwargs.pop("uri_format")
# else:
# self.uri_format = 'sparql_uri'
# super(RdfJsonEncoder, self).__init__(*args, **kwargs)
uri_format = 'sparql_uri'
def default(self, obj):
if isinstance(obj, RdfBaseClass):
obj.uri_format = self.uri_format
temp = obj.conv_json(self.uri_format)
return temp
elif isinstance(obj, RdfDataset):
return obj._format(self.uri_format)
# Let the base class default method raise the TypeError
return json.JSONEncoder.default(self, obj)
|
class RdfJsonEncoder(json.JSONEncoder):
def default(self, obj):
pass
| 2 | 0 | 9 | 0 | 8 | 1 | 3 | 0.7 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 5 | 18 | 1 | 10 | 4 | 8 | 7 | 9 | 4 | 7 | 3 | 2 | 1 | 3 |
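RdfJsonEncoder above relies on the standard json.JSONEncoder.default() hook; a generic, self-contained sketch of that pattern, with Point and PointEncoder as invented stand-ins.
import json

class Point(object):
    def __init__(self, x, y):
        self.x, self.y = x, y

class PointEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, Point):
            return {"x": obj.x, "y": obj.y}   # JSON-serializable substitute
        # fall through so unsupported types still raise TypeError
        return json.JSONEncoder.default(self, obj)

print(json.dumps({"origin": Point(0, 0)}, cls=PointEncoder))
# -> {"origin": {"x": 0, "y": 0}}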
143,346 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/utilities/colors.py
|
rdfframework.utilities.colors.__style__
|
class __style__(__ColorBase__):
""" returns a string formatted with the instance of the defined style """
def __getattr__(self, attr):
return None
def __setattr__(self, attr, value):
if attr == 'value':
self.__dict__[attr] = value
else:
setattr(self.__class__, attr, value)
|
class __style__(__ColorBase__):
''' returns a string formatted with the instance of the defined style '''
def __getattr__(self, attr):
pass
def __setattr__(self, attr, value):
pass
| 3 | 1 | 4 | 0 | 4 | 0 | 2 | 0.13 | 1 | 0 | 0 | 0 | 2 | 0 | 2 | 5 | 9 | 0 | 8 | 3 | 5 | 1 | 7 | 3 | 4 | 2 | 1 | 1 | 3 |
143,347 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/utilities/colors.py
|
rdfframework.utilities.colors.__styles__
|
class __styles__(metaclass=__StylesMeta__):
underline = '\033[4m'
bold = '\033[1m'
|
class __styles__(metaclass=__StylesMeta__):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 17 | 3 | 0 | 3 | 3 | 2 | 0 | 3 | 3 | 2 | 0 | 4 | 0 | 0 |