Dataset schema (per-column type and observed min/max):

| Column | Type | Min | Max |
|---|---|---|---|
| id | int64 | 0 | 843k |
| repository_name | string (length) | 7 | 55 |
| file_path | string (length) | 9 | 332 |
| class_name | string (length) | 3 | 290 |
| human_written_code | string (length) | 12 | 4.36M |
| class_skeleton | string (length) | 19 | 2.2M |
| total_program_units | int64 | 1 | 9.57k |
| total_doc_str | int64 | 0 | 4.2k |
| AvgCountLine | float64 | 0 | 7.89k |
| AvgCountLineBlank | float64 | 0 | 300 |
| AvgCountLineCode | float64 | 0 | 7.89k |
| AvgCountLineComment | float64 | 0 | 7.89k |
| AvgCyclomatic | float64 | 0 | 130 |
| CommentToCodeRatio | float64 | 0 | 176 |
| CountClassBase | float64 | 0 | 48 |
| CountClassCoupled | float64 | 0 | 589 |
| CountClassCoupledModified | float64 | 0 | 581 |
| CountClassDerived | float64 | 0 | 5.37k |
| CountDeclInstanceMethod | float64 | 0 | 4.2k |
| CountDeclInstanceVariable | float64 | 0 | 299 |
| CountDeclMethod | float64 | 0 | 4.2k |
| CountDeclMethodAll | float64 | 0 | 4.2k |
| CountLine | float64 | 1 | 115k |
| CountLineBlank | float64 | 0 | 9.01k |
| CountLineCode | float64 | 0 | 94.4k |
| CountLineCodeDecl | float64 | 0 | 46.1k |
| CountLineCodeExe | float64 | 0 | 91.3k |
| CountLineComment | float64 | 0 | 27k |
| CountStmt | float64 | 1 | 93.2k |
| CountStmtDecl | float64 | 0 | 46.1k |
| CountStmtExe | float64 | 0 | 90.2k |
| MaxCyclomatic | float64 | 0 | 759 |
| MaxInheritanceTree | float64 | 0 | 16 |
| MaxNesting | float64 | 0 | 34 |
| SumCyclomatic | float64 | 0 | 6k |

Each record below lists `id`, `repository_name`, `file_path`, `class_name`, `human_written_code`, and `class_skeleton`, followed by the 29 numeric metric values (`total_program_units` through `SumCyclomatic`) in the order given above.
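For programmatic access, here is a minimal sketch using the `datasets` library, assuming the dataset is published on the Hugging Face Hub; the repo id `example-org/class-metrics` is a placeholder, not the real dataset name:

```python
# Load one split and inspect a record; field names match the schema above.
from datasets import load_dataset

ds = load_dataset("example-org/class-metrics", split="train")  # placeholder id
row = ds[0]
print(row["repository_name"], row["class_name"])
print(row["class_skeleton"][:120])              # signatures and docstrings only
print(row["SumCyclomatic"], row["MaxNesting"])  # static-analysis metrics
```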
143,148 |
Knoema/knoema-python-driver
|
Knoema_knoema-python-driver/knoema/api_definitions.py
|
knoema.api_definitions.PivotTimeItem
|
class PivotTimeItem(PivotItem):
"""The class contains pivot request item"""
def __init__(self, dimensionid=None, members=None, uimode=None):
super().__init__(dimensionid, members)
self.uimode = uimode
|
class PivotTimeItem(PivotItem):
'''The class contains pivot request item'''
def __init__(self, dimensionid=None, members=None, uimode=None):
pass
| 2 | 1 | 3 | 0 | 3 | 0 | 1 | 0.25 | 1 | 1 | 0 | 0 | 1 | 1 | 1 | 2 | 6 | 1 | 4 | 3 | 2 | 1 | 4 | 3 | 2 | 1 | 2 | 0 | 1 |
143,149 |
Knoema/knoema-python-driver
|
Knoema_knoema-python-driver/knoema/api_definitions.py
|
knoema.api_definitions.UploadVerifyResponse
|
class UploadVerifyResponse(object):
"""The class contains response from upload post request"""
def __init__(self, data):
self.successful = data['Successful'] if 'Successful' in data else False
self.upload_format_type = data['UploadFormatType'] if 'UploadFormatType' in data else None
self.errors = data['ErrorList'] if 'ErrorList' in data else None
self.columns = data['Columns'] if 'Columns' in data else None
self.flat_ds_update_options = data['FlatDSUpdateOptions'] if 'FlatDSUpdateOptions' in data else None
self.metadata_details = UploadDatasetDetails(data['MetadataDetails']) if 'MetadataDetails' in data and data['MetadataDetails'] is not None else None
|
class UploadVerifyResponse(object):
'''The class contains response from upload post request'''
def __init__(self, data):
pass
| 2 | 1 | 7 | 0 | 7 | 0 | 7 | 0.13 | 1 | 1 | 1 | 0 | 1 | 6 | 1 | 1 | 10 | 1 | 8 | 8 | 6 | 1 | 8 | 8 | 6 | 7 | 1 | 0 | 7 |
143,150 |
Knoema/knoema-python-driver
|
Knoema_knoema-python-driver/knoema/api_definitions.py
|
knoema.api_definitions.PivotRequest
|
class PivotRequest(object):
"""The class contains pivot request"""
def __init__(self, dataset):
self.dataset = dataset
self.header = []
self.stub = []
self.filter = []
self.frequencies = []
self.transform = None
self.columns = None
def _get_item_array(self, items):
arr = []
for item in items:
itemvalues = {
'DimensionId': item.dimensionid,
'Members': item.members
}
if hasattr(item, 'aggregation'):
itemvalues['Aggregation'] = item.aggregation
if isinstance(item, PivotTimeItem):
itemvalues['UiMode'] = item.uimode
arr.append(itemvalues)
return arr
def save_to_json(self):
"""The method saves data to json from object"""
requestvalues = {
'Dataset': self.dataset,
'Header' : self._get_item_array(self.header),
'Filter' : self._get_item_array(self.filter),
'Stub' : self._get_item_array(self.stub),
'Frequencies': self.frequencies
}
if self.transform is not None:
requestvalues['Transform'] = self.transform
if self.columns is not None:
requestvalues['DetailColumns'] = self.columns
return json.dumps(requestvalues)
|
class PivotRequest(object):
'''The class contains pivot request'''
def __init__(self, dataset):
pass
def _get_item_array(self, items):
pass
def save_to_json(self):
'''The method saves data to json from object'''
pass
| 4 | 2 | 12 | 0 | 11 | 0 | 3 | 0.06 | 1 | 1 | 1 | 0 | 3 | 7 | 3 | 3 | 41 | 4 | 35 | 15 | 31 | 2 | 26 | 15 | 22 | 4 | 1 | 2 | 8 |
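The row above shows how `PivotRequest` aggregates `PivotItem`/`PivotTimeItem` objects and serializes them. A hypothetical usage sketch, assuming the `knoema` package is installed and using illustrative dataset and dimension ids:

```python
# Build a pivot request and print the JSON payload that save_to_json produces
# (the payload the driver POSTs to /api/1.0/data/pivot/).
from knoema.api_definitions import PivotRequest, PivotItem, PivotTimeItem

request = PivotRequest('IMFWEO2017Apr')                      # illustrative id
request.stub.append(PivotItem('country', ['912']))
request.header.append(PivotTimeItem('Time', ['2015'], uimode='allData'))
request.frequencies = ['A']
print(request.save_to_json())
```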
143,151 |
Knoema/knoema-python-driver
|
Knoema_knoema-python-driver/knoema/api_client.py
|
knoema.api_client.ApiClient
|
class ApiClient:
"""This is client that wrap requests and response to Knoema API"""
def __init__(self, host, appid=None, appsecret=None):
splitted = urllib.parse.urlsplit(host)
self._host = splitted.netloc.strip()
if not self._host:
self._host = splitted.path.strip()
self._schema = splitted.scheme
if not self._schema:
self._schema = 'http'
self._appid = appid
self._appsecret = appsecret
self._opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor)
self._search_config = None
def _get_url(self, apipath):
return urllib.parse.urlunsplit((self._schema, self._host, apipath, '', ''))
def _get_request_headers(self):
if not self._appid or not self._appsecret:
return {
'Content-Type' : 'application/json',
'Accept': 'application/json'
}
key = datetime.datetime.utcnow().strftime('%d-%m-%y-%H').encode()
hashed = hmac.new(key, self._appsecret.encode(), hashlib.sha1)
secrethash = base64.b64encode(hashed.digest()).decode('utf-8')
auth = 'Knoema {}:{}:1.2'.format(self._appid, secrethash)
return {
'Content-Type' : 'application/json',
'Accept': 'application/json',
'Authorization' : auth
}
def _api_get(self, obj, apipath, query=None):
url = self._get_url(apipath)
if query:
url = '{}?{}'.format(url, query)
headers = self._get_request_headers()
req = urllib.request.Request(url, headers=headers)
resp = self._opener.open(req)
return obj(_response_to_json(resp))
def _api_post(self, responseobj, apipath, requestobj):
json_data = requestobj.save_to_json()
return self._api_post_json(responseobj, apipath, json_data)
def _api_post_json(self, responseobj, apipath, requestjson):
url = self._get_url(apipath)
binary_data = requestjson.encode()
headers = self._get_request_headers()
req = urllib.request.Request(url, binary_data, headers)
resp = self._opener.open(req)
return responseobj(_response_to_json(resp))
def check_correct_host(self):
pass
def get_dataset(self, datasetid):
"""The method is getting information about dataset byt it's id"""
path = '/api/1.0/meta/dataset/{}'
return self._api_get(definition.Dataset, path.format(datasetid))
def get_dataset_meta(self, datasetid):
path = '/api/1.0/meta/dataset/{}'
return self._api_get(definition.DatasetMetadata, path.format(datasetid))
def get_dimension(self, dataset, dimension):
"""The method is getting information about dimension with items"""
path = '/api/1.0/meta/dataset/{}/dimension/{}'
return self._api_get(definition.Dimension, path.format(dataset, dimension))
def get_daterange(self, dataset):
"""The method is getting information about date range of dataset"""
path = '/api/1.0/meta/dataset/{}/daterange'
return self._api_get(definition.DateRange, path.format(dataset))
def get_data(self, pivotrequest):
"""The method is getting data by pivot request"""
path = '/api/1.0/data/pivot/'
return self._api_post(definition.PivotResponse, path, pivotrequest)
def get_data_by_json(self, pivotrequest_json):
"""The method is getting data by pivot request (json)"""
path = '/api/1.0/data/pivot/'
return self._api_post_json(definition.PivotResponse, path, pivotrequest_json)
def get_dataset_data(self, dataset_id, filters):
"""The method is getting JSON by URL and parses it to specified object"""
try:
return self._api_post(definition.detect_data_response, '/api/2.0/data?datasetId={}'.format(dataset_id), filters)
except HTTPError as ex:
if ex.code == 400:
raise ValueError(ex.read().decode('utf-8'))
else:
raise
def get_data_raw(self, request, metadata_only = False):
"""The method is getting data by raw request"""
path = '/api/1.2/data/raw/' + ('?metadataOnly=true' if metadata_only else '')
res = self._api_post(definition.RawDataResponse, path, request)
token = res.continuation_token
while token is not None:
res2 = self.get_data_raw_with_token(token, metadata_only)
res.series += res2.series
token = res2.continuation_token
return res
def get_data_raw_with_token(self, token, metadata_only = False):
path = '/api/1.0/data/raw/?continuationToken={0}' + ('&metadataOnly=true' if metadata_only else '')
return self._api_get(definition.RawDataResponse, path.format(token))
def get_mnemonics(self, mnemonics, transform, frequency):
"""The method get series by mnemonics"""
path = '/api/1.0/data/mnemonics?mnemonics={0}'
if transform:
path += '&transform=' + transform
if frequency:
path += '&frequency=' + frequency
return self._api_get(definition.MnemonicsResponseList, path.format(mnemonics))
def get_details(self, request):
"""The method is getting data details by request"""
path = '/api/1.1/data/details/'
return self._api_post(definition.DetailsResponse, path, request)
def get_company_info(self, ticker):
"""The method get company data"""
path = 'api/1.0/sema/{0}'
return self._api_get(definition_sema.CompanyInt, path.format(ticker))
def get_indicator_info(self, path):
path = 'api/1.0/sema/{0}'.format(path)
url = self._get_url(path)
headers = self._get_request_headers()
req = urllib.request.Request(url, headers=headers)
resp = self._opener.open(req)
return _response_to_json(resp)
def search(self, query):
if self._search_config == None:
path = '/api/1.0/search/config'
self._search_config = self._api_get(definition_search.SearchConfig, path)
headers = self._get_request_headers()
url = self._search_config.build_search_url(query)
req = urllib.request.Request(url, headers=headers)
resp = self._opener.open(req)
return definition_search.SearchResultsInt(_response_to_json(resp))
def upload_file(self, file):
"""The method is posting file to the remote server"""
url = self._get_url('/api/1.0/upload/post')
fcontent = FileContent(file)
binary_data = fcontent.get_binary()
headers = self._get_request_headers()
req = urllib.request.Request(url, binary_data, headers)
req.add_header('Content-type', fcontent.get_content_type())
req.add_header('Content-length', len(binary_data))
resp = urllib.request.urlopen(req)
return definition.UploadPostResponse(_response_to_json(resp))
def upload_verify(self, file_location, dataset=None):
"""This method is verifiing posted file on server"""
path = '/api/1.0/upload/verify'
query = 'doNotGenerateAdvanceReport=true&filePath={}'.format(file_location)
if dataset:
query = 'doNotGenerateAdvanceReport=true&filePath={}&datasetId={}'.format(file_location, dataset)
return self._api_get(definition.UploadVerifyResponse, path, query)
def upload_submit(self, upload_request):
"""The method is submitting dataset upload"""
path = '/api/1.0/upload/save'
return self._api_post(definition.DatasetUploadResponse, path, upload_request)
def upload_status(self, upload_id):
"""The method is checking status of uploaded dataset"""
path = '/api/1.0/upload/status'
query = 'id={}'.format(upload_id)
return self._api_get(definition.DatasetUploadStatusResponse, path, query)
def upload(self, file_path, dataset=None, public=False, name = None):
"""Use this function to upload data to Knoema dataset."""
upload_status = self.upload_file(file_path)
err_msg = 'Dataset has not been uploaded to the remote host'
if not upload_status.successful:
msg = '{}, because of the following error: {}'.format(err_msg, upload_status.error)
raise ValueError(msg)
err_msg = 'File has not been verified'
upload_ver_status = self.upload_verify(upload_status.properties.location, dataset)
if not upload_ver_status.successful:
ver_err = '\r\n'.join(upload_ver_status.errors)
msg = '{}, because of the following error(s): {}'.format(err_msg, ver_err)
raise ValueError(msg)
ds_upload = definition.DatasetUpload(upload_ver_status, upload_status, dataset, public, name)
ds_upload_submit_result = self.upload_submit(ds_upload)
err_msg = 'Dataset has not been saved to the database'
if ds_upload_submit_result.status == 'failed':
ver_err = '\r\n'.join(ds_upload_submit_result.errors)
msg = '{}, because of the following error(s): {}'.format(err_msg, ver_err)
raise ValueError(msg)
ds_upload_result = None
while True:
ds_upload_result = self.upload_status(ds_upload_submit_result.submit_id)
if ds_upload_result.status == 'pending' or ds_upload_result.status == 'processing':
time.sleep(5)
else:
break
if ds_upload_result.status != 'successful':
ver_err = '\r\n'.join(ds_upload_result.errors)
msg = '{}, because of the following error(s): {}'.format(err_msg, ver_err)
raise ValueError(msg)
return ds_upload_result.dataset
def delete(self, dataset):
"""The method is deleting dataset by it's id"""
url = self._get_url('/api/1.0/meta/dataset/{}/delete'.format(dataset))
json_data = ''
binary_data = json_data.encode()
headers = self._get_request_headers()
req = urllib.request.Request(url, binary_data, headers)
resp = urllib.request.urlopen(req)
str_response = resp.read().decode('utf-8')
if str_response != '"successful"' or resp.status < 200 or resp.status >= 300:
msg = 'Dataset has not been deleted, because of the following error(s): {}'.format(str_response)
raise ValueError(msg)
def verify(self, dataset, publication_date, source, refernce_url):
"""The method is verifying dataset by it's id"""
path = '/api/1.0/meta/verifydataset'
req = definition.DatasetVerifyRequest(dataset, publication_date, source, refernce_url)
result = self._api_post(definition.DatasetVerifyResponse, path, req)
if result.status == 'failed':
ver_err = '\r\n'.join(result.errors)
msg = 'Dataset has not been verified, because of the following error(s): {}'.format(ver_err)
raise ValueError(msg)
|
class ApiClient:
'''This is the client that wraps requests to and responses from the Knoema API'''
def __init__(self, host, appid=None, appsecret=None):
pass
def _get_url(self, apipath):
pass
def _get_request_headers(self):
pass
def _api_get(self, obj, apipath, query=None):
pass
def _api_post(self, responseobj, apipath, requestobj):
pass
def _api_post_json(self, responseobj, apipath, requestjson):
pass
def check_correct_host(self):
pass
def get_dataset(self, datasetid):
'''The method is getting information about dataset by its id'''
pass
def get_dataset_meta(self, datasetid):
pass
def get_dimension(self, dataset, dimension):
'''The method is getting information about dimension with items'''
pass
def get_daterange(self, dataset):
'''The method is getting information about date range of dataset'''
pass
def get_data(self, pivotrequest):
'''The method is getting data by pivot request'''
pass
def get_data_by_json(self, pivotrequest_json):
'''The method is getting data by pivot request (json)'''
pass
def get_dataset_data(self, dataset_id, filters):
'''The method is getting JSON by URL and parses it to specified object'''
pass
def get_data_raw(self, request, metadata_only = False):
'''The method is getting data by raw request'''
pass
def get_data_raw_with_token(self, token, metadata_only = False):
pass
def get_mnemonics(self, mnemonics, transform, frequency):
'''The method gets series by mnemonics'''
pass
def get_details(self, request):
'''The method is getting data details by request'''
pass
def get_company_info(self, ticker):
'''The method gets company data'''
pass
def get_indicator_info(self, path):
pass
def search(self, query):
pass
def upload_file(self, file):
'''The method is posting file to the remote server'''
pass
def upload_verify(self, file_location, dataset=None):
'''This method is verifying the posted file on the server'''
pass
def upload_submit(self, upload_request):
'''The method is submitting dataset upload'''
pass
def upload_status(self, upload_id):
'''The method is checking status of uploaded dataset'''
pass
def upload(self, file_path, dataset=None, public=False, name = None):
'''Use this function to upload data to Knoema dataset.'''
pass
def delete(self, dataset):
'''The method is deleting dataset by its id'''
pass
def verify(self, dataset, publication_date, source, refernce_url):
'''The method is verifying dataset by its id'''
pass
| 29 | 18 | 9 | 1 | 7 | 1 | 2 | 0.09 | 0 | 24 | 19 | 0 | 28 | 6 | 28 | 28 | 276 | 67 | 191 | 106 | 162 | 18 | 182 | 105 | 153 | 7 | 0 | 2 | 49 |
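`ApiClient._get_request_headers` derives an hourly rotating HMAC signature from the app secret. A standalone sketch of just that computation, with illustrative credentials:

```python
# Reproduces the Authorization header scheme used above: the key is the
# current UTC hour, the message is the app secret, and the digest is SHA-1.
import base64
import datetime
import hashlib
import hmac

def knoema_auth_header(app_id: str, app_secret: str) -> str:
    key = datetime.datetime.utcnow().strftime('%d-%m-%y-%H').encode()
    digest = hmac.new(key, app_secret.encode(), hashlib.sha1).digest()
    secrethash = base64.b64encode(digest).decode('utf-8')
    return 'Knoema {}:{}:1.2'.format(app_id, secrethash)

print(knoema_auth_header('my-app-id', 'my-app-secret'))  # illustrative values
```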
143,152 |
Knoema/knoema-python-driver
|
Knoema_knoema-python-driver/knoema/api_client.py
|
knoema.api_client.FileContent
|
class FileContent(object):
"""Accumulate the data to be used when posting a form."""
def __init__(self, file):
self.file_name = os.path.basename(file)
self.body = open(file, mode='rb').read()
self.boundary = _random_string(30)
def get_content_type(self):
"""Return a content type"""
return 'multipart/form-data; boundary="{}"'.format(self.boundary)
def get_binary(self):
"""Return a binary buffer containing the file content"""
content_disp = 'Content-Disposition: form-data; name="file"; filename="{}"'
stream = io.BytesIO()
stream.write(_string_to_binary('--{}'.format(self.boundary)))
stream.write(_crlf())
stream.write(_string_to_binary(content_disp.format(self.file_name)))
stream.write(_crlf())
stream.write(_crlf())
stream.write(self.body)
stream.write(_crlf())
stream.write(_string_to_binary('--{}--'.format(self.boundary)))
stream.write(_crlf())
return stream.getvalue()
|
class FileContent(object):
'''Accumulate the data to be used when posting a form.'''
def __init__(self, file):
pass
def get_content_type(self):
'''Return a content type'''
pass
def get_binary(self):
'''Return a binary buffer containing the file content'''
pass
| 4 | 3 | 8 | 1 | 6 | 1 | 1 | 0.15 | 1 | 0 | 0 | 0 | 3 | 3 | 3 | 3 | 29 | 6 | 20 | 9 | 16 | 3 | 20 | 9 | 16 | 1 | 1 | 0 | 3 |
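`FileContent.get_binary` hand-assembles a multipart/form-data body. A standalone sketch of the same wire format, with a fixed boundary and inline bytes standing in for the module's `_random_string`/`_crlf` helpers and the file read in `__init__`:

```python
# Assemble the multipart body: boundary line, Content-Disposition header,
# blank line, file bytes, then the closing boundary.
import io

boundary = 'x' * 30                      # FileContent uses _random_string(30)
stream = io.BytesIO()
stream.write('--{}\r\n'.format(boundary).encode())
stream.write(b'Content-Disposition: form-data; name="file"; filename="data.csv"\r\n\r\n')
stream.write(b'col1,col2\n1,2\n')        # stands in for the uploaded file bytes
stream.write('\r\n--{}--\r\n'.format(boundary).encode())
print(stream.getvalue())
```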
143,153 |
Knoema/knoema-python-driver
|
Knoema_knoema-python-driver/knoema/api_config.py
|
knoema.api_config.ApiConfig
|
class ApiConfig(object):
"""
This class configures knoema api.
The class contains fields:
host -- the host where knoema is going to connect
app_id -- application id that will have access to knoema.
Application should be created by knoema user or administrator
app_secret -- a code generated after the application is created.
Should be set up together with app_id
"""
def __new__(cls):
if not hasattr(cls, 'instance'):
cls.instance = super(ApiConfig, cls).__new__(cls)
cls.instance.host = os.environ['KNOEMA_HOST'] if 'KNOEMA_HOST' in os.environ else 'knoema.com'
cls.instance.app_id = None
cls.instance.app_secret = None
return cls.instance
def __init__(self):
self.host = self.instance.host
self.app_id = self.instance.app_id
self.app_secret = self.instance.app_secret
|
class ApiConfig(object):
'''
This class configures knoema api.
The class contains fields:
host -- the host where knoema is going to connect
app_id -- application id that will have access to knoema.
Application should be created by knoema user or administrator
app_secret -- a code generated after the application is created.
Should be set up together with app_id
'''
def __new__(cls):
pass
def __init__(self):
pass
| 3 | 1 | 6 | 0 | 6 | 0 | 2 | 0.75 | 1 | 1 | 0 | 0 | 2 | 3 | 2 | 2 | 27 | 6 | 12 | 6 | 9 | 9 | 12 | 6 | 9 | 3 | 1 | 1 | 4 |
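`ApiConfig.__new__` implements a process-wide singleton: every instantiation returns the same shared state. A short demonstration, assuming the `knoema` package is installed:

```python
# Settings applied through one ApiConfig() call are visible through any other,
# because __new__ always hands back the same cached instance.
import knoema

first = knoema.ApiConfig()
first.host = 'knoema.com'
second = knoema.ApiConfig()
assert first is second               # same object
assert second.host == 'knoema.com'   # shared configuration
```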
143,154 |
Knoema/knoema-python-driver
|
Knoema_knoema-python-driver/knoema/api_definitions.py
|
knoema.api_definitions.DataAPIRequest
|
class DataAPIRequest(object):
"""The class contains API Data 2.0 request"""
def __init__(self, filters):
self.filters = filters
def save_to_json(self):
"""The method saves data to json from object"""
return json.dumps(self.filters)
|
class DataAPIRequest(object):
'''The class contains API Data 2.0 request'''
def __init__(self, filters):
pass
def save_to_json(self):
'''The method saves data to json from object'''
pass
| 3 | 2 | 3 | 0 | 2 | 1 | 1 | 0.4 | 1 | 0 | 0 | 0 | 2 | 1 | 2 | 2 | 9 | 2 | 5 | 4 | 2 | 2 | 5 | 4 | 2 | 1 | 1 | 0 | 2 |
143,155 |
Knoema/knoema-python-driver
|
Knoema_knoema-python-driver/knoema/api_definitions.py
|
knoema.api_definitions.DataFrame
|
class DataFrame(object):
def __init__(self):
self.id = None
self.data = None
self.metadata = None
|
class DataFrame(object):
def __init__(self):
pass
| 2 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 3 | 1 | 1 | 6 | 1 | 5 | 5 | 3 | 0 | 5 | 5 | 3 | 1 | 1 | 0 | 1 |
143,156 |
Knoema/knoema-python-driver
|
Knoema_knoema-python-driver/knoema/api_definitions.py
|
knoema.api_definitions.Dataset
|
class Dataset(object):
"""The class contains dataset description"""
def __init__(self, data):
"""The method loading data from json to class fields"""
if 'id' not in data:
raise ValueError(data)
self.id = data['id']
self.type = data['type']
self.is_remote = data['isRemote'] if 'isRemote' in data else False
self.dimensions = [DimensionModel(dim) for dim in data['dimensions']]
self.timeseries_attributes = [TimeSeriesAttribute(attr) for attr in data['timeseriesAttributes']] if 'timeseriesAttributes' in data else []
self.has_time = self.type == 'Regular' or any(x for x in data['columns'] if x['type'] == 'Date')
def find_dimension_by_name(self, dim_name):
"""the method searching dimension with a given name"""
for dim in self.dimensions:
if is_equal_strings_ignore_case(dim.name, dim_name):
return dim
if dim.is_geo and is_equal_strings_ignore_case('region', dim_name):
return dim
return None
def find_dimension_by_id(self, dim_id):
"""the method searching dimension with a given id"""
for dim in self.dimensions:
if is_equal_strings_ignore_case(dim.id, dim_id):
return dim
if dim.is_geo and is_equal_strings_ignore_case('region', dim_id):
return dim
if is_equal_strings_ignore_case('ticker', dim_id):
return dim
return None
|
class Dataset(object):
'''The class contains dataset description'''
def __init__(self, data):
'''The method loads data from json into class fields'''
pass
def find_dimension_by_name(self, dim_name):
'''The method searches for a dimension with the given name'''
pass
def find_dimension_by_id(self, dim_id):
'''The method searches for a dimension with the given id'''
pass
| 4 | 4 | 12 | 3 | 8 | 1 | 4 | 0.15 | 1 | 3 | 2 | 0 | 3 | 6 | 3 | 3 | 41 | 11 | 26 | 12 | 22 | 4 | 26 | 12 | 22 | 5 | 1 | 2 | 13 |
143,157 |
Knoema/knoema-python-driver
|
Knoema_knoema-python-driver/knoema/api_definitions.py
|
knoema.api_definitions.DatasetMetadata
|
class DatasetMetadata:
def __init__(self, data):
self.data = data
settings = {}
if 'settings' in self.data:
try:
settings = json.loads(self.data['settings'])
except:
pass
if 'LastUpdate' in settings:
data['lastUpdate'] = settings['LastUpdate']['End']
if 'LastSuccessfulUpdate' in settings:
data['lastSuccessfulUpdate'] = settings['LastSuccessfulUpdate']['End']
if 'Health' in settings:
data['health'] = settings['Health']
if 'NextRun' in settings:
data['nextRun'] = settings['NextRun']
if 'AverageUpdateInterval' in settings:
data['averageUpdateInterval'] = settings['AverageUpdateInterval']
if 'ProviderUpdateLag' in settings:
data['providerUpdateLag'] = settings['ProviderUpdateLag']
fields_with_date = [
'publicationDate',
'nextReleaseDate',
'expectedUpdateDate',
'lastUpdatedOn',
'lastUpdate',
'lastSuccessfulUpdate',
'nextRun',
]
for field in fields_with_date:
if field in self.data:
self.data[field] = parse_date(self.data[field])
fields_to_delete = [
'updatePriority',
'hasShortCut',
'isShortcut',
'shareToCommunitiesAllowed',
'accessedOn',
'metadataAccess',
'settings',
]
for field in fields_to_delete:
if field in self.data:
del self.data[field]
|
class DatasetMetadata:
def __init__(self, data):
pass
| 2 | 0 | 53 | 9 | 44 | 0 | 13 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 55 | 10 | 45 | 7 | 43 | 0 | 29 | 7 | 27 | 13 | 0 | 2 | 13 |
143,158 |
Knoema/knoema-python-driver
|
Knoema_knoema-python-driver/knoema/api_definitions.py
|
knoema.api_definitions.DatasetUpload
|
class DatasetUpload(object):
"""The class contains request for UploadSubmit"""
def __init__(self, verify_result, upload_result, dataset = None, public = False, name = None):
self.dataset = dataset
self.upload_format_type = verify_result.upload_format_type
self.columns = verify_result.columns
self.file_property = upload_result.properties
self.flat_ds_update_options = verify_result.flat_ds_update_options
dataset_details = verify_result.metadata_details
self.name = dataset_details.dataset_name if dataset_details and dataset_details.dataset_name else None
if self.name is None and dataset is None:
self.name = name if name != None else 'New dataset'
self.description = dataset_details.description if dataset_details else None
self.source = dataset_details.source if dataset_details else None
self.publication_date = dataset_details.publication_date if dataset_details else None
self.accessed_on = dataset_details.accessed_on if dataset_details else None
self.dataset_ref = dataset_details.dataset_ref if dataset_details else None
self.public = public
def save_to_json(self):
"""The method saves DatasetUpload to json from object"""
requestvalues = {
'DatasetId': self.dataset,
'Name': self.name,
'Description': self.description,
'Source': self.source,
'PubDate': self.publication_date,
'AccessedOn': self.accessed_on,
'Url': self.dataset_ref,
'UploadFormatType': self.upload_format_type,
'Columns': self.columns,
'FileProperty': self.file_property.__dict__,
'FlatDSUpdateOptions': self.flat_ds_update_options,
'Public': self.public
}
return json.dumps(requestvalues)
|
class DatasetUpload(object):
'''The class contains request for UploadSubmit'''
def __init__(self, verify_result, upload_result, dataset = None, public = False, name = None):
pass
def save_to_json(self):
'''The method saves DatasetUpload to json from object'''
pass
| 3 | 2 | 18 | 1 | 16 | 1 | 5 | 0.06 | 1 | 0 | 0 | 0 | 2 | 12 | 2 | 2 | 39 | 4 | 33 | 17 | 30 | 2 | 20 | 17 | 17 | 9 | 1 | 1 | 10 |
143,159 |
Knoema/knoema-python-driver
|
Knoema_knoema-python-driver/knoema/api_definitions.py
|
knoema.api_definitions.DatasetUploadResponse
|
class DatasetUploadResponse(object):
"""The class contains response for UploadSubmit"""
def __init__(self, data):
self.submit_id = data['Id'] if 'Id' in data else None
self.dataset = data['DatasetId'] if 'DatasetId' in data else None
self.status = data['Status'] if 'Status' in data else 'failed'
self.errors = data['Errors'] if 'Errors' in data else None
|
class DatasetUploadResponse(object):
'''The class contains response for UploadSubmit'''
def __init__(self, data):
pass
| 2 | 1 | 5 | 0 | 5 | 0 | 5 | 0.17 | 1 | 0 | 0 | 0 | 1 | 4 | 1 | 1 | 8 | 1 | 6 | 6 | 4 | 1 | 6 | 6 | 4 | 5 | 1 | 0 | 5 |
143,160 |
Knoema/knoema-python-driver
|
Knoema_knoema-python-driver/knoema/api_definitions.py
|
knoema.api_definitions.DatasetUploadStatusResponse
|
class DatasetUploadStatusResponse(object):
"""The class contains response for UploadSubmit"""
def __init__(self, data):
self.submit_id = data['id'] if 'id' in data else None
self.dataset = data['datasetId'] if 'datasetId' in data else None
self.status = data['status'] if 'status' in data else 'failed'
self.errors = data['errors'] if 'errors' in data else None
|
class DatasetUploadStatusResponse(object):
'''The class contains response for UploadSubmit'''
def __init__(self, data):
pass
| 2 | 1 | 5 | 0 | 5 | 0 | 5 | 0.17 | 1 | 0 | 0 | 0 | 1 | 4 | 1 | 1 | 8 | 1 | 6 | 6 | 4 | 1 | 6 | 6 | 4 | 5 | 1 | 0 | 5 |
143,161 |
Knoema/knoema-python-driver
|
Knoema_knoema-python-driver/knoema/api_definitions.py
|
knoema.api_definitions.DatasetVerifyRequest
|
class DatasetVerifyRequest(object):
"""The class contains dataset verification request"""
def __init__(self, dataset, publication_date, source, refernce_url):
self.dataset = dataset
self.publication_date = publication_date
self.source = source
self.refernce_url = refernce_url
def save_to_json(self):
"""The method saves data to json from object"""
requestvalues = {
'id': self.dataset,
'publicationDate': self.publication_date.strftime('%Y-%m-%d'),
'source': self.source,
'refUrl': self.refernce_url,
}
return json.dumps(requestvalues)
|
class DatasetVerifyRequest(object):
'''The class contains dataset verification request'''
def __init__(self, dataset, publication_date, source, refernce_url):
pass
def save_to_json(self):
'''The method saves data to json from object'''
pass
| 3 | 2 | 8 | 1 | 7 | 1 | 1 | 0.14 | 1 | 0 | 0 | 0 | 2 | 4 | 2 | 2 | 19 | 3 | 14 | 8 | 11 | 2 | 9 | 8 | 6 | 1 | 1 | 0 | 2 |
143,162 |
Knoema/knoema-python-driver
|
Knoema_knoema-python-driver/knoema/api_definitions.py
|
knoema.api_definitions.DatasetVerifyResponse
|
class DatasetVerifyResponse(object):
"""The class contains response from dataset verification request"""
def __init__(self, data):
self.status = data['status']
self.errors = data['errors'] if 'errors' in data else None
|
class DatasetVerifyResponse(object):
'''The class contains response from dataset verification request'''
def __init__(self, data):
pass
| 2 | 1 | 3 | 0 | 3 | 0 | 2 | 0.25 | 1 | 0 | 0 | 0 | 1 | 2 | 1 | 1 | 6 | 1 | 4 | 4 | 2 | 1 | 4 | 4 | 2 | 2 | 1 | 0 | 2 |
143,163 |
Knoema/knoema-python-driver
|
Knoema_knoema-python-driver/knoema/api_definitions.py
|
knoema.api_definitions.DateRange
|
class DateRange:
"""The class contains information about dataset's data range"""
def __init__(self, data):
self.start_date = parse_date(data['startDate'])
self.end_date = parse_date(data['endDate'])
self.frequencies = data['frequencies']
|
class DateRange:
'''The class contains information about dataset's date range'''
def __init__(self, data):
pass
| 2 | 1 | 4 | 0 | 4 | 0 | 1 | 0.2 | 0 | 0 | 0 | 0 | 1 | 3 | 1 | 1 | 7 | 1 | 5 | 5 | 3 | 1 | 5 | 5 | 3 | 1 | 0 | 0 | 1 |
143,164 |
Knoema/knoema-python-driver
|
Knoema_knoema-python-driver/knoema/data_reader.py
|
knoema.data_reader.MnemonicsDataReader
|
class MnemonicsDataReader(DataReader):
def __init__(self, client, mnemonics, transform, frequency):
super().__init__(client)
self.mnemonics = mnemonics
self.transform = transform
self.frequency = frequency
def _get_metadata_series(self, resp, names_of_attributes):
series = {}
for series_point in resp.tuples:
val = series_point['Value']
if val is None:
continue
serie_name = series_point['Mnemonics']
if serie_name not in series:
serie_attrs = self._get_series_with_metadata(series_point)
series[serie_name] = KnoemaSeries(serie_name, serie_attrs, names_of_attributes, None)
return series
def _get_detail_columns(self, resp):
return None
def _get_data_series(self, resp, detail_columns):
series = {}
frequency_list = []
for series_point in resp.tuples:
val = series_point['Value']
if val is None:
continue
series_name = series_point['Mnemonics']
if series_name not in series:
series[series_name] = KnoemaSeries(series_name, [], [], detail_columns)
curr_date_val = series_point['Time']
try:
curr_date_val = datetime.strptime(series_point['Time'], '%Y-%m-%dT%H:%M:%SZ')
except ValueError:
pass
freq = series_point['Frequency']
if freq not in frequency_list:
frequency_list.append(freq)
if (freq == "W"):
curr_date_val = curr_date_val - timedelta(days = curr_date_val.weekday())
if (freq == 'FQ'):
curr_date_val = TimeFormat.format_statistical(curr_date_val, 'FQ')
series[series_name].add_value(series_point['Value'], curr_date_val, None)
if 'FQ' in frequency_list and len(frequency_list) > 1:
raise ValueError('Please provide a valid frequency list. FQ cannot be requested together with other frequencies.')
return series
def _get_pandasframe_one_dataset(self):
pandas_series = {}
if self.include_metadata:
pandas_series_with_attr = {}
names_of_attributes = self._get_attribute_names()
mnemonics = self.mnemonics
mnemonics_string = self.separator.join(mnemonics) if isinstance(mnemonics, list) else mnemonics
mnemonics_resp = self.client.get_mnemonics(mnemonics_string, self.transform, self.frequency)
detail_columns = None
for item in mnemonics_resp.items:
pivot_resp = item.pivot
if not definition.is_equal_strings_ignore_case(self.dataset.id, pivot_resp.dataset):
continue
# create dataframe with data for mnemonics
series = self._get_data_series(pivot_resp, detail_columns)
pandas_series = PandasHelper.creates_pandas_series(series, pandas_series, detail_columns)
if self.include_metadata:
# create dataframe with metadata for mnemonics
series_with_attr = self._get_metadata_series(pivot_resp, names_of_attributes)
pandas_series_with_attr = PandasHelper.creates_pandas_series(series_with_attr, pandas_series_with_attr, None)
pandas_data_frame = PandasHelper.create_pandas_dataframe(pandas_series, [], detail_columns)
if not self.include_metadata:
return pandas_data_frame
pandas_data_frame_with_attr = PandasHelper.create_pandas_dataframe(pandas_series_with_attr, [], None)
return pandas_data_frame, pandas_data_frame_with_attr
def _get_pandasframe_across_datasets(self):
mnemonics_string = self.separator.join(self.mnemonics) if isinstance(self.mnemonics, list) else self.mnemonics
mnemonics_resp = self.client.get_mnemonics(mnemonics_string, self.transform, self.frequency)
dict_datasets = {}
pandas_series = {}
if self.include_metadata:
pandas_series_with_attr = {}
detail_columns = None
for item in mnemonics_resp.items:
pivot_resp = item.pivot
if pivot_resp is None:
continue
dataset_id = pivot_resp.dataset
if dataset_id not in dict_datasets:
dataset = self.client.get_dataset(dataset_id)
self.dataset = dataset
dimensions = []
for dim in dataset.dimensions:
dimensions.append(self.client.get_dimension(dataset_id, dim.id))
self.dimensions = dimensions
names_of_attributes = self._get_attribute_names() if self.include_metadata else None
dict_datasets[dataset_id] = (dataset, dimensions, names_of_attributes)
else:
self.dataset, self.dimensions, names_of_attributes = dict_datasets[dataset_id]
# create dataframe with data for mnemonics
series = self._get_data_series(pivot_resp, detail_columns)
pandas_series = PandasHelper.creates_pandas_series(series, pandas_series, detail_columns)
if self.include_metadata:
# create dataframe with metadata for mnemonics
series_with_attr = self._get_metadata_series(pivot_resp, names_of_attributes)
pandas_series_with_attr = PandasHelper.creates_pandas_series(series_with_attr, pandas_series_with_attr, None)
pandas_data_frame = PandasHelper.create_pandas_dataframe(pandas_series, [], detail_columns)
if not self.include_metadata:
return pandas_data_frame
pandas_data_frame_with_attr = PandasHelper.create_pandas_dataframe(pandas_series_with_attr, [], None)
return pandas_data_frame, pandas_data_frame_with_attr
def get_pandasframe(self):
"""The method loads data from dataset"""
if self.dataset:
self._load_dimensions()
return self._get_pandasframe_one_dataset()
return self._get_pandasframe_across_datasets()
|
class MnemonicsDataReader(DataReader):
def __init__(self, client, mnemonics, transform, frequency):
pass
def _get_metadata_series(self, resp, names_of_attributes):
pass
def _get_detail_columns(self, resp):
pass
def _get_data_series(self, resp, detail_columns):
pass
def _get_pandasframe_one_dataset(self):
pass
def _get_pandasframe_across_datasets(self):
pass
def get_pandasframe(self):
'''The method loads data from dataset'''
pass
| 8 | 1 | 18 | 2 | 15 | 1 | 5 | 0.05 | 1 | 8 | 3 | 0 | 7 | 5 | 7 | 14 | 131 | 19 | 107 | 55 | 99 | 5 | 106 | 55 | 98 | 10 | 2 | 3 | 34 |
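`_get_data_series` snaps weekly (`'W'`) observations back to the Monday of their week. The alignment in isolation:

```python
# timedelta(days=weekday()) rewinds any timestamp to that week's Monday,
# mirroring the frequency == 'W' branch above.
from datetime import datetime, timedelta

point = datetime(2024, 5, 16)                   # a Thursday
monday = point - timedelta(days=point.weekday())
print(monday.date())                            # 2024-05-13
```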
143,165 |
Knoema/knoema-python-driver
|
Knoema_knoema-python-driver/knoema/api_definitions.py
|
knoema.api_definitions.DetailsResponse
|
class DetailsResponse(object):
def __init__(self, data):
self.columns = data['columns']
self.tuples = data['data']
|
class DetailsResponse(object):
def __init__(self, data):
pass
| 2 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 2 | 1 | 1 | 4 | 0 | 4 | 4 | 2 | 0 | 4 | 4 | 2 | 1 | 1 | 0 | 1 |
143,166 |
Knoema/knoema-python-driver
|
Knoema_knoema-python-driver/knoema/api_definitions.py
|
knoema.api_definitions.Dimension
|
class Dimension(DimensionModel):
"""The class contains dimension description and dimension items"""
def __init__(self, data):
super().__init__(data)
self.fields = data['fields']
self.items = [DimensionMember(item) for item in data['items']]
# fill maps
self.key_map = {}
self.id_map = {}
self.name_map = {}
self.region_map = {}
self.ticker_map = {}
for item in self.items:
self.key_map[item.key] = item
self.name_map[item.name.upper()] = item
if 'id' in item.fields:
self.id_map[item.fields['id'].upper()] = item
if 'regionid' in item.fields and item.fields['regionid'] is not None:
self.region_map[item.fields['regionid'].upper()] = item
if 'ticker' in item.fields and item.fields['ticker'] is not None:
self.ticker_map[item.fields['ticker'].upper()] = item
def find_member_by_key(self, member_key):
"""The method searches member of dimension by given member key"""
return self.key_map.get(member_key)
def find_member_by_id(self, member_id):
"""The method searches member of dimension by given member id"""
return self.id_map.get(member_id.upper())
def find_member_by_name(self, member_name):
"""The method searches member of dimension by given member name"""
return self.name_map.get(member_name.upper())
def find_member_by_regionid(self, member_name):
"""The method searches member of dimension by given region id"""
return self.region_map.get(member_name.upper())
def find_member_by_ticker(self, member_name):
return self.ticker_map.get(member_name.upper())
|
class Dimension(DimensionModel):
'''The class contains dimension description and dimension items'''
def __init__(self, data):
pass
def find_member_by_key(self, member_key):
'''The method searches member of dimension by given member key'''
pass
def find_member_by_id(self, member_id):
'''The method searches member of dimension by given member id'''
pass
def find_member_by_name(self, member_name):
'''The method searches member of dimension by given member name'''
pass
def find_member_by_regionid(self, member_name):
'''The method searches member of dimension by given region id'''
pass
def find_member_by_ticker(self, member_name):
pass
| 7 | 5 | 6 | 0 | 5 | 1 | 2 | 0.21 | 1 | 2 | 1 | 0 | 6 | 7 | 6 | 7 | 43 | 8 | 29 | 14 | 22 | 6 | 29 | 14 | 22 | 5 | 2 | 2 | 10 |
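`Dimension.__init__` precomputes uppercase-keyed maps so the `find_member_by_*` lookups are case-insensitive dictionary hits rather than scans. A minimal sketch of that normalization with made-up members:

```python
# Keys are normalized with .upper() at build time and again at lookup time.
members = [{'name': 'United States', 'fields': {'id': 'US', 'regionid': 'usa'}},
           {'name': 'Germany', 'fields': {'id': 'DE', 'regionid': 'deu'}}]
name_map = {m['name'].upper(): m for m in members}
region_map = {m['fields']['regionid'].upper(): m for m in members}
print(name_map['united states'.upper()]['fields']['id'])   # US
print(region_map['deu'.upper()]['name'])                    # Germany
```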
143,167 |
Knoema/knoema-python-driver
|
Knoema_knoema-python-driver/knoema/api_definitions.py
|
knoema.api_definitions.DimensionMember
|
class DimensionMember(object):
"""The class contains dimension member information"""
def __init__(self, data):
self.key = data['key']
self.name = data['name']
self.level = data['level']
self.hasdata = data['hasData']
self.fields = data['fields']
|
class DimensionMember(object):
'''The class contains dimension member information'''
def __init__(self, data):
pass
| 2 | 1 | 6 | 0 | 6 | 0 | 1 | 0.14 | 1 | 0 | 0 | 0 | 1 | 5 | 1 | 1 | 9 | 1 | 7 | 7 | 5 | 1 | 7 | 7 | 5 | 1 | 1 | 0 | 1 |
143,168 |
Knoema/knoema-python-driver
|
Knoema_knoema-python-driver/knoema/api_definitions.py
|
knoema.api_definitions.DimensionModel
|
class DimensionModel(object):
"""The class contains dimension description"""
def __init__(self, data):
self.key = data['key']
self.id = data['id']
self.name = data['name']
self.is_geo = data['isGeo'] if 'isGeo' in data else False
|
class DimensionModel(object):
'''The class contains dimension description'''
def __init__(self, data):
pass
| 2 | 1 | 5 | 0 | 5 | 0 | 2 | 0.17 | 1 | 0 | 0 | 1 | 1 | 4 | 1 | 1 | 8 | 1 | 6 | 6 | 4 | 1 | 6 | 6 | 4 | 2 | 1 | 0 | 2 |
143,169 |
Knoema/knoema-python-driver
|
Knoema_knoema-python-driver/knoema/api_definitions.py
|
knoema.api_definitions.FileProperties
|
class FileProperties(object):
"""The class contains response from upload post request"""
def __init__(self, data):
self.size = data['Size'] if 'Size' in data else None
self.name = data['Name'] if 'Name' in data else None
self.location = data['Location'] if 'Location' in data else None
self.type = data['Type'] if 'Type' in data else None
|
class FileProperties(object):
'''The class contains response from upload post request'''
def __init__(self, data):
pass
| 2 | 1 | 5 | 0 | 5 | 0 | 5 | 0.17 | 1 | 0 | 0 | 0 | 1 | 4 | 1 | 1 | 8 | 1 | 6 | 6 | 4 | 1 | 6 | 6 | 4 | 5 | 1 | 0 | 5 |
143,170 |
Knoema/knoema-python-driver
|
Knoema_knoema-python-driver/knoema/api_definitions.py
|
knoema.api_definitions.MnemonicsResponse
|
class MnemonicsResponse(object):
def __init__(self,data):
self.mnemonics = data['mnemonics']
self.pivot = PivotResponse(data['pivot']) if 'pivot' in data else None
|
class MnemonicsResponse(object):
def __init__(self,data):
pass
| 2 | 0 | 3 | 0 | 3 | 0 | 2 | 0 | 1 | 1 | 1 | 0 | 1 | 2 | 1 | 1 | 5 | 1 | 4 | 4 | 2 | 0 | 4 | 4 | 2 | 2 | 1 | 0 | 2 |
143,171 |
Knoema/knoema-python-driver
|
Knoema_knoema-python-driver/knoema/api_definitions.py
|
knoema.api_definitions.MnemonicsResponseList
|
class MnemonicsResponseList(object):
def __init__(self,data):
self.items = []
for item in data:
self.items.append(MnemonicsResponse(item))
|
class MnemonicsResponseList(object):
def __init__(self,data):
pass
| 2 | 0 | 4 | 0 | 4 | 0 | 2 | 0 | 1 | 1 | 1 | 0 | 1 | 1 | 1 | 1 | 6 | 1 | 5 | 4 | 3 | 0 | 5 | 4 | 3 | 2 | 1 | 1 | 2 |
143,172 |
Knoema/knoema-python-driver
|
Knoema_knoema-python-driver/knoema/api_definitions.py
|
knoema.api_definitions.PivotItem
|
class PivotItem(object):
"""The class contains pivot request item"""
def __init__(self, dimensionid=None, members=None, metadataFields=None, dimensionFields=None, aggregations=None):
self.dimensionid = dimensionid
self.members = members
if aggregations != None:
self.aggregation = aggregations
if metadataFields:
self.metadataFields = [PivotItemMetadata(metadata['key'], metadata['name'],
metadata['parent'], metadata['fields']) for metadata in metadataFields]
else:
self.metadataFields = None
self.fields = dimensionFields
|
class PivotItem(object):
'''The class contains pivot request item'''
def __init__(self, dimensionid=None, members=None, metadataFields=None, dimensionFields=None, aggregations=None):
pass
| 2 | 1 | 11 | 0 | 11 | 0 | 3 | 0.08 | 1 | 1 | 1 | 1 | 1 | 5 | 1 | 1 | 14 | 1 | 12 | 7 | 10 | 1 | 10 | 7 | 8 | 3 | 1 | 1 | 3 |
143,173 |
Knoema/knoema-python-driver
|
Knoema_knoema-python-driver/knoema/api_definitions.py
|
knoema.api_definitions.PivotItemMetadata
|
class PivotItemMetadata(object):
#SimpleDimensionMember
"""The class contains metadata fields for pivot request item"""
def __init__(self, key, name, parent=None, fields=None):
self.key = key
self.name = name
self.parent = parent
self.fields = fields
|
class PivotItemMetadata(object):
'''The class contains metadata fields for pivot request item'''
def __init__(self, key, name, parent=None, fields=None):
pass
| 2 | 1 | 5 | 0 | 5 | 0 | 1 | 0.33 | 1 | 0 | 0 | 0 | 1 | 4 | 1 | 1 | 8 | 0 | 6 | 6 | 4 | 2 | 6 | 6 | 4 | 1 | 1 | 0 | 1 |
143,174 |
Knoema/knoema-python-driver
|
Knoema_knoema-python-driver/knoema/api_definitions.py
|
knoema.api_definitions.PivotResponse
|
class PivotResponse(object):
"""The class contains pivot response"""
def __init__(self, data):
self.dataset = data['dataset']
self.header = []
for item in data['header']:
self.header.append(self._construct_dimension(item))
self.stub = []
for item in data['stub']:
self.stub.append(self._construct_dimension(item))
self.filter = []
for item in data['filter']:
self.filter.append(self._construct_dimension(item))
self.tuples = data['data']
self.descriptor = data['descriptor'] if 'descriptor' in data else None
def _construct_dimension(self, item):
return PivotItem(item['dimensionId'], item['members'], item['metadataFields'], item['dimensionFields'] if 'dimensionFields' in item else None)
|
class PivotResponse(object):
'''The class contains pivot response'''
def __init__(self, data):
pass
def _construct_dimension(self, item):
pass
| 3 | 1 | 10 | 3 | 8 | 0 | 4 | 0.06 | 1 | 1 | 1 | 0 | 2 | 6 | 2 | 2 | 24 | 7 | 16 | 10 | 13 | 1 | 16 | 10 | 13 | 5 | 1 | 1 | 7 |
143,175 |
Knoema/knoema-python-driver
|
Knoema_knoema-python-driver/knoema/data_reader.py
|
knoema.data_reader.PandasHelper
|
class PandasHelper(object):
@staticmethod
def creates_pandas_series(series, pandas_series, detail_columns):
for _, series_content in series.items():
series_content.creates_pandas_series(pandas_series, detail_columns)
return pandas_series
@staticmethod
def create_pandas_dataframe(pandas_series, names_of_dimensions, detail_columns):
pandas_data_frame = pandas.DataFrame(pandas_series)
pandas_data_frame.sort_index()
if isinstance(pandas_data_frame.columns, pandas.MultiIndex):
column_names = names_of_dimensions
if detail_columns is not None:
column_names = list(column_names)
column_names.append('Attribute')
pandas_data_frame.columns.names = column_names
return pandas_data_frame
|
class PandasHelper(object):
@staticmethod
def creates_pandas_series(series, pandas_series, detail_columns):
pass
@staticmethod
def create_pandas_dataframe(pandas_series, names_of_dimensions, detail_columns):
pass
| 5 | 0 | 8 | 1 | 7 | 0 | 3 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 2 | 2 | 19 | 2 | 17 | 8 | 12 | 0 | 15 | 6 | 12 | 3 | 1 | 2 | 5 |
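`create_pandas_dataframe` names the column levels only when the series keys form a `pandas.MultiIndex`. A small illustration of that case with made-up data:

```python
# Tuple-keyed series produce MultiIndex columns; assigning columns.names is
# what attaches the dimension names ('Attribute' is appended when detail
# columns are present).
import pandas

series = {('United States', 'A'): pandas.Series([1.0, 2.0]),
          ('Germany', 'A'): pandas.Series([3.0, 4.0])}
frame = pandas.DataFrame(series)
frame.columns.names = ['Country', 'Frequency']
print(frame)
```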
143,176 |
Knoema/knoema-python-driver
|
Knoema_knoema-python-driver/knoema/view_definitions.py
|
knoema.view_definitions.Dimension
|
class Dimension:
def __init__(self):
self.key = None
self.id = None
self.name = None
self.isGeo = None
self.datasetId = None
self.fields = []
self.members = None
|
class Dimension:
def __init__(self):
pass
| 2 | 0 | 8 | 0 | 8 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 7 | 1 | 1 | 10 | 1 | 9 | 9 | 7 | 0 | 9 | 9 | 7 | 1 | 0 | 0 | 1 |
143,177 |
Knoema/knoema-python-driver
|
Knoema_knoema-python-driver/knoema/data_reader.py
|
knoema.data_reader.ResponseReader
|
class ResponseReader(object):
def __init__(self, reader):
self.include_metadata = reader.include_metadata
self.dataset = reader.dataset
self.reader = reader
super().__init__()
def _get_detail_columns(self, resp):
detail_columns = []
if resp.descriptor is not None and 'detailColumns' in resp.descriptor and resp.descriptor['detailColumns'] is not None:
for column in resp.descriptor['detailColumns']:
detail_columns.append(column['name'])
return detail_columns if len(detail_columns) > 0 else None
|
class ResponseReader(object):
def __init__(self, reader):
pass
def _get_detail_columns(self, resp):
pass
| 3 | 0 | 6 | 1 | 6 | 0 | 3 | 0 | 1 | 1 | 0 | 3 | 2 | 3 | 2 | 2 | 14 | 2 | 12 | 8 | 9 | 0 | 12 | 8 | 9 | 4 | 1 | 2 | 5 |
143,178 |
Knoema/knoema-python-driver
|
Knoema_knoema-python-driver/knoema/data_reader.py
|
knoema.data_reader.PivotResponseReader
|
class PivotResponseReader(ResponseReader):
def __init__(self, reader, pivot_resp):
self.pivot_resp = pivot_resp
super().__init__(reader)
def get_pandasframe(self):
names_of_dimensions = self.reader._get_dimension_names()
# create dataframe with data
detail_columns = self._get_detail_columns(self.pivot_resp)
series = self._get_data_series(self.pivot_resp, detail_columns)
pandas_series = PandasHelper.creates_pandas_series(series, {}, detail_columns)
pandas_data_frame = PandasHelper.create_pandas_dataframe(pandas_series, names_of_dimensions, detail_columns)
if not self.include_metadata:
return pandas_data_frame
# create dataframe with metadata
series_with_attr = self._get_metadata_series(self.pivot_resp)
pandas_series_with_attr = PandasHelper.creates_pandas_series(series_with_attr, {}, None)
pandas_data_frame_with_attr = PandasHelper.create_pandas_dataframe(pandas_series_with_attr, names_of_dimensions, None)
return pandas_data_frame, pandas_data_frame_with_attr
def _get_data_series(self, resp, detail_columns):
series_map = {}
columns = None
frequency_list = []
for series_point in resp.tuples:
val = series_point['Value']
if val is None:
continue
series_name = self._get_series_name(series_point)
if series_name in series_map:
series = series_map[series_name]
else:
series = KnoemaSeries(series_name, [], [], detail_columns)
series_map[series_name] = series
freq = series_point['Frequency']
if 'Time' in series_point:
curr_date_val = series_point['Time']
try:
curr_date_val = datetime.strptime(series_point['Time'], '%Y-%m-%dT%H:%M:%SZ')
if (freq == "W"):
curr_date_val = curr_date_val - timedelta(days = curr_date_val.weekday())
except ValueError:
pass
else:
curr_date_val = 'All time'
if freq not in frequency_list:
frequency_list.append(freq)
if freq == "W":
curr_date_val = curr_date_val - timedelta(days = curr_date_val.weekday())
if freq == "FQ":
curr_date_val = TimeFormat.format_statistical(curr_date_val, 'FQ')
if detail_columns is not None:
columns = []
for column_name in detail_columns:
columns.append(series_point[column_name])
series.add_value(series_point['Value'], curr_date_val, columns)
if 'FQ' in frequency_list and len(frequency_list) > 1:
raise ValueError('Please provide a valid frequency list. FQ cannot be requested together with other frequencies.')
return series_map
def _get_series_name(self, series_point):
names = []
for dim in self.dataset.dimensions:
if dim.id in series_point:
names.append(series_point[dim.id])
if self.dataset.has_time:
names.append(series_point['Frequency'])
return tuple(names)
def _get_metadata_series(self, resp):
series = {}
names_of_attributes = self._get_attribute_names(resp)
for series_point in resp.tuples:
serie_name = self._get_series_name(series_point)
if serie_name not in series:
serie_attrs = self._get_series_with_metadata(series_point, resp)
series[serie_name] = KnoemaSeries(serie_name, serie_attrs, names_of_attributes, None)
return series
def _get_series_with_metadata(self, series_point, resp):
names = []
resp_dims = [dim for dim in (resp.header + resp.stub + resp.filter) if dim.fields and any(dim.fields)]
for dim in resp_dims:
if dim.dimensionid == 'Time' or not dim.metadataFields:
continue
for item in dim.metadataFields:
if item.name == series_point[dim.dimensionid]:
dim_attrs = item.fields
break
for attr in dim.fields:
find_elem = False
if attr['isSystemField']:
continue
for key, value in dim_attrs.items():
if definition.is_equal_strings_ignore_case(key, attr['name']):
find_elem = True
names.append(value)
break
if not find_elem:
names.append(None)
names.append(series_point.get('Unit'))
names.append(series_point.get('Scale'))
names.append(series_point.get('Mnemonics'))
for attr in self.dataset.timeseries_attributes:
names.append(series_point.get(attr.name))
return tuple(names)
def _get_attribute_names(self, resp):
resp_dims = [dim for dim in (resp.header + resp.stub + resp.filter) if dim.fields and any(dim.fields)]
names = []
for dim in resp_dims:
for d in self.dataset.dimensions:
if dim.dimensionid == d.id:
for attr in dim.fields:
if not attr['isSystemField']:
names.append(d.name + ' ' + attr["displayName"])
break
names.append('Unit')
names.append('Scale')
names.append('Mnemonics')
for attr in self.dataset.timeseries_attributes:
names.append(attr.name)
return names
|
class PivotResponseReader(ResponseReader):
def __init__(self, reader, pivot_resp):
pass
def get_pandasframe(self):
pass
def _get_data_series(self, resp, detail_columns):
pass
def _get_series_name(self, series_point):
pass
def _get_metadata_series(self, resp):
pass
def _get_series_with_metadata(self, series_point, resp):
pass
def _get_attribute_names(self, resp):
pass
| 8 | 0 | 18 | 2 | 16 | 0 | 6 | 0.02 | 1 | 8 | 3 | 0 | 7 | 1 | 7 | 9 | 135 | 18 | 115 | 45 | 107 | 2 | 113 | 45 | 105 | 13 | 2 | 5 | 41 |
143,179 |
Knoema/knoema-python-driver
|
Knoema_knoema-python-driver/tests/test_knoema_private_upload.py
|
tests.test_knoema_private_upload.TestKnoemaClient
|
class TestKnoemaClient(unittest.TestCase):
"""This is class with knoema client unit tests (Upload)"""
base_host = 'knoema.com'
def setUp(self):
apicfg = knoema.ApiConfig()
apicfg.host = os.environ['BASE_HOST'] if 'BASE_HOST' in os.environ else self.base_host
apicfg.app_id = os.environ['KNOEMA_APP_ID'] if 'KNOEMA_APP_ID' in os.environ else ''
apicfg.app_secret = os.environ['KNOEMA_APP_SECRET'] if 'KNOEMA_APP_SECRET' in os.environ else ''
def test_delete_dataset_negative(self):
"""The method is negative test on dataset deletion"""
with self.assertRaises(urllib.error.HTTPError) as context:
knoema.delete('non_existing_id')
self.assertTrue('HTTP Error 400: Bad Request' in str(context.exception))
def test_verify_dataset_negative(self):
"""The method is negative test on dataset verification"""
with self.assertRaises(ValueError) as context:
knoema.verify('non_existing_id', datetime.date.today(), 'IMF', 'http://knoema.com/')
self.assertTrue("Dataset has not been verified, because of the following error(s): Requested dataset doesn't exist or you don't have access to it." in str(context.exception))
def test_incorrect_host_delete_dataset(self):
"""The method is negative test on delete dataset with incorrect host"""
with self.assertRaises(ValueError) as context:
apicfg = knoema.ApiConfig()
apicfg.host = 'knoema_incorect.com'
knoema.delete('dataset')
self.assertTrue("The specified host knoema_incorect.com does not exist" in str(context.exception))
def test_incorrect_host_verify_dataset(self):
"""The method is negative test on verify dataset with incorrect host"""
with self.assertRaises(ValueError) as context:
apicfg = knoema.ApiConfig()
apicfg.host = 'knoema_incorect.com'
knoema.verify('non_existing_id', datetime.date.today(), 'IMF', 'http://knoema.com')
self.assertTrue("The specified host knoema_incorect.com does not exist" in str(context.exception))
def test_upload_generated_frames(self):
tuples = list(zip(*[['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'], ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]))
index = pandas.MultiIndex.from_tuples(tuples, names=['first', 'second'])
dates = pandas.date_range('1/1/2000', periods=8)
frame = pandas.DataFrame(numpy.random.randn(8, 8), index=dates, columns=index)
res = knoema.upload(frame, name = 'Test dataset')
self.assertIs(type(res), str)
self.assertEqual(len(res), 7)
frame = pandas.DataFrame(numpy.random.randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
res = knoema.upload(frame, name = 'Test dataset')
self.assertIs(type(res), str)
self.assertEqual(len(res), 7)
def test_upload_frames_from_existing_datasets(self):
frame = knoema.get('xmhdwqf', company='UBER', indicator='Annual', frequency='A', timerange='2018-2020')
res = knoema.upload(frame, name = 'Test dataset')
self.assertIs(type(res), str)
self.assertEqual(len(res), 7)
|
class TestKnoemaClient(unittest.TestCase):
'''This is class with knoema client unit tests (Upload)'''
def setUp(self):
pass
def test_delete_dataset_negative(self):
'''The method is negative test on dataset deletion'''
pass
def test_verify_dataset_negative(self):
'''The method is negative test on dataset verification'''
pass
def test_incorrect_host_delete_dataset(self):
'''The method is negative test on delete dataset with incorrect host'''
pass
def test_incorrect_host_verify_dataset(self):
'''The method is negative test on verify dataset with incorrect host'''
pass
def test_upload_generated_frames(self):
pass
def test_upload_frames_from_existing_datasets(self):
pass
| 8 | 5 | 7 | 1 | 6 | 1 | 1 | 0.11 | 1 | 7 | 1 | 0 | 7 | 0 | 7 | 79 | 63 | 14 | 44 | 23 | 36 | 5 | 44 | 19 | 36 | 4 | 2 | 1 | 10 |
143,180 |
Knoema/knoema-python-driver
|
Knoema_knoema-python-driver/tests/test_knoema_private.py
|
tests.test_knoema_private.TestKnoemaClient
|
class TestKnoemaClient(unittest.TestCase):
"""This is class with knoema client unit tests"""
base_host = 'knoema.com'
def setUp(self):
apicfg = knoema.ApiConfig()
apicfg.host = os.environ['BASE_HOST'] if 'BASE_HOST' in os.environ else self.base_host
apicfg.app_id = os.environ['KNOEMA_APP_ID'] if 'KNOEMA_APP_ID' in os.environ else ''
apicfg.app_secret = os.environ['KNOEMA_APP_SECRET'] if 'KNOEMA_APP_SECRET' in os.environ else ''
def test_too_long_get_query_string(self):
"""The method is testing issue with loo long get query string"""
for frame in knoema.get('US500COMPFINST2017Oct', include_metadata=True, indicator='Total Assets',
frequency='A', group_by='company'):
company = frame.id
data_frame = frame.data
metadata_frame = frame.metadata
self.assertIsNotNone(company, None)
self.assertIs(type(data_frame), pandas.core.frame.DataFrame)
self.assertIs(type(metadata_frame), pandas.core.frame.DataFrame)
break
def test_too_long_data_url(self):
"""The method is testing issue with loo long get query string"""
subject = ['Gross domestic product, constant prices (Percent change)',
'Gross domestic product, constant prices (Percent change (market exchange rates))',
'Gross domestic product, current prices (U.S. dollars)',
'Rubber, No.1 Rubber Smoked Sheet, FOB Maylaysian/Singapore, US cents per pound (U.S. cents)',
'Gross domestic product, current prices (Purchasing power parity; international dollars)',
'Gross national savings (Percent of GDP)',
'Total investment (Percent of GDP)',
'Inflation, average consumer prices (Percent change)',
'Inflation, end of period consumer prices (Percent change)',
'Trade volume of goods and services (Percent change)',
'Volume of imports of goods and services (Percent change)',
'Volume of Imports of goods (Percent change)',
'Volume of exports of goods and services (Percent change)',
'Volume of exports of goods (Percent change)',
'Current account balance (U.S. dollars)',
'Commodity Price Index includes both Fuel and Non-Fuel Price Indices (Index, 2005=100)',
'Commodity Non-Fuel Price Index includes Food and Beverages and Industrial Inputs Price Indices (Index, 2005=100)',
'Commodity Industrial Inputs Price Index includes Agricultural Raw Materials and Metals Price Indices (Index, 2005=100)',
'Coal, Australian thermal coal, 1200- btu/pound, less than 1% sulfur, 14% ash, FOB Newcastle/Port Kembla, US$ per metric tonne (U.S. dollars)',
'Coal, South African export price, US$ per metric tonne (U.S. dollars)',
'Commodity Coal Price Index includes Australian and South African Coal (Index, 2005=100)',
'Commodity Fuel (energy) Index includes Crude oil (petroleum), Natural Gas, and Coal Price Indices (Index, 2005=100)',
'Commodity Natural Gas Price Index includes European, Japanese, and American Natural Gas Price Indices (Index, 2005=100)',
'Crude Oil (petroleum), Dated Brent, light blend 38 API, fob U.K., US$ per barrel (U.S. dollars)',
'Crude Oil (petroleum), Price index simple average of three spot prices (APSP); Dated Brent, West Texas Intermediate, and the Dubai Fateh (Index, 2005=100)',
'Coffee, Robusta, International Coffee Organization New York cash price, ex-dock New York, US cents per pound (U.S. cents)',
'Commodity Beverage Price Index includes Coffee, Tea, and Cocoa (Index, 2005=100)',
'Commodity Cereals Price Index includes Wheat, Maize (Corn), Rice, and Barley (Index, 2005=100)',
'Commodity Coffee Price Index includes Other Mild Arabicas and Robusta (Index, 2005=100)']
separator = ";;"
frame = knoema.get("IMFWEO2021Apr", country="World", separator=separator, subject=separator.join(subject))
self.assertEqual(frame.shape[1], len(subject))
def test_ticker_endpoint(self):
"""Testing ticker endpoint"""
ticker = knoema.ticker('DDD')
self.assertEqual(ticker.name, '3D Systems Corporation')
self.assertEqual(len(ticker.groups), 4)
self.assertEqual(ticker.groups[0].name, 'Credit Risk')
self.assertEqual(len(ticker.groups[0].indicators), 6)
self.assertEqual(ticker.groups[0].indicators[0].name, 'Bankruptcy Base FRISK Probability')
self.assertIs(type(ticker.groups[0].indicators[0].get()), pandas.core.frame.DataFrame)
indicator = ticker.get_indicator('Bankruptcy Base FRISK Probability')
self.assertEqual(indicator.name, 'Bankruptcy Base FRISK Probability')
self.assertIs(type(indicator.get()), pandas.core.frame.DataFrame)
def test_streaming_more_than_1000(self):
"""The method is testing getting multiple series by dimension member ids and time range"""
data_frame = knoema.get('nama_r_e3gdp', **{'Measure': 'Euro per inhabitant'})
self.assertEqual(data_frame.shape[0], 12)
self.assertEqual(data_frame.shape[1], 1738)
def test_search_wrapper_search_for_timeseries(self):
"""The method is testing search wrapper to search for timeseries"""
search_results = knoema.search('Italy inflation')
search_results_1 = knoema.search('UAE oil production')
self.assertTrue(len(search_results.series) > 0)
self.assertTrue(len(search_results_1.series) > 0)
first_series = search_results.series[0].get()
first_series_1 = search_results_1.series[0].get()
self.assertIs(type(first_series), pandas.core.frame.DataFrame)
self.assertIs(type(first_series_1), pandas.core.frame.DataFrame)
def test_noaggregation(self):
data = knoema.get(**{"dataset" : "IMFWEO2020Oct", "country": "United States", "subject": "Population (Persons)", "frequency" : "Q" , "transform": 'NOAGG'})
self.assertEqual(data.shape[1], 0)
def test_include_metadata_true(self):
"""The method is testing getting multiple series with data and metadata"""
data, metadata = knoema.get('IMFWEO2017Apr', True, country=['914', '512'], subject='lp')
self.assertEqual(data.shape[0], 43)
self.assertEqual(data.shape[1], 2)
self.assertEqual(['Country', 'Subject', 'Frequency'], data.columns.names)
self.assertEqual(metadata.shape[0], 7)
self.assertEqual(metadata.shape[1], 2)
self.assertEqual(['Country', 'Subject', 'Frequency'], metadata.columns.names)
indx = data.first_valid_index()
sname = ('Albania', 'Population (Persons)', 'A')
value = data.at[indx, sname]
self.assertEqual(value, 2.762)
indx = metadata.first_valid_index()
value = metadata.at[indx, sname]
self.assertEqual(value, '914')
indx = data.last_valid_index()
value = data.at[indx, sname]
self.assertEqual(value, 2.858)
def test_get_data_with_partial_selection_with_metadata_transform(self):
"""The method is testing getting series with partial selection"""
_, metadata = knoema.get('IMFWEO2017Apr', True, subject='flibor6', frequency='A')
self.assertEqual(metadata.shape[1], 2)
self.assertEqual(['Country', 'Subject', 'Frequency'], metadata.columns.names)
sname = ('Japan', 'Six-month London interbank offered rate (LIBOR) (Percent)', 'A')
self.assertEqual(metadata.at['Country Id', sname], '158')
self.assertEqual(metadata.at['Subject Id', sname], 'FLIBOR6')
self.assertEqual(metadata.at['Unit', sname], 'Percent')
def test_get_data_with_partial_selection_with_metadata(self):
"""The method is testing getting series with partial selection"""
_, metadata = knoema.get('IMFWEO2017Apr', True, subject = 'flibor6')
self.assertEqual(metadata.shape[1], 2)
self.assertEqual(['Country', 'Subject', 'Frequency'], metadata.columns.names)
sname = ('Japan', 'Six-month London interbank offered rate (LIBOR) (Percent)', 'A')
self.assertEqual(metadata.at['Country Id',sname],'158')
self.assertEqual(metadata.at['Subject Id',sname],'FLIBOR6')
self.assertEqual(metadata.at['Unit',sname],'Percent')
def test_get_data_with_partial_selection(self):
"""The method is testing getting series with partial selection"""
data_frame = knoema.get('IMFWEO2017Apr', subject = 'flibor6')
self.assertEqual(data_frame.shape[1], 2)
self.assertEqual(['Country', 'Subject', 'Frequency'], data_frame.columns.names)
indx = data_frame.first_valid_index()
sname = ('Japan', 'Six-month London interbank offered rate (LIBOR) (Percent)', 'A')
value = data_frame.at[indx, sname]
self.assertEqual(value, 10.861)
indx = data_frame.index[38]
value = data_frame.at[indx, sname]
self.assertEqual(value, 0.048)
def test_get_data_from_flat_dataset_without_time_and_with_metadata(self):
"""The method is testing load data from flat dataset without time and with metadata"""
data_frame, metadata = knoema.get('pocqwkd', True, **{'Object type': 'Airports',
'Object name': 'Bakel airport'})
self.assertEqual(data_frame.shape[0], 1)
self.assertEqual(data_frame.shape[1], 6)
self.assertEqual(metadata, None)
self.assertEqual(data_frame.at[0, 'Latitude'], '14.847256')
self.assertEqual(data_frame.at[0, 'Longitude'], '-12.468264')
def test_get_data_from_flat_dataset_without_time(self):
"""The method is testing load data from flat dataset without time"""
data_frame = knoema.get('pocqwkd', **{'Object type': 'Airports',
'Object name': 'Bakel airport'})
self.assertEqual(data_frame.shape[0], 1)
self.assertEqual(data_frame.shape[1], 6)
value = data_frame.at[0, 'Place']
self.assertEqual(value, 'Bakel')
def test_get_data_from_flat_dataset_with_multi_measures_and_metadata(self):
"""The method is testing load data from flat dataset with with mulitple measures and metadata"""
data_frame, metadata = knoema.get('bmlaaaf', True, **{'Country': 'Albania',
'Borrower': 'Ministry of Finance',
'Guarantor': 'Albania',
'Loan type': 'B loan',
'Loan status': 'EFFECTIVE',
'Currency of commitment': 'eur',
'measure': 'Interest rate'})
self.assertEqual(data_frame.shape[0], 1)
self.assertEqual(data_frame.shape[1], 33)
self.assertEqual(metadata, None)
value = data_frame.at[0, 'Undisbursed Amount']
self.assertEqual(value, 79998000.0)
def test_get_data_from_flat_dataset_with_multi_measures(self):
"""The method is testing load data from flat dataset with with mulitple measures"""
data_frame = knoema.get('bmlaaaf', **{'Country': 'Albania',
'Borrower': 'Ministry of Finance',
'Guarantor': 'Albania',
'Loan type': 'B loan',
'Loan status': 'EFFECTIVE',
'Currency of commitment': 'eur',
'measure': 'Interest rate'})
self.assertEqual(data_frame.shape[0], 1)
self.assertEqual(data_frame.shape[1], 33)
value = data_frame.at[0, 'Interest Rate']
self.assertEqual(value, 0.0)
def test_get_data_from_flat_dataset(self):
"""The method is testing load data from flat dataset"""
data_frame = knoema.get('cblymmf', Country='Albania;Australia', Keyword='FGP;TWP;TRP')
self.assertEqual(data_frame.shape[0], 32)
self.assertEqual(data_frame.shape[1], 4)
self.assertAlmostEqual(float(data_frame.at[30, 'Value']), 98.8368, 4)
def test_get_data_from_dataset_with_multiword_dimnames_and_metadata_transform(self):
"""The method is testing load data from regular dataset with dimenions that have multi word names include metadata"""
data_frame, _ = knoema.get('FDI_FLOW_CTRY', True, **{'Reporting country': 'AUS',
'Partner country/territory': 'w0',
'Measurement principle': 'DI',
'Type of FDI': 'T_FA_F',
'Type of entity': 'ALL',
'Accounting entry': 'NET',
'Level of counterpart': 'IMC',
'Currency': 'USD',
'Frequency': 'A'})
self.assertEqual(data_frame.shape[0], 7)
self.assertEqual(data_frame.shape[1], 1)
sname = (
'Australia', 'WORLD', 'Directional principle: Inward', 'FDI financial flows - Total', 'All resident units',
'Net', 'Immediate counterpart (Immediate investor or immediate host)', 'US Dollar', 'A')
indx = data_frame.first_valid_index()
value = data_frame.at[indx, sname]
self.assertAlmostEqual(value, 31666.667, 3)
indx = data_frame.last_valid_index()
value = data_frame.at[indx, sname]
self.assertAlmostEqual(value, 22267.638, 3)
def test_get_data_from_dataset_with_multiword_dimnames_and_metadata(self):
"""The method is testing load data from regular dataset with dimenions that have multi word names include metadata"""
data_frame, _ = knoema.get('FDI_FLOW_CTRY', True, **{'Reporting country': 'AUS',
'Partner country/territory': 'w0',
'Measurement principle': 'DI',
'Type of FDI': 'T_FA_F',
'Type of entity': 'ALL',
'Accounting entry': 'NET',
'Level of counterpart': 'IMC',
'Currency': 'USD'})
self.assertEqual(data_frame.shape[0], 7)
self.assertEqual(data_frame.shape[1], 1)
sname = (
'Australia', 'WORLD', 'Directional principle: Inward', 'FDI financial flows - Total', 'All resident units',
'Net', 'Immediate counterpart (Immediate investor or immediate host)', 'US Dollar', 'A')
indx = data_frame.first_valid_index()
value = data_frame.at[indx, sname]
self.assertAlmostEqual(value, 31666.667, 3)
indx = data_frame.last_valid_index()
value = data_frame.at[indx, sname]
self.assertAlmostEqual(value, 22267.638, 3)
def test_get_data_from_dataset_with_multiword_dimnames(self):
"""The method is testing load data from regular dataset with dimenions that have multi word names"""
data_frame = knoema.get('FDI_FLOW_CTRY', **{'Reporting country': 'AUS',
'Partner country/territory': 'w0',
'Measurement principle': 'DI',
'Type of FDI': 'T_FA_F',
'Type of entity': 'ALL',
'Accounting entry': 'NET',
'Level of counterpart': 'IMC',
'Currency': 'USD'})
self.assertEqual(data_frame.shape[0], 7)
self.assertEqual(data_frame.shape[1], 1)
sname = ('Australia', 'WORLD', 'Directional principle: Inward', 'FDI financial flows - Total', 'All resident units', 'Net', 'Immediate counterpart (Immediate investor or immediate host)', 'US Dollar', 'A')
indx = data_frame.first_valid_index()
value = data_frame.at[indx, sname]
self.assertAlmostEqual(value, 31666.667, 3)
indx = data_frame.last_valid_index()
value = data_frame.at[indx, sname]
self.assertAlmostEqual(value, 22267.638, 3)
def test_get_data_from_dataset_by_dim_ids_transform(self):
"""The method is testing load data from regular dataset with dimenions that have multi word names by dim ids"""
data_frame = knoema.get('FDI_FLOW_CTRY', **{'Reporting-country': 'AUS',
'Partner-country': 'w0',
'Measurement-principle': 'DI',
'Type-of-FDI': 'T_FA_F',
'Type-of-entity': 'ALL',
'Accounting-entry': 'NET',
'Level-of-counterpart': 'IMC',
'Currency': 'USD',
'Frequency': 'A'})
self.assertEqual(data_frame.shape[0], 7)
self.assertEqual(data_frame.shape[1], 1)
sname = ('Australia', 'WORLD', 'Directional principle: Inward', 'FDI financial flows - Total', 'All resident units', 'Net', 'Immediate counterpart (Immediate investor or immediate host)', 'US Dollar', 'A')
indx = data_frame.first_valid_index()
value = data_frame.at[indx, sname]
self.assertAlmostEqual(value, 31666.667, 3)
indx = data_frame.last_valid_index()
value = data_frame.at[indx, sname]
self.assertAlmostEqual(value, 22267.638, 3)
def test_get_data_from_dataset_by_dim_ids(self):
"""The method is testing load data from regular dataset with dimenions that have multi word names by dim ids"""
data_frame = knoema.get('FDI_FLOW_CTRY', **{'Reporting-country': 'AUS',
'Partner-country': 'w0',
'Measurement-principle': 'DI',
'Type-of-FDI': 'T_FA_F',
'Type-of-entity': 'ALL',
'Accounting-entry': 'NET',
'Level-of-counterpart': 'IMC',
'Currency': 'USD'})
self.assertEqual(data_frame.shape[0], 7)
self.assertEqual(data_frame.shape[1], 1)
sname = (
'Australia', 'WORLD', 'Directional principle: Inward', 'FDI financial flows - Total', 'All resident units',
'Net', 'Immediate counterpart (Immediate investor or immediate host)', 'US Dollar', 'A')
indx = data_frame.first_valid_index()
value = data_frame.at[indx, sname]
self.assertAlmostEqual(value, 31666.667, 3)
indx = data_frame.last_valid_index()
value = data_frame.at[indx, sname]
self.assertAlmostEqual(value, 22267.638, 3)
|
class TestKnoemaClient(unittest.TestCase):
'''This is a class with knoema client unit tests'''
def setUp(self):
pass
def test_too_long_get_query_string(self):
'''The method is testing the issue with a too long get query string'''
pass
def test_too_long_data_url(self):
'''The method is testing the issue with a too long data url'''
pass
def test_ticker_endpoint(self):
'''Testing ticker endpoint'''
pass
def test_streaming_more_than_1000(self):
'''The method is testing streaming of more than 1000 series'''
pass
def test_search_wrapper_search_for_timeseries(self):
'''The method is testing search wrapper to search for timeseries'''
pass
def test_noaggregation(self):
pass
def test_include_metadata_true(self):
'''The method is testing getting multiple series with data and metadata'''
pass
def test_get_data_with_partial_selection_with_metadata_transform(self):
'''The method is testing getting series with partial selection'''
pass
def test_get_data_with_partial_selection_with_metadata(self):
'''The method is testing getting series with partial selection'''
pass
def test_get_data_with_partial_selection(self):
'''The method is testing getting series with partial selection'''
pass
def test_get_data_from_flat_dataset_without_time_and_with_metadata(self):
'''The method is testing load data from flat dataset without time and with metadata'''
pass
def test_get_data_from_flat_dataset_without_time(self):
'''The method is testing load data from flat dataset without time'''
pass
def test_get_data_from_flat_dataset_with_multi_measures_and_metadata(self):
'''The method is testing load data from flat dataset with multiple measures and metadata'''
pass
def test_get_data_from_flat_dataset_with_multi_measures(self):
'''The method is testing load data from flat dataset with multiple measures'''
pass
def test_get_data_from_flat_dataset(self):
'''The method is testing load data from flat dataset'''
pass
def test_get_data_from_dataset_with_multiword_dimnames_and_metadata_transform(self):
'''The method is testing load data from regular dataset with dimensions that have multi-word names, including metadata'''
pass
def test_get_data_from_dataset_with_multiword_dimnames_and_metadata(self):
'''The method is testing load data from regular dataset with dimensions that have multi-word names, including metadata'''
pass
def test_get_data_from_dataset_with_multiword_dimnames(self):
'''The method is testing load data from regular dataset with dimensions that have multi-word names'''
pass
def test_get_data_from_dataset_by_dim_ids_transform(self):
'''The method is testing load data from regular dataset with dimensions that have multi-word names, by dim ids'''
pass
def test_get_data_from_dataset_by_dim_ids(self):
'''The method is testing load data from regular dataset with dimensions that have multi-word names, by dim ids'''
pass
| 22 | 20 | 16 | 3 | 13 | 1 | 1 | 0.08 | 1 | 3 | 1 | 0 | 21 | 0 | 21 | 93 | 367 | 82 | 265 | 79 | 243 | 20 | 179 | 79 | 157 | 4 | 2 | 1 | 25 |
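A minimal sketch of the group_by streaming pattern from test_too_long_get_query_string above; each yielded frame carries id, data and metadata attributes:
import knoema
# Request one frame per company instead of a single wide frame; this keeps
# each underlying query string short enough for the server to accept.
for frame in knoema.get('US500COMPFINST2017Oct', include_metadata=True,
                        indicator='Total Assets', frequency='A', group_by='company'):
    print(frame.id, frame.data.shape, frame.metadata.shape)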
143,181 |
Knoema/knoema-python-driver
|
Knoema_knoema-python-driver/tests/test_knoema_data_time.py
|
tests.test_knoema_data_time.TestKnoemaClient
|
class TestKnoemaClient(unittest.TestCase):
"""This is class with knoema client unit tests with test credentials"""
base_host = 'knoema.com'
def setUp(self):
apicfg = knoema.ApiConfig()
apicfg.host = os.environ['BASE_HOST'] if 'BASE_HOST' in os.environ else self.base_host
apicfg.app_id = os.environ['KNOEMA_APP_ID'] if 'KNOEMA_APP_ID' in os.environ else ''
apicfg.app_secret = os.environ['KNOEMA_APP_SECRET'] if 'KNOEMA_APP_SECRET' in os.environ else ''
def test_getdata_by_TransformationDataReader_timemembers(self):
"""The method is testing getting single series by dimension member ids"""
data_frame = knoema.get('IMFWEO2021Apr', Country='614', Subject='BCA', timemembers='1980;2002;2023')
self.assertEqual(data_frame.shape[0], 3)
self.assertEqual(data_frame.shape[1], 1)
self.assertEqual(['Country', 'Subject', 'Frequency'], data_frame.columns.names)
indx = data_frame.first_valid_index()
sname = ('Angola', 'Current account balance (U.S. dollars)', 'A')
value = data_frame.at[indx, sname]
self.assertEqual(value, 0.07)
indx = data_frame.index[2]
value = data_frame.at[indx, sname]
self.assertEqual(value, -0.154)
def test_getdata_by_TransformationDataReader_timesince(self):
"""The method is testing getting single series by dimension member ids"""
data_frame = knoema.get('IMFWEO2021Apr', Country='614', Subject='BCA', timesince='2013')
self.assertEqual(data_frame.shape[0], 14)
self.assertEqual(data_frame.shape[1], 1)
self.assertEqual(['Country', 'Subject', 'Frequency'], data_frame.columns.names)
indx = data_frame.first_valid_index()
sname = ('Angola', 'Current account balance (U.S. dollars)', 'A')
value = data_frame.at[indx, sname]
self.assertEqual(value, 8.348)
indx = data_frame.index[13]
value = data_frame.at[indx, sname]
self.assertEqual(value, -0.457)
def test_getdata_by_TransformationDataReader_timelast1(self):
"""The method is testing getting multiple series by dimension member ids and time range"""
data_frame = knoema.get('IMFDOT2017', **{'Country': 'Algeria;Angola', 'Indicator': 'TXG_FOB_USD', 'Counterpart Country': '622', 'frequency': 'A;Q', 'timelast': '1'})
self.assertEqual(data_frame.shape[0], 4)
self.assertEqual(data_frame.shape[1], 4)
|
class TestKnoemaClient(unittest.TestCase):
'''This is a class with knoema client unit tests with test credentials'''
def setUp(self):
pass
def test_getdata_by_TransformationDataReader_timemembers(self):
'''The method is testing getting a single series by dimension member ids with timemembers'''
pass
def test_getdata_by_TransformationDataReader_timesince(self):
'''The method is testing getting a single series by dimension member ids with timesince'''
pass
def test_getdata_by_TransformationDataReader_timelast1(self):
'''The method is testing getting multiple series by dimension member ids with timelast'''
pass
| 5 | 4 | 12 | 3 | 8 | 1 | 2 | 0.11 | 1 | 1 | 1 | 0 | 4 | 0 | 4 | 76 | 54 | 15 | 35 | 16 | 30 | 4 | 35 | 16 | 30 | 4 | 2 | 0 | 7 |
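For reference, the three time-selection parameters exercised above, side by side (dataset and members are copied from the tests; pairing timelast with this particular dataset is an assumption for illustration):
import knoema
df_members = knoema.get('IMFWEO2021Apr', Country='614', Subject='BCA', timemembers='1980;2002;2023')
df_since = knoema.get('IMFWEO2021Apr', Country='614', Subject='BCA', timesince='2013')
df_last = knoema.get('IMFWEO2021Apr', Country='614', Subject='BCA', timelast='1')  # most recent point only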
143,182 |
Knoema/knoema-python-driver
|
Knoema_knoema-python-driver/tests/test_knoema.py
|
tests.test_knoema.TestKnoemaClient
|
class TestKnoemaClient(unittest.TestCase):
"""This is class with knoema client unit tests with test credentials"""
base_host = 'knoema.com'
def setUp(self):
apicfg = knoema.ApiConfig()
apicfg.host = self.base_host
apicfg.app_id = 'FzOYqDg'
apicfg.app_secret = 'SPrvmY8eGRcGA'
def test_getdata_singleseries_by_member_id(self):
"""The method is testing getting single series by dimension member ids"""
data_frame = knoema.get('xmhdwqf', company='c1', indicator='ind_a')
self.assertEqual(data_frame.shape[0], 11)
self.assertEqual(data_frame.shape[1], 1)
self.assertEqual(['Company', 'Indicator', 'Frequency'], data_frame.columns.names)
indx = data_frame.first_valid_index()
sname = ('BOX', 'Annual', 'A')
value = data_frame.at[indx, sname]
self.assertEqual(value, 85.50)
indx = data_frame.index[10]
value = data_frame.at[indx, sname]
self.assertEqual(value, 15.62)
def test_getdata_multiseries_by_member_id(self):
"""The method is testing getting multiple series by dimension member ids"""
data_frame = knoema.get('xmhdwqf', company='c1;c2', indicator='ind_m;ind_a')
self.assertEqual(data_frame.shape[0], 56)
self.assertEqual(data_frame.shape[1], 4)
self.assertEqual(['Company', 'Indicator', 'Frequency'], data_frame.columns.names)
indx = data_frame.index[7]
sname = ('BOX', 'Monthly', 'M')
value = data_frame.at[indx, sname]
self.assertEqual(value, 23.08)
indx = data_frame.index[55]
value = data_frame.at[indx, sname]
self.assertEqual(value, 19.71)
def test_getdata_multiseries_by_member_name(self):
"""The method is testing getting data by dimension member names"""
company_names = 'BOX;UBER'
indicator_names = 'Monthly;Annual'
data_frame = knoema.get('xmhdwqf', company=company_names, indicator=indicator_names)
self.assertEqual(data_frame.shape[0], 56)
self.assertEqual(data_frame.shape[1], 4)
self.assertEqual(['Company', 'Indicator', 'Frequency'], data_frame.columns.names)
indx = data_frame.index[7]
sname = ('BOX', 'Monthly', 'M')
value = data_frame.at[indx, sname]
self.assertEqual(value, 23.08)
indx = data_frame.index[55]
value = data_frame.at[indx, sname]
self.assertEqual(value, 19.71)
def test_getdata_multiseries_by_member_id_range(self):
"""The method is testing getting multiple series by dimension member ids and time range"""
data_frame = knoema.get('xmhdwqf', company='c1;c2', indicator='ind_a', timerange='2017-2019')
self.assertEqual(data_frame.shape[0], 11)
self.assertEqual(data_frame.shape[1], 2)
indx = data_frame.first_valid_index()
sname = ('UBER', 'Annual', 'A')
value = data_frame.at[indx, sname]
self.assertEqual(value, 53.03)
indx = data_frame.last_valid_index()
value = data_frame.at[indx, sname]
self.assertEqual(value, 99.15)
def test_getdata_singleseries_difffrequencies_by_member_id(self):
"""The method is testing getting single series on different frequencies by dimension member ids"""
data_frame = knoema.get('xmhdwqf', company='c1', indicator='ind_multi')
self.assertEqual(data_frame.shape[1], 3)
indx = data_frame.first_valid_index()
sname = ('BOX', 'Multi', 'A')
value = data_frame.at[indx, sname]
self.assertEqual(value, 60.24)
value = data_frame.at[pandas.to_datetime('2018-01-01'), sname]
self.assertEqual(value, 80.56)
indx = data_frame.first_valid_index()
sname = ('BOX', 'Multi', 'Q')
value = data_frame.at[indx, sname]
self.assertEqual(value, 47.82)
value = data_frame.at[pandas.to_datetime('2017-01-01'), sname]
self.assertEqual(value, 50.28)
def test_getdata_multiseries_singlefrequency_by_member_id(self):
"""The method is testing getting mulitple series with one frequency by dimension member ids"""
data_frame = knoema.get('xmhdwqf', company='c2', indicator='ind_multi', frequency='M')
self.assertEqual(data_frame.shape[1], 1)
sname = ('UBER', 'Multi', 'M')
value = data_frame.at[pandas.to_datetime('2017-01-01'), sname]
self.assertEqual(value, 27.91)
def test_getdata_multiseries_multifrequency_by_member_id(self):
"""The method is testing getting mulitple series queriing mulitple frequencies by dimension member ids"""
data_frame = knoema.get('xmhdwqf', company='c1;c2', indicator='ind_a;ind_multi', frequency='A;M')
self.assertEqual(data_frame.shape[1], 6)
sname = ('BOX', 'Annual', 'A')
value = data_frame.at[pandas.to_datetime('2013-01-01'), sname]
self.assertEqual(value, 77.93)
def test_getdata_multiseries_multifrequency_by_member_id_range(self):
"""The method is testing getting mulitple series queriing mulitple frequencies by dimension member ids with time range"""
data_frame = knoema.get('xmhdwqf', company='c1;c2', indicator='ind_a;ind_multi', frequency='A;M', timerange='2017M1-2020M12')
self.assertEqual(data_frame.shape[1], 6)
sname = ('BOX', 'Multi', 'A')
value = data_frame.at[pandas.to_datetime('2018-01-01'), sname]
self.assertEqual(value, 80.56)
def test_none_dataset(self):
"""The method is testing if dataset set up as None"""
with self.assertRaises(ValueError) as context:
knoema.get(None)
self.assertTrue('Dataset id is not specified' in str(context.exception))
def test_wrong_dimension(self):
"""The method is testing if there is wrong dimension name is specified"""
with self.assertRaises(ValueError) as context:
knoema.get('IMFWEO2017Apr', indicator='LP;NGDP')
self.assertTrue('Dimension with id or name indicator is not found' in str(context.exception))
def test_wrong_dimension_with_transform(self):
"""The method is testing if there is wrong dimension name is specified"""
with self.assertRaises(ValueError) as context:
knoema.get('IMFWEO2017Apr', indicator='LP;NGDP', transform='PCH')
self.assertTrue('Dimension with id or name indicator is not found' in str(context.exception))
def test_empty_dimension_selection(self):
"""The method is testing if there are no elements in dimension selection"""
with self.assertRaises(ValueError) as context:
knoema.get('IMFWEO2017Apr', country='', subject='LP;NGDP')
self.assertTrue('Selection for dimension Country is empty' in str(context.exception))
def test_empty_dimension_selection_with_transform(self):
"""The method is testing if there are no elements in dimension selection"""
with self.assertRaises(ValueError) as context:
knoema.get('IMFWEO2017Apr', country='', subject='LP;NGDP', transform='PCH')
self.assertTrue('Selection for dimension Country is empty' in str(context.exception))
def test_wrong_dimension_selection(self):
"""The method is testing if there are incorrect in dimension selection"""
with self.assertRaises(ValueError) as context:
knoema.get('xmhdwqf', company='c1;c2', indicator='ind_a;ind_multi;ind')
self.assertTrue('Selection for dimension Indicator contains invalid elements' in str(context.exception))
def test_wrong_dimension_selection_transform(self):
"""The method is testing if there are incorrect in dimension selection"""
with self.assertRaises(ValueError) as context:
knoema.get('xmhdwqf', company='c1;c2', indicator='ind_a;ind_multi;ind', frequency='A', transform='sum')
self.assertTrue('Selection for dimension Indicator contains invalid elements' in str(context.exception))
def test_get_data_from_flat_dataset_with_datecolumn(self):
"""The method is testing load data from flat dataset with specifying datecolumn"""
data_frame = knoema.get('bjxchy', country='Albania', measure='Original Principal Amount ($)', datecolumn='Effective Date (Most Recent)', timerange='2010-2015', frequency='A')
self.assertEqual(data_frame.shape[0], 5)
self.assertEqual(data_frame.shape[1], 5)
sname = ('Albania', 'MINISTRY OF FINANCE', 'Albania', 'FSL', 'Repaying', 'Sum(Original Principal Amount ($))', 'A')
value = data_frame.at[pandas.to_datetime('2013-01-01'), sname]
self.assertEqual(value, 40000000.0)
def test_incorrect_dataset_id(self):
"""The method is testing if dataset id set up incorrectly"""
with self.assertRaises(ValueError) as context:
knoema.get('incorrect_id', somedim='val1;val2')
self.assertTrue("Requested dataset doesn't exist or you don't have access to it." in str(context.exception))
def test_getdata_multiseries_by_member_key(self):
"""The method is testing getting multiple series by dimension member keys"""
data_frame = knoema.get('xmhdwqf', company='1000000;1000010', indicator='1000020;1000050')
self.assertEqual(data_frame.shape[0], 56)
self.assertEqual(data_frame.shape[1], 4)
self.assertEqual(['Company', 'Indicator', 'Frequency'], data_frame.columns.names)
indx = data_frame.index[7]
sname = ('BOX', 'Monthly', 'M')
value = data_frame.at[indx, sname]
self.assertEqual(value, 23.08)
indx = data_frame.index[55]
value = data_frame.at[indx, sname]
self.assertEqual(value, 19.71)
def test_get_data_from_dataset_with_multiword_dimnames_and_metadata_and_mnemomics(self):
"""The method is testing load data from regular dataset with dimenions that have multi word names include metadata and mnemonics"""
data_frame, metadata = knoema.get('eqohmpb', True, **{'Country': '1000000',
'Indicator': '1000010'})
self.assertEqual(data_frame.shape[0], 22)
self.assertEqual(data_frame.shape[1], 6)
self.assertEqual(metadata.shape[0], 5)
self.assertEqual(metadata.shape[1], 6)
self.assertEqual(['Country', 'Indicator', 'Frequency'], data_frame.columns.names)
self.assertEqual(['Country', 'Indicator', 'Frequency'], metadata.columns.names)
sname = ('Afghanistan', 'Gross domestic product, current prices', 'A')
indx = data_frame.first_valid_index()
value = data_frame.at[indx, sname]
self.assertAlmostEqual(value, 0.0)
indx = metadata.first_valid_index()
value = metadata.at[indx, sname]
self.assertAlmostEqual(value, '512')
self.assertAlmostEqual(metadata.at['Unit', sname], 'Number')
self.assertAlmostEqual(metadata.at['Scale', sname], 1.0)
self.assertAlmostEqual(metadata.at['Mnemonics', sname], '512NGDP_A_in_test_dataset')
def test_weekly_frequency(self):
"""The method is testing load data from regular dataset by weekly frequency"""
data = knoema.get('xmhdwqf', company='BOX', indicator='Weekly', frequency='W')
sname = ('BOX', 'Weekly', 'W')
value = data.at[pandas.to_datetime('2018-12-31'), sname]
self.assertEqual(value, 32.37)
value = data.at[pandas.to_datetime('2019-12-30'), sname]
self.assertEqual(value, 83.73)
def test_incorrect_host_knoema_get(self):
"""The method is negative test on get series from dataset with incorrect host"""
with self.assertRaises(ValueError) as context:
apicfg = knoema.ApiConfig()
apicfg.host = 'knoema_incorect.com'
_ = knoema.get('IMFWEO2017Apr', country='914', subject='ngdp')
self.assertTrue("The specified host knoema_incorect.com does not exist" in str(context.exception))
def test_search_by_mnemonics_data(self):
"""The method is testing searching by mnemonics"""
data_frame = knoema.get('eqohmpb', mnemonics='512NGDP_A_in_test_dataset')
self.assertEqual(data_frame.shape[1], 1)
sname = ('512NGDP_A_in_test_dataset')
indx = data_frame.first_valid_index()
value = data_frame.at[indx, sname]
self.assertEqual(value, 0)
def test_search_by_mnemonics_with_metadata(self):
"""The method is testing searching by mnemonics with metadata"""
data_frame, metadata = knoema.get('eqohmpb', True, mnemonics='512NGDP_A_in_test_dataset')
self.assertEqual(data_frame.shape[1], 1)
sname = ('512NGDP_A_in_test_dataset')
indx = data_frame.first_valid_index()
value = data_frame.at[indx, sname]
self.assertEqual(value, 0)
self.assertEqual(metadata.shape[1], 1)
self.assertEqual(metadata.shape[0], 5)
self.assertEqual(metadata.at['Country Id',sname],'512')
self.assertEqual(metadata.at['Indicator Id',sname],'NGDP')
self.assertEqual(metadata.at['Unit',sname],'Number')
self.assertEqual(metadata.at['Mnemonics',sname],'512NGDP_A_in_test_dataset')
def test_search_by_mnemonics_data_by_all_datasets(self):
"""The method is testing searching by mnemonics by all dataset and returns data"""
data_frame = knoema.get(mnemonics='512NGDP_A_in_test_dataset;512NGDP')
self.assertEqual(data_frame.shape[1], 2)
sname = ('512NGDP_A_in_test_dataset')
indx = data_frame.first_valid_index()
value = data_frame.at[indx, sname]
self.assertEqual(value, 0)
def test_search_by_mnemonics_with_metadata_by_all_datasets(self):
"""The method is testing searching by mnemonics by all dataset and returns data and metadata"""
data_frame, metadata = knoema.get(None, True, mnemonics='512NGDP_A_in_test_dataset;512NGDP')
self.assertEqual(data_frame.shape[1], 2)
sname = ('512NGDP_A_in_test_dataset')
indx = data_frame.first_valid_index()
value = data_frame.at[indx, sname]
self.assertEqual(value, 0)
self.assertEqual(metadata.at['Country Id',sname],'512')
self.assertEqual(metadata.at['Indicator Id',sname],'NGDP')
self.assertEqual(metadata.at['Unit',sname],'Number')
self.assertEqual(metadata.at['Mnemonics',sname],'512NGDP_A_in_test_dataset')
def test_search_by_mnemonics_with_metadata_by_all_datasets_transform(self):
"""The method is testing searching by mnemonics by all dataset and returns data and metadata"""
data_frame, metadata = knoema.get(None, True, mnemonics='512NGDP_R;512NGDP', transform='PCH', frequency='A')
self.assertEqual(data_frame.shape[1], 2)
sname = ('512NGDP_R')
indx = data_frame.first_valid_index()
value = data_frame.at[indx, sname]
self.assertEqual(value, 8.69275111845198)
self.assertEqual(metadata.at['Country Id',sname],'512')
self.assertEqual(metadata.at['Indicator Id',sname],'NGDP_R')
self.assertEqual(metadata.at['Unit',sname],'%')
self.assertEqual(metadata.at['Mnemonics',sname],'512NGDP_R')
def test_get_all_series_from_dataset(self):
"""The method is testing getting all series from dataset"""
data_frame = knoema.get('dzlnsee')
self.assertEqual(data_frame.shape[1], 4)
self.assertEqual(['Company', 'Indicator', 'Frequency'], data_frame.columns.names)
indx = data_frame.first_valid_index()
sname = ('BOX', 'Monthly', 'M')
value = data_frame.at[indx, sname]
self.assertEqual(value, 23.08)
def test_with_empty_attributes_in_the_dimensions(self):
data, metadata = knoema.get('IMFWEO2017Oct', include_metadata=True, country=['914','512'], subject='lp')
self.assertEqual(data.shape[0], 43)
self.assertEqual(data.shape[1], 2)
self.assertEqual(['Country', 'Subject', 'Frequency'], data.columns.names)
self.assertEqual(metadata.shape[0], 8)
self.assertEqual(metadata.shape[1], 2)
self.assertEqual(['Country', 'Subject', 'Frequency'], metadata.columns.names)
indx = data.first_valid_index()
sname = ('Albania', 'Population (Persons)', 'A')
value = data.at[indx, sname]
self.assertEqual(value, 2.762)
self.assertEqual(metadata.at['Country Id',sname],'914')
self.assertEqual(metadata.at['Subject Id',sname],'LP')
self.assertEqual(metadata.at['Subject SubjectDescription',sname],None)
self.assertEqual(metadata.at['Subject SubjectNotes',sname],None)
self.assertEqual(metadata.at['Subject Relevant',sname],None)
self.assertEqual(metadata.at['Unit',sname],'Persons (Millions)')
self.assertEqual(metadata.at['Mnemonics',sname],None)
indx = data.last_valid_index()
value = data.at[indx, sname]
self.assertEqual(value, 2.856)
def test_with_empty_attributes_in_the_dimensions_transform(self):
data, metadata = knoema.get('IMFWEO2017Oct', include_metadata=True, country=['914','512'], subject='lp', frequency='A')
self.assertEqual(data.shape[0], 43)
self.assertEqual(data.shape[1], 2)
self.assertEqual(['Country', 'Subject', 'Frequency'], data.columns.names)
self.assertEqual(metadata.shape[0], 8)
self.assertEqual(metadata.shape[1], 2)
self.assertEqual(['Country', 'Subject', 'Frequency'], metadata.columns.names)
indx = data.first_valid_index()
sname = ('Albania', 'Population (Persons)', 'A')
value = data.at[indx, sname]
self.assertEqual(value, 2.762)
self.assertEqual(metadata.at['Country Id',sname],'914')
self.assertEqual(metadata.at['Subject Id',sname],'LP')
self.assertEqual(metadata.at['Subject SubjectDescription',sname],None)
self.assertEqual(metadata.at['Subject SubjectNotes',sname],None)
self.assertEqual(metadata.at['Subject Relevant',sname],None)
self.assertEqual(metadata.at['Unit',sname],'Persons (Millions)')
self.assertEqual(metadata.at['Mnemonics',sname],None)
indx = data.last_valid_index()
value = data.at[indx, sname]
self.assertEqual(value, 2.856)
def test_getdata_from_private_community(self):
"""The method is testing getting data from private community"""
apicfgCommunity = knoema.ApiConfig()
apicfgCommunity.host = 'teryllol.' + self.base_host
apicfgCommunity.app_id = 's81oiSY'
apicfgCommunity.app_secret='g4lKmIOPE2R4w'
data_frame = knoema.get('qfsneof', country='USA', series='NY.GDP.MKTP.KD.ZG')
self.assertEqual(data_frame.shape[0], 60)
self.assertEqual(data_frame.shape[1], 1)
self.assertEqual(['Country', 'Series', 'Frequency'], data_frame.columns.names)
indx = data_frame.first_valid_index()
sname = ('United States', 'GDP growth (annual %)', 'A')
value = data_frame.at[indx, sname]
self.assertEqual(value, 2.29999999999968)
indx = data_frame.index[57]
value = data_frame.at[indx, sname]
self.assertEqual(value, 2.99646435222829)
def test_getdata_custom_separator(self):
"""The method is testing getting data with custom separator"""
data_frame = knoema.get('IMFWEO2019Oct',
country='Albania',
subject='Gross domestic product, constant prices (Percent change)|Gross domestic product per capita, constant prices (Purchasing power parity; 2011 international dollar)',
separator='|')
self.assertEqual(data_frame.shape[0], 45)
self.assertEqual(data_frame.shape[1], 2)
self.assertEqual(['Country', 'Subject', 'Frequency'], data_frame.columns.names)
indx = data_frame.first_valid_index()
sname = ('Albania', 'Gross domestic product per capita, constant prices (Purchasing power parity; 2011 international dollar)', 'A')
value = data_frame.at[indx, sname]
self.assertEqual(value, 4832.599)
def test_getdata_default_web_separator(self):
"""The method is testing getting data with custom separator"""
data_frame = knoema.get('IMFWEO2019Oct',
country='Albania',
subject='NGDP_RPCH,NGDPRPPPPC',
separator=',')
self.assertEqual(data_frame.shape[0], 45)
self.assertEqual(data_frame.shape[1], 2)
self.assertEqual(['Country', 'Subject', 'Frequency'], data_frame.columns.names)
indx = data_frame.first_valid_index()
sname = ('Albania', 'Gross domestic product per capita, constant prices (Purchasing power parity; 2011 international dollar)', 'A')
value = data_frame.at[indx, sname]
self.assertEqual(value, 4832.599)
def test_getdata_region_dim_region_id(self):
"""The method is testing getting data from dataset with region dimention by region id"""
data_frame = knoema.get('IMFWEO2019Oct',
country = 'AF',
subject = ['Gross domestic product, constant prices (Percent change)']
)
self.assertEqual(['Country', 'Subject', 'Frequency'], data_frame.columns.names)
self.assertEqual('Afghanistan', data_frame.columns.values[0][0])
indx = data_frame.first_valid_index()
sname = ('Afghanistan', 'Gross domestic product, constant prices (Percent change)', 'A')
value = data_frame.at[indx, sname]
self.assertEqual(value, 8.692)
def test_getdata_region_as_dim_region_id(self):
"""The method is testing getting data from dataset with region dimention by region id"""
data_frame = knoema.get('IMFWEO2019Oct',
region = 'AF',
subject = ['Gross domestic product, constant prices (Percent change)']
)
self.assertEqual(['Country', 'Subject', 'Frequency'], data_frame.columns.names)
self.assertEqual('Afghanistan', data_frame.columns.values[0][0])
indx = data_frame.first_valid_index()
sname = ('Afghanistan', 'Gross domestic product, constant prices (Percent change)', 'A')
value = data_frame.at[indx, sname]
self.assertEqual(value, 8.692)
def test_getdata_with_columns(self):
"""The method is testing getting data from dataset with columns"""
data_frame = knoema.get('pqgusj', Company = 'APPLE', columns='*', frequency='FQ')
self.assertEqual(['Company', 'Indicator', 'Country', 'Frequency', 'Attribute'], data_frame.columns.names)
self.assertEqual(3, data_frame.columns.size)
self.assertEqual(('APPLE', 'Export', 'US', 'FQ', 'StatisticalDate'), data_frame.columns.values[1])
def test_getdata_datelabels(self):
"""The method is testing getting data from dataset with region dimention by region id"""
data_frame = knoema.get('pqgusj', company = 'APPLE', frequency='FQ')
self.assertEqual(['Company', 'Indicator', 'Country', 'Frequency'], data_frame.columns.names)
sname = ('APPLE', 'Export', 'US', 'FQ')
# this is FQ dateLabel
value = data_frame.at[pandas.to_datetime('2019-10-09'), sname]
self.assertEqual(value, 0.531275885712192)
def test_grouping_functionality(self):
"""The method is tesing grouping functionality"""
generator = knoema.get('IMFWEO2017Oct', include_metadata = True, group_by='country', country='Albania;Italy;Japan', subject='ngdp', timerange='2005-2010')
frames = []
for frame in generator:
frames.append(frame)
self.assertEqual(len(frames), 3)
self.assertIs(type(frames[0].data), pandas.core.frame.DataFrame)
self.assertIs(type(frames[0].metadata), pandas.core.frame.DataFrame)
def test_FQ_frequescy(self):
"""Testing FQ frequency"""
data_frame = knoema.get('xmhdwqf', company='UBER', indicator='Fisqal Quarterly', frequency='FQ')
self.assertIs(type(data_frame), pandas.core.frame.DataFrame)
self.assertEqual(data_frame.shape[0], 26)
self.assertEqual(data_frame.shape[1], 1)
def test_aggregations(self):
"""Testing aggregations disaggregation"""
frame = knoema.get('xmhdwqf', company='UBER', indicator='Daily', frequency='D', timerange = '2019M1-2021M4')
self.assertEqual(frame.shape[0], 366)
self.assertEqual(frame.shape[1], 1)
generator = knoema.get('xmhdwqf', group_by = 'company', company='UBER', indicator='Daily', frequency='D', timerange = '2019M1-2021M4')
for frame in generator:
self.assertEqual(frame.data.shape[0], 366)
self.assertEqual(frame.data.shape[1], 1)
frame = knoema.get('xmhdwqf', company='UBER', indicator='Daily', frequency='Q', timerange = '2019M1-2021M4', transform='sum')
self.assertEqual(frame.shape[0], 5)
self.assertEqual(frame.shape[1], 1)
generator = knoema.get('xmhdwqf', group_by='company', company='UBER', indicator='Daily', frequency='Q', timerange = '2019M1-2021M4', transform='sum')
for frame in generator:
self.assertEqual(frame.data.shape[0], 5)
self.assertEqual(frame.data.shape[1], 1)
def test_auto_aggregations_nodata(self):
"""Testing that auto aggregations returns no data"""
frame = knoema.get('xmhdwqf', company='UBER', indicator='Daily', frequency='D',
timerange='2019M1-2021M4')
self.assertEqual(frame.shape[0], 366)
self.assertEqual(frame.shape[1], 1)
frame = knoema.get('xmhdwqf', company='UBER', indicator='Daily', frequency='Q',
timerange='2019M1-2021M4')
self.assertEqual(frame.shape[0], 0)
self.assertEqual(frame.shape[1], 0)
def test_auto_disaggregations_nodata(self):
"""Testing that auto disaggregations returns no data"""
frame = knoema.get('xmhdwqf', company='UBER', indicator='Annual', frequency='A', timerange='2018-2020')
self.assertEqual(frame.shape[0], 3)
self.assertEqual(frame.shape[1], 1)
frame = knoema.get('xmhdwqf', company='UBER', indicator='Annual', frequency='M', timerange='2018-2020')
self.assertEqual(frame.shape[0], 0)
self.assertEqual(frame.shape[1], 0)
|
class TestKnoemaClient(unittest.TestCase):
'''This is a class with knoema client unit tests with test credentials'''
def setUp(self):
pass
def test_getdata_singleseries_by_member_id(self):
'''The method is testing getting single series by dimension member ids'''
pass
def test_getdata_multiseries_by_member_id(self):
'''The method is testing getting multiple series by dimension member ids'''
pass
def test_getdata_multiseries_by_member_name(self):
'''The method is testing getting data by dimension member names'''
pass
def test_getdata_multiseries_by_member_id_range(self):
'''The method is testing getting multiple series by dimension member ids and time range'''
pass
def test_getdata_singleseries_difffrequencies_by_member_id(self):
'''The method is testing getting single series on different frequencies by dimension member ids'''
pass
def test_getdata_multiseries_singlefrequency_by_member_id(self):
'''The method is testing getting multiple series with one frequency by dimension member ids'''
pass
def test_getdata_multiseries_multifrequency_by_member_id(self):
'''The method is testing getting multiple series querying multiple frequencies by dimension member ids'''
pass
def test_getdata_multiseries_multifrequency_by_member_id_range(self):
'''The method is testing getting multiple series querying multiple frequencies by dimension member ids with time range'''
pass
def test_none_dataset(self):
'''The method is testing the case when the dataset is None'''
pass
def test_wrong_dimension(self):
'''The method is testing the case when a wrong dimension name is specified'''
pass
def test_wrong_dimension_with_transform(self):
'''The method is testing the case when a wrong dimension name is specified'''
pass
def test_empty_dimension_selection(self):
'''The method is testing if there are no elements in dimension selection'''
pass
def test_empty_dimension_selection_with_transform(self):
'''The method is testing if there are no elements in dimension selection'''
pass
def test_wrong_dimension_selection(self):
'''The method is testing the case when there are incorrect elements in the dimension selection'''
pass
def test_wrong_dimension_selection_transform(self):
'''The method is testing the case when there are incorrect elements in the dimension selection'''
pass
def test_get_data_from_flat_dataset_with_datecolumn(self):
'''The method is testing load data from flat dataset with specifying datecolumn'''
pass
def test_incorrect_dataset_id(self):
'''The method is testing the case when the dataset id is set up incorrectly'''
pass
def test_getdata_multiseries_by_member_key(self):
'''The method is testing getting multiple series by dimension member keys'''
pass
def test_get_data_from_dataset_with_multiword_dimnames_and_metadata_and_mnemomics(self):
'''The method is testing load data from regular dataset with dimensions that have multi-word names, including metadata and mnemonics'''
pass
def test_weekly_frequency(self):
'''The method is testing load data from regular dataset by weekly frequency'''
pass
def test_incorrect_host_knoema_get(self):
'''The method is a negative test on getting series from a dataset with an incorrect host'''
pass
def test_search_by_mnemonics_data(self):
'''The method is testing searching by mnemonics'''
pass
def test_search_by_mnemonics_with_metadata(self):
'''The method is testing searching by mnemonics with metadata'''
pass
def test_search_by_mnemonics_data_by_all_datasets(self):
'''The method is testing searching by mnemonics by all dataset and returns data'''
pass
def test_search_by_mnemonics_with_metadata_by_all_datasets(self):
'''The method is testing searching by mnemonics by all dataset and returns data and metadata'''
pass
def test_search_by_mnemonics_with_metadata_by_all_datasets_transform(self):
'''The method is testing searching by mnemonics by all dataset and returns data and metadata'''
pass
def test_get_all_series_from_dataset(self):
'''The method is testing getting all series from dataset'''
pass
def test_with_empty_attributes_in_the_dimensions(self):
pass
def test_with_empty_attributes_in_the_dimensions_transform(self):
pass
def test_getdata_from_private_community(self):
'''The method is testing getting data from private community'''
pass
def test_getdata_custom_separator(self):
'''The method is testing getting data with custom separator'''
pass
def test_getdata_default_web_separator(self):
'''The method is testing getting data with the default web separator'''
pass
def test_getdata_region_dim_region_id(self):
'''The method is testing getting data from dataset with region dimension by region id'''
pass
def test_getdata_region_as_dim_region_id(self):
'''The method is testing getting data from dataset with region dimension by region id'''
pass
def test_getdata_with_columns(self):
'''The method is testing getting data from dataset with columns'''
pass
def test_getdata_datelabels(self):
'''The method is testing getting data with date labels'''
pass
def test_grouping_functionality(self):
'''The method is testing grouping functionality'''
pass
def test_FQ_frequescy(self):
'''Testing FQ frequency'''
pass
def test_aggregations(self):
'''Testing aggregation and disaggregation'''
pass
def test_auto_aggregations_nodata(self):
'''Testing that auto aggregations returns no data'''
pass
def test_auto_disaggregations_nodata(self):
'''Testing that auto disaggregations returns no data'''
pass
| 43 | 40 | 13 | 2 | 9 | 1 | 1 | 0.11 | 1 | 4 | 1 | 0 | 42 | 0 | 42 | 114 | 575 | 144 | 390 | 166 | 347 | 41 | 375 | 157 | 332 | 3 | 2 | 1 | 45 |
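The selection errors tested above all surface as ValueError, so a defensive call can be sketched as:
import knoema
try:
    frame = knoema.get('IMFWEO2017Apr', country='', subject='LP;NGDP')
except ValueError as err:
    # e.g. 'Selection for dimension Country is empty' or
    # 'Dimension with id or name ... is not found'
    print(err)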
143,183 |
Knoema/knoema-python-driver
|
Knoema_knoema-python-driver/knoema/view_definitions.py
|
knoema.view_definitions.Field
|
class Field:
def __init__(self, field_info):
self.key = field_info['key']
self.name = field_info['name']
self.displayName = field_info['displayName']
self.type = field_info['type']
self.locale = field_info['locale']
self.baseKey = field_info['baseKey']
self.isSystemField = field_info['isSystemField']
|
class Field:
def __init__(self, field_info):
pass
| 2 | 0 | 8 | 0 | 8 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 7 | 1 | 1 | 10 | 1 | 9 | 9 | 7 | 0 | 9 | 9 | 7 | 1 | 0 | 0 | 1 |
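A minimal sketch of constructing Field from the dict shape its __init__ expects; every key below is required by the constructor, and the values are illustrative only:
field_info = {
    'key': 1,
    'name': 'country',
    'displayName': 'Country',
    'type': 'string',
    'locale': 'en',
    'baseKey': None,
    'isSystemField': False,
}
field = Field(field_info)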
143,184 |
Knoema/knoema-python-driver
|
Knoema_knoema-python-driver/knoema/upload_frame.py
|
knoema.upload_frame.FrameTransformerRegular
|
class FrameTransformerRegular(FrameTransformerBase):
def __init__(self, frame, dim_columns, date_columns, value_columns):
super().__init__(frame, dim_columns, date_columns, value_columns)
def _get_axis_types(self):
return [type(axes).__name__ for axes in self._frame.axes]
def _change_columns_types(self, date_columns, value_columns):
for date_column in date_columns:
self._frame[date_column] = pd.to_datetime(self._frame[date_column])
for value_column in value_columns:
self._frame[value_column] = pd.to_numeric(self._frame[value_column])
def _prepare_frame(self):
ready_to_upload_frame = None
axes_types = self._get_axis_types()
self._change_columns_types(self._date_columns, self._value_columns)
if 'DatetimeIndex' not in axes_types:
if len(self._date_columns) == 0:
raise ValueError('The frame has no column with dates.')
if len(self._date_columns) == 1:
self._frame.set_index(self._date_columns[0], drop = True, append = False, inplace = True)
if len(self._value_columns) > 1:
raise ValueError('The frame has more than one column with values.')
if 'MultiIndex' in axes_types:
ready_to_upload_frame = self._frame
else:
if len(self._dim_columns) == 0:
if len(self._value_columns) == 0:
raise ValueError('The frame doesn\'t have column with values.')
ready_to_upload_frame = self._frame
else:
if len(self._value_columns) == 0:
raise ValueError('The frame doesn\'t have column with values.')
if len(self._value_columns) == 1:
ready_to_upload_frame = self._frame.pivot_table(
index = self._date_columns[0],
columns = self._dim_columns if len(self._dim_columns) > 0 else None,
values = self._value_columns[0]
)
else:
raise ValueError('The frame has more than one column with values.')
return ready_to_upload_frame
def _parse_dimension_list(self, full_dim_list):
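# Split the special Frequency/Scale/Unit columns out of the column index,
# remembering their positions so _data_sheet can re-emit them separately.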
freq_index = -1
scale_index = -1
unit_index = -1
dimensions = []
if len(full_dim_list) == 1 and full_dim_list[0] == None:
full_dim_list = ['Dimension1']
for i in range(len(full_dim_list)):
dim = full_dim_list[i]
if dim == 'Frequency':
freq_index = i
continue
if dim == 'Scale':
scale_index = i
continue
if dim == 'Unit':
unit_index = i
continue
dimensions.append(full_dim_list[i])
return dimensions, freq_index, scale_index, unit_index
def _dataset_sheet(self, dataset_id, dataset_name, dimensions):
rows = []
rows.append(['Name', 'Value'])
if dataset_id != None:
rows.append(['Dataset', dataset_id])
if dataset_id != None or dataset_name != None:
rows.append(['Dataset name', dataset_name if dataset_name != None else dataset_id])
rows.append(['Dimensions', ';'.join(dimensions)])
rows.append(['Data', 'Data'])
return rows
def _get_unic_id(self, name, map):
if name in map:
return map[name]
parts_raw = name.split(' ')
parts = []
for part in parts_raw:
new_part = ''.join(p for p in part if p.isalnum())
if new_part == '':
continue
parts.append(new_part.upper())
if len(parts) == 1:
return parts[0].upper()[:8]
if len(parts) <= 3:
res = ''
for part in parts:
res += part.upper()[:3]
return res
res = ''
for part in parts:
res += part.upper()[0]
counter = 1
res_with_counter = res
while True:
if res_with_counter in map.values():
res_with_counter = res + '_' + str(counter)
counter += 1
else:
res = res_with_counter
break
return res
def _dimension_sheets(self, dimensions, series_names):
dimensions_rows = {}
dimensions_map = {}
for dim_ind in range(len(dimensions)):
dim = dimensions[dim_ind]
dim_rows = []
dim_map = {}
first_row = ['Name', 'Code']
dim_rows.append(first_row)
for name in series_names:
part = name if len(dimensions) == 1 else name[dim_ind]
id = self._get_unic_id(part, dim_map)
row = [part, id]
if part in dim_map:
continue
dim_map[part] = id
dim_rows.append(row)
dimensions_rows[dim] = dim_rows
dimensions_map[dim] = dim_map
return dimensions_rows, dimensions_map
def _freq_fetch(self, pandas_freq):
parts = pandas_freq.split('-')
freq = parts[0]
# https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases
if freq in ['B', 'C', 'D']: return 'D'
if freq in ['W']: return 'W'
if freq in ['M', 'SM', 'BM', 'CBM', 'MS', 'BMS', 'CBMS']: return 'M'
if freq in ['Q', 'BQ', 'QS', 'BQS']: return 'Q'
if freq in ['A', 'Y', 'BA', 'BY', 'AS', 'YS', 'BAS', 'BYS']: return 'A'
return None
def _data_sheet(self, frame, dimensions, dimensions_map, freq_index, scale_index, unit_index, series_names):
data_rows = []
first_data_row = []
for d_n in dimensions:
first_data_row.append(d_n)
if scale_index >= 0:
first_data_row.append('Scale')
if unit_index >= 0:
first_data_row.append('Unit')
first_data_row.append('Frequency')
first_data_row.extend(['Date', 'Value'])
data_rows.append(first_data_row)
for name in series_names:
series = frame[name]
nan_series = series.isnull()
for ind in range(len(frame.index)):
row = []
if len(dimensions) == 1:
map = dimensions_map[dimensions[0]]
row.append(map[name])
freq = None
if frame.axes[0].freq == None:
freq = self._freq_fetch(pd.infer_freq(frame.axes[0]))
else:
freq = self._freq_fetch(self._frame.axes[0].freq.name)
if freq == None:
raise ValueError('Wrong frequency.')
row.append(freq)
else:
for part_ind in range(len(name)):
if scale_index >= 0 and part_ind == scale_index:
continue
if unit_index >= 0 and part_ind == unit_index:
continue
if freq_index >= 0 and part_ind == freq_index:
continue
part = name[part_ind]
dim_name = frame.columns.names[part_ind]
if dim_name in dimensions_map:
map = dimensions_map[dim_name]
part = map[part]
row.append(part)
if scale_index >= 0:
row.append(name[scale_index])
if unit_index >= 0:
row.append(name[unit_index])
if freq_index >= 0:
row.append(name[freq_index])
if freq_index == -1:
freq = None
if frame.axes[0].freq == None:
freq = self._freq_fetch(pd.infer_freq(frame.axes[0]))
else:
freq = self._freq_fetch(self._frame.axes[0].freq.name)
if freq == None:
raise ValueError('Wrong frequency.')
row.append(freq)
row.append(frame.index[ind])
row.append(series[ind] if not nan_series[ind] else '')
data_rows.append(row)
return data_rows
def prepare(self, file_wrapper, dataset = None, dataset_name = None):
frame = self._prepare_frame()
dimensions, freq_index, scale_index, unit_index = self._parse_dimension_list(frame.columns.names)
dataset_rows = self._dataset_sheet(dataset, dataset_name, dimensions)
series_names = frame.columns.values
dimensions_rows, dimensions_map = self._dimension_sheets(dimensions, series_names)
data_rows = self._data_sheet(frame, dimensions, dimensions_map, freq_index, scale_index, unit_index, series_names)
file_wrapper.add_to_archive('Dataset.csv', dataset_rows)
for dim in dimensions_rows:
file_wrapper.add_to_archive('{}.csv'.format(dim), dimensions_rows[dim])
file_wrapper.add_to_archive('Data.csv', data_rows)
return file_wrapper.get_archive()
|
class FrameTransformerRegular(FrameTransformerBase):
def __init__(self, frame, dim_columns, date_columns, value_columns):
pass
def _get_axis_types(self):
pass
def _change_columns_types(self, date_columns, value_columns):
pass
def _prepare_frame(self):
pass
def _parse_dimension_list(self, full_dim_list):
pass
def _dataset_sheet(self, dataset_id, dataset_name, dimensions):
pass
def _get_unic_id(self, name, map):
pass
def _dimension_sheets(self, dimensions, series_names):
pass
def _freq_fetch(self, pandas_freq):
pass
def _data_sheet(self, frame, dimensions, dimensions_map, freq_index, scale_index, unit_index, series_names):
pass
def prepare(self, file_wrapper, dataset = None, dataset_name = None):
pass
| 12 | 0 | 24 | 6 | 18 | 0 | 6 | 0 | 1 | 4 | 0 | 0 | 11 | 1 | 11 | 33 | 277 | 74 | 202 | 63 | 190 | 1 | 196 | 62 | 184 | 21 | 5 | 5 | 70 |
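For reference, a standalone mirror of the _freq_fetch mapping above, showing how pandas offset aliases collapse onto Knoema frequencies (illustrative only; not part of the driver):
def freq_fetch_sketch(pandas_freq):
    # Mirrors FrameTransformerRegular._freq_fetch: drop any '-DEC'-style anchor,
    # then collapse the pandas alias onto D/W/M/Q/A, or None when unsupported.
    freq = pandas_freq.split('-')[0]
    if freq in ['B', 'C', 'D']: return 'D'
    if freq in ['W']: return 'W'
    if freq in ['M', 'SM', 'BM', 'CBM', 'MS', 'BMS', 'CBMS']: return 'M'
    if freq in ['Q', 'BQ', 'QS', 'BQS']: return 'Q'
    if freq in ['A', 'Y', 'BA', 'BY', 'AS', 'YS', 'BAS', 'BYS']: return 'A'
    return None

print(freq_fetch_sketch('Q-DEC'))  # 'Q'
print(freq_fetch_sketch('MS'))     # 'M'
print(freq_fetch_sketch('H'))      # None -> callers raise ValueError('Wrong frequency.')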
143,185 |
Knoema/knoema-python-driver
|
Knoema_knoema-python-driver/knoema/upload_frame.py
|
knoema.upload_frame.FrameTransformerFlat
|
class FrameTransformerFlat(FrameTransformerBase):
def __init__(self, frame, dim_columns, date_columns, value_columns):
super().__init__(frame, dim_columns, date_columns, value_columns)
def _rows(self, frame):
data_rows = []
first_data_row = []
for col in frame.columns:
first_data_row.append(col)
data_rows.append(first_data_row)
nan_frame = frame.isnull()
for i in range(len(frame.values)):
frame_row = frame.values[i]
nan_frame_row = nan_frame.values[i]
row = []
for j in range(len(frame_row)):
item = frame_row[j]
nan_item = nan_frame_row[j]
row.append('' if nan_item else str(item))
data_rows.append(row)
return data_rows
def prepare(self, file_wrapper, dataset = None, dataset_name = None):
rows = self._rows(self._frame)
file_name = (dataset_name if dataset_name != None else dataset) + '.csv'
return file_wrapper.write_single_file(file_name, rows)
|
class FrameTransformerFlat(FrameTransformerBase):
def __init__(self, frame, dim_columns, date_columns, value_columns):
pass
def _rows(self, frame):
pass
def prepare(self, file_wrapper, dataset = None, dataset_name = None):
pass
| 4 | 0 | 10 | 3 | 8 | 0 | 3 | 0 | 1 | 3 | 0 | 0 | 3 | 0 | 3 | 25 | 35 | 11 | 24 | 17 | 20 | 0 | 24 | 17 | 20 | 5 | 5 | 2 | 8 |
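A minimal usage sketch for FrameTransformerFlat. It assumes the base class simply stores the frame as _frame (as the factory below suggests) and that the import path matches this file; the frame content is illustrative. Note that _rows writes NaN cells as empty strings.

import pandas as pd
from knoema.upload_frame import FrameTransformerFlat, FileLayerWrapper

frame = pd.DataFrame({'Country': ['US', 'DE'], 'GDP': [1.5, None], 'CPI': [2.0, 3.0]})
with FileLayerWrapper() as wrapper:
    transformer = FrameTransformerFlat(frame, ['Country'], [], ['GDP', 'CPI'])
    path = transformer.prepare(wrapper, dataset_name='MyFlatDataset')
    print(path)  # .../MyFlatDataset.csv (removed again when the wrapper exits)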
143,186 |
Knoema/knoema-python-driver
|
Knoema_knoema-python-driver/knoema/upload_frame.py
|
knoema.upload_frame.FrameTransformerFactory
|
class FrameTransformerFactory(object):
def __init__(self, frame):
self._frame = frame
def _get_column_types(self):
date_columns = []
value_columns = []
dim_columns = []
for col in self._frame.columns:
is_numeric = True
is_date = True
try:
pd.to_numeric(self._frame[col])
value_columns.append(col)
except ValueError:
is_numeric = False
if not is_numeric:
try:
pd.to_datetime(self._frame[col])
date_columns.append(col)
except ValueError:
is_date = False
if not is_numeric and not is_date:
dim_columns.append(col)
return dim_columns, date_columns, value_columns
def get_transformer(self):
dim_columns, date_columns, value_columns = self._get_column_types()
if len(date_columns) > 1 or (len(dim_columns) > 0 and len(value_columns) > 1):
return FrameTransformerFlat(self._frame, dim_columns, date_columns, value_columns)
return FrameTransformerRegular(self._frame, dim_columns, date_columns, value_columns)
|
class FrameTransformerFactory(object):
def __init__(self, frame):
pass
def _get_column_types(self):
pass
def get_transformer(self):
pass
| 4 | 0 | 12 | 2 | 9 | 0 | 3 | 0 | 1 | 3 | 2 | 0 | 3 | 1 | 3 | 3 | 39 | 10 | 29 | 12 | 25 | 0 | 29 | 12 | 25 | 6 | 1 | 3 | 9 |
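get_transformer dispatches on the inferred column types; a small sketch of both branches (import path assumed from this file):

import pandas as pd
from knoema.upload_frame import FrameTransformerFactory

# one date column and one value column -> the regular (time-series) transformer
regular = pd.DataFrame({'Date': ['2020-01-01', '2020-02-01'], 'Value': [1, 2]})
print(type(FrameTransformerFactory(regular).get_transformer()).__name__)
# -> FrameTransformerRegular

# a dimension column next to several value columns -> the flat transformer
flat = pd.DataFrame({'Country': ['US'], 'GDP': [1.0], 'CPI': [2.0]})
print(type(FrameTransformerFactory(flat).get_transformer()).__name__)
# -> FrameTransformerFlat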
143,187 |
Knoema/knoema-python-driver
|
Knoema_knoema-python-driver/knoema/data_reader.py
|
knoema.data_reader.TransformationDataReader
|
class TransformationDataReader(SelectionDataReader):
def __init__(self, client, dim_values, transform, frequency):
if dim_values is None:
dim_values = {}
if transform:
dim_values['transform'] = transform
if frequency:
dim_values['frequency'] = frequency
super().__init__(client, dim_values)
def get_pandasframe(self):
data_resp = self.client.get_dataset_data(self.dataset.id, self._get_data_filters())
if isinstance(data_resp, definition.DetailsResponse):
response_reader = DetailsResponseReader(self, data_resp)
return response_reader.get_pandasframe()
if isinstance(data_resp, definition.RawDataResponse):
token = data_resp.continuation_token
while token is not None:
res2 = self.client.get_data_raw_with_token(token)
data_resp.series += res2.series
token = res2.continuation_token
response_reader = StreamingResponseReader(self, data_resp)
return response_reader.get_pandasframe()
response_reader = PivotResponseReader(self, data_resp)
return response_reader.get_pandasframe()
def _get_data_filters(self):
filter_dims = {}
passed_params = ['timerange', 'transform', 'timesince', 'timelast', 'timemembers']
for name, value in self.dim_values.items():
if name.lower() in passed_params:
filter_dims[name] = value
continue
if definition.is_equal_strings_ignore_case(name, 'datecolumn') and self.dataset.type != 'Regular':
filter_dims['datecolumn'] = value
continue
splited_values = [x for x in value.split(self.separator) if x] if isinstance(value, str) else value
if definition.is_equal_strings_ignore_case(name, 'frequency'):
filter_dims["frequency"] = self.separator.join(splited_values)
continue
dim = self._find_dimension(name)
if dim is None:
raise ValueError('Dimension with id or name {} is not found'.format(name))
if not splited_values:
raise ValueError('Selection for dimension {} is empty'.format(dim.name))
filter_dims[dim.id] = self.separator.join(splited_values)
if self.include_metadata:
filter_dims['metadata'] = 'true'
if self.separator != ',':
filter_dims['separator'] = self.separator
return definition.DataAPIRequest(filter_dims)
|
class TransformationDataReader(SelectionDataReader):
def __init__(self, client, dim_values, transform, frequency):
pass
def get_pandasframe(self):
pass
def _get_data_filters(self):
pass
| 4 | 0 | 20 | 4 | 16 | 0 | 6 | 0 | 1 | 9 | 6 | 0 | 3 | 0 | 3 | 15 | 64 | 14 | 50 | 13 | 46 | 0 | 49 | 13 | 45 | 10 | 3 | 2 | 18 |
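In the published knoema package this reader sits behind the top-level knoema.get helper, which forwards its keyword arguments as dim_values; a hedged sketch (the dataset id and selections are illustrative, and API credentials may need to be configured first):

import knoema

# 'frequency' and 'timerange' travel through _get_data_filters unchanged,
# while 'country' and 'subject' are resolved against the dataset dimensions
frame = knoema.get('IMFWEO2020Oct',
                   country='United States',
                   subject='Gross domestic product, current prices',
                   frequency='A',
                   timerange='2010-2020')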
143,188 |
Knoema/knoema-python-driver
|
Knoema_knoema-python-driver/knoema/data_reader.py
|
knoema.data_reader.TimeFormat
|
class TimeFormat(object):
@staticmethod
def format_statistical(date_point, freq):
# the default must be callable so unknown frequencies fall back to the raw date
return {
'FQ': lambda d: '{}FQ{}'.format(d.year, (d.month - 1) // 3 + 1),
'W': lambda d: TimeFormat.format_weekly(d)
}.get(freq, lambda d: d)(date_point)
@staticmethod
def format_weekly(date):
iso_values = date.isocalendar()
iso_year = iso_values[0]
week_number = iso_values[1]
return '{}W{}'.format(iso_year, week_number)
@staticmethod
def get_frequencies_delta():
return {
'A': relativedelta(years = 1),
'H': relativedelta(months = 6),
'Q': relativedelta(months = 3),
'FQ': relativedelta(months = 3),
'M': relativedelta(months = 1),
'W': timedelta(days = 7),
'D': timedelta(days = 1)}
|
class TimeFormat(object):
@staticmethod
def format_statistical(date_point, freq):
pass
@staticmethod
def format_weekly(date):
pass
@staticmethod
def get_frequencies_delta():
pass
| 7 | 0 | 6 | 0 | 6 | 0 | 1 | 0 | 1 | 2 | 0 | 0 | 0 | 0 | 3 | 3 | 25 | 2 | 23 | 10 | 16 | 0 | 10 | 7 | 6 | 1 | 1 | 0 | 3 |
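With the fallback above returning the date unchanged for unknown frequencies, format_statistical can be probed directly:

from datetime import date

print(TimeFormat.format_statistical(date(2021, 5, 17), 'FQ'))  # 2021FQ2
print(TimeFormat.format_statistical(date(2021, 5, 17), 'W'))   # 2021W20
print(TimeFormat.format_statistical(date(2021, 5, 17), 'A'))   # 2021-05-17 (fallback)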
143,189 |
Knoema/knoema-python-driver
|
Knoema_knoema-python-driver/knoema/data_reader.py
|
knoema.data_reader.StreamingResponseReader
|
class StreamingResponseReader(ResponseReader):
def __init__(self, reader, data_streaming):
self.data_streaming = data_streaming
super().__init__(reader)
def get_pandasframe(self):
detail_columns = self._get_detail_columns(self.data_streaming)
# create dataframe with data
series = self._get_data_series(self.data_streaming, detail_columns)
pandas_series = {}
names_of_dimensions = self.reader._get_dimension_names()
if self.include_metadata:
pandas_series_with_attr = {}
names_of_attributes = self._get_attribute_names()
pandas_series = PandasHelper.creates_pandas_series(series, pandas_series, detail_columns)
pandas_data_frame = PandasHelper.create_pandas_dataframe(pandas_series, names_of_dimensions, detail_columns)
if not self.include_metadata:
return pandas_data_frame
# create dataframe with metadata
series_with_attr = self._get_metadata_series(self.data_streaming, names_of_attributes)
pandas_series_with_attr = PandasHelper.creates_pandas_series(series_with_attr, pandas_series_with_attr, None)
pandas_data_frame_with_attr = PandasHelper.create_pandas_dataframe(pandas_series_with_attr, names_of_dimensions, None)
return pandas_data_frame, pandas_data_frame_with_attr
def _get_data_series(self, resp, detail_columns):
series_map = {}
dict_with_delta = TimeFormat.get_frequencies_delta()
frequency_list = []
use_stat_format_for_date_label = self._get_use_stat_format_for_date_label(resp.series)
dimensions_members_with_id = self._get_dimensions_members_with_id(resp.series)
detail_values = None
for series_point in resp.series:
all_values = series_point['values']
series_name = self._get_series_name(series_point, dimensions_members_with_id)
if detail_columns is not None:
detail_values = []
for column_name in detail_columns:
detail_values.append(series_point[column_name])
date_format = '%Y-%m-%dT%H:%M:%S' + ('Z' if series_point['startDate'].endswith('Z') else '')
data_begin_val = datetime.strptime(series_point['startDate'], date_format)
freq = series_point['frequency']
if freq not in frequency_list:
frequency_list.append(freq)
if (freq == "W"):
data_begin_val = data_begin_val - timedelta(days = data_begin_val.weekday())
delta = dict_with_delta[freq]
series = KnoemaSeries(series_name, [], [], detail_columns)
date_labels = series_point['dateLabels'] if 'dateLabels' in series_point else None
curr_date_val = data_begin_val
for vi in range(0, len(all_values)):
val = all_values[vi]
if val is not None:
date = curr_date_val if freq != 'FQ' else TimeFormat.format_statistical(curr_date_val, 'FQ')
if freq in use_stat_format_for_date_label or date_labels is not None and date_labels[vi] is not None:
date = TimeFormat.format_statistical(curr_date_val, freq) \
if freq in use_stat_format_for_date_label else datetime.strptime(date_labels[vi], date_format)
series.index.append(date)
series.values.append(val)
for ai in range(0, series.column_count):
series.column_values[ai].append(detail_values[ai][vi])
curr_date_val += delta
series_map[series_name] = series
if 'FQ' in frequency_list and len(frequency_list) > 1:
raise ValueError('Please provide a valid frequency list: FQ cannot be requested together with other frequencies.')
return series_map
def _get_use_stat_format_for_date_label(self, series):
use_stat_format_for_date_label = {}
date_labels_by_freq = {}
has_date_labels_by_freq = {}
dict_with_delta = TimeFormat.get_frequencies_delta()
for series_point in series:
freq = series_point['frequency']
if freq in use_stat_format_for_date_label:
continue
has_date_labels = 'dateLabels' in series_point
if freq in has_date_labels_by_freq and has_date_labels_by_freq[freq] != has_date_labels:
use_stat_format_for_date_label[freq] = True
continue
has_date_labels_by_freq[freq] = has_date_labels
if not has_date_labels:
continue
if freq not in date_labels_by_freq:
date_labels_by_freq[freq] = {}
date_labels = series_point['dateLabels']
all_values = series_point['values']
date_format = '%Y-%m-%dT%H:%M:%S' + ('Z' if series_point['startDate'].endswith('Z') else '')
data_begin_val = datetime.strptime(series_point['startDate'], date_format)
if (freq == "W"):
data_begin_val = data_begin_val - timedelta(days = data_begin_val.weekday())
delta = dict_with_delta[freq]
curr_date_val = data_begin_val
for vi in range(0, len(all_values)):
if curr_date_val in date_labels_by_freq[freq]:
if date_labels[vi] is not None and date_labels_by_freq[freq][curr_date_val] != date_labels[vi]:
use_stat_format_for_date_label[freq] = True
break
else:
date_labels_by_freq[freq][curr_date_val] = date_labels[vi]
curr_date_val += delta
return use_stat_format_for_date_label
def _get_dimensions_members_with_id(self, series):
dimensions_members_with_id = {}
for dim in self.dataset.dimensions:
names = set()
names_with_id = set()
ids = set()
for series_point in series:
name = series_point[dim.id]['name'] if 'name' in series_point[dim.id] else series_point[dim.id]
id = series_point[dim.id]['id'] if 'id' in series_point[dim.id] else str(series_point[dim.id])
if (name in names and id not in ids):
names_with_id.add(name)
else:
names.add(name)
ids.add(id)
if (len(names_with_id) > 0):
dimensions_members_with_id[dim.id] = names_with_id
return dimensions_members_with_id
def _get_series_name(self, series_point, dimensions_members_with_id):
names = []
for dim in self.dataset.dimensions:
name = series_point[dim.id]['name'] if 'name' in series_point[dim.id] else series_point[dim.id]
members_with_id = dimensions_members_with_id[dim.id] if dim.id in dimensions_members_with_id else []
if (name in members_with_id):
id = series_point[dim.id]['id'] if 'id' in series_point[dim.id] else series_point[dim.id]
name = name + '(' + id + ')'
names.append(name)
if 'frequency' in series_point:
names.append(series_point['frequency'])
return tuple(names)
def _get_attribute_names(self):
names = []
for dim in self.dataset.dimensions:
for attr in self.data_streaming.dimensionFields[dim.id]:
if not attr['isSystemField']:
names.append(dim.name +' '+ attr['displayName'])
names.append('Unit')
names.append('Scale')
names.append('Mnemonics')
for attr in self.dataset.timeseries_attributes:
names.append(attr.name)
return names
def _get_metadata_series(self, resp, names_of_attributes):
series = {}
dimensions_members_with_id = self._get_dimensions_members_with_id(resp.series)
for series_point in resp.series:
serie_name = self._get_series_name(series_point, dimensions_members_with_id)
if serie_name not in series:
serie_attrs = self._get_series_with_metadata(series_point, resp.dimensionFields)
series[serie_name] = KnoemaSeries(serie_name, serie_attrs, names_of_attributes, None)
return series
def _get_series_with_metadata(self, series_point, dimensionFields):
names = []
for dim in self.dataset.dimensions:
dimFields = dimensionFields[dim.id]
dimValues = series_point[dim.id]
for attr in dimFields:
if not attr['isSystemField']:
if attr['name'] in dimValues:
names.append(dimValues[attr['name']])
else:
names.append(None)
names.append(series_point.get('unit'))
names.append(series_point.get('scale'))
names.append(series_point.get('mnemonics'))
for attr in self.dataset.timeseries_attributes:
names.append(series_point['timeseriesAttributes'][attr.name])
return tuple(names)
|
class StreamingResponseReader(ResponseReader):
def __init__(self, reader, data_streaming):
pass
def get_pandasframe(self):
pass
def _get_data_series(self, resp, detail_columns):
pass
def _get_use_stat_format_for_date_label(self, series):
pass
def _get_dimensions_members_with_id(self, series):
pass
def _get_series_name(self, series_point, dimensions_members_with_id):
pass
def _get_attribute_names(self):
pass
def _get_metadata_series(self, resp, names_of_attributes):
pass
def _get_series_with_metadata(self, series_point, dimensionFields):
pass
| 10 | 0 | 21 | 2 | 18 | 0 | 6 | 0.01 | 1 | 11 | 3 | 0 | 9 | 1 | 9 | 11 | 195 | 30 | 163 | 79 | 153 | 2 | 159 | 79 | 149 | 15 | 2 | 4 | 58 |
143,190 |
Knoema/knoema-python-driver
|
Knoema_knoema-python-driver/knoema/upload_frame.py
|
knoema.upload_frame.FileLayerWrapper
|
class FileLayerWrapper(object):
def __init__(self):
self._tmp_dir = None
self._tmp_file = None
def write_single_file(self, name, rows):
parent_folder = tempfile.gettempdir()
self._write_file(parent_folder, name, rows)
self._tmp_file = os.path.join(parent_folder, name)
return self._tmp_file
def add_to_archive(self, name, rows):
if self._tmp_dir is None:
self._create_tmp_dir()
self._write_file(self._tmp_dir, name, rows)
def get_archive(self):
shutil.make_archive(self._tmp_dir, 'zip', self._tmp_dir)
self._tmp_file = self._tmp_dir + '.zip'
return self._tmp_file
def _create_tmp_dir(self):
parent_folder = tempfile.gettempdir()
tmp_dir = os.path.join(parent_folder, self._get_tmp_dir_name())
os.makedirs(tmp_dir, exist_ok=True)
self._tmp_dir = tmp_dir
def _get_tmp_dir_name(self, length = 8):
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for _ in range(length))
def _write_file(self, path, name, rows):
with open(os.path.join(path, name), 'w', newline = '') as file:
writer = csv.writer(file)
writer.writerows(rows)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if self._tmp_dir is not None and os.path.exists(self._tmp_dir):
shutil.rmtree(self._tmp_dir)
if self._tmp_file is not None and os.path.isfile(self._tmp_file):
os.remove(self._tmp_file)
|
class FileLayerWrapper(object):
def __init__(self):
pass
def write_single_file(self, name, rows):
pass
def add_to_archive(self, name, rows):
pass
def get_archive(self):
pass
def _create_tmp_dir(self):
pass
def _get_tmp_dir_name(self, length = 8):
pass
def _write_file(self, path, name, rows):
pass
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
pass
| 10 | 0 | 5 | 1 | 4 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 9 | 2 | 9 | 9 | 51 | 15 | 36 | 19 | 26 | 0 | 36 | 17 | 26 | 3 | 1 | 1 | 12 |
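A short usage sketch (import path assumed from this file); the context manager removes the temporary file or directory on exit:

from knoema.upload_frame import FileLayerWrapper

with FileLayerWrapper() as fw:
    path = fw.write_single_file('data.csv', [['a', 'b'], ['1', '2']])
    print(path)  # temp-dir path of data.csv

with FileLayerWrapper() as fw:
    fw.add_to_archive('Dataset.csv', [['Name'], ['My dataset']])
    fw.add_to_archive('Data.csv', [['Value'], ['42']])
    print(fw.get_archive())  # path of the generated .zip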
143,191 |
Knoema/knoema-python-driver
|
Knoema_knoema-python-driver/knoema/data_reader.py
|
knoema.data_reader.SelectionDataReader
|
class SelectionDataReader(DataReader):
def __init__(self, client, dim_values, transform = None):
super().__init__(client)
self.dim_values = dim_values
self.transform = transform
def _get_dim_members(self, dim, splited_values):
members = []
for value in splited_values:
if value is None or isinstance(value, str) and not value:
raise ValueError('Selection for dimension {} is empty'.format(dim.name))
member = dim.find_member_by_id(value)
if member is None:
member = dim.find_member_by_name(value)
if member is None:
member = dim.find_member_by_regionid(value)
if member is None:
member = dim.find_member_by_ticker(value)
if member is None and value.isnumeric():
member = dim.find_member_by_key(int(value))
if member:
members.append(member.key)
else:
raise ValueError('Selection for dimension {} contains invalid elements'.format(dim.name))
return members
def _find_dimension(self, dim_name_or_id):
dim = self.dataset.find_dimension_by_name(dim_name_or_id)
if dim is None:
dim = self.dataset.find_dimension_by_id(dim_name_or_id)
return dim
def _add_full_selection_by_empty_dim_values(self, filter_dims, pivot_req):
out_of_filter_dim_id = [dim.id for dim in self.dataset.dimensions if dim not in filter_dims]
for id in out_of_filter_dim_id:
pivot_req.stub.append(definition.PivotItem(id, []))
def _create_pivot_request(self):
pivot_req = definition.PivotRequest(self.dataset.id)
filter_dims = []
time_range = None
for name, value in self.dim_values.items():
if definition.is_equal_strings_ignore_case(name, 'timerange'):
time_range = value
continue
splited_values = value.split(self.separator) if isinstance(value, str) else value
if definition.is_equal_strings_ignore_case(name, 'frequency'):
pivot_req.frequencies = splited_values
continue
dim = self._find_dimension(name)
if dim is None:
raise ValueError('Dimension with id or name {} is not found'.format(name))
filter_dims.append(dim)
for dimension in self.dimensions:
if dimension.id == dim.id:
dim = dimension
break
aggregations = None
if len(splited_values) > 0 and splited_values[0].startswith('@'):
aggregations = splited_values[0][1:]
splited_values = splited_values[1:]
members = self._get_dim_members(dim, splited_values)
if not members and aggregations is None:
raise ValueError('Selection for dimension {} is empty'.format(dim.name))
pivot_req.stub.append(definition.PivotItem(dim.id, members, aggregations = aggregations))
self._add_full_selection_by_empty_dim_values(filter_dims, pivot_req)
if time_range:
pivot_req.header.append(definition.PivotTimeItem('Time', [time_range], 'range'))
else:
pivot_req.header.append(definition.PivotTimeItem('Time', [], 'AllData'))
if self.transform is not None:
pivot_req.transform = self.transform
if self.columns is not None:
pivot_req.columns = self.columns
return pivot_req
|
class SelectionDataReader(DataReader):
def __init__(self, client, dim_values, transform = None):
pass
def _get_dim_members(self, dim, splited_values):
pass
def _find_dimension(self, dim_name_or_id):
pass
def _add_full_selection_by_empty_dim_values(self, filter_dims, pivot_req):
pass
def _create_pivot_request(self):
pass
| 6 | 0 | 19 | 4 | 14 | 0 | 5 | 0 | 1 | 7 | 3 | 2 | 5 | 2 | 5 | 12 | 99 | 27 | 72 | 23 | 66 | 0 | 69 | 23 | 63 | 13 | 2 | 3 | 26 |
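_get_dim_members resolves each selection value by id, then name, then region id, then ticker, and finally (for numeric strings) by key. Since the method uses no instance state, that order can be demonstrated with a hypothetical stub dimension:

from knoema.data_reader import SelectionDataReader

class StubDim:
    name = 'Country'
    def find_member_by_id(self, v): return None
    def find_member_by_name(self, v): return None
    def find_member_by_regionid(self, v): return None
    def find_member_by_ticker(self, v): return None
    def find_member_by_key(self, key):
        return type('Member', (), {'key': key})()

# '512' matches nothing by id/name/region/ticker but is numeric,
# so it falls through to find_member_by_key(512)
print(SelectionDataReader._get_dim_members(None, StubDim(), ['512']))  # [512]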
143,192 |
Knoema/knoema-python-driver
|
Knoema_knoema-python-driver/knoema/data_reader.py
|
knoema.data_reader.StreamingDataReader
|
class StreamingDataReader(SelectionDataReader):
def __init__(self, client, dim_values, transform = None, group_by = None):
self.group_by = group_by
super().__init__(client, dim_values, transform)
def get_series_metadata(self):
self._load_dimensions()
pivot_req = self._create_pivot_request()
data_streaming = self.client.get_data_raw(pivot_req, True)
return data_streaming.series
def get_pandasframe(self):
self._load_dimensions()
pivot_req = self._create_pivot_request()
data_streaming = self.client.get_data_raw(pivot_req)
response_reader = StreamingResponseReader(self, data_streaming)
return response_reader.get_pandasframe()
def _get_series_with_attr(self, series, series_with_attr):
res = {}
for series_name, _ in series.items():
if series_name in series_with_attr:
res[series_name] = series_with_attr[series_name]
return res
def _get_frequency_for_normalization(self, required_frequency, available_frequency):
sorted_frequency = ['A', 'H', 'Q', 'M', 'W', 'D']
sorted_available_frequency = []
for f in sorted_frequency:
if f in available_frequency:
sorted_available_frequency.append(f)
norm_index = sorted_frequency.index(sorted_available_frequency[-1])
for f in sorted_available_frequency:
if sorted_frequency.index(f) > sorted_frequency.index(required_frequency):
norm_index = sorted_frequency.index(f)
break
return sorted_frequency[norm_index]
def _get_all_series_for_group(self, group_name, group_name_index, frequency, series, metadata):
series_by_group = {}
for series_name, series_item in series.items():
if series_name[group_name_index] == group_name:
series_by_group[series_name] = series_item
available_frequency = {}
count = 0
for md in metadata:
if md[self.group_by]['name'] == group_name:
freq = md['frequency']
if freq not in available_frequency:
available_frequency[freq] = 0
available_frequency[freq] += 1
count += 1
if frequency is None:
if len(series_by_group.keys()) == count:
return series_by_group
else:
return None
if len(available_frequency.keys()) == 0:
return None
norm_frequency = frequency
if frequency not in available_frequency.keys():
norm_frequency = self._get_frequency_for_normalization(frequency, available_frequency)
if len(series_by_group.keys()) == available_frequency[norm_frequency]:
return series_by_group
return None
def get_pandasframe_by_metadata_grouped(self, metadata, frequency, timerange):
self._load_dimensions()
names_of_dimensions = self._get_dimension_names()
series = {}
series_with_attr = {}
offset = 0
detail_columns = None
was = False
while True:
metadata_part = self._get_part_of_metadata(metadata, offset)
if metadata_part is None:
break
offset += len(metadata_part)
dim_values = {}
for item in metadata_part:
for dim in self.dataset.dimensions:
dim_id = dim.id
if dim_id not in item:
raise ValueError('There is no value for dim: {}'.format(dim_id))
member_key = str(item[dim_id]['key'])
if dim_id in dim_values:
if member_key not in dim_values[dim_id]:
dim_values[dim_id].append(member_key)
else:
dim_values[dim_id] = [member_key]
if frequency is not None:
dim_values['frequency'] = frequency
if timerange is not None:
dim_values['timerange'] = timerange
self.dim_values = dim_values
pivot_req = self._create_pivot_request()
pivot_resp = self.client.get_data_raw(pivot_req)
response_reader = StreamingResponseReader(self, pivot_resp)
if was:
if detail_columns is not None:
part_detail_columns = response_reader._get_detail_columns(pivot_resp)
if detail_columns != part_detail_columns:
detail_columns = None
else:
was = True
detail_columns = response_reader._get_detail_columns(pivot_resp)
part_series = response_reader._get_data_series(pivot_resp, detail_columns)
series.update(part_series)
if self.include_metadata:
names_of_attributes = self._get_attribute_names()
part_series_with_attr = response_reader._get_metadata_series(pivot_resp, names_of_attributes)
series_with_attr.update(part_series_with_attr)
group_name_index = -1
for i in range(len(self.dataset.dimensions)):
if self.group_by == self.dataset.dimensions[i].id or self.group_by == self.dataset.dimensions[i].name:
self.group_by = self.dataset.dimensions[i].id
group_name_index = i
break
groups_to_delete = []
groups_checked = []
for series_name, series_item in series.items():
group_name = series_name[group_name_index]
if group_name in groups_to_delete:
continue
if group_name in groups_checked:
continue
all_series_by_group = self._get_all_series_for_group(group_name, group_name_index, frequency, series, metadata)
if all_series_by_group is not None:
groups_to_delete.append(group_name)
all_panda_series_by_group = PandasHelper.creates_pandas_series(all_series_by_group, {}, detail_columns)
data_frame = definition.DataFrame()
data_frame.id = group_name
data_frame.data = PandasHelper.create_pandas_dataframe(all_panda_series_by_group, names_of_dimensions, detail_columns)
if self.include_metadata:
all_series_with_attr_by_group = self._get_series_with_attr(all_series_by_group, series_with_attr)
all_pandes_series_with_attr_by_group = PandasHelper.creates_pandas_series(all_series_with_attr_by_group, {}, None)
data_frame.metadata = PandasHelper.create_pandas_dataframe(all_pandes_series_with_attr_by_group, names_of_dimensions, None)
yield data_frame
else:
groups_checked.append(group_name)
left_series = {}
for series_name, series_item in series.items():
group_name = series_name[group_name_index]
if group_name not in groups_to_delete:
left_series[series_name] = series_item
series = left_series
left_series_with_attr = {}
for series_name, series_item in series_with_attr.items():
group_name = series_name[group_name_index]
if group_name not in groups_to_delete:
left_series_with_attr[series_name] = series_item
series_with_attr = left_series_with_attr
def _get_part_of_metadata(self, metadata, offset):
limit = 50000
member_count_limit = 200
if offset >= len(metadata):
return None
curr_points = 0
curr_member_count = 0
time_points = 0
dim_values = {}
index = offset
for i in range(offset, len(metadata)):
curr_ts = metadata[i]
curr_time_points = self._get_time_point_amount(curr_ts['startDate'], curr_ts['endDate'], curr_ts['frequency'])
if curr_time_points > time_points:
time_points = curr_time_points
curr_points = time_points
curr_member_count = 0
for dim in self.dataset.dimensions:
dim_id = dim.id
if dim_id not in curr_ts:
raise ValueError('There is no value for dim: {}'.format(dim_id))
member_key = str(curr_ts[dim_id]['key'])
if dim_id in dim_values:
if member_key not in dim_values[dim_id]:
dim_values[dim_id].append(member_key)
else:
dim_values[dim_id] = [member_key]
curr_points = curr_points * len(dim_values[dim_id])
curr_member_count = curr_member_count + len(dim_values[dim_id])
if curr_points >= limit or curr_member_count >= member_count_limit:
break
index = i
return metadata[offset:min(index + 1, len(metadata))]
def _get_time_point_amount(self, start_date, end_date, frequency):
count = 0
dict_with_delta = TimeFormat.get_frequencies_delta()
date_format = '%Y-%m-%dT%H:%M:%S' + ('Z' if start_date.endswith('Z') else '')
data_begin_val = datetime.strptime(start_date, date_format)
date_format = '%Y-%m-%dT%H:%M:%S' + ('Z' if end_date.endswith('Z') else '')
data_end_val = datetime.strptime(end_date, date_format)
if (frequency == "W"):
data_begin_val = data_begin_val - timedelta(days = data_begin_val.weekday())
data_end_val = data_end_val - timedelta(days = data_end_val.weekday())
delta = dict_with_delta[frequency]
curr_date_val = data_begin_val
while curr_date_val <= data_end_val:
curr_date_val += delta
count += 1
# increase the point count for the FQ frequency to work around an OASIS export issue
if frequency == 'FQ':
count += 10
return count
|
class StreamingDataReader(SelectionDataReader):
def __init__(self, client, dim_values, transform = None, group_by = None):
pass
def get_series_metadata(self):
pass
def get_pandasframe(self):
pass
def _get_series_with_attr(self, series, series_with_attr):
pass
def _get_frequency_for_normalization(self, required_frequency, available_frequency):
pass
def _get_all_series_for_group(self, group_name, group_name_index, frequency, series, metadata):
pass
def get_pandasframe_by_metadata_grouped(self, metadata, frequency, timerange):
pass
def _get_part_of_metadata(self, metadata, offset):
pass
def _get_time_point_amount(self, start_date, end_date, frequency):
pass
| 10 | 0 | 27 | 5 | 22 | 0 | 7 | 0.01 | 1 | 10 | 4 | 0 | 9 | 2 | 9 | 21 | 253 | 54 | 198 | 82 | 188 | 1 | 193 | 82 | 183 | 25 | 3 | 5 | 62 |
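_get_time_point_amount likewise uses no instance state, so the counting, including the Monday alignment applied to weekly data, can be checked directly:

from knoema.data_reader import StreamingDataReader

n_m = StreamingDataReader._get_time_point_amount(
    None, '2020-01-01T00:00:00', '2020-03-01T00:00:00', 'M')
print(n_m)  # 3 -> Jan, Feb, Mar

n_w = StreamingDataReader._get_time_point_amount(
    None, '2020-01-02T00:00:00', '2020-01-15T00:00:00', 'W')
print(n_w)  # 3 -> both endpoints are first aligned to their Mondays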
143,193 |
KnorrFG/pyparadigm
|
KnorrFG_pyparadigm/pyparadigm/surface_composition.py
|
pyparadigm.surface_composition.FreeFloatLayout
|
class FreeFloatLayout:
"""A "Layout" that allows for free positioning of its elements. All children
must be Wrapped in an FRect, which takes a rects arguments (x, y, w, h), and
determines the childs rect. All values can either be floats, and must then
be between 0 and 1 and are relative to the rect-size of the layout, positive
integers, in which case the values are interpreded as pixel offsets from the
layout rect origin, or negative integers, in which case the absolute value
is the available width or height minus the value"""
def __init__(self) -> None:
self.children = None
def __call__(self, *children):
if len(children) == 0:
raise PPError("You tried to add no children to layout")
_check_call_op(self.children)
for child in children:
if type(child) != FRect:
raise PPError("All children of a FreeFloatLayout must be wrapped in an FRect")
self.children = children
return self
def _draw(self, surface, target_rect):
for child in self.children:
rect = child.to_abs_rect(target_rect)
if child.child is None:
raise ValueError("There is an FRect without child")
child.child._draw(surface, rect)
|
class FreeFloatLayout:
'''A "Layout" that allows for free positioning of its elements. All children
must be Wrapped in an FRect, which takes a rects arguments (x, y, w, h), and
determines the childs rect. All values can either be floats, and must then
be between 0 and 1 and are relative to the rect-size of the layout, positive
integers, in which case the values are interpreded as pixel offsets from the
layout rect origin, or negative integers, in which case the absolute value
is the available width or height minus the value'''
def __init__(self) -> None:
pass
def __call__(self, *children):
pass
def _draw(self, surface, target_rect):
pass
| 4 | 1 | 6 | 0 | 6 | 0 | 3 | 0.39 | 0 | 4 | 2 | 0 | 3 | 1 | 3 | 3 | 28 | 3 | 18 | 8 | 14 | 7 | 18 | 8 | 14 | 4 | 0 | 2 | 8 |
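A hedged composition sketch: init, display and compose are assumed from the package layout visible in this dump, and FRect is assumed to wrap its child via a call like the other containers here; treat these names as assumptions.

import pygame
from pyparadigm.misc import init, display
from pyparadigm.surface_composition import compose, FreeFloatLayout, FRect, Circle

screen = init((400, 300))
display(compose(screen)(
    FreeFloatLayout()(
        FRect(0.1, 0.1, 0.3, 0.3)(Circle(0xFF0000)),  # floats: relative to the layout rect
        FRect(200, 150, -20, -20)(Circle(0x00FF00)),  # ints: pixel offsets; negative = size minus 20px
    )))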
143,194 |
KnorrFG/pyparadigm
|
KnorrFG_pyparadigm/pyparadigm/surface_composition.py
|
pyparadigm.surface_composition.Padding
|
class Padding:
"""Pads a child element
Each argument refers to a percentage of the axis it belongs to.
A padding of (0.25, 0.25, 0.25, 0.25) would generate a blocked area a quarter of the
available height in size above and below the child, and a quarter of the
available width left and right of the child.
If left and right or top and bottom sum up to one, no space
remains for the child.
"""
def _draw(self, surface, target_rect):
assert self.child is not None
child_rect = pygame.Rect(
target_rect.left + target_rect.w * self.left,
target_rect.top + target_rect.h * self.top,
target_rect.w * (1 - self.left - self.right),
target_rect.h * (1 - self.top - self.bottom)
)
self.child._draw(surface, child_rect)
def __init__(self, left, right, top, bottom):
assert all(0 <= side < 1 for side in [left, right, top, bottom])
assert left + right < 1
assert top + bottom < 1
self.left = left
self.right = right
self.top = top
self.bottom = bottom
self.child = None
def __call__(self, child):
_check_call_op(self.child)
self.child = _wrap_surface(child)
return self
@staticmethod
def from_scale(scale_w, scale_h=None):
"""Creates a padding by the remaining space after scaling the content.
E.g. Padding.from_scale(0.5) would produce Padding(0.25, 0.25, 0.25, 0.25) and
Padding.from_scale(0.5, 1) would produce Padding(0.25, 0.25, 0, 0)
because the content would not be scaled (since scale_h=1) and therefore
there would be no vertical padding.
If scale_h is not specified scale_h=scale_w is used as default
:param scale_w: horizontal scaling factor
:type scale_w: float
:param scale_h: vertical scaling factor
:type scale_h: float
"""
if not scale_h: scale_h = scale_w
w_padding = [(1 - scale_w) * 0.5] * 2
h_padding = [(1 - scale_h) * 0.5] * 2
return Padding(*w_padding, *h_padding)
|
class Padding:
'''Pads a child element
Each argument refers to a percentage of the axis it belongs to.
A padding of (0.25, 0.25, 0.25, 0.25) would generate a blocked area a quarter of the
available height in size above and below the child, and a quarter of the
available width left and right of the child.
If left and right or top and bottom sum up to one, no space
remains for the child.
'''
def _draw(self, surface, target_rect):
pass
def __init__(self, left, right, top, bottom):
pass
def __call__(self, child):
pass
@staticmethod
def from_scale(scale_w, scale_h=None):
'''Creates a padding by the remaining space after scaling the content.
E.g. Padding.from_scale(0.5) would produce Padding(0.25, 0.25, 0.25, 0.25) and
Padding.from_scale(0.5, 1) would produce Padding(0.25, 0.25, 0, 0)
because the content would not be scaled (since scale_h=1) and therefore
there would be no vertical padding.
If scale_h is not specified scale_h=scale_w is used as default
:param scale_w: horizontal scaling factor
:type scale_w: float
:param scale_h: vertical scaling factor
:type scale_h: float
'''
pass
| 6 | 2 | 11 | 1 | 7 | 3 | 1 | 0.66 | 0 | 0 | 0 | 0 | 3 | 5 | 4 | 4 | 57 | 9 | 29 | 14 | 23 | 19 | 24 | 13 | 19 | 2 | 0 | 1 | 5 |
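from_scale in action, runnable against the class above (pygame must be importable for the module):

from pyparadigm.surface_composition import Padding

p = Padding.from_scale(0.5)
print(p.left, p.right, p.top, p.bottom)  # 0.25 0.25 0.25 0.25

q = Padding.from_scale(0.5, 1)
print(q.left, q.right, q.top, q.bottom)  # 0.25 0.25 0.0 0.0 - no vertical padding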
143,195 |
KnorrFG/pyparadigm
|
KnorrFG_pyparadigm/pyparadigm/surface_composition.py
|
pyparadigm.surface_composition.Overlay
|
class Overlay:
"""Draws all its children on top of each other in the same rect"""
def __init__(self, *children):
self.children = _wrap_children(children)
def _draw(self, surface, target_rect):
for child in self.children:
child._draw(surface, target_rect)
|
class Overlay:
'''Draws all its children on top of each other in the same rect'''
def __init__(self, *children):
pass
def _draw(self, surface, target_rect):
pass
| 3 | 1 | 3 | 0 | 3 | 0 | 2 | 0.17 | 0 | 0 | 0 | 0 | 2 | 1 | 2 | 2 | 8 | 1 | 6 | 5 | 3 | 1 | 6 | 5 | 3 | 2 | 0 | 1 | 3 |
143,196 |
KnorrFG/pyparadigm
|
KnorrFG_pyparadigm/pyparadigm/surface_composition.py
|
pyparadigm.surface_composition.Margin
|
class Margin:
"""Defines the relative position of an item within a Surface.
For details see Surface.
"""
__slots__ = ["left", "right", "top", "bottom"]
def __init__(self, left=1, right=1, top=1, bottom=1):
self.left = left
self.right = right
self.top = top
self.bottom = bottom
|
class Margin:
'''Defines the relative position of an item within a Surface.
For details see Surface.
'''
def __init__(self, left=1, right=1, top=1, bottom=1):
pass
| 2 | 1 | 5 | 0 | 5 | 0 | 1 | 0.43 | 0 | 0 | 0 | 0 | 1 | 4 | 1 | 1 | 10 | 0 | 7 | 7 | 5 | 3 | 7 | 7 | 5 | 1 | 0 | 0 | 1 |
143,197 |
KnorrFG/pyparadigm
|
KnorrFG_pyparadigm/pyparadigm/surface_composition.py
|
pyparadigm.surface_composition.Line
|
class Line:
"""Draws a line.
:param width: width of the line in pixels
:type width: int
:param orientation: "v" or "h". Indicates whether the line should be
horizontal or vertical.
:type orientation: str
"""
def __init__(self, orientation, width=3, color=0):
assert orientation in ["h", "v"]
assert width > 0
self.orientation = orientation
self.width = width
self.color = color
def _draw(self, surface, target_rect):
if self.orientation == "h":
pygame.draw.line(surface, self.color, (
target_rect.left,
_round_to_int(target_rect.top + target_rect.h * 0.5)), (
target_rect.left + target_rect.w - 1,
_round_to_int(target_rect.top + target_rect.h * 0.5)),
self.width)
else:
pygame.draw.line(surface, self.color, (
_round_to_int(target_rect.left + target_rect.width * 0.5),
target_rect.top), (
_round_to_int(target_rect.left + target_rect.width * 0.5),
target_rect.top + target_rect.h - 1),
self.width)
|
class Line:
'''Draws a line.
:param width: width of the line in pixels
:type width: int
:param orientation: "v" or "h". Indicates whether the line should be
horizontal or vertical.
:type orientation: str
'''
def __init__(self, orientation, width=3, color=0):
pass
def _draw(self, surface, target_rect):
pass
| 3 | 1 | 11 | 0 | 11 | 0 | 2 | 0.32 | 0 | 0 | 0 | 0 | 2 | 3 | 2 | 2 | 31 | 2 | 22 | 6 | 19 | 7 | 11 | 6 | 8 | 2 | 0 | 1 | 3 |
143,198 |
KnorrFG/pyparadigm
|
KnorrFG_pyparadigm/pyparadigm/surface_composition.py
|
pyparadigm.surface_composition.LinLayout
|
class LinLayout:
"""A linear layout to order items horizontally or vertically.
Every element in the layout is automatically wrapped within an LLItem with
relative_size=1, i.e. all elements get assigned an equal amount of space. To
change that, elements can be wrapped in LLItems manually to get the desired
proportions.
:param orientation: orientation of the layout, either 'v' for vertical, or
'h' for horizontal.
:type orientation: str
"""
def __init__(self, orientation):
assert orientation in ["v", "h"]
self.orientation = orientation
self.children = None
def __call__(self, *children):
if len(children) == 0:
raise PPError("You tried to add no children to layout")
_check_call_op(self.children)
self.children = _lmap(lambda child:
child if type(child) == LLItem else LLItem(1)(child),
_wrap_children(children))
return self
def _draw(self, surface, target_rect):
child_rects = self._compute_child_rects(target_rect)
for child, rect in zip(self.children, child_rects):
child.child._draw(surface, rect)
def _compute_child_rects(self, target_rect):
def flip_if_not_horizontal(t):
return t if self.orientation == "h" else (t[1], t[0])
target_rect_size = target_rect.size
sum_child_weights = sum(child.relative_size for child in self.children)
if sum_child_weights == 0:
raise PPError("LinLayout Children all have weight 0: " + repr(self.children))
divider, full = flip_if_not_horizontal(target_rect_size)
dyn_size_per_unit = divider / sum_child_weights
strides = [child.relative_size * dyn_size_per_unit for child in self.children]
dyn_offsets = [0] + list(accumulate(strides))[:-1]
left_offsets, top_offsets = flip_if_not_horizontal((dyn_offsets,
[0] * len(self.children)))
widths, heights = flip_if_not_horizontal((strides, [full] * len(self.children)))
return [pygame.Rect(target_rect.left + left_offset,
target_rect.top + top_offset,
w, h)
for left_offset, top_offset, w, h in
zip(left_offsets, top_offsets, widths, heights)]
|
class LinLayout:
'''A linear layout to order items horizontally or vertically.
Every element in the layout is automatically wrapped within an LLItem with
relative_size=1, i.e. all elements get assigned an equal amount of space. To
change that, elements can be wrapped in LLItems manually to get the desired
proportions.
:param orientation: orientation of the layout, either 'v' for vertical, or
'h' for horizontal.
:type orientation: str
'''
def __init__(self, orientation):
pass
def __call__(self, *children):
pass
def _draw(self, surface, target_rect):
pass
def _compute_child_rects(self, target_rect):
pass
def flip_if_not_horizontal(t):
pass
| 6 | 1 | 8 | 1 | 7 | 0 | 2 | 0.25 | 0 | 5 | 2 | 0 | 4 | 2 | 4 | 4 | 55 | 10 | 36 | 18 | 30 | 9 | 29 | 18 | 23 | 2 | 0 | 1 | 9 |
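The rect arithmetic can be checked without rendering anything; this assumes _wrap_children passes LLItem instances through untouched, as __call__ implies:

import pygame
from pyparadigm.surface_composition import LinLayout, LLItem

layout = LinLayout("h")(LLItem(2), LLItem(1))  # bare LLItems act as spacers
rects = layout._compute_child_rects(pygame.Rect(0, 0, 300, 100))
print([r.w for r in rects])  # [200, 100] - a 2:1 split of the 300px width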
143,199 |
KnorrFG/pyparadigm
|
KnorrFG_pyparadigm/pyparadigm/_primitives.py
|
pyparadigm._primitives.PPError
|
class PPError(Exception):
pass
|
class PPError(Exception):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
143,200 |
KnorrFG/pyparadigm
|
KnorrFG_pyparadigm/pyparadigm/eventlistener.py
|
pyparadigm.eventlistener.EventConsumerInfo
|
class EventConsumerInfo(Enum):
"""Can be returned by event-handler functions to communicate with the listener.
For Details see EventListener"""
DONT_CARE = 0
CONSUMED = 1
|
class EventConsumerInfo(Enum):
'''Can be returned by event-handler functions to communicate with the listener.
For Details see EventListener'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0.67 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 49 | 5 | 0 | 3 | 3 | 2 | 2 | 3 | 3 | 2 | 0 | 4 | 0 | 0 |
143,201 |
KnorrFG/pyparadigm
|
KnorrFG_pyparadigm/pyparadigm/eventlistener.py
|
pyparadigm.eventlistener.EventListener
|
class EventListener(object):
"""
:param permanent_handlers: iterable of permanent handlers
:type permanent_handlers: iterable
:param use_ctrl_c_handler: specifies whether a handler that quits the
script when ctrl + c is pressed should be used
:type use_ctrl_c_handler: Bool
"""
_mod_keys = {KMOD_LSHIFT, KMOD_RSHIFT, KMOD_SHIFT, KMOD_CAPS,
KMOD_LCTRL, KMOD_RCTRL, KMOD_CTRL, KMOD_LALT, KMOD_RALT,
KMOD_ALT, KMOD_LMETA, KMOD_RMETA, KMOD_META, KMOD_NUM, KMOD_MODE}
@staticmethod
def _contained_modifiers(mods, mods_of_interest=_mod_keys):
return frozenset(mod for mod in mods_of_interest if mod & mods)
@staticmethod
def _exit_on_ctrl_c(event):
if event.type == pygame.KEYDOWN \
and event.key == pygame.K_c \
and pygame.key.get_mods() & pygame.KMOD_CTRL:
pygame.quit()
exit(1)
else:
return EventConsumerInfo.DONT_CARE
def __init__(self, permanent_handlers=None, use_ctrl_c_handler=True):
self._current_q = []
self.mouse_proxies = defaultdict(dict)
self.proxy_group = 0
if use_ctrl_c_handler:
self.permanent_handlers = (
EventListener._exit_on_ctrl_c,
)
if permanent_handlers:
self.permanent_handlers += tuple(permanent_handlers)
else:
self.permanent_handlers = permanent_handlers or []
def _get_q(self):
self._current_q = itt.chain(self._current_q, pygame.event.get())
return self._current_q
def mouse_area(self, handler, group=0, ident=None):
"""Adds a new MouseProxy for the given group to the
EventListener.mouse_proxies dict if it is not in there yet, and returns
the (new) MouseProxy. In listen() all entries in the current group of
mouse_proxies are used."""
key = ident or id(handler)
if key not in self.mouse_proxies[group]:
self.mouse_proxies[group][key] = MouseProxy(handler, ident)
return self.mouse_proxies[group][key]
def group(self, group):
"""sets current mouse proxy group and returns self.
Enables lines like el.group(1).wait_for_keys(...)"""
self.proxy_group = group
return self
def listen(self, *temporary_handlers):
"""When listen() is called all queued pygame.Events will be passed to all
registered listeners. There are two ways to register a listener:
1. as a permanent listener, that is always executed for every event. These
are registered by passing the handler-functions during construction
2. as a temporary listener, that will only be executed during the current
call to listen(). These are registered by passing the handler functions
as arguments to listen()
When a handler is called it can provoke three different reactions through
its return value.
1. It can return EventConsumerInfo.DONT_CARE in which case the EventListener
will pass the event to the next handler in line, or go to the next event,
if the last handler was called.
2. It can return EventConsumerInfo.CONSUMED in which case the event will not
be passed to following handlers, and the next event in line will be
processed.
3. It can return anything else (including None, which will be returned if no
return value is specified) in this case the listen()-method will return
the result of the handler.
Therefore all permanent handlers should usually return
EventConsumerInfo.DONT_CARE
"""
funcs = tuple(itt.chain(self.permanent_handlers,
(proxy.listener for proxy in
self.mouse_proxies[self.proxy_group].values()),
temporary_handlers))
for event in self._get_q():
for func in funcs:
ret = func(event)
if ret == EventConsumerInfo.CONSUMED:
break
if ret == EventConsumerInfo.DONT_CARE:
continue
else:
return ret
def listen_until_return(self, *temporary_handlers, timeout=0, sleeptime=0):
"""Calls listen repeatedly until listen returns something else than None.
Then returns listen's result. If timeout is not zero listen_until_return
stops after timeout seconds and returns None."""
start = time.time()
while timeout == 0 or time.time() - start < timeout:
res = self.listen(*temporary_handlers)
if res is not None:
return res
if sleeptime > 0:
time.sleep(sleeptime)
def wait_for_n_keypresses(self, key, n=1):
"""Waits till one key was pressed n times.
:param key: the key to be pressed as defined by pygame. E.g.
pygame.K_LEFT for the left arrow key
:type key: int
:param n: number of repetitions till the function returns
:type n: int
"""
my_const = "key_consumed"
counter = 0
def keypress_listener(e): return my_const \
if e.type == pygame.KEYDOWN and e.key == key \
else EventConsumerInfo.DONT_CARE
while counter < n:
if self.listen(keypress_listener) == my_const:
counter += 1
def wait_for_keys(self, *keys, timeout=0, sleeptime=0):
"""Waits until one of the specified keys was pressed, and returns
which key was pressed.
:param keys: iterable of integers of pygame-keycodes, or simply
multiple keys passed via multiple arguments
:type keys: iterable
:param timeout: number of seconds to wait till the function returns
:type timeout: float
:returns: The keycode of the pressed key, or None in case of timeout
:rtype: int
"""
if len(keys) == 1 and _is_iterable(keys[0]):
keys = keys[0]
return self.listen_until_return(Handler.key_press(keys), timeout=timeout,
sleeptime=sleeptime)
def wait_for_keys_modified(self, *keys, modifiers_to_check=_mod_keys,
timeout=0, sleeptime=0.001):
"""The same as wait_for_keys, but returns a frozen_set which contains
the pressed key, and the modifier keys.
:param modifiers_to_check: iterable of modifiers for which the function
will check whether they are pressed
:type modifiers_to_check: Iterable[int]"""
set_mods = pygame.key.get_mods()
return frozenset.union(
frozenset([self.wait_for_keys(*keys, timeout=timeout, sleeptime=sleeptime)]),
EventListener._contained_modifiers(set_mods, modifiers_to_check))
def wait_for_seconds(self, seconds, sleeptime=0.001):
"""basically time.sleep() but in the mean-time the permanent handlers
are executed"""
self.listen_until_return(timeout=seconds, sleeptime=sleeptime)
def wait_for_unicode_char(self, ignored_chars=None, timeout=0, sleeptime=0.001):
"""Returns a str that contains the single character that was pressed.
This already respects modifier keys and keyboard layouts. If timeout is
not None and no key is pressed within the specified timeout, None is
returned. If a key is in ignored_chars it will be ignored. As argument for
ignored_chars any object that has a __contains__ method can be used,
e.g. a string, a set, a list, etc"""
return self.listen_until_return(Handler.unicode_char(ignored_chars),
timeout=timeout, sleeptime=sleeptime)
|
class EventListener(object):
'''
:param permanent_handlers: iterable of permanent handlers
:type permanent_handlers: iterable
:param use_ctrl_c_handler: specifies whether a handler that quits the
script when ctrl + c is pressed should be used
:type use_ctrl_c_handler: Bool
'''
@staticmethod
def _contained_modifiers(mods, mods_of_interest=_mod_keys):
pass
@staticmethod
def _exit_on_ctrl_c(event):
pass
def __init__(self, permanent_handlers=None, use_ctrl_c_handler=True):
pass
def _get_q(self):
pass
def mouse_area(self, handler, group=0, ident=None):
'''Adds a new MouseProxy for the given group to the
EventListener.mouse_proxies dict if it is not in there yet, and returns
the (new) MouseProxy. In listen() all entries in the current group of
mouse_proxies are used.'''
pass
def group(self, group):
'''sets current mouse proxy group and returns self.
Enables lines like el.group(1).wait_for_keys(...)'''
pass
def listen(self, *temporary_handlers):
'''When listen() is called all queued pygame.Events will be passed to all
registered listeners. There are two ways to register a listener:
1. as a permanent listener, that is always executed for every event. These
are registered by passing the handler-functions during construction
2. as a temporary listener, that will only be executed during the current
call to listen(). These are registered by passing the handler functions
as arguments to listen()
When a handler is called it can provoke three different reactions through
its return value.
1. It can return EventConsumerInfo.DONT_CARE in which case the EventListener
will pass the event to the next handler in line, or go to the next event,
if the last handler was called.
2. It can return EventConsumerInfo.CONSUMED in which case the event will not
be passed to following handlers, and the next event in line will be
processed.
3. It can return anything else (including None, which will be returned if no
return value is specified) in this case the listen()-method will return
the result of the handler.
Therefore all permanent handlers should usually return
EventConsumerInfo.DONT_CARE
'''
pass
def listen_until_return(self, *temporary_handlers, timeout=0, sleeptime=0):
'''Calls listen repeatedly until listen returns something else than None.
Then returns listen's result. If timeout is not zero listen_until_return
stops after timeout seconds and returns None.'''
pass
def wait_for_n_keypresses(self, key, n=1):
'''Waits till one key was pressed n times.
:param key: the key to be pressed as defined by pygame. E.g.
pygame.K_LEFT for the left arrow key
:type key: int
:param n: number of repetitions till the function returns
:type n: int
'''
pass
def keypress_listener(e):
pass
def wait_for_keys(self, *keys, timeout=0, sleeptime=0):
'''Waits until one of the specified keys was pressed, and returns
which key was pressed.
:param keys: iterable of integers of pygame-keycodes, or simply
multiple keys passed via multiple arguments
:type keys: iterable
:param timeout: number of seconds to wait till the function returns
:type timeout: float
:returns: The keycode of the pressed key, or None in case of timeout
:rtype: int
'''
pass
def wait_for_keys_modified(self, *keys, modifiers_to_check=_mod_keys,
timeout=0, sleeptime=0.001):
'''The same as wait_for_keys, but returns a frozen_set which contains
the pressed key, and the modifier keys.
:param modifiers_to_check: iterable of modifiers for which the function
will check whether they are pressed
:type modifiers_to_check: Iterable[int]'''
pass
def wait_for_seconds(self, seconds, sleeptime=0.001):
'''basically time.sleep() but in the mean-time the permanent handlers
are executed'''
pass
def wait_for_unicode_char(self, ignored_chars=None, timeout=0, sleeptime=0.001):
'''Returns a str that contains the single character that was pressed.
This already respects modifier keys and keyboard layouts. If timeout is
not None and no key is pressed within the specified timeout, None is
returned. If a key is in ignored_chars it will be ignored. As argument for
ignored_chars any object that has a __contains__ method can be used,
e.g. a string, a set, a list, etc'''
pass
| 17 | 10 | 11 | 1 | 6 | 4 | 2 | 0.78 | 1 | 7 | 3 | 0 | 11 | 4 | 13 | 13 | 184 | 31 | 86 | 33 | 69 | 67 | 66 | 30 | 51 | 5 | 1 | 3 | 29 |
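A minimal interaction sketch; pygame needs an initialized display to deliver key events:

import pygame
from pyparadigm.eventlistener import EventListener

pygame.init()
pygame.display.set_mode((320, 200))

listener = EventListener()
key = listener.wait_for_keys(pygame.K_LEFT, pygame.K_RIGHT, timeout=10)
if key == pygame.K_LEFT:
    print("left arrow")
elif key is None:
    print("timed out")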
143,202 |
KnorrFG/pyparadigm
|
KnorrFG_pyparadigm/pyparadigm/eventlistener.py
|
pyparadigm.eventlistener.MouseProxy
|
class MouseProxy:
"""has a _draw method so that it can be used with
surface_composition.compose(). When "rendered" it simply saves the own
coordinates and then renders its child.
The listener method can then be used with EventListener.listen() to execute
the provided handler when the mouse interacts with the area.
The handler gets the event type, pygame.MOUSEBUTTONUP, pygame.MOUSEBUTTONDOWN
and pygame.MOUSEMOTION and the relative coordinates within the area.
For unique identification among all MouseProxies the ident parameter is used.
If ident is None (the default) it is set to id(handler)"""
mouse_events = {pygame.MOUSEBUTTONUP, pygame.MOUSEBUTTONDOWN,
pygame.MOUSEMOTION}
def __init__(self, handler: Callable[[int, int], int], ident=None):
self.handler = handler
self.rect = pygame.Rect(0, 0, 0, 0)
self.child = None
self.ident = ident or id(handler)
def __call__(self, child):
self.child = _wrap_children(child)
return self
def _draw(self, surface, rect):
# print("given:", rect)
self.rect = rect if not type(self.child) == Surface\
else self.child.compute_render_rect(rect)
# print("used:", self.rect)
if self.child:
self.child._draw(surface, rect)
def listener(self, e):
if e.type in MouseProxy.mouse_events:
pos = pygame.mouse.get_pos()
if self.rect.collidepoint(pos):
arity = _get_arity(self.handler)
if arity == 3:
return self.handler(
e, pos[0] - self.rect.x, pos[1] - self.rect.y)
elif arity == 4:
return self.handler(
e, pos[0] - self.rect.x, pos[1] - self.rect.y, self.rect)
else:
raise RuntimeError(
f"Invalid handler. takes {arity} arguments, but only 3 or 4 allowed")
return EventConsumerInfo.DONT_CARE
|
class MouseProxy:
'''has a _draw method so that it can be used with
surface_composition.compose(). When "rendered" it simply saves the own
coordinates and then renders its child.
The listener method can then be used with EventListener.listen() to execute
the provided handler when the mouse interacts with the area.
The handler gets the event type, pygame.MOUSEBUTTONUP, pygame.MOUSEBUTTONDOWN
and pygame.MOUSEMOTION and the relative coordinates within the area.
For unique identification among all MouseProxies the ident parameter is used.
If ident is None (the default) it is set to id(handler)'''
def __init__(self, handler: Callable[[int, int], int], ident=None):
pass
def __call__(self, child):
pass
def _draw(self, surface, rect):
pass
def listener(self, e):
pass
| 5 | 1 | 8 | 0 | 7 | 1 | 3 | 0.35 | 0 | 5 | 2 | 0 | 4 | 4 | 4 | 4 | 47 | 5 | 31 | 12 | 26 | 11 | 24 | 12 | 19 | 5 | 0 | 3 | 10 |
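MouseProxy is usually obtained through EventListener.mouse_area and embedded in a composition; a hedged sketch (the compose entry point and screen setup are assumed from the rest of the package):

import pygame
from pyparadigm.eventlistener import EventListener, EventConsumerInfo
from pyparadigm.surface_composition import compose, Circle

def on_click(event, rel_x, rel_y):  # 3-argument form: event plus area-relative coords
    if event.type == pygame.MOUSEBUTTONDOWN:
        print("clicked at", rel_x, rel_y)
    return EventConsumerInfo.DONT_CARE

pygame.init()
screen = pygame.display.set_mode((320, 200))
el = EventListener()
compose(screen)(el.mouse_area(on_click)(Circle(0xFF0000)))  # draws and registers the area
pygame.display.flip()
el.listen_until_return(timeout=5)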
143,203 |
KnorrFG/pyparadigm
|
KnorrFG_pyparadigm/pyparadigm/surface_composition.py
|
pyparadigm.surface_composition.LLItem
|
class LLItem:
"""Defines the relative size of an element in a LinLayout
All Elements that are passed to a linear layout are automatically wrapped
into an LLItem with relative_size=1. Therefore by default all elements
within a layout will be of the same size. To change the proportions, an LLItem
can be used explicitly with another relative size.
It is also possible to use an LLItem as a placeholder in a layout, to generate
an empty space like this:
:Example:
LinLayout("h")(
LLItem(1),
LLItem(1)(Circle(0xFFFF00)))
"""
def __init__(self, relative_size):
self.child = Surface()
self.relative_size = relative_size
def __call__(self, child):
if child:
self.child = _wrap_surface(child)
return self
def __repr__(self):
return "LLItem({})({})".format(self.relative_size, repr(self.child))
|
class LLItem:
'''Defines the relative size of an element in a LinLayout
All Elements that are passed to a linear layout are automatically wrapped
into an LLItem with relative_size=1. Therefore by default all elements
within a layout will be of the same size. To change the proportions, an LLItem
can be used explicitly with another relative size.
It is also possible to use an LLItem as a placeholder in a layout, to generate
an empty space like this:
:Example:
LinLayout("h")(
LLItem(1),
LLItem(1)(Circle(0xFFFF00)))
'''
def __init__(self, relative_size):
pass
def __call__(self, child):
pass
def __repr__(self):
pass
| 4 | 1 | 3 | 0 | 3 | 0 | 1 | 1.2 | 0 | 1 | 1 | 0 | 3 | 2 | 3 | 3 | 28 | 6 | 10 | 6 | 6 | 12 | 10 | 6 | 6 | 2 | 0 | 1 | 4 |
143,204 |
KnorrFG/pyparadigm
|
KnorrFG_pyparadigm/pyparadigm/misc.py
|
pyparadigm.misc._PumpThread
|
class _PumpThread(Thread):
"""See the documentation for the interactive_mode arg from :ref:`init`"""
def run(self):
while self._run:
pygame.event.pump()
time.sleep(0.1)
def stop(self):
self._run = False
self.join()
def __init__(self):
super().__init__()
self._run = True
self.start()
|
class _PumpThread(Thread):
'''See the documentation for the interactive_mode arg from :ref:`init`'''
def run(self):
pass
def stop(self):
pass
def __init__(self):
pass
| 4 | 1 | 4 | 0 | 4 | 0 | 1 | 0.08 | 1 | 1 | 0 | 0 | 3 | 1 | 3 | 28 | 15 | 2 | 12 | 5 | 8 | 1 | 12 | 5 | 8 | 2 | 1 | 1 | 4 |
143,205 |
KnorrFG/pyparadigm
|
KnorrFG_pyparadigm/pyparadigm/surface_composition.py
|
pyparadigm.surface_composition.Border
|
class Border:
"""Draws a border around the contained area. Can have a single child.
:param width: width of the border in pixels
:type width: int
:param color: color of the border
:type color: pygame.Color
"""
def __init__(self, width=3, color=0):
v_line = Line("v", width, color)
h_line = Line("h", width, color)
self.child_was_added = False
self.overlay = Overlay(
LinLayout("h")(
LLItem(0)(v_line),
LLItem(1),
LLItem(0)(v_line)
),
LinLayout("v")(
LLItem(0)(h_line),
LLItem(1),
LLItem(0)(h_line)
)
)
def __call__(self, child):
_check_call_op(None if not self.child_was_added else 1)
self.overlay.children.append(_wrap_surface(child))
return self
def _draw(self, surface, target_rect):
self.overlay._draw(surface, target_rect)
|
class Border:
'''Draws a border around the contained area. Can have a single child.
:param width: width of the border in pixels
:type width: int
:param color: color of the border
:type color: pygame.Color
'''
def __init__(self, width=3, color=0):
pass
def __call__(self, child):
pass
def _draw(self, surface, target_rect):
pass
| 4 | 1 | 7 | 0 | 7 | 0 | 1 | 0.26 | 0 | 4 | 4 | 0 | 3 | 2 | 3 | 3 | 32 | 3 | 23 | 8 | 19 | 6 | 12 | 8 | 8 | 2 | 0 | 0 | 4 |
143,206 |
KnorrFG/pyparadigm
|
KnorrFG_pyparadigm/pyparadigm/surface_composition.py
|
pyparadigm.surface_composition.Circle
|
class Circle:
"""Draws a Circle in the assigned space.
The circle will always be centered, and the radius will be half of the
shorter side of the assigned space.
:param color: The color of the circle
:type color: pygame.Color or int
:param width: width of the circle (in pixels). If 0 the circle will be filled
:type width: int
"""
def __init__(self, color, width=0):
self.color = color
self.width = width
def _draw(self, surface, target_rect):
pygame.draw.circle(surface, self.color, target_rect.center,
int(round(min(target_rect.w, target_rect.h) * 0.5)), self.width)
|
class Circle:
'''Draws a Circle in the assigned space.
The circle will always be centered, and the radius will be half of the
shorter side of the assigned space.
:param color: The color of the circle
:type color: pygame.Color or int
:param width: width of the circle (in pixels). If 0 the circle will be filled
:type width: int
'''
def __init__(self, color, width=0):
pass
def _draw(self, surface, target_rect):
pass
| 3 | 1 | 3 | 0 | 3 | 0 | 1 | 1.14 | 0 | 1 | 0 | 0 | 2 | 2 | 2 | 2 | 21 | 6 | 7 | 5 | 4 | 8 | 6 | 5 | 3 | 1 | 0 | 0 | 2 |
143,207 |
KnorrFG/pyparadigm
|
KnorrFG_pyparadigm/pyparadigm/surface_composition.py
|
pyparadigm.surface_composition.Surface
|
class Surface:
"""Wraps a pygame surface.
The Surface is the connection between the absolute world of pygame.Surfaces and the
relative world of the composition functions. A pygame.Surfaces can be bigger than
the space that is available to the Surface, or smaller. The Surface does the actual
blitting, and determines the concrete position, and if necessary (or
desired) scales the input surface.
Warning: When images are scaled with smoothing, colors will change decently, which
makes it inappropriate to use in combination with colorkeys.
:param margin: used to determine the exact location of the pygame.Surfaces within
the available space. The margin value represents the proportion of
the free space, along
an axis, i.e. Margin(1, 1, 1, 1) is centered, Margin(0, 1, 1, 2) is as far
left as possible and one/third on the way down.
:type margin: Margin object
:param scale: If 0 < scale <= 1 the longer side of the surface is scaled
to the given fraction of the available space, and the aspect ratio
will be preserved.
If scale is 0 there will be no scaling if the image is smaller than the
available space. It will still be scaled down if it is too big.
:type scale: float
:param smooth: if True the result of the scaling will be smoothed
:type smooth: bool
"""
def __init__(self, margin=Margin(1, 1, 1, 1), scale=0, smooth=True,
keep_aspect_ratio=True):
assert 0 <= scale <= 1
self.child = None
self.margin = margin
self.scale = scale
self.smooth = smooth
self.keep_aspect_ratio = keep_aspect_ratio
def __call__(self, child):
_check_call_op(self.child)
self.child = child
return self
@staticmethod
def _scale_to_target(source, target_size, smooth=False):
return pygame.transform.scale(source, target_size) if not smooth\
else pygame.transform.smoothscale(source, target_size)
@staticmethod
def _determine_target_size(child, target_rect, scale, keep_aspect_ratio):
if scale > 0:
scaled_target_rect = tuple(dist * scale for dist in target_rect)
if keep_aspect_ratio:
return child.get_rect().fit(scaled_target_rect).size
else:
return scaled_target_rect[2:4]
elif all(s_dim <= t_dim
for s_dim, t_dim in zip(child.get_size(), target_rect.size)):
return child.get_size()
else:
return target_rect.size
def compute_render_rect(self, target_rect):
target_size = Surface._determine_target_size(
self.child, target_rect, self.scale, self.keep_aspect_ratio)
remaining_h_space = target_rect.w - target_size[0]
remaining_v_space = target_rect.h - target_size[1]
return pygame.Rect(
(target_rect.left + _offset_by_margins(remaining_h_space,
self.margin.left, self.margin.right),
target_rect.top + _offset_by_margins(remaining_v_space,
self.margin.top, self.margin.bottom)), target_size)
def _draw(self, surface, target_rect):
if self.child is None:
return
render_rect = self.compute_render_rect(target_rect)
if render_rect.size == self.child.get_size():
content = self.child
else:
content = Surface._scale_to_target(
self.child, render_rect.size, self.smooth)
surface.blit(content, render_rect)
|
class Surface:
'''Wraps a pygame surface.
The Surface is the connection between the absolute world of pygame.Surfaces and the
relative world of the composition functions. A pygame.Surfaces can be bigger than
the space that is available to the Surface, or smaller. The Surface does the actual
blitting, and determines the concrete position, and if necessary (or
desired) scales the input surface.
Warning: When images are scaled with smoothing, colors will change decently, which
makes it inappropriate to use in combination with colorkeys.
:param margin: used to determine the exact location of the pygame.Surfaces within
the available space. The margin value represents the proportion of
the free space, along
an axis, i.e. Margin(1, 1, 1, 1) is centered, Margin(0, 1, 1, 2) is as far
left as possible and one/third on the way down.
:type margin: Margin object
:param scale: If 0 < scale <= 1 the longer side of the surface is scaled
to the given fraction of the available space, and the aspect ratio
will be preserved.
If scale is 0 there will be no scaling if the image is smaller than the
available space. It will still be scaled down if it is too big.
:type scale: float
:param smooth: if True the result of the scaling will be smoothed
:type smooth: bool
'''
def __init__(self, margin=Margin(1, 1, 1, 1), scale=0, smooth=True,
keep_aspect_ratio=True):
pass
def __call__(self, child):
pass
@staticmethod
def _scale_to_target(source, target_size, smooth=False):
pass
@staticmethod
def _determine_target_size(child, target_rect, scale, keep_aspect_ratio):
pass
def compute_render_rect(self, target_rect):
pass
def _draw(self, surface, target_rect):
pass
| 9 | 1 | 8 | 0 | 8 | 0 | 2 | 0.46 | 0 | 3 | 1 | 0 | 4 | 5 | 6 | 6 | 87 | 14 | 50 | 21 | 40 | 23 | 35 | 18 | 28 | 4 | 0 | 2 | 12 |
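A sketch of the margin arithmetic, assuming Margin(left, right, top, bottom) semantics as the docstring's Margin(0, 1, 1, 2) example implies, and the import path from the record:

import pygame
from pyparadigm.surface_composition import Surface, Margin

pygame.init()
canvas = pygame.Surface((400, 200))
img = pygame.Surface((100, 100))
wrapper = Surface(margin=Margin(0, 1, 1, 2))(img)
# free space is 300 x 100; left margin 0 keeps x at 0, and top:bottom = 1:2
# places the image a third of the way down, roughly Rect(0, 33, 100, 100)
print(wrapper.compute_render_rect(canvas.get_rect()))
wrapper._draw(canvas, canvas.get_rect())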
143,208 |
KnorrFG/pyparadigm
|
KnorrFG_pyparadigm/pyparadigm/surface_composition.py
|
pyparadigm.surface_composition.FRect
|
class FRect:
"""A wrapper Item for children of the FreeFloatLayout, see description of FreeFloatLayout"""
def __init__(self, x, y, w, h):
for coord in (x, y, w, h):
assert FRect.coord_valid(coord)
self.x = x
self.y = y
self.w = w
self.h = h
self.child = None
@staticmethod
def coord_valid(x):
return type(x) is int or (type(x) == float and 0 <= x <= 1)
@staticmethod
def adjust_coord(x, abs_partner):
if type(x) == int:
if x >= 0:
return x
else:
return abs_partner + x
elif type(x) == float:
return x * abs_partner
# this code should never be reached
assert False
def to_abs_rect(self, target_rect):
tmp = pygame.Rect(
FRect.adjust_coord(self.x, target_rect.w),
FRect.adjust_coord(self.y, target_rect.h),
FRect.adjust_coord(self.w, target_rect.w),
FRect.adjust_coord(self.h, target_rect.h))
return tmp.move(target_rect.topleft)
def __call__(self, child):
if child:
self.child = _wrap_surface(child)
return self
|
class FRect:
'''A wrapper Item for children of the FreeFloatLayout, see description of FreeFloatLayout'''
def __init__(self, x, y, w, h):
pass
@staticmethod
def coord_valid(x):
pass
@staticmethod
def adjust_coord(x, abs_partner):
pass
def to_abs_rect(self, target_rect):
pass
def __call__(self, child):
pass
| 8 | 1 | 7 | 0 | 6 | 0 | 2 | 0.06 | 0 | 3 | 0 | 0 | 3 | 5 | 5 | 5 | 41 | 6 | 33 | 15 | 25 | 2 | 25 | 13 | 19 | 4 | 0 | 2 | 10 |
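A worked example of the coordinate rules in adjust_coord (pure computation over the code shown above; import path per the record):

import pygame
from pyparadigm.surface_composition import FRect

target = pygame.Rect(10, 20, 200, 100)
# floats are fractions of the target, ints >= 0 are absolute pixels,
# and negative ints count back from the far edge
r = FRect(0.25, 10, -60, 0.5)
print(r.to_abs_rect(target))  # -> <rect(60, 30, 140, 50)>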
143,209 |
KnorrFG/pyparadigm
|
KnorrFG_pyparadigm/pyparadigm/surface_composition.py
|
pyparadigm.surface_composition.Fill
|
class Fill:
"""Fills the assigned area. Afterwards, the children are rendered
:param color: the color with which the area is filled
:type color: pygame.Color or int
"""
def __init__(self, color):
self.color = color
self.child = None
def __call__(self, child):
_check_call_op(self.child)
self.child = _wrap_surface(child)
return self
def _draw(self, surface, target_rect):
surface.fill(self.color, target_rect)
if self.child:
self.child._draw(surface, target_rect)
|
class Fill:
'''Fills the assigned area. Afterwards, the children are rendered
:param color: the color with which the area is filled
:type color: pygame.Color or int
'''
def __init__(self, color):
pass
def __call__(self, child):
pass
def _draw(self, surface, target_rect):
pass
| 4 | 1 | 4 | 0 | 4 | 0 | 1 | 0.33 | 0 | 0 | 0 | 0 | 3 | 2 | 3 | 3 | 20 | 4 | 12 | 6 | 8 | 4 | 12 | 6 | 8 | 2 | 0 | 1 | 4 |
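A minimal sketch of the fill-then-child drawing order in _draw, assuming the import path from the record:

import pygame
from pyparadigm.surface_composition import Fill, Circle

pygame.init()
surf = pygame.Surface((120, 120))
# Fill paints the whole target area first, then renders its single child on top
Fill(0xFFFFFF)(Circle(0x0000FF, width=4))._draw(surf, surf.get_rect())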
143,210 |
KnorrFG/pyparadigm
|
KnorrFG_pyparadigm/pyparadigm/eventlistener.py
|
pyparadigm.eventlistener.Handler
|
class Handler:
@staticmethod
def key_press(keys):
"""returns a handler that can be used with EventListener.listen()
and returns when a key in keys is pressed"""
return lambda e: e.key if e.type == pygame.KEYDOWN \
and e.key in keys else EventConsumerInfo.DONT_CARE
@staticmethod
def unicode_char(ignored_chars=None):
"""returns a handler that listens for unicode characters"""
return lambda e: e.unicode if e.type == pygame.KEYDOWN \
and ((ignored_chars is None)
or (e.unicode not in ignored_chars))\
else EventConsumerInfo.DONT_CARE
@staticmethod
def resize_event_handler(event_result=pygame.VIDEORESIZE):
'''returns a handler that will make a listen function return
event_result, when the window is resized'''
return lambda e: event_result if e.type == pygame.VIDEORESIZE\
else EventConsumerInfo.DONT_CARE
@staticmethod
def quit_event_handler(event_result=pygame.QUIT):
'''returns a handler that will make a listen function return
event_result, when a quit event occurs'''
return lambda e: event_result if e.type == pygame.QUIT\
else EventConsumerInfo.DONT_CARE
|
class Handler:
@staticmethod
def key_press(keys):
'''returns a handler that can be used with EventListener.listen()
and returns when a key in keys is pressed'''
pass
@staticmethod
def unicode_char(ignored_chars=None):
'''returns a handler that listens for unicode characters'''
pass
@staticmethod
def resize_event_handler(event_result=pygame.VIDEORESIZE):
'''returns a handler that will make a listen function return
event_result, when the window is resized'''
pass
@staticmethod
def quit_event_handler(event_result=pygame.QUIT):
'''returns a handler that will make a listen function return
event_result, when a quit event occurs'''
pass
| 9 | 4 | 5 | 0 | 4 | 2 | 1 | 0.37 | 0 | 1 | 1 | 0 | 0 | 0 | 4 | 4 | 29 | 3 | 19 | 9 | 10 | 7 | 9 | 5 | 4 | 1 | 0 | 0 | 4 |
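A hedged usage sketch: EventListener.listen(*handlers) is assumed from the pyparadigm documentation; only the Handler factories come from the record above:

import pygame
from pyparadigm.eventlistener import EventListener, Handler

pygame.init()
pygame.display.set_mode((100, 100))
result = EventListener().listen(
    Handler.key_press([pygame.K_LEFT, pygame.K_RIGHT]),
    Handler.quit_event_handler())
# result is the pressed key constant, or pygame.QUIT if a quit event arrived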
143,211 |
KnorrFG/pyparadigm
|
KnorrFG_pyparadigm/doc/examples/stroop.py
|
stroop.Color
|
class Color(Enum):
red = 0xFF0000
green = 0x00FF00
blue = 0x0000FF
|
class Color(Enum):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 49 | 4 | 0 | 4 | 4 | 3 | 0 | 4 | 4 | 3 | 0 | 4 | 0 | 0 |
143,212 |
KnorrFG/pyparadigm
|
KnorrFG_pyparadigm/pyparadigm/surface_composition.py
|
pyparadigm.surface_composition.RectangleShaper
|
class RectangleShaper:
"""Creates a padding, defined by a target Shape.
Width and height are the relative proportions of the target rectangle.
E.g RectangleShaper(1, 1) would create a square. and RectangleShaper(2, 1)
would create a rectangle which is twice as wide as it is high.
The rectangle always has the maximal possible size within the parent area.
"""
def __init__(self, width=1, height=1):
self.child = None
self.width = width
self.height = height
def __call__(self, child):
_check_call_op(self.child)
self.child = _wrap_surface(child)
return self
def _draw(self, surface, target_rect):
parent_w_factor = target_rect.w / target_rect.h
my_w_factor = self.width / self.height
if parent_w_factor > my_w_factor:
my_h = target_rect.h
my_w = my_h * my_w_factor
my_h_offset = 0
my_w_offset = _round_to_int((target_rect.w - my_w) * 0.5)
else:
my_w = target_rect.w
my_h = my_w / self.width * self.height
my_w_offset = 0
my_h_offset = _round_to_int((target_rect.h - my_h) * 0.5)
self.child._draw(surface, pygame.Rect(
target_rect.left + my_w_offset,
target_rect.top + my_h_offset,
my_w,
my_h
))
|
class RectangleShaper:
'''Creates padding defined by a target shape.
Width and height are the relative proportions of the target rectangle.
E.g. RectangleShaper(1, 1) would create a square, and RectangleShaper(2, 1)
would create a rectangle which is twice as wide as it is high.
The rectangle always has the maximal possible size within the parent area.
'''
def __init__(self, width=1, height=1):
pass
def __call__(self, child):
pass
def _draw(self, surface, target_rect):
pass
| 4 | 1 | 9 | 0 | 9 | 0 | 1 | 0.21 | 0 | 0 | 0 | 0 | 3 | 3 | 3 | 3 | 37 | 3 | 28 | 13 | 24 | 6 | 22 | 13 | 18 | 2 | 0 | 1 | 4 |
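A worked example of the letterboxing math in _draw, assuming the import paths from the records:

import pygame
from pyparadigm.surface_composition import RectangleShaper, Fill

pygame.init()
surf = pygame.Surface((300, 100))
# the parent is 3:1 but the target shape is 2:1, so the child keeps the full
# height and gets a centered 200x100 slot (50 pixels of padding on each side)
RectangleShaper(2, 1)(Fill(0x00FF00))._draw(surf, surf.get_rect())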
143,213 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/datatypes/datatypeerrors.py
|
rdfframework.datatypes.datatypeerrors.NsPrefixExistsError
|
class NsPrefixExistsError(Exception):
""" Exception raised for assigment of RdfNamespace 'prefix' if already
defined with a different 'uri'.
Attributes:
new_ns: the new ns paramaters
old_ns: the current ns patamaters
message: explanation of the error
"""
def __init__(self, new_ns, old_ns, message):
self.new_ns = new_ns
self.old_ns = old_ns
self.message = message
|
class NsPrefixExistsError(Exception):
''' Exception raised for assignment of RdfNamespace 'prefix' if already
defined with a different 'uri'.
Attributes:
new_ns: the new ns parameters
old_ns: the current ns parameters
message: explanation of the error
'''
def __init__(self, new_ns, old_ns, message):
pass
| 2 | 1 | 4 | 0 | 4 | 0 | 1 | 1.4 | 1 | 0 | 0 | 0 | 1 | 3 | 1 | 11 | 14 | 2 | 5 | 5 | 3 | 7 | 5 | 5 | 3 | 1 | 3 | 0 | 1 |
143,214 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/configuration/rdfwconfig.py
|
rdfframework.configuration.rdfwconfig.ClearClass
|
class ClearClass():
pass
|
class ClearClass():
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 0 | 0 | 0 |
143,215 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/datatypes/xsdtypes.py
|
rdfframework.datatypes.xsdtypes.XsdBoolean
|
class XsdBoolean(BaseRdfDataType):
""" Boolean instance of rdf xsd:boolean type value"""
datatype = Uri("xsd:boolean")
class_type = "XsdBoolean"
py_type = bool
es_type = "boolean"
def __init__(self, value):
new_val = cbool(value)
if new_val is None:
raise TypeError("'%s' is not a boolean value" % value)
self.value = new_val
def __eq__(self, value):
if value == self.value:
return True
return False
def __bool__(self):
#pdb.set_trace()
if self.value is None:
return False
return self.value
def __hash__(self):
return self.value
@property
def to_json(self):
return json.dumps(self.value)
|
class XsdBoolean(BaseRdfDataType):
''' Boolean instance of rdf xsd:boolean type value'''
def __init__(self, value):
pass
def __eq__(self, value):
pass
def __bool__(self):
pass
def __hash__(self):
pass
@property
def to_json(self):
pass
| 7 | 1 | 4 | 0 | 3 | 0 | 2 | 0.09 | 1 | 1 | 0 | 0 | 5 | 1 | 5 | 36 | 31 | 6 | 23 | 13 | 16 | 2 | 22 | 12 | 16 | 2 | 5 | 1 | 8 |
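A minimal sketch, assuming the module path from the record and that the cbool() helper coerces common string forms such as 'true':

from rdfframework.datatypes.xsdtypes import XsdBoolean

val = XsdBoolean("true")       # cbool() is assumed to accept "true"
print(bool(val), val.to_json)  # -> True true
# XsdBoolean("maybe") would raise TypeError, since cbool() returns None for it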
143,216 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/datatypes/xsdtypes.py
|
rdfframework.datatypes.xsdtypes.XsdDate
|
class XsdDate(date, BaseRdfDataType):
""" Datetime Date instacne of rdf xsd:date type value"""
datatype = Uri("xsd:date")
class_type = "XsdDate"
py_type = date
es_type = "date"
es_format = "strict_date_optional_time||epoch_millis"
def __new__(cls, *args, **kwargs):
yy = args[0]
mm = args[1:2][0] if args[1:2] else 0
dd = args[2:3][0] if args[2:3] else 0
if isinstance(args[0], str):
yy = parse(args[0])
if isinstance(yy, date) or isinstance(yy, datetime):
dd = yy.day
mm = yy.month
yy = yy.year
try:
return date.__new__(cls, yy, mm, dd, **kwargs)
except:
vals = tuple([1900, 1, 1])
return date.__new__(cls, *vals)
def __init__(self, *args, **kwargs):
self.value = self
|
class XsdDate(date, BaseRdfDataType):
''' datetime.date instance of rdf xsd:date type value'''
def __new__(cls, *args, **kwargs):
pass
def __init__(self, *args, **kwargs):
pass
| 3 | 1 | 9 | 0 | 9 | 0 | 4 | 0.04 | 2 | 3 | 0 | 0 | 2 | 1 | 2 | 65 | 27 | 3 | 23 | 13 | 20 | 1 | 23 | 13 | 20 | 6 | 5 | 1 | 7 |
143,217 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/datatypes/xsdtypes.py
|
rdfframework.datatypes.xsdtypes.XsdDatetime
|
class XsdDatetime(datetime, BaseRdfDataType):
""" Datetime Datetime instance of rdf xsd:datetime type value"""
datatype = Uri("xsd:dateTime")
class_type = "XsdDatetime"
py_type = datetime
es_type = "date"
es_format = "strict_date_optional_time||epoch_millis"
def __new__(cls, *args, **kwargs):
# print("args: ", args)
yy = args[0]
mm = args[1:2][0] if args[1:2] else 0
dd = args[2:3][0] if args[2:3] else 0
hh = args[3:4][0] if args[3:4] else 0
mi = args[4:5][0] if args[4:5] else 0
ss = args[5:6][0] if args[5:6] else 0
ms = args[6:7][0] if args[6:7] else 0
tz = args[7:8][0] if args[7:8] else timezone.utc
if isinstance(args[0], str):
yy = parse(args[0])
if isinstance(yy, datetime):
tz = yy.tzinfo if yy.tzinfo else timezone.utc
ms = yy.microsecond
ss = yy.second
mi = yy.minute
if isinstance(yy, date) or isinstance(yy, datetime):
hh = yy.hour
dd = yy.day
mm = yy.month
yy = yy.year
vals = tuple([yy, mm, dd, hh, mi, ss, ms, tz])
try:
return datetime.__new__(cls, *vals, **kwargs)
except:
vals = tuple([1900, 1, 1, 0, 0, 0, 0])
return datetime.__new__(cls, *vals)
def __init__(self, *args, **kwargs):
self.value = self
def __str__(self):
# return str(self.value)
return self.sparql
|
class XsdDatetime(datetime, BaseRdfDataType):
''' datetime.datetime instance of rdf xsd:dateTime type value'''
def __new__(cls, *args, **kwargs):
pass
def __init__(self, *args, **kwargs):
pass
def __str__(self):
pass
| 4 | 1 | 11 | 0 | 10 | 1 | 5 | 0.08 | 2 | 3 | 0 | 0 | 3 | 1 | 3 | 111 | 44 | 4 | 37 | 19 | 33 | 3 | 37 | 19 | 33 | 13 | 5 | 1 | 15 |
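A minimal sketch of the string-parsing branch in __new__, assuming the module path from the record; strings go through dateutil's parse() and a missing timezone defaults to UTC:

from rdfframework.datatypes.xsdtypes import XsdDatetime

dt = XsdDatetime("2018-03-01T14:30:00")
print(dt.year, dt.minute, dt.tzinfo)  # -> 2018 30 UTC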
143,218 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/datatypes/xsdtypes.py
|
rdfframework.datatypes.xsdtypes.XsdDecimal
|
class XsdDecimal(Decimal, BaseRdfDataType):
""" Integer instance of rdf xsd:string type value"""
datatype = Uri("xsd:decimal")
class_type = "XsdDecimal"
py_type = Decimal
es_type = "long"
def __new__(cls, *args, **kwargs):
vals = list(args)
vals[0] = str(args[0])
vals = tuple(vals)
newobj = Decimal.__new__(cls, *vals, **kwargs)
return newobj
def __init__(self, *args, **kwargs):
self.value = Decimal(str(self))
def __repr__(self):
return self._format(method="sparql")
def _internal_add(self, other):
""" Used for specifing addition methods for
__add__, __iadd__, __radd__
"""
if hasattr(other, "datatype"):
if other.datatype == "xsd:decimal":
rtn_val = self.value + Decimal(str(other.value))
else:
rtn_val = self.value + Decimal(str(other.value))
else:
rtn_val = self.value + Decimal(str(other))
return XsdDecimal(str(float(rtn_val)))
def _internal_sub(self, other):
""" Used for specifing subtraction methods for
__sub__, __isub__, __rsub__
"""
if hasattr(other, "datatype"):
if other.datatype == "xsd:decimal":
rtn_val = self.value - Decimal(str(other.value))
else:
rtn_val = self.value - Decimal(str(other.value))
else:
rtn_val = self.value - Decimal(str(other))
return XsdDecimal(str(float(rtn_val)))
def __add__(self, other):
return self._internal_add(other)
def __iadd__(self, other):
return self._internal_add(other)
def __radd__(self, other):
return self._internal_add(other)
def __sub__(self, other):
return self._internal_sub(other)
def __isub__(self, other):
return self._internal_sub(other)
def __rsub__(self, other):
return self._internal_sub(other)
|
class XsdDecimal(Decimal, BaseRdfDataType):
''' Decimal instance of rdf xsd:decimal type value'''
def __new__(cls, *args, **kwargs):
pass
def __init__(self, *args, **kwargs):
pass
def __repr__(self):
pass
def _internal_add(self, other):
''' Used for specifying addition methods for
__add__, __iadd__, __radd__
'''
pass
def _internal_sub(self, other):
''' Used for specifying subtraction methods for
__sub__, __isub__, __rsub__
'''
pass
def __add__(self, other):
pass
def __iadd__(self, other):
pass
def __radd__(self, other):
pass
def __sub__(self, other):
pass
def __isub__(self, other):
pass
def __rsub__(self, other):
pass
| 12 | 3 | 4 | 0 | 4 | 1 | 1 | 0.16 | 2 | 4 | 0 | 0 | 11 | 1 | 11 | 159 | 64 | 12 | 45 | 21 | 33 | 7 | 41 | 21 | 29 | 3 | 5 | 2 | 15 |
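A minimal sketch, assuming the module path from the record; every add/sub variant routes operands through Decimal(str(...)) and returns a new XsdDecimal, so ints mix in without binary-float drift:

from rdfframework.datatypes.xsdtypes import XsdDecimal

price = XsdDecimal("19.99")
total = price + 5
print(total.value, isinstance(total, XsdDecimal))  # -> 24.99 True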
143,219 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/datatypes/xsdtypes.py
|
rdfframework.datatypes.xsdtypes.XsdInteger
|
class XsdInteger(int, BaseRdfDataType):
""" Integer instance of rdf xsd:string type value"""
datatype = Uri("xsd:integer")
class_type = "XsdInteger"
py_type = int
es_type = "long"
def __new__(cls, *args, **kwargs):
newobj = int.__new__(cls, *args, **kwargs)
return newobj
def __init__(self, *args, **kwargs):
self.value = int(self)
def __repr__(self):
return self._format(method="sparql")
def _internal_add(self, other):
""" Used for specifing addition methods for
__add__, __iadd__, __radd__
"""
if hasattr(other, "datatype"):
if other.datatype == self.datatype:
rtn_val = self.value + other.value
else:
rtn_val = self.value + int(other.value)
else:
rtn_val = self.value + other
return XsdInteger(rtn_val)
def _internal_sub(self, other, method=None):
""" Used for specifing addition methods for
__add__, __iadd__, __radd__
"""
if hasattr(other, "datatype"):
if other.datatype == self.datatype:
oval = other.value
else:
oval = int(other.value)
else:
oval = int(other)
if method == 'rsub':
rtn_val = oval - self.value
else:
rtn_val = self.value - oval
return XsdInteger(rtn_val)
def __add__(self, other):
return self._internal_add(other)
def __iadd__(self, other):
return self._internal_add(other)
def __radd__(self, other):
return self._internal_add(other)
def __sub__(self, other):
return self._internal_sub(other)
def __isub__(self, other):
return self._internal_sub(other)
def __rsub__(self, other):
return self._internal_sub(other, 'rsub')
|
class XsdInteger(int, BaseRdfDataType):
''' Integer instance of rdf xsd:integer type value'''
def __new__(cls, *args, **kwargs):
pass
def __init__(self, *args, **kwargs):
pass
def __repr__(self):
pass
def _internal_add(self, other):
''' Used for specifying addition methods for
__add__, __iadd__, __radd__
'''
pass
def _internal_sub(self, other, method=None):
''' Used for specifying subtraction methods for
__sub__, __isub__, __rsub__
'''
pass
def __add__(self, other):
pass
def __iadd__(self, other):
pass
def __radd__(self, other):
pass
def __sub__(self, other):
pass
def __isub__(self, other):
pass
def __rsub__(self, other):
pass
| 12 | 3 | 4 | 0 | 4 | 1 | 1 | 0.15 | 2 | 0 | 0 | 0 | 11 | 1 | 11 | 97 | 65 | 12 | 46 | 21 | 34 | 7 | 41 | 21 | 29 | 4 | 5 | 2 | 16 |
143,220 |
KnowledgeLinks/rdfframework
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/KnowledgeLinks_rdfframework/tests/test_frameworkutilities.py
|
tests.test_frameworkutilities.Test_uid_to_repo_uri
|
class Test_uid_to_repo_uri(unittest.TestCase):
@patch("rdfframework.utilities.frameworkutilities.CONFIG")
def test_good_uid(self, config):
config.get = MagicMock(return_value="http://test/fedora/rest")
self.assertEqual(
uid_to_repo_uri("7f70bee2-1e24-4c35-9078-c6efbfa30aaf"),
"http://test/fedora/rest/7f/70/be/e2/7f70bee2-1e24-4c35-9078-c6efbfa30aaf")
|
class Test_uid_to_repo_uri(unittest.TestCase):
@patch("rdfframework.utilities.frameworkutilities.CONFIG")
def test_good_uid(self, config):
pass
| 3 | 0 | 5 | 0 | 5 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 1 | 0 | 1 | 73 | 8 | 1 | 7 | 3 | 4 | 0 | 4 | 2 | 2 | 1 | 2 | 0 | 1 |
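A standalone sketch of the same unittest.mock patching pattern; the patch target is the module path from the record, while the test class name and asserted value are illustrative:

import unittest
from unittest.mock import patch, MagicMock

class TestExample(unittest.TestCase):  # hypothetical test class
    @patch("rdfframework.utilities.frameworkutilities.CONFIG")
    def test_uses_config(self, config):
        # @patch hands the replacement object in as the last positional argument
        config.get = MagicMock(return_value="http://test/fedora/rest")
        self.assertEqual(config.get("REPOSITORY_URL"),
                         "http://test/fedora/rest")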
143,221 |
KnowledgeLinks/rdfframework
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/KnowledgeLinks_rdfframework/tests/test_rdfdatatype.py
|
tests.test_rdfdatatype.TestBaseRdfDatatype
|
class TestBaseRdfDatatype(unittest.TestCase):
# @patch("rdfframework.rdfdatatype.rdfw")
@patch("rdfframework.framework.RdfFramework")
def setUp(self, mock_rdfw):
mock_rdfw.root = os.path.abspath(__name__)
self.base_type = BaseRdfDataType("This is a literal value")
@patch("rdfframework.framework.RdfFramework")
def test_init(self, mock_rdfw):
# mock_rdfw.root = os.path.abspath(__name__)
self.assertEqual(self.base_type.value,
"This is a literal value")
def test_format_default(self):
self.assertEqual(self.base_type._format(),
'"This is a literal value"^^xsd:string')
def test_format_none(self):
self.assertEqual(self.base_type._format(method=None),
'"This is a literal value"')
@patch("rdfframework.framework.RdfFramework")
def test_format_pyuri(self, mock_rdfw):
mock_rdfw.root = os.path.abspath(__name__)
self.assertEqual(self.base_type._format(method='pyuri'),
"<This is a literal value>")
|
class TestBaseRdfDatatype(unittest.TestCase):
@patch("rdfframework.framework.RdfFramework")
def setUp(self, mock_rdfw):
pass
@patch("rdfframework.framework.RdfFramework")
def test_init(self, mock_rdfw):
pass
def test_format_default(self):
pass
def test_format_none(self):
pass
@patch("rdfframework.framework.RdfFramework")
def test_format_pyuri(self, mock_rdfw):
pass
| 9 | 0 | 3 | 0 | 3 | 0 | 1 | 0.1 | 1 | 0 | 0 | 0 | 5 | 1 | 5 | 77 | 28 | 6 | 20 | 10 | 11 | 2 | 13 | 7 | 7 | 1 | 2 | 0 | 5 |
143,222 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/datatypes/xsdtypes.py
|
rdfframework.datatypes.xsdtypes.XsdString
|
class XsdString(str, BaseRdfDataType):
""" String instance of rdf xsd:string type value"""
datatype = Uri("xsd:string")
class_type = "XsdString"
py_type = str
es_type = "text"
__slots__ = []
def __new__(cls, *args, **kwargs):
if isinstance(args[0], dict):
new_args = (args[0].pop('value'), )
kwargs.update(args[0])
else:
new_args = args
if "xml:lang" in kwargs:
lang = kwargs.pop("xml:lang")
elif "lang" in kwargs:
lang = kwargs.pop("lang")
else:
lang = None
newobj = str.__new__(cls, *new_args)
newobj.lang = lang
newobj.value = str(new_args[0])
return newobj
def __init__(self, *args, **kwargs):
pass
def __repr__(self):
return self._format(method="sparql")
def __add__(self, other):
if hasattr(other, "datatype"):
rtn_lang = None
if other.datatype == "xsd:string":
rtn_val = self.value + other.value
if other.lang and self.lang and other.lang != self.lang:
rtn_lang = None
elif other.lang:
rtn_lang = other.lang
else:
rtn_lang = self.lang
else:
rtn_val = self.value + str(other.value)
else:
rtn_val = self.value + str(other)
rtn_lang = self.lang
return XsdString(rtn_val, lang=rtn_lang)
@property
def rdflib(self):
if self.lang:
return rdflib.Literal(self.value, lang=self.lang)
return rdflib.Literal(self.value, datatype=self.datatype.rdflib)
|
class XsdString(str, BaseRdfDataType):
''' String instance of rdf xsd:string type value'''
def __new__(cls, *args, **kwargs):
pass
def __init__(self, *args, **kwargs):
pass
def __repr__(self):
pass
def __add__(self, other):
pass
@property
def rdflib(self):
pass
| 7 | 1 | 8 | 0 | 8 | 0 | 3 | 0.02 | 2 | 1 | 0 | 1 | 5 | 1 | 5 | 102 | 56 | 7 | 48 | 18 | 41 | 1 | 40 | 16 | 34 | 5 | 5 | 3 | 13 |
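A minimal sketch, assuming the module path from the record; adding a plain str keeps the original language tag, per the else branch of __add__:

from rdfframework.datatypes.xsdtypes import XsdString

s = XsdString("hello", lang="en")
print(s.value, s.lang)  # -> hello en
print((s + "!").lang)   # -> en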
143,223 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/datatypes/xsdtypes.py
|
rdfframework.datatypes.xsdtypes.XsdTime
|
class XsdTime(time, BaseRdfDataType):
""" Datetime Datetime instance of rdf xsd:datetime type value"""
datatype = Uri("xsd:time")
class_type = "XsdTime"
py_type = time
def __new__(cls, *args, **kwargs):
hh = args[0]
mi = args[1:2][0] if args[1:2] else 0
ss = args[2:3][0] if args[2:3] else 0
ms = args[3:4][0] if args[3:4] else 0
tz = args[4:5][0] if args[4:5] else timezone.utc
if isinstance(args[0], str):
hh = parse(args[0])
if isinstance(hh, datetime) or isinstance(hh, time):
tz = hh.tzinfo if hh.tzinfo else timezone.utc
ms = hh.microsecond
ss = hh.second
mi = hh.minute
hh = hh.hour
vals = tuple([hh, mi, ss, ms, tz])
return time.__new__(cls, *vals, **kwargs)
def __init__(self, *args, **kwargs):
self.value = self
|
class XsdTime(time, BaseRdfDataType):
''' datetime.time instance of rdf xsd:time type value'''
def __new__(cls, *args, **kwargs):
pass
def __init__(self, *args, **kwargs):
pass
| 3 | 1 | 9 | 0 | 9 | 0 | 5 | 0.05 | 2 | 4 | 0 | 0 | 2 | 1 | 2 | 61 | 26 | 3 | 22 | 13 | 19 | 1 | 22 | 13 | 19 | 8 | 5 | 1 | 9 |
143,224 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/framework.py
|
rdfframework.framework.RdfFramework
|
class RdfFramework(metaclass=RdfFrameworkSingleton):
''' base class for initializing Knowledge Links' Graph database RDF
vocabulary framework'''
log_name = "%s.RdfFramework" % MODULE_NAME
# set specific logging handler for the module allows turning on and off
# debug as required
log_level = logging.DEBUG
def __init__(self, **kwargs):
log = logging.getLogger("%s.%s" % (self.log_name,
inspect.stack()[0][3]))
log.setLevel(self.log_level)
config = kwargs.get('config')
if config:
print("Should find value")
CFG = RdfConfigManager(config=config)
else:
raise EnvironmentError("kwarg 'config' is required")
self.cfg = CFG
NSM = RdfNsManager(config=CFG)
self.root_file_path = CFG.RDF_DEFINITION_FILE_PATH
self._set_datafiles()
self.rml = DictClass()
# if the the definition files have been modified since the last json
# files were saved reload the definition files
reset = kwargs.get('reset',False)
# verify that the server core is up and running
servers_up = True
if kwargs.get('server_check', True):
servers_up = verify_server_core(600, 0)
else:
log.info("server check skipped")
if not servers_up:
log.info("Sever core not initialized --- Framework Not loaded")
if servers_up:
log.info("*** Loading Framework ***")
self._load_data(reset)
RdfPropertyFactory(CFG.def_tstore, reset=reset)
RdfClassFactory(CFG.def_tstore, reset=reset)
log.info("*** Framework Loaded ***")
def load_rml(self, rml_name):
""" loads an rml mapping into memory
args:
rml_name(str): the name of the rml file
"""
conn = CFG.rml_tstore
cache_path = os.path.join(CFG.CACHE_DATA_PATH, 'rml_files', rml_name)
if not os.path.exists(cache_path):
results = get_graph(NSM.uri(getattr(NSM.kdr, rml_name), False),
conn)
with open(cache_path, "w") as file_obj:
file_obj.write(json.dumps(results, indent=4))
else:
results = json.loads(open(cache_path).read())
self.rml[rml_name] = RdfDataset(results)
return self.rml[rml_name]
def get_rml(self, rml_name):
""" returns the rml mapping RdfDataset
rml_name(str): Name of the rml mapping to retrieve
"""
try:
return getattr(self, rml_name)
except AttributeError:
return self.load_rml(rml_name)
def _set_datafiles(self):
self.datafile_obj = {}
if CFG.def_tstore:
self._set_data_filelist(start_path=CFG.RDF_DEFINITION_FILE_PATH,
attr_name='def_files',
conn=CFG.def_tstore,
file_exts=['ttl', 'nt', 'xml', 'rdf'],
dir_filter=['rdfw-definitions', 'custom'])
if CFG.rml_tstore:
self._set_data_filelist(start_path=CFG.RML_DATA_PATH,
attr_name='rml_files',
conn=CFG.rml_tstore,
file_exts=['ttl', 'nt', 'xml', 'rdf'])
def _load_data(self, reset=False):
''' loads the RDF/turtle application data to the triplestore
args:
reset(bool): True will delete the definition dataset and reload
all of the datafiles.
'''
log = logging.getLogger("%s.%s" % (self.log_name,
inspect.stack()[0][3]))
log.setLevel(self.log_level)
for attr, obj in self.datafile_obj.items():
if reset or obj['latest_mod'] > obj['last_json_mod']:
conn = obj['conn']
sparql = "DROP ALL;"
if os.path.isdir(obj['cache_path']):
shutil.rmtree(obj['cache_path'], ignore_errors=True)
os.makedirs(obj['cache_path'])
drop_extensions = conn.update_query(sparql)
rdf_resource_templates = []
rdf_data = []
for path, files in obj['files'].items():
for file in files:
file_path = os.path.join(path, file)
# data = open(file_path).read()
# log.info(" uploading file: %s | namespace: %s",
# file,
# conn.namespace)
# data_type = file.split('.')[-1]
result = conn.load_data(file_path,
#datatype=data_type,
graph=str(getattr(NSM.kdr,
file)),
is_file=True)
if result.status_code > 399:
raise ValueError("Cannot load '{}' into {}".format(
file_path, conn))
def _set_data_filelist(self,
start_path,
attr_name,
conn,
file_exts=[],
dir_filter=set()):
''' does a directory search for data files '''
def filter_path(filter_terms, dir_path):
""" sees if any of the terms are present in the path if so returns
True
args:
filter_terms(list): terms to check
dir_path: the path of the directory
"""
if filter_terms.intersection(set(dir_path.split(os.path.sep))):
return True
else:
return False
data_obj = {}
files_dict = {}
latest_mod = 0
dir_filter = set(dir_filter)
for root, dirnames, filenames in os.walk(start_path):
if not dir_filter or filter_path(dir_filter, root):
if file_exts:
filenames = [x for x in filenames
if x.split('.')[-1].lower() in file_exts]
files_dict[root] = filenames
for def_file in filenames:
file_mod = os.path.getmtime(os.path.join(root,def_file))
if file_mod > latest_mod:
latest_mod = file_mod
data_obj['latest_mod'] = latest_mod
data_obj['files'] = files_dict
json_mod = 0
cache_path = os.path.join(CFG.CACHE_DATA_PATH, attr_name)
if cache_path:
for root, dirnames, filenames in os.walk(cache_path):
for json_file in filenames:
file_mod = os.path.getmtime(os.path.join(root,json_file))
if file_mod > json_mod:
json_mod = file_mod
data_obj['last_json_mod'] = json_mod
data_obj['conn'] = conn
data_obj['cache_path'] = cache_path
self.datafile_obj[attr_name] = data_obj
|
class RdfFramework(metaclass=RdfFrameworkSingleton):
''' base class for initializing Knowledge Links' Graph database RDF
vocabulary framework'''
def __init__(self, **kwargs):
pass
def load_rml(self, rml_name):
''' loads an rml mapping into memory
args:
rml_name(str): the name of the rml file
'''
pass
def get_rml(self, rml_name):
''' returns the rml mapping RdfDataset
rml_name(str): Name of the rml mapping to retrieve
'''
pass
def _set_datafiles(self):
pass
def _load_data(self, reset=False):
''' loads the RDF/turtle application data to the triplestore
args:
reset(bool): True will delete the definition dataset and reload
all of the datafiles.
'''
pass
def _set_data_filelist(self,
start_path,
attr_name,
conn,
file_exts=[],
dir_filter=set()):
''' does a directory search for data files '''
pass
def filter_path(filter_terms, dir_path):
''' checks whether any of the terms are present in the path; if so,
returns True
args:
filter_terms(list): terms to check
dir_path: the path of the directory
'''
pass
| 8 | 6 | 25 | 2 | 18 | 5 | 4 | 0.26 | 1 | 10 | 6 | 0 | 6 | 4 | 6 | 20 | 177 | 20 | 125 | 49 | 112 | 32 | 101 | 43 | 93 | 10 | 3 | 5 | 31 |
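A standalone sketch of the directory-scan pattern that _set_data_filelist uses (hypothetical helper name, standard library only): walk a tree, keep only the wanted extensions, and track the newest modification time so stale caches can be detected:

import os

def scan(start_path, file_exts):
    files_dict, latest_mod = {}, 0
    for root, _dirs, filenames in os.walk(start_path):
        keep = [f for f in filenames if f.split('.')[-1].lower() in file_exts]
        files_dict[root] = keep
        for name in keep:
            latest_mod = max(latest_mod,
                             os.path.getmtime(os.path.join(root, name)))
    return files_dict, latest_mod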
143,225 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/configuration/rdfwconfig.py
|
rdfframework.configuration.rdfwconfig.ConfigSingleton
|
class ConfigSingleton(type):
"""Singleton class for the RdfConfigManager that will allow for only one
instance of the RdfConfigManager to be created. """
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(ConfigSingleton,
cls).__call__(*args, **kwargs)
else:
config = None
if args:
config = args[0]
elif 'config' in kwargs:
config = kwargs['config']
if config:
cls._instances[cls].__load_config__(config, **kwargs) # pylint: disable=W0212
return cls._instances[cls]
def clear(cls):
cls._instances = {}
|
class ConfigSingleton(type):
'''Singleton class for the RdfConfigManager that will allow for only one
instance of the RdfConfigManager to be created. '''
def __call__(cls, *args, **kwargs):
pass
def clear(cls):
pass
| 3 | 1 | 8 | 0 | 8 | 1 | 3 | 0.18 | 1 | 1 | 0 | 1 | 2 | 0 | 2 | 15 | 21 | 2 | 17 | 5 | 14 | 3 | 14 | 5 | 11 | 5 | 2 | 2 | 6 |
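A standalone sketch of the same metaclass-singleton pattern with generic names (not rdfframework's actual class):

class Singleton(type):
    _instances = {}

    def __call__(cls, *args, **kwargs):
        # the first call creates the instance; later calls return the cached one
        if cls not in cls._instances:
            cls._instances[cls] = super().__call__(*args, **kwargs)
        return cls._instances[cls]

class Config(metaclass=Singleton):
    def __init__(self, data=None):
        self.data = data or {}

a = Config({"base_url": "http://example.org/"})
b = Config()       # __init__ is not re-run; the cached instance comes back
assert a is b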
143,226 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/tests/utilities/test_fileutilities.py
|
tests.utilities.test_fileutilities.TestSimpleFileUtilities_list_files
|
class TestSimpleFileUtilities_list_files(unittest.TestCase):
def setUp(self):
pass
def test_empty_call(self):
self.assertRaises(TypeError,
fileutilities.list_files)
def test_none_file_directory(self):
self.assertRaises(AttributeError,
fileutilities.list_files,
None)
def test_empty_str_file_directory(self):
self.assertEqual(
fileutilities.list_files(''),
[])
def tearDown(self):
pass
|
class TestSimpleFileUtilities_list_files(unittest.TestCase):
def setUp(self):
pass
def test_empty_call(self):
pass
def test_none_file_directory(self):
pass
def test_empty_str_file_directory(self):
pass
def tearDown(self):
pass
| 6 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 1 | 2 | 0 | 0 | 5 | 0 | 5 | 77 | 21 | 5 | 16 | 6 | 10 | 0 | 11 | 6 | 5 | 1 | 2 | 0 | 5 |
143,227 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/tests/test_rdfwconfig.py
|
tests.test_rdfwconfig.TestRdfConfigManagerInitialization
|
class TestRdfConfigManagerInitialization(unittest.TestCase):
def setUp(self):
configuration.RdfConfigManager.clear()
self.config_mgr = configuration.RdfConfigManager()
def test_init_no_params(self):
self.assertIsInstance(self.config_mgr,
configuration.RdfConfigManager)
def test_init_values(self):
self.assertFalse(self.config_mgr.__is_initialized__)
self.assertFalse(self.config_mgr.__locked__)
def tearDown(self):
pass
|
class TestRdfConfigManagerInitialization(unittest.TestCase):
def setUp(self):
pass
def test_init_no_params(self):
pass
def test_init_values(self):
pass
def tearDown(self):
pass
| 5 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 4 | 1 | 4 | 76 | 16 | 4 | 12 | 6 | 7 | 0 | 11 | 6 | 6 | 1 | 2 | 0 | 4 |
143,228 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/tests/test_rdfwconfig.py
|
tests.test_rdfwconfig.TestRdfConfigManagerInitSimpleConfig
|
class TestRdfConfigManagerInitSimpleConfig(unittest.TestCase):
def setUp(self):
configuration.RdfConfigManager.clear()
self.config = {"base_url": "http://example.org/"}
def test_simple_config(self):
config_mgr = configuration.RdfConfigManager(self.config, verify=False)
self.assertEqual(
config_mgr.base_url,
self.config.get("base_url"))
def tearDown(self):
pass
|
class TestRdfConfigManagerInitSimpleConfig(unittest.TestCase):
def setUp(self):
pass
def test_simple_config(self):
pass
def tearDown(self):
pass
| 4 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 3 | 1 | 3 | 75 | 15 | 4 | 11 | 6 | 7 | 0 | 9 | 6 | 5 | 1 | 2 | 0 | 3 |
143,229 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/tests/test_rdfwconfig.py
|
tests.test_rdfwconfig.TestConfigSingleton
|
class TestConfigSingleton(unittest.TestCase):
def setUp(self):
configuration.ConfigSingleton._instances = {}
def test_init(self):
new_config = configuration.ConfigSingleton("", tuple(), dict())
self.assertIsInstance(
new_config,
configuration.ConfigSingleton)
def tearDown(self):
configuration.ConfigSingleton._instances = {}
|
class TestConfigSingleton(unittest.TestCase):
def setUp(self):
pass
def test_init(self):
pass
def tearDown(self):
pass
| 4 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 1 | 3 | 1 | 0 | 3 | 0 | 3 | 75 | 13 | 3 | 10 | 5 | 6 | 0 | 8 | 5 | 4 | 1 | 2 | 0 | 3 |
143,230 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/tests/test_rdfnsmanager.py
|
tests.test_rdfnsmanager.TestConvertToNs
|
class TestConvertToNs(unittest.TestCase):
def test_default_conversion(self):
pass
|
class TestConvertToNs(unittest.TestCase):
def test_default_conversion(self):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 73 | 4 | 1 | 3 | 2 | 1 | 0 | 3 | 2 | 1 | 1 | 2 | 0 | 1 |
143,231 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/tests/test_rdfapifields.py
|
tests.test_rdfapifields.Test_get_api_field_json
|
class Test_get_api_field_json(unittest.TestCase):
def setUp(self):
pass
def test_for_existing_field(self):
pass
|
class Test_get_api_field_json(unittest.TestCase):
def setUp(self):
pass
def test_for_existing_field(self):
pass
| 3 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 2 | 0 | 2 | 74 | 7 | 2 | 5 | 3 | 2 | 0 | 5 | 3 | 2 | 1 | 2 | 0 | 2 |
143,232 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/tests/test_initialization.py
|
tests.test_initialization.TestSetupFrameworkInitialization
|
class TestSetupFrameworkInitialization(unittest.TestCase):
def setUp(self):
self.r = rdfframework.rdfclass
self.rdf_framework = RdfFramework(reset=False, config=config)
self.cfg = RdfConfigManager()
self.item_uri = "<http://library.kean.edu/173849#Work>"
self.conn = self.cfg.data_tstore
self.data = get_all_item_data(self.item_uri, self.conn)
self.x = RdfDataset(self.data, self.item_uri)
def test_init_values(self):
self.assertEquals(self.r, rdfframework.rdfclass)
self.assertIsInstance(self.rdf_framework,
RdfFramework)
self.assertEquals(self.item_uri,
"<http://library.kean.edu/173849#Work>")
|
class TestSetupFrameworkInitialization(unittest.TestCase):
def setUp(self):
pass
def test_init_values(self):
pass
| 3 | 0 | 9 | 2 | 7 | 1 | 1 | 0.13 | 1 | 2 | 2 | 0 | 2 | 7 | 2 | 74 | 20 | 5 | 15 | 10 | 12 | 2 | 13 | 10 | 10 | 1 | 2 | 0 | 2 |
143,233 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/tests/test_frameworkutilities.py
|
tests.test_frameworkutilities.Test_xsd_to_python
|
class Test_xsd_to_python(unittest.TestCase):
def setUp(self):
self.SCHEMA = rdflib.Namespace("https://schema.org/")
self.XSD = rdflib.XSD
def test_xsd_in_datatype(self):
self.assertEqual(
xsd_to_python("More than a string",
str(self.XSD.string)),
"More than a string")
def test_absent_xsd_in_datatype(self):
self.assertEqual(
xsd_to_python("More than a string",
"xsd_string"),
"More than a string")
def test_none_value(self):
self.assertIsNone(
xsd_to_python(None,
"xsd_string"))
def test_uri_rdftype(self):
self.assertEqual(
xsd_to_python(str(self.SCHEMA.name),
str(self.XSD.url),
"uri"),
"<{}>".format(self.SCHEMA.name))
def test_xsd_anyURI_datatype(self):
self.assertEqual(
xsd_to_python(str(self.SCHEMA.description),
"xsd_anyURI"),
str(self.SCHEMA.description))
def test_xsd_anyURI_datatype_invalid_uri(self):
#! This passes but should we validate as a URI
bad_uri = "This is a string pretending to a URI"
self.assertEqual(
xsd_to_python(bad_uri,
"xsd_anyURI"),
bad_uri)
def test_xsd_base64Binary(self):
import base64
base_64_value = base64.b64encode(b'This some base 64 encoded data')
self.assertEqual(
xsd_to_python(base_64_value,
"xsd_base64Binary"),
base64.b64decode(base_64_value))
def test_xsd_boolean(self):
self.assertTrue(
xsd_to_python(
"true",
"xsd_boolean"))
self.assertFalse(
xsd_to_python(
"false",
"xsd_boolean"))
def test_xsd_boolean_str(self):
self.assertEqual(
xsd_to_python(
"true",
"xsd_boolean",
output="string"),
"true")
self.assertEqual(
xsd_to_python(
"False",
"xsd_boolean",
output="string"),
"false")
def test_xsd_byte(self):
self.assertEqual(
xsd_to_python(
b"1",
"xsd_byte"),
"1")
|
class Test_xsd_to_python(unittest.TestCase):
def setUp(self):
pass
def test_xsd_in_datatype(self):
pass
def test_absent_xsd_in_datatype(self):
pass
def test_none_value(self):
pass
def test_uri_rdftype(self):
pass
def test_xsd_anyURI_datatype(self):
pass
def test_xsd_anyURI_datatype_invalid_uri(self):
pass
def test_xsd_base64Binary(self):
pass
def test_xsd_boolean(self):
pass
def test_xsd_boolean_str(self):
pass
def test_xsd_byte(self):
pass
| 12 | 0 | 6 | 0 | 6 | 0 | 1 | 0.01 | 1 | 1 | 0 | 0 | 11 | 2 | 11 | 83 | 82 | 11 | 70 | 17 | 57 | 1 | 29 | 17 | 16 | 1 | 2 | 0 | 11 |
143,234 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/tests/test_frameworkutilities.py
|
tests.test_frameworkutilities.Test_slugify
|
class Test_slugify(unittest.TestCase):
def test_str_1(self):
self.assertEqual(
slugify("Hello Moon"),
"hello-moon")
def test_num(self):
self.assertEqual(
slugify("12 Person"),
"12-person")
def test_non_alphanum(self):
self.assertEqual(
slugify("N$ one"),
"n-one")
|
class Test_slugify(unittest.TestCase):
def test_str_1(self):
pass
def test_num(self):
pass
def test_non_alphanum(self):
pass
| 4 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 3 | 0 | 3 | 75 | 16 | 3 | 13 | 4 | 9 | 0 | 7 | 4 | 3 | 1 | 2 | 0 | 3 |
143,235 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/tests/test_frameworkutilities.py
|
tests.test_frameworkutilities.Test_render_without_request
|
class Test_render_without_request(unittest.TestCase):
def test_template_1(self):
result = render_without_request(
"jsonApiQueryTemplate.rq")
self.assertTrue(len(result) > 0)
def test_nonexistent_template(self):
from jinja2.exceptions import TemplateNotFound
self.assertRaises(
TemplateNotFound,
render_without_request,
"test.html")
|
class Test_render_without_request(unittest.TestCase):
def test_template_1(self):
pass
def test_nonexistent_template(self):
pass
| 3 | 0 | 5 | 0 | 5 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 2 | 0 | 2 | 74 | 13 | 2 | 11 | 5 | 7 | 0 | 7 | 5 | 3 | 1 | 2 | 0 | 2 |
143,236 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/tests/test_frameworkutilities.py
|
tests.test_frameworkutilities.Test_remove_null
|
class Test_remove_null(unittest.TestCase):
def test_remove_null_list(self):
self.assertEqual(remove_null([1, None, 2]),
[1,2])
def test_remove_null_set(self):
self.assertEqual(remove_null(set([None, 1])),
set([1,]))
def test_remove_null_no_null(self):
self.assertEqual(remove_null([1,2]),
[1,2])
|
class Test_remove_null(unittest.TestCase):
def test_remove_null_list(self):
pass
def test_remove_null_set(self):
pass
def test_remove_null_no_null(self):
pass
| 4 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 3 | 0 | 3 | 75 | 13 | 3 | 10 | 4 | 6 | 0 | 7 | 4 | 3 | 1 | 2 | 0 | 3 |
143,237 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/tests/test_frameworkutilities.py
|
tests.test_frameworkutilities.Test_nz
|
class Test_nz(unittest.TestCase):
def test_nz_none(self):
self.assertEqual(
nz(None, "a test"),
"a test")
def test_nz_empty_str_none_val(self):
self.assertEqual(
nz(None, ""),
"")
def test_nz_empty_str_val(self):
self.assertEqual(
nz("", "a test"),
"")
def test_nz_empty_str_val_strict(self):
self.assertEqual(
nz("", "a test", False),
"a test")
|
class Test_nz(unittest.TestCase):
def test_nz_none(self):
pass
def test_nz_empty_str_none_val(self):
pass
def test_nz_empty_str_val(self):
pass
def test_nz_empty_str_val_strict(self):
pass
| 5 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 4 | 0 | 4 | 76 | 21 | 4 | 17 | 5 | 12 | 0 | 9 | 5 | 4 | 1 | 2 | 0 | 4 |
143,238 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/tests/utilities/test_baseutilities.py
|
tests.utilities.test_baseutilities.TestGetObjectFromString
|
class TestGetObjectFromString(unittest.TestCase):
def setUp(self):
pass
def test_get_obj_frm_str_basic(self):
self.assertEqual(baseutilities.get_obj_frm_str('str'),
str)
self.assertEqual(baseutilities.get_obj_frm_str('dict'),
dict)
def test_get_obj_frm_str_connection(self):
from rdfframework.connections import ConnManager
self.assertEqual(
baseutilities.get_obj_frm_str(
"rdfframework.connections.ConnManager"),
ConnManager)
def test_get_obj_frm_str_manager(self):
from rdfframework.datatypes import RdfNsManager
self.assertEqual(
baseutilities.get_obj_frm_str("rdfframework.datatypes.RdfNsManager"),
RdfNsManager)
def tearDown(self):
pass
|
class TestGetObjectFromString(unittest.TestCase):
def setUp(self):
pass
def test_get_obj_frm_str_basic(self):
pass
def test_get_obj_frm_str_connection(self):
pass
def test_get_obj_frm_str_manager(self):
pass
def tearDown(self):
pass
| 6 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 1 | 3 | 1 | 0 | 5 | 0 | 5 | 77 | 29 | 8 | 21 | 8 | 13 | 0 | 14 | 8 | 6 | 1 | 2 | 0 | 5 |
143,239 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/configuration/rdfwconfig.py
|
rdfframework.configuration.rdfwconfig.IgnoreClass
|
class IgnoreClass():
pass
|
class IgnoreClass():
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 0 | 0 | 0 |
143,240 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/configuration/rdfwconfig.py
|
rdfframework.configuration.rdfwconfig.RdfConfigManager
|
class RdfConfigManager(metaclass=ConfigSingleton):
"""
Configuration Manager for the application.
*** Of Note: this is a singleton class and only one instance of it will
exist.
args:
config: the configuration module or dictionary of attributes
kwargs:
exit_on_error: True will kill the application if there is an error with
the configuration. False will prompt the user to correct any
issues with the configuration.
verify: Boolean. Whether to verify the configuration against the
requirements. Default is True
autosave: Boolean. True will automatically save any updates to the
configuration file. False will prompt the user with save options
requirements: a dictionary of attribute requirements to override the
default attribute requirements. Requirements are updated with
these requirements. To remove a requirement completely use the
'remove_reqs' kwarg.
remove_reqs: list of requirement attribute names to remove
"""
__err_file_name__ = "config_errs.txt"
__type = 'DictClass'
__reserved = ['dict',
'get',
'items',
'keys',
'values',
'_RdfConfigManager__reserved',
'is_intialized',
'_DictClass__type',
'debug',
'os']
__cfg_reqs__ = OrderedDict([
("LOGGING", {"required": True,
"type": (bool, dict),
"description": "False: Turns logging off. - "
"True[default]: Uses default logging "
"setup. - "
"Dictionay is passed into the python "
"logging.config.dictConfig()",
"default": True}),
("TURN_ON_VOCAB", {"required": True,
"type": bool,
"description": "True: intelligent classes are "
"created based off of the RDF "
"vocabulary definitions and custom "
"defintions. - False: only a basic "
"RDF class is used. *Start time is "
"increased with this on. It is "
"required to be on for all "
"advanced rdfframework functions.",
"default": False}),
("SECRET_KEY", {"required": True,
"description": "secret key to be used "
"by the Flask application "
"security setting",
"type": str,
"length": {"min": 64, "max": 256}}),
("SITE_NAME", {"required": True,
"description": "the name used for the site",
"type": str}),
("BASE_URL", {"required": True,
"description": "base URL for the site",
"type": str,
"format": "url"}),
("CONNECTIONS", {"required": True,
"type": list,
"item_type": dict,
"item_dict": OrderedDict([
("name", {
"type": str,
"required": True,
"description": """name for the connection.
typical names are:
datastore*, search,
active_defs, repo * -
required"""}),
("conn_type", {
"type": str,
"required": True,
"options": "rdfframework.connections.RdfwConnections.nested",
"description": "type of connection"}),
("vendor", {
"type": str,
"required": True,
"options": "rdfframework.connections.RdfwConnections[{conn_type}]",
"description": """the producer of the connection
for the specified connection type"""}),
("url", {
"type": str,
"required": False,
"format": "url",
"description": """the url for the
connection"""}),
("local_url", {
"type": str,
"required": False,
"format": "url",
"description": """alternate/local url for the
connection"""}),
("container_dir", {
"type": str,
# "format": "directory",
"required": False,
"description": """directory path as the docker
container of the connection sees a
shared directory with the python
application. This is paired with the
'python_dir' value."""}),
("namespace", {
"type": str,
"required": False,
"description": "Only applicable for "
"'triplestore' connection types. Each "
"namespace acts a different store "
"within the same triplestore."
}),
("python_dir", {
"type": str,
# "format": "directory",
"required": False,
"description": """directory path as the python
application sees a shared directory
with the connection running in a
docker container. This is paired with
the 'container_dir' value."""}),
("kwargs", {
"type": dict,
"doc": "rdfframework.connections.RdfwConnections[{conn_type}][{vendor}]",
"description": """additional kwargs as detailed
in the below __doc__ string. Use python
dictionary notation"""})
]),
"action": "send to ConnManager",
"format": "create_instance",
"action": {"type": "replace",
"object": "rdfframework.connections.ConnManager",
"args": "self",
"attr": "conns"},
# "optional_items": [{"name":'search'},
# {"name":'repository'},
# {"name":'active_defs'}]
"req_items": [{"name": "datastore",
"description": "data triplestore",
"default_values": {
"namespace": "kb",
"conn_type": "triplestore"
} },
{"name": "active_defs",
"description": "triplestore for "
"storing vocabularies and "
"defintions for class "
"generation.",
"default_values": {
"namespace": "active_defs",
"conn_type": "triplestore"},
"remove_if": {"attr": "TURN_ON_VOCAB",
"value": False}
}]
}),
("RML_MAPS", {"required": False,
"type": list,
"item_type": dict,
"description": ['List of locations containing RML '
'mappings for registering within the '
'rdfframework.'],
"item_dict": OrderedDict([
("location_type", {"type": str,
"options": ['directory',
'package_file',
'package_all',
'filepath'],
"required": True,
"description": [""" The
method to look for the
mapping files"""]}),
("location", {
"type": str,
"required": True,
"description": [
'use the below key based on the '
'selection in location type',
" directory' -> '/directory/path'",
" filepath -> '/path/to/a/file'",
" package_all -> 'name.of.a.package.with.defs'",
" package_file -> 'name.of.package'"]
}),
("filename", {
"type": str,
"required": False,
"description": "filename for 'package_file"
})
])
}),
("DIRECTORIES", {"required": True,
"type": list,
"item_type": dict,
"description": 'Directory paths and short names for'
' use. Each directory path is accessible'
' through the RdfConfigManager() by'
' selecting the following notation:\n\n'
'\t RdfConfigManager.dirs.name\n\n'
'Where the name is a short descriptive'
' word describing the use of the directory.'
' There are 5 functional names used by'
' the rdfframework.\n'
'\t1. base - a base directory for '
'creating additional directories '
'*required\n\t2. logs',
"item_dict": OrderedDict([
("name", {"type": str,
"required": True,
"description": ["""descriptive name of
the directory, i.e. 'base'.""",
"""Additional subdirectories
will be created if not
otherwise specifed here.""",
"""*** the 'base' directory is
required. ***""",
"""Subdirectrories created if
not specified otherwise:""",
"logs: base/logs",
"data: base/data",
"cache: base/cache",
"""vocabularies:
base/vocabularies"""]}),
("path", {"type": str,
"required": True,
"description": "directory path"})]),
"format": "directory",
"action": {"type": "add_attr",
"key": "name",
"value": "path",
"attr": "dirs",
"auto_create": {"logs": ("base", "logs"),
"data": ("base", "data"),
"cache": ("base", "cache"),
"vocabularies":
("base", "vocabularies")}},
"req_items": [{"name": "base",
"description": """the base directory for
saving application data""",
"dict_params": {"path":
{"format":
"writable"}}}]}),
("NAMESPACES", {"required": False,
"type": dict,
"item_type": str,
"action": {"type": "replace",
"object": "rdfframework.datatypes.RdfNsManager",
"args": "self"},
"format": "namespace",
"description": "URI for an RDF namespace"})
])
def __init__(self, config=None, **kwargs):
self.__config__ = {}
self.__config_file__ = None
self.__config_dir__ = None
self.__err_file__ = None
self.__locked__ = False
self.__is_initialized__ = False
if config:
self.__load_config__(config, **kwargs)
def __load_config__(self, config, **kwargs):
""" Reads a python config file and initializes the cloass
args:
obj: the config data
"""
# The configuration cannot be initialised more than once
if self.__is_initialized__:
raise ImportError("RdfConfigManager has already been initialized")
self.__set_cfg_reqs__(**kwargs)
self.__set_cfg_attrs__(config, **kwargs)
self.__config__['TURN_ON_VOCAB'] = kwargs.get("turn_on_vocab",
self.__config__.get("TURN_ON_VOCAB",
self.__cfg_reqs__["TURN_ON_VOCAB"]['default']))
errors = self.__verify_config__(self.__config__, **kwargs)
self.__reslove_errors__(errors, **kwargs)
log.info("CONFIGURATION validated")
self.__is_initialized__ = True
log.info("Initializing directories")
self.__initialize_directories__(**kwargs)
log.info("Setting Logging")
self.__set_logging__(**kwargs)
log.info("setting RDF Namespaces")
self.__load_namespaces__(**kwargs)
log.info("Initializing Connections")
self.__initialize_conns__(**kwargs)
log.info("Registering RML Mappings")
self.__register_mappings__(**kwargs)
self.__run_defs__(**kwargs)
def __register_mappings__(self, **kwargs):
"""
Registers the mappings as defined in the configuration file
"""
from rdfframework import rml
rml_mgr = rml.RmlManager()
if getattr(self, "RML_MAPS"):
rml_mgr.register_defs(getattr(self, "RML_MAPS"))
self.rml = rml_mgr
def __set_logging__(self, **kwargs):
import rdfframework
if not self.LOGGING:
rdfframework.configure_logging(rdfframework.__modules__, "dummy")
if self.LOGGING == True:
return
for handler in self.LOGGING.get('handlers', {}):
if self.LOGGING['handlers'][handler].get('filename'):
fname = self.LOGGING['handlers'][handler]['filename']
fname = os.path.join(self.dirs.logs, fname)
self.LOGGING['handlers'][handler]['filename'] = fname
logging.config.dictConfig(self.LOGGING)
rdfframework.set_module_loggers(rdfframework.__modules__,
method='active')
def __load_namespaces__(self, **kwargs):
ns_mgr = get_obj_frm_str("rdfframework.datatypes.RdfNsManager")
# pdb.set_trace()
if self.namespaces:
self.__config__['nsm'] = ns_mgr(self.namespaces)
else:
self.__config__['nsm'] = ns_mgr()
def __run_defs__(self, **kwargs):
"""
Generates all of the classes based on the loaded RDF vocabularies and
custom definitions
"""
if self.__config__.get("TURN_ON_VOCAB") or kwargs.get("turn_on_vocab"):
from rdfframework.rdfclass import (RdfPropertyFactory,
RdfClassFactory)
conn = self.__config__.get('conns',{}).get('active_defs')
if conn:
log.info("Starting RDF Property and Class creation")
RdfPropertyFactory(conn, reset=kwargs.get("reset"))
RdfClassFactory(conn, reset=kwargs.get("reset"))
else:
log.warning("No definition connection found. rdfframework "
"initialized with out definitions")
else:
source = []
if kwargs.get("turn_on_vocab") == False:
source = ("keyword arg", "turn_on_vocab")
elif self.__config__.get("TURN_ON_VOCAB") == False:
source = ("config attr", "TURN_ON_VOCAB")
log.warning("rdfframework initialized without rdf "
"definitions because of '%s' -> '%s'",
*source)
def __verify_config__(self, config, **kwargs):
""" reads through the config object and validates missing arguments
args:
config: the config object
"""
log.info("Verifiying config settings")
error_dict = OrderedDict()
for attr, req in self.__cfg_reqs__.items():
req = update_req(attr, req, config)
result = test_attr(get_attr(config, attr), req)
if result:
if 'value' not in result \
and result['reason'] not in ['dict_error',
'list_error']:
result['value'] = get_attr(config, attr)
error_dict[attr] = result
return error_dict
def __reslove_errors__(self, errors={}, **kwargs):
""" Determines how to deal with and config issues and resolves them
args:
errors: list of config errors
"""
def process_entry(req_type, new_value, old_value=''):
new_value = new_value.strip()
if new_value == '':
return old_value
elif new_value.lower() == 'help()':
print(format_multiline(__MSGS__['help']))
return old_value
elif new_value.lower() == 'clear()':
return ClearClass
elif new_value.lower() == 'none()':
return None
elif new_value.lower() == 'ignore()':
rtn_val = (IgnoreClass, old_value)
return rtn_val
try:
return req_type(new_value)
except:
try:
return eval(new_value)
except:
raise
def get_missing(self, attr):
req = self.__cfg_reqs__[attr]
errors = {"msg": '', "value": ''}
if req['type'] == str:
while True:
err = ''
if errors['msg']:
err = "{} [{}]".format(colors.warning(errors['msg']),
colors.fail(value))
print("Enter {attr}: {desc} {err}"
"".format(attr=colors.fail(attr),
desc=colors.cyan(req['description']),
err=err))
value = input('-> ')
try:
value = process_entry(req['type'],
value,
errors['value'])
errors = test_attr(value, req)
if not errors and value != '':
return value
errors['value'] = value
except SyntaxError:
pass
elif req['type'] == list:
return []
def fix_format(self, attr, error, value=None):
req = self.__cfg_reqs__[attr]
if req['type'] == str:
while True:
print("{err} {attr}: {desc} Error: {error}"
"\n\tEnter corrected value [{val}]"
"".format(err=colors.fail("ERROR"),
attr=colors.fail(attr),
desc=colors.cyan(req['description']),
error=colors.warning(error.get("msg")),
val=colors.fail(value)))
val = input('-> ')
try:
val = process_entry(req['type'], val, value)
new_err = test_attr({attr: val}, req)
if not new_err:
return val
except SyntaxError:
pass
elif req['type'] == list:
return []
def fix_str(self, attr, key, value):
req = self.__cfg_reqs__[attr]
while True:
print("{err} {key} | value: {val} | *** {msg}\n\t{desc}\n - "
"Enter corrected value [{val}]: "
"".format(err=colors.fail("ERROR"),
key=colors.warning(key),
val=colors.fail(value['value']),
msg=colors.yellow(value['msg']),
desc=colors.green(req['description'])))
new_val = input("-> ")
try:
new_val = process_entry(req['type'], new_val, value)
errors = test_attr({key: new_val}, req)
if not errors:
return new_val
value = errors['items'][key]
except SyntaxError:
pass
def fix_item(self, req, obj):
for key, val in req['item_dict'].items():
while True:
new_req = copy.deepcopy(req)
errors = test_attr([strip_errors(obj)],
new_req,
skip_req_items=True)
for ky in errors.get('items',
[{}])[0].get('__error_keys__', []):
obj[ky] = errors['items'][0][ky]
if errors:
obj['__error_keys__'] = \
errors['items'][0]['__error_keys__']
else:
idx = obj.get('__list_idx__')
obj = strip_errors(obj)
obj.update({'__list_idx__': idx, '__error_keys__': []})
desc = format_multiline(val.get("description", ""))
desc_items = ["%s: %s" % (i_key,
colors.cyan(format_multiline(i_val)))
for i_key, i_val in sorted(val.items())
if i_key.lower() not in ["doc", "options"]]
if val.get("doc"):
try:
doc = get_obj_frm_str(val['doc'], **obj)
except AttributeError:
doc = None
if doc:
desc_items.append("__doc__: %s" % doc.__doc__)
if val.get("options"):
options = get_options_from_str(val['options'], **obj)
desc_items.append("options: %s" % colors.warning(options))
desc = "\n\t".join(desc_items)
if isinstance(obj.get(key), dict) and \
(obj[key].get('msg') or obj[key].get('reason')):
print("{err} {key} | value: {val} | *** {msg}\n\t{desc}\n - Enter corrected value [{val}]: ".format(
err=colors.fail("ERROR"),
key=colors.warning(key),
val=colors.fail(obj[key]['value']),
msg=colors.yellow(obj[key].get('msg') \
or obj[key].get('reason')),
desc=colors.green(desc)))
new_val = input("-> ")
try:
new_val = process_entry(val['type'],
new_val,
obj[key]['value'])
except (SyntaxError, NameError):
obj[key] = {"msg": "SyntaxError",
"value": new_val}
continue
# new_val = new_val or obj[key]['value']
else:
print("{ok} {key} | value: {val}\n\t{desc}\n - Enter to keep current value [{val}]: ".format(
ok=colors.green("OK"),
key=colors.lcyan(key),
val=colors.green(obj.get(key)),
desc=desc))
new_val = input("-> ")
try:
new_val = process_entry(val['type'],
new_val,
obj.get(key))
except SyntaxError:
obj[key] = {"msg": "SyntaxError",
"value": new_val}
continue
errors = test_attr(new_val, val, obj)
if not errors:
if key == 'kwargs' and new_val:
obj.update(new_val)
try:
del obj['kwargs']
except KeyError:
pass
else:
obj[key] = new_val
try:
obj['__error_keys__'].remove(key)
except ValueError:
pass
errors = test_attr([strip_errors(obj)],
new_req,
skip_req_items=True)
if key not in errors.get('items',
[{}])[0].get('__error_keys__', []):
break
else:
errors["value"] = new_val
obj[key] = errors
return {key: value for key, value in obj.items()
if not key.startswith("__")}
def cycle_errors(self, errors, cfg_obj):
for attr, err in errors.items():
if err.get("set"):
cfg_obj[attr] = err['set']
elif err['reason'] == "format":
cfg_obj[attr] = fix_format(self,
attr,
err,
cfg_obj.get(attr))
elif err['reason'] == "missing":
cfg_obj[attr] = get_missing(self, attr)
elif err['reason'] == "list_error":
req = self.__cfg_reqs__[attr] #['item_dict']
print("Correcting list items for configuration item: \n\n",
"***",
attr,
"****\n")
for item in err['items']:
new_item = fix_item(self, req, item)
if item['__list_idx__'] == None:
try:
cfg_obj[attr].append(new_item)
except KeyError:
cfg_obj[attr] = [new_item]
else:
cfg_obj[attr][item['__list_idx__']] = new_item
elif err['reason'] == "dict_error":
if self.__cfg_reqs__[attr]['item_type'] == dict:
req = self.__cfg_reqs__[attr] #['item_dict']
elif self.__cfg_reqs__[attr]['item_type'] == str:
req = self.__cfg_reqs__[attr]
print("Correcting dictionay for item:\n\n",
colors.warning("**** %s ****\n" % attr))
for item, val in err['items'].items():
new_val = fix_str(self, attr, item, val)
cfg_obj[attr][item] = new_val
if not errors:
return
msg_kwargs = dict(time=datetime.datetime.now(),
err_msg=self.__format_err_summary__(errors),
cfg_path=self.__config_file__,
err_path=self.__err_file__)
if kwargs.get("verify") == False:
log.warning("IGNORING BELOW CONFIGURATION ERRORS")
log.warning(self.__make_error_msg__(errors, False, **kwargs))
self.__write_error_file__(errors, **kwargs)
return
print(format_multiline(__MSGS__["initial"], **msg_kwargs))
while True:
if kwargs.get("exit_on_error") == True:
resolve_choice = "2"
else:
print(format_multiline(__MSGS__["resolve_options"]))
resolve_choice = input("-> ")
if resolve_choice.strip() == "2":
sys.exit(self.__make_error_msg__(errors, **kwargs))
elif resolve_choice.strip() in ["", "1"]:
print(format_multiline(__MSGS__['help']))
break
while True:
cycle_errors(self, errors, self.__config__)
errors = self.__verify_config__(self.__config__, **kwargs)
if not errors:
break
self.__save_config__(**kwargs)
self.__remove_ignore__(**kwargs)
# print(self.__format_err_summary__(errors))
def __remove_ignore__(self, **kwargs):
def test_ignore(val):
if isinstance(val, tuple) and val:
if val[0] == IgnoreClass:
return val[1]
return val
def clean_ignore(item):
if isinstance(item, dict):
for key, val in item.items():
item[key] = clean_ignore(val)
elif isinstance(item, list):
for i, sub in enumerate(item):
item[i] = clean_ignore(sub)
return test_ignore(item)
new_config = clean_ignore(self.__config__)
pprint.pprint(new_config)
def __write_error_file__(self, errors, **kwargs):
if self.__err_file__:
with open(self.__err_file__, "w") as fo:
fo.write(self.__make_error_msg__(errors, False, **kwargs))
def __make_error_msg__(self, errors, colors_on=True, **kwargs):
colors.turn_on()  # assumption: turn_on/turn_off are toggle methods
if not colors_on:
colors.turn_off()
msg_kwargs = dict(time=datetime.datetime.now(),
err_msg=self.__format_err_summary__(errors),
cfg_path=self.__config_file__,
err_path=self.__err_file__)
msg = format_multiline(__MSGS__["exit"], **msg_kwargs)
colors.turn_on()
return msg
def __save_config__(self, **kwargs):
"""
Provides the user the option to save the current configuration
kwargs:
autosave: True automatically saves the config file
False prompts user
"""
option = "1"
if self.__config_file__:
new_path = self.__config_file__
config_dir = os.path.split(self.__config_file__)[0]
filename = os.path.split(self.__config_file__)[1]
if not kwargs.get("autosave"):
while True:
print(format_multiline(__MSGS__['save']))
option = input("-> ").strip()
if option in ["1", "2", "3"]:
break
if option == "3":
return
if option == "1" and not self.__config_file__:
print(colors.red("Config file location could not be determined"))
option = "2"
if option == "2":
while True:
new_path = input("Enter a file path to save the new "
"configuation [exit(), continue()]-> ")
if new_path.lower() == "exit()":
sys.exit()
elif new_path.lower() == "continue()":
option = "3"
break
elif not new_path:
continue
try:
path = os.path.split(new_path)
if not path[0]:
path = (config_dir, path[1])
if not os.path.isdir(path[0]):
print(" ** directory does not exist")
raise OSError
elif not is_writable_dir(path[0], mkdir=True):
print(" ** directory is not writable")
raise OSError
new_path = os.path.join(*path)
break
except OSError:
pass
elif option == "1":
shutil.copy(self.__config_file__, self.__config_file__ + ".bak")
with open(new_path, "w") as fo:
fo.write(self.__format_save_config__(self.__config__,
self.__cfg_reqs__,
**kwargs))
def __set_cfg_attrs__(self, config, **kwargs):
def read_module_attrs(module, ignore=[]):
""" Returns the attributes of a module in an dict
args:
module: the module to read
ignore: list of attr names to ignore
"""
rtn_obj = {attr: getattr(module, attr)
for attr in dir(module)
if attr not in ignore
and not attr.startswith("_")
and not isinstance(getattr(module, attr),
types.ModuleType)}
return rtn_obj
# if the config is a module determine the module path
if isinstance(config, types.ModuleType):
self.__config_file__ = config.__file__
self.__config_dir__ = os.path.split(self.__config_file__)[0]
self.__err_file__ = os.path.join(self.__config_dir__,
self.__err_file_name__)
new_config = read_module_attrs(config, self.__reserved)
else:
new_config = copy.deepcopy({attr: value
for attr, value in config.items()
if not attr.startswith("_")})
self.__config__ = OrderedDict()
for attr, req in self.__cfg_reqs__.items():
if new_config.get(attr):
self.__config__[attr] = new_config.pop(attr)
elif "default" in req:
self.__config__[attr] = req["default"]
self.__config__.update(new_config)
def __set_cfg_reqs__(self, requirements=None, **kwargs):
""" Applies any new requirements
args:
requirements: dictionary of attribute requirements
kwargs:
remove_reqs: list of requirement names to remove
"""
if requirements:
self.__cfg_reqs__.update(requirements)
for attr in kwargs.get('remove_reqs', []):
try:
del self.__cfg_reqs__[attr]
except KeyError:
pass
def __initialize_conns__(self, **kwargs):
"""
Reads the loaded config and creates the defined database
connections
"""
if not self.__config__.get("CONNECTIONS"):
return
conn_mgr = get_obj_frm_str("rdfframework.connections.ConnManager")
self.__config__['conns'] = conn_mgr(self.__config__['CONNECTIONS'],
**kwargs)
# RdfPropertyFactory(CFG.def_tstore, reset=reset)
# RdfClassFactory(CFG.def_tstore, reset=reset)
def __initialize_directories__(self, **kwargs):
"""
reads through the config and verifies if all directories exist and
creates them if they do not
"""
if not self.__config__.get("DIRECTORIES"):
return
dir_config = self.__config__.get('DIRECTORIES', [])
dirs = {item['name']: item['path'] for item in dir_config}
req = self.__cfg_reqs__['DIRECTORIES']
auto = req.get("action", {}).get("auto_create", {})
for name, args in auto.items():
if name not in dirs:
dirs[name] = os.path.join(dirs.get(args[0], args[0]), args[1])
for name, path in dirs.items():
path_parts = [item for item in path.split(os.path.sep) if item]
if path_parts and path_parts[0] in dirs:
new_parts = [dirs[path_parts[0]]] + path_parts[1:]
dirs[name] = os.path.join(*new_parts)
paths = sorted(dirs.values())
for path in paths:
if not os.path.exists(path):
log.warning("Creating Directory [%s]", path)
os.makedirs(path)
self.__config__['dirs'] = DictClass(dirs)
def __repr__(self):
if self.__is_initialized__:
return "<%s.%s object at %s> (\n%s)" % (self.__class__.__module__,
self.__class__.__name__,
hex(id(self)),
list(self.__config__))
else:
return "<RdfConfigManager has not been initialized>"
@initialized
def __getattr__(self, attr):
for key, value in self.__config__.items():
if attr.lower() == key.lower():
return value
return None
@initialized
def __getitem__(self, item):
if self.__config__.get(item):
return self.__config__.get(item)
return None
# if hasattr(self, item):
# return getattr(self, item)
# return None
@initialized
def __str__(self):
try:
return str(self.dict())
except TypeError:
return ""
def __setattr__(self, attr, value, override=False):
if attr.startswith("__"):
self.__dict__[attr] = value
elif self.__is_initialized__ and self.locked:
raise RuntimeError("The configuration may not be changed after" + \
" locking")
elif str(attr) in self.__reserved:
raise AttributeError("'%s' is a reserved word in this class." % \
attr)
elif not self.__is_initialized__ and isinstance(value, (list, dict)):
value = DictClass(value)
else:
self.__config__[attr] = value
@initialized
def dict(self):
""" converts the class to a dictionary object """
return DictClass(self.__config__).dict()
@initialized
def get(self, attr, none_val=None):
""" returns and attributes value or a supplied default
args:
attr: the attribute name
none_val: the value to return in the attribute is not found or
is equal to 'None'.
"""
return self.__config__.get(attr, none_val)
# if attr in self.keys():
# return getattr(self, attr)
# return none_val
@initialized
def keys(self):
""" returns a list of the attributes in the config manager """
# return [attr for attr in dir(self) if not attr.startswith("_") and \
# attr not in self.__reserved]
return self.__config__.keys()
@initialized
def values(self):
""" returns the values of the config manager """
# return [getattr(self, attr) for attr in dir(self) \
# if not attr.startswith("_") and attr not in self.__reserved]
return self.__config__.values()
@initialized
def items(self):
""" returns a list of tuples with the in a key: value combo of the
config manager """
# return_list = []
# for attr in dir(self):
# if not attr.startswith("_") and attr not in self.__reserved:
# return_list.append((attr, getattr(self, attr)))
# return return_list
return self.__config__.items()
def __format_err_summary__(self, errors, indent=0, initial=True):
"""
Formats the error dictionary for printing
args:
errors: the error dictionary
indent: the indent level in number of spaces
"""
ind_interval = 5
parts = []
ind = ''.ljust(indent, ' ')
curr_err = copy.deepcopy(errors)
msg_str = "{indent}{attr}: {val}{msg}"
good_dict = {}
if errors.get("__error_keys__"):
line = colors.hd(''.ljust(50, '-'))
parts.append(colors.hd("{}index number: {}".format(ind,
errors.get("__list_idx__"))))
parts.append("{}{}".format(ind, line))
curr_err = {key: curr_err[key] for key in errors['__error_keys__']}
indent += ind_interval
ind = ''.ljust(indent, ' ')
good_dict = {key: value for key, value in errors.items()
if key not in errors['__error_keys__']
and not key.startswith("__")}
for attr, value in curr_err.items():
msg = ''
val = ''
if not value.get('items'):
val = "[{}] error: ".format(
colors.lcyan(value.get("value", "None")))
msg = colors.warning(value.get("msg", value.get("reason")))
parts.append(msg_str.format(indent=ind,
attr=colors.fail(attr),
val=val,
msg=msg))
if value.get('items'):
if isinstance(value['items'], list):
for item in value['items']:
parts += self.__format_err_summary__(item,
indent + ind_interval,
False)
elif isinstance(value['items'], dict):
sub_ind = ''.ljust(indent + ind_interval, ' ')
for key, value in value['items'].items():
val = "[{}] error: ".format(
colors.lcyan(value.get("value", "None")))
msg = colors.warning(value.get("msg",
value.get("reason")))
parts.append(msg_str.format(indent=sub_ind,
val=val,
attr=colors.fail(key),
msg=msg))
for attr, value in good_dict.items():
parts.append(msg_str.format(indent=ind,
val=colors.blue(value),
msg="",
attr=colors.blue(attr)))
if initial:
return "\n".join(parts)
else:
return parts
def __format_save_config__(self, obj, attr_reqs, initial=True, **kwargs):
"""
Formats the current configuration for saving to file
args:
obj: the config object
initial: bool argument for recursive call catching
kwargs:
indent: the indent level in number of spaces
"""
ind_interval = 5
ind = ''.ljust(kwargs.get('indent', 0), ' ')
ind2 = ind + ''.ljust(ind_interval, ' ')
parts = []
curr_obj = copy.deepcopy(obj)
# comment_kwargs = copy.deepcopy(kwargs)
# comment_kwargs['prepend'] = "# "
attr_str = "{cmt}{attr} = {value}"
good_dict = {}
pp_kwargs = {key: value for key, value in kwargs.items()
if key in ['indent', 'depth']}
for attr, req in attr_reqs.items():
if req.get("description"):
parts.append(format_multiline(req['description'],
prepend="## ",
max_width=78,
**pp_kwargs))
value = obj.get(attr, req.get('standard', req.get('default')))
if isinstance(value, tuple) and value:
if value[0] == IgnoreClass:
value = value[1]
parts.append("#! Ignored errors for this item")
if attr in obj:
parts.append(attr_str.format(attr=attr,
value=pprint.pformat(value,
**pp_kwargs),
cmt=''))
else:
parts.append(attr_str.format(attr=attr,
value=str(value),
cmt='# '))
parts.append("\n#! *** non specified attributes ***\n")
for attr, value in obj.items():
if attr not in attr_reqs:
parts.append(attr_str.format(attr=attr,
value=pprint.pformat(value,
**pp_kwargs),
cmt=''))
return "\n\n".join(parts)
|
class RdfConfigManager(metaclass=ConfigSingleton):
'''
Configuration Manager for the application.
*** Of Note: this is a singleton class and only one instance of it will
exist.
args:
config: the configuration module or dictionary of attributes
kwargs:
exit_on_error: True will kill the application if there is an error with
the configuration. False will prompt the user to correct any
issues with the configuration.
verify: Boolean. Whether to verify the configuration against the
requirements. Default is True
autosave: Boolean. True will automatically save any updates to the
configuration file. False will prompt the user with save options
requirements: a dictionary of attribute requirements to override the
default attribute requirements. Requirements are updated with the
these requirements. To remove a requirement completely use the
'remove_reqs' kwarg.
remove_reqs: list of requirement attribute names to remove
'''
def __init__(self, config=None, **kwargs):
pass
def __load_config__(self, config, **kwargs):
''' Reads a python config file and initializes the class
args:
config: the config data
'''
pass
def __register_mappings__(self, **kwargs):
'''
Registers the mappings as defined in the configuration file
'''
pass
def __set_logging__(self, **kwargs):
pass
def __load_namespaces__(self, **kwargs):
pass
def __run_defs__(self, **kwargs):
'''
Generates all of the classes based on the loaded RDF vocabularies and
custom definitions
'''
pass
def __verify_config__(self, config, **kwargs):
''' reads through the config object and checks for missing or invalid arguments
args:
config: the config object
'''
pass
def __reslove_errors__(self, errors={}, **kwargs):
''' Determines how to deal with any config issues and resolves them
args:
errors: list of config errors
'''
pass
def process_entry(req_type, new_value, old_value=''):
pass
def get_missing(self, attr):
pass
def fix_format(self, attr, error, value=None):
pass
def fix_str(self, attr, key, value):
pass
def fix_item(self, req, obj):
pass
def cycle_errors(self, errors, cfg_obj):
pass
def __remove_ignore__(self, **kwargs):
pass
def test_ignore(val):
pass
def clean_ignore(item):
pass
def __write_error_file__(self, errors, **kwargs):
pass
def __make_error_msg__(self, errors, colors_on=True, **kwargs):
pass
def __save_config__(self, **kwargs):
'''
Provides the user the option to save the current configuration
kwargs:
autosave: True automatically saves the config file
False prompts user
'''
pass
def __set_cfg_attrs__(self, config, **kwargs):
pass
def read_module_attrs(module, ignore=[]):
''' Returns the attributes of a module in a dict
args:
module: the module to read
ignore: list of attr names to ignore
'''
pass
def __set_cfg_reqs__(self, requirements=None, **kwargs):
''' Applies any new requirements
args:
requirements: dictionary of attribute requirements
kwargs:
remove_reqs: list of requirement names to remove
'''
pass
def __initialize_conns__(self, **kwargs):
'''
Reads the loaded config and creates the defined database
connections
'''
pass
def __initialize_directories__(self, **kwargs):
'''
reads through the config and verifies if all directories exist and
creates them if they do not
'''
pass
def __repr__(self):
pass
@initialized
def __getattr__(self, attr):
pass
@initialized
def __getitem__(self, item):
pass
@initialized
def __str__(self):
pass
def __setattr__(self, attr, value, override=False):
pass
@initialized
def dict(self):
''' converts the class to a dictionary object '''
pass
@initialized
def get(self, attr, none_val=None):
''' returns an attribute's value or a supplied default
args:
attr: the attribute name
none_val: the value to return if the attribute is not found or
is equal to 'None'.
'''
pass
@initialized
def keys(self):
''' returns a list of the attributes in the config manager '''
pass
@initialized
def values(self):
''' returns the values of the config manager '''
pass
@initialized
def items(self):
''' returns a list of tuples with the in a key: value combo of the
config manager '''
pass
def __format_err_summary__(self, errors, indent=0, initial=True):
'''
Formats the error dictionary for printing
args:
errors: the error dictionary
indent: the indent level in number of spaces
'''
pass
def __format_save_config__(self, obj, attr_reqs, initial=True, **kwargs):
'''
Formats the current configuration for saving to file
args:
obj: the config object
initial: bool argument for recursive call catching
kwargs:
indent: the indent level in number of spaces
'''
pass
| 46 | 18 | 27 | 1 | 23 | 3 | 5 | 0.15 | 1 | 23 | 7 | 0 | 28 | 7 | 28 | 43 | 1,048 | 76 | 854 | 147 | 804 | 124 | 470 | 136 | 429 | 17 | 3 | 5 | 174 |
143,241 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/datamergers/bftopic_merger.py
|
rdfframework.datamergers.bftopic_merger.SparqlMerger
|
class SparqlMerger(object):
""" Base class of merging rdf class instances via spaqrl """
ln = "%s-SparqlMerger" % MNAME
log_level = logging.DEBUG
local_filename = "owltags.ttl"
def __init__(self, conn, uri_select_query, namespace):
self.conn = conn
self.uri_select_query = uri_select_query
self.namespace = namespace
def run(self):
self.get_uris()
self.create_same_as_file()
self.send_to_tstore()
self.delete_local_file()
self.merge_instances()
def get_uris(self):
# first convert all blanknode topics to URIs
lg = logging.getLogger("%s.%s" % (self.ln, inspect.stack()[0][3]))
lg.setLevel(self.log_level)
lg.info("Converting BNs to URIs")
self.conn.query(CONVERT_BN_TO_URIS,
namespace=self.namespace,
mode='update')
lg.info("FINISHED converting BNs to URIs")
lg.info("Getting URI list")
self.uri_list = self.conn.query(self.uri_select_query,
namespace=self.namespace)
def create_same_as_file(self):
""" creates a local data file with all of the owl:sameAs tags """
def find_preferred_uri(uri_list):
index = None
for i, uri in enumerate(uri_list):
if uri.startswith("<http://id.loc.gov/authorities/subjects/"):
index = i
print(uri)
break
if index is None:
for i, uri in enumerate(uri_list):
if uri.startswith(\
"<http://id.loc.gov/authorities/childrensSubjects/"):
index = i
print(uri)
break
if index is None:
index = 0
return (uri_list.pop(index), uri_list)
with open(os.path.join(CFG.LOCAL_DATA_PATH, self.local_filename),
"w") as file_obj:
file_obj.write(NSM.prefix("turtle"))
for item in self.uri_list:
uris = item['uris']['value'].split(",")
new_list = find_preferred_uri(uris)
for uri in new_list[1]:
file_obj.write("%s kds:mergeWith %s .\n" % (uri,
new_list[0]))
def send_to_tstore(self):
result = self.conn.load_local_file(self.local_filename,
self.namespace)
return result
def delete_local_file(self):
os.remove(os.path.join(CFG.LOCAL_DATA_PATH, self.local_filename))
def merge_instances(self):
lg = logging.getLogger("%s.%s" % (self.ln, inspect.stack()[0][3]))
lg.setLevel(self.log_level)
lg.info("Updating old object references")
self.conn.query(UPDATE_OLD_OBJ_REF,
namespace=self.namespace,
mode='update')
lg.info("Deleting old objects")
self.conn.query(DELETE_OLD,
namespace=self.namespace,
mode='update')
lg.info("FINISHED")
|
class SparqlMerger(object):
''' Base class for merging RDF class instances via SPARQL '''
def __init__(self, conn, uri_select_query, namespace):
pass
def run(self):
pass
def get_uris(self):
pass
def create_same_as_file(self):
''' creates a local data file with all of the kds:mergeWith statements '''
pass
def find_preferred_uri(uri_list):
pass
def send_to_tstore(self):
pass
def delete_local_file(self):
pass
def merge_instances(self):
pass
| 9 | 2 | 12 | 1 | 11 | 0 | 2 | 0.04 | 1 | 1 | 0 | 0 | 7 | 4 | 7 | 7 | 89 | 15 | 71 | 26 | 62 | 3 | 60 | 25 | 51 | 7 | 1 | 3 | 16 |
143,242 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/datatypes/datatypeerrors.py
|
rdfframework.datatypes.datatypeerrors.NsUriBadEndingError
|
class NsUriBadEndingError(Exception):
""" Exception raised for a RdfNamespace 'uri' not ending in a '/' or '#'
Attributes:
message: explanation of the error
"""
def __init__(self, message):
self.message = message
def __getattr__(self, attr):
return None
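
A small sketch of how this error surfaces; RdfNamespaceMeta.__format_args__ (recorded later in this file) raises it when a namespace uri lacks the required terminal character:

try:
    RdfNamespace("bad", "http://example.org/ns")   # no trailing '/' or '#'
except NsUriBadEndingError as err:
    print(err.message)  # "incorrect ending for 'bad', 'http://example.org/ns'"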
|
class NsUriBadEndingError(Exception):
''' Exception raised for a RdfNamespace 'uri' not ending in a '/' or '#'
Attributes:
message: explanation of the error
'''
def __init__(self, message):
pass
def __getattr__(self, attr):
pass
| 3 | 1 | 2 | 0 | 2 | 0 | 1 | 0.8 | 1 | 0 | 0 | 0 | 2 | 1 | 2 | 12 | 12 | 3 | 5 | 4 | 2 | 4 | 5 | 4 | 2 | 1 | 3 | 0 | 2 |
143,243 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/datatypes/datatypeerrors.py
|
rdfframework.datatypes.datatypeerrors.NsUriExistsError
|
class NsUriExistsError(Exception):
""" Exception raised for assigment of RdfNamespace 'uri' if already defined
with a different 'prefix'
Attributes:
new_ns: the new ns paramaters
old_ns: the current ns patamaters
message: explanation of the error
"""
def __init__(self, new_ns, old_ns, message):
self.new_ns = new_ns
self.old_ns = old_ns
self.message = message
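
A sketch of how this error is raised and recovered from; RdfNamespace and its metaclass (recorded later in this file) raise it when a uri is re-bound under a new prefix, carrying both namespace tuples:

try:
    RdfNamespace("test", "http://test.org/")
    RdfNamespace("demo", "http://test.org/")   # same uri, new prefix
except NsUriExistsError as err:
    print(err.message)   # "uri [...] already assigned to [test]"
    ns = err.old_ns      # fall back to the already-registered namespace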
|
class NsUriExistsError(Exception):
''' Exception raised for assignment of RdfNamespace 'uri' if already defined
with a different 'prefix'
Attributes:
new_ns: the new ns parameters
old_ns: the current ns parameters
message: explanation of the error
'''
def __init__(self, new_ns, old_ns, message):
pass
| 2 | 1 | 4 | 0 | 4 | 0 | 1 | 1.4 | 1 | 0 | 0 | 0 | 1 | 3 | 1 | 11 | 14 | 2 | 5 | 5 | 3 | 7 | 5 | 5 | 3 | 1 | 3 | 0 | 1 |
143,244 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/datatypes/namespaces.py
|
rdfframework.datatypes.namespaces.NsmSingleton
|
class NsmSingleton(type):
"""Singleton class for the RdfNsManager that will allow for only one
instance of the RdfNsManager to be created. In addition the app config
can be sent to the RdfNsManager even after instantiation so that the
default RDF namespaces can be loaded. """
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(NsmSingleton,
cls).__call__(*args, **kwargs)
else:
if 'config' in kwargs and hasattr(kwargs['config'],
"DEFAULT_RDF_NS"):
cls._instances[cls].dict_load(kwargs['config'].DEFAULT_RDF_NS)
if 'config' in kwargs and hasattr(kwargs['config'],
"NAMESPACES"):
cls._instances[cls].dict_load(kwargs['config'].NAMESPACES)
try:
ns_arg = args[0]
if isinstance(ns_arg, dict):
cls._instances[cls].dict_load(ns_arg)
except IndexError:
pass
return cls._instances[cls]
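
A minimal demonstration of the singleton contract using a stand-in class (RdfNsManager itself is defined elsewhere); the dict_load name matches the hook the metaclass calls when a dict is passed to a later instantiation:

class DemoNsManager(metaclass=NsmSingleton):
    """Stand-in exposing the dict_load hook the metaclass expects."""
    def __init__(self, namespaces=None):
        self.namespaces = dict(namespaces or {})
    def dict_load(self, ns_dict):
        self.namespaces.update(ns_dict)

first = DemoNsManager({"schema": "http://schema.org/"})
second = DemoNsManager({"skos": "http://www.w3.org/2004/02/skos/core#"})
assert first is second              # only one instance ever exists
assert "skos" in first.namespaces   # later args folded in via dict_load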
|
class NsmSingleton(type):
'''Singleton class for the RdfNsManager that will allow for only one
instance of the RdfNsManager to be created. In addition the app config
can be sent to the RdfNsManager even after instantiation so that the
default RDF namespaces can be loaded. '''
def __call__(cls, *args, **kwargs):
pass
| 2 | 1 | 18 | 0 | 18 | 0 | 6 | 0.2 | 1 | 3 | 0 | 1 | 1 | 0 | 1 | 14 | 25 | 1 | 20 | 4 | 18 | 4 | 16 | 4 | 14 | 6 | 2 | 3 | 6 |
143,245 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/datatypes/namespaces.py
|
rdfframework.datatypes.namespaces.RdfNamespace
|
class RdfNamespace(metaclass=RdfNamespaceMeta):
""" A namespace is composed of a prefix and a URI
args:
prefix: short abbreviation for the uri
uri: the full 'http...' format
kwargs:
formatter: processor that takes args and returns a formatted string
args -> RdfNamespace and a value
* This is so that you can set an application-wide default return
format for namespace (ttl or uri with or without <>)
override(False): False raises error
True: replaces old ns paring with a new one.
old ('test', 'http://test.org/')
new ('test', 'http://TEST.org/')
sets old ns to ('test', 'http://TEST.org/')
ignore_errors(False): No errors are raised during ns assignment and
original ns is preserved
"""
_formatter = rdfuri_formatter
def __init__(self, prefix, uri, **kwargs):
self.__dict__['__ns__'] = (prefix, uri)
self.__dict__['_formatter'] = kwargs.get('formatter',
RdfNamespace._formatter)
def __getattr__(self, attr):
return self._formatter(self, attr)
def __repr__(self):
return "RdfNamespace(\"%s\", \"%s\")" % self.__ns__
def __iter__(self):
return iter(self.__ns__)
def __str__(self):
return self._sparql_
@property
def _ttl_(self):
return "@prefix %s: <%s> ." % (self.__ns__)
@property
def _sparql_(self):
return "prefix %s: <%s>" % (self.__ns__)
@property
def _xml_(self):
return "xmlns:%s=%s" % (self.__ns__[0], json.dumps(self.__ns__[1]))
def __getitem__(self, idx):
return self.__ns__[idx]
def __setattr__(self, attr, value):
if attr == '__ns__':
print("changing_ns")
if self.__dict__.get(attr) and self.__dict__[attr][0] != value[0]:
print("removing NSM attr")
delattr(NSM, self.__dict__[attr][0])
self.__dict__[attr] = value
setattr(NSM, value[0], self)
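
A hedged sketch of the output formats, following the property definitions above (assumes RdfNsManager is importable in the same module, since the metaclass routes the uri through RdfNsManager.clean_iri):

ns = RdfNamespace("schema", "http://schema.org/")
ns._ttl_      # '@prefix schema: <http://schema.org/> .'
ns._sparql_   # 'prefix schema: <http://schema.org/>'
ns._xml_      # 'xmlns:schema="http://schema.org/"'
prefix, uri = ns   # iterable as a (prefix, uri) pair
ns[1]              # 'http://schema.org/'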
|
class RdfNamespace(metaclass=RdfNamespaceMeta):
''' A namespace is composed of a prefix and a URI
args:
prefix: short abbreviation for the uri
uri: the full 'http...' format
kwargs:
formatter: processor that takes args and returns a formatted string
args -> RdfNamespace and a value
* This is so that you can set an application-wide default return
format for namespace (ttl or uri with or without <>)
override(False): False raises error
True: replaces old ns pairing with a new one.
old ('test', 'http://test.org/')
new ('test', 'http://TEST.org/')
sets old ns to ('test', 'http://TEST.org/')
ignore_errors(False): No errors are raised during ns assignment and
original ns is preserved
'''
def __init__(self, prefix, uri, **kwargs):
pass
def __getattr__(self, attr):
pass
def __repr__(self):
pass
def __iter__(self):
pass
def __str__(self):
pass
@property
def _ttl_(self):
pass
@property
def _sparql_(self):
pass
@property
def _xml_(self):
pass
def __getitem__(self, idx):
pass
def __setattr__(self, attr, value):
pass
| 14 | 1 | 3 | 0 | 3 | 0 | 1 | 0.52 | 1 | 0 | 0 | 0 | 10 | 0 | 10 | 27 | 63 | 13 | 33 | 15 | 19 | 17 | 29 | 12 | 18 | 3 | 3 | 2 | 12 |
143,246 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/datatypes/namespaces.py
|
rdfframework.datatypes.namespaces.RdfNamespaceMeta
|
class RdfNamespaceMeta(type):
""" Metaclass ensures that there is only one prefix and uri instanciated
for a particular namespace """
_ns_instances = []
def __is_new_ns__(cls, namespace):
""" cycles through the instanciated namespaces to see if it has already
been created
args:
namespace: tuple of prefix and uri to check
"""
for ns in cls._ns_instances:
if ns[0] == namespace[0] and ns[1] == namespace[1]:
return ns
elif ns[0] == namespace[0] and ns[1] != namespace[1]:
raise NsPrefixExistsError(namespace,
ns,
"prefix [%s] already assigned to [%s]" %
(namespace, ns[1]))
elif ns[0] != namespace[0] and ns[1] == namespace[1]:
raise NsUriExistsError(namespace,
ns,
"uri [%s] already assigned to [%s]" %
(namespace, ns[0]))
return True
@staticmethod
def __format_args__(*args):
""" formats the args so that prefix is lowercased and validates the
uri
"""
ns_uri = RdfNsManager.clean_iri(str(args[1])).strip()
if ns_uri[-1] not in ['/', '#']:
raise NsUriBadEndingError("incorrect ending for '%s', '%s'" %
args[:2])
return (args[0].lower(), ns_uri)
def __call__(cls, *args, **kwargs):
try:
args = cls.__format_args__(*args)
is_new = cls.__is_new_ns__(args)
if is_new == True:
new_ns = super(RdfNamespaceMeta, cls).__call__(*args, **kwargs)
cls._ns_instances.append(new_ns)
try:
NSM.bind(new_ns[0], new_ns[1])
except NameError:
pass
return new_ns
return is_new
except (NsUriExistsError,
NsPrefixExistsError,
NsUriBadEndingError) as err:
if kwargs.get('override') == True:
setattr(err.old_ns, '__ns__' , err.new_ns)
return err.old_ns
elif kwargs.get('ignore_errors', False):
return err.old_ns
raise err
def __iter__(cls):
return iter(cls._ns_instances)
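
A sketch of the metaclass's dedup and override behavior, using the kwargs documented on RdfNamespace above (the override path assumes the NSM manager exists, since __setattr__ re-registers the prefix on it):

a = RdfNamespace("ex", "http://example.org/ns/")
b = RdfNamespace("ex", "http://example.org/ns/")   # identical pair
assert a is b                                      # deduplicated

# Re-binding the prefix to a new uri raises NsPrefixExistsError unless
# override=True, which repoints the existing namespace and returns it.
c = RdfNamespace("ex", "http://example.org/other/", override=True)
assert c is a and a[1] == "http://example.org/other/"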
|
class RdfNamespaceMeta(type):
''' Metaclass ensures that there is only one prefix and uri instantiated
for a particular namespace '''
def __is_new_ns__(cls, namespace):
''' cycles through the instantiated namespaces to see if it has already
been created
args:
namespace: tuple of prefix and uri to check
'''
pass
@staticmethod
def __format_args__(*args):
''' formats the args so that prefix is lowercased and validates the
uri
'''
pass
def __call__(cls, *args, **kwargs):
pass
def __iter__(cls):
pass
| 6 | 3 | 14 | 1 | 11 | 2 | 4 | 0.23 | 1 | 7 | 4 | 1 | 3 | 0 | 4 | 17 | 66 | 8 | 48 | 12 | 42 | 11 | 35 | 10 | 30 | 6 | 2 | 3 | 14 |
143,247 |
KnowledgeLinks/rdfframework
|
KnowledgeLinks_rdfframework/rdfframework/datatypes/namespaces.py
|
rdfframework.datatypes.namespaces.Uri
|
class Uri(BaseRdfDataType, str, metaclass=RegPerformInstanceMeta):
""" URI/IRI class for working with RDF data """
class_type = "Uri"
type = "uri"
default_method = "pyuri"
es_type = "text"
performance_mode = True
performance_attrs = PERFORMANCE_ATTRS
def __new__(cls, *args, **kwargs):
value = args[0]
if not isinstance(args[0], tuple):
value = NSM.get_uri_parts(args[0])
args = [pyuri_formatter(*value)]
newobj = str.__new__(cls, *args)
newobj.value = value
newobj.pyuri = args[0]
return newobj
def __init__(self, value):
# if the performance_mode is set then the value for the listed
# attributes is calculated at instantiation
if self.performance_mode:
for attr in self.performance_attrs:
if attr != 'pyuri':
setattr(self, attr, str(getattr(self, "__%s__" % attr)))
self.hash_val = hash(self.pyuri)
# def __call__(self):
# return
# __wrapped__ = Uri
# def __eq__(self, value):
# if not isinstance(value, Uri.__wrapped__):
# # pdb.set_trace()
# value = Uri(value)
# if self.value == value.value:
# return True
# return False
@property
def sparql(self):
""" Returns the URI in a SPARQL format """
return ttl_formatter(*self.value)
@property
def sparql_uri(self):
""" Returns the URI in a full http format with '<' and '>'
encapsulation
"""
return uri_formatter(*self.value)
@property
def to_json(self):
""" Returns the json formatting """
return self.clean_uri
@property
def rdflib(self):
""" Returns the rdflibURI reference """
return rdflib_formatter(*self.value)
@property
def clean_uri(self):
""" Returns the URI in a full http format WITHOUT '<' and '>'
encapsulation
"""
return http_formatter(*self.value)
@property
def etree(self):
"""
Returns string in the python xml etree format
"""
return xmletree_formatter(*self.value)
def __str__(self):
return self.clean_uri
# return self.sparql
def __repr__(self):
return self.pyuri
def __hash__(self):
return self.hash_val
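
A usage sketch, assuming the 'schema' prefix has already been bound on the NSM manager (Uri routes string input through NSM.get_uri_parts) and that the formatter outputs follow the patterns named by the properties above:

name = Uri("schema:name")
name.sparql       # ttl form, e.g. 'schema:name'
name.sparql_uri   # bracketed form, e.g. '<http://schema.org/name>'
name.clean_uri    # full http form without brackets; also what str(name) returns
assert Uri("schema:name") == name      # str subclass: equal pyuri strings
assert hash(name) == hash(name.pyuri)  # hash_val cached at instantiation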
|
class Uri(BaseRdfDataType, str, metaclass=RegPerformInstanceMeta):
''' URI/IRI class for working with RDF data '''
def __new__(cls, *args, **kwargs):
pass
def __init__(self, value):
pass
@property
def sparql(self):
''' Returns the URI in a SPARQL format '''
pass
@property
def sparql_uri(self):
''' Returns the URI in a full http format with '<' and '>'
encapsulation
'''
pass
@property
def to_json(self):
''' Returns the json formatting '''
pass
@property
def rdflib(self):
''' Returns the rdflibURI reference '''
pass
@property
def clean_uri(self):
''' Returns the URI in a full http format WITHOUT '<' and '>'
encapsulation
'''
pass
@property
def etree(self):
'''
Returns string in the python xml etree format
'''
pass
def __str__(self):
pass
def __repr__(self):
pass
def __hash__(self):
pass
| 18 | 7 | 4 | 0 | 3 | 1 | 1 | 0.57 | 3 | 1 | 0 | 0 | 11 | 1 | 11 | 109 | 84 | 12 | 46 | 28 | 28 | 26 | 40 | 22 | 28 | 4 | 5 | 3 | 15 |