Dataset columns (dtype and value range):

| column | dtype | min | max |
|---|---|---|---|
| id | int64 | 0 | 843k |
| repository_name | string | 7 | 55 |
| file_path | string | 9 | 332 |
| class_name | string | 3 | 290 |
| human_written_code | string | 12 | 4.36M |
| class_skeleton | string | 19 | 2.2M |
| total_program_units | int64 | 1 | 9.57k |
| total_doc_str | int64 | 0 | 4.2k |
| AvgCountLine | float64 | 0 | 7.89k |
| AvgCountLineBlank | float64 | 0 | 300 |
| AvgCountLineCode | float64 | 0 | 7.89k |
| AvgCountLineComment | float64 | 0 | 7.89k |
| AvgCyclomatic | float64 | 0 | 130 |
| CommentToCodeRatio | float64 | 0 | 176 |
| CountClassBase | float64 | 0 | 48 |
| CountClassCoupled | float64 | 0 | 589 |
| CountClassCoupledModified | float64 | 0 | 581 |
| CountClassDerived | float64 | 0 | 5.37k |
| CountDeclInstanceMethod | float64 | 0 | 4.2k |
| CountDeclInstanceVariable | float64 | 0 | 299 |
| CountDeclMethod | float64 | 0 | 4.2k |
| CountDeclMethodAll | float64 | 0 | 4.2k |
| CountLine | float64 | 1 | 115k |
| CountLineBlank | float64 | 0 | 9.01k |
| CountLineCode | float64 | 0 | 94.4k |
| CountLineCodeDecl | float64 | 0 | 46.1k |
| CountLineCodeExe | float64 | 0 | 91.3k |
| CountLineComment | float64 | 0 | 27k |
| CountStmt | float64 | 1 | 93.2k |
| CountStmtDecl | float64 | 0 | 46.1k |
| CountStmtExe | float64 | 0 | 90.2k |
| MaxCyclomatic | float64 | 0 | 759 |
| MaxInheritanceTree | float64 | 0 | 16 |
| MaxNesting | float64 | 0 | 34 |
| SumCyclomatic | float64 | 0 | 6k |

For string columns, min and max are string lengths. Each preview row below starts with `id | repository_name | file_path | class_name`, then shows the `human_written_code` and `class_skeleton` fields, and ends with a numeric row holding the remaining metric columns (`total_program_units` through `SumCyclomatic`) in the order listed above.
4,700 | AndresMWeber/Nomenclate | tests/test_tokens_handler.py | tests.test_tokens_handler.TestStr
human_written_code:
class TestStr(TestTokenAttrBase):
def test_str(self):
tokens = ['name', 'decorator', 'purpose', 'childtype', 'type', 'location', 'side', 'var']
for token in tokens:
self.assertIn(token, str(self.token_attr_dict_handler))
class_skeleton:
class TestStr(TestTokenAttrBase):
def test_str(self):
pass
| 2 | 0 | 4 | 0 | 4 | 0 | 2 | 0 | 1 | 1 | 0 | 0 | 1 | 0 | 1 | 78 | 5 | 0 | 5 | 4 | 3 | 0 | 5 | 4 | 3 | 2 | 4 | 1 | 2 |
4,701 | AndresMWeber/Nomenclate | nomenclate/core/tokens.py | nomenclate.core.tokens.TokenAttrList
human_written_code:
class TokenAttrList(tools.Serializable):
def __init__(self, token_attrs):
self.token_attrs = [TokenAttr(token_attr, "") for token_attr in token_attrs]
def reset(self):
for token_attr in self.token_attrs:
token_attr.set("")
@property
def unset_token_attrs(self):
return [token_attr for token_attr in self.token_attrs if token_attr.label == ""]
def purge_tokens(self, input_token_attrs=None):
""" Removes all specified token_attrs that exist in instance.token_attrs
:param token_attrs: list(str), list of string values of tokens to remove. If None, removes all
"""
if input_token_attrs is None:
remove_attrs = self.token_attrs
else:
remove_attrs = [
token_attr
for token_attr in self.token_attrs
if token_attr.token in input_token_attrs
]
self.token_attrs = [
token_attr for token_attr in self.token_attrs if token_attr not in remove_attrs
]
@classmethod
def from_json(cls, json_blob):
instance = cls(list(json_blob))
instance.merge_json(json_blob)
return instance
def merge_token_attr(self, token_attr):
if self.has_token_attr(token_attr):
getattr(self, token_attr.token).merge_json(token_attr.to_json())
else:
self.token_attrs.append(token_attr)
def has_token_attr(self, token):
return any([token_attr for token_attr in self.token_attrs if token_attr.token == token])
def merge_json(self, json_blob):
for token_name, token_attr_blob in json_blob.items():
token_name = token_name.lower()
try:
if not isinstance(token_attr_blob, dict):
token_attr_blob = {"token": token_name, "label": token_attr_blob}
getattr(self, token_name).merge_serialization(token_attr_blob)
except (AttributeError, IndexError):
self.merge_token_attr(TokenAttr.from_json(token_attr_blob))
def to_json(self):
return {token_attr.token: token_attr.to_json() for token_attr in self.token_attrs}
def __eq__(self, other):
if isinstance(other, self.__class__):
return all(
map(
lambda x: x[0] == x[1],
zip(
sorted([t for t in self.token_attrs], key=lambda x: x.token),
sorted([t for t in other.token_attrs], key=lambda x: x.token),
),
)
)
return False
def __getattr__(self, item):
try:
return object.__getattribute__(self, item)
except AttributeError:
try:
return [token_attr for token_attr in self.token_attrs if token_attr.token == item][
0
]
except IndexError:
pass
raise AttributeError
def __str__(self):
return " ".join(
["%s:%r" % (token_attr.token, token_attr.label) for token_attr in self.token_attrs]
)
def __repr__(self):
return "<%s %s>" % (self.__class__.__name__, self.token_attrs)
def __iter__(self):
return iter(self.token_attrs)
def __getitem__(self, item):
return [token_attr for token_attr in self.token_attrs if token_attr.token == item][0]
class_skeleton:
class TokenAttrList(tools.Serializable):
def __init__(self, token_attrs):
pass
def reset(self):
pass
@property
def unset_token_attrs(self):
pass
def purge_tokens(self, input_token_attrs=None):
''' Removes all specified token_attrs that exist in instance.token_attrs
:param token_attrs: list(str), list of string values of tokens to remove. If None, removes all
'''
pass
@classmethod
def from_json(cls, json_blob):
pass
def merge_token_attr(self, token_attr):
pass
def has_token_attr(self, token):
pass
def merge_json(self, json_blob):
pass
def to_json(self):
pass
def __eq__(self, other):
pass
def __getattr__(self, item):
pass
def __str__(self):
pass
def __repr__(self):
pass
def __iter__(self):
pass
def __getitem__(self, item):
pass
| 18 | 1 | 5 | 0 | 5 | 0 | 2 | 0.04 | 1 | 7 | 1 | 0 | 14 | 1 | 15 | 21 | 97 | 17 | 77 | 23 | 59 | 3 | 55 | 21 | 39 | 4 | 2 | 3 | 24 |
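As an illustrative aside (not part of the dataset row), here is a minimal usage sketch of the `TokenAttrList` behavior quoted above. It assumes the `nomenclate` package is installed so that `nomenclate.core.tokens` imports as in the row's `file_path`; the expected values in the comments are traced from the quoted code rather than taken from the dataset.

```python
# Sketch: exercising TokenAttrList as defined in nomenclate/core/tokens.py above.
# Assumes the nomenclate package is installed; expected values follow from the quoted code.
from nomenclate.core.tokens import TokenAttrList

token_list = TokenAttrList(["name", "type", "side"])   # each token starts with an empty label
print([attr.token for attr in token_list])             # ['name', 'type', 'side']
print(len(token_list.unset_token_attrs))               # 3 -- every label is still ""

token_list.purge_tokens(["type"])                      # drop only the 'type' TokenAttr
print([attr.token for attr in token_list])             # ['name', 'side']

token_list.purge_tokens()                              # no argument: remove everything
print([attr.token for attr in token_list])             # []
```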
4,702 | AndresMWeber/Nomenclate | nomenclate/core/processing.py | nomenclate.core.processing.Nomenclative
human_written_code:
class Nomenclative(object):
def __init__(self, input_str):
self.raw_formatted_string = input_str
self.token_matches = []
def process_matches(self):
build_str = self.raw_formatted_string
for token_match in self.token_matches:
# Do not process static token matches
if token_match.match.startswith('(') or token_match.match.endswith(')'):
continue
if token_match.match == build_str[token_match.start:token_match.end]:
build_str = build_str[:token_match.start] + token_match.sub + build_str[token_match.end:]
self.adjust_other_matches(token_match)
return build_str
def adjust_other_matches(self, adjuster_match):
for token_match in [token_match for token_match in self.token_matches if token_match != adjuster_match]:
try:
token_match.adjust_position(adjuster_match)
except IndexError:
pass
adjuster_match.end = adjuster_match.start + len(adjuster_match.sub)
adjuster_match.match = adjuster_match.sub
def add_match(self, regex_match, substitution):
token_match = TokenMatch(regex_match, substitution)
try:
self.validate_match(token_match)
self.token_matches.append(token_match)
except (IndexError, exceptions.OverlapError):
raise exceptions.OverlapError('Not adding match %s as it conflicts with a preexisting match' % token_match)
def validate_match(self, token_match_candidate):
for token_match in self.token_matches:
try:
token_match.overlaps(token_match_candidate)
except exceptions.OverlapError:
raise exceptions.OverlapError(
"Cannot add match %s due to overlap with %s" % (token_match, token_match_candidate))
def __str__(self):
matches = ' ' if not self.token_matches else ' '.join(map(str, self.token_matches))
return 'format: %s: %s' % (self.raw_formatted_string, matches)
class_skeleton:
class Nomenclative(object):
def __init__(self, input_str):
pass
def process_matches(self):
pass
def adjust_other_matches(self, adjuster_match):
pass
def add_match(self, regex_match, substitution):
pass
def validate_match(self, token_match_candidate):
pass
def __str__(self):
pass
| 7 | 0 | 7 | 0 | 6 | 0 | 3 | 0.03 | 1 | 5 | 2 | 0 | 6 | 2 | 6 | 6 | 46 | 7 | 38 | 15 | 31 | 1 | 37 | 15 | 30 | 4 | 1 | 2 | 15 |
4,703 | AndresMWeber/Nomenclate | nomenclate/core/tools.py | nomenclate.core.tools.NomenclateNotifier
human_written_code:
class NomenclateNotifier(object):
def __init__(self, observer):
self.observers = []
self.register_observer(observer)
def register_observer(self, observer_function):
self.observers.append(observer_function)
def notify_observer(self, *args, **kwargs):
for observer_function in self.observers:
observer_function(*args, **kwargs)
class_skeleton:
class NomenclateNotifier(object):
def __init__(self, observer):
pass
def register_observer(self, observer_function):
pass
def notify_observer(self, *args, **kwargs):
pass
| 4 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 3 | 1 | 3 | 3 | 11 | 2 | 9 | 6 | 5 | 0 | 9 | 6 | 5 | 2 | 1 | 1 | 4 |
4,704 | AndresMWeber/Nomenclate | nomenclate/core/renderers.py | nomenclate.core.renderers.RenderBase
human_written_code:
class RenderBase(metaclass=rendering.InputRenderer):
token = "default"
@classmethod
def render(
cls,
value,
token,
nomenclate_object,
config_query_path=None,
return_type=list,
use_value_in_query_path=True,
**kwargs
):
""" Default renderer for a token. It checks the config for a match, if not found it uses the value provided.
:param value: str, value we are trying to match (or config setting for the token)
:param token: str, token we are searching for
:param nomenclate_object: nomenclate.core.nomenclate.Nomenclate, instance of nomenclate object to query
:param kwargs: any config settings that relate to the token as found from the nomenclate instance
:return: str, the resulting syntactically rendered string
"""
if config_query_path == None:
config_query_path = nomenclate_object.OPTIONS_PATH + [token]
if use_value_in_query_path:
config_query_path += [value]
config_matches = cls.get_config_match(
value, config_query_path, return_type, nomenclate_object, **kwargs
)
options = cls.flatten_input(config_matches, value)
option = cls.process_criteria(token, options, **kwargs) if options else value
return cls.process_token_augmentations(option, token_attr=getattr(nomenclate_object, token))
@classmethod
def process_token_augmentations(cls, value, token_attr):
""" Uses any found augmentations from the TokenAttr to augment the final rendered value. Currently
this only processes the augmentations:
TokenAttr().case
TokenAttr().prefix
TokenAttr().suffix
:param value: str, the resulting rendered string from the TokenAttr
:param token_attr: nomenclate.core.tokens.TokenAttr, the processed TokenAttr to be used to query settings.
:return: str, final augmented string
"""
return "{}{}{}".format(
token_attr.prefix, getattr(str, token_attr.case, str)(value), token_attr.suffix
)
@classmethod
def get_config_match(cls, query_string, entry_path, return_type, nomenclate_object, **kwargs):
""" Queries the nomenclate's config data for corresponding entries and filters against the incoming
filter_kwargs as detailed in cls.process_criteria
:param query_string: str, string to look for once we find a match in the config
:param token: str, the given token to search for
:param entry_path: list(str), the query path to the subsection we are looking for to get a match in the config
:param return_type: type, the type of return value we want (usually should be <type str>)
:param nomenclate_object: nomenclate.core.nomenclature.Nomenclate, instance to query against (has config data)
:return: object, whatever return type was specified
"""
try:
return nomenclate_object.CFG.get(entry_path, return_type=return_type)
except exceptions.ResourceNotFoundError:
return query_string
@classmethod
def flatten_input(cls, options, query_string):
""" Takes a list, dict or str of options and outputs a filtered list
Behavior list:
dict: flattens/filters the dict to only key matches based on query_string
list: just flattens a list just in case it's nested.
str: returns a list with the string in it
:param options: list, dict, str, input options to flatten/filter
:param query_string: str, string we are looking for if the input is a dictionary
:return: list, flattened list.
"""
if not isinstance(options, (dict, list)):
options = [options]
else:
if isinstance(options, dict):
options = list(gen_dict_key_matches(query_string, options))
options = list(flatten(options))
return options
@classmethod
def process_criteria(cls, token, options, **kwargs):
""" Each kwarg passed is considered a filter. The kwarg is in format <token>_<filter function> and if the
filter function is found in __builtins__ it uses the filter function and checks the result against
the kwarg's value. If it passes the check it is filtered out of the current list of options
:param token: str, token we are querying
:param options: list(str), the options to filter with kwargs
:param kwargs: dict(str: str), dictionary of {<token>_<__builtin__ function>: compare value}
:return:
"""
options = list(options)
criteria_matches = list(options)
for criteria_function_name, criteria in kwargs.items():
if not criteria_function_name and not criteria:
continue
criteria_function_name = criteria_function_name.replace("%s_" % token, "")
try:
builtin_func = getattr(builtins, criteria_function_name)
criteria_matches = [
option for option in options if builtin_func(option) == criteria
]
except AttributeError:
pass
if not criteria_matches:
criteria_matches = [min(options, key=lambda x: abs(builtin_func(x) - criteria))]
return criteria_matches[0] if criteria_matches else options[0]
class_skeleton:
class RenderBase(metaclass=rendering.InputRenderer):
@classmethod
def render(
cls,
value,
token,
nomenclate_object,
config_query_path=None,
return_type=list,
use_value_in_query_path=True,
**kwargs
):
''' Default renderer for a token. It checks the config for a match, if not found it uses the value provided.
:param value: str, value we are trying to match (or config setting for the token)
:param token: str, token we are searching for
:param nomenclate_object: nomenclate.core.nomenclate.Nomenclate, instance of nomenclate object to query
:param kwargs: any config settings that relate to the token as found from the nomenclate instance
:return: str, the resulting syntactically rendered string
'''
pass
@classmethod
def process_token_augmentations(cls, value, token_attr):
''' Uses any found augmentations from the TokenAttr to augment the final rendered value. Currently
this only processes the augmentations:
TokenAttr().case
TokenAttr().prefix
TokenAttr().suffix
:param value: str, the resulting rendered string from the TokenAttr
:param token_attr: nomenclate.core.tokens.TokenAttr, the processed TokenAttr to be used to query settings.
:return: str, final augmented string
'''
pass
@classmethod
def get_config_match(cls, query_string, entry_path, return_type, nomenclate_object, **kwargs):
''' Queries the nomenclate's config data for corresponding entries and filters against the incoming
filter_kwargs as detailed in cls.process_criteria
:param query_string: str, string to look for once we find a match in the config
:param token: str, the given token to search for
:param entry_path: list(str), the query path to the subsection we are looking for to get a match in the config
:param return_type: type, the type of return value we want (usually should be <type str>)
:param nomenclate_object: nomenclate.core.nomenclature.Nomenclate, instance to query against (has config data)
:return: object, whatever return type was specified
'''
pass
@classmethod
def flatten_input(cls, options, query_string):
''' Takes a list, dict or str of options and outputs a filtered list
Behavior list:
dict: flattens/filters the dict to only key matches based on query_string
list: just flattens a list just in case it's nested.
str: returns a list with the string in it
:param options: list, dict, str, input options to flatten/filter
:param query_string: str, string we are looking for if the input is a dictionary
:return: list, flattened list.
'''
pass
@classmethod
def process_criteria(cls, token, options, **kwargs):
''' Each kwarg passed is considered a filter. The kwarg is in format <token>_<filter function> and if the
filter function is found in __builtins__ it uses the filter function and checks the result against
the kwarg's value. If it passes the check it is filtered out of the current list of options
:param token: str, token we are querying
:param options: list(str), the options to filter with kwargs
:param kwargs: dict(str: str), dictionary of {<token>_<__builtin__ function>: compare value}
:return:
'''
pass
| 11 | 5 | 21 | 2 | 11 | 8 | 3 | 0.69 | 1 | 5 | 1 | 4 | 0 | 0 | 5 | 26 | 116 | 13 | 61 | 27 | 41 | 42 | 40 | 13 | 34 | 6 | 3 | 2 | 16 |
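As an illustrative aside (not part of the dataset row), the `<token>_<builtin function>` keyword-filter convention described in `RenderBase.process_criteria`'s docstring can be seen with a small call. This assumes the `nomenclate` package is installed so `RenderBase` imports from the module shown in the row; the expected results are traced from the quoted implementation.

```python
# Sketch of the "<token>_<builtin function>" keyword filter used by RenderBase.process_criteria.
# Assumes nomenclate is installed; expected values are traced from the code quoted above.
from nomenclate.core.renderers import RenderBase

options = ["l", "lt", "left"]

# 'side_len' -> strips the 'side_' prefix, looks up builtins.len,
# and keeps options where len(option) == criteria value.
print(RenderBase.process_criteria("side", options, side_len=4))   # 'left'
print(RenderBase.process_criteria("side", options, side_len=1))   # 'l'

# When nothing matches exactly, the closest option by |len(option) - criteria| wins.
print(RenderBase.process_criteria("side", options, side_len=3))   # 'lt'
```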
4,705 | AndresMWeber/Nomenclate | nomenclate/core/renderers.py | nomenclate.core.renderers.RenderDate
human_written_code:
class RenderDate(RenderBase):
token = "date"
@classmethod
def render(
cls,
date,
token,
nomenclate_object,
config_query_path=None,
return_type=list,
use_value_in_query_path=True,
**kwargs
):
if date == "now":
d = datetime.datetime.now()
else:
try:
d = p.parse(date)
except ValueError:
return ""
date_format = getattr(nomenclate_object, "%s_format" % cls.token, "%Y-%m-%d")
return d.strftime(date_format)
class_skeleton:
class RenderDate(RenderBase):
@classmethod
def render(
cls,
date,
token,
nomenclate_object,
config_query_path=None,
return_type=list,
use_value_in_query_path=True,
**kwargs
):
pass
| 3 | 0 | 19 | 0 | 19 | 0 | 3 | 0 | 1 | 3 | 0 | 0 | 0 | 0 | 1 | 27 | 23 | 1 | 22 | 15 | 10 | 0 | 11 | 5 | 9 | 3 | 4 | 2 | 3 |
4,706 | AndresMWeber/Nomenclate | nomenclate/core/renderers.py | nomenclate.core.renderers.RenderLod
human_written_code:
class RenderLod(RenderVar):
token = "lod"
class_skeleton:
class RenderLod(RenderVar):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 5 | 0 | 0 |
4,707 | AndresMWeber/Nomenclate | nomenclate/core/renderers.py | nomenclate.core.renderers.RenderType
human_written_code:
class RenderType(RenderBase):
token = "type"
@classmethod
def render(
cls,
engine_type,
token,
nomenclate_object,
config_query_path=None,
return_type=list,
use_value_in_query_path=True,
**kwargs
):
return super(RenderType, cls).render(
engine_type,
cls.token,
nomenclate_object,
config_query_path=nomenclate_object.SUFFIXES_PATH,
return_type=dict,
**kwargs
)
class_skeleton:
class RenderType(RenderBase):
@classmethod
def render(
cls,
engine_type,
token,
nomenclate_object,
config_query_path=None,
return_type=list,
use_value_in_query_path=True,
**kwargs
):
pass
| 3 | 0 | 18 | 0 | 18 | 0 | 1 | 0 | 1 | 3 | 0 | 0 | 0 | 0 | 1 | 27 | 22 | 1 | 21 | 13 | 9 | 0 | 4 | 3 | 2 | 1 | 4 | 0 | 1 |
4,708 | AndresMWeber/Nomenclate | nomenclate/core/renderers.py | nomenclate.core.renderers.RenderVar
human_written_code:
class RenderVar(RenderBase):
token = "var"
@classmethod
def render(
cls,
var,
token,
nomenclate_object,
config_query_path=None,
return_type=list,
use_value_in_query_path=True,
**kwargs
):
var_format = kwargs.get("%s_format" % cls.token, "A")
if isinstance(var, int):
var = cls._get_variation_id(var, var_format.isupper())
return cls.process_token_augmentations(var, token_attr=getattr(nomenclate_object, token))
@staticmethod
def _get_variation_id(value, capital=False):
""" Convert an integer value to a character. a-z then double aa-zz etc
Args:
value (int): integer index we're looking up
capital (bool): whether we convert to capitals or not
Returns (str): alphanumeric representation of the index
"""
# Reinforcing type just in case a valid string was entered
value = int(value)
base_power = base_start = base_end = 0
while value >= base_end:
base_power += 1
base_start = base_end
base_end += pow(26, base_power)
base_index = value - base_start
# create alpha representation
alphas = ["a"] * base_power
for index in range(base_power - 1, -1, -1):
alphas[index] = chr(int(97 + (base_index % 26)))
base_index /= 26
characters = "".join(alphas)
return characters.upper() if capital else characters
class_skeleton:
class RenderVar(RenderBase):
@classmethod
def render(
cls,
var,
token,
nomenclate_object,
config_query_path=None,
return_type=list,
use_value_in_query_path=True,
**kwargs
):
pass
@staticmethod
def _get_variation_id(value, capital=False):
''' Convert an integer value to a character. a-z then double aa-zz etc
Args:
value (int): integer index we're looking up
capital (bool): whether we convert to capitals or not
Returns (str): alphanumeric representation of the index
'''
pass
| 5 | 1 | 19 | 1 | 14 | 4 | 3 | 0.25 | 1 | 3 | 0 | 1 | 0 | 0 | 2 | 28 | 44 | 4 | 32 | 21 | 18 | 8 | 21 | 10 | 18 | 4 | 4 | 1 | 6 |
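As an illustrative aside (not part of the dataset row), the base-26 "a-z then aa-zz" scheme documented in `RenderVar._get_variation_id` can be demonstrated with a few calls. This assumes the `nomenclate` package is installed; the expected values in the comments are traced from the quoted code.

```python
# Sketch of the integer -> alphabetic variation index scheme documented in
# RenderVar._get_variation_id ("a-z then double aa-zz etc").
# Assumes nomenclate is installed; expected values are traced from the quoted code.
from nomenclate.core.renderers import RenderVar

print(RenderVar._get_variation_id(0))                 # 'a'
print(RenderVar._get_variation_id(25))                # 'z'
print(RenderVar._get_variation_id(26))                # 'aa'
print(RenderVar._get_variation_id(27))                # 'ab'
print(RenderVar._get_variation_id(2, capital=True))   # 'C'
```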
4,709 | AndresMWeber/Nomenclate | nomenclate/core/nameparser.py | nomenclate.core.nameparser.NameParser
human_written_code:
class NameParser(object):
""" This parses names of assets. It assumes the usual convention of strings separated by underscores.
"""
CONFIG_SIDES = config.ConfigParse().get(["options", "side"])
CONFIG_DISCIPLINES = config.ConfigParse().get(["options", "discipline"])
PARSABLE = ["basename", "version", "date", "side", "udim"]
REGEX_BASENAME = r"(?:^[-._]+)?([a-zA-Z0-9_\-|]+?)(?=[-._]{2,}|\.)"
REGEX_SEPARATORS = r"[ ,_!?:\.\-]"
REGEX_ABBR_ISLAND = r"(?:[{SEP}]+)({ABBR})(?:[{SEP}]+)"
REGEX_ABBR_SEOS = r"(?:^|[a-z]|{SEP})({ABBR})(?:$|[A-Z][a-z]+|{SEP})"
REGEX_ABBR_CAMEL = r"(?:[a-z])({ABBR})(?:$|[A-Z]|{SEP})"
REGEX_VERSION = (
r"(?:^|[a-z]{{2,}}[a-uw-z]+|{SEP}[a-z]?[a-uw-z]+|{SEP})([vV]?[0-9]{{1,4}})(?={SEP}|$)"
)
REGEX_UDIM = r"(?:[a-zA-Z]|[._-])(1[0-9]{3})(?:[._-])"
REGEX_DATE = r"(?<!\d)(%s)(?!\d)"
REGEX_CAMEL = r"(?:{SEP}?)((?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z]))(?:{SEP}?)"
@classmethod
def parse_name(cls, name):
""" Parses a name into a dictionary of identified subsections with accompanying information to
correctly identify and replace if necessary
:param name: str, string to be parsed
:return: dict, dictionary with relevant parsed information
"""
parse_dict = dict.fromkeys(cls.PARSABLE, None)
parse_dict["date"] = cls.get_date(name)
parse_dict["version"] = cls.get_version(name)
parse_dict["udim"] = cls.get_udim(name)
parse_dict["side"] = cls.get_side(name)
parse_dict["basename"] = cls.get_base_naive(cls._reduce_name(name, parse_dict))
return parse_dict
@classmethod
def get_side(cls, name, ignore=""):
""" Checks a string for a possible side string token, this assumes its on its own
and is not part of or camel cased and combined with a word. Returns first found side to reduce duplicates.
We can be safe to assume the abbreviation for the side does not have camel casing within its own word.
:param name: str, string that represents a possible name of an object
:param name: str, string that represents a possible name of an object
:return: (None, str), either the found permutation of the side found in name or None
"""
for side in cls.CONFIG_SIDES:
""" Tried using a regex, however it would've taken too long to debug
side_regex = cls._build_abbreviation_regex(side)
result = cls._generic_search(name, side_regex, metadata={'side': side}, ignore=ignore)
if result:
return result
"""
for permutations in cls.get_string_camel_patterns(side):
for permutation in permutations:
result = cls._generic_search(
name, permutation, metadata={"side": side}, ignore=ignore
)
if result:
return result
return None
@classmethod
def get_discipline(cls, name, ignore="", min_length=3):
""" Checks a string for a possible discipline string token, this assumes its on its own
and is not part of or camel cased and combined with a word. Returns first found match to reduce duplicates.
We can be safe to assume the abbreviation for the discipline does not have camel casing within its own word.
:param name: str, the string based object name
:param ignore: str, specific ignore string for the search to avoid
:param min_length: int, minimum length for possible abbreviations of disciplines. Lower = more wrong guesses.
:return: dict, match dictionary
"""
for discipline in cls.CONFIG_DISCIPLINES:
re_abbr = "({RECURSE}(?=[0-9]|[A-Z]|{SEPARATORS}))".format(
RECURSE=cls._build_abbreviation_regex(discipline), SEPARATORS=cls.REGEX_SEPARATORS
)
matches = cls._get_regex_search(name, re_abbr, ignore=ignore)
if matches:
matches = [
m
for m in matches
if re.findall("([a-z]{%d,})" % min_length, m["match"], flags=re.IGNORECASE)
]
if matches:
return matches[-1]
return None
@classmethod
def get_base(cls, name):
""" Checks a string for a possible base name of an object (no prefix, no suffix).
We need to do a full parse to make sure we've ruled out all the possible other parse queries
:param name: str, string that represents a possible name of an object
:return: str, the detected basename
"""
return cls.parse_name(name).get("basename", None)
@classmethod
def get_base_naive(cls, name, ignore=""):
""" Checks a string for a possible base name of an object (no prefix, no suffix).
We need to do a full parse to make sure we've ruled out all the possible other parse queries
:param name: str, string that represents a possible name of an object
:return: str, the detected basename
"""
return cls._get_regex_search(name, cls.REGEX_BASENAME, match_index=0, ignore=ignore)
@classmethod
def get_version(cls, name):
""" Checks a string for a possible version of an object (no prefix, no suffix).
Assumes only up to 4 digit padding
:param name: str, string that represents a possible name of an object
:return: (float, int, list(str), None), gets the version number then the string matches
"""
# Dates can confuse th
# is stuff, so we'll check for that first and remove it from the string if found
try:
date = cls.get_date(name)
date = date["datetime"].strftime(date["format"])
except TypeError:
pass
return cls.get_version_naive(name, ignore=date or "")
@classmethod
def get_version_naive(cls, name, ignore=""):
""" Checks a string for a possible version of an object (no prefix, no suffix) without filtering date out
Assumes only up to 4 digit padding
:param name: str, string that represents a possible name of an object
:return: (float, int, list(str), None), gets the version number then the string matches
"""
match = cls._get_regex_search(
name, cls.REGEX_VERSION.format(SEP=cls.REGEX_SEPARATORS), ignore=ignore
)
if match is not None:
if len(match) > 1:
for m in match:
m.update({"version": int(m["match"].upper().replace("V", ""))})
compound_version = ".".join([str(m["version"]) for m in match])
compound_version = (
float(compound_version)
if compound_version.count(".") == 1
else compound_version
)
return {
"compound_matches": match,
"compound_version": compound_version,
"pattern": match[0]["pattern"],
"input": match[0]["input"],
}
elif len(match) == 1:
match = match[0]
match.update({"version": int(match["match"].upper().replace("V", ""))})
return match
return None
@classmethod
def get_udim(cls, name):
""" Checks a string for a possible base name of an object (no prefix, no suffix)
:param name: str, string that represents a possible name of an object
:returns: int, the last found match because convention keeps UDIM markers at the end.
"""
match = cls._get_regex_search(name, cls.REGEX_UDIM, match_index=-1)
if match:
match.update({"match_int": int(match["match"])})
return match
return None
@classmethod
def get_short(cls, name):
""" Returns the short name of a Maya asset name or path
:param name: str, string that represents a possible name of an object
:return: str, the short name of the maya node
"""
return name.split("|")[-1].split("//")[-1]
@classmethod
def get_date(cls, name):
""" Checks a string for a possible date formatted into the name. It assumes dates do not have other
numbers at the front or head of the date. Currently only supports dates in 1900 and 2000.
Heavily relies on datetime for error checking to see
if date is actually viable. It follows similar ideas to this post:
http://stackoverflow.com/questions/9978534/match-dates-using-python-regular-expressions
:param name: str, string that represents a possible name of an object
:return: datetime.datetime, datetime object with current time or None if not found
"""
time_formats = [
"%Y-%m-%d_%H-%M-%S",
"%Y-%m-%d-%H-%M-%S",
"%Y-%m-%d--%H-%M-%S",
"%y_%m_%dT%H_%M_%S",
"%Y-%m-%d%H-%M-%S",
"%Y%m%d-%H%M%S",
"%Y%m%d-%H%M",
"%Y-%m-%d",
"%Y%m%d",
"%m_%d_%Y",
"%m_%d_%y",
"%m%d%y",
"%m%d%Y",
"%d_%m_%Y",
"%Y",
"%m-%d-%yy",
"%m%d%Y",
]
mapping = [
("%yy", "(([01]\d{1}))"),
("%Y", "((19|20)\d{2})"),
("%y", "(\d{2})"),
("%d", "(\d{2})"),
("%m", "(\d{2})"),
("%H", "(\d{2})"),
("%M", "(\d{2})"),
("%S", "(\d{2})"),
]
time_regexes = []
for time_format in time_formats:
for k, v in mapping:
time_format = time_format.replace(k, v)
time_regexes.append(time_format)
for time_regex, time_format in zip(time_regexes, time_formats):
match = cls._get_regex_search(
name, cls.REGEX_DATE % time_regex, metadata={"format": time_format}, match_index=0
)
if match:
try:
match.update(
{
"datetime": datetime.datetime.strptime(
match["match"], time_format.replace("%yy", "%y")
)
}
)
return match
except ValueError:
pass
return None
@classmethod
def get_string_camel_patterns(cls, name, min_length=0):
""" Finds all permutations of possible camel casing of the given name
:param name: str, the name we need to get all possible permutations and abbreviations for
:param min_length: int, minimum length we want for abbreviations
:return: list(list(str)), list casing permutations of list of abbreviations
"""
# Have to check for longest first and remove duplicates
patterns = []
abbreviations = list(set(cls._get_abbreviations(name, output_length=min_length)))
abbreviations.sort(key=len, reverse=True)
for abbr in abbreviations:
# We won't check for abbreviations that are stupid eg something with apparent camel casing within
# the word itself like LeF, sorting from:
# http://stackoverflow.com/questions/13954841/python-sort-upper-case-and-lower-case
casing_permutations = list(set(cls._get_casing_permutations(abbr)))
casing_permutations.sort(key=lambda v: (v.upper(), v[0].islower(), len(v)))
permutations = [
permutation
for permutation in casing_permutations
if cls.is_valid_camel(permutation) or len(permutation) <= 2
]
if permutations:
patterns.append(permutations)
return patterns
@classmethod
def _reduce_name(cls, name, parse_dict):
""" Reduces a name against matches found in a parse dictionary
:param name: str, name to be reduced
:param parse_dict: dict, dictionary of matches to reduce against
:return: str, reduced string
"""
# Now remove all found entries to make basename regex have an easier time
removal_indices = []
for _, match in parse_dict.items():
try:
matches = []
if isinstance(match, dict) and "compound_matches" in match:
matches = match.get("compound_matches")
elif not isinstance(match, list) and match is not None:
matches = [match]
for m in matches:
valid_slice = True
slice_a, slice_b = m.get("position")
# Adjust slice positions from previous slices
if removal_indices is []:
removal_indices.append((slice_a, slice_b))
for r_slice_a, r_slice_b in removal_indices:
if slice_a == r_slice_a and slice_b == r_slice_b:
valid_slice = False
if (
slice_a > r_slice_a
or slice_a > r_slice_b
or slice_b > r_slice_b
or slice_b > r_slice_a
):
slice_delta = r_slice_b - r_slice_a
slice_a -= slice_delta
slice_b -= slice_delta
if valid_slice:
name = cls._string_remove_slice(name, slice_a, slice_b)
removal_indices.append((slice_a, slice_b))
except (IndexError, TypeError):
pass
return name
@staticmethod
def _get_regex_search(input_string, regex, metadata={}, match_index=None, ignore="", flags=0):
""" Using this so that all results from the functions return similar results
:param input_string: str, input string to be checked
:param regex: str, input regex to be compiled and searched with
:param match_index: (int, None), whether to get a specific match, if None returns all matches as list
:param metadata: dict, dictionary of extra meta tags needed to identify information
:return: list(dict), list of dictionaries if multiple hits or a specific entry or None
"""
generator = re.compile(regex, flags=flags).finditer(input_string)
matches = []
for obj in generator:
try:
span_a = obj.span(1)
group_a = obj.group(1)
except IndexError:
span_a = obj.span()
group_a = obj.group()
if obj.groups() == ("",):
# Not sure how to account for this situation yet, weird regex.
return True
if group_a not in ignore:
matches.append(
{
"pattern": regex,
"input": input_string,
"position": span_a,
"position_full": obj.span(),
"match": group_a,
"match_full": obj.group(),
}
)
if matches:
for match in matches:
match.update(metadata)
if match_index is not None:
return matches[match_index]
return matches
return None
@classmethod
def _generic_search(cls, name, search_string, metadata={}, ignore=""):
""" Searches for a specific string given three types of regex search types. Also auto-checks for camel casing.
:param name: str, name of object in question
:param search_string: str, string to find and insert into the search regexes
:param metadata: dict, metadata to add to the result if we find a match
:param ignore: str, ignore specific string for the search
:return: dict, dictionary of search results
"""
patterns = [cls.REGEX_ABBR_SEOS, cls.REGEX_ABBR_ISLAND, cls.REGEX_ABBR_CAMEL]
if not search_string[0].isupper():
patterns.remove(cls.REGEX_ABBR_CAMEL)
for pattern in patterns:
search_result = cls._get_regex_search(
name,
pattern.format(ABBR=search_string, SEP=cls.REGEX_SEPARATORS),
metadata=metadata,
match_index=0,
ignore=ignore,
)
if search_result is not None:
if cls.is_valid_camel(
search_result.get("match_full"), strcmp=search_result.get("match")
):
return search_result
return None
@staticmethod
def _get_abbreviations(input_string, output_length=0):
""" Generates abbreviations for input_string
:param input_string: str, name of object
:param output_length: int, optional specific length of abbreviations, default is off
:return: list(str), list of all combinations that include the first letter (possible abbreviations)
"""
for i, j in itertools.combinations(range(len(input_string[1:]) + 1), 2):
abbr = input_string[0] + input_string[1:][i:j]
if len(abbr) >= output_length:
yield abbr
elif output_length == 0:
yield abbr
# Have to add the solitary letter as well
if not output_length or output_length == 1:
yield input_string[0]
@classmethod
def is_valid_camel(cls, input_string, strcmp=None, ignore=""):
""" Checks to see if an input string is valid for use in camel casing
This assumes that all lowercase strings are not valid camel case situations and no camel string
can just be a capitalized word. Took ideas from here:
http://stackoverflow.com/questions/29916065/how-to-do-camelcase-split-in-python
:param input_string: str, input word
:param strcmp: str, force detection on a substring just in case its undetectable
(e.g. part of a section of text that's all lowercase)
:param ignore: str, what kind of string to ignore in the regex search
:return: bool, whether it is valid or not
"""
# clear any non chars from the string
if not input_string:
return False
input_string = "".join([c for c in input_string if c.isalpha()])
matches = cls._get_regex_search(
input_string,
cls.REGEX_CAMEL.format(SEP=cls.REGEX_SEPARATORS),
match_index=0,
ignore=ignore,
)
if matches or input_string == strcmp:
if strcmp:
index = input_string.find(strcmp) - 1
is_camel = strcmp[0].isupper() and input_string[index].islower()
is_input = strcmp == input_string
is_start = index + 1 == 0
return is_camel or is_input or is_start
return True
elif len(input_string) == 1:
return True
return False
@staticmethod
def _split_camel(name):
""" Splits up a camel case name into its constituent components.
:param name: str, name to split
:return: list(str), list of components of the camel case string
"""
return re.sub("(?!^)([A-Z][a-z]+)", r" \1", name).split()
@classmethod
def _get_casing_permutations(cls, input_string):
""" Takes a string and gives all possible permutations of casing for comparative purposes
:param input_string: str, name of object
:return: Generator(str), iterator of all possible permutations of casing for the input_string
"""
if not input_string:
yield ""
else:
first = input_string[:1]
for sub_casing in cls._get_casing_permutations(input_string[1:]):
yield first.lower() + sub_casing
yield first.upper() + sub_casing
@staticmethod
def _string_remove_slice(input_str, start, end):
""" Removes portions of a string
:param input_str: str, input string
:param start: int, end search index
:param end: int, start search index
:return: str, the cut string
"""
if 0 <= start < end <= len(input_str):
return input_str[:start] + input_str[end:]
return input_str
@staticmethod
def _build_abbreviation_regex(input_string):
""" builds a recursive regex based on an input string to find possible abbreviations more simply.
e.g. = punct(u(a(t(i(on?)?)?)?)?)?
:param input_string: str, input string
:return: str, output regex
"""
result = ""
for char in input_string:
result += "[%s%s]?" % (char.upper(), char.lower())
return "(%s)" % result
class_skeleton:
class NameParser(object):
''' This parses names of assets. It assumes the usual convention of strings separated by underscores.
'''
@classmethod
def parse_name(cls, name):
''' Parses a name into a dictionary of identified subsections with accompanying information to
correctly identify and replace if necessary
:param name: str, string to be parsed
:return: dict, dictionary with relevant parsed information
'''
pass
@classmethod
def get_side(cls, name, ignore=""):
''' Checks a string for a possible side string token, this assumes its on its own
and is not part of or camel cased and combined with a word. Returns first found side to reduce duplicates.
We can be safe to assume the abbreviation for the side does not have camel casing within its own word.
:param name: str, string that represents a possible name of an object
:param name: str, string that represents a possible name of an object
:return: (None, str), either the found permutation of the side found in name or None
'''
pass
@classmethod
def get_discipline(cls, name, ignore="", min_length=3):
''' Checks a string for a possible discipline string token, this assumes its on its own
and is not part of or camel cased and combined with a word. Returns first found match to reduce duplicates.
We can be safe to assume the abbreviation for the discipline does not have camel casing within its own word.
:param name: str, the string based object name
:param ignore: str, specific ignore string for the search to avoid
:param min_length: int, minimum length for possible abbreviations of disciplines. Lower = more wrong guesses.
:return: dict, match dictionary
'''
pass
@classmethod
def get_base(cls, name):
''' Checks a string for a possible base name of an object (no prefix, no suffix).
We need to do a full parse to make sure we've ruled out all the possible other parse queries
:param name: str, string that represents a possible name of an object
:return: str, the detected basename
'''
pass
@classmethod
def get_base_naive(cls, name, ignore=""):
''' Checks a string for a possible base name of an object (no prefix, no suffix).
We need to do a full parse to make sure we've ruled out all the possible other parse queries
:param name: str, string that represents a possible name of an object
:return: str, the detected basename
'''
pass
@classmethod
def get_version(cls, name):
''' Checks a string for a possible version of an object (no prefix, no suffix).
Assumes only up to 4 digit padding
:param name: str, string that represents a possible name of an object
:return: (float, int, list(str), None), gets the version number then the string matches
'''
pass
@classmethod
def get_version_naive(cls, name, ignore=""):
''' Checks a string for a possible version of an object (no prefix, no suffix) without filtering date out
Assumes only up to 4 digit padding
:param name: str, string that represents a possible name of an object
:return: (float, int, list(str), None), gets the version number then the string matches
'''
pass
@classmethod
def get_udim(cls, name):
''' Checks a string for a possible base name of an object (no prefix, no suffix)
:param name: str, string that represents a possible name of an object
:returns: int, the last found match because convention keeps UDIM markers at the end.
'''
pass
@classmethod
def get_short(cls, name):
''' Returns the short name of a Maya asset name or path
:param name: str, string that represents a possible name of an object
:return: str, the short name of the maya node
'''
pass
@classmethod
def get_date(cls, name):
''' Checks a string for a possible date formatted into the name. It assumes dates do not have other
numbers at the front or head of the date. Currently only supports dates in 1900 and 2000.
Heavily relies on datetime for error checking to see
if date is actually viable. It follows similar ideas to this post:
http://stackoverflow.com/questions/9978534/match-dates-using-python-regular-expressions
:param name: str, string that represents a possible name of an object
:return: datetime.datetime, datetime object with current time or None if not found
'''
pass
@classmethod
def get_string_camel_patterns(cls, name, min_length=0):
''' Finds all permutations of possible camel casing of the given name
:param name: str, the name we need to get all possible permutations and abbreviations for
:param min_length: int, minimum length we want for abbreviations
:return: list(list(str)), list casing permutations of list of abbreviations
'''
pass
@classmethod
def _reduce_name(cls, name, parse_dict):
''' Reduces a name against matches found in a parse dictionary
:param name: str, name to be reduced
:param parse_dict: dict, dictionary of matches to reduce against
:return: str, reduced string
'''
pass
@staticmethod
def _get_regex_search(input_string, regex, metadata={}, match_index=None, ignore="", flags=0):
''' Using this so that all results from the functions return similar results
:param input_string: str, input string to be checked
:param regex: str, input regex to be compiled and searched with
:param match_index: (int, None), whether to get a specific match, if None returns all matches as list
:param metadata: dict, dictionary of extra meta tags needed to identify information
:return: list(dict), list of dictionaries if multiple hits or a specific entry or None
'''
pass
@classmethod
def _generic_search(cls, name, search_string, metadata={}, ignore=""):
''' Searches for a specific string given three types of regex search types. Also auto-checks for camel casing.
:param name: str, name of object in question
:param search_string: str, string to find and insert into the search regexes
:param metadata: dict, metadata to add to the result if we find a match
:param ignore: str, ignore specific string for the search
:return: dict, dictionary of search results
'''
pass
@staticmethod
def _get_abbreviations(input_string, output_length=0):
''' Generates abbreviations for input_string
:param input_string: str, name of object
:param output_length: int, optional specific length of abbreviations, default is off
:return: list(str), list of all combinations that include the first letter (possible abbreviations)
'''
pass
@classmethod
def is_valid_camel(cls, input_string, strcmp=None, ignore=""):
''' Checks to see if an input string is valid for use in camel casing
This assumes that all lowercase strings are not valid camel case situations and no camel string
can just be a capitalized word. Took ideas from here:
http://stackoverflow.com/questions/29916065/how-to-do-camelcase-split-in-python
:param input_string: str, input word
:param strcmp: str, force detection on a substring just in case its undetectable
(e.g. part of a section of text that's all lowercase)
:param ignore: str, what kind of string to ignore in the regex search
:return: bool, whether it is valid or not
'''
pass
@staticmethod
def _split_camel(name):
''' Splits up a camel case name into its constituent components.
:param name: str, name to split
:return: list(str), list of components of the camel case string
'''
pass
@classmethod
def _get_casing_permutations(cls, input_string):
''' Takes a string and gives all possible permutations of casing for comparative purposes
:param input_string: str, name of object
:return: Generator(str), iterator of all possible permutations of casing for the input_string
'''
pass
@staticmethod
def _string_remove_slice(input_str, start, end):
''' Removes portions of a string
:param input_str: str, input string
:param start: int, end search index
:param end: int, start search index
:return: str, the cut string
'''
pass
@staticmethod
def _build_abbreviation_regex(input_string):
''' builds a recursive regex based on an input string to find possible abbreviations more simply.
e.g. = punct(u(a(t(i(on?)?)?)?)?)?
:param input_string: str, input string
:return: str, output regex
'''
pass
| 41 | 21 | 22 | 2 | 14 | 7 | 4 | 0.43 | 1 | 13 | 0 | 0 | 0 | 0 | 20 | 20 | 502 | 61 | 308 | 106 | 267 | 133 | 196 | 86 | 175 | 11 | 1 | 5 | 74 |
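As an illustrative aside (not part of the dataset row), the small helpers that `NameParser` builds its side/discipline searches from can be exercised directly. This assumes the `nomenclate` package is installed (importing `NameParser` loads its bundled config at class-definition time), and the private helpers are called here purely for illustration; expected values are traced from the quoted code.

```python
# Sketch of the helper generators behind NameParser.get_side / get_discipline.
# Assumes nomenclate is installed; expected values are traced from the code quoted above.
from nomenclate.core.nameparser import NameParser

# Short-name extraction keeps the last '|'- and '//'-separated chunk.
print(NameParser.get_short("rig_grp|geo_grp|body_MSH"))        # 'body_MSH'

# Abbreviation regex: one optional upper/lower character class per letter.
print(NameParser._build_abbreviation_regex("lod"))             # '([Ll]?[Oo]?[Dd]?)'

# Abbreviations always keep the first letter; output_length sets a minimum size.
print(sorted(set(NameParser._get_abbreviations("side", output_length=2))))
# ['sd', 'sde', 'se', 'si', 'sid', 'side']

# Casing permutations enumerate every upper/lower combination of a string.
print(list(NameParser._get_casing_permutations("lt")))         # ['lt', 'Lt', 'lT', 'LT']
```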
4,710 | AndresMWeber/Nomenclate | nomenclate/core/errors.py | nomenclate.core.errors.OverlapError
human_written_code:
class OverlapError(NomenclateException):
"""Overlap error.
"""
pass
class_skeleton:
class OverlapError(NomenclateException):
'''Overlap error.
'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 4 | 0 | 2 | 1 | 1 | 2 | 2 | 1 | 1 | 0 | 4 | 0 | 0 |
4,711 | AndresMWeber/Nomenclate | nomenclate/core/errors.py | nomenclate.core.errors.OptionsError
human_written_code:
class OptionsError(NomenclateException):
"""Options error.
"""
pass
class_skeleton:
class OptionsError(NomenclateException):
'''Options error.
'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 4 | 0 | 2 | 1 | 1 | 2 | 2 | 1 | 1 | 0 | 4 | 0 | 0 |
4,712 | AndresMWeber/Nomenclate | nomenclate/core/rendering.py | nomenclate.core.rendering.InputRenderer
human_written_code:
class InputRenderer(type):
RENDER_FUNCTIONS = {}
def __new__(mcs, name, bases, dct):
cls = type.__new__(mcs, name, bases, dct)
token = dct.get("token", None)
if token:
mcs.RENDER_FUNCTIONS[token] = cls
return cls
@classmethod
def render_unique_tokens(cls, nomenclate_object, token_values):
for token, token_settings in token_values.items():
if token_settings.get("label") is not None and hasattr(nomenclate_object, token):
value = token_settings.pop("label")
token_settings.pop("token")
renderer = cls.get_valid_render_function(token)
if callable(getattr(renderer, "render")):
token_config = nomenclate_object.get_token_settings(token)
rendered_token = renderer.render(
value, token, nomenclate_object, **token_config
)
token_settings["label"] = rendered_token
@classmethod
def get_valid_render_function(cls, token_name):
token_name = token_name.lower()
renderer = None
for func in list(cls.RENDER_FUNCTIONS):
if token_name.replace(func, "").isdigit() or not token_name.replace(func, ""):
renderer = func
return cls.RENDER_FUNCTIONS.get(renderer or "default")
@classmethod
def render_nomenclative(cls, nomenclate_object):
nomenclative = processing.Nomenclative(nomenclate_object.format)
token_values = nomenclate_object.token_dict.to_json()
cls.render_unique_tokens(nomenclate_object, token_values)
render_template = nomenclate_object.format
cls._prepend_token_match_objects(token_values, render_template)
for token, match in token_values.items():
nomenclative.add_match(*match)
render_template = cls.cleanup_formatted_string(nomenclative.process_matches())
return render_template
@classmethod
def _prepend_token_match_objects(cls, token_values, incomplete_nomenclative):
for token, token_settings in token_values.items():
value = token_settings["label"]
regex_token = token.replace("(", "\(").replace(")", "\)")
re_token = settings.REGEX_TOKEN_SEARCH.format(
TOKEN=regex_token, TOKEN_CAPITALIZED=regex_token.capitalize()
)
re_matches = re.finditer(re_token, incomplete_nomenclative, 0)
for re_match in re_matches:
token_values[token] = (re_match, value)
cls._clear_non_matches(token_values)
@staticmethod
def _clear_non_matches(token_values):
to_delete = []
for token, value in token_values.items():
if isinstance(value, str) or not isinstance(value, tuple):
to_delete.append(token)
for delete in to_delete:
token_values.pop(delete)
@classmethod
def cleanup_formatted_string(cls, formatted_string):
""" Removes unused tokens/removes surrounding and double underscores
:param formatted_string: str, string that has had tokens replaced
:return: str, cleaned up name of object
"""
# Remove whitespace
result = formatted_string.replace(" ", "")
# Remove any static token parentheses
result = re.sub(settings.REGEX_PARENTHESIS, "", result)
# Remove any multiple separator characters
multi_character_matches = re.finditer("[%s]{2,}" % settings.SEPARATORS, result)
for multi_character_match in sorted(
multi_character_matches, key=lambda x: len(x.group()), reverse=True
):
match = multi_character_match.group()
most_common_separator = Counter(list(multi_character_match.group())).most_common(1)[0][
0
]
result = result.replace(match, most_common_separator)
# Remove trailing or preceding non letter characters
result = re.sub(settings.REGEX_ADJACENT_UNDERSCORE, "", result)
# not sure what this one was...but certainly not it.
result = re.sub(settings.REGEX_SINGLE_PARENTHESIS, "", result)
return result
@staticmethod
def _get_alphanumeric_index(query_string):
""" Given an input string of either int or char, returns what index in the alphabet and case it is
:param query_string: str, query string
:return: (int, str), list of the index and type
"""
# TODO: could probably rework this. it works, but it's ugly as hell.
try:
return [int(query_string), "int"]
except ValueError:
if len(query_string) == 1:
if query_string.isupper():
return [string.ascii_uppercase.index(query_string), "char_hi"]
elif query_string.islower():
return [string.ascii_lowercase.index(query_string), "char_lo"]
else:
raise IOError("The input is a string longer than one character")
class_skeleton:
class InputRenderer(type):
def __new__(mcs, name, bases, dct):
pass
@classmethod
def render_unique_tokens(cls, nomenclate_object, token_values):
pass
@classmethod
def get_valid_render_function(cls, token_name):
pass
@classmethod
def render_nomenclative(cls, nomenclate_object):
pass
@classmethod
def _prepend_token_match_objects(cls, token_values, incomplete_nomenclative):
pass
@staticmethod
def _clear_non_matches(token_values):
pass
@classmethod
def cleanup_formatted_string(cls, formatted_string):
''' Removes unused tokens/removes surrounding and double underscores
:param formatted_string: str, string that has had tokens replaced
:return: str, cleaned up name of object
'''
pass
@staticmethod
def _get_alphanumeric_index(query_string):
''' Given an input string of either int or char, returns what index in the alphabet and case it is
:param query_string: str, query string
:return: (int, str), list of the index and type
'''
pass
| 16 | 2 | 13 | 1 | 10 | 2 | 3 | 0.16 | 1 | 7 | 1 | 1 | 1 | 0 | 8 | 21 | 119 | 17 | 88 | 44 | 72 | 14 | 71 | 37 | 62 | 5 | 2 | 3 | 25 |
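As an illustrative aside (not part of the dataset row), `InputRenderer._get_alphanumeric_index` returns an `[index, kind]` pair for an int-like string or a single character. This assumes the `nomenclate` package is installed; expected values are traced from the quoted code.

```python
# Sketch of InputRenderer._get_alphanumeric_index: [index, kind] for an int-like string
# or a single character. Assumes nomenclate is installed; expected values are traced
# from the code quoted above.
from nomenclate.core.rendering import InputRenderer

print(InputRenderer._get_alphanumeric_index("7"))   # [7, 'int']
print(InputRenderer._get_alphanumeric_index("c"))   # [2, 'char_lo']
print(InputRenderer._get_alphanumeric_index("C"))   # [2, 'char_hi']
# Any non-numeric string longer than one character raises IOError.
```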
4,713 | AndresMWeber/Nomenclate | nomenclate/core/tokens.py | nomenclate.core.tokens.TokenAttr
human_written_code:
class TokenAttr(tools.Serializable):
""" A TokenAttr represents a string token that we want to replace in a given nomenclate.core.formatter.FormatString
It has 3 augmentation properties:
TokenAttr().case
TokenAttr().prefix
TokenAttr().suffix
These three settings enforce that after final rendering (finding matches in the config for the current
token's label and any custom rendering syntax the user added) we will upper case the result and add
either the given prefix or suffix no matter what.
"""
SERIALIZE_ATTRS = ["token", "label", "case", "prefix", "suffix"]
def __init__(self, token="", label=None, case="", prefix="", suffix=""):
"""
:param label: str, the label is represents the value we want to replace the given token with
:param token: str, the raw name for the token to be used
"""
try:
self.validate_entries(label)
except exceptions.ValidationError:
label = None
self.validate_entries(token)
self.raw_string = label if label is not None else ""
self.raw_token = token
self.case = case
self.prefix = prefix
self.suffix = suffix
@property
def token(self):
""" Get or set the current token. Setting the token to a new value means it will be validated and then
added as the internal "raw_token" which will be used to look up any given config value or be used if
no config value is found.
"""
return self.raw_token.lower()
@token.setter
def token(self, token):
self.validate_entries(token)
self.raw_token = token
@property
def label(self):
""" Get or set the current token's "label" or value. Setting the label to a new value means it will be added
as the internal "raw_string" which will be used to look up any given config value or be used if no config
value is found.
"""
return self.raw_string
@label.setter
def label(self, label):
self.validate_entries(label)
try:
self.raw_string = int(label)
except ValueError:
self.raw_string = label
def set(self, value):
self.label = value
@staticmethod
def validate_entries(*entries):
for entry in entries:
if isinstance(entry, (str, int)) or entry is None:
continue
else:
raise exceptions.ValidationError(
"Invalid type %s, expected %s" % (type(entry), str)
)
def __eq__(self, other):
if isinstance(other, self.__class__):
self_attrs = [getattr(self, attr) for attr in self.SERIALIZE_ATTRS]
other_attrs = [getattr(other, attr) for attr in self.SERIALIZE_ATTRS]
return self_attrs == other_attrs
else:
return False
def __ne__(self, other):
return (self.token, self.label) != (other.token, other.label)
def __lt__(self, other):
return (self.token, self.label) < (other.token, other.label)
def __le__(self, other):
return (self.token, self.label) <= (other.token, other.label)
def __gt__(self, other):
return (self.token, self.label) > (other.token, other.label)
def __ge__(self, other):
return (self.token, self.label) >= (other.token, other.label)
def __str__(self):
return "%r" % (self)
def __repr__(self):
return "<%s (%s): %r>" % (self.__class__.__name__, self.raw_token, self.to_json())
def to_json(self):
return {
"token": self.raw_token,
"label": self.raw_string,
"case": self.case,
"prefix": self.prefix,
"suffix": self.suffix,
}
class_skeleton:
class TokenAttr(tools.Serializable):
''' A TokenAttr represents a string token that we want to replace in a given nomenclate.core.formatter.FormatString
It has 3 augmentation properties:
TokenAttr().case
TokenAttr().prefix
TokenAttr().suffix
These three settings enforce that after final rendering (finding matches in the config for the current
token's label and any custom rendering syntax the user added) we will upper case the result and add
either the given prefix or suffix no matter what.
'''
def __init__(self, token="", label=None, case="", prefix="", suffix=""):
'''
:param label: str, the label is represents the value we want to replace the given token with
:param token: str, the raw name for the token to be used
'''
pass
@property
def token(self):
''' Get or set the current token. Setting the token to a new value means it will be validated and then
added as the internal "raw_token" which will be used to look up any given config value or be used if
no config value is found.
'''
pass
@token.setter
def token(self):
pass
@property
def label(self):
''' Get or set the current token's "label" or value. Setting the label to a new value means it will be added
as the internal "raw_string" which will be used to look up any given config value or be used if no config
value is found.
'''
pass
@label.setter
def label(self):
pass
def set(self, value):
pass
@staticmethod
def validate_entries(*entries):
pass
def __eq__(self, other):
pass
def __ne__(self, other):
pass
def __lt__(self, other):
pass
def __le__(self, other):
pass
def __gt__(self, other):
pass
def __ge__(self, other):
pass
def __str__(self):
pass
def __repr__(self):
pass
def to_json(self):
pass
| 22 | 4 | 5 | 0 | 4 | 1 | 1 | 0.3 | 1 | 5 | 1 | 0 | 15 | 5 | 16 | 22 | 115 | 24 | 70 | 31 | 48 | 21 | 55 | 26 | 38 | 3 | 2 | 2 | 22 |
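As an illustrative aside (not part of the dataset row), the augmentation fields (`case`, `prefix`, `suffix`) and the JSON form described in the `TokenAttr` docstring can be shown with one instance. This assumes the `nomenclate` package is installed; expected values are traced from the quoted code.

```python
# Sketch of TokenAttr's augmentation fields and its to_json() output.
# Assumes nomenclate is installed; expected values are traced from the code quoted above.
from nomenclate.core.tokens import TokenAttr

attr = TokenAttr(token="side", label="left", case="upper", suffix="_01")
print(attr.token)      # 'side'
print(attr.label)      # 'left'
print(attr.to_json())  # {'token': 'side', 'label': 'left', 'case': 'upper', 'prefix': '', 'suffix': '_01'}

# RenderBase.process_token_augmentations (row 4,704) would render this as 'LEFT_01':
# prefix + getattr(str, 'upper', str)('left') + suffix
```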
4,714 | AndresMWeber/Nomenclate | nomenclate/core/renderers.py | nomenclate.core.renderers.RenderVersion
human_written_code:
class RenderVersion(RenderBase):
token = "version"
@classmethod
def render(
cls,
version,
token,
nomenclate_object,
config_query_path=None,
return_type=list,
use_value_in_query_path=True,
**kwargs
):
padding = kwargs.get("%s_padding" % token, 4)
version_string = "%0{0}d"
version = version_string.format(padding) % int(version)
return cls.process_token_augmentations(
version, token_attr=getattr(nomenclate_object, token)
)
class_skeleton:
class RenderVersion(RenderBase):
@classmethod
def render(
cls,
version,
token,
nomenclate_object,
config_query_path=None,
return_type=list,
use_value_in_query_path=True,
**kwargs
):
pass
| 3 | 0 | 16 | 0 | 16 | 0 | 1 | 0 | 1 | 2 | 0 | 0 | 0 | 0 | 1 | 27 | 20 | 1 | 19 | 15 | 7 | 0 | 7 | 5 | 5 | 1 | 4 | 0 | 1 |
4,715 | AndresMWeber/Nomenclate | nomenclate/core/errors.py | nomenclate.core.errors.ResetError
human_written_code:
class ResetError(NomenclateException):
"""Reset error.
"""
pass
class_skeleton:
class ResetError(NomenclateException):
'''Reset error.
'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 4 | 0 | 2 | 1 | 1 | 2 | 2 | 1 | 1 | 0 | 4 | 0 | 0 |
4,716 | AndrewAnnex/SpiceyPy | AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py | spiceypy.utils.exceptions.SpiceNOEULERANGLEUNITS
human_written_code:
class SpiceNOEULERANGLEUNITS(SpiceyPyError):
pass
class_skeleton:
class SpiceNOEULERANGLEUNITS(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,717 | AndrewAnnex/SpiceyPy | AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py | spiceypy.utils.exceptions.SpiceNOFILENAMES
human_written_code:
class SpiceNOFILENAMES(SpiceyPyError):
pass
class_skeleton:
class SpiceNOFILENAMES(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,718 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNOLISTFILENAME
|
class SpiceNOLISTFILENAME(SpiceyPyError):
pass
|
class SpiceNOLISTFILENAME(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,719 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNOENVVARIABLE
|
class SpiceNOENVVARIABLE(SpiceyPyError):
pass
|
class SpiceNOENVVARIABLE(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,720 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNODSKSEGMENTS
|
class SpiceNODSKSEGMENTS(SpiceyPyError):
pass
|
class SpiceNODSKSEGMENTS(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,721 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNODSKSEGMENT
|
class SpiceNODSKSEGMENT(SpiceyPyError):
pass
|
class SpiceNODSKSEGMENT(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,722 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNODETOOFULL
|
class SpiceNODETOOFULL(SpiceyPyError):
pass
|
class SpiceNODETOOFULL(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,723 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNODELIMCHARACTER
|
class SpiceNODELIMCHARACTER(SpiceyPyError):
pass
|
class SpiceNODELIMCHARACTER(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,724 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNODATATYPEFLAG
|
class SpiceNODATATYPEFLAG(SpiceyPyError):
pass
|
class SpiceNODATATYPEFLAG(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,725 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNODATAORDER
|
class SpiceNODATAORDER(SpiceyPyError):
pass
|
class SpiceNODATAORDER(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,726 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNONCONTIGUOUSARRAY
|
class SpiceNONCONTIGUOUSARRAY(SpiceyPyValueError):
pass
|
class SpiceNONCONTIGUOUSARRAY(SpiceyPyValueError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 13 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 6 | 0 | 0 |
4,727 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNOLOADEDFILES
|
class SpiceNOLOADEDFILES(SpiceyPyIOError):
pass
|
class SpiceNOLOADEDFILES(SpiceyPyIOError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 6 | 0 | 0 |
4,728 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNOLSKFILENAME
|
class SpiceNOLSKFILENAME(SpiceyPyError):
pass
|
class SpiceNOLSKFILENAME(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,729 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNOMOREROOM
|
class SpiceNOMOREROOM(SpiceyPyMemoryError):
pass
|
class SpiceNOMOREROOM(SpiceyPyMemoryError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 13 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 6 | 0 | 0 |
4,730 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNONCONICMOTION
|
class SpiceNONCONICMOTION(SpiceyPyValueError):
pass
|
class SpiceNONCONICMOTION(SpiceyPyValueError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 13 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 6 | 0 | 0 |
4,731 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNOFILES
|
class SpiceNOFILES(SpiceyPyError):
pass
|
class SpiceNOFILES(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,732 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNONDISTINCTPAIR
|
class SpiceNONDISTINCTPAIR(SpiceyPyError):
pass
|
class SpiceNONDISTINCTPAIR(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,733 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNONEMPTYENTRY
|
class SpiceNONEMPTYENTRY(SpiceyPyError):
pass
|
class SpiceNONEMPTYENTRY(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,734 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNONEMPTYTREE
|
class SpiceNONEMPTYTREE(SpiceyPyError):
pass
|
class SpiceNONEMPTYTREE(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,735 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNONEXISTELEMENTS
|
class SpiceNONEXISTELEMENTS(SpiceyPyError):
pass
|
class SpiceNONEXISTELEMENTS(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,736 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNOPOLYNOMIALDEGREE
|
class SpiceNOPOLYNOMIALDEGREE(SpiceyPyError):
pass
|
class SpiceNOPOLYNOMIALDEGREE(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,737 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNOPRECESSIONTYPE
|
class SpiceNOPRECESSIONTYPE(SpiceyPyError):
pass
|
class SpiceNOPRECESSIONTYPE(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,738 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNOLOADEDDSKFILES
|
class SpiceNOLOADEDDSKFILES(SpiceyPyError):
pass
|
class SpiceNOLOADEDDSKFILES(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,739 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNOFILESPEC
|
class SpiceNOFILESPEC(SpiceyPyError):
pass
|
class SpiceNOFILESPEC(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,740 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNOLANDINGTIME
|
class SpiceNOLANDINGTIME(SpiceyPyError):
pass
|
class SpiceNOLANDINGTIME(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,741 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNOFRAMECONNECT
|
class SpiceNOFRAMECONNECT(SpiceyPyError):
pass
|
class SpiceNOFRAMECONNECT(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,742 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNOFREELOGICALUNIT
|
class SpiceNOFREELOGICALUNIT(SpiceyPyError):
pass
|
class SpiceNOFREELOGICALUNIT(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,743 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNOFRAMEINFO
|
class SpiceNOFRAMEINFO(SpiceyPyValueError):
pass
|
class SpiceNOFRAMEINFO(SpiceyPyValueError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 13 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 6 | 0 | 0 |
4,744 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNOFREENODES
|
class SpiceNOFREENODES(SpiceyPyError):
pass
|
class SpiceNOFREENODES(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,745 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNOFROMTIME
|
class SpiceNOFROMTIME(SpiceyPyError):
pass
|
class SpiceNOFROMTIME(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,746 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNOFROMTIMESYSTEM
|
class SpiceNOFROMTIMESYSTEM(SpiceyPyError):
pass
|
class SpiceNOFROMTIMESYSTEM(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,747 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNOHEADNODE
|
class SpiceNOHEADNODE(SpiceyPyError):
pass
|
class SpiceNOHEADNODE(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,748 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNOINFO
|
class SpiceNOINFO(SpiceyPyError):
pass
|
class SpiceNOINFO(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,749 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNOINPUTDATATYPE
|
class SpiceNOINPUTDATATYPE(SpiceyPyError):
pass
|
class SpiceNOINPUTDATATYPE(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,750 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNOINPUTFILENAME
|
class SpiceNOINPUTFILENAME(SpiceyPyError):
pass
|
class SpiceNOINPUTFILENAME(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,751 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNOINSTRUMENTID
|
class SpiceNOINSTRUMENTID(SpiceyPyError):
pass
|
class SpiceNOINSTRUMENTID(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,752 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNOPLATES
|
class SpiceNOPLATES(SpiceyPyError):
pass
|
class SpiceNOPLATES(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,753 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNOPICTURE
|
class SpiceNOPICTURE(SpiceyPyError):
pass
|
class SpiceNOPICTURE(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,754 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNOPATHVALUE
|
class SpiceNOPATHVALUE(SpiceyPyValueError):
pass
|
class SpiceNOPATHVALUE(SpiceyPyValueError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 13 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 6 | 0 | 0 |
4,755 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNOPARTITION
|
class SpiceNOPARTITION(SpiceyPyValueError):
pass
|
class SpiceNOPARTITION(SpiceyPyValueError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 13 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 6 | 0 | 0 |
4,756 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNOOUTPUTFILENAME
|
class SpiceNOOUTPUTFILENAME(SpiceyPyError):
pass
|
class SpiceNOOUTPUTFILENAME(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,757 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNOOFFSETANGLEUNITS
|
class SpiceNOOFFSETANGLEUNITS(SpiceyPyError):
pass
|
class SpiceNOOFFSETANGLEUNITS(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,758 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNOOFFSETANGLEAXES
|
class SpiceNOOFFSETANGLEAXES(SpiceyPyError):
pass
|
class SpiceNOOFFSETANGLEAXES(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,759 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNOOBJECTIDORNAME
|
class SpiceNOOBJECTIDORNAME(SpiceyPyError):
pass
|
class SpiceNOOBJECTIDORNAME(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,760 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNONUNITQUATERNION
|
class SpiceNONUNITQUATERNION(SpiceyPyError):
pass
|
class SpiceNONUNITQUATERNION(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,761 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNOINTERCEPT
|
class SpiceNOINTERCEPT(SpiceyPyValueError):
pass
|
class SpiceNOINTERCEPT(SpiceyPyValueError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 13 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 6 | 0 | 0 |
4,762 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNOINTERVAL
|
class SpiceNOINTERVAL(SpiceyPyValueError):
pass
|
class SpiceNOINTERVAL(SpiceyPyValueError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 13 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 6 | 0 | 0 |
4,763 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNOKERNELLOADED
|
class SpiceNOKERNELLOADED(SpiceyPyError):
pass
|
class SpiceNOKERNELLOADED(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,764 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNOLEAPSECONDS
|
class SpiceNOLEAPSECONDS(SpiceyPyError):
pass
|
class SpiceNOLEAPSECONDS(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,765 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNOLINESPERRECCOUNT
|
class SpiceNOLINESPERRECCOUNT(SpiceyPyError):
pass
|
class SpiceNOLINESPERRECCOUNT(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,766 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNOFRAMEDATA
|
class SpiceNOFRAMEDATA(SpiceyPyError):
pass
|
class SpiceNOFRAMEDATA(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,767 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNOFRAME
|
class SpiceNOFRAME(SpiceyPyValueError):
pass
|
class SpiceNOFRAME(SpiceyPyValueError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 13 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 6 | 0 | 0 |
4,768 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNOOUTPUTSPKTYPE
|
class SpiceNOOUTPUTSPKTYPE(SpiceyPyError):
pass
|
class SpiceNOOUTPUTSPKTYPE(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,769 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNOCONVERG
|
class SpiceNOCONVERG(SpiceyPyError):
pass
|
class SpiceNOCONVERG(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,770 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNOCURRENTARRAY
|
class SpiceNOCURRENTARRAY(SpiceyPyIOError):
pass
|
class SpiceNOCURRENTARRAY(SpiceyPyIOError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 6 | 0 | 0 |
4,771 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceMISSINGLEFTCOR
|
class SpiceMISSINGLEFTCOR(SpiceyPyError):
pass
|
class SpiceMISSINGLEFTCOR(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,772 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceMISSINGLEFTRTFLAG
|
class SpiceMISSINGLEFTRTFLAG(SpiceyPyError):
pass
|
class SpiceMISSINGLEFTRTFLAG(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,773 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNONPRINTINGCHARS
|
class SpiceNONPRINTINGCHARS(SpiceyPyError):
pass
|
class SpiceNONPRINTINGCHARS(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,774 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNONPRINTINGCHAR
|
class SpiceNONPRINTINGCHAR(SpiceyPyError):
pass
|
class SpiceNONPRINTINGCHAR(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,775 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNONPRINTABLECHARS
|
class SpiceNONPRINTABLECHARS(SpiceyPyValueError):
pass
|
class SpiceNONPRINTABLECHARS(SpiceyPyValueError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 13 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 6 | 0 | 0 |
4,776 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNONPOSPACKETSIZE
|
class SpiceNONPOSPACKETSIZE(SpiceyPyError):
pass
|
class SpiceNONPOSPACKETSIZE(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,777 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNONPOSITIVEVALUE
|
class SpiceNONPOSITIVEVALUE(SpiceyPyError):
pass
|
class SpiceNONPOSITIVEVALUE(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,778 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceMISSINGNCAPFLAG
|
class SpiceMISSINGNCAPFLAG(SpiceyPyError):
pass
|
class SpiceMISSINGNCAPFLAG(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,779 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNONPOSITIVESCALE
|
class SpiceNONPOSITIVESCALE(SpiceyPyValueError):
pass
|
class SpiceNONPOSITIVESCALE(SpiceyPyValueError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 13 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 6 | 0 | 0 |
4,780 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNONPOSITIVERADIUS
|
class SpiceNONPOSITIVERADIUS(SpiceyPyError):
pass
|
class SpiceNONPOSITIVERADIUS(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,781 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNONPOSITIVEMASS
|
class SpiceNONPOSITIVEMASS(SpiceyPyValueError):
pass
|
class SpiceNONPOSITIVEMASS(SpiceyPyValueError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 13 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 6 | 0 | 0 |
4,782 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceMISSINGNCOLS
|
class SpiceMISSINGNCOLS(SpiceyPyError):
pass
|
class SpiceMISSINGNCOLS(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,783 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceMISSINGNROWS
|
class SpiceMISSINGNROWS(SpiceyPyError):
pass
|
class SpiceMISSINGNROWS(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,784 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceMISSINGPLATETYPE
|
class SpiceMISSINGPLATETYPE(SpiceyPyError):
pass
|
class SpiceMISSINGPLATETYPE(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,785 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceMISSINGROWMAJFLAG
|
class SpiceMISSINGROWMAJFLAG(SpiceyPyError):
pass
|
class SpiceMISSINGROWMAJFLAG(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,786 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceMISMATCHFROMTIMETYPE
|
class SpiceMISMATCHFROMTIMETYPE(SpiceyPyError):
pass
|
class SpiceMISMATCHFROMTIMETYPE(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,787 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceMISSINGROWSTEP
|
class SpiceMISSINGROWSTEP(SpiceyPyError):
pass
|
class SpiceMISSINGROWSTEP(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,788 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceMISSINGKPV
|
class SpiceMISSINGKPV(SpiceyPyError):
pass
|
class SpiceMISSINGKPV(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,789 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceMISMATCHTOTIMETYPE
|
class SpiceMISMATCHTOTIMETYPE(SpiceyPyError):
pass
|
class SpiceMISMATCHTOTIMETYPE(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,790 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceMISSINGCOLSTEP
|
class SpiceMISSINGCOLSTEP(SpiceyPyError):
pass
|
class SpiceMISSINGCOLSTEP(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,791 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceMISSINGCOORDBOUND
|
class SpiceMISSINGCOORDBOUND(SpiceyPyError):
pass
|
class SpiceMISSINGCOORDBOUND(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,792 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceMISSINGCOORDSYS
|
class SpiceMISSINGCOORDSYS(SpiceyPyError):
pass
|
class SpiceMISSINGCOORDSYS(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,793 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceNOFRAMENAME
|
class SpiceNOFRAMENAME(SpiceyPyError):
pass
|
class SpiceNOFRAMENAME(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,794 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceMISSINGDATA
|
class SpiceMISSINGDATA(SpiceyPyValueError):
pass
|
class SpiceMISSINGDATA(SpiceyPyValueError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 13 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 6 | 0 | 0 |
4,795 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceMISSINGDATACLASS
|
class SpiceMISSINGDATACLASS(SpiceyPyError):
pass
|
class SpiceMISSINGDATACLASS(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,796 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceMISSINGDATAORDERTK
|
class SpiceMISSINGDATAORDERTK(SpiceyPyError):
pass
|
class SpiceMISSINGDATAORDERTK(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,797 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceMISSINGDATATYPE
|
class SpiceMISSINGDATATYPE(SpiceyPyError):
pass
|
class SpiceMISSINGDATATYPE(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,798 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceMISSINGEOT
|
class SpiceMISSINGEOT(SpiceyPyError):
pass
|
class SpiceMISSINGEOT(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |
4,799 |
AndrewAnnex/SpiceyPy
|
AndrewAnnex_SpiceyPy/src/spiceypy/utils/exceptions.py
|
spiceypy.utils.exceptions.SpiceMISSINGEPOCHTOKEN
|
class SpiceMISSINGEPOCHTOKEN(SpiceyPyError):
pass
|
class SpiceMISSINGEPOCHTOKEN(SpiceyPyError):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 5 | 0 | 0 |