id
int64 0
843k
| repository_name
stringlengths 7
55
| file_path
stringlengths 9
332
| class_name
stringlengths 3
290
| human_written_code
stringlengths 12
4.36M
| class_skeleton
stringlengths 19
2.2M
| total_program_units
int64 1
9.57k
| total_doc_str
int64 0
4.2k
| AvgCountLine
float64 0
7.89k
| AvgCountLineBlank
float64 0
300
| AvgCountLineCode
float64 0
7.89k
| AvgCountLineComment
float64 0
7.89k
| AvgCyclomatic
float64 0
130
| CommentToCodeRatio
float64 0
176
| CountClassBase
float64 0
48
| CountClassCoupled
float64 0
589
| CountClassCoupledModified
float64 0
581
| CountClassDerived
float64 0
5.37k
| CountDeclInstanceMethod
float64 0
4.2k
| CountDeclInstanceVariable
float64 0
299
| CountDeclMethod
float64 0
4.2k
| CountDeclMethodAll
float64 0
4.2k
| CountLine
float64 1
115k
| CountLineBlank
float64 0
9.01k
| CountLineCode
float64 0
94.4k
| CountLineCodeDecl
float64 0
46.1k
| CountLineCodeExe
float64 0
91.3k
| CountLineComment
float64 0
27k
| CountStmt
float64 1
93.2k
| CountStmtDecl
float64 0
46.1k
| CountStmtExe
float64 0
90.2k
| MaxCyclomatic
float64 0
759
| MaxInheritanceTree
float64 0
16
| MaxNesting
float64 0
34
| SumCyclomatic
float64 0
6k
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
142,648 |
Kentzo/git-archive-all
|
Kentzo_git-archive-all/git_archive_all.py
|
git_archive_all.GitArchiver
|
class GitArchiver(object):
"""
GitArchiver
Scan a git repository and export all tracked files, and submodules.
Checks for .gitattributes files in each directory and uses 'export-ignore'
pattern entries for ignore files in the archive.
>>> archiver = GitArchiver(main_repo_abspath='my/repo/path')
>>> archiver.create('output.zip')
"""
TARFILE_FORMATS = {
'tar': 'w',
'tbz2': 'w:bz2',
'tgz': 'w:gz',
'txz': 'w:xz',
'bz2': 'w:bz2',
'gz': 'w:gz',
'xz': 'w:xz'
}
ZIPFILE_FORMATS = ('zip',)
LOG = logging.getLogger('GitArchiver')
def __init__(self, prefix='', exclude=True, force_sub=False, extra=None, main_repo_abspath=None, git_version=None):
"""
@param prefix: Prefix used to prepend all paths in the resulting archive.
Extra file paths are only prefixed if they are not relative.
E.g. if prefix is 'foo' and extra is ['bar', '/baz'] the resulting archive will look like this:
/
baz
foo/
bar
@param exclude: Determines whether archiver should follow rules specified in .gitattributes files.
@param force_sub: Determines whether submodules are initialized and updated before archiving.
@param extra: List of extra paths to include in the resulting archive.
@param main_repo_abspath: Absolute path to the main repository (or one of subdirectories).
If given path is path to a subdirectory (but not a submodule directory!) it will be replaced
with abspath to top-level directory of the repository.
If None, current cwd is used.
@param git_version: Version of Git that determines whether various workarounds are on.
If None, tries to resolve via Git's CLI.
"""
self._check_attr_gens = {}
self._ignored_paths_cache = {}
if git_version is None:
git_version = self.get_git_version()
if git_version is not None and git_version < (1, 6, 1):
raise ValueError("git of version 1.6.1 and higher is required")
self.git_version = git_version
if main_repo_abspath is None:
main_repo_abspath = path.abspath('')
elif not path.isabs(main_repo_abspath):
raise ValueError("main_repo_abspath must be an absolute path")
self.main_repo_abspath = self.resolve_git_main_repo_abspath(main_repo_abspath)
self.prefix = fspath(prefix)
self.exclude = exclude
self.extra = [fspath(e) for e in extra] if extra is not None else []
self.force_sub = force_sub
def create(self, output_path, dry_run=False, output_format=None, compresslevel=None):
"""
Create the archive at output_file_path.
Type of the archive is determined either by extension of output_file_path or by output_format.
Supported formats are: gz, zip, bz2, xz, tar, tgz, txz
@param output_path: Output file path.
@param dry_run: Determines whether create should do nothing but print what it would archive.
@param output_format: Determines format of the output archive. If None, format is determined from extension
of output_file_path.
@param compresslevel: Optional compression level. Interpretation depends on the output format.
"""
output_path = fspath(output_path)
if output_format is None:
file_name, file_ext = path.splitext(output_path)
output_format = file_ext[len(extsep):].lower()
self.LOG.debug("Output format is not explicitly set, determined format is {0}.".format(output_format))
if not dry_run:
if output_format in self.ZIPFILE_FORMATS:
from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED
if compresslevel is not None:
if sys.version_info > (3, 7):
archive = ZipFile(path.abspath(output_path), 'w', compresslevel=compresslevel)
else:
raise ValueError("Compression level for zip archives requires Python 3.7+")
else:
archive = ZipFile(path.abspath(output_path), 'w')
def add_file(file_path, arcname):
if not path.islink(file_path):
archive.write(file_path, arcname, ZIP_DEFLATED)
else:
i = ZipInfo(arcname)
i.create_system = 3
i.external_attr = 0xA1ED0000
archive.writestr(i, readlink(file_path))
elif output_format in self.TARFILE_FORMATS:
import tarfile
mode = self.TARFILE_FORMATS[output_format]
if compresslevel is not None:
try:
archive = tarfile.open(path.abspath(output_path), mode, compresslevel=compresslevel)
except TypeError:
raise ValueError("{0} cannot be compressed".format(output_format))
else:
archive = tarfile.open(path.abspath(output_path), mode)
def add_file(file_path, arcname):
archive.add(file_path, arcname)
else:
raise ValueError("unknown format: {0}".format(output_format))
def archiver(file_path, arcname):
self.LOG.debug(fspath("{0} => {1}").format(file_path, arcname))
add_file(file_path, arcname)
else:
archive = None
def archiver(file_path, arcname):
self.LOG.info(fspath("{0} => {1}").format(file_path, arcname))
self.archive_all_files(archiver)
if archive is not None:
archive.close()
def is_file_excluded(self, repo_abspath, repo_file_path):
"""
Checks whether file at a given path is excluded.
@param repo_abspath: Absolute path to the git repository.
@param repo_file_path: Path to a file relative to repo_abspath.
@return: True if file should be excluded. Otherwise False.
"""
if not self.exclude:
return False
cache = self._ignored_paths_cache.setdefault(repo_abspath, {})
if repo_file_path not in cache:
next(self._check_attr_gens[repo_abspath])
attrs = self._check_attr_gens[repo_abspath].send(repo_file_path)
export_ignore_attr = attrs['export-ignore']
if export_ignore_attr == b'set':
cache[repo_file_path] = True
elif export_ignore_attr == b'unset':
cache[repo_file_path] = False
else:
repo_file_dir_path = path.dirname(repo_file_path)
if repo_file_dir_path:
cache[repo_file_path] = self.is_file_excluded(repo_abspath, repo_file_dir_path)
else:
cache[repo_file_path] = False
return cache[repo_file_path]
def archive_all_files(self, archiver):
"""
Archive all files using archiver.
@param archiver: Callable that accepts 2 arguments:
abspath to file on the system and relative path within archive.
"""
for file_path in self.extra:
archiver(path.abspath(file_path), path.join(self.prefix, file_path))
for file_path in self.walk_git_files():
archiver(path.join(self.main_repo_abspath, file_path), path.join(self.prefix, file_path))
def walk_git_files(self, repo_path=fspath('')):
"""
An iterator method that yields a file path relative to main_repo_abspath
for each file that should be included in the archive.
Skips those that match the exclusion patterns found in
any discovered .gitattributes files along the way.
Recurs into submodules as well.
@param repo_path: Path to the git submodule repository relative to main_repo_abspath.
@return: Generator to traverse files under git control relative to main_repo_abspath.
"""
repo_abspath = path.join(self.main_repo_abspath, fspath(repo_path))
assert repo_abspath not in self._check_attr_gens
self._check_attr_gens[repo_abspath] = self.check_git_attr(repo_abspath, ['export-ignore'])
try:
repo_file_paths = self.list_repo_files(repo_abspath)
for repo_file_path in repo_file_paths:
repo_file_abspath = path.join(repo_abspath, repo_file_path) # absolute file path
main_repo_file_path = path.join(repo_path, repo_file_path) # relative to main_repo_abspath
if not path.islink(repo_file_abspath) and path.isdir(repo_file_abspath):
continue
if self.is_file_excluded(repo_abspath, repo_file_path):
continue
yield main_repo_file_path
if self.force_sub:
self.run_git_shell('git submodule init', repo_abspath)
self.run_git_shell('git submodule update', repo_abspath)
for repo_submodule_path in self.list_repo_submodules(repo_abspath): # relative to repo_path
if self.is_file_excluded(repo_abspath, repo_submodule_path):
continue
main_repo_submodule_path = path.join(repo_path, repo_submodule_path) # relative to main_repo_abspath
for main_repo_submodule_file_path in self.walk_git_files(main_repo_submodule_path):
repo_submodule_file_path = path.relpath(main_repo_submodule_file_path, repo_path) # relative to repo_path
if self.is_file_excluded(repo_abspath, repo_submodule_file_path):
continue
yield main_repo_submodule_file_path
finally:
self._check_attr_gens[repo_abspath].close()
del self._check_attr_gens[repo_abspath]
def check_git_attr(self, repo_abspath, attrs):
"""
Generator that returns git attributes for received paths relative to repo_abspath.
>>> archiver = GitArchiver(...)
>>> g = archiver.check_git_attr('repo_path', ['export-ignore'])
>>> next(g)
>>> attrs = g.send('relative_path')
>>> print(attrs['export-ignore'])
@param repo_abspath: Absolute path to a git repository.
@param attrs: Attributes to check
"""
def make_process():
env = dict(environ, GIT_FLUSH='1')
cmd = 'git check-attr --stdin -z {0}'.format(' '.join(attrs))
return Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, cwd=repo_abspath, env=env)
def read_attrs(process, repo_file_path):
process.stdin.write(repo_file_path + b'\0')
process.stdin.flush()
# For every attribute check-attr will output: <path> NUL <attribute> NUL <info> NUL
path, attr, info = b'', b'', b''
nuls_count = 0
nuls_expected = 3 * len(attrs)
while nuls_count != nuls_expected:
b = process.stdout.read(1)
if b == b'' and process.poll() is not None:
raise RuntimeError("check-attr exited prematurely")
elif b == b'\0':
nuls_count += 1
if nuls_count % 3 == 0:
yield path, attr, info
path, attr, info = b'', b'', b''
elif nuls_count % 3 == 0:
path += b
elif nuls_count % 3 == 1:
attr += b
elif nuls_count % 3 == 2:
info += b
def read_attrs_old(process, repo_file_path):
"""
Compatibility with versions 1.8.5 and below that do not recognize -z for output.
"""
process.stdin.write(repo_file_path + b'\0')
process.stdin.flush()
# For every attribute check-attr will output: <path>: <attribute>: <info>\n
# where <path> is c-quoted
path, attr, info = b'', b'', b''
lines_count = 0
lines_expected = len(attrs)
while lines_count != lines_expected:
line = process.stdout.readline()
info_start = line.rfind(b': ')
if info_start == -1:
raise RuntimeError("unexpected output of check-attr: {0}".format(line))
attr_start = line.rfind(b': ', 0, info_start)
if attr_start == -1:
raise RuntimeError("unexpected output of check-attr: {0}".format(line))
path = line[:attr_start]
attr = line[attr_start + 2:info_start] # trim leading ": "
info = line[info_start + 2:len(line) - 1] # trim leading ": " and trailing \n
yield path, attr, info
lines_count += 1
if not attrs:
return
process = make_process()
if self.git_version is None or self.git_version > (1, 8, 5):
reader = read_attrs
else:
reader = read_attrs_old
try:
while True:
repo_file_path = yield
repo_file_path = git_fsencode(fspath(repo_file_path))
repo_file_attrs = {}
for path, attr, value in reader(process, repo_file_path):
attr = attr.decode('utf-8')
repo_file_attrs[attr] = value
yield repo_file_attrs
finally:
process.stdin.close()
process.wait()
def resolve_git_main_repo_abspath(self, abspath):
"""
Return absolute path to the repo for a given path.
"""
try:
main_repo_abspath = self.run_git_shell('git rev-parse --show-toplevel', cwd=abspath).rstrip()
return path.abspath(git_fspath(main_repo_abspath))
except CalledProcessError as e:
raise ValueError("{0} is not part of a git repository ({1})".format(abspath, e.returncode))
@classmethod
def run_git_shell(cls, cmd, cwd=None):
"""
Run git shell command, read output and decode it into a unicode string.
@param cmd: Command to be executed.
@param cwd: Working directory.
@return: Output of the command.
@raise CalledProcessError: Raises exception if return code of the command is non-zero.
"""
p = Popen(cmd, shell=True, stdout=PIPE, cwd=cwd)
output, _ = p.communicate()
if p.returncode:
if sys.version_info > (2, 6):
raise CalledProcessError(returncode=p.returncode, cmd=cmd, output=output)
else:
raise CalledProcessError(returncode=p.returncode, cmd=cmd)
return output
@classmethod
def get_git_version(cls):
"""
Return version of git current shell points to.
If version cannot be parsed None is returned.
"""
try:
output = cls.run_git_shell('git version')
except CalledProcessError:
cls.LOG.warning("Unable to get Git version.")
return None
try:
version = output.split()[2]
except IndexError:
cls.LOG.warning("Unable to parse Git version \"%s\".", output)
return None
try:
return tuple(int(v) if v.isdigit() else 0 for v in version.split(b'.'))
except ValueError:
cls.LOG.warning("Unable to parse Git version \"%s\".", version)
return None
@classmethod
def list_repo_files(cls, repo_abspath):
"""
Return a list of all files as seen by git in a given repo.
"""
repo_file_paths = cls.run_git_shell(
'git ls-files -z --cached --full-name --no-empty-directory',
cwd=repo_abspath
)
repo_file_paths = repo_file_paths.split(b'\0')[:-1]
if sys.platform.startswith('win32'):
repo_file_paths = (git_fspath(p.replace(b'/', b'\\')) for p in repo_file_paths)
else:
repo_file_paths = map(git_fspath, repo_file_paths)
return repo_file_paths
@classmethod
def list_repo_submodules(cls, repo_abspath):
"""
Return a list of all direct submodules as seen by git in a given repo.
"""
if sys.platform.startswith('win32'):
shell_command = 'git submodule foreach --quiet "\\"{0}\\" -c \\"from __future__ import print_function; print(\'"$sm_path"\', end=chr(0))\\""'
else:
shell_command = 'git submodule foreach --quiet \'"{0}" -c "from __future__ import print_function; print(\\"$sm_path\\", end=chr(0))"\''
python_exe = sys.executable or 'python'
shell_command = shell_command.format(python_exe)
repo_submodule_paths = cls.run_git_shell(shell_command, cwd=repo_abspath)
repo_submodule_paths = repo_submodule_paths.split(b'\0')[:-1]
if sys.platform.startswith('win32'):
repo_submodule_paths = (git_fspath(p.replace(b'/', b'\\')) for p in repo_submodule_paths)
else:
repo_submodule_paths = map(git_fspath, repo_submodule_paths)
return repo_submodule_paths
|
class GitArchiver(object):
'''
GitArchiver
Scan a git repository and export all tracked files, and submodules.
Checks for .gitattributes files in each directory and uses 'export-ignore'
pattern entries for ignore files in the archive.
>>> archiver = GitArchiver(main_repo_abspath='my/repo/path')
>>> archiver.create('output.zip')
'''
def __init__(self, prefix='', exclude=True, force_sub=False, extra=None, main_repo_abspath=None, git_version=None):
'''
@param prefix: Prefix used to prepend all paths in the resulting archive.
Extra file paths are only prefixed if they are not relative.
E.g. if prefix is 'foo' and extra is ['bar', '/baz'] the resulting archive will look like this:
/
baz
foo/
bar
@param exclude: Determines whether archiver should follow rules specified in .gitattributes files.
@param force_sub: Determines whether submodules are initialized and updated before archiving.
@param extra: List of extra paths to include in the resulting archive.
@param main_repo_abspath: Absolute path to the main repository (or one of subdirectories).
If given path is path to a subdirectory (but not a submodule directory!) it will be replaced
with abspath to top-level directory of the repository.
If None, current cwd is used.
@param git_version: Version of Git that determines whether various workarounds are on.
If None, tries to resolve via Git's CLI.
'''
pass
def create(self, output_path, dry_run=False, output_format=None, compresslevel=None):
'''
Create the archive at output_file_path.
Type of the archive is determined either by extension of output_file_path or by output_format.
Supported formats are: gz, zip, bz2, xz, tar, tgz, txz
@param output_path: Output file path.
@param dry_run: Determines whether create should do nothing but print what it would archive.
@param output_format: Determines format of the output archive. If None, format is determined from extension
of output_file_path.
@param compresslevel: Optional compression level. Interpretation depends on the output format.
'''
pass
def add_file(file_path, arcname):
pass
def add_file(file_path, arcname):
pass
def archiver(file_path, arcname):
pass
def archiver(file_path, arcname):
pass
def is_file_excluded(self, repo_abspath, repo_file_path):
'''
Checks whether file at a given path is excluded.
@param repo_abspath: Absolute path to the git repository.
@param repo_file_path: Path to a file relative to repo_abspath.
@return: True if file should be excluded. Otherwise False.
'''
pass
def archive_all_files(self, archiver):
'''
Archive all files using archiver.
@param archiver: Callable that accepts 2 arguments:
abspath to file on the system and relative path within archive.
'''
pass
def walk_git_files(self, repo_path=fspath('')):
'''
An iterator method that yields a file path relative to main_repo_abspath
for each file that should be included in the archive.
Skips those that match the exclusion patterns found in
any discovered .gitattributes files along the way.
Recurs into submodules as well.
@param repo_path: Path to the git submodule repository relative to main_repo_abspath.
@return: Generator to traverse files under git control relative to main_repo_abspath.
'''
pass
def check_git_attr(self, repo_abspath, attrs):
'''
Generator that returns git attributes for received paths relative to repo_abspath.
>>> archiver = GitArchiver(...)
>>> g = archiver.check_git_attr('repo_path', ['export-ignore'])
>>> next(g)
>>> attrs = g.send('relative_path')
>>> print(attrs['export-ignore'])
@param repo_abspath: Absolute path to a git repository.
@param attrs: Attributes to check
'''
pass
def make_process():
pass
def read_attrs(process, repo_file_path):
pass
def read_attrs_old(process, repo_file_path):
'''
Compatibility with versions 1.8.5 and below that do not recognize -z for output.
'''
pass
def resolve_git_main_repo_abspath(self, abspath):
'''
Return absolute path to the repo for a given path.
'''
pass
@classmethod
def run_git_shell(cls, cmd, cwd=None):
'''
Run git shell command, read output and decode it into a unicode string.
@param cmd: Command to be executed.
@param cwd: Working directory.
@return: Output of the command.
@raise CalledProcessError: Raises exception if return code of the command is non-zero.
'''
pass
@classmethod
def get_git_version(cls):
'''
Return version of git current shell points to.
If version cannot be parsed None is returned.
'''
pass
@classmethod
def list_repo_files(cls, repo_abspath):
'''
Return a list of all files as seen by git in a given repo.
'''
pass
@classmethod
def list_repo_submodules(cls, repo_abspath):
'''
Return a list of all direct submodules as seen by git in a given repo.
'''
pass
| 23 | 13 | 27 | 6 | 17 | 6 | 4 | 0.39 | 1 | 12 | 0 | 0 | 7 | 8 | 11 | 11 | 449 | 103 | 254 | 81 | 229 | 99 | 217 | 76 | 196 | 10 | 1 | 4 | 72 |
142,649 |
Kentzo/git-archive-all
|
Kentzo_git-archive-all/test_git_archive_all.py
|
test_git_archive_all.Repo
|
class Repo:
def __init__(self, path):
self.path = os.path.abspath(fspath(path))
def init(self):
os.mkdir(self.path)
check_call(['git', 'init'], cwd=self.path)
def add(self, rel_path, record):
if record.kind == 'file':
return self.add_file(rel_path, record.contents)
elif record.kind == 'dir':
return self.add_dir(rel_path, record.contents)
elif record.kind == 'submodule':
return self.add_submodule(rel_path, record.contents)
else:
raise ValueError
def add_file(self, rel_path, contents):
file_path = os_path_join(self.path, rel_path)
with open(file_path, 'wb') as f:
f.write(contents)
check_call(['git', 'add', as_posix(os.path.normpath(file_path))], cwd=self.path)
return file_path
def add_dir(self, rel_path, contents):
dir_path = os_path_join(self.path, rel_path)
makedirs(dir_path)
for k, v in contents.items():
self.add(as_posix(os.path.normpath(os_path_join(dir_path, k))), v)
check_call(['git', 'add', dir_path], cwd=self.path)
return dir_path
def add_submodule(self, rel_path, contents):
submodule_path = os_path_join(self.path, rel_path)
r = Repo(submodule_path)
r.init()
r.add_dir('.', contents)
r.commit('init')
check_call(['git', 'submodule', 'add', as_posix(os.path.normpath(submodule_path))], cwd=self.path)
return submodule_path
def commit(self, message):
check_call(['git', 'commit', '-m', 'init'], cwd=self.path)
def archive(self, path, exclude=True):
a = GitArchiver(exclude=exclude, main_repo_abspath=self.path)
a.create(path)
|
class Repo:
def __init__(self, path):
pass
def init(self):
pass
def add(self, rel_path, record):
pass
def add_file(self, rel_path, contents):
pass
def add_dir(self, rel_path, contents):
pass
def add_submodule(self, rel_path, contents):
pass
def commit(self, message):
pass
def archive(self, path, exclude=True):
pass
| 9 | 0 | 6 | 1 | 5 | 0 | 2 | 0 | 0 | 2 | 1 | 0 | 8 | 1 | 8 | 8 | 52 | 11 | 41 | 17 | 32 | 0 | 38 | 16 | 29 | 4 | 0 | 1 | 12 |
142,650 |
KeplerGO/K2fov
|
KeplerGO_K2fov/K2fov/tests/test_rotate2.py
|
K2fov.tests.test_rotate2.TestRotationOrder
|
class TestRotationOrder(unittest.TestCase):
def testSmoke(self):
self.anglePairTestX(30,60)
self.anglePairTestY(30,60)
def XtestAll(self):
for ang1 in range(0, 361, 10):
for ang2 in range(-90, 91, 10):
self.anglePairTest(ang1, ang2)
def anglePairTestX(self, theta_deg, phi_deg):
ct = np.cos( np.radians(theta_deg))
st = np.sin( np.radians(theta_deg))
cp = np.cos( np.radians(phi_deg))
sp = np.sin( np.radians(phi_deg))
e1 = ct*cp
e2 = st
e3 = -ct*sp
x = np.array( [1,0,0])
exp = np.array( [e1, e2, e3] )
Rz = r.rotateInZMat(theta_deg)
Ry = r.rotateInYMat(phi_deg)
rotMat = np.dot(Ry, Rz)
calc = np.dot(rotMat, x)
msg = "Angles %.0f %.0f: Expect %s, Calc %s" \
%(theta_deg, phi_deg, exp, calc)
for i in range(3):
self.assertAlmostEqual(exp[i], calc[i], 6, msg)
def anglePairTestY(self, theta_deg, phi_deg):
ct = np.cos( np.radians(theta_deg))
st = np.sin( np.radians(theta_deg))
cp = np.cos( np.radians(phi_deg))
sp = np.sin( np.radians(phi_deg))
e1 = -st*cp
e2 = ct
e3 = st*sp
y = np.array( [0,1,0])
exp = np.array( [e1, e2, e3] )
Rz = r.rotateInZMat(theta_deg)
Ry = r.rotateInYMat(phi_deg)
rotMat = np.dot(Ry, Rz)
calc = np.dot(rotMat, y)
msg = "Angles %.0f %.0f: Expect %s, Calc %s" \
%(theta_deg, phi_deg, exp, calc)
for i in range(3):
self.assertAlmostEqual(exp[i], calc[i], 6, msg)
def anglePairTestZ(self, theta_deg, phi_deg):
ct = np.cos( np.radians(theta_deg))
st = np.sin( np.radians(theta_deg))
cp = np.cos( np.radians(phi_deg))
sp = np.sin( np.radians(phi_deg))
e1 = sp
e2 = 0
e3 = cp
z = np.array( [0,0,1])
exp = np.array( [e1, e2, e3] )
Rz = r.rotateInZMat(theta_deg)
Ry = r.rotateInYMat(phi_deg)
rotMat = np.dot(Ry, Rz)
calc = np.dot(rotMat, z)
msg = "Angles %.0f %.0f: Expect %s, Calc %s" \
%(theta_deg, phi_deg, exp, calc)
for i in range(3):
self.assertAlmostEqual(exp[i], calc[i], 6, msg)
|
class TestRotationOrder(unittest.TestCase):
def testSmoke(self):
pass
def XtestAll(self):
pass
def anglePairTestX(self, theta_deg, phi_deg):
pass
def anglePairTestY(self, theta_deg, phi_deg):
pass
def anglePairTestZ(self, theta_deg, phi_deg):
pass
| 6 | 0 | 16 | 4 | 12 | 0 | 2 | 0 | 1 | 1 | 0 | 0 | 5 | 0 | 5 | 77 | 94 | 32 | 62 | 53 | 56 | 0 | 59 | 53 | 53 | 3 | 2 | 2 | 10 |
142,651 |
KeplerGO/K2fov
|
KeplerGO_K2fov/K2fov/tests/test_rotate2.py
|
K2fov.tests.test_rotate2.TestSingleRotation
|
class TestSingleRotation(unittest.TestCase):
def testXAroundZ(self):
for deg in np.arange(0, 360):
ct = np.cos( np.radians(deg))
st = np.sin( np.radians(deg))
v = np.array([1,0,0])
exp = np.array([ ct, st, 0])
Rmat = r.rotateInZMat(deg)
calc = np.dot(Rmat, v)
msg = "Angle %.0f: Expect %s, Calc %s" %(deg, exp, calc)
for i in range(3):
self.assertAlmostEqual(exp[i], calc[i], 6, msg)
def testYAroundZ(self):
for deg in np.arange(0, 360):
ct = np.cos( np.radians(deg))
st = np.sin( np.radians(deg))
v = np.array([0,1,0])
exp = np.array([ -st, ct, 0])
Rmat = r.rotateInZMat(deg)
calc = np.dot(Rmat, v)
msg = "Angle %.0f: Expect %s, Calc %s" %(deg, exp, calc)
for i in range(3):
self.assertAlmostEqual(exp[i], calc[i], 6, msg)
def testXAroundY(self):
for deg in np.arange(0, 360):
ct = np.cos( np.radians(deg))
st = np.sin( np.radians(deg))
v = np.array([1,0,0])
exp = np.array([ ct, 0, -st])
Rmat = r.rotateInYMat(deg)
calc = np.dot(Rmat, v)
msg = "Angle %.0f deg:\n Expect %s, Calc %s" \
%(deg, exp, calc)
for i in range(3):
self.assertAlmostEqual(exp[i], calc[i], 6, msg)
def testZAroundY(self):
for deg in np.arange(0, 360):
ct = np.cos( np.radians(deg))
st = np.sin( np.radians(deg))
v = np.array([0,0,1])
exp = np.array([ st, 0, ct])
Rmat = r.rotateInYMat(deg)
calc = np.dot(Rmat, v)
msg = "Angle %.0f deg:\n Expect %s, Calc %s" \
%(deg, exp, calc)
for i in range(3):
self.assertAlmostEqual(exp[i], calc[i], 6, msg)
|
class TestSingleRotation(unittest.TestCase):
def testXAroundZ(self):
pass
def testYAroundZ(self):
pass
def testXAroundY(self):
pass
def testZAroundY(self):
pass
| 5 | 0 | 15 | 3 | 12 | 0 | 3 | 0 | 1 | 1 | 0 | 0 | 4 | 0 | 4 | 76 | 66 | 19 | 47 | 41 | 42 | 0 | 45 | 41 | 40 | 3 | 2 | 2 | 12 |
142,652 |
KeplerGO/K2fov
|
KeplerGO_K2fov/K2fov/tests/test_rotate.py
|
K2fov.tests.test_rotate.TestRaDecFromVec
|
class TestRaDecFromVec(unittest.TestCase):
def testRa1(self):
eList = [0, 90, 180, 270]
vecList = [ [1,0,0],
[0,1,0],
[-1,0,0],
[0,-1,0],
]
for vec, expect in zip(vecList, eList):
calc = r.raDecFromVec(vec)
msg = "Expected %s, Calculated %s" %(expect, calc[0])
self.assertAlmostEqual(expect, calc[0], 10, msg)
def testRa2(self):
a = .5
b = np.sqrt(3)/2.
eList = [30, 60, 120, 150, 210, 240, 300, 330]
vecList = [ [b, a, 0],
[a, b, 0],
[-a, b, 0],
[-b, a, 0],
[-b, -a, 0],
[-a, -b, 0],
[a, -b, 0],
[b, -a, 0],
]
for vec, expect in zip(vecList, eList):
calc = r.raDecFromVec(vec)
msg = "Expected %s, Calculated %s" %(expect, calc[0])
self.assertAlmostEqual(expect, calc[0], 10, msg)
|
class TestRaDecFromVec(unittest.TestCase):
def testRa1(self):
pass
def testRa2(self):
pass
| 3 | 0 | 15 | 1 | 14 | 0 | 2 | 0 | 1 | 1 | 0 | 0 | 2 | 0 | 2 | 74 | 32 | 3 | 29 | 15 | 26 | 0 | 17 | 15 | 14 | 2 | 2 | 1 | 4 |
142,653 |
KeplerGO/K2fov
|
KeplerGO_K2fov/K2fov/tests/test_rotate.py
|
K2fov.tests.test_rotate.TestRotationMatrix
|
class TestRotationMatrix(unittest.TestCase):
def testRa1(self):
a = np.array([1,0,0])
R = r.rightAscensionRotationMatrix(90.)
b = np.dot(R, a)
expect = [0,1,0]
msg = "Expected %s, Calculated %s" %(expect, b)
for i in range(3):
self.assertAlmostEqual(expect[i], b[i], 10)
def testRa2(self):
a = r.vecFromRaDec(285, 0)
R = r.rightAscensionRotationMatrix(-285)
b = np.dot(R, a)
expect = [1,0,0]
msg = "Expected %s, Calculated %s" %(expect, b)
for i in range(3):
self.assertAlmostEqual(expect[i], b[i], 10)
def testRa2(self):
a = r.vecFromRaDec(0, 45)
R = r.rightAscensionRotationMatrix(30)
b = np.dot(R, a)
new = r.raDecFromVec(b)
expect = [30, 45]
msg = "Expected %s, Calculated %s" %(expect, new)
for i in range(2):
self.assertAlmostEqual(expect[i], new[i], 10)
def testDec1(self):
a = np.array([1,0,0])
R = r.declinationRotationMatrix(90.)
b = np.dot(R, a)
expect = [0,0,1]
msg = "Expected %s, Calculated %s" %(expect, b)
for i in range(3):
self.assertAlmostEqual(expect[i], b[i], 10)
def testDec2(self):
a = r.vecFromRaDec(0, 45)
R = r.declinationRotationMatrix(-45)
b = np.dot(R, a)
expect = [1,0,0]
msg = "Expected %s, Calculated %s" %(expect, b)
for i in range(3):
self.assertAlmostEqual(expect[i], b[i], 10)
def testRaDec(self):
a = r.vecFromRaDec(285, 45)
Ra = r.rightAscensionRotationMatrix(-285)
Rd = r.declinationRotationMatrix(-45)
b = np.dot(Rd, np.dot(Ra, a))
expect = [1,0,0]
msg = "Expected %s, Calculated %s" %(expect, b)
for i in range(3):
self.assertAlmostEqual(expect[i], b[i], 10)
|
class TestRotationMatrix(unittest.TestCase):
def testRa1(self):
pass
def testRa2(self):
pass
def testRa2(self):
pass
def testDec1(self):
pass
def testDec2(self):
pass
def testRaDec(self):
pass
| 7 | 0 | 9 | 1 | 8 | 0 | 2 | 0 | 1 | 1 | 0 | 0 | 6 | 0 | 6 | 78 | 64 | 13 | 51 | 45 | 44 | 0 | 51 | 45 | 44 | 2 | 2 | 1 | 12 |
142,654 |
KeplerGO/K2fov
|
KeplerGO_K2fov/K2fov/__init__.py
|
K2fov.Highlight
|
class Highlight:
"""Defines colors for highlighting words in the terminal."""
RED = "\033[0;31m"
GREEN = "\033[0;32m"
YELLOW = "\033[0;33m"
BLUE = "\033[0;34m"
PURPLE = "\033[0;35m"
CYAN = "\033[0;36m"
END = '\033[0m'
|
class Highlight:
'''Defines colors for highlighting words in the terminal.'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0.13 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 | 0 | 8 | 8 | 7 | 1 | 8 | 8 | 7 | 0 | 0 | 0 | 0 |
142,655 |
KeplerGO/K2fov
|
KeplerGO_K2fov/K2fov/c9.py
|
K2fov.c9.C9FootprintPlot
|
class C9FootprintPlot(object):
"""Create a plot showing the C9 footprint and superstamp.
"""
def __init__(self, axes=None):
if axes is None:
import matplotlib.pyplot as pl
params = {
'axes.linewidth': 1.,
'axes.labelsize': 20,
'font.family': 'sans-serif',
'font.size': 22,
'legend.fontsize': 14,
'xtick.labelsize': 16,
'ytick.labelsize': 16,
'text.usetex': False,
}
pl.rcParams.update(params)
self.fig = pl.figure(figsize=(8, 4.5))
self.ax = self.fig.add_subplot(111)
else:
self.ax = axes
self.ax.set_ylim([-30, -14])
self.ax.set_xlim([280, 260])
self.ax.set_xlabel("RA [deg]")
self.ax.set_ylabel("Dec [deg]")
def plot_outline(self, annotate_late_targets=False, annotate_channels=False):
"""Plots the coverage of both the channels and the C9 superstamp."""
fov = getKeplerFov(9)
# Plot the superstamp
superstamp_patches = []
for ch in SUPERSTAMP["channels"]:
v_col = SUPERSTAMP["channels"][ch]["vertices_col"]
v_row = SUPERSTAMP["channels"][ch]["vertices_row"]
radec = np.array([
fov.getRaDecForChannelColRow(int(ch),
v_col[idx],
v_row[idx])
for idx in range(len(v_col))
])
patch = self.ax.fill(radec[:, 0], radec[:, 1],
lw=0, facecolor="#27ae60", zorder=100)
superstamp_patches.append(patch)
# Plot the late target masks
late_target_patches = []
for mask in LATE_TARGETS["masks"]:
ch = mask["channel"]
v_col = mask["vertices_col"]
v_row = mask["vertices_row"]
radec = np.array([
fov.getRaDecForChannelColRow(int(ch),
v_col[idx],
v_row[idx])
for idx in range(len(v_col))
])
patch = self.ax.fill(radec[:, 0], radec[:, 1],
lw=0, facecolor="#27ae60", zorder=201)
late_target_patches.append(patch)
if annotate_late_targets and 'context' not in mask["name"]:
self.ax.text(np.mean(radec[:, 0]), np.mean(radec[:, 1]), ' ' + mask["name"],
ha="left", va="center",
zorder=950, fontsize=10,
color="#c0392b", clip_on=True)
# Plot all channel outlines
channel_patches = []
corners = fov.getCoordsOfChannelCorners()
for ch in np.arange(1, 85, dtype=int):
if ch in fov.brokenChannels:
continue # certain channel are no longer used
idx = np.where(corners[::, 2] == ch)
mdl = int(corners[idx, 0][0][0])
out = int(corners[idx, 1][0][0])
ra = corners[idx, 3][0]
dec = corners[idx, 4][0]
patch = self.ax.fill(np.concatenate((ra, ra[:1])),
np.concatenate((dec, dec[:1])),
lw=0, facecolor="#cccccc", zorder=50)
channel_patches.append(patch)
if annotate_channels:
txt = "{}.{}\n#{}".format(mdl, out, ch)
self.ax.text(np.mean(ra), np.mean(dec), txt,
ha="center", va="center",
zorder=900, fontsize=14,
color="#000000", clip_on=True)
return superstamp_patches, channel_patches
|
class C9FootprintPlot(object):
'''Create a plot showing the C9 footprint and superstamp.
'''
def __init__(self, axes=None):
pass
def plot_outline(self, annotate_late_targets=False, annotate_channels=False):
'''Plots the coverage of both the channels and the C9 superstamp.'''
pass
| 3 | 2 | 42 | 2 | 39 | 6 | 5 | 0.17 | 1 | 2 | 0 | 0 | 2 | 2 | 2 | 2 | 88 | 4 | 78 | 23 | 74 | 13 | 48 | 23 | 44 | 7 | 1 | 2 | 9 |
142,656 |
KeplerGO/K2fov
|
KeplerGO_K2fov/K2fov/fov.py
|
K2fov.fov.KeplerModOut
|
class KeplerModOut(Polygon):
def __init__(self, channel, x=None, y=None, pointList=None):
"""A Polygon with a channel identification attached to it"""
Polygon.__init__(self, x, y, pointList)
self.channel = channel
def getChannel(self):
return self.channel
def identifyModule(self, modout=False):
"""Write the name of a channel/modout on a plot
Optional Inputs:
-----------
modout
(boolean). If True, write module and output. Otherwise
write channel number
Returns:
------------
**None**
Output:
-----------
Channel numbers are written to the current axis.
"""
x, y = np.mean(self.polygon, 0)
if modout:
modout = modOutFromChannel(self.channel)
mp.text(
x,
y,
"%i-%i" % (modout[0], modout[1]),
fontsize=8,
ha="center",
clip_on=True,
)
else:
mp.text(x, y, "%i" % (self.channel), fontsize=8, ha="center", clip_on=True)
|
class KeplerModOut(Polygon):
def __init__(self, channel, x=None, y=None, pointList=None):
'''A Polygon with a channel identification attached to it'''
pass
def getChannel(self):
pass
def identifyModule(self, modout=False):
'''Write the name of a channel/modout on a plot
Optional Inputs:
-----------
modout
(boolean). If True, write module and output. Otherwise
write channel number
Returns:
------------
**None**
Output:
-----------
Channel numbers are written to the current axis.
'''
pass
| 4 | 2 | 13 | 2 | 6 | 5 | 1 | 0.7 | 1 | 0 | 0 | 0 | 3 | 1 | 3 | 8 | 41 | 7 | 20 | 6 | 16 | 14 | 12 | 6 | 8 | 2 | 1 | 1 | 4 |
142,657 |
KeplerGO/K2fov
|
KeplerGO_K2fov/K2fov/fov.py
|
K2fov.fov.Polygon
|
class Polygon:
def __init__(self, x=None, y=None, pointList=None):
"""
An abstract class to represent a polygon in space
Input:
------------
pointList A list of (x,y) pairs. Eg
[ (0,0), (1,0), (0,1), (1.1)]
The edges of the polygon join adjacent elements
of this list, so the order matters. The last
point is assumed to connect to the first point.
"""
if x is not None and y is not None:
pointList = []
for xi, yi in zip(x, y):
pointList.append((xi, yi))
if pointList is None:
raise ValueError("Must supply x,y or pointList")
self.polygon = np.array(pointList)
def __str__(self):
return self.polygon.__str__()
def __repr__(self):
return self.polygon.__repr__()
def isPointInside(self, xp, yp):
"""Is the given point inside the polygon?
Input:
------------
xp, yp
(floats) Coordinates of point in same units that
array vertices are specified when object created.
Returns:
-----------
**True** / **False**
"""
point = np.array([xp, yp]).transpose()
polygon = self.polygon
numVert, numDim = polygon.shape
# Subtract each point from the previous one.
polyVec = np.roll(polygon, -1, 0) - polygon
# Get the vector from each vertex to the given point
pointVec = point - polygon
crossProduct = np.cross(polyVec, pointVec)
if np.all(crossProduct < 0) or np.all(crossProduct > 0):
return True
return False
def draw(self, **kwargs):
"""Draw the polygon
Optional Inputs:
------------
All optional inputs are passed to ``matplotlib.patches.Polygon``
Notes:
---------
Does not accept maptype as an argument.
"""
ax = mp.gca()
shape = matplotlib.patches.Polygon(self.polygon, **kwargs)
ax.add_artist(shape)
|
class Polygon:
def __init__(self, x=None, y=None, pointList=None):
'''
An abstract class to represent a polygon in space
Input:
------------
pointList A list of (x,y) pairs. Eg
[ (0,0), (1,0), (0,1), (1.1)]
The edges of the polygon join adjacent elements
of this list, so the order matters. The last
point is assumed to connect to the first point.
'''
pass
def __str__(self):
pass
def __repr__(self):
pass
def isPointInside(self, xp, yp):
'''Is the given point inside the polygon?
Input:
------------
xp, yp
(floats) Coordinates of point in same units that
array vertices are specified when object created.
Returns:
-----------
**True** / **False**
'''
pass
def draw(self, **kwargs):
'''Draw the polygon
Optional Inputs:
------------
All optional inputs are passed to ``matplotlib.patches.Polygon``
Notes:
---------
Does not accept maptype as an argument.
'''
pass
| 6 | 3 | 14 | 3 | 5 | 6 | 2 | 1.11 | 0 | 2 | 0 | 1 | 5 | 1 | 5 | 5 | 74 | 17 | 27 | 16 | 21 | 30 | 27 | 16 | 21 | 4 | 0 | 2 | 9 |
142,658 |
KeplerGO/K2fov
|
KeplerGO_K2fov/K2fov/plot.py
|
K2fov.plot.K2FootprintPlot
|
class K2FootprintPlot(object):
def __init__(self, axes=None, figsize=(16, 5)):
if axes is None:
self.fig = pl.figure(figsize=figsize)
self.ax = self.fig.add_subplot(111)
else:
self.ax = axes
self.ax.set_xticks(np.arange(0, 361, 30))
self.ax.set_ylim([-37, 37])
self.ax.set_xlim([360, 0])
self.ax.xaxis.set_major_formatter(FuncFormatter(rafmt))
self.ax.yaxis.set_major_formatter(FuncFormatter(decfmt))
self.ax.set_xlabel("RA")
self.ax.set_ylabel("Dec")
try:
self.fig.tight_layout()
except AttributeError: # We didn't create a fig above
pass
def plot_campaigns(self, campaigns=21):
"""Plot the outlines of all campaigns."""
for c in range(campaigns):
self.plot_campaign_outline(c)
def plot_campaign_outline(self, campaign=0, facecolor="#666666", text=None):
"""Plot the outline of a campaign as a contiguous gray patch.
Parameters
----------
campaign : int
K2 Campaign number.
facecolor : str
Color of the patch.
"""
# The outline is composed of two filled rectangles,
# defined by the first coordinate of the corner of four channels each
fov = getKeplerFov(campaign)
corners = fov.getCoordsOfChannelCorners()
for rectangle in [[4, 75, 84, 11], [15, 56, 71, 32]]:
ra_outline, dec_outline = [], []
for channel in rectangle:
idx = np.where(corners[::, 2] == channel)
ra_outline.append(corners[idx, 3][0][0])
dec_outline.append(corners[idx, 4][0][0])
ra = np.array(ra_outline + ra_outline[:1])
dec = np.array(dec_outline + dec_outline[:1])
if campaign == 1002: # Overlaps the meridian
ra[ra > 180] -= 360
myfill = self.ax.fill(ra, dec,
facecolor=facecolor,
zorder=151, lw=0)
# Print the campaign number on top of the outline
if text is None:
text = "{}".format(campaign)
ra_center, dec_center, _ = fov.getBoresight()
if campaign == 6:
dec_center -= 2
elif campaign == 12:
ra_center += 0.5
dec_center -= 1.7
elif campaign == 13:
dec_center -= 1.5
elif campaign == 16:
dec_center += 1.5
elif campaign == 18:
dec_center -= 1.5
elif campaign == 19:
dec_center += 1.7
elif campaign == 20:
dec_center += 1.5
offsets = {5: (40, -20), 16: (-20, 40), 18: (-15, -50)}
if campaign in [5]:
pl.annotate(text, xy=(ra_center, dec_center),
xycoords='data', ha='center',
xytext=offsets[campaign], textcoords='offset points',
size=18, zorder=0, color=facecolor,
arrowprops=dict(arrowstyle="-", ec=facecolor, lw=2))
else:
self.ax.text(ra_center, dec_center, text,
fontsize=18, color="white",
ha="center", va="center",
zorder=155)
return myfill
def plot_campaign(self, campaign=0, annotate_channels=True, **kwargs):
"""Plot all the active channels of a campaign."""
fov = getKeplerFov(campaign)
corners = fov.getCoordsOfChannelCorners()
for ch in np.arange(1, 85, dtype=int):
if ch in fov.brokenChannels:
continue # certain channel are no longer used
idx = np.where(corners[::, 2] == ch)
mdl = int(corners[idx, 0][0][0])
out = int(corners[idx, 1][0][0])
ra = corners[idx, 3][0]
if campaign == 1002: # Concept Engineering Test overlapped the meridian
ra[ra < 180] += 360
dec = corners[idx, 4][0]
self.ax.fill(np.concatenate((ra, ra[:1])),
np.concatenate((dec, dec[:1])), **kwargs)
if annotate_channels:
txt = "K2C{0}\n{1}.{2}\n#{3}".format(campaign, mdl, out, ch)
txt = "{1}.{2}\n#{3}".format(campaign, mdl, out, ch)
self.ax.text(np.mean(ra), np.mean(dec), txt,
ha="center", va="center",
zorder=91, fontsize=10,
color="#000000", clip_on=True)
def plot_ecliptic(self, size=100):
try:
from astropy.coordinates import SkyCoord
except ImportError:
logger.error("You need to install AstroPy for this feature.")
return None
try:
icrs = SkyCoord(np.linspace(0.1, 359, num=size), 0,
unit="deg", frame="barycentrictrueecliptic").icrs
self.ax.plot(icrs.ra, icrs.dec, lw=2, color="#666666")
except ValueError:
# only AstroPy 1.1 and up support ecliptic coordinates;
# avoid crashing if an older version of AstroPy is at play
pass
def plot_galactic(self, size=150, color="#bbbbbb", textcolor="#777777"):
try:
from astropy.coordinates import SkyCoord
except ImportError:
logger.error("You need to install AstroPy for this feature.")
return None
icrs = SkyCoord(np.linspace(0, 359, num=size), 0,
unit="deg", frame="galactic").icrs
self.ax.plot(icrs.ra, icrs.dec, lw=20, color=color)
self.ax.text(114, -12, "Galactic Plane", rotation=65,
fontsize=12, color=textcolor)
def plot(self):
self.plot_galactic()
self.plot_ecliptic()
self.plot_campaigns()
|
class K2FootprintPlot(object):
def __init__(self, axes=None, figsize=(16, 5)):
pass
def plot_campaigns(self, campaigns=21):
'''Plot the outlines of all campaigns.'''
pass
def plot_campaign_outline(self, campaign=0, facecolor="#666666", text=None):
'''Plot the outline of a campaign as a contiguous gray patch.
Parameters
----------
campaign : int
K2 Campaign number.
facecolor : str
Color of the patch.
'''
pass
def plot_campaigns(self, campaigns=21):
'''Plot all the active channels of a campaign.'''
pass
def plot_ecliptic(self, size=100):
pass
def plot_galactic(self, size=150, color="#bbbbbb", textcolor="#777777"):
pass
def plot_campaigns(self, campaigns=21):
pass
| 8 | 3 | 19 | 0 | 17 | 4 | 4 | 0.21 | 1 | 6 | 0 | 0 | 7 | 2 | 7 | 7 | 142 | 10 | 117 | 35 | 107 | 25 | 93 | 35 | 83 | 13 | 1 | 2 | 29 |
142,659 |
KeplerGO/K2fov
|
KeplerGO_K2fov/K2fov/projection.py
|
K2fov.projection.Cylindrical
|
class Cylindrical(Projection):
"""Stunted cyclindical projection that assumes
projection point is always at sky point 0,0
"""
def __init__(self):
self.ra0_deg = 0
self.dec0_deg = 0
def skyToPix(self, ra_deg, dec_deg, **kwargs):
x = np.radians(ra_deg)
y = np.sin( np.radians(dec_deg))
return x, y
def pixToSky(self, x, y, **kwargs):
ra = np.degrees(x)
dec = np.degrees(np.arcsin(y))
return ra, dec
|
class Cylindrical(Projection):
'''Stunted cyclindical projection that assumes
projection point is always at sky point 0,0
'''
def __init__(self):
pass
def skyToPix(self, ra_deg, dec_deg, **kwargs):
pass
def pixToSky(self, x, y, **kwargs):
pass
| 4 | 1 | 4 | 0 | 4 | 0 | 1 | 0.25 | 1 | 0 | 0 | 0 | 3 | 2 | 3 | 17 | 17 | 2 | 12 | 10 | 8 | 3 | 12 | 10 | 8 | 1 | 1 | 0 | 3 |
142,660 |
KeplerGO/K2fov
|
KeplerGO_K2fov/K2fov/plot.py
|
K2fov.plot.K2GalacticFootprintPlot
|
class K2GalacticFootprintPlot(object):
def __init__(self, axes=None, figsize=(11, 6)):
if axes is None:
self.fig = pl.figure(figsize=figsize)
self.ax = self.fig.add_subplot(111)
else:
self.ax = axes
self.ax.set_ylim([-90, 90])
self.ax.set_xlim([190, -180])
self.ax.set_xlabel("Galactic longitude [deg]")
self.ax.set_ylabel("Galactic latitude [deg]")
try:
self.fig.tight_layout()
except AttributeError: # We didn't create a fig above
pass
def plot_ecliptic(self, size=100):
try:
from astropy.coordinates import SkyCoord
except ImportError:
logger.error("You need to install AstroPy for this feature.")
return None
try:
gal = SkyCoord(np.linspace(0, 359, num=size), 0,
unit="deg", frame="barycentrictrueecliptic").galactic
# Hack to avoid line crossing zero:
l = gal.l.deg
l[l > 180] -= 360
idx = np.argsort(l)
self.ax.plot(l[idx], gal.b.deg[idx], lw=2, color="#666666")
except ValueError:
# only AstroPy 1.1 and up support ecliptic coordinates;
# avoid crashing if an older version of AstroPy is at play
pass
def plot_campaigns(self, campaigns=21):
"""Plot the outlines of all campaigns."""
for c in range(campaigns):
self.plot_campaign_outline(c)
def plot_campaign_outline(self, campaign=0, facecolor="#666666", text=None, dashed=False):
"""Plot the outline of a campaign as a contiguous gray patch.
Parameters
----------
campaign : int
K2 Campaign number.
facecolor : str
Color of the patch.
"""
try:
from astropy.coordinates import SkyCoord
except ImportError:
logger.error("You need to install AstroPy for this feature.")
return None
# The outline is composed of two filled rectangles,
# defined by the first coordinate of the corner of four channels each
fov = getKeplerFov(campaign)
corners = fov.getCoordsOfChannelCorners()
for rectangle in [[4, 75, 84, 11], [15, 56, 71, 32]]:
ra_outline, dec_outline = [], []
for channel in rectangle:
idx = np.where(corners[::, 2] == channel)
ra_outline.append(corners[idx, 3][0][0])
dec_outline.append(corners[idx, 4][0][0])
crd = SkyCoord(ra_outline, dec_outline, unit='deg')
l = crd.galactic.l.deg
if campaign not in [4, 13, 1713]:
l[l > 180] -= 360
l, b = list(l), list(crd.galactic.b.deg)
if dashed:
myfill = self.ax.fill(l + l[:1],
b + b[:1],
facecolor=facecolor, zorder=151, lw=2, ls='dashed',
edgecolor='white')
# myfill = self.ax.plot(l + l[:1],
# b + b[:1],
# zorder=200, lw=2,
# ls='dotted', color='white')
else:
myfill = self.ax.fill(l + l[:1],
b + b[:1],
facecolor=facecolor, zorder=151, lw=0)
# Print the campaign number on top of the outline
ra, dec, roll = fov.getBoresight()
gal = SkyCoord(ra, dec, unit='deg').galactic
l, b = gal.l.deg, gal.b.deg
if l > 180:
l -= 360
if text is None:
text = "{}".format(campaign)
self.ax.text(l, b, text,
fontsize=14, color="white", ha="center", va="center",
zorder=255)
return myfill
def plot(self):
self.plot_campaigns()
|
class K2GalacticFootprintPlot(object):
def __init__(self, axes=None, figsize=(11, 6)):
pass
def plot_ecliptic(self, size=100):
pass
def plot_campaigns(self, campaigns=21):
'''Plot the outlines of all campaigns.'''
pass
def plot_campaign_outline(self, campaign=0, facecolor="#666666", text=None, dashed=False):
'''Plot the outline of a campaign as a contiguous gray patch.
Parameters
----------
campaign : int
K2 Campaign number.
facecolor : str
Color of the patch.
'''
pass
def plot_ecliptic(self, size=100):
pass
| 6 | 2 | 19 | 1 | 15 | 4 | 3 | 0.3 | 1 | 5 | 0 | 0 | 5 | 2 | 5 | 5 | 101 | 8 | 74 | 26 | 66 | 22 | 64 | 26 | 56 | 8 | 1 | 2 | 17 |
142,661 |
KeplerGO/K2fov
|
KeplerGO_K2fov/K2fov/projection.py
|
K2fov.projection.Gnomic
|
class Gnomic(Projection):
def __init__(self, ra0_deg, dec0_deg):
self.ra0_deg = ra0_deg
self.dec0_deg = dec0_deg
#Construct rotation matrix used to convert ra/dec into
#angle relative to tangent point
Rdec = rotate.declinationRotationMatrix(-self.dec0_deg)
Rra = rotate.rightAscensionRotationMatrix(-self.ra0_deg)
self.Rmatrix = np.dot(Rdec, Rra)
#Check I created the matrix correctly.
origin = rotate.vecFromRaDec(self.ra0_deg, self.dec0_deg)
origin = np.dot(self.Rmatrix, origin)
assert( np.fabs(origin[0] -1 ) < 1e-9)
assert( np.fabs(origin[1]) < 1e-9)
assert( np.fabs(origin[2]) < 1e-9)
def skyToPix(self, ra_deg, dec_deg, catchInvalid=True):
ra_deg, dec_deg = self.parseInputs(ra_deg, dec_deg)
#Transform ra dec into angle away from tangent point
#using the rotation matrix
theta_rad= np.empty( (len(ra_deg),) )
phi_rad = theta_rad * 0
R = self.Rmatrix
for i in range(len(ra_deg)):
#Convert the ra/dec to a vector, then rotate so
#that the tangent point is at [1,0,0]. Then pull out
#the angle relative to the x-axis, and the angle
#around the y-z plane.
#@TODO: Can I make this faster with dot products?
vec =rotate.vecFromRaDec(ra_deg[i], dec_deg[i])
aVec = np.dot(R, vec)
#aVec = (sint, cost*cosp, cost*sinp)
sint = aVec[0]
cost = np.hypot(aVec[1], aVec[2])
theta = np.arctan2(sint, cost)
#Points more than 90 deg from tangent point need to be
#caught, or they'll be projected 180-i degrees from tangent
#point.
if catchInvalid and theta < 0:
raise ValueError("Point (%.7f %.7f) not projectable" \
%(ra_deg[i], dec_deg[i]))
cost = np.cos(theta)
cosp = aVec[1] / cost
sinp = aVec[2] / cost
phi = np.arctan2(sinp, cosp)
if phi < 0:
phi += 2*np.pi
if phi > 2*np.pi:
phi -= 2*np.pi
#Just to be explicit
theta_rad[i] = theta
phi_rad[i] = phi
#Project onto tangent plane. Negative x because we are inside
#sphere looking out (matches astronomical convention
r = 1/(np.tan(theta_rad) + 1e-10) #Prevent division by zero
x = - r * np.cos(phi_rad)
y = r * np.sin(phi_rad)
return x, y
def pixToSky(self, x, y):
x, y = self.parseInputs(x, y)
R = self.Rmatrix
invR = np.matrix(R.transpose())
ra_deg = np.empty( (len(x),))
dec_deg = np.empty( (len(x),))
for i in range(len(x)):
#-x because we are inside sphere looking out. This
#matches the astronomical convention.
phi_rad = np.arctan2(y,-x)
r = np.hypot(x,y)
theta_rad = np.arctan(r)
aVec = np.zeros((3,))
aVec[0] = np.cos(theta_rad)
aVec[1] = np.sin(theta_rad)*np.cos(phi_rad)
aVec[2] = np.sin(theta_rad)*np.sin(phi_rad)
vec = np.dot(invR, aVec)
vec = np.array(vec)[0] #Convert to 1d array
ra_deg[i], dec_deg[i] = rotate.raDecFromVec(vec)
return ra_deg, dec_deg
|
class Gnomic(Projection):
def __init__(self, ra0_deg, dec0_deg):
pass
def skyToPix(self, ra_deg, dec_deg, catchInvalid=True):
pass
def pixToSky(self, x, y):
pass
| 4 | 0 | 31 | 6 | 19 | 7 | 3 | 0.36 | 1 | 3 | 0 | 0 | 3 | 3 | 3 | 17 | 97 | 20 | 58 | 35 | 54 | 21 | 57 | 35 | 53 | 5 | 1 | 2 | 8 |
142,662 |
KeplerGO/K2fov
|
KeplerGO_K2fov/K2fov/projection.py
|
K2fov.projection.PlateCaree
|
class PlateCaree(Projection):
"""Synonym for the base class"""
pass
|
class PlateCaree(Projection):
'''Synonym for the base class'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0.5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 14 | 3 | 0 | 2 | 1 | 1 | 1 | 2 | 1 | 1 | 0 | 1 | 0 | 0 |
142,663 |
KeplerGO/K2fov
|
KeplerGO_K2fov/K2fov/projection.py
|
K2fov.projection.Projection
|
class Projection():
"""Base Projection class. Used for mapping ra and dec into
Euclidean space based on a given projection.
The primary reference for projections is Calabretta and Greisen
(2002), A&A 395, 1077
The base class implements the Plate Carree projection (\S 5.2.3)
which just maps ra dec to xy -- i.e what you would blindly do
if you knew no different. If distortion is not a concern
this is an acceptable approach
"""
def __init__(self):
self.ra0_deg = 0
self.dec0_deg = 0
def skyToPix(self, ra_deg, dec_deg):
return ra_deg, dec_deg
def pixToSky(self, x, y):
return x, y
def eulerRotate(self, ra_deg, dec_deg):
ra_deg, dec_deg = self.parseInputs(ra_deg, dec_deg)
#Transform ra dec into angle away from tangent point
#using the rotation matrix
theta_rad= np.empty( (len(ra_deg),) )
phi_rad = theta_rad * 0
R = self.Rmatrix
for i in range(len(ra_deg)):
#Convert the ra/dec to a vector, then rotate so
#that the tangent point is at [1,0,0]. Then pull out
#the angle relative to the x-axis, and the angle
#around the y-z plane.
#@TODO: Can I make this faster with dot products?
vec =rotate.vecFromRaDec(ra_deg[i], dec_deg[i])
aVec = np.dot(R, vec)
#aVec = (sint, cost*cosp, cost*sinp)
sint = aVec[0]
cost = np.hypot(aVec[1], aVec[2])
theta = np.arctan2(sint, cost)
cost = np.cos(theta)
cosp = aVec[1] / cost
sinp = aVec[2] / cost
phi = np.arctan2(sinp, cosp)
if phi < 0:
phi += 2*np.pi
if phi > 2*np.pi:
phi -= 2*np.pi
#Just to be explicit
theta_rad[i] = theta
phi_rad[i] = phi
return theta_rad, phi_rad
def parseInputs(self, ra_deg, dec_deg):
try:
len(ra_deg)
except TypeError:
ra_deg = np.array([ra_deg])
try:
len(dec_deg)
except TypeError:
dec_deg = np.array([dec_deg])
#If ra/dec aren't arrays, make them arrays
if not isinstance(ra_deg, np.ndarray):
ra_deg = np.array(ra_deg)
if not isinstance(dec_deg, np.ndarray):
dec_deg = np.array(dec_deg)
if np.logical_xor(len(ra_deg) == 1, len(dec_deg) == 1):
if len(ra_deg) == 1:
ra_deg = dec_deg *0 + ra_deg[0]
else:
dec_deg = ra_deg * 0 + dec_deg[0]
if len(ra_deg) != len(dec_deg):
raise ValueError("Input ra and dec arrays must be same length")
return ra_deg, dec_deg
def isPositiveMap(self):
"""Returns true if increasing ra increases pix in skyToPix()
"""
x0, y0 = self.skyToPix(self.ra0_deg, self.dec0_deg)
x1, y1 = self.skyToPix(self.ra0_deg + 1/3600., self.dec0_deg)
if x1 > x0:
return True
return False
def plot(self, ra_deg, dec_deg, *args, **kwargs):
x,y = self.skyToPix(ra_deg, dec_deg)
self._plot(x, y, *args, **kwargs)
def scatter(self, ra_deg, dec_deg, *args, **kwargs):
x,y = self.skyToPix(ra_deg, dec_deg)
mp.scatter(x,y, *args, **kwargs)
def text(self, ra_deg, dec_deg, s, *args, **kwargs):
x,y = self.skyToPix(ra_deg, dec_deg)
mp.text(x, y, s, *args, **kwargs)
def plotGrid(self, numLines=(5,5), lineWidth=1, colour="#777777"):
"""Plot NUMLINES[0] vertical gridlines and NUMLINES[1] horizontal gridlines,
while keeping the initial axes bounds that were present upon its calling.
Will not work for certain cases.
"""
x1, x2, y1, y2 = mp.axis()
ra1, dec0 = self.pixToSky(x1, y1)
ra0, dec1 = self.pixToSky(x2, y2)
xNum, yNum = numLines
self.raRange, self.decRange = self.getRaDecRanges(numLines)
#import pdb; pdb.set_trace()
#Guard against Ra of zero within the plot
a1 = np.abs(ra1-ra0)
a2 = np.abs( min(ra0, ra1) - (max(ra0, ra1) - 360))
if a2 < a1: #Then we straddle 360 degrees in RA
if ra0 < ra1:
ra1 -= 360
else:
ra0 -= 360
#Draw lines of constant dec
lwr = min(ra0, ra1)
upr = max(ra0, ra1)
stepX = round((upr-lwr) / float(xNum))
ra_deg = np.arange(lwr - 3*stepX, upr + 3.5*stepX, 1, dtype=np.float)
for dec in self.decRange:
self.plotLine(ra_deg, dec, '-', color = colour, linewidth = lineWidth)
#Draw lines of const ra
lwr = min(dec0, dec1)
upr = max(dec0, dec1)
stepY = round((upr-lwr) / float(yNum))
dec_deg = np.arange(dec0 - 3*stepY, dec1 + 3.5*stepY, 1, dtype=np.float)
for ra in self.raRange:
self.plotLine(ra, dec_deg, '-', color = colour, linewidth = lineWidth)
mp.axis([x1, x2, y1, y2])
def labelAxes(self, numLines=(5,5)):
"""Put labels on axes
Note: I should do better than this by picking round numbers
as the places to put the labels.
Note: If I ever do rotated projections, this simple approach
will fail.
"""
x1, x2, y1, y2 = mp.axis()
ra1, dec0 = self.pixToSky(x1, y1)
raRange, decRange = self.getRaDecRanges(numLines)
ax = mp.gca()
x_ticks = self.skyToPix(raRange, dec0)[0]
y_ticks = self.skyToPix(ra1, decRange)[1]
ax.xaxis.set_ticks(x_ticks)
ax.xaxis.set_ticklabels([str(int(i)) for i in raRange])
mp.xlabel("Right Ascension (deg)")
ax.yaxis.set_ticks(y_ticks)
ax.yaxis.set_ticklabels([str(int(i)) for i in decRange])
mp.ylabel("Declination (deg)")
def getRaDecRanges(self, numLines):
"""Pick suitable values for ra and dec ticks
Used by plotGrid and labelAxes
"""
x1, x2, y1, y2 = mp.axis()
ra0, dec0 = self.pixToSky(x1, y1)
ra1, dec1 = self.pixToSky(x2, y2)
#Deal with the case where ra range straddles 0.
#Different code for case where ra increases left to right, or decreases.
if self.isPositiveMap():
if ra1 < ra0:
ra1 += 360
else:
if ra0 < ra1:
ra0 += 360
raMid = .5*(ra0+ra1)
decMid = .5*(dec0+dec1)
xNum, yNum = numLines
stepX = round((ra1 - ra0) / xNum)
stepY = round((dec1 - dec0) / yNum)
rangeX = stepX * (xNum - 1)
rangeY = stepY * (yNum - 1)
raStart = np.round(raMid - rangeX/2.)
decStart = np.round(decMid - rangeY/2.)
raRange = np.arange(raStart, raStart + stepX*xNum, stepX)
decRange = np.arange(decStart, decStart + stepY*yNum, stepY)
raRange = np.fmod(raRange, 360.)
return raRange, decRange
def plotLine(self, ra_deg, dec_deg, *args, **kwargs):
ra_deg, dec_deg = self.parseInputs(ra_deg, dec_deg)
x,y = self.skyToPix(ra_deg, dec_deg, catchInvalid=False)
diffX = np.abs(np.diff(x))
idx1 = diffX > 3*np.mean(diffX)
idx1[idx1 + 1] = True
diffY = np.abs(np.diff(y))
idx2 = diffY > 3*np.mean(diffY)
j = 0
i0 = 0
if len(idx2) > 0:
idx2[-1] = True
idx = np.where(np.logical_or(idx1, idx2))[0]
for j in range(len(idx)):
i1 = idx[j]
self._plot(x[i0:i1], y[i0:i1], *args, **kwargs)
i0 = i1+1
def _plot(self, x, y, *args, **kwargs):
mp.plot(x,y, *args, **kwargs)
|
class Projection():
'''Base Projection class. Used for mapping ra and dec into
Euclidean space based on a given projection.
The primary reference for projections is Calabretta and Greisen
(2002), A&A 395, 1077
The base class implements the Plate Carree projection (\S 5.2.3)
which just maps ra dec to xy -- i.e what you would blindly do
if you knew no different. If distortion is not a concern
this is an acceptable approach
'''
def __init__(self):
pass
def skyToPix(self, ra_deg, dec_deg):
pass
def pixToSky(self, x, y):
pass
def eulerRotate(self, ra_deg, dec_deg):
pass
def parseInputs(self, ra_deg, dec_deg):
pass
def isPositiveMap(self):
'''Returns true if increasing ra increases pix in skyToPix()
'''
pass
def plot(self, ra_deg, dec_deg, *args, **kwargs):
pass
def scatter(self, ra_deg, dec_deg, *args, **kwargs):
pass
def text(self, ra_deg, dec_deg, s, *args, **kwargs):
pass
def plotGrid(self, numLines=(5,5), lineWidth=1, colour="#777777"):
'''Plot NUMLINES[0] vertical gridlines and NUMLINES[1] horizontal gridlines,
while keeping the initial axes bounds that were present upon its calling.
Will not work for certain cases.
'''
pass
def labelAxes(self, numLines=(5,5)):
'''Put labels on axes
Note: I should do better than this by picking round numbers
as the places to put the labels.
Note: If I ever do rotated projections, this simple approach
will fail.
'''
pass
def getRaDecRanges(self, numLines):
'''Pick suitable values for ra and dec ticks
Used by plotGrid and labelAxes
'''
pass
def plotLine(self, ra_deg, dec_deg, *args, **kwargs):
pass
def _plot(self, x, y, *args, **kwargs):
pass
| 15 | 5 | 16 | 3 | 10 | 2 | 2 | 0.29 | 0 | 6 | 0 | 5 | 14 | 4 | 14 | 14 | 256 | 69 | 147 | 78 | 132 | 42 | 144 | 78 | 129 | 8 | 0 | 2 | 34 |
142,664 |
KeplerGO/K2fov
|
KeplerGO_K2fov/K2fov/tests/test_fov.py
|
K2fov.tests.test_fov.TestFov
|
class TestFov(unittest.TestCase):
def testOrigin(self):
"""Test orientation of FOV in null position"""
f = fov.KeplerFov(0,0,0)
#Ch 43 is 13.3, near centre
a,d = f.getRaDecForChannelColRow(43, 1000, 1000)
self.assertTrue( np.fabs(a) < .2)
self.assertTrue( np.fabs(d) < .2)
#Check that mod 3 is north of mod 8
a3, d3 = f.getRaDecForChannelColRow(5,0,0)
a8, d8 = f.getRaDecForChannelColRow(21,0,0)
self.assertTrue( d3>d8)
#Check that mod 11's ra is greater than mod 13
a13, d13 = f.getRaDecForChannelColRow(43,0,0)
a11, d11 = f.getRaDecForChannelColRow(33,0,0)
self.assertTrue( a11 > a13)
#Check that mod 20's ra is less than mod 13
a13, d13 = f.getRaDecForChannelColRow(43,0,0)
a20, d20 = f.getRaDecForChannelColRow(69,0,0)
self.assertTrue(a20 > 350)
self.assertTrue( a20-360 < a13)
def testRotateClockwise(self):
"""Test orientation of FOV in null position"""
f = fov.KeplerFov(0,0,90)
#Ch 43 is 13.3, near centre
a,d = f.getRaDecForChannelColRow(43, 1000, 1000)
self.assertTrue( np.fabs(a-360) < .2)
self.assertTrue( np.fabs(d) < .2)
#Check that mod 3's ra less than mod 8
a3, d3 = f.getRaDecForChannelColRow(5,0,0)
a8, d8 = f.getRaDecForChannelColRow(21,0,0)
self.assertTrue(a3 > 350)
self.assertTrue(a3 < a8)
#Check that mod 11 is north of mod 13
a13, d13 = f.getRaDecForChannelColRow(43,0,0)
a11, d11 = f.getRaDecForChannelColRow(33,0,0)
self.assertTrue( d11 > d13)
#Check that mod 20's dec is less than mod 13
a13, d13 = f.getRaDecForChannelColRow(43,0,0)
a20, d20 = f.getRaDecForChannelColRow(69,0,0)
self.assertTrue( d20 < d13)
def testQuarter1(self):
"""Test that my code gets the mod-out right for a set of
stars in Q1.
This ensures I have the order of the modouts correct and I didn't
get anything flipped."""
#Recreate Kepler pointing in Q1. The source of the 33 degrees
#for the rotation to get the FOV correct is lost in the mists
#of time. The 90 just rotates the FOV to the correct angle.
#For reference, the first light image has the same orientation
#as Q1.
a0, d0 = 290.66666667, 44.5
kf = fov.KeplerFov(a0, d0, 33+90)
#Find path to the test file
path = os.path.dirname(os.path.abspath(__file__))
data = np.loadtxt(os.path.join(path, "data", "Q1_obj_position.txt"))
for row in data:
#import pdb; pdb.set_trace()
ra, dec = row[1:3]
expectedCh = int(row[5])
msg = "Uh Oh"
calcCh = int(kf.pickAChannel(ra, dec))
self.assertEqual(expectedCh, calcCh, msg)
def testGitHubBug1(self):
#Approx coords of field 1
a0, d0, rho0 = 174., 1.422, 260.6
kf = fov.KeplerFov(a0, d0, rho0)
#I'm just checking that the code returns a legal value
#I don't care about the input.
ch, col, row = kf.getChannelColRow(+181.946541, 2.740250)
self.assertRaises(ValueError, kf.getChannelColRow, +1.946541, 2.740250)
self.assertRaises(ValueError, kf.getChannelColRow, -1.946541, 2.740250)
def testGitHubBug2(self):
"""Check that -ve ra and dec handled correctly"""
a0, d0, rho0 = 0., 0, 0
kf = fov.KeplerFov(a0, d0, rho0)
#I'm just checking that the code returns a legal value
#I don't care about the input.
ch, col, row = kf.getChannelColRow(+1, 0)
self.assertEqual(ch, 43)
ch, col, row = kf.getChannelColRow(-1, 0)
self.assertEqual(ch, 42)
|
class TestFov(unittest.TestCase):
def testOrigin(self):
'''Test orientation of FOV in null position'''
pass
def testRotateClockwise(self):
'''Test orientation of FOV in null position'''
pass
def testQuarter1(self):
'''Test that my code gets the mod-out right for a set of
stars in Q1.
This ensures I have the order of the modouts correct and I didn't
get anything flipped.'''
pass
def testGitHubBug1(self):
pass
def testGitHubBug2(self):
'''Check that -ve ra and dec handled correctly'''
pass
| 6 | 4 | 20 | 3 | 11 | 5 | 1 | 0.49 | 1 | 3 | 1 | 0 | 5 | 0 | 5 | 77 | 108 | 26 | 55 | 35 | 49 | 27 | 55 | 35 | 49 | 2 | 2 | 1 | 6 |
142,665 |
KeplerGO/K2fov
|
KeplerGO_K2fov/K2fov/tests/test_projection.py
|
K2fov.tests.test_projection.TestGnomic
|
class TestGnomic(unittest.TestCase):
def testSkyToPixAtOrigin(self):
"""Test that increasing ra causes a decreasing pixel position,
but that an increasing dec causes an increasing pixel position"""
arcsec = 1/3600.
p = proj.Gnomic(0,0)
x, y = p.skyToPix(0,0)
msg = "Expected 0,0, got %.3f %.3f" %(x, y)
self.assertAlmostEqual(0, x[0], 10, msg)
self.assertAlmostEqual(0, y[0], 10, msg)
x, y = p.skyToPix(arcsec,0)
msg = "Expected -,0, got %.6f %.6f" %(x, y)
self.assertTrue(x[0]<0, msg)
self.assertAlmostEqual(0, y[0], 10, msg)
x, y = p.skyToPix(-arcsec,0)
msg = "Expected +,0, got %.6f %.6f" %(x, y)
self.assertTrue(x[0]>0, msg)
self.assertAlmostEqual(0, y[0], 10, msg)
x, y = p.skyToPix(0, arcsec)
msg = "Expected 0,+, got %.6f %.6f" %(x, y)
self.assertAlmostEqual(0, x[0], 10, msg)
self.assertTrue(y[0]>0, msg)
x, y = p.skyToPix(0, -arcsec)
msg = "Expected 0,-, got %.6f %.6f" %(x, y)
self.assertAlmostEqual(0, x[0], 10, msg)
self.assertTrue(y[0]<0, msg)
x, y = p.skyToPix(arcsec, arcsec)
msg = "Expected -,+, got %.6f %.6f" %(x, y)
self.assertTrue(x[0]<0, msg)
self.assertTrue(y[0]>0, msg)
x, y = p.skyToPix(arcsec, -arcsec)
msg = "Expected -,-, got %.3e %.3e" %(x, y)
self.assertTrue(x[0]<0, msg)
self.assertTrue(y[0]<0, msg)
def testSkyToPixAtPosition(self):
arcsec = 1/3600.
a = 36.
d = 14.
p = proj.Gnomic(a, d)
x, y = p.skyToPix(a,d)
msg = "Expected 0,0, got %.3f %.3f" %(x, y)
self.assertAlmostEqual(0, x[0], 10, msg)
self.assertAlmostEqual(0, y[0], 10, msg)
x, y = p.skyToPix(a+arcsec,d)
msg = "Expected +,0, got %.3f %.3f" %(x, y)
self.assertTrue(x[0]<0, msg)
self.assertAlmostEqual(0, y[0], 10, msg)
x, y = p.skyToPix(a+arcsec,d+arcsec)
msg = "Expected +,0, got %.3f %.3f" %(x, y)
self.assertTrue(x[0]<0, msg)
self.assertTrue(y[0]>0, msg)
def testPixToSkyAtOrigin(self):
arcsec = 1/3600.
p = proj.Gnomic(0,0)
a, d = p.pixToSky(0,0)
msg = "Expected 0,0, got %.3f %.3f" %(a, d)
self.assertAlmostEqual(0, a[0], 10, msg)
self.assertAlmostEqual(0, d[0], 10, msg)
a, d = p.pixToSky(arcsec,0)
msg = "Expected +,0, got %.6f %.6f" %(a, d)
self.assertTrue(a[0]>359, msg)
self.assertAlmostEqual(0, d[0], 10, msg)
a, d = p.pixToSky(-arcsec,0)
msg = "Expected -,0, got %.6f %.6f" %(a, d)
self.assertTrue(a[0]<1, msg)
self.assertAlmostEqual(0, d[0], 10, msg)
a, d = p.pixToSky(0, arcsec)
msg = "Expected 0,+, got %.6f %.6f" %(a, d)
self.assertAlmostEqual(0, a[0], 10, msg)
self.assertTrue(d[0]>0, msg)
a, d = p.pixToSky(0, -arcsec)
msg = "Expected 0,-, got %.6f %.6f" %(a, d)
self.assertAlmostEqual(0, a[0], 10, msg)
self.assertTrue(d[0]<360, msg)
def testPixToSkyAtPosition(self):
a0 = 36
d0 = 14
tol = 6
arcsec = 1/3600.
p = proj.Gnomic(a0, d0)
a, d = p.pixToSky(0,0)
msg = "Expected 0,0, got %.3f %.3f" %(a, d)
self.assertAlmostEqual(a[0], a0, 10, msg)
self.assertAlmostEqual(d[0], d0, 10, msg)
a, d = p.pixToSky(arcsec,0)
msg = "Expected -,0, got %.9f %.9f" %(a, d)
self.assertTrue(a[0]<a0, msg)
a, d = p.pixToSky(-arcsec,0)
msg = "Expected +,0, got %.9f %.9f" %(a, d)
self.assertTrue(a[0]>a0, msg)
a, d = p.pixToSky(0, arcsec)
msg = "Expected 0,+, got %.9f %.9f" %(a, d)
self.assertAlmostEqual(a[0], a0, tol, msg)
self.assertTrue(d[0]>d0, msg)
a, d = p.pixToSky(0, -arcsec)
msg = "Expected 0,-, got %.9f %.9f" %(a, d)
self.assertAlmostEqual(a[0], a0, tol, msg)
self.assertTrue(d[0]<d0, msg)
def testUnprojectable(self):
"""Test that unprojectable points are caught"""
pg = proj.Gnomic(0,0)
#Not interested in the value, only that the answer is returned
for i in range(-89, 90):
pg.skyToPix(i,0)
#I couldn't get self.assertRaises to work for me.
for i in range(-180, -90):
try:
pg.skyToPix(i,0)
except ValueError:
pass
else:
self.assertTrue(False, "skyToPix didn't throw an exception when it should")
for i in range(91, 180):
try:
pg.skyToPix(i,0)
except ValueError:
pass
else:
self.assertTrue(False, "skyToPix didn't throw an exception when it should")
|
class TestGnomic(unittest.TestCase):
def testSkyToPixAtOrigin(self):
'''Test that increasing ra causes a decreasing pixel position,
but that an increasing dec causes an increasing pixel position'''
pass
def testSkyToPixAtPosition(self):
pass
def testPixToSkyAtOrigin(self):
pass
def testPixToSkyAtPosition(self):
pass
def testUnprojectable(self):
'''Test that unprojectable points are caught'''
pass
| 6 | 2 | 29 | 5 | 23 | 1 | 2 | 0.04 | 1 | 3 | 1 | 0 | 5 | 0 | 5 | 77 | 153 | 34 | 114 | 29 | 108 | 5 | 114 | 29 | 108 | 6 | 2 | 2 | 10 |
142,666 |
KeplerGO/K2fov
|
KeplerGO_K2fov/K2fov/tests/test_rotate.py
|
K2fov.tests.test_rotate.TestRotationAboutAxis
|
class TestRotationAboutAxis(unittest.TestCase):
"""Extensive testing of rotateInRa and rotateInDec
(and by extension rotateAroundVector)
"""
def testRotateInRa1(self):
a = np.array([1,0,0])
b = r.rotateInRa(a, 90)
expect = [0,1,0]
msg = "Expected %s, Calculated %s" %(expect, b)
for i in range(3):
self.assertAlmostEqual(expect[i], b[i], 10)
def testRotateInRa2(self):
a = np.array([0,1,0])
b = r.rotateInRa(a, 90)
expect = [-1,0,0]
msg = "Expected %s, Calculated %s" %(expect, b)
for i in range(3):
self.assertAlmostEqual(expect[i], b[i], 10)
def testRotateInRa3(self):
a = np.array([0,0,1])
b = r.rotateInRa(a, 90)
expect = [0,0,1]
msg = "Expected %s, Calculated %s" %(expect, b)
for i in range(3):
self.assertAlmostEqual(expect[i], b[i], 10, msg)
def testRotateInRa4(self):
alpha = 285 * np.pi/180.
a = np.array([np.cos(alpha), np.sin(alpha),0])
b = r.rotateInRa(a, -285)
expect = [1,0,0]
msg = "Expected %s, Calculated %s" %(expect, b)
for i in range(3):
self.assertAlmostEqual(expect[i], b[i], 10, msg)
def testRotateInRa4b(self):
a = r.vecFromRaDec(285, 0)
b = r.rotateInRa(a, -285)
expect = [1,0,0]
msg = "Expected %s, Calculated %s" %(expect, b)
for i in range(3):
self.assertAlmostEqual(expect[i], b[i], 10, msg)
def testRotateInRa5(self):
a = r.vecFromRaDec(0, 44)
b = r.rotateInRa(a, 10)
new = r.raDecFromVec(b)
expect = [10.,44.]
msg = "Expected %s, Calculated %s" %(expect, new)
for i in range(2):
self.assertAlmostEqual(expect[i], new[i], 10, msg)
def testRotateInDec1(self):
a = np.array([1,0,0])
b = r.rotateInDeclination(a, 90)
expect = [0,0,+1]
msg = "Expected %s, Calculated %s" %(expect, b)
for i in range(3):
self.assertAlmostEqual(expect[i], b[i], 10, msg)
def testRotateInDec2(self):
a = np.array([0,1,0])
b = r.rotateInDeclination(a, 90)
expect = [0,1,0]
msg = "Expected %s, Calculated %s" %(expect, b)
for i in range(3):
self.assertAlmostEqual(expect[i], b[i], 10, msg)
def testRotateInDec3(self):
a = np.array([0,0,1])
b = r.rotateInDeclination(a, -90)
expect = [1,0,0]
msg = "Expected %s, Calculated %s" %(expect, b)
for i in range(3):
self.assertAlmostEqual(expect[i], b[i], 10, msg)
def testRotateInDec4(self):
a = r.vecFromRaDec(0, 44)
b = r.rotateInDeclination(a, -44)
expect = [1,0,0]
msg = "Expected %s, Calculated %s" %(expect, b)
for i in range(3):
self.assertAlmostEqual(expect[i], b[i], 10, msg)
def testRotateInRaDec(self):
a = r.vecFromRaDec(285, 44)
b = r.rotateInRa(a, -285)
c = r.rotateInDeclination(b, -44)
expect = [1,0,0]
msg = "Expected %s, Calculated %s" %(expect, c)
for i in range(3):
self.assertAlmostEqual(expect[i], c[i], 10, msg)
|
class TestRotationAboutAxis(unittest.TestCase):
'''Extensive testing of rotateInRa and rotateInDec
(and by extension rotateAroundVector)
'''
def testRotateInRa1(self):
pass
def testRotateInRa2(self):
pass
def testRotateInRa3(self):
pass
def testRotateInRa4(self):
pass
def testRotateInRa4b(self):
pass
def testRotateInRa5(self):
pass
def testRotateInDec1(self):
pass
def testRotateInDec2(self):
pass
def testRotateInDec3(self):
pass
def testRotateInDec4(self):
pass
def testRotateInRaDec(self):
pass
| 12 | 1 | 8 | 1 | 7 | 0 | 2 | 0.04 | 1 | 1 | 0 | 0 | 11 | 0 | 11 | 83 | 111 | 27 | 81 | 70 | 69 | 3 | 81 | 70 | 69 | 2 | 2 | 1 | 22 |
142,667 |
KeplerGO/K2fov
|
KeplerGO_K2fov/K2fov/projection.py
|
K2fov.projection.Cylindrical2
|
class Cylindrical2(Projection):
"""Stunted cyclindical projection that hacks at changing ra0
but insists in dec0 being fixed.
Wikipedia calls this the Lambert cylindrical equal area projection
http://en.wikipedia.org/wiki/Lambert_cylindrical_equal-area_projection
"""
def __init__(self, ra0_deg):
self.ra0_deg = ra0_deg
self.dec0_deg = 0
def skyToPix(self, ra_deg, dec_deg, **kwargs):
#Cast as nd array
ra_deg = np.atleast_1d(ra_deg)
ra_deg -= self.ra0_deg
#Wrap around if necessary
if np.any(ra_deg < 0):
ra_deg[ ra_deg<0] += 360
x = np.radians(-ra_deg)
y = np.sin( np.radians(dec_deg))
return x, y
|
class Cylindrical2(Projection):
'''Stunted cyclindical projection that hacks at changing ra0
but insists in dec0 being fixed.
Wikipedia calls this the Lambert cylindrical equal area projection
http://en.wikipedia.org/wiki/Lambert_cylindrical_equal-area_projection
'''
def __init__(self, ra0_deg):
pass
def skyToPix(self, ra_deg, dec_deg, **kwargs):
pass
| 3 | 1 | 8 | 2 | 6 | 1 | 2 | 0.58 | 1 | 0 | 0 | 0 | 2 | 2 | 2 | 16 | 24 | 5 | 12 | 7 | 9 | 7 | 12 | 7 | 9 | 2 | 1 | 1 | 3 |
142,668 |
KeplerGO/K2fov
|
KeplerGO_K2fov/K2fov/tests/test_rotate.py
|
K2fov.tests.test_rotate.TestVecFromRaDec
|
class TestVecFromRaDec(unittest.TestCase):
def testRa1(self):
raList = [0, 90, 180, 270, 360]
eList = [ [1,0,0],
[0,1,0],
[-1,0,0],
[0,-1,0],
[1,0,0]
]
for ra, expect in zip(raList, eList):
calc = r.vecFromRaDec(ra,0)
msg = "Expected %s, Calculated %s" %(expect, calc)
for i in range(3):
self.assertAlmostEqual(expect[i], calc[i], 10, msg)
def testRa2(self):
calc = r.vecFromRaDec(45,0)
v = np.pi/4.
expect = [np.cos(v),np.sin(v),0]
msg = "Expected %s, Calculated %s" %(expect, calc)
for i in range(3):
self.assertAlmostEqual(expect[i], calc[i], 10, msg)
def testDec1(self):
v = np.sqrt(3)/2.
decList = [-90, -30, 0, 30, 90]
eList = [ [0,0,-1],
[v, 0, -.5] ,
[1,0,0],
[v, 0, +.5],
[0,0,1],
]
for dec, expect in zip(decList, eList):
calc = r.vecFromRaDec(0, dec)
msg = "Expected %s, Calculated %s" %(expect, calc)
for i in range(3):
self.assertAlmostEqual(expect[i], calc[i], 10, msg)
def testRaDec1(self):
calc = r.vecFromRaDec(45, 60)
a = 1/(2*np.sqrt(2))
b = np.sqrt(3)/2.
expect = [a, a, b]
msg = "Expected %s, Calculated %s" %(expect, calc)
for i in range(3):
self.assertAlmostEqual(expect[i], calc[i], 10, msg)
def testRaDec2(self):
calc = r.vecFromRaDec(135, 60)
a = 1/(2*np.sqrt(2))
b = np.sqrt(3)/2.
expect = [-a, a, b]
msg = "Expected %s, Calculated %s" %(expect, calc)
for i in range(3):
self.assertAlmostEqual(expect[i], calc[i], 10, msg)
def testKeplerBoresight(self):
calc = r.vecFromRaDec(290.666667, 44.5)
expect = [.2517, -.6674, .7009]
msg = "Expected %s, Calculated %s" %(expect, calc)
for i in range(3):
self.assertAlmostEqual(expect[i], calc[i], 4, msg)
|
class TestVecFromRaDec(unittest.TestCase):
def testRa1(self):
pass
def testRa2(self):
pass
def testDec1(self):
pass
def testRaDec1(self):
pass
def testRaDec2(self):
pass
def testKeplerBoresight(self):
pass
| 7 | 0 | 10 | 1 | 9 | 0 | 2 | 0 | 1 | 2 | 0 | 0 | 6 | 0 | 6 | 78 | 70 | 13 | 57 | 41 | 50 | 0 | 47 | 41 | 40 | 3 | 2 | 2 | 14 |
142,669 |
Ketouem/flask-boto3
|
Ketouem_flask-boto3/example.py
|
example.Config
|
class Config:
DEBUG = True
BOTO3_SERVICES = ['S3', 's3']
|
class Config:
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0 | 3 | 3 | 2 | 0 | 3 | 3 | 2 | 0 | 0 | 0 | 0 |
142,670 |
Ketouem/flask-boto3
|
Ketouem_flask-boto3/tests/test_flask_boto3.py
|
test_flask_boto3.TestFlaskBoto3Clients
|
class TestFlaskBoto3Clients(TestCase):
def setUp(self):
self.app = Flask('unit_tests')
self.app.config['BOTO3_REGION'] = 'eu-west-1'
create_aws_mock_config()
def test_001_populate_application_context(self, mock_client):
self.app.config['BOTO3_SERVICES'] = ['codebuild', 'codedeploy']
b = Boto3(self.app)
with self.app.app_context():
assert isinstance(b.connections, dict)
assert len(b.connections) == 2
assert isinstance(stack.top.boto3_cns, dict)
assert len(stack.top.boto3_cns) == 2
def test_002_instantiate_connectors(self, mock_client):
self.app.config['BOTO3_SERVICES'] = ['codebuild', 'codedeploy']
b = Boto3(self.app)
with self.app.app_context():
b.connections
assert mock_client.call_count == 2
assert sorted([i[0][0] for i in mock_client.call_args_list]) == \
sorted(self.app.config['BOTO3_SERVICES'])
def test_003_pass_credentials_through_app_conf(self, mock_client):
self.app.config['BOTO3_SERVICES'] = ['codepipeline']
self.app.config['BOTO3_ACCESS_KEY'] = 'access'
self.app.config['BOTO3_SECRET_KEY'] = 'secret'
self.app.config['BOTO3_PROFILE'] = 'default'
b = Boto3(self.app)
with self.app.app_context():
b.connections
mock_client.assert_called_once_with(
'codepipeline',
aws_access_key_id='access',
aws_secret_access_key='secret',
region_name='eu-west-1'
)
def test_004_pass_optional_params_through_conf(self, mock_client):
self.app.config['BOTO3_SERVICES'] = ['codepipeline']
self.app.config['BOTO3_ACCESS_KEY'] = 'access'
self.app.config['BOTO3_SECRET_KEY'] = 'secret'
self.app.config['BOTO3_PROFILE'] = 'default'
self.app.config['BOTO3_OPTIONAL_PARAMS'] = {
'codepipeline': {
'args': ('eu-west-1'),
'kwargs': {
'fake_param': 'fake_value'
}
}
}
b = Boto3(self.app)
with self.app.app_context():
b.connections
mock_client.assert_called_once_with(
'codepipeline',
'eu-west-1',
aws_access_key_id='access',
aws_secret_access_key='secret',
fake_param='fake_value'
)
def test_005_check_boto_clients_are_available(self, mock_client):
self.app.config['BOTO3_SERVICES'] = ['codedeploy', 'codebuild']
b = Boto3(self.app)
with self.app.app_context():
clients = b.clients
assert len(clients) == len(self.app.config['BOTO3_SERVICES'])
def test_006_check_boto_resources_are_available(self, mock_client):
self.app.config['BOTO3_SERVICES'] = ['codedeploy', 'codebuild']
b = Boto3(self.app)
with self.app.app_context():
resources = b.resources
assert len(resources) == len(self.app.config['BOTO3_SERVICES'])
|
class TestFlaskBoto3Clients(TestCase):
def setUp(self):
pass
def test_001_populate_application_context(self, mock_client):
pass
def test_002_instantiate_connectors(self, mock_client):
pass
def test_003_pass_credentials_through_app_conf(self, mock_client):
pass
def test_004_pass_optional_params_through_conf(self, mock_client):
pass
def test_005_check_boto_clients_are_available(self, mock_client):
pass
def test_006_check_boto_resources_are_available(self, mock_client):
pass
| 8 | 0 | 10 | 0 | 10 | 0 | 1 | 0 | 1 | 2 | 1 | 0 | 7 | 1 | 7 | 79 | 77 | 7 | 70 | 17 | 62 | 0 | 51 | 17 | 43 | 1 | 2 | 1 | 7 |
142,671 |
Ketouem/flask-boto3
|
Ketouem_flask-boto3/flask_boto3/__init__.py
|
flask_boto3.Boto3
|
class Boto3(object):
"""Stores a bunch of boto3 conectors inside Flask's application context
for easier handling inside view functions.
All connectors are stored inside the dict `boto3_cns` where the keys are
the name of the services and the values their associated boto3 client.
"""
def __init__(self, app=None):
self.app = app
if self.app is not None:
self.init_app(app)
def init_app(self, app):
app.teardown_appcontext(self.teardown)
def connect(self):
"""Iterate through the application configuration and instantiate
the services.
"""
requested_services = set(
svc.lower() for svc in current_app.config.get('BOTO3_SERVICES', [])
)
region = current_app.config.get('BOTO3_REGION')
sess_params = {
'aws_access_key_id': current_app.config.get('BOTO3_ACCESS_KEY'),
'aws_secret_access_key': current_app.config.get('BOTO3_SECRET_KEY'),
'profile_name': current_app.config.get('BOTO3_PROFILE'),
'region_name': region
}
sess = boto3.session.Session(**sess_params)
try:
cns = {}
for svc in requested_services:
# Check for optional parameters
params = current_app.config.get(
'BOTO3_OPTIONAL_PARAMS', {}
).get(svc, {})
# Get session params and override them with kwargs
# `profile_name` cannot be passed to clients and resources
kwargs = sess_params.copy()
kwargs.update(params.get('kwargs', {}))
del kwargs['profile_name']
# Override the region if one is defined as an argument
args = params.get('args', [])
if len(args) >= 1:
del kwargs['region_name']
if not(isinstance(args, list) or isinstance(args, tuple)):
args = [args]
# Create resource or client
if svc in sess.get_available_resources():
cns.update({svc: sess.resource(svc, *args, **kwargs)})
else:
cns.update({svc: sess.client(svc, *args, **kwargs)})
except UnknownServiceError:
raise
return cns
def teardown(self, exception):
ctx = stack.top
if hasattr(ctx, 'boto3_cns'):
for c in ctx.boto3_cns:
con = ctx.boto3_cns[c]
if hasattr(con, 'close') and callable(con.close):
ctx.boto3_cns[c].close()
@property
def resources(self):
c = self.connections
return {k: v for k, v in c.items() if hasattr(c[k].meta, 'client')}
@property
def clients(self):
"""
Get all clients (with and without associated resources)
"""
clients = {}
for k, v in self.connections.items():
if hasattr(v.meta, 'client'): # has boto3 resource
clients[k] = v.meta.client
else: # no boto3 resource
clients[k] = v
return clients
@property
def connections(self):
ctx = stack.top
if ctx is not None:
if not hasattr(ctx, 'boto3_cns'):
ctx.boto3_cns = self.connect()
return ctx.boto3_cns
|
class Boto3(object):
'''Stores a bunch of boto3 conectors inside Flask's application context
for easier handling inside view functions.
All connectors are stored inside the dict `boto3_cns` where the keys are
the name of the services and the values their associated boto3 client.
'''
def __init__(self, app=None):
pass
def init_app(self, app):
pass
def connect(self):
'''Iterate through the application configuration and instantiate
the services.
'''
pass
def teardown(self, exception):
pass
@property
def resources(self):
pass
@property
def clients(self):
'''
Get all clients (with and without associated resources)
'''
pass
@property
def connections(self):
pass
| 11 | 3 | 11 | 1 | 9 | 2 | 3 | 0.27 | 1 | 3 | 0 | 0 | 7 | 1 | 7 | 7 | 97 | 14 | 67 | 27 | 56 | 18 | 53 | 24 | 45 | 6 | 1 | 3 | 20 |
142,672 |
Ketouem/flask-boto3
|
Ketouem_flask-boto3/tests/test_flask_boto3.py
|
test_flask_boto3.TestFlaskBoto3Resources
|
class TestFlaskBoto3Resources(TestCase):
def setUp(self):
self.app = Flask('unit_tests')
self.app.config['BOTO3_REGION'] = 'eu-west-1'
create_aws_mock_config()
def test_001_populate_application_context(self, mock_resource):
self.app.config['BOTO3_SERVICES'] = ['s3', 'sqs']
b = Boto3(self.app)
with self.app.app_context():
assert isinstance(b.connections, dict)
assert len(b.connections) == 2
assert isinstance(stack.top.boto3_cns, dict)
assert len(stack.top.boto3_cns) == 2
def test_002_instantiate_connectors(self, mock_resource):
self.app.config['BOTO3_SERVICES'] = ['s3', 'sqs', 'dynamodb']
b = Boto3(self.app)
with self.app.app_context():
b.connections
assert mock_resource.call_count == 3
assert sorted([i[0][0] for i in mock_resource.call_args_list]) == \
sorted(self.app.config['BOTO3_SERVICES'])
def test_003_pass_credentials_through_app_conf(self, mock_resource):
self.app.config['BOTO3_SERVICES'] = ['s3']
self.app.config['BOTO3_ACCESS_KEY'] = 'access'
self.app.config['BOTO3_SECRET_KEY'] = 'secret'
self.app.config['BOTO3_PROFILE'] = 'default'
b = Boto3(self.app)
with self.app.app_context():
b.connections
mock_resource.assert_called_once_with(
's3',
aws_access_key_id='access',
aws_secret_access_key='secret',
region_name='eu-west-1'
)
def test_004_pass_optional_params_through_conf(self, mock_resource):
self.app.config['BOTO3_SERVICES'] = ['dynamodb']
self.app.config['BOTO3_ACCESS_KEY'] = 'access'
self.app.config['BOTO3_SECRET_KEY'] = 'secret'
self.app.config['BOTO3_PROFILE'] = 'default'
self.app.config['BOTO3_OPTIONAL_PARAMS'] = {
'dynamodb': {
'args': ('eu-west-1'),
'kwargs': {
'fake_param': 'fake_value'
}
}
}
b = Boto3(self.app)
with self.app.app_context():
b.connections
mock_resource.assert_called_once_with(
'dynamodb',
'eu-west-1',
aws_access_key_id='access',
aws_secret_access_key='secret',
fake_param='fake_value'
)
def test_005_check_boto_clients_are_available(self, mock_resource):
self.app.config['BOTO3_SERVICES'] = ['s3', 'sqs']
b = Boto3(self.app)
with self.app.app_context():
clients = b.clients
assert len(clients) == len(self.app.config['BOTO3_SERVICES'])
print(clients)
def test_006_check_boto_resources_are_available(self, mock_resource):
self.app.config['BOTO3_SERVICES'] = ['s3', 'sqs']
b = Boto3(self.app)
with self.app.app_context():
resources = b.resources
assert len(resources) == len(self.app.config['BOTO3_SERVICES'])
print(resources)
|
class TestFlaskBoto3Resources(TestCase):
def setUp(self):
pass
def test_001_populate_application_context(self, mock_resource):
pass
def test_002_instantiate_connectors(self, mock_resource):
pass
def test_003_pass_credentials_through_app_conf(self, mock_resource):
pass
def test_004_pass_optional_params_through_conf(self, mock_resource):
pass
def test_005_check_boto_clients_are_available(self, mock_resource):
pass
def test_006_check_boto_resources_are_available(self, mock_resource):
pass
| 8 | 0 | 10 | 0 | 10 | 0 | 1 | 0 | 1 | 2 | 1 | 0 | 7 | 1 | 7 | 79 | 79 | 7 | 72 | 17 | 64 | 0 | 53 | 17 | 45 | 1 | 2 | 1 | 7 |
142,673 |
KieranWynn/pyquaternion
|
KieranWynn_pyquaternion/pyquaternion/quaternion.py
|
pyquaternion.quaternion.Quaternion
|
class Quaternion:
"""Class to represent a 4-dimensional complex number or quaternion.
Quaternion objects can be used generically as 4D numbers,
or as unit quaternions to represent rotations in 3D space.
Attributes:
q: Quaternion 4-vector represented as a Numpy array
"""
def __init__(self, *args, **kwargs):
"""Initialise a new Quaternion object.
See Object Initialisation docs for complete behaviour:
https://kieranwynn.github.io/pyquaternion/#object-initialisation
"""
s = len(args)
if s == 0:
# No positional arguments supplied
if kwargs:
# Keyword arguments provided
if ("scalar" in kwargs) or ("vector" in kwargs):
scalar = kwargs.get("scalar", 0.0)
if scalar is None:
scalar = 0.0
else:
scalar = float(scalar)
vector = kwargs.get("vector", [])
vector = self._validate_number_sequence(vector, 3)
self.q = np.hstack((scalar, vector))
elif ("real" in kwargs) or ("imaginary" in kwargs):
real = kwargs.get("real", 0.0)
if real is None:
real = 0.0
else:
real = float(real)
imaginary = kwargs.get("imaginary", [])
imaginary = self._validate_number_sequence(imaginary, 3)
self.q = np.hstack((real, imaginary))
elif ("axis" in kwargs) or ("radians" in kwargs) or ("degrees" in kwargs) or ("angle" in kwargs):
try:
axis = self._validate_number_sequence(kwargs["axis"], 3)
except KeyError:
raise ValueError(
"A valid rotation 'axis' parameter must be provided to describe a meaningful rotation."
)
angle = kwargs.get('radians') or self.to_radians(kwargs.get('degrees')) or kwargs.get('angle') or 0.0
self.q = Quaternion._from_axis_angle(axis, angle).q
elif "array" in kwargs:
self.q = self._validate_number_sequence(kwargs["array"], 4)
elif "matrix" in kwargs:
optional_args = {key: kwargs[key] for key in kwargs if key in ['rtol', 'atol']}
self.q = Quaternion._from_matrix(kwargs["matrix"], **optional_args).q
else:
keys = sorted(kwargs.keys())
elements = [kwargs[kw] for kw in keys]
if len(elements) == 1:
r = float(elements[0])
self.q = np.array([r, 0.0, 0.0, 0.0])
else:
self.q = self._validate_number_sequence(elements, 4)
else:
# Default initialisation
self.q = np.array([1.0, 0.0, 0.0, 0.0])
elif s == 1:
# Single positional argument supplied
if isinstance(args[0], Quaternion):
self.q = args[0].q
return
if args[0] is None:
raise TypeError("Object cannot be initialised from {}".format(type(args[0])))
try:
r = float(args[0])
self.q = np.array([r, 0.0, 0.0, 0.0])
return
except TypeError:
pass # If the single argument is not scalar, it should be a sequence
self.q = self._validate_number_sequence(args[0], 4)
return
else:
# More than one positional argument supplied
self.q = self._validate_number_sequence(args, 4)
def __hash__(self):
return hash(tuple(self.q))
def _validate_number_sequence(self, seq, n):
"""Validate a sequence to be of a certain length and ensure it's a numpy array of floats.
Raises:
ValueError: Invalid length or non-numeric value
"""
if seq is None:
return np.zeros(n)
if len(seq) == n:
try:
l = [float(e) for e in seq]
except ValueError:
raise ValueError("One or more elements in sequence <{!r}> cannot be interpreted as a real number".format(seq))
else:
return np.asarray(l)
elif len(seq) == 0:
return np.zeros(n)
else:
raise ValueError("Unexpected number of elements in sequence. Got: {}, Expected: {}.".format(len(seq), n))
# Initialise from matrix
@classmethod
def _from_matrix(cls, matrix, rtol=1e-05, atol=1e-08):
"""Initialise from matrix representation
Create a Quaternion by specifying the 3x3 rotation or 4x4 transformation matrix
(as a numpy array) from which the quaternion's rotation should be created.
"""
try:
shape = matrix.shape
except AttributeError:
raise TypeError("Invalid matrix type: Input must be a 3x3 or 4x4 numpy array or matrix")
if shape == (3, 3):
R = matrix
elif shape == (4, 4):
R = matrix[:-1][:,:-1] # Upper left 3x3 sub-matrix
else:
raise ValueError("Invalid matrix shape: Input must be a 3x3 or 4x4 numpy array or matrix")
# Check matrix properties
if not np.allclose(np.dot(R, R.conj().transpose()), np.eye(3), rtol=rtol, atol=atol):
raise ValueError("Matrix must be orthogonal, i.e. its transpose should be its inverse")
if not np.isclose(np.linalg.det(R), 1.0, rtol=rtol, atol=atol):
raise ValueError("Matrix must be special orthogonal i.e. its determinant must be +1.0")
def decomposition_method(matrix):
""" Method supposedly able to deal with non-orthogonal matrices - NON-FUNCTIONAL!
Based on this method: http://arc.aiaa.org/doi/abs/10.2514/2.4654
"""
x, y, z = 0, 1, 2 # indices
K = np.array([
[R[x, x]-R[y, y]-R[z, z], R[y, x]+R[x, y], R[z, x]+R[x, z], R[y, z]-R[z, y]],
[R[y, x]+R[x, y], R[y, y]-R[x, x]-R[z, z], R[z, y]+R[y, z], R[z, x]-R[x, z]],
[R[z, x]+R[x, z], R[z, y]+R[y, z], R[z, z]-R[x, x]-R[y, y], R[x, y]-R[y, x]],
[R[y, z]-R[z, y], R[z, x]-R[x, z], R[x, y]-R[y, x], R[x, x]+R[y, y]+R[z, z]]
])
K = K / 3.0
e_vals, e_vecs = np.linalg.eig(K)
print('Eigenvalues:', e_vals)
print('Eigenvectors:', e_vecs)
max_index = np.argmax(e_vals)
principal_component = e_vecs[max_index]
return principal_component
def trace_method(matrix):
"""
This code uses a modification of the algorithm described in:
https://d3cw3dd2w32x2b.cloudfront.net/wp-content/uploads/2015/01/matrix-to-quat.pdf
which is itself based on the method described here:
http://www.euclideanspace.com/maths/geometry/rotations/conversions/matrixToQuaternion/
Altered to work with the column vector convention instead of row vectors
"""
m = matrix.conj().transpose() # This method assumes row-vector and postmultiplication of that vector
if m[2, 2] < 0:
if m[0, 0] > m[1, 1]:
t = 1 + m[0, 0] - m[1, 1] - m[2, 2]
q = [m[1, 2]-m[2, 1], t, m[0, 1]+m[1, 0], m[2, 0]+m[0, 2]]
else:
t = 1 - m[0, 0] + m[1, 1] - m[2, 2]
q = [m[2, 0]-m[0, 2], m[0, 1]+m[1, 0], t, m[1, 2]+m[2, 1]]
else:
if m[0, 0] < -m[1, 1]:
t = 1 - m[0, 0] - m[1, 1] + m[2, 2]
q = [m[0, 1]-m[1, 0], m[2, 0]+m[0, 2], m[1, 2]+m[2, 1], t]
else:
t = 1 + m[0, 0] + m[1, 1] + m[2, 2]
q = [t, m[1, 2]-m[2, 1], m[2, 0]-m[0, 2], m[0, 1]-m[1, 0]]
q = np.array(q).astype('float64')
q *= 0.5 / sqrt(t)
return q
return cls(array=trace_method(R))
# Initialise from axis-angle
@classmethod
def _from_axis_angle(cls, axis, angle):
"""Initialise from axis and angle representation
Create a Quaternion by specifying the 3-vector rotation axis and rotation
angle (in radians) from which the quaternion's rotation should be created.
Params:
axis: a valid numpy 3-vector
angle: a real valued angle in radians
"""
mag_sq = np.dot(axis, axis)
if mag_sq == 0.0:
raise ZeroDivisionError("Provided rotation axis has no length")
# Ensure axis is in unit vector form
if (abs(1.0 - mag_sq) > 1e-12):
axis = axis / sqrt(mag_sq)
theta = angle / 2.0
r = cos(theta)
i = axis * sin(theta)
return cls(r, i[0], i[1], i[2])
@classmethod
def random(cls):
"""Generate a random unit quaternion.
Uniformly distributed across the rotation space
As per: http://planning.cs.uiuc.edu/node198.html
"""
r1, r2, r3 = np.random.random(3)
q1 = sqrt(1.0 - r1) * (sin(2 * pi * r2))
q2 = sqrt(1.0 - r1) * (cos(2 * pi * r2))
q3 = sqrt(r1) * (sin(2 * pi * r3))
q4 = sqrt(r1) * (cos(2 * pi * r3))
return cls(q1, q2, q3, q4)
# Representation
def __str__(self):
"""An informal, nicely printable string representation of the Quaternion object.
"""
return "{:.3f} {:+.3f}i {:+.3f}j {:+.3f}k".format(self.q[0], self.q[1], self.q[2], self.q[3])
def __repr__(self):
"""The 'official' string representation of the Quaternion object.
This is a string representation of a valid Python expression that could be used
to recreate an object with the same value (given an appropriate environment)
"""
return "Quaternion({!r}, {!r}, {!r}, {!r})".format(self.q[0], self.q[1], self.q[2], self.q[3])
def __format__(self, formatstr):
"""Inserts a customisable, nicely printable string representation of the Quaternion object
The syntax for `format_spec` mirrors that of the built in format specifiers for floating point types.
Check out the official Python [format specification mini-language](https://docs.python.org/3.4/library/string.html#formatspec) for details.
"""
if formatstr.strip() == '': # Defualt behaviour mirrors self.__str__()
formatstr = '+.3f'
string = \
"{:" + formatstr +"} " + \
"{:" + formatstr +"}i " + \
"{:" + formatstr +"}j " + \
"{:" + formatstr +"}k"
return string.format(self.q[0], self.q[1], self.q[2], self.q[3])
# Type Conversion
def __int__(self):
"""Implements type conversion to int.
Truncates the Quaternion object by only considering the real
component and rounding to the next integer value towards zero.
Note: to round to the closest integer, use int(round(float(q)))
"""
return int(self.q[0])
def __float__(self):
"""Implements type conversion to float.
Truncates the Quaternion object by only considering the real
component.
"""
return float(self.q[0])
def __complex__(self):
"""Implements type conversion to complex.
Truncates the Quaternion object by only considering the real
component and the first imaginary component.
This is equivalent to a projection from the 4-dimensional hypersphere
to the 2-dimensional complex plane.
"""
return complex(self.q[0], self.q[1])
def __bool__(self):
return not (self == Quaternion(0.0))
def __nonzero__(self):
return not (self == Quaternion(0.0))
def __invert__(self):
return (self == Quaternion(0.0))
# Comparison
def __eq__(self, other):
"""Returns true if the following is true for each element:
`absolute(a - b) <= (atol + rtol * absolute(b))`
"""
if isinstance(other, Quaternion):
r_tol = 1.0e-13
a_tol = 1.0e-14
try:
isEqual = np.allclose(self.q, other.q, rtol=r_tol, atol=a_tol)
except AttributeError:
raise AttributeError("Error in internal quaternion representation means it cannot be compared like a numpy array.")
return isEqual
return self.__eq__(self.__class__(other))
# Negation
def __neg__(self):
return self.__class__(array= -self.q)
# Absolute value
def __abs__(self):
return self.norm
# Addition
def __add__(self, other):
if isinstance(other, Quaternion):
return self.__class__(array=self.q + other.q)
return self + self.__class__(other)
def __iadd__(self, other):
return self + other
def __radd__(self, other):
return self + other
# Subtraction
def __sub__(self, other):
return self + (-other)
def __isub__(self, other):
return self + (-other)
def __rsub__(self, other):
return -(self - other)
# Multiplication
def __mul__(self, other):
if isinstance(other, Quaternion):
return self.__class__(array=np.dot(self._q_matrix(), other.q))
return self * self.__class__(other)
def __imul__(self, other):
return self * other
def __rmul__(self, other):
return self.__class__(other) * self
def __matmul__(self, other):
if isinstance(other, Quaternion):
return self.q.__matmul__(other.q)
return self.__matmul__(self.__class__(other))
def __imatmul__(self, other):
return self.__matmul__(other)
def __rmatmul__(self, other):
return self.__class__(other).__matmul__(self)
# Division
def __div__(self, other):
if isinstance(other, Quaternion):
if other == self.__class__(0.0):
raise ZeroDivisionError("Quaternion divisor must be non-zero")
return self * other.inverse
return self.__div__(self.__class__(other))
def __idiv__(self, other):
return self.__div__(other)
def __rdiv__(self, other):
return self.__class__(other) * self.inverse
def __truediv__(self, other):
return self.__div__(other)
def __itruediv__(self, other):
return self.__idiv__(other)
def __rtruediv__(self, other):
return self.__rdiv__(other)
# Exponentiation
def __pow__(self, exponent):
# source: https://en.wikipedia.org/wiki/Quaternion#Exponential.2C_logarithm.2C_and_power
exponent = float(exponent) # Explicitly reject non-real exponents
norm = self.norm
if norm > 0.0:
try:
n, theta = self.polar_decomposition
except ZeroDivisionError:
# quaternion is a real number (no vector or imaginary part)
return Quaternion(scalar=self.scalar ** exponent)
return (self.norm ** exponent) * Quaternion(scalar=cos(exponent * theta), vector=(n * sin(exponent * theta)))
return Quaternion(self)
def __ipow__(self, other):
return self ** other
def __rpow__(self, other):
return other ** float(self)
# Quaternion Features
def _vector_conjugate(self):
return np.hstack((self.q[0], -self.q[1:4]))
def _sum_of_squares(self):
return np.dot(self.q, self.q)
@property
def conjugate(self):
"""Quaternion conjugate, encapsulated in a new instance.
For a unit quaternion, this is the same as the inverse.
Returns:
A new Quaternion object clone with its vector part negated
"""
return self.__class__(scalar=self.scalar, vector=-self.vector)
@property
def inverse(self):
"""Inverse of the quaternion object, encapsulated in a new instance.
For a unit quaternion, this is the inverse rotation, i.e. when combined with the original rotation, will result in the null rotation.
Returns:
A new Quaternion object representing the inverse of this object
"""
ss = self._sum_of_squares()
if ss > 0:
return self.__class__(array=(self._vector_conjugate() / ss))
else:
raise ZeroDivisionError("a zero quaternion (0 + 0i + 0j + 0k) cannot be inverted")
@property
def norm(self):
"""L2 norm of the quaternion 4-vector.
This should be 1.0 for a unit quaternion (versor)
Slow but accurate. If speed is a concern, consider using _fast_normalise() instead
Returns:
A scalar real number representing the square root of the sum of the squares of the elements of the quaternion.
"""
mag_squared = self._sum_of_squares()
return sqrt(mag_squared)
@property
def magnitude(self):
return self.norm
def _normalise(self):
"""Object is guaranteed to be a unit quaternion after calling this
operation UNLESS the object is equivalent to Quaternion(0)
"""
if not self.is_unit():
n = self.norm
if n > 0:
self.q = self.q / n
def _fast_normalise(self):
    """Normalise the object to a unit quaternion using a fast approximation method if appropriate.
    Object is guaranteed to be a quaternion of approximately unit length
    after calling this operation UNLESS the object is equivalent to Quaternion(0)
    """
    if not self.is_unit():
        mag_squared = np.dot(self.q, self.q)
        if (mag_squared == 0):
            return  # zero quaternion: nothing sensible to normalise to
        # Threshold under which the first-order (1+x)/2 approximation of sqrt
        # is considered accurate enough — presumably tuned for double precision.
        if (abs(1.0 - mag_squared) < 2.107342e-08):
            mag = ((1.0 + mag_squared) / 2.0)  # More efficient. Pade approximation valid if error is small
        else:
            mag = sqrt(mag_squared)  # Error is too big, take the performance hit to calculate the square root properly
        self.q = self.q / mag
@property
def normalised(self):
    """A unit-length (versor) copy of this quaternion; `self` is not modified.

    Returns:
        A new Quaternion clone guaranteed to have `norm` 1.0 (unless zero).
    """
    clone = Quaternion(self)
    clone._normalise()
    return clone
@property
def polar_unit_vector(self):
vector_length = np.linalg.norm(self.vector)
if vector_length <= 0.0:
raise ZeroDivisionError('Quaternion is pure real and does not have a unique unit vector')
return self.vector / vector_length
@property
def polar_angle(self):
    # Angle theta of the polar decomposition q = |q| * exp(n * theta):
    # acos of the scalar part normalised by the norm.
    # Raises ZeroDivisionError for the zero quaternion (norm == 0).
    return acos(self.scalar / self.norm)
@property
def polar_decomposition(self):
    """
    Returns the unit vector and angle of a non-scalar quaternion according to the following decomposition
    q = q.norm() * (e ** (q.polar_unit_vector * q.polar_angle))
    source: https://en.wikipedia.org/wiki/Polar_decomposition#Quaternion_polar_decomposition
    """
    # Tuple (unit_vector, angle); polar_unit_vector raises ZeroDivisionError
    # for a pure-real quaternion, so this property does too.
    return self.polar_unit_vector, self.polar_angle
@property
def unit(self):
    # Alias for `normalised` (unit-length copy).
    return self.normalised
def is_unit(self, tolerance=1e-14):
"""Determine whether the quaternion is of unit length to within a specified tolerance value.
Params:
tolerance: [optional] maximum absolute value by which the norm can differ from 1.0 for the object to be considered a unit quaternion. Defaults to `1e-14`.
Returns:
`True` if the Quaternion object is of unit length to within the specified tolerance value. `False` otherwise.
"""
return abs(1.0 - self._sum_of_squares()) < tolerance # if _sum_of_squares is 1, norm is 1. This saves a call to sqrt()
def _q_matrix(self):
"""Matrix representation of quaternion for multiplication purposes.
"""
return np.array([
[self.q[0], -self.q[1], -self.q[2], -self.q[3]],
[self.q[1], self.q[0], -self.q[3], self.q[2]],
[self.q[2], self.q[3], self.q[0], -self.q[1]],
[self.q[3], -self.q[2], self.q[1], self.q[0]]])
def _q_bar_matrix(self):
"""Matrix representation of quaternion for multiplication purposes.
"""
return np.array([
[self.q[0], -self.q[1], -self.q[2], -self.q[3]],
[self.q[1], self.q[0], self.q[3], -self.q[2]],
[self.q[2], -self.q[3], self.q[0], self.q[1]],
[self.q[3], self.q[2], -self.q[1], self.q[0]]])
def _rotate_quaternion(self, q):
    """Rotate a quaternion vector using the stored rotation.
    Params:
        q: The vector to be rotated, in quaternion form (0 + xi + yj + kz)
    Returns:
        A Quaternion object representing the rotated vector in quaternion form (0 + xi + yj + kz)
    """
    self._normalise()  # rotation only makes sense for a unit quaternion; NOTE: mutates self if not unit
    return self * q * self.conjugate  # conjugation p -> q p q* applies the rotation
def rotate(self, vector):
    """Rotate a 3D vector by the rotation stored in the Quaternion object.

    Params:
        vector: A 3-vector specified as any ordered sequence of 3 real numbers
            corresponding to x, y, and z values (numpy array, list or tuple),
            or a Quaternion whose scalar part is 0 and whose vector part is
            the 3-vector to rotate (so `Quaternion.rotate(q)` also works).
    Returns:
        The rotated vector, returned as the same type it was specified at
        input: list in -> list out, tuple in -> tuple out, otherwise a numpy
        array; a Quaternion input yields a Quaternion.
    Raises:
        TypeError: if any of the vector elements cannot be converted to a real number.
        ValueError: if `vector` cannot be interpreted as a 3-vector or a Quaternion object.
    """
    if isinstance(vector, Quaternion):
        return self._rotate_quaternion(vector)
    rotated = self._rotate_quaternion(Quaternion(vector=vector)).vector
    # Preserve the caller's container type (was: manual comprehension copies
    # bound to a variable shadowing the builtin `l`).
    if isinstance(vector, list):
        return list(rotated)
    if isinstance(vector, tuple):
        return tuple(rotated)
    return rotated
@classmethod
def exp(cls, q):
    """Quaternion Exponential.
    Find the exponential of a quaternion amount.
    Params:
        q: the input quaternion/argument as a Quaternion object.
    Returns:
        A quaternion amount representing the exp(q). See [Source](https://math.stackexchange.com/questions/1030737/exponential-function-of-quaternion-derivation for more information and mathematical background).
    Note:
        The method can compute the exponential of any quaternion.
    """
    # exp(w + v) = e^w * (cos|v| + (v/|v|) * sin|v|)
    tolerance = 1e-17
    v_norm = np.linalg.norm(q.vector)
    vec = q.vector
    if v_norm > tolerance:
        vec = vec / v_norm  # unit direction of the vector part
    # When |v| <= tolerance, vec is left unnormalised; sin(v_norm) ~ 0 makes
    # the vector term vanish anyway.
    magnitude = exp(q.scalar)
    return Quaternion(scalar = magnitude * cos(v_norm), vector = magnitude * sin(v_norm) * vec)
@classmethod
def log(cls, q):
    """Quaternion Logarithm.
    Find the logarithm of a quaternion amount.
    Params:
        q: the input quaternion/argument as a Quaternion object.
    Returns:
        A quaternion amount representing log(q) := (log(|q|), v/|v|acos(w/|q|)).
    Note:
        The method computes the logarithm of general quaternions. See [Source](https://math.stackexchange.com/questions/2552/the-logarithm-of-quaternion/2554#2554) for more details.
    """
    v_norm = np.linalg.norm(q.vector)
    q_norm = q.norm
    tolerance = 1e-17
    if q_norm < tolerance:
        # 0 quaternion - undefined; signal with -inf scalar and NaN vector
        return Quaternion(scalar=-float('inf'), vector=float('nan')*q.vector)
    if v_norm < tolerance:
        # real quaternions - no imaginary part
        return Quaternion(scalar=log(q_norm), vector=[0, 0, 0])
    vec = q.vector / v_norm  # unit direction of the vector part
    # log q = (log|q|, (v/|v|) * acos(w/|q|))
    return Quaternion(scalar=log(q_norm), vector=acos(q.scalar/q_norm)*vec)
@classmethod
def exp_map(cls, q, eta):
    """Quaternion exponential map.
    Find the exponential map on the Riemannian manifold described
    by the quaternion space.
    Params:
        q: the base point of the exponential map, i.e. a Quaternion object
        eta: the argument of the exponential map, a tangent vector, i.e. a Quaternion object
    Returns:
        A quaternion p such that p is the endpoint of the geodesic starting at q
        in the direction of eta, having the length equal to the magnitude of eta.
    Note:
        The exponential map plays an important role in integrating orientation
        variations (e.g. angular velocities). This is done by projecting
        quaternion tangent vectors onto the quaternion manifold.
    """
    # Right-translate the exponential of the tangent vector by the base point.
    return q * Quaternion.exp(eta)
@classmethod
def sym_exp_map(cls, q, eta):
    """Quaternion symmetrized exponential map.

    Computes sqrt(q) * exp(eta) * sqrt(q): the symmetrized counterpart of
    `exp_map`, akin to the exponential formulation for symmetric
    positive-definite tensors [Source](http://www.academia.edu/7656761/On_the_Averaging_of_Symmetric_Positive-Definite_Tensors).

    Params:
        q: the base point as a Quaternion object
        eta: the tangent vector argument of the exponential map
            as a Quaternion object
    Returns:
        A quaternion p.
    """
    half_power = q ** 0.5
    return half_power * Quaternion.exp(eta) * half_power
@classmethod
def log_map(cls, q, p):
    """Quaternion logarithm map.
    Find the logarithm map on the quaternion Riemannian manifold.
    Params:
        q: the base point at which the logarithm is computed, i.e.
            a Quaternion object
        p: the argument of the quaternion map, a Quaternion object
    Returns:
        A tangent vector having the length and direction given by the
        geodesic joining q and p.
    """
    # Geodesic "difference": the tangent vector at q pointing toward p.
    return Quaternion.log(q.inverse * p)
@classmethod
def sym_log_map(cls, q, p):
    """Quaternion symmetrized logarithm map.
    Find the symmetrized logarithm map on the quaternion Riemannian manifold.
    Params:
        q: the base point at which the logarithm is computed, i.e.
            a Quaternion object
        p: the argument of the quaternion map, a Quaternion object
    Returns:
        A tangent vector corresponding to the symmetrized geodesic curve formulation.
    Note:
        Information on the symmetrized formulations given in [Source](https://www.researchgate.net/publication/267191489_Riemannian_L_p_Averaging_on_Lie_Group_of_Nonzero_Quaternions).
    """
    inv_sqrt_q = (q ** (-0.5))
    # Symmetrized form: log(q^(-1/2) * p * q^(-1/2)).
    return Quaternion.log(inv_sqrt_q * p * inv_sqrt_q)
@classmethod
def absolute_distance(cls, q0, q1):
    """Quaternion absolute distance: sign-invariant chord distance.

    Returns min(|q0 - q1|, |q0 + q1|), which accounts for the fact that
    q and -q encode the same rotation.

    Params:
        q0: the first quaternion
        q1: the second quaternion
    Returns:
        A positive scalar: the chord of the shortest path/arc connecting
        q0 to q1.
    Note:
        This is not a distance on the hypersphere, but it is a good
        indicator for rotation similarities.
    """
    return min((q0 - q1).norm, (q0 + q1).norm)
@classmethod
def distance(cls, q0, q1):
    """Quaternion intrinsic (geodesic) distance between q0 and q1.

    Params:
        q0: the first quaternion
        q1: the second quaternion
    Returns:
        A positive amount: the length of the geodesic arc connecting q0 to q1.
    Note:
        Although q0^(-1)*q1 != q1^(-1)*q0, the norms of their logarithms are
        equal, so the distance is symmetric.
    """
    return Quaternion.log_map(q0, q1).norm
@classmethod
def sym_distance(cls, q0, q1):
    """Quaternion symmetrized geodesic distance between q0 and q1.

    Params:
        q0: the first quaternion
        q1: the second quaternion
    Returns:
        A positive amount: the length of the symmetrized geodesic curve
        connecting q0 to q1.
    Note:
        More numerically stable for iterative gradient descent on the
        Riemannian quaternion manifold, but d(q, -q) == pi, so it is not
        useful for rotation similarity when samples spread over a solid
        angle of more than pi/2 radians on the unit hypersphere.
    """
    return Quaternion.sym_log_map(q0, q1).norm
@classmethod
def slerp(cls, q0, q1, amount=0.5):
    """Spherical Linear Interpolation between quaternions.
    Implemented as described in https://en.wikipedia.org/wiki/Slerp

    Find a valid quaternion rotation at a specified distance along the
    minor arc of a great circle passing through any two existing quaternion
    endpoints lying on the unit radius hypersphere.

    Params:
        q0: first endpoint rotation as a Quaternion object
        q1: second endpoint rotation as a Quaternion object
        amount: interpolation parameter between 0 and 1 (clipped into that
            range); 0 yields `q0`, 1 yields `q1`. Defaults to 0.5.
    Returns:
        A new Quaternion object representing the interpolated rotation,
        guaranteed to be (approximately) a unit quaternion.
    Note:
        Only makes sense between unit quaternions. The endpoints are
        implicitly normalised in place if they are not already unit length.
    """
    q0._fast_normalise()
    q1._fast_normalise()
    amount = np.clip(amount, 0, 1)

    v0 = q0.q
    v1 = q1.q
    dot = np.dot(v0, v1)

    # A negative dot product means slerp would take the long way around.
    # q and -q are equivalent rotations, so flip a *local copy* of one
    # endpoint. (Bug fix: the previous implementation negated q0.q in
    # place, permanently flipping the caller's quaternion sign.)
    if dot < 0.0:
        v0 = -v0
        dot = -dot

    # Nearly parallel endpoints: sin(theta_0) ~ 0, fall back to a
    # normalised linear interpolation.
    if dot > 0.9995:
        qr = Quaternion(v0 + amount * (v1 - v0))
        qr._fast_normalise()
        return qr

    theta_0 = np.arccos(dot)  # dot is in [0, 0.9995], so arccos is safe
    sin_theta_0 = np.sin(theta_0)
    theta = theta_0 * amount
    sin_theta = np.sin(theta)

    s0 = np.cos(theta) - dot * sin_theta / sin_theta_0
    s1 = sin_theta / sin_theta_0
    qr = Quaternion((s0 * v0) + (s1 * v1))
    qr._fast_normalise()
    return qr
@classmethod
def intermediates(cls, q0, q1, n, include_endpoints=False):
    """Generator method to get an iterable sequence of `n` evenly spaced quaternion
    rotations between any two existing quaternion endpoints lying on the unit
    radius hypersphere.
    This is a convenience function that is based on `Quaternion.slerp()` as defined above.
    This is a class method and is called as a method of the class itself rather than on a particular instance.
    Params:
        q0: initial endpoint rotation as a Quaternion object
        q1: final endpoint rotation as a Quaternion object
        n: number of intermediate quaternion objects to include within the interval
        include_endpoints: [optional] if set to `True`, the sequence of intermediates
            will be 'bookended' by `q0` and `q1`, resulting in a sequence length of `n + 2`.
            If set to `False`, endpoints are not included. Defaults to `False`.
    Yields:
        A generator object iterating over a sequence of intermediate quaternion objects.
    Note:
        This feature only makes sense when interpolating between unit quaternions (those lying on the unit radius hypersphere).
        Calling this method will implicitly normalise the endpoints to unit quaternions if they are not already unit length.
    """
    step_size = 1.0 / (n + 1)  # n interior points split the interval into n + 1 segments
    if include_endpoints:
        steps = [i * step_size for i in range(0, n + 2)]
    else:
        steps = [i * step_size for i in range(1, n + 1)]
    for step in steps:
        yield cls.slerp(q0, q1, step)
def derivative(self, rate):
    """Get the instantaneous quaternion derivative representing a quaternion rotating at a 3D rate vector `rate`
    Params:
        rate: numpy 3-array (or array-like) describing rotation rates about the global x, y and z axes respectively.
    Returns:
        A unit quaternion describing the rotation rate
    """
    rate = self._validate_number_sequence(rate, 3)
    # dq/dt = 0.5 * q * omega, where omega is the pure quaternion (0, rate).
    return 0.5 * self * Quaternion(vector=rate)
def integrate(self, rate, timestep):
    """Advance a time varying quaternion to its value at a time `timestep` in the future.
    The Quaternion object will be modified to its future value.
    It is guaranteed to remain a unit quaternion.
    Params:
        rate: numpy 3-array (or array-like) describing rotation rates about the
            global x, y and z axes respectively.
        timestep: interval over which to integrate into the future.
            Assuming *now* is `T=0`, the integration occurs over the interval
            `T=0` to `T=timestep`. Smaller intervals are more accurate when
            `rate` changes over time.
    Note:
        The solution is closed form given the assumption that `rate` is constant
        over the interval of length `timestep`.
    """
    self._fast_normalise()
    rate = self._validate_number_sequence(rate, 3)
    rotation_vector = rate * timestep  # axis-angle increment under the constant-rate assumption
    rotation_norm = np.linalg.norm(rotation_vector)
    if rotation_norm > 0:
        axis = rotation_vector / rotation_norm
        angle = rotation_norm
        q2 = Quaternion(axis=axis, angle=angle)
        # Right-multiplication composes the increment with the current state.
        # NOTE(review): presumably this treats `rate` as a body-frame rate — confirm convention.
        self.q = (self * q2).q
    self._fast_normalise()
@property
def rotation_matrix(self):
    """Get the 3x3 rotation matrix equivalent of the quaternion rotation.
    Returns:
        A 3x3 orthogonal rotation matrix as a 3x3 Numpy array
    Note:
        This feature only makes sense when referring to a unit quaternion. Calling this method will implicitly normalise the Quaternion object to a unit quaternion if it is not already one.
    """
    self._normalise()
    # Product of the two 4x4 multiplication-matrix forms; the lower-right
    # 3x3 sub-block of the result is the rotation matrix.
    product_matrix = np.dot(self._q_matrix(), self._q_bar_matrix().conj().transpose())
    return product_matrix[1:][:, 1:]
@property
def transformation_matrix(self):
"""Get the 4x4 homogeneous transformation matrix equivalent of the quaternion rotation.
Returns:
A 4x4 homogeneous transformation matrix as a 4x4 Numpy array
Note:
This feature only makes sense when referring to a unit quaternion. Calling this method will implicitly normalise the Quaternion object to a unit quaternion if it is not already one.
"""
t = np.array([[0.0], [0.0], [0.0]])
Rt = np.hstack([self.rotation_matrix, t])
return np.vstack([Rt, np.array([0.0, 0.0, 0.0, 1.0])])
@property
def yaw_pitch_roll(self):
    """Get the equivalent yaw-pitch-roll angles aka. intrinsic Tait-Bryan angles following the z-y'-x'' convention
    Returns:
        yaw: rotation angle around the z-axis in radians, in the range `[-pi, pi]`
        pitch: rotation angle around the y'-axis in radians, in the range `[-pi/2, pi/2]`
        roll: rotation angle around the x''-axis in radians, in the range `[-pi, pi]`
    The resulting rotation_matrix would be R = R_x(roll) R_y(pitch) R_z(yaw)
    Note:
        This feature only makes sense when referring to a unit quaternion. Calling this method will implicitly normalise the Quaternion object to a unit quaternion if it is not already one.
    """
    self._normalise()
    # z-axis rotation
    yaw = np.arctan2(2 * (self.q[0] * self.q[3] - self.q[1] * self.q[2]),
                     1 - 2 * (self.q[2] ** 2 + self.q[3] ** 2))
    # y'-axis rotation (arcsin clamps output to [-pi/2, pi/2])
    pitch = np.arcsin(2 * (self.q[0] * self.q[2] + self.q[3] * self.q[1]))
    # x''-axis rotation
    roll = np.arctan2(2 * (self.q[0] * self.q[1] - self.q[2] * self.q[3]),
                      1 - 2 * (self.q[1] ** 2 + self.q[2] ** 2))
    return yaw, pitch, roll
def _wrap_angle(self, theta):
"""Helper method: Wrap any angle to lie between -pi and pi
Odd multiples of pi are wrapped to +pi (as opposed to -pi)
"""
result = ((theta + pi) % (2 * pi)) - pi
if result == -pi:
result = pi
return result
def get_axis(self, undefined=np.zeros(3)):
"""Get the axis or vector about which the quaternion rotation occurs
For a null rotation (a purely real quaternion), the rotation angle will
always be `0`, but the rotation axis is undefined.
It is by default assumed to be `[0, 0, 0]`.
Params:
undefined: [optional] specify the axis vector that should define a null rotation.
This is geometrically meaningless, and could be any of an infinite set of vectors,
but can be specified if the default (`[0, 0, 0]`) causes undesired behaviour.
Returns:
A Numpy unit 3-vector describing the Quaternion object's axis of rotation.
Note:
This feature only makes sense when referring to a unit quaternion.
Calling this method will implicitly normalise the Quaternion object to a unit quaternion if it is not already one.
"""
tolerance = 1e-17
self._normalise()
norm = np.linalg.norm(self.vector)
if norm < tolerance:
# Here there are an infinite set of possible axes, use what has been specified as an undefined axis.
return undefined
else:
return self.vector / norm
@property
def axis(self):
    # Rotation axis with the default null-rotation placeholder; see get_axis().
    return self.get_axis()
@property
def angle(self):
    """Get the angle (in radians) describing the magnitude of the quaternion rotation about its rotation axis.
    This is guaranteed to be within the range (-pi:pi) with the direction of
    rotation indicated by the sign.
    When a particular rotation describes a 180 degree rotation about an arbitrary
    axis vector `v`, the conversion to axis / angle representation may jump
    discontinuously between all permutations of `(-pi, pi)` and `(-v, v)`,
    each being geometrically equivalent (see Note in documentation).
    Returns:
        A real number in the range (-pi:pi) describing the angle of rotation
        in radians about a Quaternion object's axis of rotation.
    Note:
        This feature only makes sense when referring to a unit quaternion.
        Calling this method will implicitly normalise the Quaternion object to a unit quaternion if it is not already one.
    """
    self._normalise()
    norm = np.linalg.norm(self.vector)
    # angle = 2 * atan2(|v|, w), wrapped into (-pi, pi].
    return self._wrap_angle(2.0 * atan2(norm, self.scalar))
@property
def degrees(self):
    # Rotation angle expressed in degrees.
    return self.to_degrees(self.angle)
@property
def radians(self):
    # Alias for `angle` (rotation angle in radians).
    return self.angle
@property
def scalar(self):
    """ Return the real or scalar component of the quaternion object.
    Returns:
        A real number i.e. float (element 0 of the underlying 4-array)
    """
    return self.q[0]
@property
def vector(self):
    """ Return the imaginary or vector component of the quaternion object.
    Returns:
        A numpy 3-array of floats (elements 1..3 of q, as a view). NOT guaranteed to be a unit vector
    """
    return self.q[1:4]
@property
def real(self):
    # Alias for `scalar` (the real component).
    return self.scalar
@property
def imaginary(self):
    # Alias for `vector` (the imaginary components).
    return self.vector
@property
def w(self):
    # Scalar (real) component.
    return self.q[0]
@property
def x(self):
    # First imaginary component (i coefficient).
    return self.q[1]
@property
def y(self):
    # Second imaginary component (j coefficient).
    return self.q[2]
@property
def z(self):
    # Third imaginary component (k coefficient).
    return self.q[3]
@property
def elements(self):
    """ Return all the elements of the quaternion object.
    Returns:
        The underlying numpy 4-array [w, x, y, z] (a direct reference, not a copy). NOT guaranteed to be a unit vector
    """
    return self.q
def __getitem__(self, index):
index = int(index)
return self.q[index]
def __setitem__(self, index, value):
index = int(index)
self.q[index] = float(value)
def __copy__(self):
    """Shallow copy: construct a new Quaternion from this one's 4-array."""
    return self.__class__(self.q)
def __deepcopy__(self, memo):
    # Deep-copy the underlying array, then register the new object in `memo`
    # so other references to this quaternion resolve to the same copy.
    result = self.__class__(deepcopy(self.q, memo))
    memo[id(self)] = result
    return result
@staticmethod
def to_degrees(angle_rad):
if angle_rad is not None:
return float(angle_rad) / pi * 180.0
@staticmethod
def to_radians(angle_deg):
if angle_deg is not None:
return float(angle_deg) / 180.0 * pi
|
class Quaternion:
'''Class to represent a 4-dimensional complex number or quaternion.
Quaternion objects can be used generically as 4D numbers,
or as unit quaternions to represent rotations in 3D space.
Attributes:
q: Quaternion 4-vector represented as a Numpy array
'''
def __init__(self, *args, **kwargs):
'''Initialise a new Quaternion object.
See Object Initialisation docs for complete behaviour:
https://kieranwynn.github.io/pyquaternion/#object-initialisation
'''
pass
def __hash__(self):
pass
def _validate_number_sequence(self, seq, n):
'''Validate a sequence to be of a certain length and ensure it's a numpy array of floats.
Raises:
ValueError: Invalid length or non-numeric value
'''
pass
@classmethod
def _from_matrix(cls, matrix, rtol=1e-05, atol=1e-08):
'''Initialise from matrix representation
Create a Quaternion by specifying the 3x3 rotation or 4x4 transformation matrix
(as a numpy array) from which the quaternion's rotation should be created.
'''
pass
def decomposition_method(matrix):
''' Method supposedly able to deal with non-orthogonal matrices - NON-FUNCTIONAL!
Based on this method: http://arc.aiaa.org/doi/abs/10.2514/2.4654
'''
pass
def trace_method(matrix):
'''
This code uses a modification of the algorithm described in:
https://d3cw3dd2w32x2b.cloudfront.net/wp-content/uploads/2015/01/matrix-to-quat.pdf
which is itself based on the method described here:
http://www.euclideanspace.com/maths/geometry/rotations/conversions/matrixToQuaternion/
Altered to work with the column vector convention instead of row vectors
'''
pass
@classmethod
def _from_axis_angle(cls, axis, angle):
'''Initialise from axis and angle representation
Create a Quaternion by specifying the 3-vector rotation axis and rotation
angle (in radians) from which the quaternion's rotation should be created.
Params:
axis: a valid numpy 3-vector
angle: a real valued angle in radians
'''
pass
@classmethod
def random(cls):
'''Generate a random unit quaternion.
Uniformly distributed across the rotation space
As per: http://planning.cs.uiuc.edu/node198.html
'''
pass
def __str__(self):
'''An informal, nicely printable string representation of the Quaternion object.
'''
pass
def __repr__(self):
'''The 'official' string representation of the Quaternion object.
This is a string representation of a valid Python expression that could be used
to recreate an object with the same value (given an appropriate environment)
'''
pass
def __format__(self, formatstr):
'''Inserts a customisable, nicely printable string representation of the Quaternion object
The syntax for `format_spec` mirrors that of the built in format specifiers for floating point types.
Check out the official Python [format specification mini-language](https://docs.python.org/3.4/library/string.html#formatspec) for details.
'''
pass
def __int__(self):
'''Implements type conversion to int.
Truncates the Quaternion object by only considering the real
component and rounding to the next integer value towards zero.
Note: to round to the closest integer, use int(round(float(q)))
'''
pass
def __float__(self):
'''Implements type conversion to float.
Truncates the Quaternion object by only considering the real
component.
'''
pass
def __complex__(self):
'''Implements type conversion to complex.
Truncates the Quaternion object by only considering the real
component and the first imaginary component.
This is equivalent to a projection from the 4-dimensional hypersphere
to the 2-dimensional complex plane.
'''
pass
def __bool__(self):
pass
def __nonzero__(self):
pass
def __invert__(self):
pass
def __eq__(self, other):
'''Returns true if the following is true for each element:
`absolute(a - b) <= (atol + rtol * absolute(b))`
'''
pass
def __neg__(self):
pass
def __abs__(self):
pass
def __add__(self, other):
pass
def __iadd__(self, other):
pass
def __radd__(self, other):
pass
def __sub__(self, other):
pass
def __isub__(self, other):
pass
def __rsub__(self, other):
pass
def __mul__(self, other):
pass
def __imul__(self, other):
pass
def __rmul__(self, other):
pass
def __matmul__(self, other):
pass
def __imatmul__(self, other):
pass
def __rmatmul__(self, other):
pass
def __div__(self, other):
pass
def __idiv__(self, other):
pass
def __rdiv__(self, other):
pass
def __truediv__(self, other):
pass
def __itruediv__(self, other):
pass
def __rtruediv__(self, other):
pass
def __pow__(self, exponent):
pass
def __ipow__(self, other):
pass
def __rpow__(self, other):
pass
def _vector_conjugate(self):
pass
def _sum_of_squares(self):
pass
@property
def conjugate(self):
'''Quaternion conjugate, encapsulated in a new instance.
For a unit quaternion, this is the same as the inverse.
Returns:
A new Quaternion object clone with its vector part negated
'''
pass
@property
def inverse(self):
'''Inverse of the quaternion object, encapsulated in a new instance.
For a unit quaternion, this is the inverse rotation, i.e. when combined with the original rotation, will result in the null rotation.
Returns:
A new Quaternion object representing the inverse of this object
'''
pass
@property
def norm(self):
'''L2 norm of the quaternion 4-vector.
This should be 1.0 for a unit quaternion (versor)
Slow but accurate. If speed is a concern, consider using _fast_normalise() instead
Returns:
A scalar real number representing the square root of the sum of the squares of the elements of the quaternion.
'''
pass
@property
def magnitude(self):
pass
def _normalise(self):
'''Object is guaranteed to be a unit quaternion after calling this
operation UNLESS the object is equivalent to Quaternion(0)
'''
pass
def _fast_normalise(self):
'''Normalise the object to a unit quaternion using a fast approximation method if appropriate.
Object is guaranteed to be a quaternion of approximately unit length
after calling this operation UNLESS the object is equivalent to Quaternion(0)
'''
pass
@property
def normalised(self):
'''Get a unit quaternion (versor) copy of this Quaternion object.
A unit quaternion has a `norm` of 1.0
Returns:
A new Quaternion object clone that is guaranteed to be a unit quaternion
'''
pass
@property
def polar_unit_vector(self):
pass
@property
def polar_angle(self):
pass
@property
def polar_decomposition(self):
'''
Returns the unit vector and angle of a non-scalar quaternion according to the following decomposition
q = q.norm() * (e ** (q.polar_unit_vector * q.polar_angle))
source: https://en.wikipedia.org/wiki/Polar_decomposition#Quaternion_polar_decomposition
'''
pass
@property
def unit(self):
pass
def is_unit(self, tolerance=1e-14):
'''Determine whether the quaternion is of unit length to within a specified tolerance value.
Params:
tolerance: [optional] maximum absolute value by which the norm can differ from 1.0 for the object to be considered a unit quaternion. Defaults to `1e-14`.
Returns:
`True` if the Quaternion object is of unit length to within the specified tolerance value. `False` otherwise.
'''
pass
def _q_matrix(self):
'''Matrix representation of quaternion for multiplication purposes.
'''
pass
def _q_bar_matrix(self):
'''Matrix representation of quaternion for multiplication purposes.
'''
pass
def _rotate_quaternion(self, q):
'''Rotate a quaternion vector using the stored rotation.
Params:
q: The vector to be rotated, in quaternion form (0 + xi + yj + kz)
Returns:
A Quaternion object representing the rotated vector in quaternion from (0 + xi + yj + kz)
'''
pass
def rotate(self, vector):
'''Rotate a 3D vector by the rotation stored in the Quaternion object.
Params:
vector: A 3-vector specified as any ordered sequence of 3 real numbers corresponding to x, y, and z values.
Some types that are recognised are: numpy arrays, lists and tuples.
A 3-vector can also be represented by a Quaternion object who's scalar part is 0 and vector part is the required 3-vector.
Thus it is possible to call `Quaternion.rotate(q)` with another quaternion object as an input.
Returns:
The rotated vector returned as the same type it was specified at input.
Raises:
TypeError: if any of the vector elements cannot be converted to a real number.
ValueError: if `vector` cannot be interpreted as a 3-vector or a Quaternion object.
'''
pass
@classmethod
def exp(cls, q):
'''Quaternion Exponential.
Find the exponential of a quaternion amount.
Params:
q: the input quaternion/argument as a Quaternion object.
Returns:
A quaternion amount representing the exp(q). See [Source](https://math.stackexchange.com/questions/1030737/exponential-function-of-quaternion-derivation for more information and mathematical background).
Note:
The method can compute the exponential of any quaternion.
'''
pass
@classmethod
def log(cls, q):
'''Quaternion Logarithm.
Find the logarithm of a quaternion amount.
Params:
q: the input quaternion/argument as a Quaternion object.
Returns:
A quaternion amount representing log(q) := (log(|q|), v/|v|acos(w/|q|)).
Note:
The method computes the logarithm of general quaternions. See [Source](https://math.stackexchange.com/questions/2552/the-logarithm-of-quaternion/2554#2554) for more details.
'''
pass
@classmethod
def exp_map(cls, q, eta):
'''Quaternion exponential map.
Find the exponential map on the Riemannian manifold described
by the quaternion space.
Params:
q: the base point of the exponential map, i.e. a Quaternion object
eta: the argument of the exponential map, a tangent vector, i.e. a Quaternion object
Returns:
A quaternion p such that p is the endpoint of the geodesic starting at q
in the direction of eta, having the length equal to the magnitude of eta.
Note:
The exponential map plays an important role in integrating orientation
variations (e.g. angular velocities). This is done by projecting
quaternion tangent vectors onto the quaternion manifold.
'''
pass
@classmethod
def sym_exp_map(cls, q, eta):
'''Quaternion symmetrized exponential map.
Find the symmetrized exponential map on the quaternion Riemannian
manifold.
Params:
q: the base point as a Quaternion object
eta: the tangent vector argument of the exponential map
as a Quaternion object
Returns:
A quaternion p.
Note:
The symmetrized exponential formulation is akin to the exponential
formulation for symmetric positive definite tensors [Source](http://www.academia.edu/7656761/On_the_Averaging_of_Symmetric_Positive-Definite_Tensors)
'''
pass
@classmethod
def log_map(cls, q, p):
'''Quaternion logarithm map.
Find the logarithm map on the quaternion Riemannian manifold.
Params:
q: the base point at which the logarithm is computed, i.e.
a Quaternion object
p: the argument of the quaternion map, a Quaternion object
Returns:
A tangent vector having the length and direction given by the
geodesic joining q and p.
'''
pass
@classmethod
def sym_log_map(cls, q, p):
'''Quaternion symmetrized logarithm map.
Find the symmetrized logarithm map on the quaternion Riemannian manifold.
Params:
q: the base point at which the logarithm is computed, i.e.
a Quaternion object
p: the argument of the quaternion map, a Quaternion object
Returns:
A tangent vector corresponding to the symmetrized geodesic curve formulation.
Note:
Information on the symmetrized formulations given in [Source](https://www.researchgate.net/publication/267191489_Riemannian_L_p_Averaging_on_Lie_Group_of_Nonzero_Quaternions).
'''
pass
@classmethod
def absolute_distance(cls, q0, q1):
'''Quaternion absolute distance.
Find the distance between two quaternions accounting for the sign ambiguity.
Params:
q0: the first quaternion
q1: the second quaternion
Returns:
A positive scalar corresponding to the chord of the shortest path/arc that
connects q0 to q1.
Note:
This function does not measure the distance on the hypersphere, but
it takes into account the fact that q and -q encode the same rotation.
It is thus a good indicator for rotation similarities.
'''
pass
@classmethod
def distance(cls, q0, q1):
'''Quaternion intrinsic distance.
Find the intrinsic geodesic distance between q0 and q1.
Params:
q0: the first quaternion
q1: the second quaternion
Returns:
A positive amount corresponding to the length of the geodesic arc
connecting q0 to q1.
Note:
Although the q0^(-1)*q1 != q1^(-1)*q0, the length of the path joining
them is given by the logarithm of those product quaternions, the norm
of which is the same.
'''
pass
@classmethod
def sym_distance(cls, q0, q1):
'''Quaternion symmetrized distance.
Find the intrinsic symmetrized geodesic distance between q0 and q1.
Params:
q0: the first quaternion
q1: the second quaternion
Returns:
A positive amount corresponding to the length of the symmetrized
geodesic curve connecting q0 to q1.
Note:
This formulation is more numerically stable when performing
iterative gradient descent on the Riemannian quaternion manifold.
However, the distance between q and -q is equal to pi, rendering this
formulation not useful for measuring rotation similarities when the
samples are spread over a "solid" angle of more than pi/2 radians
(the spread refers to quaternions as point samples on the unit hypersphere).
'''
pass
@classmethod
def slerp(cls, q0, q1, amount=0.5):
'''Spherical Linear Interpolation between quaternions.
Implemented as described in https://en.wikipedia.org/wiki/Slerp
Find a valid quaternion rotation at a specified distance along the
minor arc of a great circle passing through any two existing quaternion
endpoints lying on the unit radius hypersphere.
This is a class method and is called as a method of the class itself rather than on a particular instance.
Params:
q0: first endpoint rotation as a Quaternion object
q1: second endpoint rotation as a Quaternion object
amount: interpolation parameter between 0 and 1. This describes the linear placement position of
the result along the arc between endpoints; 0 being at `q0` and 1 being at `q1`.
Defaults to the midpoint (0.5).
Returns:
A new Quaternion object representing the interpolated rotation. This is guaranteed to be a unit quaternion.
Note:
This feature only makes sense when interpolating between unit quaternions (those lying on the unit radius hypersphere).
Calling this method will implicitly normalise the endpoints to unit quaternions if they are not already unit length.
'''
pass
@classmethod
def intermediates(cls, q0, q1, n, include_endpoints=False):
'''Generator method to get an iterable sequence of `n` evenly spaced quaternion
rotations between any two existing quaternion endpoints lying on the unit
radius hypersphere.
This is a convenience function that is based on `Quaternion.slerp()` as defined above.
This is a class method and is called as a method of the class itself rather than on a particular instance.
Params:
q_start: initial endpoint rotation as a Quaternion object
q_end: final endpoint rotation as a Quaternion object
n: number of intermediate quaternion objects to include within the interval
include_endpoints: [optional] if set to `True`, the sequence of intermediates
will be 'bookended' by `q_start` and `q_end`, resulting in a sequence length of `n + 2`.
If set to `False`, endpoints are not included. Defaults to `False`.
Yields:
A generator object iterating over a sequence of intermediate quaternion objects.
Note:
This feature only makes sense when interpolating between unit quaternions (those lying on the unit radius hypersphere).
Calling this method will implicitly normalise the endpoints to unit quaternions if they are not already unit length.
'''
pass
def derivative(self, rate):
'''Get the instantaneous quaternion derivative representing a quaternion rotating at a 3D rate vector `rate`
Params:
rate: numpy 3-array (or array-like) describing rotation rates about the global x, y and z axes respectively.
Returns:
A unit quaternion describing the rotation rate
'''
pass
def integrate(self, rate, timestep):
'''Advance a time varying quaternion to its value at a time `timestep` in the future.
The Quaternion object will be modified to its future value.
It is guaranteed to remain a unit quaternion.
Params:
rate: numpy 3-array (or array-like) describing rotation rates about the
global x, y and z axes respectively.
timestep: interval over which to integrate into the future.
Assuming *now* is `T=0`, the integration occurs over the interval
`T=0` to `T=timestep`. Smaller intervals are more accurate when
`rate` changes over time.
Note:
The solution is closed form given the assumption that `rate` is constant
over the interval of length `timestep`.
'''
pass
@property
def rotation_matrix(self):
'''Get the 3x3 rotation matrix equivalent of the quaternion rotation.
Returns:
A 3x3 orthogonal rotation matrix as a 3x3 Numpy array
Note:
This feature only makes sense when referring to a unit quaternion. Calling this method will implicitly normalise the Quaternion object to a unit quaternion if it is not already one.
'''
pass
@property
def transformation_matrix(self):
'''Get the 4x4 homogeneous transformation matrix equivalent of the quaternion rotation.
Returns:
A 4x4 homogeneous transformation matrix as a 4x4 Numpy array
Note:
This feature only makes sense when referring to a unit quaternion. Calling this method will implicitly normalise the Quaternion object to a unit quaternion if it is not already one.
'''
pass
@property
def yaw_pitch_roll(self):
'''Get the equivalent yaw-pitch-roll angles aka. intrinsic Tait-Bryan angles following the z-y'-x'' convention
Returns:
yaw: rotation angle around the z-axis in radians, in the range `[-pi, pi]`
pitch: rotation angle around the y'-axis in radians, in the range `[-pi/2, -pi/2]`
roll: rotation angle around the x''-axis in radians, in the range `[-pi, pi]`
The resulting rotation_matrix would be R = R_x(roll) R_y(pitch) R_z(yaw)
Note:
This feature only makes sense when referring to a unit quaternion. Calling this method will implicitly normalise the Quaternion object to a unit quaternion if it is not already one.
'''
pass
def _wrap_angle(self, theta):
'''Helper method: Wrap any angle to lie between -pi and pi
Odd multiples of pi are wrapped to +pi (as opposed to -pi)
'''
pass
def get_axis(self, undefined=np.zeros(3)):
'''Get the axis or vector about which the quaternion rotation occurs
For a null rotation (a purely real quaternion), the rotation angle will
always be `0`, but the rotation axis is undefined.
It is by default assumed to be `[0, 0, 0]`.
Params:
undefined: [optional] specify the axis vector that should define a null rotation.
This is geometrically meaningless, and could be any of an infinite set of vectors,
but can be specified if the default (`[0, 0, 0]`) causes undesired behaviour.
Returns:
A Numpy unit 3-vector describing the Quaternion object's axis of rotation.
Note:
This feature only makes sense when referring to a unit quaternion.
Calling this method will implicitly normalise the Quaternion object to a unit quaternion if it is not already one.
'''
pass
@property
def axis(self):
pass
@property
def angle(self):
'''Get the angle (in radians) describing the magnitude of the quaternion rotation about its rotation axis.
This is guaranteed to be within the range (-pi:pi) with the direction of
rotation indicated by the sign.
When a particular rotation describes a 180 degree rotation about an arbitrary
axis vector `v`, the conversion to axis / angle representation may jump
discontinuously between all permutations of `(-pi, pi)` and `(-v, v)`,
each being geometrically equivalent (see Note in documentation).
Returns:
A real number in the range (-pi:pi) describing the angle of rotation
in radians about a Quaternion object's axis of rotation.
Note:
This feature only makes sense when referring to a unit quaternion.
Calling this method will implicitly normalise the Quaternion object to a unit quaternion if it is not already one.
'''
pass
@property
def degrees(self):
pass
@property
def radians(self):
pass
@property
def scalar(self):
''' Return the real or scalar component of the quaternion object.
Returns:
A real number i.e. float
'''
pass
@property
def vector(self):
''' Return the imaginary or vector component of the quaternion object.
Returns:
A numpy 3-array of floats. NOT guaranteed to be a unit vector
'''
pass
@property
def real(self):
pass
@property
def imaginary(self):
pass
@property
def w(self):
pass
@property
def x(self):
pass
@property
def yaw_pitch_roll(self):
pass
@property
def z(self):
pass
@property
def elements(self):
''' Return all the elements of the quaternion object.
Returns:
A numpy 4-array of floats. NOT guaranteed to be a unit vector
'''
pass
def __getitem__(self, index):
pass
def __setitem__(self, index, value):
pass
def __copy__(self):
pass
def __deepcopy__(self, memo):
pass
@staticmethod
def to_degrees(angle_rad):
pass
@staticmethod
def to_radians(angle_deg):
pass
| 138 | 49 | 11 | 1 | 5 | 4 | 2 | 0.75 | 0 | 12 | 0 | 0 | 78 | 1 | 94 | 94 | 1,142 | 231 | 525 | 230 | 387 | 396 | 438 | 189 | 341 | 16 | 0 | 4 | 158 |
142,674 |
KieranWynn/pyquaternion
|
KieranWynn_pyquaternion/pyquaternion/test/test_quaternion.py
|
pyquaternion.test.test_quaternion.TestQuaternionFeatures
|
class TestQuaternionFeatures(unittest.TestCase):
def test_conjugate(self):
a, b, c, d = randomElements()
q1 = Quaternion(a, b, c, d)
q2 = Quaternion.random()
self.assertEqual(q1.conjugate, Quaternion(a, -b, -c, -d))
self.assertEqual((q1 * q2).conjugate, q2.conjugate * q1.conjugate)
self.assertEqual((q1 + q1.conjugate) / 2, Quaternion(scalar=q1.scalar))
self.assertEqual((q1 - q1.conjugate) / 2, Quaternion(vector=q1.vector))
def test_double_conjugate(self):
q = Quaternion.random()
self.assertEqual(q, q.conjugate.conjugate)
def test_norm(self):
r = randomElements()
q1 = Quaternion(*r)
q2 = Quaternion.random()
self.assertEqual(q1.norm, np.linalg.norm(np.array(r)))
self.assertEqual(q1.magnitude, np.linalg.norm(np.array(r)))
# Multiplicative norm
self.assertAlmostEqual((q1 * q2).norm, q1.norm * q2.norm, ALMOST_EQUAL_TOLERANCE)
# Scaled norm
for s in [30.0, 0.3, -2, -4.7]:
self.assertAlmostEqual((q1 * s).norm, q1.norm * abs(s), ALMOST_EQUAL_TOLERANCE)
def test_inverse(self):
q1 = Quaternion(randomElements())
q2 = Quaternion.random()
if q1:
self.assertEqual(q1 * q1.inverse, Quaternion(1.0, 0.0, 0.0, 0.0))
else:
with self.assertRaises(ZeroDivisionError):
q1 * q1.inverse
self.assertEqual(q2 * q2.inverse, Quaternion(1.0, 0.0, 0.0, 0.0))
def test_normalisation(self): # normalise to unit quaternion
r = randomElements()
q1 = Quaternion(*r)
v = q1.unit
n = q1.normalised
if q1 == Quaternion(0): # small chance with random generation
return # a 0 quaternion does not normalise
# Test normalised objects are unit quaternions
np.testing.assert_almost_equal(v.q, q1.elements / q1.norm, decimal=ALMOST_EQUAL_TOLERANCE)
np.testing.assert_almost_equal(n.q, q1.elements / q1.norm, decimal=ALMOST_EQUAL_TOLERANCE)
self.assertAlmostEqual(v.norm, 1.0, ALMOST_EQUAL_TOLERANCE)
self.assertAlmostEqual(n.norm, 1.0, ALMOST_EQUAL_TOLERANCE)
# Test axis and angle remain the same
np.testing.assert_almost_equal(q1.axis, v.axis, decimal=ALMOST_EQUAL_TOLERANCE)
np.testing.assert_almost_equal(q1.axis, n.axis, decimal=ALMOST_EQUAL_TOLERANCE)
self.assertAlmostEqual(q1.angle, v.angle, ALMOST_EQUAL_TOLERANCE)
self.assertAlmostEqual(q1.angle, n.angle, ALMOST_EQUAL_TOLERANCE)
# Test special case where q is zero
q2 = Quaternion(0)
self.assertEqual(q2, q2.normalised)
def test_is_unit(self):
q1 = Quaternion()
q2 = Quaternion(1.0, 0, 0, 0.0001)
self.assertTrue(q1.is_unit())
self.assertFalse(q2.is_unit())
self.assertTrue(q2.is_unit(0.001))
def test_q_matrix(self):
a, b, c, d = randomElements()
q = Quaternion(a, b, c, d)
M = np.array([
[a, -b, -c, -d],
[b, a, -d, c],
[c, d, a, -b],
[d, -c, b, a]])
self.assertTrue(np.array_equal(q._q_matrix(), M))
def test_q_bar_matrix(self):
a, b, c, d = randomElements()
q = Quaternion(a, b, c, d)
M = np.array([
[a, -b, -c, -d],
[b, a, d, -c],
[c, -d, a, b],
[d, c, -b, a]])
self.assertTrue(np.array_equal(q._q_bar_matrix(), M))
def test_output_of_components(self):
a, b, c, d = randomElements()
q = Quaternion(a, b, c, d)
# Test scalar
self.assertEqual(q.scalar, a)
self.assertEqual(q.real, a)
# Test vector
self.assertTrue(np.array_equal(q.vector, [b, c, d]))
self.assertTrue(np.array_equal(q.imaginary, [b, c, d]))
self.assertEqual(tuple(q.vector), (b, c, d))
self.assertEqual(list(q.imaginary), [b, c, d])
self.assertEqual(q.w, a)
self.assertEqual(q.x, b)
self.assertEqual(q.y, c)
self.assertEqual(q.z, d)
def test_output_of_elements(self):
r = randomElements()
q = Quaternion(*r)
self.assertEqual(tuple(q.elements), r)
def test_element_access(self):
r = randomElements()
q = Quaternion(*r)
self.assertEqual(q[0], r[0])
self.assertEqual(q[1], r[1])
self.assertEqual(q[2], r[2])
self.assertEqual(q[3], r[3])
self.assertEqual(q[-1], r[3])
self.assertEqual(q[-4], r[0])
with self.assertRaises(TypeError):
q[None]
with self.assertRaises(IndexError):
q[4]
with self.assertRaises(IndexError):
q[-5]
def test_element_assignment(self):
q = Quaternion()
self.assertEqual(q[1], 0.0)
q[1] = 10.0
self.assertEqual(q[1], 10.0)
self.assertEqual(q, Quaternion(1.0, 10.0, 0.0, 0.0))
with self.assertRaises(TypeError):
q[2] = None
with self.assertRaises(ValueError):
q[2] = 's'
def test_rotate(self):
q = Quaternion(axis=[1,1,1], angle=2*pi/3)
q2 = Quaternion(axis=[1, 0, 0], angle=-pi)
q3 = Quaternion(axis=[1, 0, 0], angle=pi)
precision = ALMOST_EQUAL_TOLERANCE
for r in [1, 3.8976, -69.7, -0.000001]:
# use np.testing.assert_almost_equal() to compare float sequences
np.testing.assert_almost_equal(q.rotate((r, 0, 0)), (0, r, 0), decimal=ALMOST_EQUAL_TOLERANCE)
np.testing.assert_almost_equal(q.rotate([0, r, 0]), [0, 0, r], decimal=ALMOST_EQUAL_TOLERANCE)
np.testing.assert_almost_equal(q.rotate(np.array([0, 0, r])), np.array([r, 0, 0]), decimal=ALMOST_EQUAL_TOLERANCE)
self.assertEqual(q.rotate(Quaternion(vector=[-r, 0, 0])), Quaternion(vector=[0, -r, 0]))
np.testing.assert_almost_equal(q.rotate([0, -r, 0]), [0, 0, -r], decimal=ALMOST_EQUAL_TOLERANCE)
self.assertEqual(q.rotate(Quaternion(vector=[0, 0, -r])), Quaternion(vector=[-r, 0, 0]))
np.testing.assert_almost_equal(q2.rotate((r, 0, 0)), q3.rotate((r, 0, 0)), decimal=ALMOST_EQUAL_TOLERANCE)
np.testing.assert_almost_equal(q2.rotate((0, r, 0)), q3.rotate((0, r, 0)), decimal=ALMOST_EQUAL_TOLERANCE)
np.testing.assert_almost_equal(q2.rotate((0, 0, r)), q3.rotate((0, 0, r)), decimal=ALMOST_EQUAL_TOLERANCE)
def test_conversion_to_matrix(self):
q = Quaternion.random()
a, b, c, d = tuple(q.elements)
R = np.array([
[a**2 + b**2 - c**2 - d**2, 2 * (b * c - a * d), 2 * (a * c + b * d)],
[2 * (b * c + a * d), a**2 - b**2 + c**2 - d**2, 2 * (c * d - a * b)],
[2 * (b * d - a * c), 2 * (a * b + c * d), a**2 - b**2 - c**2 + d**2]])
t = np.array([[0],[0],[0]])
T = np.vstack([np.hstack([R,t]), np.array([0,0,0,1])])
np.testing.assert_almost_equal(R, q.rotation_matrix, decimal=ALMOST_EQUAL_TOLERANCE)
np.testing.assert_almost_equal(T, q.transformation_matrix, decimal=ALMOST_EQUAL_TOLERANCE)
# Test no scaling of rotated vectors
v1 = np.array([1, 0, 0])
v2 = np.hstack((np.random.uniform(-10, 10, 3), 1.0))
v1_ = np.dot(q.rotation_matrix, v1)
v2_ = np.dot(q.transformation_matrix, v2)
self.assertAlmostEqual(np.linalg.norm(v1_), 1.0, ALMOST_EQUAL_TOLERANCE)
self.assertAlmostEqual(np.linalg.norm(v2_), np.linalg.norm(v2), ALMOST_EQUAL_TOLERANCE)
# Test transformation of vectors is equivalent for quaternion & matrix
np.testing.assert_almost_equal(v1_, q.rotate(v1), decimal=ALMOST_EQUAL_TOLERANCE)
np.testing.assert_almost_equal(v2_[0:3], q.rotate(v2[0:3]), decimal=ALMOST_EQUAL_TOLERANCE)
def test_conversion_to_ypr(self):
def R_x(theta):
c = cos(theta)
s = sin(theta)
return np.array([
[1, 0, 0],
[0, c,-s],
[0, s, c]])
def R_y(theta):
c = cos(theta)
s = sin(theta)
return np.array([
[ c, 0, s],
[ 0, 1, 0],
[-s, 0, c]])
def R_z(theta):
c = cos(theta)
s = sin(theta)
return np.array([
[ c,-s, 0],
[ s, c, 0],
[ 0, 0, 1]])
p = np.random.randn(3)
q = Quaternion.random()
yaw, pitch, roll = q.yaw_pitch_roll
p_q = q.rotate(p)
R_q = q.rotation_matrix
# build rotation matrix, R = R_z(yaw)*R_y(pitch)*R_x(roll)
R_ypr = np.dot(R_x(roll), np.dot(R_y(pitch), R_z(yaw)))
p_ypr = np.dot(R_ypr, p)
np.testing.assert_almost_equal(p_q , p_ypr, decimal=ALMOST_EQUAL_TOLERANCE)
np.testing.assert_almost_equal(R_q , R_ypr, decimal=ALMOST_EQUAL_TOLERANCE)
def test_matrix_io(self):
v = np.random.uniform(-100, 100, 3)
for i in range(10):
q0 = Quaternion.random()
R = q0.rotation_matrix
q1 = Quaternion(matrix=R)
np.testing.assert_almost_equal(q0.rotate(v), np.dot(R, v), decimal=ALMOST_EQUAL_TOLERANCE)
np.testing.assert_almost_equal(q0.rotate(v), q1.rotate(v), decimal=ALMOST_EQUAL_TOLERANCE)
np.testing.assert_almost_equal(q1.rotate(v), np.dot(R, v), decimal=ALMOST_EQUAL_TOLERANCE)
self.assertTrue((q0 == q1) or (q0 == -q1)) # q1 and -q1 are equivalent rotations
def validate_axis_angle(self, axis, angle):
def wrap_angle(theta):
""" Wrap any angle to lie between -pi and pi
Odd multiples of pi are wrapped to +pi (as opposed to -pi)
"""
result = ((theta + pi) % (2*pi)) - pi
if result == -pi: result = pi
return result
theta = wrap_angle(angle)
v = axis
q = Quaternion(angle=theta, axis=v)
v_ = q.axis
theta_ = q.angle
if theta == 0.0: # axis is irrelevant (check defaults to x=y=z)
np.testing.assert_almost_equal(theta_, 0.0, decimal=ALMOST_EQUAL_TOLERANCE)
np.testing.assert_almost_equal(v_, np.zeros(3), decimal=ALMOST_EQUAL_TOLERANCE)
return
elif abs(theta) == pi: # rotation in either direction is equivalent
self.assertTrue(
np.isclose(theta, pi) or np.isclose(theta, -pi)
and
np.isclose(v, v_).all() or np.isclose(v, -v_).all()
)
else:
self.assertTrue(
np.isclose(theta, theta_) and np.isclose(v, v_).all()
or
np.isclose(theta, -theta_) and np.isclose(v, -v_).all()
)
# Ensure the returned axis is a unit vector
np.testing.assert_almost_equal(np.linalg.norm(v_), 1.0, decimal=ALMOST_EQUAL_TOLERANCE)
def test_conversion_to_axis_angle(self):
random_axis = np.random.uniform(-1, 1, 3)
random_axis /= np.linalg.norm(random_axis)
angles = np.array([-3, -2, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, 2, 3]) * pi
axes = [np.array([1, 0, 0]), np.array([0, 1, 0]), np.array([0, 0, 1]), random_axis]
for v in axes:
for theta in angles:
self.validate_axis_angle(v, theta)
def test_axis_angle_io(self):
for i in range(20):
v = np.random.uniform(-1, 1, 3)
v /= np.linalg.norm(v)
theta = float(np.random.uniform(-2,2, 1)) * pi
self.validate_axis_angle(v, theta)
def test_exp(self):
from math import exp
q = Quaternion(axis=[1,0,0], angle=pi)
exp_q = Quaternion.exp(q)
self.assertEqual(exp_q, exp(0) * Quaternion(scalar=cos(1.0), vector=[sin(1.0), 0,0]))
def test_log(self):
from math import log
q = Quaternion(axis=[1,0,0], angle=pi)
log_q = Quaternion.log(q)
self.assertEqual(log_q, Quaternion(scalar=0, vector=[pi/2,0,0]))
def test_distance(self):
q = Quaternion(scalar=0, vector=[1,0,0])
p = Quaternion(scalar=0, vector=[0,1,0])
self.assertEqual(pi/2, Quaternion.distance(q,p))
q = Quaternion(angle=pi/2, axis=[1,0,0])
p = Quaternion(angle=pi/2, axis=[0,1,0])
self.assertEqual(pi/3, Quaternion.distance(q,p))
q = Quaternion(scalar=1, vector=[1,1,1])
p = Quaternion(scalar=-1, vector=[-1,-1,-1])
p._normalise()
q._normalise()
self.assertAlmostEqual(0, Quaternion.distance(q,p), places=8)
def test_absolute_distance(self):
q = Quaternion(scalar=0, vector=[1,0,0])
p = Quaternion(scalar=0, vector=[0,1,0])
self.assertEqual((q-p).norm, Quaternion.absolute_distance(q,p))
q = Quaternion(angle=pi/2, axis=[1,0,0])
p = Quaternion(angle=pi/2, axis=[0,1,0])
self.assertEqual((q-p).norm, Quaternion.absolute_distance(q,p))
q = Quaternion(scalar=0, vector=[1,0,0])
p = Quaternion(scalar=-1, vector=[0,-1,0])
self.assertEqual((q+p).norm, Quaternion.absolute_distance(q,p))
q = Quaternion(scalar=1, vector=[1,1,1])
p = Quaternion(scalar=-1, vector=[-1,-1,-1])
p._normalise()
q._normalise()
self.assertAlmostEqual(0, Quaternion.absolute_distance(q,p), places=8)
def test_sym_distance(self):
q = Quaternion(scalar=0, vector=[1,0,0])
p = Quaternion(scalar=0, vector=[0,1,0])
self.assertEqual(pi/2, Quaternion.sym_distance(q,p))
q = Quaternion(angle=pi/2, axis=[1,0,0])
p = Quaternion(angle=pi/2, axis=[0,1,0])
self.assertAlmostEqual(pi/3, Quaternion.sym_distance(q,p), places=6)
q = Quaternion(scalar=0, vector=[1,0,0])
p = Quaternion(scalar=0, vector=[0,-1,0])
self.assertEqual(pi/2, Quaternion.sym_distance(q,p))
q = Quaternion(scalar=1, vector=[1,1,1])
p = Quaternion(scalar=-1, vector=[-1,-1,-1])
p._normalise()
q._normalise()
self.assertAlmostEqual(pi, Quaternion.sym_distance(q,p), places=8)
def test_slerp(self):
q1 = Quaternion(axis=[1, 0, 0], angle=0.0)
q2 = Quaternion(axis=[1, 0, 0], angle=pi/2)
q3 = Quaternion.slerp(q1, q2, 0.5)
self.assertEqual(q3, Quaternion(axis=[1,0,0], angle=pi/4))
def test_slerp_extensive(self):
for axis in [[1, 0, 0], [0, 1, 0], [0, 0, 1]]:
q1 = Quaternion(axis=axis, angle=0.0)
q2 = Quaternion(axis=axis, angle=pi/2.0)
q3 = Quaternion(axis=axis, angle=pi*3.0/2.0)
for t in np.arange(0.1, 1, 0.1):
q4 = Quaternion.slerp(q1, q2, t)
q5 = Quaternion.slerp(q1, q3, t)
q6 = Quaternion(axis=axis, angle=t*pi/2)
q7 = Quaternion(axis=axis, angle=-t*pi/2)
assert q4 == q6 or q4 == -q6
assert q5 == q7 or q5 == -q7
def test_interpolate(self):
q1 = Quaternion(axis=[1, 0, 0], angle=0.0)
q2 = Quaternion(axis=[1, 0, 0], angle=2*pi/3)
num_intermediates = 3
base = pi/6
list1 = list(Quaternion.intermediates(q1, q2, num_intermediates, include_endpoints=False))
list2 = list(Quaternion.intermediates(q1, q2, num_intermediates, include_endpoints=True))
self.assertEqual(len(list1), num_intermediates)
self.assertEqual(len(list2), num_intermediates+2)
self.assertEqual(list1[0], list2[1])
self.assertEqual(list1[1], list2[2])
self.assertEqual(list1[2], list2[3])
self.assertEqual(list2[0], q1)
self.assertEqual(list2[1], Quaternion(axis=[1, 0, 0], angle=base))
self.assertEqual(list2[2], Quaternion(axis=[1, 0, 0], angle=2*base))
self.assertEqual(list2[3], Quaternion(axis=[1, 0, 0], angle=3*base))
self.assertEqual(list2[4], q2)
def test_differentiation(self):
q = Quaternion.random()
omega = np.random.uniform(-1, 1, 3) # Random angular velocity
q_dash = 0.5 * q * Quaternion(vector=omega)
self.assertEqual(q_dash, q.derivative(omega))
def test_integration(self):
rotation_rate = [0, 0, 2*pi] # one rev per sec around z
v = [1, 0, 0] # test vector
for dt in [0, 0.25, 0.5, 0.75, 1, 2, 10, 1e-10, random()*10]: # time step in seconds
qt = Quaternion() # no rotation
qt.integrate(rotation_rate, dt)
q_truth = Quaternion(axis=[0,0,1], angle=dt*2*pi)
a = qt.rotate(v)
b = q_truth.rotate(v)
np.testing.assert_almost_equal(a, b, decimal=ALMOST_EQUAL_TOLERANCE)
self.assertTrue(qt.is_unit())
# Check integrate() is norm-preserving over many calls
q = Quaternion()
for i in range(1000):
q.integrate([pi, 0, 0], 0.001)
self.assertTrue(q.is_unit())
|
class TestQuaternionFeatures(unittest.TestCase):
def test_conjugate(self):
pass
def test_double_conjugate(self):
pass
def test_norm(self):
pass
def test_inverse(self):
pass
def test_normalisation(self):
pass
def test_is_unit(self):
pass
def test_q_matrix(self):
pass
def test_q_bar_matrix(self):
pass
def test_output_of_components(self):
pass
def test_output_of_elements(self):
pass
def test_element_access(self):
pass
def test_element_assignment(self):
pass
def test_rotate(self):
pass
def test_conversion_to_matrix(self):
pass
def test_conversion_to_ypr(self):
pass
def R_x(theta):
pass
def R_y(theta):
pass
def R_z(theta):
pass
def test_matrix_io(self):
pass
def validate_axis_angle(self, axis, angle):
pass
def wrap_angle(theta):
''' Wrap any angle to lie between -pi and pi
Odd multiples of pi are wrapped to +pi (as opposed to -pi)
'''
pass
def test_conversion_to_axis_angle(self):
pass
def test_axis_angle_io(self):
pass
def test_exp(self):
pass
def test_log(self):
pass
def test_distance(self):
pass
def test_absolute_distance(self):
pass
def test_sym_distance(self):
pass
def test_slerp(self):
pass
def test_slerp_extensive(self):
pass
def test_interpolate(self):
pass
def test_differentiation(self):
pass
def test_integration(self):
pass
| 34 | 1 | 12 | 1 | 11 | 1 | 1 | 0.08 | 1 | 8 | 0 | 0 | 29 | 0 | 29 | 101 | 409 | 58 | 335 | 152 | 299 | 27 | 305 | 152 | 269 | 3 | 2 | 2 | 48 |
142,675 |
KieranWynn/pyquaternion
|
KieranWynn_pyquaternion/pyquaternion/test/test_quaternion.py
|
pyquaternion.test.test_quaternion.TestQuaternionHashing
|
class TestQuaternionHashing(unittest.TestCase):
def test_equal_quaternions(self):
q1 = Quaternion(1, 0, 0, 0)
q2 = Quaternion(1, 0, 0, 0)
self.assertEqual(hash(q1), hash(q2))
def test_unequal_quaternions(self):
q1 = Quaternion(1, 0, 0, 0)
q2 = Quaternion(0, 1, 0, 0)
self.assertNotEqual(hash(q1), hash(q2))
|
class TestQuaternionHashing(unittest.TestCase):
def test_equal_quaternions(self):
pass
def test_unequal_quaternions(self):
pass
| 3 | 0 | 5 | 1 | 4 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 2 | 0 | 2 | 74 | 12 | 3 | 9 | 7 | 6 | 0 | 9 | 7 | 6 | 1 | 2 | 0 | 2 |
142,676 |
KieranWynn/pyquaternion
|
KieranWynn_pyquaternion/pyquaternion/test/test_quaternion.py
|
pyquaternion.test.test_quaternion.TestQuaternionInitialisation
|
class TestQuaternionInitialisation(unittest.TestCase):
def test_init_default(self):
q = Quaternion()
self.assertIsInstance(q, Quaternion)
self.assertEqual(q, Quaternion(1., 0., 0., 0.))
def test_init_copy(self):
q1 = Quaternion.random()
q2 = Quaternion(q1)
self.assertIsInstance(q2, Quaternion)
self.assertEqual(q2, q1)
with self.assertRaises(TypeError):
q3 = Quaternion(None)
with self.assertRaises(ValueError):
q4 = Quaternion("String")
def test_init_random(self):
r1 = Quaternion.random()
r2 = Quaternion.random()
self.assertAlmostEqual(r1.norm, 1.0, ALMOST_EQUAL_TOLERANCE)
self.assertIsInstance(r1, Quaternion)
#self.assertNotEqual(r1, r2) #TODO, this *may* fail at random
def test_init_from_scalar(self):
s = random()
q1 = Quaternion(s)
q2 = Quaternion(repr(s))
self.assertIsInstance(q1, Quaternion)
self.assertIsInstance(q2, Quaternion)
self.assertEqual(q1, Quaternion(s, 0.0, 0.0, 0.0))
self.assertEqual(q2, Quaternion(s, 0.0, 0.0, 0.0))
with self.assertRaises(TypeError):
q = Quaternion(None)
with self.assertRaises(ValueError):
q = Quaternion("String")
def test_init_from_elements(self):
a, b, c, d = randomElements()
q1 = Quaternion(a, b, c, d)
q2 = Quaternion(repr(a), repr(b), repr(c), repr(d))
q3 = Quaternion(a, repr(b), c, d)
self.assertIsInstance(q1, Quaternion)
self.assertIsInstance(q2, Quaternion)
self.assertIsInstance(q3, Quaternion)
self.assertTrue(np.array_equal(q1.q, [a, b, c, d]))
self.assertEqual(q1, q2)
self.assertEqual(q2, q3)
with self.assertRaises(TypeError):
q = Quaternion(None, b, c, d)
with self.assertRaises(ValueError):
q = Quaternion(a, b, "String", d)
with self.assertRaises(ValueError):
q = Quaternion(a, b, c)
with self.assertRaises(ValueError):
q = Quaternion(a, b, c, d, random())
def test_init_from_array(self):
r = randomElements()
a = np.array(r)
q = Quaternion(a)
self.assertIsInstance(q, Quaternion)
self.assertEqual(q, Quaternion(*r))
with self.assertRaises(ValueError):
q = Quaternion(a[1:4]) # 3-vector
with self.assertRaises(ValueError):
q = Quaternion(np.hstack((a, a))) # 8-vector
with self.assertRaises(ValueError):
q = Quaternion(np.array([a, a])) # 2x4-
with self.assertRaises(TypeError):
q = Quaternion(np.array([None, None, None, None]))
def test_init_from_tuple(self):
t = randomElements()
q = Quaternion(t)
self.assertIsInstance(q, Quaternion)
self.assertEqual(q, Quaternion(*t))
with self.assertRaises(ValueError):
q = Quaternion(t[1:4]) # 3-tuple
with self.assertRaises(ValueError):
q = Quaternion(t + t) # 8-tuple
with self.assertRaises(ValueError):
q = Quaternion((t, t)) # 2x4-tuple
with self.assertRaises(TypeError):
q = Quaternion((None, None, None, None))
def test_init_from_list(self):
r = randomElements()
l = list(r)
q = Quaternion(l)
self.assertIsInstance(q, Quaternion)
self.assertEqual(q, Quaternion(*l))
with self.assertRaises(ValueError):
q = Quaternion(l[1:4]) # 3-list
with self.assertRaises(ValueError):
q = Quaternion(l + l) # 8-list
with self.assertRaises(ValueError):
q = Quaternion((l, l)) # 2x4-list
with self.assertRaises(TypeError):
q = Quaternion([None, None, None, None])
def test_init_from_explicit_elements(self):
e1, e2, e3, e4 = randomElements()
q1 = Quaternion(w=e1, x=e2, y=e3, z=e4)
q2 = Quaternion(a=e1, b=repr(e2), c=e3, d=e4)
q3 = Quaternion(a=e1, i=e2, j=e3, k=e4)
q4 = Quaternion(a=e1)
self.assertIsInstance(q1, Quaternion)
self.assertIsInstance(q2, Quaternion)
self.assertIsInstance(q3, Quaternion)
self.assertIsInstance(q4, Quaternion)
self.assertEqual(q1, Quaternion(e1, e2, e3, e4))
self.assertEqual(q1, q2)
self.assertEqual(q2, q3)
self.assertEqual(q4, Quaternion(e1))
with self.assertRaises(TypeError):
q = Quaternion(a=None, b=e2, c=e3, d=e4)
with self.assertRaises(ValueError):
q = Quaternion(a=e1, b=e2, c="String", d=e4)
with self.assertRaises(ValueError):
q = Quaternion(w=e1, x=e2)
with self.assertRaises(ValueError):
q = Quaternion(a=e1, b=e2, c=e3, d=e4, e=e1)
def test_init_from_explicit_component(self):
a, b, c, d = randomElements()
# Using 'real' & 'imaginary' notation
q1 = Quaternion(real=a, imaginary=(b, c, d))
q2 = Quaternion(real=a, imaginary=[b, c, d])
q3 = Quaternion(real=a, imaginary=np.array([b, c, d]))
q4 = Quaternion(real=a)
q5 = Quaternion(imaginary=np.array([b, c, d]))
q6 = Quaternion(real=None, imaginary=np.array([b, c, d]))
self.assertIsInstance(q1, Quaternion)
self.assertIsInstance(q2, Quaternion)
self.assertIsInstance(q3, Quaternion)
self.assertIsInstance(q4, Quaternion)
self.assertIsInstance(q5, Quaternion)
self.assertIsInstance(q6, Quaternion)
self.assertEqual(q1, Quaternion(a, b, c, d))
self.assertEqual(q1, q2)
self.assertEqual(q2, q3)
self.assertEqual(q4, Quaternion(a, 0, 0, 0))
self.assertEqual(q5, Quaternion(0, b, c, d))
self.assertEqual(q5, q6)
with self.assertRaises(ValueError):
q = Quaternion(real=a, imaginary=[b, c])
with self.assertRaises(ValueError):
q = Quaternion(real=a, imaginary=(b, c, d, d))
# Using 'scalar' & 'vector' notation
q1 = Quaternion(scalar=a, vector=(b, c, d))
q2 = Quaternion(scalar=a, vector=[b, c, d])
q3 = Quaternion(scalar=a, vector=np.array([b, c, d]))
q4 = Quaternion(scalar=a)
q5 = Quaternion(vector=np.array([b, c, d]))
q6 = Quaternion(scalar=None, vector=np.array([b, c, d]))
self.assertIsInstance(q1, Quaternion)
self.assertIsInstance(q2, Quaternion)
self.assertIsInstance(q3, Quaternion)
self.assertIsInstance(q4, Quaternion)
self.assertIsInstance(q5, Quaternion)
self.assertIsInstance(q6, Quaternion)
self.assertEqual(q1, Quaternion(a, b, c, d))
self.assertEqual(q1, q2)
self.assertEqual(q2, q3)
self.assertEqual(q4, Quaternion(a, 0, 0, 0))
self.assertEqual(q5, Quaternion(0, b, c, d))
self.assertEqual(q5, q6)
with self.assertRaises(ValueError):
q = Quaternion(scalar=a, vector=[b, c])
with self.assertRaises(ValueError):
q = Quaternion(scalar=a, vector=(b, c, d, d))
def test_init_from_explicit_rotation_params(self):
vx = random()
vy = random()
vz = random()
theta = random() * 2.0 * pi
v1 = (vx, vy, vz) # tuple format
v2 = [vx, vy, vz] # list format
v3 = np.array(v2) # array format
q1 = Quaternion(axis=v1, angle=theta)
q2 = Quaternion(axis=v2, radians=theta)
q3 = Quaternion(axis=v3, degrees=theta / pi * 180)
# normalise v to a unit vector
v3 = v3 / np.linalg.norm(v3)
q4 = Quaternion(angle=theta, axis=v3)
# Construct the true quaternion
t = theta / 2.0
a = cos(t)
b = v3[0] * sin(t)
c = v3[1] * sin(t)
d = v3[2] * sin(t)
truth = Quaternion(a, b, c, d)
self.assertEqual(q1, truth)
self.assertEqual(q2, truth)
self.assertEqual(q3, truth)
self.assertEqual(q4, truth)
self.assertEqual(Quaternion(axis=v3, angle=0), Quaternion())
self.assertEqual(Quaternion(axis=v3, radians=0), Quaternion())
self.assertEqual(Quaternion(axis=v3, degrees=0), Quaternion())
self.assertEqual(Quaternion(axis=v3), Quaternion())
# Result should be a versor (Unit Quaternion)
self.assertAlmostEqual(q1.norm, 1.0, ALMOST_EQUAL_TOLERANCE)
self.assertAlmostEqual(q2.norm, 1.0, ALMOST_EQUAL_TOLERANCE)
self.assertAlmostEqual(q3.norm, 1.0, ALMOST_EQUAL_TOLERANCE)
self.assertAlmostEqual(q4.norm, 1.0, ALMOST_EQUAL_TOLERANCE)
with self.assertRaises(ValueError):
q = Quaternion(angle=theta)
with self.assertRaises(ValueError):
q = Quaternion(axis=[b, c], angle=theta)
with self.assertRaises(ValueError):
q = Quaternion(axis=(b, c, d, d), angle=theta)
with self.assertRaises(ZeroDivisionError):
q = Quaternion(axis=[0., 0., 0.], angle=theta)
    def test_init_from_explicit_matrix(self):
        """Rotation-matrix construction matches axis/angle construction.

        For several angles about +z, rotating a vector with the raw matrix,
        with an axis/angle quaternion, and with Quaternion(matrix=R) must all
        agree; a non-orthogonal matrix must be rejected with ValueError.
        """

        def R_z(theta):
            """
            Generate a rotation matrix describing a rotation of theta degrees about the z-axis
            """
            c = cos(theta)
            s = sin(theta)
            return np.array([
                [c, -s, 0],
                [s, c, 0],
                [0, 0, 1]])

        v = np.array([1, 0, 0])
        for angle in [0, pi/6, pi/4, pi/2, pi, 4*pi/3, 3*pi/2, 2*pi]:
            R = R_z(angle)  # rotation matrix describing a rotation of `angle` about +z
            v_prime_r = np.dot(R, v)

            q1 = Quaternion(axis=[0, 0, 1], angle=angle)
            v_prime_q1 = q1.rotate(v)

            np.testing.assert_almost_equal(v_prime_r, v_prime_q1, decimal=ALMOST_EQUAL_TOLERANCE)

            q2 = Quaternion(matrix=R)
            v_prime_q2 = q2.rotate(v)

            np.testing.assert_almost_equal(v_prime_q2, v_prime_r, decimal=ALMOST_EQUAL_TOLERANCE)

        # The identity matrix must map to the identity quaternion
        R = np.matrix(np.eye(3))
        q3 = Quaternion(matrix=R)
        v_prime_q3 = q3.rotate(v)
        np.testing.assert_almost_equal(v, v_prime_q3, decimal=ALMOST_EQUAL_TOLERANCE)
        self.assertEqual(q3, Quaternion())

        R[0, 1] += 3  # introduce error to make matrix non-orthogonal
        with self.assertRaises(ValueError):
            q4 = Quaternion(matrix=R)
    def test_init_from_explicit_matrix_with_optional_tolerance_arguments(self):
        """
        The matrix defined in this test was carefully crafted such that it's
        orthogonal to a precision of 1e-07, but not to a precision of 1e-08.
        The default value for numpy's atol function is 1e-08, but developers
        should have the option to use a lower precision if they choose to.

        Reference: https://docs.scipy.org/doc/numpy/reference/generated/numpy.allclose.html
        """
        m = [[0.73297226, -0.16524626, -0.65988294, -0.07654548],
             [0.13108627, 0.98617666, -0.10135052, -0.04878795],
             [0.66750896, -0.01221443, 0.74450167, -0.05474513],
             [0, 0, 0, 1, ]]
        npm = np.matrix(m)
        # With the default atol (1e-08) the matrix is judged non-orthogonal
        with self.assertRaises(ValueError):
            Quaternion(matrix=npm)
        # With a relaxed atol (1e-07) construction must succeed
        try:
            Quaternion(matrix=npm, atol=1e-07)
        except ValueError:
            self.fail("Quaternion() raised ValueError unexpectedly!")
def test_init_from_explicit_arrray(self):
r = randomElements()
a = np.array(r)
q = Quaternion(array=a)
self.assertIsInstance(q, Quaternion)
self.assertEqual(q, Quaternion(*r))
with self.assertRaises(ValueError):
q = Quaternion(array=a[1:4]) # 3-vector
with self.assertRaises(ValueError):
q = Quaternion(array=np.hstack((a, a))) # 8-vector
with self.assertRaises(ValueError):
q = Quaternion(array=np.array([a, a])) # 2x4-matrix
with self.assertRaises(TypeError):
q = Quaternion(array=np.array([None, None, None, None]))
def test_equivalent_initialisations(self):
a, b, c, d = randomElements()
q = Quaternion(a, b, c, d)
self.assertEqual(q, Quaternion(q))
self.assertEqual(q, Quaternion(np.array([a, b, c, d])))
self.assertEqual(q, Quaternion((a, b, c, d)))
self.assertEqual(q, Quaternion([a, b, c, d]))
self.assertEqual(q, Quaternion(w=a, x=b, y=c, z=d))
self.assertEqual(q, Quaternion(array=np.array([a, b, c, d])))
|
class TestQuaternionInitialisation(unittest.TestCase):
    # NOTE(review): auto-generated stub skeleton mirroring the full test class
    # in this dump; every body is intentionally empty.

    def test_init_default(self):
        pass

    def test_init_copy(self):
        pass

    def test_init_random(self):
        pass

    def test_init_from_scalar(self):
        pass

    def test_init_from_elements(self):
        pass

    def test_init_from_array(self):
        pass

    def test_init_from_tuple(self):
        pass

    def test_init_from_list(self):
        pass

    def test_init_from_explicit_elements(self):
        pass

    def test_init_from_explicit_component(self):
        pass

    def test_init_from_explicit_rotation_params(self):
        pass

    def test_init_from_explicit_matrix(self):
        pass

    # NOTE(review): R_z is a nested helper in the real test; the skeleton
    # generator appears to have flattened it to method level.
    def R_z(theta):
        '''
        Generate a rotation matrix describing a rotation of theta degrees about the z-axis
        '''
        pass

    def test_init_from_explicit_matrix_with_optional_tolerance_arguments(self):
        '''
        The matrix defined in this test was carefully crafted such that it's
        orthogonal to a precision of 1e-07, but not to a precision of 1e-08.
        The default value for numpy's atol function is 1e-08, but developers
        should have the option to use a lower precision if they choose to.

        Reference: https://docs.scipy.org/doc/numpy/reference/generated/numpy.allclose.html
        '''
        pass

    def test_init_from_explicit_arrray(self):
        pass

    def test_equivalent_initialisations(self):
        pass
| 17 | 2 | 19 | 2 | 17 | 2 | 1 | 0.13 | 1 | 5 | 0 | 0 | 15 | 0 | 15 | 87 | 315 | 39 | 259 | 93 | 242 | 34 | 253 | 93 | 236 | 2 | 2 | 1 | 18 |
142,677 |
KieranWynn/pyquaternion
|
KieranWynn_pyquaternion/pyquaternion/test/test_quaternion.py
|
pyquaternion.test.test_quaternion.TestQuaternionRepresentation
|
class TestQuaternionRepresentation(unittest.TestCase):
    """Exercises the __str__, __format__ and __repr__ output of Quaternion."""

    def test_str(self):
        w, x, y, z = randomElements()
        quat = Quaternion(w, x, y, z)
        expected = "{:.3f} {:+.3f}i {:+.3f}j {:+.3f}k".format(w, x, y, z)
        self.assertEqual(expected, str(quat))

    def test_format(self):
        w, x, y, z = randomElements()
        quat = Quaternion(w, x, y, z)
        for spec in ['.3f', '+.14f', '.6e', 'g']:
            placeholder = '{:' + spec + '}'
            # e.g. '{:.3f} {:.3f}i {:.3f}j {:.3f}k'
            per_element = ' '.join(placeholder + unit for unit in ('', 'i', 'j', 'k'))
            self.assertEqual(per_element.format(w, x, y, z), placeholder.format(quat))

    def test_repr(self):
        # Numpy seems to increase precision of floats (C magic?)
        w, x, y, z = np.array(randomElements())
        quat = Quaternion(w, x, y, z)
        expected = "Quaternion({!r}, {!r}, {!r}, {!r})".format(w, x, y, z)
        self.assertEqual(expected, repr(quat))
|
class TestQuaternionRepresentation(unittest.TestCase):
    # NOTE(review): auto-generated stub skeleton of the representation tests;
    # bodies are intentionally empty.

    def test_str(self):
        pass

    def test_format(self):
        pass

    def test_repr(self):
        pass
| 4 | 0 | 6 | 0 | 6 | 0 | 1 | 0.06 | 1 | 1 | 0 | 0 | 3 | 0 | 3 | 75 | 21 | 3 | 18 | 15 | 14 | 1 | 18 | 15 | 14 | 2 | 2 | 1 | 4 |
142,678 |
KieranWynn/pyquaternion
|
KieranWynn_pyquaternion/pyquaternion/test/test_quaternion.py
|
pyquaternion.test.test_quaternion.TestQuaternionTypeConversions
|
class TestQuaternionTypeConversions(unittest.TestCase):
    """Exercises conversion of quaternions to bool, float, int and complex."""

    def test_bool(self):
        self.assertTrue(Quaternion())
        self.assertFalse(Quaternion(scalar=0.0))
        # Unary ~ inverts the truth value of both cases above
        self.assertTrue(~Quaternion(scalar=0.0))
        self.assertFalse(~Quaternion())

    def test_float(self):
        w, x, y, z = randomElements()
        quat = Quaternion(w, x, y, z)
        self.assertEqual(float(quat), w)

    def test_int(self):
        w, x, y, z = randomElements()
        quat = Quaternion(w, x, y, z)
        self.assertEqual(int(quat), int(w))
        # int() truncates toward zero, exactly like int(float)
        for scalar, expected in ((6.28, 6), (6.78, 6), (-4.87, -4)):
            self.assertEqual(int(Quaternion(scalar)), expected)
        self.assertEqual(int(round(float(Quaternion(-4.87)))), -5)

    def test_complex(self):
        w, x, y, z = randomElements()
        quat = Quaternion(w, x, y, z)
        self.assertEqual(complex(quat), complex(w, x))
|
class TestQuaternionTypeConversions(unittest.TestCase):
    # NOTE(review): auto-generated stub skeleton of the type-conversion tests;
    # bodies are intentionally empty.

    def test_bool(self):
        pass

    def test_float(self):
        pass

    def test_int(self):
        pass

    def test_complex(self):
        pass
| 5 | 0 | 5 | 0 | 5 | 0 | 1 | 0 | 1 | 3 | 0 | 0 | 4 | 0 | 4 | 76 | 26 | 4 | 22 | 11 | 17 | 0 | 22 | 11 | 17 | 1 | 2 | 0 | 4 |
142,679 |
KieranWynn/pyquaternion
|
KieranWynn_pyquaternion/pyquaternion/test/test_quaternion.py
|
pyquaternion.test.test_quaternion.TestQuaternionUtilities
|
class TestQuaternionUtilities(unittest.TestCase):
    """Checks copy.copy and copy.deepcopy behaviour for Quaternion."""

    def test_copy(self):
        from copy import copy
        original = Quaternion.random()
        duplicate = copy(original)
        self.assertEqual(original, duplicate)
        self.assertFalse(original is duplicate)
        # Shallow copy: the element values compare equal element-wise
        self.assertTrue(all(original.q == duplicate.q))

    def test_deep_copy(self):
        from copy import deepcopy
        original = Quaternion.random()
        duplicate = deepcopy(original)
        self.assertEqual(original, duplicate)
        self.assertFalse(original is duplicate)
        # Deep copy must not share the backing element array
        self.assertFalse(original.q is duplicate.q)
|
class TestQuaternionUtilities(unittest.TestCase):
    # NOTE(review): auto-generated stub skeleton of the copy/deepcopy tests;
    # bodies are intentionally empty.

    def test_copy(self):
        pass

    def test_deep_copy(self):
        pass
| 3 | 0 | 7 | 0 | 7 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 2 | 0 | 2 | 74 | 16 | 1 | 15 | 9 | 10 | 0 | 15 | 9 | 10 | 1 | 2 | 0 | 2 |
142,680 |
KieranWynn/pyquaternion
|
KieranWynn_pyquaternion/pyquaternion/test/test_quaternion.py
|
pyquaternion.test.test_quaternion.TestQuaternionArithmetic
|
class TestQuaternionArithmetic(unittest.TestCase):
    """Exercises Quaternion operator overloads.

    Covers equality (including float-tolerance behaviour and mixed-type
    operands), the unary/binary arithmetic operators, powers, and the
    algebraic identities of the quaternion basis elements i, j, k.
    """

    def test_equality(self):
        r = randomElements()
        self.assertEqual(Quaternion(*r), Quaternion(*r))
        q = Quaternion(*r)
        self.assertEqual(q, q)
        # Equality should work with other types, if they can be interpreted as quaternions
        self.assertEqual(q, r)
        self.assertEqual(Quaternion(1., 0., 0., 0.), 1.0)
        self.assertEqual(Quaternion(1., 0., 0., 0.), "1.0")
        self.assertNotEqual(q, q + Quaternion(0.0, 0.002, 0.0, 0.0))

        # Equality should also cover small rounding and floating point errors
        self.assertEqual(Quaternion(1., 0., 0., 0.), Quaternion(1.0 - 1e-14, 0., 0., 0.))
        self.assertNotEqual(Quaternion(1., 0., 0., 0.), Quaternion(1.0 - 1e-12, 0., 0., 0.))
        self.assertNotEqual(Quaternion(160., 0., 0., 0.), Quaternion(160.0 - 1e-10, 0., 0., 0.))
        self.assertNotEqual(Quaternion(1600., 0., 0., 0.), Quaternion(1600.0 - 1e-9, 0., 0., 0.))

        # Comparing with non-quaternion-like operands must raise
        with self.assertRaises(TypeError):
            q == None
        with self.assertRaises(ValueError):
            q == 's'

    def test_assignment(self):
        a, b, c, d = randomElements()
        q1 = Quaternion(a, b, c, d)
        q2 = Quaternion(a, b*0.1, c+0.3, d)
        self.assertNotEqual(q1, q2)
        q2 = q1
        self.assertEqual(q1, q2)

    def test_unary_minus(self):
        a, b, c, d = randomElements()
        q = Quaternion(a, b, c, d)
        self.assertEqual(-q, Quaternion(-a, -b, -c, -d))

    def test_add(self):
        r1 = randomElements()
        r2 = randomElements()
        r = random()
        n = None
        q1 = Quaternion(*r1)
        q2 = Quaternion(*r2)
        q3 = Quaternion(array= np.array(r1) + np.array(r2))
        # Adding a plain scalar only affects the real (w) element
        q4 = Quaternion(array= np.array(r2) + np.array([r, 0.0, 0.0, 0.0]))
        self.assertEqual(q1 + q2, q3)
        q1 += q2
        self.assertEqual(q1, q3)
        self.assertEqual(q2 + r, q4)
        self.assertEqual(r + q2, q4)

        with self.assertRaises(TypeError):
            q1 += n
        with self.assertRaises(TypeError):
            n += q1

    def test_subtract(self):
        r1 = randomElements()
        r2 = randomElements()
        r = random()
        n = None
        q1 = Quaternion(*r1)
        q2 = Quaternion(*r2)
        q3 = Quaternion(array= np.array(r1) - np.array(r2))
        # Subtracting a plain scalar only affects the real (w) element
        q4 = Quaternion(array= np.array(r2) - np.array([r, 0.0, 0.0, 0.0]))
        self.assertEqual(q1 - q2, q3)
        q1 -= q2
        self.assertEqual(q1, q3)
        self.assertEqual(q2 - r, q4)
        self.assertEqual(r - q2, -q4)

        with self.assertRaises(TypeError):
            q1 -= n
        with self.assertRaises(TypeError):
            n -= q1

    def test_multiplication_of_bases(self):
        # Hamilton's defining identities: i^2 = j^2 = k^2 = ijk = -1
        one = Quaternion(1.0, 0.0, 0.0, 0.0)
        i = Quaternion(0.0, 1.0, 0.0, 0.0)
        j = Quaternion(0.0, 0.0, 1.0, 0.0)
        k = Quaternion(0.0, 0.0, 0.0, 1.0)

        self.assertEqual(i * i, j * j)
        self.assertEqual(j * j, k * k)
        self.assertEqual(k * k, i * j * k)
        self.assertEqual(i * j * k, -one)

        self.assertEqual(i * j, k)
        self.assertEqual(i * i, -one)
        self.assertEqual(i * k, -j)
        self.assertEqual(j * i, -k)
        self.assertEqual(j * j, -one)
        self.assertEqual(j * k, i)
        self.assertEqual(k * i, j)
        self.assertEqual(k * j, -i)
        self.assertEqual(k * k, -one)
        self.assertEqual(i * j * k, -one)

    def test_multiply_by_scalar(self):
        a, b, c, d = randomElements()
        q1 = Quaternion(a, b, c, d)
        for s in [30.0, 0.3, -2, -4.7, 0]:
            q2 = Quaternion(s*a, s*b, s*c, s*d)
            q3 = q1
            self.assertEqual(q1 * s, q2) # post-multiply by scalar
            self.assertEqual(s * q1, q2) # pre-multiply by scalar
            # A numeric string is also accepted as a scalar
            q3 *= repr(s)
            self.assertEqual(q3, q2)

    def test_multiply_incorrect_type(self):
        q = Quaternion()
        with self.assertRaises(TypeError):
            a = q * None
        with self.assertRaises(ValueError):
            b = q * [1, 1, 1, 1, 1]
        with self.assertRaises(ValueError):
            c = q * np.array([[1, 2, 3], [4, 5, 6]])
        with self.assertRaises(ValueError):
            d = q * 's'

    def test_divide(self):
        r = randomElements()
        q = Quaternion(*r)
        if q:
            self.assertEqual(q / q, Quaternion())
            self.assertEqual(q / r, Quaternion())
        else:
            # The random elements happened to be the zero quaternion
            with self.assertRaises(ZeroDivisionError):
                q / q

        with self.assertRaises(ZeroDivisionError):
            q / Quaternion(0.0)
        with self.assertRaises(TypeError):
            q / None
        with self.assertRaises(ValueError):
            q / [1, 1, 1, 1, 1]
        with self.assertRaises(ValueError):
            q / np.array([[1, 2, 3], [4, 5, 6]])
        with self.assertRaises(ValueError):
            q / 's'

    def test_division_of_bases(self):
        one = Quaternion(1.0, 0.0, 0.0, 0.0)
        i = Quaternion(0.0, 1.0, 0.0, 0.0)
        j = Quaternion(0.0, 0.0, 1.0, 0.0)
        k = Quaternion(0.0, 0.0, 0.0, 1.0)

        self.assertEqual(i / i, j / j)
        self.assertEqual(j / j, k / k)
        self.assertEqual(k / k, one)
        self.assertEqual(k / -k, -one)

        self.assertEqual(i / j, -k)
        self.assertEqual(i / i, one)
        self.assertEqual(i / k, j)
        self.assertEqual(j / i, k)
        self.assertEqual(j / j, one)
        self.assertEqual(j / k, -i)
        self.assertEqual(k / i, -j)
        self.assertEqual(k / j, i)
        self.assertEqual(k / k, one)
        self.assertEqual(i / -j, k)

    def test_divide_by_scalar(self):
        a, b, c, d = randomElements()
        q1 = Quaternion(a, b, c, d)
        for s in [30.0, 0.3, -2, -4.7]:
            q2 = Quaternion(a/s, b/s, c/s, d/s)
            q3 = q1
            self.assertEqual(q1 / s, q2)
            if q1:
                self.assertEqual(s / q1, q2.inverse)
            else:
                with self.assertRaises(ZeroDivisionError):
                    s / q1
            # A numeric string is also accepted as a scalar
            q3 /= repr(s)
            self.assertEqual(q3, q2)

        with self.assertRaises(ZeroDivisionError):
            q4 = q1 / 0.0
        with self.assertRaises(TypeError):
            q4 = q1 / None
        with self.assertRaises(ValueError):
            q4 = q1 / 's'

    def test_squared(self):
        one = Quaternion(1.0, 0.0, 0.0, 0.0)
        i = Quaternion(0.0, 1.0, 0.0, 0.0)
        j = Quaternion(0.0, 0.0, 1.0, 0.0)
        k = Quaternion(0.0, 0.0, 0.0, 1.0)

        self.assertEqual(i**2, j**2)
        self.assertEqual(j**2, k**2)
        self.assertEqual(k**2, -one)

    def test_power(self):
        q1 = Quaternion.random()
        q2 = Quaternion(q1)
        self.assertEqual(q1 ** 0, Quaternion())
        self.assertEqual(q1 ** 1, q1)
        q2 **= 4
        self.assertEqual(q2, q1 * q1 * q1 * q1)
        self.assertEqual((q1 ** 0.5) * (q1 ** 0.5), q1)
        self.assertEqual(q1 ** -1, q1.inverse)
        self.assertEqual(4 ** Quaternion(2), Quaternion(16))
        with self.assertRaises(TypeError):
            q1 ** None
        with self.assertRaises(ValueError):
            q1 ** 's'
        q3 = Quaternion()
        self.assertEqual(q3 ** 0.5, q3) # Identity behaves as an identity
        self.assertEqual(q3 ** 5, q3)
        self.assertEqual(q3 ** 3.4, q3)
        q4 = Quaternion(scalar=5) # real number behaves as any other real number would
        self.assertEqual(q4 ** 4, Quaternion(scalar=5 ** 4))

    def test_distributive(self):
        q1 = Quaternion.random()
        q2 = Quaternion.random()
        q3 = Quaternion.random()
        self.assertEqual(q1 * ( q2 + q3 ), q1 * q2 + q1 * q3)

    def test_noncommutative(self):
        q1 = Quaternion.random()
        q2 = Quaternion.random()
        if not q1 == q2: # Small chance of this happening with random initialisation
            self.assertNotEqual(q1 * q2, q2 * q1)
|
class TestQuaternionArithmetic(unittest.TestCase):
    # NOTE(review): auto-generated stub skeleton of the arithmetic tests;
    # bodies are intentionally empty.

    def test_equality(self):
        pass

    def test_assignment(self):
        pass

    def test_unary_minus(self):
        pass

    def test_add(self):
        pass

    def test_subtract(self):
        pass

    def test_multiplication_of_bases(self):
        pass

    def test_multiply_by_scalar(self):
        pass

    def test_multiply_incorrect_type(self):
        pass

    def test_divide(self):
        pass

    def test_division_of_bases(self):
        pass

    def test_divide_by_scalar(self):
        pass

    def test_squared(self):
        pass

    def test_power(self):
        pass

    def test_distributive(self):
        pass

    def test_noncommutative(self):
        pass
| 16 | 0 | 14 | 1 | 13 | 0 | 1 | 0.04 | 1 | 3 | 0 | 0 | 15 | 0 | 15 | 87 | 231 | 29 | 200 | 78 | 184 | 7 | 198 | 78 | 182 | 3 | 2 | 3 | 20 |
142,681 |
KimiNewt/pyshark
|
src/pyshark/capture/pipe_capture.py
|
pyshark.capture.pipe_capture.PipeCapture
|
class PipeCapture(Capture):
    """Capture that reads pcap-format packets from an already-open pipe or file descriptor."""

    def __init__(self, pipe, display_filter=None, only_summaries=False,
                 decryption_key=None, encryption_type='wpa-pwk', decode_as=None,
                 disable_protocol=None, tshark_path=None, override_prefs=None, use_json=False,
                 use_ek=False, include_raw=False, eventloop=None, custom_parameters=None, debug=False):
        """Receives a file-like and reads the packets from there (pcap format).

        :param pipe: File descriptor (or pipe) to read pcap data from; fed to tshark's stdin.
        :param display_filter: Display (wireshark) filter to use.
        :param only_summaries: Only produce packet summaries, much faster but includes very little information
        :param decryption_key: Key used to encrypt and decrypt captured traffic.
        :param encryption_type: Standard of encryption used in captured traffic (must be either 'WEP', 'WPA-PWD',
        or 'WPA-PWK'. Defaults to WPA-PWK).
        :param decode_as: A dictionary of {decode_criterion_string: decode_as_protocol} that are used to tell tshark
        to decode protocols in situations it wouldn't usually, for instance {'tcp.port==8888': 'http'} would make
        it attempt to decode any port 8888 traffic as HTTP. See tshark documentation for details.
        :param tshark_path: Path of the tshark binary
        :param override_prefs: A dictionary of tshark preferences to override, {PREFERENCE_NAME: PREFERENCE_VALUE, ...}.
        :param disable_protocol: Tells tshark to remove a dissector for a specific protocol.
        :param custom_parameters: A dict of custom parameters to pass to tshark, i.e. {"--param": "value"}
        or else a list of parameters in the format ["--foo", "bar", "--baz", "foo"].
        """
        # Zero-argument super() — this module is Python-3-only (async def below).
        super().__init__(display_filter=display_filter,
                         only_summaries=only_summaries,
                         decryption_key=decryption_key,
                         encryption_type=encryption_type,
                         decode_as=decode_as, disable_protocol=disable_protocol,
                         tshark_path=tshark_path, override_prefs=override_prefs,
                         use_json=use_json, use_ek=use_ek, include_raw=include_raw, eventloop=eventloop,
                         custom_parameters=custom_parameters, debug=debug)
        self._pipe = pipe

    def get_parameters(self, packet_count=None):
        """
        Returns the special tshark parameters to be used according to the configuration of this class.
        """
        params = super().get_parameters(packet_count=packet_count)
        # '-r -' makes tshark read the capture from its standard input.
        params += ['-r', '-']
        return params

    async def _get_tshark_process(self, packet_count=None):
        # Wire the pipe straight into tshark's stdin.
        return await super()._get_tshark_process(packet_count=packet_count, stdin=self._pipe)

    def close(self):
        """Closes the underlying pipe, then performs the base-class shutdown."""
        # Close pipe
        os.close(self._pipe)
        super().close()

    def sniff_continuously(self, packet_count=None):
        """
        Captures from the set interface, returning a generator which returns packets continuously.

        Can be used as follows:
        for packet in capture.sniff_continuously();
            print 'Woo, another packet:', packet

        Note: you can also call capture.apply_on_packets(packet_callback) which should have a slight performance boost.
        :param packet_count: an amount of packets to capture, then stop.
        """
        # Retained for backwards compatibility and to add documentation.
        return self._packets_from_tshark_sync(packet_count=packet_count)
|
class PipeCapture(Capture):
    # NOTE(review): auto-generated stub skeleton of PipeCapture; bodies are
    # intentionally empty.

    def __init__(self, pipe, display_filter=None, only_summaries=False,
                 decryption_key=None, encryption_type='wpa-pwk', decode_as=None,
                 disable_protocol=None, tshark_path=None, override_prefs=None, use_json=False,
                 use_ek=False, include_raw=False, eventloop=None, custom_parameters=None, debug=False):
        '''Receives a file-like and reads the packets from there (pcap format).

        :param pipe: File descriptor (or pipe) to read pcap data from.
        :param display_filter: Display (wireshark) filter to use.
        :param only_summaries: Only produce packet summaries, much faster but includes very little information
        :param decryption_key: Key used to encrypt and decrypt captured traffic.
        :param encryption_type: Standard of encryption used in captured traffic (must be either 'WEP', 'WPA-PWD',
        or 'WPA-PWK'. Defaults to WPA-PWK).
        :param decode_as: A dictionary of {decode_criterion_string: decode_as_protocol} that are used to tell tshark
        to decode protocols in situations it wouldn't usually, for instance {'tcp.port==8888': 'http'} would make
        it attempt to decode any port 8888 traffic as HTTP. See tshark documentation for details.
        :param tshark_path: Path of the tshark binary
        :param override_prefs: A dictionary of tshark preferences to override, {PREFERENCE_NAME: PREFERENCE_VALUE, ...}.
        :param disable_protocol: Tells tshark to remove a dissector for a specific protocol.
        :param custom_parameters: A dict of custom parameters to pass to tshark, i.e. {"--param": "value"}
        or else a list of parameters in the format ["--foo", "bar", "--baz", "foo"].
        '''
        pass

    def get_parameters(self, packet_count=None):
        '''
        Returns the special tshark parameters to be used according to the configuration of this class.
        '''
        pass

    async def _get_tshark_process(self, packet_count=None):
        pass

    def close(self):
        pass

    def sniff_continuously(self, packet_count=None):
        '''
        Captures from the set interface, returning a generator which returns packets continuously.

        Can be used as follows:
        for packet in capture.sniff_continuously();
            print 'Woo, another packet:', packet

        Note: you can also call capture.apply_on_packets(packet_callback) which should have a slight performance boost.
        :param packet_count: an amount of packets to capture, then stop.
        '''
        pass
| 6 | 3 | 11 | 1 | 5 | 6 | 1 | 1.16 | 1 | 1 | 0 | 0 | 5 | 1 | 5 | 38 | 62 | 8 | 25 | 11 | 16 | 29 | 15 | 8 | 9 | 1 | 1 | 0 | 5 |
142,682 |
KimiNewt/pyshark
|
src/pyshark/ek_field_mapping.py
|
pyshark.ek_field_mapping._EkFieldMapping
|
class _EkFieldMapping:
def __init__(self):
self._protocol_to_mapping = {}
def load_mapping(self, tshark_version, tshark_path=None):
if self._protocol_to_mapping:
return
mapping_cache_file = cache.get_cache_dir(tshark_version).joinpath(_MAPPING_CACHE_NAME)
if mapping_cache_file.exists():
self._protocol_to_mapping = json.load(mapping_cache_file.open())
else:
self._protocol_to_mapping = tshark.get_ek_field_mapping(tshark_path=tshark_path)
mapping_cache_file.open("w").write(json.dumps(self._protocol_to_mapping))
def cast_field_value(self, protocol, field_name, field_value):
"""Casts the field value to its proper type according to the mapping"""
if isinstance(field_value, list):
return [self.cast_field_value(protocol, field_name, item) for item in field_value]
if not isinstance(field_value, str):
return field_value
field_type = self.get_field_type(protocol, field_name)
if field_type == str:
return field_value
if field_type == int and field_value.startswith("0x"):
return int(field_value, 16)
if field_type == bytes:
try:
return binascii.unhexlify(field_value.replace(":", ""))
except binascii.Error:
return field_value
try:
return field_type(field_value)
except ValueError:
return field_value
def get_field_type(self, protocol, field_name):
"""Gets the Python type for the given field (only for EK fields).
If we are unfamiliar with the type, str will be returned.
"""
if not self._protocol_to_mapping:
raise ProtocolMappingNotInitialized("Protocol mapping not initialized. Call load_mapping() first")
if protocol not in self._protocol_to_mapping:
raise FieldNotFound(f"Type mapping for protocol {protocol} not found")
fields = self._protocol_to_mapping[protocol]["properties"]
if field_name not in fields:
return str
return self._get_python_type_for_field_type(fields[field_name]["type"])
def clear(self):
self._protocol_to_mapping.clear()
@classmethod
def _get_python_type_for_field_type(cls, field_type):
if field_type in ("integer", "long", "short"):
return int
if field_type == "float":
return float
if field_type == "date":
# We don't use datetime.datetime because these can be timedeltas as well.
# Better let the user decide.
return float
if field_type == "byte":
return bytes
# Other known types are IP. Retain as str
return str
|
class _EkFieldMapping:
    # NOTE(review): auto-generated stub skeleton of _EkFieldMapping; bodies
    # are intentionally empty.

    def __init__(self):
        pass

    def load_mapping(self, tshark_version, tshark_path=None):
        pass

    def cast_field_value(self, protocol, field_name, field_value):
        '''Casts the field value to its proper type according to the mapping'''
        pass

    def get_field_type(self, protocol, field_name):
        '''Gets the Python type for the given field (only for EK fields).

        If we are unfamiliar with the type, str will be returned.
        '''
        pass

    def clear(self):
        pass

    @classmethod
    def _get_python_type_for_field_type(cls, field_type):
        pass
| 8 | 2 | 10 | 1 | 9 | 1 | 4 | 0.13 | 0 | 9 | 2 | 0 | 5 | 1 | 6 | 6 | 70 | 10 | 53 | 12 | 45 | 7 | 51 | 11 | 44 | 8 | 0 | 2 | 22 |
142,683 |
KimiNewt/pyshark
|
src/pyshark/packet/common.py
|
pyshark.packet.common.SlotsPickleable
|
class SlotsPickleable(object):
    """Pickle support for __slots__-based classes (which lack a __dict__).

    State is serialized as a {slot_name: value} mapping and restored with
    setattr; subclasses list their attributes in __slots__.
    """
    __slots__ = []

    def __getstate__(self):
        return {name: getattr(self, name) for name in self.__slots__}

    def __setstate__(self, data):
        for name, value in data.items():
            setattr(self, name, value)
|
class SlotsPickleable(object):
    # NOTE(review): auto-generated stub skeleton; bodies are intentionally empty.

    def __getstate__(self):
        pass

    def __setstate__(self, data):
        pass
| 3 | 0 | 4 | 0 | 4 | 0 | 2 | 0 | 1 | 0 | 0 | 2 | 2 | 0 | 2 | 2 | 12 | 2 | 10 | 7 | 7 | 0 | 10 | 7 | 7 | 2 | 1 | 1 | 4 |
142,684 |
KimiNewt/pyshark
|
src/pyshark/capture/capture.py
|
pyshark.capture.capture.UnknownEncryptionStandardException
|
class UnknownEncryptionStandardException(Exception):
    """Raised when an unsupported encryption-standard name is supplied."""
|
class UnknownEncryptionStandardException(Exception):
    """Raised when an unsupported encryption-standard name is supplied."""
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
142,685 |
KimiNewt/pyshark
|
src/pyshark/capture/capture.py
|
pyshark.capture.capture.TSharkCrashException
|
class TSharkCrashException(Exception):
    """Raised when the tshark subprocess crashes unexpectedly."""
|
class TSharkCrashException(Exception):
    """Raised when the tshark subprocess crashes unexpectedly."""
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
142,686 |
KimiNewt/pyshark
|
src/pyshark/capture/capture.py
|
pyshark.capture.capture.StopCapture
|
class StopCapture(Exception):
    """May be raised by user packet-handling code anywhere to stop the capture process."""
|
class StopCapture(Exception):
    """May be raised by user packet-handling code anywhere to stop the capture process."""
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0.5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 3 | 0 | 2 | 1 | 1 | 1 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
142,687 |
KimiNewt/pyshark
|
src/pyshark/capture/capture.py
|
pyshark.capture.capture.RawMustUseJsonException
|
class RawMustUseJsonException(Exception):
    """Raised when raw output is requested without use_json/use_ek enabled."""
|
class RawMustUseJsonException(Exception):
    """Raised when raw output is requested without use_json/use_ek enabled."""
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 2 | 0 | 1 | 1 | 0 | 1 | 1 | 1 | 0 | 0 | 3 | 0 | 0 |
142,688 |
KimiNewt/pyshark
|
src/pyshark/capture/capture.py
|
pyshark.capture.capture.Capture
|
class Capture:
"""Base class for packet captures."""
SUMMARIES_BATCH_SIZE = 64
DEFAULT_LOG_LEVEL = logging.CRITICAL
SUPPORTED_ENCRYPTION_STANDARDS = ["wep", "wpa-pwk", "wpa-pwd", "wpa-psk"]
def __init__(self, display_filter=None, only_summaries=False, eventloop=None,
decryption_key=None, encryption_type="wpa-pwd", output_file=None,
decode_as=None, disable_protocol=None, tshark_path=None,
override_prefs=None, capture_filter=None, use_json=False, include_raw=False,
use_ek=False, custom_parameters=None, debug=False):
self.loaded = False
self.tshark_path = tshark_path
self._override_prefs = override_prefs
self.debug = debug
self.use_json = use_json
self._use_ek = use_ek
self.include_raw = include_raw
self._packets = []
self._current_packet = 0
self._display_filter = display_filter
self._capture_filter = capture_filter
self._only_summaries = only_summaries
self._output_file = output_file
self._running_processes = set()
self._decode_as = decode_as
self._disable_protocol = disable_protocol
self._log = logging.Logger(
self.__class__.__name__, level=self.DEFAULT_LOG_LEVEL)
self._closed = False
self._custom_parameters = custom_parameters
self._eof_reached = False
self._last_error_line = None
self._stderr_handling_tasks = []
self.__tshark_version = None
if include_raw and not (use_json or use_ek):
raise RawMustUseJsonException(
"use_json/use_ek must be True if include_raw")
if self.debug:
self.set_debug()
self.eventloop = eventloop
if self.eventloop is None:
self._setup_eventloop()
if encryption_type and encryption_type.lower() in self.SUPPORTED_ENCRYPTION_STANDARDS:
self.encryption = (decryption_key, encryption_type.lower())
else:
standards = ", ".join(self.SUPPORTED_ENCRYPTION_STANDARDS)
raise UnknownEncryptionStandardException(f"Only the following standards are supported: {standards}.")
def __getitem__(self, item):
"""Gets the packet in the given index.
:param item: packet index
:return: Packet object.
"""
return self._packets[item]
def __len__(self):
return len(self._packets)
def next(self) -> Packet:
return self.next_packet()
# Allows for child classes to call next() from super() without 2to3 "fixing"
# the call
def next_packet(self) -> Packet:
if self._current_packet >= len(self._packets):
raise StopIteration()
cur_packet = self._packets[self._current_packet]
self._current_packet += 1
return cur_packet
def clear(self):
"""Empties the capture of any saved packets."""
self._packets = []
self._current_packet = 0
def reset(self):
"""Starts iterating packets from the first one."""
self._current_packet = 0
def load_packets(self, packet_count=0, timeout=None):
"""Reads the packets from the source (cap, interface, etc.) and adds it to the internal list.
If 0 as the packet_count is given, reads forever
:param packet_count: The amount of packets to add to the packet list (0 to read forever)
:param timeout: If given, automatically stops after a given amount of time.
"""
initial_packet_amount = len(self._packets)
def keep_packet(pkt):
self._packets.append(pkt)
if packet_count != 0 and len(self._packets) - initial_packet_amount >= packet_count:
raise StopCapture()
try:
self.apply_on_packets(
keep_packet, timeout=timeout, packet_count=packet_count)
self.loaded = True
except asyncTimeoutError:
pass
def set_debug(self, set_to=True, log_level=logging.DEBUG):
"""Sets the capture to debug mode (or turns it off if specified)."""
if set_to:
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s"))
self._log.addHandler(handler)
self._log.level = log_level
self.debug = set_to
def _verify_capture_parameters(self):
"""Optionally verify that the capture's parameters are valid.
Should raise an exception if they are not valid.
"""
pass
def _setup_eventloop(self):
"""Sets up a new eventloop as the current one according to the OS."""
if os.name == "nt":
current_eventloop = asyncio.get_event_loop_policy().get_event_loop()
if isinstance(current_eventloop, asyncio.ProactorEventLoop):
self.eventloop = current_eventloop
else:
# On Python before 3.8, Proactor is not the default eventloop type, so we have to create a new one.
# If there was an existing eventloop this can create issues, since we effectively disable it here.
if asyncio.all_tasks():
warnings.warn("The running eventloop has tasks but pyshark must set a new eventloop to continue. "
"Existing tasks may not run.")
self.eventloop = asyncio.ProactorEventLoop()
asyncio.set_event_loop(self.eventloop)
else:
try:
self.eventloop = asyncio.get_event_loop_policy().get_event_loop()
except RuntimeError:
if threading.current_thread() != threading.main_thread():
# Ran not in main thread, make a new eventloop
self.eventloop = asyncio.new_event_loop()
asyncio.set_event_loop(self.eventloop)
else:
raise
if os.name == "posix" and isinstance(threading.current_thread(), threading._MainThread):
# The default child watchers (ThreadedChildWatcher) attach_loop method is empty!
# While using pyshark with ThreadedChildWatcher, asyncio could raise a ChildProcessError
# "Unknown child process pid %d, will report returncode 255"
# This led to a TSharkCrashException in _cleanup_subprocess.
# Using the SafeChildWatcher fixes this issue, but it is slower.
# SafeChildWatcher O(n) -> large numbers of processes are slow
# ThreadedChildWatcher O(1) -> independent of process number
# asyncio.get_child_watcher().attach_loop(self.eventloop)
asyncio.set_child_watcher(asyncio.SafeChildWatcher())
asyncio.get_child_watcher().attach_loop(self.eventloop)
def _packets_from_tshark_sync(self, packet_count=None, existing_process=None):
"""Returns a generator of packets.
This is the sync version of packets_from_tshark. It wait for the completion of each coroutine and
reimplements reading packets in a sync way, yielding each packet as it arrives.
:param packet_count: If given, stops after this amount of packets is captured.
"""
# NOTE: This has code duplication with the async version, think about how to solve this
tshark_process = existing_process or self.eventloop.run_until_complete(
self._get_tshark_process())
parser = self._setup_tshark_output_parser()
packets_captured = 0
data = b""
try:
while True:
try:
packet, data = self.eventloop.run_until_complete(
parser.get_packets_from_stream(tshark_process.stdout, data,
got_first_packet=packets_captured > 0))
except EOFError:
self._log.debug("EOF reached (sync)")
self._eof_reached = True
break
if packet:
packets_captured += 1
yield packet
if packet_count and packets_captured >= packet_count:
break
finally:
if tshark_process in self._running_processes:
self.eventloop.run_until_complete(
self._cleanup_subprocess(tshark_process))
def apply_on_packets(self, callback, timeout=None, packet_count=None):
"""Runs through all packets and calls the given callback (a function) with each one as it is read.
If the capture is infinite (i.e. a live capture), it will run forever, otherwise it will complete after all
packets have been read.
Example usage:
def print_callback(pkt):
print(pkt)
capture.apply_on_packets(print_callback)
If a timeout is given, raises a Timeout error if not complete before the timeout (in seconds)
"""
coro = self.packets_from_tshark(callback, packet_count=packet_count)
if timeout is not None:
coro = asyncio.wait_for(coro, timeout)
return self.eventloop.run_until_complete(coro)
async def packets_from_tshark(self, packet_callback, packet_count=None, close_tshark=True):
"""
A coroutine which creates a tshark process, runs the given callback on each packet that is received from it and
closes the process when it is done.
Do not use interactively. Can be used in order to insert packets into your own eventloop.
"""
tshark_process = await self._get_tshark_process(packet_count=packet_count)
try:
await self._go_through_packets_from_fd(tshark_process.stdout, packet_callback, packet_count=packet_count)
except StopCapture:
pass
finally:
if close_tshark:
await self.close_async()
    async def _go_through_packets_from_fd(self, fd, packet_callback, packet_count=None):
        """A coroutine which goes through a stream and calls a given callback for each XML packet seen in it.

        :param fd: async stream carrying tshark's stdout.
        :param packet_callback: sync or async callable invoked once per parsed packet.
        :param packet_count: if truthy, stop after this many packets.
        """
        packets_captured = 0
        self._log.debug("Starting to go through packets")
        parser = self._setup_tshark_output_parser()
        # `data` carries leftover, not-yet-parsed bytes between parser calls.
        data = b""
        while True:
            try:
                packet, data = await parser.get_packets_from_stream(fd, data,
                                                                    got_first_packet=packets_captured > 0)
            except EOFError:
                self._log.debug("EOF reached")
                self._eof_reached = True
                break
            if packet:
                packets_captured += 1
                try:
                    # Support both plain functions and coroutine callbacks.
                    if inspect.iscoroutinefunction(packet_callback):
                        await packet_callback(packet)
                    else:
                        packet_callback(packet)
                except StopCapture:
                    self._log.debug("User-initiated capture stop in callback")
                    break
            if packet_count and packets_captured >= packet_count:
                break
def _create_stderr_handling_task(self, stderr):
self._stderr_handling_tasks.append(asyncio.ensure_future(self._handle_process_stderr_forever(stderr)))
async def _handle_process_stderr_forever(self, stderr):
while True:
stderr_line = await stderr.readline()
if not stderr_line:
break
stderr_line = stderr_line.decode().strip()
self._last_error_line = stderr_line
self._log.debug(stderr_line)
def _get_tshark_path(self):
return get_process_path(self.tshark_path)
def _get_tshark_version(self):
if self.__tshark_version is None:
self.__tshark_version = get_tshark_version(self.tshark_path)
return self.__tshark_version
    async def _get_tshark_process(self, packet_count=None, stdin=None):
        """Returns a new tshark process with previously-set parameters.

        :param packet_count: if given, tshark stops after capturing this many packets.
        :param stdin: optional stream/descriptor piped into tshark's stdin.
        :return: the created asyncio subprocess.
        :raises TSharkVersionException: if JSON/EK output is requested on a tshark
            version that does not support it.
        """
        self._verify_capture_parameters()
        output_parameters = []
        if self.use_json or self._use_ek:
            if not tshark_supports_json(self._get_tshark_version()):
                raise TSharkVersionException(
                    "JSON only supported on Wireshark >= 2.2.0")
        if self.use_json:
            output_type = "json"
            if tshark_supports_duplicate_keys(self._get_tshark_version()):
                output_parameters.append("--no-duplicate-keys")
        elif self._use_ek:
            output_type = "ek"
        else:
            # psml = summaries only; pdml = full XML dissection.
            output_type = "psml" if self._only_summaries else "pdml"
        parameters = [self._get_tshark_path(), "-l", "-n", "-T", output_type] + \
            self.get_parameters(packet_count=packet_count) + output_parameters
        self._log.debug(
            "Creating TShark subprocess with parameters: " + " ".join(parameters))
        self._log.debug("Executable: %s", parameters[0])
        tshark_process = await asyncio.create_subprocess_exec(*parameters,
                                                              stdout=subprocess.PIPE,
                                                              stderr=subprocess.PIPE,
                                                              stdin=stdin)
        # Drain stderr in the background so a full pipe never blocks tshark.
        self._create_stderr_handling_task(tshark_process.stderr)
        self._created_new_process(parameters, tshark_process)
        return tshark_process
def _created_new_process(self, parameters, process, process_name="TShark"):
self._log.debug(
process_name + f" subprocess (pid {process.pid}) created")
if process.returncode is not None and process.returncode != 0:
raise TSharkCrashException(
f"{process_name} seems to have crashed. Try updating it. (command ran: '{' '.join(parameters)}')")
self._running_processes.add(process)
    async def _cleanup_subprocess(self, process):
        """Kill the given process and properly closes any pipes connected to it.

        :param process: the asyncio subprocess to terminate.
        :raises TSharkCrashException: if the process already exited with a crash-like
            return code.
        """
        self._log.debug(f"Cleanup Subprocess (pid {process.pid})")
        if process.returncode is None:
            # Still running: kill it and give it one second to exit.
            try:
                process.kill()
                return await asyncio.wait_for(process.wait(), 1)
            except asyncTimeoutError:
                self._log.debug(
                    "Waiting for process to close failed, may have zombie process.")
            except ProcessLookupError:
                # Process exited between the returncode check and kill().
                pass
            except OSError:
                # NOTE(review): OSError on kill is ignored on Windows only;
                # elsewhere it is re-raised as a genuine failure.
                if os.name != "nt":
                    raise
        elif process.returncode > 0:
            # Return code 1 is tolerated only before EOF was reached;
            # anything else is treated as a tshark crash.
            if process.returncode != 1 or self._eof_reached:
                raise TSharkCrashException(f"TShark (pid {process.pid}) seems to have crashed (retcode: {process.returncode}).\n"
                                           f"Last error line: {self._last_error_line}\n"
                                           "Try rerunning in debug mode [ capture_obj.set_debug() ] or try updating tshark.")
def _setup_tshark_output_parser(self):
if self.use_json:
return tshark_json.TsharkJsonParser(self._get_tshark_version())
if self._use_ek:
ek_field_mapping.MAPPING.load_mapping(str(self._get_tshark_version()),
tshark_path=self.tshark_path)
return tshark_ek.TsharkEkJsonParser()
return tshark_xml.TsharkXmlParser(parse_summaries=self._only_summaries)
def close(self):
self.eventloop.run_until_complete(self.close_async())
async def close_async(self):
for process in self._running_processes.copy():
await self._cleanup_subprocess(process)
self._running_processes.clear()
# Wait for all stderr handling to finish
for task in self._stderr_handling_tasks:
task.cancel()
with contextlib.suppress(asyncio.CancelledError):
await task
def __del__(self):
if self._running_processes:
self.close()
def __enter__(self): return self
async def __aenter__(self): return self
def __exit__(self, exc_type, exc_val, exc_tb): self.close()
async def __aexit__(self, exc_type, exc_val,
exc_tb): await self.close_async()
    def get_parameters(self, packet_count=None):
        """Returns the special tshark parameters to be used according to the configuration of this class.

        :param packet_count: if given, adds ``-c`` so tshark stops after that many packets.
        :return: list of tshark command-line arguments.
        :raises TypeError: if custom parameters are neither a list nor a dict.
        """
        params = []
        if self._capture_filter:
            params += ["-f", self._capture_filter]
        if self._display_filter:
            # The display-filter flag depends on the tshark version.
            params += [get_tshark_display_filter_flag(self._get_tshark_version(),),
                       self._display_filter]
        # Raw is only enabled when JSON is also enabled.
        if self.include_raw:
            params += ["-x"]
        if packet_count:
            params += ["-c", str(packet_count)]
        if self._custom_parameters:
            if isinstance(self._custom_parameters, list):
                params += self._custom_parameters
            elif isinstance(self._custom_parameters, dict):
                for key, val in self._custom_parameters.items():
                    params += [key, val]
            else:
                raise TypeError("Custom parameters type not supported.")
        if all(self.encryption):
            # Both elements of the encryption pair must be set to enable WLAN decryption.
            params += ["-o", "wlan.enable_decryption:TRUE", "-o", 'uat:80211_keys:"' + self.encryption[1] + '","' +
                       self.encryption[0] + '"']
        if self._override_prefs:
            for preference_name, preference_value in self._override_prefs.items():
                if all(self.encryption) and preference_name in ("wlan.enable_decryption", "uat:80211_keys"):
                    continue  # skip if override preferences also given via --encryption options
                params += ["-o", f"{preference_name}:{preference_value}"]
        if self._output_file:
            params += ["-w", self._output_file]
        if self._decode_as:
            for criterion, decode_as_proto in self._decode_as.items():
                params += ["-d",
                           ",".join([criterion.strip(), decode_as_proto.strip()])]
        if self._disable_protocol:
            params += ["--disable-protocol", self._disable_protocol.strip()]
        return params
def __iter__(self):
if self.loaded:
return iter(self._packets)
else:
return self._packets_from_tshark_sync()
def __repr__(self):
return f"<{self.__class__.__name__} ({len(self._packets)} packets)>"
|
class Capture:
'''Base class for packet captures.'''
def __init__(self, display_filter=None, only_summaries=False, eventloop=None,
decryption_key=None, encryption_type="wpa-pwd", output_file=None,
decode_as=None, disable_protocol=None, tshark_path=None,
override_prefs=None, capture_filter=None, use_json=False, include_raw=False,
use_ek=False, custom_parameters=None, debug=False):
pass
def __getitem__(self, item):
'''Gets the packet in the given index.
:param item: packet index
:return: Packet object.
'''
pass
def __len__(self):
pass
def next(self) -> Packet:
pass
def next_packet(self) -> Packet:
pass
def clear(self):
'''Empties the capture of any saved packets.'''
pass
def reset(self):
'''Starts iterating packets from the first one.'''
pass
def load_packets(self, packet_count=0, timeout=None):
'''Reads the packets from the source (cap, interface, etc.) and adds it to the internal list.
If 0 as the packet_count is given, reads forever
:param packet_count: The amount of packets to add to the packet list (0 to read forever)
:param timeout: If given, automatically stops after a given amount of time.
'''
pass
def keep_packet(pkt):
pass
def set_debug(self, set_to=True, log_level=logging.DEBUG):
'''Sets the capture to debug mode (or turns it off if specified).'''
pass
def _verify_capture_parameters(self):
'''Optionally verify that the capture's parameters are valid.
Should raise an exception if they are not valid.
'''
pass
def _setup_eventloop(self):
'''Sets up a new eventloop as the current one according to the OS.'''
pass
def _packets_from_tshark_sync(self, packet_count=None, existing_process=None):
'''Returns a generator of packets.
This is the sync version of packets_from_tshark. It wait for the completion of each coroutine and
reimplements reading packets in a sync way, yielding each packet as it arrives.
:param packet_count: If given, stops after this amount of packets is captured.
'''
pass
def apply_on_packets(self, callback, timeout=None, packet_count=None):
'''Runs through all packets and calls the given callback (a function) with each one as it is read.
If the capture is infinite (i.e. a live capture), it will run forever, otherwise it will complete after all
packets have been read.
Example usage:
def print_callback(pkt):
print(pkt)
capture.apply_on_packets(print_callback)
If a timeout is given, raises a Timeout error if not complete before the timeout (in seconds)
'''
pass
async def packets_from_tshark(self, packet_callback, packet_count=None, close_tshark=True):
'''
A coroutine which creates a tshark process, runs the given callback on each packet that is received from it and
closes the process when it is done.
Do not use interactively. Can be used in order to insert packets into your own eventloop.
'''
pass
async def _go_through_packets_from_fd(self, fd, packet_callback, packet_count=None):
'''A coroutine which goes through a stream and calls a given callback for each XML packet seen in it.'''
pass
def _create_stderr_handling_task(self, stderr):
pass
async def _handle_process_stderr_forever(self, stderr):
pass
def _get_tshark_path(self):
pass
def _get_tshark_version(self):
pass
async def _get_tshark_process(self, packet_count=None, stdin=None):
'''Returns a new tshark process with previously-set parameters.'''
pass
def _created_new_process(self, parameters, process, process_name="TShark"):
pass
async def _cleanup_subprocess(self, process):
'''Kill the given process and properly closes any pipes connected to it.'''
pass
def _setup_tshark_output_parser(self):
pass
def close(self):
pass
async def close_async(self):
pass
def __del__(self):
pass
def __enter__(self):
pass
async def __aenter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
pass
async def __aexit__(self, exc_type, exc_val,
exc_tb):
pass
def get_parameters(self, packet_count=None):
'''Returns the special tshark parameters to be used according to the configuration of this class.'''
pass
def __iter__(self):
pass
def __repr__(self):
pass
| 35 | 15 | 12 | 1 | 9 | 2 | 3 | 0.18 | 0 | 24 | 9 | 4 | 33 | 25 | 33 | 33 | 431 | 65 | 310 | 95 | 274 | 57 | 271 | 90 | 236 | 17 | 0 | 4 | 101 |
142,689 |
KimiNewt/pyshark
|
src/pyshark/__init__.py
|
pyshark.UnsupportedVersionException
|
class UnsupportedVersionException(Exception):
pass
|
class UnsupportedVersionException(Exception):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
142,690 |
KimiNewt/pyshark
|
src/pyshark/packet/common.py
|
pyshark.packet.common.Pickleable
|
class Pickleable(object):
"""
Base class that implements getstate/setstate, since most of the classes are overriding getattr.
"""
def __getstate__(self):
return self.__dict__
def __setstate__(self, data):
self.__dict__.update(data)
|
class Pickleable(object):
'''
Base class that implements getstate/setstate, since most of the classes are overriding getattr.
'''
def __getstate__(self):
pass
def __setstate__(self, data):
pass
| 3 | 1 | 2 | 0 | 2 | 0 | 1 | 0.6 | 1 | 0 | 0 | 2 | 2 | 0 | 2 | 2 | 10 | 2 | 5 | 3 | 2 | 3 | 5 | 3 | 2 | 1 | 1 | 0 | 2 |
142,691 |
KimiNewt/pyshark
|
src/pyshark/tshark/tshark.py
|
pyshark.tshark.tshark.TSharkVersionException
|
class TSharkVersionException(Exception):
pass
|
class TSharkVersionException(Exception):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
142,692 |
KimiNewt/pyshark
|
src/pyshark/tshark/output_parser/tshark_xml.py
|
pyshark.tshark.output_parser.tshark_xml.TsharkXmlParser
|
class TsharkXmlParser(BaseTsharkOutputParser):
SUMMARIES_BATCH_SIZE = 64
def __init__(self, parse_summaries=False):
super().__init__()
self._parse_summaries = parse_summaries
self._psml_structure = None
async def get_packets_from_stream(self, stream, existing_data, got_first_packet=True):
if self._parse_summaries and self._psml_structure is None:
existing_data = await self._get_psml_struct(stream)
return await super().get_packets_from_stream(stream, existing_data, got_first_packet=got_first_packet)
def _parse_single_packet(self, packet):
return packet_from_xml_packet(packet, psml_structure=self._psml_structure)
def _extract_packet_from_data(self, data, got_first_packet=True):
"""Gets data containing a (part of) tshark xml.
If the given tag is found in it, returns the tag data and the remaining data.
Otherwise returns None and the same data.
:param data: string of a partial tshark xml.
:return: a tuple of (tag, data). tag will be None if none is found.
"""
return _extract_tag_from_xml_data(data, tag_name=b"packet")
async def _get_psml_struct(self, fd):
"""Gets the current PSML (packet summary xml) structure in a tuple ((None, leftover_data)),
only if the capture is configured to return it, else returns (None, leftover_data).
A coroutine.
"""
initial_data = b""
psml_struct = None
# If summaries are read, we need the psdml structure which appears on top of the file.
while not psml_struct:
new_data = await fd.read(self.SUMMARIES_BATCH_SIZE)
initial_data += new_data
psml_struct, initial_data = _extract_tag_from_xml_data(initial_data, b"structure")
if psml_struct:
self._psml_structure = psml_structure_from_xml(psml_struct)
elif not new_data:
return initial_data
return initial_data
|
class TsharkXmlParser(BaseTsharkOutputParser):
def __init__(self, parse_summaries=False):
pass
async def get_packets_from_stream(self, stream, existing_data, got_first_packet=True):
pass
def _parse_single_packet(self, packet):
pass
def _extract_packet_from_data(self, data, got_first_packet=True):
'''Gets data containing a (part of) tshark xml.
If the given tag is found in it, returns the tag data and the remaining data.
Otherwise returns None and the same data.
:param data: string of a partial tshark xml.
:return: a tuple of (tag, data). tag will be None if none is found.
'''
pass
async def _get_psml_struct(self, fd):
'''Gets the current PSML (packet summary xml) structure in a tuple ((None, leftover_data)),
only if the capture is configured to return it, else returns (None, leftover_data).
A coroutine.
'''
pass
| 6 | 2 | 8 | 1 | 5 | 2 | 2 | 0.42 | 1 | 1 | 0 | 0 | 5 | 2 | 5 | 8 | 46 | 9 | 26 | 12 | 20 | 11 | 25 | 12 | 19 | 4 | 1 | 2 | 9 |
142,693 |
KimiNewt/pyshark
|
src/pyshark/capture/remote_capture.py
|
pyshark.capture.remote_capture.RemoteCapture
|
class RemoteCapture(LiveCapture):
"""A capture which is performed on a remote machine which has an rpcapd service running."""
def __init__(
self,
remote_host,
remote_interface,
*args,
remote_port=2002,
bpf_filter=None,
only_summaries=False,
decryption_key=None,
encryption_type="wpa-pwk",
decode_as=None,
disable_protocol=None,
tshark_path=None,
override_prefs=None,
eventloop=None,
debug=False,
**kwargs
):
"""
Creates a new remote capture which will connect to a remote machine which is running rpcapd. Use the sniff()
method to get packets.
Note: The remote machine should have rpcapd running in null authentication mode (-n). Be warned that the traffic
is unencrypted!
Note:
*args and **kwargs are passed to LiveCature's __init__ method.
:param remote_host: The remote host to capture on (IP or hostname). Should be running rpcapd.
:param remote_interface: The remote interface on the remote machine to capture on. Note that on windows it is
not the device display name but the true interface name (i.e. \\Device\\NPF_..).
:param remote_port: The remote port the rpcapd service is listening on
:param bpf_filter: A BPF (tcpdump) filter to apply on the cap before reading.
:param only_summaries: Only produce packet summaries, much faster but includes very little information
:param decryption_key: Key used to encrypt and decrypt captured traffic.
:param encryption_type: Standard of encryption used in captured traffic (must be either 'WEP', 'WPA-PWD',
or 'WPA-PWK'. Defaults to WPA-PWK).
:param decode_as: A dictionary of {decode_criterion_string: decode_as_protocol} that are used to tell tshark
to decode protocols in situations it wouldn't usually, for instance {'tcp.port==8888': 'http'} would make
it attempt to decode any port 8888 traffic as HTTP. See tshark documentation for details.
:param tshark_path: Path of the tshark binary
:param override_prefs: A dictionary of tshark preferences to override, {PREFERENCE_NAME: PREFERENCE_VALUE, ...}.
:param disable_protocol: Tells tshark to remove a dissector for a specifc protocol.
"""
interface = f'rpcap://{remote_host}:{remote_port}/{remote_interface}'
super(RemoteCapture, self).__init__(
interface,
*args,
bpf_filter=bpf_filter,
only_summaries=only_summaries,
decryption_key=decryption_key,
encryption_type=encryption_type,
tshark_path=tshark_path,
decode_as=decode_as,
disable_protocol=disable_protocol,
override_prefs=override_prefs,
eventloop=eventloop,
debug=debug,
**kwargs
)
|
class RemoteCapture(LiveCapture):
'''A capture which is performed on a remote machine which has an rpcapd service running.'''
def __init__(
self,
remote_host,
remote_interface,
*args,
remote_port=2002,
bpf_filter=None,
only_summaries=False,
decryption_key=None,
encryption_type="wpa-pwk",
decode_as=None,
disable_protocol=None,
tshark_path=None,
override_prefs=None,
eventloop=None,
debug=False,
**kwargs
):
'''
Creates a new remote capture which will connect to a remote machine which is running rpcapd. Use the sniff()
method to get packets.
Note: The remote machine should have rpcapd running in null authentication mode (-n). Be warned that the traffic
is unencrypted!
Note:
*args and **kwargs are passed to LiveCature's __init__ method.
:param remote_host: The remote host to capture on (IP or hostname). Should be running rpcapd.
:param remote_interface: The remote interface on the remote machine to capture on. Note that on windows it is
not the device display name but the true interface name (i.e. \Device\NPF_..).
:param remote_port: The remote port the rpcapd service is listening on
:param bpf_filter: A BPF (tcpdump) filter to apply on the cap before reading.
:param only_summaries: Only produce packet summaries, much faster but includes very little information
:param decryption_key: Key used to encrypt and decrypt captured traffic.
:param encryption_type: Standard of encryption used in captured traffic (must be either 'WEP', 'WPA-PWD',
or 'WPA-PWK'. Defaults to WPA-PWK).
:param decode_as: A dictionary of {decode_criterion_string: decode_as_protocol} that are used to tell tshark
to decode protocols in situations it wouldn't usually, for instance {'tcp.port==8888': 'http'} would make
it attempt to decode any port 8888 traffic as HTTP. See tshark documentation for details.
:param tshark_path: Path of the tshark binary
:param override_prefs: A dictionary of tshark preferences to override, {PREFERENCE_NAME: PREFERENCE_VALUE, ...}.
:param disable_protocol: Tells tshark to remove a dissector for a specifc protocol.
'''
pass
| 2 | 2 | 60 | 3 | 34 | 23 | 1 | 0.69 | 1 | 1 | 0 | 0 | 1 | 0 | 1 | 40 | 63 | 4 | 35 | 20 | 16 | 24 | 4 | 3 | 2 | 1 | 2 | 0 | 1 |
142,694 |
KimiNewt/pyshark
|
src/pyshark/capture/live_ring_capture.py
|
pyshark.capture.live_ring_capture.LiveRingCapture
|
class LiveRingCapture(LiveCapture):
"""Represents a live ringbuffer capture on a network interface."""
def __init__(self, ring_file_size=1024, num_ring_files=1, ring_file_name='/tmp/pyshark.pcap', interface=None,
bpf_filter=None, display_filter=None, only_summaries=False, decryption_key=None,
encryption_type='wpa-pwk', decode_as=None, disable_protocol=None,
tshark_path=None, override_prefs=None, capture_filter=None,
use_json=False, use_ek=False, include_raw=False, eventloop=None,
custom_parameters=None, debug=False):
"""
Creates a new live capturer on a given interface. Does not start the actual capture itself.
:param ring_file_size: Size of the ring file in kB, default is 1024
:param num_ring_files: Number of ring files to keep, default is 1
:param ring_file_name: Name of the ring file, default is /tmp/pyshark.pcap
:param interface: Name of the interface to sniff on or a list of names (str). If not given, runs on all interfaces.
:param bpf_filter: BPF filter to use on packets.
:param display_filter: Display (wireshark) filter to use.
:param only_summaries: Only produce packet summaries, much faster but includes very little information
:param decryption_key: Optional key used to encrypt and decrypt captured traffic.
:param encryption_type: Standard of encryption used in captured traffic (must be either 'WEP', 'WPA-PWD', or
'WPA-PWK'. Defaults to WPA-PWK).
:param decode_as: A dictionary of {decode_criterion_string: decode_as_protocol} that are used to tell tshark
to decode protocols in situations it wouldn't usually, for instance {'tcp.port==8888': 'http'} would make
it attempt to decode any port 8888 traffic as HTTP. See tshark documentation for details.
:param tshark_path: Path of the tshark binary
:param override_prefs: A dictionary of tshark preferences to override, {PREFERENCE_NAME: PREFERENCE_VALUE, ...}.
:param capture_filter: Capture (wireshark) filter to use.
:param disable_protocol: Tells tshark to remove a dissector for a specifc protocol.
:param use_ek: Uses tshark in EK JSON mode. It is faster than XML but has slightly less data.
:param use_json: DEPRECATED. Use use_ek instead.
:param custom_parameters: A dict of custom parameters to pass to tshark, i.e. {"--param": "value"}
or else a list of parameters in the format ["--foo", "bar", "--baz", "foo"]. or else a list of parameters in the format ["--foo", "bar", "--baz", "foo"].
"""
super(LiveRingCapture, self).__init__(interface, bpf_filter=bpf_filter, display_filter=display_filter, only_summaries=only_summaries,
decryption_key=decryption_key, encryption_type=encryption_type,
tshark_path=tshark_path, decode_as=decode_as, disable_protocol=disable_protocol,
override_prefs=override_prefs, capture_filter=capture_filter,
use_json=use_json, use_ek=use_ek, include_raw=include_raw, eventloop=eventloop,
custom_parameters=custom_parameters, debug=debug)
self.ring_file_size = ring_file_size
self.num_ring_files = num_ring_files
self.ring_file_name = ring_file_name
def get_parameters(self, packet_count=None):
params = super(LiveRingCapture, self).get_parameters(packet_count=packet_count)
params += ['-b', 'filesize:' + str(self.ring_file_size), '-b', 'files:' + str(self.num_ring_files),
'-w', self.ring_file_name, '-P', '-V']
return params
def _get_dumpcap_parameters(self):
params = super(LiveRingCapture, self)._get_dumpcap_parameters()
params += ['-P']
return params
|
class LiveRingCapture(LiveCapture):
'''Represents a live ringbuffer capture on a network interface.'''
def __init__(self, ring_file_size=1024, num_ring_files=1, ring_file_name='/tmp/pyshark.pcap', interface=None,
bpf_filter=None, display_filter=None, only_summaries=False, decryption_key=None,
encryption_type='wpa-pwk', decode_as=None, disable_protocol=None,
tshark_path=None, override_prefs=None, capture_filter=None,
use_json=False, use_ek=False, include_raw=False, eventloop=None,
custom_parameters=None, debug=False):
'''
Creates a new live capturer on a given interface. Does not start the actual capture itself.
:param ring_file_size: Size of the ring file in kB, default is 1024
:param num_ring_files: Number of ring files to keep, default is 1
:param ring_file_name: Name of the ring file, default is /tmp/pyshark.pcap
:param interface: Name of the interface to sniff on or a list of names (str). If not given, runs on all interfaces.
:param bpf_filter: BPF filter to use on packets.
:param display_filter: Display (wireshark) filter to use.
:param only_summaries: Only produce packet summaries, much faster but includes very little information
:param decryption_key: Optional key used to encrypt and decrypt captured traffic.
:param encryption_type: Standard of encryption used in captured traffic (must be either 'WEP', 'WPA-PWD', or
'WPA-PWK'. Defaults to WPA-PWK).
:param decode_as: A dictionary of {decode_criterion_string: decode_as_protocol} that are used to tell tshark
to decode protocols in situations it wouldn't usually, for instance {'tcp.port==8888': 'http'} would make
it attempt to decode any port 8888 traffic as HTTP. See tshark documentation for details.
:param tshark_path: Path of the tshark binary
:param override_prefs: A dictionary of tshark preferences to override, {PREFERENCE_NAME: PREFERENCE_VALUE, ...}.
:param capture_filter: Capture (wireshark) filter to use.
:param disable_protocol: Tells tshark to remove a dissector for a specifc protocol.
:param use_ek: Uses tshark in EK JSON mode. It is faster than XML but has slightly less data.
:param use_json: DEPRECATED. Use use_ek instead.
:param custom_parameters: A dict of custom parameters to pass to tshark, i.e. {"--param": "value"}
or else a list of parameters in the format ["--foo", "bar", "--baz", "foo"]. or else a list of parameters in the format ["--foo", "bar", "--baz", "foo"].
'''
pass
def get_parameters(self, packet_count=None):
pass
def _get_dumpcap_parameters(self):
pass
| 4 | 2 | 16 | 0 | 8 | 8 | 1 | 1 | 1 | 2 | 0 | 0 | 3 | 3 | 3 | 42 | 54 | 4 | 25 | 14 | 16 | 25 | 14 | 9 | 10 | 1 | 2 | 0 | 3 |
142,695 |
KimiNewt/pyshark
|
src/pyshark/capture/live_capture.py
|
pyshark.capture.live_capture.UnknownInterfaceException
|
class UnknownInterfaceException(Exception):
pass
|
class UnknownInterfaceException(Exception):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
142,696 |
KimiNewt/pyshark
|
src/pyshark/capture/live_capture.py
|
pyshark.capture.live_capture.LiveCapture
|
class LiveCapture(Capture):
"""Represents a live capture on a network interface."""
def __init__(self, interface=None, bpf_filter=None, display_filter=None, only_summaries=False,
decryption_key=None, encryption_type='wpa-pwk', output_file=None, decode_as=None,
disable_protocol=None, tshark_path=None, override_prefs=None, capture_filter=None,
monitor_mode=False, use_json=False, use_ek=False,
include_raw=False, eventloop=None, custom_parameters=None,
debug=False):
"""Creates a new live capturer on a given interface. Does not start the actual capture itself.
:param interface: Name of the interface to sniff on or a list of names (str). If not given, runs on all interfaces.
:param bpf_filter: BPF filter to use on packets.
:param display_filter: Display (wireshark) filter to use.
:param only_summaries: Only produce packet summaries, much faster but includes very little information
:param decryption_key: Optional key used to encrypt and decrypt captured traffic.
:param encryption_type: Standard of encryption used in captured traffic (must be either 'WEP', 'WPA-PWD', or
'WPA-PWK'. Defaults to WPA-PWK).
:param output_file: Additionally save live captured packets to this file.
:param decode_as: A dictionary of {decode_criterion_string: decode_as_protocol} that are used to tell tshark
to decode protocols in situations it wouldn't usually, for instance {'tcp.port==8888': 'http'} would make
it attempt to decode any port 8888 traffic as HTTP. See tshark documentation for details.
:param tshark_path: Path of the tshark binary
:param override_prefs: A dictionary of tshark preferences to override, {PREFERENCE_NAME: PREFERENCE_VALUE, ...}.
:param capture_filter: Capture (wireshark) filter to use.
:param disable_protocol: Tells tshark to remove a dissector for a specifc protocol.
:param use_ek: Uses tshark in EK JSON mode. It is faster than XML but has slightly less data.
:param use_json: DEPRECATED. Use use_ek instead.
:param custom_parameters: A dict of custom parameters to pass to tshark, i.e. {"--param": "value"} or
else a list of parameters in the format ["--foo", "bar", "--baz", "foo"].
"""
super(LiveCapture, self).__init__(display_filter=display_filter, only_summaries=only_summaries,
decryption_key=decryption_key, encryption_type=encryption_type,
output_file=output_file, decode_as=decode_as, disable_protocol=disable_protocol,
tshark_path=tshark_path, override_prefs=override_prefs,
capture_filter=capture_filter, use_json=use_json, use_ek=use_ek,
include_raw=include_raw,
eventloop=eventloop, custom_parameters=custom_parameters,
debug=debug)
self.bpf_filter = bpf_filter
self.monitor_mode = monitor_mode
all_interfaces = get_tshark_interfaces(tshark_path)
if interface is None:
self.interfaces = all_interfaces
elif isinstance(interface, str):
self.interfaces = [interface]
else:
self.interfaces = interface
def get_parameters(self, packet_count=None):
"""Returns the special tshark parameters to be used according to the configuration of this class."""
params = super(LiveCapture, self).get_parameters(packet_count=packet_count)
# Read from STDIN
params += ["-i", "-"]
return params
def _verify_capture_parameters(self):
all_interfaces_names = tshark.get_all_tshark_interfaces_names(self.tshark_path)
all_interfaces_lowercase = [interface.lower() for interface in all_interfaces_names]
for each_interface in self.interfaces:
if each_interface.startswith("rpcap://"):
continue
if each_interface.isnumeric():
continue
if each_interface.lower() not in all_interfaces_lowercase:
raise UnknownInterfaceException(
f"Interface '{each_interface}' does not exist, unable to initiate capture. "
f"Perhaps permissions are missing?\n"
f"Possible interfaces: {os.linesep.join(all_interfaces_names)}")
def _get_dumpcap_parameters(self):
# Don't report packet counts.
params = ["-q"]
if self._get_tshark_version() < version.parse("2.5.0"):
# Tshark versions older than 2.5 don't support pcapng. This flag forces dumpcap to output pcap.
params += ["-P"]
if self.bpf_filter:
params += ["-f", self.bpf_filter]
if self.monitor_mode:
params += ["-I"]
for interface in self.interfaces:
params += ["-i", interface]
# Write to STDOUT
params += ["-w", "-"]
return params
async def _get_tshark_process(self, packet_count=None, stdin=None):
read, write = os.pipe()
dumpcap_params = [get_process_path(process_name="dumpcap", tshark_path=self.tshark_path)] + self._get_dumpcap_parameters()
self._log.debug("Creating Dumpcap subprocess with parameters: %s", " ".join(dumpcap_params))
dumpcap_process = await asyncio.create_subprocess_exec(*dumpcap_params, stdout=write,
stderr=subprocess.PIPE)
self._create_stderr_handling_task(dumpcap_process.stderr)
self._created_new_process(dumpcap_params, dumpcap_process, process_name="Dumpcap")
tshark = await super(LiveCapture, self)._get_tshark_process(packet_count=packet_count, stdin=read)
return tshark
# Backwards compatibility
sniff = Capture.load_packets
def sniff_continuously(self, packet_count=None):
    """Captures on the configured interface(s), yielding packets as a generator.

    Usage:
        for packet in capture.sniff_continuously():
            print('Woo, another packet:', packet)

    Note: you can also call capture.apply_on_packets(packet_callback) which should have a slight performance boost.
    :param packet_count: an amount of packets to capture, then stop.
    """
    # Thin delegation kept for backwards compatibility and documentation.
    return self._packets_from_tshark_sync(packet_count=packet_count)
|
class LiveCapture(Capture):
'''Represents a live capture on a network interface.'''
def __init__(self, interface=None, bpf_filter=None, display_filter=None, only_summaries=False,
decryption_key=None, encryption_type='wpa-pwk', output_file=None, decode_as=None,
disable_protocol=None, tshark_path=None, override_prefs=None, capture_filter=None,
monitor_mode=False, use_json=False, use_ek=False,
include_raw=False, eventloop=None, custom_parameters=None,
debug=False):
'''Creates a new live capturer on a given interface. Does not start the actual capture itself.
:param interface: Name of the interface to sniff on or a list of names (str). If not given, runs on all interfaces.
:param bpf_filter: BPF filter to use on packets.
:param display_filter: Display (wireshark) filter to use.
:param only_summaries: Only produce packet summaries, much faster but includes very little information
:param decryption_key: Optional key used to encrypt and decrypt captured traffic.
:param encryption_type: Standard of encryption used in captured traffic (must be either 'WEP', 'WPA-PWD', or
'WPA-PWK'. Defaults to WPA-PWK).
:param output_file: Additionally save live captured packets to this file.
:param decode_as: A dictionary of {decode_criterion_string: decode_as_protocol} that are used to tell tshark
to decode protocols in situations it wouldn't usually, for instance {'tcp.port==8888': 'http'} would make
it attempt to decode any port 8888 traffic as HTTP. See tshark documentation for details.
:param tshark_path: Path of the tshark binary
:param override_prefs: A dictionary of tshark preferences to override, {PREFERENCE_NAME: PREFERENCE_VALUE, ...}.
:param capture_filter: Capture (wireshark) filter to use.
:param disable_protocol: Tells tshark to remove a dissector for a specifc protocol.
:param use_ek: Uses tshark in EK JSON mode. It is faster than XML but has slightly less data.
:param use_json: DEPRECATED. Use use_ek instead.
:param custom_parameters: A dict of custom parameters to pass to tshark, i.e. {"--param": "value"} or
else a list of parameters in the format ["--foo", "bar", "--baz", "foo"].
'''
pass
def get_parameters(self, packet_count=None):
'''Returns the special tshark parameters to be used according to the configuration of this class.'''
pass
def _verify_capture_parameters(self):
pass
def _get_dumpcap_parameters(self):
pass
async def _get_tshark_process(self, packet_count=None, stdin=None):
pass
def sniff_continuously(self, packet_count=None):
'''Captures from the set interface, returning a generator which returns packets continuously.
Can be used as follows:
for packet in capture.sniff_continuously():
print('Woo, another packet:', packet)
Note: you can also call capture.apply_on_packets(packet_callback) which should have a slight performance boost.
:param packet_count: an amount of packets to capture, then stop.
'''
pass
| 7 | 4 | 18 | 1 | 11 | 6 | 3 | 0.55 | 1 | 3 | 1 | 2 | 6 | 3 | 6 | 39 | 117 | 15 | 66 | 27 | 54 | 36 | 48 | 22 | 41 | 5 | 1 | 2 | 16 |
142,697 |
KimiNewt/pyshark
|
src/pyshark/capture/inmem_capture.py
|
pyshark.capture.inmem_capture.LinkTypes
|
class LinkTypes(object):
    # pcap link-layer header type (DLT/LINKTYPE) constants used when writing
    # a pcap header for packets fed into tshark.
    # Values follow the tcpdump link-layer header type registry.
    NULL = 0          # BSD loopback encapsulation
    ETHERNET = 1      # IEEE 802.3 Ethernet
    IEEE802_5 = 6     # IEEE 802.5 Token Ring
    PPP = 9           # Point-to-Point Protocol
    IEEE802_11 = 105  # IEEE 802.11 wireless LAN
class LinkTypes(object):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0 | 6 | 6 | 5 | 0 | 6 | 6 | 5 | 0 | 1 | 0 | 0 |
142,698 |
KimiNewt/pyshark
|
src/pyshark/packet/fields.py
|
pyshark.packet.fields.LayerField
|
class LayerField(SlotsPickleable):
    """Holds all data about a field of a layer, both its actual value and its name and nice representation."""

    # Slots instead of a per-instance __dict__: packets carry a great many
    # fields, and this cuts roughly a third of the memory per instance.
    __slots__ = ['name', 'showname', 'raw_value', 'show', 'hide', 'pos', 'size', 'unmaskedvalue']

    def __init__(self, name=None, showname=None, value=None, show=None, hide=None, pos=None, size=None, unmaskedvalue=None):
        self.name = name
        self.showname = showname
        self.raw_value = value
        self.show = show
        self.pos = pos
        self.size = size
        self.unmaskedvalue = unmaskedvalue
        # Hidden fields are marked with the literal string 'yes'.
        self.hide = hide == 'yes'

    def __repr__(self):
        return f'<LayerField {self.name}: {self.get_default_value()}>'

    def get_default_value(self) -> str:
        """Gets the best 'value' string this field has."""
        # Preference order: show, then raw_value, then showname.
        return self.show or self.raw_value or self.showname

    @property
    def showname_value(self) -> typing.Union[str, None]:
        """The "pretty value" (as displayed by Wireshark) of the field."""
        if self.showname:
            _, separator, pretty_value = self.showname.partition(': ')
            if separator:
                return pretty_value
        return None

    @property
    def showname_key(self) -> typing.Union[str, None]:
        """The "pretty name" (as displayed by Wireshark) of the field."""
        if self.showname:
            pretty_name, separator, _ = self.showname.partition(': ')
            if separator:
                return pretty_name
        return None

    @property
    def binary_value(self) -> bytes:
        """Converts this field to binary (assuming it's a binary string)"""
        hex_string = str(self.raw_value)
        if len(hex_string) % 2:
            # unhexlify requires an even number of hex digits.
            hex_string = '0' + hex_string
        return binascii.unhexlify(hex_string)

    @property
    def int_value(self) -> int:
        """Returns the int value of this field (assuming it's represented as a decimal integer)."""
        return int(self.raw_value)

    @property
    def hex_value(self) -> int:
        """Returns the int value of this field if it's in base 16
        (either as a normal number or in a "0xFFFF"-style hex value)
        """
        return int(self.raw_value, 16)

    base16_value = hex_value
|
class LayerField(SlotsPickleable):
'''Holds all data about a field of a layer, both its actual value and its name and nice representation.'''
def __init__(self, name=None, showname=None, value=None, show=None, hide=None, pos=None, size=None, unmaskedvalue=None):
pass
def __repr__(self):
pass
def get_default_value(self) -> str:
'''Gets the best 'value' string this field has.'''
pass
@property
def showname_value(self) -> typing.Union[str, None]:
'''The "pretty value" (as displayed by Wireshark) of the field.'''
pass
@property
def showname_key(self) -> typing.Union[str, None]:
'''The "pretty name" (as displayed by Wireshark) of the field.'''
pass
@property
def binary_value(self) -> bytes:
'''Converts this field to binary (assuming it's a binary string)'''
pass
@property
def int_value(self) -> int:
'''Returns the int value of this field (assuming it's represented as a decimal integer).'''
pass
@property
def hex_value(self) -> int:
'''Returns the int value of this field if it's in base 16
(either as a normal number or in a "0xFFFF"-style hex value)
'''
pass
| 14 | 7 | 6 | 0 | 5 | 1 | 2 | 0.24 | 1 | 3 | 0 | 0 | 8 | 8 | 8 | 10 | 70 | 13 | 46 | 26 | 32 | 11 | 40 | 21 | 31 | 3 | 2 | 1 | 14 |
142,699 |
KimiNewt/pyshark
|
src/pyshark/capture/inmem_capture.py
|
pyshark.capture.inmem_capture.InMemCapture
|
class InMemCapture(Capture):
    """A capture that parses binary packets supplied in memory by piping them into tshark."""

    def __init__(self, bpf_filter=None, display_filter=None, only_summaries=False,
                 decryption_key=None, encryption_type='wpa-pwk', decode_as=None,
                 disable_protocol=None, tshark_path=None, override_prefs=None, use_json=False, use_ek=False,
                 linktype=LinkTypes.ETHERNET, include_raw=False, eventloop=None, custom_parameters=None,
                 debug=False):
        """Creates a new in-mem capture, a capture capable of receiving binary packets and parsing them using tshark.

        Significantly faster if packets are added in a batch.

        :param bpf_filter: BPF filter to use on packets.
        :param display_filter: Display (wireshark) filter to use.
        :param only_summaries: Only produce packet summaries, much faster but includes very little information
        :param decryption_key: Key used to encrypt and decrypt captured traffic.
        :param encryption_type: Standard of encryption used in captured traffic (must be either 'WEP', 'WPA-PWD',
            or 'WPA-PWK'. Defaults to WPA-PWK).
        :param decode_as: A dictionary of {decode_criterion_string: decode_as_protocol} that are used to tell tshark
            to decode protocols in situations it wouldn't usually, for instance {'tcp.port==8888': 'http'} would make
            it attempt to decode any port 8888 traffic as HTTP. See tshark documentation for details.
        :param tshark_path: Path of the tshark binary
        :param override_prefs: A dictionary of tshark preferences to override, {PREFERENCE_NAME: PREFERENCE_VALUE, ...}.
        :param disable_protocol: Tells tshark to remove a dissector for a specifc protocol.
        :param linktype: pcap link-layer type of the packets that will be fed (see LinkTypes).
        :param custom_parameters: A dict of custom parameters to pass to tshark, i.e. {"--param": "value"}
            or else a list of parameters in the format ["--foo", "bar", "--baz", "foo"].
        """
        super(InMemCapture, self).__init__(display_filter=display_filter, only_summaries=only_summaries,
                                           decryption_key=decryption_key, encryption_type=encryption_type,
                                           decode_as=decode_as, disable_protocol=disable_protocol,
                                           tshark_path=tshark_path, override_prefs=override_prefs,
                                           use_json=use_json, use_ek=use_ek,
                                           include_raw=include_raw, eventloop=eventloop,
                                           custom_parameters=custom_parameters, debug=debug)
        self.bpf_filter = bpf_filter
        self._packets_to_write = None
        # Link type written into the pcap header of the next tshark session.
        self._current_linktype = linktype
        # The live tshark subprocess, reused across parse calls until close().
        self._current_tshark = None

    def get_parameters(self, packet_count=None):
        """Returns the special tshark parameters to be used according to the configuration of this class."""
        params = super(InMemCapture, self).get_parameters(
            packet_count=packet_count)
        # Read the pcap stream from STDIN.
        params += ['-i', '-']
        return params

    async def _get_tshark_process(self, packet_count=None):
        # Reuse the existing tshark session if one is already running.
        if self._current_tshark:
            return self._current_tshark
        proc = await super(InMemCapture, self)._get_tshark_process(packet_count=packet_count, stdin=subprocess.PIPE)
        self._current_tshark = proc
        # Create PCAP header: magic, version 2.4, zone/sigfigs 0, snaplen 0x7fff, linktype.
        header = struct.pack("IHHIIII", 0xa1b2c3d4, 2, 4,
                             0, 0, 0x7fff, self._current_linktype)
        proc.stdin.write(header)
        return proc

    def _get_json_separators(self):
        """Returns the separators between packets in a JSON output

        Returns a tuple of (packet_separator, end_of_file_separator, characters_to_disregard).
        The latter variable being the number of characters to ignore in order to pass the packet (i.e. extra newlines,
        commas, parenthesis).
        """
        if self._get_tshark_version() >= version.parse("2.6.7"):
            return f"{os.linesep} }}".encode(), f"}}{os.linesep}]".encode(), 0
        else:
            # NOTE(review): the end-of-file separator in this branch is a str while
            # the one above is bytes — confirm callers handle both types.
            return f'}}{os.linesep}{os.linesep}'.encode(), f"}}{os.linesep}{os.linesep}]", 1

    def _write_packet(self, packet, sniff_time):
        # Normalize the sniff time to a float epoch timestamp.
        if sniff_time is None:
            now = time.time()
        elif isinstance(sniff_time, datetime.datetime):
            now = sniff_time.timestamp()
        else:
            now = float(sniff_time)
        secs = int(now)
        usecs = int((now * 1000000) % 1000000)
        # Write pcap per-packet header: ts_sec, ts_usec, incl_len, orig_len.
        self._current_tshark.stdin.write(struct.pack(
            "IIII", secs, usecs, len(packet), len(packet)))
        self._current_tshark.stdin.write(packet)

    def parse_packet(self, binary_packet, sniff_time=None, timeout=DEFAULT_TIMEOUT):
        """Parses a single binary packet and returns its parsed version.

        DOES NOT CLOSE tshark. It must be closed manually by calling close() when you're done
        working with it.

        Use parse_packets when parsing multiple packets for faster parsing
        """
        if sniff_time is not None:
            sniff_time = [sniff_time]
        return self.parse_packets([binary_packet], sniff_time, timeout)[0]

    def parse_packets(self, binary_packets, sniff_times=None, timeout=DEFAULT_TIMEOUT):
        """Parses binary packets and return a list of parsed packets.

        DOES NOT CLOSE tshark. It must be closed manually by calling close() when you're done
        working with it.
        """
        if self.eventloop is None:
            self._setup_eventloop()
        return self.eventloop.run_until_complete(self.parse_packets_async(binary_packets, sniff_times, timeout))

    async def parse_packets_async(self, binary_packets, sniff_times=None, timeout=DEFAULT_TIMEOUT):
        """A coroutine which parses binary packets and return a list of parsed packets.

        DOES NOT CLOSE tshark. It must be closed manually by calling close() when you're done
        working with it.
        """
        parsed_packets = []
        if sniff_times is None:
            sniff_times = []
        if not self._current_tshark:
            await self._get_tshark_process()
        # zip_longest pads missing sniff times with None (current time is used).
        for binary_packet, sniff_time in itertools.zip_longest(binary_packets, sniff_times):
            self._write_packet(binary_packet, sniff_time)

        def callback(pkt):
            parsed_packets.append(pkt)
            # Stop reading from tshark once every written packet has come back.
            if len(parsed_packets) == len(binary_packets):
                raise StopCapture()

        await self._get_parsed_packet_from_tshark(callback, timeout)
        return parsed_packets

    async def _get_parsed_packet_from_tshark(self, callback, timeout):
        # Flush pending packet bytes to tshark before waiting for its output.
        await self._current_tshark.stdin.drain()
        try:
            await asyncio.wait_for(self.packets_from_tshark(callback, close_tshark=False), timeout)
        except asyncio.TimeoutError:
            await self.close_async()
            raise asyncio.TimeoutError("Timed out while waiting for tshark to parse packet. "
                                       "Try rerunning with cap.set_debug() to see tshark errors. "
                                       "Closing tshark..")

    async def close_async(self):
        # Drop the session reference so the next parse spawns a fresh tshark.
        self._current_tshark = None
        await super(InMemCapture, self).close_async()

    def feed_packet(self, binary_packet, linktype=LinkTypes.ETHERNET, timeout=DEFAULT_TIMEOUT):
        """
        DEPRECATED. Use parse_packet instead.
        This function adds the packet to the packets list, and also closes and reopens tshark for
        each packet.
        ==============

        Gets a binary (string) packet and parses & adds it to this capture.
        Returns the added packet.

        Use feed_packets if you have multiple packets to insert.

        By default, assumes the packet is an ethernet packet. For another link type, supply the linktype argument (most
        can be found in the class LinkTypes)
        """
        warnings.warn(
            "Deprecated method. Use InMemCapture.parse_packet() instead.")
        self._current_linktype = linktype
        pkt = self.parse_packet(binary_packet, timeout=timeout)
        self.close()
        self._packets.append(pkt)
        return pkt

    def feed_packets(self, binary_packets, linktype=LinkTypes.ETHERNET, timeout=DEFAULT_TIMEOUT):
        """Gets a list of binary packets, parses them using tshark and returns their parsed values.

        Keeps the packets in the internal packet list as well.

        By default, assumes the packets are ethernet packets. For another link type, supply the linktype argument (most
        can be found in the class LinkTypes)
        """
        self._current_linktype = linktype
        parsed_packets = self.parse_packets(binary_packets, timeout=timeout)
        self._packets.extend(parsed_packets)
        self.close()
        return parsed_packets
|
class InMemCapture(Capture):
def __init__(self, bpf_filter=None, display_filter=None, only_summaries=False,
decryption_key=None, encryption_type='wpa-pwk', decode_as=None,
disable_protocol=None, tshark_path=None, override_prefs=None, use_json=False, use_ek=False,
linktype=LinkTypes.ETHERNET, include_raw=False, eventloop=None, custom_parameters=None,
debug=False):
'''Creates a new in-mem capture, a capture capable of receiving binary packets and parsing them using tshark.
Significantly faster if packets are added in a batch.
:param bpf_filter: BPF filter to use on packets.
:param display_filter: Display (wireshark) filter to use.
:param only_summaries: Only produce packet summaries, much faster but includes very little information
:param decryption_key: Key used to encrypt and decrypt captured traffic.
:param encryption_type: Standard of encryption used in captured traffic (must be either 'WEP', 'WPA-PWD',
or 'WPA-PWK'. Defaults to WPA-PWK).
:param decode_as: A dictionary of {decode_criterion_string: decode_as_protocol} that are used to tell tshark
to decode protocols in situations it wouldn't usually, for instance {'tcp.port==8888': 'http'} would make
it attempt to decode any port 8888 traffic as HTTP. See tshark documentation for details.
:param tshark_path: Path of the tshark binary
:param override_prefs: A dictionary of tshark preferences to override, {PREFERENCE_NAME: PREFERENCE_VALUE, ...}.
:param disable_protocol: Tells tshark to remove a dissector for a specifc protocol.
:param custom_parameters: A dict of custom parameters to pass to tshark, i.e. {"--param": "value"}
or else a list of parameters in the format ["--foo", "bar", "--baz", "foo"].
'''
pass
def get_parameters(self, packet_count=None):
'''Returns the special tshark parameters to be used according to the configuration of this class.'''
pass
async def _get_tshark_process(self, packet_count=None):
pass
def _get_json_separators(self):
'''"Returns the separators between packets in a JSON output
Returns a tuple of (packet_separator, end_of_file_separator, characters_to_disregard).
The latter variable being the number of characters to ignore in order to pass the packet (i.e. extra newlines,
commas, parenthesis).
'''
pass
def _write_packet(self, packet, sniff_time):
pass
def parse_packet(self, binary_packet, sniff_time=None, timeout=DEFAULT_TIMEOUT):
'''Parses a single binary packet and returns its parsed version.
DOES NOT CLOSE tshark. It must be closed manually by calling close() when you're done
working with it.
Use parse_packets when parsing multiple packets for faster parsing
'''
pass
def parse_packets(self, binary_packets, sniff_times=None, timeout=DEFAULT_TIMEOUT):
'''Parses binary packets and return a list of parsed packets.
DOES NOT CLOSE tshark. It must be closed manually by calling close() when you're done
working with it.
'''
pass
async def parse_packets_async(self, binary_packets, sniff_times=None, timeout=DEFAULT_TIMEOUT):
'''A coroutine which parses binary packets and return a list of parsed packets.
DOES NOT CLOSE tshark. It must be closed manually by calling close() when you're done
working with it.
'''
pass
def callback(pkt):
pass
async def _get_parsed_packet_from_tshark(self, callback, timeout):
pass
async def close_async(self):
pass
def feed_packet(self, binary_packet, linktype=LinkTypes.ETHERNET, timeout=DEFAULT_TIMEOUT):
'''
DEPRECATED. Use parse_packet instead.
This function adds the packet to the packets list, and also closes and reopens tshark for
each packet.
==============
Gets a binary (string) packet and parses & adds it to this capture.
Returns the added packet.
Use feed_packets if you have multiple packets to insert.
By default, assumes the packet is an ethernet packet. For another link type, supply the linktype argument (most
can be found in the class LinkTypes)
'''
pass
def feed_packets(self, binary_packets, linktype=LinkTypes.ETHERNET, timeout=DEFAULT_TIMEOUT):
'''Gets a list of binary packets, parses them using tshark and returns their parsed values.
Keeps the packets in the internal packet list as well.
By default, assumes the packets are ethernet packets. For another link type, supply the linktype argument (most
can be found in the class LinkTypes)
'''
pass
| 14 | 8 | 13 | 1 | 8 | 4 | 2 | 0.56 | 1 | 7 | 2 | 0 | 12 | 4 | 12 | 45 | 177 | 27 | 96 | 32 | 78 | 54 | 77 | 28 | 63 | 4 | 1 | 1 | 24 |
142,700 |
KimiNewt/pyshark
|
src/pyshark/packet/fields.py
|
pyshark.packet.fields.LayerFieldsContainer
|
class LayerFieldsContainer(str, Pickleable):
    """An object which contains one or more fields (of the same name).

    When accessing member, such as showname, raw_value, etc. the appropriate member of the main (first) field saved
    in this container will be shown.
    """

    def __new__(cls, main_field, *args, **kwargs):
        # The string content is the field's best display value when available,
        # otherwise the field itself (e.g. a plain string).
        if hasattr(main_field, 'get_default_value'):
            initial_value = main_field.get_default_value()
        else:
            initial_value = main_field
        obj = str.__new__(cls, initial_value, *args, **kwargs)
        obj.fields = [main_field]
        return obj

    def __dir__(self):
        return dir(type(self)) + list(self.__dict__.keys()) + dir(self.main_field)

    def add_field(self, field) -> None:
        """Appends an alternate field (same name) to this container."""
        self.fields.append(field)

    @property
    def all_fields(self) -> list:
        """Returns all fields in a list, the main field followed by the alternate fields."""
        return self.fields

    @property
    def main_field(self) -> LayerField:
        """The first (primary) field stored in this container."""
        return self.fields[0]

    @property
    def alternate_fields(self) -> list:
        """Return the alternate values of this field containers (non-main ones)."""
        return self.fields[1:]

    def __getattr__(self, item):
        # Unknown attributes are delegated to the main field.
        return getattr(self.main_field, item)
|
class LayerFieldsContainer(str, Pickleable):
'''An object which contains one or more fields (of the same name).
When accessing member, such as showname, raw_value, etc. the appropriate member of the main (first) field saved
in this container will be shown.
'''
def __new__(cls, main_field, *args, **kwargs):
pass
def __dir__(self):
pass
def add_field(self, field) -> None:
pass
@property
def all_fields(self) -> list:
'''Returns all fields in a list, the main field followed by the alternate fields.'''
pass
@property
def main_field(self) -> LayerField:
pass
@property
def alternate_fields(self) -> list:
'''Return the alternate values of this field containers (non-main ones).'''
pass
def __getattr__(self, item):
pass
| 11 | 3 | 3 | 0 | 3 | 0 | 1 | 0.26 | 2 | 3 | 1 | 0 | 7 | 0 | 7 | 75 | 37 | 8 | 23 | 12 | 12 | 6 | 19 | 9 | 11 | 2 | 2 | 1 | 8 |
142,701 |
KimiNewt/pyshark
|
src/pyshark/ek_field_mapping.py
|
pyshark.ek_field_mapping.ProtocolMappingNotInitialized
|
class ProtocolMappingNotInitialized(Exception):
    """Raised when the EK protocol field mapping is used before it has been initialized."""
    pass
|
class ProtocolMappingNotInitialized(Exception):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
142,702 |
KimiNewt/pyshark
|
src/pyshark/packet/layers/ek_layer.py
|
pyshark.packet.layers.ek_layer.EkMultiField
|
class EkMultiField(_EkLayerHelperFuncsMixin):
    """A field of an EK layer that can itself carry a value and nested subfields.

    Subfields are resolved through the containing layer by joining names with '_'.
    """
    __slots__ = ["_containing_layer", "_full_name", "_all_fields", "value"]

    def __init__(self, containing_layer: EkLayer, all_fields, full_name, value=None):
        self._containing_layer = containing_layer
        self._full_name = full_name
        self._all_fields = all_fields
        self.value = value

    def get_field(self, field_name):
        """Fetches the subfield named field_name from the containing layer."""
        return self._containing_layer.get_field(f"{self._full_name}_{field_name}")

    @property
    def subfields(self):
        """Names of the immediate subfields nested under this field."""
        prefix = f"{self._full_name}_"
        found = set()
        for candidate in self._containing_layer.all_field_names:
            if candidate != self._full_name and candidate.startswith(prefix):
                # Take only the first path component after this field's name.
                found.add(candidate[len(self._full_name):].split("_")[1])
        return list(found)

    @property
    def field_name(self):
        """The last component of this field's full underscore-joined name."""
        return self._full_name.rsplit("_", 1)[-1]

    def __getattr__(self, item):
        subfield = self.get_field(item)
        if subfield is None:
            raise AttributeError(f"Subfield {item} not found")
        return subfield

    def __repr__(self):
        value_suffix = f": {self.value}" if self.value else ""
        return f"<EkMultiField {self.field_name}{value_suffix}>"

    def __dir__(self) -> typing.Iterable[str]:
        return dir(type(self)) + self.subfields
|
class EkMultiField(_EkLayerHelperFuncsMixin):
def __init__(self, containing_layer: EkLayer, all_fields, full_name, value=None):
pass
def get_field(self, field_name):
pass
@property
def subfields(self):
pass
@property
def field_name(self):
pass
def __getattr__(self, item):
pass
def __repr__(self):
pass
def __dir__(self) -> typing.Iterable[str]:
pass
| 10 | 0 | 4 | 0 | 4 | 0 | 2 | 0 | 1 | 5 | 1 | 0 | 7 | 4 | 7 | 28 | 36 | 7 | 29 | 19 | 19 | 0 | 27 | 17 | 19 | 3 | 5 | 2 | 11 |
142,703 |
KimiNewt/pyshark
|
src/pyshark/packet/layers/json_layer.py
|
pyshark.packet.layers.json_layer.JsonLayer
|
class JsonLayer(BaseLayer):
    """A packet layer backed by tshark's JSON output; fields and sublayers are built lazily."""

    # Extra slots on top of BaseLayer's keep per-layer memory low.
    __slots__ = [
        "duplicate_layers",
        "_showname_fields_converted_to_regular",
        "_full_name",
        "_is_intermediate",
        "_wrapped_fields",
        "value",
        "_all_fields"
    ] + BaseLayer.__slots__

    def __init__(self, layer_name, layer_dict, full_name=None, is_intermediate=False):
        """Creates a JsonLayer. All sublayers and fields are created lazily later."""
        super().__init__(layer_name)
        self.duplicate_layers = []
        self._showname_fields_converted_to_regular = False
        if not full_name:
            self._full_name = self._layer_name
        else:
            self._full_name = full_name
        self._is_intermediate = is_intermediate
        self._wrapped_fields = {}
        if isinstance(layer_dict, list):
            # A list means the layer appears multiple times: keep the first
            # occurrence here and the rest as duplicate_layers.
            self.duplicate_layers = [JsonLayer(layer_name, duplicate_dict,
                                               full_name=full_name, is_intermediate=is_intermediate)
                                     for duplicate_dict in layer_dict[1:]]
            layer_dict = layer_dict[0]
        if not isinstance(layer_dict, dict):
            # Scalar layer: carries a value and no nested fields.
            self.value = layer_dict
            self._all_fields = {}
            return
        self._all_fields = layer_dict

    def get_field(self, name):
        """Gets a field by its full or partial name.

        :raises AttributeError: if no real or "fake" field of that name exists.
        """
        # We only make the wrappers here (lazily) to avoid creating a ton of objects needlessly.
        self._convert_showname_field_names_to_field_names()
        field = self._wrapped_fields.get(name)
        if field is None:
            is_fake = False
            field = self._get_internal_field_by_name(name)
            if field is None:
                # Might be a "fake" field in JSON
                is_fake = self._is_fake_field(name)
                if not is_fake:
                    raise AttributeError(f"No such field {name}")
            field = self._make_wrapped_field(name, field, is_fake=is_fake)
            self._wrapped_fields[name] = field
        return field

    @property
    def field_names(self):
        # All names under this layer's prefix plus the short (suffix) form
        # of dotted names, deduplicated.
        self._convert_showname_field_names_to_field_names()
        return list(set([self._sanitize_field_name(name) for name in self._all_fields
                         if name.startswith(self._full_name)] +
                        [name.rsplit('.', 1)[1] for name in self._all_fields if '.' in name]))

    def has_field(self, dotted_name) -> bool:
        """Checks whether the layer has the given field name.

        Can get a dotted name, i.e. layer.sublayer.subsublayer.field
        """
        parts = dotted_name.split('.')
        cur_layer = self
        for part in parts:
            if part in cur_layer.field_names:
                cur_layer = cur_layer.get_field(part)
            else:
                return False
        return True

    def _pretty_print_layer_fields(self, file: io.IOBase):
        # Writes each field line, coloring the name separately from the value.
        for field_line in self._get_all_field_lines():
            if ':' in field_line:
                field_name, field_line = field_line.split(':', 1)
                file.write(colored(field_name + ':', "green", attrs=["bold"]))
            file.write(colored(field_line, attrs=["bold"]))

    def _get_all_field_lines(self):
        """Returns all lines that represent the fields of the layer (both their names and values)."""
        for field in self._get_all_fields_with_alternates():
            yield from self._get_field_or_layer_repr(field)

    def _get_field_or_layer_repr(self, field):
        # A "field" may be a nested sublayer, a list of fields/layers, or a plain field.
        if isinstance(field, JsonLayer):
            yield "\t" + field.layer_name + ":" + os.linesep
            for line in field._get_all_field_lines():
                yield "\t" + line
        elif isinstance(field, list):
            for subfield_or_layer in field:
                yield from self._get_field_or_layer_repr(subfield_or_layer)
        else:
            yield f"\t{self._sanitize_field_name(field.name)}: {field.raw_value}{os.linesep}"

    def _sanitize_field_name(self, field_name):
        # Strips this layer's "<full_name>." prefix from a field name.
        return field_name.replace(self._full_name + '.', '')

    def _field_name_from_showname(self, field_name):
        """Converts a 'showname'-like field key to a regular field name

        Sometimes in the JSON, there are "text" type fields which might look like this:
        "my_layer":
        {
            "my_layer.some_field": 1,
            "Something Special: it's special": {
                "my_layer.special_field": "it's special"
            }
        }

        We convert the showname key into the field name. The internals will turn into a fake layer.
        In this case the field will be accessible by pkt.my_layer.something_special.special_field
        """
        showname_key = field_name.split(":", 1)[0]
        return self._full_name + "." + showname_key.lower().replace(" ", "_")

    def _get_all_fields_with_alternates(self):
        # Wrapped field objects for every name this layer exposes.
        return [self.get_field(name) for name in self.field_names]

    def _convert_showname_field_names_to_field_names(self):
        """Converts all fields that don't have a proper name (they have a showname name) to a regular name

        See self._field_name_from_showname docs for more.
        """
        # Idempotent: only performs the conversion once per layer.
        if self._showname_fields_converted_to_regular:
            return
        for field_name in list(self._all_fields):
            if ":" in field_name:
                field_value = self._all_fields.pop(field_name)
                if isinstance(field_value, dict):
                    # Save the showname
                    field_value["showname"] = field_name
                # Convert the old name to the new name.
                self._all_fields[
                    self._field_name_from_showname(field_name)] = field_value
        self._showname_fields_converted_to_regular = True

    def _get_internal_field_by_name(self, name):
        """Gets the field by name, or None if not found."""
        field = self._all_fields.get(name, self._all_fields.get(f"{self._full_name}.{name}"))
        if field is not None:
            return field
        for field_name in self._all_fields:
            # Specific name
            if field_name.endswith(f'.{name}'):
                return self._all_fields[field_name]
        # Implicitly returns None when nothing matched.

    def _is_fake_field(self, name):
        # Some fields include parts that are not reflected in the JSON dictionary
        # i.e. a possible json is:
        # {
        #     foo: {
        #         foo.bar.baz: {
        #             foo.baz: 3
        #         }
        #     }
        # }
        # So in this case we must create a fake layer for "bar".
        field_full_name = f"{self._full_name}.{name}."
        for name, field in self._all_fields.items():
            if name.startswith(field_full_name):
                return True
        return False

    def _make_wrapped_field(self, name, field, is_fake=False, full_name=None):
        """Creates the field lazily.

        If it's a simple field, wraps it in a container that adds extra features.
        If it's a nested layer, creates a layer for it.
        If it's an intermediate layer, copies over the relevant fields and creates a new layer for
        it.
        """
        if not full_name:
            full_name = f"{self._full_name}.{name}"
        if is_fake:
            # Populate with all fields that are supposed to be inside of it
            field = {key: value for key, value in self._all_fields.items()
                     if key.startswith(full_name)}
        if isinstance(field, dict):
            if name.endswith('_tree'):
                name = name.replace('_tree', '')
                full_name = f'{self._full_name}.{name}'
            return JsonLayer(name, field, full_name=full_name, is_intermediate=is_fake)
        elif isinstance(field, list):
            # For whatever reason in list-type object it goes back to using the original parent name
            return [self._make_wrapped_field(name, field_part,
                                             full_name=self._full_name.split('.')[0])
                    for field_part in field]
        return LayerFieldsContainer(LayerField(name=name, value=field))
|
class JsonLayer(BaseLayer):
def __init__(self, layer_name, layer_dict, full_name=None, is_intermediate=False):
'''Creates a JsonLayer. All sublayers and fields are created lazily later.'''
pass
def get_field(self, name):
'''Gets a field by its full or partial name.'''
pass
@property
def field_names(self):
pass
def has_field(self, dotted_name) -> bool:
'''Checks whether the layer has the given field name.
Can get a dotted name, i.e. layer.sublayer.subsublayer.field
'''
pass
def _pretty_print_layer_fields(self, file: io.IOBase):
pass
def _get_all_field_lines(self):
'''Returns all lines that represent the fields of the layer (both their names and values).'''
pass
def _get_field_or_layer_repr(self, field):
pass
def _sanitize_field_name(self, field_name):
pass
def _field_name_from_showname(self, field_name):
'''Converts a 'showname'-like field key to a regular field name
Sometimes in the JSON, there are "text" type fields which might look like this:
"my_layer":
{
"my_layer.some_field": 1,
"Something Special: it's special": {
"my_layer.special_field": "it's special"
}
}
We convert the showname key into the field name. The internals will turn into a fake layer.
In this case the field will be accessible by pkt.my_layer.something_special.special_field
'''
pass
def _get_all_fields_with_alternates(self):
pass
def _convert_showname_field_names_to_field_names(self):
'''Converts all fields that don't have a proper name (they have a showname name) to a regular name
See self._field_name_from_showname docs for more.
'''
pass
def _get_internal_field_by_name(self, name):
'''Gets the field by name, or None if not found.'''
pass
def _is_fake_field(self, name):
pass
def _make_wrapped_field(self, name, field, is_fake=False, full_name=None):
'''Creates the field lazily.
If it's a simple field, wraps it in a container that adds extra features.
If it's a nested layer, creates a layer for it.
If it's an intermediate layer, copies over the relevant fields and creates a new layer for
it.
'''
pass
| 16 | 8 | 12 | 1 | 8 | 3 | 3 | 0.35 | 1 | 9 | 2 | 0 | 14 | 7 | 14 | 28 | 191 | 23 | 124 | 41 | 108 | 44 | 102 | 40 | 87 | 6 | 3 | 3 | 43 |
142,704 |
KimiNewt/pyshark
|
src/pyshark/packet/layers/xml_layer.py
|
pyshark.packet.layers.xml_layer.XmlLayer
|
class XmlLayer(base.BaseLayer):
__slots__ = [
"raw_mode",
"_all_fields"
] + base.BaseLayer.__slots__
def __init__(self, xml_obj=None, raw_mode=False):
super().__init__(xml_obj.attrib['name'])
self.raw_mode = raw_mode
self._all_fields = {}
# We copy over all the fields from the XML object
# Note: we don't read lazily from the XML because the lxml objects are very memory-inefficient
# so we'd rather not save them.
for field in xml_obj.findall('.//field'):
attributes = dict(field.attrib)
field_obj = LayerField(**attributes)
if attributes['name'] in self._all_fields:
# Field name already exists, add this field to the container.
self._all_fields[attributes['name']].add_field(field_obj)
else:
self._all_fields[attributes['name']] = LayerFieldsContainer(field_obj)
def get_field(self, name) -> typing.Union[LayerFieldsContainer, None]:
"""Gets the XML field object of the given name."""
# Quicker in case the exact name was used.
field = self._all_fields.get(name)
if field is not None:
return field
for field_name, field in self._all_fields.items():
if self._sanitize_field_name(name) == self._sanitize_field_name(field_name):
return field
return None
def get_field_value(self, name, raw=False) -> typing.Union[LayerFieldsContainer, None]:
"""Tries getting the value of the given field.
Tries it in the following order: show (standard nice display), value (raw value),
showname (extended nice display).
:param name: The name of the field
:param raw: Only return raw value
:return: str of value
"""
field = self.get_field(name)
if field is None:
return None
if raw:
return field.raw_value
return field
@property
def field_names(self) -> typing.List[str]:
"""Gets all XML field names of this layer."""
return [self._sanitize_field_name(field_name) for field_name in self._all_fields]
@property
def layer_name(self):
if self._layer_name == 'fake-field-wrapper':
return base.DATA_LAYER_NAME
return super().layer_name
def __getattr__(self, item):
val = self.get_field(item)
if val is None:
raise AttributeError()
if self.raw_mode:
return val.raw_value
return val
@property
def _field_prefix(self) -> str:
"""Prefix to field names in the XML."""
if self.layer_name == 'geninfo':
return ''
return self.layer_name + '.'
def _sanitize_field_name(self, field_name):
"""Sanitizes an XML field name
An xml field might have characters which would make it inaccessible as a python attribute).
"""
field_name = field_name.replace(self._field_prefix, '')
return field_name.replace('.', '_').replace('-', '_').lower()
def _pretty_print_layer_fields(self, file: io.IOBase):
for field_line in self._get_all_field_lines():
if ':' in field_line:
field_name, field_line = field_line.split(':', 1)
file.write(colored(field_name + ':', "green", attrs=["bold"]))
file.write(colored(field_line, attrs=["bold"]))
def _get_all_fields_with_alternates(self):
all_fields = list(self._all_fields.values())
all_fields += sum([field.alternate_fields for field in all_fields
if isinstance(field, LayerFieldsContainer)], [])
return all_fields
def _get_all_field_lines(self):
"""Returns all lines that represent the fields of the layer (both their names and values)."""
for field in self._get_all_fields_with_alternates():
yield from self._get_field_or_layer_repr(field)
def _get_field_or_layer_repr(self, field):
field_repr = self._get_field_repr(field)
if field_repr:
yield f"\t{field_repr}{os.linesep}"
def _get_field_repr(self, field):
if field.hide:
return
if field.showname:
return field.showname
elif field.show:
return field.show
elif field.raw_value:
return f"{self._sanitize_field_name(field.name)}: {field.raw_value}"
def get_field_by_showname(self, showname) -> typing.Union[LayerFieldsContainer, None]:
"""Gets a field by its "showname"
This is the name that appears in Wireshark's detailed display i.e. in 'User-Agent: Mozilla...',
'User-Agent' is the .showname
Returns None if not found.
"""
for field in self._get_all_fields_with_alternates():
if field.showname_key == showname:
# Return it if "XXX: whatever == XXX"
return field
return None
|
class XmlLayer(base.BaseLayer):
def __init__(self, xml_obj=None, raw_mode=False):
pass
def get_field(self, name) -> typing.Union[LayerFieldsContainer, None]:
'''Gets the XML field object of the given name.'''
pass
def get_field_value(self, name, raw=False) -> typing.Union[LayerFieldsContainer, None]:
'''Tries getting the value of the given field.
Tries it in the following order: show (standard nice display), value (raw value),
showname (extended nice display).
:param name: The name of the field
:param raw: Only return raw value
:return: str of value
'''
pass
@property
def field_names(self) -> typing.List[str]:
'''Gets all XML field names of this layer.'''
pass
@property
def layer_name(self):
pass
def __getattr__(self, item):
pass
@property
def _field_prefix(self) -> str:
'''Prefix to field names in the XML.'''
pass
def _sanitize_field_name(self, field_name):
'''Sanitizes an XML field name
An xml field might have characters which would make it inaccessible as a python attribute).
'''
pass
def _pretty_print_layer_fields(self, file: io.IOBase):
pass
def _get_all_fields_with_alternates(self):
pass
def _get_all_field_lines(self):
'''Returns all lines that represent the fields of the layer (both their names and values).'''
pass
def _get_field_or_layer_repr(self, field):
pass
def _get_field_repr(self, field):
pass
def get_field_by_showname(self, showname) -> typing.Union[LayerFieldsContainer, None]:
'''Gets a field by its "showname"
This is the name that appears in Wireshark's detailed display i.e. in 'User-Agent: Mozilla...',
'User-Agent' is the .showname
Returns None if not found.
'''
pass
| 18 | 7 | 8 | 1 | 6 | 2 | 3 | 0.29 | 1 | 8 | 2 | 0 | 14 | 2 | 14 | 28 | 133 | 22 | 86 | 34 | 68 | 25 | 76 | 31 | 61 | 5 | 3 | 2 | 35 |
142,705 |
KimiNewt/pyshark
|
src/pyshark/packet/packet.py
|
pyshark.packet.packet.Packet
|
class Packet(Pickleable):
"""A packet object which contains layers.
Layers can be accessed via index or name.
"""
def __init__(self, layers=None, frame_info=None, number=None,
length=None, captured_length=None, sniff_time=None, interface_captured=None):
"""
Creates a Packet object with the given layers and info.
:param layers: A list of BaseLayer objects.
:param frame_info: Layer object for the entire packet frame (information like frame length, packet number, etc.
:param length: Length of the actual packet.
:param captured_length: The length of the packet that was actually captured (could be less then length)
:param sniff_time: The time the packet was captured (timestamp)
:param interface_captured: The interface the packet was captured in.
"""
if layers is None:
self.layers = []
else:
self.layers = layers
self.frame_info = frame_info
self.number = number
self.interface_captured = interface_captured
self.captured_length = captured_length
self.length = length
self.sniff_timestamp = sniff_time
def __getitem__(self, item):
"""
Gets a layer according to its index or its name
:param item: layer index or name
:return: BaseLayer object.
"""
if isinstance(item, int):
return self.layers[item]
for layer in self.layers:
if layer.layer_name.lower() == item.lower():
return layer
raise KeyError('Layer does not exist in packet')
def __contains__(self, item):
"""Checks if the layer is inside the packet.
:param item: name of the layer
"""
try:
self[item]
return True
except KeyError:
return False
def __dir__(self):
return dir(type(self)) + list(self.__dict__.keys()) + [l.layer_name for l in self.layers]
def get_raw_packet(self) -> bytes:
assert "FRAME_RAW" in self, "Packet contains no raw data. In order to contains it, " \
"make sure that use_json and include_raw are set to True " \
"in the Capture object"
raw_packet = b''
byte_values = [''.join(x) for x in zip(self.frame_raw.value[0::2], self.frame_raw.value[1::2])]
for value in byte_values:
raw_packet += binascii.unhexlify(value)
return raw_packet
def __len__(self):
return int(self.length)
def __bool__(self):
return True
@property
def sniff_time(self) -> datetime.datetime:
try:
timestamp = float(self.sniff_timestamp)
except ValueError:
# If the value after the decimal point is negative, discard it
# Google: wireshark fractional second
timestamp = float(self.sniff_timestamp.split(".")[0])
return datetime.datetime.fromtimestamp(timestamp)
def __repr__(self):
transport_protocol = ''
if self.transport_layer != self.highest_layer and self.transport_layer is not None:
transport_protocol = self.transport_layer + '/'
return f'<{transport_protocol}{self.highest_layer} Packet>'
def __str__(self):
s = self._packet_string
for layer in self.layers:
s += str(layer)
return s
@property
def _packet_string(self):
"""A simple pretty string that represents the packet."""
return f'Packet (Length: {self.length}){os.linesep}'
def pretty_print(self):
for layer in self.layers:
layer.pretty_print()
# Alias
show = pretty_print
def __getattr__(self, item):
"""
Allows layers to be retrieved via get attr. For instance: pkt.ip
"""
for layer in self.layers:
if layer.layer_name.lower() == item.lower():
return layer
raise AttributeError(f"No attribute named {item}")
@property
def highest_layer(self) -> BaseLayer:
return self.layers[-1].layer_name.upper()
@property
def transport_layer(self) -> BaseLayer:
for layer in consts.TRANSPORT_LAYERS:
if layer in self:
return layer
def get_multiple_layers(self, layer_name) -> typing.List[BaseLayer]:
"""Returns a list of all the layers in the packet that are of the layer type (an incase-sensitive string).
This is in order to retrieve layers which appear multiple times in the same packet (i.e. double VLAN)
which cannot be retrieved by easier means.
"""
return [layer for layer in self.layers if layer.layer_name.lower() == layer_name.lower()]
|
class Packet(Pickleable):
'''A packet object which contains layers.
Layers can be accessed via index or name.
'''
def __init__(self, layers=None, frame_info=None, number=None,
length=None, captured_length=None, sniff_time=None, interface_captured=None):
'''
Creates a Packet object with the given layers and info.
:param layers: A list of BaseLayer objects.
:param frame_info: Layer object for the entire packet frame (information like frame length, packet number, etc.
:param length: Length of the actual packet.
:param captured_length: The length of the packet that was actually captured (could be less then length)
:param sniff_time: The time the packet was captured (timestamp)
:param interface_captured: The interface the packet was captured in.
'''
pass
def __getitem__(self, item):
'''
Gets a layer according to its index or its name
:param item: layer index or name
:return: BaseLayer object.
'''
pass
def __contains__(self, item):
'''Checks if the layer is inside the packet.
:param item: name of the layer
'''
pass
def __dir__(self):
pass
def get_raw_packet(self) -> bytes:
pass
def __len__(self):
pass
def __bool__(self):
pass
@property
def sniff_time(self) -> datetime.datetime:
pass
def __repr__(self):
pass
def __str__(self):
pass
@property
def _packet_string(self):
'''A simple pretty string that represents the packet.'''
pass
def pretty_print(self):
pass
def __getattr__(self, item):
'''
Allows layers to be retrieved via get attr. For instance: pkt.ip
'''
pass
@property
def highest_layer(self) -> BaseLayer:
pass
@property
def transport_layer(self) -> BaseLayer:
pass
def get_multiple_layers(self, layer_name) -> typing.List[BaseLayer]:
'''Returns a list of all the layers in the packet that are of the layer type (an incase-sensitive string).
This is in order to retrieve layers which appear multiple times in the same packet (i.e. double VLAN)
which cannot be retrieved by easier means.
'''
pass
| 21 | 7 | 7 | 0 | 5 | 2 | 2 | 0.39 | 1 | 12 | 1 | 0 | 16 | 7 | 16 | 18 | 133 | 22 | 80 | 41 | 58 | 31 | 72 | 36 | 55 | 4 | 2 | 2 | 30 |
142,706 |
KimiNewt/pyshark
|
src/pyshark/packet/packet_summary.py
|
pyshark.packet.packet_summary.PacketSummary
|
class PacketSummary(object):
"""A simple object containing a psml summary.
Can contain various summary information about a packet.
"""
def __init__(self, structure, values):
self._fields = {}
self._field_order = []
for key, val in zip(structure, values):
key, val = str(key), str(val)
self._fields[key] = val
self._field_order.append(key)
setattr(self, key.lower().replace('.', '').replace(',', ''), val)
def __repr__(self):
protocol, src, dst = self._fields.get('Protocol', '?'), self._fields.get('Source', '?'),\
self._fields.get('Destination', '?')
return f'<{self.__class__.__name__} {protocol}: {src} to {dst}>'
def __str__(self):
return self.summary_line
@property
def summary_line(self) -> str:
return ' '.join([self._fields[key] for key in self._field_order])
|
class PacketSummary(object):
'''A simple object containing a psml summary.
Can contain various summary information about a packet.
'''
def __init__(self, structure, values):
pass
def __repr__(self):
pass
def __str__(self):
pass
@property
def summary_line(self) -> str:
pass
| 6 | 1 | 4 | 0 | 4 | 0 | 1 | 0.17 | 1 | 2 | 0 | 0 | 4 | 2 | 4 | 4 | 27 | 6 | 18 | 10 | 12 | 3 | 16 | 9 | 11 | 2 | 1 | 1 | 5 |
142,707 |
KimiNewt/pyshark
|
src/pyshark/tshark/output_parser/tshark_ek.py
|
pyshark.tshark.output_parser.tshark_ek.TsharkEkJsonParser
|
class TsharkEkJsonParser(BaseTsharkOutputParser):
def _parse_single_packet(self, packet):
return packet_from_ek_packet(packet)
def _extract_packet_from_data(self, data, got_first_packet=True):
"""Returns a packet's data and any remaining data after reading that first packet"""
start_index = 0
data = data.lstrip()
if data.startswith(b'{"ind'):
# Skip the 'index' JSONs, generated for Elastic.
# See: https://bugs.wireshark.org/bugzilla/show_bug.cgi?id=16656
start_index = data.find(_ENCODED_OS_LINESEP) + 1
linesep_location = data.find(_ENCODED_OS_LINESEP, start_index)
if linesep_location == -1:
return None, data
return data[start_index:linesep_location], data[linesep_location + 1:]
|
class TsharkEkJsonParser(BaseTsharkOutputParser):
def _parse_single_packet(self, packet):
pass
def _extract_packet_from_data(self, data, got_first_packet=True):
'''Returns a packet's data and any remaining data after reading that first packet'''
pass
| 3 | 1 | 8 | 1 | 6 | 2 | 2 | 0.25 | 1 | 0 | 0 | 0 | 2 | 0 | 2 | 5 | 18 | 3 | 12 | 5 | 9 | 3 | 12 | 5 | 9 | 3 | 1 | 1 | 4 |
142,708 |
KimiNewt/pyshark
|
src/pyshark/tshark/output_parser/tshark_json.py
|
pyshark.tshark.output_parser.tshark_json.TsharkJsonParser
|
class TsharkJsonParser(BaseTsharkOutputParser):
def __init__(self, tshark_version=None):
super().__init__()
self._tshark_version = tshark_version
def _parse_single_packet(self, packet):
json_has_duplicate_keys = tshark.tshark_supports_duplicate_keys(self._tshark_version)
return packet_from_json_packet(packet, deduplicate_fields=json_has_duplicate_keys)
def _extract_packet_from_data(self, data, got_first_packet=True):
"""Returns a packet's data and any remaining data after reading that first packet"""
tag_start = 0
if not got_first_packet:
tag_start = data.find(b"{")
if tag_start == -1:
return None, data
packet_separator, end_separator, end_tag_strip_length = self._get_json_separators()
found_separator = None
tag_end = data.find(packet_separator)
if tag_end == -1:
# Not end of packet, maybe it has end of entire file?
tag_end = data.find(end_separator)
if tag_end != -1:
found_separator = end_separator
else:
# Found a single packet, just add the separator without extras
found_separator = packet_separator
if found_separator:
tag_end += len(found_separator) - end_tag_strip_length
return data[tag_start:tag_end].strip().strip(b","), data[tag_end + 1:]
return None, data
def _get_json_separators(self):
""""Returns the separators between packets in a JSON output
Returns a tuple of (packet_separator, end_of_file_separator, characters_to_disregard).
The latter variable being the number of characters to ignore in order to pass the packet (i.e. extra newlines,
commas, parenthesis).
"""
if not self._tshark_version or self._tshark_version >= version.parse("3.0.0"):
return f"{os.linesep} }},{os.linesep}".encode(), f"}}{os.linesep}]".encode(), 1 + len(os.linesep)
else:
return f"}}{os.linesep}{os.linesep} ,".encode(), f"}}{os.linesep}{os.linesep}]".encode(), 1
|
class TsharkJsonParser(BaseTsharkOutputParser):
def __init__(self, tshark_version=None):
pass
def _parse_single_packet(self, packet):
pass
def _extract_packet_from_data(self, data, got_first_packet=True):
'''Returns a packet's data and any remaining data after reading that first packet'''
pass
def _get_json_separators(self):
'''"Returns the separators between packets in a JSON output
Returns a tuple of (packet_separator, end_of_file_separator, characters_to_disregard).
The latter variable being the number of characters to ignore in order to pass the packet (i.e. extra newlines,
commas, parenthesis).
'''
pass
| 5 | 2 | 10 | 1 | 8 | 2 | 3 | 0.26 | 1 | 1 | 0 | 0 | 4 | 1 | 4 | 7 | 46 | 7 | 31 | 11 | 26 | 8 | 29 | 11 | 24 | 6 | 1 | 2 | 10 |
142,709 |
KimiNewt/pyshark
|
src/pyshark/tshark/tshark.py
|
pyshark.tshark.tshark.TSharkNotFoundException
|
class TSharkNotFoundException(Exception):
pass
|
class TSharkNotFoundException(Exception):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
142,710 |
KimiNewt/pyshark
|
src/pyshark/packet/layers/ek_layer.py
|
pyshark.packet.layers.ek_layer.EkLayer
|
class EkLayer(BaseLayer, _EkLayerHelperFuncsMixin):
__slots__ = ["_layer_name", "_fields_dict"]
def __init__(self, layer_name, layer_dict):
super().__init__(layer_name)
self._fields_dict = layer_dict
def get_field(self, name) -> typing.Union["EkMultiField", None, str, int, bool, bytes, list]:
name = name.replace(".", "_")
if name in self._fields_dict:
# For cases like "text"
return self._get_field_value(name)
for prefix in self._get_possible_layer_prefixes():
nested_field = self._get_nested_field(prefix, name)
if nested_field is not None:
return nested_field
return None
def has_field(self, name) -> bool:
"""Checks if the field exists, either a nested field or a regular field"""
return name in self.field_names or name in self.all_field_names
@property
def field_names(self):
return list({field_name.split("_", 1)[0] for field_name in self.all_field_names})
@property
def all_field_names(self):
"""Gets all field names, including subfields"""
names = set()
for field_name in self._fields_dict:
for prefix in self._get_possible_layer_prefixes():
if field_name.startswith(prefix):
names.add(_remove_ek_prefix(prefix, field_name))
break
return list(names)
def _get_field_value(self, full_field_name):
"""Gets the field value, optionally casting it using the cached field mapping"""
field_value = self._fields_dict[full_field_name]
return ek_field_mapping.MAPPING.cast_field_value(self._layer_name, full_field_name, field_value)
def _get_nested_field(self, prefix, name):
"""Gets a field that is directly on the layer
Returns either a multifield or a raw value.
"""
# TODO: Optimize
field_ek_name = f"{prefix}_{name}"
if field_ek_name in self._fields_dict:
if self._field_has_subfields(field_ek_name):
return EkMultiField(self, self._fields_dict, name,
value=self._get_field_value(field_ek_name))
return self._get_field_value(field_ek_name)
for possible_nested_name in self._fields_dict:
if possible_nested_name.startswith(f"{field_ek_name}_"):
return EkMultiField(self, self._fields_dict, name, value=None)
return None
def _field_has_subfields(self, field_ek_name):
field_ek_name_with_ext = f"{field_ek_name}_"
for field_name in self._fields_dict:
if field_name.startswith(field_ek_name_with_ext):
return True
return False
def _pretty_print_layer_fields(self, file: io.IOBase):
for field_name in self.field_names:
field = self.get_field(field_name)
self._pretty_print_field(field_name, field, file, indent=1)
def _pretty_print_field(self, field_name, field, file, indent=0):
prefix = "\t" * indent
if isinstance(field, EkMultiField):
file.write(colored(f"{prefix}{field_name}: ", "green", attrs=["bold"]))
if field.value is not None:
file.write(str(field.value))
file.write(os.linesep)
for subfield in field.subfields:
self._pretty_print_field(subfield, field.get_field(subfield), file,
indent=indent + 1)
else:
file.write(colored(f"{prefix}{field_name}: ", "green", attrs=["bold"]))
file.write(f"{field}{os.linesep}")
def _get_possible_layer_prefixes(self):
"""Gets the possible prefixes for a field under this layer.
The order matters, longest must be first
"""
return [f"{self._layer_name}_{self._layer_name}", self._layer_name]
|
class EkLayer(BaseLayer, _EkLayerHelperFuncsMixin):
def __init__(self, layer_name, layer_dict):
pass
def get_field(self, name) -> typing.Union["EkMultiField", None, str, int, bool, bytes, list]:
pass
def has_field(self, name) -> bool:
'''Checks if the field exists, either a nested field or a regular field'''
pass
@property
def field_names(self):
pass
@property
def all_field_names(self):
'''Gets all field names, including subfields'''
pass
def _get_field_value(self, full_field_name):
'''Gets the field value, optionally casting it using the cached field mapping'''
pass
def _get_nested_field(self, prefix, name):
'''Gets a field that is directly on the layer
Returns either a multifield or a raw value.
'''
pass
def _field_has_subfields(self, field_ek_name):
pass
def _pretty_print_layer_fields(self, file: io.IOBase):
pass
def _pretty_print_field(self, field_name, field, file, indent=0):
pass
def _get_possible_layer_prefixes(self):
'''Gets the possible prefixes for a field under this layer.
The order matters, longest must be first
'''
pass
| 14 | 5 | 7 | 1 | 6 | 1 | 2 | 0.16 | 2 | 9 | 1 | 0 | 11 | 1 | 11 | 46 | 95 | 17 | 67 | 30 | 53 | 11 | 62 | 28 | 50 | 5 | 5 | 3 | 27 |
142,711 |
Kinto/kinto-attachment
|
tests/test_views_attachment.py
|
tests.test_views_attachment.HeartbeartTest
|
class HeartbeartTest(BaseWebTestS3, unittest.TestCase):
def test_attachments_is_added_to_heartbeat_view(self):
resp = self.app.get("/__heartbeat__")
self.assertIn("attachments", resp.json)
def test_heartbeat_is_false_if_error_happens(self):
with mock.patch("pyramid_storage.s3.S3FileStorage.delete") as mocked:
mocked.side_effect = ValueError
resp = self.app.get("/__heartbeat__", status=503)
self.assertFalse(resp.json["attachments"])
def test_heartbeat_is_true_if_server_is_readonly(self):
patch = mock.patch("pyramid_storage.s3.S3FileStorage.delete")
self.addCleanup(patch.stop)
mocked = patch.start()
mocked.side_effect = ValueError
with mock.patch.dict(self.app.app.registry.settings, [("readonly", "true")]):
resp = self.app.get("/__heartbeat__")
self.assertTrue(resp.json["attachments"])
|
class HeartbeartTest(BaseWebTestS3, unittest.TestCase):
def test_attachments_is_added_to_heartbeat_view(self):
pass
def test_heartbeat_is_false_if_error_happens(self):
pass
def test_heartbeat_is_true_if_server_is_readonly(self):
pass
| 4 | 0 | 6 | 0 | 5 | 0 | 1 | 0 | 2 | 1 | 0 | 0 | 3 | 0 | 3 | 86 | 20 | 3 | 17 | 10 | 13 | 0 | 17 | 9 | 13 | 1 | 3 | 1 | 3 |
142,712 |
Kinto/kinto-attachment
|
tests/test_views_attachment.py
|
tests.test_views_attachment.DeleteTest
|
class DeleteTest(object):
def setUp(self):
super(DeleteTest, self).setUp()
self.attachment = self.upload().json
def exists(self, fullurl):
location = fullurl.replace(self.base_url, "")
return self.backend.exists(location)
def test_attachment_is_removed_on_delete(self):
fullurl = self.attachment["location"]
self.assertTrue(self.exists(fullurl))
self.app.delete(self.endpoint_uri, headers=self.headers, status=204)
self.assertFalse(self.exists(fullurl))
def test_metadata_are_removed_on_delete(self):
self.app.delete(self.endpoint_uri, headers=self.headers, status=204)
resp = self.app.get(self.record_uri, headers=self.headers)
self.assertIsNone(resp.json["data"].get("attachment"))
def test_link_is_removed_on_delete(self):
storage = self.app.app.registry.storage
links = storage.list_all("", "__attachments__")
self.assertEqual(len(links), self.nb_uploaded_files)
self.app.delete(self.endpoint_uri, headers=self.headers, status=204)
links = storage.list_all("", "__attachments__")
self.assertEqual(len(links), 0)
def test_attachment_is_removed_when_record_is_deleted(self):
fullurl = self.attachment["location"]
self.assertTrue(self.exists(fullurl))
self.app.delete(self.record_uri, headers=self.headers)
self.assertFalse(self.exists(fullurl))
def test_attachments_are_removed_when_bucket_is_deleted(self):
fullurl = self.attachment["location"]
self.assertTrue(self.exists(fullurl))
self.app.delete("/buckets/fennec", headers=self.headers)
self.assertFalse(self.exists(fullurl))
def test_attachments_are_removed_when_collection_is_deleted(self):
fullurl = self.attachment["location"]
self.assertTrue(self.exists(fullurl))
self.app.delete("/buckets/fennec/collections/fonts", headers=self.headers)
self.assertFalse(self.exists(fullurl))
def test_attachments_links_are_removed_forever(self):
storage = self.app.app.registry.storage
links = storage.list_all("", "__attachments__")
self.assertEqual(len(links), self.nb_uploaded_files)
self.app.delete(self.record_uri, headers=self.headers)
links = storage.list_all("", "__attachments__")
self.assertEqual(len(links), 0)
def test_no_error_when_other_resource_is_deleted(self):
group_url = "/buckets/default/groups/admins"
self.app.put_json(group_url, {"data": {"members": ["them"]}}, headers=self.headers)
self.app.delete(group_url, headers=self.headers)
|
class DeleteTest(object):
def setUp(self):
pass
def exists(self, fullurl):
pass
def test_attachment_is_removed_on_delete(self):
pass
def test_metadata_are_removed_on_delete(self):
pass
def test_link_is_removed_on_delete(self):
pass
def test_attachment_is_removed_when_record_is_deleted(self):
pass
def test_attachments_are_removed_when_bucket_is_deleted(self):
pass
def test_attachments_are_removed_when_collection_is_deleted(self):
pass
def test_attachments_links_are_removed_forever(self):
pass
def test_no_error_when_other_resource_is_deleted(self):
pass
| 11 | 0 | 5 | 0 | 5 | 0 | 1 | 0 | 1 | 1 | 0 | 2 | 10 | 4 | 10 | 10 | 58 | 9 | 49 | 24 | 38 | 0 | 49 | 23 | 38 | 1 | 1 | 0 | 10 |
142,713 |
Kinto/kinto-attachment
|
tests/test_views_attachment.py
|
tests.test_views_attachment.UploadTest
|
class UploadTest(object):
def test_returns_200_to_record_once_uploaded(self):
self.upload(status=201)
def test_record_is_created_with_metadata(self):
self.upload()
resp = self.app.get(self.record_uri, headers=self.headers)
self.assertIn(self.file_field, resp.json["data"])
def test_record_is_created_with_valid_id(self):
self.record_uri = self.get_record_uri("fennec", "fonts", "logo")
self.endpoint_uri = self.record_uri + "/attachment"
self.app.put_json(self.record_uri, {}, headers=self.headers)
self.upload(status=200)
def test_returns_200_if_record_already_exists(self):
self.app.put_json(self.record_uri, {}, headers=self.headers)
self.upload(status=200)
def test_adds_cors_and_location_to_response(self):
response = self.upload()
self.assertEqual(response.headers["Location"], "http://localhost/v1" + self.record_uri)
self.assertIn("Access-Control-Allow-Origin", response.headers)
def test_has_no_subfolder_if_setting_is_undefined(self):
self.app.app.registry.settings.pop("attachment.folder")
response = self.upload()
record = self.get_record(response)
url = urlparse(record["location"])
self.assertNotIn("/", url.path[1:])
def exists(self, fullurl):
location = fullurl.replace(self.base_url, "")
return self.backend.exists(location)
def test_previous_attachment_is_removed_on_replacement(self):
first = self.get_record(self.upload())
self.assertTrue(self.exists(first["location"]))
second = self.get_record(self.upload())
self.assertFalse(self.exists(first["location"]))
self.assertTrue(self.exists(second["location"]))
|
class UploadTest(object):
def test_returns_200_to_record_once_uploaded(self):
pass
def test_record_is_created_with_metadata(self):
pass
def test_record_is_created_with_valid_id(self):
pass
def test_returns_200_if_record_already_exists(self):
pass
def test_adds_cors_and_location_to_response(self):
pass
def test_has_no_subfolder_if_setting_is_undefined(self):
pass
def exists(self, fullurl):
pass
def test_previous_attachment_is_removed_on_replacement(self):
pass
| 9 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 1 | 0 | 0 | 2 | 8 | 2 | 8 | 8 | 41 | 7 | 34 | 18 | 25 | 0 | 34 | 18 | 25 | 1 | 1 | 0 | 8 |
142,714 |
Kinto/kinto-attachment
|
tests/test_views_attachment.py
|
tests.test_views_attachment.SingleAttachmentViewTest
|
class SingleAttachmentViewTest(AttachmentViewTest, BaseWebTestLocal, unittest.TestCase):
pass
|
class SingleAttachmentViewTest(AttachmentViewTest, BaseWebTestLocal, unittest.TestCase):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0 | 0 | 0 | 0 | 0 | 0 | 112 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
142,715 |
Kinto/kinto-attachment
|
tests/test_views_attachment.py
|
tests.test_views_attachment.S3UploadTest
|
class S3UploadTest(UploadTest, BaseWebTestS3, unittest.TestCase):
pass
|
class S3UploadTest(UploadTest, BaseWebTestS3, unittest.TestCase):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0 | 0 | 0 | 0 | 0 | 0 | 91 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
142,716 |
Kinto/kinto-attachment
|
tests/test_views_attachment.py
|
tests.test_views_attachment.S3DeleteTest
|
class S3DeleteTest(DeleteTest, BaseWebTestS3, unittest.TestCase):
pass
|
class S3DeleteTest(DeleteTest, BaseWebTestS3, unittest.TestCase):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0 | 0 | 0 | 0 | 0 | 0 | 93 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
142,717 |
Kinto/kinto-attachment
|
tests/test_views_attachment.py
|
tests.test_views_attachment.PerResourceConfigAttachementViewTest
|
class PerResourceConfigAttachementViewTest(BaseWebTestS3, unittest.TestCase):
config = "config/s3_per_resource.ini"
def test_file_get_randomize_in_fennec_bucket(self):
r = self.upload()
self.assertEqual(r.json["filename"], "image.jpg")
self.assertNotIn(r.json["filename"], r.json["location"])
def test_file_do_not_get_randomize_in_fennec_experiments_collection(self):
self.create_collection("fennec", "experiments")
record_uri = self.get_record_uri("fennec", "experiments", str(uuid.uuid4()))
self.endpoint_uri = record_uri + "/attachment"
r = self.upload()
self.assertEqual(r.json["filename"], "image.jpg")
self.assertIn(r.json["filename"], r.json["location"])
|
class PerResourceConfigAttachementViewTest(BaseWebTestS3, unittest.TestCase):
def test_file_get_randomize_in_fennec_bucket(self):
pass
def test_file_do_not_get_randomize_in_fennec_experiments_collection(self):
pass
| 3 | 0 | 7 | 1 | 6 | 0 | 1 | 0 | 2 | 1 | 0 | 0 | 2 | 1 | 2 | 85 | 17 | 4 | 13 | 8 | 10 | 0 | 13 | 8 | 10 | 1 | 3 | 0 | 2 |
142,718 |
Kinto/kinto-attachment
|
tests/test_views_attachment.py
|
tests.test_views_attachment.OverridenMimetypesTest
|
class OverridenMimetypesTest(BaseWebTestS3, unittest.TestCase):
config = "config/s3.ini"
def test_file_mimetype_comes_from_config(self):
resp = self.upload(files=[(self.file_field, b"kinto.txt", b"--binary--")])
self.assertEqual(resp.json["mimetype"], "text/vnd.graphviz")
|
class OverridenMimetypesTest(BaseWebTestS3, unittest.TestCase):
def test_file_mimetype_comes_from_config(self):
pass
| 2 | 0 | 4 | 1 | 3 | 0 | 1 | 0 | 2 | 0 | 0 | 0 | 1 | 0 | 1 | 84 | 7 | 2 | 5 | 4 | 3 | 0 | 5 | 4 | 3 | 1 | 3 | 0 | 1 |
142,719 |
Kinto/kinto-attachment
|
tests/test_views_attachment.py
|
tests.test_views_attachment.LocalUploadTest
|
class LocalUploadTest(UploadTest, BaseWebTestLocal, unittest.TestCase):
def test_file_is_created_on_local_filesystem(self):
attachment = self.upload().json
fullurl = attachment["location"]
relativeurl = fullurl.replace(self.base_url, "")
self.assertTrue(os.path.exists(os.path.join("/tmp", relativeurl)))
def test_file_is_not_gzipped_on_local_filesystem(self):
resp = self.upload(files=[(self.file_field, b"my-report.pdf", b"--binary--")])
attachment = resp.json
self.assertTrue(attachment["location"].endswith(".pdf"))
self.assertEqual(attachment["mimetype"], "application/pdf")
relativeurl = attachment["location"].replace(self.base_url, "")
self.assertEqual(attachment["hash"], sha256(b"--binary--"))
self.assertEqual(attachment["size"], len(b"--binary--"))
file_path = os.path.join("/tmp", relativeurl)
self.assertTrue(os.path.exists(file_path))
with open(file_path, "rb") as f:
self.assertEqual(f.read(), b"--binary--")
|
class LocalUploadTest(UploadTest, BaseWebTestLocal, unittest.TestCase):
def test_file_is_created_on_local_filesystem(self):
pass
def test_file_is_not_gzipped_on_local_filesystem(self):
pass
| 3 | 0 | 9 | 0 | 9 | 0 | 1 | 0 | 3 | 0 | 0 | 0 | 2 | 0 | 2 | 92 | 19 | 1 | 18 | 11 | 15 | 0 | 18 | 10 | 15 | 1 | 3 | 1 | 2 |
142,720 |
Kinto/kinto-attachment
|
tests/test_views_attachment.py
|
tests.test_views_attachment.LocalDeleteTest
|
class LocalDeleteTest(DeleteTest, BaseWebTestLocal, unittest.TestCase):
pass
|
class LocalDeleteTest(DeleteTest, BaseWebTestLocal, unittest.TestCase):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0 | 0 | 0 | 0 | 0 | 0 | 92 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
142,721 |
Kinto/kinto-attachment
|
tests/test_views_attachment.py
|
tests.test_views_attachment.KeepOldFilesTest
|
class KeepOldFilesTest(BaseWebTestLocal, unittest.TestCase):
def make_app(self):
import webtest
from kinto import DEFAULT_SETTINGS
from kinto import main as testapp
from kinto.core import testing as core_support
settings = core_support.DEFAULT_SETTINGS.copy()
settings.update(**DEFAULT_SETTINGS)
settings["multiauth.policies"] = "basicauth"
settings["storage_backend"] = "kinto.core.storage.memory"
settings["permission_backend"] = "kinto.core.permission.memory"
settings["userid_hmac_secret"] = "this is not a secret"
settings["includes"] = "kinto_attachment"
settings["kinto.attachment.base_path"] = "/tmp"
settings["kinto.attachment.base_url"] = ""
settings["kinto.attachment.keep_old_files"] = "true"
app = webtest.TestApp(testapp({}, **settings))
app.RequestClass = core_support.get_request_class(prefix="v1")
return app
def test_files_are_kept_when_attachment_is_replaced(self):
resp = self.upload(status=201)
location1 = resp.json["location"]
resp = self.upload(status=200)
location2 = resp.json["location"]
self.assertNotEqual(location1, location2)
self.assertTrue(self.backend.exists(location2))
self.assertTrue(self.backend.exists(location1))
def test_files_are_kept_when_attachment_is_deleted(self):
resp = self.upload(status=201)
location = resp.json["location"]
self.assertTrue(self.backend.exists(location))
self.app.delete(self.record_uri + "/attachment", headers=self.headers)
self.assertTrue(self.backend.exists(location))
def test_files_are_kept_when_record_is_deleted(self):
resp = self.upload(status=201)
location = resp.json["location"]
self.assertTrue(self.backend.exists(location))
self.app.delete(self.record_uri, headers=self.headers)
self.assertTrue(self.backend.exists(location))
|
class KeepOldFilesTest(BaseWebTestLocal, unittest.TestCase):
def make_app(self):
pass
def test_files_are_kept_when_attachment_is_replaced(self):
pass
def test_files_are_kept_when_attachment_is_deleted(self):
pass
def test_files_are_kept_when_record_is_deleted(self):
pass
| 5 | 0 | 11 | 2 | 10 | 0 | 1 | 0 | 2 | 0 | 0 | 0 | 4 | 1 | 4 | 86 | 49 | 10 | 39 | 19 | 30 | 0 | 39 | 18 | 30 | 1 | 3 | 0 | 4 |
142,722 |
Kinto/kinto-attachment
|
tests/test_views_attachment.py
|
tests.test_views_attachment.AttachmentViewTest
|
class AttachmentViewTest(object):
def test_only_post_and_options_is_accepted(self):
self.app.get(self.endpoint_uri, headers=self.headers, status=405)
self.app.put(self.endpoint_uri, headers=self.headers, status=405)
self.app.patch(self.endpoint_uri, headers=self.headers, status=405)
headers = self.headers.copy()
headers["Access-Control-Request-Method"] = "POST"
self.app.options(self.endpoint_uri, headers=headers, status=200)
def test_record_is_updated_with_metadata(self):
existing = {"data": {"author": "frutiger"}}
self.app.put_json(self.record_uri, existing, headers=self.headers)
self.upload()
resp = self.app.get(self.record_uri, headers=self.headers)
self.assertIn(self.file_field, resp.json["data"])
self.assertIn("author", resp.json["data"])
def test_record_metadata_has_hash_hexdigest(self):
r = self.upload()
h = "db511d372e98725a61278e90259c7d4c5484fc7a781d7dcc0c93d53b8929e2ba"
self.assertEqual(self.get_record(r)["hash"], h)
def test_record_metadata_has_randomized_location(self):
resp = self.upload(files=[(self.file_field, b"my-report.pdf", b"--binary--")])
record = self.get_record(resp)
self.assertNotIn("report", record["location"])
def test_record_location_contains_subfolder(self):
self.upload()
resp = self.app.get(self.record_uri, headers=self.headers)
location = resp.json["data"][self.file_field]["location"]
self.assertIn("fennec/fonts/", location)
def test_record_metadata_provides_original_filename(self):
resp = self.upload(files=[(self.file_field, b"my-report.pdf", b"--binary--")])
record = self.get_record(resp)
self.assertEqual("my-report.pdf", record["filename"])
def test_record_is_created_with_fields(self):
self.upload(params=[("data", '{"family": "sans"}')])
resp = self.app.get(self.record_uri, headers=self.headers)
self.assertEqual(resp.json["data"]["family"], "sans")
def test_record_is_updated_with_fields(self):
existing = {"data": {"author": "frutiger"}}
self.app.put_json(self.record_uri, existing, headers=self.headers)
self.upload(params=[("data", '{"family": "sans"}')])
resp = self.app.get(self.record_uri, headers=self.headers)
self.assertEqual(resp.json["data"]["family"], "sans")
self.assertEqual(resp.json["data"]["author"], "frutiger")
def test_record_attachment_metadata_cannot_be_removed_manually(self):
self.upload(params=[("data", '{"family": "sans"}')])
body = {"data": {"attachment": {"manual": "true"}}}
resp = self.app.patch_json(self.record_uri, body, headers=self.headers, status=400)
self.assertIn("Attachment metadata cannot be modified", resp.json["message"])
def test_record_is_created_with_appropriate_permissions(self):
self.upload()
current_principal = (
"basicauth:c6c27f0c7297ba7d4abd2a70c8a2cb88a06a3bb793817ef2c85fe8a709b08022"
)
resp = self.app.get(self.record_uri, headers=self.headers)
self.assertEqual(resp.json["permissions"], {"write": [current_principal]})
def test_record_permissions_can_also_be_specified(self):
self.upload(params=[("permissions", '{"read": ["system.Everyone"]}')])
resp = self.app.get(self.record_uri, headers=self.headers)
self.assertIn("system.Everyone", resp.json["permissions"]["read"])
# Content Validation.
def test_records_fields_must_be_valid_json(self):
resp = self.upload(params=[("data", "{>author: 12}")], status=400)
self.assertIn("body: data is not valid JSON", resp.json["message"])
def test_permissions_must_be_valid_json(self):
resp = self.upload(params=[("permissions", '{"read": >}')], status=400)
self.assertIn("body: permissions is not valid JSON", resp.json["message"])
def test_permissions_must_be_str(self):
files = [
("attachment", "image.jpg", b"--fake--"),
("data", "data", b'{"family": "sans"}'),
("permissions", "permissions", b'{"read": ["system.Everyone"]}'),
]
content_type, body = self.app.encode_multipart([], files)
headers = {**self.headers, "Content-Type": content_type}
resp = self.app.post(self.endpoint_uri, body, headers=headers, status=400)
self.assertIn(
"body: 'data' field should be passed as form data, not files", resp.json["message"]
)
def test_unknown_fields_are_not_accepted(self):
resp = self.upload(params=[("my_field", "a_value")], status=400)
self.assertIn("body: 'my_field' not in ('data', 'permissions')", resp.json["message"])
def test_record_fields_are_validated_against_schema(self):
resp = self.upload(params=[("data", '{"author": 12}')], status=400)
self.assertIn("author in body: 12 is not of type ", resp.json["message"])
def test_attachment_must_have_a_filename(self):
resp = self.upload(files=[(self.file_field, b"", b"--fake--")], status=400)
self.assertEqual(resp.json["message"], "body: Filename is required.")
def test_upload_refused_if_extension_not_allowed(self):
resp = self.upload(files=[(self.file_field, b"virus.exe", b"--fake--")], status=400)
self.assertEqual(resp.json["message"], "body: File extension is not allowed.")
def test_upload_refused_if_field_is_not_attachment(self):
resp = self.upload(files=[("fichierjoint", b"image.jpg", b"--fake--")], status=400)
self.assertEqual(resp.json["message"], "Attachment missing.")
self.assertEqual(resp.json["errno"], ERRORS.INVALID_POSTED_DATA.value)
def test_upload_refused_if_header_is_not_multipart(self):
self.headers["Content-Type"] = "application/json"
resp = self.app.post(self.endpoint_uri, {}, headers=self.headers, status=400)
self.assertEqual(resp.json["message"], "Content-Type should be multipart/form-data")
self.assertEqual(resp.json["errno"], ERRORS.INVALID_PARAMETERS.value)
def test_upload_refused_if_header_is_invalid_multipart(self):
self.headers["Content-Type"] = "multipart/form-data"
resp = self.app.post(self.endpoint_uri, {}, headers=self.headers, status=400)
self.assertEqual(
resp.json["message"].replace(": b'", ": '"), "Invalid boundary in multipart form: ''"
)
self.assertEqual(resp.json["errno"], ERRORS.INVALID_PARAMETERS.value)
# Permissions.
def test_upload_refused_if_not_authenticated(self):
self.headers.pop("Authorization")
self.upload(status=401)
def test_upload_replace_refused_if_not_authenticated(self):
self.upload(status=201)
self.headers.pop("Authorization")
self.upload(status=401)
def test_upload_refused_if_not_allowed(self):
self.headers.update(get_user_headers("jean-louis"))
self.upload(status=403)
def test_upload_replace_refused_if_only_create_allowed(self):
# Allow any authenticated to write in this collection.
perm = {"permissions": {"record:create": ["system.Authenticated"]}}
self.app.patch_json("/buckets/fennec/collections/fonts", perm, headers=self.headers)
self.upload(status=201)
self.headers.update(get_user_headers("jean-louis"))
self.upload(status=403)
def test_upload_replace_refused_if_only_bucket_read_is_allowed(self):
# Create a record with attachment.
self.upload(status=201)
# Now allow anyone to read this bucket.
perm = {"permissions": {"read": ["system.Everyone"]}}
self.app.patch_json("/buckets/fennec", perm, headers=self.headers)
# And try to replace anonymously.
self.headers.pop("Authorization")
self.upload(status=401)
def test_upload_replace_refused_if_only_read_is_allowed(self):
# Create a record with attachment.
self.upload(status=201)
# Now allow anyone to read this collection.
perm_change = [
{"op": "add", "path": "/permissions", "value": {"read": ["system.Everyone"]}}
]
self.app.patch_json(
"/buckets/fennec/collections/fonts",
perm_change,
headers={**self.headers, "Content-Type": "application/json-patch+json"},
)
# And try to replace anonymously.
self.headers.pop("Authorization")
self.upload(status=401)
def test_upload_create_accepted_if_create_allowed(self):
# Allow any authenticated to write in this collection.
perm = {"permissions": {"record:create": ["system.Authenticated"]}}
self.app.patch_json("/buckets/fennec/collections/fonts", perm, headers=self.headers)
self.headers.update(get_user_headers("jean-louis"))
self.upload(status=201)
def test_upload_create_accepted_if_write_allowed(self):
# Allow any authenticated to write in this bucket.
perm = {"permissions": {"write": ["system.Authenticated"]}}
self.app.patch_json("/buckets/fennec", perm, headers=self.headers)
self.headers.update(get_user_headers("jean-louis"))
self.upload(status=201)
def test_upload_replace_accepted_if_write_allowed(self):
# Allow any authenticated to write in this bucket.
perm = {"permissions": {"write": ["system.Authenticated"]}}
self.app.patch_json("/buckets/fennec", perm, headers=self.headers)
self.upload(status=201)
self.headers.update(get_user_headers("jean-louis"))
self.upload(status=200)
|
class AttachmentViewTest(object):
def test_only_post_and_options_is_accepted(self):
pass
def test_record_is_updated_with_metadata(self):
pass
def test_record_metadata_has_hash_hexdigest(self):
pass
def test_record_metadata_has_randomized_location(self):
pass
def test_record_location_contains_subfolder(self):
pass
def test_record_metadata_provides_original_filename(self):
pass
def test_record_is_created_with_fields(self):
pass
def test_record_is_updated_with_fields(self):
pass
def test_record_attachment_metadata_cannot_be_removed_manually(self):
pass
def test_record_is_created_with_appropriate_permissions(self):
pass
def test_record_permissions_can_also_be_specified(self):
pass
def test_records_fields_must_be_valid_json(self):
pass
def test_permissions_must_be_valid_json(self):
pass
def test_permissions_must_be_str(self):
pass
def test_unknown_fields_are_not_accepted(self):
pass
def test_record_fields_are_validated_against_schema(self):
pass
def test_attachment_must_have_a_filename(self):
pass
def test_upload_refused_if_extension_not_allowed(self):
pass
def test_upload_refused_if_field_is_not_attachment(self):
pass
def test_upload_refused_if_header_is_not_multipart(self):
pass
def test_upload_refused_if_header_is_invalid_multipart(self):
pass
def test_upload_refused_if_not_authenticated(self):
pass
def test_upload_replace_refused_if_not_authenticated(self):
pass
def test_upload_refused_if_not_allowed(self):
pass
def test_upload_replace_refused_if_only_create_allowed(self):
pass
def test_upload_replace_refused_if_only_bucket_read_is_allowed(self):
pass
def test_upload_replace_refused_if_only_read_is_allowed(self):
pass
def test_upload_create_accepted_if_create_allowed(self):
pass
def test_upload_create_accepted_if_write_allowed(self):
pass
def test_upload_replace_accepted_if_write_allowed(self):
pass
| 31 | 0 | 6 | 0 | 5 | 0 | 1 | 0.08 | 1 | 0 | 0 | 1 | 30 | 4 | 30 | 30 | 209 | 42 | 155 | 71 | 124 | 12 | 139 | 69 | 108 | 1 | 1 | 0 | 30 |
142,723 |
Kinto/kinto-attachment
|
tests/test_views_attachment.py
|
tests.test_views_attachment.DefaultBucketTest
|
class DefaultBucketTest(BaseWebTestLocal, unittest.TestCase):
def setUp(self):
super(DefaultBucketTest, self).setUp()
self.record_uri = self.get_record_uri("default", "pix", uuid.uuid4())
self.endpoint_uri = self.record_uri + "/attachment"
def test_implicit_collection_creation_on_upload(self):
resp = self.upload()
record_uri = resp.headers["Location"]
self.assertIn("/buckets/b4a52ebc-fe4a-1167-89f3-c792640c70b3", record_uri)
|
class DefaultBucketTest(BaseWebTestLocal, unittest.TestCase):
def setUp(self):
pass
def test_implicit_collection_creation_on_upload(self):
pass
| 3 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 2 | 1 | 0 | 0 | 2 | 2 | 2 | 84 | 10 | 1 | 9 | 7 | 6 | 0 | 9 | 7 | 6 | 1 | 3 | 0 | 2 |
142,724 |
Kinto/kinto-attachment
|
tests/test_utils.py
|
tests.test_utils._Registry
|
class _Registry(object):
settings = {"attachment.folder": ""}
attachment_resources = {}
def save(self, *args, **kw):
return "yeahok"
def url(self, location):
return "http://localhost/%s" % location
def create(self, *args, **kw):
pass
@property
def storage(self):
return self
|
class _Registry(object):
def save(self, *args, **kw):
pass
def url(self, location):
pass
def create(self, *args, **kw):
pass
@property
def storage(self):
pass
| 6 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 4 | 0 | 4 | 4 | 16 | 4 | 12 | 8 | 6 | 0 | 11 | 7 | 6 | 1 | 1 | 0 | 4 |
142,725 |
Kinto/kinto-attachment
|
tests/test_plugin_setup.py
|
tests.test_plugin_setup.IncludeMeTest
|
class IncludeMeTest(unittest.TestCase):
def includeme(self, settings):
config = testing.setUp(settings=settings)
kinto_main(None, config=config)
includeme(config)
return config
def test_includeme_understand_authorized_resources_settings(self):
config = self.includeme(
settings={
"attachment.base_path": "/tmp",
"attachment.resources.fennec.keep_old_files": "true",
"attachment.resources.fingerprinting.fonts.randomize": "true",
}
)
assert isinstance(config.registry.attachment_resources, dict)
assert "/buckets/fennec" in config.registry.attachment_resources
assert "/buckets/fingerprinting/collections/fonts" in config.registry.attachment_resources
def test_includeme_raises_error_for_malformed_resource_settings(self):
with pytest.raises(ConfigurationError) as excinfo:
self.includeme(settings={"attachment.resources.fen.nec.fonts.keep_old_files": "true"})
assert str(excinfo.value) == (
"Configuration rule malformed: `attachment.resources.fen.nec.fonts.keep_old_files`"
)
def test_includeme_raises_error_if_wrong_resource_settings_is_defined(self):
with pytest.raises(ConfigurationError) as excinfo:
self.includeme(settings={"attachment.resources.fennec.base_path": "foobar"})
assert str(excinfo.value) == (
"`base_path` is not a supported setting name. "
"Read `attachment.resources.fennec.base_path`"
)
def test_base_url_is_added_a_trailing_slash(self):
config = self.includeme(
settings={
"attachment.base_path": "/tmp",
"attachment.base_url": "http://cdn.com",
}
)
assert config.registry.api_capabilities["attachments"]["base_url"] == "http://cdn.com/"
def test_gcloud_is_used_if_credentials_setting_is_used(self):
config = self.includeme(
settings={
"attachment.gcloud.credentials": "/path/to/credentials.json",
"attachment.gcloud.bucket_name": "foo",
}
)
assert isinstance(config.registry.queryUtility(IFileStorage), GoogleCloudStorage)
def test_s3_is_used_if_base_path_setting_is_not_used(self):
config = self.includeme(
settings={
"attachment.aws.access_key": "abc",
"attachment.aws.bucket_name": "foo",
}
)
assert isinstance(config.registry.queryUtility(IFileStorage), S3FileStorage)
|
class IncludeMeTest(unittest.TestCase):
def includeme(self, settings):
pass
def test_includeme_understand_authorized_resources_settings(self):
pass
def test_includeme_raises_error_for_malformed_resource_settings(self):
pass
def test_includeme_raises_error_if_wrong_resource_settings_is_defined(self):
pass
def test_base_url_is_added_a_trailing_slash(self):
pass
def test_gcloud_is_used_if_credentials_setting_is_used(self):
pass
def test_s3_is_used_if_base_path_setting_is_not_used(self):
pass
| 8 | 0 | 8 | 0 | 8 | 0 | 1 | 0 | 1 | 2 | 0 | 0 | 7 | 0 | 7 | 79 | 60 | 6 | 54 | 15 | 46 | 0 | 28 | 13 | 20 | 1 | 2 | 1 | 7 |
142,726 |
Kinto/kinto-attachment
|
tests/test_plugin_setup.py
|
tests.test_plugin_setup.HelloViewTest
|
class HelloViewTest(BaseWebTestLocal, unittest.TestCase):
def test_capability_is_exposed(self):
resp = self.app.get("/")
capabilities = resp.json["capabilities"]
self.assertIn("attachments", capabilities)
expected = {
"version": __version__,
"description": "Add file attachments to records",
"url": "https://github.com/Kinto/kinto-attachment/",
"base_url": "https://files.server.com/root/",
}
self.assertEqual(expected, capabilities["attachments"])
|
class HelloViewTest(BaseWebTestLocal, unittest.TestCase):
def test_capability_is_exposed(self):
pass
| 2 | 0 | 11 | 0 | 11 | 0 | 1 | 0 | 2 | 0 | 0 | 0 | 1 | 0 | 1 | 83 | 12 | 0 | 12 | 5 | 10 | 0 | 7 | 5 | 5 | 1 | 3 | 0 | 1 |
142,727 |
Kinto/kinto-attachment
|
tests/test_events.py
|
tests.test_events.ResourceChangedTest
|
class ResourceChangedTest(BaseWebTest, unittest.TestCase):
config = "config/events.ini"
def test_resource_changed_is_triggered_when_attachment_is_set(self):
before = len(listener.received)
self.upload()
self.assertEqual(len(listener.received), before + 1)
def test_action_is_create_or_update(self):
self.upload()
self.assertEqual(listener.received[-1].payload["action"], "create")
self.upload()
self.assertEqual(listener.received[-1].payload["action"], "update")
def test_payload_attribute_are_sound(self):
self.upload()
payload = listener.received[-1].payload
self.assertEqual(payload["uri"], self.endpoint_uri)
self.assertEqual(payload["resource_name"], "record")
self.assertEqual(payload["record_id"], self.record_id)
self.assertEqual(payload["collection_id"], "fonts")
self.assertEqual(payload["bucket_id"], "fennec")
|
class ResourceChangedTest(BaseWebTest, unittest.TestCase):
def test_resource_changed_is_triggered_when_attachment_is_set(self):
pass
def test_action_is_create_or_update(self):
pass
def test_payload_attribute_are_sound(self):
pass
| 4 | 0 | 6 | 0 | 6 | 0 | 1 | 0 | 2 | 0 | 0 | 0 | 3 | 0 | 3 | 84 | 22 | 3 | 19 | 7 | 15 | 0 | 19 | 7 | 15 | 1 | 2 | 0 | 3 |
142,728 |
Kinto/kinto-attachment
|
tests/test_events.py
|
tests.test_events.Listener
|
class Listener(object):
def __init__(self):
self.received = []
def __call__(self, event):
self.received.append(event)
|
class Listener(object):
def __init__(self):
pass
def __call__(self, event):
pass
| 3 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 2 | 1 | 2 | 2 | 6 | 1 | 5 | 4 | 2 | 0 | 5 | 4 | 2 | 1 | 1 | 0 | 2 |
142,729 |
Kinto/kinto-attachment
|
tests/__init__.py
|
tests.BaseWebTestS3
|
class BaseWebTestS3(BaseWebTest):
config = "config/s3.ini"
def __init__(self, *args, **kwargs):
self._s3_bucket_created = False
super(BaseWebTestS3, self).__init__(*args, **kwargs)
def make_app(self):
app = super(BaseWebTestS3, self).make_app()
# Create the S3 bucket if necessary
if not self._s3_bucket_created:
prefix = "kinto.attachment."
settings = app.app.registry.settings
fs = S3FileStorage.from_settings(settings, prefix=prefix)
bucket_name = settings[prefix + "aws.bucket_name"]
fs.get_connection().create_bucket(bucket_name)
self._s3_bucket_created = True
return app
|
class BaseWebTestS3(BaseWebTest):
def __init__(self, *args, **kwargs):
pass
def make_app(self):
pass
| 3 | 0 | 9 | 2 | 7 | 1 | 2 | 0.07 | 1 | 1 | 0 | 5 | 2 | 1 | 2 | 11 | 21 | 5 | 15 | 10 | 12 | 1 | 15 | 10 | 12 | 2 | 2 | 1 | 3 |
142,730 |
Kinto/kinto-attachment
|
tests/__init__.py
|
tests.BaseWebTestLocal
|
class BaseWebTestLocal(BaseWebTest):
config = "config/local.ini"
def tearDown(self):
"""Delete uploaded local files."""
super(BaseWebTest, self).tearDown()
basepath = self.app.app.registry.settings["kinto.attachment.base_path"]
for created in self._created:
filepath = os.path.join(basepath, created)
if os.path.exists(filepath):
os.remove(filepath)
|
class BaseWebTestLocal(BaseWebTest):
def tearDown(self):
'''Delete uploaded local files.'''
pass
| 2 | 1 | 8 | 0 | 7 | 1 | 3 | 0.11 | 1 | 1 | 0 | 6 | 1 | 0 | 1 | 10 | 11 | 1 | 9 | 6 | 7 | 1 | 9 | 6 | 7 | 3 | 2 | 2 | 3 |
142,731 |
Kinto/kinto-attachment
|
tests/__init__.py
|
tests.BaseWebTest
|
class BaseWebTest(object):
config = ""
def __init__(self, *args, **kwargs):
super(BaseWebTest, self).__init__(*args, **kwargs)
self.app = self.make_app()
self.backend = self.app.app.registry.getUtility(IFileStorage)
self.base_url = self.backend.url("")
self._created = []
def setUp(self):
super(BaseWebTest, self).setUp()
self.headers = {"Content-Type": "application/json", "Origin": "http://localhost:9999"}
self.headers.update(get_user_headers("mat"))
self.create_collection("fennec", "fonts")
self.record_id = _id = str(uuid.uuid4())
self.record_uri = self.get_record_uri("fennec", "fonts", _id)
self.endpoint_uri = self.record_uri + "/attachment"
self.default_files = [("attachment", "image.jpg", b"--fake--")]
self.file_field = "attachment"
@property
def nb_uploaded_files(self):
return len(self.default_files)
def make_app(self):
curdir = os.path.dirname(os.path.realpath(__file__))
app = webtest.TestApp("config:%s" % self.config, relative_to=curdir)
app.RequestClass = core_support.get_request_class(prefix="v1")
return app
def upload(self, params=[], files=None, headers={}, status=None):
files = files or self.default_files
headers = headers or self.headers.copy()
content_type, body = self.app.encode_multipart(params, files)
headers["Content-Type"] = content_type
endpoint_url = self.endpoint_uri
resp = self.app.post(endpoint_url, body, headers=headers, status=status)
if 200 <= resp.status_code < 300:
self._add_to_cleanup(resp.json)
return resp
def _add_to_cleanup(self, attachment):
relativeurl = attachment["location"].replace(self.base_url, "")
self._created.append(relativeurl)
def create_collection(self, bucket_id, collection_id):
bucket_uri = "/buckets/%s" % bucket_id
self.app.put_json(bucket_uri, {}, headers=self.headers)
collection_uri = bucket_uri + "/collections/%s" % collection_id
collection = {"schema": SAMPLE_SCHEMA}
self.app.put_json(collection_uri, {"data": collection}, headers=self.headers)
def get_record_uri(self, bucket_id, collection_id, record_id):
return ("/buckets/{bucket_id}/collections/{collection_id}/records/{record_id}").format(
**locals()
)
def get_record(self, resp):
# Alias to resp.json, in a separate method to easily be extended.
return resp.json
|
class BaseWebTest(object):
def __init__(self, *args, **kwargs):
pass
def setUp(self):
pass
@property
def nb_uploaded_files(self):
pass
def make_app(self):
pass
def upload(self, params=[], files=None, headers={}, status=None):
pass
def _add_to_cleanup(self, attachment):
pass
def create_collection(self, bucket_id, collection_id):
pass
def get_record_uri(self, bucket_id, collection_id, record_id):
pass
def get_record_uri(self, bucket_id, collection_id, record_id):
pass
| 11 | 0 | 6 | 0 | 5 | 0 | 1 | 0.02 | 1 | 2 | 0 | 3 | 9 | 10 | 9 | 9 | 65 | 13 | 51 | 31 | 40 | 1 | 48 | 30 | 38 | 2 | 1 | 1 | 10 |
142,732 |
Kinto/kinto-attachment
|
src/kinto_attachment/utils.py
|
kinto_attachment.utils.AttachmentRouteFactory
|
class AttachmentRouteFactory(RouteFactory):
def __init__(self, request):
"""
This class is the `context` object being passed to the
:class:`kinto.core.authorization.AuthorizationPolicy`.
Attachment is not a Kinto resource.
The required permission is:
* ``write`` if the related record exists;
* ``record:create`` on the related collection otherwise.
"""
super(AttachmentRouteFactory, self).__init__(request)
self.resource_name = "record"
try:
request.current_resource_name = "record"
request.validated.setdefault("header", {})
request.validated.setdefault("querystring", {})
resource = Record(request, context=self)
resource.object_id = request.matchdict["id"]
existing = resource.get()
except httpexceptions.HTTPNotFound:
existing = None
if existing:
# Request write permission on the existing record.
self.permission_object_id = record_uri(request)
self.required_permission = "write"
else:
# Request create record permission on the parent collection.
self.permission_object_id = collection_uri(request)
self.required_permission = "create"
# Set the current object in context, since it is used in the
# authorization policy to distinguish operations on plural endpoints
# from individual objects. See Kinto/kinto#918
self.current_object = existing
|
class AttachmentRouteFactory(RouteFactory):
def __init__(self, request):
'''
This class is the `context` object being passed to the
:class:`kinto.core.authorization.AuthorizationPolicy`.
Attachment is not a Kinto resource.
The required permission is:
* ``write`` if the related record exists;
* ``record:create`` on the related collection otherwise.
'''
pass
| 2 | 1 | 35 | 3 | 19 | 13 | 3 | 0.65 | 1 | 1 | 0 | 0 | 1 | 4 | 1 | 1 | 36 | 3 | 20 | 8 | 18 | 13 | 19 | 8 | 17 | 3 | 1 | 1 | 3 |
142,733 |
Kinto/kinto-attachment
|
tests/test_utils.py
|
tests.test_utils._Request
|
class _Request(object):
registry = _Registry()
matchdict = {"bucket_id": "bucket", "collection_id": "collection"}
attachment = _Registry()
def route_path(self, *args, **kw):
return "fullpath"
|
class _Request(object):
def route_path(self, *args, **kw):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 8 | 2 | 6 | 5 | 4 | 0 | 6 | 5 | 4 | 1 | 1 | 0 | 1 |
142,734 |
Kitware/tangelo
|
tangelo/tangelo/websocket.py
|
tangelo.websocket.WebSocketLowPriorityPlugin
|
class WebSocketLowPriorityPlugin(WebSocketPlugin):
def __init__(self, *pargs, **kwargs):
WebSocketPlugin.__init__(self, *pargs, **kwargs)
# This version of start() differs only in that it has an assigned priority.
# The default priority is 50, which is what the actual WebSocketPlugin's
# start method gets, which means it runs before the privilege drop gets a
# chance to (priority 77, slightly lower than the 75 of the engine start
# itself). For some reason if this runs before the priv drop, things get
# screwed up.
def start(self):
WebSocketPlugin.start(self)
start.priority = 80
|
class WebSocketLowPriorityPlugin(WebSocketPlugin):
def __init__(self, *pargs, **kwargs):
pass
def start(self):
pass
| 3 | 0 | 2 | 0 | 2 | 0 | 1 | 1 | 1 | 0 | 0 | 0 | 2 | 0 | 2 | 2 | 13 | 1 | 6 | 3 | 3 | 6 | 6 | 3 | 3 | 1 | 1 | 0 | 2 |
142,735 |
Kitware/tangelo
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Kitware_tangelo/tangelo/tangelo/pkgdata/plugin/vtkweb/web/vtkweb.py
|
vtkweb.VTKWebSocketAB.RegisteringWebSocketClientFactory
|
class RegisteringWebSocketClientFactory(wamp.WampClientFactory):
def register(self, client):
self.client = client
|
class RegisteringWebSocketClientFactory(wamp.WampClientFactory):
def register(self, client):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 3 | 0 | 3 | 3 | 1 | 0 | 3 | 3 | 1 | 1 | 1 | 0 | 1 |
142,736 |
Kitware/tangelo
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Kitware_tangelo/tangelo/tangelo/pkgdata/plugin/vtkweb/web/vtkweb.py
|
vtkweb.post.Timeout
|
class Timeout:
pass
|
class Timeout:
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 0 | 0 | 0 |
142,737 |
Kitware/tangelo
|
tangelo/tangelo/util.py
|
tangelo.util.NonBlockingReader
|
class NonBlockingReader(threading.Thread):
def __init__(self, stream):
threading.Thread.__init__(self)
self.daemon = True
self.stream = stream
self.queue = Queue.Queue()
self.pushbuf = []
self.start()
def run(self):
for line in iter(self.stream.readline, ""):
self.queue.put(line)
self.stream.close()
def readline(self):
if len(self.pushbuf) > 0:
return self.pushbuf.pop()
else:
try:
line = self.queue.get_nowait()
except Queue.Empty:
line = None
return line
def readlines(self):
lines = []
done = False
while not done:
line = self.readline()
if line is not None:
lines.append(line)
else:
done = True
return lines
def pushline(self, line):
if len(line) == 0 or line[-1] != "\n":
line.append("\n")
self.pushbuf.append(line)
def pushlines(self, lines):
for line in lines:
self.pushline(line)
|
class NonBlockingReader(threading.Thread):
def __init__(self, stream):
pass
def run(self):
pass
def readline(self):
pass
def readlines(self):
pass
def pushline(self, line):
pass
def pushlines(self, lines):
pass
| 7 | 0 | 7 | 1 | 6 | 0 | 2 | 0 | 1 | 1 | 0 | 0 | 6 | 4 | 6 | 31 | 48 | 10 | 38 | 17 | 31 | 0 | 36 | 17 | 29 | 3 | 1 | 2 | 13 |
142,738 |
Kitware/tangelo
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Kitware_tangelo/tangelo/tangelo/pkgdata/plugin/vtkweb/web/vtkweb.py
|
vtkweb.post.Failed
|
class Failed:
pass
|
class Failed:
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 0 | 0 | 0 |
142,739 |
Kitware/tangelo
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Kitware_tangelo/tangelo/tangelo/pkgdata/plugin/vtkweb/web/vtkweb.py
|
vtkweb.VTKWebSocketAB.Connection
|
class Connection(threading.Thread):
def run(self):
self.factory = RegisteringWebSocketClientFactory(url)
self.factory.protocol = Protocol
twisted.internet.reactor.callFromThread(ab_websocket.connectWS,
self.factory)
def send(self, data):
twisted.internet.reactor.callFromThread(Protocol.sendMessage,
self.factory.client,
data)
|
class Connection(threading.Thread):
def run(self):
pass
def send(self, data):
pass
| 3 | 0 | 5 | 0 | 5 | 0 | 1 | 0 | 1 | 2 | 2 | 0 | 2 | 1 | 2 | 27 | 11 | 1 | 10 | 4 | 7 | 0 | 7 | 4 | 4 | 1 | 1 | 0 | 2 |
142,740 |
Kitware/tangelo
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Kitware_tangelo/tangelo/tangelo/__init__.py
|
tangelo.request_body.RequestBody
|
class RequestBody:
get_request_error = RuntimeError("cannot read body from a GET request")
def __init__(self, filelike, process_request_body):
self.source = filelike
self.process_request_body = process_request_body
def read(self, *pargs, **kwargs):
if not self.process_request_body:
raise RequestBody.get_request_error
else:
return self.source.read(*pargs, **kwargs)
def readline(self, *pargs, **kwargs):
if not self.process_request_body:
raise RequestBody.get_request_error
else:
return self.source.readline(*pargs, **kwargs)
def readlines(self, *pargs, **kwargs):
if not self.process_request_body:
raise RequestBody.get_request_error
else:
return self.readlines(*pargs, **kwargs)
|
class RequestBody:
def __init__(self, filelike, process_request_body):
pass
def read(self, *pargs, **kwargs):
pass
def readline(self, *pargs, **kwargs):
pass
def readlines(self, *pargs, **kwargs):
pass
| 5 | 0 | 5 | 0 | 5 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 4 | 2 | 4 | 4 | 24 | 4 | 20 | 8 | 15 | 0 | 17 | 8 | 12 | 2 | 0 | 1 | 7 |
142,741 |
Kitware/tangelo
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Kitware_tangelo/tangelo/tangelo/server.py
|
tangelo.server.Plugins.Plugin
|
class Plugin(object):
def __init__(self, path):
self.path = path
self.control = None
self.module = None
self.apps = []
|
class Plugin(object):
def __init__(self, path):
pass
| 2 | 0 | 5 | 0 | 5 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 4 | 1 | 1 | 6 | 0 | 6 | 6 | 4 | 0 | 6 | 6 | 4 | 1 | 1 | 0 | 1 |
142,742 |
Kitware/tangelo
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Kitware_tangelo/tangelo/tangelo/websocket.py
|
tangelo.websocket.mount.WebSocketHandler
|
class WebSocketHandler(object):
@cherrypy.expose
def index(self):
pass
@cherrypy.expose
def ws(self):
pass
|
class WebSocketHandler(object):
@cherrypy.expose
def index(self):
pass
@cherrypy.expose
def ws(self):
pass
| 5 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 2 | 0 | 2 | 2 | 8 | 1 | 7 | 5 | 2 | 0 | 5 | 3 | 2 | 1 | 1 | 0 | 2 |
142,743 |
Kitware/tangelo
|
docs/static/tng/startrek.py
|
startrek.Episode
|
class Episode(Base):
__tablename__ = "episodes"
id = Column(Integer, primary_key=True)
season = Column(Integer)
episode = Column(Integer)
title = Column(String)
airdate = Column(Date)
teleplay = relationship("Person", secondary=episode_teleplays, backref="teleplays")
story = relationship("Person", secondary=episode_stories, backref="stories")
director = relationship("Person", secondary=episode_directors, backref="directors")
stardate = Column(String)
url = Column(String)
def __repr__(self):
return (u"Episode('%s')" % (self.title)).encode("utf-8")
|
class Episode(Base):
def __repr__(self):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 16 | 2 | 14 | 12 | 12 | 0 | 14 | 12 | 12 | 1 | 1 | 0 | 1 |
142,744 |
Kitware/tangelo
|
docs/static/tng/startrek.py
|
startrek.Person
|
class Person(Base):
__tablename__ = "people"
id = Column(Integer, primary_key=True)
name = Column(String)
def __repr__(self):
return (u"Person('%s')" % (self.name)).encode("utf-8")
|
class Person(Base):
def __repr__(self):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 8 | 2 | 6 | 4 | 4 | 0 | 6 | 4 | 4 | 1 | 1 | 0 | 1 |
142,745 |
Kitware/tangelo
|
tangelo/tangelo/__init__.py
|
tangelo._File
|
class _File(object):
def __init__(self, path, content_type):
self.path = path
self.content_type = content_type
|
class _File(object):
def __init__(self, path, content_type):
pass
| 2 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 2 | 1 | 1 | 4 | 0 | 4 | 4 | 2 | 0 | 4 | 4 | 2 | 1 | 1 | 0 | 1 |
142,746 |
Kitware/tangelo
|
tangelo/tangelo/__init__.py
|
tangelo._InternalRedirect
|
class _InternalRedirect(object):
def __init__(self, path):
self.path = path
|
class _InternalRedirect(object):
def __init__(self, path):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 3 | 0 | 3 | 3 | 1 | 0 | 3 | 3 | 1 | 1 | 1 | 0 | 1 |
142,747 |
Kitware/tangelo
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Kitware_tangelo/tangelo/tangelo/pkgdata/plugin/vtkweb/web/vtkweb.py
|
vtkweb.WebSocketRelay.Class
|
class Class(ws4py.websocket.WebSocket):
def __init__(self, *pargs, **kwargs):
ws4py.websocket.WebSocket.__init__(self, *pargs, **kwargs)
scheme = "ws"
if cherrypy.config.get("server.ssl_private_key"):
scheme = "wss"
url = "%s://%s:%d/ws" % (scheme, hostname, port)
tangelo.log_info(
"VTKWEB",
"websocket created at %s:%d/%s (proxy to %s)" % (
hostname, port, key, url)
)
self.client = VTKWebSocketAB(url, self)
def closed(self, code, reason=None):
# TODO(choudhury): figure out if recovery, etc. is possible if the
# socket is closed for some reason.
tangelo.log_info(
"VTKWEB",
"websocket at %s:%d/%s closed with code %d (%s)" % (
hostname, port, key, code, reason
)
)
def received_message(self, msg):
self.client.send(msg.data)
|
class Class(ws4py.websocket.WebSocket):
def __init__(self, *pargs, **kwargs):
pass
def closed(self, code, reason=None):
pass
def received_message(self, msg):
pass
| 4 | 0 | 8 | 1 | 7 | 1 | 1 | 0.1 | 1 | 0 | 0 | 0 | 3 | 1 | 3 | 3 | 28 | 5 | 21 | 7 | 17 | 2 | 13 | 7 | 9 | 2 | 1 | 1 | 4 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.