index (int64, 0-731k) | package (string, 2-98 chars, ⌀ nullable) | name (string, 1-76 chars) | docstring (string, 0-281k chars, ⌀ nullable) | code (string, 4-1.07M chars, ⌀ nullable) | signature (string, 2-42.8k chars, ⌀ nullable)
---|---|---|---|---|---|
722,800 |
gitdb.stream
|
close
| null |
def close(self):
    close(self._fd)
|
(self)
|
722,801 |
gitdb.stream
|
fileno
| null |
def fileno(self):
    return self._fd
|
(self)
|
722,802 |
gitdb.stream
|
read
| null |
def read(self, count=0):
    if count == 0:
        count = os.path.getsize(self._filepath)
    # END handle read everything
    bytes = os.read(self._fd, count)
    self._pos += len(bytes)
    return bytes
|
(self, count=0)
|
722,803 |
gitdb.stream
|
tell
| null |
def tell(self):
    return self._pos
|
(self)
|
722,804 |
gitdb.stream
|
write
| null |
def write(self, data):
    self._pos += len(data)
    os.write(self._fd, data)
|
(self, data)
|
722,805 |
gitdb.db.base
|
FileDBBase
|
Provides basic facilities to retrieve files of interest, including
caching facilities to help mapping hexshas to objects
|
class FileDBBase:
    """Provides basic facilities to retrieve files of interest, including
    caching facilities to help mapping hexshas to objects"""

    def __init__(self, root_path):
        """Initialize this instance to look for its files at the given root path
        All subsequent operations will be relative to this path
        :raise InvalidDBRoot:
        **Note:** The base will not perform any accessibility checking, as the base
            might not yet be accessible, but become accessible before the first
            access."""
        super().__init__()
        self._root_path = root_path

    #{ Interface
    def root_path(self):
        """:return: path at which this db operates"""
        return self._root_path

    def db_path(self, rela_path):
        """
        :return: the given relative path relative to our database root, allowing
            to potentially access datafiles"""
        return join(self._root_path, force_text(rela_path))
    #} END interface
|
(root_path)
|
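The two path helpers above are easiest to see in use. A minimal sketch (not part of the dataset row), assuming gitdb is installed and importing from the gitdb.db.base module path shown in the package column:

from gitdb.db.base import FileDBBase

# Per the docstring above, no accessibility check happens at construction time,
# so the directory does not have to exist yet. The path is illustrative.
db = FileDBBase("/tmp/objects")
print(db.root_path())      # '/tmp/objects'
print(db.db_path("pack"))  # '/tmp/objects/pack'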
722,806 |
gitdb.db.base
|
__init__
|
Initialize this instance to look for its files at the given root path
All subsequent operations will be relative to this path
:raise InvalidDBRoot:
**Note:** The base will not perform any accessibility checking, as the base
might not yet be accessible, but become accessible before the first
access.
|
def __init__(self, root_path):
    """Initialize this instance to look for its files at the given root path
    All subsequent operations will be relative to this path
    :raise InvalidDBRoot:
    **Note:** The base will not perform any accessibility checking, as the base
        might not yet be accessible, but become accessible before the first
        access."""
    super().__init__()
    self._root_path = root_path
|
(self, root_path)
|
722,807 |
gitdb.db.base
|
db_path
|
:return: the given relative path relative to our database root, allowing
to potentially access datafiles
|
def db_path(self, rela_path):
    """
    :return: the given relative path relative to our database root, allowing
        to potentially access datafiles"""
    return join(self._root_path, force_text(rela_path))
|
(self, rela_path)
|
722,808 |
gitdb.db.base
|
root_path
|
:return: path at which this db operates
|
def root_path(self):
    """:return: path at which this db operates"""
    return self._root_path
|
(self)
|
722,809 |
gitdb.stream
|
FlexibleSha1Writer
|
Writer producing a sha1 while passing on the written bytes to the given
write function
|
class FlexibleSha1Writer(Sha1Writer):
    """Writer producing a sha1 while passing on the written bytes to the given
    write function"""
    __slots__ = 'writer'

    def __init__(self, writer):
        Sha1Writer.__init__(self)
        self.writer = writer

    def write(self, data):
        Sha1Writer.write(self, data)
        self.writer(data)
|
(writer)
|
722,810 |
gitdb.stream
|
__init__
| null |
def __init__(self, writer):
    Sha1Writer.__init__(self)
    self.writer = writer
|
(self, writer)
|
722,812 |
gitdb.stream
|
write
| null |
def write(self, data):
    Sha1Writer.write(self, data)
    self.writer(data)
|
(self, data)
|
722,813 |
gitdb.db.git
|
GitDB
|
A git-style object database, which contains all objects in the 'objects'
subdirectory
``IMPORTANT``: The usage of this implementation is highly discouraged as it fails to release file-handles.
This can be a problem with long-running processes and/or big repositories.
|
class GitDB(FileDBBase, ObjectDBW, CompoundDB):
    """A git-style object database, which contains all objects in the 'objects'
    subdirectory
    ``IMPORTANT``: The usage of this implementation is highly discouraged as it fails to release file-handles.
    This can be a problem with long-running processes and/or big repositories.
    """
    # Configuration
    PackDBCls = PackedDB
    LooseDBCls = LooseObjectDB
    ReferenceDBCls = ReferenceDB

    # Directories
    packs_dir = 'pack'
    loose_dir = ''
    alternates_dir = os.path.join('info', 'alternates')

    def __init__(self, root_path):
        """Initialize ourselves on a git objects directory"""
        super().__init__(root_path)

    def _set_cache_(self, attr):
        if attr == '_dbs' or attr == '_loose_db':
            self._dbs = list()
            loose_db = None
            for subpath, dbcls in ((self.packs_dir, self.PackDBCls),
                                   (self.loose_dir, self.LooseDBCls),
                                   (self.alternates_dir, self.ReferenceDBCls)):
                path = self.db_path(subpath)
                if os.path.exists(path):
                    self._dbs.append(dbcls(path))
                    if dbcls is self.LooseDBCls:
                        loose_db = self._dbs[-1]
                    # END remember loose db
                # END check path exists
            # END for each db type

            # should have at least one subdb
            if not self._dbs:
                raise InvalidDBRoot(self.root_path())
            # END handle error

            # the first one should have the store method
            assert loose_db is not None and hasattr(loose_db, 'store'), "First database needs store functionality"

            # finally set the value
            self._loose_db = loose_db
        else:
            super()._set_cache_(attr)
        # END handle attrs

    #{ ObjectDBW interface
    def store(self, istream):
        return self._loose_db.store(istream)

    def ostream(self):
        return self._loose_db.ostream()

    def set_ostream(self, ostream):
        return self._loose_db.set_ostream(ostream)
    #} END objectdbw interface
|
(root_path)
|
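As a rough usage sketch for the compound database above: GitDB is pointed at a repository's objects directory and delegates reads to its pack, loose, and alternate sub-databases. The repository path and sha below are placeholders, and the assumption that hex_to_bin lives in gitdb.util matches how the code cells use it:

from gitdb.db.git import GitDB
from gitdb.util import hex_to_bin  # assumed home of the bin/hex helpers

db = GitDB("/path/to/repo/.git/objects")  # hypothetical repository
binsha = hex_to_bin("0" * 40)             # placeholder 40-char hex sha
if db.has_object(binsha):                 # read interface comes from CompoundDB
    info = db.info(binsha)
    print(info.type, info.size)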
722,816 |
gitdb.db.git
|
__init__
|
Initialize ourselves on a git objects directory
|
def __init__(self, root_path):
    """Initialize ourselves on a git objects directory"""
    super().__init__(root_path)
|
(self, root_path)
|
722,818 |
gitdb.db.git
|
_set_cache_
| null |
def _set_cache_(self, attr):
    if attr == '_dbs' or attr == '_loose_db':
        self._dbs = list()
        loose_db = None
        for subpath, dbcls in ((self.packs_dir, self.PackDBCls),
                               (self.loose_dir, self.LooseDBCls),
                               (self.alternates_dir, self.ReferenceDBCls)):
            path = self.db_path(subpath)
            if os.path.exists(path):
                self._dbs.append(dbcls(path))
                if dbcls is self.LooseDBCls:
                    loose_db = self._dbs[-1]
                # END remember loose db
            # END check path exists
        # END for each db type

        # should have at least one subdb
        if not self._dbs:
            raise InvalidDBRoot(self.root_path())
        # END handle error

        # the first one should have the store method
        assert loose_db is not None and hasattr(loose_db, 'store'), "First database needs store functionality"

        # finally set the value
        self._loose_db = loose_db
    else:
        super()._set_cache_(attr)
    # END handle attrs
|
(self, attr)
|
722,823 |
gitdb.db.git
|
ostream
| null |
def ostream(self):
    return self._loose_db.ostream()
|
(self)
|
722,826 |
gitdb.db.git
|
set_ostream
| null |
def set_ostream(self, ostream):
    return self._loose_db.set_ostream(ostream)
|
(self, ostream)
|
722,829 |
gitdb.db.git
|
store
| null |
def store(self, istream):
    return self._loose_db.store(istream)
|
(self, istream)
|
722,832 |
gitdb.base
|
IStream
|
Represents an input content stream to be fed into the ODB. It is mutable to allow
the ODB to record information about the operation's outcome right in this instance.
It provides interfaces for the OStream and a StreamReader to allow the instance
to blend in without prior conversion.
The only method your content stream must support is 'read'
|
class IStream(list):
    """Represents an input content stream to be fed into the ODB. It is mutable to allow
    the ODB to record information about the operation's outcome right in this instance.
    It provides interfaces for the OStream and a StreamReader to allow the instance
    to blend in without prior conversion.
    The only method your content stream must support is 'read'"""
    __slots__ = tuple()

    def __new__(cls, type, size, stream, sha=None):
        return list.__new__(cls, (sha, type, size, stream, None))

    def __init__(self, type, size, stream, sha=None):
        list.__init__(self, (sha, type, size, stream, None))

    #{ Interface
    @property
    def hexsha(self):
        """:return: our sha, hex encoded, 40 bytes"""
        return bin_to_hex(self[0])

    def _error(self):
        """:return: the error that occurred when processing the stream, or None"""
        return self[4]

    def _set_error(self, exc):
        """Set this input stream to the given exc, may be None to reset the error"""
        self[4] = exc

    error = property(_error, _set_error)
    #} END interface

    #{ Stream Reader Interface
    def read(self, size=-1):
        """Implements a simple stream reader interface, passing the read call on
        to our internal stream"""
        return self[3].read(size)
    #} END stream reader interface

    #{ interface
    def _set_binsha(self, binsha):
        self[0] = binsha

    def _binsha(self):
        return self[0]

    binsha = property(_binsha, _set_binsha)

    def _type(self):
        return self[1]

    def _set_type(self, type):
        self[1] = type

    type = property(_type, _set_type)

    def _size(self):
        return self[2]

    def _set_size(self, size):
        self[2] = size

    size = property(_size, _set_size)

    def _stream(self):
        return self[3]

    def _set_stream(self, stream):
        self[3] = stream

    stream = property(_stream, _set_stream)
    #} END odb info interface
|
(type, size, stream, sha=None)
|
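To illustrate the dual tuple/attribute nature the docstring describes, a small hedged sketch; the b'blob' type string follows git's object-type naming and is an assumption of this example:

from io import BytesIO
from gitdb.base import IStream

data = b"hello"
istream = IStream(b"blob", len(data), BytesIO(data))
assert istream.binsha is None  # the ODB fills this in on store()
assert istream.type == b"blob" and istream.size == len(data)
assert istream.read() == data  # read() delegates to the wrapped stream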
722,833 |
gitdb.base
|
__init__
| null |
def __init__(self, type, size, stream, sha=None):
    list.__init__(self, (sha, type, size, stream, None))
|
(self, type, size, stream, sha=None)
|
722,834 |
gitdb.base
|
__new__
| null |
def __new__(cls, type, size, stream, sha=None):
    return list.__new__(cls, (sha, type, size, stream, None))
|
(cls, type, size, stream, sha=None)
|
722,835 |
gitdb.base
|
_binsha
| null |
def _binsha(self):
    return self[0]
|
(self)
|
722,836 |
gitdb.base
|
_error
|
:return: the error that occurred when processing the stream, or None
|
def _error(self):
    """:return: the error that occurred when processing the stream, or None"""
    return self[4]
|
(self)
|
722,837 |
gitdb.base
|
_set_binsha
| null |
def _set_binsha(self, binsha):
    self[0] = binsha
|
(self, binsha)
|
722,838 |
gitdb.base
|
_set_error
|
Set this input stream to the given exc, may be None to reset the error
|
def _set_error(self, exc):
    """Set this input stream to the given exc, may be None to reset the error"""
    self[4] = exc
|
(self, exc)
|
722,839 |
gitdb.base
|
_set_size
| null |
def _set_size(self, size):
    self[2] = size
|
(self, size)
|
722,840 |
gitdb.base
|
_set_stream
| null |
def _set_stream(self, stream):
    self[3] = stream
|
(self, stream)
|
722,841 |
gitdb.base
|
_set_type
| null |
def _set_type(self, type):
    self[1] = type
|
(self, type)
|
722,842 |
gitdb.base
|
_size
| null |
def _size(self):
    return self[2]
|
(self)
|
722,843 |
gitdb.base
|
_stream
| null |
def _stream(self):
    return self[3]
|
(self)
|
722,844 |
gitdb.base
|
_type
| null |
def _type(self):
    return self[1]
|
(self)
|
722,845 |
gitdb.base
|
read
|
Implements a simple stream reader interface, passing the read call on
to our internal stream
|
def read(self, size=-1):
    """Implements a simple stream reader interface, passing the read call on
    to our internal stream"""
    return self[3].read(size)
|
(self, size=-1)
|
722,846 |
gitdb.base
|
InvalidOInfo
|
Carries information about a sha identifying an object which is invalid in
the queried database. The exception attribute provides more information about
the cause of the issue
|
class InvalidOInfo(tuple):
    """Carries information about a sha identifying an object which is invalid in
    the queried database. The exception attribute provides more information about
    the cause of the issue"""
    __slots__ = tuple()

    def __new__(cls, sha, exc):
        return tuple.__new__(cls, (sha, exc))

    def __init__(self, sha, exc):
        tuple.__init__(self, (sha, exc))

    @property
    def binsha(self):
        return self[0]

    @property
    def hexsha(self):
        return bin_to_hex(self[0])

    @property
    def error(self):
        """:return: exception instance explaining the failure"""
        return self[1]
|
(sha, exc)
|
722,847 |
gitdb.base
|
__init__
| null |
def __init__(self, sha, exc):
    tuple.__init__(self, (sha, exc))
|
(self, sha, exc)
|
722,848 |
gitdb.base
|
__new__
| null |
def __new__(cls, sha, exc):
    return tuple.__new__(cls, (sha, exc))
|
(cls, sha, exc)
|
722,849 |
gitdb.base
|
InvalidOStream
|
Carries information about an invalid ODB stream
|
class InvalidOStream(InvalidOInfo):
    """Carries information about an invalid ODB stream"""
    __slots__ = tuple()
|
(sha, exc)
|
722,852 |
gitdb.db.loose
|
LooseObjectDB
|
A database which operates on loose object files
|
class LooseObjectDB(FileDBBase, ObjectDBR, ObjectDBW):
    """A database which operates on loose object files"""

    # CONFIGURATION
    # chunks in which data will be copied between streams
    stream_chunk_size = chunk_size

    # On windows we need to keep it writable, otherwise it cannot be removed
    # either
    new_objects_mode = int("444", 8)
    if os.name == 'nt':
        new_objects_mode = int("644", 8)

    def __init__(self, root_path):
        super().__init__(root_path)
        self._hexsha_to_file = dict()
        # Additional Flags - might be set to 0 after the first failure
        # Depending on the root, this might work for some mounts, for others not, which
        # is why it is per instance
        self._fd_open_flags = getattr(os, 'O_NOATIME', 0)

    #{ Interface
    def object_path(self, hexsha):
        """
        :return: path at which the object with the given hexsha would be stored,
            relative to the database root"""
        return join(hexsha[:2], hexsha[2:])

    def readable_db_object_path(self, hexsha):
        """
        :return: readable object path to the object identified by hexsha
        :raise BadObject: If the object file does not exist"""
        try:
            return self._hexsha_to_file[hexsha]
        except KeyError:
            pass
        # END ignore cache misses

        # try filesystem
        path = self.db_path(self.object_path(hexsha))
        if exists(path):
            self._hexsha_to_file[hexsha] = path
            return path
        # END handle cache
        raise BadObject(hexsha)

    def partial_to_complete_sha_hex(self, partial_hexsha):
        """:return: 20 byte binary sha1 string which matches the given name uniquely
        :param partial_hexsha: hexadecimal partial name (bytes or ascii string)
        :raise AmbiguousObjectName:
        :raise BadObject:"""
        candidate = None
        for binsha in self.sha_iter():
            if bin_to_hex(binsha).startswith(force_bytes(partial_hexsha)):
                # it can't ever find the same object twice
                if candidate is not None:
                    raise AmbiguousObjectName(partial_hexsha)
                candidate = binsha
        # END for each object

        if candidate is None:
            raise BadObject(partial_hexsha)
        return candidate
    #} END interface

    def _map_loose_object(self, sha):
        """
        :return: memory map of that file to allow random read access
        :raise BadObject: if object could not be located"""
        db_path = self.db_path(self.object_path(bin_to_hex(sha)))
        try:
            return file_contents_ro_filepath(db_path, flags=self._fd_open_flags)
        except OSError as e:
            if e.errno != ENOENT:
                # try again without noatime
                try:
                    return file_contents_ro_filepath(db_path)
                except OSError as new_e:
                    raise BadObject(sha) from new_e
                # didn't work because of our flag, don't try it again
                self._fd_open_flags = 0
            else:
                raise BadObject(sha) from e
            # END handle error
        # END exception handling

    def set_ostream(self, stream):
        """:raise TypeError: if the stream does not support the Sha1Writer interface"""
        if stream is not None and not isinstance(stream, Sha1Writer):
            raise TypeError("Output stream must support the %s interface" % Sha1Writer.__name__)
        return super().set_ostream(stream)

    def info(self, sha):
        m = self._map_loose_object(sha)
        try:
            typ, size = loose_object_header_info(m)
            return OInfo(sha, typ, size)
        finally:
            if hasattr(m, 'close'):
                m.close()
        # END assure release of system resources

    def stream(self, sha):
        m = self._map_loose_object(sha)
        type, size, stream = DecompressMemMapReader.new(m, close_on_deletion=True)
        return OStream(sha, type, size, stream)

    def has_object(self, sha):
        try:
            self.readable_db_object_path(bin_to_hex(sha))
            return True
        except BadObject:
            return False
        # END check existence

    def store(self, istream):
        """note: The sha we produce will be hex by nature"""
        tmp_path = None
        writer = self.ostream()
        if writer is None:
            # open a tmp file to write the data to
            fd, tmp_path = tempfile.mkstemp(prefix='obj', dir=self._root_path)

            if istream.binsha is None:
                writer = FDCompressedSha1Writer(fd)
            else:
                writer = FDStream(fd)
            # END handle direct stream copies
        # END handle custom writer

        try:
            try:
                if istream.binsha is not None:
                    # copy as much as possible, the actual uncompressed item size might
                    # be smaller than the compressed version
                    stream_copy(istream.read, writer.write, sys.maxsize, self.stream_chunk_size)
                else:
                    # write object with header, we have to make a new one
                    write_object(istream.type, istream.size, istream.read, writer.write,
                                 chunk_size=self.stream_chunk_size)
                # END handle direct stream copies
            finally:
                if tmp_path:
                    writer.close()
            # END assure target stream is closed
        except:
            if tmp_path:
                os.remove(tmp_path)
            raise
        # END assure tmpfile removal on error

        hexsha = None
        if istream.binsha:
            hexsha = istream.hexsha
        else:
            hexsha = writer.sha(as_hex=True)
        # END handle sha

        if tmp_path:
            obj_path = self.db_path(self.object_path(hexsha))
            obj_dir = dirname(obj_path)
            os.makedirs(obj_dir, exist_ok=True)
            # END handle destination directory
            # rename onto existing doesn't work on NTFS
            if isfile(obj_path):
                remove(tmp_path)
            else:
                rename(tmp_path, obj_path)
            # end rename only if needed

            # make sure it's readable for all! It started out as a rw-only tmp file,
            # but needs to be r--r--r--
            chmod(obj_path, self.new_objects_mode)
        # END handle dry_run

        istream.binsha = hex_to_bin(hexsha)
        return istream

    def sha_iter(self):
        # find all files which look like an object, extract sha from there
        for root, dirs, files in os.walk(self.root_path()):
            root_base = basename(root)
            if len(root_base) != 2:
                continue

            for f in files:
                if len(f) != 38:
                    continue
                yield hex_to_bin(root_base + f)
            # END for each file
        # END for each walk iteration

    def size(self):
        return len(tuple(self.sha_iter()))
|
(root_path)
|
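A round-trip sketch for the store()/stream() pair above, writing into a temporary directory; the b'blob' type string is an assumption borrowed from git's object types:

import tempfile
from io import BytesIO
from gitdb.base import IStream
from gitdb.db.loose import LooseObjectDB

ldb = LooseObjectDB(tempfile.mkdtemp())

data = b"my object payload"
istream = ldb.store(IStream(b"blob", len(data), BytesIO(data)))
print(istream.hexsha)  # sha computed while compressing

ostream = ldb.stream(istream.binsha)  # decompressing reader over the loose file
assert ostream.read() == data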
722,854 |
gitdb.db.loose
|
__init__
| null |
def __init__(self, root_path):
    super().__init__(root_path)
    self._hexsha_to_file = dict()
    # Additional Flags - might be set to 0 after the first failure
    # Depending on the root, this might work for some mounts, for others not, which
    # is why it is per instance
    self._fd_open_flags = getattr(os, 'O_NOATIME', 0)
|
(self, root_path)
|
722,855 |
gitdb.db.loose
|
_map_loose_object
|
:return: memory map of that file to allow random read access
:raise BadObject: if object could not be located
|
def _map_loose_object(self, sha):
    """
    :return: memory map of that file to allow random read access
    :raise BadObject: if object could not be located"""
    db_path = self.db_path(self.object_path(bin_to_hex(sha)))
    try:
        return file_contents_ro_filepath(db_path, flags=self._fd_open_flags)
    except OSError as e:
        if e.errno != ENOENT:
            # try again without noatime
            try:
                return file_contents_ro_filepath(db_path)
            except OSError as new_e:
                raise BadObject(sha) from new_e
            # didn't work because of our flag, don't try it again
            self._fd_open_flags = 0
        else:
            raise BadObject(sha) from e
        # END handle error
    # END exception handling
|
(self, sha)
|
722,857 |
gitdb.db.loose
|
has_object
| null |
def has_object(self, sha):
    try:
        self.readable_db_object_path(bin_to_hex(sha))
        return True
    except BadObject:
        return False
    # END check existence
|
(self, sha)
|
722,858 |
gitdb.db.loose
|
info
| null |
def info(self, sha):
    m = self._map_loose_object(sha)
    try:
        typ, size = loose_object_header_info(m)
        return OInfo(sha, typ, size)
    finally:
        if hasattr(m, 'close'):
            m.close()
    # END assure release of system resources
|
(self, sha)
|
722,859 |
gitdb.db.loose
|
object_path
|
:return: path at which the object with the given hexsha would be stored,
relative to the database root
|
def object_path(self, hexsha):
    """
    :return: path at which the object with the given hexsha would be stored,
        relative to the database root"""
    return join(hexsha[:2], hexsha[2:])
|
(self, hexsha)
|
722,860 |
gitdb.db.base
|
ostream
|
Return the output stream
:return: overridden output stream this instance will write to, or None
if it will write to the default stream
|
def ostream(self):
    """
    Return the output stream
    :return: overridden output stream this instance will write to, or None
        if it will write to the default stream"""
    return self._ostream
|
(self)
|
722,861 |
gitdb.db.loose
|
partial_to_complete_sha_hex
|
:return: 20 byte binary sha1 string which matches the given name uniquely
:param partial_hexsha: hexadecimal partial name (bytes or ascii string)
:raise AmbiguousObjectName:
:raise BadObject:
|
def partial_to_complete_sha_hex(self, partial_hexsha):
    """:return: 20 byte binary sha1 string which matches the given name uniquely
    :param partial_hexsha: hexadecimal partial name (bytes or ascii string)
    :raise AmbiguousObjectName:
    :raise BadObject:"""
    candidate = None
    for binsha in self.sha_iter():
        if bin_to_hex(binsha).startswith(force_bytes(partial_hexsha)):
            # it can't ever find the same object twice
            if candidate is not None:
                raise AmbiguousObjectName(partial_hexsha)
            candidate = binsha
    # END for each object

    if candidate is None:
        raise BadObject(partial_hexsha)
    return candidate
|
(self, partial_hexsha)
|
722,862 |
gitdb.db.loose
|
readable_db_object_path
|
:return: readable object path to the object identified by hexsha
:raise BadObject: If the object file does not exist
|
def readable_db_object_path(self, hexsha):
    """
    :return: readable object path to the object identified by hexsha
    :raise BadObject: If the object file does not exist"""
    try:
        return self._hexsha_to_file[hexsha]
    except KeyError:
        pass
    # END ignore cache misses

    # try filesystem
    path = self.db_path(self.object_path(hexsha))
    if exists(path):
        self._hexsha_to_file[hexsha] = path
        return path
    # END handle cache
    raise BadObject(hexsha)
|
(self, hexsha)
|
722,864 |
gitdb.db.loose
|
set_ostream
|
:raise TypeError: if the stream does not support the Sha1Writer interface
|
def set_ostream(self, stream):
    """:raise TypeError: if the stream does not support the Sha1Writer interface"""
    if stream is not None and not isinstance(stream, Sha1Writer):
        raise TypeError("Output stream must support the %s interface" % Sha1Writer.__name__)
    return super().set_ostream(stream)
|
(self, stream)
|
722,865 |
gitdb.db.loose
|
sha_iter
| null |
def sha_iter(self):
    # find all files which look like an object, extract sha from there
    for root, dirs, files in os.walk(self.root_path()):
        root_base = basename(root)
        if len(root_base) != 2:
            continue

        for f in files:
            if len(f) != 38:
                continue
            yield hex_to_bin(root_base + f)
        # END for each file
    # END for each walk iteration
|
(self)
|
722,866 |
gitdb.db.loose
|
size
| null |
def size(self):
    return len(tuple(self.sha_iter()))
|
(self)
|
722,867 |
gitdb.db.loose
|
store
|
note: The sha we produce will be hex by nature
|
def store(self, istream):
    """note: The sha we produce will be hex by nature"""
    tmp_path = None
    writer = self.ostream()
    if writer is None:
        # open a tmp file to write the data to
        fd, tmp_path = tempfile.mkstemp(prefix='obj', dir=self._root_path)

        if istream.binsha is None:
            writer = FDCompressedSha1Writer(fd)
        else:
            writer = FDStream(fd)
        # END handle direct stream copies
    # END handle custom writer

    try:
        try:
            if istream.binsha is not None:
                # copy as much as possible, the actual uncompressed item size might
                # be smaller than the compressed version
                stream_copy(istream.read, writer.write, sys.maxsize, self.stream_chunk_size)
            else:
                # write object with header, we have to make a new one
                write_object(istream.type, istream.size, istream.read, writer.write,
                             chunk_size=self.stream_chunk_size)
            # END handle direct stream copies
        finally:
            if tmp_path:
                writer.close()
        # END assure target stream is closed
    except:
        if tmp_path:
            os.remove(tmp_path)
        raise
    # END assure tmpfile removal on error

    hexsha = None
    if istream.binsha:
        hexsha = istream.hexsha
    else:
        hexsha = writer.sha(as_hex=True)
    # END handle sha

    if tmp_path:
        obj_path = self.db_path(self.object_path(hexsha))
        obj_dir = dirname(obj_path)
        os.makedirs(obj_dir, exist_ok=True)
        # END handle destination directory
        # rename onto existing doesn't work on NTFS
        if isfile(obj_path):
            remove(tmp_path)
        else:
            rename(tmp_path, obj_path)
        # end rename only if needed

        # make sure it's readable for all! It started out as a rw-only tmp file,
        # but needs to be r--r--r--
        chmod(obj_path, self.new_objects_mode)
    # END handle dry_run

    istream.binsha = hex_to_bin(hexsha)
    return istream
|
(self, istream)
|
722,868 |
gitdb.db.loose
|
stream
| null |
def stream(self, sha):
    m = self._map_loose_object(sha)
    type, size, stream = DecompressMemMapReader.new(m, close_on_deletion=True)
    return OStream(sha, type, size, stream)
|
(self, sha)
|
722,869 |
gitdb.db.mem
|
MemoryDB
|
A memory database stores everything to memory, providing fast IO and object
retrieval. It should be used to buffer results and obtain SHAs before writing
them to the actual physical storage, as it allows querying whether an object
already exists in the target storage before introducing actual IO
|
class MemoryDB(ObjectDBR, ObjectDBW):
    """A memory database stores everything to memory, providing fast IO and object
    retrieval. It should be used to buffer results and obtain SHAs before writing
    them to the actual physical storage, as it allows querying whether an object
    already exists in the target storage before introducing actual IO"""

    def __init__(self):
        super().__init__()
        self._db = LooseObjectDB("path/doesnt/matter")

        # maps 20 byte shas to their OStream objects
        self._cache = dict()

    def set_ostream(self, stream):
        raise UnsupportedOperation("MemoryDBs always stream into memory")

    def store(self, istream):
        zstream = ZippedStoreShaWriter()
        self._db.set_ostream(zstream)

        istream = self._db.store(istream)
        zstream.close()  # close to flush
        zstream.seek(0)

        # don't provide a size, the stream is written in object format, hence the
        # header needs decompression
        decomp_stream = DecompressMemMapReader(zstream.getvalue(), close_on_deletion=False)
        self._cache[istream.binsha] = OStream(istream.binsha, istream.type, istream.size, decomp_stream)

        return istream

    def has_object(self, sha):
        return sha in self._cache

    def info(self, sha):
        # we always return streams, which are infos as well
        return self.stream(sha)

    def stream(self, sha):
        try:
            ostream = self._cache[sha]
            # rewind stream for the next one to read
            ostream.stream.seek(0)
            return ostream
        except KeyError as e:
            raise BadObject(sha) from e
        # END exception handling

    def size(self):
        return len(self._cache)

    def sha_iter(self):
        return self._cache.keys()

    #{ Interface
    def stream_copy(self, sha_iter, odb):
        """Copy the streams as identified by shas yielded by sha_iter into the given odb
        The streams will be copied directly
        **Note:** the object will only be written if it did not exist in the target db
        :return: amount of streams actually copied into odb. If smaller than the amount
            of input shas, one or more objects did already exist in odb"""
        count = 0
        for sha in sha_iter:
            if odb.has_object(sha):
                continue
            # END check object existence

            ostream = self.stream(sha)
            # compressed data including header
            sio = BytesIO(ostream.stream.data())
            istream = IStream(ostream.type, ostream.size, sio, sha)

            odb.store(istream)
            count += 1
        # END for each sha
        return count
    #} END interface
|
()
|
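A short sketch of the buffering workflow the docstring describes: store into memory first, then push only missing objects to a persistent database via stream_copy (shown in the rows below). The b'blob' type string and the on-disk database variable are assumptions of this example:

from io import BytesIO
from gitdb.base import IStream
from gitdb.db.mem import MemoryDB

mdb = MemoryDB()
data = b"buffered object"
istream = mdb.store(IStream(b"blob", len(data), BytesIO(data)))
assert mdb.has_object(istream.binsha)

# later, against some on-disk odb (hypothetical variable):
# copied = mdb.stream_copy(mdb.sha_iter(), on_disk_odb)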
722,871 |
gitdb.db.mem
|
__init__
| null |
def __init__(self):
    super().__init__()
    self._db = LooseObjectDB("path/doesnt/matter")

    # maps 20 byte shas to their OStream objects
    self._cache = dict()
|
(self)
|
722,872 |
gitdb.db.mem
|
has_object
| null |
def has_object(self, sha):
    return sha in self._cache
|
(self, sha)
|
722,873 |
gitdb.db.mem
|
info
| null |
def info(self, sha):
    # we always return streams, which are infos as well
    return self.stream(sha)
|
(self, sha)
|
722,875 |
gitdb.db.mem
|
set_ostream
| null |
def set_ostream(self, stream):
    raise UnsupportedOperation("MemoryDBs always stream into memory")
|
(self, stream)
|
722,876 |
gitdb.db.mem
|
sha_iter
| null |
def sha_iter(self):
    return self._cache.keys()
|
(self)
|
722,877 |
gitdb.db.mem
|
size
| null |
def size(self):
    return len(self._cache)
|
(self)
|
722,878 |
gitdb.db.mem
|
store
| null |
def store(self, istream):
    zstream = ZippedStoreShaWriter()
    self._db.set_ostream(zstream)

    istream = self._db.store(istream)
    zstream.close()  # close to flush
    zstream.seek(0)

    # don't provide a size, the stream is written in object format, hence the
    # header needs decompression
    decomp_stream = DecompressMemMapReader(zstream.getvalue(), close_on_deletion=False)
    self._cache[istream.binsha] = OStream(istream.binsha, istream.type, istream.size, decomp_stream)

    return istream
|
(self, istream)
|
722,879 |
gitdb.db.mem
|
stream
| null |
def stream(self, sha):
    try:
        ostream = self._cache[sha]
        # rewind stream for the next one to read
        ostream.stream.seek(0)
        return ostream
    except KeyError as e:
        raise BadObject(sha) from e
    # END exception handling
|
(self, sha)
|
722,880 |
gitdb.db.mem
|
stream_copy
|
Copy the streams as identified by shas yielded by sha_iter into the given odb
The streams will be copied directly
**Note:** the object will only be written if it did not exist in the target db
:return: amount of streams actually copied into odb. If smaller than the amount
of input shas, one or more objects did already exist in odb
|
def stream_copy(self, sha_iter, odb):
    """Copy the streams as identified by shas yielded by sha_iter into the given odb
    The streams will be copied directly
    **Note:** the object will only be written if it did not exist in the target db
    :return: amount of streams actually copied into odb. If smaller than the amount
        of input shas, one or more objects did already exist in odb"""
    count = 0
    for sha in sha_iter:
        if odb.has_object(sha):
            continue
        # END check object existence

        ostream = self.stream(sha)
        # compressed data including header
        sio = BytesIO(ostream.stream.data())
        istream = IStream(ostream.type, ostream.size, sio, sha)

        odb.store(istream)
        count += 1
    # END for each sha
    return count
|
(self, sha_iter, odb)
|
722,881 |
gitdb.stream
|
NullStream
|
A stream that does nothing but provide a stream interface.
Use it like /dev/null
|
class NullStream:
    """A stream that does nothing but provide a stream interface.
    Use it like /dev/null"""
    __slots__ = tuple()

    def read(self, size=0):
        return ''

    def close(self):
        pass

    def write(self, data):
        return len(data)
|
()
|
722,883 |
gitdb.stream
|
read
| null |
def read(self, size=0):
    return ''
|
(self, size=0)
|
722,884 |
gitdb.stream
|
write
| null |
def write(self, data):
    return len(data)
|
(self, data)
|
722,885 |
gitdb.base
|
ODeltaPackInfo
|
Adds delta specific information,
Either the 20 byte sha which points to some object in the database,
or the negative offset from the pack_offset, so that pack_offset - delta_info yields
the pack offset of the base object
|
class ODeltaPackInfo(OPackInfo):
    """Adds delta specific information,
    Either the 20 byte sha which points to some object in the database,
    or the negative offset from the pack_offset, so that pack_offset - delta_info yields
    the pack offset of the base object"""
    __slots__ = tuple()

    def __new__(cls, packoffset, type, size, delta_info):
        return tuple.__new__(cls, (packoffset, type, size, delta_info))

    #{ Interface
    @property
    def delta_info(self):
        return self[3]
    #} END interface
|
(packoffset, type, size, delta_info)
|
722,886 |
gitdb.base
|
__init__
| null |
def __init__(self, *args):
    tuple.__init__(self)
|
(self, *args)
|
722,887 |
gitdb.base
|
__new__
| null |
def __new__(cls, packoffset, type, size, delta_info):
    return tuple.__new__(cls, (packoffset, type, size, delta_info))
|
(cls, packoffset, type, size, delta_info)
|
722,888 |
gitdb.base
|
ODeltaPackStream
|
Provides a stream outputting the uncompressed offset delta information
|
class ODeltaPackStream(ODeltaPackInfo):
    """Provides a stream outputting the uncompressed offset delta information"""
    __slots__ = tuple()

    def __new__(cls, packoffset, type, size, delta_info, stream):
        return tuple.__new__(cls, (packoffset, type, size, delta_info, stream))

    #{ Stream Reader Interface
    def read(self, size=-1):
        return self[4].read(size)

    @property
    def stream(self):
        return self[4]
    #} END stream reader interface
|
(packoffset, type, size, delta_info, stream)
|
722,890 |
gitdb.base
|
__new__
| null |
def __new__(cls, packoffset, type, size, delta_info, stream):
    return tuple.__new__(cls, (packoffset, type, size, delta_info, stream))
|
(cls, packoffset, type, size, delta_info, stream)
|
722,891 |
gitdb.base
|
read
| null |
def read(self, size=-1):
    return self[4].read(size)
|
(self, size=-1)
|
722,892 |
gitdb.base
|
OInfo
|
Carries information about an object in an ODB, providing information
about the binary sha of the object, the type_string as well as the uncompressed size
in bytes.
It can be accessed using tuple notation and using attribute access notation::
    assert dbi[0] == dbi.binsha
    assert dbi[1] == dbi.type
    assert dbi[2] == dbi.size
The type is designed to be as lightweight as possible.
|
class OInfo(tuple):
    """Carries information about an object in an ODB, providing information
    about the binary sha of the object, the type_string as well as the uncompressed size
    in bytes.
    It can be accessed using tuple notation and using attribute access notation::
        assert dbi[0] == dbi.binsha
        assert dbi[1] == dbi.type
        assert dbi[2] == dbi.size
    The type is designed to be as lightweight as possible."""
    __slots__ = tuple()

    def __new__(cls, sha, type, size):
        return tuple.__new__(cls, (sha, type, size))

    def __init__(self, *args):
        tuple.__init__(self)

    #{ Interface
    @property
    def binsha(self):
        """:return: our sha as binary, 20 bytes"""
        return self[0]

    @property
    def hexsha(self):
        """:return: our sha, hex encoded, 40 bytes"""
        return bin_to_hex(self[0])

    @property
    def type(self):
        return self[1]

    @property
    def type_id(self):
        return type_to_type_id_map[self[1]]

    @property
    def size(self):
        return self[2]
    #} END interface
|
(sha, type, size)
|
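The equivalence claimed in the docstring, spelled out; the 20-byte sha below is fabricated for illustration only:

from gitdb.base import OInfo

dbi = OInfo(b"\x00" * 20, b"blob", 42)
assert dbi[0] == dbi.binsha
assert dbi[1] == dbi.type
assert dbi[2] == dbi.size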
722,894 |
gitdb.base
|
__new__
| null |
def __new__(cls, sha, type, size):
    return tuple.__new__(cls, (sha, type, size))
|
(cls, sha, type, size)
|
722,895 |
gitdb.base
|
OPackInfo
|
As OInfo, but provides a type_id property to retrieve the numerical type id, and
does not include a sha.
Additionally, the pack_offset is the absolute offset into the packfile at which
all object information is located. The data_offset property points to the absolute
location in the pack at which that actual data stream can be found.
|
class OPackInfo(tuple):
    """As OInfo, but provides a type_id property to retrieve the numerical type id, and
    does not include a sha.
    Additionally, the pack_offset is the absolute offset into the packfile at which
    all object information is located. The data_offset property points to the absolute
    location in the pack at which that actual data stream can be found."""
    __slots__ = tuple()

    def __new__(cls, packoffset, type, size):
        return tuple.__new__(cls, (packoffset, type, size))

    def __init__(self, *args):
        tuple.__init__(self)

    #{ Interface
    @property
    def pack_offset(self):
        return self[0]

    @property
    def type(self):
        return type_id_to_type_map[self[1]]

    @property
    def type_id(self):
        return self[1]

    @property
    def size(self):
        return self[2]
    #} END interface
|
(packoffset, type, size)
|
722,897 |
gitdb.base
|
__new__
| null |
def __new__(cls, packoffset, type, size):
    return tuple.__new__(cls, (packoffset, type, size))
|
(cls, packoffset, type, size)
|
722,898 |
gitdb.base
|
OPackStream
|
Next to pack object information, a stream outputting an undeltified base object
is provided
|
class OPackStream(OPackInfo):
    """Next to pack object information, a stream outputting an undeltified base object
    is provided"""
    __slots__ = tuple()

    def __new__(cls, packoffset, type, size, stream, *args):
        """Helps with the initialization of subclasses"""
        return tuple.__new__(cls, (packoffset, type, size, stream))

    #{ Stream Reader Interface
    def read(self, size=-1):
        return self[3].read(size)

    @property
    def stream(self):
        return self[3]
    #} END stream reader interface
|
(packoffset, type, size, stream, *args)
|
722,900 |
gitdb.base
|
__new__
|
Helps with the initialization of subclasses
|
def __new__(cls, packoffset, type, size, stream, *args):
    """Helps with the initialization of subclasses"""
    return tuple.__new__(cls, (packoffset, type, size, stream))
|
(cls, packoffset, type, size, stream, *args)
|
722,901 |
gitdb.base
|
read
| null |
def read(self, size=-1):
    return self[3].read(size)
|
(self, size=-1)
|
722,902 |
gitdb.base
|
OStream
|
Base for object streams retrieved from the database, providing additional
information about the stream.
Generally, ODB streams are read-only as objects are immutable
|
class OStream(OInfo):
    """Base for object streams retrieved from the database, providing additional
    information about the stream.
    Generally, ODB streams are read-only as objects are immutable"""
    __slots__ = tuple()

    def __new__(cls, sha, type, size, stream, *args, **kwargs):
        """Helps with the initialization of subclasses"""
        return tuple.__new__(cls, (sha, type, size, stream))

    def __init__(self, *args, **kwargs):
        tuple.__init__(self)

    #{ Stream Reader Interface
    def read(self, size=-1):
        return self[3].read(size)

    @property
    def stream(self):
        return self[3]
    #} END stream reader interface
|
(sha, type, size, stream, *args, **kwargs)
|
722,903 |
gitdb.base
|
__init__
| null |
def __init__(self, *args, **kwargs):
    tuple.__init__(self)
|
(self, *args, **kwargs)
|
722,904 |
gitdb.base
|
__new__
|
Helps with the initialization of subclasses
|
def __new__(cls, sha, type, size, stream, *args, **kwargs):
    """Helps with the initialization of subclasses"""
    return tuple.__new__(cls, (sha, type, size, stream))
|
(cls, sha, type, size, stream, *args, **kwargs)
|
722,906 |
gitdb.db.base
|
ObjectDBR
|
Defines an interface for object database lookup.
Objects are identified either by their 20 byte bin sha
|
class ObjectDBR:
    """Defines an interface for object database lookup.
    Objects are identified either by their 20 byte bin sha"""

    def __contains__(self, sha):
        return self.has_object(sha)

    #{ Query Interface
    def has_object(self, sha):
        """
        Whether the object identified by the given 20 bytes
        binary sha is contained in the database
        :return: True if the object identified by the given 20 bytes
            binary sha is contained in the database"""
        raise NotImplementedError("To be implemented in subclass")

    def info(self, sha):
        """ :return: OInfo instance
        :param sha: bytes binary sha
        :raise BadObject:"""
        raise NotImplementedError("To be implemented in subclass")

    def stream(self, sha):
        """:return: OStream instance
        :param sha: 20 bytes binary sha
        :raise BadObject:"""
        raise NotImplementedError("To be implemented in subclass")

    def size(self):
        """:return: amount of objects in this database"""
        raise NotImplementedError()

    def sha_iter(self):
        """Return iterator yielding 20 byte shas for all objects in this database"""
        raise NotImplementedError()
    #} END query interface
|
()
|
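A minimal sketch of satisfying the read interface above over a plain dict; DictDB is purely illustrative and not part of gitdb:

from gitdb.db.base import ObjectDBR
from gitdb.exc import BadObject

class DictDB(ObjectDBR):
    """Toy backend mapping 20-byte binshas to pre-built OInfo records;
    stream() is omitted for brevity, a full backend would provide it too"""

    def __init__(self, objects):
        self._objects = objects  # maps binsha -> OInfo record

    def has_object(self, sha):
        return sha in self._objects

    def info(self, sha):
        try:
            return self._objects[sha]
        except KeyError as e:
            raise BadObject(sha) from e

    def size(self):
        return len(self._objects)

    def sha_iter(self):
        return iter(self._objects)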
722,908 |
gitdb.db.base
|
has_object
|
Whether the object identified by the given 20 bytes
binary sha is contained in the database
:return: True if the object identified by the given 20 bytes
binary sha is contained in the database
|
def has_object(self, sha):
    """
    Whether the object identified by the given 20 bytes
    binary sha is contained in the database
    :return: True if the object identified by the given 20 bytes
        binary sha is contained in the database"""
    raise NotImplementedError("To be implemented in subclass")
|
(self, sha)
|
722,909 |
gitdb.db.base
|
info
|
:return: OInfo instance
:param sha: bytes binary sha
:raise BadObject:
|
def info(self, sha):
    """ :return: OInfo instance
    :param sha: bytes binary sha
    :raise BadObject:"""
    raise NotImplementedError("To be implemented in subclass")
|
(self, sha)
|
722,910 |
gitdb.db.base
|
sha_iter
|
Return iterator yielding 20 byte shas for all objects in this database
|
def sha_iter(self):
    """Return iterator yielding 20 byte shas for all objects in this database"""
    raise NotImplementedError()
|
(self)
|
722,911 |
gitdb.db.base
|
size
|
:return: amount of objects in this database
|
def size(self):
    """:return: amount of objects in this database"""
    raise NotImplementedError()
|
(self)
|
722,912 |
gitdb.db.base
|
stream
|
:return: OStream instance
:param sha: 20 bytes binary sha
:raise BadObject:
|
def stream(self, sha):
    """:return: OStream instance
    :param sha: 20 bytes binary sha
    :raise BadObject:"""
    raise NotImplementedError("To be implemented in subclass")
|
(self, sha)
|
722,913 |
gitdb.db.base
|
ObjectDBW
|
Defines an interface to create objects in the database
|
class ObjectDBW:
    """Defines an interface to create objects in the database"""

    def __init__(self, *args, **kwargs):
        self._ostream = None

    #{ Edit Interface
    def set_ostream(self, stream):
        """
        Adjusts the stream to which all data should be sent when storing new objects
        :param stream: if not None, the stream to use, if None the default stream
            will be used.
        :return: previously installed stream, or None if there was no override
        :raise TypeError: if the stream doesn't have the supported functionality"""
        cstream = self._ostream
        self._ostream = stream
        return cstream

    def ostream(self):
        """
        Return the output stream
        :return: overridden output stream this instance will write to, or None
            if it will write to the default stream"""
        return self._ostream

    def store(self, istream):
        """
        Create a new object in the database
        :return: the input istream object with its sha set to its corresponding value
        :param istream: IStream compatible instance. If its sha is already set
            to a value, the object will just be stored in our database format,
            in which case the input stream is expected to be in object format (header + contents).
        :raise IOError: if data could not be written"""
        raise NotImplementedError("To be implemented in subclass")
    #} END edit interface
|
(*args, **kwargs)
|
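The set_ostream contract above returns the previously installed stream, so a caller can temporarily redirect writes and then restore the default. A hedged sketch against LooseObjectDB, whose set_ostream additionally enforces the Sha1Writer interface; the path is illustrative:

from gitdb.db.loose import LooseObjectDB
from gitdb.stream import ZippedStoreShaWriter

ldb = LooseObjectDB("/tmp/objects")
prev = ldb.set_ostream(ZippedStoreShaWriter())
# ... store() now writes into the custom Sha1Writer instead of a tmp file ...
ldb.set_ostream(prev)  # restore the default behaviour (prev is None here)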
722,914 |
gitdb.db.base
|
__init__
| null |
def __init__(self, *args, **kwargs):
    self._ostream = None
|
(self, *args, **kwargs)
|
722,916 |
gitdb.db.base
|
set_ostream
|
Adjusts the stream to which all data should be sent when storing new objects
:param stream: if not None, the stream to use, if None the default stream
will be used.
:return: previously installed stream, or None if there was no override
:raise TypeError: if the stream doesn't have the supported functionality
|
def set_ostream(self, stream):
    """
    Adjusts the stream to which all data should be sent when storing new objects
    :param stream: if not None, the stream to use, if None the default stream
        will be used.
    :return: previously installed stream, or None if there was no override
    :raise TypeError: if the stream doesn't have the supported functionality"""
    cstream = self._ostream
    self._ostream = stream
    return cstream
|
(self, stream)
|
722,917 |
gitdb.db.base
|
store
|
Create a new object in the database
:return: the input istream object with its sha set to its corresponding value
:param istream: IStream compatible instance. If its sha is already set
to a value, the object will just be stored in our database format,
in which case the input stream is expected to be in object format (header + contents).
:raise IOError: if data could not be written
|
def store(self, istream):
    """
    Create a new object in the database
    :return: the input istream object with its sha set to its corresponding value
    :param istream: IStream compatible instance. If its sha is already set
        to a value, the object will just be stored in our database format,
        in which case the input stream is expected to be in object format (header + contents).
    :raise IOError: if data could not be written"""
    raise NotImplementedError("To be implemented in subclass")
|
(self, istream)
|
722,918 |
gitdb.db.pack
|
PackedDB
|
A database operating on a set of object packs
|
class PackedDB(FileDBBase, ObjectDBR, CachingDB, LazyMixin):
    """A database operating on a set of object packs"""

    # sort the priority list every N queries
    # Higher values are better, performance tests don't show this has
    # any effect, but it should have one
    _sort_interval = 500

    def __init__(self, root_path):
        super().__init__(root_path)
        # list of lists with three items:
        # * hits - number of times the pack was hit with a request
        # * entity - Pack entity instance
        # * sha_to_index - PackIndexFile.sha_to_index method for direct cache query
        # self._entities = list()      # lazy loaded list
        self._hit_count = 0            # amount of hits
        self._st_mtime = 0             # last modification date of our root path

    def _set_cache_(self, attr):
        if attr == '_entities':
            self._entities = list()
            self.update_cache(force=True)
        # END handle entities initialization

    def _sort_entities(self):
        self._entities.sort(key=lambda l: l[0], reverse=True)

    def _pack_info(self, sha):
        """:return: tuple(entity, index) for an item at the given sha
        :param sha: 20 or 40 byte sha
        :raise BadObject:
        **Note:** This method is not thread-safe, but may be hit in multi-threaded
            operation. The worst thing that can happen though is a counter that
            was not incremented, or the list being in the wrong order. So we save
            the time for locking here; let's see how that goes"""
        # presort ?
        if self._hit_count % self._sort_interval == 0:
            self._sort_entities()
        # END update sorting

        for item in self._entities:
            index = item[2](sha)
            if index is not None:
                item[0] += 1            # one hit for you
                self._hit_count += 1    # general hit count
                return (item[1], index)
            # END index found in pack
        # END for each item

        # no hit, see whether we have to update packs
        # NOTE: considering packs don't change very often, we save this call
        # and leave it to the super-caller to trigger that
        raise BadObject(sha)

    #{ Object DB Read
    def has_object(self, sha):
        try:
            self._pack_info(sha)
            return True
        except BadObject:
            return False
        # END exception handling

    def info(self, sha):
        entity, index = self._pack_info(sha)
        return entity.info_at_index(index)

    def stream(self, sha):
        entity, index = self._pack_info(sha)
        return entity.stream_at_index(index)

    def sha_iter(self):
        for entity in self.entities():
            index = entity.index()
            sha_by_index = index.sha
            for index in range(index.size()):
                yield sha_by_index(index)
            # END for each index
        # END for each entity

    def size(self):
        sizes = [item[1].index().size() for item in self._entities]
        return reduce(lambda x, y: x + y, sizes, 0)
    #} END object db read

    #{ object db write
    def store(self, istream):
        """Storing individual objects is not feasible as a pack is designed to
        hold multiple objects. Writing or rewriting packs for single objects is
        inefficient"""
        raise UnsupportedOperation()
    #} END object db write

    #{ Interface
    def update_cache(self, force=False):
        """
        Update our cache with the actually existing packs on disk. Add new ones,
        and remove deleted ones. We keep the unchanged ones
        :param force: If True, the cache will be updated even though the directory
            does not appear to have changed according to its modification timestamp.
        :return: True if the packs have been updated so there is new information,
            False if there was no change to the pack database"""
        stat = os.stat(self.root_path())
        if not force and stat.st_mtime <= self._st_mtime:
            return False
        # END abort early on no change
        self._st_mtime = stat.st_mtime

        # packs are supposed to be prefixed with pack- by git-convention
        # get all pack files, figure out what changed
        pack_files = set(glob.glob(os.path.join(self.root_path(), "pack-*.pack")))
        our_pack_files = {item[1].pack().path() for item in self._entities}

        # new packs
        for pack_file in (pack_files - our_pack_files):
            # init the hit-counter/priority with the size, a good measure for hit-
            # probability. It is implemented so that only 12 bytes will be read
            entity = PackEntity(pack_file)
            self._entities.append([entity.pack().size(), entity, entity.index().sha_to_index])
        # END for each new packfile

        # removed packs
        for pack_file in (our_pack_files - pack_files):
            del_index = -1
            for i, item in enumerate(self._entities):
                if item[1].pack().path() == pack_file:
                    del_index = i
                    break
                # END found index
            # END for each entity
            assert del_index != -1
            del(self._entities[del_index])
        # END for each removed pack

        # reinitialize priorities
        self._sort_entities()
        return True

    def entities(self):
        """:return: list of pack entities operated upon by this database"""
        return [item[1] for item in self._entities]

    def partial_to_complete_sha(self, partial_binsha, canonical_length):
        """:return: 20 byte sha as inferred by the given partial binary sha
        :param partial_binsha: binary sha with less than 20 bytes
        :param canonical_length: length of the corresponding canonical representation.
            It is required as binary shas cannot display whether the original hex sha
            had an odd or even number of characters
        :raise AmbiguousObjectName:
        :raise BadObject:"""
        candidate = None
        for item in self._entities:
            item_index = item[1].index().partial_sha_to_index(partial_binsha, canonical_length)
            if item_index is not None:
                sha = item[1].index().sha(item_index)
                if candidate and candidate != sha:
                    raise AmbiguousObjectName(partial_binsha)
                candidate = sha
            # END handle full sha could be found
        # END for each entity

        if candidate:
            return candidate

        # still not found ?
        raise BadObject(partial_binsha)
    #} END interface
|
(root_path)
|
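A query sketch for the pack database above; the pack directory path is a placeholder, and update_cache(force=True) rescans pack-*.pack files as the docstring documents:

from gitdb.db.pack import PackedDB

pdb = PackedDB("/path/to/repo/.git/objects/pack")  # hypothetical
pdb.update_cache(force=True)
print(pdb.size(), "objects across", len(pdb.entities()), "packs")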
722,921 |
gitdb.db.pack
|
__init__
| null |
def __init__(self, root_path):
    super().__init__(root_path)
    # list of lists with three items:
    # * hits - number of times the pack was hit with a request
    # * entity - Pack entity instance
    # * sha_to_index - PackIndexFile.sha_to_index method for direct cache query
    # self._entities = list()      # lazy loaded list
    self._hit_count = 0            # amount of hits
    self._st_mtime = 0             # last modification date of our root path
|
(self, root_path)
|
722,922 |
gitdb.db.pack
|
_pack_info
|
:return: tuple(entity, index) for an item at the given sha
:param sha: 20 or 40 byte sha
:raise BadObject:
**Note:** This method is not thread-safe, but may be hit in multi-threaded
operation. The worst thing that can happen though is a counter that
was not incremented, or the list being in the wrong order. So we save
the time for locking here; let's see how that goes
|
def _pack_info(self, sha):
    """:return: tuple(entity, index) for an item at the given sha
    :param sha: 20 or 40 byte sha
    :raise BadObject:
    **Note:** This method is not thread-safe, but may be hit in multi-threaded
        operation. The worst thing that can happen though is a counter that
        was not incremented, or the list being in the wrong order. So we save
        the time for locking here; let's see how that goes"""
    # presort ?
    if self._hit_count % self._sort_interval == 0:
        self._sort_entities()
    # END update sorting

    for item in self._entities:
        index = item[2](sha)
        if index is not None:
            item[0] += 1            # one hit for you
            self._hit_count += 1    # general hit count
            return (item[1], index)
        # END index found in pack
    # END for each item

    # no hit, see whether we have to update packs
    # NOTE: considering packs don't change very often, we save this call
    # and leave it to the super-caller to trigger that
    raise BadObject(sha)
|
(self, sha)
|
722,923 |
gitdb.db.pack
|
_set_cache_
| null |
def _set_cache_(self, attr):
    if attr == '_entities':
        self._entities = list()
        self.update_cache(force=True)
    # END handle entities initialization
|
(self, attr)
|
722,924 |
gitdb.db.pack
|
_sort_entities
| null |
def _sort_entities(self):
    self._entities.sort(key=lambda l: l[0], reverse=True)
|
(self)
|
722,926 |
gitdb.db.pack
|
entities
|
:return: list of pack entities operated upon by this database
|
def entities(self):
    """:return: list of pack entities operated upon by this database"""
    return [item[1] for item in self._entities]
|
(self)
|
722,927 |
gitdb.db.pack
|
has_object
| null |
def has_object(self, sha):
    try:
        self._pack_info(sha)
        return True
    except BadObject:
        return False
    # END exception handling
|
(self, sha)
|
722,928 |
gitdb.db.pack
|
info
| null |
def info(self, sha):
    entity, index = self._pack_info(sha)
    return entity.info_at_index(index)
|
(self, sha)
|
722,929 |
gitdb.db.pack
|
partial_to_complete_sha
|
:return: 20 byte sha as inferred by the given partial binary sha
:param partial_binsha: binary sha with less than 20 bytes
:param canonical_length: length of the corresponding canonical representation.
It is required as binary shas cannot display whether the original hex sha
had an odd or even number of characters
:raise AmbiguousObjectName:
:raise BadObject:
|
def partial_to_complete_sha(self, partial_binsha, canonical_length):
    """:return: 20 byte sha as inferred by the given partial binary sha
    :param partial_binsha: binary sha with less than 20 bytes
    :param canonical_length: length of the corresponding canonical representation.
        It is required as binary shas cannot display whether the original hex sha
        had an odd or even number of characters
    :raise AmbiguousObjectName:
    :raise BadObject:"""
    candidate = None
    for item in self._entities:
        item_index = item[1].index().partial_sha_to_index(partial_binsha, canonical_length)
        if item_index is not None:
            sha = item[1].index().sha(item_index)
            if candidate and candidate != sha:
                raise AmbiguousObjectName(partial_binsha)
            candidate = sha
        # END handle full sha could be found
    # END for each entity

    if candidate:
        return candidate

    # still not found ?
    raise BadObject(partial_binsha)
|
(self, partial_binsha, canonical_length)
|
722,931 |
gitdb.db.pack
|
sha_iter
| null |
def sha_iter(self):
    for entity in self.entities():
        index = entity.index()
        sha_by_index = index.sha
        for index in range(index.size()):
            yield sha_by_index(index)
        # END for each index
    # END for each entity
|
(self)
|
722,932 |
gitdb.db.pack
|
size
| null |
def size(self):
    sizes = [item[1].index().size() for item in self._entities]
    return reduce(lambda x, y: x + y, sizes, 0)
|
(self)
|